From c4a25635b60d08853a3e4eaae3ab34419a36cfa2 Mon Sep 17 00:00:00 2001
From: Mel Gorman <mgorman@techsingularity.net>
Date: Thu, 28 Jul 2016 15:46:23 -0700
Subject: mm: move vmscan writes and file write accounting to the node

As reclaim is now node-based, it follows that page write activity due to
page reclaim should also be accounted for on the node.  For consistency,
also account page writes and page dirtying on a per-node basis.

After this patch, there are a few remaining zone counters that may appear
strange but are fine.  NUMA stats are still per-zone as this is a
user-space interface that tools consume.  NR_MLOCK, NR_SLAB_*,
NR_PAGETABLE, NR_KERNEL_STACK and NR_BOUNCE are all allocations that
potentially pin low memory and cannot trivially be reclaimed on demand.
This information is still useful for debugging a page allocation failure
warning.

Link: http://lkml.kernel.org/r/1467970510-21195-21-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 mm/page-writeback.c | 6 +++---
 mm/vmscan.c         | 4 ++--
 mm/vmstat.c         | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

(limited to 'mm')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f97591d9fa00..3c02aa603f5a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2461,7 +2461,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		__inc_node_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		__inc_zone_page_state(page, NR_DIRTIED);
+		__inc_node_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
 		task_io_account_write(PAGE_SIZE);
@@ -2550,7 +2550,7 @@ void account_page_redirty(struct page *page)
 
 	wb = unlocked_inode_to_wb_begin(inode, &locked);
 	current->nr_dirtied--;
-	dec_zone_page_state(page, NR_DIRTIED);
+	dec_node_page_state(page, NR_DIRTIED);
 	dec_wb_stat(wb, WB_DIRTIED);
 	unlocked_inode_to_wb_end(inode, locked);
 }
@@ -2787,7 +2787,7 @@ int test_clear_page_writeback(struct page *page)
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		inc_zone_page_state(page, NR_WRITTEN);
+		inc_node_page_state(page, NR_WRITTEN);
 	}
 	unlock_page_memcg(page);
 	return ret;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b797afec3057..9b61a55b6e38 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -612,7 +612,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 			ClearPageReclaim(page);
 		}
 		trace_mm_vmscan_writepage(page);
-		inc_zone_page_state(page, NR_VMSCAN_WRITE);
+		inc_node_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
 
@@ -1117,7 +1117,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * except we already have the page isolated
 				 * and know it's dirty
 				 */
-				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
 				SetPageReclaim(page);
 
 				goto keep_locked;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 455392158062..bc94968400d0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -931,10 +931,6 @@ const char * const vmstat_text[] = {
 	"nr_page_table_pages",
 	"nr_kernel_stack",
 	"nr_bounce",
-	"nr_vmscan_write",
-	"nr_vmscan_immediate_reclaim",
-	"nr_dirtied",
-	"nr_written",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
 #endif
@@ -971,6 +967,10 @@ const char * const vmstat_text[] = {
 	"nr_shmem_pmdmapped",
 	"nr_anon_transparent_hugepages",
 	"nr_unstable",
+	"nr_vmscan_write",
+	"nr_vmscan_immediate_reclaim",
+	"nr_dirtied",
+	"nr_written",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
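
Since the four moved counters stay visible to user space through /proc/vmstat
(the vmstat_text[] array above feeds that file), the reordering can be
spot-checked from user space.  Below is a minimal sketch, not part of the
patch: it simply filters /proc/vmstat for the four counter names this patch
moves, and assumes nothing beyond an ordinary Linux C environment.

/*
 * Minimal userspace sketch (not part of the patch): print the four
 * counters moved from zone to node accounting, as exposed by
 * /proc/vmstat.  Build with: cc -o vmstat-moved vmstat-moved.c
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char * const names[] = {
		"nr_vmscan_write",
		"nr_vmscan_immediate_reclaim",
		"nr_dirtied",
		"nr_written",
	};
	char line[256];
	size_t i;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
			size_t len = strlen(names[i]);

			/* match "<name> <value>", not just a name prefix */
			if (!strncmp(line, names[i], len) && line[len] == ' ')
				fputs(line, stdout);
		}
	}
	fclose(f);
	return 0;
}

Note that the counter names themselves are unchanged; the patch only moves
where they are accounted (and hence where they appear in the file's
ordering), so the same sketch works on kernels before and after this series.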