mm: workingset: add vmstat counter for shadow nodes
authorJohannes Weiner <hannes@cmpxchg.org>
Fri, 26 Oct 2018 22:06:39 +0000 (15:06 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Oct 2018 23:26:33 +0000 (16:26 -0700)
Make it easier to catch bugs in the shadow node shrinker by adding a
counter for the shadow nodes in circulation.

[akpm@linux-foundation.org: assert that irqs are disabled, for __inc_lruvec_page_state()]
[akpm@linux-foundation.org: s/WARN_ON_ONCE/VM_WARN_ON_ONCE/, per Johannes]
Link: http://lkml.kernel.org/r/20181009184732.762-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mmzone.h
mm/vmstat.c
mm/workingset.c

index ba51d5bf7af106aa5215ed103e08d3ea92a63c3e..9f0caccd58332011e8b256abcfc14185e1e3fb35 100644 (file)
@@ -161,6 +161,7 @@ enum node_stat_item {
        NR_SLAB_UNRECLAIMABLE,
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
+       WORKINGSET_NODES,
        WORKINGSET_REFAULT,
        WORKINGSET_ACTIVATE,
        WORKINGSET_RESTORE,
index d918f6192d15ab857dd42748e81ae79d42be797c..dab53430f63c2958aacc66e60c14d9840611245a 100644 (file)
@@ -1143,6 +1143,7 @@ const char * const vmstat_text[] = {
        "nr_slab_unreclaimable",
        "nr_isolated_anon",
        "nr_isolated_file",
+       "workingset_nodes",
        "workingset_refault",
        "workingset_activate",
        "workingset_restore",
index 5a72c9d5e195aac81db295a39d08a45ca796d69a..7e6ef312cea598c3308a7ad10b26861a40e2af28 100644 (file)
@@ -377,12 +377,20 @@ void workingset_update_node(struct radix_tree_node *node)
         * already where they should be. The list_empty() test is safe
         * as node->private_list is protected by the i_pages lock.
         */
+       VM_WARN_ON_ONCE(!irqs_disabled());  /* For __inc_lruvec_page_state */
+
        if (node->count && node->count == node->exceptional) {
-               if (list_empty(&node->private_list))
+               if (list_empty(&node->private_list)) {
                        list_lru_add(&shadow_nodes, &node->private_list);
+                       __inc_lruvec_page_state(virt_to_page(node),
+                                               WORKINGSET_NODES);
+               }
        } else {
-               if (!list_empty(&node->private_list))
+               if (!list_empty(&node->private_list)) {
                        list_lru_del(&shadow_nodes, &node->private_list);
+                       __dec_lruvec_page_state(virt_to_page(node),
+                                               WORKINGSET_NODES);
+               }
        }
 }
 
@@ -473,6 +481,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
        }
 
        list_lru_isolate(lru, item);
+       __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES);
+
        spin_unlock(lru_lock);
 
        /*