mm: list_lru: rename list_lru_per_memcg to list_lru_memcg
The name list_lru_memcg was previously taken and became free as of the last commit. Rename list_lru_per_memcg to list_lru_memcg since that name is more concise. Link: https://lkml.kernel.org/r/20220228122126.37293-16-songmuchun@bytedance.com Signed-off-by: Muchun Song <songmuchun@bytedance.com> Cc: Alex Shi <alexs@kernel.org> Cc: Anna Schumaker <Anna.Schumaker@Netapp.com> Cc: Chao Yu <chao@kernel.org> Cc: Dave Chinner <david@fromorbit.com> Cc: Fam Zheng <fam.zheng@bytedance.com> Cc: Jaegeuk Kim <jaegeuk@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Kari Argillander <kari.argillander@gmail.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Qi Zheng <zhengqi.arch@bytedance.com> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Shakeel Butt <shakeelb@google.com> Cc: Theodore Ts'o <tytso@mit.edu> Cc: Trond Myklebust <trond.myklebust@hammerspace.com> Cc: Vladimir Davydov <vdavydov.dev@gmail.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Wei Yang <richard.weiyang@gmail.com> Cc: Xiongchun Duan <duanxiongchun@bytedance.com> Cc: Yang Shi <shy828301@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
be740503ed
commit
d70110704d
2 changed files with 10 additions and 10 deletions
|
@@ -32,7 +32,7 @@ struct list_lru_one {
|
||||||
long nr_items;
|
long nr_items;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct list_lru_per_memcg {
|
struct list_lru_memcg {
|
||||||
struct rcu_head rcu;
|
struct rcu_head rcu;
|
||||||
/* array of per cgroup per node lists, indexed by node id */
|
/* array of per cgroup per node lists, indexed by node id */
|
||||||
struct list_lru_one node[];
|
struct list_lru_one node[];
|
||||||
|
|
|
@@ -53,7 +53,7 @@ static inline struct list_lru_one *
|
||||||
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
|
list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
|
||||||
{
|
{
|
||||||
if (list_lru_memcg_aware(lru) && idx >= 0) {
|
if (list_lru_memcg_aware(lru) && idx >= 0) {
|
||||||
struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx);
|
struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
|
||||||
|
|
||||||
return mlru ? &mlru->node[nid] : NULL;
|
return mlru ? &mlru->node[nid] : NULL;
|
||||||
}
|
}
|
||||||
|
@@ -306,7 +306,7 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
|
||||||
|
|
||||||
#ifdef CONFIG_MEMCG_KMEM
|
#ifdef CONFIG_MEMCG_KMEM
|
||||||
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
|
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
|
||||||
struct list_lru_per_memcg *mlru;
|
struct list_lru_memcg *mlru;
|
||||||
unsigned long index;
|
unsigned long index;
|
||||||
|
|
||||||
xa_for_each(&lru->xa, index, mlru) {
|
xa_for_each(&lru->xa, index, mlru) {
|
||||||
|
@@ -335,10 +335,10 @@ static void init_one_lru(struct list_lru_one *l)
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_MEMCG_KMEM
|
#ifdef CONFIG_MEMCG_KMEM
|
||||||
static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
|
static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
|
||||||
{
|
{
|
||||||
int nid;
|
int nid;
|
||||||
struct list_lru_per_memcg *mlru;
|
struct list_lru_memcg *mlru;
|
||||||
|
|
||||||
mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
|
mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
|
||||||
if (!mlru)
|
if (!mlru)
|
||||||
|
@@ -352,7 +352,7 @@ static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
|
||||||
|
|
||||||
static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
|
static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
|
||||||
{
|
{
|
||||||
struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
|
struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The __list_lru_walk_one() can walk the list of this node.
|
* The __list_lru_walk_one() can walk the list of this node.
|
||||||
|
@@ -374,7 +374,7 @@ static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
|
||||||
static void memcg_destroy_list_lru(struct list_lru *lru)
|
static void memcg_destroy_list_lru(struct list_lru *lru)
|
||||||
{
|
{
|
||||||
XA_STATE(xas, &lru->xa, 0);
|
XA_STATE(xas, &lru->xa, 0);
|
||||||
struct list_lru_per_memcg *mlru;
|
struct list_lru_memcg *mlru;
|
||||||
|
|
||||||
if (!list_lru_memcg_aware(lru))
|
if (!list_lru_memcg_aware(lru))
|
||||||
return;
|
return;
|
||||||
|
@@ -475,7 +475,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
|
||||||
int i;
|
int i;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
struct list_lru_memcg_table {
|
struct list_lru_memcg_table {
|
||||||
struct list_lru_per_memcg *mlru;
|
struct list_lru_memcg *mlru;
|
||||||
struct mem_cgroup *memcg;
|
struct mem_cgroup *memcg;
|
||||||
} *table;
|
} *table;
|
||||||
XA_STATE(xas, &lru->xa, 0);
|
XA_STATE(xas, &lru->xa, 0);
|
||||||
|
@@ -491,7 +491,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
|
||||||
/*
|
/*
|
||||||
* Because the list_lru can be reparented to the parent cgroup's
|
* Because the list_lru can be reparented to the parent cgroup's
|
||||||
* list_lru, we should make sure that this cgroup and all its
|
* list_lru, we should make sure that this cgroup and all its
|
||||||
* ancestors have allocated list_lru_per_memcg.
|
* ancestors have allocated list_lru_memcg.
|
||||||
*/
|
*/
|
||||||
for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
|
for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
|
||||||
if (memcg_list_lru_allocated(memcg, lru))
|
if (memcg_list_lru_allocated(memcg, lru))
|
||||||
|
@@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
|
||||||
xas_lock_irqsave(&xas, flags);
|
xas_lock_irqsave(&xas, flags);
|
||||||
while (i--) {
|
while (i--) {
|
||||||
int index = READ_ONCE(table[i].memcg->kmemcg_id);
|
int index = READ_ONCE(table[i].memcg->kmemcg_id);
|
||||||
struct list_lru_per_memcg *mlru = table[i].mlru;
|
struct list_lru_memcg *mlru = table[i].mlru;
|
||||||
|
|
||||||
xas_set(&xas, index);
|
xas_set(&xas, index);
|
||||||
retry:
|
retry:
|
||||||
|
|
Loading…
Reference in a new issue