mm/kmemleak: use _irq lock/unlock variants in kmemleak_scan/_clear()

Patch series "mm/kmemleak: Avoid soft lockup in kmemleak_scan()", v2.

There are 3 RCU-based object iteration loops in kmemleak_scan().  Because
of the need to take RCU read lock, we can't insert cond_resched() into the
loop like other parts of the function.  As there can be millions of
objects to be scanned, it takes a while to iterate all of them.  The
kmemleak functionality is usually enabled in a debug kernel which is much
slower than a non-debug kernel.  With sufficient number of kmemleak
objects, the time to iterate them all may exceed 22s causing soft lockup.

  watchdog: BUG: soft lockup - CPU#3 stuck for 22s! [kmemleak:625]

This patch series makes changes to the 3 object iteration loops in
kmemleak_scan() to prevent them from causing soft lockup.


This patch (of 3):

kmemleak_scan() is called only from the kmemleak scan thread or from write
to the kmemleak debugfs file.  Both are in task context and so we can
directly use the simpler _irq() lock/unlock calls instead of the more
complex _irqsave/_irqrestore variants.

Similarly, kmemleak_clear() is called only from write to the kmemleak
debugfs file. The same change can be applied.

Link: https://lkml.kernel.org/r/20220614220359.59282-1-longman@redhat.com
Link: https://lkml.kernel.org/r/20220614220359.59282-2-longman@redhat.com
Signed-off-by: Waiman Long <longman@redhat.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Waiman Long 2022-06-14 18:03:57 -04:00 committed by akpm
parent 55896f935a
commit 00c155066e

View file

@ -1470,7 +1470,6 @@ static void scan_gray_list(void)
*/
static void kmemleak_scan(void)
{
unsigned long flags;
struct kmemleak_object *object;
struct zone *zone;
int __maybe_unused i;
@ -1481,7 +1480,7 @@ static void kmemleak_scan(void)
/* prepare the kmemleak_object's */
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
raw_spin_lock_irqsave(&object->lock, flags);
raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
/*
* With a few exceptions there should be a maximum of
@ -1509,7 +1508,7 @@ static void kmemleak_scan(void)
if (color_gray(object) && get_object(object))
list_add_tail(&object->gray_list, &gray_list);
raw_spin_unlock_irqrestore(&object->lock, flags);
raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();
@ -1577,14 +1576,14 @@ static void kmemleak_scan(void)
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
raw_spin_lock_irqsave(&object->lock, flags);
raw_spin_lock_irq(&object->lock);
if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
&& update_checksum(object) && get_object(object)) {
/* color it gray temporarily */
object->count = object->min_count;
list_add_tail(&object->gray_list, &gray_list);
}
raw_spin_unlock_irqrestore(&object->lock, flags);
raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();
@ -1604,7 +1603,7 @@ static void kmemleak_scan(void)
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
raw_spin_lock_irqsave(&object->lock, flags);
raw_spin_lock_irq(&object->lock);
if (unreferenced_object(object) &&
!(object->flags & OBJECT_REPORTED)) {
object->flags |= OBJECT_REPORTED;
@ -1614,7 +1613,7 @@ static void kmemleak_scan(void)
new_leaks++;
}
raw_spin_unlock_irqrestore(&object->lock, flags);
raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();
@ -1816,15 +1815,14 @@ static int dump_str_object_info(const char *str)
static void kmemleak_clear(void)
{
struct kmemleak_object *object;
unsigned long flags;
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
raw_spin_lock_irqsave(&object->lock, flags);
raw_spin_lock_irq(&object->lock);
if ((object->flags & OBJECT_REPORTED) &&
unreferenced_object(object))
__paint_it(object, KMEMLEAK_GREY);
raw_spin_unlock_irqrestore(&object->lock, flags);
raw_spin_unlock_irq(&object->lock);
}
rcu_read_unlock();