kprobes: rename kprobe_enabled to kprobes_all_disarmed

Rename kprobe_enabled to kprobes_all_disarmed and invert its logic, to
avoid naming confusion with per-probe disabling.
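
The rename is mechanical: the flag's sense flips, so every test of it is inverted. A minimal before/after sketch of the pattern at a typical call site (taken from the hunks below):

	/* before: flag is set while kprobes are globally enabled */
	if (kprobe_enabled)
		arch_arm_kprobe(p);

	/* after: flag is set only when all kprobes are disarmed,
	 * so arming happens when it is NOT set */
	if (!kprobes_all_disarmed)
		arch_arm_kprobe(p);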

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Masami Hiramatsu, 2009-04-06 19:01:01 -07:00 (committed by Linus Torvalds)
parent cc00e9cfe0
commit e579abeb58


@@ -68,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 /* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -598,7 +598,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 		 * If the old_p has gone, its breakpoint has been disarmed.
 		 * We have to arm it again after preparing real kprobes.
 		 */
-		if (kprobe_enabled)
+		if (!kprobes_all_disarmed)
 			arch_arm_kprobe(ap);
 	}
@@ -709,7 +709,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
-	if (kprobe_enabled)
+	if (!kprobes_all_disarmed)
 		arch_arm_kprobe(p);
 out_unlock_text:
@@ -751,7 +751,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (kprobe_enabled && !kprobe_gone(old_p)) {
+		if (!kprobes_all_disarmed && !kprobe_gone(old_p)) {
 			mutex_lock(&text_mutex);
 			arch_disarm_kprobe(p);
 			mutex_unlock(&text_mutex);
@@ -1190,8 +1190,8 @@ static int __init init_kprobes(void)
 		}
 	}
-	/* By default, kprobes are enabled */
-	kprobe_enabled = true;
+	/* By default, kprobes are armed */
+	kprobes_all_disarmed = false;
 	err = arch_init_kprobes();
 	if (!err)
@@ -1289,7 +1289,7 @@ static struct file_operations debugfs_kprobes_operations = {
 	.release = seq_release,
 };
-static void __kprobes enable_all_kprobes(void)
+static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -1298,8 +1298,8 @@ static void __kprobes enable_all_kprobes(void)
 	mutex_lock(&kprobe_mutex);
-	/* If kprobes are already enabled, just return */
-	if (kprobe_enabled)
+	/* If kprobes are armed, just return */
+	if (!kprobes_all_disarmed)
 		goto already_enabled;
 	mutex_lock(&text_mutex);
@@ -1311,7 +1311,7 @@ static void __kprobes enable_all_kprobes(void)
 	}
 	mutex_unlock(&text_mutex);
-	kprobe_enabled = true;
+	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
 already_enabled:
@@ -1319,7 +1319,7 @@ static void __kprobes enable_all_kprobes(void)
 	return;
 }
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -1328,11 +1328,11 @@ static void __kprobes disable_all_kprobes(void)
 	mutex_lock(&kprobe_mutex);
-	/* If kprobes are already disabled, just return */
-	if (!kprobe_enabled)
+	/* If kprobes are already disarmed, just return */
+	if (kprobes_all_disarmed)
 		goto already_disabled;
-	kprobe_enabled = false;
+	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -1364,7 +1364,7 @@ static ssize_t read_enabled_file_bool(struct file *file,
 {
 	char buf[3];
-	if (kprobe_enabled)
+	if (!kprobes_all_disarmed)
 		buf[0] = '1';
 	else
 		buf[0] = '0';
@@ -1387,12 +1387,12 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		enable_all_kprobes();
+		arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
 	case '0':
-		disable_all_kprobes();
+		disarm_all_kprobes();
 		break;
 	}
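
The last hunk is the handler behind the kprobes "enabled" debugfs file, which is what the renamed arm_all_kprobes()/disarm_all_kprobes() back. A minimal user-space sketch of toggling that knob (needs root; the path assumes debugfs is mounted at the conventional /sys/kernel/debug, and the helper name is ours, not part of the patch):

	#include <stdio.h>

	/* Illustrative helper: writes '1' or '0' to the debugfs file served by
	 * read_enabled_file_bool()/write_enabled_file_bool() above. */
	static int set_kprobes_armed(int armed)
	{
		FILE *f = fopen("/sys/kernel/debug/kprobes/enabled", "w");

		if (!f)
			return -1;
		/* '1' (or 'y'/'Y') takes the arm_all_kprobes() path,
		 * '0' (or 'n'/'N') takes disarm_all_kprobes(). */
		fputc(armed ? '1' : '0', f);
		return fclose(f);
	}

	int main(void)
	{
		return set_kprobes_armed(0) ? 1 : 0;	/* disarm all kprobes */
	}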