diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b21a7b246a0d..0c2161b1f057 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1045,6 +1045,66 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
 	return NULL;
 }
 
+/* Find the smallest unused descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
+{
+	struct binder_ref *ref;
+	struct rb_node *n;
+	u32 desc;
+
+	desc = 1;
+	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+		if (ref->data.desc > desc)
+			break;
+		desc = ref->data.desc + 1;
+	}
+
+	return desc;
+}
+
+/*
+ * Find an available reference descriptor ID. The proc->outer_lock might
+ * be released in the process, in which case -EAGAIN is returned and the
+ * @desc should be considered invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+				struct binder_node *node,
+				u32 *desc)
+{
+	struct dbitmap *dmap = &proc->dmap;
+	unsigned long *new, bit;
+	unsigned int nbits;
+
+	/* 0 is reserved for the context manager */
+	if (node == proc->context->binder_context_mgr_node) {
+		*desc = 0;
+		return 0;
+	}
+
+	if (!dbitmap_enabled(dmap)) {
+		*desc = slow_desc_lookup_olocked(proc);
+		return 0;
+	}
+
+	if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) {
+		*desc = bit;
+		return 0;
+	}
+
+	/*
+	 * The dbitmap is full and needs to grow. The proc->outer_lock
+	 * is briefly released to allocate the new bitmap safely.
+	 */
+	nbits = dbitmap_grow_nbits(dmap);
+	binder_proc_unlock(proc);
+	new = bitmap_zalloc(nbits, GFP_KERNEL);
+	binder_proc_lock(proc);
+	dbitmap_grow(dmap, new, nbits);
+
+	return -EAGAIN;
+}
+
 /**
  * binder_get_ref_for_node_olocked() - get the ref associated with given node
  * @proc:	binder_proc that owns the ref
@@ -1068,12 +1128,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 					struct binder_node *node,
 					struct binder_ref *new_ref)
 {
-	struct binder_context *context = proc->context;
-	struct rb_node **p = &proc->refs_by_node.rb_node;
-	struct rb_node *parent = NULL;
 	struct binder_ref *ref;
-	struct rb_node *n;
+	struct rb_node *parent;
+	struct rb_node **p;
+	u32 desc;
 
+retry:
+	p = &proc->refs_by_node.rb_node;
+	parent = NULL;
 	while (*p) {
 		parent = *p;
 		ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1150,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 	if (!new_ref)
 		return NULL;
 
+	/* might release the proc->outer_lock */
+	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+		goto retry;
+
 	binder_stats_created(BINDER_STAT_REF);
 	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
 	new_ref->proc = proc;
@@ -1095,14 +1161,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 	rb_link_node(&new_ref->rb_node_node, parent, p);
 	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
 
-	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
-	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
-		ref = rb_entry(n, struct binder_ref, rb_node_desc);
-		if (ref->data.desc > new_ref->data.desc)
-			break;
-		new_ref->data.desc = ref->data.desc + 1;
-	}
-
+	new_ref->data.desc = desc;
 	p = &proc->refs_by_desc.rb_node;
 	while (*p) {
 		parent = *p;
@@ -1131,6 +1190,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
 
 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 {
+	struct dbitmap *dmap = &ref->proc->dmap;
 	bool delete_node = false;
 
 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1198,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
 		     ref->node->debug_id);
 
+	if (dbitmap_enabled(dmap))
+		dbitmap_clear_bit(dmap, ref->data.desc);
 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
 
@@ -1298,6 +1360,25 @@ static void binder_free_ref(struct binder_ref *ref)
 	kfree(ref);
 }
 
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+	unsigned long *new;
+	int nbits;
+
+	binder_proc_lock(proc);
+	nbits = dbitmap_shrink_nbits(&proc->dmap);
+	binder_proc_unlock(proc);
+
+	if (!nbits)
+		return;
+
+	new = bitmap_zalloc(nbits, GFP_KERNEL);
+	binder_proc_lock(proc);
+	dbitmap_shrink(&proc->dmap, new, nbits);
+	binder_proc_unlock(proc);
+}
+
 /**
  * binder_update_ref_for_handle() - inc/dec the ref for given handle
  * @proc:	proc containing the ref
@@ -1334,8 +1415,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
 		*rdata = ref->data;
 	binder_proc_unlock(proc);
 
-	if (delete_ref)
+	if (delete_ref) {
 		binder_free_ref(ref);
+		try_shrink_dmap(proc);
+	}
 	return ret;
 
 err_no_ref:
@@ -4931,6 +5014,7 @@ static void binder_free_proc(struct binder_proc *proc)
 	put_task_struct(proc->tsk);
 	put_cred(proc->cred);
 	binder_stats_deleted(BINDER_STAT_PROC);
+	dbitmap_free(&proc->dmap);
 	kfree(proc);
 }
 
@@ -5634,6 +5718,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
+
+	dbitmap_init(&proc->dmap);
 	spin_lock_init(&proc->inner_lock);
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 5b7c80b99ae8..7d4fc53f7a73 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -14,6 +14,7 @@
 #include <linux/uidgid.h>
 #include <uapi/linux/android/binderfs.h>
 #include "binder_alloc.h"
+#include "dbitmap.h"
 
 struct binder_context {
 	struct binder_node *binder_context_mgr_node;
@@ -368,6 +369,8 @@ struct binder_ref {
  * @freeze_wait:          waitqueue of processes waiting for all outstanding
  *                        transactions to be processed
  *                        (protected by @inner_lock)
+ * @dmap:                 dbitmap to manage available reference descriptors
+ *                        (protected by @outer_lock)
  * @todo:                 list of work for this process
  *                        (protected by @inner_lock)
  * @stats:                per-process binder statistics
@@ -417,7 +420,7 @@ struct binder_proc {
 	bool sync_recv;
 	bool async_recv;
 	wait_queue_head_t freeze_wait;
-
+	struct dbitmap dmap;
 	struct list_head todo;
 	struct binder_stats stats;
 	struct list_head delivered_death;
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
new file mode 100644
index 000000000000..b8ac7b4764fd
--- /dev/null
+++ b/drivers/android/dbitmap.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2024 Google LLC
+ *
+ * dbitmap - dynamically sized bitmap library.
+ *
+ * Used by the binder driver to optimize the allocation of the smallest
+ * available descriptor ID. Each bit in the bitmap represents the state
+ * of an ID, with the exception of BIT(0) which is used exclusively to
+ * reference binder's context manager.
+ *
+ * A dbitmap can grow or shrink as needed. This part has been designed
+ * considering that users might need to briefly release their locks in
+ * order to allocate memory for the new bitmap. These operations are
+ * then verified to determine whether the grow or shrink is still valid.
+ *
+ * This library does not provide protection against concurrent access
+ * by itself. Binder uses the proc->outer_lock for this purpose.
+ */
+
+#ifndef _LINUX_DBITMAP_H
+#define _LINUX_DBITMAP_H
+#include <linux/bitmap.h>
+
+#define NBITS_MIN	BITS_PER_TYPE(unsigned long)
+
+struct dbitmap {
+	unsigned int nbits;
+	unsigned long *map;
+};
+
+static inline int dbitmap_enabled(struct dbitmap *dmap)
+{
+	return !!dmap->nbits;
+}
+
+static inline void dbitmap_free(struct dbitmap *dmap)
+{
+	dmap->nbits = 0;
+	kfree(dmap->map);
+}
+
+/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
+static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
+{
+	unsigned int bit;
+
+	if (dmap->nbits <= NBITS_MIN)
+		return 0;
+
+	/*
+	 * Determine if the bitmap can shrink based on the position of
+	 * its last set bit. If the bit is within the first quarter of
+	 * the bitmap then shrinking is possible. In this case, the
+	 * bitmap should shrink to half its current size.
+	 */
+	bit = find_last_bit(dmap->map, dmap->nbits);
+	if (bit < (dmap->nbits >> 2))
+		return dmap->nbits >> 1;
+
+	/*
+	 * Note that find_last_bit() returns dmap->nbits when no bits
+	 * are set. While this is technically not possible here since
+	 * BIT(0) is always set, this check is left for extra safety.
+	 */
+	if (bit == dmap->nbits)
+		return NBITS_MIN;
+
+	return 0;
+}
+
+/* Replace the internal bitmap with a new one of different size */
+static inline void
+dbitmap_replace(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+	bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
+	kfree(dmap->map);
+	dmap->map = new;
+	dmap->nbits = nbits;
+}
+
+static inline void
+dbitmap_shrink(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+	if (!new)
+		return;
+
+	/*
+	 * Verify that shrinking to @nbits is still possible. The @new
+	 * bitmap might have been allocated without locks, so this call
+	 * could now be outdated. In this case, free @new and move on.
+	 */
+	if (!dbitmap_enabled(dmap) || dbitmap_shrink_nbits(dmap) != nbits) {
+		kfree(new);
+		return;
+	}
+
+	dbitmap_replace(dmap, new, nbits);
+}
+
+/* Returns the nbits that a dbitmap can grow to. */
+static inline unsigned int dbitmap_grow_nbits(struct dbitmap *dmap)
+{
+	return dmap->nbits << 1;
+}
+
+static inline void
+dbitmap_grow(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+	/*
+	 * Verify that growing to @nbits is still possible. The @new
+	 * bitmap might have been allocated without locks, so this call
+	 * could now be outdated. In this case, free @new and move on.
+	 */
+	if (!dbitmap_enabled(dmap) || nbits <= dmap->nbits) {
+		kfree(new);
+		return;
+	}
+
+	/*
+	 * Check for ENOMEM after confirming the grow operation is still
+	 * required. This ensures we only disable the dbitmap when it's
+	 * necessary. Once the dbitmap is disabled, binder will fall back
+	 * to slow_desc_lookup_olocked().
+	 */
+	if (!new) {
+		dbitmap_free(dmap);
+		return;
+	}
+
+	dbitmap_replace(dmap, new, nbits);
+}
+
+/*
+ * Finds and sets the first zero bit in the bitmap. Upon success @bit
+ * is populated with the index and 0 is returned. Otherwise, -ENOSPC
+ * is returned to indicate that a dbitmap_grow() is needed.
+ */
+static inline int
+dbitmap_acquire_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
+{
+	unsigned long n;
+
+	n = find_first_zero_bit(dmap->map, dmap->nbits);
+	if (n == dmap->nbits)
+		return -ENOSPC;
+
+	*bit = n;
+	set_bit(n, dmap->map);
+
+	return 0;
+}
+
+static inline void
+dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
+{
+	/* BIT(0) should always be set for the context manager */
+	if (bit)
+		clear_bit(bit, dmap->map);
+}
+
+static inline int dbitmap_init(struct dbitmap *dmap)
+{
+	dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
+	if (!dmap->map) {
+		dmap->nbits = 0;
+		return -ENOMEM;
+	}
+
+	dmap->nbits = NBITS_MIN;
+	/* BIT(0) is reserved for the context manager */
+	set_bit(0, dmap->map);
+
+	return 0;
+}
+#endif
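
The allocation policy above is compact but spread across two files, so here is a small userspace sketch of the same idea, for illustration only: it is not taken from the patch, it does not use the kernel bitmap helpers, and every demo_* name is made up. It hands out the lowest clear bit, doubles the map when it fills up, and halves it once the highest set bit falls below a quarter of the current size. Error handling and the lock-drop/retry dance around bitmap_zalloc() are deliberately left out.

/* dbitmap_demo.c - standalone illustration, not kernel code */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_NBITS_MIN 64u

struct demo_dbitmap {
	unsigned int nbits;
	unsigned char *map;	/* one byte per bit, for readability */
};

static void demo_init(struct demo_dbitmap *d)
{
	d->nbits = DEMO_NBITS_MIN;
	d->map = calloc(d->nbits, 1);	/* allocation failures ignored here */
	d->map[0] = 1;			/* bit 0 reserved, like the context manager */
}

static unsigned int demo_first_zero(const struct demo_dbitmap *d)
{
	unsigned int i;

	for (i = 0; i < d->nbits; i++)
		if (!d->map[i])
			return i;
	return d->nbits;		/* bitmap is full */
}

static unsigned int demo_last_set(const struct demo_dbitmap *d)
{
	unsigned int i;

	for (i = d->nbits; i-- > 0; )
		if (d->map[i])
			return i;
	return 0;			/* bit 0 is always set anyway */
}

static void demo_resize(struct demo_dbitmap *d, unsigned int nbits)
{
	unsigned char *new = calloc(nbits, 1);

	memcpy(new, d->map, nbits < d->nbits ? nbits : d->nbits);
	free(d->map);
	d->map = new;
	d->nbits = nbits;
}

/* Hand out the lowest free ID, doubling the bitmap when it is full. */
static unsigned int demo_acquire(struct demo_dbitmap *d)
{
	unsigned int bit = demo_first_zero(d);

	if (bit == d->nbits)		/* full: grow, the old size is the next free bit */
		demo_resize(d, d->nbits * 2);

	d->map[bit] = 1;
	return bit;
}

/* Release an ID; halve the bitmap once the top three quarters are unused. */
static void demo_release(struct demo_dbitmap *d, unsigned int bit)
{
	if (!bit)
		return;			/* bit 0 stays reserved */
	d->map[bit] = 0;

	if (d->nbits > DEMO_NBITS_MIN && demo_last_set(d) < d->nbits / 4)
		demo_resize(d, d->nbits / 2);
}

int main(void)
{
	struct demo_dbitmap d;
	unsigned int i, id[200];

	demo_init(&d);
	for (i = 0; i < 200; i++)
		id[i] = demo_acquire(&d);	/* 1, 2, 3, ... growing as needed */
	printf("highest id %u, nbits %u\n", id[199], d.nbits);

	for (i = 0; i < 200; i++)
		demo_release(&d, id[i]);	/* shrinks back toward the minimum */
	printf("after release, nbits %u\n", d.nbits);

	free(d.map);
	return 0;
}

The quarter/half thresholds mirror dbitmap_shrink_nbits(): the map only shrinks once the live descriptors fit comfortably in its lower half, so a process hovering around a size boundary does not reallocate its bitmap on every release.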