rust_binder: use lock_vma_under_rcu() in use_page_slow()

There's no reason to lock the whole mm when we are doing operations on
the vma if we can help it, so to reduce contention, use the
lock_vma_under_rcu() abstraction.

Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Reviewed-by: Jann Horn <jannh@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Link: https://patch.msgid.link/20260218-binder-vma-rcu-v1-1-8bd45b2b1183@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Alice Ryhl 2026-02-18 15:13:23 +00:00 committed by Greg Kroah-Hartman
parent 2e303f0feb
commit a0b9b0f143
1 changed file with 19 additions and 18 deletions

View File

@@ -435,24 +435,25 @@ impl ShrinkablePageRange {
         //
         // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
         // workqueue.
-        check_vma(
-            MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
-                .mmap_read_lock()
-                .vma_lookup(vma_addr)
-                .ok_or(ESRCH)?,
-            self,
-        )
-        .ok_or(ESRCH)?
-        .vm_insert_page(user_page_addr, &new_page)
-        .inspect_err(|err| {
-            pr_warn!(
-                "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
-                user_page_addr,
-                vma_addr,
-                i,
-                err
-            )
-        })?;
+        let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
+        {
+            let vma_read;
+            let mmap_read;
+            let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
+                vma_read = ret;
+                check_vma(&vma_read, self)
+            } else {
+                mmap_read = mm.mmap_read_lock();
+                mmap_read
+                    .vma_lookup(vma_addr)
+                    .and_then(|vma| check_vma(vma, self))
+            };
+
+            match vma {
+                Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
+                None => return Err(ESRCH),
+            }
+        }

         let inner = self.lock.lock();