[SRU][J:linux-azure][PATCH 1/1] smb: client: Avoid race in open_cached_dir with lease breaks
John Cabaj
john.cabaj at canonical.com
Tue Jul 22 22:15:54 UTC 2025
From: Paul Aurich <paul at darkrain42.org>
BugLink: https://bugs.launchpad.net/bugs/2117524
A pre-existing valid cfid returned from find_or_create_cached_dir might
race with a lease break, meaning open_cached_dir doesn't consider it
valid, and thinks it's newly-constructed. This leaks a dentry reference
if the allocation occurs before the queued lease break work runs.
Avoid the race by holding the cfid_list_lock across both
find_or_create_cached_dir and the check of its result.
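To illustrate the locking pattern this patch moves to, here is a minimal
userspace analogue (a sketch only, not cifs code; cache_lookup, entry,
has_lease and open_cached are hypothetical names): the lookup helper no
longer takes and drops the lock itself, the caller holds it across the
lookup and the subsequent validity check, so a concurrent "lease break"
cannot invalidate the entry in the window between the two.

/* Hypothetical userspace sketch of the caller-holds-lock pattern. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool has_lease;		/* cleared asynchronously by a "lease break" */
	int refcount;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry cached = { .has_lease = true, .refcount = 1 };

/* Lookup helper: caller must hold list_lock (mirrors the patched code). */
static struct entry *cache_lookup(void)
{
	if (!cached.has_lease)
		return NULL;	/* entry is being torn down, don't hand it out */
	cached.refcount++;
	return &cached;
}

/* Caller: the lock is held across the lookup and the validity check. */
static void open_cached(void)
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	e = cache_lookup();
	if (!e) {
		pthread_mutex_unlock(&list_lock);
		return;
	}
	if (e->has_lease) {
		/* Still valid: no lease break could slip in between. */
		pthread_mutex_unlock(&list_lock);
		printf("reusing cached entry, refcount=%d\n", e->refcount);
		return;
	}
	pthread_mutex_unlock(&list_lock);
	/* Slow path: entry went invalid; drop the extra reference here. */
}

int main(void)
{
	open_cached();
	return 0;
}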
Cc: stable at vger.kernel.org
Reviewed-by: Henrique Carvalho <henrique.carvalho at suse.com>
Signed-off-by: Paul Aurich <paul at darkrain42.org>
Signed-off-by: Steve French <stfrench at microsoft.com>
(backported from commit 3ca02e63edccb78ef3659bebc68579c7224a6ca2)
[john-cabaj: context changes]
Signed-off-by: John Cabaj <john.cabaj at canonical.com>
---
fs/cifs/cached_dir.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
index 9718926205047..c1b3b936702dc 100644
--- a/fs/cifs/cached_dir.c
+++ b/fs/cifs/cached_dir.c
@@ -24,7 +24,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
{
struct cached_fid *cfid;
- spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
if (!strcmp(cfid->path, path)) {
/*
@@ -33,25 +32,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* being deleted due to a lease break.
*/
if (!cfid->time || !cfid->has_lease) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
kref_get(&cfid->refcount);
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
}
if (lookup_only) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
if (cfids->num_entries >= max_cached_dirs) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid = init_cached_dir(path);
if (cfid == NULL) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid->cfids = cfids;
@@ -59,7 +53,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
list_add(&cfid->entry, &cfids->entries);
cfid->on_list = true;
kref_get(&cfid->refcount);
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
@@ -164,8 +157,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
if (!utf16_path)
return -ENOMEM;
+ spin_lock(&cfids->cfid_list_lock);
cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
if (cfid == NULL) {
+ spin_unlock(&cfids->cfid_list_lock);
kfree(utf16_path);
return -ENOENT;
}
@@ -174,7 +169,6 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
* entry or laundromat worker removed it from @cfids->entries. Caller
* will put last reference if the latter.
*/
- spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease) {
spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
--
2.43.0