| author | NeilBrown | 2014-10-13 16:41:28 +1100 |
| --- | --- | --- |
| committer | Greg Kroah-Hartman | 2014-11-07 10:53:25 -0800 |
| commit | 2b75869bba676c248d8d25ae6d2bd9221dfffdb6 | |
| tree | bcbf58a3e4da882e9cc4dda7da4b65f18a01fa81 /fs/kernfs | |
| parent | 0936896056365349afa867c16e9f9100a6707cbf | |
sysfs/kernfs: allow attributes to request write buffer be pre-allocated.
md/raid allows metadata management to be performed in user-space.
At various times, particularly on device failure, the metadata needs
to be updated before further writes can be permitted.
This means that the user-space program which updates metadata must
not block on writeout, and so must not allocate memory.
mlockall(MCL_CURRENT|MCL_FUTURE) and pre-allocation can avoid all
memory allocation issues for user memory, but that does not help with
kernel memory.
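For context, a minimal user-space sketch of that scheme (illustrative only; the sysfs path and the "active" write are assumptions, not taken from the md tools):

/*
 * Sketch of a metadata manager: lock all memory and pre-open the sysfs
 * attribute before array writes are allowed, so the later write path
 * performs no user-space allocation.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Pin current and future pages so page faults cannot block on I/O. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) < 0) {
		perror("mlockall");
		return 1;
	}

	/* Pre-open the attribute; this path is illustrative. */
	int fd = open("/sys/block/md0/md/array_state", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ... update the on-disk metadata here ... */

	/*
	 * Tell md that writes may proceed.  With a PREALLOC attribute the
	 * kernel reuses the buffer allocated at open(), so this write does
	 * not require GFP_KERNEL memory either.
	 */
	const char msg[] = "active";
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");

	close(fd);
	return 0;
}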
Several kernel objects can be pre-allocated, e.g. files can be opened
before any writes to the array are permitted.
However some kernel allocation happens in places that cannot be
pre-allocated.
In particular, writes to sysfs files (to tell md that it can now
allow writes to the array) allocate a buffer using GFP_KERNEL.
This patch allows attributes to be marked as "PREALLOC". In that case
the maximal buffer is allocated when the file is opened, and then used
on each write instead of allocating a new buffer.
As the same buffer is now shared for all writes on the same file
description, the mutex is extended to cover full use of the buffer
including the copy_from_user().
The new __ATTR_PREALLOC() ORs a new flag into the 'mode', which is
inspected by sysfs_add_file_mode_ns() to determine if the file should be
marked as requiring prealloc.
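As a hedged illustration (not part of this diff, and assuming __ATTR_PREALLOC() takes the same arguments as __ATTR()), a driver could mark an attribute like this; the "foo" attribute and its callbacks are hypothetical:

#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

/* Hypothetical show/store callbacks for a made-up "foo" attribute. */
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "example\n");
}

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	return count;
}

/*
 * __ATTR_PREALLOC() ORs the prealloc flag into the mode, so kernfs allocates
 * the write buffer once at open() rather than with GFP_KERNEL on each write().
 */
static struct kobj_attribute foo_attr =
	__ATTR_PREALLOC(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);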
Despite the comment, we *do* use ->seq_show together with ->prealloc
in this patch. The next patch fixes that.
Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs/kernfs')
-rw-r--r-- | fs/kernfs/file.c | 45 |
1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 4429d6d9217f..70186e2e692a 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -106,7 +106,7 @@ static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
 	const struct kernfs_ops *ops;
 
 	/*
-	 * @of->mutex nests outside active ref and is just to ensure that
+	 * @of->mutex nests outside active ref and is primarily to ensure that
 	 * the ops aren't called concurrently for the same open file.
 	 */
 	mutex_lock(&of->mutex);
@@ -194,7 +194,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
 		return -ENOMEM;
 
 	/*
-	 * @of->mutex nests outside active ref and is just to ensure that
+	 * @of->mutex nests outside active ref and is primarily to ensure that
 	 * the ops aren't called concurrently for the same open file.
 	 */
 	mutex_lock(&of->mutex);
@@ -278,19 +278,16 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
 		len = min_t(size_t, count, PAGE_SIZE);
 	}
 
-	buf = kmalloc(len + 1, GFP_KERNEL);
+	buf = of->prealloc_buf;
+	if (!buf)
+		buf = kmalloc(len + 1, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	if (copy_from_user(buf, user_buf, len)) {
-		len = -EFAULT;
-		goto out_free;
-	}
-	buf[len] = '\0';	/* guarantee string termination */
-
 	/*
-	 * @of->mutex nests outside active ref and is just to ensure that
-	 * the ops aren't called concurrently for the same open file.
+	 * @of->mutex nests outside active ref and is used both to ensure that
+	 * the ops aren't called concurrently for the same open file, and
+	 * to provide exclusive access to ->prealloc_buf (when that exists).
 	 */
 	mutex_lock(&of->mutex);
 	if (!kernfs_get_active(of->kn)) {
@@ -299,19 +296,27 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
 		goto out_free;
 	}
 
+	if (copy_from_user(buf, user_buf, len)) {
+		len = -EFAULT;
+		goto out_unlock;
+	}
+	buf[len] = '\0';	/* guarantee string termination */
+
 	ops = kernfs_ops(of->kn);
 	if (ops->write)
 		len = ops->write(of, buf, len, *ppos);
 	else
 		len = -EINVAL;
 
-	kernfs_put_active(of->kn);
-	mutex_unlock(&of->mutex);
-
 	if (len > 0)
 		*ppos += len;
+
+out_unlock:
+	kernfs_put_active(of->kn);
+	mutex_unlock(&of->mutex);
 out_free:
-	kfree(buf);
+	if (buf != of->prealloc_buf)
+		kfree(buf);
 	return len;
 }
 
@@ -685,6 +690,14 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 	 */
 	of->atomic_write_len = ops->atomic_write_len;
 
+	if (ops->prealloc) {
+		int len = of->atomic_write_len ?: PAGE_SIZE;
+		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
+		error = -ENOMEM;
+		if (!of->prealloc_buf)
+			goto err_free;
+	}
+
 	/*
 	 * Always instantiate seq_file even if read access doesn't use
 	 * seq_file or is not requested.  This unifies private data access
@@ -715,6 +728,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 err_close:
 	seq_release(inode, file);
 err_free:
+	kfree(of->prealloc_buf);
 	kfree(of);
 err_out:
 	kernfs_put_active(kn);
@@ -728,6 +742,7 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 
 	kernfs_put_open_node(kn, of);
 	seq_release(inode, filp);
+	kfree(of->prealloc_buf);
 	kfree(of);
 
 	return 0;
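The ops->prealloc flag tested in kernfs_fop_open() above is a new field on struct kernfs_ops, declared in include/linux/kernfs.h and therefore outside this fs/kernfs diffstat. A kernfs user that does not go through sysfs could opt in directly; a rough sketch, with my_write() as a hypothetical handler:

#include <linux/kernfs.h>

/*
 * Hypothetical write handler: with .prealloc set, buf is of->prealloc_buf,
 * already NUL-terminated by kernfs_fop_write() under of->mutex.
 */
static ssize_t my_write(struct kernfs_open_file *of, char *buf,
			size_t bytes, loff_t off)
{
	return bytes;
}

/* Illustrative only: request a pre-allocated write buffer for this node. */
static const struct kernfs_ops my_kf_ops = {
	.prealloc	= true,
	.write		= my_write,
};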