author     Eric Biggers  2023-01-27 14:25:14 -0800
committer  Eric Biggers  2023-01-28 15:10:12 -0800
commit     51e4e3153ebc32d3280d5d17418ae6f1a44f1ec1 (patch)
tree       55d0a9d3b02a0a403034e6130829e0901e6f93fa /fs
parent     5d0f0e57ed900917836385527ce5b122fa1425a3 (diff)
fscrypt: support decrypting data from large folios
Try to make the filesystem-level decryption functions in fs/crypto/
aware of large folios. This includes making fscrypt_decrypt_bio()
support the case where the bio contains large folios, and making
fscrypt_decrypt_pagecache_blocks() take a folio instead of a page.

There's no way to actually test this with large folios yet, but I've
tested that this doesn't cause any regressions.

Note that this patch just handles *decryption*, not encryption which
will be a little more difficult.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20230127224202.355629-1-ebiggers@kernel.org
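With the new signature, a filesystem that works with folios directly can
decrypt a whole (possibly large) folio in one call, while page-based
callers simply wrap the page with page_folio(), as the fs/buffer.c and
ext4 hunks below do. A minimal sketch of a folio-native caller, e.g. from
a ->readahead() completion path (the helper name is hypothetical;
fscrypt_decrypt_pagecache_blocks() and folio_size() are the real kernel
APIs):

#include <linux/fscrypt.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: decrypt every block of a locked, not-yet-uptodate
 * pagecache folio after its contents have been read from disk.  The
 * length is folio_size(), a multiple of the filesystem block size, and
 * the starting offset is 0, so the whole folio is decrypted in one call.
 */
static int demo_decrypt_whole_folio(struct folio *folio)
{
        return fscrypt_decrypt_pagecache_blocks(folio, folio_size(folio), 0);
}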
Diffstat (limited to 'fs')
-rw-r--r--   fs/buffer.c          4
-rw-r--r--   fs/crypto/bio.c     10
-rw-r--r--   fs/crypto/crypto.c  28
-rw-r--r--   fs/ext4/inode.c      6
4 files changed, 25 insertions, 23 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 8499c79ae13d..623e77d6ef77 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -331,8 +331,8 @@ static void decrypt_bh(struct work_struct *work)
         struct buffer_head *bh = ctx->bh;
         int err;

-        err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
-                                               bh_offset(bh));
+        err = fscrypt_decrypt_pagecache_blocks(page_folio(bh->b_page),
+                                               bh->b_size, bh_offset(bh));
         if (err == 0 && need_fsverity(bh)) {
                 /*
                  * We use different work queues for decryption and for verity
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 1b4403136d05..d57d0a020f71 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -30,13 +30,11 @@
  */
 bool fscrypt_decrypt_bio(struct bio *bio)
 {
-        struct bio_vec *bv;
-        struct bvec_iter_all iter_all;
+        struct folio_iter fi;

-        bio_for_each_segment_all(bv, bio, iter_all) {
-                struct page *page = bv->bv_page;
-                int err = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
-                                                           bv->bv_offset);
+        bio_for_each_folio_all(fi, bio) {
+                int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length,
+                                                           fi.offset);

                 if (err) {
                         bio->bi_status = errno_to_blk_status(err);
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index e78be66bbf01..bf642479269a 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -237,41 +237,43 @@ EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);

 /**
  * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
- *                                      pagecache page
- * @page: The locked pagecache page containing the block(s) to decrypt
+ *                                      pagecache folio
+ * @folio: The locked pagecache folio containing the block(s) to decrypt
  * @len: Total size of the block(s) to decrypt.  Must be a nonzero
  *       multiple of the filesystem's block size.
- * @offs: Byte offset within @page of the first block to decrypt.  Must be
+ * @offs: Byte offset within @folio of the first block to decrypt.  Must be
  *        a multiple of the filesystem's block size.
  *
- * The specified block(s) are decrypted in-place within the pagecache page,
- * which must still be locked and not uptodate.  Normally, blocksize ==
- * PAGE_SIZE and the whole page is decrypted at once.
+ * The specified block(s) are decrypted in-place within the pagecache folio,
+ * which must still be locked and not uptodate.
  *
  * This is for use by the filesystem's ->readahead() method.
  *
  * Return: 0 on success; -errno on failure
  */
-int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
-                                     unsigned int offs)
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+                                     size_t offs)
 {
-        const struct inode *inode = page->mapping->host;
+        const struct inode *inode = folio->mapping->host;
         const unsigned int blockbits = inode->i_blkbits;
         const unsigned int blocksize = 1 << blockbits;
-        u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
+        u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
                        (offs >> blockbits);
-        unsigned int i;
+        size_t i;
         int err;

-        if (WARN_ON_ONCE(!PageLocked(page)))
+        if (WARN_ON_ONCE(!folio_test_locked(folio)))
                 return -EINVAL;

         if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
                 return -EINVAL;

         for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+                struct page *page = folio_page(folio, i >> PAGE_SHIFT);
+
                 err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
-                                          page, blocksize, i, GFP_NOFS);
+                                          page, blocksize, i & ~PAGE_MASK,
+                                          GFP_NOFS);
                 if (err)
                         return err;
         }
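The loop above still hands single pages to fscrypt_crypt_block(), so each
byte offset i within the folio has to be split into a page and an in-page
offset. A worked example of that arithmetic (illustration only, not part
of the patch, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):

/*
 * For i = 0x3200 (12.5 KiB into the folio):
 *   i >> PAGE_SHIFT == 3      -> folio_page(folio, 3), the fourth subpage
 *   i & ~PAGE_MASK  == 0x200  -> offset 512 bytes into that subpage
 * (PAGE_MASK is ~(PAGE_SIZE - 1), so ~PAGE_MASK is PAGE_SIZE - 1.)
 */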
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9d9f414f99fe..0fe1b746fe86 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1136,7 +1136,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
                 for (i = 0; i < nr_wait; i++) {
                         int err2;

-                        err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+                        err2 = fscrypt_decrypt_pagecache_blocks(page_folio(page),
+                                                                blocksize,
                                                                 bh_offset(wait[i]));
                         if (err2) {
                                 clear_buffer_uptodate(wait[i]);
@@ -3858,7 +3859,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
                 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
                         /* We expect the key to be set. */
                         BUG_ON(!fscrypt_has_encryption_key(inode));
-                        err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
+                        err = fscrypt_decrypt_pagecache_blocks(page_folio(page),
+                                                               blocksize,
                                                                bh_offset(bh));
                         if (err) {
                                 clear_buffer_uptodate(bh);