author     Darrick J. Wong    2020-05-01 16:00:56 -0700
committer  Darrick J. Wong    2020-05-08 08:50:01 -0700
commit     17d29bf271ea48b253c93969a590a11a51c19c1f (patch)
tree       af9d88d3750065f3429379b4f555ad744fce26dc /fs
parent     cc560a5a9540be2d907c0c170e29ebde98d13d63 (diff)
xfs: move log recovery buffer cancellation code to xfs_buf_item_recover.c
Move the helpers that handle incore buffer cancellation records to
xfs_buf_item_recover.c since they're not directly related to the main
log recovery machinery. No functional changes.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
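For context, the helpers being moved are used by the buffer log item recovery code across the two recovery passes: pass 1 records each cancelled buffer with xlog_add_buffer_cancelled(), and pass 2 consults xlog_put_buffer_cancelled() and xlog_is_buffer_cancelled() to decide whether a logged buffer should be replayed. The sketch below is purely illustrative and is not part of this patch; the caller name and exact control flow are hypothetical, while the helper names and the struct xfs_buf_log_format fields are taken from the kernel sources.

/*
 * Hypothetical pass-2 caller, for illustration only (not part of this
 * patch).  A cancel item consumes one reference on its cancel record;
 * any other buffer that is still marked cancelled is skipped rather
 * than replayed.
 */
static int
example_recover_buf_pass2(
	struct xlog			*log,
	struct xfs_buf_log_format	*buf_f)
{
	if (buf_f->blf_flags & XFS_BLF_CANCEL) {
		/* Drop one reference; the record goes away on the last one. */
		xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len);
		return 0;
	}

	if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno, buf_f->blf_len)) {
		/* An earlier cancel item says this buffer must not be replayed. */
		return 0;
	}

	/* ... read the buffer and replay the logged regions here ... */
	return 0;
}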
Diffstat (limited to 'fs')
-rw-r--r--  fs/xfs/libxfs/xfs_log_recover.h |   2
-rw-r--r--  fs/xfs/xfs_buf_item_recover.c   | 104
-rw-r--r--  fs/xfs/xfs_log_recover.c        | 102
3 files changed, 104 insertions, 104 deletions
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 929366d58c35..641132d0e39d 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -120,9 +120,7 @@ struct xlog_recover {
void xlog_buf_readahead(struct xlog *log, xfs_daddr_t blkno, uint len,
const struct xfs_buf_ops *ops);
-bool xlog_add_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len);
bool xlog_is_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len);
-bool xlog_put_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len);
void xlog_recover_iodone(struct xfs_buf *bp);
void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type,
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index 4ba2e27a15ca..04faa7310c4f 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -24,6 +24,110 @@
#include "xfs_quota.h"
/*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+ xfs_daddr_t bc_blkno;
+ uint bc_len;
+ int bc_refcount;
+ struct list_head bc_list;
+};
+
+static struct xfs_buf_cancel *
+xlog_find_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ struct list_head *bucket;
+ struct xfs_buf_cancel *bcp;
+
+ if (!log->l_buf_cancel_table)
+ return NULL;
+
+ bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
+ list_for_each_entry(bcp, bucket, bc_list) {
+ if (bcp->bc_blkno == blkno && bcp->bc_len == len)
+ return bcp;
+ }
+
+ return NULL;
+}
+
+static bool
+xlog_add_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ struct xfs_buf_cancel *bcp;
+
+ /*
+ * If we find an existing cancel record, this indicates that the buffer
+ * was cancelled multiple times. To ensure that during pass 2 we keep
+ * the record in the table until we reach its last occurrence in the
+ * log, a reference count is kept to tell how many times we expect to
+ * see this record during the second pass.
+ */
+ bcp = xlog_find_buffer_cancelled(log, blkno, len);
+ if (bcp) {
+ bcp->bc_refcount++;
+ return false;
+ }
+
+ bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
+ bcp->bc_blkno = blkno;
+ bcp->bc_len = len;
+ bcp->bc_refcount = 1;
+ list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
+ return true;
+}
+
+/*
+ * Check if there is an entry for blkno, len in the buffer cancel record table.
+ */
+bool
+xlog_is_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
+}
+
+/*
+ * Check if there is an entry for blkno, len in the buffer cancel record table,
+ * and decrement the reference count on it if there is one.
+ *
+ * Remove the cancel record once the refcount hits zero, so that if the same
+ * buffer is re-used again after its last cancellation we actually replay the
+ * changes made at that point.
+ */
+static bool
+xlog_put_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ struct xfs_buf_cancel *bcp;
+
+ bcp = xlog_find_buffer_cancelled(log, blkno, len);
+ if (!bcp) {
+ ASSERT(0);
+ return false;
+ }
+
+ if (--bcp->bc_refcount == 0) {
+ list_del(&bcp->bc_list);
+ kmem_free(bcp);
+ }
+ return true;
+}
+
+/* log buffer item recovery */
+
+/*
* Sort buffer items for log recovery. Most buffer items should end up on the
* buffer list and are recovered first, with the following exceptions:
*
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fa1b63bd9031..572e6707362a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -56,17 +56,6 @@ xlog_do_recovery_pass(
struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
/*
- * This structure is used during recovery to record the buf log items which
- * have been canceled and should not be replayed.
- */
-struct xfs_buf_cancel {
- xfs_daddr_t bc_blkno;
- uint bc_len;
- int bc_refcount;
- struct list_head bc_list;
-};
-
-/*
* Sector aligned buffer routines for buffer create/read/write/access
*/
@@ -1964,97 +1953,6 @@ xlog_recover_reorder_trans(
return error;
}
-static struct xfs_buf_cancel *
-xlog_find_buffer_cancelled(
- struct xlog *log,
- xfs_daddr_t blkno,
- uint len)
-{
- struct list_head *bucket;
- struct xfs_buf_cancel *bcp;
-
- if (!log->l_buf_cancel_table)
- return NULL;
-
- bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
- list_for_each_entry(bcp, bucket, bc_list) {
- if (bcp->bc_blkno == blkno && bcp->bc_len == len)
- return bcp;
- }
-
- return NULL;
-}
-
-bool
-xlog_add_buffer_cancelled(
- struct xlog *log,
- xfs_daddr_t blkno,
- uint len)
-{
- struct xfs_buf_cancel *bcp;
-
- /*
- * If we find an existing cancel record, this indicates that the buffer
- * was cancelled multiple times. To ensure that during pass 2 we keep
- * the record in the table until we reach its last occurrence in the
- * log, a reference count is kept to tell how many times we expect to
- * see this record during the second pass.
- */
- bcp = xlog_find_buffer_cancelled(log, blkno, len);
- if (bcp) {
- bcp->bc_refcount++;
- return false;
- }
-
- bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
- bcp->bc_blkno = blkno;
- bcp->bc_len = len;
- bcp->bc_refcount = 1;
- list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
- return true;
-}
-
-/*
- * Check if there is and entry for blkno, len in the buffer cancel record table.
- */
-bool
-xlog_is_buffer_cancelled(
- struct xlog *log,
- xfs_daddr_t blkno,
- uint len)
-{
- return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
-}
-
-/*
- * Check if there is and entry for blkno, len in the buffer cancel record table,
- * and decremented the reference count on it if there is one.
- *
- * Remove the cancel record once the refcount hits zero, so that if the same
- * buffer is re-used again after its last cancellation we actually replay the
- * changes made at that point.
- */
-bool
-xlog_put_buffer_cancelled(
- struct xlog *log,
- xfs_daddr_t blkno,
- uint len)
-{
- struct xfs_buf_cancel *bcp;
-
- bcp = xlog_find_buffer_cancelled(log, blkno, len);
- if (!bcp) {
- ASSERT(0);
- return false;
- }
-
- if (--bcp->bc_refcount == 0) {
- list_del(&bcp->bc_list);
- kmem_free(bcp);
- }
- return true;
-}
-
void
xlog_buf_readahead(
struct xlog *log,