Diffstat (limited to 'debian/patches-rt/fs-buffer-Make-BH_Uptodate_Lock-bit_spin_lock-a-regu.patch')
-rw-r--r--  debian/patches-rt/fs-buffer-Make-BH_Uptodate_Lock-bit_spin_lock-a-regu.patch | 193
1 file changed, 193 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/fs-buffer-Make-BH_Uptodate_Lock-bit_spin_lock-a-regu.patch b/debian/patches-rt/fs-buffer-Make-BH_Uptodate_Lock-bit_spin_lock-a-regu.patch
new file mode 100644
index 000000000..3669e87b2
--- /dev/null
+++ b/debian/patches-rt/fs-buffer-Make-BH_Uptodate_Lock-bit_spin_lock-a-regu.patch
@@ -0,0 +1,193 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Nov 2019 18:54:20 +0100
+Subject: [PATCH] fs/buffer: Make BH_Uptodate_Lock bit_spin_lock a regular
+ spinlock_t
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
+
+Bit spinlocks are problematic if PREEMPT_RT is enabled, because they
+disable preemption, which is undesired for latency reasons and breaks when
+regular spinlocks are taken within the bit_spinlock locked region because
+regular spinlocks are converted to 'sleeping spinlocks' on RT. So RT
+replaces the bit spinlocks with regular spinlocks to avoid this problem.
+Bit spinlocks are also not covered by lock debugging, e.g. lockdep.
+
+Substitute the BH_Uptodate_Lock bit spinlock with a regular spinlock.
+
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bigeasy: remove the wrapper and use always spinlock_t and move it into
+ the padding hole]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+v2…v3: rename uptodate_lock to b_uptodate_lock.
+
+v1…v2: Move the spinlock_t to the padding hole as per Jan Kara. pahole reports
+that the total size of struct buffer_head remained unchanged, before
+
+| atomic_t b_count; /* 96 4 */
+|
+| /* size: 104, cachelines: 2, members: 12 */
+| /* padding: 4 */
+| /* last cacheline: 40 bytes */
+
+after
+
+| atomic_t b_count; /* 96 4 */
+| spinlock_t uptodate_lock; /* 100 4 */
+|
+| /* size: 104, cachelines: 2, members: 13 */
+| /* last cacheline: 40 bytes */
+
+ fs/buffer.c | 19 +++++++------------
+ fs/ext4/page-io.c | 8 +++-----
+ fs/ntfs/aops.c | 9 +++------
+ include/linux/buffer_head.h | 6 +++---
+ 4 files changed, 16 insertions(+), 26 deletions(-)
+
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -275,8 +275,7 @@ static void end_buffer_async_read(struct
+ * decide that the page is now completely done.
+ */
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ spin_lock_irqsave(&first->b_uptodate_lock, flags);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -289,8 +288,7 @@ static void end_buffer_async_read(struct
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
+
+ /*
+ * If none of the buffers had errors and they are all
+@@ -302,8 +300,7 @@ static void end_buffer_async_read(struct
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
+ return;
+ }
+
+@@ -331,8 +328,7 @@ void end_buffer_async_write(struct buffe
+ }
+
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ spin_lock_irqsave(&first->b_uptodate_lock, flags);
+
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+@@ -344,14 +340,12 @@ void end_buffer_async_write(struct buffe
+ }
+ tmp = tmp->b_this_page;
+ }
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
+ end_page_writeback(page);
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
+ return;
+ }
+ EXPORT_SYMBOL(end_buffer_async_write);
+@@ -3368,6 +3362,7 @@ struct buffer_head *alloc_buffer_head(gf
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+ if (ret) {
+ INIT_LIST_HEAD(&ret->b_assoc_buffers);
++ spin_lock_init(&ret->b_uptodate_lock);
+ preempt_disable();
+ __this_cpu_inc(bh_accounting.nr);
+ recalc_bh_state();
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -87,11 +87,10 @@ static void ext4_finish_bio(struct bio *
+ }
+ bh = head = page_buffers(page);
+ /*
+- * We check all buffers in the page under BH_Uptodate_Lock
++ * We check all buffers in the page under b_uptodate_lock
+ * to avoid races with other end io clearing async_write flags
+ */
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
++ spin_lock_irqsave(&head->b_uptodate_lock, flags);
+ do {
+ if (bh_offset(bh) < bio_start ||
+ bh_offset(bh) + bh->b_size > bio_end) {
+@@ -103,8 +102,7 @@ static void ext4_finish_bio(struct bio *
+ if (bio->bi_status)
+ buffer_io_error(bh);
+ } while ((bh = bh->b_this_page) != head);
+- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
+ if (!under_io) {
+ fscrypt_free_bounce_page(bounce_page);
+ end_page_writeback(page);
+--- a/fs/ntfs/aops.c
++++ b/fs/ntfs/aops.c
+@@ -92,8 +92,7 @@ static void ntfs_end_buffer_async_read(s
+ "0x%llx.", (unsigned long long)bh->b_blocknr);
+ }
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ spin_lock_irqsave(&first->b_uptodate_lock, flags);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -108,8 +107,7 @@ static void ntfs_end_buffer_async_read(s
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
+ /*
+ * If none of the buffers had errors then we can set the page uptodate,
+ * but we first have to perform the post read mst fixups, if the
+@@ -142,8 +140,7 @@ static void ntfs_end_buffer_async_read(s
+ unlock_page(page);
+ return;
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
+ return;
+ }
+
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -22,9 +22,6 @@ enum bh_state_bits {
+ BH_Dirty, /* Is dirty */
+ BH_Lock, /* Is locked */
+ BH_Req, /* Has been submitted for I/O */
+- BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
+- * IO completion of other buffers in the page
+- */
+
+ BH_Mapped, /* Has a disk mapping */
+ BH_New, /* Disk mapping was newly created by get_block */
+@@ -76,6 +73,9 @@ struct buffer_head {
+ struct address_space *b_assoc_map; /* mapping this buffer is
+ associated with */
+ atomic_t b_count; /* users using this buffer_head */
++ spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
++ * serialise IO completion of other
++ * buffers in the page */
+ };
+
+ /*