Diffstat (limited to 'debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch')
-rw-r--r--  debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch  185
1 file changed, 0 insertions, 185 deletions
diff --git a/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch b/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch
deleted file mode 100644
index d641875b9..000000000
--- a/debian/patches-rt/fs-replace-bh_uptodate_lock-for-rt.patch
+++ /dev/null
@@ -1,185 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Mar 2011 09:18:52 +0100
-Subject: buffer_head: Replace bh_uptodate_lock for -rt
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.17-rt9.tar.xz
-
-Wrap the bit_spin_lock calls into a separate inline and add the RT
-replacements with a real spinlock.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- fs/buffer.c | 21 +++++++--------------
- fs/ext4/page-io.c | 6 ++----
- fs/ntfs/aops.c | 10 +++-------
- include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
- 4 files changed, 46 insertions(+), 25 deletions(-)
-
---- a/fs/buffer.c
-+++ b/fs/buffer.c
-@@ -275,8 +275,7 @@ static void end_buffer_async_read(struct
- * decide that the page is now completely done.
- */
- first = page_buffers(page);
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
-+ flags = bh_uptodate_lock_irqsave(first);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
-@@ -289,8 +288,7 @@ static void end_buffer_async_read(struct
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(first, flags);
-
- /*
- * If none of the buffers had errors and they are all
-@@ -302,9 +300,7 @@ static void end_buffer_async_read(struct
- return;
-
- still_busy:
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-- return;
-+ bh_uptodate_unlock_irqrestore(first, flags);
- }
-
- /*
-@@ -331,8 +327,7 @@ void end_buffer_async_write(struct buffe
- }
-
- first = page_buffers(page);
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
-+ flags = bh_uptodate_lock_irqsave(first);
-
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
-@@ -344,15 +339,12 @@ void end_buffer_async_write(struct buffe
- }
- tmp = tmp->b_this_page;
- }
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(first, flags);
- end_page_writeback(page);
- return;
-
- still_busy:
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-- return;
-+ bh_uptodate_unlock_irqrestore(first, flags);
- }
- EXPORT_SYMBOL(end_buffer_async_write);
-
-@@ -3372,6 +3364,7 @@ struct buffer_head *alloc_buffer_head(gf
- struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
- if (ret) {
- INIT_LIST_HEAD(&ret->b_assoc_buffers);
-+ buffer_head_init_locks(ret);
- preempt_disable();
- __this_cpu_inc(bh_accounting.nr);
- recalc_bh_state();
---- a/fs/ext4/page-io.c
-+++ b/fs/ext4/page-io.c
-@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *
- * We check all buffers in the page under BH_Uptodate_Lock
- * to avoid races with other end io clearing async_write flags
- */
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
-+ flags = bh_uptodate_lock_irqsave(head);
- do {
- if (bh_offset(bh) < bio_start ||
- bh_offset(bh) + bh->b_size > bio_end) {
-@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *
- if (bio->bi_status)
- buffer_io_error(bh);
- } while ((bh = bh->b_this_page) != head);
-- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(head, flags);
- if (!under_io) {
- #ifdef CONFIG_FS_ENCRYPTION
- if (data_page)
---- a/fs/ntfs/aops.c
-+++ b/fs/ntfs/aops.c
-@@ -92,8 +92,7 @@ static void ntfs_end_buffer_async_read(s
- "0x%llx.", (unsigned long long)bh->b_blocknr);
- }
- first = page_buffers(page);
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
-+ flags = bh_uptodate_lock_irqsave(first);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
-@@ -108,8 +107,7 @@ static void ntfs_end_buffer_async_read(s
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(first, flags);
- /*
- * If none of the buffers had errors then we can set the page uptodate,
- * but we first have to perform the post read mst fixups, if the
-@@ -142,9 +140,7 @@ static void ntfs_end_buffer_async_read(s
- unlock_page(page);
- return;
- still_busy:
-- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-- local_irq_restore(flags);
-- return;
-+ bh_uptodate_unlock_irqrestore(first, flags);
- }
-
- /**
---- a/include/linux/buffer_head.h
-+++ b/include/linux/buffer_head.h
-@@ -76,8 +76,42 @@ struct buffer_head {
- struct address_space *b_assoc_map; /* mapping this buffer is
- associated with */
- atomic_t b_count; /* users using this buffer_head */
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ spinlock_t b_uptodate_lock;
-+#endif
- };
-
-+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
-+{
-+ unsigned long flags;
-+
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ local_irq_save(flags);
-+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
-+#else
-+ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
-+#endif
-+ return flags;
-+}
-+
-+static inline void
-+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
-+{
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
-+ local_irq_restore(flags);
-+#else
-+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
-+#endif
-+}
-+
-+static inline void buffer_head_init_locks(struct buffer_head *bh)
-+{
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ spin_lock_init(&bh->b_uptodate_lock);
-+#endif
-+}
-+
- /*
- * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
- * and buffer_foo() functions.
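
For reference, the caller-side change made by this (now-dropped) patch is easier to read without the diff markers. The sketch below is assembled from the fs/buffer.c hunks above; "first" and "flags" are the local variables used there, and CONFIG_PREEMPT_RT_BASE together with the b_uptodate_lock field come from the include/linux/buffer_head.h hunk.

    /* Before the patch: callers open-coded an IRQ-off bit spinlock around
     * the walk over the page's buffer ring.  On -rt this is a problem,
     * because a bit spinlock cannot be replaced by a sleeping lock and the
     * whole walk runs with interrupts disabled.
     */
    local_irq_save(flags);
    bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
    /* ... walk the buffer ring under the lock ... */
    bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
    local_irq_restore(flags);

    /* After the patch: the same callers go through one helper pair.  With
     * CONFIG_PREEMPT_RT_BASE unset, the helpers expand to exactly the
     * sequence above; with it set, they take the per-buffer_head
     * spinlock_t b_uptodate_lock instead.
     */
    flags = bh_uptodate_lock_irqsave(first);
    /* ... walk the buffer ring under the lock ... */
    bh_uptodate_unlock_irqrestore(first, flags);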