Diffstat (limited to 'debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch')
-rw-r--r--  debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch  204
1 file changed, 0 insertions(+), 204 deletions(-)
diff --git a/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch b/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch
deleted file mode 100644
index 85d45f49e..000000000
--- a/debian/patches-rt/kthread-convert-worker-lock-to-raw-spinlock.patch
+++ /dev/null
@@ -1,204 +0,0 @@
-From: Julia Cartwright <julia@ni.com>
-Date: Fri, 28 Sep 2018 21:03:51 +0000
-Subject: [PATCH] kthread: convert worker lock to raw spinlock
-Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.10-rt7.tar.xz
-
-In order to enable the queuing of kthread work items from hardirq
-context even when PREEMPT_RT_FULL is enabled, convert the worker
-spin_lock to a raw_spin_lock.
-
-This is only acceptable to do because the work performed under the lock
-is well-bounded and minimal.
-
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Guenter Roeck <linux@roeck-us.net>
-Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
-Reported-by: Tim Sander <tim@krieglstein.org>
-Signed-off-by: Julia Cartwright <julia@ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/kthread.h | 4 ++--
- kernel/kthread.c | 42 +++++++++++++++++++++---------------------
- 2 files changed, 23 insertions(+), 23 deletions(-)
-
---- a/include/linux/kthread.h
-+++ b/include/linux/kthread.h
-@@ -85,7 +85,7 @@ enum {
-
- struct kthread_worker {
- unsigned int flags;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- struct list_head work_list;
- struct list_head delayed_work_list;
- struct task_struct *task;
-@@ -106,7 +106,7 @@ struct kthread_delayed_work {
- };
-
- #define KTHREAD_WORKER_INIT(worker) { \
-- .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
---- a/kernel/kthread.c
-+++ b/kernel/kthread.c
-@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthrea
- struct lock_class_key *key)
- {
- memset(worker, 0, sizeof(struct kthread_worker));
-- spin_lock_init(&worker->lock);
-+ raw_spin_lock_init(&worker->lock);
- lockdep_set_class_and_name(&worker->lock, key, name);
- INIT_LIST_HEAD(&worker->work_list);
- INIT_LIST_HEAD(&worker->delayed_work_list);
-@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr)
-
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
-- spin_lock_irq(&worker->lock);
-+ raw_spin_lock_irq(&worker->lock);
- worker->task = NULL;
-- spin_unlock_irq(&worker->lock);
-+ raw_spin_unlock_irq(&worker->lock);
- return 0;
- }
-
- work = NULL;
-- spin_lock_irq(&worker->lock);
-+ raw_spin_lock_irq(&worker->lock);
- if (!list_empty(&worker->work_list)) {
- work = list_first_entry(&worker->work_list,
- struct kthread_work, node);
- list_del_init(&work->node);
- }
- worker->current_work = work;
-- spin_unlock_irq(&worker->lock);
-+ raw_spin_unlock_irq(&worker->lock);
-
- if (work) {
- __set_current_state(TASK_RUNNING);
-@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_w
- bool ret = false;
- unsigned long flags;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- if (!queuing_blocked(worker, work)) {
- kthread_insert_work(worker, work, &worker->work_list);
- ret = true;
- }
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_queue_work);
-@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struc
- if (WARN_ON_ONCE(!worker))
- return;
-
-- spin_lock(&worker->lock);
-+ raw_spin_lock(&worker->lock);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struc
- list_del_init(&work->node);
- kthread_insert_work(worker, work, &worker->work_list);
-
-- spin_unlock(&worker->lock);
-+ raw_spin_unlock(&worker->lock);
- }
- EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
-
-@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct k
- unsigned long flags;
- bool ret = false;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
-
- if (!queuing_blocked(worker, work)) {
- __kthread_queue_delayed_work(worker, dwork, delay);
- ret = true;
- }
-
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
-@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_w
- if (!worker)
- return;
-
-- spin_lock_irq(&worker->lock);
-+ raw_spin_lock_irq(&worker->lock);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_w
- else
- noop = true;
-
-- spin_unlock_irq(&worker->lock);
-+ raw_spin_unlock_irq(&worker->lock);
-
- if (!noop)
- wait_for_completion(&fwork.done);
-@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct
- * any queuing is blocked by setting the canceling counter.
- */
- work->canceling++;
-- spin_unlock_irqrestore(&worker->lock, *flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
- del_timer_sync(&dwork->timer);
-- spin_lock_irqsave(&worker->lock, *flags);
-+ raw_spin_lock_irqsave(&worker->lock, *flags);
- work->canceling--;
- }
-
-@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kth
- unsigned long flags;
- int ret = false;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
-
- /* Do not bother with canceling when never queued. */
- if (!work->worker)
-@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kth
- fast_queue:
- __kthread_queue_delayed_work(worker, dwork, delay);
- out:
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
-@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(s
- if (!worker)
- goto out;
-
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- /* Work must not be used with >1 worker, see kthread_queue_work(). */
- WARN_ON_ONCE(work->worker != worker);
-
-@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(s
- * In the meantime, block any queuing by setting the canceling counter.
- */
- work->canceling++;
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- kthread_flush_work(work);
-- spin_lock_irqsave(&worker->lock, flags);
-+ raw_spin_lock_irqsave(&worker->lock, flags);
- work->canceling--;
-
- out_fast:
-- spin_unlock_irqrestore(&worker->lock, flags);
-+ raw_spin_unlock_irqrestore(&worker->lock, flags);
- out:
- return ret;
- }
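
For context, a minimal sketch (not part of the deleted patch or of this repository) of the usage pattern the raw_spinlock_t conversion enables: queuing kthread work from a hardirq handler. The module, worker name, and handler below are hypothetical, and the request_irq() wiring is intentionally omitted.

/*
 * Illustrative only: queuing kthread work from hardirq context, which is
 * what the worker-lock conversion makes safe under PREEMPT_RT_FULL.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
	/* Runs in the worker kthread; sleeping is allowed here. */
	pr_info("demo: work item processed\n");
}

static irqreturn_t __maybe_unused demo_irq_handler(int irq, void *dev_id)
{
	/*
	 * Hardirq context: kthread_queue_work() takes worker->lock.
	 * With a plain spinlock_t this would be a sleeping lock on
	 * PREEMPT_RT_FULL; as a raw_spinlock_t it stays a true spinning
	 * lock, so queuing from here is legal.
	 */
	kthread_queue_work(demo_worker, &demo_work);
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	demo_worker = kthread_create_worker(0, "demo-worker");
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);
	kthread_init_work(&demo_work, demo_work_fn);
	/* request_irq(..., demo_irq_handler, ...) would be registered here. */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_destroy_worker(demo_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The key point, as the patch description states, is that the queuing path acquires worker->lock in hardirq context; on PREEMPT_RT_FULL a spinlock_t becomes a sleeping lock, which is forbidden there, and the conversion is acceptable only because the critical sections in kernel/kthread.c are short and well-bounded.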