Diffstat (limited to 'debian/patches-rt/0004-workqueue-Convert-the-locks-to-raw-type.patch')
 debian/patches-rt/0004-workqueue-Convert-the-locks-to-raw-type.patch | 685 +++++++++++++++++++
 1 file changed, 685 insertions(+), 0 deletions(-)
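
The patch below switches the worker_pool lock and wq_mayday_lock from spinlock_t to raw_spinlock_t. On PREEMPT_RT kernels spinlock_t is backed by a sleeping rtmutex, while raw_spinlock_t keeps the non-sleeping, preemption-disabling behaviour; the conversion is only safe because, as the commit message notes, these locks are not held over unbounded periods of time or iterations. The following is a minimal sketch of the two lock flavours and their locking calls, not taken from the patch itself; the example_* identifiers are made up for illustration.

#include <linux/spinlock.h>

/*
 * spinlock_t: on PREEMPT_RT this is a sleeping lock (rtmutex-based), so the
 * critical section may be preempted and must not run in hard-IRQ context.
 */
static DEFINE_SPINLOCK(example_sleeping_lock);

/*
 * raw_spinlock_t: always a busy-waiting lock that disables preemption (and
 * interrupts with the _irq/_irqsave variants), even on PREEMPT_RT. Suitable
 * only for short, bounded critical sections.
 */
static DEFINE_RAW_SPINLOCK(example_raw_lock);

static void example_critical_sections(void)
{
	unsigned long flags;

	/* Pre-patch style, as used throughout kernel/workqueue.c before 5.4-rt. */
	spin_lock_irq(&example_sleeping_lock);
	/* ... bounded work ... */
	spin_unlock_irq(&example_sleeping_lock);

	/* Post-patch style; the flags-saving variant is shown for completeness. */
	raw_spin_lock_irqsave(&example_raw_lock, flags);
	/* ... short, bounded work only ... */
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);
}
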
diff --git a/debian/patches-rt/0004-workqueue-Convert-the-locks-to-raw-type.patch b/debian/patches-rt/0004-workqueue-Convert-the-locks-to-raw-type.patch
new file mode 100644
index 000000000..7fc27cb4b
--- /dev/null
+++ b/debian/patches-rt/0004-workqueue-Convert-the-locks-to-raw-type.patch
@@ -0,0 +1,685 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 22 May 2019 12:43:56 +0200
+Subject: [PATCH 4/4] workqueue: Convert the locks to raw type
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz
+
+After all the workqueue and the timer rework, we can finally make the
+worker_pool lock raw.
+The lock is not held over an unbounded period of time/iterations.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+[Salvatore Bonaccorso: Adjust for context changes due to backport of
+e66b39af00f4 ("workqueue: Fix pwq ref leak in rescuer_thread()") and
+def98c84b6cd ("workqueue: Fix spurious sanity check failures in
+destroy_workqueue()") in 5.4.4.]
+---
+ kernel/workqueue.c | 164 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 82 insertions(+), 82 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -146,7 +146,7 @@
+ /* struct worker is defined in workqueue_internal.h */
+
+ struct worker_pool {
+- spinlock_t lock; /* the pool lock */
++ raw_spinlock_t lock; /* the pool lock */
+ int cpu; /* I: the associated cpu */
+ int node; /* I: the associated node ID */
+ int id; /* I: pool ID */
+@@ -301,7 +301,7 @@
+
+ static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
+ static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
+-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
++static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+ static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+
+ static LIST_HEAD(workqueues); /* PR: list of all workqueues */
+@@ -826,7 +826,7 @@
+ * Wake up the first idle worker of @pool.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void wake_up_worker(struct worker_pool *pool)
+ {
+@@ -879,7 +879,7 @@
+ return;
+
+ worker->sleeping = 1;
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * The counterpart of the following dec_and_test, implied mb,
+@@ -898,7 +898,7 @@
+ if (next)
+ wake_up_process(next->task);
+ }
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ /**
+@@ -909,7 +909,7 @@
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+- * spin_lock_irq(rq->lock)
++ * raw_spin_lock_irq(rq->lock)
+ *
+ * This function is called during schedule() when a kworker is going
+ * to sleep. It's used by psi to identify aggregation workers during
+@@ -940,7 +940,7 @@
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock)
++ * raw_spin_lock_irq(pool->lock)
+ */
+ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
+ {
+@@ -965,7 +965,7 @@
+ * Clear @flags in @worker->flags and adjust nr_running accordingly.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock)
++ * raw_spin_lock_irq(pool->lock)
+ */
+ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
+ {
+@@ -1013,7 +1013,7 @@
+ * actually occurs, it should be easy to locate the culprit work function.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ *
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
+@@ -1048,7 +1048,7 @@
+ * nested inside outer list_for_each_entry_safe().
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ struct work_struct **nextp)
+@@ -1126,9 +1126,9 @@
+ * As both pwqs and pools are RCU protected, the
+ * following lock operations are safe.
+ */
+- spin_lock_irq(&pwq->pool->lock);
++ raw_spin_lock_irq(&pwq->pool->lock);
+ put_pwq(pwq);
+- spin_unlock_irq(&pwq->pool->lock);
++ raw_spin_unlock_irq(&pwq->pool->lock);
+ }
+ }
+
+@@ -1161,7 +1161,7 @@
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
+ {
+@@ -1260,7 +1260,7 @@
+ if (!pool)
+ goto fail;
+
+- spin_lock(&pool->lock);
++ raw_spin_lock(&pool->lock);
+ /*
+ * work->data is guaranteed to point to pwq only while the work
+ * item is queued on pwq->wq, and both updating work->data to point
+@@ -1289,11 +1289,11 @@
+ /* work->data points to pwq iff queued, point to pool */
+ set_work_pool_and_keep_pending(work, pool->id);
+
+- spin_unlock(&pool->lock);
++ raw_spin_unlock(&pool->lock);
+ rcu_read_unlock();
+ return 1;
+ }
+- spin_unlock(&pool->lock);
++ raw_spin_unlock(&pool->lock);
+ fail:
+ rcu_read_unlock();
+ local_irq_restore(*flags);
+@@ -1314,7 +1314,7 @@
+ * work_struct flags.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+ struct list_head *head, unsigned int extra_flags)
+@@ -1429,7 +1429,7 @@
+ if (last_pool && last_pool != pwq->pool) {
+ struct worker *worker;
+
+- spin_lock(&last_pool->lock);
++ raw_spin_lock(&last_pool->lock);
+
+ worker = find_worker_executing_work(last_pool, work);
+
+@@ -1437,11 +1437,11 @@
+ pwq = worker->current_pwq;
+ } else {
+ /* meh... not running there, queue here */
+- spin_unlock(&last_pool->lock);
+- spin_lock(&pwq->pool->lock);
++ raw_spin_unlock(&last_pool->lock);
++ raw_spin_lock(&pwq->pool->lock);
+ }
+ } else {
+- spin_lock(&pwq->pool->lock);
++ raw_spin_lock(&pwq->pool->lock);
+ }
+
+ /*
+@@ -1454,7 +1454,7 @@
+ */
+ if (unlikely(!pwq->refcnt)) {
+ if (wq->flags & WQ_UNBOUND) {
+- spin_unlock(&pwq->pool->lock);
++ raw_spin_unlock(&pwq->pool->lock);
+ cpu_relax();
+ goto retry;
+ }
+@@ -1486,7 +1486,7 @@
+ insert_work(pwq, work, worklist, work_flags);
+
+ out:
+- spin_unlock(&pwq->pool->lock);
++ raw_spin_unlock(&pwq->pool->lock);
+ rcu_read_unlock();
+ }
+
+@@ -1757,7 +1757,7 @@
+ * necessary.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void worker_enter_idle(struct worker *worker)
+ {
+@@ -1797,7 +1797,7 @@
+ * @worker is leaving idle state. Update stats.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void worker_leave_idle(struct worker *worker)
+ {
+@@ -1935,11 +1935,11 @@
+ worker_attach_to_pool(worker, pool);
+
+ /* start the newly created worker */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ worker->pool->nr_workers++;
+ worker_enter_idle(worker);
+ wake_up_process(worker->task);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ return worker;
+
+@@ -1958,7 +1958,7 @@
+ * be idle.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void destroy_worker(struct worker *worker)
+ {
+@@ -1984,7 +1984,7 @@
+ {
+ struct worker_pool *pool = from_timer(pool, t, idle_timer);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ while (too_many_workers(pool)) {
+ struct worker *worker;
+@@ -2002,7 +2002,7 @@
+ destroy_worker(worker);
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ static void send_mayday(struct work_struct *work)
+@@ -2033,8 +2033,8 @@
+ struct worker_pool *pool = from_timer(pool, t, mayday_timer);
+ struct work_struct *work;
+
+- spin_lock_irq(&pool->lock);
+- spin_lock(&wq_mayday_lock); /* for wq->maydays */
++ raw_spin_lock_irq(&pool->lock);
++ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
+
+ if (need_to_create_worker(pool)) {
+ /*
+@@ -2047,8 +2047,8 @@
+ send_mayday(work);
+ }
+
+- spin_unlock(&wq_mayday_lock);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock(&wq_mayday_lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
+ }
+@@ -2067,7 +2067,7 @@
+ * may_start_working() %true.
+ *
+ * LOCKING:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations. Called only from
+ * manager.
+ */
+@@ -2076,7 +2076,7 @@
+ __acquires(&pool->lock)
+ {
+ restart:
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
+@@ -2092,7 +2092,7 @@
+ }
+
+ del_timer_sync(&pool->mayday_timer);
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /*
+ * This is necessary even after a new worker was just successfully
+ * created as @pool->lock was dropped and the new worker might have
+@@ -2115,7 +2115,7 @@
+ * and may_start_working() is true.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times. Does GFP_KERNEL allocations.
+ *
+ * Return:
+@@ -2154,7 +2154,7 @@
+ * call this function to process a work.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which is released and regrabbed.
++ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
+ */
+ static void process_one_work(struct worker *worker, struct work_struct *work)
+ __releases(&pool->lock)
+@@ -2236,7 +2236,7 @@
+ */
+ set_work_pool_and_clear_pending(work, pool->id);
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+@@ -2291,7 +2291,7 @@
+ */
+ cond_resched();
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /* clear cpu intensive status */
+ if (unlikely(cpu_intensive))
+@@ -2317,7 +2317,7 @@
+ * fetches a work from the top and executes it.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
++ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
+ * multiple times.
+ */
+ static void process_scheduled_works(struct worker *worker)
+@@ -2359,11 +2359,11 @@
+ /* tell the scheduler that this is a workqueue worker */
+ set_pf_worker(true);
+ woke_up:
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /* am I supposed to die? */
+ if (unlikely(worker->flags & WORKER_DIE)) {
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ WARN_ON_ONCE(!list_empty(&worker->entry));
+ set_pf_worker(false);
+
+@@ -2429,7 +2429,7 @@
+ */
+ worker_enter_idle(worker);
+ __set_current_state(TASK_IDLE);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ schedule();
+ goto woke_up;
+ }
+@@ -2483,7 +2483,7 @@
+ should_stop = kthread_should_stop();
+
+ /* see whether any pwq is asking for help */
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+
+ while (!list_empty(&wq->maydays)) {
+ struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+@@ -2495,11 +2495,11 @@
+ __set_current_state(TASK_RUNNING);
+ list_del_init(&pwq->mayday_node);
+
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ worker_attach_to_pool(rescuer, pool);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * Slurp in all works issued via this workqueue and
+@@ -2528,7 +2528,7 @@
+ * incur MAYDAY_INTERVAL delay inbetween.
+ */
+ if (need_to_create_worker(pool)) {
+- spin_lock(&wq_mayday_lock);
++ raw_spin_lock(&wq_mayday_lock);
+ /*
+ * Queue iff we aren't racing destruction
+ * and somebody else hasn't queued it already.
+@@ -2537,7 +2537,7 @@
+ get_pwq(pwq);
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
+ }
+- spin_unlock(&wq_mayday_lock);
++ raw_spin_unlock(&wq_mayday_lock);
+ }
+ }
+
+@@ -2555,14 +2555,14 @@
+ if (need_more_worker(pool))
+ wake_up_worker(pool);
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ worker_detach_from_pool(rescuer);
+
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+ }
+
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ if (should_stop) {
+ __set_current_state(TASK_RUNNING);
+@@ -2642,7 +2642,7 @@
+ * underneath us, so we can't reliably determine pwq from @target.
+ *
+ * CONTEXT:
+- * spin_lock_irq(pool->lock).
++ * raw_spin_lock_irq(pool->lock).
+ */
+ static void insert_wq_barrier(struct pool_workqueue *pwq,
+ struct wq_barrier *barr,
+@@ -2729,7 +2729,7 @@
+ for_each_pwq(pwq, wq) {
+ struct worker_pool *pool = pwq->pool;
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ if (flush_color >= 0) {
+ WARN_ON_ONCE(pwq->flush_color != -1);
+@@ -2746,7 +2746,7 @@
+ pwq->work_color = work_color;
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
+@@ -2946,9 +2946,9 @@
+ for_each_pwq(pwq, wq) {
+ bool drained;
+
+- spin_lock_irq(&pwq->pool->lock);
++ raw_spin_lock_irq(&pwq->pool->lock);
+ drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+- spin_unlock_irq(&pwq->pool->lock);
++ raw_spin_unlock_irq(&pwq->pool->lock);
+
+ if (drained)
+ continue;
+@@ -2984,7 +2984,7 @@
+ return false;
+ }
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /* see the comment in try_to_grab_pending() with the same code */
+ pwq = get_work_pwq(work);
+ if (pwq) {
+@@ -3000,7 +3000,7 @@
+ check_flush_dependency(pwq->wq, work);
+
+ insert_wq_barrier(pwq, barr, work, worker);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ /*
+ * Force a lock recursion deadlock when using flush_work() inside a
+@@ -3019,7 +3019,7 @@
+ rcu_read_unlock();
+ return true;
+ already_gone:
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ rcu_read_unlock();
+ return false;
+ }
+@@ -3412,7 +3412,7 @@
+ */
+ static int init_worker_pool(struct worker_pool *pool)
+ {
+- spin_lock_init(&pool->lock);
++ raw_spin_lock_init(&pool->lock);
+ pool->id = -1;
+ pool->cpu = -1;
+ pool->node = NUMA_NO_NODE;
+@@ -3538,7 +3538,7 @@
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
+ */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ swait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+@@ -3546,7 +3546,7 @@
+ while ((worker = first_idle_worker(pool)))
+ destroy_worker(worker);
+ WARN_ON(pool->nr_workers || pool->nr_idle);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+
+ mutex_lock(&wq_pool_attach_mutex);
+ if (!list_empty(&pool->workers))
+@@ -3702,7 +3702,7 @@
+ return;
+
+ /* this function can be called during early boot w/ irq disabled */
+- spin_lock_irqsave(&pwq->pool->lock, flags);
++ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+
+ /*
+ * During [un]freezing, the caller is responsible for ensuring that
+@@ -3725,7 +3725,7 @@
+ pwq->max_active = 0;
+ }
+
+- spin_unlock_irqrestore(&pwq->pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ }
+
+ /* initialize newly alloced @pwq which is associated with @wq and @pool */
+@@ -4127,9 +4127,9 @@
+
+ use_dfl_pwq:
+ mutex_lock(&wq->mutex);
+- spin_lock_irq(&wq->dfl_pwq->pool->lock);
++ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ get_pwq(wq->dfl_pwq);
+- spin_unlock_irq(&wq->dfl_pwq->pool->lock);
++ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+ out_unlock:
+ mutex_unlock(&wq->mutex);
+@@ -4342,9 +4342,9 @@
+ struct worker *rescuer = wq->rescuer;
+
+ /* this prevents new queueing */
+- spin_lock_irq(&wq_mayday_lock);
++ raw_spin_lock_irq(&wq_mayday_lock);
+ wq->rescuer = NULL;
+- spin_unlock_irq(&wq_mayday_lock);
++ raw_spin_unlock_irq(&wq_mayday_lock);
+
+ /* rescuer will empty maydays list before exiting */
+ kthread_stop(rescuer->task);
+@@ -4540,10 +4540,10 @@
+ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (pool) {
+- spin_lock_irqsave(&pool->lock, flags);
++ raw_spin_lock_irqsave(&pool->lock, flags);
+ if (find_worker_executing_work(pool, work))
+ ret |= WORK_BUSY_RUNNING;
+- spin_unlock_irqrestore(&pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pool->lock, flags);
+ }
+ rcu_read_unlock();
+
+@@ -4750,10 +4750,10 @@
+ pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+ for_each_pwq(pwq, wq) {
+- spin_lock_irqsave(&pwq->pool->lock, flags);
++ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+ if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+ show_pwq(pwq);
+- spin_unlock_irqrestore(&pwq->pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4767,7 +4767,7 @@
+ struct worker *worker;
+ bool first = true;
+
+- spin_lock_irqsave(&pool->lock, flags);
++ raw_spin_lock_irqsave(&pool->lock, flags);
+ if (pool->nr_workers == pool->nr_idle)
+ goto next_pool;
+
+@@ -4786,7 +4786,7 @@
+ }
+ pr_cont("\n");
+ next_pool:
+- spin_unlock_irqrestore(&pool->lock, flags);
++ raw_spin_unlock_irqrestore(&pool->lock, flags);
+ /*
+ * We could be printing a lot from atomic context, e.g.
+ * sysrq-t -> show_workqueue_state(). Avoid triggering
+@@ -4816,7 +4816,7 @@
+ struct worker_pool *pool = worker->pool;
+
+ if (pool) {
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ /*
+ * ->desc tracks information (wq name or
+ * set_worker_desc()) for the latest execution. If
+@@ -4830,7 +4830,7 @@
+ scnprintf(buf + off, size - off, "-%s",
+ worker->desc);
+ }
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+ }
+
+@@ -4861,7 +4861,7 @@
+
+ for_each_cpu_worker_pool(pool, cpu) {
+ mutex_lock(&wq_pool_attach_mutex);
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ /*
+ * We've blocked all attach/detach operations. Make all workers
+@@ -4875,7 +4875,7 @@
+
+ pool->flags |= POOL_DISASSOCIATED;
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ mutex_unlock(&wq_pool_attach_mutex);
+
+ /*
+@@ -4901,9 +4901,9 @@
+ * worker blocking could lead to lengthy stalls. Kick off
+ * unbound chain execution of currently pending work items.
+ */
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+ wake_up_worker(pool);
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+ }
+
+@@ -4930,7 +4930,7 @@
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
+
+- spin_lock_irq(&pool->lock);
++ raw_spin_lock_irq(&pool->lock);
+
+ pool->flags &= ~POOL_DISASSOCIATED;
+
+@@ -4969,7 +4969,7 @@
+ WRITE_ONCE(worker->flags, worker_flags);
+ }
+
+- spin_unlock_irq(&pool->lock);
++ raw_spin_unlock_irq(&pool->lock);
+ }
+
+ /**