// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <[email protected]> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting.  Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq();

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() on because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
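
/*
 * Illustrative sketch (not part of this file): how a caller might use
 * smp_call_function_single() to run a fast, non-blocking callback on one
 * CPU and wait for the result. The callback runs with interrupts disabled
 * on the target CPU, so it must not sleep. All names below are
 * hypothetical.
 */
#if 0	/* example only, not built */
static void read_remote_id(void *info)
{
	/* Runs on the target CPU, in IPI (or irq-disabled) context. */
	*(unsigned int *)info = smp_processor_id();
}

static int sample_cpu(int cpu)
{
	unsigned int val = 0;
	int err;

	/* wait=1: returns only after read_remote_id() has run on @cpu. */
	err = smp_call_function_single(cpu, read_remote_id, &val, 1);
	if (!err)
		pr_info("CPU%d reported %u\n", cpu, val);
	return err;
}
#endif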

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
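
/*
 * Illustrative sketch (not part of this file): the csd is typically
 * embedded in (or tied to) a longer-lived object, since its flags word is
 * what tracks whether the previous request is still in flight. A static
 * csd starts out zero-initialized, i.e. unlocked. All names below are
 * hypothetical.
 */
#if 0	/* example only, not built */
static void poke_fn(void *info)
{
	/* Runs on the target CPU; must be fast and non-blocking. */
}

static call_single_data_t poke_csd;	/* static => zero-initialized */

static int poke_cpu(int cpu)
{
	poke_csd.func = poke_fn;
	poke_csd.info = NULL;

	/* -EBUSY here means the previous poke has not run yet. */
	return smp_call_function_single_async(cpu, &poke_csd);
}
#endif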

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() on because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->node.u_flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		csd->node.src = smp_processor_id();
		csd->node.dst = cpu;
#endif
		if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
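
/*
 * Illustrative sketch (not part of this file): running a callback on every
 * other online CPU, with preemption disabled as the comment above
 * requires. All names below are hypothetical.
 */
#if 0	/* example only, not built */
static void drain_local_state(void *info)
{
	/* Runs on each targeted CPU, in IPI context. */
}

static void drain_all_other_cpus(void)
{
	preempt_disable();
	/* wait=true: block until every targeted CPU has run the callback. */
	smp_call_function_many(cpu_online_mask, drain_local_state, NULL, true);
	preempt_enable();
}
#endif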

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
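
/*
 * Illustrative sketch (not part of this file): invalidating a per-CPU
 * cache on the CPUs of one NUMA node, including the local CPU if it
 * belongs to that node. All names below are hypothetical.
 */
#if 0	/* example only, not built */
static void invalidate_local_cache(void *info)
{
	/* May run in IPI context on remote CPUs; must not sleep. */
}

static void invalidate_node_caches(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), invalidate_local_cache,
			 NULL, true);
}
#endif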

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
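
/*
 * Illustrative sketch (not part of this file): using the cond_func hook
 * to flush a per-CPU structure only on CPUs whose generation counter is
 * stale, so CPUs with nothing pending are never IPI'd. All names below
 * are hypothetical.
 */
#if 0	/* example only, not built */
static DEFINE_PER_CPU(unsigned long, flush_gen);
static unsigned long global_gen;

static bool cpu_needs_flush(int cpu, void *info)
{
	/* Called with preemption disabled; decides whether to IPI @cpu. */
	return per_cpu(flush_gen, cpu) != global_gen;
}

static void do_local_flush(void *info)
{
	this_cpu_write(flush_gen, global_gen);
}

static void flush_stale_cpus(void)
{
	on_each_cpu_cond(cpu_needs_flush, do_local_flush, NULL, true);
}
#endif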

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes every CPU that is currently in the idle state, including CPUs
 * that are idle-polling; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
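
/*
 * Illustrative sketch (not part of this file): unlike the IPI-based
 * helpers above, smp_call_on_cpu() runs @func from a workqueue on the
 * target CPU, so @func may sleep. All names below are hypothetical.
 */
#if 0	/* example only, not built */
static int read_slow_register(void *data)
{
	/* Runs in process context on the target CPU; sleeping is fine. */
	return 0;
}

static int probe_cpu(unsigned int cpu)
{
	/* phys=false: no hypervisor vCPU pinning requested. */
	return smp_call_on_cpu(cpu, read_slow_register, NULL, false);
}
#endif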