request_irq
<h2>总结</h2>
<pre><code class="language-c">// 对于网卡场景而言:
// request_irq 申请的是 irq。需要注意:irq 是由一个数组 irq_desc 来维护的,并且 irq_desc 变量并非 per_cpu,所以 irq 是所有 CPU 共同使用的,并非每个 CPU 上单独搞一套 irq。
// 模型上,就和 /proc/interrupts 一样。不过 irq 可以共享,即同一个 desc 中有多个 irqaction。同一个 CPU 可以多次申请同一个 irq。
// 申请的 irq 可以是 IRQF_PERCPU,是由 flag 来决定的。其具体语义尚未分析清楚,有待确认。
// 网卡场景下,request_irq 需要根据亲和性,找到一个 cpu 上的空闲 vector,然后在 per_cpu 变量 vector_irq 中,设置 vector -&gt; irq 的对应关系。
// 后面中断拿到的是 vector,就根据这个 CPU 上的 vector_irq 来找到 irq 号,从而在 irq_desc 中找到对应的处理函数。
// 为什么每个 CPU 上已经有 vector 中断向量号,还要搞出 irq 中断号呢?
// 因为每个 CPU 上都有各自的 vector,因此客观上标识一个硬件中断,就需要 cpuid + vector,这样使用不方便。而 irq_desc 变量并非 per_cpu,所以 irq 是所有 CPU 共同使用的。不同 CPU 上的相同的 vector 可以注册到不同的 irq 上。
request_irq -&gt; request_threaded_irq -&gt; __setup_irq -&gt; setup_affinity
</code></pre>
<p>参考文档: <a href="https://blog.csdn.net/ctgulvzhaole/article/details/115456983">https://blog.csdn.net/ctgulvzhaole/article/details/115456983</a></p>
<h2>分析</h2>
<pre><code class="language-c">//file: include/linux/interrupt.h
// Thin convenience wrapper: registers a primary (hard-irq context) handler
// only, by delegating to request_threaded_irq() with thread_fn = NULL,
// so no irq handler thread is created.
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}</code></pre>
<pre><code class="language-c">//file: kernel/irq/manage.c
/**
* request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs.
* Primary handler for threaded interrupts
* If NULL and thread_fn != NULL the default
* primary handler is installed
* @thread_fn: Function called from the irq handler thread
* If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. From the point this
* call is made your handler function may be invoked. Since
* your handler function must clear any interrupt the board
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
* If you want to set up a threaded irq handler for your device
* then you need to supply @handler and @thread_fn. @handler is
* still called in hard interrupt context and has to check
* whether the interrupt originates from the device. If yes it
* needs to disable the interrupt on the device and return
* IRQ_WAKE_THREAD which will wake up the handler thread and run
* @thread_fn. This split handler design is necessary to support
* shared interrupts.
*
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
*
* If your interrupt is shared you must pass a non NULL dev_id
* as this is required when freeing the interrupt.
*
* Flags:
*
* IRQF_SHARED Interrupt is shared
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn, unsigned long irqflags,
const char *devname, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
int retval;
/*
* Sanity-check: shared interrupts must pass in a real dev-ID,
* otherwise we'll have trouble later trying to figure out
* which interrupt is which (messes up the interrupt freeing
* logic etc).
*/
if ((irqflags &amp; IRQF_SHARED) &amp;&amp; !dev_id)
return -EINVAL;
// irq_desc[] is a single global table (not per-CPU), so an irq number
// identifies the same descriptor regardless of which CPU is involved.
desc = irq_to_desc(irq); // #define irq_to_desc(irq) (&amp;irq_desc[irq])
if (!desc)
return -EINVAL;
if (!irq_settings_can_request(desc) ||
WARN_ON(irq_settings_is_per_cpu_devid(desc)))
return -EINVAL;
if (!handler) {
if (!thread_fn)
return -EINVAL;
handler = irq_default_primary_handler;
}
action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); // allocate a fresh irqaction; existing actions on a shared line are untouched
if (!action)
return -ENOMEM;
action-&gt;handler = handler;
action-&gt;thread_fn = thread_fn;
action-&gt;flags = irqflags;
action-&gt;name = devname;
action-&gt;dev_id = dev_id;
chip_bus_lock(desc);
retval = __setup_irq(irq, desc, action); // analyzed below: links the action into desc and sets up affinity
chip_bus_sync_unlock(desc);
if (retval)
kfree(action); // on failure the action was never linked into desc, so free it here
#ifdef CONFIG_DEBUG_SHIRQ_FIXME
if (!retval &amp;&amp; (irqflags &amp; IRQF_SHARED)) {
/*
* It's a shared IRQ -- the driver ought to be prepared for it
* to happen immediately, so let's make sure....
* We disable the irq to make sure that a 'real' IRQ doesn't
* run in parallel with our fake.
*/
unsigned long flags;
disable_irq(irq);
local_irq_save(flags);
handler(irq, dev_id);
local_irq_restore(flags);
enable_irq(irq);
}
#endif
return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
*
* NOTE(review): excerpt only — the elided parts ("...") include the
* locking, the computation of 'shared'/'mask', and the code that links
* the new action into the descriptor.
*/
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
// ...
// First action on this line (not yet shared): initialize the descriptor.
if (!shared) {
init_waitqueue_head(&amp;desc-&gt;wait_for_threads);
/* Setup the type (level, edge polarity) if configured: */
if (new-&gt;flags &amp; IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq,
new-&gt;flags &amp; IRQF_TRIGGER_MASK);
if (ret)
goto out_mask;
}
desc-&gt;istate &amp;= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
IRQS_ONESHOT | IRQS_WAITING);
irqd_clear(&amp;desc-&gt;irq_data, IRQD_IRQ_INPROGRESS);
if (new-&gt;flags &amp; IRQF_PERCPU) { // per-CPU flag — NOTE(review): exact IRQF_PERCPU semantics not analyzed here, confirm
irqd_set(&amp;desc-&gt;irq_data, IRQD_PER_CPU);
irq_settings_set_per_cpu(desc);
}
if (new-&gt;flags &amp; IRQF_ONESHOT)
desc-&gt;istate |= IRQS_ONESHOT;
if (irq_settings_can_autoenable(desc))
irq_startup(desc, true);
else
/* Undo nested disables: */
desc-&gt;depth = 1;
/* Exclude IRQ from balancing if requested */
if (new-&gt;flags &amp; IRQF_NOBALANCING) {
irq_settings_set_no_balancing(desc);
irqd_set(&amp;desc-&gt;irq_data, IRQD_NO_BALANCING);
}
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask); // set the default interrupt affinity (analyzed below)
}
// ...
}
/*
* Generic version of the affinity autoselector.
*
* Computes a candidate CPU mask for @irq into @mask and applies it via
* irq_do_set_affinity(). Userspace-configured affinity is preserved if
* at least one of its CPUs is still online.
*/
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
struct cpumask *set = irq_default_affinity;
int node = desc-&gt;irq_data.node;
/* Excludes PER_CPU and NO_BALANCE interrupts */
if (!irq_can_set_affinity(irq))
return 0;
/*
* Preserve an userspace affinity setup, but make sure that
* one of the targets is online.
*/
if (irqd_has_set(&amp;desc-&gt;irq_data, IRQD_AFFINITY_SET)) {
if (cpumask_intersects(desc-&gt;irq_data.affinity,
cpu_online_mask))
set = desc-&gt;irq_data.affinity;
else
irqd_clear(&amp;desc-&gt;irq_data, IRQD_AFFINITY_SET);
}
cpumask_and(mask, cpu_online_mask, set); // restrict the candidate set to online CPUs
// Prefer CPUs on the device's NUMA node, but only if that still
// leaves at least one candidate CPU.
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
/* make sure at least one of the cpus in nodemask is online */
if (cpumask_intersects(mask, nodemask))
cpumask_and(mask, mask, nodemask);
}
irq_do_set_affinity(&amp;desc-&gt;irq_data, mask, false); // apply the computed affinity mask
return 0;
}
// Apply @mask as the affinity of the interrupt described by @data by
// calling into the irq chip, then update the descriptor's bookkeeping.
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_desc *desc = irq_data_to_desc(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
int ret;
ret = chip-&gt;irq_set_affinity(data, mask, force); // chip-specific callback; on x86 this presumably ends up in __assign_irq_vector — TODO confirm
switch (ret) {
case IRQ_SET_MASK_OK:
cpumask_copy(data-&gt;affinity, mask);
/* fall through: OK also updates thread affinity below */
case IRQ_SET_MASK_OK_NOCOPY:
irq_set_thread_affinity(desc);
ret = 0;
}
return ret;
}</code></pre>