/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due to non-atomic handling of bh_mask_count.
 *		Copyright (C) 1998  Andrea Arcangeli
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/tqueue.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution, so we get a sort of weak cpu binding.  It is still not
     clear whether this will result in better locality or not.
   - These softirqs are not masked by global cli() and start_bh_atomic()
     (for obvious reasons).  Hence, old parts of the code still using
     global locks MUST NOT use softirqs directly, but must go through
     interfacing routines that acquire the global locks; see the BH
     implementation below for an example.

   Examples:
   - NET RX softirq.  It is multithreaded and does not require
     any global serialization.
   - NET TX softirq.  It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt themselves.
   - Bottom halves: globally serialized, grr...
 */

/* No separate irq_stat for s390, it is part of PSA */
#if !defined(CONFIG_ARCH_S390)
irq_cpustat_t irq_stat[NR_CPUS];
#endif	/* CONFIG_ARCH_S390 */

static struct softirq_action softirq_vec[32] __cacheline_aligned;

asmlinkage void do_softirq()
{
        int cpu = smp_processor_id();
        __u32 active, mask;

        if (in_interrupt())
                return;

        local_bh_disable();

        local_irq_disable();
        mask = softirq_mask(cpu);
        active = softirq_active(cpu) & mask;

        if (active) {
                struct softirq_action *h;

restart:
                /* Reset the active bitmask before enabling irqs */
                softirq_active(cpu) &= ~active;

                local_irq_enable();

                h = softirq_vec;
                mask &= ~active;

                do {
                        if (active & 1)
                                h->action(h);
                        h++;
                        active >>= 1;
                } while (active);

                local_irq_disable();

                active = softirq_active(cpu);
                if ((active &= mask) != 0)
                        goto retry;
        }

        local_bh_enable();

        /* Leave with locally disabled hard irqs.  It is critical to
         * close the window for infinite recursion: while we held the
         * local bh count it protected us, now we are defenceless.
         */
        return;

retry:
        /* More softirqs became pending while we ran them: scan again. */
        goto restart;
}

static spinlock_t softirq_mask_lock = SPIN_LOCK_UNLOCKED;

void open_softirq(int nr, void (*action)(struct softirq_action *), void *data)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&softirq_mask_lock, flags);
        softirq_vec[nr].data = data;
        softirq_vec[nr].action = action;

        for (i = 0; i < NR_CPUS; i++)
                softirq_mask(i) |= (1 << nr);
        spin_unlock_irqrestore(&softirq_mask_lock, flags);
}


/* Tasklets */

struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;

static void tasklet_action(struct softirq_action *a)
{
        int cpu = smp_processor_id();
        struct tasklet_struct *list;

        local_irq_disable();
        list = tasklet_vec[cpu].list;
        tasklet_vec[cpu].list = NULL;
        local_irq_enable();

        while (list != NULL) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (atomic_read(&t->count) == 0) {
                                clear_bit(TASKLET_STATE_SCHED, &t->state);

                                t->func(t->data);
                                /*
                                 * tasklet_trylock() uses test_and_set_bit,
                                 * which implies an mb when it returns zero,
                                 * so we need an explicit mb only here, when
                                 * closing the critical section.
                                 */
#ifdef CONFIG_SMP
                                smp_mb__before_clear_bit();
#endif
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }
                /* Locked or disabled: put it back and try again later. */
                local_irq_disable();
                t->next = tasklet_vec[cpu].list;
                tasklet_vec[cpu].list = t;
                __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}
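/*
 * Usage sketch (illustrative only, not part of this file): how a driver
 * typically uses the tasklet interface above.  The names my_dev,
 * my_tasklet_fn and my_irq_handler are made up for the example;
 * DECLARE_TASKLET() and tasklet_schedule() come from
 * <linux/interrupt.h>.
 *
 *	static struct my_dev the_dev;
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *) data;
 *		... deferred work, runs with hard irqs enabled ...
 *	}
 *
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn,
 *			       (unsigned long) &the_dev);
 *
 *	static void my_irq_handler(int irq, void *dev_id,
 *				   struct pt_regs *regs)
 *	{
 *		... acknowledge the hardware ...
 *		tasklet_schedule(&my_tasklet);
 *	}
 *
 * tasklet_schedule() marks only the local cpu, so the function runs on
 * the cpu that took the interrupt, and the tasklet_trylock() in
 * tasklet_action() above guarantees it never runs on two cpus at once.
 */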
struct tasklet_head tasklet_hi_vec[NR_CPUS] __cacheline_aligned;

static void tasklet_hi_action(struct softirq_action *a)
{
        int cpu = smp_processor_id();
        struct tasklet_struct *list;

        local_irq_disable();
        list = tasklet_hi_vec[cpu].list;
        tasklet_hi_vec[cpu].list = NULL;
        local_irq_enable();

        while (list != NULL) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (atomic_read(&t->count) == 0) {
                                clear_bit(TASKLET_STATE_SCHED, &t->state);

                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }
                local_irq_disable();
                t->next = tasklet_hi_vec[cpu].list;
                tasklet_hi_vec[cpu].list = t;
                __cpu_raise_softirq(cpu, HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->func = func;
        t->data = data;
        t->state = 0;
        atomic_set(&t->count, 0);
}

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                current->state = TASK_RUNNING;
                do {
                        current->policy |= SCHED_YIELD;
                        schedule();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}


/* Old style BHs */

static void (*bh_base[32])(void);
struct tasklet_struct bh_task_vec[32];

/* BHs are serialized by the spinlock global_bh_lock.

   It would still be possible to implement synchronize_bh() as
   spin_unlock_wait(&global_bh_lock).  That operation is not used by
   the kernel now, so the lock is kept non-private only because of
   wait_on_irq().

   It can be removed only after auditing all the BHs.
 */
spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;

static void bh_action(unsigned long nr)
{
        int cpu = smp_processor_id();

        if (!spin_trylock(&global_bh_lock))
                goto resched;

        if (!hardirq_trylock(cpu))
                goto resched_unlock;

        if (bh_base[nr])
                bh_base[nr]();

        hardirq_endlock(cpu);
        spin_unlock(&global_bh_lock);
        return;

resched_unlock:
        spin_unlock(&global_bh_lock);
resched:
        mark_bh(nr);
}

void init_bh(int nr, void (*routine)(void))
{
        bh_base[nr] = routine;
        mb();
}

void remove_bh(int nr)
{
        tasklet_kill(bh_task_vec + nr);
        bh_base[nr] = NULL;
}

void __init softirq_init()
{
        int i;

        for (i = 0; i < 32; i++)
                tasklet_init(bh_task_vec + i, bh_action, i);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

void __run_task_queue(task_queue *list)
{
        struct list_head head, *next;
        unsigned long flags;

        /* Splice the whole queue onto a local list head, then run it. */
        spin_lock_irqsave(&tqueue_lock, flags);
        list_add(&head, list);
        list_del_init(list);
        spin_unlock_irqrestore(&tqueue_lock, flags);

        next = head.next;
        while (next != &head) {
                void (*f)(void *);
                struct tq_struct *p;
                void *data;

                p = list_entry(next, struct tq_struct, list);
                next = next->next;
                f = p->routine;
                data = p->data;
                wmb();
                p->sync = 0;
                if (f)
                        f(data);
        }
}
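/*
 * Usage sketch (illustrative only, not part of this file): deferring
 * work through a task queue.  run_task_queue() in <linux/tqueue.h> is
 * the usual entry point wrapping __run_task_queue() above; my_work and
 * my_routine are hypothetical names.
 *
 *	static void my_routine(void *data)
 *	{
 *		... runs later, outside the irq handler ...
 *	}
 *
 *	static struct tq_struct my_work = {
 *		routine:	my_routine,
 *		data:		NULL,
 *	};
 *
 *	queue_task(&my_work, &tq_immediate);
 *	mark_bh(IMMEDIATE_BH);
 *
 * Note the ordering in __run_task_queue(): routine/data are read and
 * the wmb() issued before p->sync is cleared, so queue_task() may
 * safely requeue the same tq_struct from within my_routine() itself.
 */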