/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * Ugly, ugly, but the goto's result in better assembly..
 *
 * Fault in, and verify write permission on, every page of a user
 * buffer by hand.  Needed on CPUs (plain i386) where the WP bit does
 * not protect read-only pages against supervisor-mode writes, so a
 * copy_to_user() onto a copy-on-write page would not fault by itself.
 */
int __verify_write(const void * addr, unsigned long size)
{
        struct vm_area_struct * vma;
        unsigned long start = (unsigned long) addr;

        if (!size)
                return 1;

        vma = find_vma(current->mm, start);
        if (!vma)
                goto bad_area;
        if (vma->vm_start > start)
                goto check_stack;

good_area:
        if (!(vma->vm_flags & VM_WRITE))
                goto bad_area;
        size--;
        size += start & ~PAGE_MASK;
        size >>= PAGE_SHIFT;
        start &= PAGE_MASK;

        for (;;) {
                if (handle_mm_fault(current->mm, vma, start, 1) <= 0)
                        goto bad_area;
                if (!size)
                        break;
                size--;
                start += PAGE_SIZE;
                if (start < vma->vm_end)
                        continue;
                vma = vma->vm_next;
                if (!vma || vma->vm_start != start)
                        goto bad_area;
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        return 1;

check_stack:
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, start) == 0)
                goto good_area;

bad_area:
        return 0;
}

extern spinlock_t console_lock, timerlist_lock;

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out (timerlist_lock is acquired through the
 * console unblank code)
 */
void bust_spinlocks(void)
{
        spin_lock_init(&console_lock);
        spin_lock_init(&timerlist_lock);
}

asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
extern unsigned long idt;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *      bit 2 == 0 means kernel, 1 means user-mode
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long address;
        unsigned long page;
        unsigned long fixup;
        int write;
        siginfo_t info;

        /* get the faulting address from %cr2 */
        __asm__("movl %%cr2,%0":"=r" (address));

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (address >= TASK_SIZE)
                goto vmalloc_fault;

        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_interrupt() || !mm)
                goto no_context;

        down(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & 4) {
                /*
                 * accessing the stack below %esp is always a bug.
                 * The "+ 32" is there due to some instructions (like
                 * pusha) doing post-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 32 < regs->esp)
                        goto bad_area;
        }
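        /*
         * Below the stack vma but within the 32-byte slack above
         * %esp (or a kernel-mode access): treat it as a legitimate
         * stack grow.  expand_stack() moves vm_start down to cover
         * the faulting address, and fails (non-zero) if the enlarged
         * stack would exceed the task's stack rlimit.
         */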
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & 3) {
                default:        /* 3: write, present */
#ifdef TEST_VERIFY_AREA
                        if (regs->cs == KERNEL_CS)
                                printk("WP fault at %08lx\n", regs->eip);
#endif
                        /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        write++;
                        break;
                case 1:         /* read, present */
                        goto bad_area;
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        switch (handle_mm_fault(mm, vma, address, write)) {
        case 1:
                tsk->min_flt++;
                break;
        case 2:
                tsk->maj_flt++;
                break;
        case 0:
                goto do_sigbus;
        default:
                goto out_of_memory;
        }

        /*
         * Did it hit the DOS screen memory VA from vm86 mode?
         */
        if (regs->eflags & VM_MASK) {
                unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
                if (bit < 32)
                        tsk->thread.screen_bitmap |= 1 << bit;
        }
        up(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & 4) {
                tsk->thread.cr2 = address;
                tsk->thread.error_code = error_code;
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

        /*
         * Pentium F0 0F C7 C8 bug workaround: the IDT has been
         * mapped read-only, so the lockup-provoking opcode takes a
         * page fault on IDT entry 6 (invalid opcode; descriptors are
         * 8 bytes each) instead of hanging the CPU.  Redirect it to
         * the invalid-opcode handler by hand.
         */
        if (boot_cpu_data.f00f_bug) {
                unsigned long nr;

                nr = (address - idt) >> 3;

                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return;
                }
        }

no_context:
        /* Are we prepared to handle this kernel fault?  */
        if ((fixup = search_exception_table(regs->eip)) != 0) {
                regs->eip = fixup;
                return;
        }

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

        bust_spinlocks();

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(" printing eip:\n");
        printk("%08lx\n", regs->eip);
        asm("movl %%cr3,%0":"=r" (page));
        page = ((unsigned long *) __va(page))[address >> 22];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
        die("Oops", regs, error_code);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up(&mm->mmap_sem);
        printk("VM: killing process %s\n", tsk->comm);
        if (error_code & 4)
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.cr2 = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!(error_code & 4))
                goto no_context;
        return;
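/*
 * Note on the copy below: with two-level (non-PAE) i386 paging the
 * pmd is folded into the pgd and pgd_present() is hard-wired to 1,
 * so the real work happens in the set_pmd() branch, which writes the
 * folded top-level entry.  The separate pgd and pmd steps only
 * become distinct with PAE's three-level layout.
 */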
vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = __pgd_offset(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);
                return;
        }
}