[PATCH] x86_64 merge: arch + asm
[opensuse:kernel.git] include/asm-x86_64/mmu_context.h
#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 168-bit bitmap where the first 128 bits are
 * unlikely to be set. It's guaranteed that at least one of the 168
 * bits is set.
 */
#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
# error update this function.
#endif

static inline int __sched_find_first_bit(unsigned long *b)
{
	if (b[0])
		return __ffs(b[0]);
	if (b[1])
		return __ffs(b[1]) + 64;
	/* the guarantee above means that when the first two words are
	   empty, the set bit must be in b[2] */
	return __ffs(b[2]) + 128;
}

static inline int sched_find_first_bit(unsigned long *b)
{
	int n = __sched_find_first_bit(b);
	BUG_ON((unsigned)n > 167);
	return n;
}
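
/*
 * Illustrative sketch, not part of the original header: how a caller such
 * as the O(1) scheduler's priority bitmap lookup might use
 * sched_find_first_bit(). The three-word bitmap layout matches the
 * MAX_PRIO = 168 check above; the example_pick_prio() helper and the
 * chosen priority value are made up for illustration only.
 */
#if 0
static inline int example_pick_prio(void)
{
	/* 168-bit priority bitmap: 3 x 64-bit words, initially empty */
	unsigned long bitmap[3] = { 0, 0, 0 };

	/* mark priority 130 as having a runnable task */
	__set_bit(130, bitmap);

	/* words 0 and 1 are empty, bit 2 of word 2 is set -> returns 130 */
	return sched_find_first_bit(bitmap);
}
#endif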

/*
 * possibly do the LDT unload here?
 */
#define destroy_context(mm)             do { } while(0)
#define init_new_context(tsk,mm)        0

#ifdef CONFIG_SMP

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
}
#else
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
#endif

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		clear_bit(cpu, &prev->cpu_vm_mask);
		/*
		 * Re-load LDT if necessary
		 */
		if (unlikely(prev->context.segments != next->context.segments))
			load_LDT(next);
#ifdef CONFIG_SMP
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		cpu_tlbstate[cpu].active_mm = next;
#endif
		set_bit(cpu, &next->cpu_vm_mask);
		set_bit(cpu, &next->context.cpuvalid);
		/* Re-load page tables */
		asm volatile("movq %0,level4_pgt": :"r" (__pa(next->pgd) | 7));
		__flush_tlb();
	}
#ifdef CONFIG_SMP
	else {
		cpu_tlbstate[cpu].state = TLBSTATE_OK;
		if (cpu_tlbstate[cpu].active_mm != next)
			BUG();
		if (!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must flush our tlb.
			 */
			local_flush_tlb();
		}
		if (!test_and_set_bit(cpu, &next->context.cpuvalid))
			load_LDT(next);
	}
#endif
}
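
/*
 * Illustrative sketch, not part of the original header: the other half of
 * the lazy TLB protocol that switch_mm() relies on. A CPU sitting in
 * TLBSTATE_LAZY can detach itself from the borrowed mm when a flush IPI
 * arrives, clearing its bit in cpu_vm_mask so further IPIs skip it;
 * switch_mm()'s test_and_set_bit() path above then notices the cleared bit
 * and performs the deferred local_flush_tlb(). The function name
 * example_leave_lazy_mm() is made up; the real handler lives in the arch
 * SMP TLB flush code.
 */
#if 0
static inline void example_leave_lazy_mm(unsigned cpu)
{
	/* only a lazy CPU may detach itself from its borrowed mm */
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		BUG();
	/* stop future flush IPIs for this mm from targeting us ... */
	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
	/* ... and drop whatever stale translations we still hold */
	__flush_tlb();
}
#endif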

#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL,smp_processor_id())
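
/*
 * Illustrative sketch, not part of the original header: roughly how the
 * scheduler's context switch drives the hooks above. A kernel thread has
 * no mm of its own, so it borrows the previous task's active_mm and the
 * CPU drops into lazy TLB mode; a normal task gets switch_mm(). The
 * function name example_context_switch() is made up and mm refcounting is
 * omitted; activate_mm() above is the analogous exec-time entry point.
 */
#if 0
static inline void example_context_switch(struct task_struct *prev,
					   struct task_struct *next,
					   unsigned cpu)
{
	struct mm_struct *oldmm = prev->active_mm;

	if (!next->mm) {
		/* kernel thread: keep running on oldmm's page tables */
		next->active_mm = oldmm;
		enter_lazy_tlb(oldmm, next, cpu);
	} else {
		switch_mm(oldmm, next->mm, next, cpu);
	}
}
#endif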

#endif