1 From: Jiri Slaby <jslaby@suse.cz>
2 Subject: Linux 3.11.6
3 Patch-mainline: 3.11.6
4 References: bnc#841645
5 Git-commit: 30e46b574a1db7d14404e52dca8e1aa5f5155fd2
6 Git-commit: 0e8c665699e953fa58dc1b0b0d09e5dce7343cc7
7 Git-commit: d8c633766ad88527f25d9f81a5c2f083d78a2b39
8 Git-commit: 6d07b68ce16ae9535955ba2059dedba5309c3ca1
9 Git-commit: 20b8875abcf2daa1dda5cf70bd6369df5e85d4c1
10 Git-commit: 7a25dd9e042b2b94202a67e5551112f4ac87285a
11 Git-commit: 32a2750010981216fb788c5190fb0e646abfab30
12 Git-commit: 530fcd16d87cd2417c472a581ba5a1e501556c86
13 Git-commit: 05603c44a7627793219b0bd9a7b236099dc9cd9d
14 Git-commit: 4718787d1f626f45ddb239912bc07266b9880044
15 Git-commit: d9a605e40b1376eb02b067d7690580255a0df68f
16 Git-commit: c2c737a0461e61a34676bd0bd1bc1a70a1b4e396
17 Git-commit: f42569b1388b1408b574a5e93a23a663647d4181
18 Git-commit: 2caacaa82a51b78fc0c800e206473874094287ed
19 Git-commit: c97cb9ccab8c85428ec21eff690642ad2ce1fa8a
20 Git-commit: 68eccc1dc345539d589ae78ee43b835c1a06a134
21 Git-commit: 3b1c4ad37741e53804ffe0a30dd01e08b2ab6241
22 Git-commit: 79ccf0f8c8e04e8b9eda6645ba0f63b0915a3075
23 Git-commit: 8b8d52ac382b17a19906b930cd69e2edb0aca8ba
24 Git-commit: 50b8f5aec04ebec7dbdf2adb17220b9148c99e63
25 Git-commit: aa3e146d04b6ae37939daeebaec060562b3db559
26 Git-commit: 89cd67b326fa95872cc2b4524cd807128db6071d
27 Git-commit: 5fd9c581862a4874c0bdaf16231d8873832bbb99
28 Git-commit: 671952a2a290a90017c64e75b7dd0343b0d005b4
29 Git-commit: c9976dcf55c8aaa7037427b239f15e5acfc01a3a
30 Git-commit: f3fc4884ebe6ae649d3723be14b219230d3b7fd2
31 Git-commit: 3f0116c3238a96bc18ad4b4acefe4e7be32fa861
32 Git-commit: 96d8df846f52a720c8ae1fadadfad7c9e733e336
33 Git-commit: 8b3c569a3999a8fd5a819f892525ab5520777c92
34 Git-commit: 4c4e45669de475573b15d968a6dca8d00124c9ad
35 Git-commit: 8612ed0d97abcf1c016d34755b7cf2060de71963
36 Git-commit: 5b24282846c064ee90d40fcb3a8f63b8e754fd28
37 Git-commit: 10469350e345599dfef3fa78a7c19fb230e674c1
38 Git-commit: 6c00350b573c0bd3635436e43e8696951dd6e1b6
39 Git-commit: 0752adfda15f0eca9859a76da3db1800e129ad43
40 Git-commit: c11eb222fd7d4db91196121dbf854178505d2751
41 Git-commit: 7efd0da2d17360e1cef91507dbe619db0ee2c691
42 Git-commit: 59b33f148cc08fb33cbe823fca1e34f7f023765e
43 Git-commit: cfc860253abd73e1681696c08ea268d33285a2c4
44 Git-commit: 6e4ea8e33b2057b85d75175dd89b93f5e26de3bc
45 Git-commit: 4871c1588f92c6c13f4713a7009f25f217055807
46 Git-commit: 25f2bd7f5add608c1d1405938f39c96927b275ca
47 Git-commit: 4cdbf7d346e7461c3b93a26707c852e2c9db3753
48 Git-commit: 9d05746e7b16d8565dddbe3200faa1e669d23bbf
49 Git-commit: 47d06e532e95b71c0db3839ebdef3fe8812fca2c
50 Git-commit: 88cfcf86aa3ada84d97195bcad74f4dadb4ae23b
51 Git-commit: c6cc3d58b4042f5cadae653ff8d3df26af1a0169
52 Git-commit: 39edac70e9aedf451fccaa851b273ace9fcca0bd
53 Git-commit: a9d14bc0b188a822e42787d01e56c06fe9750162
54
55 Signed-off-by: Jiri Slaby <jslaby@suse.cz>
56 ---
57 diff --git a/Makefile b/Makefile
58 index 83121b7..e87ba83 100644
59 --- a/Makefile
60 +++ b/Makefile
61 @@ -1,6 +1,6 @@
62  VERSION = 3
63  PATCHLEVEL = 11
64 -SUBLEVEL = 5
65 +SUBLEVEL = 6
66  EXTRAVERSION =
67  NAME = Linux for Workgroups
68  
69 diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
70 index 442ce5d..43de302 100644
71 --- a/arch/arc/include/asm/delay.h
72 +++ b/arch/arc/include/asm/delay.h
73 @@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
74  {
75         unsigned long loops;
76  
77 -       /* (long long) cast ensures 64 bit MPY - real or emulated
78 +       /* (u64) cast ensures 64 bit MPY - real or emulated
79          * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
80          */
81 -       loops = ((long long)(usecs * 4295 * HZ) *
82 -                (long long)(loops_per_jiffy)) >> 32;
83 +       loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
84  
85         __delay(loops);
86  }
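
The point of this hunk is where the widening happens: with a 32-bit unsigned long, usecs * 4295 * HZ was evaluated in 32 bits and only the already-truncated product was cast to 64 bits, so large delays collapsed to a handful of loops. A standalone sketch of the difference (illustrative only, plain userspace C, values made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t usecs = 2000, hz = 1000, lpj = 500000;

        /* old shape: the product wraps in 32 bits, then gets widened */
        uint64_t wrapped = ((uint64_t)(usecs * 4295 * hz) * lpj) >> 32;

        /* fixed shape: widen first, so the whole chain is evaluated in 64 bits */
        uint64_t correct = ((uint64_t)usecs * 4295 * hz * lpj) >> 32;

        /* prints something like "7 vs 1000007" */
        printf("%llu vs %llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        return 0;
    }
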
87 diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
88 index f158197..b6a8c2d 100644
89 --- a/arch/arc/include/asm/spinlock.h
90 +++ b/arch/arc/include/asm/spinlock.h
91 @@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
92  
93  static inline void arch_spin_unlock(arch_spinlock_t *lock)
94  {
95 -       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
96 +       unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
97 +
98 +       __asm__ __volatile__(
99 +       "       ex  %0, [%1]            \n"
100 +       : "+r" (tmp)
101 +       : "r"(&(lock->slock))
102 +       : "memory");
103 +
104         smp_mb();
105  }
106  
107 diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
108 index 3242082..30c9baf 100644
109 --- a/arch/arc/include/asm/uaccess.h
110 +++ b/arch/arc/include/asm/uaccess.h
111 @@ -43,7 +43,7 @@
112   * Because it essentially checks if buffer end is within limit and @len is
113   * non-ngeative, which implies that buffer start will be within limit too.
114   *
115 - * The reason for rewriting being, for majorit yof cases, @len is generally
116 + * The reason for rewriting being, for majority of cases, @len is generally
117   * compile time constant, causing first sub-expression to be compile time
118   * subsumed.
119   *
120 @@ -53,7 +53,7 @@
121   *
122   */
123  #define __user_ok(addr, sz)    (((sz) <= TASK_SIZE) && \
124 -                                (((addr)+(sz)) <= get_fs()))
125 +                                ((addr) <= (get_fs() - (sz))))
126  #define __access_ok(addr, sz)  (unlikely(__kernel_ok) || \
127                                  likely(__user_ok((addr), (sz))))
128  
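
Besides the optimization rationale in the comment above, the new form closes a wrap-around hole: on a 32-bit address space addr + sz can overflow and make the old comparison accept a buffer that actually ends beyond the limit, while addr <= limit - sz cannot (the sz <= TASK_SIZE sub-expression keeps the subtraction from underflowing). A standalone sketch (illustrative only, made-up constants):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t limit = 0x80000000u;   /* stand-in for get_fs()/TASK_SIZE */
        uint32_t addr  = 0xfffff000u;   /* bogus "user" pointer            */
        uint32_t sz    = 0x2000u;       /* addr + sz wraps past zero       */

        int old_ok = (sz <= limit) && (addr + sz <= limit);  /* 1: fooled by the wrap */
        int new_ok = (sz <= limit) && (addr <= limit - sz);  /* 0: correctly rejected */

        printf("old form says %d, new form says %d\n", old_ok, new_ok);
        return 0;
    }
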
129 diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
130 index 3332385..5d76706 100644
131 --- a/arch/arc/kernel/ptrace.c
132 +++ b/arch/arc/kernel/ptrace.c
133 @@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
134         REG_IGNORE_ONE(pad2);
135         REG_IN_CHUNK(callee, efa, cregs);       /* callee_regs[r25..r13] */
136         REG_IGNORE_ONE(efa);                    /* efa update invalid */
137 -       REG_IN_ONE(stop_pc, &ptregs->ret);      /* stop_pc: PC update */
138 +       REG_IGNORE_ONE(stop_pc);                        /* PC updated via @ret */
139  
140         return ret;
141  }
142 diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
143 index ee6ef2f..7e95e1a 100644
144 --- a/arch/arc/kernel/signal.c
145 +++ b/arch/arc/kernel/signal.c
146 @@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
147  {
148         struct rt_sigframe __user *sf;
149         unsigned int magic;
150 -       int err;
151         struct pt_regs *regs = current_pt_regs();
152  
153         /* Always make any pending restarted system calls return -EINTR */
154 @@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
155         if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
156                 goto badframe;
157  
158 -       err = restore_usr_regs(regs, sf);
159 -       err |= __get_user(magic, &sf->sigret_magic);
160 -       if (err)
161 +       if (__get_user(magic, &sf->sigret_magic))
162                 goto badframe;
163  
164         if (unlikely(is_do_ss_needed(magic)))
165                 if (restore_altstack(&sf->uc.uc_stack))
166                         goto badframe;
167  
168 +       if (restore_usr_regs(regs, sf))
169 +               goto badframe;
170 +
171         /* Don't restart from sigreturn */
172         syscall_wont_restart(regs);
173  
174 @@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
175                 return 1;
176  
177         /*
178 +        * w/o SA_SIGINFO, struct ucontext is partially populated (only
179 +        * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
180 +        * during signal handler execution. This works for SA_SIGINFO as well
181 +        * although the semantics are now overloaded (the same reg state can be
182 +        * inspected by userland: but are they allowed to fiddle with it ?
183 +        */
184 +       err |= stash_usr_regs(sf, regs, set);
185 +
186 +       /*
187          * SA_SIGINFO requires 3 args to signal handler:
188          *  #1: sig-no (common to any handler)
189          *  #2: struct siginfo
190 @@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
191                 magic = MAGIC_SIGALTSTK;
192         }
193  
194 -       /*
195 -        * w/o SA_SIGINFO, struct ucontext is partially populated (only
196 -        * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
197 -        * during signal handler execution. This works for SA_SIGINFO as well
198 -        * although the semantics are now overloaded (the same reg state can be
199 -        * inspected by userland: but are they allowed to fiddle with it ?
200 -        */
201 -       err |= stash_usr_regs(sf, regs, set);
202         err |= __put_user(magic, &sf->sigret_magic);
203         if (err)
204                 return err;
205 diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
206 index c0f832f..00ad070 100644
207 --- a/arch/arc/kernel/unaligned.c
208 +++ b/arch/arc/kernel/unaligned.c
209 @@ -233,6 +233,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
210                 regs->status32 &= ~STATUS_DE_MASK;
211         } else {
212                 regs->ret += state.instr_len;
213 +
214 +               /* handle zero-overhead-loop */
215 +               if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
216 +                       regs->ret = regs->lp_start;
217 +                       regs->lp_count--;
218 +               }
219         }
220  
221         return 0;
222 diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
223 index bfc198c..863c892 100644
224 --- a/arch/arm/include/asm/jump_label.h
225 +++ b/arch/arm/include/asm/jump_label.h
226 @@ -16,7 +16,7 @@
227  
228  static __always_inline bool arch_static_branch(struct static_key *key)
229  {
230 -       asm goto("1:\n\t"
231 +       asm_volatile_goto("1:\n\t"
232                  JUMP_LABEL_NOP "\n\t"
233                  ".pushsection __jump_table,  \"aw\"\n\t"
234                  ".word 1b, %l[l_yes], %c0\n\t"
235 diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
236 index 4d6d77e..e194f95 100644
237 --- a/arch/mips/include/asm/jump_label.h
238 +++ b/arch/mips/include/asm/jump_label.h
239 @@ -22,7 +22,7 @@
240  
241  static __always_inline bool arch_static_branch(struct static_key *key)
242  {
243 -       asm goto("1:\tnop\n\t"
244 +       asm_volatile_goto("1:\tnop\n\t"
245                 "nop\n\t"
246                 ".pushsection __jump_table,  \"aw\"\n\t"
247                 WORD_INSN " 1b, %l[l_yes], %0\n\t"
248 diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
249 index 4204d76..029e002 100644
250 --- a/arch/mips/kernel/octeon_switch.S
251 +++ b/arch/mips/kernel/octeon_switch.S
252 @@ -73,7 +73,7 @@
253  3:
254  
255  #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
256 -       PTR_L   t8, __stack_chk_guard
257 +       PTR_LA  t8, __stack_chk_guard
258         LONG_L  t9, TASK_STACK_CANARY(a1)
259         LONG_S  t9, 0(t8)
260  #endif
261 diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
262 index 38af83f..20b7b04 100644
263 --- a/arch/mips/kernel/r2300_switch.S
264 +++ b/arch/mips/kernel/r2300_switch.S
265 @@ -67,7 +67,7 @@ LEAF(resume)
266  1:
267  
268  #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
269 -       PTR_L   t8, __stack_chk_guard
270 +       PTR_LA  t8, __stack_chk_guard
271         LONG_L  t9, TASK_STACK_CANARY(a1)
272         LONG_S  t9, 0(t8)
273  #endif
274 diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
275 index 921238a..078de5e 100644
276 --- a/arch/mips/kernel/r4k_switch.S
277 +++ b/arch/mips/kernel/r4k_switch.S
278 @@ -69,7 +69,7 @@
279  1:
280  
281  #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
282 -       PTR_L   t8, __stack_chk_guard
283 +       PTR_LA  t8, __stack_chk_guard
284         LONG_L  t9, TASK_STACK_CANARY(a1)
285         LONG_S  t9, 0(t8)
286  #endif
287 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
288 index 04e47c6..b3f87a3 100644
289 --- a/arch/parisc/kernel/traps.c
290 +++ b/arch/parisc/kernel/traps.c
291 @@ -805,14 +805,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
292         else {
293  
294             /*
295 -            * The kernel should never fault on its own address space.
296 +            * The kernel should never fault on its own address space,
297 +            * unless pagefault_disable() was called before.
298              */
299  
300 -           if (fault_space == 0) 
301 +           if (fault_space == 0 && !in_atomic())
302             {
303                 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
304                 parisc_terminate("Kernel Fault", regs, code, fault_address);
305 -       
306             }
307         }
308  
309 diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
310 index ae098c4..f016bb6 100644
311 --- a/arch/powerpc/include/asm/jump_label.h
312 +++ b/arch/powerpc/include/asm/jump_label.h
313 @@ -19,7 +19,7 @@
314  
315  static __always_inline bool arch_static_branch(struct static_key *key)
316  {
317 -       asm goto("1:\n\t"
318 +       asm_volatile_goto("1:\n\t"
319                  "nop\n\t"
320                  ".pushsection __jump_table,  \"aw\"\n\t"
321                  JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
322 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
323 index b02f91e..7bcd4d6 100644
324 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
325 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
326 @@ -1054,7 +1054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
327  BEGIN_FTR_SECTION
328         mfspr   r8, SPRN_DSCR
329         ld      r7, HSTATE_DSCR(r13)
330 -       std     r8, VCPU_DSCR(r7)
331 +       std     r8, VCPU_DSCR(r9)
332         mtspr   SPRN_DSCR, r7
333  END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
334  
335 diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
336 index 6c32190..346b1c8 100644
337 --- a/arch/s390/include/asm/jump_label.h
338 +++ b/arch/s390/include/asm/jump_label.h
339 @@ -15,7 +15,7 @@
340  
341  static __always_inline bool arch_static_branch(struct static_key *key)
342  {
343 -       asm goto("0:    brcl 0,0\n"
344 +       asm_volatile_goto("0:   brcl 0,0\n"
345                 ".pushsection __jump_table, \"aw\"\n"
346                 ASM_ALIGN "\n"
347                 ASM_PTR " 0b, %l[label], %0\n"
348 diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
349 index 5080d16..ec2e2e2 100644
350 --- a/arch/sparc/include/asm/jump_label.h
351 +++ b/arch/sparc/include/asm/jump_label.h
352 @@ -9,7 +9,7 @@
353  
354  static __always_inline bool arch_static_branch(struct static_key *key)
355  {
356 -               asm goto("1:\n\t"
357 +               asm_volatile_goto("1:\n\t"
358                          "nop\n\t"
359                          "nop\n\t"
360                          ".pushsection __jump_table,  \"aw\"\n\t"
361 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
362 index 47538a6..7290585 100644
363 --- a/arch/x86/include/asm/cpufeature.h
364 +++ b/arch/x86/include/asm/cpufeature.h
365 @@ -373,7 +373,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
366                  * Catch too early usage of this before alternatives
367                  * have run.
368                  */
369 -               asm goto("1: jmp %l[t_warn]\n"
370 +               asm_volatile_goto("1: jmp %l[t_warn]\n"
371                          "2:\n"
372                          ".section .altinstructions,\"a\"\n"
373                          " .long 1b - .\n"
374 @@ -386,7 +386,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
375                          : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
376  #endif
377  
378 -               asm goto("1: jmp %l[t_no]\n"
379 +               asm_volatile_goto("1: jmp %l[t_no]\n"
380                          "2:\n"
381                          ".section .altinstructions,\"a\"\n"
382                          " .long 1b - .\n"
383 @@ -448,7 +448,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
384   * have. Thus, we force the jump to the widest, 4-byte, signed relative
385   * offset even though the last would often fit in less bytes.
386   */
387 -               asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
388 +               asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
389                          "2:\n"
390                          ".section .altinstructions,\"a\"\n"
391                          " .long 1b - .\n"              /* src offset */
392 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
393 index cccd07f..779c2ef 100644
394 --- a/arch/x86/include/asm/e820.h
395 +++ b/arch/x86/include/asm/e820.h
396 @@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
397  extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
398                         unsigned long start_addr, unsigned long long end_addr);
399  struct setup_data;
400 -extern void parse_e820_ext(struct setup_data *data);
401 +extern void parse_e820_ext(u64 phys_addr, u32 data_len);
402  
403  #if defined(CONFIG_X86_64) || \
404         (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
405 diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
406 index 3a16c14..0297669 100644
407 --- a/arch/x86/include/asm/jump_label.h
408 +++ b/arch/x86/include/asm/jump_label.h
409 @@ -13,7 +13,7 @@
410  
411  static __always_inline bool arch_static_branch(struct static_key *key)
412  {
413 -       asm goto("1:"
414 +       asm_volatile_goto("1:"
415                 STATIC_KEY_INITIAL_NOP
416                 ".pushsection __jump_table,  \"aw\" \n\t"
417                 _ASM_ALIGN "\n\t"
418 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
419 index d32abea..174da5f 100644
420 --- a/arch/x86/kernel/e820.c
421 +++ b/arch/x86/kernel/e820.c
422 @@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
423   * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
424   * linked list of struct setup_data, which is parsed here.
425   */
426 -void __init parse_e820_ext(struct setup_data *sdata)
427 +void __init parse_e820_ext(u64 phys_addr, u32 data_len)
428  {
429         int entries;
430         struct e820entry *extmap;
431 +       struct setup_data *sdata;
432  
433 +       sdata = early_memremap(phys_addr, data_len);
434         entries = sdata->len / sizeof(struct e820entry);
435         extmap = (struct e820entry *)(sdata->data);
436         __append_e820_map(extmap, entries);
437         sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
438 +       early_iounmap(sdata, data_len);
439         printk(KERN_INFO "e820: extended physical RAM map:\n");
440         e820_print_map("extended");
441  }
442 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
443 index f8ec578..234e1e3 100644
444 --- a/arch/x86/kernel/setup.c
445 +++ b/arch/x86/kernel/setup.c
446 @@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
447  static void __init parse_setup_data(void)
448  {
449         struct setup_data *data;
450 -       u64 pa_data;
451 +       u64 pa_data, pa_next;
452  
453         pa_data = boot_params.hdr.setup_data;
454         while (pa_data) {
455 -               u32 data_len, map_len;
456 +               u32 data_len, map_len, data_type;
457  
458                 map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
459                               (u64)sizeof(struct setup_data));
460                 data = early_memremap(pa_data, map_len);
461                 data_len = data->len + sizeof(struct setup_data);
462 -               if (data_len > map_len) {
463 -                       early_iounmap(data, map_len);
464 -                       data = early_memremap(pa_data, data_len);
465 -                       map_len = data_len;
466 -               }
467 +               data_type = data->type;
468 +               pa_next = data->next;
469 +               early_iounmap(data, map_len);
470  
471 -               switch (data->type) {
472 +               switch (data_type) {
473                 case SETUP_E820_EXT:
474 -                       parse_e820_ext(data);
475 +                       parse_e820_ext(pa_data, data_len);
476                         break;
477                 case SETUP_DTB:
478                         add_dtb(pa_data);
479 @@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
480                 default:
481                         break;
482                 }
483 -               pa_data = data->next;
484 -               early_iounmap(data, map_len);
485 +               pa_data = pa_next;
486         }
487  }
488  
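
The reshuffled loop follows one rule: copy every field that is still needed (the type, the length, and the physical address of the next node) out of the temporary mapping, drop the mapping, and only then dispatch, letting each consumer (here parse_e820_ext()) remap the payload from its physical address. A self-contained sketch of that walk/copy/unmap pattern (illustrative only; map_node()/unmap_node() are stand-ins for early_memremap()/early_iounmap(), not real APIs):

    #include <stdint.h>
    #include <stdio.h>

    struct node {
        uint64_t next;   /* "physical address" of the next node, 0 = end */
        uint32_t type;
        uint32_t len;
    };

    /* fake physical memory: index == "physical address" */
    static struct node mem[] = {
        { 0 },                                  /* pa 0 is never a node */
        { .next = 2, .type = 0x12, .len = 16 },
        { .next = 0, .type = 0x34, .len = 32 },
    };

    static struct node *map_node(uint64_t pa) { return &mem[pa]; }
    static void unmap_node(struct node *n)    { (void)n; }

    int main(void)
    {
        uint64_t pa = 1;                        /* head of the list */

        while (pa) {
            struct node *n = map_node(pa);
            uint64_t pa_next = n->next;         /* copy out ...          */
            uint32_t type = n->type;            /* ... everything needed */
            uint32_t len  = n->len;             /* ... before unmapping  */

            unmap_node(n);

            printf("node %llu: type 0x%x, len %u\n",
                   (unsigned long long)pa, type, len);
            pa = pa_next;
        }
        return 0;
    }
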
489 diff --git a/drivers/char/random.c b/drivers/char/random.c
490 index 0d91fe5..92e6c67 100644
491 --- a/drivers/char/random.c
492 +++ b/drivers/char/random.c
493 @@ -1462,12 +1462,11 @@ struct ctl_table random_table[] = {
494  
495  static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
496  
497 -static int __init random_int_secret_init(void)
498 +int random_int_secret_init(void)
499  {
500         get_random_bytes(random_int_secret, sizeof(random_int_secret));
501         return 0;
502  }
503 -late_initcall(random_int_secret_init);
504  
505  /*
506   * Get a random word for internal kernel use only. Similar to urandom but
507 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
508 index 342f1f3..c42d31c 100644
509 --- a/drivers/gpu/drm/i915/i915_reg.h
510 +++ b/drivers/gpu/drm/i915/i915_reg.h
511 @@ -3791,6 +3791,9 @@
512  #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
513  #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
514  
515 +#define HSW_SCRATCH1                           0xb038
516 +#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1<<27)
517 +
518  #define HSW_FUSE_STRAP         0x42014
519  #define  HSW_CDCLK_LIMIT       (1 << 24)
520  
521 @@ -4624,6 +4627,9 @@
522  #define GEN7_ROW_CHICKEN2_GT2          0xf4f4
523  #define   DOP_CLOCK_GATING_DISABLE     (1<<0)
524  
525 +#define HSW_ROW_CHICKEN3               0xe49c
526 +#define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
527 +
528  #define G4X_AUD_VID_DID                        (dev_priv->info->display_mmio_offset + 0x62020)
529  #define INTEL_AUDIO_DEVCL              0x808629FB
530  #define INTEL_AUDIO_DEVBLC             0x80862801
531 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
532 index 7fc8a76..90a7c17 100644
533 --- a/drivers/gpu/drm/i915/intel_display.c
534 +++ b/drivers/gpu/drm/i915/intel_display.c
535 @@ -3890,8 +3890,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
536   * consider. */
537  void intel_connector_dpms(struct drm_connector *connector, int mode)
538  {
539 -       struct intel_encoder *encoder = intel_attached_encoder(connector);
540 -
541         /* All the simple cases only support two dpms states. */
542         if (mode != DRM_MODE_DPMS_ON)
543                 mode = DRM_MODE_DPMS_OFF;
544 @@ -3902,10 +3900,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
545         connector->dpms = mode;
546  
547         /* Only need to change hw state when actually enabled */
548 -       if (encoder->base.crtc)
549 -               intel_encoder_dpms(encoder, mode);
550 -       else
551 -               WARN_ON(encoder->connectors_active != false);
552 +       if (connector->encoder)
553 +               intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
554  
555         intel_modeset_check_state(connector->dev);
556  }
557 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
558 index b0e4a0b..cad0482 100644
559 --- a/drivers/gpu/drm/i915/intel_pm.c
560 +++ b/drivers/gpu/drm/i915/intel_pm.c
561 @@ -3603,8 +3603,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
562                                       dev_priv->rps.rpe_delay),
563                          dev_priv->rps.rpe_delay);
564  
565 -       INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
566 -
567         valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
568  
569         /* requires MSI enabled */
570 @@ -4699,6 +4697,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
571         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
572                         GEN7_WA_L3_CHICKEN_MODE);
573  
574 +       /* L3 caching of data atomics doesn't work -- disable it. */
575 +       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
576 +       I915_WRITE(HSW_ROW_CHICKEN3,
577 +                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
578 +
579         /* This is required by WaCatErrorRejectionIssue:hsw */
580         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
581                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
582 @@ -5562,6 +5565,8 @@ void intel_pm_init(struct drm_device *dev)
583  
584         INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
585                           intel_gen6_powersave_work);
586 +
587 +       INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
588  }
589  
590  int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
591 diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
592 index 084e694..639b9aa 100644
593 --- a/drivers/gpu/drm/radeon/btc_dpm.c
594 +++ b/drivers/gpu/drm/radeon/btc_dpm.c
595 @@ -1913,7 +1913,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
596                         }
597                         j++;
598  
599 -                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
600 +                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
601                                 return -EINVAL;
602  
603                         tmp = RREG32(MC_PMG_CMD_MRS);
604 @@ -1928,7 +1928,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
605                         }
606                         j++;
607  
608 -                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
609 +                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
610                                 return -EINVAL;
611                         break;
612                 case MC_SEQ_RESERVE_M >> 2:
613 @@ -1942,7 +1942,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
614                         }
615                         j++;
616  
617 -                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
618 +                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
619                                 return -EINVAL;
620                         break;
621                 default:
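
All three hunks fix the same off-by-one: j is incremented and then range-checked before the next store into mc_data[], so j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE still lets j == SIZE through and the following write lands one element past the end of the array. A tiny standalone illustration of why the check needs >= (made-up size, not driver code):

    #include <stdio.h>

    #define N 4

    int main(void)
    {
        int regs[N];
        int j = 0;

        while (1) {
            j++;
            if (j >= N)          /* was: j > N -- would let j == N through */
                break;           /* the driver returns -EINVAL here        */
            regs[j] = j;         /* valid indices are 0 .. N-1             */
        }
        printf("highest index written: %d\n", j - 1);
        return 0;
    }
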
622 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
623 index 94dab1e..8307883 100644
624 --- a/drivers/gpu/drm/radeon/evergreen.c
625 +++ b/drivers/gpu/drm/radeon/evergreen.c
626 @@ -3126,7 +3126,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
627                 rdev->config.evergreen.sx_max_export_size = 256;
628                 rdev->config.evergreen.sx_max_export_pos_size = 64;
629                 rdev->config.evergreen.sx_max_export_smx_size = 192;
630 -               rdev->config.evergreen.max_hw_contexts = 8;
631 +               rdev->config.evergreen.max_hw_contexts = 4;
632                 rdev->config.evergreen.sq_num_cf_insts = 2;
633  
634                 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
635 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
636 index 20fd17c..6be00c9 100644
637 --- a/drivers/gpu/drm/radeon/evergreend.h
638 +++ b/drivers/gpu/drm/radeon/evergreend.h
639 @@ -1494,7 +1494,7 @@
640   * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
641   */
642  #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
643 -                /* 0 - SRC_ADDR
644 +                /* 0 - DST_ADDR
645                  * 1 - GDS
646                  */
647  #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
648 @@ -1509,7 +1509,7 @@
649  #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
650  /* COMMAND */
651  #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
652 -#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
653 +#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
654                  /* 0 - none
655                  * 1 - 8 in 16
656                  * 2 - 8 in 32
657 diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
658 index 7c78083..d079cb1 100644
659 --- a/drivers/gpu/drm/radeon/r600d.h
660 +++ b/drivers/gpu/drm/radeon/r600d.h
661 @@ -1487,7 +1487,7 @@
662   */
663  #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
664  /* COMMAND */
665 -#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
666 +#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
667                  /* 0 - none
668                  * 1 - 8 in 16
669                  * 2 - 8 in 32
670 diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
671 index f4d6bce..12e8099 100644
672 --- a/drivers/gpu/drm/radeon/radeon_test.c
673 +++ b/drivers/gpu/drm/radeon/radeon_test.c
674 @@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
675         struct radeon_bo *vram_obj = NULL;
676         struct radeon_bo **gtt_obj = NULL;
677         uint64_t gtt_addr, vram_addr;
678 -       unsigned i, n, size;
679 -       int r, ring;
680 +       unsigned n, size;
681 +       int i, r, ring;
682  
683         switch (flag) {
684         case RADEON_TEST_COPY_DMA:
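
Making i signed matters as soon as the index is compared against zero while counting back down, as unwind/cleanup loops typically do: with an unsigned counter the i >= 0 condition can never become false and the decrement wraps around instead of terminating. A minimal illustration (not driver code):

    #include <stdio.h>

    int main(void)
    {
        int released = 0;

        /* signed counter: terminates after indices 2, 1, 0 */
        for (int i = 2; i >= 0; i--)
            released++;
        printf("released %d objects\n", released);

        /*
         * unsigned counter: i >= 0 is always true, so the same loop
         * never ends -- for (unsigned i = 2; i >= 0; i--) ...
         */
        return 0;
    }
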
685 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
686 index 1cfba39..1c23b61 100644
687 --- a/drivers/gpu/drm/radeon/si_dpm.c
688 +++ b/drivers/gpu/drm/radeon/si_dpm.c
689 @@ -5174,7 +5174,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
690                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
691                         }
692                         j++;
693 -                       if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
694 +                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
695                                 return -EINVAL;
696  
697                         if (!pi->mem_gddr5) {
698 @@ -5184,7 +5184,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
699                                         table->mc_reg_table_entry[k].mc_data[j] =
700                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
701                                 j++;
702 -                               if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
703 +                               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
704                                         return -EINVAL;
705                         }
706                         break;
707 @@ -5197,7 +5197,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
708                                         (temp_reg & 0xffff0000) |
709                                         (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
710                         j++;
711 -                       if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
712 +                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
713                                 return -EINVAL;
714                         break;
715                 default:
716 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
717 index 2010d6b..a75d25a 100644
718 --- a/drivers/gpu/drm/radeon/sid.h
719 +++ b/drivers/gpu/drm/radeon/sid.h
720 @@ -1490,7 +1490,7 @@
721   * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
722   */
723  #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
724 -                /* 0 - SRC_ADDR
725 +                /* 0 - DST_ADDR
726                  * 1 - GDS
727                  */
728  #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
729 @@ -1505,7 +1505,7 @@
730  #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
731  /* COMMAND */
732  #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
733 -#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
734 +#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
735                  /* 0 - none
736                  * 1 - 8 in 16
737                  * 2 - 8 in 32
738 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
739 index 98814d1..3288f13 100644
740 --- a/drivers/hwmon/applesmc.c
741 +++ b/drivers/hwmon/applesmc.c
742 @@ -230,6 +230,7 @@ static int send_argument(const char *key)
743  
744  static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
745  {
746 +       u8 status, data = 0;
747         int i;
748  
749         if (send_command(cmd) || send_argument(key)) {
750 @@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
751                 return -EIO;
752         }
753  
754 +       /* This has no effect on newer (2012) SMCs */
755         if (send_byte(len, APPLESMC_DATA_PORT)) {
756                 pr_warn("%.4s: read len fail\n", key);
757                 return -EIO;
758 @@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
759                 buffer[i] = inb(APPLESMC_DATA_PORT);
760         }
761  
762 +       /* Read the data port until bit0 is cleared */
763 +       for (i = 0; i < 16; i++) {
764 +               udelay(APPLESMC_MIN_WAIT);
765 +               status = inb(APPLESMC_CMD_PORT);
766 +               if (!(status & 0x01))
767 +                       break;
768 +               data = inb(APPLESMC_DATA_PORT);
769 +       }
770 +       if (i)
771 +               pr_warn("flushed %d bytes, last value is: %d\n", i, data);
772 +
773         return 0;
774  }
775  
776 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
777 index 142b694d..e6b8dcd 100644
778 --- a/drivers/i2c/busses/i2c-omap.c
779 +++ b/drivers/i2c/busses/i2c-omap.c
780 @@ -944,6 +944,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
781                 /*
782                  * ProDB0017052: Clear ARDY bit twice
783                  */
784 +               if (stat & OMAP_I2C_STAT_ARDY)
785 +                       omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
786 +
787                 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
788                                         OMAP_I2C_STAT_AL)) {
789                         omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
790 diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
791 index 491419e..5c3d4df 100644
792 --- a/drivers/watchdog/kempld_wdt.c
793 +++ b/drivers/watchdog/kempld_wdt.c
794 @@ -35,7 +35,7 @@
795  #define KEMPLD_WDT_STAGE_TIMEOUT(x)    (0x1b + (x) * 4)
796  #define KEMPLD_WDT_STAGE_CFG(x)                (0x18 + (x))
797  #define STAGE_CFG_GET_PRESCALER(x)     (((x) & 0x30) >> 4)
798 -#define STAGE_CFG_SET_PRESCALER(x)     (((x) & 0x30) << 4)
799 +#define STAGE_CFG_SET_PRESCALER(x)     (((x) & 0x3) << 4)
800  #define STAGE_CFG_PRESCALER_MASK       0x30
801  #define STAGE_CFG_ACTION_MASK          0x7
802  #define STAGE_CFG_ASSERT               (1 << 3)
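
STAGE_CFG_GET_PRESCALER() extracts a two-bit field from bits 5:4, so the setter has to mask the value with 0x3 before shifting it into position; masking with the field mask 0x30 first and then shifting left parks the bits at 9:8, and for the legal prescaler values 0-3 the old macro always produced 0. A quick standalone check (illustrative only):

    #include <stdio.h>

    #define GET_PRESCALER(x)      (((x) & 0x30) >> 4)
    #define SET_PRESCALER_OLD(x)  (((x) & 0x30) << 4)   /* broken */
    #define SET_PRESCALER(x)      (((x) & 0x3) << 4)    /* fixed  */

    int main(void)
    {
        unsigned int prescaler = 2;   /* value that belongs in bits 5:4 */

        printf("old: 0x%02x\n", SET_PRESCALER_OLD(prescaler));   /* 0x00 */
        printf("new: 0x%02x\n", SET_PRESCALER(prescaler));       /* 0x20 */
        printf("round trip: %u\n", GET_PRESCALER(SET_PRESCALER(prescaler)));
        return 0;
    }
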
803 diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
804 index 4da59b4..381999c 100644
805 --- a/drivers/watchdog/ts72xx_wdt.c
806 +++ b/drivers/watchdog/ts72xx_wdt.c
807 @@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
808  
809         case WDIOC_GETSTATUS:
810         case WDIOC_GETBOOTSTATUS:
811 -               return put_user(0, p);
812 +               error = put_user(0, p);
813 +               break;
814  
815         case WDIOC_KEEPALIVE:
816                 ts72xx_wdt_kick(wdt);
817 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
818 index d3280b2..8220491 100644
819 --- a/fs/btrfs/inode.c
820 +++ b/fs/btrfs/inode.c
821 @@ -8036,7 +8036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
822  
823  
824         /* check for collisions, even if the  name isn't there */
825 -       ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
826 +       ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
827                              new_dentry->d_name.name,
828                              new_dentry->d_name.len);
829  
830 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
831 index c081e34..03e9beb 100644
832 --- a/fs/ext4/xattr.c
833 +++ b/fs/ext4/xattr.c
834 @@ -1350,6 +1350,8 @@ retry:
835                                     s_min_extra_isize) {
836                                         tried_min_extra_isize++;
837                                         new_extra_isize = s_min_extra_isize;
838 +                                       kfree(is); is = NULL;
839 +                                       kfree(bs); bs = NULL;
840                                         goto retry;
841                                 }
842                                 error = -1;
843 diff --git a/fs/statfs.c b/fs/statfs.c
844 index c219e733..083dc0a 100644
845 --- a/fs/statfs.c
846 +++ b/fs/statfs.c
847 @@ -94,7 +94,7 @@ retry:
848  
849  int fd_statfs(int fd, struct kstatfs *st)
850  {
851 -       struct fd f = fdget(fd);
852 +       struct fd f = fdget_raw(fd);
853         int error = -EBADF;
854         if (f.file) {
855                 error = vfs_statfs(&f.file->f_path, st);
856 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
857 index 842de22..ded4299 100644
858 --- a/include/linux/compiler-gcc4.h
859 +++ b/include/linux/compiler-gcc4.h
860 @@ -65,6 +65,21 @@
861  #define __visible __attribute__((externally_visible))
862  #endif
863  
864 +/*
865 + * GCC 'asm goto' miscompiles certain code sequences:
866 + *
867 + *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
868 + *
869 + * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
870 + * Fixed in GCC 4.8.2 and later versions.
871 + *
872 + * (asm goto is automatically volatile - the naming reflects this.)
873 + */
874 +#if GCC_VERSION <= 40801
875 +# define asm_volatile_goto(x...)       do { asm goto(x); asm (""); } while (0)
876 +#else
877 +# define asm_volatile_goto(x...)       do { asm goto(x); } while (0)
878 +#endif
879  
880  #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
881  #if GCC_VERSION >= 40400
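
Every asm goto call site in the tree is converted to this macro; on affected compilers (GCC 4.8.1 and earlier) the expansion appends an empty asm (""), which acts as a compiler barrier and is enough to sidestep the miscompilation. A freestanding sketch of the shape (illustrative only, builds with plain GCC on x86; demo_branch is made up for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define asm_volatile_goto(x...)  do { asm goto(x); asm (""); } while (0)

    static bool demo_branch(void)
    {
        /* the template is just a nop, so the branch is never taken here;
         * jump-label users patch the nop into a jump at run time */
        asm_volatile_goto("nop\n\t" : : : : l_yes);
        return false;
    l_yes:
        return true;
    }

    int main(void)
    {
        printf("branch taken: %d\n", demo_branch());
        return 0;
    }
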
882 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
883 index c4d870b..19c19a5 100644
884 --- a/include/linux/ipc_namespace.h
885 +++ b/include/linux/ipc_namespace.h
886 @@ -22,7 +22,7 @@ struct ipc_ids {
887         int in_use;
888         unsigned short seq;
889         unsigned short seq_max;
890 -       struct rw_semaphore rw_mutex;
891 +       struct rw_semaphore rwsem;
892         struct idr ipcs_idr;
893         int next_id;
894  };
895 diff --git a/include/linux/random.h b/include/linux/random.h
896 index 3b9377d..6312dd9 100644
897 --- a/include/linux/random.h
898 +++ b/include/linux/random.h
899 @@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
900  extern void get_random_bytes(void *buf, int nbytes);
901  extern void get_random_bytes_arch(void *buf, int nbytes);
902  void generate_random_uuid(unsigned char uuid_out[16]);
903 +extern int random_int_secret_init(void);
904  
905  #ifndef MODULE
906  extern const struct file_operations random_fops, urandom_fops;
907 diff --git a/init/main.c b/init/main.c
908 index d03d2ec..586cd33 100644
909 --- a/init/main.c
910 +++ b/init/main.c
911 @@ -75,6 +75,7 @@
912  #include <linux/blkdev.h>
913  #include <linux/elevator.h>
914  #include <linux/sched_clock.h>
915 +#include <linux/random.h>
916  
917  #include <asm/io.h>
918  #include <asm/bugs.h>
919 @@ -778,6 +779,7 @@ static void __init do_basic_setup(void)
920         do_ctors();
921         usermodehelper_enable();
922         do_initcalls();
923 +       random_int_secret_init();
924  }
925  
926  static void __init do_pre_smp_initcalls(void)
927 diff --git a/ipc/msg.c b/ipc/msg.c
928 index a877c16..558aa91 100644
929 --- a/ipc/msg.c
930 +++ b/ipc/msg.c
931 @@ -70,8 +70,6 @@ struct msg_sender {
932  
933  #define msg_ids(ns)    ((ns)->ids[IPC_MSG_IDS])
934  
935 -#define msg_unlock(msq)                ipc_unlock(&(msq)->q_perm)
936 -
937  static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
938  static int newque(struct ipc_namespace *, struct ipc_params *);
939  #ifdef CONFIG_PROC_FS
940 @@ -181,7 +179,7 @@ static void msg_rcu_free(struct rcu_head *head)
941   * @ns: namespace
942   * @params: ptr to the structure that contains the key and msgflg
943   *
944 - * Called with msg_ids.rw_mutex held (writer)
945 + * Called with msg_ids.rwsem held (writer)
946   */
947  static int newque(struct ipc_namespace *ns, struct ipc_params *params)
948  {
949 @@ -267,8 +265,8 @@ static void expunge_all(struct msg_queue *msq, int res)
950   * removes the message queue from message queue ID IDR, and cleans up all the
951   * messages associated with this queue.
952   *
953 - * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
954 - * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
955 + * msg_ids.rwsem (writer) and the spinlock for this message queue are held
956 + * before freeque() is called. msg_ids.rwsem remains locked on exit.
957   */
958  static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
959  {
960 @@ -278,7 +276,8 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
961         expunge_all(msq, -EIDRM);
962         ss_wakeup(&msq->q_senders, 1);
963         msg_rmid(ns, msq);
964 -       msg_unlock(msq);
965 +       ipc_unlock_object(&msq->q_perm);
966 +       rcu_read_unlock();
967  
968         list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
969                 atomic_dec(&ns->msg_hdrs);
970 @@ -289,7 +288,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
971  }
972  
973  /*
974 - * Called with msg_ids.rw_mutex and ipcp locked.
975 + * Called with msg_ids.rwsem and ipcp locked.
976   */
977  static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
978  {
979 @@ -393,9 +392,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
980  }
981  
982  /*
983 - * This function handles some msgctl commands which require the rw_mutex
984 + * This function handles some msgctl commands which require the rwsem
985   * to be held in write mode.
986 - * NOTE: no locks must be held, the rw_mutex is taken inside this function.
987 + * NOTE: no locks must be held, the rwsem is taken inside this function.
988   */
989  static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
990                        struct msqid_ds __user *buf, int version)
991 @@ -410,7 +409,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
992                         return -EFAULT;
993         }
994  
995 -       down_write(&msg_ids(ns).rw_mutex);
996 +       down_write(&msg_ids(ns).rwsem);
997         rcu_read_lock();
998  
999         ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
1000 @@ -466,7 +465,7 @@ out_unlock0:
1001  out_unlock1:
1002         rcu_read_unlock();
1003  out_up:
1004 -       up_write(&msg_ids(ns).rw_mutex);
1005 +       up_write(&msg_ids(ns).rwsem);
1006         return err;
1007  }
1008  
1009 @@ -501,7 +500,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
1010                 msginfo.msgmnb = ns->msg_ctlmnb;
1011                 msginfo.msgssz = MSGSSZ;
1012                 msginfo.msgseg = MSGSEG;
1013 -               down_read(&msg_ids(ns).rw_mutex);
1014 +               down_read(&msg_ids(ns).rwsem);
1015                 if (cmd == MSG_INFO) {
1016                         msginfo.msgpool = msg_ids(ns).in_use;
1017                         msginfo.msgmap = atomic_read(&ns->msg_hdrs);
1018 @@ -512,7 +511,7 @@ static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
1019                         msginfo.msgtql = MSGTQL;
1020                 }
1021                 max_id = ipc_get_maxid(&msg_ids(ns));
1022 -               up_read(&msg_ids(ns).rw_mutex);
1023 +               up_read(&msg_ids(ns).rwsem);
1024                 if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
1025                         return -EFAULT;
1026                 return (max_id < 0) ? 0 : max_id;
1027 diff --git a/ipc/namespace.c b/ipc/namespace.c
1028 index 7ee61bf..aba9a58 100644
1029 --- a/ipc/namespace.c
1030 +++ b/ipc/namespace.c
1031 @@ -81,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
1032         int next_id;
1033         int total, in_use;
1034  
1035 -       down_write(&ids->rw_mutex);
1036 +       down_write(&ids->rwsem);
1037  
1038         in_use = ids->in_use;
1039  
1040 @@ -89,11 +89,12 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
1041                 perm = idr_find(&ids->ipcs_idr, next_id);
1042                 if (perm == NULL)
1043                         continue;
1044 -               ipc_lock_by_ptr(perm);
1045 +               rcu_read_lock();
1046 +               ipc_lock_object(perm);
1047                 free(ns, perm);
1048                 total++;
1049         }
1050 -       up_write(&ids->rw_mutex);
1051 +       up_write(&ids->rwsem);
1052  }
1053  
1054  static void free_ipc_ns(struct ipc_namespace *ns)
1055 diff --git a/ipc/sem.c b/ipc/sem.c
1056 index 87614511..8e2bf30 100644
1057 --- a/ipc/sem.c
1058 +++ b/ipc/sem.c
1059 @@ -248,12 +248,20 @@ static void merge_queues(struct sem_array *sma)
1060   * Caller must own sem_perm.lock.
1061   * New simple ops cannot start, because simple ops first check
1062   * that sem_perm.lock is free.
1063 + * that a) sem_perm.lock is free and b) complex_count is 0.
1064   */
1065  static void sem_wait_array(struct sem_array *sma)
1066  {
1067         int i;
1068         struct sem *sem;
1069  
1070 +       if (sma->complex_count)  {
1071 +               /* The thread that increased sma->complex_count waited on
1072 +                * all sem->lock locks. Thus we don't need to wait again.
1073 +                */
1074 +               return;
1075 +       }
1076 +
1077         for (i = 0; i < sma->sem_nsems; i++) {
1078                 sem = sma->sem_base + i;
1079                 spin_unlock_wait(&sem->lock);
1080 @@ -365,7 +373,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
1081  }
1082  
1083  /*
1084 - * sem_lock_(check_) routines are called in the paths where the rw_mutex
1085 + * sem_lock_(check_) routines are called in the paths where the rwsem
1086   * is not held.
1087   *
1088   * The caller holds the RCU read lock.
1089 @@ -464,7 +472,7 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
1090   * @ns: namespace
1091   * @params: ptr to the structure that contains key, semflg and nsems
1092   *
1093 - * Called with sem_ids.rw_mutex held (as a writer)
1094 + * Called with sem_ids.rwsem held (as a writer)
1095   */
1096  
1097  static int newary(struct ipc_namespace *ns, struct ipc_params *params)
1098 @@ -529,7 +537,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
1099  
1100  
1101  /*
1102 - * Called with sem_ids.rw_mutex and ipcp locked.
1103 + * Called with sem_ids.rwsem and ipcp locked.
1104   */
1105  static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
1106  {
1107 @@ -540,7 +548,7 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
1108  }
1109  
1110  /*
1111 - * Called with sem_ids.rw_mutex and ipcp locked.
1112 + * Called with sem_ids.rwsem and ipcp locked.
1113   */
1114  static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
1115                                 struct ipc_params *params)
1116 @@ -910,6 +918,24 @@ again:
1117  }
1118  
1119  /**
1120 + * set_semotime(sma, sops) - set sem_otime
1121 + * @sma: semaphore array
1122 + * @sops: operations that modified the array, may be NULL
1123 + *
1124 + * sem_otime is replicated to avoid cache line trashing.
1125 + * This function sets one instance to the current time.
1126 + */
1127 +static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1128 +{
1129 +       if (sops == NULL) {
1130 +               sma->sem_base[0].sem_otime = get_seconds();
1131 +       } else {
1132 +               sma->sem_base[sops[0].sem_num].sem_otime =
1133 +                                                       get_seconds();
1134 +       }
1135 +}
1136 +
1137 +/**
1138   * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
1139   * @sma: semaphore array
1140   * @sops: operations that were performed
1141 @@ -959,17 +985,10 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
1142                         }
1143                 }
1144         }
1145 -       if (otime) {
1146 -               if (sops == NULL) {
1147 -                       sma->sem_base[0].sem_otime = get_seconds();
1148 -               } else {
1149 -                       sma->sem_base[sops[0].sem_num].sem_otime =
1150 -                                                               get_seconds();
1151 -               }
1152 -       }
1153 +       if (otime)
1154 +               set_semotime(sma, sops);
1155  }
1156  
1157 -
1158  /* The following counts are associated to each semaphore:
1159   *   semncnt        number of tasks waiting on semval being nonzero
1160   *   semzcnt        number of tasks waiting on semval being zero
1161 @@ -1031,8 +1050,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
1162         return semzcnt;
1163  }
1164  
1165 -/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
1166 - * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
1167 +/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1168 + * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem
1169   * remains locked on exit.
1170   */
1171  static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1172 @@ -1152,7 +1171,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
1173                 seminfo.semmnu = SEMMNU;
1174                 seminfo.semmap = SEMMAP;
1175                 seminfo.semume = SEMUME;
1176 -               down_read(&sem_ids(ns).rw_mutex);
1177 +               down_read(&sem_ids(ns).rwsem);
1178                 if (cmd == SEM_INFO) {
1179                         seminfo.semusz = sem_ids(ns).in_use;
1180                         seminfo.semaem = ns->used_sems;
1181 @@ -1161,7 +1180,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
1182                         seminfo.semaem = SEMAEM;
1183                 }
1184                 max_id = ipc_get_maxid(&sem_ids(ns));
1185 -               up_read(&sem_ids(ns).rw_mutex);
1186 +               up_read(&sem_ids(ns).rwsem);
1187                 if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) 
1188                         return -EFAULT;
1189                 return (max_id < 0) ? 0: max_id;
1190 @@ -1467,9 +1486,9 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1191  }
1192  
1193  /*
1194 - * This function handles some semctl commands which require the rw_mutex
1195 + * This function handles some semctl commands which require the rwsem
1196   * to be held in write mode.
1197 - * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1198 + * NOTE: no locks must be held, the rwsem is taken inside this function.
1199   */
1200  static int semctl_down(struct ipc_namespace *ns, int semid,
1201                        int cmd, int version, void __user *p)
1202 @@ -1484,7 +1503,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
1203                         return -EFAULT;
1204         }
1205  
1206 -       down_write(&sem_ids(ns).rw_mutex);
1207 +       down_write(&sem_ids(ns).rwsem);
1208         rcu_read_lock();
1209  
1210         ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1211 @@ -1523,7 +1542,7 @@ out_unlock0:
1212  out_unlock1:
1213         rcu_read_unlock();
1214  out_up:
1215 -       up_write(&sem_ids(ns).rw_mutex);
1216 +       up_write(&sem_ids(ns).rwsem);
1217         return err;
1218  }
1219  
1220 @@ -1831,12 +1850,17 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1221  
1222         error = perform_atomic_semop(sma, sops, nsops, un,
1223                                         task_tgid_vnr(current));
1224 -       if (error <= 0) {
1225 -               if (alter && error == 0)
1226 +       if (error == 0) {
1227 +               /* If the operation was successful, then do
1228 +                * the required updates.
1229 +                */
1230 +               if (alter)
1231                         do_smart_update(sma, sops, nsops, 1, &tasks);
1232 -
1233 -               goto out_unlock_free;
1234 +               else
1235 +                       set_semotime(sma, sops);
1236         }
1237 +       if (error <= 0)
1238 +               goto out_unlock_free;
1239  
1240         /* We need to sleep on this operation, so we put the current
1241          * task into the pending queue and go to sleep.
1242 @@ -2095,6 +2119,14 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1243         struct sem_array *sma = it;
1244         time_t sem_otime;
1245  
1246 +       /*
1247 +        * The proc interface isn't aware of sem_lock(), it calls
1248 +        * ipc_lock_object() directly (in sysvipc_find_ipc).
1249 +        * In order to stay compatible with sem_lock(), we must wait until
1250 +        * all simple semop() calls have left their critical regions.
1251 +        */
1252 +       sem_wait_array(sma);
1253 +
1254         sem_otime = get_semotime(sma);
1255  
1256         return seq_printf(s,
1257 diff --git a/ipc/shm.c b/ipc/shm.c
1258 index 2d6833d..d697396 100644
1259 --- a/ipc/shm.c
1260 +++ b/ipc/shm.c
1261 @@ -19,6 +19,9 @@
1262   * namespaces support
1263   * OpenVZ, SWsoft Inc.
1264   * Pavel Emelianov <xemul@openvz.org>
1265 + *
1266 + * Better ipc lock (kern_ipc_perm.lock) handling
1267 + * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
1268   */
1269  
1270  #include <linux/slab.h>
1271 @@ -80,8 +83,8 @@ void shm_init_ns(struct ipc_namespace *ns)
1272  }
1273  
1274  /*
1275 - * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
1276 - * Only shm_ids.rw_mutex remains locked on exit.
1277 + * Called with shm_ids.rwsem (writer) and the shp structure locked.
1278 + * Only shm_ids.rwsem remains locked on exit.
1279   */
1280  static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1281  {
1282 @@ -124,8 +127,28 @@ void __init shm_init (void)
1283                                 IPC_SHM_IDS, sysvipc_shm_proc_show);
1284  }
1285  
1286 +static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
1287 +{
1288 +       struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
1289 +
1290 +       if (IS_ERR(ipcp))
1291 +               return ERR_CAST(ipcp);
1292 +
1293 +       return container_of(ipcp, struct shmid_kernel, shm_perm);
1294 +}
1295 +
1296 +static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
1297 +{
1298 +       struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
1299 +
1300 +       if (IS_ERR(ipcp))
1301 +               return ERR_CAST(ipcp);
1302 +
1303 +       return container_of(ipcp, struct shmid_kernel, shm_perm);
1304 +}
1305 +
1306  /*
1307 - * shm_lock_(check_) routines are called in the paths where the rw_mutex
1308 + * shm_lock_(check_) routines are called in the paths where the rwsem
1309   * is not necessarily held.
1310   */
1311  static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
1312 @@ -144,17 +167,6 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
1313         ipc_lock_object(&ipcp->shm_perm);
1314  }
1315  
1316 -static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
1317 -                                               int id)
1318 -{
1319 -       struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
1320 -
1321 -       if (IS_ERR(ipcp))
1322 -               return (struct shmid_kernel *)ipcp;
1323 -
1324 -       return container_of(ipcp, struct shmid_kernel, shm_perm);
1325 -}
1326 -
1327  static void shm_rcu_free(struct rcu_head *head)
1328  {
1329         struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
1330 @@ -191,7 +203,7 @@ static void shm_open(struct vm_area_struct *vma)
1331   * @ns: namespace
1332   * @shp: struct to free
1333   *
1334 - * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
1335 + * It has to be called with shp and shm_ids.rwsem (writer) locked,
1336   * but returns with shp unlocked and freed.
1337   */
1338  static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
1339 @@ -238,7 +250,7 @@ static void shm_close(struct vm_area_struct *vma)
1340         struct shmid_kernel *shp;
1341         struct ipc_namespace *ns = sfd->ns;
1342  
1343 -       down_write(&shm_ids(ns).rw_mutex);
1344 +       down_write(&shm_ids(ns).rwsem);
1345         /* remove from the list of attaches of the shm segment */
1346         shp = shm_lock(ns, sfd->id);
1347         BUG_ON(IS_ERR(shp));
1348 @@ -249,10 +261,10 @@ static void shm_close(struct vm_area_struct *vma)
1349                 shm_destroy(ns, shp);
1350         else
1351                 shm_unlock(shp);
1352 -       up_write(&shm_ids(ns).rw_mutex);
1353 +       up_write(&shm_ids(ns).rwsem);
1354  }
1355  
1356 -/* Called with ns->shm_ids(ns).rw_mutex locked */
1357 +/* Called with ns->shm_ids(ns).rwsem locked */
1358  static int shm_try_destroy_current(int id, void *p, void *data)
1359  {
1360         struct ipc_namespace *ns = data;
1361 @@ -283,7 +295,7 @@ static int shm_try_destroy_current(int id, void *p, void *data)
1362         return 0;
1363  }
1364  
1365 -/* Called with ns->shm_ids(ns).rw_mutex locked */
1366 +/* Called with ns->shm_ids(ns).rwsem locked */
1367  static int shm_try_destroy_orphaned(int id, void *p, void *data)
1368  {
1369         struct ipc_namespace *ns = data;
1370 @@ -294,7 +306,7 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
1371          * We want to destroy segments without users and with already
1372          * exit'ed originating process.
1373          *
1374 -        * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
1375 +        * As shp->* are changed under rwsem, it's safe to skip shp locking.
1376          */
1377         if (shp->shm_creator != NULL)
1378                 return 0;
1379 @@ -308,10 +320,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
1380  
1381  void shm_destroy_orphaned(struct ipc_namespace *ns)
1382  {
1383 -       down_write(&shm_ids(ns).rw_mutex);
1384 +       down_write(&shm_ids(ns).rwsem);
1385         if (shm_ids(ns).in_use)
1386                 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
1387 -       up_write(&shm_ids(ns).rw_mutex);
1388 +       up_write(&shm_ids(ns).rwsem);
1389  }
1390  
1391  
1392 @@ -323,10 +335,10 @@ void exit_shm(struct task_struct *task)
1393                 return;
1394  
1395         /* Destroy all already created segments, but not mapped yet */
1396 -       down_write(&shm_ids(ns).rw_mutex);
1397 +       down_write(&shm_ids(ns).rwsem);
1398         if (shm_ids(ns).in_use)
1399                 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
1400 -       up_write(&shm_ids(ns).rw_mutex);
1401 +       up_write(&shm_ids(ns).rwsem);
1402  }
1403  
1404  static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1405 @@ -460,7 +472,7 @@ static const struct vm_operations_struct shm_vm_ops = {
1406   * @ns: namespace
1407   * @params: ptr to the structure that contains key, size and shmflg
1408   *
1409 - * Called with shm_ids.rw_mutex held as a writer.
1410 + * Called with shm_ids.rwsem held as a writer.
1411   */
1412  
1413  static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
1414 @@ -567,7 +579,7 @@ no_file:
1415  }
1416  
1417  /*
1418 - * Called with shm_ids.rw_mutex and ipcp locked.
1419 + * Called with shm_ids.rwsem and ipcp locked.
1420   */
1421  static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
1422  {
1423 @@ -578,7 +590,7 @@ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
1424  }
1425  
1426  /*
1427 - * Called with shm_ids.rw_mutex and ipcp locked.
1428 + * Called with shm_ids.rwsem and ipcp locked.
1429   */
1430  static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
1431                                 struct ipc_params *params)
1432 @@ -691,7 +703,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
1433  
1434  /*
1435   * Calculate and add used RSS and swap pages of a shm.
1436 - * Called with shm_ids.rw_mutex held as a reader
1437 + * Called with shm_ids.rwsem held as a reader
1438   */
1439  static void shm_add_rss_swap(struct shmid_kernel *shp,
1440         unsigned long *rss_add, unsigned long *swp_add)
1441 @@ -718,7 +730,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
1442  }
1443  
1444  /*
1445 - * Called with shm_ids.rw_mutex held as a reader
1446 + * Called with shm_ids.rwsem held as a reader
1447   */
1448  static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
1449                 unsigned long *swp)
1450 @@ -747,9 +759,9 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
1451  }
1452  
1453  /*
1454 - * This function handles some shmctl commands which require the rw_mutex
1455 + * This function handles some shmctl commands which require the rwsem
1456   * to be held in write mode.
1457 - * NOTE: no locks must be held, the rw_mutex is taken inside this function.
1458 + * NOTE: no locks must be held, the rwsem is taken inside this function.
1459   */
1460  static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1461                        struct shmid_ds __user *buf, int version)
1462 @@ -764,14 +776,13 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1463                         return -EFAULT;
1464         }
1465  
1466 -       down_write(&shm_ids(ns).rw_mutex);
1467 +       down_write(&shm_ids(ns).rwsem);
1468         rcu_read_lock();
1469  
1470 -       ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
1471 -                              &shmid64.shm_perm, 0);
1472 +       ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
1473 +                                     &shmid64.shm_perm, 0);
1474         if (IS_ERR(ipcp)) {
1475                 err = PTR_ERR(ipcp);
1476 -               /* the ipc lock is not held upon failure */
1477                 goto out_unlock1;
1478         }
1479  
1480 @@ -779,14 +790,16 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1481  
1482         err = security_shm_shmctl(shp, cmd);
1483         if (err)
1484 -               goto out_unlock0;
1485 +               goto out_unlock1;
1486  
1487         switch (cmd) {
1488         case IPC_RMID:
1489 +               ipc_lock_object(&shp->shm_perm);
1490                 /* do_shm_rmid unlocks the ipc object and rcu */
1491                 do_shm_rmid(ns, ipcp);
1492                 goto out_up;
1493         case IPC_SET:
1494 +               ipc_lock_object(&shp->shm_perm);
1495                 err = ipc_update_perm(&shmid64.shm_perm, ipcp);
1496                 if (err)
1497                         goto out_unlock0;
1498 @@ -794,6 +807,7 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
1499                 break;
1500         default:
1501                 err = -EINVAL;
1502 +               goto out_unlock1;
1503         }
1504  
1505  out_unlock0:
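
[Editor's note] shmctl_down() now uses ipcctl_pre_down_nolock(), which only looks the object up and performs the audit/permission checks; the spinlock is acquired per command (IPC_RMID, IPC_SET) right before the update. The out_unlock0/out_unlock1 labels follow the convention used throughout this series: level 0 drops the per-object spinlock, level 1 only the RCU read lock, so any failure before ipc_lock_object() must jump straight to out_unlock1. A reduced sketch of the IPC_SET leg (names as in the hunks, elided code marked):

    ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
                                  &shmid64.shm_perm, 0);   /* lookup + checks, no spinlock */
    /* ... security_shm_shmctl(); on error: goto out_unlock1 ... */
    ipc_lock_object(&shp->shm_perm);            /* taken only now, for the update itself */
    err = ipc_update_perm(&shmid64.shm_perm, ipcp);
    if (err)
            goto out_unlock0;
    /* ... */
out_unlock0:
    ipc_unlock_object(&shp->shm_perm);
out_unlock1:
    rcu_read_unlock();
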
1506 @@ -801,33 +815,28 @@ out_unlock0:
1507  out_unlock1:
1508         rcu_read_unlock();
1509  out_up:
1510 -       up_write(&shm_ids(ns).rw_mutex);
1511 +       up_write(&shm_ids(ns).rwsem);
1512         return err;
1513  }
1514  
1515 -SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1516 +static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
1517 +                        int cmd, int version, void __user *buf)
1518  {
1519 +       int err;
1520         struct shmid_kernel *shp;
1521 -       int err, version;
1522 -       struct ipc_namespace *ns;
1523  
1524 -       if (cmd < 0 || shmid < 0) {
1525 -               err = -EINVAL;
1526 -               goto out;
1527 +       /* preliminary security checks for *_INFO */
1528 +       if (cmd == IPC_INFO || cmd == SHM_INFO) {
1529 +               err = security_shm_shmctl(NULL, cmd);
1530 +               if (err)
1531 +                       return err;
1532         }
1533  
1534 -       version = ipc_parse_version(&cmd);
1535 -       ns = current->nsproxy->ipc_ns;
1536 -
1537 -       switch (cmd) { /* replace with proc interface ? */
1538 +       switch (cmd) {
1539         case IPC_INFO:
1540         {
1541                 struct shminfo64 shminfo;
1542  
1543 -               err = security_shm_shmctl(NULL, cmd);
1544 -               if (err)
1545 -                       return err;
1546 -
1547                 memset(&shminfo, 0, sizeof(shminfo));
1548                 shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
1549                 shminfo.shmmax = ns->shm_ctlmax;
1550 @@ -837,9 +846,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1551                 if(copy_shminfo_to_user (buf, &shminfo, version))
1552                         return -EFAULT;
1553  
1554 -               down_read(&shm_ids(ns).rw_mutex);
1555 +               down_read(&shm_ids(ns).rwsem);
1556                 err = ipc_get_maxid(&shm_ids(ns));
1557 -               up_read(&shm_ids(ns).rw_mutex);
1558 +               up_read(&shm_ids(ns).rwsem);
1559  
1560                 if(err<0)
1561                         err = 0;
1562 @@ -849,19 +858,15 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1563         {
1564                 struct shm_info shm_info;
1565  
1566 -               err = security_shm_shmctl(NULL, cmd);
1567 -               if (err)
1568 -                       return err;
1569 -
1570                 memset(&shm_info, 0, sizeof(shm_info));
1571 -               down_read(&shm_ids(ns).rw_mutex);
1572 +               down_read(&shm_ids(ns).rwsem);
1573                 shm_info.used_ids = shm_ids(ns).in_use;
1574                 shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp);
1575                 shm_info.shm_tot = ns->shm_tot;
1576                 shm_info.swap_attempts = 0;
1577                 shm_info.swap_successes = 0;
1578                 err = ipc_get_maxid(&shm_ids(ns));
1579 -               up_read(&shm_ids(ns).rw_mutex);
1580 +               up_read(&shm_ids(ns).rwsem);
1581                 if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
1582                         err = -EFAULT;
1583                         goto out;
1584 @@ -876,27 +881,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1585                 struct shmid64_ds tbuf;
1586                 int result;
1587  
1588 +               rcu_read_lock();
1589                 if (cmd == SHM_STAT) {
1590 -                       shp = shm_lock(ns, shmid);
1591 +                       shp = shm_obtain_object(ns, shmid);
1592                         if (IS_ERR(shp)) {
1593                                 err = PTR_ERR(shp);
1594 -                               goto out;
1595 +                               goto out_unlock;
1596                         }
1597                         result = shp->shm_perm.id;
1598                 } else {
1599 -                       shp = shm_lock_check(ns, shmid);
1600 +                       shp = shm_obtain_object_check(ns, shmid);
1601                         if (IS_ERR(shp)) {
1602                                 err = PTR_ERR(shp);
1603 -                               goto out;
1604 +                               goto out_unlock;
1605                         }
1606                         result = 0;
1607                 }
1608 +
1609                 err = -EACCES;
1610                 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
1611                         goto out_unlock;
1612 +
1613                 err = security_shm_shmctl(shp, cmd);
1614                 if (err)
1615                         goto out_unlock;
1616 +
1617                 memset(&tbuf, 0, sizeof(tbuf));
1618                 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
1619                 tbuf.shm_segsz  = shp->shm_segsz;
1620 @@ -906,43 +915,76 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1621                 tbuf.shm_cpid   = shp->shm_cprid;
1622                 tbuf.shm_lpid   = shp->shm_lprid;
1623                 tbuf.shm_nattch = shp->shm_nattch;
1624 -               shm_unlock(shp);
1625 -               if(copy_shmid_to_user (buf, &tbuf, version))
1626 +               rcu_read_unlock();
1627 +
1628 +               if (copy_shmid_to_user(buf, &tbuf, version))
1629                         err = -EFAULT;
1630                 else
1631                         err = result;
1632                 goto out;
1633         }
1634 +       default:
1635 +               return -EINVAL;
1636 +       }
1637 +
1638 +out_unlock:
1639 +       rcu_read_unlock();
1640 +out:
1641 +       return err;
1642 +}
1643 +
1644 +SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1645 +{
1646 +       struct shmid_kernel *shp;
1647 +       int err, version;
1648 +       struct ipc_namespace *ns;
1649 +
1650 +       if (cmd < 0 || shmid < 0)
1651 +               return -EINVAL;
1652 +
1653 +       version = ipc_parse_version(&cmd);
1654 +       ns = current->nsproxy->ipc_ns;
1655 +
1656 +       switch (cmd) {
1657 +       case IPC_INFO:
1658 +       case SHM_INFO:
1659 +       case SHM_STAT:
1660 +       case IPC_STAT:
1661 +               return shmctl_nolock(ns, shmid, cmd, version, buf);
1662 +       case IPC_RMID:
1663 +       case IPC_SET:
1664 +               return shmctl_down(ns, shmid, cmd, buf, version);
1665         case SHM_LOCK:
1666         case SHM_UNLOCK:
1667         {
1668                 struct file *shm_file;
1669  
1670 -               shp = shm_lock_check(ns, shmid);
1671 +               rcu_read_lock();
1672 +               shp = shm_obtain_object_check(ns, shmid);
1673                 if (IS_ERR(shp)) {
1674                         err = PTR_ERR(shp);
1675 -                       goto out;
1676 +                       goto out_unlock1;
1677                 }
1678  
1679                 audit_ipc_obj(&(shp->shm_perm));
1680 +               err = security_shm_shmctl(shp, cmd);
1681 +               if (err)
1682 +                       goto out_unlock1;
1683  
1684 +               ipc_lock_object(&shp->shm_perm);
1685                 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1686                         kuid_t euid = current_euid();
1687                         err = -EPERM;
1688                         if (!uid_eq(euid, shp->shm_perm.uid) &&
1689                             !uid_eq(euid, shp->shm_perm.cuid))
1690 -                               goto out_unlock;
1691 +                               goto out_unlock0;
1692                         if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
1693 -                               goto out_unlock;
1694 +                               goto out_unlock0;
1695                 }
1696  
1697 -               err = security_shm_shmctl(shp, cmd);
1698 -               if (err)
1699 -                       goto out_unlock;
1700 -
1701                 shm_file = shp->shm_file;
1702                 if (is_file_hugepages(shm_file))
1703 -                       goto out_unlock;
1704 +                       goto out_unlock0;
1705  
1706                 if (cmd == SHM_LOCK) {
1707                         struct user_struct *user = current_user();
1708 @@ -951,32 +993,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1709                                 shp->shm_perm.mode |= SHM_LOCKED;
1710                                 shp->mlock_user = user;
1711                         }
1712 -                       goto out_unlock;
1713 +                       goto out_unlock0;
1714                 }
1715  
1716                 /* SHM_UNLOCK */
1717                 if (!(shp->shm_perm.mode & SHM_LOCKED))
1718 -                       goto out_unlock;
1719 +                       goto out_unlock0;
1720                 shmem_lock(shm_file, 0, shp->mlock_user);
1721                 shp->shm_perm.mode &= ~SHM_LOCKED;
1722                 shp->mlock_user = NULL;
1723                 get_file(shm_file);
1724 -               shm_unlock(shp);
1725 +               ipc_unlock_object(&shp->shm_perm);
1726 +               rcu_read_unlock();
1727                 shmem_unlock_mapping(shm_file->f_mapping);
1728 +
1729                 fput(shm_file);
1730 -               goto out;
1731 -       }
1732 -       case IPC_RMID:
1733 -       case IPC_SET:
1734 -               err = shmctl_down(ns, shmid, cmd, buf, version);
1735                 return err;
1736 +       }
1737         default:
1738                 return -EINVAL;
1739         }
1740  
1741 -out_unlock:
1742 -       shm_unlock(shp);
1743 -out:
1744 +out_unlock0:
1745 +       ipc_unlock_object(&shp->shm_perm);
1746 +out_unlock1:
1747 +       rcu_read_unlock();
1748         return err;
1749  }
1750  
1751 @@ -1044,10 +1085,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1752          * additional creator id...
1753          */
1754         ns = current->nsproxy->ipc_ns;
1755 -       shp = shm_lock_check(ns, shmid);
1756 +       rcu_read_lock();
1757 +       shp = shm_obtain_object_check(ns, shmid);
1758         if (IS_ERR(shp)) {
1759                 err = PTR_ERR(shp);
1760 -               goto out;
1761 +               goto out_unlock;
1762         }
1763  
1764         err = -EACCES;
1765 @@ -1058,24 +1100,31 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1766         if (err)
1767                 goto out_unlock;
1768  
1769 +       ipc_lock_object(&shp->shm_perm);
1770         path = shp->shm_file->f_path;
1771         path_get(&path);
1772         shp->shm_nattch++;
1773         size = i_size_read(path.dentry->d_inode);
1774 -       shm_unlock(shp);
1775 +       ipc_unlock_object(&shp->shm_perm);
1776 +       rcu_read_unlock();
1777  
1778         err = -ENOMEM;
1779         sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1780 -       if (!sfd)
1781 -               goto out_put_dentry;
1782 +       if (!sfd) {
1783 +               path_put(&path);
1784 +               goto out_nattch;
1785 +       }
1786  
1787         file = alloc_file(&path, f_mode,
1788                           is_file_hugepages(shp->shm_file) ?
1789                                 &shm_file_operations_huge :
1790                                 &shm_file_operations);
1791         err = PTR_ERR(file);
1792 -       if (IS_ERR(file))
1793 -               goto out_free;
1794 +       if (IS_ERR(file)) {
1795 +               kfree(sfd);
1796 +               path_put(&path);
1797 +               goto out_nattch;
1798 +       }
1799  
1800         file->private_data = sfd;
1801         file->f_mapping = shp->shm_file->f_mapping;
1802 @@ -1101,7 +1150,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1803                     addr > current->mm->start_stack - size - PAGE_SIZE * 5)
1804                         goto invalid;
1805         }
1806 -               
1807 +
1808         addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
1809         *raddr = addr;
1810         err = 0;
1811 @@ -1116,7 +1165,7 @@ out_fput:
1812         fput(file);
1813  
1814  out_nattch:
1815 -       down_write(&shm_ids(ns).rw_mutex);
1816 +       down_write(&shm_ids(ns).rwsem);
1817         shp = shm_lock(ns, shmid);
1818         BUG_ON(IS_ERR(shp));
1819         shp->shm_nattch--;
1820 @@ -1124,20 +1173,13 @@ out_nattch:
1821                 shm_destroy(ns, shp);
1822         else
1823                 shm_unlock(shp);
1824 -       up_write(&shm_ids(ns).rw_mutex);
1825 -
1826 -out:
1827 +       up_write(&shm_ids(ns).rwsem);
1828         return err;
1829  
1830  out_unlock:
1831 -       shm_unlock(shp);
1832 -       goto out;
1833 -
1834 -out_free:
1835 -       kfree(sfd);
1836 -out_put_dentry:
1837 -       path_put(&path);
1838 -       goto out_nattch;
1839 +       rcu_read_unlock();
1840 +out:
1841 +       return err;
1842  }
1843  
1844  SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
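
[Editor's note] do_shmat() now looks the segment up under RCU only, takes ipc_lock_object() just around bumping shm_nattch, and drops everything before allocating. Each later failure therefore releases whatever it allocated itself and jumps to out_nattch, which re-locks the segment to undo the attach count (and may destroy an orphaned segment). A condensed sketch of that recovery path, built from the hunks above (the destroy-or-unlock predicate is elided in the hunk and left elided here):

    sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
    if (!sfd) {
            path_put(&path);                /* drop the reference taken under the object lock */
            goto out_nattch;
    }
    /* ... */
out_nattch:
    down_write(&shm_ids(ns).rwsem);
    shp = shm_lock(ns, shmid);
    shp->shm_nattch--;
    if (...)                                /* predicate elided in the hunk above */
            shm_destroy(ns, shp);
    else
            shm_unlock(shp);
    up_write(&shm_ids(ns).rwsem);
    return err;
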
1845 @@ -1242,8 +1284,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1846  #else /* CONFIG_MMU */
1847         /* under NOMMU conditions, the exact address to be destroyed must be
1848          * given */
1849 -       retval = -EINVAL;
1850 -       if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1851 +       if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1852                 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
1853                 retval = 0;
1854         }
1855 diff --git a/ipc/util.c b/ipc/util.c
1856 index 0c6566b..fdb8ae7 100644
1857 --- a/ipc/util.c
1858 +++ b/ipc/util.c
1859 @@ -15,6 +15,14 @@
1860   * Jun 2006 - namespaces ssupport
1861   *            OpenVZ, SWsoft Inc.
1862   *            Pavel Emelianov <xemul@openvz.org>
1863 + *
1864 + * General sysv ipc locking scheme:
1865 + *  when doing ipc id lookups, take the ids->rwsem
1866 + *      rcu_read_lock()
1867 + *          obtain the ipc object (kern_ipc_perm)
1868 + *          perform security, capabilities, auditing and permission checks, etc.
1869 + *          acquire the ipc lock (kern_ipc_perm.lock) through ipc_lock_object()
1870 + *             perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
1871   */
1872  
1873  #include <linux/mm.h>
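
[Editor's note] The new header comment is the locking contract the rest of this series converts sem, msg and shm to. Spelled out as a skeleton (ids and perm stand in for the per-subsystem equivalents; several converted fast paths, such as shmctl_nolock() above, skip the rwsem entirely and rely on RCU alone):

    down_read(&ids->rwsem);                         /* write-locked only when ids are added/removed */
    rcu_read_lock();
    perm = ipc_obtain_object_check(ids, id);        /* lockless lookup of the kern_ipc_perm */
    /* security, capability, audit and permission checks run here, still unlocked */
    ipc_lock_object(perm);                          /* kern_ipc_perm.lock, only around the update */
    /* data updates: SET, RMID, LOCK/UNLOCK style commands */
    ipc_unlock_object(perm);
    rcu_read_unlock();
    up_read(&ids->rwsem);
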
1874 @@ -119,7 +127,7 @@ __initcall(ipc_init);
1875   
1876  void ipc_init_ids(struct ipc_ids *ids)
1877  {
1878 -       init_rwsem(&ids->rw_mutex);
1879 +       init_rwsem(&ids->rwsem);
1880  
1881         ids->in_use = 0;
1882         ids->seq = 0;
1883 @@ -174,7 +182,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
1884   *     @ids: Identifier set
1885   *     @key: The key to find
1886   *     
1887 - *     Requires ipc_ids.rw_mutex locked.
1888 + *     Requires ipc_ids.rwsem locked.
1889   *     Returns the LOCKED pointer to the ipc structure if found or NULL
1890   *     if not.
1891   *     If key is found ipc points to the owning ipc structure
1892 @@ -197,7 +205,8 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
1893                         continue;
1894                 }
1895  
1896 -               ipc_lock_by_ptr(ipc);
1897 +               rcu_read_lock();
1898 +               ipc_lock_object(ipc);
1899                 return ipc;
1900         }
1901  
1902 @@ -208,7 +217,7 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
1903   *     ipc_get_maxid   -       get the last assigned id
1904   *     @ids: IPC identifier set
1905   *
1906 - *     Called with ipc_ids.rw_mutex held.
1907 + *     Called with ipc_ids.rwsem held.
1908   */
1909  
1910  int ipc_get_maxid(struct ipc_ids *ids)
1911 @@ -246,7 +255,7 @@ int ipc_get_maxid(struct ipc_ids *ids)
1912   *     is returned. The 'new' entry is returned in a locked state on success.
1913   *     On failure the entry is not locked and a negative err-code is returned.
1914   *
1915 - *     Called with writer ipc_ids.rw_mutex held.
1916 + *     Called with writer ipc_ids.rwsem held.
1917   */
1918  int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
1919  {
1920 @@ -312,9 +321,9 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
1921  {
1922         int err;
1923  
1924 -       down_write(&ids->rw_mutex);
1925 +       down_write(&ids->rwsem);
1926         err = ops->getnew(ns, params);
1927 -       up_write(&ids->rw_mutex);
1928 +       up_write(&ids->rwsem);
1929         return err;
1930  }
1931  
1932 @@ -331,7 +340,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
1933   *
1934   *     On success, the IPC id is returned.
1935   *
1936 - *     It is called with ipc_ids.rw_mutex and ipcp->lock held.
1937 + *     It is called with ipc_ids.rwsem and ipcp->lock held.
1938   */
1939  static int ipc_check_perms(struct ipc_namespace *ns,
1940                            struct kern_ipc_perm *ipcp,
1941 @@ -376,7 +385,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1942          * Take the lock as a writer since we are potentially going to add
1943          * a new entry + read locks are not "upgradable"
1944          */
1945 -       down_write(&ids->rw_mutex);
1946 +       down_write(&ids->rwsem);
1947         ipcp = ipc_findkey(ids, params->key);
1948         if (ipcp == NULL) {
1949                 /* key not used */
1950 @@ -402,7 +411,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1951                 }
1952                 ipc_unlock(ipcp);
1953         }
1954 -       up_write(&ids->rw_mutex);
1955 +       up_write(&ids->rwsem);
1956  
1957         return err;
1958  }
1959 @@ -413,7 +422,7 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
1960   *     @ids: IPC identifier set
1961   *     @ipcp: ipc perm structure containing the identifier to remove
1962   *
1963 - *     ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
1964 + *     ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
1965   *     before this function is called, and remain locked on the exit.
1966   */
1967   
1968 @@ -613,7 +622,7 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
1969  }
1970  
1971  /**
1972 - * ipc_lock - Lock an ipc structure without rw_mutex held
1973 + * ipc_lock - Lock an ipc structure without rwsem held
1974   * @ids: IPC identifier set
1975   * @id: ipc id to look for
1976   *
1977 @@ -669,22 +678,6 @@ out:
1978         return out;
1979  }
1980  
1981 -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
1982 -{
1983 -       struct kern_ipc_perm *out;
1984 -
1985 -       out = ipc_lock(ids, id);
1986 -       if (IS_ERR(out))
1987 -               return out;
1988 -
1989 -       if (ipc_checkid(out, id)) {
1990 -               ipc_unlock(out);
1991 -               return ERR_PTR(-EIDRM);
1992 -       }
1993 -
1994 -       return out;
1995 -}
1996 -
1997  /**
1998   * ipcget - Common sys_*get() code
1999   * @ns : namsepace
2000 @@ -725,7 +718,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
2001  }
2002  
2003  /**
2004 - * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
2005 + * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
2006   * @ns:  the ipc namespace
2007   * @ids:  the table of ids where to look for the ipc
2008   * @id:   the id of the ipc to retrieve
2009 @@ -738,29 +731,13 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
2010   * It must be called without any lock held and
2011   *  - retrieves the ipc with the given id in the given table.
2012   *  - performs some audit and permission check, depending on the given cmd
2013 - *  - returns the ipc with the ipc lock held in case of success
2014 - *    or an err-code without any lock held otherwise.
2015 + *  - returns a pointer to the ipc object or otherwise, the corresponding error.
2016   *
2017 - * Call holding the both the rw_mutex and the rcu read lock.
2018 + * Call holding the both the rwsem and the rcu read lock.
2019   */
2020 -struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
2021 -                                     struct ipc_ids *ids, int id, int cmd,
2022 -                                     struct ipc64_perm *perm, int extra_perm)
2023 -{
2024 -       struct kern_ipc_perm *ipcp;
2025 -
2026 -       ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
2027 -       if (IS_ERR(ipcp))
2028 -               goto out;
2029 -
2030 -       spin_lock(&ipcp->lock);
2031 -out:
2032 -       return ipcp;
2033 -}
2034 -
2035  struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
2036 -                                            struct ipc_ids *ids, int id, int cmd,
2037 -                                            struct ipc64_perm *perm, int extra_perm)
2038 +                                       struct ipc_ids *ids, int id, int cmd,
2039 +                                       struct ipc64_perm *perm, int extra_perm)
2040  {
2041         kuid_t euid;
2042         int err = -EPERM;
2043 @@ -838,7 +815,8 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
2044                 ipc = idr_find(&ids->ipcs_idr, pos);
2045                 if (ipc != NULL) {
2046                         *new_pos = pos + 1;
2047 -                       ipc_lock_by_ptr(ipc);
2048 +                       rcu_read_lock();
2049 +                       ipc_lock_object(ipc);
2050                         return ipc;
2051                 }
2052         }
2053 @@ -876,7 +854,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
2054          * Take the lock - this will be released by the corresponding
2055          * call to stop().
2056          */
2057 -       down_read(&ids->rw_mutex);
2058 +       down_read(&ids->rwsem);
2059  
2060         /* pos < 0 is invalid */
2061         if (*pos < 0)
2062 @@ -903,7 +881,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
2063  
2064         ids = &iter->ns->ids[iface->ids];
2065         /* Release the lock we took in start() */
2066 -       up_read(&ids->rw_mutex);
2067 +       up_read(&ids->rwsem);
2068  }
2069  
2070  static int sysvipc_proc_show(struct seq_file *s, void *it)
2071 diff --git a/ipc/util.h b/ipc/util.h
2072 index 25299e7..f2f5036 100644
2073 --- a/ipc/util.h
2074 +++ b/ipc/util.h
2075 @@ -101,10 +101,10 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
2076  #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
2077  #define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
2078  
2079 -/* must be called with ids->rw_mutex acquired for writing */
2080 +/* must be called with ids->rwsem acquired for writing */
2081  int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
2082  
2083 -/* must be called with ids->rw_mutex acquired for reading */
2084 +/* must be called with ids->rwsem acquired for reading */
2085  int ipc_get_maxid(struct ipc_ids *);
2086  
2087  /* must be called with both locks acquired. */
2088 @@ -139,9 +139,6 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
2089  struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
2090                                              struct ipc_ids *ids, int id, int cmd,
2091                                              struct ipc64_perm *perm, int extra_perm);
2092 -struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
2093 -                                     struct ipc_ids *ids, int id, int cmd,
2094 -                                     struct ipc64_perm *perm, int extra_perm);
2095  
2096  #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
2097    /* On IA-64, we always use the "64-bit version" of the IPC structures.  */ 
2098 @@ -182,19 +179,12 @@ static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm)
2099         assert_spin_locked(&perm->lock);
2100  }
2101  
2102 -static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
2103 -{
2104 -       rcu_read_lock();
2105 -       ipc_lock_object(perm);
2106 -}
2107 -
2108  static inline void ipc_unlock(struct kern_ipc_perm *perm)
2109  {
2110         ipc_unlock_object(perm);
2111         rcu_read_unlock();
2112  }
2113  
2114 -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id);
2115  struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
2116  int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
2117                         struct ipc_ops *ops, struct ipc_params *params);
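
[Editor's note] With ipc_lock_by_ptr() and ipc_lock_check() removed from util.h, call sites either use the remaining ipc_lock()/ipc_unlock() pair or open-code the lock side, as ipc_findkey() and sysvipc_find_ipc() now do; ipc_unlock() still drops both levels. The pairing, as a small reminder sketch (perm is an already looked-up kern_ipc_perm pointer):

    rcu_read_lock();
    ipc_lock_object(perm);          /* spin_lock(&perm->lock) */
    /* ... */
    ipc_unlock(perm);               /* ipc_unlock_object(perm) + rcu_read_unlock() */
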
2118 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2119 index 45850f6..4865756 100644
2120 --- a/sound/pci/hda/patch_hdmi.c
2121 +++ b/sound/pci/hda/patch_hdmi.c
2122 @@ -930,6 +930,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2123         }
2124  
2125         /*
2126 +        * always configure channel mapping, it may have been changed by the
2127 +        * user in the meantime
2128 +        */
2129 +       hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2130 +                                  channels, per_pin->chmap,
2131 +                                  per_pin->chmap_set);
2132 +
2133 +       /*
2134          * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
2135          * sizeof(*dp_ai) to avoid partial match/update problems when
2136          * the user switches between HDMI/DP monitors.
2137 @@ -940,20 +948,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
2138                             "pin=%d channels=%d\n",
2139                             pin_nid,
2140                             channels);
2141 -               hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2142 -                                          channels, per_pin->chmap,
2143 -                                          per_pin->chmap_set);
2144                 hdmi_stop_infoframe_trans(codec, pin_nid);
2145                 hdmi_fill_audio_infoframe(codec, pin_nid,
2146                                             ai.bytes, sizeof(ai));
2147                 hdmi_start_infoframe_trans(codec, pin_nid);
2148 -       } else {
2149 -               /* For non-pcm audio switch, setup new channel mapping
2150 -                * accordingly */
2151 -               if (per_pin->non_pcm != non_pcm)
2152 -                       hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
2153 -                                                  channels, per_pin->chmap,
2154 -                                                  per_pin->chmap_set);
2155         }
2156  
2157         per_pin->non_pcm = non_pcm;
2158 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2159 index 389db4c..1383f38 100644
2160 --- a/sound/pci/hda/patch_realtek.c
2161 +++ b/sound/pci/hda/patch_realtek.c
2162 @@ -3308,6 +3308,15 @@ static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
2163         }
2164  }
2165  
2166 +static void alc290_fixup_mono_speakers(struct hda_codec *codec,
2167 +                                      const struct hda_fixup *fix, int action)
2168 +{
2169 +       if (action == HDA_FIXUP_ACT_PRE_PROBE)
2170 +               /* Remove DAC node 0x03, as it seems to be
2171 +                  giving mono output */
2172 +               snd_hda_override_wcaps(codec, 0x03, 0);
2173 +}
2174 +
2175  enum {
2176         ALC269_FIXUP_SONY_VAIO,
2177         ALC275_FIXUP_SONY_VAIO_GPIO2,
2178 @@ -3331,9 +3340,12 @@ enum {
2179         ALC269_FIXUP_HP_GPIO_LED,
2180         ALC269_FIXUP_INV_DMIC,
2181         ALC269_FIXUP_LENOVO_DOCK,
2182 +       ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
2183         ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
2184         ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
2185         ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
2186 +       ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2187 +       ALC290_FIXUP_MONO_SPEAKERS,
2188         ALC269_FIXUP_HEADSET_MODE,
2189         ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
2190         ALC269_FIXUP_ASUS_X101_FUNC,
2191 @@ -3521,6 +3533,15 @@ static const struct hda_fixup alc269_fixups[] = {
2192                 .chained = true,
2193                 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2194         },
2195 +       [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
2196 +               .type = HDA_FIXUP_PINS,
2197 +               .v.pins = (const struct hda_pintbl[]) {
2198 +                       { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2199 +                       { }
2200 +               },
2201 +               .chained = true,
2202 +               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
2203 +       },
2204         [ALC269_FIXUP_HEADSET_MODE] = {
2205                 .type = HDA_FIXUP_FUNC,
2206                 .v.func = alc_fixup_headset_mode,
2207 @@ -3529,6 +3550,13 @@ static const struct hda_fixup alc269_fixups[] = {
2208                 .type = HDA_FIXUP_FUNC,
2209                 .v.func = alc_fixup_headset_mode_no_hp_mic,
2210         },
2211 +       [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
2212 +               .type = HDA_FIXUP_PINS,
2213 +               .v.pins = (const struct hda_pintbl[]) {
2214 +                       { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
2215 +                       { }
2216 +               },
2217 +       },
2218         [ALC269_FIXUP_ASUS_X101_FUNC] = {
2219                 .type = HDA_FIXUP_FUNC,
2220                 .v.func = alc269_fixup_x101_headset_mic,
2221 @@ -3595,6 +3623,12 @@ static const struct hda_fixup alc269_fixups[] = {
2222                         { }
2223                 },
2224         },
2225 +       [ALC290_FIXUP_MONO_SPEAKERS] = {
2226 +               .type = HDA_FIXUP_FUNC,
2227 +               .v.func = alc290_fixup_mono_speakers,
2228 +               .chained = true,
2229 +               .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
2230 +       },
2231  };
2232  
2233  static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2234 @@ -3631,6 +3665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2235         SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2236         SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2237         SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
2238 +       SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
2239         SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2240         SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
2241         SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
2242 @@ -3651,6 +3686,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2243         SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
2244         SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
2245         SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
2246 +       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
2247         SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
2248         SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2249         SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2250 @@ -4345,6 +4381,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
2251         SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2252         SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2253         SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
2254 +       SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
2255         SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
2256         SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
2257         SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
2258 diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
2259 index 63fb521..6234a51 100644
2260 --- a/sound/usb/usx2y/usbusx2yaudio.c
2261 +++ b/sound/usb/usx2y/usbusx2yaudio.c
2262 @@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
2263         usX2Y_clients_stop(usX2Y);
2264  }
2265  
2266 -static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
2267 -                                struct snd_usX2Y_substream *subs, struct urb *urb)
2268 -{
2269 -       snd_printk(KERN_ERR
2270 -"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
2271 -"Most probably some urb of usb-frame %i is still missing.\n"
2272 -"Cause could be too long delays in usb-hcd interrupt handling.\n",
2273 -                  usb_get_current_frame_number(usX2Y->dev),
2274 -                  subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
2275 -                  usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
2276 -       usX2Y_clients_stop(usX2Y);
2277 -}
2278 -
2279  static void i_usX2Y_urb_complete(struct urb *urb)
2280  {
2281         struct snd_usX2Y_substream *subs = urb->context;
2282 @@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
2283                 usX2Y_error_urb_status(usX2Y, subs, urb);
2284                 return;
2285         }
2286 -       if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
2287 -               subs->completed_urb = urb;
2288 -       else {
2289 -               usX2Y_error_sequence(usX2Y, subs, urb);
2290 -               return;
2291 -       }
2292 +
2293 +       subs->completed_urb = urb;
2294 +
2295         {
2296                 struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
2297                         *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
2298 diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
2299 index f2a1acd..814d0e8 100644
2300 --- a/sound/usb/usx2y/usx2yhwdeppcm.c
2301 +++ b/sound/usb/usx2y/usx2yhwdeppcm.c
2302 @@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
2303                 usX2Y_error_urb_status(usX2Y, subs, urb);
2304                 return;
2305         }
2306 -       if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
2307 -               subs->completed_urb = urb;
2308 -       else {
2309 -               usX2Y_error_sequence(usX2Y, subs, urb);
2310 -               return;
2311 -       }
2312  
2313 +       subs->completed_urb = urb;
2314         capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
2315         capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
2316         playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];