Checking patch include/atomic.h...
error: while searching for:
#include

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof ((__typeof (*(mem))) *(mem)) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })

#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })

/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#undef atomic_compare_and_exchange_val_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __atg3_old = (oldval); \
error: patch failed: include/atomic.h:41
error: while searching for:
    __atg3_old; \
  })

#undef atomic_compare_and_exchange_val_rel
#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __atg3_old = (oldval); \
error: patch failed: include/atomic.h:86
error: while searching for:
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#undef atomic_compare_and_exchange_bool_acq
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __atg3_old = (oldval); \
error: patch failed: include/atomic.h:96
error: while searching for:
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif
error: patch failed: include/atomic.h:143
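The include/atomic.h hunks above fail against the size-dispatch wrappers. For reference, the pattern those wrappers implement is plain token pasting keyed on sizeof; a minimal standalone sketch of the same pattern (hypothetical my_* names, not the glibc macros themselves) that compiles with GCC:

#include <stdint.h>
#include <stdlib.h>

#define my_fetch_add_32_relaxed(mem, v) \
  __atomic_fetch_add ((mem), (v), __ATOMIC_RELAXED)
#define my_fetch_add_64_relaxed(mem, v) \
  __atomic_fetch_add ((mem), (v), __ATOMIC_RELAXED)

/* Expands to my_fetch_add_NN_relaxed, NN chosen from sizeof (*mem); the
   if/else chain folds away at compile time since sizeof is a constant.  */
#define my_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*(mem)) == 8) \
      __result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __result; \
  })

int main (void)
{
  uint64_t counter = 41;
  uint64_t old = my_val_bysize (my_fetch_add, relaxed, &counter, 1);
  return old == 41 && counter == 42 ? 0 : 1;
}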
Checking patch sysdeps/aarch64/atomic-machine.h...
error: while searching for:
#define _AARCH64_ATOMIC_MACHINE_H 1

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 1
#define ATOMIC_EXCHANGE_USES_CAS 0

/* Compare and exchange.
   For all "bool" routines, we return FALSE if exchange succesful.  */

# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

/* Compare and exchange with "acquire" semantics, ie barrier after.  */

# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
    mem, new, old, __ATOMIC_ACQUIRE)

# define atomic_compare_and_exchange_val_acq(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
    mem, new, old, __ATOMIC_ACQUIRE)

/* Compare and exchange with "release" semantics, ie barrier before.  */

# define atomic_compare_and_exchange_val_rel(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
    mem, new, old, __ATOMIC_RELEASE)

/* Barrier macro. */
#define atomic_full_barrier() __sync_synchronize()

#endif
error: patch failed: sysdeps/aarch64/atomic-machine.h:20
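The aarch64 context that fails to match is a thin wrapper over the compiler builtin; note the inverted boolean convention the quoted comment describes: __atomic_compare_exchange_n returns true on success, while the glibc-style "bool" macros return 0 (false) on success. A small standalone sketch of that convention:

#include <stdio.h>

int main (void)
{
  int mem = 5;
  int expected = 5;
  /* Succeeds: mem == expected, so mem becomes 7 and the builtin yields 1.  */
  int swapped = __atomic_compare_exchange_n (&mem, &expected, 7, 0,
                                             __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  int failed = !swapped;	/* glibc-style result: 0 on success.  */
  printf ("mem=%d swapped=%d failed=%d\n", mem, swapped, failed);
  return 0;
}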
Checking patch sysdeps/alpha/atomic-machine.h...
error: while searching for:
#include

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1

#define __MB " mb\n"

/* Compare and exchange.
   For all of the "xxx" routines, we expect a "__prev" and a "__cmp"
   variable to be provided by the enclosing scope, in which values are
   returned.  */

#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
    mb1 \
    " andnot %[__addr8],7,%[__addr64]\n" \
    " insbl %[__new],%[__addr8],%[__snew]\n" \
    "1: ldq_l %[__tmp],0(%[__addr64])\n" \
    " extbl %[__tmp],%[__addr8],%[__prev]\n" \
    " cmpeq %[__prev],%[__old],%[__cmp]\n" \
    " beq %[__cmp],2f\n" \
    " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
    " or %[__snew],%[__tmp],%[__tmp]\n" \
    " stq_c %[__tmp],0(%[__addr64])\n" \
    " beq %[__tmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__snew] "=&r" (__snew), \
      [__tmp] "=&r" (__tmp), \
      [__cmp] "=&r" (__cmp), \
      [__addr64] "=&r" (__addr64) \
    : [__addr8] "r" (mem), \
      [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \
      [__new] "r" (new) \
    : "memory"); \
})

#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
({ \
  unsigned long __tmp, __snew, __addr64; \
  __asm__ __volatile__ ( \
    mb1 \
    " andnot %[__addr16],7,%[__addr64]\n" \
    " inswl %[__new],%[__addr16],%[__snew]\n" \
    "1: ldq_l %[__tmp],0(%[__addr64])\n" \
    " extwl %[__tmp],%[__addr16],%[__prev]\n" \
    " cmpeq %[__prev],%[__old],%[__cmp]\n" \
    " beq %[__cmp],2f\n" \
    " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
    " or %[__snew],%[__tmp],%[__tmp]\n" \
    " stq_c %[__tmp],0(%[__addr64])\n" \
    " beq %[__tmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__snew] "=&r" (__snew), \
      [__tmp] "=&r" (__tmp), \
      [__cmp] "=&r" (__cmp), \
      [__addr64] "=&r" (__addr64) \
    : [__addr16] "r" (mem), \
      [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \
      [__new] "r" (new) \
    : "memory"); \
})

#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldl_l %[__prev],%[__mem]\n" \
    " cmpeq %[__prev],%[__old],%[__cmp]\n" \
    " beq %[__cmp],2f\n" \
    " mov %[__new],%[__cmp]\n" \
    " stl_c %[__cmp],%[__mem]\n" \
    " beq %[__cmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__cmp] "=&r" (__cmp) \
    : [__mem] "m" (*(mem)), \
      [__old] "Ir" ((uint64_t)(int32_t)(uint64_t)(old)), \
      [__new] "Ir" (new) \
    : "memory"); \
})

#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
({ \
  __asm__ __volatile__ ( \
    mb1 \
    "1: ldq_l %[__prev],%[__mem]\n" \
    " cmpeq %[__prev],%[__old],%[__cmp]\n" \
    " beq %[__cmp],2f\n" \
    " mov %[__new],%[__cmp]\n" \
    " stq_c %[__cmp],%[__mem]\n" \
    " beq %[__cmp],1b\n" \
    mb2 \
    "2:" \
    : [__prev] "=&r" (__prev), \
      [__cmp] "=&r" (__cmp) \
    : [__mem] "m" (*(mem)), \
      [__old] "Ir" ((uint64_t)(old)), \
      [__new] "Ir" (new) \
    : "memory"); \
})

/* For all "bool" routines, we return FALSE if exchange succesful.  */

#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
({ unsigned long __prev; int __cmp; \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
   !__cmp; })

#define __arch_compare_and_exchange_bool_
error: patch failed: sysdeps/alpha/atomic-machine.h:18
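The rejected alpha hunk covers the byte and halfword CAS sequences, which use insbl/mskbl to merge the small operand into its containing quadword inside the LL/SC loop. A portable sketch of the same byte-in-word technique, using a 32-bit CAS plus shifts and masks instead of alpha instructions (assumes little-endian byte numbering, as on alpha):

#include <stdint.h>
#include <stdio.h>

static uint8_t
cas_u8_via_word (uint8_t *p, uint8_t oldv, uint8_t newv)
{
  uintptr_t addr = (uintptr_t) p;
  uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3); /* containing word */
  unsigned shift = (addr & 3) * 8;                       /* little-endian */
  uint32_t mask = (uint32_t) 0xff << shift;

  uint32_t cur = __atomic_load_n (word, __ATOMIC_RELAXED);
  for (;;)
    {
      uint8_t seen = (cur & mask) >> shift;
      if (seen != oldv)
        return seen;                                     /* compare failed */
      uint32_t desired = (cur & ~mask) | ((uint32_t) newv << shift);
      if (__atomic_compare_exchange_n (word, &cur, desired, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return seen;                                     /* swap succeeded */
      /* cur was refreshed by the failed CAS; retry.  */
    }
}

int main (void)
{
  uint32_t w = 0;
  uint8_t *b = (uint8_t *) &w;                           /* byte 0 of w */
  uint8_t prev = cas_u8_via_word (b, 0, 0x5a);
  printf ("prev=%u byte=%u\n", (unsigned) prev, (unsigned) *b);
  return 0;
}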
Checking patch sysdeps/arc/atomic-machine.h...
Checking patch sysdeps/arm/atomic-machine.h...
Checking patch sysdeps/csky/atomic-machine.h...
Checking patch sysdeps/generic/atomic-machine.h...
Checking patch sysdeps/generic/malloc-machine.h...
Checking patch sysdeps/ia64/atomic-machine.h...
error: while searching for:
#include

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 0

#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
  (!__sync_bool_compare_and_swap ((mem), (int) (long) (oldval), \
    (int) (long) (newval)))

#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (!__sync_bool_compare_and_swap ((mem), (long) (oldval), \
    (long) (newval)))

#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap ((mem), (int) (long) (oldval), \
    (int) (long) (newval))

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap ((mem), (long) (oldval), (long) (newval))

/* Atomically store newval and return the old value.  */
#define atomic_exchange_acq(mem, value) \
  __sync_lock_test_and_set (mem, value)

#define atomic_exchange_rel(mem, value) \
  (__sync_synchronize (), __sync_lock_test_and_set (mem, value))

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add ((mem), (value))

#define atomic_decrement_if_positive(mem) \
  ({ __typeof (*mem) __oldval, __val; \
     __typeof (mem) __memp = (mem); \
 \
     __val = (*__memp); \
     do \
       { \
         __oldval = __val; \
         if (__builtin_expect (__val <= 0, 0)) \
           break; \
         __val = atomic_compare_and_exchange_val_acq (__memp, __oldval - 1, \
           __oldval); \
       } \
     while (__builtin_expect (__val != __oldval, 0)); \
     __oldval; })

#define atomic_full_barrier() __sync_synchronize ()
error: patch failed: sysdeps/ia64/atomic-machine.h:18
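The failing ia64 context ends with atomic_decrement_if_positive, a classic CAS retry loop. A standalone sketch of the same loop with the C11-style builtins, keeping the same return convention (the previous value, left undecremented and unchanged if it was <= 0):

#include <stdio.h>

static int
decrement_if_positive (int *mem)
{
  int val = __atomic_load_n (mem, __ATOMIC_RELAXED);
  int old;
  do
    {
      old = val;
      if (old <= 0)
        break;	/* leave *mem untouched; report the value seen */
    }
  /* On failure the builtin writes the current *mem back into val.  */
  while (!__atomic_compare_exchange_n (mem, &val, old - 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
  return old;
}

int main (void)
{
  int sem = 1;
  printf ("%d %d\n", decrement_if_positive (&sem), sem);	/* 1 0 */
  printf ("%d %d\n", decrement_if_positive (&sem), sem);	/* 0 0 */
  return 0;
}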
Checking patch sysdeps/m68k/coldfire/atomic-machine.h...
Checking patch sysdeps/m68k/m680x0/m68020/atomic-machine.h...
error: while searching for:
/* GCC does not support lock-free 64-bit atomic_load/store.  */
#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1

#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __asm __volatile ("cas%.b %0,%2,%1" \
       : "=d" (__ret), "+m" (*(mem)) \
       : "d" (newval), "0" (oldval)); \
     __ret; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __asm __volatile ("cas%.w %0,%2,%1" \
       : "=d" (__ret), "+m" (*(mem)) \
       : "d" (newval), "0" (oldval)); \
     __ret; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __asm __volatile ("cas%.l %0,%2,%1" \
       : "=d" (__ret), "+m" (*(mem)) \
       : "d" (newval), "0" (oldval)); \
     __ret; })

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __ret; \
     __typeof (mem) __memp = (mem); \
     __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \
       : "=d" (__ret) \
       : "d" ((__typeof (*(mem))) (newval)), "r" (__memp), \
         "r" ((char *) __memp + 4), "0" (oldval) \
       : "memory"); \
     __ret; })

#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __result = *(mem); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("1: cas%.b %0,%2,%1;" \
                         " jbne 1b" \
         : "=d" (__result), "+m" (*(mem)) \
         : "d" (newvalue), "0" (__result)); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("1: cas%.w %0,%2,%1;" \
                         " jbne 1b" \
         : "=d" (__result), "+m" (*(mem)) \
         : "d" (newvalue), "0" (__result)); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("1: cas%.l %0,%2,%1;" \
                         " jbne 1b" \
         : "=d" (__result), "+m" (*(mem)) \
         : "d" (newvalue), "0" (__result)); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \
                           " jbne 1b" \
           : "=d" (__result) \
           : "d" ((__typeof (*(mem))) (newvalue)), \
             "r" (__memp), "r" ((char *) __memp + 4), \
             "0" (__result) \
           : "memory"); \
       } \
     __result; })

#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result = *(mem); \
     __typeof (*(mem)) __temp; \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("1: move%.b %0,%2;" \
                         " add%.b %3,%2;" \
                         " cas%.b %0,%2,%1;" \
                         " jbne 1b" \
         : "=d" (__result), "+m" (*(mem)), \
           "=&d" (__temp) \
         : "d" (value), "0" (__result)); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("1: move%.w %0,%2;" \
                         " add%.w %3,%2;" \
                         " cas%.w %0,%2,%1;" \
                         " jbne 1b" \
         : "=d" (__result), "+m" (*(mem)), \
           "=&d" (__temp) \
         : "d" (value), "0" (__result)); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("1: move%.l %0,%2;" \
                         " add%.l %3,%2;" \
                         " cas%.l %0,%2,%1;" \
                         " jbne 1b" \
         : "=d" (__result), "+m" (*(mem)), \
           "=&d" (__temp) \
         : "d" (value), "0" (__result)); \
     else \
       { \
         __typeof (mem) __memp = (mem); \
         __asm __volatile ("1: move%.l %0,%1;" \
                           " move%.l %R0,%R1;" \
                           " add%.l %R2,%R1;" \
                           " addx%.l %
error: patch failed: sysdeps/m68k/m680x0/m68020/atomic-machine.h:17
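The m68020 context implements unconditional exchange by looping on cas until the store sticks, which is exactly what ATOMIC_EXCHANGE_USES_CAS 1 advertises. The same shape with compiler builtins, as a sketch:

#include <stdio.h>

static int
exchange_via_cas (int *mem, int newval)
{
  int old = __atomic_load_n (mem, __ATOMIC_RELAXED);
  /* A failed CAS refreshes old with the current contents, mirroring the
     "jbne 1b" retry around cas in the asm above.  */
  while (!__atomic_compare_exchange_n (mem, &old, newval, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    ;
  return old;	/* previous value of *mem */
}

int main (void)
{
  int v = 3;
  printf ("%d %d\n", exchange_via_cas (&v, 9), v);	/* 3 9 */
  return 0;
}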
Checking patch sysdeps/microblaze/atomic-machine.h...
error: while searching for:
#include

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1

/* Microblaze does not have byte and halfword forms of load and reserve and
   store conditional.  So for microblaze we stub out the 8- and 16-bit
   forms.  */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __tmp; \
  __typeof (mem) __memp = (mem); \
  int test; \
  __asm __volatile ( \
    " addc r0, r0, r0;" \
    "1: lwx %0, %3, r0;" \
    " addic %1, r0, 0;" \
    " bnei %1, 1b;" \
    " cmp %1, %0, %4;" \
    " bnei %1, 2f;" \
    " swx %5, %3, r0;" \
    " addic %1, r0, 0;" \
    " bnei %1, 1b;" \
    "2:" \
    : "=&r" (__tmp), \
      "=&r" (test), \
      "=m" (*__memp) \
    : "r" (__memp), \
      "r" (oldval), \
      "r" (newval) \
    : "cc", "memory"); \
  __tmp; \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
  else \
    abort (); \
  __result; \
})

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
({ \
  __typeof (*(mem)) __result; \
  if (sizeof (*mem) == 4) \
    __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
  else if (sizeof (*mem) == 8) \
    __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
  else \
    abort ();
error: patch failed: sysdeps/microblaze/atomic-machine.h:19
Checking patch sysdeps/mips/atomic-machine.h...
Checking patch sysdeps/or1k/atomic-machine.h...
Checking patch sysdeps/powerpc/atomic-machine.h...
error: while searching for:
/* Atomic operations.  PowerPC Common version.
   Copyright (C) 2003-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

/*
 * Never include sysdeps/powerpc/atomic-machine.h directly.
 * Alway use include/atomic.h which will include either
 * sysdeps/powerpc/powerpc32/atomic-machine.h
 * or
 * sysdeps/powerpc/powerpc64/atomic-machine.h
 * as appropriate and which in turn include this file.
 */

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __ARCH_ACQ_INSTR "isync"
#ifndef __ARCH_REL_INSTR
# define __ARCH_REL_INSTR "sync"
#endif

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier() __asm ("sync" ::: "memory")

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm __volatile ( \
      "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
      " cmpw %0,%2\n" \
      " bne 2f\n" \
      " stwcx. %3,0,%1\n" \
      " bne- 1b\n" \
      "2: " __ARCH_ACQ_INSTR \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \
      " cmpw %0,%2\n" \
      " bne 2f\n" \
      " stwcx. %3,0,%1\n" \
      " bne- 1b\n" \
      "2: " \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })

#define __arch_atomic_exchange_32_acq(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm __volatile ( \
      "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
      " stwcx. %3,0,%2\n" \
      " bne- 1b\n" \
      " " __ARCH_ACQ_INSTR \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_exchange_32_rel(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \
      " stwcx. %3,0,%2\n" \
      " bne- 1b" \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({ \
    __typeof (*mem)
error: patch failed: sysdeps/powerpc/atomic-machine.h:1
error: removal patch leaves file contents
error: sysdeps/powerpc/atomic-machine.h: patch does not apply
Checking patch sysdeps/powerpc/powerpc32/atomic-machine.h...
error: while searching for:
# define MUTEX_HINT_REL
#endif

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1

/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
 * (a load word and zero (high 32) form).  So powerpc64 has a slightly
 * different version in sysdeps/powerpc/powerpc64/atomic-machine.h.
 */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp; \
  __asm __volatile ( \
    "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
    " subf. %0,%2,%0\n" \
    " bne 2f\n" \
    " stwcx. %3,0,%1\n" \
    " bne- 1b\n" \
    "2: " __ARCH_ACQ_INSTR \
    : "=&r" (__tmp) \
    : "b" (mem), "r" (oldval), "r" (newval) \
    : "cr0", "memory"); \
  __tmp != 0; \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define __arch_atomic_exchange_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
  ({ abort (); (*mem) = (value); })

#define __arch_atomic_decrement_val_64(mem) \
  ({ abort (); (*mem)--; })

#define __arch_atomic_decrement_if_positive_64(mem) \
  ({ abort (); (*mem)--; })

#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync)
error: patch failed: sysdeps/powerpc/powerpc32/atomic-machine.h:32
Hunk #2 succeeded at 104 (offset 57 lines).
Hunk #3 succeeded at 114 (offset 57 lines).
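The powerpc hunks place the barrier on opposite sides of the LL/SC loop: isync after the loop for acquire, sync before it for release (__ARCH_ACQ_INSTR and __ARCH_REL_INSTR above). In C11 terms that is just the choice of memory order on the CAS; a minimal sketch:

#include <stdio.h>

static int data;

static int
publish (int *lock)	/* release: make data visible before the CAS */
{
  int expected = 0;
  data = 42;
  return __atomic_compare_exchange_n (lock, &expected, 1, 0,
                                      __ATOMIC_RELEASE, __ATOMIC_RELAXED);
}

static int
consume (int *lock)	/* acquire: read data only after the CAS */
{
  int expected = 1;
  if (__atomic_compare_exchange_n (lock, &expected, 0, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return data;
  return -1;
}

int main (void)
{
  int lock = 0;
  publish (&lock);
  printf ("%d\n", consume (&lock));	/* 42 */
  return 0;
}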
Checking patch sysdeps/powerpc/powerpc64/atomic-machine.h...
error: while searching for:
# define MUTEX_HINT_REL
#endif

#define __HAVE_64B_ATOMICS 1
#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1

/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
   (a load word and zero (high 32) form) load.
   In powerpc64 register values are 64-bit by default, including oldval.
   The value in old val unknown sign extension, lwarx loads the 32-bit
   value as unsigned.  So we explicitly clear the high 32 bits in oldval.  */
#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({ \
  unsigned int __tmp, __tmp2; \
  __asm __volatile (" clrldi %1,%1,32\n" \
    "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
    " subf. %0,%1,%0\n" \
    " bne 2f\n" \
    " stwcx. %4,0,%2\n" \
    " bne- 1b\n" \
    "2: " __ARCH_ACQ_INSTR \
    : "=&r" (__tmp), "=r" (__tmp2) \
    : "b" (mem), "1" (oldval), "r" (newval) \
    : "cr0", "memory"); \
  __tmp != 0; \
})

/*
 * Only powerpc64 processors support Load doubleword and reserve index (ldarx)
 * and Store doubleword conditional indexed (stdcx) instructions.  So here
 * we define the 64-bit forms.
 */
#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({ \
  unsigned long __tmp; \
  __asm __volatile ( \
    "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
    " subf. %0,%2,%0\n" \
    " bne 2f\n" \
    " stdcx. %3,0,%1\n" \
    " bne- 1b\n" \
    "2: " __ARCH_ACQ_INSTR \
    : "=&r" (__tmp) \
    : "b" (mem), "r" (oldval), "r" (newval) \
    : "cr0", "memory"); \
  __tmp != 0; \
})

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm __volatile ( \
      "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
      " cmpd %0,%2\n" \
      " bne 2f\n" \
      " stdcx. %3,0,%1\n" \
      " bne- 1b\n" \
      "2: " __ARCH_ACQ_INSTR \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \
      " cmpd %0,%2\n" \
      " bne 2f\n" \
      " stdcx. %3,0,%1\n" \
      " bne- 1b\n" \
      "2: " \
      : "=&r" (__tmp) \
      : "b" (__memp), "r" (oldval), "r" (newval) \
      : "cr0", "memory"); \
    __tmp; \
  })

#define __arch_atomic_exchange_64_acq(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: ldarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
      " stdcx. %3,0,%2\n" \
      " bne- 1b\n" \
      " " __ARCH_ACQ_INSTR \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val; \
  })

#define __arch_atomic_exchange_64_rel(mem, value) \
  ({ \
    __typeof (*mem) __val; \
    __asm __volatile (__ARCH_REL_INSTR "\n" \
      "1: ldarx %0,0,%2" MUTEX_HINT_REL "\n" \
      " stdcx. %3,0,%2\n" \
      " bne- 1b" \
      : "=&r" (__val), "=m" (*mem) \
      : "b" (mem), "r" (value), "m" (*mem) \
      : "cr0", "memory"); \
    __val;
error: patch failed: sysdeps/powerpc/powerpc64/atomic-machine.h:32
Hunk #2 succeeded at 229 (offset 185 lines).
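The rejected powerpc64 hunk starts at the clrldi comment: a negative 32-bit oldval arrives sign-extended in a 64-bit register, while lwarx zero-extends, so the 64-bit subf comparison would never match without clearing the high bits. The arithmetic, demonstrated portably:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  int32_t oldval = -1;
  uint64_t reg_signext = (int64_t) oldval;	/* 0xffffffffffffffff */
  uint64_t reg_lwarx = (uint32_t) oldval;	/* 0x00000000ffffffff */
  printf ("equal as 64-bit: %d\n", reg_signext == reg_lwarx);	/* 0 */
  /* Clearing the high 32 bits (what "clrldi %1,%1,32" does) fixes it.  */
  printf ("after clear:     %d\n", (reg_signext & 0xffffffffu) == reg_lwarx);
  return 0;
}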
Checking patch sysdeps/s390/atomic-machine.h...
error: while searching for:
#endif

#define ATOMIC_EXCHANGE_USES_CAS 1

/* Implement some of the non-C11 atomic macros from include/atomic.h
   with help of the C11 atomic builtins.  The other non-C11 atomic macros
   are using the macros defined here.  */

/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ __atomic_check_size((mem)); \
     typeof ((__typeof (*(mem))) *(mem)) __atg1_oldval = (oldval); \
     __atomic_compare_exchange_n (mem, (void *) &__atg1_oldval, \
       newval, 1, __ATOMIC_ACQUIRE, \
       __ATOMIC_RELAXED); \
     __atg1_oldval; })

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({ __atomic_check_size((mem)); \
     typeof ((__typeof (*(mem))) *(mem)) __atg1_2_oldval = (oldval); \
     __atomic_compare_exchange_n (mem, (void *) &__atg1_2_oldval, \
       newval, 1, __ATOMIC_RELEASE, \
       __ATOMIC_RELAXED); \
     __atg1_2_oldval; })

/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ __atomic_check_size((mem)); \
     typeof ((__typeof (*(mem))) *(mem)) __atg2_oldval = (oldval); \
     !__atomic_compare_exchange_n (mem, (void *) &__atg2_oldval, newval, \
       1, __ATOMIC_ACQUIRE, \
       __ATOMIC_RELAXED); })

/* Add VALUE to *MEM and return the old value of *MEM.  */
/* The gcc builtin uses load-and-add instruction on z196 zarch and higher cpus
   instead of a loop with compare-and-swap instruction.  */
# define atomic_exchange_and_add_acq(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })

# define atomic_exchange_and_add_rel(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
error: patch failed: sysdeps/s390/atomic-machine.h:40
Checking patch sysdeps/sparc/atomic-machine.h...
Checking patch sysdeps/unix/sysv/linux/hppa/atomic-machine.h...
Checking patch sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h...
Checking patch sysdeps/unix/sysv/linux/nios2/atomic-machine.h...
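The s390 hunk maps the non-C11 exchange-and-add macros directly onto __atomic_fetch_add with the matching memory order; per the quoted comment, on z196 and newer this can compile to a single load-and-add instruction. Usage sketch:

#include <stdio.h>

int main (void)
{
  int counter = 10;
  int old_acq = __atomic_fetch_add (&counter, 5, __ATOMIC_ACQUIRE);
  int old_rel = __atomic_fetch_add (&counter, 5, __ATOMIC_RELEASE);
  printf ("%d %d %d\n", old_acq, old_rel, counter);	/* 10 15 20 */
  return 0;
}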
Checking patch sysdeps/unix/sysv/linux/riscv/atomic-machine.h...
error: while searching for:
#ifndef _LINUX_RISCV_BITS_ATOMIC_H
#define _LINUX_RISCV_BITS_ATOMIC_H 1

#define atomic_full_barrier() __sync_synchronize ()

#ifdef __riscv_atomic
# define __HAVE_64B_ATOMICS (__riscv_xlen >= 64)
# define USE_ATOMIC_COMPILER_BUILTINS 1
# define ATOMIC_EXCHANGE_USES_CAS 0

/* Compare and exchange.
   For all "bool" routines, we return FALSE if exchange succesful.  */

# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
  })

# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
  ({ \
    typeof (*mem) __oldval = (oldval); \
    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
      model, __ATOMIC_RELAXED); \
    __oldval; \
  })

/* Atomic compare and exchange.  */
# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
    mem, new, old, __ATOMIC_ACQUIRE)

# define atomic_compare_and_exchange_val_acq(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
    mem, new, old, __ATOMIC_ACQUIRE)

# define atomic_compare_and_exchange_val_rel(mem, new, old) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
    mem, new, old, __ATOMIC_RELEASE)

/* Atomic exchange (without compare).  */
# define __arch_exchange_8_int(mem, newval, model) \
  __atomic_exchange_n (mem, newval, model)

# define __arch_exchange_16_int(mem, newval, model) \
  __atomic_exchange_n (mem, newval, model)

# define __arch_exchange_32_int(mem, newval, model) \
  __atomic_exchange_n (mem, newval, model)

# define __arch_exchange_64_int(mem, newval, model) \
  __atomic_exchange_n (mem, newval, model)

/* Atomically add value and return the previous (unincremented) value.  */
# define __arch_exchange_and_add_8_int(mem, value, model) \
  __atomic_fetch_add (mem, value, model)

# define __arch_exchange_and_add_16_int(mem, value, model) \
  __atomic_fetch_add (mem, value, model)

# define __arch_exchange_and_add_32_int(mem, value, model) \
  __atomic_fetch_add (mem, value, model)

# define __arch_exchange_and_add_64_int(mem, value, model) \
  __atomic_fetch_add (mem, value, model)

# define atomic_exchange_and_add_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
error: patch failed: sysdeps/unix/sysv/linux/riscv/atomic-machine.h:19
Checking patch sysdeps/unix/sysv/linux/sh/atomic-machine.h...
error: while searching for:
   <https://www.gnu.org/licenses/>.  */

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* XXX Is this actually correct?  */
#define ATOMIC_EXCHANGE_USES_CAS 1

/* SH kernel has implemented a gUSA ("g" User Space Atomicity) support
   for the user space atomicity.  The atomicity macros use this scheme.

  Reference:
    Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity
    Emulation with Little Kernel Modification", Linux Conference 2002,
    Japan.  http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in
    Japanese).

    B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for
    Uniprocessors", Proceedings of the Fifth Architectural Support for
    Programming Languages and Operating Systems (ASPLOS), pp. 223-233,
    October 1992.  http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps

  SuperH ABI:
      r15: -(size of atomic instruction sequence) < 0
      r0:  end point
      r1:  saved stack pointer
*/

#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
	mova 1f,r0\n\
	.align 2\n\
	mov r15,r1\n\
	mov #(0f-1f),r15\n\
     0: mov.b @%1,%0\n\
	cmp/eq %0,%3\n\
	bf 1f\n\
	mov.b %2,@%1\n\
     1: mov r1,r15"\
       : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
       : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
	mova 1f,r0\n\
	mov r15,r1\n\
	.align 2\n\
	mov #(0f-1f),r15\n\
	mov #-8,r15\n\
     0: mov.w @%1,%0\n\
	cmp/eq %0,%3\n\
	bf 1f\n\
	mov.w %2,@%1\n\
     1: mov r1,r15"\
       : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
       : "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm __volatile ("\
	mova 1f,r0\n\
	.align 2\n\
	mov r15,r1\n\
	mov #(0f-1f),r15\n\
     0: mov.l @%1,%0\n\
	cmp/eq %0,%3\n\
	bf 1f\n\
	mov.l %2,@%1\n\
     1: mov r1,r15"\
       : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
       : "r0", "r1", "t", "memory"); \
     __result; })

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not in the moment.  Using it would mean causing portability
   problems since not many other 32-bit architectures have support for
   such an operation.  So don't define any code for now.  */
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result, __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm __volatile ("\
	  mova 1f,r0\n\
	  .align 2\n\
	  mov r15,r1\n\
	  mov #(0f-1f),r15\n\
       0: mov.b @%2,%0\n\
	  mov %1,r2\n\
	  add %0,r2\n\
	  mov.b r2,@%2\n\
       1: mov r1,r15"\
	 : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
	 : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm __volatile ("\
	  mova 1f,r0\n\
	  .align 2\n\
	  mov r15,r1\n\
	  mov #(0f-1f),r15\n\
       0: mov.w @%2,%0\n\
	  mov %1,r2\n\
	  add %0,r2\n\
	  mov.w r2,@%2\n\
       1: mov r1,r15"\
	 : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
	 : "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm __volatile ("\
	  mova 1f,r0\n\
	  .align 2\n\
	  mov r15,r1\n\
	  mov #(0f-1f),r15\n\
       0: mov.l @%2,%0\n\
	  mov %1,r2\n\
	  add %0,r2\n\
	  mov.l r2,@%2\n\
       1: mov r1,r15"\
	 : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
	 : "r0", "r1", "r2", "memory"); \
     else \
       { \
	 __typeof (mem) memp = (mem); \
	 do \
	   __result = *memp; \
	 while (__arch_compare_and_exchange_val_64_acq \
		 (memp, __result + __value, __result) == __result); \
	 (void) __value; \
       } \
     __result; })
error: patch failed: sysdeps/unix/sysv/linux/sh/atomic-machine.h:17
Checking patch sysdeps/x86/atomic-machine.h...
error: while searching for:
#ifndef _X86_ATOMIC_MACHINE_H
#define _X86_ATOMIC_MACHINE_H 1

#include
#include			/* For mach.  */
#include			/* For cast_to_integer.  */

#define LOCK_PREFIX "lock;"

#define USE_ATOMIC_COMPILER_BUILTINS 1

#ifdef __x86_64__
# define __HAVE_64B_ATOMICS 1
# define SP_REG "rsp"
#else
/* Since the Pentium, i386 CPUs have supported 64-bit atomics, but the
   i386 psABI supplement provides only 4-byte alignment for uint64_t
   inside structs, so it is currently not possible to use 64-bit
   atomics on this platform.  */
# define __HAVE_64B_ATOMICS 0
# define SP_REG "esp"
#endif
#define ATOMIC_EXCHANGE_USES_CAS 0

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))

/* We don't use mfence because it is supposedly slower due to having to
   provide stronger guarantees (e.g., regarding self-modifying code).  */
#define atomic_full_barrier() \
  __asm __volatile (LOCK_PREFIX "orl $0, (%%" SP_REG ")" ::: "memory")
#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")
error: patch failed: sysdeps/x86/atomic-machine.h:19
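The failing x86 context chooses a LOCK'ed read-modify-write on the stack over mfence for the full barrier. Compilers make a similar trade-off: for a seq_cst fence on x86-64, GCC typically emits a lock-prefixed or to the stack (or mfence). The portable spelling of that barrier, as a sketch:

int main (void)
{
  int flag = 0;
  __atomic_thread_fence (__ATOMIC_SEQ_CST);	/* portable full barrier */
  __atomic_store_n (&flag, 1, __ATOMIC_RELAXED);
  return 0;
}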
Applying patch include/atomic.h with 4 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Rejected hunk #4.
Applying patch sysdeps/aarch64/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applying patch sysdeps/alpha/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applied patch sysdeps/arc/atomic-machine.h cleanly.
Applied patch sysdeps/arm/atomic-machine.h cleanly.
Applied patch sysdeps/csky/atomic-machine.h cleanly.
Applied patch sysdeps/generic/atomic-machine.h cleanly.
Applied patch sysdeps/generic/malloc-machine.h cleanly.
Applying patch sysdeps/ia64/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applied patch sysdeps/m68k/coldfire/atomic-machine.h cleanly.
Applying patch sysdeps/m68k/m680x0/m68020/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applying patch sysdeps/microblaze/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applied patch sysdeps/mips/atomic-machine.h cleanly.
Applied patch sysdeps/or1k/atomic-machine.h cleanly.
Applying patch sysdeps/powerpc/powerpc32/atomic-machine.h with 1 reject...
Rejected hunk #1.
Hunk #2 applied cleanly.
Hunk #3 applied cleanly.
Applying patch sysdeps/powerpc/powerpc64/atomic-machine.h with 1 reject...
Rejected hunk #1.
Hunk #2 applied cleanly.
Applying patch sysdeps/s390/atomic-machine.h with 1 reject...
Hunk #1 applied cleanly.
Rejected hunk #2.
Applied patch sysdeps/sparc/atomic-machine.h cleanly.
Applied patch sysdeps/unix/sysv/linux/hppa/atomic-machine.h cleanly.
Applied patch sysdeps/unix/sysv/linux/m68k/coldfire/atomic-machine.h cleanly.
Applied patch sysdeps/unix/sysv/linux/nios2/atomic-machine.h cleanly.
Applying patch sysdeps/unix/sysv/linux/riscv/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applying patch sysdeps/unix/sysv/linux/sh/atomic-machine.h with 1 reject...
Rejected hunk #1.
Applying patch sysdeps/x86/atomic-machine.h with 1 reject...
Rejected hunk #1.