From: idunham@lavabit.com
To: musl@lists.openwall.com
Subject: [PATCH 2/1] Problem is static inline
Date: Wed, 22 Aug 2012 23:20:22 -0700 (PDT)
Message-ID: <62688.132.241.65.179.1345702822.squirrel@lavabit.com>
In-Reply-To: <59250.132.241.65.179.1345702298.squirrel@lavabit.com>
References: <12609.132.241.65.179.1345698455.squirrel@lavabit.com> <59250.132.241.65.179.1345702298.squirrel@lavabit.com>
Reply-To: musl@lists.openwall.com

>> I've been trying to get musl compatibility patches for libuv merged
>> upstream, and I have it building, but there's one sticking point:
>> upstream insists on using --std=c89 (I guess for portability to other
>> platforms).
>> This makes GCC choke on "long" in .
> I tried fixing it, and ended up finding that the issue was the "static
> inline" in the header.
>
> For future reference:
> sed -e 's/static inline/#if __STDC_VERSION__ >= 199901L\ninline\n#endif\nstatic/g' -i
> is what I used.
>
> HTH,
> Isaac Dunham

I grepped and found three other headers in each arch that share the same
problem. Here's the patch.
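For anyone hitting the same issue: below is a minimal, self-contained sketch
(not part of the patch; the function name add_one is invented for the example)
of what the guarded form produced by that sed command looks like. Under
-std=c89 the preprocessor drops the inline keyword, leaving a plain static
function, while a C99-or-newer compiler still sees static inline.

```c
/* guard_example.c -- hypothetical illustration, not from the musl headers.
 * Original form, which gcc -std=c89 rejects because "inline" is not a
 * C89 keyword:
 *     static inline int add_one(int x) { return x + 1; }
 */
#include <stdio.h>

#if __STDC_VERSION__ >= 199901L
inline              /* emitted only for compilers advertising C99 or later */
#endif
static int add_one(int x)
{
	return x + 1;   /* a C89 compiler just sees a plain static function */
}

int main(void)
{
	/* prints 42 whether built with -std=c89 or -std=c99 */
	printf("%d\n", add_one(41));
	return 0;
}
```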
Isaac Dunham

Content-Type: text/plain; name="inline2.diff"
Content-Disposition: attachment; filename="inline2.diff"

diff --git a/arch/arm/atomic.h b/arch/arm/atomic.h
index f434a0c..2312aca 100644
--- a/arch/arm/atomic.h
+++ b/arch/arm/atomic.h
@@ -3,7 +3,10 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	static const char debruijn32[32] = {
 		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
@@ -12,7 +15,10 @@ static inline int a_ctz_l(unsigned long x)
 	return debruijn32[(x&-x)*0x076be629 >> 27];
 }
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	uint32_t y = x;
 	if (!y) {
@@ -22,7 +28,10 @@ static inline int a_ctz_64(uint64_t x)
 	return a_ctz_l(y);
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	int old;
 	for (;;) {
@@ -33,17 +42,26 @@ static inline int a_cas(volatile int *p, int t, int s)
 	}
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	return a_cas(p, t, s);
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	int old;
 	do old = *x;
@@ -51,7 +69,10 @@ static inline int a_swap(volatile int *x, int v)
 	return old;
 }
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	int old;
 	do old = *x;
@@ -59,52 +80,79 @@ static inline int a_fetch_add(volatile int *x, int v)
 	return old;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	a_fetch_add(x, 1);
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	a_fetch_add(x, -1);
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	*p=x;
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	*(volatile char *)0=0;
 }
 
-static inline void a_and(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile int *p, int v)
 {
 	int old;
 	do old = *p;
 	while (a_cas(p, old, old&v) != old);
 }
 
-static inline void a_or(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile int *p, int v)
 {
 	int old;
 	do old = *p;
 	while (a_cas(p, old, old|v) != old);
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_and((int *)p, u.r[0]);
 	a_and((int *)p+1, u.r[1]);
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_or((int *)p, u.r[0]);
diff --git a/arch/arm/reloc.h b/arch/arm/reloc.h
index 10e89aa..9c79a75 100644
--- a/arch/arm/reloc.h
+++ b/arch/arm/reloc.h
@@ -6,7 +6,10 @@
 #define IS_COPY(x) ((x)==R_ARM_COPY)
 #define IS_PLT(x) ((x)==R_ARM_JUMP_SLOT)
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_ARM_ABS32:
diff --git a/arch/i386/atomic.h b/arch/i386/atomic.h
index 77b0b3b..7588b79 100644
--- a/arch/i386/atomic.h
+++ b/arch/i386/atomic.h
@@ -3,7 +3,10 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	int r;
 	__asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
@@ -11,81 +14,120 @@ static inline int a_ctz_64(uint64_t x)
 	return r;
 }
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	long r;
 	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
 	return r;
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
 		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
 		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store_l(volatile void *p, long x)
 {
 	__asm__( "movl %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
 }
 
-static inline void a_or_l(volatile void *p, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; orl %1, %0" : "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_swap_p(void *volatile *x, void *v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline long a_swap_l(volatile void *x, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_swap_l(volatile void *x, long v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_or(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; orl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile void *p, int v)
 {
 	__asm__( "lock ; andl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
@@ -93,33 +135,51 @@ static inline int a_swap(volatile int *x, int v)
 
 #define a_xchg a_swap
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 	__asm__ __volatile__( "pause" : : : "memory" );
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	__asm__ __volatile__( "hlt" : : : "memory" );
 }
diff --git a/arch/i386/pthread_arch.h b/arch/i386/pthread_arch.h
index 0ea0aac..86568e8 100644
--- a/arch/i386/pthread_arch.h
+++ b/arch/i386/pthread_arch.h
@@ -1,4 +1,7 @@
-static inline struct pthread *__pthread_self()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static struct pthread *__pthread_self()
 {
 	struct pthread *self;
 	__asm__ __volatile__ ("movl %%gs:0,%0" : "=r" (self) );
diff --git a/arch/i386/reloc.h b/arch/i386/reloc.h
index 490113a..aacfd82 100644
--- a/arch/i386/reloc.h
+++ b/arch/i386/reloc.h
@@ -6,7 +6,10 @@
 #define IS_COPY(x) ((x)==R_386_COPY)
 #define IS_PLT(x) ((x)==R_386_JMP_SLOT)
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_386_32:
diff --git a/arch/mips/atomic.h b/arch/mips/atomic.h
index f3478ef..e8db4c1 100644
--- a/arch/mips/atomic.h
+++ b/arch/mips/atomic.h
@@ -3,7 +3,10 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	static const char debruijn32[32] = {
 		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
@@ -12,7 +15,10 @@ static inline int a_ctz_l(unsigned long x)
 	return debruijn32[(x&-x)*0x076be629 >> 27];
 }
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	uint32_t y = x;
 	if (!y) {
@@ -22,7 +28,10 @@ static inline int a_ctz_64(uint64_t x)
 	return a_ctz_l(y);
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -41,18 +50,27 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	return a_cas(p, t, s);
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	int old, dummy;
 	__asm__ __volatile__(
@@ -70,7 +88,10 @@ static inline int a_swap(volatile int *x, int v)
 	return old;
 }
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	int old, dummy;
 	__asm__ __volatile__(
@@ -88,7 +109,10 @@ static inline int a_fetch_add(volatile int *x, int v)
 	return old;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -105,7 +129,10 @@ static inline void a_inc(volatile int *x)
 		: "=&r"(dummy) : "r"(x) : "memory" );
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -122,7 +149,10 @@ static inline void a_dec(volatile int *x)
 		: "=&r"(dummy) : "r"(x) : "memory" );
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -139,16 +169,25 @@ static inline void a_store(volatile int *p, int x)
 		: "=&r"(dummy) : "r"(p), "r"(x) : "memory" );
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	*(volatile char *)0=0;
 }
 
-static inline void a_and(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile int *p, int v)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -165,7 +204,10 @@ static inline void a_and(volatile int *p, int v)
 		: "=&r"(dummy) : "r"(p), "r"(v) : "memory" );
 }
 
-static inline void a_or(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile int *p, int v)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -182,14 +224,20 @@ static inline void a_or(volatile int *p, int v)
 		: "=&r"(dummy) : "r"(p), "r"(v) : "memory" );
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_and((int *)p, u.r[0]);
 	a_and((int *)p+1, u.r[1]);
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_or((int *)p, u.r[0]);
diff --git a/arch/mips/pthread_arch.h b/arch/mips/pthread_arch.h
index f75379c..204c217 100644
--- a/arch/mips/pthread_arch.h
+++ b/arch/mips/pthread_arch.h
@@ -1,4 +1,7 @@
-static inline struct pthread *__pthread_self()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static struct pthread *__pthread_self()
 {
 	struct pthread *self;
 	__asm__ __volatile__ (".word 0x7c03e83b" : "=v" (self) );
diff --git a/arch/mips/reloc.h b/arch/mips/reloc.h
index e892526..d748416 100644
--- a/arch/mips/reloc.h
+++ b/arch/mips/reloc.h
@@ -6,7 +6,10 @@
 #define IS_COPY(x) ((x)==R_MIPS_COPY)
 #define IS_PLT(x) 1
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_MIPS_JUMP_SLOT:
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 0d3da6f..1d9d9f9 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -3,88 +3,130 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	long r;
 	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
 	return r;
 }
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	long r;
 	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
 	return r;
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; andq %1, %0"
 		: "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; orq %1, %0"
		: "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store_l(volatile void *p, long x)
 {
 	__asm__( "movq %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
 }
 
-static inline void a_or_l(volatile void *p, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; orq %1, %0" : "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchgl %3, %1"
 		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_swap_p(void *volatile *x, void *v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline long a_swap_l(volatile void *x, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_swap_l(volatile void *x, long v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_or(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; orl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile void *p, int v)
 {
 	__asm__( "lock ; andl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
@@ -92,33 +134,51 @@ static inline int a_swap(volatile int *x, int v)
 
 #define a_xchg a_swap
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 	__asm__ __volatile__( "pause" : : : "memory" );
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	__asm__ __volatile__( "hlt" : : : "memory" );
 }
diff --git a/arch/x86_64/pthread_arch.h b/arch/x86_64/pthread_arch.h
index 836187f..83d1447 100644
--- a/arch/x86_64/pthread_arch.h
+++ b/arch/x86_64/pthread_arch.h
@@ -1,4 +1,7 @@
-static inline struct pthread *__pthread_self()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static struct pthread *__pthread_self()
 {
 	struct pthread *self;
 	__asm__ __volatile__ ("movq %%fs:0,%0" : "=r" (self) );
diff --git a/arch/x86_64/reloc.h b/arch/x86_64/reloc.h
index b0bbfb3..8825fcd 100644
--- a/arch/x86_64/reloc.h
+++ b/arch/x86_64/reloc.h
@@ -7,7 +7,10 @@
 #define IS_COPY(x) ((x)==R_X86_64_COPY)
 #define IS_PLT(x) ((x)==R_X86_64_JUMP_SLOT)
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_X86_64_GLOB_DAT: