mailing list of musl libc
From: idunham@lavabit.com
To: musl@lists.openwall.com
Subject: [PATCH 2/1] Problem is static inline
Date: Wed, 22 Aug 2012 23:20:22 -0700 (PDT)
Message-ID: <62688.132.241.65.179.1345702822.squirrel@lavabit.com>
In-Reply-To: <59250.132.241.65.179.1345702298.squirrel@lavabit.com>

[-- Attachment #1: Type: text/plain, Size: 681 bytes --]

>> I've been trying to get musl compatibility patches for libuv merged
>> upstream, and I have it building, but there's one sticking point:
>> Upstream insists on using --std=c89 (I guess for portability to other
>> platforms).
>> This makes GCC choke on "long" in <bits/syscall.h>.
> I tried fixing it, and ended up finding that the issue was the "static
> inline" in the header.
>
> For future reference:
> sed -e 's/static inline/#if __STDC_VERSION__ >= 199901L\ninline\n#endif\nstatic/g' -i <filename>
> is what I used.
>
> HTH,
> Isaac Dunham
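
Concretely, that substitution turns each affected definition from

	static inline int a_ctz_l(unsigned long x) { /* ... */ }

into

	#if __STDC_VERSION__ >= 199901L
	inline
	#endif
	static int a_ctz_l(unsigned long x) { /* ... */ }

(a_ctz_l is just one example; the attached patch applies the same pattern to
every affected function, leaving the bodies unchanged.)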

I grepped and found three other headers in each arch that share the same
problem.
Here's the patch.
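
For reference, an illustration of why the guarded form works under both
standards (this is not part of the patch): with GCC's -std=c89,
__STDC_VERSION__ is left undefined, so the #if evaluates to 0 and the
preprocessor drops the keyword, leaving a plain static function. Under C99
the guard holds, and "inline static" is equivalent to the old "static inline"
because C99 accepts the declaration specifiers in either order.

	/* C89 view after preprocessing: only "static" remains */
	static int a_ctz_l(unsigned long x) { /* ... */ }

	/* C99 view: the guard keeps the inline hint */
	inline
	static int a_ctz_l(unsigned long x) { /* ... */ }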

Isaac Dunham

[-- Attachment #2: inline2.diff --]
[-- Type: text/plain, Size: 21362 bytes --]

diff --git a/arch/arm/atomic.h b/arch/arm/atomic.h
index f434a0c..2312aca 100644
--- a/arch/arm/atomic.h
+++ b/arch/arm/atomic.h
@@ -3,7 +3,10 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	static const char debruijn32[32] = {
 		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
@@ -12,7 +15,10 @@ static inline int a_ctz_l(unsigned long x)
 	return debruijn32[(x&-x)*0x076be629 >> 27];
 }
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	uint32_t y = x;
 	if (!y) {
@@ -22,7 +28,10 @@ static inline int a_ctz_64(uint64_t x)
 	return a_ctz_l(y);
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	int old;
 	for (;;) {
@@ -33,17 +42,26 @@ static inline int a_cas(volatile int *p, int t, int s)
 	}
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	return a_cas(p, t, s);
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	int old;
 	do old = *x;
@@ -51,7 +69,10 @@ static inline int a_swap(volatile int *x, int v)
 	return old;
 }
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	int old;
 	do old = *x;
@@ -59,52 +80,79 @@ static inline int a_fetch_add(volatile int *x, int v)
 	return old;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	a_fetch_add(x, 1);
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	a_fetch_add(x, -1);
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	*p=x;
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	*(volatile char *)0=0;
 }
 
-static inline void a_and(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile int *p, int v)
 {
 	int old;
 	do old = *p;
 	while (a_cas(p, old, old&v) != old);
 }
 
-static inline void a_or(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile int *p, int v)
 {
 	int old;
 	do old = *p;
 	while (a_cas(p, old, old|v) != old);
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_and((int *)p, u.r[0]);
 	a_and((int *)p+1, u.r[1]);
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_or((int *)p, u.r[0]);
diff --git a/arch/arm/reloc.h b/arch/arm/reloc.h
index 10e89aa..9c79a75 100644
--- a/arch/arm/reloc.h
+++ b/arch/arm/reloc.h
@@ -6,7 +6,10 @@
 #define IS_COPY(x) ((x)==R_ARM_COPY)
 #define IS_PLT(x) ((x)==R_ARM_JUMP_SLOT)
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_ARM_ABS32:
diff --git a/arch/i386/atomic.h b/arch/i386/atomic.h
index 77b0b3b..7588b79 100644
--- a/arch/i386/atomic.h
+++ b/arch/i386/atomic.h
@@ -3,7 +3,10 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	int r;
 	__asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
@@ -11,81 +14,120 @@ static inline int a_ctz_64(uint64_t x)
 	return r;
 }
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	long r;
 	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
 	return r;
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
 		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
 		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store_l(volatile void *p, long x)
 {
 	__asm__( "movl %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
 }
 
-static inline void a_or_l(volatile void *p, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; orl %1, %0"
 		: "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_swap_p(void *volatile *x, void *v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
 	return v;
 }
-static inline long a_swap_l(volatile void *x, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_swap_l(volatile void *x, long v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_or(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; orl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile void *p, int v)
 {
 	__asm__( "lock ; andl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
@@ -93,33 +135,51 @@ static inline int a_swap(volatile int *x, int v)
 
 #define a_xchg a_swap
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 	__asm__ __volatile__( "pause" : : : "memory" );
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	__asm__ __volatile__( "hlt" : : : "memory" );
 }
diff --git a/arch/i386/pthread_arch.h b/arch/i386/pthread_arch.h
index 0ea0aac..86568e8 100644
--- a/arch/i386/pthread_arch.h
+++ b/arch/i386/pthread_arch.h
@@ -1,4 +1,7 @@
-static inline struct pthread *__pthread_self()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static struct pthread *__pthread_self()
 {
 	struct pthread *self;
 	__asm__ __volatile__ ("movl %%gs:0,%0" : "=r" (self) );
diff --git a/arch/i386/reloc.h b/arch/i386/reloc.h
index 490113a..aacfd82 100644
--- a/arch/i386/reloc.h
+++ b/arch/i386/reloc.h
@@ -6,7 +6,10 @@
 #define IS_COPY(x) ((x)==R_386_COPY)
 #define IS_PLT(x) ((x)==R_386_JMP_SLOT)
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_386_32:
diff --git a/arch/mips/atomic.h b/arch/mips/atomic.h
index f3478ef..e8db4c1 100644
--- a/arch/mips/atomic.h
+++ b/arch/mips/atomic.h
@@ -3,7 +3,10 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	static const char debruijn32[32] = {
 		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
@@ -12,7 +15,10 @@ static inline int a_ctz_l(unsigned long x)
 	return debruijn32[(x&-x)*0x076be629 >> 27];
 }
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	uint32_t y = x;
 	if (!y) {
@@ -22,7 +28,10 @@ static inline int a_ctz_64(uint64_t x)
 	return a_ctz_l(y);
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -41,18 +50,27 @@ static inline int a_cas(volatile int *p, int t, int s)
         return t;
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	return a_cas(p, t, s);
 }
 
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	int old, dummy;
 	__asm__ __volatile__(
@@ -70,7 +88,10 @@ static inline int a_swap(volatile int *x, int v)
         return old;
 }
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	int old, dummy;
 	__asm__ __volatile__(
@@ -88,7 +109,10 @@ static inline int a_fetch_add(volatile int *x, int v)
         return old;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -105,7 +129,10 @@ static inline void a_inc(volatile int *x)
 		: "=&r"(dummy) : "r"(x) : "memory" );
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -122,7 +149,10 @@ static inline void a_dec(volatile int *x)
 		: "=&r"(dummy) : "r"(x) : "memory" );
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -139,16 +169,25 @@ static inline void a_store(volatile int *p, int x)
 		: "=&r"(dummy) : "r"(p), "r"(x) : "memory" );
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	*(volatile char *)0=0;
 }
 
-static inline void a_and(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile int *p, int v)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -165,7 +204,10 @@ static inline void a_and(volatile int *p, int v)
 		: "=&r"(dummy) : "r"(p), "r"(v) : "memory" );
 }
 
-static inline void a_or(volatile int *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile int *p, int v)
 {
 	int dummy;
 	__asm__ __volatile__(
@@ -182,14 +224,20 @@ static inline void a_or(volatile int *p, int v)
 		: "=&r"(dummy) : "r"(p), "r"(v) : "memory" );
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_and((int *)p, u.r[0]);
 	a_and((int *)p+1, u.r[1]);
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	union { uint64_t v; uint32_t r[2]; } u = { v };
 	a_or((int *)p, u.r[0]);
diff --git a/arch/mips/pthread_arch.h b/arch/mips/pthread_arch.h
index f75379c..204c217 100644
--- a/arch/mips/pthread_arch.h
+++ b/arch/mips/pthread_arch.h
@@ -1,4 +1,7 @@
-static inline struct pthread *__pthread_self()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static struct pthread *__pthread_self()
 {
 	struct pthread *self;
 	__asm__ __volatile__ (".word 0x7c03e83b" : "=v" (self) );
diff --git a/arch/mips/reloc.h b/arch/mips/reloc.h
index e892526..d748416 100644
--- a/arch/mips/reloc.h
+++ b/arch/mips/reloc.h
@@ -6,7 +6,10 @@
 #define IS_COPY(x) ((x)==R_MIPS_COPY)
 #define IS_PLT(x) 1
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_MIPS_JUMP_SLOT:
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 0d3da6f..1d9d9f9 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -3,88 +3,130 @@
 
 #include <stdint.h>
 
-static inline int a_ctz_64(uint64_t x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_64(uint64_t x)
 {
 	long r;
 	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
 	return r;
 }
 
-static inline int a_ctz_l(unsigned long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_ctz_l(unsigned long x)
 {
 	long r;
 	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
 	return r;
 }
 
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; andq %1, %0"
 			 : "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_64(volatile uint64_t *p, uint64_t v)
 {
 	__asm__( "lock ; orq %1, %0"
 			 : "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store_l(volatile void *p, long x)
 {
 	__asm__( "movq %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
 }
 
-static inline void a_or_l(volatile void *p, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; orq %1, %0"
 		: "=m"(*(long *)p) : "r"(v) : "memory" );
 }
 
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_cas_p(volatile void *p, void *t, void *s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_cas_l(volatile void *p, long t, long s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
 		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline int a_cas(volatile int *p, int t, int s)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchgl %3, %1"
 		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void *a_swap_p(void *volatile *x, void *v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
 	return v;
 }
-static inline long a_swap_l(volatile void *x, long v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static long a_swap_l(volatile void *x, long v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_or(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; orl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_and(volatile void *p, int v)
 {
 	__asm__( "lock ; andl %1, %0"
 		: "=m"(*(int *)p) : "r"(v) : "memory" );
 }
 
-static inline int a_swap(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_swap(volatile int *x, int v)
 {
 	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
@@ -92,33 +134,51 @@ static inline int a_swap(volatile int *x, int v)
 
 #define a_xchg a_swap
 
-static inline int a_fetch_add(volatile int *x, int v)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static int a_fetch_add(volatile int *x, int v)
 {
 	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
 	return v;
 }
 
-static inline void a_inc(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_inc(volatile int *x)
 {
 	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_dec(volatile int *x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_dec(volatile int *x)
 {
 	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
 }
 
-static inline void a_store(volatile int *p, int x)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_store(volatile int *p, int x)
 {
 	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
 }
 
-static inline void a_spin()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_spin()
 {
 	__asm__ __volatile__( "pause" : : : "memory" );
 }
 
-static inline void a_crash()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void a_crash()
 {
 	__asm__ __volatile__( "hlt" : : : "memory" );
 }
diff --git a/arch/x86_64/pthread_arch.h b/arch/x86_64/pthread_arch.h
index 836187f..83d1447 100644
--- a/arch/x86_64/pthread_arch.h
+++ b/arch/x86_64/pthread_arch.h
@@ -1,4 +1,7 @@
-static inline struct pthread *__pthread_self()
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static struct pthread *__pthread_self()
 {
 	struct pthread *self;
 	__asm__ __volatile__ ("movq %%fs:0,%0" : "=r" (self) );
diff --git a/arch/x86_64/reloc.h b/arch/x86_64/reloc.h
index b0bbfb3..8825fcd 100644
--- a/arch/x86_64/reloc.h
+++ b/arch/x86_64/reloc.h
@@ -7,7 +7,10 @@
 #define IS_COPY(x) ((x)==R_X86_64_COPY)
 #define IS_PLT(x) ((x)==R_X86_64_JUMP_SLOT)
 
-static inline void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
+#if __STDC_VERSION__ >= 199901L
+inline
+#endif
+static void do_single_reloc(size_t *reloc_addr, int type, size_t sym_val, size_t sym_size, unsigned char *base_addr, size_t addend)
 {
 	switch(type) {
 	case R_X86_64_GLOB_DAT:

Thread overview: 19+ messages
2012-08-23  5:07 compatability: bits/syscall.h requires C99 idunham
2012-08-23  6:11 ` [PATCH] Problem is static inline idunham
2012-08-23  6:20   ` idunham [this message]
2012-08-23  6:43     ` [PATCH 2/1] " Szabolcs Nagy
2012-08-23  6:51   ` [PATCH] " Szabolcs Nagy
2012-08-23 12:18   ` Rich Felker
2012-08-23 12:31 ` compatability: bits/syscall.h requires C99 John Spencer
2012-08-23 12:34   ` Rich Felker
2012-08-24  0:25     ` Isaac Dunham
2012-08-24  2:07       ` Rich Felker
2012-08-24  2:34         ` Rich Felker
2012-08-24  3:31           ` [PATCH/RFC] __inline for C89 compilers (take 3?) Isaac Dunham
2012-08-24  7:53           ` compatability: bits/syscall.h requires C99 Szabolcs Nagy
2012-08-30 22:45             ` [PATCH/RFC] inline cleanup/C89 support Isaac Dunham
2012-08-31  8:34               ` Szabolcs Nagy
2012-08-31 19:27                 ` Isaac Dunham
2012-09-02 16:51               ` Rich Felker
2012-09-04 15:49                 ` philomath
2012-09-04 17:44                   ` Rich Felker
