author     David Ludwig <dludwig@pobox.com>    2013-08-12 22:29:55 -0400
committer  David Ludwig <dludwig@pobox.com>    2013-08-12 22:29:55 -0400
commit     373ffd0dacc489060ad6c8102909dbc1bb5a0653 (patch)
tree       ffe2e8034b8ef26262e63f6edaf941458b781db9 /src/atomic
parent     31d9bd334be04e49e9f5cf91fb53aaf61fe55abf (diff)
parent     34710c77defe6f78dbfecdf0e53fde24728f21d4 (diff)
WinRT: merged with SDL 2.0.0 codebase (aka. SDL hg rev d6a8fa507a45)
Diffstat (limited to 'src/atomic')
-rw-r--r--  src/atomic/SDL_atomic.c   | 20
-rw-r--r--  src/atomic/SDL_spinlock.c | 10
2 files changed, 22 insertions(+), 8 deletions(-)
diff --git a/src/atomic/SDL_atomic.c b/src/atomic/SDL_atomic.c
index 6cf2384a5d..c747b12aa2 100644
--- a/src/atomic/SDL_atomic.c
+++ b/src/atomic/SDL_atomic.c
@@ -29,8 +29,8 @@
*/
#undef SDL_AtomicCAS
#undef SDL_AtomicCASPtr
-
-/*
+
+/*
If any of the operations are not provided then we must emulate some
of them. That means we need a nice implementation of spin locks
that avoids the "one big lock" problem. We use a vector of spin
@@ -40,7 +40,7 @@
To generate the index of the lock we first shift by 3 bits to get
rid of the zero bits that result from 32 and 64 bit alignment of
data. We then mask off all but 5 bits and use those 5 bits as an
- index into the table.
+ index into the table.
Picking the lock this way ensures that accesses to the same data at
the same time will go to the same lock. OTOH, accesses to different
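The indexing described in that comment amounts to a one-line hash of the address. A minimal sketch, assuming a 32-entry table (the names locks and enterLock are illustrative, not necessarily SDL's actual internals):

    #include "SDL_atomic.h"
    #include <stdint.h>

    static SDL_SpinLock locks[32];

    static void enterLock(void *a)
    {
        /* Shift off the 3 low alignment bits, then keep 5 bits: an index in 0..31. */
        uintptr_t index = (((uintptr_t)a) >> 3) & 0x1f;

        SDL_AtomicLock(&locks[index]);
    }

Two pointers to the same object always hash to the same slot, while pointers to different objects tend to spread across the 32 locks.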
@@ -101,4 +101,18 @@ SDL_AtomicCASPtr(void **a, void *oldval, void *newval)
return retval;
}
+#if defined(__GNUC__) && defined(__arm__) && \
+ (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__))
+__asm__(
+" .align 2\n"
+" .globl _SDL_MemoryBarrierRelease\n"
+" .globl _SDL_MemoryBarrierAcquire\n"
+"_SDL_MemoryBarrierRelease:\n"
+"_SDL_MemoryBarrierAcquire:\n"
+" mov r0, #0\n"
+" mcr p15, 0, r0, c7, c10, 5\n"
+" bx lr\n"
+);
+#endif /* __GNUC__ && __arm__ && ARMV6 */
+
/* vi: set ts=4 sw=4 expandtab: */
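In the hunk above, "mcr p15, 0, r0, c7, c10, 5" is the ARMv6 CP15 data memory barrier operation; ARMv6 offers only this full barrier, with no lighter acquire-only or release-only forms, so both entry points share one implementation. A hedged usage sketch of the pair (shared_value and ready are hypothetical variables, not part of SDL):

    extern void SDL_MemoryBarrierRelease(void);
    extern void SDL_MemoryBarrierAcquire(void);

    static int shared_value;        /* hypothetical payload */
    static volatile int ready;      /* hypothetical flag */

    void publish(int v)
    {
        shared_value = v;
        SDL_MemoryBarrierRelease(); /* payload write is visible before the flag write */
        ready = 1;
    }

    int consume(void)
    {
        while (!ready) { /* spin until the flag is set */ }
        SDL_MemoryBarrierAcquire(); /* flag read completes before the payload read */
        return shared_value;
    }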
diff --git a/src/atomic/SDL_spinlock.c b/src/atomic/SDL_spinlock.c
index b50565974b..f90528c353 100644
--- a/src/atomic/SDL_spinlock.c
+++ b/src/atomic/SDL_spinlock.c
@@ -76,11 +76,11 @@ SDL_AtomicTryLock(SDL_SpinLock *lock)
return (result == 0);
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
- int result;
- __asm__ __volatile__(
+ int result;
+ __asm__ __volatile__(
"lock ; xchgl %0, (%1)\n"
: "=r" (result) : "r" (lock), "0" (1) : "cc", "memory");
- return (result == 0);
+ return (result == 0);
#elif defined(__MACOSX__) || defined(__IPHONEOS__)
/* Maybe used for PowerPC, but the Intel asm or gcc atomics are favored. */
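For readers unfamiliar with the x86 inline assembly above: xchgl atomically swaps the constant 1 into *lock and leaves the previous contents in result, so a result of 0 means the lock was free and has just been taken. The same try-lock can be sketched with the GCC builtin that pairs with the __sync_lock_release() visible in the next hunk:

    /* Sketch only: __sync_lock_test_and_set() atomically stores 1 into *lock
       and returns the value that was there before. */
    SDL_bool TryLockSketch(SDL_SpinLock *lock)
    {
        return (__sync_lock_test_and_set(lock, 1) == 0) ? SDL_TRUE : SDL_FALSE;
    }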
@@ -114,10 +114,10 @@ SDL_AtomicUnlock(SDL_SpinLock *lock)
#elif HAVE_GCC_ATOMICS || HAVE_GCC_SYNC_LOCK_TEST_AND_SET
__sync_lock_release(lock);
-
+
#elif HAVE_PTHREAD_SPINLOCK
pthread_spin_unlock(lock);
-
+
#else
*lock = 0;
#endif
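Taken together, typical use of this spinlock API looks like the sketch below; the counter is hypothetical. SDL_AtomicLock() is the blocking companion of SDL_AtomicTryLock(), and SDL_AtomicUnlock() is the function patched in the last hunk.

    #include "SDL_atomic.h"

    static SDL_SpinLock counter_lock;   /* zero-initialized == unlocked */
    static int counter;

    void increment_counter(void)
    {
        SDL_AtomicLock(&counter_lock);   /* spins until the lock is acquired */
        ++counter;
        SDL_AtomicUnlock(&counter_lock); /* the unlock path shown above */
    }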