psl1ght
A free SDK for Sony's PS3 console
atomic.h
/*
 * PowerPC atomic operations
 *
 * Copied from the linux 2.6.x kernel sources:
 * - renamed functions to psl1ght convention
 * - removed all kernel dependencies
 * - removed PPC_ACQUIRE_BARRIER, PPC_RELEASE_BARRIER macros
 * - removed PPC405_ERR77 macro
 *
 */

#ifndef __SYS_ATOMIC_H__
#define __SYS_ATOMIC_H__

#include <ppu-types.h>

typedef struct { volatile u32 counter; } atomic_t;
typedef struct { volatile u64 counter; } atomic64_t;

static inline u32 sysAtomicRead(const atomic_t *v)
{
    u32 t;

    __asm__ volatile("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

    return t;
}

static inline void sysAtomicSet(atomic_t *v, int i)
{
    __asm__ volatile("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static inline void sysAtomicAdd(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%3     # atomic_add\n\
    add     %0,%2,%0\n"
"   stwcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}

static inline u32 sysAtomicAddReturn(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_add_return\n\
    add     %0,%1,%0\n"
"   stwcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}

/* sysAtomicAddReturn() yields a u32, so test the sign through a signed cast. */
#define sysAtomicAddNegative(a, v) ((s32)sysAtomicAddReturn((a), (v)) < 0)
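
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * shared hit counter updated from several threads.  `hits', `record_hit'
 * and `current_hits' are example names, not psl1ght APIs.
 *
 *     static atomic_t hits = { 0 };
 *
 *     void record_hit(void)
 *     {
 *         sysAtomicAdd(1, &hits);        // note: amount first, counter second
 *     }
 *
 *     u32 current_hits(void)
 *     {
 *         return sysAtomicRead(&hits);
 *     }
 */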

static inline void sysAtomicSub(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%3     # atomic_sub\n\
    subf    %0,%2,%0\n"
"   stwcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}

static inline u32 sysAtomicSubReturn(u32 a, atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_sub_return\n\
    subf    %0,%1,%0\n"
"   stwcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}

static inline void sysAtomicInc(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_inc\n\
    addic   %0,%0,1\n"
"   stwcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}

static inline u32 sysAtomicIncReturn(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_inc_return\n\
    addic   %0,%0,1\n"
"   stwcx.  %0,0,%1\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define sysAtomicIncAndTest(v) (sysAtomicIncReturn(v) == 0)
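
/*
 * Usage sketch (illustrative, not part of the original header): a one-shot
 * rendezvous for a fixed number of threads.  The counter starts at -N
 * (stored as a u32), so the Nth call to sysAtomicIncAndTest() is the one
 * that sees the result reach zero.  `NUM_THREADS', `rendezvous' and
 * `arrive' are example names only.
 *
 *     #define NUM_THREADS 4
 *     static atomic_t rendezvous = { (u32)-NUM_THREADS };
 *
 *     void arrive(void)
 *     {
 *         if (sysAtomicIncAndTest(&rendezvous)) {
 *             // last thread to arrive: everyone has checked in
 *         }
 *     }
 */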

static inline void sysAtomicDec(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%2     # atomic_dec\n\
    addic   %0,%0,-1\n"
"   stwcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}

static inline u32 sysAtomicDecReturn(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_dec_return\n\
    addic   %0,%0,-1\n"
"   stwcx.  %0,0,%1\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static inline u32 __xchg_u32(volatile void *p, u32 val)
{
    u32 prev;

    __asm__ volatile(
"1: lwarx   %0,0,%2\n"
"   stwcx.  %3,0,%2\n\
    bne-    1b"
    : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
    : "r" (p), "r" (val)
    : "cc", "memory");

    return prev;
}

static inline u64 __xchg_u64(volatile void *p, u64 val)
{
    u64 prev;

    __asm__ volatile(
"1: ldarx   %0,0,%2\n"
"   stdcx.  %3,0,%2\n\
    bne-    1b"
    : "=&r" (prev), "+m" (*(volatile u64 *)p)
    : "r" (p), "r" (val)
    : "cc", "memory");

    return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static inline u64 __xchg(volatile void *ptr, u64 x, unsigned int size)
{
    switch (size) {
    case 4:
        return __xchg_u32(ptr, x);
    case 8:
        return __xchg_u64(ptr, x);
    }
    __xchg_called_with_bad_pointer();
    return x;
}

#define xchg(ptr,x)                                                        \
    ({                                                                     \
        __typeof__(*(ptr)) _x_ = (x);                                      \
        (__typeof__(*(ptr))) __xchg((ptr), (u64)_x_, sizeof(*(ptr)));      \
    })
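
/*
 * Usage sketch (illustrative, not part of the original header): handing a
 * single pending value from a producer to a consumer.  xchg() writes the
 * new value and returns whatever was stored before, in one atomic step.
 * `pending', `post' and `take' are example names only.
 *
 *     static volatile u32 pending = 0;
 *
 *     void post(u32 value)           // producer
 *     {
 *         xchg(&pending, value);
 *     }
 *
 *     u32 take(void)                 // consumer; 0 means "nothing posted"
 *     {
 *         return xchg(&pending, 0);
 *     }
 */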

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
static inline u64
__cmpxchg_u32(volatile unsigned int *p, u64 oldv, u64 newv)
{
    unsigned int prev;

    __asm__ volatile (
"1: lwarx   %0,0,%2     # __cmpxchg_u32\n\
    cmpw    0,%0,%3\n\
    bne-    2f\n"
"   stwcx.  %4,0,%2\n\
    bne-    1b\n"
"2:"
    : "=&r" (prev), "+m" (*p)
    : "r" (p), "r" (oldv), "r" (newv)
    : "cc", "memory");

    return prev;
}

static inline u64
__cmpxchg_u64(volatile u64 *p, u64 oldv, u64 newv)
{
    u64 prev;

    __asm__ volatile (
"1: ldarx   %0,0,%2     # __cmpxchg_u64\n\
    cmpd    0,%0,%3\n\
    bne-    2f\n\
    stdcx.  %4,0,%2\n\
    bne-    1b\n"
"2:"
    : "=&r" (prev), "+m" (*p)
    : "r" (p), "r" (oldv), "r" (newv)
    : "cc", "memory");

    return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline u64
__cmpxchg(volatile void *ptr, u64 oldv, u64 newv,
          unsigned int size)
{
    switch (size) {
    case 4:
        return __cmpxchg_u32((volatile u32*)ptr, oldv, newv);
    case 8:
        return __cmpxchg_u64((volatile u64*)ptr, oldv, newv);
    }
    __cmpxchg_called_with_bad_pointer();
    return oldv;
}

#define cmpxchg(ptr, o, n)                                                 \
    ({                                                                     \
        __typeof__(*(ptr)) _o_ = (o);                                      \
        __typeof__(*(ptr)) _n_ = (n);                                      \
        (__typeof__(*(ptr))) __cmpxchg((ptr), (u64)_o_,                    \
                                       (u64)_n_, sizeof(*(ptr)));          \
    })

#define sysAtomicCompareAndSwap(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define sysAtomicSwap(v, newv) (xchg(&((v)->counter), newv))
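
/*
 * Usage sketch (illustrative, not part of the original header): a minimal
 * test-and-set spinlock built on sysAtomicCompareAndSwap(), which returns
 * the value seen before the attempted swap (0 here means the lock was
 * taken).  Note that the acquire/release barriers of the kernel originals
 * were removed from this port (see the header comment), so callers that
 * need ordering must add their own memory barriers.  `lock', `spin_lock'
 * and `spin_unlock' are example names only.
 *
 *     static atomic_t lock = { 0 };
 *
 *     void spin_lock(void)
 *     {
 *         while (sysAtomicCompareAndSwap(&lock, 0, 1) != 0)
 *             ;                      // held by someone else; retry
 *     }
 *
 *     void spin_unlock(void)
 *     {
 *         sysAtomicSet(&lock, 0);
 *     }
 */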

/*
 * sysAtomicAddUnless - add to @v, unless it currently equals @u
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u32 sysAtomicAddUnless(atomic_t *v, u32 a, u32 u)
{
    u32 t;

    __asm__ volatile (
"1: lwarx   %0,0,%1     # atomic_add_unless\n\
    cmpw    0,%0,%3\n\
    beq-    2f\n\
    add     %0,%2,%0\n"
"   stwcx.  %0,0,%1\n\
    bne-    1b\n"
"   subf    %0,%2,%0\n\
2:"
    : "=&r" (t)
    : "r" (&v->counter), "r" (a), "r" (u)
    : "cc", "memory");

    return t != u;
}

#define sysAtomicIncNotZero(v) sysAtomicAddUnless((v), 1, 0)
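
/*
 * Usage sketch (illustrative, not part of the original header): taking a
 * reference on an object that may be concurrently released.
 * sysAtomicIncNotZero() only bumps the count if it has not already
 * dropped to zero, and returns non-zero on success.  `struct object' and
 * `object_get' are example names only.
 *
 *     struct object {
 *         atomic_t refs;
 *         u32 payload;
 *     };
 *
 *     int object_get(struct object *obj)
 *     {
 *         return sysAtomicIncNotZero(&obj->refs);   // 0: object already gone
 *     }
 */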

#define sysAtomicSubAndTest(a, v) (sysAtomicSubReturn((a), (v)) == 0)
#define sysAtomicDecAndTest(v) (sysAtomicDecReturn((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * *v was not decremented.
 */
static inline u32 sysAtomicDecIfPositive(atomic_t *v)
{
    u32 t;

    __asm__ volatile(
"1: lwarx   %0,0,%1     # atomic_dec_if_positive\n\
    cmpwi   %0,1\n\
    addi    %0,%0,-1\n\
    blt-    2f\n"
"   stwcx.  %0,0,%1\n\
    bne-    1b\n"
"2:"
    : "=&b" (t)
    : "r" (&v->counter)
    : "cc", "memory");

    return t;
}
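
/*
 * Usage sketch (illustrative, not part of the original header): taking one
 * token from a fixed pool without blocking.  sysAtomicDecIfPositive()
 * returns the old value minus 1 as a u32, so the "went negative" case is
 * checked through a signed cast.  `tokens' and `try_take_token' are
 * example names only.
 *
 *     static atomic_t tokens = { 8 };
 *
 *     int try_take_token(void)
 *     {
 *         return (s32)sysAtomicDecIfPositive(&tokens) >= 0;
 *     }
 */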

static inline u64 sysAtomic64Read(const atomic64_t *v)
{
    u64 t;

    __asm__ volatile("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

    return t;
}

static inline void sysAtomic64Set(atomic64_t *v, u64 i)
{
    __asm__ volatile("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static inline void sysAtomic64Add(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%3     # atomic64_add\n\
    add     %0,%2,%0\n\
    stdcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}

static inline u64 sysAtomic64AddReturn(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_add_return\n\
    add     %0,%1,%0\n\
    stdcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}

/* sysAtomic64AddReturn() yields a u64, so test the sign through a signed cast. */
#define sysAtomic64AddNegative(a, v) ((s64)sysAtomic64AddReturn((a), (v)) < 0)

static inline void sysAtomic64Sub(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%3     # atomic64_sub\n\
    subf    %0,%2,%0\n\
    stdcx.  %0,0,%3\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (a), "r" (&v->counter)
    : "cc");
}

static inline u64 sysAtomic64SubReturn(u64 a, atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_sub_return\n\
    subf    %0,%1,%0\n\
    stdcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (a), "r" (&v->counter)
    : "cc", "memory");

    return t;
}

static inline void sysAtomic64Inc(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_inc\n\
    addic   %0,%0,1\n\
    stdcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}

static inline u64 sysAtomic64IncReturn(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic64_inc_return\n\
    addic   %0,%0,1\n\
    stdcx.  %0,0,%1\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define sysAtomic64IncAndTest(v) (sysAtomic64IncReturn(v) == 0)

static inline void sysAtomic64Dec(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%2     # atomic64_dec\n\
    addic   %0,%0,-1\n\
    stdcx.  %0,0,%2\n\
    bne-    1b"
    : "=&r" (t), "+m" (v->counter)
    : "r" (&v->counter)
    : "cc", "xer");
}

static inline u64 sysAtomic64DecReturn(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic64_dec_return\n\
    addic   %0,%0,-1\n\
    stdcx.  %0,0,%1\n\
    bne-    1b"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}

#define sysAtomic64SubAndTest(a, v) (sysAtomic64SubReturn((a), (v)) == 0)
#define sysAtomic64DecAndTest(v) (sysAtomic64DecReturn((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * *v was not decremented.
 */
static inline u64 sysAtomic64DecIfPositive(atomic64_t *v)
{
    u64 t;

    __asm__ volatile(
"1: ldarx   %0,0,%1     # atomic64_dec_if_positive\n\
    addic.  %0,%0,-1\n\
    blt-    2f\n\
    stdcx.  %0,0,%1\n\
    bne-    1b\n"
"2:"
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "xer", "memory");

    return t;
}

#define sysAtomic64CompareAndSwap(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define sysAtomic64Swap(v, newv) (xchg(&((v)->counter), newv))
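
/*
 * Usage sketch (illustrative, not part of the original header): a 64-bit
 * sequence-number generator that will not wrap in practice.
 * sysAtomic64IncReturn() hands every caller a distinct, monotonically
 * increasing value.  `next_seq' and `new_sequence_number' are example
 * names only.
 *
 *     static atomic64_t next_seq = { 0 };
 *
 *     u64 new_sequence_number(void)
 *     {
 *         return sysAtomic64IncReturn(&next_seq);
 *     }
 */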

/*
 * sysAtomic64AddUnless - add to @v, unless it currently equals @u
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u32 sysAtomic64AddUnless(atomic64_t *v, u64 a, u64 u)
{
    u64 t;

    __asm__ volatile (
"1: ldarx   %0,0,%1     # atomic64_add_unless\n\
    cmpd    0,%0,%3\n\
    beq-    2f\n\
    add     %0,%2,%0\n"
"   stdcx.  %0,0,%1\n\
    bne-    1b\n"
"   subf    %0,%2,%0\n\
2:"
    : "=&r" (t)
    : "r" (&v->counter), "r" (a), "r" (u)
    : "cc", "memory");

    return t != u;
}

#define sysAtomic64IncNotZero(v) sysAtomic64AddUnless((v), 1, 0)

#endif /* __SYS_ATOMIC_H__ */