Subject: Atomic operations for NetBSD
To: None <tech-kern@NetBSD.org>
From: Tonnerre <tonnerre@thundrix.ch>
List: tech-kern
Date: 06/11/2005 01:58:38
Hi,
The following patch implements atomic operations for NetBSD, though only
for the i386, alpha, and PowerPC architectures so far. The generic
fallback for architectures that cannot execute these operations
atomically would be to take a lock, perform the operation, and release
the lock. Sure, that is slow, but it works.
(Unless the architecture doesn't support SMP anyway, in which case the
operations can simply be executed directly.)
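For illustration, a minimal sketch of such a lock-based fallback (not part
of this patch) could look roughly like the following, assuming the existing
simplelock(9) primitives and a single global lock; callers in interrupt
context would additionally need an splhigh()/splx() pair around it:

#include <sys/lock.h>

typedef struct { volatile long val; } atomic_t;

/* One global lock serializing all atomic_t updates (illustration only). */
static struct simplelock atomic_slock = SIMPLELOCK_INITIALIZER;

static __inline long
atomic_addret(long inc, atomic_t *t)
{
	long res;

	/* Effectively a no-op on non-MULTIPROCESSOR kernels (modulo LOCKDEBUG). */
	simple_lock(&atomic_slock);
	t->val += inc;
	res = t->val;		/* return the new value, as on the MD versions */
	simple_unlock(&atomic_slock);

	return res;
}

static __inline void
atomic_add(long inc, atomic_t *t)
{
	(void)atomic_addret(inc, t);
}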
I'd like to get this interface implemented for more architectures, though.
Comments are very welcome. The interface is needed for DRI.
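To make the intended use a little more concrete, a reference-counting
consumer (roughly what DRI needs) could look like the sketch below; the
drm_object structure and function names are made up for the example:

#include <sys/param.h>
#include <sys/malloc.h>

struct drm_object {
	atomic_t	refcnt;
	/* ... other fields ... */
};

static struct drm_object *
drm_object_create(void)
{
	struct drm_object *obj;

	obj = malloc(sizeof(*obj), M_DEVBUF, M_WAITOK | M_ZERO);
	atomic_set(&obj->refcnt, 1);	/* caller holds the first reference */
	return obj;
}

static void
drm_object_ref(struct drm_object *obj)
{
	atomic_inc(&obj->refcnt);
}

static void
drm_object_unref(struct drm_object *obj)
{
	/* atomic_decret() returns the new value; 0 means last reference. */
	if (atomic_decret(&obj->refcnt) == 0)
		free(obj, M_DEVBUF);
}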
(Also, please excuse that I'm not working on DRI at the moment; I'm busy
16 to 20 hours a day with the EU patent directive.)
Tonnerre
-- 
Index: sys/arch/alpha/include/atomic.h
===================================================================
RCS file: /usr/home/cvsweb/cvsroot/src/sys/arch/alpha/include/atomic.h,v
retrieving revision 1.7
diff -u -r1.7 atomic.h
--- sys/arch/alpha/include/atomic.h 17 Dec 2001 23:34:57 -0000 1.7
+++ sys/arch/alpha/include/atomic.h 10 Jun 2005 23:29:42 -0000
@@ -44,6 +44,136 @@
#ifndef _ALPHA_ATOMIC_H_
#define _ALPHA_ATOMIC_H_
 
+typedef struct { volatile long val; } atomic_t;
+
+static __inline void
+atomic_add(long inc, atomic_t *t)
+{
+ unsigned long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_add\n"
+ "1: ldq_l %0,%1\n"
+ " addq %0,%2,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ " br 3f\n"
+ "2: br 1b\n"
+ "3:\n"
+ " # END atomic_add" : "=3D&r" (l), "=3Dm" (t->val) : "Ir" (inc),
+ "m" (t->val));
+}
+
+static __inline void
+atomic_inc(atomic_t *t)
+{
+ atomic_add(1, t);
+}
+
+static __inline long
+atomic_addret(long inc, atomic_t *t)
+{
+ long res, l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_addret\n"
+ "1: ldq_l %0,%1\n"
+ " addq %0,%3,%2\n"
+ " addq %0,%3,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ " mb\n"
+ " br 3f\n"
+ "2: br 1b\n"
+ "3:\n"
+ " # END atomic_addret\n" : "=3D&r" (l), "=3Dm" (t->val), "=3D&r" (res)
+ : "Ir" (inc), "m" (t->val) : "memory");
+
+ return res;
+}
+
+static __inline long
+atomic_testinc(atomic_t *t)
+{
+ return atomic_addret(1, t) == 0;
+}
+
+static __inline long
+atomic_incret(atomic_t *t)
+{
+ return atomic_addret(1, t);
+}
+
+static __inline void
+atomic_sub(long dec, atomic_t *t)
+{
+ unsigned long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_sub\n"
+ "1: ldq_l %0,%1\n"
+ " subq %0,%2,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ " br 3f\n"
+ "2: br 1b\n"
+ "3:\n"
+ " # END atomic_sub" : "=3D&r" (l), "=3Dm" (t->val) : "Ir" (dec),
+ "m" (t->val));
+}
+
+static __inline void
+atomic_dec(atomic_t *t)
+{
+ atomic_sub(1, t);
+}
+
+static __inline long
+atomic_subret(long dec, atomic_t *t)
+{
+ long res, l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_subret\n"
+ "1: ldq_l %0,%1\n"
+ " subq %0,%3,%2\n"
+ " subq %0,%3,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ " mb\n"
+ " br 3f\n"
+ "2: br 1b\n"
+ "3:\n"
+ " # END atomic_subret" : "=3D&r" (l), "=3Dm" (t->val), "=3D&r" (res) :
+ "Ir" (dec), "m" (t->val) : "memory");
+
+ return res;
+}
+
+static __inline long
+atomic_testdec(atomic_t *t)
+{
+ return atomic_subret(1, t) == 0;
+}
+
+static __inline long
+atomic_decret(atomic_t *t)
+{
+ return atomic_subret(1, t);
+}
+
+static __inline void
+atomic_set(atomic_t *t, long val)
+{
+ t->val = val;
+}
+
+static __inline long
+atomic_ret(atomic_t *t)
+{
+ return t->val;
+}
+
/*
* atomic_setbits_ulong:
*
Index: sys/arch/i386/include/atomic.h
===================================================================
RCS file: /usr/home/cvsweb/cvsroot/src/sys/arch/i386/include/atomic.h,v
retrieving revision 1.3
diff -u -r1.3 atomic.h
--- sys/arch/i386/include/atomic.h 26 Feb 2003 21:28:59 -0000 1.3
+++ sys/arch/i386/include/atomic.h 10 Jun 2005 22:57:26 -0000
@@ -43,6 +43,94 @@
 
#ifndef _LOCORE
 
+typedef struct { volatile long val; } atomic_t;
+
+static __inline void
+atomic_inc(atomic_t *t)
+{
+ __asm__ volatile ("lock ; incl %0" : "=3Dm" (t->val) : "m" (t->val));
+}
+
+static __inline long
+atomic_testinc(atomic_t *t)
+{
+ unsigned char c;
+ __asm__ volatile ("lock ; incl %0; sete %1" : "=3Dm" (t->val), "=3Dqm" (c)
+ : "m" (t->val) : "memory");
+ return (c != 0);
+}
+
+static __inline void
+atomic_dec(atomic_t *t)
+{
+ __asm__ volatile ("lock ; decl %0" : "=3Dm" (t->val) : "m" (t->val));
+}
+
+static __inline long
+atomic_testdec(atomic_t *t)
+{
+ unsigned char c;
+
+ __asm__ volatile ("lock ; decl %0; sete %1" : "=3Dm" (t->val), "=3Dqm" (c)
+ : "m" (t->val) : "memory");
+ return (c != 0);
+}
+
+static __inline void
+atomic_add(long inc, atomic_t *t)
+{
+ __asm__ volatile ("lock ; addl %1,%0" : "=3Dm" (t->val) : "ir" (inc),
+ "m" (t->val));
+}
+
+static __inline long
+atomic_addret(long inc, atomic_t *t)
+{
+ long i;
+
+ i = inc;
+ __asm__ volatile ("lock ; xaddl %0,%1" : "=r" (inc) : "m" (t->val),
+ "0" (inc));
+ return inc + i;
+}
+
+static __inline long
+atomic_incret(atomic_t *t)
+{
+ return atomic_addret(1, t);
+}
+
+static __inline void
+atomic_sub(long dec, atomic_t *t)
+{
+ __asm__ volatile ("lock ; subl %1,%0" : "=3Dm" (t->val) : "ir" (dec),
+ "m" (t->val));
+}
+
+static __inline long
+atomic_subret(long dec, atomic_t *t)
+{
+ return atomic_addret(-dec, t);
+}
+
+static __inline long
+atomic_decret(atomic_t *t)
+{
+ return atomic_subret(1, t);
+}
+
+static __inline void
+atomic_set(atomic_t *t, long val)
+{
+ t->val = val;
+}
+
+static __inline long
+atomic_ret(atomic_t *t)
+{
+ return t->val;
+}
+
static __inline unsigned long
x86_atomic_testset_ul (volatile u_int32_t *ptr, unsigned long val) {
__asm__ volatile ("xchgl %0,(%2)" :"=3Dr" (val):"0" (val),"r" (ptr));
Index: sys/arch/powerpc/include/atomic.h
===================================================================
RCS file: /usr/home/cvsweb/cvsroot/src/sys/arch/powerpc/include/atomic.h,v
retrieving revision 1.2
diff -u -r1.2 atomic.h
--- sys/arch/powerpc/include/atomic.h 25 Nov 2002 01:36:35 -0000 1.2
+++ sys/arch/powerpc/include/atomic.h 10 Jun 2005 23:36:24 -0000
@@ -10,6 +10,174 @@
#ifndef _POWERPC_ATOMIC_H_
#define _POWERPC_ATOMIC_H_
 
+typedef struct { volatile long val; } atomic_t;
+
+static __inline void
+atomic_inc(atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_inc\n"
+ "1: lwarx %0,0,%2\n"
+ " addic %0,%0,1\n"
+ " stwcx. %0,0,%2\n"
+ " bne- 1b\n"
+ "# END atomic_inc" : "=3D&r" (l), "=3Dm" (t->val) : "r" (&t->val),
+ "m" (t->val) : "cc");
+}
+
+static __inline long
+atomic_incret(atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_incret\n"
+ "1: lwarx %0,0,%1\n"
+ " addic %0,%0,1\n"
+ " stwcx. %0,0,%1\n"
+ " bne- 1b\n"
+ " isync\n"
+ "# END atomic_incret" : "=3D&r" (l) : "r" (&t->val) : "cc", "memory");
+
+ return l;
+}
+
+static __inline long
+atomic_testinc(atomic_t *t)
+{
+ return atomic_incret(t) == 0;
+}
+
+static __inline void
+atomic_dec(atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_dec\n"
+ "1: lwarx %0,0,%2\n"
+ " addic %0,%0,-1\n"
+ " stwcx. %0,0,%2\n"
+ " bne 1b\n"
+ "# END atomic_dec" : "=3D&r" (l), "=3Dm" (t->val) : "r" (&t->val),
+ "m" (t->val) : "cc");
+}
+
+static __inline long
+atomic_decret(atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_decret\n"
+ "1: lwarx %0,0,%1\n"
+ " addic %0,%0,-1\n"
+ " stwcx. %0,0,%1\n"
+ " bne- 1b\n"
+ " isync\n"
+ "# END atomic_decret" : "=3D&r" (l) : "r" (&t->val) : "cc", "memory");
+
+ return l;
+}
+
+static __inline long
+atomic_testdec(atomic_t *t)
+{
+ return atomic_decret(t) == 0;
+}
+
+static __inline void
+atomic_add(long inc, atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_add\n"
+ "1: lwarx %0,0,%3\n"
+ " add %0,%2,%0\n"
+ " stwcx. %0,0,%3\n"
+ " bne- 1b\n"
+ "# END atomic_add" : "=3D&r" (l), "=3Dm" (t->val) : "r" (inc),
+ "r" (&t->val), "m" (t->val) : "cc");
+}
+
+static __inline long
+atomic_addret(long inc, atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_addret\n"
+ "1: lwarx %0,0,%2\n"
+ " add %0,%1,%0\n"
+ " stwcx. %0,0,%2\n"
+ " bne- 1b\n"
+ " isync\n"
+ "# END atomic_addret" : "=3D&r" (l) : "r" (inc), "r" (&t->val) :
+ "cc", "memory");
+
+ return l;
+}
+
+static __inline long
+atomic_testadd(long inc, atomic_t *t)
+{
+ return atomic_addret(inc, t) == 0;
+}
+
+static __inline void
+atomic_sub(long dec, atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_sub\n"
+ "1: lwarx %0,0,%3\n"
+ " subf %0,%2,%0\n"
+ " stwcx. %0,0,%3\n"
+ " bne- 1b\n"
+ "# END atomic_sub" : "=3D&r" (l), "=3Dm" (t->val) : "r" (dec),
+ "r" (&t->val), "m" (t->val) : "cc");
+}
+
+static __inline long
+atomic_subret(long dec, atomic_t *t)
+{
+ long l;
+
+ __asm__ volatile (
+ "# BEGIN atomic_subret\n"
+ "1: lwarx %0,0,%2\n"
+ " subf %0,%1,%0\n"
+ " stwcx. %0,0,%2\n"
+ " bne- 1b\n"
+ " isync\n"
+ "# END atomic_subret" : "=3D&r" (l) : "r" (dec), "r" (&t->val) :
+ "cc", "memory");
+
+ return l;
+}
+
+static __inline long
+atomic_testsub(long inc, atomic_t *t)
+{
+ return atomic_subret(inc, t) == 0;
+}
+
+static __inline void
+atomic_set(atomic_t *t, long val)
+{
+ t->val = val;
+}
+
+static __inline long
+atomic_ret(atomic_t *t)
+{
+ return t->val;
+}
+
/*
* atomic_setbits_ulong:
*