Subject: Atomic ops API
To: NetBSD Kernel Technical Discussion List <tech-kern@netbsd.org>
From: Jason Thorpe <thorpej@shagadelic.org>
List: tech-kern
Date: 03/12/2007 23:28:47
I would like to propose the following as NetBSD's API for atomic
operations, both in user-space and the kernel. See attached header
file for a summary of the operations.
[attachment: atomic.h]
/* $NetBSD$ */
/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#if !defined(_SYS_ATOMIC_H_)
#define _SYS_ATOMIC_H_
#if defined(_KERNEL)
#include <sys/types.h>
#else
#include <stdint.h>
#include <stdbool.h>
#endif
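/*
 * Naming conventions (summarizing the proposal text that accompanies
 * this header):
 *
 *	atomic_<op>_<type>	perform <op> atomically on the target cell
 *	atomic_<op>_<type>_nv	as above, but also return the new value
 *	atomic_cas_<type>	compare-and-store: return true if the store
 *				was performed, false otherwise
 *
 * 64-bit variants are declared only on platforms that define
 * __HAVE_ATOMIC64_OPS.
 */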
/*
 * Atomic ADD
 */
void atomic_add_8(volatile uint8_t *, int8_t);
void atomic_add_char(volatile unsigned char *, signed char);
void atomic_add_16(volatile uint16_t *, int16_t);
void atomic_add_short(volatile unsigned short *, short);
void atomic_add_32(volatile uint32_t *, int32_t);
void atomic_add_int(volatile unsigned int *, int);
void atomic_add_long(volatile unsigned long *, long);
void atomic_add_ptr(volatile void *, ssize_t);
#if defined(__HAVE_ATOMIC64_OPS)
void atomic_add_64(volatile uint64_t *, int64_t);
#endif
uint8_t atomic_add_8_nv(volatile uint8_t *, int8_t);
unsigned char atomic_add_char_nv(volatile unsigned char *, signed char);
uint16_t atomic_add_16_nv(volatile uint16_t *, int16_t);
unsigned short atomic_add_short_nv(volatile unsigned short *, short);
uint32_t atomic_add_32_nv(volatile uint32_t *, int32_t);
unsigned int atomic_add_int_nv(volatile unsigned int *, int);
unsigned long atomic_add_long_nv(volatile unsigned long *, long);
void * atomic_add_ptr_nv(volatile void *, ssize_t);
#if defined(__HAVE_ATOMIC64_OPS)
uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
#endif
/*
 * Atomic AND
 */
void atomic_and_8(volatile uint8_t *, uint8_t);
void atomic_and_uchar(volatile unsigned char *, unsigned char);
void atomic_and_16(volatile uint16_t *, uint16_t);
void atomic_and_ushort(volatile unsigned short *, unsigned short);
void atomic_and_32(volatile uint32_t *, uint32_t);
void atomic_and_uint(volatile unsigned int *, unsigned int);
void atomic_and_ulong(volatile unsigned long *, unsigned long);
#if defined(__HAVE_ATOMIC64_OPS)
void atomic_and_64(volatile uint64_t *, uint64_t);
#endif
uint8_t atomic_and_8_nv(volatile uint8_t *, uint8_t);
unsigned char atomic_and_uchar_nv(volatile unsigned char *, unsigned char);
uint16_t atomic_and_16_nv(volatile uint16_t *, uint16_t);
unsigned short atomic_and_ushort_nv(volatile unsigned short *, unsigned short);
uint32_t atomic_and_32_nv(volatile uint32_t *, uint32_t);
unsigned int atomic_and_uint_nv(volatile unsigned int *, unsigned int);
unsigned long atomic_and_ulong_nv(volatile unsigned long *, unsigned long);
#if defined(__HAVE_ATOMIC64_OPS)
uint64_t atomic_and_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
 * Atomic OR
 */
void atomic_or_8(volatile uint8_t *, uint8_t);
void atomic_or_uchar(volatile unsigned char *, unsigned char);
void atomic_or_16(volatile uint16_t *, uint16_t);
void atomic_or_ushort(volatile unsigned short *, unsigned short);
void atomic_or_32(volatile uint32_t *, uint32_t);
void atomic_or_uint(volatile unsigned int *, unsigned int);
void atomic_or_ulong(volatile unsigned long *, unsigned long);
#if defined(__HAVE_ATOMIC64_OPS)
void atomic_or_64(volatile uint64_t *, uint64_t);
#endif
uint8_t atomic_or_8_nv(volatile uint8_t *, uint8_t);
unsigned char atomic_or_uchar_nv(volatile unsigned char *, unsigned char);
uint16_t atomic_or_16_nv(volatile uint16_t *, uint16_t);
unsigned short atomic_or_ushort_nv(volatile unsigned short *, unsigned short);
uint32_t atomic_or_32_nv(volatile uint32_t *, uint32_t);
unsigned int atomic_or_uint_nv(volatile unsigned int *, unsigned int);
unsigned long atomic_or_ulong_nv(volatile unsigned long *, unsigned long);
#if defined(__HAVE_ATOMIC64_OPS)
uint64_t atomic_or_64_nv(volatile uint64_t *, uint64_t);
#endif
/*
 * Atomic COMPARE-AND-STORE
 */
bool atomic_cas_8(volatile uint8_t *, uint8_t, uint8_t);
bool atomic_cas_uchar(volatile unsigned char *, unsigned char,
	unsigned char);
bool atomic_cas_16(volatile uint16_t *, uint16_t, uint16_t);
bool atomic_cas_ushort(volatile unsigned short *, unsigned short,
	unsigned short);
bool atomic_cas_32(volatile uint32_t *, uint32_t, uint32_t);
bool atomic_cas_uint(volatile unsigned int *, unsigned int,
	unsigned int);
bool atomic_cas_ulong(volatile unsigned long *, unsigned long,
	unsigned long);
bool atomic_cas_ptr(volatile void *, void *, void *);
#if defined(__HAVE_ATOMIC64_OPS)
bool atomic_cas_64(volatile uint64_t *, uint64_t, uint64_t);
#endif
/*
 * Atomic DECREMENT
 */
void atomic_dec_8(volatile uint8_t *);
void atomic_dec_uchar(volatile unsigned char *);
void atomic_dec_16(volatile uint16_t *);
void atomic_dec_ushort(volatile unsigned short *);
void atomic_dec_32(volatile uint32_t *);
void atomic_dec_uint(volatile unsigned int *);
void atomic_dec_ulong(volatile unsigned long *);
void atomic_dec_ptr(volatile void *);
#if defined(__HAVE_ATOMIC64_OPS)
void atomic_dec_64(volatile uint64_t *);
#endif
uint8_t atomic_dec_8_nv(volatile uint8_t *);
unsigned char atomic_dec_uchar_nv(volatile unsigned char *);
uint16_t atomic_dec_16_nv(volatile uint16_t *);
unsigned short atomic_dec_ushort_nv(volatile unsigned short *);
uint32_t atomic_dec_32_nv(volatile uint32_t *);
unsigned int atomic_dec_uint_nv(volatile unsigned int *);
unsigned long atomic_dec_ulong_nv(volatile unsigned long *);
void * atomic_dec_ptr_nv(volatile void *);
#if defined(__HAVE_ATOMIC64_OPS)
uint64_t atomic_dec_64_nv(volatile uint64_t *);
#endif
/*
 * Atomic INCREMENT
 */
void atomic_inc_8(volatile uint8_t *);
void atomic_inc_uchar(volatile unsigned char *);
void atomic_inc_16(volatile uint16_t *);
void atomic_inc_ushort(volatile unsigned short *);
void atomic_inc_32(volatile uint32_t *);
void atomic_inc_uint(volatile unsigned int *);
void atomic_inc_ulong(volatile unsigned long *);
void atomic_inc_ptr(volatile void *);
#if defined(__HAVE_ATOMIC64_OPS)
void atomic_inc_64(volatile uint64_t *);
#endif
uint8_t atomic_inc_8_nv(volatile uint8_t *);
unsigned char atomic_inc_uchar_nv(volatile unsigned char *);
uint16_t atomic_inc_16_nv(volatile uint16_t *);
unsigned short atomic_inc_ushort_nv(volatile unsigned short *);
uint32_t atomic_inc_32_nv(volatile uint32_t *);
unsigned int atomic_inc_uint_nv(volatile unsigned int *);
unsigned long atomic_inc_ulong_nv(volatile unsigned long *);
void * atomic_inc_ptr_nv(volatile void *);
#if defined(__HAVE_ATOMIC64_OPS)
uint64_t atomic_inc_64_nv(volatile uint64_t *);
#endif
#endif /* ! _SYS_ATOMIC_H_ */
The API is borrowed from Solaris. There is one major difference,
however. Solaris defines "cas" as COMPARE-AND-SWAP, returning the
previous value of the memory cell. I have defined it as COMPARE-AND-
STORE, returning true if the op succeeds, or false otherwise. The
reason is that some platforms will have extreme difficulty
implementing COMPARE-AND-SWAP, whereas COMPARE-AND-STORE is somewhat
easier for these platforms.
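To show how a caller copes without the returned value, here is a
rough sketch against the proposed interface (the bounded_inc() helper
and the <sys/atomic.h> include path are just illustration, not part
of the proposal): atomically increment a counter, but only while it
is below a limit, retrying whenever the compare-and-store loses a
race.

#include <stdbool.h>
#include <sys/atomic.h>	/* assumed install path for the proposed header */

/*
 * Sketch only: atomically increment *countp, but never past 'limit'.
 * Returns true if the increment was performed.  The loop re-reads the
 * cell and retries whenever the compare-and-store loses a race.
 */
static bool
bounded_inc(volatile unsigned int *countp, unsigned int limit)
{
	unsigned int old;

	do {
		old = *countp;
		if (old >= limit)
			return false;
	} while (!atomic_cas_uint(countp, old, old + 1));
	return true;
}

The only cost relative to a Solaris-style COMPARE-AND-SWAP is the
explicit re-read of the cell after a failed attempt.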
The documentation will note the following caveats:
1. 8-bit and 16-bit operations are to be used sparingly because, on
some (most?) platforms, they are extremely expensive compared to the
corresponding 32-bit (or 64-bit, if available) operation.
2. The "nv" versions of most functions return the new value of the
memory cell resulting from the operation. They should be used only
when the new value is actually required (e.g. when decrementing a
reference count and checking whether it has reached zero; see the
sketch after this list), because they may be significantly more
expensive than the plain version of the operation.
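As a sketch of the reference-count case in item 2 (struct obj,
o_refcnt, and obj_destroy() are hypothetical names, not part of the
API):

struct obj {
	volatile unsigned int o_refcnt;
	/* ... other object state ... */
};

static void obj_destroy(struct obj *);	/* hypothetical destructor */

/*
 * Sketch only: drop one reference; the value returned by the _nv
 * variant tells us whether ours was the last reference, in which
 * case the object is destroyed.
 */
static void
obj_release(struct obj *op)
{
	if (atomic_dec_uint_nv(&op->o_refcnt) == 0)
		obj_destroy(op);
}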
The same API will be available in kernel and user-space.
Comments appreciated.
I have ARM and Alpha implemented, along with a significant chunk of C
code that can be used by several platforms.
-- thorpej