mutex.h
- /*-
- * See the file LICENSE for redistribution information.
- *
- * Copyright (c) 1996-2002
- * Sleepycat Software. All rights reserved.
- *
- * $Id: mutex.h,v 11.71 2002/09/10 01:36:48 bostic Exp $
- */
- #ifndef _DB_MUTEX_H_
- #define _DB_MUTEX_H_
- /*
- * Some of the Berkeley DB ports require single-threading at various
- * places in the code. In those cases, these #defines will be set.
- */
- #define DB_BEGIN_SINGLE_THREAD
- #define DB_END_SINGLE_THREAD
- /*********************************************************************
- * POSIX.1 pthreads interface.
- *********************************************************************/
- #ifdef HAVE_MUTEX_PTHREADS
- #include <pthread.h>
- #define MUTEX_FIELDS                                                  \
-     pthread_mutex_t mutex;      /* Mutex. */                          \
-     pthread_cond_t cond;        /* Condition variable. */
- #endif
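- /*
-  * [Editor's illustration, not part of the original header.]  The pthread
-  * port pairs a condition variable with each mutex because self-blocking
-  * mutexes (see MUTEX_SELF_BLOCK below) need a way for a thread of control
-  * to block until another thread releases the lock.  A minimal sketch of
-  * how a process-shared mutex/condvar pair like the one above is typically
-  * created; it uses only <pthread.h> interfaces and is guarded out of
-  * compilation because it is not DB's actual pthread mutex code.
-  */
- #if 0
- static int
- example_pthread_pair_init(pthread_mutex_t *mutexp, pthread_cond_t *condp)
- {
-     pthread_mutexattr_t mattr;
-     pthread_condattr_t cattr;
-     int ret;
-
-     /* Mark both objects process-shared so they work in a shared region. */
-     if ((ret = pthread_mutexattr_init(&mattr)) != 0 ||
-         (ret = pthread_mutexattr_setpshared(&mattr,
-         PTHREAD_PROCESS_SHARED)) != 0 ||
-         (ret = pthread_mutex_init(mutexp, &mattr)) != 0)
-         return (ret);
-     if ((ret = pthread_condattr_init(&cattr)) != 0 ||
-         (ret = pthread_condattr_setpshared(&cattr,
-         PTHREAD_PROCESS_SHARED)) != 0 ||
-         (ret = pthread_cond_init(condp, &cattr)) != 0)
-         return (ret);
-     return (0);
- }
- #endif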
- /*********************************************************************
- * Solaris lwp threads interface.
- *
- * !!!
- * We use LWP mutexes on Solaris instead of UI or POSIX mutexes (both of
- * which are available), for two reasons. First, the Solaris C library
- * includes versions of both the UI and POSIX thread mutex interfaces, but
- * they are broken in that they don't support inter-process locking, and
- * there's no way to detect it, e.g., calls to configure the mutexes for
- * inter-process locking succeed without error. So, we use LWP mutexes so
- * that we don't fail in fairly undetectable ways because the application
- * wasn't linked with the appropriate threads library. Second, there were
- * bugs in SunOS 5.7 (Solaris 7) where if an application loaded the C library
- * before loading the libthread/libpthread threads libraries (e.g., by using
- * dlopen to load the DB library), the pwrite64 interface would be translated
- * into a call to pwrite and DB would drop core.
- *********************************************************************/
- #ifdef HAVE_MUTEX_SOLARIS_LWP
- /*
- * XXX
- * Don't change <synch.h> to <sys/lwp.h> -- although lwp.h is listed in the
- * Solaris manual page as the correct include to use, it causes the Solaris
- * compiler on SunOS 2.6 to fail.
- */
- #include <synch.h>
- #define MUTEX_FIELDS                                                  \
-     lwp_mutex_t mutex;          /* Mutex. */                          \
-     lwp_cond_t cond;            /* Condition variable. */
- #endif
- /*********************************************************************
- * Solaris/Unixware threads interface.
- *********************************************************************/
- #ifdef HAVE_MUTEX_UI_THREADS
- #include <thread.h>
- #include <synch.h>
- #define MUTEX_FIELDS                                                  \
-     mutex_t mutex;              /* Mutex. */                          \
-     cond_t cond;                /* Condition variable. */
- #endif
- /*********************************************************************
- * AIX C library functions.
- *********************************************************************/
- #ifdef HAVE_MUTEX_AIX_CHECK_LOCK
- #include <sys/atomic_op.h>
- typedef int tsl_t;
- #define MUTEX_ALIGN sizeof(int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) 0
- #define MUTEX_SET(x) (!_check_lock(x, 0, 1))
- #define MUTEX_UNSET(x) _clear_lock(x, 0)
- #endif
- #endif
- /*********************************************************************
- * General C library functions (msemaphore).
- *
- * !!!
- * Check for HPPA as a special case, because it requires unusual alignment,
- * and doesn't support semaphores in malloc(3) or shmget(2) memory.
- *
- * !!!
- * Do not remove the MSEM_IF_NOWAIT flag. The problem is that if a single
- * process makes two msem_lock() calls in a row, the second one returns an
- * error. We depend on the fact that we can lock against ourselves in the
- * locking subsystem, where we set up a mutex so that we can block ourselves.
- * Tested on OSF1 v4.0.
- *********************************************************************/
- #ifdef HAVE_MUTEX_HPPA_MSEM_INIT
- #define MUTEX_NO_MALLOC_LOCKS
- #define MUTEX_NO_SHMGET_LOCKS
- #define MUTEX_ALIGN 16
- #endif
- #if defined(HAVE_MUTEX_MSEM_INIT) || defined(HAVE_MUTEX_HPPA_MSEM_INIT)
- #include <sys/mman.h>
- typedef msemaphore tsl_t;
- #ifndef MUTEX_ALIGN
- #define MUTEX_ALIGN sizeof(int)
- #endif
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) (msem_init(x, MSEM_UNLOCKED) <= (msemaphore *)0)
- #define MUTEX_SET(x) (!msem_lock(x, MSEM_IF_NOWAIT))
- #define MUTEX_UNSET(x) msem_unlock(x, 0)
- #endif
- #endif
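- /*
-  * [Editor's illustration, not part of the original header.]  Because of
-  * MSEM_IF_NOWAIT above, MUTEX_SET is a try-lock: when the semaphore is
-  * already held (even by the caller itself), msem_lock() fails immediately
-  * instead of blocking or reporting a self-deadlock, and the caller decides
-  * how to wait.  A sketch of a hypothetical caller:
-  */
- #if 0
- static void
- example_wait_for_lock(tsl_t *tsl)
- {
-     while (!MUTEX_SET(tsl)) {
-         /* Busy: back off here (DB's TAS code spins, then yields). */
-         continue;
-     }
- }
- #endif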
- /*********************************************************************
- * Plan 9 library functions.
- *********************************************************************/
- #ifdef HAVE_MUTEX_PLAN9
- typedef Lock tsl_t;
- #define MUTEX_ALIGN sizeof(int)
- #define MUTEX_INIT(x) (memset(x, 0, sizeof(Lock)), 0)
- #define MUTEX_SET(x) canlock(x)
- #define MUTEX_UNSET(x) unlock(x)
- #endif
- /*********************************************************************
- * Reliant UNIX C library functions.
- *********************************************************************/
- #ifdef HAVE_MUTEX_RELIANTUNIX_INITSPIN
- #include <ulocks.h>
- typedef spinlock_t tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) (initspin(x, 1), 0)
- #define MUTEX_SET(x) (cspinlock(x) == 0)
- #define MUTEX_UNSET(x) spinunlock(x)
- #endif
- #endif
- /*********************************************************************
- * General C library functions (POSIX 1003.1 sema_XXX).
- *
- * !!!
- * Never selected by autoconfig in this release (semaphore calls are known
- * to not work in Solaris 5.5).
- *********************************************************************/
- #ifdef HAVE_MUTEX_SEMA_INIT
- #include <synch.h>
- typedef sema_t tsl_t;
- #define MUTEX_ALIGN sizeof(int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_DESTROY(x) sema_destroy(x)
- #define MUTEX_INIT(x) (sema_init(x, 1, USYNC_PROCESS, NULL) != 0)
- #define MUTEX_SET(x) (sema_wait(x) == 0)
- #define MUTEX_UNSET(x) sema_post(x)
- #endif
- #endif
- /*********************************************************************
- * SGI C library functions.
- *********************************************************************/
- #ifdef HAVE_MUTEX_SGI_INIT_LOCK
- #include <abi_mutex.h>
- typedef abilock_t tsl_t;
- #define MUTEX_ALIGN sizeof(int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) (init_lock(x) != 0)
- #define MUTEX_SET(x) (!acquire_lock(x))
- #define MUTEX_UNSET(x) release_lock(x)
- #endif
- #endif
- /*********************************************************************
- * Solaris C library functions.
- *
- * !!!
- * These are undocumented functions, but they're the only ones that work
- * correctly as far as we know.
- *********************************************************************/
- #ifdef HAVE_MUTEX_SOLARIS_LOCK_TRY
- #include <sys/machlock.h>
- typedef lock_t tsl_t;
- #define MUTEX_ALIGN sizeof(int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) 0
- #define MUTEX_SET(x) _lock_try(x)
- #define MUTEX_UNSET(x) _lock_clear(x)
- #endif
- #endif
- /*********************************************************************
- * VMS.
- *********************************************************************/
- #ifdef HAVE_MUTEX_VMS
- #include <sys/mman.h>
- #include <builtins.h>
- typedef unsigned char tsl_t;
- #define MUTEX_ALIGN sizeof(unsigned int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #ifdef __ALPHA
- #define MUTEX_SET(tsl) (!__TESTBITSSI(tsl, 0))
- #else /* __VAX */
- #define MUTEX_SET(tsl) (!(int)_BBSSI(0, tsl))
- #endif
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * VxWorks
- * Use basic binary semaphores in VxWorks, as we currently do not need
- * any special features. We do need the ability to single-thread the
- * entire system, however, because VxWorks doesn't support the open(2)
- * flag O_EXCL, the mechanism we normally use to single-thread access
- * when we're first looking for a DB environment.
- *********************************************************************/
- #ifdef HAVE_MUTEX_VXWORKS
- #include "taskLib.h"
- typedef SEM_ID tsl_t;
- #define MUTEX_ALIGN sizeof(unsigned int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_SET(tsl) (semTake((*tsl), WAIT_FOREVER) == OK)
- #define MUTEX_UNSET(tsl) (semGive((*tsl)))
- #define MUTEX_INIT(tsl)                                               \
-     ((*(tsl) = semBCreate(SEM_Q_FIFO, SEM_FULL)) == NULL)
- #define MUTEX_DESTROY(tsl) semDelete(*tsl)
- #endif
- /*
- * Use the taskLock() mutex to eliminate a race where two tasks are
- * trying to initialize the global lock at the same time.
- */
- #undef DB_BEGIN_SINGLE_THREAD
- #define DB_BEGIN_SINGLE_THREAD                                        \
- do {                                                                  \
-     if (DB_GLOBAL(db_global_init))                                    \
-         (void)semTake(DB_GLOBAL(db_global_lock), WAIT_FOREVER);       \
-     else {                                                            \
-         taskLock();                                                   \
-         if (DB_GLOBAL(db_global_init)) {                              \
-             taskUnlock();                                             \
-             (void)semTake(DB_GLOBAL(db_global_lock),                  \
-                 WAIT_FOREVER);                                        \
-             continue;                                                 \
-         }                                                             \
-         DB_GLOBAL(db_global_lock) =                                   \
-             semBCreate(SEM_Q_FIFO, SEM_EMPTY);                        \
-         if (DB_GLOBAL(db_global_lock) != NULL)                        \
-             DB_GLOBAL(db_global_init) = 1;                            \
-         taskUnlock();                                                 \
-     }                                                                 \
- } while (DB_GLOBAL(db_global_init) == 0)
- #undef DB_END_SINGLE_THREAD
- #define DB_END_SINGLE_THREAD (void)semGive(DB_GLOBAL(db_global_lock))
- #endif
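- /*
-  * [Editor's illustration, not part of the original header.]  A sketch of
-  * how the single-threading brackets above are meant to be used when first
-  * probing for or creating a DB environment (the step that open(2)/O_EXCL
-  * would normally serialize on other systems):
-  */
- #if 0
-     DB_BEGIN_SINGLE_THREAD;
-     /* ... look for / create the environment region here ... */
-     DB_END_SINGLE_THREAD;
- #endif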
- /*********************************************************************
- * Win16
- *
- * Win16 spinlocks are simple because we cannot possibly be preempted.
- *
- * !!!
- * We should simplify this by always returning a no-need-to-lock lock
- * when we initialize the mutex.
- *********************************************************************/
- #ifdef HAVE_MUTEX_WIN16
- typedef unsigned int tsl_t;
- #define MUTEX_ALIGN sizeof(unsigned int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) 0
- #define MUTEX_SET(tsl) (*(tsl) = 1)
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #endif
- #endif
- /*********************************************************************
- * Win32
- *********************************************************************/
- #ifdef HAVE_MUTEX_WIN32
- #define MUTEX_FIELDS                                                  \
-     LONG tas;                                                         \
-     LONG nwaiters;                                                    \
-     union {                                                           \
-         HANDLE event;           /* Windows event HANDLE for wakeups */\
-         u_int32_t id;           /* ID used for shared mutexes */      \
-     } /* anonymous */;
- #if defined(LOAD_ACTUAL_MUTEX_CODE)
- #define MUTEX_SET(tsl) (!InterlockedExchange((PLONG)tsl, 1))
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * 68K/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_68K_GCC_ASSEMBLY
- typedef unsigned char tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * For gcc/68K, 0 is clear, 1 is set.
- */
- #define MUTEX_SET(tsl) ({                                             \
-     register tsl_t *__l = (tsl);                                      \
-     int __r;                                                          \
-     asm volatile("tas %1;\n"                                          \
-         "seq %0"                                                      \
-         : "=dm" (__r), "=m" (*__l)                                    \
-         : "1" (*__l)                                                  \
-         );                                                            \
-     __r & 1;                                                          \
- })
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * ALPHA/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_ALPHA_GCC_ASSEMBLY
- typedef u_int32_t tsl_t;
- #define MUTEX_ALIGN 4
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * For gcc/alpha. Should return 0 if could not acquire the lock, 1 if
- * lock was acquired properly.
- */
- #ifdef __GNUC__
- static inline int
- MUTEX_SET(tsl_t *tsl) {
- register tsl_t *__l = tsl;
- register tsl_t __r;
-     asm volatile(
-         "1:  ldl_l %0,%2\n"
-         "    blbs %0,2f\n"
-         "    or $31,1,%0\n"
-         "    stl_c %0,%1\n"
-         "    beq %0,3f\n"
-         "    mb\n"
-         "    br 3f\n"
-         "2:  xor %0,%0\n"
-         "3:"
-         : "=&r"(__r), "=m"(*__l) : "1"(*__l) : "memory");
- return __r;
- }
- /*
- * Unset mutex. Judging by the Alpha Architecture Handbook, the mb instruction
- * might be necessary before unlocking.
- */
- static inline int
- MUTEX_UNSET(tsl_t *tsl) {
-     asm volatile("mb\n");
- return *tsl = 0;
- }
- #endif
- #ifdef __DECC
- #include <alpha/builtins.h>
- #define MUTEX_SET(tsl) (__LOCK_LONG_RETRY((tsl), 1) != 0)
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #endif
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * ARM/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_ARM_GCC_ASSEMBLY
- typedef unsigned char tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * For arm/gcc, 0 is clear, 1 is set.
- */
- #define MUTEX_SET(tsl) ({                                             \
-     int __r;                                                          \
-     asm volatile("swpb %0, %1, [%2]"                                  \
-         : "=r" (__r)                                                  \
-         : "0" (1), "r" (tsl)                                          \
-         : "memory"                                                    \
-         );                                                            \
-     __r ^ 1;    /* swpb returned the old value; 0 means we got it. */ \
- })
- #define MUTEX_UNSET(tsl) (*(volatile tsl_t *)(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * HPPA/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_HPPA_GCC_ASSEMBLY
- typedef u_int32_t tsl_t;
- #define MUTEX_ALIGN 16
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * The PA-RISC has a "load and clear" instead of a "test and set" instruction.
- * The 32-bit word used by that instruction must be 16-byte aligned. We could
- * use the "aligned" attribute in GCC but that doesn't work for stack variables.
- */
- #define MUTEX_SET(tsl) ({                                             \
-     register tsl_t *__l = (tsl);                                      \
-     int __r;                                                          \
-     asm volatile("ldcws 0(%1),%0" : "=r" (__r) : "r" (__l));          \
-     __r & 1;                                                          \
- })
- #define MUTEX_UNSET(tsl) (*(tsl) = -1)
- #define MUTEX_INIT(tsl) (MUTEX_UNSET(tsl), 0)
- #endif
- #endif
- /*********************************************************************
- * IA64/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_IA64_GCC_ASSEMBLY
- typedef unsigned char tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * For gcc/ia64, 0 is clear, 1 is set.
- */
- #define MUTEX_SET(tsl) ({                                             \
-     register tsl_t *__l = (tsl);                                      \
-     long __r;                                                         \
-     asm volatile("xchg1 %0=%1,%3" : "=r"(__r), "=m"(*__l) : "1"(*__l), "r"(1)); \
-     __r ^ 1;                                                          \
- })
- /*
- * Store through a "volatile" pointer so we get a store with "release"
- * semantics.
- */
- #define MUTEX_UNSET(tsl) (*(volatile unsigned char *)(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * PowerPC/gcc assembly.
- *********************************************************************/
- #if defined(HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY) ||                   \
-     defined(HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY)
- typedef u_int32_t tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * The PowerPC does a sort of pseudo-atomic locking. You set up a
- * 'reservation' on a chunk of memory containing a mutex by loading the
- * mutex value with LWARX. If the mutex has an 'unlocked' (arbitrary)
- * value, you then try storing into it with STWCX. If no other process or
- * thread broke your 'reservation' by modifying the memory containing the
- * mutex, then the STWCX succeeds; otherwise it fails and you try to get
- * a reservation again.
- *
- * While mutexes are explicitly 4 bytes, a 'reservation' applies to an
- * entire cache line, normally 32 bytes, aligned naturally. If the mutex
- * lives near data that gets changed a lot, there's a chance that you'll
- * see more broken reservations than you might otherwise. The only
- * situation in which this might be a problem is if one processor is
- * beating on a variable in the same cache block as the mutex while another
- * processor tries to acquire the mutex. That's bad news regardless
- * because of the way it bashes caches, but if you can't guarantee that a
- * mutex will reside in a relatively quiescent cache line, you might
- * consider padding the mutex to force it to live in a cache line by
- * itself. No, you aren't guaranteed that cache lines are 32 bytes. Some
- * embedded processors use 16-byte cache lines, while some 64-bit
- * processors use 128-byte cache lines. But assuming a 32-byte cache line
- * won't get you into trouble for now.
- *
- * If mutex locking is a bottleneck, then you can speed it up by adding a
- * regular LWZ load before the LWARX load, so that you can test for the
- * common case of a locked mutex without wasting cycles making a reservation.
- *
- * 'set' mutexes have the value 1, like on Intel; the returned value from
- * MUTEX_SET() is 1 if the mutex previously had its low bit clear, 0 otherwise.
- *
- * Mutexes on Mac OS X work the same way as the standard PowerPC version, but
- * the assembler syntax is subtly different -- the standard PowerPC version
- * assembles but doesn't work correctly. This version makes (unnecessary?)
- * use of a stupid linker trick: __db_mutex_tas_dummy is never called, but the
- * ___db_mutex_set label is used as a function name.
- */
- #ifdef HAVE_MUTEX_PPC_APPLE_GCC_ASSEMBLY
- extern int __db_mutex_set __P((volatile tsl_t *));
- void
- __db_mutex_tas_dummy()
- {
-     __asm__ __volatile__(
-         ".globl ___db_mutex_set\n"
-         "___db_mutex_set:\n"
-         "    lwarx r5,0,r3\n"
-         "    cmpwi r5,0\n"
-         "    bne fail\n"
-         "    addi r5,r5,1\n"
-         "    stwcx. r5,0,r3\n"
-         "    beq success\n"
-         "fail:\n"
-         "    li r3,0\n"
-         "    blr\n"
-         "success:\n"
-         "    li r3,1\n"
-         "    blr");
- }
- #define MUTEX_SET(tsl) __db_mutex_set(tsl)
- #endif
- #ifdef HAVE_MUTEX_PPC_GENERIC_GCC_ASSEMBLY
- #define MUTEX_SET(tsl) ({                                             \
-     int __one = 1;                                                    \
-     int __r;                                                          \
-     tsl_t *__l = (tsl);                                               \
-     asm volatile (                                                    \
-         "0:\n"                                                        \
-         "    lwarx %0,0,%1;\n"                                        \
-         "    cmpwi %0,0;\n"                                           \
-         "    bne 1f;\n"                                               \
-         "    stwcx. %2,0,%1;\n"                                       \
-         "    bne- 0b;\n"                                              \
-         "1:"                                                          \
-         : "=&r" (__r)                                                 \
-         : "r" (__l), "r" (__one));                                    \
-     !(__r & 1);                                                       \
- })
- #endif
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
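- /*
-  * [Editor's illustration, not part of the original header.]  The comment
-  * above suggests testing the mutex with an ordinary load (an lwz) before
-  * paying for a lwarx/stwcx. reservation.  Expressed at the C level with a
-  * hypothetical wrapper macro (the plain dereference compiles to an lwz):
-  */
- #if 0
- #define MUTEX_SET_WITH_PRECHECK(tsl)                                  \
-     (*(volatile tsl_t *)(tsl) == 0 && MUTEX_SET(tsl))
- #endif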
- /*********************************************************************
- * S/390 32-bit assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_S390_GCC_ASSEMBLY
- typedef int tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * For gcc/S390, 0 is clear, 1 is set.
- */
- static inline int
- MUTEX_SET(tsl_t *tsl) {
- register tsl_t *__l = (tsl);
- int __r;
-     asm volatile(
-         "    la 1,%1\n"
-         "    lhi 0,1\n"
-         "    l %0,%1\n"
-         "0:  cs %0,0,0(1)\n"
-         "    jl 0b"
-         : "=&d" (__r), "+m" (*__l)
-         : : "0", "1", "cc");
- return !__r;
- }
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * SCO/cc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_SCO_X86_CC_ASSEMBLY
- typedef unsigned char tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * UnixWare has threads in libthread, but OpenServer doesn't (yet).
- *
- * For cc/x86, 0 is clear, 1 is set.
- */
- #if defined(__USLC__)
- asm int
- _tsl_set(void *tsl)
- {
- %mem tsl
- movl tsl, %ecx
- movl $1, %eax
- lock
- xchgb (%ecx),%al
- xorl $1,%eax
- }
- #endif
- #define MUTEX_SET(tsl) _tsl_set(tsl)
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * Sparc/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_SPARC_GCC_ASSEMBLY
- typedef unsigned char tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- *
- * The ldstub instruction takes the location specified by its first argument
- * (a register containing a memory address) and loads its contents into its
- * second argument (a register) and atomically sets the contents of the location
- * specified by its first argument to a byte of 1s. (The value in the second
- * argument is never read, but only overwritten.)
- *
- * The stbar is needed for v8, and is implemented as membar #sync on v9,
- * so is functional there as well. For v7, stbar may generate an illegal
- * instruction and we have no way to tell what we're running on. Some
- * operating systems notice and skip this instruction in the fault handler.
- *
- * For gcc/sparc, 0 is clear, 1 is set.
- */
- #define MUTEX_SET(tsl) ({                                             \
-     register tsl_t *__l = (tsl);                                      \
-     register tsl_t __r;                                               \
-     __asm__ volatile                                                  \
-         ("ldstub [%1],%0; stbar"                                      \
-         : "=r" (__r) : "r" (__l));                                    \
-     !__r;                                                             \
- })
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*********************************************************************
- * UTS/cc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_UTS_CC_ASSEMBLY
- typedef int tsl_t;
- #define MUTEX_ALIGN sizeof(int)
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #define MUTEX_INIT(x) 0
- #define MUTEX_SET(x) (!uts_lock(x, 1))
- #define MUTEX_UNSET(x) (*(x) = 0)
- #endif
- #endif
- /*********************************************************************
- * x86/gcc assembly.
- *********************************************************************/
- #ifdef HAVE_MUTEX_X86_GCC_ASSEMBLY
- typedef unsigned char tsl_t;
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- /*
- * For gcc/x86, 0 is clear, 1 is set.
- */
- #define MUTEX_SET(tsl) ({                                             \
-     register tsl_t *__l = (tsl);                                      \
-     int __r;                                                          \
-     asm volatile("movl $1,%%eax; lock; xchgb %1,%%al; xorl $1,%%eax"  \
-         : "=&a" (__r), "=m" (*__l)                                    \
-         : "1" (*__l)                                                  \
-         );                                                            \
-     __r & 1;                                                          \
- })
- #define MUTEX_UNSET(tsl) (*(tsl) = 0)
- #define MUTEX_INIT(tsl) MUTEX_UNSET(tsl)
- #endif
- #endif
- /*
- * Mutex alignment defaults to one byte.
- *
- * !!!
- * Various systems require different alignments for mutexes (the worst we've
- * seen so far is 16 bytes on some HP architectures). Malloc(3) is assumed
- * to return reasonable alignment, all other mutex users must ensure proper
- * alignment locally.
- */
- #ifndef MUTEX_ALIGN
- #define MUTEX_ALIGN 1
- #endif
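- /*
-  * [Editor's illustration, not part of the original header.]  One way a
-  * caller can "ensure proper alignment locally" is to round a raw region
-  * address up to the next MUTEX_ALIGN boundary before placing the mutex.
-  * The helper name is hypothetical; uintptr_t comes from <stdint.h>, and
-  * the arithmetic assumes MUTEX_ALIGN is a power of two, which holds for
-  * every value assigned above.
-  */
- #if 0
- #define EXAMPLE_MUTEX_ALIGN_UP(p)                                     \
-     ((void *)(((uintptr_t)(p) + MUTEX_ALIGN - 1) &                    \
-         ~((uintptr_t)MUTEX_ALIGN - 1)))
- #endif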
- /*
- * Mutex destruction defaults to a no-op.
- */
- #ifdef LOAD_ACTUAL_MUTEX_CODE
- #ifndef MUTEX_DESTROY
- #define MUTEX_DESTROY(x)
- #endif
- #endif
- /*
- * !!!
- * These defines are separated into the u_int8_t flags stored in the
- * mutex below, and the 32-bit flags passed to __db_mutex_setup.
- * They must coexist without overlapping. Flags to __db_mutex_setup are:
- *
- * MUTEX_ALLOC - Use when the mutex to initialize needs to be allocated.
- * The 'ptr' arg to __db_mutex_setup should be a DB_MUTEX ** whenever
- * you use this flag. If this flag is not set, the 'ptr' arg is
- * a DB_MUTEX *.
- * MUTEX_NO_RECORD - Explicitly do not record the mutex in the region.
- * Otherwise the mutex will be recorded by default. If you set
- * this you need to understand why you don't need it recorded. The
- * *only* ones not recorded are those that are part of region structures
- * that only get destroyed when the regions are destroyed.
- * MUTEX_NO_RLOCK - Explicitly do not lock the given region otherwise
- * the region will be locked by default.
- * MUTEX_SELF_BLOCK - Set if self blocking mutex.
- * MUTEX_THREAD - Set if mutex is a thread-only mutex.
- */
- #define MUTEX_IGNORE 0x001 /* Ignore, no lock required. */
- #define MUTEX_INITED 0x002 /* Mutex is successfully initialized. */
- #define MUTEX_MPOOL 0x004 /* Allocated from mpool. */
- #define MUTEX_SELF_BLOCK 0x008 /* Must block self. */
- /* Flags only, may be larger than 0xff. */
- #define MUTEX_ALLOC 0x00000100 /* Allocate and init a mutex */
- #define MUTEX_NO_RECORD 0x00000200 /* Do not record lock */
- #define MUTEX_NO_RLOCK 0x00000400 /* Do not acquire region lock */
- #define MUTEX_THREAD 0x00000800 /* Thread-only mutex. */
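- /*
-  * [Editor's illustration, not part of the original header.]  A sketch of
-  * how the two flag spaces above combine in a call to __db_mutex_setup;
-  * the variable names and exact argument order are assumed, not taken from
-  * this header.  Per the comment above, MUTEX_ALLOC makes the 'ptr'
-  * argument a DB_MUTEX **, the low (u_int8_t-sized) bit MUTEX_SELF_BLOCK
-  * describes the mutex itself, and the high bits only direct the setup.
-  */
- #if 0
-     DB_MUTEX *mutexp;
-     int ret;
-
-     ret = __db_mutex_setup(dbenv, infop, &mutexp,
-         MUTEX_ALLOC | MUTEX_SELF_BLOCK | MUTEX_THREAD);
- #endif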
- /* Mutex. */
- struct __mutex_t {
- #ifdef HAVE_MUTEX_THREADS
- #ifdef MUTEX_FIELDS
- MUTEX_FIELDS
- #else
- tsl_t tas; /* Test and set. */
- #endif
- u_int32_t spins; /* Spins before block. */
- u_int32_t locked; /* !0 if locked. */
- #else
- u_int32_t off; /* Byte offset to lock. */
- u_int32_t pid; /* Lock holder: 0 or process pid. */
- #endif
- u_int32_t mutex_set_wait; /* Granted after wait. */
- u_int32_t mutex_set_nowait; /* Granted without waiting. */
- u_int32_t mutex_set_spin; /* Granted without spinning. */
- u_int32_t mutex_set_spins; /* Total number of spins. */
- #ifdef HAVE_MUTEX_SYSTEM_RESOURCES
- roff_t reg_off; /* Shared lock info offset. */
- #endif
- u_int8_t flags; /* MUTEX_XXX */
- };
- /* Redirect calls to the correct functions. */
- #ifdef HAVE_MUTEX_THREADS
- #if defined(HAVE_MUTEX_PTHREADS) ||                                   \
-     defined(HAVE_MUTEX_SOLARIS_LWP) ||                                \
-     defined(HAVE_MUTEX_UI_THREADS)
- #define __db_mutex_init_int(a, b, c, d) __db_pthread_mutex_init(a, b, d)
- #define __db_mutex_lock(a, b) __db_pthread_mutex_lock(a, b)
- #define __db_mutex_unlock(a, b) __db_pthread_mutex_unlock(a, b)
- #define __db_mutex_destroy(a) __db_pthread_mutex_destroy(a)
- #elif defined(HAVE_MUTEX_WIN32)
- #define __db_mutex_init_int(a, b, c, d) __db_win32_mutex_init(a, b, d)
- #define __db_mutex_lock(a, b) __db_win32_mutex_lock(a, b)
- #define __db_mutex_unlock(a, b) __db_win32_mutex_unlock(a, b)
- #define __db_mutex_destroy(a) __db_win32_mutex_destroy(a)
- #else
- #define __db_mutex_init_int(a, b, c, d) __db_tas_mutex_init(a, b, d)
- #define __db_mutex_lock(a, b) __db_tas_mutex_lock(a, b)
- #define __db_mutex_unlock(a, b) __db_tas_mutex_unlock(a, b)
- #define __db_mutex_destroy(a) __db_tas_mutex_destroy(a)
- #endif
- #else
- #define __db_mutex_init_int(a, b, c, d) __db_fcntl_mutex_init(a, b, c)
- #define __db_mutex_lock(a, b) __db_fcntl_mutex_lock(a, b)
- #define __db_mutex_unlock(a, b) __db_fcntl_mutex_unlock(a, b)
- #define __db_mutex_destroy(a) __db_fcntl_mutex_destroy(a)
- #endif
- /* Redirect system resource calls to correct functions */
- #ifdef HAVE_MUTEX_SYSTEM_RESOURCES
- #define __db_maintinit(a, b, c) __db_shreg_maintinit(a, b, c)
- #define __db_shlocks_clear(a, b, c) __db_shreg_locks_clear(a, b, c)
- #define __db_shlocks_destroy(a, b) __db_shreg_locks_destroy(a, b)
- #define __db_mutex_init(a, b, c, d, e, f)                             \
-     __db_shreg_mutex_init(a, b, c, d, e, f)
- #else
- #define __db_maintinit(a, b, c)
- #define __db_shlocks_clear(a, b, c)
- #define __db_shlocks_destroy(a, b)
- #define __db_mutex_init(a, b, c, d, e, f) __db_mutex_init_int(a, b, c, d)
- #endif
- /*
- * Lock/unlock a mutex. If the mutex was marked as uninteresting, the thread
- * of control can proceed without it.
- *
- * If the lock is for threads-only, then it was optionally not allocated and
- * file handles aren't necessary, as threaded applications aren't supported by
- * fcntl(2) locking.
- */
- #ifdef DIAGNOSTIC
- /*
- * XXX
- * We want to switch threads as often as possible. Yield every time
- * we get a mutex to ensure contention.
- */
- #define MUTEX_LOCK(dbenv, mp)                                         \
-     if (!F_ISSET((mp), MUTEX_IGNORE))                                 \
-         DB_ASSERT(__db_mutex_lock(dbenv, mp) == 0);                   \
-     if (F_ISSET(dbenv, DB_ENV_YIELDCPU))                              \
-         __os_yield(NULL, 1);
- #else
- #define MUTEX_LOCK(dbenv, mp)                                         \
-     if (!F_ISSET((mp), MUTEX_IGNORE))                                 \
-         (void)__db_mutex_lock(dbenv, mp);
- #endif
- #define MUTEX_UNLOCK(dbenv, mp)                                       \
-     if (!F_ISSET((mp), MUTEX_IGNORE))                                 \
-         (void)__db_mutex_unlock(dbenv, mp);
- #define MUTEX_THREAD_LOCK(dbenv, mp)                                  \
-     if (mp != NULL)                                                   \
-         MUTEX_LOCK(dbenv, mp)
- #define MUTEX_THREAD_UNLOCK(dbenv, mp)                                \
-     if (mp != NULL)                                                   \
-         MUTEX_UNLOCK(dbenv, mp)
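- /*
-  * [Editor's illustration, not part of the original header.]  Typical
-  * bracketing of a critical section with the macros above; "region" and
-  * "dbenv" are hypothetical.  If the mutex is flagged MUTEX_IGNORE the
-  * macros expand to no locking at all.
-  */
- #if 0
-     MUTEX_LOCK(dbenv, &region->mutex);
-     /* ... read or update shared region data ... */
-     MUTEX_UNLOCK(dbenv, &region->mutex);
- #endif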
- /*
- * We use a single file descriptor for fcntl(2) locking, and (generally) the
- * object's offset in a shared region as the byte that we're locking. So,
- * there's a (remote) possibility that two objects might have the same offsets
- * such that the locks could conflict, resulting in deadlock. To avoid this
- * possibility, we offset the region offset by a small integer value, using a
- * different offset for each subsystem's locks. Since all region objects are
- * suitably aligned, the offset guarantees that we don't collide with another
- * region's objects.
- */
- #define DB_FCNTL_OFF_GEN 0 /* Everything else. */
- #define DB_FCNTL_OFF_LOCK 1 /* Lock subsystem offset. */
- #define DB_FCNTL_OFF_MPOOL 2 /* Mpool subsystem offset. */
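- /*
-  * [Editor's illustration, not part of the original header.]  For fcntl(2)
-  * locking, the byte locked for a lock-subsystem object at region offset
-  * "off" would be off + DB_FCNTL_OFF_LOCK, as described above; the
-  * per-subsystem offset keeps two objects that happen to share an offset
-  * from colliding on the same byte.  The variables fd, off and ret are
-  * hypothetical.
-  */
- #if 0
-     struct flock fl;
-
-     memset(&fl, 0, sizeof(fl));
-     fl.l_type = F_WRLCK;
-     fl.l_whence = SEEK_SET;
-     fl.l_start = (off_t)(off + DB_FCNTL_OFF_LOCK);
-     fl.l_len = 1;
-     ret = fcntl(fd, F_SETLKW, &fl);
- #endif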
- #ifdef HAVE_MUTEX_SYSTEM_RESOURCES
- /*
- * When the underlying mutexes require library (most likely heap) or system
- * resources, we have to clean up when we discard mutexes (for the library
- * resources) and both when discarding mutexes and after application failure
- * (for the mutexes requiring system resources). This violates the rule that
- * we never look at a shared region after application failure, but we've no
- * other choice. In those cases, the #define HAVE_MUTEX_SYSTEM_RESOURCES is
- * set.
- *
- * To support mutex release after application failure, allocate thread-handle
- * mutexes in shared memory instead of in the heap. The number of slots we
- * allocate for this purpose isn't configurable, but this tends to be an issue
- * only on embedded systems where we don't expect large server applications.
- */
- #define DB_MAX_HANDLES 100 /* Mutex slots for handles. */
- #endif
- #endif /* !_DB_MUTEX_H_ */