stl_threads.h

00001 /*
00002  * Copyright (c) 1997-1999
00003  * Silicon Graphics Computer Systems, Inc.
00004  *
00005  * Permission to use, copy, modify, distribute and sell this software
00006  * and its documentation for any purpose is hereby granted without fee,
00007  * provided that the above copyright notice appear in all copies and
00008  * that both that copyright notice and this permission notice appear
00009  * in supporting documentation.  Silicon Graphics makes no
00010  * representations about the suitability of this software for any
00011  * purpose.  It is provided "as is" without express or implied warranty.
00012  */
00013 
00014 // WARNING: This is an internal header file, included by other C++
00015 // standard library headers.  You should not attempt to use this header
00016 // file directly.
00017 // Stl_config.h should be included before this file.
00018 
00019 #ifndef __SGI_STL_INTERNAL_THREADS_H
00020 #define __SGI_STL_INTERNAL_THREADS_H
00021 
00022 // Supported threading models are native SGI, pthreads, uithreads
00023 // (similar to pthreads, but based on an earlier draft of the Posix
00024 // threads standard), and Win32 threads.  Uithread support by Jochen
00025 // Schlick, 1999.
00026 
00027 #if defined(__STL_SGI_THREADS)
00028 #include <mutex.h>
00029 #include <time.h>
00030 #elif defined(__STL_PTHREADS)
00031 #include <pthread.h>
00032 #elif defined(__STL_UITHREADS)
00033 #include <thread.h>
00034 #include <synch.h>
00035 #elif defined(__STL_WIN32THREADS)
00036 #include <windows.h>
00037 #endif
00038 
00039 __STL_BEGIN_NAMESPACE
00040 
00041 
00042 // Class _Refcount_Base provides a type, _RC_t, a data member,
00043 // _M_ref_count, and member functions _M_incr and _M_decr, which perform
00044 // atomic preincrement/predecrement.  The constructor initializes 
00045 // _M_ref_count.
00046 
00047 // Hack for SGI o32 compilers.
00048 #if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
00049     (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
00050 #  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)  
00051 #  define __test_and_set(__l,__v)  test_and_set(__l,__v)
00052 #endif /* o32 */
00053 
00054 struct _Refcount_Base
00055 {
00056   // The type _RC_t
      // Win32 uses long because the Interlocked* primitives used below
      // operate on LONG; every other model uses a size_t-wide count.
00057 # ifdef __STL_WIN32THREADS
00058   typedef long _RC_t;
00059 # else
00060   typedef size_t _RC_t;
00061 #endif
00062   
00063   // The data member _M_ref_count
      // volatile: the count is read and written concurrently by
      // multiple threads.
00064    volatile _RC_t _M_ref_count;
00065 
00066   // Constructor
      // pthreads and uithreads have no portable atomic increment, so
      // those models carry a per-object mutex protecting _M_ref_count;
      // the other models (SGI, Win32, no-threads) need no extra state.
00067 # ifdef __STL_PTHREADS
00068   pthread_mutex_t _M_ref_count_lock;
00069   _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
00070     { pthread_mutex_init(&_M_ref_count_lock, 0); }
00071 # elif defined(__STL_UITHREADS)
00072   mutex_t         _M_ref_count_lock;
00073   _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
00074     { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
00075 # else
00076   _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
00077 # endif
00078 
00079   // _M_incr and _M_decr
      // _M_incr atomically pre-increments the count.  _M_decr
      // atomically pre-decrements it and returns the NEW value, so the
      // caller can compare the result against 0 to decide whether the
      // shared object should be destroyed.
00080 # ifdef __STL_SGI_THREADS
      // (size_t)-1 is all-ones; adding it decrements the unsigned count.
00081   void _M_incr() {  __add_and_fetch(&_M_ref_count, 1); }
00082   _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
00083 # elif defined (__STL_WIN32THREADS)
      // The casts drop volatile to match the Interlocked* prototypes;
      // the operations themselves are atomic.
00084    void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
00085   _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
00086 # elif defined(__STL_PTHREADS)
      // Mutex-protected read-modify-write; __tmp captures the new
      // value while the lock is still held.
00087   void _M_incr() {
00088     pthread_mutex_lock(&_M_ref_count_lock);
00089     ++_M_ref_count;
00090     pthread_mutex_unlock(&_M_ref_count_lock);
00091   }
00092   _RC_t _M_decr() {
00093     pthread_mutex_lock(&_M_ref_count_lock);
00094     volatile _RC_t __tmp = --_M_ref_count;
00095     pthread_mutex_unlock(&_M_ref_count_lock);
00096     return __tmp;
00097   }
00098 # elif defined(__STL_UITHREADS)
      // Same mutex-protected scheme using the uithreads API.
00099   void _M_incr() {
00100     mutex_lock(&_M_ref_count_lock);
00101     ++_M_ref_count;
00102     mutex_unlock(&_M_ref_count_lock);
00103   }
00104   _RC_t _M_decr() {
00105     mutex_lock(&_M_ref_count_lock);
00106     /*volatile*/ _RC_t __tmp = --_M_ref_count;
00107     mutex_unlock(&_M_ref_count_lock);
00108     return __tmp;
00109   }
00110 # else  /* No threads */
      // Single-threaded build: plain pre-increment/decrement suffice.
00111   void _M_incr() { ++_M_ref_count; }
00112   _RC_t _M_decr() { return --_M_ref_count; }
00113 # endif
00114 };
00115 
00116 // Atomic swap on unsigned long
00117 // This is guaranteed to behave as though it were atomic only if all
00118 // possibly concurrent updates use _Atomic_swap.
00119 // In some cases the operation is emulated with a lock.
      // One definition of _Atomic_swap is selected per threading model:
00120 # ifdef __STL_SGI_THREADS
      // IRIX: (__)test_and_set stores __q and returns the previous
      // value.  NOTE(review): this relies on the IRIX primitive being a
      // full atomic exchange, not merely set-to-nonzero -- confirm
      // against the <mutex.h> man page.
00121     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00122 #       if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
00123             return test_and_set(__p, __q);
00124 #       else
00125             return __test_and_set(__p, (unsigned long)__q);
00126 #       endif
00127     }
00128 # elif defined(__STL_WIN32THREADS)
      // Win32: InterlockedExchange is a hardware atomic swap.
00129     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00130         return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
00131     }
00132 # elif defined(__STL_PTHREADS)
00133     // We use a template here only to get a unique initialized instance.
      // (A static data member of a class template may be defined in a
      // header without violating the one-definition rule.)
00134     template<int __dummy>
00135     struct _Swap_lock_struct {
00136         static pthread_mutex_t _S_swap_lock;
00137     };
00138 
00139     template<int __dummy>
00140     pthread_mutex_t
00141     _Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;
00142 
00143     // This should be portable, but performance is expected
00144     // to be quite awful.  This really needs platform specific
00145     // code.
      // Swap emulated under one global mutex: all callers serialize on
      // _S_swap_lock, which is why performance is expected to be poor.
00146     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00147         pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
00148         unsigned long __result = *__p;
00149         *__p = __q;
00150         pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
00151         return __result;
00152     }
00153 # elif defined(__STL_UITHREADS)
00154     // We use a template here only to get a unique initialized instance.
00155     template<int __dummy>
00156     struct _Swap_lock_struct {
00157         static mutex_t _S_swap_lock;
00158     };
00159 
00160     template<int __dummy>
00161     mutex_t
00162     _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
00163 
00164     // This should be portable, but performance is expected
00165     // to be quite awful.  This really needs platform specific
00166     // code.
      // Same mutex-emulated swap, using the uithreads API.
00167     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00168         mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
00169         unsigned long __result = *__p;
00170         *__p = __q;
00171         mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
00172         return __result;
00173     }
00174 # elif defined (__STL_SOLARIS_THREADS)
00175     // any better solutions ?
00176     // We use a template here only to get a unique initialized instance.
00177     template<int __dummy>
00178     struct _Swap_lock_struct {
00179         static mutex_t _S_swap_lock;
00180     };
00181 
      // Compilers without working static template data get the
      // instance via the __DECLARE_INSTANCE workaround instead.
00182 # if ( __STL_STATIC_TEMPLATE_DATA > 0 )
00183     template<int __dummy>
00184     mutex_t
00185     _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
00186 #  else
00187     __DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock, 
00188                        =DEFAULTMUTEX);
00189 # endif /* ( __STL_STATIC_TEMPLATE_DATA > 0 ) */
00190 
00191     // This should be portable, but performance is expected
00192     // to be quite awful.  This really needs platform specific
00193     // code.
00194     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00195         mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
00196         unsigned long __result = *__p;
00197         *__p = __q;
00198         mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
00199         return __result;
00200     }
00201 # else
      // No threads: a plain, non-atomic swap is all that is needed.
00202     static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00203         unsigned long __result = *__p;
00204         *__p = __q;
00205         return __result;
00206     }
00207 # endif
00208 
00209 // Locking class.  Note that this class *does not have a constructor*.
00210 // It must be initialized either statically, with __STL_MUTEX_INITIALIZER,
00211 // or dynamically, by explicitly calling the _M_initialize member function.
00212 // (This is similar to the ways that a pthreads mutex can be initialized.)
00213 // There are explicit member functions for acquiring and releasing the lock.
00214 
00215 // There is no constructor because static initialization is essential for
00216 // some uses, and only a class aggregate (see section 8.5.1 of the C++
00217 // standard) can be initialized that way.  That means we must have no
00218 // constructors, no base classes, no virtual functions, and no private or
00219 // protected members.
00220 
00221 // Helper struct.  This is a workaround for various compilers that don't
00222 // handle static variables in inline functions properly.
00223 template <int __inst>
00224 struct _STL_mutex_spin {
00225   enum { __low_max = 30, __high_max = 1000 };
00226   // Low if we suspect uniprocessor, high for multiprocessor.
00227 
      // Shared adaptive-spin state used by _STL_mutex_lock::_M_acquire_lock:
      // __max is the current spin-iteration cap, __last the iteration
      // count at which a lock was most recently obtained by spinning.
      // They are static members of a template only so they can be
      // defined in this header.  Note the state is global across all
      // locks and updated without synchronization -- it is purely a
      // heuristic.
00228   static unsigned __max;
00229   static unsigned __last;
00230 };
00231 
00232 template <int __inst>
00233 unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;
00234 
00235 template <int __inst>
00236 unsigned _STL_mutex_spin<__inst>::__last = 0;
00237 
00238 struct _STL_mutex_lock
00239 {
00240 #if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
00241   // It should be relatively easy to get this to work on any modern Unix.
      // Hand-rolled adaptive spin lock built on _Atomic_swap:
      // _M_lock == 0 means free, 1 means held.
00242   volatile unsigned long _M_lock;
00243   void _M_initialize() { _M_lock = 0; }
      // Sleep for roughly 2**__log_nsec nanoseconds (callers cap
      // __log_nsec at 27).  Win32 Sleep() takes milliseconds, so
      // requests below ~2**20 ns degenerate to Sleep(0), i.e. a yield.
00244   static void _S_nsec_sleep(int __log_nsec) {
00245 #     ifdef __STL_SGI_THREADS
00246           struct timespec __ts;
00247           /* Max sleep is 2**27 nsec ~ 134 msec   */
00248           __ts.tv_sec = 0;
00249           __ts.tv_nsec = 1 << __log_nsec;
00250           nanosleep(&__ts, 0);
00251 #     elif defined(__STL_WIN32THREADS)
00252           if (__log_nsec <= 20) {
00253               Sleep(0);
00254           } else {
00255               Sleep(1 << (__log_nsec - 20));
00256           }
00257 #     else
00258 #       error unimplemented
00259 #     endif
00260   }
      // Acquire: uncontended fast-path swap, then bounded
      // test-and-test-and-set spinning with an adaptive cap
      // (_STL_mutex_spin<0>), finally exponential-backoff sleeping.
00261   void _M_acquire_lock() {
00262     volatile unsigned long* __lock = &this->_M_lock;
00263 
      // Fast path: old value 0 means we now own the lock.
00264     if (!_Atomic_swap((unsigned long*)__lock, 1)) {
00265       return;
00266     }
00267     unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
00268     unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
      // __junk exists only so the delay loop below is not optimized
      // away; its value is irrelevant.
00269     volatile unsigned __junk = 17;      // Value doesn't matter.
00270     unsigned __i;
00271     for (__i = 0; __i < __my_spin_max; __i++) {
      // Retry the (expensive) atomic swap only when the lock looks
      // free AND we have already spun at least half as long as the
      // last successful spin acquisition; otherwise just burn cycles.
00272       if (__i < __my_last_spins/2 || *__lock) {
00273         __junk *= __junk; __junk *= __junk;
00274         __junk *= __junk; __junk *= __junk;
00275         continue;
00276       }
00277       if (!_Atomic_swap((unsigned long*)__lock, 1)) {
00278         // got it!
00279         // Spinning worked.  Thus we're probably not being scheduled
00280         // against the other process with which we were contending.
00281         // Thus it makes sense to spin longer the next time.
00282         _STL_mutex_spin<0>::__last = __i;
00283         _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
00284         return;
00285       }
00286     }
00287     // We are probably being scheduled against the other process.  Sleep.
00288     _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
      // Back off exponentially: sleep 2**6, 2**7, ... ns between
      // attempts, capped at 2**27 ns.
00289     for (__i = 0 ;; ++__i) {
00290       int __log_nsec = __i + 6;
00291 
00292       if (__log_nsec > 27) __log_nsec = 27;
00293       if (!_Atomic_swap((unsigned long *)__lock, 1)) {
00294         return;
00295       }
00296       _S_nsec_sleep(__log_nsec);
00297     }
00298   }
      // Release: store 0, with whatever write-ordering the platform
      // offers (MIPS "sync" / __lock_release); the plain-store fallback
      // is acknowledged below as insufficient on some multiprocessors.
00299   void _M_release_lock() {
00300     volatile unsigned long* __lock = &_M_lock;
00301 #   if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
00302         asm("sync");
00303         *__lock = 0;
00304 #   elif defined(__STL_SGI_THREADS) && __mips >= 3 \
00305          && (defined (_ABIN32) || defined(_ABI64))
00306         __lock_release(__lock);
00307 #   else 
00308         *__lock = 0;
00309         // This is not sufficient on many multiprocessors, since
00310         // writes to protected variables and the lock may be reordered.
00311 #   endif
00312   }
00313 
00314 // We no longer use win32 critical sections.
00315 // They appear to be slower in the contention-free case,
00316 // and they appear difficult to initialize without introducing a race.
00317 
00318 #elif defined(__STL_PTHREADS)
      // Pthreads: delegate directly to a pthread mutex.
00319   pthread_mutex_t _M_lock;
00320   void _M_initialize()   { pthread_mutex_init(&_M_lock, NULL); }
00321   void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
00322   void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
00323 #elif defined(__STL_UITHREADS)
      // UI threads: delegate to a Solaris/uithreads mutex.
00324   mutex_t _M_lock;
00325   void _M_initialize()   { mutex_init(&_M_lock, USYNC_THREAD, 0); }
00326   void _M_acquire_lock() { mutex_lock(&_M_lock); }
00327   void _M_release_lock() { mutex_unlock(&_M_lock); }
00328 #else /* No threads */
      // Single-threaded build: locking is a no-op.
00329   void _M_initialize()   {}
00330   void _M_acquire_lock() {}
00331   void _M_release_lock() {}
00332 #endif
00333 };
00334 
00335 #ifdef __STL_PTHREADS
00336 // Pthreads locks must be statically initialized to something other than
00337 // the default value of zero.
00338 #   define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
00339 #elif defined(__STL_UITHREADS)
00340 // UIthreads locks must be statically initialized to something other than
00341 // the default value of zero.
00342 #   define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
00343 #elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
00344 #   define __STL_MUTEX_INITIALIZER = { 0 }
00345 #else
00346 #   define __STL_MUTEX_INITIALIZER
00347 #endif
00348 
00349 
00350 // A locking class that uses _STL_mutex_lock.  The constructor takes a
00351 // reference to an _STL_mutex_lock, and acquires a lock.  The
00352 // destructor releases the lock.  It's not clear that this is exactly
00353 // the right functionality.  It will probably change in the future.
00354 
00355 struct _STL_auto_lock
00356 {
00357   _STL_mutex_lock& _M_lock;
00358   
00359   _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
00360     { _M_lock._M_acquire_lock(); }
00361   ~_STL_auto_lock() { _M_lock._M_release_lock(); }
00362 
00363 private:
00364   void operator=(const _STL_auto_lock&);
00365   _STL_auto_lock(const _STL_auto_lock&);
00366 };
00367 
00368 __STL_END_NAMESPACE
00369 
00370 #endif /* __SGI_STL_INTERNAL_THREADS_H */
00371 
00372 // Local Variables:
00373 // mode:C++
00374 // End:

Generated on Mon Jun 5 10:20:44 2006 for Intelligence.kdevelop by  doxygen 1.4.6