mt_allocator.h

// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      __mt_alloc() throw()
      {
        // XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
        // XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>& obj) throw()
        {
          // XXX
        }

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }
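      // Illustrative sketch (not part of the original source):
      // construct() and destroy() below separate object lifetime from
      // raw storage, so a direct allocation cycle looks like
      //
      //   __gnu_cxx::__mt_alloc<int> a;
      //   int* p = a.allocate(1);   // raw storage for one int
      //   a.construct(p, 42);       // placement-new an int with value 42
      //   a.destroy(p);             // run the (trivial) destructor
      //   a.deallocate(p, 1);       // return the storage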
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct _Tune
      {
        // Alignment needed.
        // NB: In any case must be >= sizeof(_Block_record), that
        // is 4 on 32 bit machines and 8 on 64 bit machines.
        size_t _M_align;

        // Allocation requests (after round-up to power of 2) below
        // this value will be handled by the allocator. Raw new/delete
        // calls will be used for requests larger than this value.
        size_t _M_max_bytes;

        // Size in bytes of the smallest bin.
        // NB: Must be a power of 2 and >= _M_align.
        size_t _M_min_bin;

        // In order to avoid fragmentation and to minimize the number
        // of new() calls, we always request new memory using this
        // value. Based on previous discussions on the libstdc++
        // mailing list we have chosen the value below.
        // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
        size_t _M_chunk_size;

        // The maximum number of supported threads. For
        // single-threaded operation, use one. Maximum values will
        // vary depending on details of the underlying system. (For
        // instance, Linux 2.4.18 reports 4070 in
        // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
        // 65534.)
        size_t _M_max_threads;

        // Each time a deallocation occurs in a threaded application,
        // we make sure that there are no more than
        // _M_freelist_headroom % of used memory on the freelist. If
        // the number of additional records is more than
        // _M_freelist_headroom % of the freelist, we move these
        // records back to the global pool.
        size_t _M_freelist_headroom;

        // When set to true, all allocations are forced to use new().
        bool _M_force_new;

        explicit
        _Tune()
        : _M_align(8), _M_max_bytes(128), _M_min_bin(8),
          _M_chunk_size(4096 - 4 * sizeof(void*)),
          _M_max_threads(4096), _M_freelist_headroom(10),
          _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
        { }

        explicit
        _Tune(size_t __align, size_t __maxb, size_t __minbin,
              size_t __chunk, size_t __maxthreads, size_t __headroom,
              bool __force)
        : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
          _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
          _M_freelist_headroom(__headroom), _M_force_new(__force)
        { }
      };

      static const _Tune
      _S_get_options()
      { return _S_options; }

      static void
      _S_set_options(_Tune __t)
      {
        if (!_S_init)
          _S_options = __t;
      }
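      // Illustrative sketch (not part of the original source): since
      // _S_set_options is a no-op once _S_init is true, tuning must
      // happen before the allocator's first allocation, e.g.
      //
      //   typedef __gnu_cxx::__mt_alloc<int> alloc_type;
      //   alloc_type::_Tune opts = alloc_type::_S_get_options();
      //   alloc_type::_Tune custom(opts._M_align, 256 /* _M_max_bytes */,
      //                            opts._M_min_bin, opts._M_chunk_size,
      //                            opts._M_max_threads,
      //                            opts._M_freelist_headroom,
      //                            opts._M_force_new);
      //   alloc_type::_S_set_options(custom);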
    private:
      // We need to create the initial lists and set up some variables
      // before we can answer the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t _S_once;
#endif
      static bool _S_init;

      static void
      _S_initialize();

      // Configuration options.
      static _Tune _S_options;

      // Using unsigned short int as the type for the binmap implies
      // that we never cache blocks larger than 65535 bytes with this
      // allocator.
      typedef unsigned short int _Binmap_type;
      static _Binmap_type* _S_binmap;

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread id
      // to the front of this list.
#ifdef __GTHREADS
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t _M_id;
      };

      static _Thread_record* volatile _S_thread_freelist_first;
      static __gthread_mutex_t _S_thread_freelist_mutex;
      static __gthread_key_t _S_thread_key;

      static void
      _S_destroy_thread_key(void* __freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile _M_next;

#ifdef __GTHREADS
        // The thread id of the thread which has requested this block.
        size_t _M_thread_id;
#endif
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile _M_first;

#ifdef __GTHREADS
        // An "array" of counters used to keep track of the amount of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize()
        // for _S_max_threads + global pool 0.
        size_t* volatile _M_free;
        size_t* volatile _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t* _M_mutex;
#endif
      };

      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _S_initialize().
      static _Bin_record* volatile _S_bin;

      // Actual value calculated in _S_initialize().
      static size_t _S_bin_size;
    };
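  // Illustrative sketch (not part of the original source): the class
  // is normally used as the allocator argument of a standard
  // container rather than called directly, e.g.
  //
  //   #include <ext/mt_allocator.h>
  //   #include <vector>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > v(100);
  //
  // Setting the environment variable GLIBCXX_FORCE_NEW before the
  // first allocation makes every request go straight to new/delete
  // (see _Tune::_M_force_new above).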
  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_type __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // the test of the once condition in our own unlocked check.
      // This saves one function call to pthread_once() (which itself
      // only tests for the once value unlocked anyway and immediately
      // returns if set).
      if (!_S_init)
        {
#ifdef __GTHREADS
          if (__gthread_active_p())
            __gthread_once(&_S_once, _S_initialize);
#endif
          if (!_S_init)
            _S_initialize();
        }

      // Requests larger than _M_max_bytes are handled by new/delete
      // directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      const _Bin_record& __bin = _S_bin[__which];
      _Block_record* __block = NULL;
      if (__bin._M_first[__thread_id] == NULL)
        {
          // NB: For alignment reasons, we can't use the first _M_align
          // bytes, even when sizeof(_Block_record) < _M_align.
          const size_t __bin_size = ((_S_options._M_min_bin << __which)
                                     + _S_options._M_align);
          size_t __block_count = _S_options._M_chunk_size / __bin_size;
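          // Illustrative arithmetic (not in the original source): with
          // the default _Tune values on a 32-bit machine,
          // _M_chunk_size is 4096 - 4 * 4 = 4080. For the smallest bin
          // (__which == 0), __bin_size is (8 << 0) + 8 = 16, so one
          // chunk is carved into 4080 / 16 = 255 blocks of 16 bytes
          // each (8 bytes of alignment/header space plus 8 usable
          // bytes).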
          // Are we using threads?
          // - Yes, check if there are free blocks on the global
          //   list. If so, grab up to __block_count blocks in one
          //   lock and change ownership. If the global list is
          //   empty, we allocate a new chunk and add those blocks
          //   directly to our own freelist (with us as owner).
          // - No, all operations are made directly on global pool 0:
          //   there is no need to lock or change ownership, but we
          //   still check for free blocks on the global list (adding
          //   new ones if needed) and take the first one.
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __gthread_mutex_lock(__bin._M_mutex);
              if (__bin._M_first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin._M_mutex);

                  void* __v = ::operator new(_S_options._M_chunk_size);
                  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
                  __bin._M_free[__thread_id] = __block_count;

                  --__block_count;
                  __block = __bin._M_first[__thread_id];
                  while (__block_count-- > 0)
                    {
                      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                      __block = __block->_M_next;
                    }
                  __block->_M_next = NULL;
                }
              else
                {
                  // Is the number of required blocks greater than or
                  // equal to the number that can be provided by the
                  // global free list?
                  __bin._M_first[__thread_id] = __bin._M_first[0];
                  if (__block_count >= __bin._M_free[0])
                    {
                      __bin._M_free[__thread_id] = __bin._M_free[0];
                      __bin._M_free[0] = 0;
                      __bin._M_first[0] = NULL;
                    }
                  else
                    {
                      __bin._M_free[__thread_id] = __block_count;
                      __bin._M_free[0] -= __block_count;
                      --__block_count;
                      __block = __bin._M_first[0];
                      while (__block_count-- > 0)
                        __block = __block->_M_next;
                      __bin._M_first[0] = __block->_M_next;
                      __block->_M_next = NULL;
                    }
                  __gthread_mutex_unlock(__bin._M_mutex);
                }
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin._M_first[0] = static_cast<_Block_record*>(__v);

              --__block_count;
              __block = __bin._M_first[0];
              while (__block_count-- > 0)
                {
                  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                  __block = __block->_M_next;
                }
              __block->_M_next = NULL;
            }
        }

      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          __block->_M_thread_id = __thread_id;
          --__bin._M_free[__thread_id];
          ++__bin._M_used[__thread_id];
        }
#endif

      char* __c = reinterpret_cast<char*>(__block) + _S_options._M_align;
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
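  // Illustrative arithmetic (not in the original source): with the
  // default options the bins hold blocks of 8, 16, 32, 64 and 128
  // usable bytes. A request for a 24-byte object gives __bytes == 24
  // and _S_binmap[24] == 2, i.e. the 32-byte bin, since the binmap
  // built in _S_initialize() maps every size from 17 to 32 to the
  // third bin.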
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - _S_options._M_align;
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our
          // freelist: in order to avoid too much contention we wait
          // until the number of records is "high enough".
          const size_t __thread_id = _S_get_thread_id();

          long __remove = ((__bin._M_free[__thread_id]
                            * _S_options._M_freelist_headroom)
                           - __bin._M_used[__thread_id]);
          if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
                                           * _S_options._M_freelist_headroom)
              && __remove > static_cast<long>(__bin._M_free[__thread_id]))
            {
              _Block_record* __tmp = __bin._M_first[__thread_id];
              _Block_record* __first = __tmp;
              __remove /= _S_options._M_freelist_headroom;
              const long __removed = __remove;
              --__remove;
              while (__remove-- > 0)
                __tmp = __tmp->_M_next;
              __bin._M_first[__thread_id] = __tmp->_M_next;
              __bin._M_free[__thread_id] -= __removed;

              __gthread_mutex_lock(__bin._M_mutex);
              __tmp->_M_next = __bin._M_first[0];
              __bin._M_first[0] = __first;
              __bin._M_free[0] += __removed;
              __gthread_mutex_unlock(__bin._M_mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.
          --__bin._M_used[__block->_M_thread_id];

          __block->_M_next = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block;

          ++__bin._M_free[__thread_id];
        }
      else
#endif
        {
          // Single threaded application - return to global pool.
          __block->_M_next = __bin._M_first[0];
          __bin._M_first[0] = __block;
        }
    }
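  // Illustrative arithmetic (not in the original source): with the
  // default _M_freelist_headroom of 10 and the default five bins
  // (_S_bin_size == 5), a thread deallocating into the smallest bin
  // (__which == 0) only returns blocks to the global pool once
  // __remove exceeds 100 * 5 * 10 == 5000. For example, with 600 free
  // and 500 used blocks, __remove is 600 * 10 - 500 == 5500, so
  // 5500 / 10 == 550 blocks are moved back to pool 0.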
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      // This method is called on the first allocation (when _S_init
      // is still false) to create the bins.

      // Ensure that the static initialization of _S_options has
      // happened. This depends on (a) _M_align == 0 being an invalid
      // value that is only present at startup, and (b) the real
      // static initialization that happens later not actually
      // changing anything.
      if (_S_options._M_align == 0)
        new (&_S_options) _Tune;

      // _M_force_new must not change after the first allocate(),
      // which in turn calls this method, so if it's false, it's false
      // forever and we don't need to return here ever again.
      if (_S_options._M_force_new)
        {
          _S_init = true;
          return;
        }

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size <<= 1;
          ++_S_bin_size;
        }

      // Setup the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
              _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

          // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize per thread key to hold pointer to
          // _S_thread_freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

          const size_t __max_threads = _S_options._M_max_threads + 1;
          for (size_t __n = 0; __n < _S_bin_size; ++__n)
            {
              _Bin_record& __bin = _S_bin[__n];
              __v = ::operator new(sizeof(_Block_record*) * __max_threads);
              __bin._M_first = static_cast<_Block_record**>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

              for (size_t __threadn = 0; __threadn < __max_threads;
                   ++__threadn)
                {
                  __bin._M_first[__threadn] = NULL;
                  __bin._M_free[__threadn] = 0;
                  __bin._M_used[__threadn] = 0;
                }
            }
        }
      else
#endif
        for (size_t __n = 0; __n < _S_bin_size; ++__n)
          {
            _Bin_record& __bin = _S_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
          }

      _S_init = true;
    }
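  // Illustrative arithmetic (not in the original source): with the
  // default options in a threaded program, _S_initialize() allocates,
  // per bin, one array of 4097 _Block_record pointers and two arrays
  // of 4097 size_t counters (4096 thread ids plus global pool 0),
  // i.e. five bins' worth of bookkeeping before the first block is
  // ever handed out.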
  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active, we check the
      // thread key value and return its id; if the key is not yet
      // set, we take the first record from _S_thread_freelist, set
      // the key and return the record's id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
            static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical max number of threads of the OS, the
              // list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to the front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif
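Example usage (an illustrative sketch, not part of the header itself;
the tuning values passed to _Tune below are arbitrary):

    #include <ext/mt_allocator.h>
    #include <vector>

    int main()
    {
      typedef __gnu_cxx::__mt_alloc<int> alloc_type;

      // Optional: adjust the tuning parameters. This only has an
      // effect if done before the allocator's first allocation.
      alloc_type::_Tune t(8, 256, 8, 4096 - 4 * sizeof(void*),
                          4096, 10, false);
      alloc_type::_S_set_options(t);

      // Use the allocator with a standard container; every element
      // allocation of 256 bytes or less is served from the bins.
      std::vector<int, alloc_type> v;
      for (int i = 0; i < 1000; ++i)
        v.push_back(i);
      return 0;
    }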
