// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
00033 */ 00034 00035 #ifndef _MT_ALLOCATOR_H 00036 #define _MT_ALLOCATOR_H 1 00037 00038 #include <new> 00039 #include <cstdlib> 00040 #include <bits/functexcept.h> 00041 #include <bits/gthr.h> 00042 #include <bits/atomicity.h> 00043 00044 namespace __gnu_cxx 00045 { 00046 /** 00047 * This is a fixed size (power of 2) allocator which - when 00048 * compiled with thread support - will maintain one freelist per 00049 * size per thread plus a "global" one. Steps are taken to limit 00050 * the per thread freelist sizes (by returning excess back to 00051 * "global"). 00052 * 00053 * Further details: 00054 * http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html 00055 */ 00056 typedef void (*__destroy_handler)(void*); 00057 typedef void (*__create_handler)(void); 00058 00059 class __pool_base 00060 { 00061 public: 00062 // Variables used to configure the behavior of the allocator, 00063 // assigned and explained in detail below. 00064 struct _Tune 00065 { 00066 // Alignment needed. 00067 // NB: In any case must be >= sizeof(_Block_record), that 00068 // is 4 on 32 bit machines and 8 on 64 bit machines. 00069 size_t _M_align; 00070 00071 // Allocation requests (after round-up to power of 2) below 00072 // this value will be handled by the allocator. A raw new/ 00073 // call will be used for requests larger than this value. 00074 size_t _M_max_bytes; 00075 00076 // Size in bytes of the smallest bin. 00077 // NB: Must be a power of 2 and >= _M_align. 00078 size_t _M_min_bin; 00079 00080 // In order to avoid fragmenting and minimize the number of 00081 // new() calls we always request new memory using this 00082 // value. Based on previous discussions on the libstdc++ 00083 // mailing list we have choosen the value below. 00084 // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html 00085 size_t _M_chunk_size; 00086 00087 // The maximum number of supported threads. For 00088 // single-threaded operation, use one. 
Maximum values will 00089 // vary depending on details of the underlying system. (For 00090 // instance, Linux 2.4.18 reports 4070 in 00091 // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports 00092 // 65534) 00093 size_t _M_max_threads; 00094 00095 // Each time a deallocation occurs in a threaded application 00096 // we make sure that there are no more than 00097 // _M_freelist_headroom % of used memory on the freelist. If 00098 // the number of additional records is more than 00099 // _M_freelist_headroom % of the freelist, we move these 00100 // records back to the global pool. 00101 size_t _M_freelist_headroom; 00102 00103 // Set to true forces all allocations to use new(). 00104 bool _M_force_new; 00105 00106 explicit 00107 _Tune() 00108 : _M_align(8), _M_max_bytes(128), _M_min_bin(8), 00109 _M_chunk_size(4096 - 4 * sizeof(void*)), 00110 _M_max_threads(4096), _M_freelist_headroom(10), 00111 _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false) 00112 { } 00113 00114 explicit 00115 _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk, 00116 size_t __maxthreads, size_t __headroom, bool __force) 00117 : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin), 00118 _M_chunk_size(__chunk), _M_max_threads(__maxthreads), 00119 _M_freelist_headroom(__headroom), _M_force_new(__force) 00120 { } 00121 }; 00122 00123 const _Tune& 00124 _M_get_options() const 00125 { return _M_options; } 00126 00127 void 00128 _M_set_options(_Tune __t) 00129 { 00130 if (!_M_init) 00131 _M_options = __t; 00132 } 00133 00134 bool 00135 _M_check_threshold(size_t __bytes) 00136 { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; } 00137 00138 size_t 00139 _M_get_binmap(size_t __bytes) 00140 { return _M_binmap[__bytes]; } 00141 00142 explicit __pool_base() 00143 : _M_init(false), _M_options(_Tune()), _M_binmap(NULL) { } 00144 00145 protected: 00146 // We need to create the initial lists and set up some variables 00147 // before we can answer to 
the first request for memory. 00148 bool _M_init; 00149 00150 // Configuration options. 00151 _Tune _M_options; 00152 00153 // Using short int as type for the binmap implies we are never 00154 // caching blocks larger than 65535 with this allocator. 00155 typedef unsigned short int _Binmap_type; 00156 _Binmap_type* _M_binmap; 00157 }; 00158 00159 // Data describing the underlying memory pool, parameterized on 00160 // threading support. 00161 template<bool _Thread> 00162 class __pool; 00163 00164 template<> 00165 class __pool<true>; 00166 00167 template<> 00168 class __pool<false>; 00169 00170 00171 #ifdef __GTHREADS 00172 // Specialization for thread enabled, via gthreads.h. 00173 template<> 00174 class __pool<true> : public __pool_base 00175 { 00176 public: 00177 // Each requesting thread is assigned an id ranging from 1 to 00178 // _S_max_threads. Thread id 0 is used as a global memory pool. 00179 // In order to get constant performance on the thread assignment 00180 // routine, we keep a list of free ids. When a thread first 00181 // requests memory we remove the first record in this list and 00182 // stores the address in a __gthread_key. When initializing the 00183 // __gthread_key we specify a destructor. When this destructor 00184 // (i.e. the thread dies) is called, we return the thread id to 00185 // the front of this list. 00186 struct _Thread_record 00187 { 00188 // Points to next free thread id record. NULL if last record in list. 00189 _Thread_record* volatile _M_next; 00190 00191 // Thread id ranging from 1 to _S_max_threads. 00192 size_t _M_id; 00193 }; 00194 00195 union _Block_record 00196 { 00197 // Points to the block_record of the next free block. 00198 _Block_record* volatile _M_next; 00199 00200 // The thread id of the thread which has requested this block. 00201 size_t _M_thread_id; 00202 }; 00203 00204 struct _Bin_record 00205 { 00206 // An "array" of pointers to the first free block for each 00207 // thread id. 
Memory to this "array" is allocated in _S_initialize() 00208 // for _S_max_threads + global pool 0. 00209 _Block_record** volatile _M_first; 00210 00211 // An "array" of counters used to keep track of the amount of 00212 // blocks that are on the freelist/used for each thread id. 00213 // Memory to these "arrays" is allocated in _S_initialize() for 00214 // _S_max_threads + global pool 0. 00215 size_t* volatile _M_free; 00216 size_t* volatile _M_used; 00217 00218 // Each bin has its own mutex which is used to ensure data 00219 // integrity while changing "ownership" on a block. The mutex 00220 // is initialized in _S_initialize(). 00221 __gthread_mutex_t* _M_mutex; 00222 }; 00223 00224 void 00225 _M_initialize(__destroy_handler __d); 00226 00227 void 00228 _M_initialize_once(__create_handler __c) 00229 { 00230 // Although the test in __gthread_once() would suffice, we 00231 // wrap test of the once condition in our own unlocked 00232 // check. This saves one function call to pthread_once() 00233 // (which itself only tests for the once value unlocked anyway 00234 // and immediately returns if set) 00235 if (__builtin_expect(_M_init == false, false)) 00236 { 00237 if (__gthread_active_p()) 00238 __gthread_once(&_M_once, __c); 00239 if (!_M_init) 00240 __c(); 00241 } 00242 } 00243 00244 char* 00245 _M_reserve_memory(size_t __bytes, const size_t __thread_id); 00246 00247 void 00248 _M_reclaim_memory(char* __p, size_t __bytes); 00249 00250 const _Bin_record& 00251 _M_get_bin(size_t __which) 00252 { return _M_bin[__which]; } 00253 00254 void 00255 _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block, 00256 size_t __thread_id) 00257 { 00258 if (__gthread_active_p()) 00259 { 00260 __block->_M_thread_id = __thread_id; 00261 --__bin._M_free[__thread_id]; 00262 ++__bin._M_used[__thread_id]; 00263 } 00264 } 00265 00266 void 00267 _M_destroy_thread_key(void* __freelist_pos); 00268 00269 size_t 00270 _M_get_thread_id(); 00271 00272 explicit __pool() 00273 : 
_M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL) 00274 { 00275 // On some platforms, __gthread_once_t is an aggregate. 00276 __gthread_once_t __tmp = __GTHREAD_ONCE_INIT; 00277 _M_once = __tmp; 00278 } 00279 00280 private: 00281 // An "array" of bin_records each of which represents a specific 00282 // power of 2 size. Memory to this "array" is allocated in 00283 // _M_initialize(). 00284 _Bin_record* volatile _M_bin; 00285 00286 // Actual value calculated in _M_initialize(). 00287 size_t _M_bin_size; 00288 00289 __gthread_once_t _M_once; 00290 00291 _Thread_record* _M_thread_freelist; 00292 }; 00293 #endif 00294 00295 // Specialization for single thread. 00296 template<> 00297 class __pool<false> : public __pool_base 00298 { 00299 public: 00300 union _Block_record 00301 { 00302 // Points to the block_record of the next free block. 00303 _Block_record* volatile _M_next; 00304 }; 00305 00306 struct _Bin_record 00307 { 00308 // An "array" of pointers to the first free block for each 00309 // thread id. Memory to this "array" is allocated in _S_initialize() 00310 // for _S_max_threads + global pool 0. 00311 _Block_record** volatile _M_first; 00312 }; 00313 00314 void 00315 _M_initialize_once() 00316 { 00317 if (__builtin_expect(_M_init == false, false)) 00318 _M_initialize(); 00319 } 00320 00321 char* 00322 _M_reserve_memory(size_t __bytes, const size_t __thread_id); 00323 00324 void 00325 _M_reclaim_memory(char* __p, size_t __bytes); 00326 00327 size_t 00328 _M_get_thread_id() { return 0; } 00329 00330 const _Bin_record& 00331 _M_get_bin(size_t __which) 00332 { return _M_bin[__which]; } 00333 00334 void 00335 _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t) 00336 { } 00337 00338 explicit __pool() 00339 : _M_bin(NULL), _M_bin_size(1) { } 00340 00341 private: 00342 // An "array" of bin_records each of which represents a specific 00343 // power of 2 size. Memory to this "array" is allocated in 00344 // _M_initialize(). 
00345 _Bin_record* volatile _M_bin; 00346 00347 // Actual value calculated in _M_initialize(). 00348 size_t _M_bin_size; 00349 00350 void 00351 _M_initialize(); 00352 }; 00353 00354 00355 template<bool _Thread> 00356 struct __common_pool_policy 00357 { 00358 template<typename _Tp1, bool _Thread1 = _Thread> 00359 struct _M_rebind; 00360 00361 template<typename _Tp1> 00362 struct _M_rebind<_Tp1, true> 00363 { typedef __common_pool_policy<true> other; }; 00364 00365 template<typename _Tp1> 00366 struct _M_rebind<_Tp1, false> 00367 { typedef __common_pool_policy<false> other; }; 00368 00369 typedef __pool<_Thread> __pool_type; 00370 static __pool_type _S_data; 00371 00372 static __pool_type& 00373 _S_get_pool(); 00374 00375 static void 00376 _S_initialize_once() 00377 { 00378 static bool __init; 00379 if (__builtin_expect(__init == false, false)) 00380 { 00381 _S_get_pool()._M_initialize_once(); 00382 __init = true; 00383 } 00384 } 00385 }; 00386 00387 template<> 00388 struct __common_pool_policy<true>; 00389 00390 #ifdef __GTHREADS 00391 template<> 00392 struct __common_pool_policy<true> 00393 { 00394 template<typename _Tp1, bool _Thread1 = true> 00395 struct _M_rebind; 00396 00397 template<typename _Tp1> 00398 struct _M_rebind<_Tp1, true> 00399 { typedef __common_pool_policy<true> other; }; 00400 00401 template<typename _Tp1> 00402 struct _M_rebind<_Tp1, false> 00403 { typedef __common_pool_policy<false> other; }; 00404 00405 typedef __pool<true> __pool_type; 00406 static __pool_type _S_data; 00407 00408 static __pool_type& 00409 _S_get_pool(); 00410 00411 static void 00412 _S_destroy_thread_key(void* __freelist_pos) 00413 { _S_get_pool()._M_destroy_thread_key(__freelist_pos); } 00414 00415 static void 00416 _S_initialize() 00417 { _S_get_pool()._M_initialize(_S_destroy_thread_key); } 00418 00419 static void 00420 _S_initialize_once() 00421 { 00422 static bool __init; 00423 if (__builtin_expect(__init == false, false)) 00424 { 00425 
_S_get_pool()._M_initialize_once(_S_initialize); 00426 __init = true; 00427 } 00428 } 00429 }; 00430 #endif 00431 00432 template<typename _Tp, bool _Thread> 00433 struct __per_type_pool_policy 00434 { 00435 template<typename _Tp1, bool _Thread1 = _Thread> 00436 struct _M_rebind; 00437 00438 template<typename _Tp1> 00439 struct _M_rebind<_Tp1, false> 00440 { typedef __per_type_pool_policy<_Tp1, false> other; }; 00441 00442 template<typename _Tp1> 00443 struct _M_rebind<_Tp1, true> 00444 { typedef __per_type_pool_policy<_Tp1, true> other; }; 00445 00446 typedef __pool<_Thread> __pool_type; 00447 static __pool_type _S_data; 00448 00449 static __pool_type& 00450 _S_get_pool( ) { return _S_data; } 00451 00452 static void 00453 _S_initialize_once() 00454 { 00455 static bool __init; 00456 if (__builtin_expect(__init == false, false)) 00457 { 00458 _S_get_pool()._M_initialize_once(); 00459 __init = true; 00460 } 00461 } 00462 }; 00463 00464 template<typename _Tp, bool _Thread> 00465 __pool<_Thread> 00466 __per_type_pool_policy<_Tp, _Thread>::_S_data; 00467 00468 template<typename _Tp> 00469 struct __per_type_pool_policy<_Tp, true>; 00470 00471 #ifdef __GTHREADS 00472 template<typename _Tp> 00473 struct __per_type_pool_policy<_Tp, true> 00474 { 00475 template<typename _Tp1, bool _Thread1 = true> 00476 struct _M_rebind; 00477 00478 template<typename _Tp1> 00479 struct _M_rebind<_Tp1, false> 00480 { typedef __per_type_pool_policy<_Tp1, false> other; }; 00481 00482 template<typename _Tp1> 00483 struct _M_rebind<_Tp1, true> 00484 { typedef __per_type_pool_policy<_Tp1, true> other; }; 00485 00486 typedef __pool<true> __pool_type; 00487 static __pool_type _S_data; 00488 00489 static __pool_type& 00490 _S_get_pool( ) { return _S_data; } 00491 00492 static void 00493 _S_destroy_thread_key(void* __freelist_pos) 00494 { _S_get_pool()._M_destroy_thread_key(__freelist_pos); } 00495 00496 static void 00497 _S_initialize() 00498 { _S_get_pool()._M_initialize(_S_destroy_thread_key); } 
00499 00500 static void 00501 _S_initialize_once() 00502 { 00503 static bool __init; 00504 if (__builtin_expect(__init == false, false)) 00505 { 00506 _S_get_pool()._M_initialize_once(_S_initialize); 00507 __init = true; 00508 } 00509 } 00510 }; 00511 00512 template<typename _Tp> 00513 __pool<true> 00514 __per_type_pool_policy<_Tp, true>::_S_data; 00515 #endif 00516 00517 #ifdef __GTHREADS 00518 typedef __common_pool_policy<true> __default_policy; 00519 #else 00520 typedef __common_pool_policy<false> __default_policy; 00521 #endif 00522 00523 template<typename _Tp> 00524 class __mt_alloc_base 00525 { 00526 public: 00527 typedef size_t size_type; 00528 typedef ptrdiff_t difference_type; 00529 typedef _Tp* pointer; 00530 typedef const _Tp* const_pointer; 00531 typedef _Tp& reference; 00532 typedef const _Tp& const_reference; 00533 typedef _Tp value_type; 00534 00535 pointer 00536 address(reference __x) const 00537 { return &__x; } 00538 00539 const_pointer 00540 address(const_reference __x) const 00541 { return &__x; } 00542 00543 size_type 00544 max_size() const throw() 00545 { return size_t(-1) / sizeof(_Tp); } 00546 00547 // _GLIBCXX_RESOLVE_LIB_DEFECTS 00548 // 402. 
wrong new expression in [some_] allocator::construct 00549 void 00550 construct(pointer __p, const _Tp& __val) 00551 { ::new(__p) _Tp(__val); } 00552 00553 void 00554 destroy(pointer __p) { __p->~_Tp(); } 00555 }; 00556 00557 template<typename _Tp, typename _Poolp = __default_policy> 00558 class __mt_alloc : public __mt_alloc_base<_Tp>, _Poolp 00559 { 00560 public: 00561 typedef size_t size_type; 00562 typedef ptrdiff_t difference_type; 00563 typedef _Tp* pointer; 00564 typedef const _Tp* const_pointer; 00565 typedef _Tp& reference; 00566 typedef const _Tp& const_reference; 00567 typedef _Tp value_type; 00568 typedef _Poolp __policy_type; 00569 typedef typename _Poolp::__pool_type __pool_type; 00570 00571 template<typename _Tp1, typename _Poolp1 = _Poolp> 00572 struct rebind 00573 { 00574 typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type; 00575 typedef __mt_alloc<_Tp1, pol_type> other; 00576 }; 00577 00578 __mt_alloc() throw() 00579 { 00580 // XXX 00581 } 00582 00583 __mt_alloc(const __mt_alloc&) throw() 00584 { 00585 // XXX 00586 } 00587 00588 template<typename _Tp1, typename _Poolp1> 00589 __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>& obj) throw() 00590 { 00591 // XXX 00592 } 00593 00594 ~__mt_alloc() throw() { } 00595 00596 pointer 00597 allocate(size_type __n, const void* = 0); 00598 00599 void 00600 deallocate(pointer __p, size_type __n); 00601 00602 const __pool_base::_Tune 00603 _M_get_options() 00604 { 00605 // Return a copy, not a reference, for external consumption. 
00606 return __pool_base::_Tune(this->_S_get_pool()._M_get_options()); 00607 } 00608 00609 void 00610 _M_set_options(__pool_base::_Tune __t) 00611 { this->_S_get_pool()._M_set_options(__t); } 00612 }; 00613 00614 template<typename _Tp, typename _Poolp> 00615 typename __mt_alloc<_Tp, _Poolp>::pointer 00616 __mt_alloc<_Tp, _Poolp>:: 00617 allocate(size_type __n, const void*) 00618 { 00619 this->_S_initialize_once(); 00620 00621 // Requests larger than _M_max_bytes are handled by new/delete 00622 // directly. 00623 __pool_type& __pl = this->_S_get_pool(); 00624 const size_t __bytes = __n * sizeof(_Tp); 00625 if (__pl._M_check_threshold(__bytes)) 00626 { 00627 void* __ret = ::operator new(__bytes); 00628 return static_cast<_Tp*>(__ret); 00629 } 00630 00631 // Round up to power of 2 and figure out which bin to use. 00632 const size_t __which = __pl._M_get_binmap(__bytes); 00633 const size_t __thread_id = __pl._M_get_thread_id(); 00634 00635 // Find out if we have blocks on our freelist. If so, go ahead 00636 // and use them directly without having to lock anything. 00637 char* __c; 00638 typedef typename __pool_type::_Bin_record _Bin_record; 00639 const _Bin_record& __bin = __pl._M_get_bin(__which); 00640 if (__bin._M_first[__thread_id]) 00641 { 00642 // Already reserved. 00643 typedef typename __pool_type::_Block_record _Block_record; 00644 _Block_record* __block = __bin._M_first[__thread_id]; 00645 __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next; 00646 00647 __pl._M_adjust_freelist(__bin, __block, __thread_id); 00648 const __pool_base::_Tune& __options = __pl._M_get_options(); 00649 __c = reinterpret_cast<char*>(__block) + __options._M_align; 00650 } 00651 else 00652 { 00653 // Null, reserve. 
00654 __c = __pl._M_reserve_memory(__bytes, __thread_id); 00655 } 00656 return static_cast<_Tp*>(static_cast<void*>(__c)); 00657 } 00658 00659 template<typename _Tp, typename _Poolp> 00660 void 00661 __mt_alloc<_Tp, _Poolp>:: 00662 deallocate(pointer __p, size_type __n) 00663 { 00664 // Requests larger than _M_max_bytes are handled by operators 00665 // new/delete directly. 00666 __pool_type& __pl = this->_S_get_pool(); 00667 const size_t __bytes = __n * sizeof(_Tp); 00668 if (__pl._M_check_threshold(__bytes)) 00669 ::operator delete(__p); 00670 else 00671 __pl._M_reclaim_memory(reinterpret_cast<char*>(__p), __bytes); 00672 } 00673 00674 template<typename _Tp, typename _Poolp> 00675 inline bool 00676 operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&) 00677 { return true; } 00678 00679 template<typename _Tp, typename _Poolp> 00680 inline bool 00681 operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&) 00682 { return false; } 00683 } // namespace __gnu_cxx 00684 00685 #endif

// Generated on Sun Sep 12 15:49:58 2004 for libstdc++ source by doxygen 1.3.8