// libstdc++ (doxygen source listing of ext/mt_allocator.h)
// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <bits/move.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  // Per-thread destructor callback type (used by the deprecated
  // _M_initialize(__destroy_handler) ABI entry point below).
  typedef void (*__destroy_handler)(void*);

  /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw new/
      // call will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t _M_min_bin;

      // In order to avoid fragmenting and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system.  (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534)
      size_t _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t _M_freelist_headroom;

      // Set to true forces all allocations to use new().
      bool _M_force_new;

      // Default configuration; _M_force_new is taken from the
      // GLIBCXX_FORCE_NEW environment variable at construction time.
      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      // Fully explicit configuration.
      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
	    size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };

    // Node in the list of raw chunks obtained from operator new, kept
    // so _M_destroy can release them.
    struct _Block_address
    {
      void*           _M_initial;
      _Block_address* _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    // Options may only be changed before initialization has run.
    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
	_M_options = __t;
    }

    // True when the request should bypass the pool and go straight to
    // operator new/delete (too large, or GLIBCXX_FORCE_NEW set).
    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    // Map a byte count to its bin index via the precomputed binmap.
    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    // Pools are neither copyable nor assignable (declared, not defined).
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune 	       	_M_options;

    _Binmap_type* 	_M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool 		_M_init;
  };


  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* 			_M_next;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;
      };

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      // Single-threaded pool: everything belongs to thread id 0.
      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // No per-thread accounting in the single-threaded pool.
      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		 _M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      void
      _M_initialize();
  };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // stores the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // (i.e. the thread dies) is called, we return the thread id to
      // the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record. NULL if last record in list.
	_Thread_record*			_M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t                          _M_id;
      };

      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record*			_M_next;

	// The thread id of the thread which has requested this block.
	size_t                          _M_thread_id;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory to this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;

	// An "array" of counters used to keep track of the amount of
	// blocks that are on the freelist/used for each thread id.
	// - Note that the second part of the allocated _M_used "array"
	//   actually hosts (atomic) counters of reclaimed blocks:  in
	//   _M_reserve_block and in _M_reclaim_block those numbers are
	//   subtracted from the first ones to obtain the actual size
	//   of the "working set" of the given thread.
	// - Memory to these "arrays" is allocated in _S_initialize()
	//   for _S_max_threads + global pool 0.
	size_t*				_M_free;
	size_t*			        _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // Tag the block with its owner and update the owner's
      // free/used counters; a no-op when threads are not active.
      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
	_M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		_M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      _Thread_record* 		_M_thread_freelist;
      void*			_M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread> 		pool_type;

      // One pool instance shared by all allocators using this policy.
      static pool_type&
      _S_get_pool()
      {
	static pool_type _S_pool;
	return _S_pool;
      }
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
  };


  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp 			value_type;
      typedef _PoolTp<_Thread> 		pool_type;

      // One pool instance per value_type, tuned to sizeof(_Tp).
      static pool_type&
      _S_get_pool()
      {
	// Sane defaults for the _PoolTp.
	typedef typename pool_type::_Block_record _Block_record;
	const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
				   ? __alignof__(_Tp) : sizeof(_Block_record));

	typedef typename __pool_base::_Tune _Tune;
	static _Tune _S_tune(__a, sizeof(_Tp) * 64,
			     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
			     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
			     _Tune::_S_max_threads,
			     _Tune::_S_freelist_headroom,
			     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
	static pool_type _S_pool(_S_tune);
	return _S_pool;
      }
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
 template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
  };


  /// Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      template<typename... _Args>
        void
        construct(pointer __p, _Args&&... __args)
	{ ::new((void *)__p) _Tp(std::forward<_Args>(__args)...); }
#endif

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *  @ingroup allocators
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt12ch32.html
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                    	size_type;
      typedef ptrdiff_t                 	difference_type;
      typedef _Tp*                      	pointer;
      typedef const _Tp*                	const_pointer;
      typedef _Tp&                      	reference;
      typedef const _Tp&                	const_reference;
      typedef _Tp                       	value_type;
      typedef _Poolp      			__policy_type;
      typedef typename _Poolp::pool_type	__pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
	std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
	{
	  void* __ret = ::operator new(__bytes);
	  return static_cast<_Tp*>(__ret);
	}

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
	{
	  // Already reserved.
	  typedef typename __pool_type::_Block_record _Block_record;
	  _Block_record* __block = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block->_M_next;

	  __pool._M_adjust_freelist(__bin, __block, __thread_id);
	  __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
	}
      else
	{
	  // Null, reserve.
	  __c = __pool._M_reserve_block(__bytes, __thread_id);
	}
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
	{
	  // Requests larger than _M_max_bytes are handled by
	  // operators new/delete directly.
	  __pool_type& __pool = __policy_type::_S_get_pool();
	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__pool._M_check_threshold(__bytes))
	    ::operator delete(__p);
	  else
	    __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
	}
    }

  // All __mt_alloc instances of the same type compare equal: storage
  // allocated by one can be deallocated through another.
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __thread_default

_GLIBCXX_END_NAMESPACE

#endif