* [Bug libstdc++/16614] Excessive resource usage in __mt_alloc
2004-07-18 12:32 [Bug libstdc++/16614] New: Excessive resource usage in __mt_alloc peturr02 at ru dot is
` (2 preceding siblings ...)
2004-07-22 3:33 ` bkoz at gcc dot gnu dot org
@ 2004-07-22 3:35 ` bkoz at gcc dot gnu dot org
2004-09-01 22:17 ` cvs-commit at gcc dot gnu dot org
` (4 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: bkoz at gcc dot gnu dot org @ 2004-07-22 3:35 UTC (permalink / raw)
To: gcc-bugs
------- Additional Comments From bkoz at gcc dot gnu dot org 2004-07-22 03:35 -------
(From update of attachment 6797)
>Index: ChangeLog
>===================================================================
>RCS file: /cvs/gcc/gcc/libstdc++-v3/ChangeLog,v
>retrieving revision 1.2583
>diff -c -p -r1.2583 ChangeLog
>*** ChangeLog 21 Jul 2004 18:54:51 -0000 1.2583
>--- ChangeLog 22 Jul 2004 02:55:10 -0000
>***************
>*** 1,3 ****
>--- 1,13 ----
>+ 2004-07-21 benjamin kosnik <bkoz@redhat.com>
>+
>+ * include/ext/mt_allocator.h (__mt_base): Single pool.
>+ * src/allocator.cc: Split into...
>+ * src/allocator_pool.cc: ...this.
>+ * src/allocator_mt.cc: Add definitions for __mt_base.
>+ * src/Makefile.am (sources): Split allocator.cc to
>+ allocator_pool.cc and allocator_mt.cc.
>+ * src/Makefile.in: Regenerate.
>+
> 2004-07-21 Benjamin Kosnik <bkoz@redhat.com>
>
> * docs/doxygen/mainpage.html: Clarify links.
>Index: config/linker-map.gnu
>===================================================================
>RCS file: /cvs/gcc/gcc/libstdc++-v3/config/linker-map.gnu,v
>retrieving revision 1.67
>diff -c -p -r1.67 linker-map.gnu
>*** config/linker-map.gnu 8 Jul 2004 05:24:33 -0000 1.67
>--- config/linker-map.gnu 22 Jul 2004 02:55:10 -0000
>*************** GLIBCXX_3.4.2 {
>*** 259,264 ****
>--- 259,273 ----
> _ZN9__gnu_cxx11__pool_base16_M_get_free_listE[jm];
> _ZN9__gnu_cxx11__pool_base12_M_get_mutexEv;
>
>+ _ZN9__gnu_cxx9__mt_base7_S_initE;
>+ _ZN9__gnu_cxx9__mt_base10_S_optionsE;
>+ _ZN9__gnu_cxx9__mt_base6_S_binE;
>+ _ZN9__gnu_cxx9__mt_base11_S_bin_sizeE;
>+ _ZN9__gnu_cxx9__mt_base9_S_binmapE;
>+ _ZN9__gnu_cxx9__mt_base7_S_onceE;
>+ _ZN9__gnu_cxx9__mt_base16_S_get_thread_idEv;
>+ _ZN9__gnu_cxx9__mt_base13_S_initializeEv;
>+
> } GLIBCXX_3.4.1;
>
> # Symbols in the support library (libsupc++) have their own tag.
>Index: include/ext/mt_allocator.h
>===================================================================
>RCS file: /cvs/gcc/gcc/libstdc++-v3/include/ext/mt_allocator.h,v
>retrieving revision 1.33
>diff -c -p -r1.33 mt_allocator.h
>*** include/ext/mt_allocator.h 14 Jul 2004 06:37:17 -0000 1.33
>--- include/ext/mt_allocator.h 22 Jul 2004 02:55:10 -0000
>*************** namespace __gnu_cxx
>*** 53,60 ****
> * Further details:
> * http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
> */
> template<typename _Tp>
>! class __mt_alloc
> {
> public:
> typedef size_t size_type;
>--- 53,229 ----
> * Further details:
> * http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
> */
>+ class __mt_base
>+ {
>+ public:
>+ // Variables used to configure the behavior of the allocator,
>+ // assigned and explained in detail below.
>+ struct _Tune
>+ {
>+ // Alignment needed.
>+ // NB: In any case must be >= sizeof(_Block_record), that
>+ // is 4 on 32 bit machines and 8 on 64 bit machines.
>+ size_t _M_align;
>+
>+ // Allocation requests (after round-up to power of 2) below
>+ // this value will be handled by the allocator. A raw new/
>+ // call will be used for requests larger than this value.
>+ size_t _M_max_bytes;
>+
>+ // Size in bytes of the smallest bin.
>+ // NB: Must be a power of 2 and >= _M_align.
>+ size_t _M_min_bin;
>+
>+ // In order to avoid fragmenting and minimize the number of
>+ // new() calls we always request new memory using this
>+ // value. Based on previous discussions on the libstdc++
>+ // mailing list we have chosen the value below.
>+ // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
>+ size_t _M_chunk_size;
>+
>+ // The maximum number of supported threads. For
>+ // single-threaded operation, use one. Maximum values will
>+ // vary depending on details of the underlying system. (For
>+ // instance, Linux 2.4.18 reports 4070 in
>+ // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
>+ // 65534)
>+ size_t _M_max_threads;
>+
>+ // Each time a deallocation occurs in a threaded application
>+ // we make sure that there are no more than
>+ // _M_freelist_headroom % of used memory on the freelist. If
>+ // the number of additional records is more than
>+ // _M_freelist_headroom % of the freelist, we move these
>+ // records back to the global pool.
>+ size_t _M_freelist_headroom;
>+
>+ // Set to true forces all allocations to use new().
>+ bool _M_force_new;
>+
>+ explicit
>+ _Tune()
>+ : _M_align(8), _M_max_bytes(128), _M_min_bin(8),
>+ _M_chunk_size(4096 - 4 * sizeof(void*)),
>+ _M_max_threads(4096), _M_freelist_headroom(10),
>+ _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
>+ { }
>+
>+ explicit
>+ _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
>+ size_t __maxthreads, size_t __headroom, bool __force)
>+ : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
>+ _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
>+ _M_freelist_headroom(__headroom), _M_force_new(__force)
>+ { }
>+ };
>+
>+ static const _Tune
>+ _S_get_options()
>+ { return _S_options; }
>+
>+ static void
>+ _S_set_options(_Tune __t)
>+ {
>+ if (!_S_init)
>+ _S_options = __t;
>+ }
>+
>+ protected:
>+ static void
>+ _S_initialize();
>+
>+ #ifdef __GTHREADS
>+ static void
>+ _S_destroy_thread_key(void* __freelist_pos);
>+ #endif
>+
>+ static size_t
>+ _S_get_thread_id();
>+
>+ private:
>+ // We need to create the initial lists and set up some variables
>+ // before we can answer to the first request for memory.
>+ #ifdef __GTHREADS
>+ static __gthread_once_t _S_once;
>+ #endif
>+ static bool _S_init;
>+
>+ // Configuration options.
>+ static _Tune _S_options;
>+
>+ // Using short int as type for the binmap implies we are never
>+ // caching blocks larger than 65535 with this allocator
>+ typedef unsigned short int _Binmap_type;
>+ static _Binmap_type* _S_binmap;
>+
>+ // Each requesting thread is assigned an id ranging from 1 to
>+ // _S_max_threads. Thread id 0 is used as a global memory pool.
>+ // In order to get constant performance on the thread assignment
>+ // routine, we keep a list of free ids. When a thread first
>+ // requests memory we remove the first record in this list and
>+ // store the address in a __gthread_key. When initializing the
>+ // __gthread_key we specify a destructor. When this destructor
>+ // (i.e. the thread dies) is called, we return the thread id to
>+ // the front of this list.
>+ #ifdef __GTHREADS
>+ struct _Thread_record
>+ {
>+ // Points to next free thread id record. NULL if last record in list.
>+ _Thread_record* volatile _M_next;
>+
>+ // Thread id ranging from 1 to _S_max_threads.
>+ size_t _M_id;
>+ };
>+
>+ static _Thread_record* volatile _S_thread_freelist_first;
>+ static __gthread_mutex_t _S_thread_freelist_mutex;
>+ static __gthread_key_t _S_thread_key;
>+ #endif
>+
>+ union _Block_record
>+ {
>+ // Points to the block_record of the next free block.
>+ _Block_record* volatile _M_next;
>+
>+ #ifdef __GTHREADS
>+ // The thread id of the thread which has requested this block.
>+ size_t _M_thread_id;
>+ #endif
>+ };
>+
>+ struct _Bin_record
>+ {
>+ // An "array" of pointers to the first free block for each
>+ // thread id. Memory to this "array" is allocated in _S_initialize()
>+ // for _S_max_threads + global pool 0.
>+ _Block_record** volatile _M_first;
>+
>+ #ifdef __GTHREADS
>+ // An "array" of counters used to keep track of the amount of
>+ // blocks that are on the freelist/used for each thread id.
>+ // Memory to these "arrays" is allocated in _S_initialize() for
>+ // _S_max_threads + global pool 0.
>+ size_t* volatile _M_free;
>+ size_t* volatile _M_used;
>+
>+ // Each bin has its own mutex which is used to ensure data
>+ // integrity while changing "ownership" on a block. The mutex
>+ // is initialized in _S_initialize().
>+ __gthread_mutex_t* _M_mutex;
>+ #endif
>+ };
>+
>+ // An "array" of bin_records each of which represents a specific
>+ // power of 2 size. Memory to this "array" is allocated in
>+ // _S_initialize().
>+ static _Bin_record* volatile _S_bin;
>+
>+ // Actual value calculated in _S_initialize().
>+ static size_t _S_bin_size;
>+ };
>+
> template<typename _Tp>
>! class __mt_alloc : public __mt_base
> {
> public:
> typedef size_t size_type;
>*************** namespace __gnu_cxx
>*** 113,281 ****
>
> void
> deallocate(pointer __p, size_type __n);
>-
>- // Variables used to configure the behavior of the allocator,
>- // assigned and explained in detail below.
>- struct _Tune
>- {
>- // Alignment needed.
>- // NB: In any case must be >= sizeof(_Block_record), that
>- // is 4 on 32 bit machines and 8 on 64 bit machines.
>- size_t _M_align;
>-
>- // Allocation requests (after round-up to power of 2) below
>- // this value will be handled by the allocator. A raw new/
>- // call will be used for requests larger than this value.
>- size_t _M_max_bytes;
>-
>- // Size in bytes of the smallest bin.
>- // NB: Must be a power of 2 and >= _M_align.
>- size_t _M_min_bin;
>-
>- // In order to avoid fragmenting and minimize the number of
>- // new() calls we always request new memory using this
>- // value. Based on previous discussions on the libstdc++
>- // mailing list we have choosen the value below.
>- // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
>- size_t _M_chunk_size;
>-
>- // The maximum number of supported threads. For
>- // single-threaded operation, use one. Maximum values will
>- // vary depending on details of the underlying system. (For
>- // instance, Linux 2.4.18 reports 4070 in
>- // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
>- // 65534)
>- size_t _M_max_threads;
>-
>- // Each time a deallocation occurs in a threaded application
>- // we make sure that there are no more than
>- // _M_freelist_headroom % of used memory on the freelist. If
>- // the number of additional records is more than
>- // _M_freelist_headroom % of the freelist, we move these
>- // records back to the global pool.
>- size_t _M_freelist_headroom;
>-
>- // Set to true forces all allocations to use new().
>- bool _M_force_new;
>-
>- explicit
>- _Tune()
>- : _M_align(8), _M_max_bytes(128), _M_min_bin(8),
>- _M_chunk_size(4096 - 4 * sizeof(void*)),
>- _M_max_threads(4096), _M_freelist_headroom(10),
>- _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
>- { }
>-
>- explicit
>- _Tune(size_t __align, size_t __maxb, size_t __minbin,
>- size_t __chunk, size_t __maxthreads, size_t __headroom,
>- bool __force)
>- : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
>- _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
>- _M_freelist_headroom(__headroom), _M_force_new(__force)
>- { }
>- };
>-
>- static const _Tune
>- _S_get_options()
>- { return _S_options; }
>-
>- static void
>- _S_set_options(_Tune __t)
>- {
>- if (!_S_init)
>- _S_options = __t;
>- }
>-
>- private:
>- // We need to create the initial lists and set up some variables
>- // before we can answer to the first request for memory.
>- #ifdef __GTHREADS
>- static __gthread_once_t _S_once;
>- #endif
>- static bool _S_init;
>-
>- static void
>- _S_initialize();
>-
>- // Configuration options.
>- static _Tune _S_options;
>-
>- // Using short int as type for the binmap implies we are never
>- // caching blocks larger than 65535 with this allocator
>- typedef unsigned short int _Binmap_type;
>- static _Binmap_type* _S_binmap;
>-
>- // Each requesting thread is assigned an id ranging from 1 to
>- // _S_max_threads. Thread id 0 is used as a global memory pool.
>- // In order to get constant performance on the thread assignment
>- // routine, we keep a list of free ids. When a thread first
>- // requests memory we remove the first record in this list and
>- // stores the address in a __gthread_key. When initializing the
>- // __gthread_key we specify a destructor. When this destructor
>- // (i.e. the thread dies) is called, we return the thread id to
>- // the front of this list.
>- #ifdef __GTHREADS
>- struct _Thread_record
>- {
>- // Points to next free thread id record. NULL if last record in list.
>- _Thread_record* volatile _M_next;
>-
>- // Thread id ranging from 1 to _S_max_threads.
>- size_t _M_id;
>- };
>-
>- static _Thread_record* volatile _S_thread_freelist_first;
>- static __gthread_mutex_t _S_thread_freelist_mutex;
>- static __gthread_key_t _S_thread_key;
>-
>- static void
>- _S_destroy_thread_key(void* __freelist_pos);
>- #endif
>-
>- static size_t
>- _S_get_thread_id();
>-
>- union _Block_record
>- {
>- // Points to the block_record of the next free block.
>- _Block_record* volatile _M_next;
>-
>- #ifdef __GTHREADS
>- // The thread id of the thread which has requested this block.
>- size_t _M_thread_id;
>- #endif
>- };
>-
>- struct _Bin_record
>- {
>- // An "array" of pointers to the first free block for each
>- // thread id. Memory to this "array" is allocated in _S_initialize()
>- // for _S_max_threads + global pool 0.
>- _Block_record** volatile _M_first;
>-
>- #ifdef __GTHREADS
>- // An "array" of counters used to keep track of the amount of
>- // blocks that are on the freelist/used for each thread id.
>- // Memory to these "arrays" is allocated in _S_initialize() for
>- // _S_max_threads + global pool 0.
>- size_t* volatile _M_free;
>- size_t* volatile _M_used;
>-
>- // Each bin has its own mutex which is used to ensure data
>- // integrity while changing "ownership" on a block. The mutex
>- // is initialized in _S_initialize().
>- __gthread_mutex_t* _M_mutex;
>- #endif
>- };
>-
>- // An "array" of bin_records each of which represents a specific
>- // power of 2 size. Memory to this "array" is allocated in
>- // _S_initialize().
>- static _Bin_record* volatile _S_bin;
>-
>- // Actual value calculated in _S_initialize().
>- static size_t _S_bin_size;
> };
>
> template<typename _Tp>
>--- 282,287 ----
>*************** namespace __gnu_cxx
>*** 488,678 ****
> }
>
> template<typename _Tp>
>- void
>- __mt_alloc<_Tp>::
>- _S_initialize()
>- {
>- // This method is called on the first allocation (when _S_init is still
>- // false) to create the bins.
>-
>- // Ensure that the static initialization of _S_options has
>- // happened. This depends on (a) _M_align == 0 being an invalid
>- // value that is only present at startup, and (b) the real
>- // static initialization that happens later not actually
>- // changing anything.
>- if (_S_options._M_align == 0)
>- new (&_S_options) _Tune;
>-
>- // _M_force_new must not change after the first allocate(),
>- // which in turn calls this method, so if it's false, it's false
>- // forever and we don't need to return here ever again.
>- if (_S_options._M_force_new)
>- {
>- _S_init = true;
>- return;
>- }
>-
>- // Calculate the number of bins required based on _M_max_bytes.
>- // _S_bin_size is statically-initialized to one.
>- size_t __bin_size = _S_options._M_min_bin;
>- while (_S_options._M_max_bytes > __bin_size)
>- {
>- __bin_size <<= 1;
>- ++_S_bin_size;
>- }
>-
>- // Setup the bin map for quick lookup of the relevant bin.
>- const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
>- _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));
>-
>- _Binmap_type* __bp = _S_binmap;
>- _Binmap_type __bin_max = _S_options._M_min_bin;
>- _Binmap_type __bint = 0;
>- for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
>- {
>- if (__ct > __bin_max)
>- {
>- __bin_max <<= 1;
>- ++__bint;
>- }
>- *__bp++ = __bint;
>- }
>-
>- // Initialize _S_bin and its members.
>- void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
>- _S_bin = static_cast<_Bin_record*>(__v);
>-
>- // If __gthread_active_p() create and initialize the list of
>- // free thread ids. Single threaded applications use thread id 0
>- // directly and have no need for this.
>- #ifdef __GTHREADS
>- if (__gthread_active_p())
>- {
>- const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
>- __v = ::operator new(__k);
>- _S_thread_freelist_first = static_cast<_Thread_record*>(__v);
>-
>- // NOTE! The first assignable thread id is 1 since the
>- // global pool uses id 0
>- size_t __i;
>- for (__i = 1; __i < _S_options._M_max_threads; ++__i)
>- {
>- _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
>- __tr._M_next = &_S_thread_freelist_first[__i];
>- __tr._M_id = __i;
>- }
>-
>- // Set last record.
>- _S_thread_freelist_first[__i - 1]._M_next = NULL;
>- _S_thread_freelist_first[__i - 1]._M_id = __i;
>-
>- // Make sure this is initialized.
>- #ifndef __GTHREAD_MUTEX_INIT
>- __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
>- #endif
>- // Initialize per thread key to hold pointer to
>- // _S_thread_freelist.
>- __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);
>-
>- const size_t __max_threads = _S_options._M_max_threads + 1;
>- for (size_t __n = 0; __n < _S_bin_size; ++__n)
>- {
>- _Bin_record& __bin = _S_bin[__n];
>- __v = ::operator new(sizeof(_Block_record*) * __max_threads);
>- __bin._M_first = static_cast<_Block_record**>(__v);
>-
>- __v = ::operator new(sizeof(size_t) * __max_threads);
>- __bin._M_free = static_cast<size_t*>(__v);
>-
>- __v = ::operator new(sizeof(size_t) * __max_threads);
>- __bin._M_used = static_cast<size_t*>(__v);
>-
>- __v = ::operator new(sizeof(__gthread_mutex_t));
>- __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);
>-
>- #ifdef __GTHREAD_MUTEX_INIT
>- {
>- // Do not copy a POSIX/gthr mutex once in use.
>- __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
>- *__bin._M_mutex = __tmp;
>- }
>- #else
>- { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
>- #endif
>-
>- for (size_t __threadn = 0; __threadn < __max_threads;
>- ++__threadn)
>- {
>- __bin._M_first[__threadn] = NULL;
>- __bin._M_free[__threadn] = 0;
>- __bin._M_used[__threadn] = 0;
>- }
>- }
>- }
>- else
>- #endif
>- for (size_t __n = 0; __n < _S_bin_size; ++__n)
>- {
>- _Bin_record& __bin = _S_bin[__n];
>- __v = ::operator new(sizeof(_Block_record*));
>- __bin._M_first = static_cast<_Block_record**>(__v);
>- __bin._M_first[0] = NULL;
>- }
>-
>- _S_init = true;
>- }
>-
>- template<typename _Tp>
>- size_t
>- __mt_alloc<_Tp>::
>- _S_get_thread_id()
>- {
>- #ifdef __GTHREADS
>- // If we have thread support and it's active we check the thread
>- // key value and return its id or if it's not set we take the
>- // first record from _S_thread_freelist and sets the key and
>- // returns it's id.
>- if (__gthread_active_p())
>- {
>- _Thread_record* __freelist_pos =
>- static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
>- if (__freelist_pos == NULL)
>- {
>- // Since _S_options._M_max_threads must be larger than
>- // the theoretical max number of threads of the OS the
>- // list can never be empty.
>- __gthread_mutex_lock(&_S_thread_freelist_mutex);
>- __freelist_pos = _S_thread_freelist_first;
>- _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
>- __gthread_mutex_unlock(&_S_thread_freelist_mutex);
>-
>- __gthread_setspecific(_S_thread_key,
>- static_cast<void*>(__freelist_pos));
>- }
>- return __freelist_pos->_M_id;
>- }
>- #endif
>- // Otherwise (no thread support or inactive) all requests are
>- // served from the global pool 0.
>- return 0;
>- }
>-
>- #ifdef __GTHREADS
>- template<typename _Tp>
>- void
>- __mt_alloc<_Tp>::
>- _S_destroy_thread_key(void* __freelist_pos)
>- {
>- // Return this thread id record to front of thread_freelist.
>- __gthread_mutex_lock(&_S_thread_freelist_mutex);
>- _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
>- __tr->_M_next = _S_thread_freelist_first;
>- _S_thread_freelist_first = __tr;
>- __gthread_mutex_unlock(&_S_thread_freelist_mutex);
>- }
>- #endif
>-
>- template<typename _Tp>
> inline bool
> operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
> { return true; }
>--- 494,499 ----
>*************** namespace __gnu_cxx
>*** 681,722 ****
> inline bool
> operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
> { return false; }
>-
>- template<typename _Tp>
>- bool __mt_alloc<_Tp>::_S_init = false;
>-
>- template<typename _Tp>
>- typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;
>-
>- template<typename _Tp>
>- typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;
>-
>- template<typename _Tp>
>- typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;
>-
>- template<typename _Tp>
>- size_t __mt_alloc<_Tp>::_S_bin_size = 1;
>-
>- // Actual initialization in _S_initialize().
>- #ifdef __GTHREADS
>- template<typename _Tp>
>- __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;
>-
>- template<typename _Tp>
>- typename __mt_alloc<_Tp>::_Thread_record*
>- volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;
>-
>- template<typename _Tp>
>- __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;
>-
>- template<typename _Tp>
>- __gthread_mutex_t
>- #ifdef __GTHREAD_MUTEX_INIT
>- __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
>- #else
>- __mt_alloc<_Tp>::_S_thread_freelist_mutex;
>- #endif
>- #endif
> } // namespace __gnu_cxx
>
> #endif
>--- 502,507 ----
>Index: src/Makefile.am
>===================================================================
>RCS file: /cvs/gcc/gcc/libstdc++-v3/src/Makefile.am,v
>retrieving revision 1.144
>diff -c -p -r1.144 Makefile.am
>*** src/Makefile.am 16 Apr 2004 19:04:05 -0000 1.144
>--- src/Makefile.am 22 Jul 2004 02:55:10 -0000
>*************** basic_file.cc: ${glibcxx_srcdir}/$(BASIC
>*** 96,102 ****
>
> # Sources present in the src directory.
> sources = \
>! allocator.cc \
> codecvt.cc \
> complex_io.cc \
> ctype.cc \
>--- 96,103 ----
>
> # Sources present in the src directory.
> sources = \
>! allocator_pool.cc \
>! allocator_mt.cc \
> codecvt.cc \
> complex_io.cc \
> ctype.cc \
>Index: src/Makefile.in
>===================================================================
>RCS file: /cvs/gcc/gcc/libstdc++-v3/src/Makefile.in,v
>retrieving revision 1.205
>diff -c -p -r1.205 Makefile.in
>*** src/Makefile.in 2 Jul 2004 23:40:18 -0000 1.205
>--- src/Makefile.in 22 Jul 2004 02:55:10 -0000
>*************** am__objects_1 = atomicity.lo codecvt_mem
>*** 58,71 ****
> ctype_members.lo messages_members.lo monetary_members.lo \
> numeric_members.lo time_members.lo
> am__objects_2 = basic_file.lo c++locale.lo
>! am__objects_3 = allocator.lo codecvt.lo complex_io.lo ctype.lo \
>! debug.lo debug_list.lo functexcept.lo globals_locale.lo \
>! globals_io.lo ios.lo ios_failure.lo ios_init.lo ios_locale.lo \
>! limits.lo list.lo locale.lo locale_init.lo locale_facets.lo \
>! localename.lo stdexcept.lo strstream.lo tree.lo \
>! allocator-inst.lo concept-inst.lo fstream-inst.lo ext-inst.lo \
>! io-inst.lo istream-inst.lo locale-inst.lo locale-misc-inst.lo \
>! misc-inst.lo ostream-inst.lo sstream-inst.lo streambuf-inst.lo \
> string-inst.lo valarray-inst.lo wlocale-inst.lo \
> wstring-inst.lo $(am__objects_1) $(am__objects_2)
> am_libstdc___la_OBJECTS = $(am__objects_3)
>--- 58,72 ----
> ctype_members.lo messages_members.lo monetary_members.lo \
> numeric_members.lo time_members.lo
> am__objects_2 = basic_file.lo c++locale.lo
>! am__objects_3 = allocator_pool.lo allocator_mt.lo codecvt.lo \
>! complex_io.lo ctype.lo debug.lo debug_list.lo functexcept.lo \
>! globals_locale.lo globals_io.lo ios.lo ios_failure.lo \
>! ios_init.lo ios_locale.lo limits.lo list.lo locale.lo \
>! locale_init.lo locale_facets.lo localename.lo stdexcept.lo \
>! strstream.lo tree.lo allocator-inst.lo concept-inst.lo \
>! fstream-inst.lo ext-inst.lo io-inst.lo istream-inst.lo \
>! locale-inst.lo locale-misc-inst.lo misc-inst.lo \
>! ostream-inst.lo sstream-inst.lo streambuf-inst.lo \
> string-inst.lo valarray-inst.lo wlocale-inst.lo \
> wstring-inst.lo $(am__objects_1) $(am__objects_2)
> am_libstdc___la_OBJECTS = $(am__objects_3)
>*************** host_sources_extra = \
>*** 295,301 ****
>
> # Sources present in the src directory.
> sources = \
>! allocator.cc \
> codecvt.cc \
> complex_io.cc \
> ctype.cc \
>--- 296,303 ----
>
> # Sources present in the src directory.
> sources = \
>! allocator_pool.cc \
>! allocator_mt.cc \
> codecvt.cc \
> complex_io.cc \
> ctype.cc \
>Index: src/allocator.cc
>===================================================================
>RCS file: src/allocator.cc
>diff -N src/allocator.cc
>*** src/allocator.cc 1 Jul 2004 14:49:29 -0000 1.9
>--- /dev/null 1 Jan 1970 00:00:00 -0000
>***************
>*** 1,169 ****
>- // Allocator details.
>-
>- // Copyright (C) 2004 Free Software Foundation, Inc.
>- //
>- // This file is part of the GNU ISO C++ Library. This library is free
>- // software; you can redistribute it and/or modify it under the
>- // terms of the GNU General Public License as published by the
>- // Free Software Foundation; either version 2, or (at your option)
>- // any later version.
>-
>- // This library is distributed in the hope that it will be useful,
>- // but WITHOUT ANY WARRANTY; without even the implied warranty of
>- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>- // GNU General Public License for more details.
>-
>- // You should have received a copy of the GNU General Public License along
>- // with this library; see the file COPYING. If not, write to the Free
>- // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
>- // USA.
>-
>- // As a special exception, you may use this file as part of a free software
>- // library without restriction. Specifically, if other files instantiate
>- // templates or use macros or inline functions from this file, or you compile
>- // this file and link it with other files to produce an executable, this
>- // file does not by itself cause the resulting executable to be covered by
>- // the GNU General Public License. This exception does not however
>- // invalidate any other reasons why the executable file might be covered by
>- // the GNU General Public License.
>-
>- //
>- // ISO C++ 14882:
>- //
>-
>- #include <bits/c++config.h>
>- #include <memory>
>- #include <ext/mt_allocator.h>
>- #include <ext/pool_allocator.h>
>-
>- namespace __gnu_internal
>- {
>- __glibcxx_mutex_define_initialized(palloc_init_mutex);
>- }
>-
>- namespace __gnu_cxx
>- {
>- // Definitions for __pool_alloc_base.
>- __pool_base::_Obj* volatile*
>- __pool_base::_M_get_free_list(size_t __bytes)
>- {
>- size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
>- return _S_free_list + __i;
>- }
>-
>- mutex_type&
>- __pool_base::_M_get_mutex()
>- { return __gnu_internal::palloc_init_mutex; }
>-
>- // Allocate memory in large chunks in order to avoid fragmenting the
>- // heap too much. Assume that __n is properly aligned. We hold the
>- // allocation lock.
>- char*
>- __pool_base::_M_allocate_chunk(size_t __n, int& __nobjs)
>- {
>- char* __result;
>- size_t __total_bytes = __n * __nobjs;
>- size_t __bytes_left = _S_end_free - _S_start_free;
>-
>- if (__bytes_left >= __total_bytes)
>- {
>- __result = _S_start_free;
>- _S_start_free += __total_bytes;
>- return __result ;
>- }
>- else if (__bytes_left >= __n)
>- {
>- __nobjs = (int)(__bytes_left / __n);
>- __total_bytes = __n * __nobjs;
>- __result = _S_start_free;
>- _S_start_free += __total_bytes;
>- return __result;
>- }
>- else
>- {
>- // Try to make use of the left-over piece.
>- if (__bytes_left > 0)
>- {
>- _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
>- ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
>- *__free_list = (_Obj*)(void*)_S_start_free;
>- }
>-
>- size_t __bytes_to_get = (2 * __total_bytes
>- + _M_round_up(_S_heap_size >> 4));
>- _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
>- if (_S_start_free == 0)
>- {
>- // Try to make do with what we have. That can't hurt. We
>- // do not try smaller requests, since that tends to result
>- // in disaster on multi-process machines.
>- size_t __i = __n;
>- for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
>- {
>- _Obj* volatile* __free_list = _M_get_free_list(__i);
>- _Obj* __p = *__free_list;
>- if (__p != 0)
>- {
>- *__free_list = __p->_M_free_list_link;
>- _S_start_free = (char*)__p;
>- _S_end_free = _S_start_free + __i;
>- return _M_allocate_chunk(__n, __nobjs);
>- // Any leftover piece will eventually make it to the
>- // right free list.
>- }
>- }
>- _S_end_free = 0; // In case of exception.
>-
>- // This should either throw an exception or remedy the situation.
>- // Thus we assume it succeeded.
>- _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
>- }
>- _S_heap_size += __bytes_to_get;
>- _S_end_free = _S_start_free + __bytes_to_get;
>- return _M_allocate_chunk(__n, __nobjs);
>- }
>- }
>-
>- // Returns an object of size __n, and optionally adds to "size
>- // __n"'s free list. We assume that __n is properly aligned. We
>- // hold the allocation lock.
>- void*
>- __pool_base::_M_refill(size_t __n)
>- {
>- int __nobjs = 20;
>- char* __chunk = _M_allocate_chunk(__n, __nobjs);
>- _Obj* volatile* __free_list;
>- _Obj* __result;
>- _Obj* __current_obj;
>- _Obj* __next_obj;
>-
>- if (__nobjs == 1)
>- return __chunk;
>- __free_list = _M_get_free_list(__n);
>-
>- // Build free list in chunk.
>- __result = (_Obj*)(void*)__chunk;
>- *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
>- for (int __i = 1; ; __i++)
>- {
>- __current_obj = __next_obj;
>- __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
>- if (__nobjs - 1 == __i)
>- {
>- __current_obj->_M_free_list_link = 0;
>- break;
>- }
>- else
>- __current_obj->_M_free_list_link = __next_obj;
>- }
>- return __result;
>- }
>-
>- __pool_base::_Obj* volatile __pool_base::_S_free_list[_S_free_list_size];
>-
>- char* __pool_base::_S_start_free = 0;
>-
>- char* __pool_base::_S_end_free = 0;
>-
>- size_t __pool_base::_S_heap_size = 0;
>- } // namespace __gnu_cxx
>--- 0 ----
>Index: src/allocator_mt.cc
>===================================================================
>RCS file: src/allocator_mt.cc
>diff -N src/allocator_mt.cc
>*** /dev/null 1 Jan 1970 00:00:00 -0000
>--- src/allocator_mt.cc 22 Jul 2004 02:55:11 -0000
>***************
>*** 0 ****
>--- 1,245 ----
>+ // Allocator details.
>+
>+ // Copyright (C) 2004 Free Software Foundation, Inc.
>+ //
>+ // This file is part of the GNU ISO C++ Library. This library is free
>+ // software; you can redistribute it and/or modify it under the
>+ // terms of the GNU General Public License as published by the
>+ // Free Software Foundation; either version 2, or (at your option)
>+ // any later version.
>+
>+ // This library is distributed in the hope that it will be useful,
>+ // but WITHOUT ANY WARRANTY; without even the implied warranty of
>+ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>+ // GNU General Public License for more details.
>+
>+ // You should have received a copy of the GNU General Public License along
>+ // with this library; see the file COPYING. If not, write to the Free
>+ // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
>+ // USA.
>+
>+ // As a special exception, you may use this file as part of a free software
>+ // library without restriction. Specifically, if other files instantiate
>+ // templates or use macros or inline functions from this file, or you compile
>+ // this file and link it with other files to produce an executable, this
>+ // file does not by itself cause the resulting executable to be covered by
>+ // the GNU General Public License. This exception does not however
>+ // invalidate any other reasons why the executable file might be covered by
>+ // the GNU General Public License.
>+
>+ //
>+ // ISO C++ 14882:
>+ //
>+
>+ #include <bits/c++config.h>
>+ #include <memory>
>+ #include <ext/mt_allocator.h>
>+
>+ namespace __gnu_cxx
>+ {
>+ void
>+ __mt_base::_S_initialize()
>+ {
>+ // This method is called on the first allocation (when _S_init is still
>+ // false) to create the bins.
>+
>+ // Ensure that the static initialization of _S_options has
>+ // happened. This depends on (a) _M_align == 0 being an invalid
>+ // value that is only present at startup, and (b) the real
>+ // static initialization that happens later not actually
>+ // changing anything.
>+ if (_S_options._M_align == 0)
>+ new (&_S_options) _Tune;
>+
>+ // _M_force_new must not change after the first allocate(),
>+ // which in turn calls this method, so if it's false, it's false
>+ // forever and we don't need to return here ever again.
>+ if (_S_options._M_force_new)
>+ {
>+ _S_init = true;
>+ return;
>+ }
>+
>+ // Calculate the number of bins required based on _M_max_bytes.
>+ // _S_bin_size is statically-initialized to one.
>+ size_t __bin_size = _S_options._M_min_bin;
>+ while (_S_options._M_max_bytes > __bin_size)
>+ {
>+ __bin_size <<= 1;
>+ ++_S_bin_size;
>+ }
>+
>+ // Setup the bin map for quick lookup of the relevant bin.
>+ const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
>+ _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));
>+
>+ _Binmap_type* __bp = _S_binmap;
>+ _Binmap_type __bin_max = _S_options._M_min_bin;
>+ _Binmap_type __bint = 0;
>+ for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
>+ {
>+ if (__ct > __bin_max)
>+ {
>+ __bin_max <<= 1;
>+ ++__bint;
>+ }
>+ *__bp++ = __bint;
>+ }
>+
>+ // Initialize _S_bin and its members.
>+ void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
>+ _S_bin = static_cast<_Bin_record*>(__v);
>+
>+ // If __gthread_active_p() create and initialize the list of
>+ // free thread ids. Single threaded applications use thread id 0
>+ // directly and have no need for this.
>+ #ifdef __GTHREADS
>+ if (__gthread_active_p())
>+ {
>+ const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
>+ __v = ::operator new(__k);
>+ _S_thread_freelist_first = static_cast<_Thread_record*>(__v);
>+
>+ // NOTE! The first assignable thread id is 1 since the
>+ // global pool uses id 0.
>+ size_t __i;
>+ for (__i = 1; __i < _S_options._M_max_threads; ++__i)
>+ {
>+ _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
>+ __tr._M_next = &_S_thread_freelist_first[__i];
>+ __tr._M_id = __i;
>+ }
>+
>+ // Set last record.
>+ _S_thread_freelist_first[__i - 1]._M_next = NULL;
>+ _S_thread_freelist_first[__i - 1]._M_id = __i;
>+
>+ // Make sure this is initialized.
>+ #ifndef __GTHREAD_MUTEX_INIT
>+ __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
>+ #endif
>+ // Initialize per thread key to hold pointer to
>+ // _S_thread_freelist.
>+ __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);
>+
>+ const size_t __max_threads = _S_options._M_max_threads + 1;
>+ for (size_t __n = 0; __n < _S_bin_size; ++__n)
>+ {
>+ _Bin_record& __bin = _S_bin[__n];
>+ __v = ::operator new(sizeof(_Block_record*) * __max_threads);
>+ __bin._M_first = static_cast<_Block_record**>(__v);
>+
>+ __v = ::operator new(sizeof(size_t) * __max_threads);
>+ __bin._M_free = static_cast<size_t*>(__v);
>+
>+ __v = ::operator new(sizeof(size_t) * __max_threads);
>+ __bin._M_used = static_cast<size_t*>(__v);
>+
>+ __v = ::operator new(sizeof(__gthread_mutex_t));
>+ __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);
>+
>+ #ifdef __GTHREAD_MUTEX_INIT
>+ {
>+ // Do not copy a POSIX/gthr mutex once in use.
>+ __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
>+ *__bin._M_mutex = __tmp;
>+ }
>+ #else
>+ { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
>+ #endif
>+
>+ for (size_t __threadn = 0; __threadn < __max_threads;
>+ ++__threadn)
>+ {
>+ __bin._M_first[__threadn] = NULL;
>+ __bin._M_free[__threadn] = 0;
>+ __bin._M_used[__threadn] = 0;
>+ }
>+ }
>+ }
>+ else
>+ #endif
>+ for (size_t __n = 0; __n < _S_bin_size; ++__n)
>+ {
>+ _Bin_record& __bin = _S_bin[__n];
>+ __v = ::operator new(sizeof(_Block_record*));
>+ __bin._M_first = static_cast<_Block_record**>(__v);
>+ __bin._M_first[0] = NULL;
>+ }
>+
>+ _S_init = true;
>+ }
>+
>+ size_t
>+ __mt_base::_S_get_thread_id()
>+ {
>+ #ifdef __GTHREADS
>+ // If we have thread support and it's active, we check the thread
>+ // key value and return its id; if it's not set, we take the
>+ // first record from _S_thread_freelist, set the key, and
>+ // return its id.
>+ if (__gthread_active_p())
>+ {
>+ _Thread_record* __freelist_pos =
>+ static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
>+ if (__freelist_pos == NULL)
>+ {
>+ // Since _S_options._M_max_threads must be larger than
>+ // the theoretical max number of threads of the OS, the
>+ // list can never be empty.
>+ __gthread_mutex_lock(&_S_thread_freelist_mutex);
>+ __freelist_pos = _S_thread_freelist_first;
>+ _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
>+ __gthread_mutex_unlock(&_S_thread_freelist_mutex);
>+
>+ __gthread_setspecific(_S_thread_key,
>+ static_cast<void*>(__freelist_pos));
>+ }
>+ return __freelist_pos->_M_id;
>+ }
>+ #endif
>+ // Otherwise (no thread support or inactive) all requests are
>+ // served from the global pool 0.
>+ return 0;
>+ }
>+
>+ #ifdef __GTHREADS
>+ void
>+ __mt_base::_S_destroy_thread_key(void* __freelist_pos)
>+ {
>+ // Return this thread id record to front of thread_freelist.
>+ __gthread_mutex_lock(&_S_thread_freelist_mutex);
>+ _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
>+ __tr->_M_next = _S_thread_freelist_first;
>+ _S_thread_freelist_first = __tr;
>+ __gthread_mutex_unlock(&_S_thread_freelist_mutex);
>+ }
>+ #endif
>+
>+ bool __mt_base::_S_init = false;
>+
>+ __mt_base::_Tune __mt_base::_S_options;
>+
>+ __mt_base::_Binmap_type* __mt_base::_S_binmap;
>+
>+ __mt_base::_Bin_record* volatile __mt_base::_S_bin;
>+
>+ size_t __mt_base::_S_bin_size = 1;
>+
>+ // Actual initialization in _S_initialize().
>+ #ifdef __GTHREADS
>+ __gthread_once_t __mt_base::_S_once = __GTHREAD_ONCE_INIT;
>+
>+ __mt_base::_Thread_record*
>+ volatile __mt_base::_S_thread_freelist_first = NULL;
>+
>+ __gthread_key_t __mt_base::_S_thread_key;
>+
>+ __gthread_mutex_t
>+ #ifdef __GTHREAD_MUTEX_INIT
>+ __mt_base::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
>+ #else
>+ __mt_base::_S_thread_freelist_mutex;
>+ #endif
>+ #endif
>+ } // namespace __gnu_cxx
>Index: src/allocator_pool.cc
>===================================================================
>RCS file: src/allocator_pool.cc
>diff -N src/allocator_pool.cc
>*** /dev/null 1 Jan 1970 00:00:00 -0000
>--- src/allocator_pool.cc 22 Jul 2004 02:55:11 -0000
>***************
>*** 0 ****
>--- 1,168 ----
>+ // Allocator details.
>+
>+ // Copyright (C) 2004 Free Software Foundation, Inc.
>+ //
>+ // This file is part of the GNU ISO C++ Library. This library is free
>+ // software; you can redistribute it and/or modify it under the
>+ // terms of the GNU General Public License as published by the
>+ // Free Software Foundation; either version 2, or (at your option)
>+ // any later version.
>+
>+ // This library is distributed in the hope that it will be useful,
>+ // but WITHOUT ANY WARRANTY; without even the implied warranty of
>+ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>+ // GNU General Public License for more details.
>+
>+ // You should have received a copy of the GNU General Public License along
>+ // with this library; see the file COPYING. If not, write to the Free
>+ // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
>+ // USA.
>+
>+ // As a special exception, you may use this file as part of a free software
>+ // library without restriction. Specifically, if other files instantiate
>+ // templates or use macros or inline functions from this file, or you compile
>+ // this file and link it with other files to produce an executable, this
>+ // file does not by itself cause the resulting executable to be covered by
>+ // the GNU General Public License. This exception does not however
>+ // invalidate any other reasons why the executable file might be covered by
>+ // the GNU General Public License.
>+
>+ //
>+ // ISO C++ 14882:
>+ //
>+
>+ #include <bits/c++config.h>
>+ #include <memory>
>+ #include <ext/pool_allocator.h>
>+
>+ namespace __gnu_internal
>+ {
>+ __glibcxx_mutex_define_initialized(palloc_init_mutex);
>+ }
>+
>+ namespace __gnu_cxx
>+ {
>+ // Definitions for __pool_alloc_base.
>+ __pool_base::_Obj* volatile*
>+ __pool_base::_M_get_free_list(size_t __bytes)
>+ {
>+ size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
>+ return _S_free_list + __i;
>+ }
>+
>+ mutex_type&
>+ __pool_base::_M_get_mutex()
>+ { return __gnu_internal::palloc_init_mutex; }
>+
>+ // Allocate memory in large chunks in order to avoid fragmenting the
>+ // heap too much. Assume that __n is properly aligned. We hold the
>+ // allocation lock.
>+ char*
>+ __pool_base::_M_allocate_chunk(size_t __n, int& __nobjs)
>+ {
>+ char* __result;
>+ size_t __total_bytes = __n * __nobjs;
>+ size_t __bytes_left = _S_end_free - _S_start_free;
>+
>+ if (__bytes_left >= __total_bytes)
>+ {
>+ __result = _S_start_free;
>+ _S_start_free += __total_bytes;
>+ return __result ;
>+ }
>+ else if (__bytes_left >= __n)
>+ {
>+ __nobjs = (int)(__bytes_left / __n);
>+ __total_bytes = __n * __nobjs;
>+ __result = _S_start_free;
>+ _S_start_free += __total_bytes;
>+ return __result;
>+ }
>+ else
>+ {
>+ // Try to make use of the left-over piece.
>+ if (__bytes_left > 0)
>+ {
>+ _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
>+ ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
>+ *__free_list = (_Obj*)(void*)_S_start_free;
>+ }
>+
>+ size_t __bytes_to_get = (2 * __total_bytes
>+ + _M_round_up(_S_heap_size >> 4));
>+ _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
>+ if (_S_start_free == 0)
>+ {
>+ // Try to make do with what we have. That can't hurt. We
>+ // do not try smaller requests, since that tends to result
>+ // in disaster on multi-process machines.
>+ size_t __i = __n;
>+ for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
>+ {
>+ _Obj* volatile* __free_list = _M_get_free_list(__i);
>+ _Obj* __p = *__free_list;
>+ if (__p != 0)
>+ {
>+ *__free_list = __p->_M_free_list_link;
>+ _S_start_free = (char*)__p;
>+ _S_end_free = _S_start_free + __i;
>+ return _M_allocate_chunk(__n, __nobjs);
>+ // Any leftover piece will eventually make it to the
>+ // right free list.
>+ }
>+ }
>+ _S_end_free = 0; // In case of exception.
>+
>+ // This should either throw an exception or remedy the situation.
>+ // Thus we assume it succeeded.
>+ _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
>+ }
>+ _S_heap_size += __bytes_to_get;
>+ _S_end_free = _S_start_free + __bytes_to_get;
>+ return _M_allocate_chunk(__n, __nobjs);
>+ }
>+ }
>+
>+ // Returns an object of size __n, and optionally adds to "size
>+ // __n"'s free list. We assume that __n is properly aligned. We
>+ // hold the allocation lock.
>+ void*
>+ __pool_base::_M_refill(size_t __n)
>+ {
>+ int __nobjs = 20;
>+ char* __chunk = _M_allocate_chunk(__n, __nobjs);
>+ _Obj* volatile* __free_list;
>+ _Obj* __result;
>+ _Obj* __current_obj;
>+ _Obj* __next_obj;
>+
>+ if (__nobjs == 1)
>+ return __chunk;
>+ __free_list = _M_get_free_list(__n);
>+
>+ // Build free list in chunk.
>+ __result = (_Obj*)(void*)__chunk;
>+ *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
>+ for (int __i = 1; ; __i++)
>+ {
>+ __current_obj = __next_obj;
>+ __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
>+ if (__nobjs - 1 == __i)
>+ {
>+ __current_obj->_M_free_list_link = 0;
>+ break;
>+ }
>+ else
>+ __current_obj->_M_free_list_link = __next_obj;
>+ }
>+ return __result;
>+ }
>+
>+ __pool_base::_Obj* volatile __pool_base::_S_free_list[_S_free_list_size];
>+
>+ char* __pool_base::_S_start_free = 0;
>+
>+ char* __pool_base::_S_end_free = 0;
>+
>+ size_t __pool_base::_S_heap_size = 0;
>+ } // namespace __gnu_cxx
--
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16614
^ permalink raw reply [flat|nested] 10+ messages in thread