fixed_pool_eastl.h
/*
Copyright (C) 2005,2009-2010 Electronic Arts, Inc.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1.  Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
2.  Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
3.  Neither the name of Electronic Arts, Inc. ("EA") nor the names of
    its contributors may be used to endorse or promote products derived
    from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

///////////////////////////////////////////////////////////////////////////////
// EASTL/internal/fixed_pool.h
// Written and maintained by Paul Pedriana - 2005.
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////
// This file implements the following:
//     aligned_buffer
//     fixed_pool_base
//     fixed_pool
//     fixed_pool_with_overflow
//     fixed_node_allocator
//     fixed_hashtable_allocator
//     fixed_vector_allocator
//     fixed_swap
///////////////////////////////////////////////////////////////////////////////


#ifndef EASTL_INTERNAL_FIXED_POOL_H
#define EASTL_INTERNAL_FIXED_POOL_H


#include <stk_util/util/config_eastl.h>
#include <stk_util/util/functional_eastl.h>
#include <stk_util/util/memory_eastl.h>
#include <stk_util/util/allocator_eastl.h>
#include <stk_util/util/type_traits_eastl.h>

#ifdef _MSC_VER
    #pragma warning(push, 0)
    #include <new>
    #pragma warning(pop)
#else
    #include <new>
#endif


namespace eastl
{

    #ifndef EASTL_FIXED_POOL_DEFAULT_NAME
        #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
    #endif



    ///////////////////////////////////////////////////////////////////////////
    // aligned_buffer
    ///////////////////////////////////////////////////////////////////////////

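    // aligned_buffer provides raw character storage with the size and alignment
    // of a given type, so that an object can be constructed in it later with
    // placement new. A brief usage sketch (Widget is a hypothetical user type):
    //
    //     struct Widget { int mValue; };
    //
    //     // Storage with the size and alignment of Widget.
    //     eastl::aligned_buffer<sizeof(Widget), EASTL_ALIGN_OF(Widget)> widgetBuffer;
    //
    //     Widget* const pWidget = new(widgetBuffer.buffer) Widget; // Construct in place.
    //     pWidget->~Widget();                                      // Destroy explicitly; there is no delete.
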
    typedef char EASTL_MAY_ALIAS aligned_buffer_char;

    template <size_t size, size_t alignment>
    struct aligned_buffer { aligned_buffer_char buffer[size]; };

    template<size_t size>
    struct aligned_buffer<size, 2>    { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); };

    template<size_t size>
    struct aligned_buffer<size, 4>    { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); };

    template<size_t size>
    struct aligned_buffer<size, 8>    { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); };

    template<size_t size>
    struct aligned_buffer<size, 16>   { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); };

    template<size_t size>
    struct aligned_buffer<size, 32>   { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); };

    template<size_t size>
    struct aligned_buffer<size, 64>   { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); };

    template<size_t size>
    struct aligned_buffer<size, 128>  { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); };

    #if !defined(EA_PLATFORM_PSP) // The PSP compiler fails to compile alignments >= 256 and gives an error.

        template<size_t size>
        struct aligned_buffer<size, 256>  { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); };

        template<size_t size>
        struct aligned_buffer<size, 512>  { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); };

        template<size_t size>
        struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };

        template<size_t size>
        struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };

        template<size_t size>
        struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };

    #endif // EA_PLATFORM_PSP


    ///////////////////////////////////////////////////////////////////////////
    // fixed_pool_base
    ///////////////////////////////////////////////////////////////////////////

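    // fixed_pool_base is the common base class of fixed_pool and
    // fixed_pool_with_overflow. It manages a user-supplied block of memory as a
    // free list of previously returned nodes (mpHead) plus a bump-pointer range
    // of never-yet-allocated nodes (mpNext up to mpCapacity).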
    struct EASTL_API fixed_pool_base
    {
    public:
        fixed_pool_base(void* pMemory = NULL)
            : mpHead((Link*)pMemory)
            , mpNext((Link*)pMemory)
            , mpCapacity((Link*)pMemory)
            #if EASTL_DEBUG
            , mnNodeSize(0) // This is normally set in the init function.
            #endif
        {
            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                mnCurrentSize = 0;
                mnPeakSize    = 0;
            #endif
        }


        fixed_pool_base& operator=(const fixed_pool_base&)
        {
            // By design we do nothing. We don't attempt to deep-copy member data.
            return *this;
        }


        void init(void* pMemory, size_t memorySize, size_t nodeSize,
                  size_t alignment, size_t alignmentOffset = 0);


        size_t peak_size() const
        {
            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                return mnPeakSize;
            #else
                return 0;
            #endif
        }


        bool can_allocate() const
        {
            return (mpHead != NULL) || (mpNext != mpCapacity);
        }

    public:
        struct Link
        {
            Link* mpNext;
        };

        Link*  mpHead;
        Link*  mpNext;
        Link*  mpCapacity;
        size_t mnNodeSize;

        #if EASTL_FIXED_SIZE_TRACKING_ENABLED
            uint32_t mnCurrentSize;
            uint32_t mnPeakSize;
        #endif

    }; // fixed_pool_base




    ///////////////////////////////////////////////////////////////////////////
    // fixed_pool
    ///////////////////////////////////////////////////////////////////////////

    class EASTL_API fixed_pool : public fixed_pool_base
    {
    public:
        fixed_pool(void* pMemory = NULL)
            : fixed_pool_base(pMemory)
        {
        }


        fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
                   size_t alignment, size_t alignmentOffset = 0)
        {
            init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
        }


        fixed_pool& operator=(const fixed_pool&)
        {
            // By design we do nothing. We don't attempt to deep-copy member data.
            return *this;
        }


        void* allocate()
        {
            Link* pLink = mpHead;

            if(pLink) // If we have space...
            {
                #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                    if(++mnCurrentSize > mnPeakSize)
                        mnPeakSize = mnCurrentSize;
                #endif

                mpHead = pLink->mpNext;
                return pLink;
            }
            else
            {
                // If there's no free node in the free list, just
                // allocate another from the reserved memory area.

                if(mpNext != mpCapacity)
                {
                    pLink = mpNext;

                    mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);

                    #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                        if(++mnCurrentSize > mnPeakSize)
                            mnPeakSize = mnCurrentSize;
                    #endif

                    return pLink;
                }

                // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error.
                return NULL;
            }
        }


        void deallocate(void* p)
        {
            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                --mnCurrentSize;
            #endif

            ((Link*)p)->mpNext = mpHead;
            mpHead = ((Link*)p);
        }


        using fixed_pool_base::can_allocate;


        const char* get_name() const
        {
            return EASTL_FIXED_POOL_DEFAULT_NAME;
        }


        void set_name(const char*)
        {
            // Nothing to do. We don't allocate memory.
        }

    }; // fixed_pool
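
    // A minimal usage sketch (Widget is a hypothetical type; the pool hands out
    // raw node memory, so construction uses placement new):
    //
    //     char buffer[32 * sizeof(Widget)];
    //     eastl::fixed_pool pool(buffer, sizeof(buffer), sizeof(Widget), EASTL_ALIGN_OF(Widget));
    //
    //     void* const pMemory = pool.allocate();       // Returns NULL when the pool is exhausted.
    //     Widget* const pWidget = new(pMemory) Widget; // Construct in place.
    //     pWidget->~Widget();
    //     pool.deallocate(pMemory);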




    ///////////////////////////////////////////////////////////////////////////
    // fixed_pool_with_overflow
    ///////////////////////////////////////////////////////////////////////////

    template <typename Allocator = EASTLAllocatorType>
    class fixed_pool_with_overflow : public fixed_pool_base
    {
    public:
        fixed_pool_with_overflow(void* pMemory = NULL)
            : fixed_pool_base(pMemory),
              mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
        {
            // Leave mpPoolBegin, mpPoolEnd uninitialized.
        }


        fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
                                 size_t alignment, size_t alignmentOffset = 0)
            : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
        {
            fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

            mpPoolBegin = pMemory;
        }


        fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x)
        {
            #if EASTL_ALLOCATOR_COPY_ENABLED
                mOverflowAllocator = x.mOverflowAllocator;
            #else
                (void)x;
            #endif

            return *this;
        }


        void init(void* pMemory, size_t memorySize, size_t nodeSize,
                  size_t alignment, size_t alignmentOffset = 0)
        {
            fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);

            mpPoolBegin = pMemory;
        }


        void* allocate()
        {
            void* p     = NULL;
            Link* pLink = mpHead;

            if(pLink)
            {
                // Unlink from chain
                p      = pLink;
                mpHead = pLink->mpNext;
            }
            else
            {
                // If there's no free node in the free list, just
                // allocate another from the reserved memory area.

                if(mpNext != mpCapacity)
                {
                    p = pLink = mpNext;
                    mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
                }
                else
                    p = mOverflowAllocator.allocate(mnNodeSize);
            }

            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                if(p && (++mnCurrentSize > mnPeakSize))
                    mnPeakSize = mnCurrentSize;
            #endif

            return p;
        }


        void deallocate(void* p)
        {
            #if EASTL_FIXED_SIZE_TRACKING_ENABLED
                --mnCurrentSize;
            #endif

            if((p >= mpPoolBegin) && (p < mpCapacity))
            {
                ((Link*)p)->mpNext = mpHead;
                mpHead = ((Link*)p);
            }
            else
                mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
        }


        using fixed_pool_base::can_allocate;


        const char* get_name() const
        {
            return mOverflowAllocator.get_name();
        }


        void set_name(const char* pName)
        {
            mOverflowAllocator.set_name(pName);
        }

    public:
        Allocator mOverflowAllocator;
        void*     mpPoolBegin; // Ideally we wouldn't need this member variable. The problem is that the information
                               // about the pool buffer and object size is stored in the owning container and we
                               // can't get access to it without increasing the amount of code we need and without
                               // templating more code. It may turn out that simply storing data here is smaller in the end.

    }; // fixed_pool_with_overflow
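
    // A minimal usage sketch: allocations are served from the fixed buffer until
    // it is exhausted, then fall through to the overflow allocator instead of
    // returning NULL; deallocate() routes by address.
    //
    //     char buffer[8 * sizeof(Widget)]; // Widget is a hypothetical type.
    //     eastl::fixed_pool_with_overflow<> pool(buffer, sizeof(buffer), sizeof(Widget), EASTL_ALIGN_OF(Widget));
    //
    //     void* const p = pool.allocate(); // From the buffer, or the overflow allocator once full.
    //     pool.deallocate(p);              // Routed to the pool or the overflow allocator by address.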




    ///////////////////////////////////////////////////////////////////////////
    // fixed_node_allocator
    ///////////////////////////////////////////////////////////////////////////

    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
    class fixed_node_allocator
    {
    public:
        typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type pool_type;
        typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
        typedef Allocator overflow_allocator_type;

        enum
        {
            kNodeSize            = nodeSize,
            kNodeCount           = nodeCount,
            kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
            kNodeAlignment       = nodeAlignment,
            kNodeAlignmentOffset = nodeAlignmentOffset
        };

    public:
        pool_type mPool;

    public:
        //fixed_node_allocator(const char* pName)
        //{
        //    mPool.set_name(pName);
        //}


        fixed_node_allocator(void* pNodeBuffer)
            : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
        {
        }


        fixed_node_allocator(const this_type& x)
            : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
        {
            // Problem: how do we copy mPool.mOverflowAllocator if mPool is fixed_pool_with_overflow?
            // Probably we should use mPool = x.mPool, though it seems a little odd to do so after
            // doing the copying above.
            mPool = x.mPool;
        }


        this_type& operator=(const this_type& x)
        {
            mPool = x.mPool;
            return *this;
        }


        void* allocate(size_t n, int /*flags*/ = 0)
        {
            (void)n;
            EASTL_ASSERT(n == kNodeSize);
            return mPool.allocate();
        }


        void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
        {
            (void)n;
            EASTL_ASSERT(n == kNodeSize);
            return mPool.allocate();
        }


        void deallocate(void* p, size_t)
        {
            mPool.deallocate(p);
        }


        bool can_allocate() const
        {
            return mPool.can_allocate();
        }


        void reset(void* pNodeBuffer)
        {
            mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
        }


        const char* get_name() const
        {
            return mPool.get_name();
        }


        void set_name(const char* pName)
        {
            mPool.set_name(pName);
        }


        overflow_allocator_type& get_overflow_allocator()
        {
            return mPool.mOverflowAllocator;
        }


        void set_overflow_allocator(const overflow_allocator_type& allocator)
        {
            mPool.mOverflowAllocator = allocator;
        }

    }; // fixed_node_allocator
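
    // A usage sketch (ListNode stands in for a container's internal node type;
    // in practice the fixed containers instantiate this allocator themselves):
    //
    //     typedef eastl::fixed_node_allocator<sizeof(ListNode), 64,
    //                 EASTL_ALIGN_OF(ListNode), 0, true> node_allocator_type;
    //
    //     eastl::aligned_buffer<node_allocator_type::kBufferSize, EASTL_ALIGN_OF(ListNode)> buffer;
    //     node_allocator_type allocator(buffer.buffer);
    //
    //     void* const p = allocator.allocate(sizeof(ListNode));
    //     allocator.deallocate(p, sizeof(ListNode));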

    // This is a near copy of the code above, with the only difference being
    // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
    // and the get_overflow_allocator / set_overflow_allocator functions.
    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
    class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
    {
    public:
        typedef fixed_pool pool_type;
        typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
        typedef Allocator overflow_allocator_type;

        enum
        {
            kNodeSize            = nodeSize,
            kNodeCount           = nodeCount,
            kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
            kNodeAlignment       = nodeAlignment,
            kNodeAlignmentOffset = nodeAlignmentOffset
        };

    public:
        pool_type mPool;

    public:
        fixed_node_allocator(void* pNodeBuffer)
            : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
        {
        }


        fixed_node_allocator(const this_type& x)
            : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
        {
        }


        this_type& operator=(const this_type& x)
        {
            mPool = x.mPool;
            return *this;
        }


        void* allocate(size_t n, int /*flags*/ = 0)
        {
            (void)n;
            EASTL_ASSERT(n == kNodeSize);
            return mPool.allocate();
        }


        void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
        {
            (void)n;
            EASTL_ASSERT(n == kNodeSize);
            return mPool.allocate();
        }


        void deallocate(void* p, size_t)
        {
            mPool.deallocate(p);
        }


        bool can_allocate() const
        {
            return mPool.can_allocate();
        }


        void reset(void* pNodeBuffer)
        {
            mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
        }


        const char* get_name() const
        {
            return mPool.get_name();
        }


        void set_name(const char* pName)
        {
            mPool.set_name(pName);
        }


        overflow_allocator_type& get_overflow_allocator()
        {
            EASTL_ASSERT(false);
            return *(overflow_allocator_type*)NULL; // This is not pretty.
        }


        void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
        {
            // We don't have an overflow allocator.
            EASTL_ASSERT(false);
        }

    }; // fixed_node_allocator



    ///////////////////////////////////////////////////////////////////////////
    // global operators
    ///////////////////////////////////////////////////////////////////////////

    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
    inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                           const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
    {
        return (&a == &b); // They are only equal if they are the same object.
    }


    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
    inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                           const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
    {
        return (&a != &b); // They are only equal if they are the same object.
    }





    ///////////////////////////////////////////////////////////////////////////
    // fixed_hashtable_allocator
    ///////////////////////////////////////////////////////////////////////////

    template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
    class fixed_hashtable_allocator
    {
    public:
        typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type pool_type;
        typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
        typedef Allocator overflow_allocator_type;

        enum
        {
            kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
            kBucketsSize         = bucketCount * sizeof(void*),
            kNodeSize            = nodeSize,
            kNodeCount           = nodeCount,
            kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // No need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
            kNodeAlignment       = nodeAlignment,
            kNodeAlignmentOffset = nodeAlignmentOffset,
            kAllocFlagBuckets    = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
        };

    protected:
        pool_type mPool;
        void*     mpBucketBuffer;

    public:
        //fixed_hashtable_allocator(const char* pName)
        //{
        //    mPool.set_name(pName);
        //}

        fixed_hashtable_allocator(void* pNodeBuffer)
            : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
              mpBucketBuffer(NULL)
        {
            // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
        }


        fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
            : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
              mpBucketBuffer(pBucketBuffer)
        {
        }


        fixed_hashtable_allocator(const this_type& x)
            : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
              mpBucketBuffer(x.mpBucketBuffer)
        {
            // Problem: how do we copy mPool.mOverflowAllocator if mPool is fixed_pool_with_overflow?
            // Probably we should use mPool = x.mPool, though it seems a little odd to do so after
            // doing the copying above.
            mPool = x.mPool;
        }


        fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
        {
            mPool = x.mPool;
            return *this; // Do nothing. Ignore the source type.
        }


        void* allocate(size_t n, int flags = 0)
        {
            // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
            EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
            if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
            {
                EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
                return mPool.allocate();
            }

            EASTL_ASSERT(n <= kBucketsSize);
            return mpBucketBuffer;
        }


        void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
        {
            // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
            if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
            {
                EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
                return mPool.allocate();
            }

            // To consider: allow for bucket allocations to overflow.
            EASTL_ASSERT(n <= kBucketsSize);
            return mpBucketBuffer;
        }


        void deallocate(void* p, size_t)
        {
            if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
                mPool.deallocate(p);
        }


        bool can_allocate() const
        {
            return mPool.can_allocate();
        }


        void reset(void* pNodeBuffer)
        {
            // No need to modify mpBucketBuffer, as that is constant.
            mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
        }


        const char* get_name() const
        {
            return mPool.get_name();
        }


        void set_name(const char* pName)
        {
            mPool.set_name(pName);
        }


        overflow_allocator_type& get_overflow_allocator()
        {
            return mPool.mOverflowAllocator;
        }


        void set_overflow_allocator(const overflow_allocator_type& allocator)
        {
            mPool.mOverflowAllocator = allocator;
        }

    }; // fixed_hashtable_allocator
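
    // How the flag routing works: the fixed hash containers pass kAllocFlagBuckets
    // in 'flags' when they need their bucket array, which is served from the
    // separate mpBucketBuffer; every other request is treated as a node allocation
    // and served from mPool. A sketch of the calls a container effectively makes
    // (bucketByteCount is a hypothetical value computed by the container):
    //
    //     void* pBuckets = allocator.allocate(bucketByteCount, kAllocFlagBuckets);
    //     void* pNode    = allocator.allocate(kNodeSize); // No flag: a node from the pool.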

    // This is a near copy of the code above, with the only difference being
    // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
    // and the get_overflow_allocator / set_overflow_allocator functions.
    template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
    class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
    {
    public:
        typedef fixed_pool pool_type;
        typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
        typedef Allocator overflow_allocator_type;

        enum
        {
            kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
            kBucketsSize         = bucketCount * sizeof(void*),
            kNodeSize            = nodeSize,
            kNodeCount           = nodeCount,
            kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // No need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
            kNodeAlignment       = nodeAlignment,
            kNodeAlignmentOffset = nodeAlignmentOffset,
            kAllocFlagBuckets    = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
        };

    protected:
        pool_type mPool;
        void*     mpBucketBuffer;

    public:
        //fixed_hashtable_allocator(const char* pName)
        //{
        //    mPool.set_name(pName);
        //}

        fixed_hashtable_allocator(void* pNodeBuffer)
            : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
              mpBucketBuffer(NULL)
        {
            // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
        }


        fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
            : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
              mpBucketBuffer(pBucketBuffer)
        {
        }


        fixed_hashtable_allocator(const this_type& x)
            : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
              mpBucketBuffer(x.mpBucketBuffer)
        {
        }


        fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
        {
            mPool = x.mPool;
            return *this; // Do nothing. Ignore the source type.
        }


        void* allocate(size_t n, int flags = 0)
        {
            // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
            EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
            if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
            {
                EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
                return mPool.allocate();
            }

            EASTL_ASSERT(n <= kBucketsSize);
            return mpBucketBuffer;
        }


        void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
        {
            // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
            if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
            {
                EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
                return mPool.allocate();
            }

            // To consider: allow for bucket allocations to overflow.
            EASTL_ASSERT(n <= kBucketsSize);
            return mpBucketBuffer;
        }


        void deallocate(void* p, size_t)
        {
            if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
                mPool.deallocate(p);
        }


        bool can_allocate() const
        {
            return mPool.can_allocate();
        }


        void reset(void* pNodeBuffer)
        {
            // No need to modify mpBucketBuffer, as that is constant.
            mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
        }


        const char* get_name() const
        {
            return mPool.get_name();
        }


        void set_name(const char* pName)
        {
            mPool.set_name(pName);
        }


        overflow_allocator_type& get_overflow_allocator()
        {
            EASTL_ASSERT(false);
            return *(overflow_allocator_type*)NULL; // This is not pretty.
        }

        void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
        {
            // We don't have an overflow allocator.
            EASTL_ASSERT(false);
        }

    }; // fixed_hashtable_allocator


    ///////////////////////////////////////////////////////////////////////////
    // global operators
    ///////////////////////////////////////////////////////////////////////////

    template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
    inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                           const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
    {
        return (&a == &b); // They are only equal if they are the same object.
    }


    template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
    inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                           const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
    {
        return (&a != &b); // They are only equal if they are the same object.
    }





    ///////////////////////////////////////////////////////////////////////////
    // fixed_vector_allocator
    ///////////////////////////////////////////////////////////////////////////

    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
    class fixed_vector_allocator
    {
    public:
        typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator> this_type;
        typedef Allocator overflow_allocator_type;

        enum
        {
            kNodeSize            = nodeSize,
            kNodeCount           = nodeCount,
            kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
            kNodeAlignment       = nodeAlignment,
            kNodeAlignmentOffset = nodeAlignmentOffset
        };

    public:
        overflow_allocator_type mOverflowAllocator;
        void*                   mpPoolBegin; // To consider: Find some way to make this data unnecessary, without increasing template proliferation.

    public:
        //fixed_vector_allocator(const char* pName = NULL)
        //{
        //    mOverflowAllocator.set_name(pName);
        //}

        fixed_vector_allocator(void* pNodeBuffer)
            : mpPoolBegin(pNodeBuffer)
        {
        }

        fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
        {
            #if EASTL_ALLOCATOR_COPY_ENABLED
                mOverflowAllocator = x.mOverflowAllocator;
            #else
                (void)x;
            #endif

            return *this; // Do nothing. Ignore the source type.
        }

        void* allocate(size_t n, int flags = 0)
        {
            return mOverflowAllocator.allocate(n, flags);
        }

        void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
        {
            return mOverflowAllocator.allocate(n, alignment, offset, flags);
        }

        void deallocate(void* p, size_t n)
        {
            if(p != mpPoolBegin)
                mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation.
        }

        const char* get_name() const
        {
            return mOverflowAllocator.get_name();
        }

        void set_name(const char* pName)
        {
            mOverflowAllocator.set_name(pName);
        }

        overflow_allocator_type& get_overflow_allocator()
        {
            return mOverflowAllocator;
        }

        void set_overflow_allocator(const overflow_allocator_type& allocator)
        {
            mOverflowAllocator = allocator;
        }

    }; // fixed_vector_allocator
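
    // Design note: a fixed_vector stores its elements contiguously in its fixed
    // buffer, so this allocator only ever services growth beyond that buffer.
    // Every allocate() goes straight to the overflow allocator, and deallocate()
    // ignores the address of the fixed buffer itself (mpPoolBegin), which must
    // never be freed.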


    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
    class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
    {
    public:
        typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator> this_type;
        typedef Allocator overflow_allocator_type;

        enum
        {
            kNodeSize            = nodeSize,
            kNodeCount           = nodeCount,
            kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
            kNodeAlignment       = nodeAlignment,
            kNodeAlignmentOffset = nodeAlignmentOffset
        };

        //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version.
        //{
        //}

        fixed_vector_allocator(void* /*pNodeBuffer*/)
        {
        }

        void* allocate(size_t /*n*/, int /*flags*/ = 0)
        {
            EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
            return NULL;
        }

        void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
        {
            EASTL_ASSERT(false);
            return NULL;
        }

        void deallocate(void* /*p*/, size_t /*n*/)
        {
        }

        const char* get_name() const
        {
            return EASTL_FIXED_POOL_DEFAULT_NAME;
        }

        void set_name(const char* /*pName*/)
        {
        }

        overflow_allocator_type& get_overflow_allocator()
        {
            EASTL_ASSERT(false);
            overflow_allocator_type* pNULL = NULL;
            return *pNULL; // This is not pretty, but it should never execute.
        }

        void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
        {
            // We don't have an overflow allocator.
            EASTL_ASSERT(false);
        }

    }; // fixed_vector_allocator


    ///////////////////////////////////////////////////////////////////////////
    // global operators
    ///////////////////////////////////////////////////////////////////////////

    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
    inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                           const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
    {
        return (&a == &b); // They are only equal if they are the same object.
    }


    template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
    inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
                           const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
    {
        return (&a != &b); // They are only equal if they are the same object.
    }




    ///////////////////////////////////////////////////////////////////////////
    // fixed_swap
    ///////////////////////////////////////////////////////////////////////////

    template <typename Container>
    void fixed_swap(Container& a, Container& b)
    {
        // We must do a brute-force swap, because fixed containers cannot share memory allocations.
        eastl::less<size_t> compare;

        if(compare(sizeof(a), EASTL_MAX_STACK_USAGE)) // Using compare instead of just '<' avoids a stubborn compiler warning.
        {
            // Note: The C++ language does not define what happens when you declare
            // an object in insufficient stack space but the object is never created.
            // This may result in a stack overflow exception on some systems, depending
            // on how they work and possibly depending on enabled debug functionality.

            const Container temp(a); // Can't use global swap because that could
            a = b;                   // itself call this swap function in return.
            b = temp;
        }
        else
        {
            EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
            void* const pMemory = allocator.allocate(sizeof(a));

            if(pMemory)
            {
                Container* const pTemp = ::new(pMemory) Container(a);
                a = b;
                b = *pTemp;

                pTemp->~Container();
                allocator.deallocate(pMemory, sizeof(a));
            }
        }
    }

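    // A usage sketch (fixed_vector shown for illustration): the swap goes through
    // a full temporary copy, placed on the stack when sizeof(Container) is below
    // EASTL_MAX_STACK_USAGE and in a heap-allocated temporary otherwise.
    //
    //     eastl::fixed_vector<int, 8> a;
    //     eastl::fixed_vector<int, 8> b;
    //     eastl::fixed_swap(a, b);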


} // namespace eastl


#endif // Header include guard