#ifndef __TBB_enumerable_thread_specific_H
#define __TBB_enumerable_thread_specific_H

#include "concurrent_vector.h"
#include "tbb_thread.h"
#include "cache_aligned_allocator.h"
#include "aligned_space.h"
#include <string.h>   // for memcpy and memset

#if _WIN32||_WIN64
#include "machine/windows_api.h"
#else
#include <pthread.h>
#endif

namespace tbb {

//! Selects whether an enumerable_thread_specific instance uses a native TLS key of its own
//! (ets_key_per_instance) or relies solely on the thread id (ets_no_key).
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };

namespace interface6 {

namespace internal {

//! Base class that owns the thread-keyed table mapping each thread to its local element.
template<ets_key_usage_type ETS_key_type>
class ets_base: tbb::internal::no_copy {
protected:
#if _WIN32||_WIN64
    typedef DWORD key_type;
#else
    typedef pthread_t key_type;
#endif
#if __TBB_GCC_3_3_PROTECTED_BROKEN
public:
#endif
    struct slot;

    //! Open-addressed array of slots; arrays are chained, newest (largest) first.
    struct array {
        array* next;
        size_t lg_size;
        slot& at( size_t k ) {
            return ((slot*)(void*)(this+1))[k];
        }
        size_t size() const {return (size_t)1<<lg_size;}
        size_t mask() const {return size()-1;}
        size_t start( size_t h ) const {
            return h>>(8*sizeof(size_t)-lg_size);
        }
    };
    //! A (key, pointer-to-local) pair; key==0 means the slot is empty.
    struct slot {
        key_type key;
        void* ptr;
        bool empty() const {return !key;}
        bool match( key_type k ) const {return key==k;}
        bool claim( key_type k ) {
            __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
            return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
        }
    };
#if __TBB_GCC_3_3_PROTECTED_BROKEN
protected:
#endif

    static key_type key_of_current_thread() {
        tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
        key_type k;
        memcpy( &k, &id, sizeof(k) );
        return k;
    }

    //! Head of the linked list of slot arrays, newest (and largest) array first; NULL when empty.
    atomic<array*> my_root;
    atomic<size_t> my_count;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;
    virtual void free_array(void* ptr, size_t _size) = 0;
    array* allocate( size_t lg_size ) {
        size_t n = 1<<lg_size;
        array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
        a->lg_size = lg_size;
        std::memset( a+1, 0, n*sizeof(slot) );
        return a;
    }
    void free(array* a) {
        size_t n = 1<<(a->lg_size);
        free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
    }
    //! Multiplicative hashing of the thread key; array::start() consumes the upper bits of the result.
    static size_t hash( key_type k ) {
#if __TBB_WORDSIZE == 4
        return uintptr_t(k)*0x9E3779B9;
#else
        return uintptr_t(k)*0x9E3779B97F4A7C15;
#endif
    }

    ets_base() {my_root=NULL; my_count=0;}
    virtual ~ets_base();
    void* table_lookup( bool& exists );
    void table_clear();
    slot& table_find( key_type k ) {
        size_t h = hash(k);
        array* r = my_root;
        size_t mask = r->mask();
        for(size_t i = r->start(h);;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() || s.match(k) )
                return s;
        }
    }
    void table_reserve_for_copy( const ets_base& other ) {
        __TBB_ASSERT(!my_root,NULL);
        __TBB_ASSERT(!my_count,NULL);
        if( other.my_root ) {
            array* a = allocate(other.my_root->lg_size);
            a->next = NULL;
            my_root = a;
            my_count = other.my_count;
        }
    }
};

template<ets_key_usage_type ETS_key_type>
ets_base<ETS_key_type>::~ets_base() {
    __TBB_ASSERT(!my_root, NULL);
}

template<ets_key_usage_type ETS_key_type>
void ets_base<ETS_key_type>::table_clear() {
    while( array* r = my_root ) {
        my_root = r->next;
        free(r);
    }
    my_count = 0;
}

template<ets_key_usage_type ETS_key_type>
void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
    const key_type k = key_of_current_thread();

    __TBB_ASSERT(k!=0,NULL);
    void* found;
    size_t h = hash(k);
    for( array* r=my_root; r; r=r->next ) {
        size_t mask=r->mask();
        for(size_t i = r->start(h); ;i=(i+1)&mask) {
            slot& s = r->at(i);
            if( s.empty() ) break;
            if( s.match(k) ) {
                if( r==my_root ) {
                    // Found in the newest array: the local already exists.
                    exists = true;
                    return s.ptr;
                } else {
                    // Found in an older array; re-insert the pair into the newest array.
                    exists = true;
                    found = s.ptr;
                    goto insert;
                }
            }
        }
    }
    // Key not found: create a new local element and register it.
    exists = false;
    found = create_local();
    {
        size_t c = ++my_count;
        array* r = my_root;
        if( !r || c>r->size()/2 ) {
            // Grow the table: push a new front array large enough for c elements.
            size_t s = r ? r->lg_size : 2;
            while( c>size_t(1)<<(s-1) ) ++s;
            array* a = allocate(s);
            for(;;) {
                a->next = my_root;
                array* new_r = my_root.compare_and_swap(a,r);
                if( new_r==r ) break;
                if( new_r->lg_size>=s ) {
                    // Another thread installed an equal or larger array, so ours is superfluous.
                    free(a);
                    break;
                }
                r = new_r;
            }
        }
    }
insert:
    // There is guaranteed to be room in the newest array; claim an empty slot for the key.
    array* ir = my_root;
    size_t mask = ir->mask();
    for(size_t i = ir->start(h);;i=(i+1)&mask) {
        slot& s = ir->at(i);
        if( s.empty() ) {
            if( s.claim(k) ) {
                s.ptr = found;
                return found;
            }
        }
    }
}
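
// Implementation note: the table above is a grow-only, open-addressed hash map keyed by
// thread id. Each array in the my_root list is probed linearly starting from the upper
// bits of hash(k); when an array becomes more than half full, a larger array is pushed
// onto the front of the list with compare_and_swap, and entries found only in older
// arrays are lazily re-inserted into the newest one by table_lookup(). Old arrays are
// reclaimed only in table_clear(), so concurrent lookups never race with deallocation.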

//! Specialization that uses a native TLS key per instance to cache the table lookup.
template <>
class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
    typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
    typedef DWORD tls_key_t;
    void create_key() { my_key = TlsAlloc(); }
    void destroy_key() { TlsFree(my_key); }
    void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
    void* get_tls() { return (void *)TlsGetValue(my_key); }
#else
    typedef pthread_key_t tls_key_t;
    void create_key() { pthread_key_create(&my_key, NULL); }
    void destroy_key() { pthread_key_delete(my_key); }
    void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
    void* get_tls() const { return pthread_getspecific(my_key); }
#endif
    tls_key_t my_key;
    virtual void* create_local() = 0;
    virtual void* create_array(size_t _size) = 0;
    virtual void free_array(void* ptr, size_t _size) = 0;
public:
    ets_base() {create_key();}
    ~ets_base() {destroy_key();}
    void* table_lookup( bool& exists ) {
        // Fast path: the thread's element is cached in native TLS.
        void* found = get_tls();
        if( found ) {
            exists=true;
        } else {
            found = super::table_lookup(exists);
            set_tls(found);
        }
        return found;
    }
    void table_clear() {
        destroy_key();
        create_key();
        super::table_clear();
    }
};
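
// Note: enumerable_thread_specific (declared later in this header) selects between these
// two bases through its ETS_key_type template parameter. In this version its local()
// method still carries a debug assertion that ETS_key_type==ets_no_key
// ("ets_key_per_instance not yet implemented"), signalling that the TLS-cached
// specialization above is not yet officially supported through the public container.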

//! Random-access iterator over the thread-local elements stored in a Container
//! (the concurrent_vector that backs enumerable_thread_specific).
template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    // Derive from std::iterator on 64-bit MSVC so the standard iterator traits are visible there.
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif
{
    Container *my_container;
    typename Container::size_type my_index;
    mutable Value *my_value;    // lazily cached address of the element at my_index

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,
                                                               const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                            const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                           const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
                                const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

public:

    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    //! Default constructor
    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
        my_container( other.my_container ), my_index( other.my_index ), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator(*my_container, my_index + offset);
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index-offset );
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset;
        my_value = NULL;
        return *this;
    }

    Value& operator*() const {
        Value* value = my_value;
        if( !value ) {
            value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
        }
        __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
        return *value;
    }

    Value& operator[]( ptrdiff_t k ) const {
        return (*my_container)[my_index + k].value;
    }

    Value* operator->() const {return &operator*();}

    enumerable_thread_specific_iterator& operator++() {
        ++my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index;
        my_value = NULL;
        return *this;
    }

    //! Post increment
    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator result = *this;
        ++my_index;
        my_value = NULL;
        return result;
    }

    //! Post decrement
    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator result = *this;
        --my_index;
        my_value = NULL;
        return result;
    }

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;
};

template<typename Container, typename T>
enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset,
                                                            const enumerable_thread_specific_iterator<Container,T>& v ) {
    return enumerable_thread_specific_iterator<Container,T>( *v.my_container, v.my_index + offset );
}

template<typename Container, typename T, typename U>
bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index==j.my_index && i.my_container == j.my_container;
}

template<typename Container, typename T, typename U>
bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i==j);
}

template<typename Container, typename T, typename U>
bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index<j.my_index;
}

template<typename Container, typename T, typename U>
bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
                const enumerable_thread_specific_iterator<Container,U>& j ) {
    return j<i;
}

template<typename Container, typename T, typename U>
bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(i<j);
}

template<typename Container, typename T, typename U>
bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
                 const enumerable_thread_specific_iterator<Container,U>& j ) {
    return !(j<i);
}

template<typename Container, typename T, typename U>
ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
                     const enumerable_thread_specific_iterator<Container,U>& j ) {
    return i.my_index-j.my_index;
}

//! Input iterator that walks the elements of each inner container in a
//! container of containers (used by flattened2d below).
template<typename SegmentedContainer, typename Value >
class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    : public std::iterator<std::input_iterator_tag, Value>
#endif
{
    template<typename C, typename T, typename U>
    friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename T, typename U>
    friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

    template<typename C, typename U>
    friend class segmented_iterator;

public:

    segmented_iterator() {my_segcont = NULL;}

    segmented_iterator( const SegmentedContainer& _segmented_container ) :
        my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
        outer_iter(my_segcont->end()) { }

    ~segmented_iterator() {}

    typedef typename SegmentedContainer::iterator outer_iterator;
    typedef typename SegmentedContainer::value_type InnerContainer;
    typedef typename InnerContainer::iterator inner_iterator;

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef typename SegmentedContainer::size_type size_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::input_iterator_tag iterator_category;

    //! Copy constructor from a compatible segmented_iterator.
    template<typename U>
    segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
        my_segcont(other.my_segcont),
        outer_iter(other.outer_iter),
        inner_iter(other.inner_iter)
    {}

    //! Assignment from a compatible segmented_iterator.
    template<typename U>
    segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
        if(this != &other) {
            my_segcont = other.my_segcont;
            outer_iter = other.outer_iter;
            if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
        }
        return *this;
    }

    // Allow assignment of an outer iterator to a segmented iterator.  Once assigned,
    // move forward until a non-empty inner container is found or the end of the
    // outer container is reached.
    segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        for(outer_iter = new_outer_iter; outer_iter != my_segcont->end(); ++outer_iter) {
            if( !outer_iter->empty() ) {
                inner_iter = outer_iter->begin();
                break;
            }
        }
        return *this;
    }

    //! Pre-increment
    segmented_iterator& operator++() {
        advance_me();
        return *this;
    }

    //! Post-increment
    segmented_iterator operator++(int) {
        segmented_iterator tmp = *this;
        operator++();
        return tmp;
    }

    bool operator==(const outer_iterator& other_outer) const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        return (outer_iter == other_outer &&
                (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
    }

    bool operator!=(const outer_iterator& other_outer) const {
        return !operator==(other_outer);
    }

    reference operator*() const {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL);
        return *inner_iter;
    }

    pointer operator->() const { return &operator*();}

private:
    SegmentedContainer* my_segcont;
    outer_iterator outer_iter;
    inner_iterator inner_iter;

    //! Advance to the next element, skipping to the next non-empty inner container if needed.
    void advance_me() {
        __TBB_ASSERT(my_segcont != NULL, NULL);
        __TBB_ASSERT(outer_iter != my_segcont->end(), NULL);
        __TBB_ASSERT(inner_iter != outer_iter->end(), NULL);
        ++inner_iter;
        while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
            inner_iter = outer_iter->begin();
        }
    }
};
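
// Usage note: a segmented_iterator is first bound to its container and then positioned
// by assigning an outer iterator to it, which is exactly how flattened2d::begin()/end()
// below produce iterators, e.g.
//     iterator(*my_container) = my_begin;
// Comparison against an outer iterator treats the position as "at" that outer element
// only when the inner iterator sits at the inner container's begin().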

template<typename SegmentedContainer, typename T, typename U>
bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    if(i.my_segcont != j.my_segcont) return false;
    if(i.my_segcont == NULL) return true;
    if(i.outer_iter != j.outer_iter) return false;
    if(i.outer_iter == i.my_segcont->end()) return true;
    return i.inner_iter == j.inner_iter;
}

// != is defined in terms of ==
template<typename SegmentedContainer, typename T, typename U>
bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                 const segmented_iterator<SegmentedContainer,U>& j ) {
    return !(i==j);
}

//! Holds storage for a T and destroys it without having constructed it itself.
template<typename T>
struct destruct_only: tbb::internal::no_copy {
    tbb::aligned_space<T,1> value;
    ~destruct_only() {value.begin()[0].~T();}
};

//! Constructs a T with its default constructor.
template<typename T>
struct construct_by_default: tbb::internal::no_assign {
    void construct(void*where) {new(where) T();}
    construct_by_default( int ) {}
};

//! Constructs a T as a copy of a stored exemplar.
template<typename T>
struct construct_by_exemplar: tbb::internal::no_assign {
    const T exemplar;
    void construct(void*where) {new(where) T(exemplar);}
    construct_by_exemplar( const T& t ) : exemplar(t) {}
};

//! Constructs a T from the result of calling a stored functor.
template<typename T, typename Finit>
struct construct_by_finit: tbb::internal::no_assign {
    Finit f;
    void construct(void* where) {new(where) T(f());}
    construct_by_finit( const Finit& f_ ) : f(f_) {}
};

//! Type-erased factory used to construct thread-local elements.
template<typename T>
class callback_base {
public:
    // Clone *this
    virtual callback_base* clone() = 0;
    // Destruct and deallocate *this
    virtual void destroy() = 0;
    // Virtual destructor (class has other virtual functions)
    virtual ~callback_base() { }
    // Construct a T at where
    virtual void construct(void* where) = 0;
};

template <typename T, typename Constructor>
class callback_leaf: public callback_base<T>, Constructor {
    template<typename X> callback_leaf( const X& x ) : Constructor(x) {}

    typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

    /*override*/ callback_base<T>* clone() {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(*this);
    }

    /*override*/ void destroy() {
        my_allocator_type().destroy(this);
        my_allocator_type().deallocate(this,1);
    }

    /*override*/ void construct(void* where) {
        Constructor::construct(where);
    }
public:
    template<typename X>
    static callback_base<T>* make( const X& x ) {
        void* where = my_allocator_type().allocate(1);
        return new(where) callback_leaf(x);
    }
};

//! Storage for a U padded out to a multiple of the cache line size, to avoid false sharing.
/** ModularSize is expected to be sizeof(U) modulo the cache line size; the element is
    constructed and destroyed explicitly by its owner. */
template<typename U, size_t ModularSize>
struct ets_element {
    char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
    void unconstruct() {
        tbb::internal::punned_cast<U*>(&value)->~U();
    }
};
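
// Illustrative note: enumerable_thread_specific below instantiates this as
//     internal::ets_element< T, sizeof(T) % tbb::internal::NFS_MaxLineSize >
// so that each thread's slot in the backing concurrent_vector occupies whole cache
// lines and writes by one thread do not invalidate the lines of another.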

} // namespace internal

//! The enumerable_thread_specific container
/** Lazily creates one element of type T per thread that accesses it via local().
    The elements can be enumerated with the usual begin()/end() iterators and
    reduced with combine()/combine_each(). */
template <typename T,
          typename Allocator=cache_aligned_allocator<T>,
          ets_key_usage_type ETS_key_type=ets_no_key >
class enumerable_thread_specific: internal::ets_base<ETS_key_type> {

    template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;

    //! Element type padded out to a multiple of the cache line size to avoid false sharing.
    typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;

    //! A generic range, used to create range objects from the iterators.
    template<typename I>
    class generic_range_type: public blocked_range<I> {
    public:
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef I iterator;
        typedef ptrdiff_t difference_type;
        generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
        template<typename U>
        generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
        generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
    };

    typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
    typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;

    //! Callback that constructs a new thread-local element (default, exemplar, or finit based).
    internal::callback_base<T> *my_construct_callback;

    //! The padded thread-local elements.
    internal_collection_type my_locals;

    /*override*/ void* create_local() {
#if TBB_DEPRECATED
        void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
        void* lref = &*my_locals.push_back(padded_element());
#endif
        my_construct_callback->construct(lref);
        return lref;
    }

    void unconstruct_locals() {
        for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
            cvi->unconstruct();
        }
    }

    typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

    // _size is in bytes
    /*override*/ void* create_array(size_t _size) {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        return array_allocator_type().allocate(nelements);
    }

    /*override*/ void free_array( void* _ptr, size_t _size) {
        size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
        array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
    }

public:

    //! Basic types
    typedef Allocator allocator_type;
    typedef T value_type;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef typename internal_collection_type::size_type size_type;
    typedef typename internal_collection_type::difference_type difference_type;

    // Iterator types
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
    typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

    // Parallel range types
    typedef generic_range_type< iterator > range_type;
    typedef generic_range_type< const_iterator > const_range_type;

    //! Default constructor.  Each local instance of T is default constructed.
    enumerable_thread_specific() :
        my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(0) )
    {}

    //! Constructor with initializer functor.  Each local instance of T is constructed by T(finit()).
    template <typename Finit>
    enumerable_thread_specific( Finit finit ) :
        my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) )
    {}

    //! Constructor with exemplar.  Each local instance of T is copy-constructed from the exemplar.
    enumerable_thread_specific(const T& exemplar) :
        my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
    {}

    //! Destructor
    ~enumerable_thread_specific() {
        my_construct_callback->destroy();
        this->clear();  // clear while the overridden free_array() is still accessible
    }

    //! Returns a reference to the calling thread's local copy, creating one if necessary.
    reference local() {
        bool exists;
        return local(exists);
    }

    //! Returns a reference to the calling thread's local copy; exists is set to true if the copy was already present.
    reference local(bool& exists) {
        __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented");
        void* ptr = this->table_lookup(exists);
        return *(T*)ptr;
    }

    //! Get the number of local copies.
    size_type size() const { return my_locals.size(); }

    //! True if there have been no local copies created.
    bool empty() const { return my_locals.empty(); }

    //! Begin iterator
    iterator begin() { return iterator( my_locals, 0 ); }
    //! End iterator
    iterator end() { return iterator(my_locals, my_locals.size() ); }

    //! Begin const iterator
    const_iterator begin() const { return const_iterator(my_locals, 0); }

    //! End const iterator
    const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

    //! Get range for use with parallel algorithms.
    range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }

    //! Get const range for use with parallel algorithms.
    const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }

    //! Destroys the local copies and clears the table.
    void clear() {
        unconstruct_locals();
        my_locals.clear();
        this->table_clear();
        // the construct callback is not destroyed
    }

private:

    template<typename U, typename A2, ets_key_usage_type C2>
    void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);

public:

    template<typename U, typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
    {
        internal_copy(other);
    }

    enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
    {
        internal_copy(other);
    }

private:

    template<typename U, typename A2, ets_key_usage_type C2>
    enumerable_thread_specific &
    internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
        if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
            this->clear();
            my_construct_callback->destroy();
            my_construct_callback = 0;
            internal_copy( other );
        }
        return *this;
    }

public:

    // assignment
    enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
        return internal_assign(other);
    }

    template<typename U, typename Alloc, ets_key_usage_type Cachetype>
    enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
    {
        return internal_assign(other);
    }

    //! Combine the local copies into a single value; combine_func_t has signature T(T,T) or T(const T&, const T&).
    //! If there are no local copies, returns a value produced by the construct callback.
    template <typename combine_func_t>
    T combine(combine_func_t f_combine) {
        if(begin() == end()) {
            internal::destruct_only<T> location;
            my_construct_callback->construct(location.value.begin());
            return *location.value.begin();
        }
        const_iterator ci = begin();
        T my_result = *ci;
        while(++ci != end())
            my_result = f_combine( my_result, *ci );
        return my_result;
    }

    //! Apply f_combine to each local copy; combine_func_t has signature void(T) or void(const T&).
    template <typename combine_func_t>
    void combine_each(combine_func_t f_combine) {
        for(const_iterator ci = begin(); ci != end(); ++ci) {
            f_combine( *ci );
        }
    }

};
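
//! Example (illustrative sketch, not part of the library): per-thread counters
//! that are accumulated after a parallel phase.  Default-constructed int locals
//! are value-initialized to 0 by construct_by_default.
//!
//! \code
//!     #include "tbb/enumerable_thread_specific.h"
//!     #include <functional>   // std::plus
//!
//!     tbb::enumerable_thread_specific<int> counters;
//!
//!     void count_something() {
//!         ++counters.local();          // lazily creates this thread's copy
//!     }
//!
//!     int total() {
//!         return counters.combine( std::plus<int>() );
//!     }
//! \endcode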

template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
template<typename U, typename A2, ets_key_usage_type C2>
void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
    // Clone the construct callback, then copy-construct a local for every slot found in other.
    my_construct_callback = other.my_construct_callback->clone();

    typedef internal::ets_base<ets_no_key> base;
    __TBB_ASSERT(my_locals.size()==0,NULL);
    this->table_reserve_for_copy( other );
    for( base::array* r=other.my_root; r; r=r->next ) {
        for( size_t i=0; i<r->size(); ++i ) {
            base::slot& s1 = r->at(i);
            if( !s1.empty() ) {
                base::slot& s2 = this->table_find(s1.key);
                if( s2.empty() ) {
#if TBB_DEPRECATED
                    void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
                    void* lref = &*my_locals.push_back(padded_element());
#endif
                    s2.ptr = new(lref) T(*(U*)s1.ptr);
                    s2.key = s1.key;
                } else {
                    // Skip: the same key can appear in more than one of other's arrays,
                    // and it has already been copied from a newer array.
                }
            }
        }
    }
}

//! An adaptor that presents a container of containers as a single flattened sequence.
/** Iterating a flattened2d visits every element of every inner container, e.g. all
    elements of an enumerable_thread_specific< std::vector<T> >. */
template< typename Container >
class flattened2d {

    // Inner container type of the adapted container.
    typedef typename Container::value_type conval_type;

public:

    //! Basic types
    typedef typename conval_type::size_type size_type;
    typedef typename conval_type::difference_type difference_type;
    typedef typename conval_type::allocator_type allocator_type;
    typedef typename conval_type::value_type value_type;
    typedef typename conval_type::reference reference;
    typedef typename conval_type::const_reference const_reference;
    typedef typename conval_type::pointer pointer;
    typedef typename conval_type::const_pointer const_pointer;

    typedef typename internal::segmented_iterator<Container, value_type> iterator;
    typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;

    flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
        my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }

    flattened2d( const Container &c ) :
        my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }

    iterator begin() { return iterator(*my_container) = my_begin; }
    iterator end() { return iterator(*my_container) = my_end; }
    const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
    const_iterator end() const { return const_iterator(*my_container) = my_end; }

    //! Total number of elements in all inner containers in the adapted range.
    size_type size() const {
        size_type tot_size = 0;
        for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
            tot_size += i->size();
        }
        return tot_size;
    }

private:

    Container *my_container;
    typename Container::const_iterator my_begin;
    typename Container::const_iterator my_end;

};

//! Returns a flattened2d view over the sub-range [b, e) of the container c.
template <typename Container>
flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
    return flattened2d<Container>(c, b, e);
}

//! Returns a flattened2d view over the whole container c.
template <typename Container>
flattened2d<Container> flatten2d(const Container &c) {
    return flattened2d<Container>(c);
}
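
//! Example (illustrative sketch, not part of the library): iterating over every
//! element of per-thread vectors collected during a parallel phase.  process() is
//! a hypothetical user function.
//!
//! \code
//!     #include <vector>
//!
//!     typedef tbb::enumerable_thread_specific< std::vector<int> > ets_buffers;
//!     ets_buffers buffers;
//!     // ... each thread appends results to buffers.local() ...
//!
//!     tbb::flattened2d<ets_buffers> flat = tbb::flatten2d( buffers );
//!     for( tbb::flattened2d<ets_buffers>::iterator i = flat.begin(); i != flat.end(); ++i )
//!         process( *i );
//! \endcode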

} // namespace interface6

namespace internal {
using interface6::internal::segmented_iterator;
}

using interface6::enumerable_thread_specific;
using interface6::flattened2d;
using interface6::flatten2d;

} // namespace tbb

#endif