_concurrent_unordered_internal.h

/*
    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

/* Container implementations in this header are based on PPL implementations
   provided by Microsoft. */

#ifndef __TBB_concurrent_unordered_internal_H
#define __TBB_concurrent_unordered_internal_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <functional>
#include <string>       // For tbb_hasher
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_allocator.h"

namespace tbb {
namespace interface5 {
namespace internal {

template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;

// Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};

template<typename Solist, typename T, typename U>
bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr;
}

// Split-order list iterators, needed to skip dummy elements
template<class Solist, typename Value>
class solist_iterator : public flist_iterator<Solist, Value>
{
    typedef flist_iterator<Solist, Value> base_type;
    typedef typename Solist::nodeptr_t nodeptr_t;
    using base_type::get_node_ptr;
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template<class M, typename V>
    friend class solist_iterator;
    template<typename M, typename T, typename U>
    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );

    const Solist *my_list_ptr;
    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}

public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    solist_iterator() {}
    solist_iterator( const solist_iterator<Solist, typename Solist::value_type> &other )
        : base_type(other), my_list_ptr(other.my_list_ptr) {}

    reference operator*() const {
        return this->base_type::operator*();
    }

    pointer operator->() const {
        return (&**this);
    }

    solist_iterator& operator++() {
        do base_type::operator++();
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (*this);
    }

    solist_iterator operator++(int) {
        solist_iterator tmp = *this;
        do ++*this;
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (tmp);
    }
};

template<typename Solist, typename T, typename U>
bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
}

// Forward type and class definitions
typedef size_t sokey_t;

// Forward list in which elements are sorted in a split-order
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
        // Initialize the node with the given order key
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Inserts the new element in the list in an atomic fashion
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);

            if (exchange_node == current_node) // TODO: why this branch?
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }
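
        // Editorial note (a sketch, not part of the original header):
        // atomic_set_next is the only linking primitive in this list, and its
        // return value doubles as the failure report. Callers use it roughly
        // like this (names 'prev', 'candidate' and 'expected_next' are
        // illustrative only):
        //
        //     nodeptr_t witness = prev->atomic_set_next(candidate, expected_next);
        //     if (witness != candidate) {
        //         // CAS failed: 'witness' is the node some other thread linked
        //         // after 'prev'; re-scan for the insertion point and retry.
        //         // (Legal only because concurrent erase is not supported.)
        //     }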

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }


        nodeptr_t  my_next;      // Next element in the list
        value_type my_element;   // Element storage
        sokey_t    my_order_key; // Order key for this element
    };

    // Allocate a new node with the given order key and value
    nodeptr_t create_node(sokey_t order_key, const T &value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(value);
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // Allocate a new node with the given order key; used to allocate dummy nodes
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T();
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(0);
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns the first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns the first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of (non-dummy) elements is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed-in one
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns the first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns the first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return sokey_t(~0U);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. The public iterator
    // must not point at a dummy node.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. The public iterator
    // must not point at a dummy node.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of the first non-dummy internal iterator at or after
    // the passed-in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal-only nodes
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the first non-dummy internal iterator at or after
    // the passed-in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal-only nodes
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator
    void destroy_node(nodeptr_t pnode) {
        my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list. If insert fails, return the node that
    // was inserted instead.
    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed-in iterators
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
    {
        nodeptr_t pnode = create_node(order_key, value);
        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range();
            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            // If the insert failed (element already there), then delete the new one
            destroy_node(pnode);
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range();
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either the dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as the dummy node.
                    // Proceed with the search from the previous location where the order key was
                    // known to be larger (note: this is legal only because no safe
                    // concurrent erase operation is supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }
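
    // Editorial note (a sketch, not part of the original header): split-ordering
    // in action. With 4 buckets and 8-bit keys for illustration, dummy keys are
    // bucket indices bit-reversed with the low bit clear, and element keys are
    // hashes bit-reversed with the low bit set, so every bucket's elements sit
    // contiguously after its dummy:
    //
    //     bucket 0 dummy: reverse(00000000) = 00000000
    //       element, hash 4: reverse(00000100) | 1 = 00100001
    //     bucket 2 dummy: reverse(00000010) = 01000000
    //     bucket 1 dummy: reverse(00000001) = 10000000
    //     bucket 3 dummy: reverse(00000011) = 11000000
    //
    // Splitting a bucket later only splices a new dummy between existing nodes;
    // no element ever moves.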

    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (the previous node needs to be passed because this is a forward-only list)
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed-in split-ordered list to this one
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            nodeptr_t new_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert(previous_node, new_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:

    // Check the list for order violations
    void check_range()
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
        {
            raw_iterator next_iterator = it;
            ++next_iterator;

            __TBB_ASSERT(next_iterator == raw_end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator;  // Allocator object for nodes
    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
    nodeptr_t                                             my_head;            // Pointer to head node
};

// Template class for hash compare
template<typename Key, typename Hasher, typename Key_equality>
class hash_compare
{
public:
    hash_compare() {}

    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}

    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}

    size_t operator()(const Key& key) const {
        return ((size_t)my_hash_object(key));
    }

    bool operator()(const Key& key1, const Key& key2) const {
        return (!my_key_compare_object(key1, key2));
    }

    Hasher       my_hash_object;        // The hash object
    Key_equality my_key_compare_object; // The equality comparator object
};
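
// Editorial note (a sketch, not part of the original header): hash_compare
// bundles hashing and equality behind two operator() overloads. Note that the
// two-argument form returns the NEGATION of the equality functor, i.e. it
// yields false exactly when the keys compare equal; callers below test
// 'my_hash_compare(a, b) == 0' for equality. A minimal illustration, assuming
// hypothetical C++03-style functors:
//
//     struct int_hash  { size_t operator()(int k) const { return size_t(k); } };
//     struct int_equal { bool operator()(int a, int b) const { return a == b; } };
//
//     hash_compare<int, int_hash, int_equal> hc;
//     size_t h    = hc(42);     // one argument: hash value
//     bool differ = hc(1, 2);   // two arguments: true when keys differ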

#if _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
#endif

template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::value_compare value_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

private:
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8;              // One bucket segment per bit
    static const size_type initial_bucket_number = 8;                               // Initial number of buckets
    static const size_type initial_bucket_load = 4;                                 // Initial maximum number of elements per bucket

protected:
    // Constructors/Destructors
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        if( n_of_buckets == 0) ++n_of_buckets;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
        internal_init();
    }

    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_copy(right);
    }

    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        internal_init();
        internal_copy(right);
    }

    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }

public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity functions
    bool empty() const {
        return my_solist.empty();
    }

    size_type size() const {
        return my_solist.size();
    }

    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }

    // Parallel traversal support
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node;
    public:
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        // True if the range is empty
        bool empty() const {return my_begin_node == my_end_node;}

        // True if the range can be partitioned into two subranges
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        // Splitting constructor
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting an indivisible range" );
            __TBB_ASSERT( !r.empty(), "Splitting an indivisible range" );
            set_midpoint();
            r.set_midpoint();
        }
        // Initialize the range with the whole container
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        // The grain size for this range
        size_type grainsize() const { return 1; }

        // Set my_midpoint_node to point approximately halfway between my_begin_node and my_end_node
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                if( my_midpoint_node == my_begin_node )
                    my_midpoint_node = my_end_node;
#if TBB_USE_ASSERT
                else {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };

    class range_type : public const_range_type {
    public:
        typedef typename concurrent_unordered_base::iterator iterator;
        // Splitting constructor
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        // Initialize the range with the whole container
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}

        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };

    range_type range() {
        return range_type( *this );
    }

    const_range_type range() const {
        return const_range_type( *this );
    }
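
    // Editorial note (a sketch, not part of the original header): these ranges
    // satisfy the TBB Range concept, so containers built on this base can be
    // traversed with TBB's generic algorithms. Assuming a hypothetical
    // tbb::concurrent_unordered_map<int,int> instance named 'm':
    //
    //     typedef tbb::concurrent_unordered_map<int,int> map_t;
    //     struct body {
    //         void operator()( const map_t::range_type &r ) const {
    //             for ( map_t::iterator i = r.begin(); i != r.end(); ++i )
    //                 ++i->second; // each element is visited exactly once
    //         }
    //     };
    //     tbb::parallel_for( m.range(), body() );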

    // Modifiers
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }

    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }

    template<class Iterator>
    void insert(Iterator first, Iterator last) {
        for (Iterator it = first; it != last; ++it)
            insert(*it);
    }

    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }

    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++);
        return my_solist.get_iterator(first);
    }

    size_type unsafe_erase(const key_type& key) {
        pairii_t where = equal_range(key);
        size_type item_count = internal_distance(where.first, where.second);
        unsafe_erase(where.first, where.second);
        return item_count;
    }

    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }

    // Observers
    void clear() {
        // Clear list
        my_solist.clear();

        // Clear buckets
        internal_clear();
    }

    // Lookup
    iterator find(const key_type& key) {
        return internal_find(key);
    }

    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }

    size_type count(const key_type& key) const {
        if (allow_multimapping) {
            paircc_t answer = equal_range(key);
            size_type item_count = internal_distance(answer.first, answer.second);
            return item_count;
        } else {
            return const_cast<self_type*>(this)->internal_find(key) == end() ? 0 : 1;
        }
    }

    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }

    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }

    // Bucket interface - for debugging
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }

    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }

    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
            ++it;
            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
                ++item_count;
        }
        return item_count;
    }

    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }

    // If the bucket is initialized, return the first non-dummy element in it
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // If the bucket is initialized, return the first non-dummy element in it
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    const_local_iterator unsafe_cbegin(size_type bucket) const {
        return ((const self_type *) this)->unsafe_begin(bucket);
    }

    const_local_iterator unsafe_cend(size_type bucket) const {
        return ((const self_type *) this)->unsafe_end(bucket);
    }

    // Hash policy
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }

    float max_load_factor() const {
        return my_maximum_bucket_size;
    }

    void max_load_factor(float newmax) {
        if (newmax != newmax || newmax < 0)
            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
        my_maximum_bucket_size = newmax;
    }

    // No rehashing work is done here: the underlying split-ordered list is
    // already sorted, so an increased bucket count simply takes effect the
    // next time an affected bucket is touched.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
    }
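
    // Editorial note (a sketch, not part of the original header): because
    // rehash only bumps my_number_of_buckets, its effect is lazy. For a
    // hypothetical instance 'table' that currently has 8 buckets:
    //
    //     table.rehash(100);   // my_number_of_buckets becomes 128
    //     // No dummy nodes are created here; a new bucket's dummy node is
    //     // spliced in by init_bucket() the first time an insert, find or
    //     // erase touches that bucket.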

private:

    // Initialize the hash table and keep the first bucket open
    void internal_init() {
        // Allocate an array of segment pointers
        std::memset(my_buckets, 0, pointers_per_table * sizeof(void *));

        // Insert the first element in the split-ordered list
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    void internal_clear() {
        for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
                for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }

    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }

    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
        for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }

    // Hash APIs
    size_type internal_distance(const_iterator first, const_iterator last) const
    {
        size_type num = 0;

        for (const_iterator it = first; it != last; ++it)
            ++num;

        return num;
    }

    // Insert an element in the hash table given its value
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);

                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where the order key was
                    // known to be larger (note: this is legal only because no safe
                    // concurrent erase operation is supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // Find the element in the split-ordered list
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // The current order key is already larger than the sought one,
                // so the element is not in the hash table.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while the order key is the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }

    // Erase an element from the list. This is not a concurrency safe function.
    iterator internal_erase(const_iterator it)
    {
        key_type key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            ++where;
        }
    }

    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If the bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }

    // Bucket APIs
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent. Initialize it and return.
        if (bucket == 0) {
            internal_init();
            return;
        }

        size_type parent_bucket = get_parent(bucket);

        // All parent buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
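
    // Editorial note (a sketch, not part of the original header): bucket
    // initialization recurses through parents. For example, touching bucket 6
    // (binary 110) for the first time proceeds as:
    //
    //     init_bucket(6)                     // parent = 6 & ~(1<<2) = 2
    //       init_bucket(2)                   // parent = 2 & ~(1<<1) = 0, already set up
    //         insert_dummy for bucket 2      // spliced in after bucket 0's dummy
    //       insert_dummy for bucket 6        // spliced in after bucket 2's dummy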

    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash table only if the size has not changed in between loads
            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) );
            // A simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8,
            // due to overzealous compiler warnings in /Wp64 mode
        }
    }

    size_type get_parent(size_type bucket) const
    {
        // Unsets the bucket's most significant turned-on bit
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
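
    // Editorial note (not part of the original header): clearing the most
    // significant set bit maps each bucket to the bucket it was split from,
    // e.g. get_parent(6) == 2 (110 -> 010), get_parent(5) == 1 (101 -> 001),
    // and get_parent(2) == 0 (010 -> 000).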


    // Dynamic sized array (segments)
    // Returns the segment index holding the given bucket
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
    }

    // Returns the first bucket number of the given segment
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }

    // Returns the size of the given segment
    static size_type segment_size( size_type k ) {
        return k? size_type(1)<<k : 2;
    }
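
    // Editorial note (not part of the original header): with these helpers the
    // bucket array grows in power-of-two segments that never move:
    //
    //     bucket index : 0 1 | 2 3 | 4 5 6 7 | 8 ... 15 | ...
    //     segment      :  0  |  1  |    2    |    3     | ...
    //
    // e.g. segment_index_of(5) == 2, segment_base(2) == 4, segment_size(2) == 4,
    // so bucket 5 lives at my_buckets[2][1].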

    raw_iterator get_bucket(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);
        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
        return my_buckets[segment][bucket];
    }

    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            std::memset(new_segment, 0, sz*sizeof(raw_iterator));

            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }

    bool is_initialized(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL)
            return false;

        raw_iterator it = my_buckets[segment][bucket];
        return (it.get_node_ptr() != NULL);
    }

    // Utilities for keys

    // A regular order key has its original hash value reversed and the last bit set
    sokey_t split_order_key_regular(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) | 0x1;
    }

    // A dummy order key has its original hash value reversed and the last bit unset
    sokey_t split_order_key_dummy(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) & ~(0x1);
    }
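
    // Editorial note (not part of the original header): reversing the hash bits
    // turns "same bucket modulo a power-of-two table size" into "adjacent in
    // sort order", which is what lets the table double in size without moving
    // nodes. An 8-bit illustration:
    //
    //     hash 0x06 = 00000110 -> regular key 01100001 (reversed, low bit set)
    //     bucket 6  = 00000110 -> dummy key   01100000 (reversed, low bit clear)
    //
    // The dummy key always sorts immediately before every element key that
    // hashes to its bucket, and real keys are always odd, so is_dummy() only
    // needs to test bit 0.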

    // Shared variables
    atomic<size_type>                                             my_number_of_buckets;       // Current table size
    solist_t                                                      my_solist;                  // List where all the elements are kept
    typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for segments
    float                                                         my_maximum_bucket_size;     // Maximum size of the bucket
    atomic<raw_iterator*>                                         my_buckets[pointers_per_table]; // The segment table
};
#if _MSC_VER
#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
#endif

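// Editorial note (not part of the original header): hash_multiplier is the
// golden-ratio constant used for Fibonacci (multiplicative) hashing, i.e.
// approximately 2^32/phi (0x9E3779B9) on 32-bit platforms and 2^64/phi on
// 64-bit ones; multiplying by it scrambles low-entropy keys across the word.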
static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
} // namespace internal

template<typename T>
inline size_t tbb_hasher( const T& t ) {
    return static_cast<size_t>( t ) * internal::hash_multiplier;
}
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    size_t const h = reinterpret_cast<size_t>( ptr );
    return (h >> 3) ^ h;
}
template<typename E, typename S, typename A>
inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
    size_t h = 0;
    for( const E* c = s.c_str(); *c; ++c )
        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
    return h;
}
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
} // namespace interface5
using interface5::tbb_hasher;
} // namespace tbb
#endif // __TBB_concurrent_unordered_internal_H
