// NOTE(review): this chunk is extraction residue — original file line numbers ("88",
// "92", ...) are fused into the code text, and the gaps between them mean many source
// lines are MISSING here (function signature, lambda braces, the inner bit loop, and
// the likely `myProcessedBits = 0;` reset near original line 123). Code is kept
// byte-identical; comments below describe only what the visible lines establish.
//
// Cooperative-cancellation flag shared by all worker threads: a progress callback
// returning false stores `false` here and every thread stops at its next check.
// relaxed ordering suffices — the flag carries no data, only a stop request.
88 std::atomic<bool> keepGoing{
true };
// Fallback cache-line size (std::hardware_destructive_interference_size has uneven
// toolchain support, so a project constant of 64 is used instead).
92 constexpr int hardware_destructive_interference_size = 64;
// Shared counter padded/aligned to a full cache line so that worker threads updating
// it do not false-share the line with neighboring data.
93 struct alignas( hardware_destructive_interference_size ) S
95 std::atomic<size_t> processedBits{ 0 };
// Compile-time guarantees that S really occupies exactly one cache line.
97 static_assert(
alignof( S ) == hardware_destructive_interference_size );
98 static_assert(
sizeof( S ) == hardware_destructive_interference_size );
// Process the whole bit range in parallel; each task receives a sub-range.
101 tbb::parallel_for( range, [&] (
const tbb::blocked_range<size_t>& subRange )
// Per-thread tally — accumulated locally to avoid hammering the shared atomic
// on every bit, and flushed into s.processedBits in batches.
104 size_t myProcessedBits = 0;
// Only the task that wins this try-lock reports progress, i.e. at most one
// thread invokes the callback — presumably the thread that called ForAllRanged
// (TODO confirm: callingThreadMutex is declared outside this view).
105 const auto callingThreadLock = callingThreadMutex.
tryLock();
106 const bool report = progressCb && callingThreadLock;
// Per-thread callable produced by the factory (one instance per task, so `c`
// needs no synchronization).
107 auto c = callMaker();
// Early-out when another thread has already requested cancellation.
110 if ( !keepGoing.load( std::memory_order_relaxed ) )
// Report progress only every `reportProgressEveryBit` bits to keep the
// callback (and the shared-counter read) off the per-bit hot path.
113 if ( ( ++myProcessedBits % reportProgressEveryBit ) == 0 )
// Progress fraction = (this thread's un-flushed count + globally flushed
// count) / total bits; callback returning false requests cancellation.
117 if ( !progressCb(
float( myProcessedBits + s.processedBits.load( std::memory_order_relaxed ) ) /
bitRange.size() ) )
118 keepGoing.store(
false, std::memory_order_relaxed );
// Flush the local tally into the shared counter. NOTE(review): a
// `myProcessedBits = 0;` reset almost certainly follows on a missing line
// (otherwise the count would be double-added at original line 127) — confirm
// against the full source.
122 s.processedBits.fetch_add( myProcessedBits, std::memory_order_relaxed );
// Final flush at task end: fetch_add returns the PRE-add value, so
// total = local remainder + previously flushed = new global count.
127 const auto total = myProcessedBits + s.processedBits.fetch_add( myProcessedBits, std::memory_order_relaxed );
// Last progress report from the reporting thread with the final total.
128 if ( report && !progressCb(
float( total ) /
bitRange.size() ) )
129 keepGoing.store(
false, std::memory_order_relaxed );
// true = ran to completion; false = cancelled via the progress callback.
131 return keepGoing.load( std::memory_order_relaxed );
// Convenience overload: runs the ranged implementation over ALL bits of the given
// bit set, converting it with bitRange(bs). Returns false iff the progress callback
// cancelled the run (forwarded from the ranged overload).
// NOTE(review): the template header (parameters BS, CM, F) and the closing brace sit
// on lines missing from this extraction — body kept byte-identical.
// @param bs                     bit set whose full range is processed
// @param callMaker              factory producing one callable per worker task
// @param f                      per-bit functor, perfectly forwarded
// @param progressCb             optional progress/cancellation callback
// @param reportProgressEveryBit callback invocation period, in processed bits
135inline bool ForAllRanged(
const BS & bs,
const CM & callMaker, F && f, ProgressCallback progressCb,
size_t reportProgressEveryBit = 1024 )
137 return ForAllRanged(
bitRange( bs ), callMaker, std::forward<F>( f ), progressCb, reportProgressEveryBit );