#ifndef VIGRA_THREADING_HXX
#define VIGRA_THREADING_HXX
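/* Compatibility layer: depending on compiler capabilities (and the
   USE_BOOST_THREAD option), the threading primitives below are imported
   either from the C++11 standard library or from boost, and re-exported
   under the common namespace vigra::threading.
*/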
#ifndef VIGRA_SINGLE_THREADED

#ifndef VIGRA_NO_STD_THREADING
# if defined(__clang__)
#  if (!__has_include(<thread>) || !__has_include(<mutex>) || !__has_include(<atomic>))
#    define VIGRA_NO_STD_THREADING
#  endif
# else
#  if defined(__GNUC__) && (!defined(_GLIBCXX_HAS_GTHREADS) || !defined(_GLIBCXX_USE_C99_STDINT_TR1) || !defined(_GLIBCXX_USE_SCHED_YIELD))
#    define VIGRA_NO_STD_THREADING
#  endif
# endif

# if defined(_MSC_VER) && _MSC_VER <= 1600
#  define VIGRA_NO_STD_THREADING
# endif
#endif

#ifdef USE_BOOST_THREAD
#define BOOST_THREAD_VERSION 4
#  include <boost/thread.hpp>
#  if BOOST_VERSION >= 105300
#    define BOOST_ATOMIC_NO_LIB 1      // use boost::atomic without auto-linking the library
#    include <boost/atomic.hpp>
#    define VIGRA_HAS_ATOMIC 1
#  endif
#  define VIGRA_THREADING_NAMESPACE boost
#elif defined(VIGRA_NO_STD_THREADING)
#  error "Your compiler does not support std::thread. If the boost libraries are available, consider running cmake with -DWITH_BOOST_THREAD=1"
#else
#  include <thread>
#  include <mutex>
#  include <condition_variable>
#  include <atomic>
#  include <future>
#  define VIGRA_HAS_ATOMIC 1
#  define VIGRA_THREADING_NAMESPACE std
#endif

#if defined(_MSC_VER) && !defined(VIGRA_HAS_ATOMIC)
#  include "windows.h"    // Interlocked* intrinsics for the atomic fallback below
#endif
namespace vigra { namespace threading {

using VIGRA_THREADING_NAMESPACE::thread;

namespace this_thread {

using VIGRA_THREADING_NAMESPACE::this_thread::yield;
using VIGRA_THREADING_NAMESPACE::this_thread::get_id;
using VIGRA_THREADING_NAMESPACE::this_thread::sleep_for;
using VIGRA_THREADING_NAMESPACE::this_thread::sleep_until;

} // namespace this_thread

using VIGRA_THREADING_NAMESPACE::mutex;
using VIGRA_THREADING_NAMESPACE::recursive_mutex;
 
#    ifdef USE_BOOST_THREAD
       using VIGRA_THREADING_NAMESPACE::timed_mutex;
       using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
#    endif

       using VIGRA_THREADING_NAMESPACE::timed_mutex;
       using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;

       using VIGRA_THREADING_NAMESPACE::timed_mutex;
       using VIGRA_THREADING_NAMESPACE::recursive_timed_mutex;
 
using VIGRA_THREADING_NAMESPACE::lock_guard;
using VIGRA_THREADING_NAMESPACE::unique_lock;
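// Usage sketch: the imported names behave like their std:: (or boost::)
// counterparts, e.g. a scoped lock:
//
//     vigra::threading::mutex m;
//     {
//         vigra::threading::lock_guard<vigra::threading::mutex> guard(m);
//         // critical section; the mutex is released when guard goes out of scope
//     }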
 
using VIGRA_THREADING_NAMESPACE::defer_lock_t;
using VIGRA_THREADING_NAMESPACE::try_to_lock_t;
using VIGRA_THREADING_NAMESPACE::adopt_lock_t;

using VIGRA_THREADING_NAMESPACE::defer_lock;
using VIGRA_THREADING_NAMESPACE::try_to_lock;
using VIGRA_THREADING_NAMESPACE::adopt_lock;

using VIGRA_THREADING_NAMESPACE::try_lock;
using VIGRA_THREADING_NAMESPACE::lock;

using VIGRA_THREADING_NAMESPACE::once_flag;
using VIGRA_THREADING_NAMESPACE::call_once;
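// Usage sketch for one-time initialization (same semantics as std::call_once):
//
//     vigra::threading::once_flag initialized;
//     vigra::threading::call_once(initialized, [](){ /* runs exactly once */ });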
 
using VIGRA_THREADING_NAMESPACE::future;

using VIGRA_THREADING_NAMESPACE::condition_variable;

using VIGRA_THREADING_NAMESPACE::packaged_task;
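// Usage sketch: running work on a thread and collecting the result through a
// future (the std:: pattern; the boost path offers the same interface):
//
//     vigra::threading::packaged_task<int()> task([](){ return 42; });
//     vigra::threading::future<int> result = task.get_future();
//     vigra::threading::thread worker(std::move(task));
//     int answer = result.get();   // waits for the task to finish
//     worker.join();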
 
#ifdef VIGRA_HAS_ATOMIC

using VIGRA_THREADING_NAMESPACE::atomic_flag;
using VIGRA_THREADING_NAMESPACE::atomic;

using VIGRA_THREADING_NAMESPACE::atomic_char;
using VIGRA_THREADING_NAMESPACE::atomic_schar;
using VIGRA_THREADING_NAMESPACE::atomic_uchar;
using VIGRA_THREADING_NAMESPACE::atomic_short;
using VIGRA_THREADING_NAMESPACE::atomic_ushort;
using VIGRA_THREADING_NAMESPACE::atomic_int;
using VIGRA_THREADING_NAMESPACE::atomic_uint;
using VIGRA_THREADING_NAMESPACE::atomic_long;
using VIGRA_THREADING_NAMESPACE::atomic_ulong;
using VIGRA_THREADING_NAMESPACE::atomic_llong;
using VIGRA_THREADING_NAMESPACE::atomic_ullong;

using VIGRA_THREADING_NAMESPACE::atomic_wchar_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least8_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least8_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least16_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least16_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least32_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least32_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_least64_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_least64_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast8_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast8_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast16_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast16_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast32_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast32_t;
using VIGRA_THREADING_NAMESPACE::atomic_int_fast64_t;
using VIGRA_THREADING_NAMESPACE::atomic_uint_fast64_t;
using VIGRA_THREADING_NAMESPACE::atomic_intptr_t;
using VIGRA_THREADING_NAMESPACE::atomic_uintptr_t;
using VIGRA_THREADING_NAMESPACE::atomic_size_t;
using VIGRA_THREADING_NAMESPACE::atomic_ptrdiff_t;
using VIGRA_THREADING_NAMESPACE::atomic_intmax_t;
using VIGRA_THREADING_NAMESPACE::atomic_uintmax_t;

using VIGRA_THREADING_NAMESPACE::memory_order;
using VIGRA_THREADING_NAMESPACE::memory_order_relaxed;
using VIGRA_THREADING_NAMESPACE::memory_order_release;
using VIGRA_THREADING_NAMESPACE::memory_order_acquire;
using VIGRA_THREADING_NAMESPACE::memory_order_consume;
using VIGRA_THREADING_NAMESPACE::memory_order_acq_rel;
using VIGRA_THREADING_NAMESPACE::memory_order_seq_cst;

using VIGRA_THREADING_NAMESPACE::atomic_thread_fence;
using VIGRA_THREADING_NAMESPACE::atomic_signal_fence;
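// Usage sketch: a thread-safe counter with the imported atomic template
// (semantics as in std::atomic):
//
//     vigra::threading::atomic<int> counter(0);
//     counter.fetch_add(1, vigra::threading::memory_order_relaxed);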
 
#else  // VIGRA_HAS_ATOMIC not defined

enum memory_order {
    memory_order_relaxed,
    memory_order_release,
    memory_order_acquire,
    memory_order_consume,
    memory_order_acq_rel,
    memory_order_seq_cst
};
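// Note: in this fallback the memory_order arguments below are accepted for
// interface compatibility only; the implementations ignore them and always
// issue full barriers (MemoryBarrier() / __sync_synchronize()) or use
// full-barrier read-modify-write intrinsics.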
 
#ifdef _MSC_VER

template <int SIZE=4>
struct atomic_long_impl
{
    typedef LONG value_type;

    static long load(value_type const & val)
    {
        // plain read followed by a full memory barrier
        long res = val;
        MemoryBarrier();
        return res;
    }

    static void store(value_type & dest, long val)
    {
        // full memory barrier followed by a plain write
        MemoryBarrier();
        dest = val;
    }

    static long add(value_type & dest, long val)
    {
        return InterlockedExchangeAdd(&dest, val);
    }

    static long sub(value_type & dest, long val)
    {
        return InterlockedExchangeAdd(&dest, -val);
    }

    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = InterlockedCompareExchange(&dest, new_val, old_val);
        return check_val == old_val;
    }
};
 
template <>
struct atomic_long_impl<8>
{
    typedef LONGLONG value_type;

    static long load(value_type const & val)
    {
        long res = val;
        MemoryBarrier();
        return res;
    }

    static void store(value_type & dest, long val)
    {
        MemoryBarrier();
        dest = val;
    }

    static long add(value_type & dest, long val)
    {
        return InterlockedExchangeAdd64(&dest, val);
    }

    static long sub(value_type & dest, long val)
    {
        return InterlockedExchangeAdd64(&dest, -val);
    }

    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = InterlockedCompareExchange64(&dest, new_val, old_val);
        return check_val == old_val;
    }
};
 
#else

template <int SIZE=4>
struct atomic_long_impl
{
    typedef long value_type;

    static long load(value_type const & val)
    {
        long res = val;
        __sync_synchronize();
        return res;
    }

    static void store(value_type & dest, long val)
    {
        __sync_synchronize();
        dest = val;
    }

    static long add(value_type & dest, long val)
    {
        return __sync_fetch_and_add(&dest, val);
    }

    static long sub(value_type & dest, long val)
    {
        return __sync_fetch_and_sub(&dest, val);
    }

    static bool compare_exchange(value_type & dest, long & old_val, long new_val)
    {
        long check_val = old_val;
        old_val = __sync_val_compare_and_swap(&dest, old_val, new_val);
        return check_val == old_val;
    }
};

#endif // _MSC_VER
 
struct atomic_long
{
    typedef atomic_long_impl<sizeof(long)>::value_type value_type;

    atomic_long(long v = 0)
    : value_(v)
    {}

    atomic_long & operator=(long val)
    {
        store(val);
        return *this;
    }

    bool operator==(long val) const
    {
        return load() == val;
    }

    long load(memory_order = memory_order_seq_cst) const
    {
        return atomic_long_impl<sizeof(long)>::load(value_);
    }

    void store(long v, memory_order = memory_order_seq_cst)
    {
        atomic_long_impl<sizeof(long)>::store(value_, v);
    }

    long fetch_add(long v, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::add(value_, v);
    }

    long fetch_sub(long v, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::sub(value_, v);
    }

    bool compare_exchange_strong(long & old_val, long new_val, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
    }

    bool compare_exchange_weak(long & old_val, long new_val, memory_order = memory_order_seq_cst)
    {
        return atomic_long_impl<sizeof(long)>::compare_exchange(value_, old_val, new_val);
    }

    value_type value_;
};
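// Usage sketch for the fallback type, e.g. a shared reference count
// (fetch_add/fetch_sub return the value held before the operation):
//
//     vigra::threading::atomic_long refcount(0);
//     refcount.fetch_add(1);                        // acquire a reference
//     if(refcount.fetch_sub(1) == 1) { /* last reference released */ }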
 
#endif // VIGRA_HAS_ATOMIC

}} // namespace vigra::threading

#undef VIGRA_THREADING_NAMESPACE

#endif // not VIGRA_SINGLE_THREADED

#endif // VIGRA_THREADING_HXX