129 #ifndef VIGRA_MULTI_ARRAY_CHUNKED_HXX 
  130 #define VIGRA_MULTI_ARRAY_CHUNKED_HXX 
  135 #include "multi_fwd.hxx" 
  136 #include "multi_handle.hxx" 
  137 #include "multi_array.hxx" 
  138 #include "memory.hxx" 
  139 #include "metaprogramming.hxx" 
  140 #include "threading.hxx" 
  141 #include "compression.hxx" 
  144 # include "windows.h" 
  149 # include <sys/stat.h> 
  150 # include <sys/mman.h> 
  155 #ifdef VIGRA_CHECK_BOUNDS 
  156 #define VIGRA_ASSERT_INSIDE(diff) \ 
  157   vigra_precondition(this->isInside(diff), "Index out of bounds") 
  159 #define VIGRA_ASSERT_INSIDE(diff) 
  165     #define VIGRA_NO_SPARSE_FILE 
  171 void winErrorToException(std::string message = 
"")
 
  174     DWORD dw = GetLastError();
 
  177         FORMAT_MESSAGE_ALLOCATE_BUFFER |
 
  178         FORMAT_MESSAGE_FROM_SYSTEM |
 
  179         FORMAT_MESSAGE_IGNORE_INSERTS,
 
  182         MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
 
  186     message += (
char*)lpMsgBuf;
 
  189     throw std::runtime_error(message);
 
  193 std::string winTempFileName(std::string path = 
"")
 
  197         TCHAR default_path[MAX_PATH];
 
  198         if(!GetTempPath(MAX_PATH, default_path))
 
  199             winErrorToException(
"winTempFileName(): ");
 
  203     TCHAR name[MAX_PATH];
 
  204     if(!GetTempFileName(path.c_str(), TEXT(
"vigra"), 0, name))
 
  205         winErrorToException(
"winTempFileName(): ");
 
  207     return std::string(name);
 
  211 std::size_t winClusterSize()
 
  214     ::GetSystemInfo(&info);
 
  215     return info.dwAllocationGranularity;
 
  223 std::size_t mmap_alignment = winClusterSize();
 
  225 std::size_t mmap_alignment = sysconf(_SC_PAGE_SIZE);
 
  230 template <
unsigned int N, 
class T>
 
  231 class IteratorChunkHandle;
 
  235 template <
unsigned int N>
 
  238     template <
class T, 
int M>
 
  239     static void chunkIndex(TinyVector<T, M> 
const & p,
 
  240                            TinyVector<T, M> 
const & bits,
 
  241                            TinyVector<T, M> & index)
 
  243         typedef std::size_t UI;
 
  244         ChunkIndexing<N-1>::chunkIndex(p, bits, index);
 
  245         index[N-1] = (UI)p[N-1] >> bits[N-1];
 
  248     template <
class T, 
int M>
 
  249     static std::size_t chunkOffset(TinyVector<T, M> 
const & p,
 
  250                                    TinyVector<T, M> 
const & bits,
 
  251                                    TinyVector<T, M> 
const & strides)
 
  253         typedef std::size_t UI;
 
  254         return ChunkIndexing<N-1>::chunkOffset(p, bits, strides) +
 
  255                ((UI)p[N-1] >> bits[N-1]) * strides[N-1];
 
  258     template <
class T, 
int M>
 
  259     static std::size_t offsetInChunk(TinyVector<T, M> 
const & p,
 
  260                                      TinyVector<T, M> 
const & mask,
 
  261                                      TinyVector<T, M> 
const & strides)
 
  263         typedef std::size_t UI;
 
  264         return ChunkIndexing<N-1>::offsetInChunk(p, mask, strides) +
 
  265                ((UI)p[N-1] & (UI)mask[N-1]) * strides[N-1];
 
  270 struct ChunkIndexing<1>
 
  272     template <
class T, 
int M>
 
  273     static void chunkIndex(TinyVector<T, M> 
const & p,
 
  274                            TinyVector<T, M> 
const & bits,
 
  275                            TinyVector<T, M> & index)
 
  277         typedef std::size_t UI;
 
  278         index[0] = (UI)p[0] >> bits[0];
 
  281     template <
class T, 
int M>
 
  282     static std::size_t chunkOffset(TinyVector<T, M> 
const & p,
 
  283                                    TinyVector<T, M> 
const & bits,
 
  284                                    TinyVector<T, M> 
const & strides)
 
  286         typedef std::size_t UI;
 
  287         return ((UI)p[0] >> bits[0]) * strides[0];
 
  290     template <
class T, 
int M>
 
  291     static std::size_t offsetInChunk(TinyVector<T, M> 
const & p,
 
  292                                      TinyVector<T, M> 
const & mask,
 
  293                                      TinyVector<T, M> 
const & strides)
 
  295         typedef std::size_t UI;
 
  296         return ((UI)p[0] & (UI)mask[0]) * strides[0];
 
  300 template <
class T, 
int M>
 
  301 inline TinyVector<T, M>
 
  302 computeChunkArrayShape(TinyVector<T, M> shape,
 
  303                        TinyVector<T, M> 
const & bits,
 
  304                        TinyVector<T, M> 
const & mask)
 
  306     for(
int k=0; k<M; ++k)
 
  307         shape[k] = (shape[k] + mask[k]) >> bits[k];
 
  311 template <
class T, 
int M>
 
  313 defaultCacheSize(TinyVector<T, M> 
const & shape)
 
  316     for(
int k=0; k<M-1; ++k)
 
  317         for(
int j=k+1; j<M; ++j)
 
  318             res = std::max(res, shape[k]*shape[j]);
 
  324 template <
unsigned int N, 
class T>
 
  328     typedef typename MultiArrayShape<N>::type shape_type;
 
  329     typedef T value_type;
 
  337     ChunkBase(shape_type 
const & strides, pointer p = 0)
 
  342     typename MultiArrayShape<N>::type strides_;
 
  346 template <
unsigned int N, 
class T>
 
  347 class SharedChunkHandle
 
  350     typedef typename MultiArrayShape<N>::type shape_type;
 
  352     static const long chunk_asleep = -2;
 
  353     static const long chunk_uninitialized = -3;
 
  354     static const long chunk_locked = -4;
 
  355     static const long chunk_failed = -5;
 
  361         chunk_state_ = chunk_uninitialized;
 
  364     SharedChunkHandle(SharedChunkHandle 
const & rhs)
 
  365     : pointer_(rhs.pointer_)
 
  368         chunk_state_ = chunk_uninitialized;
 
  371     shape_type 
const & strides()
 const 
  373         return pointer_->strides_;
 
  376     ChunkBase<N, T> * pointer_;
 
  377     mutable threading::atomic_long chunk_state_;
 
  380     SharedChunkHandle & operator=(SharedChunkHandle 
const & rhs);
 
  383 template <
unsigned int N, 
class T>
 
  384 class ChunkedArrayBase
 
  387     enum ActualDimension{ actual_dimension = (N == 0) ? 1 : N };
 
  388     typedef typename MultiArrayShape<N>::type  shape_type;
 
  389     typedef T value_type;
 
  390     typedef value_type * pointer;
 
  391     typedef value_type & reference;
 
  392     typedef ChunkBase<N, T> Chunk;
 
  399     ChunkedArrayBase(shape_type 
const & shape, shape_type 
const & chunk_shape)
 
  401     , chunk_shape_(
prod(chunk_shape) > 0 ? chunk_shape : detail::ChunkShape<N, T>::defaultShape())
 
  404     virtual ~ChunkedArrayBase()
 
  407     virtual void unrefChunk(IteratorChunkHandle<N, T> * h) 
const = 0;
 
  409     virtual pointer chunkForIterator(shape_type 
const & point,
 
  410                                      shape_type & strides, shape_type & upper_bound,
 
  411                                      IteratorChunkHandle<N, T> * h) = 0;
 
  413     virtual pointer chunkForIterator(shape_type 
const & point,
 
  414                                      shape_type & strides, shape_type & upper_bound,
 
  415                                      IteratorChunkHandle<N, T> * h) 
const = 0;
 
  417     virtual std::string backend() 
const = 0;
 
  419     virtual shape_type chunkArrayShape() 
const = 0;
 
  421     virtual bool isReadOnly()
 const 
  431     shape_type 
const & shape()
 const 
  441     shape_type 
const & chunkShape()
 const 
  448         return chunk_shape_[d];
 
  451     bool isInside(shape_type 
const & p)
 const 
  453         for(
unsigned d=0; d<N; ++d)
 
  454             if(p[d] < 0 || p[d] >= shape_[d])
 
  459     shape_type shape_, chunk_shape_;
 
  462 template <
unsigned int N, 
class T>
 
  465 struct ChunkUnrefProxyBase
 
  467     virtual ~ChunkUnrefProxyBase() {}
 
  470 template <
unsigned int N, 
class T_MaybeConst>
 
  471 class MultiArrayView<N, T_MaybeConst, ChunkedArrayTag>
 
  472 : 
public ChunkedArrayBase<N, typename UnqualifiedType<T_MaybeConst>::type>
 
  475     enum ActualDimension { actual_dimension = (N==0) ? 1 : N };
 
  476     typedef typename UnqualifiedType<T_MaybeConst>::type     T;
 
  477     typedef T value_type;   
 
  478     typedef T_MaybeConst & reference;
 
  479     typedef const value_type &const_reference;
 
  480     typedef T_MaybeConst * pointer;
 
  481     typedef const value_type *const_pointer;
 
  483     typedef difference_type key_type;
 
  484     typedef difference_type size_type;
 
  485     typedef difference_type shape_type;
 
  487     typedef ChunkIterator<actual_dimension, T_MaybeConst>         chunk_iterator;
 
  488     typedef ChunkIterator<actual_dimension, T const>   chunk_const_iterator;
 
  489     typedef StridedScanOrderIterator<actual_dimension, ChunkedMemory<T_MaybeConst>, T_MaybeConst&, T_MaybeConst*> iterator;
 
  490     typedef StridedScanOrderIterator<actual_dimension, ChunkedMemory<T const>, T 
const &, T 
const *> const_iterator;
 
  491     typedef MultiArrayView<N, T_MaybeConst, ChunkedArrayTag> view_type;
 
  492     typedef MultiArrayView<N, T const, ChunkedArrayTag> const_view_type;
 
  493     typedef ChunkedArrayTag StrideTag;
 
  494     typedef ChunkBase<N, T> Chunk;
 
  496     typedef MultiArray<N, Chunk> ChunkHolder;
 
  499     : 
public ChunkUnrefProxyBase
 
  501         UnrefProxy(
int size, ChunkedArray<N, T> * array)
 
  509                 array_->unrefChunks(chunks_);
 
  512         ArrayVector<SharedChunkHandle<N, T> *> chunks_;
 
  513         ChunkedArray<N, T> * array_;
 
  516     virtual shape_type chunkArrayShape()
 const 
  518         return chunks_.shape();
 
  521     shape_type chunkStart(shape_type 
const & global_start)
 const 
  523         shape_type chunk_start(SkipInitialization);
 
  524         detail::ChunkIndexing<N>::chunkIndex(global_start, bits_, chunk_start);
 
  528     shape_type chunkStop(shape_type global_stop)
 const 
  530         global_stop -= shape_type(1);
 
  531         shape_type chunk_stop(SkipInitialization);
 
  532         detail::ChunkIndexing<N>::chunkIndex(global_stop, bits_, chunk_stop);
 
  533         chunk_stop += shape_type(1);
 
  537     virtual void unrefChunk(IteratorChunkHandle<N, T> *)
 const {}
 
  539     virtual T* chunkForIterator(shape_type 
const & point,
 
  540                                 shape_type & strides, shape_type & upper_bound,
 
  541                                 IteratorChunkHandle<N, T> * h)
 
  543         return const_cast<MultiArrayView 
const *
>(
this)->chunkForIterator(point, strides, upper_bound, h);
 
  546     virtual T* chunkForIterator(shape_type 
const & point,
 
  547                                 shape_type & strides, shape_type & upper_bound,
 
  548                                 IteratorChunkHandle<N, T> * h)
 const 
  550         shape_type global_point = point + h->offset_;
 
  552         if(!this->isInside(global_point))
 
  554             upper_bound = point + this->chunk_shape_;
 
  558         global_point += offset_;
 
  559         shape_type coffset = offset_ + h->offset_;
 
  561         shape_type chunkIndex = chunkStart(global_point);
 
  562         Chunk 
const * chunk = &chunks_[chunkIndex];
 
  563         strides = chunk->strides_;
 
  564         upper_bound = (chunkIndex + shape_type(1)) * this->chunk_shape_ - coffset;
 
  565         std::size_t offset = detail::ChunkIndexing<N>::offsetInChunk(global_point, mask_, strides);
 
  566         return const_cast<T*
>(chunk->pointer_ + offset);
 
  569     virtual std::string backend()
 const 
  571         return "MultiArrayView<ChunkedArrayTag>";
 
  575     : ChunkedArrayBase<N, T>()
 
  578     MultiArrayView(shape_type 
const & shape, shape_type 
const & chunk_shape)
 
  579     : ChunkedArrayBase<N, T>(shape, chunk_shape)
 
  582     MultiArrayView & operator=(MultiArrayView 
const & rhs)
 
  588                 ChunkedArrayBase<N, T>::operator=(rhs);
 
  589                 chunks_ = rhs.chunks_;
 
  590                 offset_ = rhs.offset_;
 
  597                 vigra_precondition(this->shape() == rhs.shape(),
 
  598                                    "MultiArrayView::operator=(): shape mismatch.");
 
  599                 iterator i = begin(), ie = end();
 
  600                 const_iterator j = rhs.begin();
 
  601                 for(; i != ie; ++i, ++j)
 
  608     #define VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(op) \ 
  609     template<class U, class C1> \ 
  610     MultiArrayView & operator op(MultiArrayView<N, U, C1> const & rhs) \ 
  612         vigra_precondition(this->shape() == rhs.shape(), \ 
  613                            "MultiArrayView::operator" #op "(): shape mismatch."); \ 
  614         iterator i = begin(), ie = end(); \ 
  615         typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin(); \ 
  616         for(; i != ie; ++i, ++j) \ 
  617             *i op detail::RequiresExplicitCast<value_type>::cast(*j); \ 
  621     MultiArrayView & operator op(value_type const & v) \ 
  625             iterator i = begin(), ie = end(); \ 
  626             for(; i != ie; ++i) \ 
  632     VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(=)
 
  633     VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(+=)
 
  634     VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(-=)
 
  635     VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(*=)
 
  636     VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(/=)
 
  638     #undef VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN 
  687     reference operator[](shape_type point)
 
  689         VIGRA_ASSERT_INSIDE(point);
 
  691         Chunk * chunk = chunks_.data() +
 
  692                         detail::ChunkIndexing<N>::chunkOffset(point, bits_, chunks_.stride());
 
  693         return *(chunk->pointer_ +
 
  694                  detail::ChunkIndexing<N>::offsetInChunk(point, mask_, chunk->strides_));
 
  697     const_reference operator[](shape_type 
const & point)
 const 
  699         return const_cast<MultiArrayView *
>(
this)->
operator[](point);
 
  703     MultiArrayView <N-M, T, ChunkedArrayTag>
 
  704     operator[](
const TinyVector<MultiArrayIndex, M> &d)
 const 
  709     reference operator[](difference_type_1 d)
 
  711         return operator[](scanOrderIndexToCoordinate(d));
 
  714     const_reference operator[](difference_type_1 d)
 const 
  716         return operator[](scanOrderIndexToCoordinate(d));
 
  719     difference_type scanOrderIndexToCoordinate(difference_type_1 d)
 const 
  721         difference_type coord(SkipInitialization);
 
  722         detail::ScanOrderToCoordinate<actual_dimension>::exec(d, this->shape_, coord);
 
  728     difference_type_1 coordinateToScanOrderIndex(
const difference_type &d)
 const 
  730         return detail::CoordinateToScanOrder<actual_dimension>::exec(this->shape_, d);
 
  818     MultiArrayView & init(
const U & init)
 
  820         return operator=(init);
 
  823     template <
class U, 
class CN>
 
  824     void copy(
const MultiArrayView <N, U, CN>& rhs)
 
  829     template <
class T2, 
class C2>
 
  830     void swapData(MultiArrayView <N, T2, C2> rhs)
 
  834         vigra_precondition(this->shape() == rhs.shape(),
 
  835                            "MultiArrayView::swapData(): shape mismatch.");
 
  836         iterator i = begin(), ie = end();
 
  837         typename MultiArrayView<N, T2, C2>::iterator j = rhs.begin();
 
  838         for(; i != ie; ++i, ++j)
 
  842     bool isUnstrided(
unsigned int dimension = N-1)
 const 
  844         if(chunks_.size() > 1)
 
  846         difference_type s = vigra::detail::defaultStride<actual_dimension>(this->shape());
 
  847         for(
unsigned int k = 0; k <= dimension; ++k)
 
  848             if(chunks_.data()->strides_[k] != s[k])
 
  853     MultiArrayView<N-1, value_type, ChunkedArrayTag>
 
  856         MultiArrayView<N-1, value_type, ChunkedArrayTag> res(this->shape_.dropIndex(m), this->chunk_shape_.dropIndex(m));
 
  857         res.offset_ = offset_.dropIndex(m);
 
  858         res.bits_   = bits_.dropIndex(m);
 
  859         res.mask_   = mask_.dropIndex(m);
 
  860         res.chunks_.reshape(chunks_.shape().dropIndex(m));
 
  863         typedef std::size_t UI;
 
  864         UI start = offset_[m] + d;
 
  865         UI chunk_start = start >> bits_[m];
 
  866         UI startInChunk = start - chunk_start * this->chunk_shape_[m];
 
  868         MultiArrayView<N-1, Chunk> view(chunks_.bindAt(m, chunk_start));
 
  869         MultiCoordinateIterator<N-1> i(view.shape()),
 
  870                                      end(i.getEndIterator());
 
  873             res.chunks_[*i].pointer_ = view[*i].pointer_ + startInChunk*view[*i].strides_[m];
 
  874             res.chunks_[*i].strides_ = view[*i].strides_.dropIndex(m);
 
  880     template <
unsigned int M>
 
  881     MultiArrayView <N-1, value_type, ChunkedArrayTag>
 
  882     bind (difference_type_1 d)
 const 
  887     MultiArrayView <N-1, value_type, ChunkedArrayTag>
 
  888     bindOuter (difference_type_1 d)
 const 
  890         return bindAt(N-1, d);
 
  893     template <
int M, 
class Index>
 
  894     MultiArrayView <N-M, value_type, ChunkedArrayTag>
 
  895     bindOuter(
const TinyVector <Index, M> &d)
 const 
  897         return bindAt(N-1, d[M-1]).bindOuter(d.dropIndex(M-1));
 
  900     template <
class Index>
 
  901     MultiArrayView <N-1, value_type, ChunkedArrayTag>
 
  902     bindOuter(
const TinyVector <Index, 1> &d)
 const 
  904         return bindAt(N-1, d[0]);
 
  907     MultiArrayView <N-1, value_type, ChunkedArrayTag>
 
  908     bindInner (difference_type_1 d)
 const 
  913     template <
int M, 
class Index>
 
  914     MultiArrayView <N-M, value_type, ChunkedArrayTag>
 
  915     bindInner(
const TinyVector <Index, M> &d)
 const 
  917         return bindAt(0, d[0]).bindInner(d.dropIndex(0));
 
  920     template <
class Index>
 
  921     MultiArrayView <N-1, value_type, ChunkedArrayTag>
 
  922     bindInner(
const TinyVector <Index, 1> &d)
 const 
  924         return bindAt(0, d[0]);
 
  953     checkSubarrayBounds(shape_type 
const & start, shape_type 
const & stop,
 
  954                         std::string message)
 const 
  956         message += 
": subarray out of bounds.";
 
  963     MultiArrayView<N, value_type, ChunkedArrayTag>
 
  964     subarray(shape_type start, shape_type stop)
 
  966         checkSubarrayBounds(start, stop, 
"MultiArrayView<N-1, T, ChunkedArrayTag>::subarray()");
 
  969         shape_type chunk_start(chunkStart(start));
 
  971         MultiArrayView<N, value_type, ChunkedArrayTag> view(stop-start, this->chunk_shape_);
 
  972         view.chunks_ = chunks_.subarray(chunk_start, chunkStop(stop));
 
  973         view.offset_ = start - chunk_start * this->chunk_shape_;
 
  976         view.unref_ = unref_;
 
  995     MultiArrayView <N, value_type, ChunkedArrayTag>
 
 1001     MultiArrayView <N, value_type, ChunkedArrayTag>
 
 1002     transpose(
const difference_type &permutation)
 const 
 1004         MultiArrayView<N, value_type, ChunkedArrayTag>
 
 1005             view(vigra::transpose(this->shape_, permutation), vigra::transpose(this->chunk_shape_, permutation));
 
 1006         view.chunks_        = chunks_.transpose(permutation); 
 
 1010         view.unref_         = unref_;
 
 1012                                                 iend = view.chunks_.end();
 
 1013         for(; i != iend; ++i)
 
 1014             i->strides_ = vigra::transpose(i->strides_, permutation);
 
 1048     template <
class U, 
class C1>
 
 1049     bool operator==(MultiArrayView<N, U, C1> 
const & rhs)
 const 
 1051         if(this->shape() != rhs.shape())
 
 1053         const_iterator i = begin(), ie = end();
 
 1054         typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin();
 
 1055         for(; i != ie; ++i, ++j)
 
 1061     template <
class U, 
class C1>
 
 1062     bool operator!=(MultiArrayView<N, U, C1> 
const & rhs)
 const 
 1157     bool hasData ()
 const 
 1159         return chunks_.hasData();
 
 1164         return createCoupledIterator(*
this);
 
 1169         return begin().getEndIterator();
 
 1172     const_iterator cbegin()
 const 
 1174         return createCoupledIterator(const_cast<MultiArrayView const &>(*
this));
 
 1177     const_iterator cend()
 const 
 1179         return cbegin().getEndIterator();
 
 1182     const_iterator begin()
 const 
 1184         return createCoupledIterator(*
this);
 
 1187     const_iterator end()
 const 
 1189         return begin().getEndIterator();
 
 1192     chunk_iterator chunk_begin(shape_type 
const & start, shape_type 
const & stop)
 
 1194         checkSubarrayBounds(start, stop, 
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_begin()");
 
 1195         return chunk_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
 
 1198     chunk_iterator chunk_end(shape_type 
const & start, shape_type 
const & stop)
 
 1200         return chunk_begin(start, stop).getEndIterator();
 
 1203     chunk_const_iterator chunk_begin(shape_type 
const & start, shape_type 
const & stop)
 const 
 1205         checkSubarrayBounds(start, stop, 
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_begin()");
 
 1206         return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
 
 1209     chunk_const_iterator chunk_end(shape_type 
const & start, shape_type 
const & stop)
 const 
 1211         return chunk_begin(start, stop).getEndIterator();
 
 1214     chunk_const_iterator chunk_cbegin(shape_type 
const & start, shape_type 
const & stop)
 const 
 1216         checkSubarrayBounds(start, stop, 
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_cbegin()");
 
 1217         return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
 
 1220     chunk_const_iterator chunk_cend(shape_type 
const & start, shape_type 
const & stop)
 const 
 1222         return chunk_cbegin(start, stop).getEndIterator();
 
 1230     MultiArray<N, Chunk> chunks_;
 
 1231     shape_type offset_, bits_, mask_;
 
 1232     VIGRA_SHARED_PTR<ChunkUnrefProxyBase> unref_;
 
 1235 template <
unsigned int N, 
class T>
 
 1236 typename MultiArrayView<N, T, ChunkedArrayTag>::iterator
 
 1237 createCoupledIterator(MultiArrayView<N, T, ChunkedArrayTag> & m)
 
 1239     typedef typename MultiArrayView<N, T, ChunkedArrayTag>::iterator    IteratorType;
 
 1240     typedef typename IteratorType::handle_type           P1;
 
 1241     typedef typename P1::base_type                       P0;
 
 1243     return IteratorType(P1(m,
 
 1247 template <
unsigned int N, 
class T>
 
 1248 typename MultiArrayView<N, T, ChunkedArrayTag>::const_iterator
 
 1249 createCoupledIterator(MultiArrayView<N, T, ChunkedArrayTag> 
const & m)
 
 1251     typedef typename MultiArrayView<N, T, ChunkedArrayTag>::const_iterator    IteratorType;
 
 1252     typedef typename IteratorType::handle_type           P1;
 
 1253     typedef typename P1::base_type                       P0;
 
 1255     return IteratorType(P1(m,
 
 1275     , compression_method(DEFAULT_COMPRESSION)
 
 1314         compression_method = v;
 
 1325     CompressionMethod compression_method;
 
 1522 template <
unsigned int N, 
class T>
 
 1524 : 
public ChunkedArrayBase<N, T>
 
 1600     typedef ChunkedArrayBase<N, T> base_type;
 
 1601     typedef typename MultiArrayShape<N>::type  shape_type;
 
 1602     typedef typename shape_type::value_type  difference_type_1;
 
 1603     typedef T value_type;
 
 1604     typedef value_type * pointer;
 
 1605     typedef value_type 
const * const_pointer;
 
 1606     typedef value_type & reference;
 
 1607     typedef value_type 
const & const_reference;
 
 1608     typedef ChunkIterator<N, T>         chunk_iterator;
 
 1609     typedef ChunkIterator<N, T const>   chunk_const_iterator;
 
 1610     typedef StridedScanOrderIterator<N, ChunkedMemory<T>, reference, pointer>   iterator;
 
 1611     typedef StridedScanOrderIterator<N, ChunkedMemory<T const>, const_reference, const_pointer>   const_iterator;
 
 1612     typedef SharedChunkHandle<N, T> Handle;
 
 1613     typedef ChunkBase<N, T> Chunk;
 
 1614     typedef MultiArrayView<N, T, ChunkedArrayTag>                   view_type;
 
 1615     typedef MultiArrayView<N, T const, ChunkedArrayTag>             const_view_type;
 
 1616     typedef std::queue<Handle*> CacheType;
 
 1618     static const long chunk_asleep = Handle::chunk_asleep;
 
 1619     static const long chunk_uninitialized = Handle::chunk_uninitialized;
 
 1620     static const long chunk_locked = Handle::chunk_locked;
 
 1621     static const long chunk_failed = Handle::chunk_failed;
 
 1624     explicit ChunkedArray(shape_type 
const & shape,
 
 1625                           shape_type 
const & chunk_shape = shape_type(),
 
 1626                           ChunkedArrayOptions 
const & options = ChunkedArrayOptions())
 
 1627     : ChunkedArrayBase<N, T>(shape, chunk_shape)
 
 1628     , bits_(initBitMask(this->chunk_shape_))
 
 1629     , mask_(this->chunk_shape_ -shape_type(1))
 
 1630     , cache_max_size_(options.cache_max)
 
 1631     , chunk_lock_(new threading::mutex())
 
 1632     , fill_value_(T(options.fill_value))
 
 1633     , fill_scalar_(options.fill_value)
 
 1634     , handle_array_(detail::computeChunkArrayShape(shape, bits_, mask_))
 
 1636     , overhead_bytes_(handle_array_.size()*sizeof(Handle))
 
 1638         fill_value_chunk_.pointer_ = &fill_value_;
 
 1639         fill_value_handle_.pointer_ = &fill_value_chunk_;
 
 1640         fill_value_handle_.chunk_state_.store(1);
 
 1644     static shape_type initBitMask(shape_type 
const & chunk_shape)
 
 1647         for(
unsigned int k=0; k<N; ++k)
 
 1651                                "ChunkedArray: chunk_shape elements must be powers of 2.");
 
 1657     virtual ~ChunkedArray()
 
 1666         return cache_.size();
 
 1683         return overhead_bytes_;
 
 1690         return handle_array_.shape();
 
 1693     virtual std::size_t dataBytes(Chunk * c) 
const = 0;
 
 1699         return prod(this->chunk_shape_)*
sizeof(T);
 
 1704     virtual std::size_t overheadBytesPerChunk() 
const = 0;
 
 1710         shape_type chunk_start(SkipInitialization);
 
 1711         detail::ChunkIndexing<N>::chunkIndex(global_start, bits_, chunk_start);
 
 1724         global_stop -= shape_type(1);
 
 1725         shape_type chunk_stop(SkipInitialization);
 
 1726         detail::ChunkIndexing<N>::chunkIndex(global_stop, bits_, chunk_stop);
 
 1727         chunk_stop += shape_type(1);
 
 1738         return min(this->chunk_shape_,
 
 1739                    this->shape_ - chunk_index*this->chunk_shape_);
 
 1742     using base_type::chunkShape;
 
 1750     shape_type 
const & chunkShape() 
const;
 
 1754     shape_type 
const & shape() 
const;
 
 1762     bool isInside(shape_type 
const & p) 
const;
 
 1766     std::string backend() 
const;
 
 1771     checkSubarrayBounds(shape_type 
const & start, shape_type 
const & stop,
 
 1772                         std::string message)
 const 
 1774         message += 
": subarray out of bounds.";
 
 1775         vigra_precondition(
allLessEqual(shape_type(), start) &&
 
 1783     template <
class U, 
class C1>
 
 1786         if(this->shape() != rhs.shape())
 
 1788         const_iterator i = begin(), ie = end();
 
 1790         for(; i != ie; ++i, ++j)
 
 1798     template <
class U, 
class C1>
 
 1805     virtual pointer loadChunk(Chunk ** chunk, shape_type 
const & chunk_index) = 0;
 
 1810     virtual bool unloadHandle(Handle * handle, 
bool destroy = 
false)
 
 1812         if(handle == &fill_value_handle_)
 
 1814         return unloadChunk(handle->pointer_, destroy);
 
 1817     virtual bool unloadChunk(Chunk * chunk, 
bool destroy = 
false) = 0;
 
 1819     Handle * lookupHandle(shape_type 
const & index)
 
 1821         return &handle_array_[index];
 
 1826     virtual void unrefChunk(IteratorChunkHandle<N, T> * h)
 const 
 1828         unrefChunk(h->chunk_);
 
 1833     void unrefChunk(Handle * chunk)
 const 
 1837             long rc = chunk->chunk_state_.fetch_sub(1);
 
 1838             ignore_argument(rc);
 
 1839           #ifdef VIGRA_CHECK_BOUNDS 
 1840             vigra_invariant(rc >= 0,
 
 1841                             "ChunkedArray::unrefChunk(): chunk refcount got negative!");
 
 1847     void unrefChunks(ArrayVector<Handle*> 
const & chunks)
 
 1849         for(
unsigned int k=0; k<chunks.size(); ++k)
 
 1850             unrefChunk(chunks[k]);
 
 1852         if(cacheMaxSize() > 0)
 
 1854             threading::lock_guard<threading::mutex> guard(*chunk_lock_);
 
 1855             cleanCache(cache_.size());
 
 1861     long acquireRef(Handle * handle)
 const 
 1869         long rc = handle->chunk_state_.load(threading::memory_order_acquire);
 
 1874                 if(handle->chunk_state_.compare_exchange_weak(rc, rc+1, threading::memory_order_seq_cst))
 
 1881                 if(rc == chunk_failed)
 
 1883                     vigra_precondition(
false,
 
 1884                      "ChunkedArray::acquireRef() attempt to access failed chunk.");
 
 1886                 else if(rc == chunk_locked)
 
 1889                     threading::this_thread::yield();
 
 1890                     rc = handle->chunk_state_.load(threading::memory_order_acquire);
 
 1892                 else if(handle->chunk_state_.compare_exchange_weak(rc, chunk_locked, threading::memory_order_seq_cst))
 
 1901     getChunk(Handle * handle, 
bool isConst, 
bool insertInCache, shape_type 
const & chunk_index)
 const 
 1903         ChunkedArray * 
self = 
const_cast<ChunkedArray *
>(
this);
 
 1905         long rc = acquireRef(handle);
 
 1907             return handle->pointer_->pointer_;
 
 1909         threading::lock_guard<threading::mutex> guard(*chunk_lock_);
 
 1912             T * p = 
self->loadChunk(&handle->pointer_, chunk_index);
 
 1913             Chunk * chunk = handle->pointer_;
 
 1914             if(!isConst && rc == chunk_uninitialized)
 
 1915                 std::fill(p, p + 
prod(chunkShape(chunk_index)), this->fill_value_);
 
 1917             self->data_bytes_ += dataBytes(chunk);
 
 1919             if(cacheMaxSize() > 0 && insertInCache)
 
 1922                 self->cache_.push(handle);
 
 1926                 self->cleanCache(2);
 
 1928             handle->chunk_state_.store(1, threading::memory_order_release);
 
 1933             handle->chunk_state_.store(chunk_failed);
 
 1940     chunkForIteratorImpl(shape_type 
const & point,
 
 1941                          shape_type & strides, shape_type & upper_bound,
 
 1942                          IteratorChunkHandle<N, T> * h,
 
 1945         ChunkedArray * 
self = 
const_cast<ChunkedArray *
>(
this);
 
 1947         unrefChunk(h->chunk_);
 
 1950         shape_type global_point = point + h->offset_;
 
 1952         if(!this->isInside(global_point))
 
 1954             upper_bound = point + this->chunk_shape_;
 
 1958         shape_type chunkIndex(chunkStart(global_point));
 
 1960         bool insertInCache = 
true;
 
 1961         Handle * handle = 
self->lookupHandle(chunkIndex);
 
 1962         if(isConst && handle->chunk_state_.load() == chunk_uninitialized)
 
 1964             handle = &
self->fill_value_handle_;
 
 1965             insertInCache = 
false;
 
 1968         pointer p = getChunk(handle, isConst, insertInCache, chunkIndex);
 
 1969         strides = handle->strides();
 
 1970         upper_bound = (chunkIndex + shape_type(1)) * this->chunk_shape_ - h->offset_;
 
 1971         std::size_t offset = detail::ChunkIndexing<N>::offsetInChunk(global_point, mask_, strides);
 
 1978     virtual pointer chunkForIterator(shape_type 
const & point,
 
 1979                                      shape_type & strides, shape_type & upper_bound,
 
 1980                                      IteratorChunkHandle<N, T> * h)
 
 1982         return chunkForIteratorImpl(point, strides, upper_bound, h, 
false);
 
 1985     virtual pointer chunkForIterator(shape_type 
const & point,
 
 1986                                      shape_type & strides, shape_type & upper_bound,
 
 1987                                      IteratorChunkHandle<N, T> * h)
 const 
 1989         return chunkForIteratorImpl(point, strides, upper_bound, h, 
true);
 
 1994     long releaseChunk(Handle * handle, 
bool destroy = 
false)
 
 1997         bool mayUnload = handle->chunk_state_.compare_exchange_strong(rc, chunk_locked);
 
 1998         if(!mayUnload && destroy)
 
 2001             mayUnload = handle->chunk_state_.compare_exchange_strong(rc, chunk_locked);
 
 2008                 vigra_invariant(handle != &fill_value_handle_,
 
 2009                    "ChunkedArray::releaseChunk(): attempt to release fill_value_handle_.");
 
 2010                 Chunk * chunk = handle->pointer_;
 
 2011                 this->data_bytes_ -= dataBytes(chunk);
 
 2012                 int didDestroy = unloadChunk(chunk, destroy);
 
 2013                 this->data_bytes_ += dataBytes(chunk);
 
 2015                     handle->chunk_state_.store(chunk_uninitialized);
 
 2017                     handle->chunk_state_.store(chunk_asleep);
 
 2021                 handle->chunk_state_.store(chunk_failed);
 
 2029     void cleanCache(
int how_many = -1)
 
 2032             how_many = cache_.size();
 
 2033         for(; cache_.size() > cacheMaxSize() && how_many > 0; --how_many)
 
 2035             Handle * handle = cache_.front();
 
 2037             long rc = releaseChunk(handle);
 
 2039                 cache_.push(handle);
 
 2051     void releaseChunks(shape_type 
const & start, shape_type 
const & stop, 
bool destroy = 
false)
 
 2053         checkSubarrayBounds(start, stop, 
"ChunkedArray::releaseChunks()");
 
 2056                                    end(i.getEndIterator());
 
 2057         for(; i != end; ++i)
 
 2059             shape_type chunkOffset = *i * this->chunk_shape_;
 
 2061                !
allLessEqual(min(chunkOffset+this->chunk_shape_, this->shape()), stop))
 
 2067             Handle * handle = this->lookupHandle(*i);
 
 2068             threading::lock_guard<threading::mutex> guard(*chunk_lock_);
 
 2069             releaseChunk(handle, destroy);
 
 2073         threading::lock_guard<threading::mutex> guard(*chunk_lock_);
 
 2074         int cache_size = cache_.size();
 
 2075         for(
int k=0; k < cache_size; ++k)
 
 2077             Handle * handle = cache_.front();
 
 2079             if(handle->chunk_state_.load() >= 0)
 
 2080                 cache_.push(handle);
 
 2090     template <
class U, 
class Str
ide>
 
 2095         shape_type stop   = start + subarray.shape();
 
 2097         checkSubarrayBounds(start, stop, 
"ChunkedArray::checkoutSubarray()");
 
 2099         chunk_const_iterator i = chunk_cbegin(start, stop);
 
 2100         for(; i.isValid(); ++i)
 
 2102             subarray.subarray(i.chunkStart()-start, i.chunkStop()-start) = *i;
 
 2112     template <
class U, 
class Str
ide>
 
 2117         shape_type stop   = start + subarray.shape();
 
 2119         vigra_precondition(!this->isReadOnly(),
 
 2120                            "ChunkedArray::commitSubarray(): array is read-only.");
 
 2121         checkSubarrayBounds(start, stop, 
"ChunkedArray::commitSubarray()");
 
 2123         chunk_iterator i = chunk_begin(start, stop);
 
 2124         for(; i.isValid(); ++i)
 
 2126             *i = subarray.subarray(i.chunkStart()-start, i.chunkStop()-start);
 
 2131     template <
class View>
 
 2132     void subarrayImpl(shape_type 
const & start, shape_type 
const & stop,
 
 2136         vigra_precondition(isConst || !this->isReadOnly(),
 
 2137                            "ChunkedArray::subarray(): array is read-only.");
 
 2138         checkSubarrayBounds(start, stop, 
"ChunkedArray::subarray()");
 
 2139         shape_type chunk_start(chunkStart(start)), chunk_stop(chunkStop(stop));
 
 2141         view.shape_ = stop-start;
 
 2142         view.chunk_shape_ = this->chunk_shape_;
 
 2143         view.chunks_.reshape(chunk_stop-chunk_start);
 
 2144         view.offset_ = start - chunk_start * this->chunk_shape_;
 
 2148         typedef typename View::UnrefProxy Unref;
 
 2150         Unref * unref = 
new Unref(view.chunks_.size(), 
self);
 
 2151         view.unref_ = VIGRA_SHARED_PTR<Unref>(unref);
 
 2154                                    end(i.getEndIterator());
 
 2155         for(; i != end; ++i)
 
 2157             Handle * handle = 
self->lookupHandle(*i);
 
 2159             if(isConst && handle->chunk_state_.load() == chunk_uninitialized)
 
 2160                 handle = &self->fill_value_handle_;
 
 2164             pointer p = getChunk(handle, isConst, 
true, *i);
 
 2166             ChunkBase<N, T> * mini_chunk = &view.chunks_[*i - chunk_start];
 
 2167             mini_chunk->pointer_ = p;
 
 2168             mini_chunk->strides_ = handle->strides();
 
 2169             unref->chunks_[i.scanOrderIndex()] = handle;
 
 2180     subarray(shape_type 
const & start, shape_type 
const & stop)
 
 2183         subarrayImpl(start, stop, view, 
false);
 
 2194     subarray(shape_type 
const & start, shape_type 
const & stop)
 const 
 2196         const_view_type view;
 
 2197         subarrayImpl(start, stop, view, 
true);
 
 2210         const_view_type view;
 
 2211         subarrayImpl(start, stop, view, 
true);
 
 2221     value_type 
getItem(shape_type 
const & point)
 const 
 2223         vigra_precondition(this->isInside(point),
 
 2224             "ChunkedArray::getItem(): index out of bounds.");
 
 2227         shape_type chunk_index(chunkStart(point));
 
 2228         Handle * handle = 
self->lookupHandle(chunk_index);
 
 2229         if(handle->chunk_state_.load() == chunk_uninitialized)
 
 2231         pointer p = 
self->getChunk(handle, 
true, 
false, chunk_index);
 
 2232         value_type res = *(p +
 
 2233                            detail::ChunkIndexing<N>::offsetInChunk(point, mask_, handle->strides()));
 
 2234         self->unrefChunk(handle);
 
 2244     void setItem(shape_type 
const & point, value_type 
const & v)
 
 2246         vigra_precondition(!this->isReadOnly(),
 
 2247             "ChunkedArray::setItem(): array is read-only.");
 
 2248         vigra_precondition(this->isInside(point),
 
 2249             "ChunkedArray::setItem(): index out of bounds.");
 
 2251         shape_type chunk_index(chunkStart(point));
 
 2252         Handle * handle = lookupHandle(chunk_index);
 
 2253         pointer p = getChunk(handle, 
false, 
false, chunk_index);
 
 2254         *(p + detail::ChunkIndexing<N>::offsetInChunk(point, mask_, handle->strides())) = v;
 
 2267         shape_type start, stop(this->shape());
 
 2269         stop[dim] = index+1;
 
 2270         return subarray(start, stop).bindAt(dim, 0);
 
 2279     template <
unsigned int M>
 
 2281     bind (difference_type_1 index)
 const 
 2283         return bindAt(M, index);
 
 2295         return bindAt(N-1, index);
 
 2303     template <
int M, 
class Index>
 
 2307         return bindAt(N-1, d[M-1]).bindOuter(d.dropIndex(M-1));
 
 2311     template <
class Index>
 
 2315         return bindAt(N-1, d[0]);
 
 2324     MultiArrayView <N-1, T, ChunkedArrayTag>
 
 2327         return bindAt(0, index);
 
 2335     template <
int M, 
class Index>
 
 2339         return bindAt(0, d[0]).bindInner(d.dropIndex(0));
 
 2343     template <
class Index>
 
 2347         return bindAt(0, d[0]);
 
 2359         if(cache_max_size_ < 0)
 
 2360             const_cast<int &
>(cache_max_size_) = detail::defaultCacheSize(this->chunkArrayShape());
 
 2361         return cache_max_size_;
 
 2371         cache_max_size_ = c;
 
 2372         if(c < cache_.size())
 
 2374             threading::lock_guard<threading::mutex> guard(*chunk_lock_);
 
 2383         return createCoupledIterator(*
this);
 
 2391         return begin().getEndIterator();
 
 2399         return createCoupledIterator(const_cast<ChunkedArray const &>(*
this));
 
 2407         return cbegin().getEndIterator();
 
 2415         return createCoupledIterator(*
this);
 
 2423         return begin().getEndIterator();
 
 2428     chunk_iterator 
chunk_begin(shape_type 
const & start, shape_type 
const & stop)
 
 2430         checkSubarrayBounds(start, stop, 
"ChunkedArray::chunk_begin()");
 
 2431         return chunk_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
 
 2437     chunk_iterator 
chunk_end(shape_type 
const & start, shape_type 
const & stop)
 
 2439         return chunk_begin(start, stop).getEndIterator();
 
 2445     chunk_const_iterator 
chunk_begin(shape_type 
const & start, shape_type 
const & stop)
 const 
 2447         checkSubarrayBounds(start, stop, 
"ChunkedArray::chunk_begin()");
 
 2448         return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
 
 2454     chunk_const_iterator 
chunk_end(shape_type 
const & start, shape_type 
const & stop)
 const 
 2456         return chunk_begin(start, stop).getEndIterator();
 
 2462     chunk_const_iterator 
chunk_cbegin(shape_type 
const & start, shape_type 
const & stop)
 const 
 2464         checkSubarrayBounds(start, stop, 
"ChunkedArray::chunk_cbegin()");
 
 2465         return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
 
 2471     chunk_const_iterator 
chunk_cend(shape_type 
const & start, shape_type 
const & stop)
 const 
 2473         return chunk_cbegin(start, stop).getEndIterator();
 
 2476     shape_type bits_, mask_;
 
 2477     int cache_max_size_;
 
 2478     VIGRA_SHARED_PTR<threading::mutex> chunk_lock_;
 
 2480     Chunk fill_value_chunk_;
 
 2481     Handle fill_value_handle_;
 
 2482     value_type fill_value_;
 
 2483     double fill_scalar_;
 
 2485     std::size_t data_bytes_, overhead_bytes_;
 
 2490 template <
unsigned int N, 
class T>
 
 2491 typename ChunkedArray<N, T>::iterator
 
 2495     typedef typename IteratorType::handle_type           P1;
 
 2496     typedef typename P1::base_type                       P0;
 
 2498     return IteratorType(P1(m,
 
 2502 template <
unsigned int N, 
class T>
 
 2503 typename ChunkedArray<N, T>::const_iterator
 
 2504 createCoupledIterator(ChunkedArray<N, T> 
const & m)
 
 2506     typedef typename ChunkedArray<N, T>::const_iterator  IteratorType;
 
 2507     typedef typename IteratorType::handle_type           P1;
 
 2508     typedef typename P1::base_type                       P0;
 
 2510     return IteratorType(P1(m,
 
 2523 template <
unsigned int N, 
class T, 
class Alloc = std::allocator<T> >
 
 2545     typedef typename ChunkedArray<N, T>::Chunk       Chunk;
 
 2547     static shape_type computeChunkShape(shape_type s)
 
 2549         for(
unsigned k=0; k<N; ++k)
 
 2554     using Storage::subarray;
 
 2555     using Storage::bindOuter;
 
 2556     using Storage::bindInner;
 
 2557     using Storage::bind;
 
 2558     using Storage::bindAt;
 
 2559     using Storage::isInside;
 
 2560     using Storage::shape;
 
 2561     using Storage::size;
 
 2562     using Storage::begin;
 
 2565 #ifndef DOXYGEN  // doxygen doesn't understand this 
 2566     using Storage::operator==;
 
 2567     using Storage::operator!=;
 
 2575                               Alloc 
const & alloc = Alloc())
 
 2576     : 
ChunkedArray<N, T>(shape, computeChunkShape(shape), options.cacheMax(0)),
 
 2577       Storage(shape, this->fill_value_, alloc),
 
 2578       upper_bound_(shape),
 
 2579       chunk_(detail::defaultStride(shape), this->data())
 
 2581         this->handle_array_[0].pointer_ = &chunk_;
 
 2582         this->handle_array_[0].chunk_state_.store(1);
 
 2583         this->data_bytes_ = size()*
sizeof(T);
 
 2584         this->overhead_bytes_ = overheadBytesPerChunk();
 
 2590       upper_bound_(rhs.upper_bound_),
 
 2591       chunk_(detail::defaultStride(shape), this->data())
 
 2593         this->handle_array_[0].pointer_ = &chunk_;
 
 2594         this->handle_array_[0].chunk_state_.store(1);
 
 2597     ChunkedArrayFull & operator=(ChunkedArrayFull 
const & rhs)
 
 2601             ChunkedArray<N, T>::operator=(rhs);
 
 2602             Storage::operator=(rhs);
 
 2603             upper_bound_ = rhs.upper_bound_;
 
 2613         return shape_type(1);
 
 2616     virtual pointer loadChunk(ChunkBase<N, T> **, shape_type 
const &)
 
 2618         return this->data();
 
 2621     virtual bool unloadChunk(ChunkBase<N, T> *, 
bool )
 
 2626     virtual std::size_t dataBytes(Chunk *)
 const 
 2628         return prod(this->shape());
 
 2633         return sizeof(Chunk) + 
sizeof(SharedChunkHandle<N, T>);
 
 2636     virtual pointer chunkForIterator(shape_type 
const & point,
 
 2637                                      shape_type & strides, shape_type & upper_bound,
 
 2638                                      IteratorChunkHandle<N, T> * h)
 const 
 2640         shape_type global_point = point + h->offset_;
 
 2642         if(!this->isInside(global_point))
 
 2644             upper_bound = point + this->chunk_shape_;
 
 2648         strides = this->stride();
 
 2649         upper_bound = upper_bound_;
 
 2650         return const_cast<pointer
>(&Storage::operator[](global_point));
 
 2653     virtual pointer chunkForIterator(shape_type 
const & point,
 
 2654                                      shape_type & strides, shape_type & upper_bound,
 
 2655                                      IteratorChunkHandle<N, T> * h)
 
 2657         shape_type global_point = point + h->offset_;
 
 2659         if(!this->isInside(global_point))
 
 2661             upper_bound = point + this->chunk_shape_;
 
 2665         strides = this->stride();
 
 2666         upper_bound = upper_bound_;
 
 2667         return &Storage::operator[](global_point);
 
 2670     virtual std::string backend()
 const 
 2672         return "ChunkedArrayFull";
 
 2675     shape_type upper_bound_;
 
 2692 template <
unsigned int N, 
class T, 
class Alloc = std::allocator<T> >
 
 2699     : 
public ChunkBase<N, T>
 
 2703         typedef T value_type;
 
 2704         typedef value_type * pointer;
 
 2705         typedef value_type & reference;
 
 2707         Chunk(shape_type 
const & shape, Alloc 
const & alloc = Alloc())
 
 2708         : ChunkBase<N, T>(detail::defaultStride(shape))
 
 2709         , size_(
prod(shape))
 
 2720             if(this->pointer_ == 0)
 
 2721                 this->pointer_ = detail::alloc_initialize_n<T>(size_, T(), alloc_);
 
 2722             return this->pointer_;
 
 2727             detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
 
 2735         Chunk & operator=(Chunk 
const &);
 
 2740     typedef T value_type;
 
 2741     typedef value_type * pointer;
 
 2742     typedef value_type & reference;
 
 2748                               shape_type 
const & chunk_shape=shape_type(),
 
 2750                               Alloc 
const & alloc = Alloc())
 
 2751     : 
ChunkedArray<N, T>(shape, chunk_shape, options.cacheMax(0))
 
 2757         typename ChunkStorage::iterator i   = this->handle_array_.
begin(),
 
 2758                                         end = this->handle_array_.end();
 
 2759         for(; i != end; ++i)
 
 2762                 delete static_cast<Chunk*
>(i->pointer_);
 
 2767     virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type 
const & index)
 
 2771             *p = 
new Chunk(this->chunkShape(index));
 
 2772             this->overhead_bytes_ += 
sizeof(Chunk);
 
 2774         return static_cast<Chunk *
>(*p)->allocate();
 
 2777     virtual bool unloadChunk(ChunkBase<N, T> * chunk, 
bool destroy)
 
 2780             static_cast<Chunk *
>(chunk)->deallocate();
 
 2784     virtual std::string backend()
 const 
 2786         return "ChunkedArrayLazy";
 
 2789     virtual std::size_t dataBytes(ChunkBase<N,T> * c)
 const 
 2791         return c->pointer_ == 0
 
 2793                  : 
static_cast<Chunk*
>(c)->size_*
sizeof(T);
 
 2798         return sizeof(Chunk) + 
sizeof(SharedChunkHandle<N, T>);
 
 2818 template <
unsigned int N, 
class T, 
class Alloc = std::allocator<T> >
 
 2825     : 
public ChunkBase<N, T>
 
 2829         typedef T value_type;
 
 2830         typedef value_type * pointer;
 
 2831         typedef value_type & reference;
 
 2833         Chunk(shape_type 
const & shape)
 
 2834         : ChunkBase<N, T>(detail::defaultStride(shape))
 
 2836         , size_(
prod(shape))
 
 2846             if(this->pointer_ == 0)
 
 2847                 this->pointer_ = detail::alloc_initialize_n<T>(size_, T(), alloc_);
 
 2848             return this->pointer_;
 
 2853             detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
 
 2855             compressed_.clear();
 
 2858         void compress(CompressionMethod method)
 
 2860             if(this->pointer_ != 0)
 
 2862                 vigra_invariant(compressed_.size() == 0,
 
 2863                     "ChunkedArrayCompressed::Chunk::compress(): compressed and uncompressed pointer are both non-zero.");
 
 2865                 ::vigra::compress((
char const *)this->pointer_, size_*
sizeof(T), compressed_, method);
 
 2868                 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
 
 2875             if(this->pointer_ == 0)
 
 2877                 if(compressed_.size())
 
 2879                     this->pointer_ = alloc_.allocate((
typename Alloc::size_type)size_);
 
 2881                     ::vigra::uncompress(compressed_.data(), compressed_.size(),
 
 2882                                         (
char*)this->pointer_, size_*
sizeof(T), method);
 
 2883                     compressed_.clear();
 
 2887                     this->pointer_ = allocate();
 
 2892                 vigra_invariant(compressed_.size() == 0,
 
 2893                     "ChunkedArrayCompressed::Chunk::uncompress(): compressed and uncompressed pointer are both non-zero.");
 
 2895             return this->pointer_;
 
 2903         Chunk & operator=(Chunk 
const &);
 
 2908     typedef T value_type;
 
 2909     typedef value_type * pointer;
 
 2910     typedef value_type & reference;
 
 2925                                     shape_type 
const & chunk_shape=shape_type(),
 
 2928        compression_method_(options.compression_method)
 
 2930         if(compression_method_ == DEFAULT_COMPRESSION)
 
 2931             compression_method_ = LZ4;
 
 2936         typename ChunkStorage::iterator i   = this->handle_array_.
begin(),
 
 2937                                         end = this->handle_array_.end();
 
 2938         for(; i != end; ++i)
 
 2941                 delete static_cast<Chunk*
>(i->pointer_);
 
 2946     virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type 
const & index)
 
 2950             *p = 
new Chunk(this->chunkShape(index));
 
 2951             this->overhead_bytes_ += 
sizeof(Chunk);
 
 2953         return static_cast<Chunk *
>(*p)->uncompress(compression_method_);
 
 2956     virtual bool unloadChunk(ChunkBase<N, T> * chunk, 
bool destroy)
 
 2959             static_cast<Chunk *
>(chunk)->deallocate();
 
 2961             static_cast<Chunk *
>(chunk)->
compress(compression_method_);
 
 2965     virtual std::string backend()
 const 
 2967         switch(compression_method_)
 
 2970             return "ChunkedArrayCompressed<ZLIB>";
 
 2972             return "ChunkedArrayCompressed<ZLIB_NONE>";
 
 2974             return "ChunkedArrayCompressed<ZLIB_FAST>";
 
 2976             return "ChunkedArrayCompressed<ZLIB_BEST>";
 
 2978             return "ChunkedArrayCompressed<LZ4>";
 
 2984     virtual std::size_t dataBytes(ChunkBase<N,T> * c)
 const 
 2986         return c->pointer_ == 0
 
 2987                  ? 
static_cast<Chunk*
>(c)->compressed_.size()
 
 2988                  : 
static_cast<Chunk*
>(c)->size_*
sizeof(T);
 
 2993         return sizeof(Chunk) + 
sizeof(SharedChunkHandle<N, T>);
 
 2996     CompressionMethod compression_method_;
 
 3021 template <
unsigned int N, 
class T>
 
 3039     typedef HANDLE FileHandle;
 
 3041     typedef int FileHandle;
 
 3045     : 
public ChunkBase<N, T>
 
 3049         typedef T value_type;
 
 3050         typedef value_type * pointer;
 
 3051         typedef value_type & reference;
 
 3053         Chunk(shape_type 
const & shape,
 
 3054               std::size_t offset, 
size_t alloc_size,
 
 3056         : ChunkBase<N, T>(detail::defaultStride(shape))
 
 3058         , alloc_size_(alloc_size)
 
 3069             if(this->pointer_ == 0)
 
 3072                 static const std::size_t bits = 
sizeof(DWORD)*8,
 
 3073                                          mask = (std::size_t(1) << bits) - 1;
 
 3074                 this->pointer_ = (pointer)MapViewOfFile(file_, FILE_MAP_ALL_ACCESS,
 
 3075                                            std::size_t(offset_) >> bits, offset_ & mask, alloc_size_);
 
 3076                 if(this->pointer_ == 0)
 
 3077                     winErrorToException(
"ChunkedArrayChunk::map(): ");
 
 3079                 this->pointer_ = (pointer)mmap(0, alloc_size_, PROT_READ | PROT_WRITE, MAP_SHARED,
 
 3081                 if(this->pointer_ == 0)
 
 3082                     throw std::runtime_error(
"ChunkedArrayChunk::map(): mmap() failed.");
 
 3085             return this->pointer_;
 
 3090             if(this->pointer_ != 0)
 
 3093                 ::UnmapViewOfFile(this->pointer_);
 
 3095                 munmap(this->pointer_, alloc_size_);
 
 3101         std::size_t offset_, alloc_size_;
 
 3105         Chunk & operator=(Chunk 
const &);
 
 3111     typedef T value_type;
 
 3112     typedef value_type * pointer;
 
 3113     typedef value_type & reference;
 
 3115     static std::size_t computeAllocSize(shape_type 
const & shape)
 
 3117         std::size_t size = 
prod(shape)*
sizeof(T);
 
 3118         std::size_t mask = mmap_alignment - 1;
 
 3119         return (size + mask) & ~mask;
 
 3129                                  shape_type 
const & chunk_shape=shape_type(),
 
 3131                                  std::string 
const & path = 
"")
 
 3133     #ifndef VIGRA_NO_SPARSE_FILE
 
 3134     , offset_array_(this->chunkArrayShape())
 
 3139         ignore_argument(path);
 
 3140     #ifdef VIGRA_NO_SPARSE_FILE 
 3141         file_capacity_ = 4*
prod(this->chunk_shape_)*
sizeof(T);
 
 3145                                          end = offset_array_.end();
 
 3146         std::size_t size = 0;
 
 3147         for(; i != end; ++i)
 
 3150             size += computeAllocSize(this->chunkShape(i.point()));
 
 3152         file_capacity_ = size;
 
 3153         this->overhead_bytes_ += offset_array_.size()*
sizeof(std::size_t);
 
 3159         file_ = ::CreateFile(winTempFileName(path).c_str(), GENERIC_READ | GENERIC_WRITE,
 
 3160                              0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE, NULL);
 
 3161         if (file_ == INVALID_HANDLE_VALUE)
 
 3162             winErrorToException(
"ChunkedArrayTmpFile(): ");
 
 3166         if(!::DeviceIoControl(file_, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, &dwTemp, NULL))
 
 3167             winErrorToException(
"ChunkedArrayTmpFile(): ");
 
 3173         static const std::size_t bits = 
sizeof(LONG)*8, mask = (std::size_t(1) << bits) - 1;
 
 3174         mappedFile_ = CreateFileMapping(file_, NULL, PAGE_READWRITE,
 
 3175                                         file_capacity_ >> bits, file_capacity_ & mask, NULL);
 
 3177             winErrorToException(
"ChunkedArrayTmpFile(): ");
 
 3179         mappedFile_ = file_ = fileno(tmpfile());
 
 3181             throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to open file.");
 
 3182         lseek(file_, file_capacity_-1, SEEK_SET);
 
 3183         if(write(file_, 
"0", 1) == -1)
 
 3184             throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to resize file.");
 
 3190         typename ChunkStorage::iterator  i = this->handle_array_.
begin(),
 
 3191                                          end = this->handle_array_.end();
 
 3192         for(; i != end; ++i)
 
 3195                 delete static_cast<Chunk*
>(i->pointer_);
 
 3199         ::CloseHandle(mappedFile_);
 
 3200         ::CloseHandle(file_);
 
 3206     virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type 
const & index)
 
 3210             shape_type shape = this->chunkShape(index);
 
 3211             std::size_t chunk_size = computeAllocSize(shape);
 
 3212         #ifdef VIGRA_NO_SPARSE_FILE 
 3213             std::size_t offset = file_size_;
 
 3214             if(offset + chunk_size > file_capacity_)
 
 3216                 file_capacity_ = max<std::size_t>(offset+chunk_size, file_capacity_ * 120 / 100); 
 
 3217                 if(lseek(file_, file_capacity_-1, SEEK_SET) == -1)
 
 3218                     throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to reset file size.");
 
 3219                 if(write(file_, 
"0", 1) == -1)
 
 3220                     throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to resize file.");
 
 3222             file_size_ += chunk_size;
 
 3224             std::size_t offset = offset_array_[index];
 
 3226             *p = 
new Chunk(shape, offset, chunk_size, mappedFile_);
 
 3227             this->overhead_bytes_ += 
sizeof(Chunk);
 
 3229         return static_cast<Chunk*
>(*p)->map();
 
 3232     virtual bool unloadChunk(ChunkBase<N, T> * chunk, 
bool )
 
 3234         static_cast<Chunk *
>(chunk)->unmap();
 
 3238     virtual std::string backend()
 const 
 3240         return "ChunkedArrayTmpFile";
 
 3243     virtual std::size_t dataBytes(ChunkBase<N,T> * c)
 const 
 3245         return c->pointer_ == 0
 
 3247                  : 
static_cast<Chunk*
>(c)->alloc_size_;
 
 3252       #ifdef VIGRA_NO_SPARSE_FILE 
 3253         return sizeof(Chunk) + 
sizeof(SharedChunkHandle<N, T>);
 
 3255         return sizeof(Chunk) + 
sizeof(SharedChunkHandle<N, T>) + 
sizeof(std::size_t);
 
 3259   #ifndef VIGRA_NO_SPARSE_FILE 
 3260     OffsetStorage offset_array_;  
 
 3262     FileHandle file_, mappedFile_;  
 
 3263     std::size_t file_size_, file_capacity_;
 
 3266 template<
unsigned int N, 
class U>
 
 3268 : 
public MultiCoordinateIterator<N>
 
 3269 , 
private MultiArrayView<N, typename UnqualifiedType<U>::type>
 
 3272     typedef typename UnqualifiedType<U>::type      T;
 
 3273     typedef MultiCoordinateIterator<N>             base_type;
 
 3274     typedef MultiArrayView<N, T>                   base_type2;
 
 3276     typedef typename base_type::shape_type         shape_type;
 
 3277     typedef typename base_type::difference_type    difference_type;
 
 3278     typedef ChunkIterator                          iterator;
 
 3279     typedef std::random_access_iterator_tag        iterator_category;
 
 3281     typedef MultiArrayView<N, T>                   value_type;
 
 3282     typedef MultiArrayView<N, T> &                 reference;
 
 3283     typedef MultiArrayView<N, T> 
const &           const_reference;
 
 3284     typedef MultiArrayView<N, T> *                 pointer;
 
 3285     typedef MultiArrayView<N, T> 
const *           const_pointer;
 
 3287     typedef typename IfBool<UnqualifiedType<U>::isConst,
 
 3288                           ChunkedArrayBase<N, T> 
const,
 
 3289                           ChunkedArrayBase<N, T> >::type array_type;
 
 3290     typedef IteratorChunkHandle<N, T>        Chunk;
 
 3298     ChunkIterator(array_type * array,
 
 3299                   shape_type 
const & start, shape_type 
const & end,
 
 3300                   shape_type 
const & chunk_start, shape_type 
const & chunk_end,
 
 3301                   shape_type 
const & chunk_shape)
 
 3302     : base_type(chunk_start, chunk_end)
 
 3304     , chunk_(chunk_start * chunk_shape)
 
 3305     , start_(start - chunk_.offset_)
 
 3306     , stop_(end - chunk_.offset_)
 
 3307     , chunk_shape_(chunk_shape)
 
 3312     ChunkIterator(ChunkIterator 
const & rhs)
 
 3315     , array_(rhs.array_)
 
 3316     , chunk_(rhs.chunk_)
 
 3317     , start_(rhs.start_)
 
 3319     , chunk_shape_(rhs.chunk_shape_)
 
 3324     ChunkIterator & operator=(ChunkIterator 
const & rhs)
 
 3328             base_type::operator=(rhs);
 
 3329             array_ = rhs.array_;
 
 3330             chunk_ = rhs.chunk_;
 
 3331             start_ = rhs.start_;
 
 3333             chunk_shape_ = rhs.chunk_shape_;
 
 3339     reference operator*()
 
 3344     const_reference operator*()
 const 
 3349     pointer operator->()
 
 3354     const_pointer operator->()
 const 
 3361         return *(ChunkIterator(*
this) += i);
 
 3364     value_type operator[](
const shape_type &coordOffset)
 const 
 3366         return *(ChunkIterator(*
this) += coordOffset);
 
 3373             shape_type array_point = max(start_, this->point()*chunk_shape_),
 
 3374                        upper_bound(SkipInitialization);
 
 3375             this->m_ptr = array_->chunkForIterator(array_point, this->m_stride, upper_bound, &chunk_);
 
 3376             this->m_shape = min(upper_bound, stop_) - array_point;
 
 3380     shape_type chunkStart()
 const 
 3382         return max(start_, this->point()*chunk_shape_) + chunk_.offset_;
 
 3385     shape_type chunkStop()
 const 
 3387         return chunkStart() + this->m_shape;
 
 3390     ChunkIterator & operator++()
 
 3392         base_type::operator++();
 
 3397     ChunkIterator operator++(
int)
 
 3399         ChunkIterator res(*
this);
 
 3411     ChunkIterator & 
operator+=(
const shape_type &coordOffset)
 
 3418     ChunkIterator & operator--()
 
 3420         base_type::operator--();
 
 3425     ChunkIterator operator--(
int)
 
 3427         ChunkIterator res(*
this);
 
 3437     ChunkIterator & 
operator-=(
const shape_type &coordOffset)
 
 3442     ChunkIterator getEndIterator()
 const 
 3444         ChunkIterator res(*
this);
 
 3445         static_cast<base_type &
>(res) = base_type::getEndIterator();
 
 3452         return ChunkIterator(*
this) += d;
 
 3457         return ChunkIterator(*
this) -= d;
 
 3460     ChunkIterator 
operator+(
const shape_type &coordOffset)
 const 
 3462         return ChunkIterator(*
this) += coordOffset;
 
 3465     ChunkIterator 
operator-(
const shape_type &coordOffset)
 const 
 3467         return ChunkIterator(*
this) -= coordOffset;
 
 3475 #ifndef DOXYGEN  // doxygen doesn't understand this 
 3476     using base_type::operator==;
 
 3477     using base_type::operator!=;
 
 3479     using base_type::shape;
 
 3481     array_type * array_;
 
 3483     shape_type start_, stop_, chunk_shape_, array_point_;
 
 3490 #undef VIGRA_ASSERT_INSIDE 
std::size_t cacheMaxSize() const 
Get the number of chunks the cache will hold. 
Definition: multi_array_chunked.hxx:2357
chunk_iterator chunk_end(shape_type const &start, shape_type const &stop)
Create the end iterator for iteration over all chunks intersected by the given ROI. 
Definition: multi_array_chunked.hxx:2437
Sequential iterator for MultiArrayView. 
Definition: multi_fwd.hxx:161
MultiArrayView< N-1, T, ChunkedArrayTag > bindOuter(difference_type_1 index) const 
Create a lower dimensional view to the chunked array. 
Definition: multi_array_chunked.hxx:2293
std::size_t dataBytes() const 
Bytes of main memory occupied by the array's data. 
Definition: multi_array_chunked.hxx:1674
ChunkedArrayTmpFile(shape_type const &shape, shape_type const &chunk_shape=shape_type(), ChunkedArrayOptions const &options=ChunkedArrayOptions(), std::string const &path="")
Construct with given 'shape', 'chunk_shape' and 'options'. 
Definition: multi_array_chunked.hxx:3128
const_iterator end() const 
Create the end iterator for read-only scan-order iteration over the entire chunked array...
Definition: multi_array_chunked.hxx:2421
void commitSubarray(shape_type const &start, MultiArrayView< N, U, Stride > const &subarray)
Copy an ordinary MultiArrayView into an ROI of the chunked array. 
Definition: multi_array_chunked.hxx:2114
Option object for ChunkedArray construction. 
Definition: multi_array_chunked.hxx:1267
view_type::pointer pointer
Definition: multi_array.hxx:2502
void transpose(const MultiArrayView< 2, T, C1 > &v, MultiArrayView< 2, T, C2 > &r)
Definition: matrix.hxx:965
void releaseChunks(shape_type const &start, shape_type const &stop, bool destroy=false)
Definition: multi_array_chunked.hxx:2051
ChunkedArrayCompressed(shape_type const &shape, shape_type const &chunk_shape=shape_type(), ChunkedArrayOptions const &options=ChunkedArrayOptions())
Construct with given 'shape', 'chunk_shape' and 'options'. 
Definition: multi_array_chunked.hxx:2924
Diff2D operator-(Diff2D const &a, Diff2D const &b)
Definition: diff2d.hxx:711
Definition: multi_array_chunked.hxx:2819
const_iterator begin() const 
Create a read-only scan-order iterator for the entire chunked array. 
Definition: multi_array_chunked.hxx:2413
chunk_iterator chunk_begin(shape_type const &start, shape_type const &stop)
Create an iterator over all chunks intersected by the given ROI. 
Definition: multi_array_chunked.hxx:2428
MultiArrayView< N-1, T, ChunkedArrayTag > bind(difference_type_1 index) const 
Create a lower dimensional view to the chunked array. 
Definition: multi_array_chunked.hxx:2281
ChunkedArrayFull(shape_type const &shape, ChunkedArrayOptions const &options=ChunkedArrayOptions(), Alloc const &alloc=Alloc())
Construct with given 'shape' and 'options', using the allocator 'alloc' to manage the memory...
Definition: multi_array_chunked.hxx:2573
void linearSequence(Iterator first, Iterator last, Value start, Value step)
Fill an array with a sequence of numbers. 
Definition: algorithm.hxx:208
view_type::iterator iterator
Definition: multi_array.hxx:2548
iterator begin()
Create a scan-order iterator for the entire chunked array. 
Definition: multi_array_chunked.hxx:2381
shape_type chunkStart(shape_type const &global_start) const 
Find the chunk that contains array element 'global_start'. 
Definition: multi_array_chunked.hxx:1708
const_iterator cbegin() const 
Create a read-only scan-order iterator for the entire chunked array. 
Definition: multi_array_chunked.hxx:2397
void compress(char const *source, std::size_t size, ArrayVector< char > &dest, CompressionMethod method)
Diff2D operator+(Diff2D const &a, Diff2D const &b)
Definition: diff2d.hxx:739
ChunkedArrayLazy(shape_type const &shape, shape_type const &chunk_shape=shape_type(), ChunkedArrayOptions const &options=ChunkedArrayOptions(), Alloc const &alloc=Alloc())
Construct with given 'shape', 'chunk_shape' and 'options', using the allocator 'alloc' to manage the ...
Definition: multi_array_chunked.hxx:2747
Interface and base class for chunked arrays. 
Definition: multi_array_chunked.hxx:463
std::ptrdiff_t MultiArrayIndex
Definition: multi_fwd.hxx:60
value_type getItem(shape_type const &point) const 
Read the array element at index 'point'. 
Definition: multi_array_chunked.hxx:2221
bool allLess(TinyVectorBase< V1, SIZE, D1, D2 > const &l, TinyVectorBase< V2, SIZE, D3, D4 > const &r)
pointwise less-than 
Definition: tinyvector.hxx:1375
Definition: multi_array_chunked.hxx:2524
view_type::difference_type difference_type
Definition: multi_array.hxx:2522
int cacheSize() const 
Number of chunks currently fitting into the cache. 
Definition: multi_array_chunked.hxx:1664
FFTWComplex< R > & operator-=(FFTWComplex< R > &a, const FFTWComplex< R > &b)
subtract-assignment 
Definition: fftw3.hxx:867
MultiArrayView< N-M, T, ChunkedArrayTag > bindOuter(const TinyVector< Index, M > &d) const 
Create a lower dimensional view to the chunked array. 
Definition: multi_array_chunked.hxx:2305
MultiArrayView< N-M, T, ChunkedArrayTag > bindInner(const TinyVector< Index, M > &d) const 
Create a lower dimensional view to the chunked array. 
Definition: multi_array_chunked.hxx:2337
ChunkedArrayOptions & compression(CompressionMethod v)
Compress inactive chunks with the given method. 
Definition: multi_array_chunked.hxx:1312
chunk_const_iterator chunk_cbegin(shape_type const &start, shape_type const &stop) const 
Create a read-only iterator over all chunks intersected by the given ROI. 
Definition: multi_array_chunked.hxx:2462
void setCacheMaxSize(std::size_t c)
Set the number of chunks the cache will hold. 
Definition: multi_array_chunked.hxx:2369
const_view_type const_subarray(shape_type const &start, shape_type const &stop) const 
Create a read-only view to the specified ROI. 
Definition: multi_array_chunked.hxx:2208
std::size_t overheadBytes() const 
Bytes of main memory needed to manage the chunked storage. 
Definition: multi_array_chunked.hxx:1681
FFTWComplex< R > & operator+=(FFTWComplex< R > &a, const FFTWComplex< R > &b)
add-assignment 
Definition: fftw3.hxx:859
view_type::reference reference
Definition: multi_array.hxx:2510
Int32 log2i(UInt32 x)
Compute the base-2 logarithm of an integer. 
Definition: mathutil.hxx:361
NumericTraits< V >::Promote prod(TinyVectorBase< V, SIZE, D1, D2 > const &l)
product of the vector's elements 
Definition: tinyvector.hxx:2097
bool operator!=(FFTWComplex< R > const &a, const FFTWComplex< R > &b)
not equal 
Definition: fftw3.hxx:841
bool operator!=(MultiArrayView< N, U, C1 > const &rhs) const 
Check if two arrays differ in at least one element. 
Definition: multi_array_chunked.hxx:1799
iterator end()
Create the end iterator for scan-order iteration over the entire chunked array. 
Definition: multi_array_chunked.hxx:2389
ChunkedArrayOptions & fillValue(double v)
Element value for read-only access of uninitialized chunks. 
Definition: multi_array_chunked.hxx:1282
void checkoutSubarray(shape_type const &start, MultiArrayView< N, U, Stride > &subarray) const 
Copy an ROI of the chunked array into an ordinary MultiArrayView. 
Definition: multi_array_chunked.hxx:2092
bool operator==(FFTWComplex< R > const &a, const FFTWComplex< R > &b)
equal 
Definition: fftw3.hxx:825
shape_type chunkStop(shape_type global_stop) const 
Find the chunk that is beyond array element 'global_stop'. 
Definition: multi_array_chunked.hxx:1722
virtual std::size_t overheadBytesPerChunk() const 
Bytes of main memory needed to manage a single chunk. 
Definition: multi_array_chunked.hxx:3250
TinyVector< MultiArrayIndex, N > type
Definition: multi_shape.hxx:272
view_type::size_type size_type
Definition: multi_array.hxx:2518
const_view_type subarray(shape_type const &start, shape_type const &stop) const 
Create a read-only view to the specified ROI. 
Definition: multi_array_chunked.hxx:2194
std::size_t dataBytesPerChunk() const 
Number of data bytes in an uncompressed chunk. 
Definition: multi_array_chunked.hxx:1697
ChunkedArrayOptions & cacheMax(int v)
Maximum number of chunks in the cache. 
Definition: multi_array_chunked.hxx:1297
view_type::const_pointer const_pointer
Definition: multi_array.hxx:2506
view_type::value_type value_type
Definition: multi_array.hxx:2498
void setItem(shape_type const &point, value_type const &v)
Write the array element at index 'point'. 
Definition: multi_array_chunked.hxx:2244
shape_type chunkShape(shape_type const &chunk_index) const 
Find the shape of the chunk indexed by 'chunk_index'. 
Definition: multi_array_chunked.hxx:1736
Definition: multi_array_chunked.hxx:2693
Class for fixed size vectors.This class contains an array of size SIZE of the specified VALUETYPE...
Definition: accessor.hxx:940
MultiArrayView< N-1, T, ChunkedArrayTag > bindInner(difference_type_1 index) const 
Create a lower dimensional view to the chunked array. 
Definition: multi_array_chunked.hxx:2325
Definition: metaprogramming.hxx:130
chunk_const_iterator chunk_cend(shape_type const &start, shape_type const &stop) const 
Create the end iterator for read-only iteration over all chunks intersected by the given ROI...
Definition: multi_array_chunked.hxx:2471
chunk_const_iterator chunk_begin(shape_type const &start, shape_type const &stop) const 
Create a read-only iterator over all chunks intersected by the given ROI. 
Definition: multi_array_chunked.hxx:2445
view_type::difference_type_1 difference_type_1
Definition: multi_array.hxx:2526
virtual shape_type chunkArrayShape() const 
Number of chunks along each coordinate direction. 
Definition: multi_array_chunked.hxx:1688
virtual std::size_t overheadBytesPerChunk() const 
Bytes of main memory needed to manage a single chunk. 
Definition: multi_array_chunked.hxx:2631
detail::SelectIntegerType< 32, detail::UnsignedIntTypes >::type UInt32
32-bit unsigned int 
Definition: sized_int.hxx:183
bool operator==(MultiArrayView< N, U, C1 > const &rhs) const 
Check if two arrays are elementwise equal. 
Definition: multi_array_chunked.hxx:1784
chunk_const_iterator chunk_end(shape_type const &start, shape_type const &stop) const 
Create the end iterator for read-only iteration over all chunks intersected by the given ROI...
Definition: multi_array_chunked.hxx:2454
Definition: multi_array_chunked.hxx:3022
Base class for, and view to, vigra::MultiArray. 
Definition: multi_array.hxx:704
void uncompress(char const *source, std::size_t srcSize, char *dest, std::size_t destSize, CompressionMethod method)
virtual std::size_t overheadBytesPerChunk() const 
Bytes of main memory needed to manage a single chunk. 
Definition: multi_array_chunked.hxx:2796
bool allLessEqual(TinyVectorBase< V1, SIZE, D1, D2 > const &l, TinyVectorBase< V2, SIZE, D3, D4 > const &r)
pointwise less-equal 
Definition: tinyvector.hxx:1399
view_type subarray(shape_type const &start, shape_type const &stop)
Create a view to the specified ROI. 
Definition: multi_array_chunked.hxx:2180
UInt32 ceilPower2(UInt32 x)
Round up to the nearest power of 2. 
Definition: mathutil.hxx:294
virtual std::size_t overheadBytesPerChunk() const 
Bytes of main memory needed to manage a single chunk. 
Definition: multi_array_chunked.hxx:2991
TinyVector< V, SIZE > transpose(TinyVector< V, SIZE > const &t, TinyVector< T, SIZE > const &permutation)
transposed copy 
Definition: tinyvector.hxx:2251
const_iterator cend() const 
Create the end iterator for read-only scan-order iteration over the entire chunked array...
Definition: multi_array_chunked.hxx:2405
MultiArrayView< N-1, T, ChunkedArrayTag > bindAt(MultiArrayIndex dim, MultiArrayIndex index) const 
Create a lower dimensional view to the chunked array. 
Definition: multi_array_chunked.hxx:2265
Iterate over a virtual array where each element contains its coordinate. 
Definition: multi_fwd.hxx:157
difference_type key_type
Definition: multi_array.hxx:743
virtual shape_type chunkArrayShape() const 
Number of chunks along each coordinate direction. 
Definition: multi_array_chunked.hxx:2611
view_type::const_reference const_reference
Definition: multi_array.hxx:2514
ChunkedArrayOptions()
Initialize options with defaults. 
Definition: multi_array_chunked.hxx:1272