#ifndef VSPLINE_WIELDING_H
// indexed_aggregator receives the start coordinate and processing axis
// along with the data to process. This is the vectorized variant,
// enabled only for vsz > 1:

template < size_t vsz ,
           typename ic_type ,
           class functor_type ,
           typename = std::enable_if < ( vsz > 1 ) > >
struct indexed_aggregator
{
  typedef typename functor_type::in_type in_type ;
  typedef typename functor_type::in_v in_v ;
  typedef typename functor_type::out_v out_v ;

  enum { dim_in = functor_type::dim_in } ;
  enum { dim_out = functor_type::dim_out } ;

  const functor_type functor ;

  indexed_aggregator ( const functor_type & _functor )
  : functor ( _functor )
  { }

  // helper: is vsz a whole multiple of the hardware vector size?

  static bool is_n_hsize()
  {
    // ...
    return ( vsz % div_by == 0 ) ;
  }

  void operator() ( in_type crd , int axis ,
                    typename functor_type::out_type * trg ,
                    ic_type stride , ic_type length )
  {
    auto aggregates = length / vsz ;
    auto leftover = length - aggregates * vsz ;

    out_v buffer ;

    // vectorized coordinate; along the processing axis it holds
    // consecutive coordinate values

    in_v md_crd ;

    for ( int d = 0 ; d < dim_in ; d++ )
    {
      // ...
      for ( int e = 0 ; e < vsz ; e++ )
        md_crd[d][e] = crd[d] + e ;
    }

#ifdef USE_VC
    static const bool out_n_vecsz ( is_n_hsize() ) ;
#else
    static const bool out_n_vecsz = false ;
#endif

    typedef typename std::integral_constant < bool , dim_out == 1 > use_store_t ;

    if ( stride == 1 && ( dim_out == 1 || out_n_vecsz ) )
    {
      // fast path: unstrided target memory can be stored to en bloc

      for ( ic_type a = 0 ; a < aggregates ; a++ )
      {
        functor ( md_crd , buffer ) ;
        fluff ( buffer , trg , use_store_t() ) ;
        trg += vsz ;
        md_crd[axis] += vsz ;
      }
    }
    else
    {
      for ( ic_type a = 0 ; a < aggregates ; a++ )
      {
        functor ( md_crd , buffer ) ;
        fluff ( buffer , trg , stride ) ;
        trg += vsz * stride ;
        md_crd[axis] += vsz ;
      }
    }

    // mop up the leftovers with single-value evaluation

    crd[axis] += aggregates * vsz ;

    for ( ic_type r = 0 ; r < leftover ; r++ )
    {
      functor ( crd , *trg ) ;
      crd[axis]++ ;
      trg += stride ;
    }
  }
} ;
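// A standalone sketch (not vspline code) of the peeling pattern above:
// process as many full packages of vsz values as possible, then mop up
// the leftovers one by one. std::accumulate stands in for the functor's
// vectorized evaluation; all names here are hypothetical.

#include <cstddef>
#include <numeric>
#include <vector>
#include <iostream>

template < std::size_t vsz >
float peeled_sum ( const float * data , std::size_t length )
{
  std::size_t aggregates = length / vsz ;
  std::size_t leftover = length - aggregates * vsz ;
  float sum = 0.0f ;
  const float * p = data ;
  for ( std::size_t a = 0 ; a < aggregates ; a++ , p += vsz )
    sum = std::accumulate ( p , p + vsz , sum ) ; // one full package
  for ( std::size_t r = 0 ; r < leftover ; r++ , p++ )
    sum += *p ;                                   // leftovers, one by one
  return sum ;
}

int main()
{
  std::vector < float > v ( 103 , 1.0f ) ;
  std::cout << peeled_sum < 8 > ( v.data() , v.size() ) << std::endl ; // 103
}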
// specialization for vsz == 1: the data are simply processed one by one
// in a loop, without vectorization

template < typename ic_type , class functor_type >
struct indexed_aggregator < 1 , ic_type , functor_type >
{
  const functor_type functor ;

  indexed_aggregator ( const functor_type & _functor )
  : functor ( _functor ) { }

  void operator() ( typename functor_type::in_type crd , int axis ,
                    typename functor_type::out_type * trg ,
                    ic_type stride , ic_type length )
  {
    for ( ic_type r = 0 ; r < length ; r++ )
      functor ( crd , *trg ) ; // ... coordinate and target advance elided
  }
} ;
// indexed_reductor is used for reductions and has no output; the actual
// reduction is handled by the functor, which accumulates internally

template < size_t vsz ,
           typename ic_type ,
           class functor_type ,
           typename = std::enable_if < ( vsz > 1 ) > >
struct indexed_reductor
{
  typedef typename functor_type::in_type in_type ;
  typedef typename functor_type::in_v in_v ;

  enum { dim_in = functor_type::dim_in } ;

  functor_type functor ;

  indexed_reductor ( const functor_type & _functor )
  : functor ( _functor ) { }

  void operator() ( in_type crd , int axis , ic_type length )
  {
    auto aggregates = length / vsz ;
    auto leftover = length - aggregates * vsz ;

    // vectorized coordinate, ramped along the processing axis

    in_v md_crd ;

    for ( int d = 0 ; d < dim_in ; d++ )
    {
      // ...
      for ( int e = 0 ; e < vsz ; e++ )
        md_crd[d][e] = crd[d] + e ;
    }

    for ( ic_type a = 0 ; a < aggregates ; a++ )
    {
      functor ( md_crd ) ;      // feed one package to the reducing functor
      md_crd[axis] += vsz ;
    }

    crd[axis] += aggregates * vsz ;

    for ( ic_type r = 0 ; r < leftover ; r++ )
    {
      functor ( crd ) ;         // feed the leftovers one by one
      crd[axis]++ ;
    }
  }
} ;
// specialization for vsz == 1: reduce one coordinate at a time

template < typename ic_type , class functor_type >
struct indexed_reductor < 1 , ic_type , functor_type >
{
  functor_type functor ;

  indexed_reductor ( const functor_type & _functor )
  : functor ( _functor ) { }

  void operator() ( typename functor_type::in_type crd , int axis ,
                    ic_type length )
  {
    for ( ic_type r = 0 ; r < length ; r++ )
      functor ( crd ) ; // ... coordinate advance elided
  }
} ;
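// A standalone sketch (not vspline code) of the reduction scheme: the
// reductor produces no output array; each worker operates on its own
// copy of a stateful functor, and partial results are merged when the
// copies are disposed of. 'sum_functor' and 'total' are hypothetical.

#include <atomic>
#include <iostream>

std::atomic < long > total ( 0 ) ;

struct sum_functor
{
  long partial = 0 ;
  void operator() ( long crd ) { partial += crd ; } // per-coordinate eval
  ~sum_functor() { total += partial ; }             // merge on disposal
} ;

int main()
{
  {
    sum_functor w_func ;                  // one copy per worker
    for ( long crd = 0 ; crd < 10 ; crd++ )
      w_func ( crd ) ;
  }                                       // copy destructs, merges
  std::cout << total.load() << std::endl ; // 45
}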
// yield_reductor reduces arrays: like indexed_reductor, but the functor
// is fed values gathered from an array rather than coordinates

template < size_t vsz ,
           typename ic_type ,
           class functor_type ,
           typename = std::enable_if < ( vsz > 1 ) > >
struct yield_reductor
{
  typedef typename functor_type::in_type in_type ;
  typedef typename functor_type::in_v in_v ;

  enum { dim_in = functor_type::dim_in } ;

  functor_type functor ;

  yield_reductor ( const functor_type & _functor )
  : functor ( _functor ) { }

  void operator() ( const in_type * src , ic_type in_stride , ic_type length )
  {
    auto aggregates = length / vsz ;
    auto leftover = length - aggregates * vsz ;

    in_v in_buffer ;

#ifdef USE_VC
    static const bool in_n_vecsz
      = ... ; // true if in_v occupies a whole number of hardware vectors
#else
    static const bool in_n_vecsz = false ;
#endif

    typedef typename std::integral_constant < bool , dim_in == 1 > use_load_t ;

    for ( ic_type a = 0 ; a < aggregates ; a++ )
    {
      if ( in_stride == 1
           && ( dim_in == 1 || in_n_vecsz ) )
      {
        bunch ( src , in_buffer , use_load_t() ) ;
        src += vsz ;
        functor ( in_buffer ) ;
      }
      else
      {
        bunch ( src , in_buffer , in_stride ) ;
        src += in_stride * vsz ;
        functor ( in_buffer ) ;
      }
    }

    for ( ic_type r = 0 ; r < leftover ; r++ )
    {
      functor ( *src ) ;
      src += in_stride ;
    }
  }
} ;
// specialization for vsz == 1

template < typename ic_type , class functor_type >
struct yield_reductor < 1 , ic_type , functor_type >
{
  functor_type functor ;

  yield_reductor ( const functor_type & _functor )
  : functor ( _functor ) { }

  void operator() ( const typename functor_type::in_type * src ,
                    ic_type in_stride , ic_type length )
  {
    for ( ic_type r = 0 ; r < length ; r++ )
      functor ( *src ) ; // ... source advance elided
  }
} ;
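// 'bunch' and 'fluff' (from vspline's interleaving code) move pixels
// between interleaved memory (rgbrgb...) and a package of vectors
// (rrr...ggg...bbb...). A scalar-loop sketch (not vspline code) of the
// same data movement, with plain 2D arrays standing in for SIMD types:

#include <cstddef>
#include <iostream>

template < std::size_t chn , std::size_t vsz , typename T >
void bunch_sketch ( const T * src , std::ptrdiff_t stride ,
                    T ( & trg ) [ chn ] [ vsz ] )
{
  for ( std::size_t e = 0 ; e < vsz ; e++ )
    for ( std::size_t ch = 0 ; ch < chn ; ch++ )
      trg [ ch ] [ e ] = src [ e * stride * chn + ch ] ; // gather
}

template < std::size_t chn , std::size_t vsz , typename T >
void fluff_sketch ( const T ( & src ) [ chn ] [ vsz ] ,
                    T * trg , std::ptrdiff_t stride )
{
  for ( std::size_t e = 0 ; e < vsz ; e++ )
    for ( std::size_t ch = 0 ; ch < chn ; ch++ )
      trg [ e * stride * chn + ch ] = src [ ch ] [ e ] ; // scatter
}

int main()
{
  float rgb [ 12 ] = { 0,1,2, 10,11,12, 20,21,22, 30,31,32 } ;
  float soa [ 3 ] [ 4 ] ;
  bunch_sketch < 3 , 4 > ( rgb , 1 , soa ) ;
  std::cout << soa [ 1 ] [ 2 ] << std::endl ; // 21: channel 1 of pixel 2
  fluff_sketch < 3 , 4 > ( soa , rgb , 1 ) ;  // round trip back
}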
// generate_aggregator is very similar to indexed_aggregator, but the
// functor is stateful: it is 'reset' to the start coordinate and then
// yields successive values without being passed a coordinate each time

template < size_t _vsize ,
           typename ic_type ,
           class functor_type ,
           typename = std::enable_if < ( _vsize > 1 ) > >
struct generate_aggregator
{
  static const size_t vsize = _vsize ;

  typedef typename functor_type::out_type out_type ;
  typedef typename functor_type::out_ele_type out_ele_type ;
  typedef typename functor_type::out_v out_v ;
  typedef typename functor_type::out_ele_v out_ele_v ;
  typedef typename functor_type::out_nd_ele_type out_nd_ele_type ;
  typedef typename functor_type::out_nd_ele_v out_nd_ele_v ;

  enum { channels = functor_type::channels } ;

  typedef typename functor_type::shape_type crd_type ;

  functor_type functor ;

  generate_aggregator ( const functor_type & _functor )
  : functor ( _functor )
  { }

#ifdef USE_BUFFERED_GENERATION

  void operator() ( crd_type crd , int axis ,
                    out_type * trg , ic_type stride , ic_type length )
  {
    // nd_trg aliases trg, viewing it through the elementary type

    out_nd_ele_type * & nd_trg
      = reinterpret_cast < out_nd_ele_type * & > ( trg ) ;

    auto aggregates = length / vsize ;
    auto leftover = length - aggregates * vsize ;

    functor.reset ( crd , aggregates ) ;

    // generate the whole line into buffers in one go

    vigra::MultiArray < 1 , out_ele_v > vbuffer ( aggregates * channels ) ;
    vigra::MultiArray < 1 , out_type > rest ( leftover ) ;

    functor.eval ( vbuffer , rest ) ;

#ifdef USE_VC
    static const bool out_n_vecsz
      = ... ; // true if out_v occupies a whole number of hardware vectors
#else
    static const bool out_n_vecsz = false ;
#endif

    typedef typename std::integral_constant
                     < bool , channels == 1 > use_store_t ;

    out_v vr ;
    out_nd_ele_v & ndvr = reinterpret_cast < out_nd_ele_v & > ( vr ) ;

    if ( stride == 1 && ( channels == 1 || out_n_vecsz ) )
    {
      for ( ic_type a = 0 ; a < aggregates ; a++ )
      {
        for ( size_t ch = 0 ; ch < channels ; ch++ )
          ndvr[ch] = vbuffer [ a * channels + ch ] ;
        fluff ( ndvr , nd_trg , use_store_t() ) ;
        trg += vsize ;
      }
    }
    else
    {
      for ( ic_type a = 0 ; a < aggregates ; a++ )
      {
        for ( size_t ch = 0 ; ch < channels ; ch++ )
          ndvr[ch] = vbuffer [ a * channels + ch ] ;
        fluff ( ndvr , nd_trg , stride ) ;
        trg += vsize * stride ;
      }
    }

    for ( ic_type r = 0 ; r < leftover ; r++ )
    {
      *trg = rest [ r ] ;
      trg += stride ;
    }
  }

#else

  void operator() ( crd_type crd , int axis ,
                    out_type * trg , ic_type stride , ic_type length )
  {
    out_nd_ele_type * & nd_trg
      = reinterpret_cast < out_nd_ele_type * & > ( trg ) ;

    auto aggregates = length / vsize ;
    auto leftover = length - aggregates * vsize ;

    functor.reset ( crd , aggregates ) ;

#ifdef USE_VC
    static const bool out_n_vecsz
      = ... ; // as above
#else
    static const bool out_n_vecsz = false ;
#endif

    typedef typename std::integral_constant
                     < bool , channels == 1 > use_store_t ;

    out_v vr ;
    out_nd_ele_v & ndvr = reinterpret_cast < out_nd_ele_v & > ( vr ) ;

    if ( stride == 1 && ( channels == 1 || out_n_vecsz ) )
    {
      for ( ic_type a = 0 ; a < aggregates ; a++ )
      {
        functor.eval ( vr ) ;       // obtain the next package
        fluff ( ndvr , nd_trg , use_store_t() ) ;
        trg += vsize ;
      }
    }
    else
    {
      for ( ic_type a = 0 ; a < aggregates ; a++ )
      {
        functor.eval ( vr ) ;
        fluff ( ndvr , nd_trg , stride ) ;
        trg += vsize * stride ;
      }
    }

    for ( ic_type r = 0 ; r < leftover ; r++ )
    {
      functor.eval ( *trg ) ;       // generate leftovers singly
      trg += stride ;
    }
  }

#endif
} ;
// specialization for vsz == 1

template < typename ic_type , class functor_type >
struct generate_aggregator < 1 , ic_type , functor_type >
{
  typedef typename functor_type::shape_type crd_type ;

  functor_type functor ;

#ifdef USE_BUFFERED_GENERATION
  void operator() ( crd_type crd , int axis ,
                    typename functor_type::out_type * trg ,
                    ic_type stride , ic_type length )
  {
    // generate the whole line into a buffer, then distribute it

    vigra::MultiArray < 1 , out_type > result ( length ) ;
    // ...
    for ( ic_type r = 0 ; r < length ; r++ , trg += stride )
      *trg = result [ r ] ;
  }
#else
  void operator() ( crd_type crd , int axis ,
                    typename functor_type::out_type * trg ,
                    ic_type stride , ic_type length )
  {
    for ( ic_type r = 0 ; r < length ; r++ , trg += stride )
      functor.eval ( *trg ) ;
  }
#endif
} ;
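// A standalone sketch (not vspline code) of buffered generation: the
// generator first fills a compact buffer for the whole line, then a
// second pass distributes the buffer to (possibly strided) target memory.

#include <cstddef>
#include <vector>
#include <iostream>

int main()
{
  std::ptrdiff_t length = 7 , stride = 3 ;

  std::vector < int > buffer ( length ) ;      // pass 1: generate
  for ( std::ptrdiff_t i = 0 ; i < length ; i++ )
    buffer [ i ] = 100 + int ( i ) ;

  std::vector < int > target ( length * stride ) ;
  for ( std::ptrdiff_t i = 0 ; i < length ; i++ )
    target [ i * stride ] = buffer [ i ] ;     // pass 2: distribute

  std::cout << target [ 18 ] << std::endl ;    // 106
}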
// coupled_aggregator: an aggregator for separate - possibly different -
// source and target. If source and target are in fact the same memory,
// processing is an 'apply', which needs extra care (see the leftover loop)

template < size_t vsz ,
           typename ic_type ,
           class functor_type ,
           typename = std::enable_if < ( vsz > 1 ) > >
struct coupled_aggregator
{
  typedef typename functor_type::in_type in_type ;
  typedef typename functor_type::out_type out_type ;
  typedef typename functor_type::in_v in_v ;
  typedef typename functor_type::out_v out_v ;

  enum { dim_in = functor_type::dim_in } ;
  enum { dim_out = functor_type::dim_out } ;

  const functor_type functor ;

  coupled_aggregator ( const functor_type & _functor )
  : functor ( _functor ) { }

  void operator() ( const in_type * src , ic_type in_stride ,
                    out_type * trg , ic_type out_stride ,
                    ic_type length )
  {
    auto aggregates = length / vsz ;
    auto leftover = length - aggregates * vsz ;
    const bool is_apply = ( (void*) src == (void*) trg ) ;

    in_v in_buffer ;
    out_v out_buffer ;

#ifdef USE_VC
    static const bool in_n_vecsz
      = ... ;  // true if in_v occupies whole hardware vectors
    static const bool out_n_vecsz
      = ... ;  // same test for the output side
#else
    static const bool in_n_vecsz = false ;
    static const bool out_n_vecsz = false ;
#endif

    typedef typename std::integral_constant < bool , dim_in == 1 > use_load_t ;
    typedef typename std::integral_constant < bool , dim_out == 1 > use_store_t ;

    // four variants, picking the cheapest load/store for each side

    if ( in_stride == 1
         && ( dim_in == 1 || in_n_vecsz ) )
    {
      if ( out_stride == 1
           && ( dim_out == 1 || out_n_vecsz ) )
      {
        for ( ic_type a = 0 ; a < aggregates ; a++ )
        {
          bunch ( src , in_buffer , use_load_t() ) ;
          src += vsz ;
          functor ( in_buffer , out_buffer ) ;
          fluff ( out_buffer , trg , use_store_t() ) ;
          trg += vsz ;
        }
      }
      else
      {
        for ( ic_type a = 0 ; a < aggregates ; a++ )
        {
          bunch ( src , in_buffer , use_load_t() ) ;
          src += vsz ;
          functor ( in_buffer , out_buffer ) ;
          fluff ( out_buffer , trg , out_stride ) ;
          trg += out_stride * vsz ;
        }
      }
    }
    else
    {
      if ( out_stride == 1
           && ( dim_out == 1 || out_n_vecsz ) )
      {
        for ( ic_type a = 0 ; a < aggregates ; a++ )
        {
          bunch ( src , in_buffer , in_stride ) ;
          src += in_stride * vsz ;
          functor ( in_buffer , out_buffer ) ;
          fluff ( out_buffer , trg , use_store_t() ) ;
          trg += vsz ;
        }
      }
      else
      {
        for ( ic_type a = 0 ; a < aggregates ; a++ )
        {
          bunch ( src , in_buffer , in_stride ) ;
          src += in_stride * vsz ;
          functor ( in_buffer , out_buffer ) ;
          fluff ( out_buffer , trg , out_stride ) ;
          trg += out_stride * vsz ;
        }
      }
    }

    // leftovers: for an 'apply', evaluate to a temporary first

    if ( is_apply )
    {
      for ( ic_type r = 0 ; r < leftover ; r++ )
      {
        out_type help ;
        functor ( *src , help ) ;
        *trg = help ;
        src += in_stride ;
        trg += out_stride ;
      }
    }
    else
    {
      for ( ic_type r = 0 ; r < leftover ; r++ )
      {
        functor ( *src , *trg ) ;
        src += in_stride ;
        trg += out_stride ;
      }
    }
  }
} ;
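// The four variants above pick, per line, the cheapest way to move the
// data. A standalone sketch (not vspline code) of the same dispatch,
// with memcpy standing in for the unstrided fast path:

#include <cstring>
#include <cstddef>
#include <iostream>

void move_line ( const float * src , std::ptrdiff_t in_stride ,
                 float * trg , std::ptrdiff_t out_stride ,
                 std::size_t n )
{
  if ( in_stride == 1 && out_stride == 1 )
    std::memcpy ( trg , src , n * sizeof ( float ) ) ;  // fast path
  else
    for ( std::size_t i = 0 ; i < n ; i++ )
      trg [ i * out_stride ] = src [ i * in_stride ] ;  // general path
}

int main()
{
  float a [ 6 ] = { 1 , 2 , 3 , 4 , 5 , 6 } , b [ 6 ] = { 0 } ;
  move_line ( a , 2 , b , 1 , 3 ) ;    // gather every second value
  std::cout << b [ 2 ] << std::endl ;  // 5
}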
// specialization for vsz == 1

template < typename ic_type , class functor_type >
struct coupled_aggregator < 1 , ic_type , functor_type >
{
  const functor_type functor ;

  void operator() ( const typename functor_type::in_type * src ,
                    ic_type in_stride ,
                    typename functor_type::out_type * trg ,
                    ic_type out_stride , ic_type length )
  {
    if ( (void*)src == (void*)trg )  // an 'apply': use a temporary
    {
      typename functor_type::out_type help ;
      for ( ic_type r = 0 ; r < length ; r++ )
      {
        functor ( *src , help ) ;
        *trg = help ;
        // ...
      }
    }
    else
    {
      for ( ic_type r = 0 ; r < length ; r++ )
        functor ( *src , *trg ) ; // ... pointer advances elided
    }
  }
} ;
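// Why the 'apply' case needs a temporary: if the functor writes to its
// output before it has finished reading its input, and both occupy the
// same memory, the result is wrong. A standalone sketch (not vspline
// code) with a hypothetical component-swapping functor:

#include <utility>
#include <iostream>

typedef std::pair < int , int > pr ;

void swap_fn ( const pr & in , pr & out )
{
  out.first = in.second ;  // would clobber in.first if in and out alias
  out.second = in.first ;
}

void line_apply ( const pr * src , pr * trg , int n )
{
  const bool is_apply = ( (void*) src == (void*) trg ) ;
  for ( int i = 0 ; i < n ; i++ )
  {
    if ( is_apply )
    {
      pr help ;
      swap_fn ( src [ i ] , help ) ;  // evaluate to a temporary first
      trg [ i ] = help ;
    }
    else
      swap_fn ( src [ i ] , trg [ i ] ) ;
  }
}

int main()
{
  pr a [ 1 ] = { { 1 , 2 } } ;
  line_apply ( a , a , 1 ) ;  // in-place: correct thanks to 'help'
  std::cout << a[0].first << " " << a[0].second << std::endl ; // 2 1
}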
// 'wield' connects the aggregators to vspline's multithreading code. The
// workers all receive the same task and distribute the workload among
// themselves by fetching 'joblet' indexes from a shared atomic

template < int dimension , class in_type , class out_type = in_type >
struct wield
{
  typedef vigra::MultiArrayView < dimension , in_type > in_view_type ;
  typedef vigra::MultiArrayView < dimension , out_type > out_view_type ;
  typedef typename in_view_type::difference_type shape_type ;

// the default segment size; a value <= 0 means 'use whole lines'

#ifndef WIELDING_SEGMENT_SIZE
#define WIELDING_SEGMENT_SIZE 0
#endif
  // process in_view and out_view with a coupled_aggregator. lines along
  // 'axis' are cut into segments, and each (line, segment) pair becomes
  // one 'joblet' which workers fetch from a shared atomic

  template < size_t vsz , typename ... types >
  void operator() ( const in_view_type & in_view ,
                    out_view_type & out_view ,
                    coupled_aggregator < vsz , types ... > func ,
                    int axis = 0 ,
                    int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 ,
                    std::ptrdiff_t segment_size = WIELDING_SEGMENT_SIZE )
  {
    assert ( in_view.shape() == out_view.shape() ) ;

    if ( segment_size <= 0 )
      segment_size = in_view.shape ( axis ) ;  // whole lines

    auto in_stride = in_view.stride ( axis ) ;
    auto out_stride = out_view.stride ( axis ) ;

    // slices at axis position 0 hold the start of every line

    auto slice1 = in_view.bindAt ( axis , 0 ) ;
    auto slice2 = out_view.bindAt ( axis , 0 ) ;

    auto in_it = slice1.begin() ;
    auto out_it = slice2.begin() ;

    auto length = in_view.shape ( axis ) ;
    auto nr_lines = slice1.size() ;

    std::ptrdiff_t nsegments = length / segment_size ;
    if ( length % segment_size )
      nsegments++ ;

    std::ptrdiff_t nr_indexes = nr_lines * nsegments ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t joblet_index ;

      while ( vspline::fetch_ascending ( indexes , nr_indexes , joblet_index ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;

        auto s = joblet_index / nr_lines ;  // segment along the line
        auto j = joblet_index % nr_lines ;  // which line

        auto in_start_address =
          & ( in_it [ j ] ) + in_stride * s * segment_size ;
        auto out_start_address =
          & ( out_it [ j ] ) + out_stride * s * segment_size ;

        // the last segment of a line may fall short

        auto segment_length =
          std::min ( segment_size , length - s * segment_size ) ;

        func ( in_start_address , in_stride ,
               out_start_address , out_stride ,
               segment_length ) ;
      }
    } ;

    vspline::multithread ( worker , njobs ) ;
  }
  // process out_view with an indexed_aggregator: a coordinate iterator
  // yields each line's start coordinate

  template < size_t vsz , typename ... types >
  void operator() ( out_view_type & out_view ,
                    indexed_aggregator < vsz , types ... > func ,
                    int axis = 0 ,
                    int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 ,
                    std::ptrdiff_t segment_size = WIELDING_SEGMENT_SIZE )
  {
    if ( segment_size <= 0 )
      segment_size = out_view.shape ( axis ) ;

    auto out_stride = out_view.stride ( axis ) ;
    auto slice = out_view.bindAt ( axis , 0 ) ;
    auto out_it = slice.begin() ;
    std::ptrdiff_t nr_lines = slice.size() ;
    auto length = out_view.shape ( axis ) ;

    auto slice_shape = out_view.shape() ;
    slice_shape[axis] = 1 ;

    typedef vigra::MultiCoordinateIterator
            < out_view_type::actual_dimension > mci_type ;

    mci_type it ( slice_shape ) ;

    std::ptrdiff_t nsegments = length / segment_size ;
    if ( length % segment_size )
      nsegments++ ;

    std::ptrdiff_t nr_indexes = nr_lines * nsegments ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t i ;

      while ( vspline::fetch_ascending ( indexes , nr_indexes , i ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;

        auto s = i / nr_lines ;
        auto j = i % nr_lines ;

        auto start_index = it [ j ] ;
        start_index [ axis ] += s * segment_size ;
        auto start_address = & ( out_view [ start_index ] ) ;

        auto segment_length =
          std::min ( segment_size , length - s * segment_size ) ;

        func ( start_index , axis , start_address ,
               out_stride , segment_length ) ;
      }
    } ;

    vspline::multithread ( worker , njobs ) ;
  }
  // reduce over all coordinates of a shape with an indexed_reductor.
  // each worker copies the functor; the copy accumulates its share and
  // merges the partial result when it is disposed of

  template < size_t vsz , typename ... types >
  void operator() ( shape_type in_shape ,
                    indexed_reductor < vsz , types ... > func ,
                    int axis = 0 ,
                    int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 ,
                    std::ptrdiff_t segment_size = WIELDING_SEGMENT_SIZE )
  {
    typedef indexed_reductor < vsz , types ... > func_t ;

    if ( segment_size <= 0 )
      segment_size = in_shape [ axis ] ;

    auto length = in_shape [ axis ] ;
    auto nr_lines = prod ( in_shape ) / length ;

    auto slice_shape = in_shape ;
    slice_shape[axis] = 1 ;

    typedef vigra::MultiCoordinateIterator < dimension > mci_type ;
    mci_type it ( slice_shape ) ;

    std::ptrdiff_t nsegments = length / segment_size ;
    if ( length % segment_size )
      nsegments++ ;

    std::ptrdiff_t nr_indexes = nr_lines * nsegments ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      func_t w_func ( func ) ;  // per-worker copy of the reducing functor
      std::ptrdiff_t i ;

      while ( vspline::fetch_ascending ( indexes , nr_indexes , i ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;

        auto s = i / nr_lines ;
        auto j = i % nr_lines ;

        auto start_index = it [ j ] ;
        start_index [ axis ] += s * segment_size ;

        auto segment_length =
          std::min ( segment_size , length - s * segment_size ) ;

        w_func ( start_index , axis , segment_length ) ;
      }
    } ;

    vspline::multithread ( worker , njobs ) ;
  }
  // reduce over the contents of an array with a yield_reductor

  template < size_t vsz , typename ... types >
  void operator() ( const in_view_type & in_view ,
                    yield_reductor < vsz , types ... > func ,
                    int axis = 0 ,
                    int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 ,
                    std::ptrdiff_t segment_size = WIELDING_SEGMENT_SIZE )
  {
    typedef yield_reductor < vsz , types ... > func_t ;

    if ( segment_size <= 0 )
      segment_size = in_view.shape ( axis ) ;

    auto in_stride = in_view.stride ( axis ) ;
    auto slice1 = in_view.bindAt ( axis , 0 ) ;
    auto in_it = slice1.begin() ;
    auto length = in_view.shape ( axis ) ;
    auto nr_lines = slice1.size() ;

    std::ptrdiff_t nsegments = length / segment_size ;
    if ( length % segment_size )
      nsegments++ ;

    std::ptrdiff_t nr_indexes = nr_lines * nsegments ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t joblet_index ;
      func_t w_func ( func ) ;  // per-worker copy

      while ( vspline::fetch_ascending ( indexes , nr_indexes , joblet_index ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;

        auto s = joblet_index / nr_lines ;
        auto j = joblet_index % nr_lines ;

        auto in_start_address =
          & ( in_it [ j ] ) + in_stride * s * segment_size ;

        auto segment_length =
          std::min ( segment_size , length - s * segment_size ) ;

        w_func ( in_start_address , in_stride , segment_length ) ;
      }
    } ;

    vspline::multithread ( worker , njobs ) ;
  }
  // generate the contents of out_view with a generate_aggregator.
  // here, lines are not segmented: each joblet is one whole line

  template < size_t vsz , typename ... types >
  void generate ( out_view_type & out_view ,
                  generate_aggregator < vsz , types ... > func ,
                  int axis = 0 ,
                  int njobs = vspline::default_njobs ,
                  vspline::atomic < bool > * p_cancel = 0 )
  {
    auto out_stride = out_view.stride ( axis ) ;
    auto slice = out_view.bindAt ( axis , 0 ) ;
    auto out_it = slice.begin() ;
    std::ptrdiff_t nr_indexes = slice.size() ;
    auto length = out_view.shape ( axis ) ;

    auto slice_shape = out_view.shape() ;
    slice_shape[axis] = 1 ;

    typedef vigra::MultiCoordinateIterator
            < out_view_type::actual_dimension > mci_type ;
    mci_type it ( slice_shape ) ;

    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      auto w_func = func ;  // per-worker copy of the generator
      std::ptrdiff_t i ;

      while ( vspline::fetch_ascending ( indexes , nr_indexes , i ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;
        w_func ( it [ i ] , axis ,
                 & ( out_it [ i ] ) ,
                 out_stride , length ) ;
      }
    } ;

    vspline::multithread ( worker , njobs ) ;
  }
} ; // struct wield
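// How the joblet indexes map to work: decoding an index into (segment,
// line) as above cycles through all lines before revisiting a line's
// next segment, spreading concurrent workers over memory. A standalone
// sketch (not vspline code) of the mapping:

#include <cstdio>

int main()
{
  int nr_lines = 4 , nsegments = 3 ;
  for ( int idx = 0 ; idx < nr_lines * nsegments ; idx++ )
  {
    int s = idx / nr_lines ;   // which segment along the line
    int j = idx % nr_lines ;   // which line
    std::printf ( "joblet %d -> line %d , segment %d\n" , idx , j , s ) ;
  }
}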
// specialization of 'wield' for 1D arrays: there are no lines or slices
// to cut up, so the indexes are parcelled out directly in batches

template < class in_type , class out_type >
struct wield < 1 , in_type , out_type >
{
  enum { dimension = 1 } ;

  typedef vigra::MultiArrayView < dimension , in_type > in_view_type ;
  typedef vigra::MultiArrayView < dimension , out_type > out_view_type ;

  template < size_t vsz , typename ... types >
  void operator() ( const in_view_type & in_view ,
                    out_view_type & out_view ,
                    coupled_aggregator < vsz , types ... > func ,
                    int axis = 0 , int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 )
  {
    auto stride1 = in_view.stride ( axis ) ;
    auto length = in_view.shape ( axis ) ;
    auto stride2 = out_view.stride ( axis ) ;
    assert ( in_view.shape() == out_view.shape() ) ;

    auto nr_indexes = in_view.shape ( axis ) ;
    std::ptrdiff_t batch_size = 1024 ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t lo , hi ;
      while ( vspline::fetch_range_ascending
              ( indexes , batch_size , nr_indexes , lo , hi ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;
        func ( & ( in_view [ lo ] ) , stride1 ,
               & ( out_view [ lo ] ) , stride2 ,
               hi - lo ) ;
      }
    } ;
    vspline::multithread ( worker , njobs ) ;
  }

  template < size_t vsz , typename ... types >
  void operator() ( out_view_type & view ,
                    indexed_aggregator < vsz , types ... > func ,
                    int axis = 0 , int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 )
  {
    std::ptrdiff_t stride = view.stride ( axis ) ;
    std::ptrdiff_t nr_indexes = view.shape ( axis ) ;
    std::ptrdiff_t batch_size = 1024 ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t lo , hi ;
      while ( vspline::fetch_range_ascending
              ( indexes , batch_size , nr_indexes , lo , hi ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;
        // 'lo' as a coordinate:
        typename indexed_aggregator < vsz , types ... > ::in_type crd ( lo ) ;
        func ( crd , axis , & ( view [ lo ] ) , stride , hi - lo ) ;
      }
    } ;
    vspline::multithread ( worker , njobs ) ;
  }

  template < size_t vsz , typename ... types >
  void operator() ( vigra::TinyVector < long , 1 > shape ,
                    indexed_reductor < vsz , types ... > func ,
                    int axis = 0 , int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 )
  {
    typedef indexed_reductor < vsz , types ... > func_t ;
    std::ptrdiff_t nr_indexes = shape [ axis ] ;
    std::ptrdiff_t batch_size = 1024 ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t lo , hi ;
      func_t w_func ( func ) ;  // per-worker copy
      while ( vspline::fetch_range_ascending
              ( indexes , batch_size , nr_indexes , lo , hi ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;
        typename func_t::in_type _lo ( lo ) ; // 'lo' as a coordinate
        w_func ( _lo , axis , hi - lo ) ;
      }
    } ;
    vspline::multithread ( worker , njobs ) ;
  }

  template < size_t vsz , typename ... types >
  void operator() ( const in_view_type & in_view ,
                    yield_reductor < vsz , types ... > func ,
                    int axis = 0 , int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 )
  {
    typedef yield_reductor < vsz , types ... > func_t ;
    auto stride1 = in_view.stride ( axis ) ;
    auto length = in_view.shape ( axis ) ;
    auto nr_indexes = in_view.shape ( axis ) ;
    std::ptrdiff_t batch_size = 1024 ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      std::ptrdiff_t lo , hi ;
      func_t w_func ( func ) ;  // per-worker copy
      while ( vspline::fetch_range_ascending
              ( indexes , batch_size , nr_indexes , lo , hi ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;
        w_func ( & ( in_view [ lo ] ) , stride1 , hi - lo ) ;
      }
    } ;
    vspline::multithread ( worker , njobs ) ;
  }

  template < size_t vsz , typename ... types >
  void generate ( out_view_type & view ,
                  generate_aggregator < vsz , types ... > func ,
                  int axis = 0 , int njobs = vspline::default_njobs ,
                  vspline::atomic < bool > * p_cancel = 0 )
  {
    std::ptrdiff_t stride = view.stride ( axis ) ;
    std::ptrdiff_t nr_indexes = view.shape ( axis ) ;

    // batches must be whole multiples of the vector size, so the
    // generator's packages stay aligned to batch boundaries

    std::ptrdiff_t batch_size = 1024 % vsz
                                ? ( 1 + 1024 / vsz ) * vsz
                                : 1024 ;
    vspline::atomic < std::ptrdiff_t > indexes ( nr_indexes ) ;

    auto worker = [&]()
    {
      auto w_func = func ;  // per-worker copy of the generator
      std::ptrdiff_t lo , hi ;
      while ( vspline::fetch_range_ascending
              ( indexes , batch_size , nr_indexes , lo , hi ) )
      {
        if ( p_cancel && p_cancel->load() )
          break ;
        typename generate_aggregator < vsz , types ... > ::crd_type
          crd ( lo ) ; // 'lo' as a coordinate
        w_func ( crd , axis , & ( view [ lo ] ) , stride , hi - lo ) ;
      }
    } ;
    vspline::multithread ( worker , njobs ) ;
  }
} ;
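// A standalone sketch (not vspline code) of the batched index fetching
// idiom used in the 1D code: workers pull [lo,hi) ranges from a shared
// counter until the supply is exhausted. vspline wraps the equivalent
// logic in fetch_range_ascending; here plain std::atomic is used.

#include <atomic>
#include <algorithm>
#include <cstddef>
#include <iostream>

int main()
{
  const std::ptrdiff_t total = 10000 , batch = 1024 ;
  std::atomic < std::ptrdiff_t > source ( 0 ) ;
  std::ptrdiff_t done = 0 ;

  while ( true )   // each worker runs this loop
  {
    std::ptrdiff_t lo = source.fetch_add ( batch ) ;
    if ( lo >= total )
      break ;
    std::ptrdiff_t hi = std::min ( lo + batch , total ) ;
    done += hi - lo ;                // process indexes [lo,hi) here
  }
  std::cout << done << std::endl ;   // 10000
}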
// vs_adapter wraps a vspline::unary_functor to produce a functor which
// is compatible with the wielding code: it presents the functor's in and
// out types in their 'nd' incarnations and forwards by casting

template < class inner_type >
struct vs_adapter
: public inner_type
{
  using typename inner_type::in_ele_v ;
  using typename inner_type::out_ele_v ;
  using typename inner_type::in_ele_type ;
  using typename inner_type::out_ele_type ;

  typedef typename inner_type::in_nd_ele_type in_type ;
  typedef typename inner_type::out_nd_ele_type out_type ;
  typedef typename inner_type::in_nd_ele_v in_v ;
  typedef typename inner_type::out_nd_ele_v out_v ;

  vs_adapter ( const inner_type & _inner )
  : inner_type ( _inner )
  { }

  // operator() overload for unvectorized arguments

  void operator() ( const in_type & in , out_type & out ) const
  {
    (*((inner_type*)(this)))
      ( reinterpret_cast < const typename inner_type::in_type & > ( in ) ,
        reinterpret_cast < typename inner_type::out_type & > ( out ) ) ;
  }

  // overload for vectorized arguments, present only if vsize > 1

  template < typename = std::enable_if < ( inner_type::vsize > 1 ) > >
  void operator() ( const in_v & in , out_v & out ) const
  {
    (*((inner_type*)(this)))
      ( reinterpret_cast < const typename inner_type::in_v & > ( in ) ,
        reinterpret_cast < typename inner_type::out_v & > ( out ) ) ;
  }
} ;
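// The adapter works because the wrapped and the wrapping types are
// binary-compatible: only the reference type changes. A standalone
// sketch (not vspline code) of the casting idea, with std::array
// standing in for the nd type; 'plain_fn' and 'adapter' are hypothetical:

#include <array>
#include <iostream>

struct plain_fn
{
  void operator() ( const float & in , float & out ) const
  { out = in * 2.0f ; }
} ;

struct adapter
: public plain_fn
{
  // present the same data as a 1-element aggregate, forward by casting

  void operator() ( const std::array < float , 1 > & in ,
                    std::array < float , 1 > & out ) const
  {
    (*((plain_fn*)(this)))
      ( reinterpret_cast < const float & > ( in ) ,
        reinterpret_cast < float & > ( out ) ) ;
  }
} ;

int main()
{
  std::array < float , 1 > a { 3.0f } , b ;
  adapter() ( a , b ) ;
  std::cout << b [ 0 ] << std::endl ; // 6
}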
// same procedure for a vspline::sink_type

template < class sink_type >
struct vs_sink_adapter
: public sink_type
{
  using typename sink_type::in_ele_v ;

  typedef typename sink_type::in_nd_ele_type in_type ;
  typedef typename sink_type::in_nd_ele_v in_v ;

  vs_sink_adapter ( const sink_type & _sink )
  : sink_type ( _sink )
  { }

  // operator() overload for unvectorized arguments

  void operator() ( const in_type & in ) const
  {
    (*((sink_type*)(this)))
      ( reinterpret_cast < const typename sink_type::in_type & > ( in ) ) ;
  }

  // overload for vectorized arguments, present only if vsize > 1

  template < typename = std::enable_if < ( sink_type::vsize > 1 ) > >
  void operator() ( const in_v & in ) const
  {
    (*((sink_type*)(this)))
      ( reinterpret_cast < const typename sink_type::in_v & > ( in ) ) ;
  }
} ;
// index_wield uses vspline's 'multithread' to invoke an index-based
// transformation functor for all indexes into 'output'

template < class functor_type , int dimension >
void index_wield ( const functor_type functor ,
                   vigra::MultiArrayView < dimension ,
                                           typename functor_type::out_type
                                         > * output ,
                   int njobs = vspline::default_njobs ,
                   vspline::atomic < bool > * p_cancel = 0 )
{
  typedef typename functor_type::out_type out_type ;

  indexed_aggregator < functor_type::vsize , std::ptrdiff_t ,
                       functor_type > agg ( functor ) ;

  wield < dimension , out_type , out_type > wld ;
  wld ( *output , agg , 0 , njobs , p_cancel ) ;
}
// index_reduce: like index_wield, but feeding the coordinates to a
// reducing functor which produces no output array

template < class functor_type , int dimension >
void index_reduce ( const functor_type & functor ,
                    vigra::TinyVector < long , dimension > shape ,
                    int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 )
{
  indexed_reductor < functor_type::vsize , std::ptrdiff_t ,
                     functor_type > agg ( functor ) ;

  wield < dimension , typename functor_type::in_type > wld ;
  wld ( shape , agg , 0 , njobs , p_cancel ) ;
}
// value_reduce: feed the values in an array to a reducing functor

template < class functor_type , int dimension >
void value_reduce ( const functor_type & functor ,
                    const vigra::MultiArrayView < dimension ,
                                                  typename functor_type::in_type
                                                > * input ,
                    int njobs = vspline::default_njobs ,
                    vspline::atomic < bool > * p_cancel = 0 )
{
  yield_reductor < functor_type::vsize , std::ptrdiff_t ,
                   functor_type > agg ( functor ) ;

  wield < dimension , typename functor_type::in_type > wld ;
  wld ( *input , agg , 0 , njobs , p_cancel ) ;
}
// coupled_wield processes two arrays: the first array is taken as
// input, the second for output

template < class functor_type , int dimension >
void coupled_wield ( const functor_type functor ,
                     const vigra::MultiArrayView < dimension ,
                                                   typename functor_type::in_type
                                                 > * input ,
                     vigra::MultiArrayView < dimension ,
                                             typename functor_type::out_type
                                           > * output ,
                     int njobs = vspline::default_njobs ,
                     vspline::atomic < bool > * p_cancel = 0 )
{
  typedef typename functor_type::in_type in_type ;
  typedef typename functor_type::out_type out_type ;

  coupled_aggregator < functor_type::vsize , std::ptrdiff_t ,
                       functor_type > agg ( functor ) ;

  wield < dimension , in_type , out_type > wld ;
  wld ( *input , *output , agg , 0 , njobs , p_cancel ) ;
}
// generate_wield uses a generator function to produce data; inside
// vspline, this is used for grid_eval

template < class functor_type , unsigned int dimension >
void generate_wield ( const functor_type functor ,
                      vigra::MultiArrayView < dimension ,
                                              typename functor_type::out_type
                                            > & output ,
                      int njobs = vspline::default_njobs ,
                      vspline::atomic < bool > * p_cancel = 0 )
{
  typedef typename functor_type::out_type out_type ;

  generate_aggregator < functor_type::vsize , std::ptrdiff_t ,
                        functor_type > agg ( functor ) ;

  wield < dimension , out_type , out_type > wld ;
  wld.generate ( output , agg , 0 , njobs , p_cancel ) ;
}
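// What the wrappers amount to, stripped of vectorization, views and
// aggregators: cut the workload into shares and run the functor over
// each share on its own thread. A standalone sketch (not vspline code)
// of a coupled_wield-like driver:

#include <thread>
#include <vector>
#include <algorithm>
#include <cstddef>
#include <iostream>

template < typename F >
void coupled_wield_sketch ( F f , const std::vector < float > & in ,
                            std::vector < float > & out , int njobs )
{
  std::vector < std::thread > pool ;
  std::size_t share = ( in.size() + njobs - 1 ) / njobs ;
  for ( int j = 0 ; j < njobs ; j++ )
  {
    std::size_t lo = j * share ;
    std::size_t hi = std::min ( in.size() , lo + share ) ;
    pool.emplace_back ( [ & , lo , hi ] ()
    {
      for ( std::size_t i = lo ; i < hi ; i++ )
        f ( in [ i ] , out [ i ] ) ;
    } ) ;
  }
  for ( auto & t : pool )
    t.join() ;
}

int main()
{
  std::vector < float > a ( 100 , 2.0f ) , b ( 100 ) ;
  coupled_wield_sketch ( [] ( float x , float & y ) { y = x + 1.0f ; } ,
                         a , b , 4 ) ;
  std::cout << b [ 99 ] << std::endl ; // 3
}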
#define VSPLINE_WIELDING_H
#endif