#include "tensorOp.h"
#include <ecolab_epilogue.h>
using namespace std;

namespace civita {

  void BinOp::setArguments(const TensorPtr& a1, const TensorPtr& a2, const Args&) {
    arg1=a1; arg2=a2;
    if (arg1 && arg1->rank()!=0) {
      hypercube(arg1->hypercube());
      if (arg2 && arg2->rank()!=0 && arg1->hypercube().dims()!=arg2->hypercube().dims())
        throw std::runtime_error("arguments not conformal");
    }
    else if (arg2 && arg2->rank()!=0) hypercube(arg2->hypercube());
    else hypercube(Hypercube());
    // the result's sparse index is the union of the arguments' indices
    set<size_t> indices;
    if (arg1) indices.insert(arg1->index().begin(), arg1->index().end());
    if (arg2) indices.insert(arg2->index().begin(), arg2->index().end());
    m_index=indices;
  }
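  // Usage sketch, illustrative only: BinOp's constructor and default Args are
  // assumed here, not shown in this listing. A binary op broadcasts rank-0
  // arguments and rejects non-conformal shapes:
  //
  //   BinOp plus([](double x, double y){return x+y;});
  //   plus.setArguments(tensorA, tensorB, {});  // throws if shapes differ
  //   double v=plus[0];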
  void ReduceArguments::setArguments(const vector<TensorPtr>& a, const Args&) {
    hypercube({});
    if (!a.empty()) {
      auto hc=a[0]->hypercube();
      hypercube(hc);
      set<size_t> idx;
      for (const auto& i: a) {
        if (i->rank()>0 && i->hypercube()!=hc)
          throw runtime_error("arguments not conformal");
        idx.insert(i->index().begin(), i->index().end());
      }
      m_index=idx;
    }
    args=a;
  }
  double ReduceArguments::operator[](size_t i) const {
    if (args.empty()) return init;
    double r=init;
    for (const auto& j: args) {
      // rank-0 arguments broadcast their single value
      auto x=j->rank()==0? (*j)[0]: (*j)[i];
      if (!isnan(x)) f(r,x);
    }
    return r;
  }
  ITensor::Timestamp ReduceArguments::timestamp() const {
    Timestamp t;
    for (const auto& i: args) t=max(t, i->timestamp());
    return t;
  }
  double ReduceAllOp::operator[](size_t) const {
    double r=init;
    for (size_t i=0; i<arg->size(); ++i) {
      double x=(*arg)[i];
      if (!isnan(x)) f(r,x,i);
    }
    return r;
  }
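  // All reductions here follow one pattern: an accumulator seeded with `init`
  // and a functor f(accumulator, value, index) applied to each non-NaN
  // element. A Sum reduction is essentially (sketch; the exact ReductionOp
  // constructor is assumed, not shown in this listing):
  //
  //   ReductionOp sum([](double& r, double x, size_t){r+=x;}, 0);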
  void ReductionOp::setArgument(const TensorPtr& a, const Args& args) {
    ReduceAllOp::setArgument(a,args);
    dimension=std::numeric_limits<size_t>::max();
    if (!arg) {m_hypercube.xvectors.clear(); return;}
    const auto& ahc=arg->hypercube();
    m_hypercube=ahc;
    auto& xv=m_hypercube.xvectors;
    for (auto i=xv.begin(); i!=xv.end(); ++i)
      if (i->name==args.dimension)
        dimension=i-xv.begin();
    if (dimension<arg->rank()) {
      xv.erase(xv.begin()+dimension);
      // record, for each stored element, the output cell it contributes to
      // and its coordinate along the reduced dimension
      set<size_t> indices;
      for (size_t i=0; i<arg->size(); ++i) {
        auto splitIdx=ahc.splitIndex(arg->index()[i]);
        SOI soi{i,splitIdx[dimension]};
        splitIdx.erase(splitIdx.begin()+dimension);
        auto idx=m_hypercube.linealIndex(splitIdx);
        sumOverIndices[idx].emplace_back(soi);
        indices.insert(idx);
      }
      m_index=indices;
    }
    else // named dimension not found: reduce over everything, yielding a scalar
      m_hypercube.xvectors.clear();
  }
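  // Worked example of the sparse bookkeeping above: reducing a 3x4 argument
  // over dimension 0, a stored element at hypercube index 7 splits into
  // coordinates (1,2); dropping dimension 0 leaves output cell 2, so
  // sumOverIndices[2] records {i,1}: element i contributes to output cell 2
  // from coordinate 1 of the reduced dimension.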
  double ReductionOp::operator[](size_t i) const {
    if (!arg) return init;
    if (dimension>arg->rank())
      return ReduceAllOp::operator[](i);
    if (arg->index().empty()) {
      // dense case: walk the reduced dimension using stride arithmetic
      auto argDims=arg->shape();
      size_t stride=1;
      for (size_t j=0; j<dimension; ++j) stride*=argDims[j];
      auto quotRem=ldiv(i, stride); // quotient and remainder in one hit
      auto start=quotRem.quot*stride*argDims[dimension] + quotRem.rem;
      assert(stride*argDims[dimension]>0);
      double r=init;
      for (size_t j=0; j<argDims[dimension]; ++j) {
        double x=arg->atHCIndex(j*stride+start);
        if (!isnan(x)) f(r,x,j);
      }
      return r;
    }
    // sparse case: use the map built in setArgument
    auto soi=sumOverIndices.find(index()[i]);
    assert(soi!=sumOverIndices.end());
    double r=init;
    if (soi!=sumOverIndices.end())
      for (auto j: soi->second) {
        double x=(*arg)[j.index];
        if (!isnan(x)) f(r,x,j.dimIndex);
      }
    return r;
  }
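  // Dense-case arithmetic, worked through: with argument dims (3,4,5) and
  // dimension=1, stride=3. Output cell i=7 gives quot=2, rem=1, so
  // start=2*3*4+1=25, and the loop visits hypercube indices 25,28,31,34,
  // exactly the argument elements (1,j,2) for j=0..3.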
  double CachedTensorOp::operator[](size_t i) const {
    if (m_timestamp<timestamp()) {
      computeTensor();
      m_timestamp=Timestamp::clock::now();
    }
    return cachedResult[i];
  }
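  // CachedTensorOp gives derived classes lazy evaluation: computeTensor() runs
  // at most once per upstream change, detected by comparing timestamps, so
  // repeated element reads within one evaluation pass are O(1).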
  void DimensionedArgCachedOp::setArgument(const TensorPtr& a, const Args& args) {
    arg=a; argVal=args.val;
    if (!arg) {m_hypercube.xvectors.clear(); return;}
    dimension=std::numeric_limits<size_t>::max();
    auto hc=arg->hypercube();
    auto& xv=hc.xvectors;
    for (auto i=xv.begin(); i!=xv.end(); ++i)
      if (i->name==args.dimension)
        dimension=i-xv.begin();
    hypercube(move(hc));
  }
  void Scan::computeTensor() const {
    if (dimension<rank()) {
      auto argDims=arg->hypercube().dims();
      size_t stride=1;
      for (size_t j=0; j<dimension; ++j) stride*=argDims[j];
      if (argVal>=1 && argVal<argDims[dimension])
        // argVal selects a sliding window of argVal elements along the dimension
        for (size_t i=0; i<hypercube().numElements(); i+=stride*argDims[dimension])
          for (size_t j=0; j<stride; ++j)
            for (size_t j1=0; j1<argDims[dimension]*stride; j1+=stride) {
              // window start, clamped at the beginning of the dimension
              // (signed arithmetic avoids unsigned wraparound for small j1)
              size_t k=i+j+max(ssize_t(0), ssize_t(j1)-ssize_t(argVal-1)*ssize_t(stride));
              cachedResult[i+j+j1]=arg->atHCIndex(i+j+j1);
              for (; k<i+j+j1; k+=stride)
                f(cachedResult[i+j+j1], arg->atHCIndex(k), k);
            }
      else // scan over the whole dimension
        for (size_t i=0; i<hypercube().numElements(); i+=stride*argDims[dimension])
          for (size_t j=0; j<stride; ++j) {
            cachedResult[i+j]=arg->atHCIndex(i+j);
            for (size_t k=i+j+stride; k<i+j+stride*argDims[dimension]; k+=stride) {
              cachedResult[k]=cachedResult[k-stride];
              f(cachedResult[k], arg->atHCIndex(k), k);
            }
          }
    }
    else { // no dimension selected: scan the whole tensor in lineal order
      cachedResult[0]=arg->atHCIndex(0);
      for (size_t i=1; i<hypercube().numElements(); ++i) {
        cachedResult[i]=cachedResult[i-1];
        f(cachedResult[i], arg->atHCIndex(i), i);
      }
    }
  }
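  // Example: a running sum over a rank-1 argument [1,2,3,4]. With argVal=2
  // the scan is windowed, each cell summing itself and its predecessor:
  // [1,3,5,7]. With argVal outside [1,dims[dimension]) the whole-dimension
  // branch yields prefix sums: [1,3,6,10].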
  void Slice::setArgument(const TensorPtr& a, const Args& args) {
    arg=a; sliceIndex=args.val;
    if (!arg) return;
    auto& xv=arg->hypercube().xvectors;
    Hypercube hc;
    // find the sliced axis: split accumulates the product of the dimensions
    // before it; stride is the lineal step that jumps across it
    split=1; splitAxis=0;
    auto i=xv.begin();
    for (; i!=xv.end(); ++i)
      if (i->name==args.dimension) {
        stride=split*i->size();
        break;
      }
      else {
        hc.xvectors.push_back(*i);
        split*=i->size(); ++splitAxis;
      }
    for (i++; i!=xv.end(); ++i) // copy the axes after the sliced one
      hc.xvectors.push_back(*i);
    hypercube(hc);
    // build the sparse index, keeping only elements in the selected slice
    auto& ahc=arg->hypercube();
    map<size_t, size_t> ai;
    for (size_t i=0; i<arg->index().size(); ++i) {
      auto splitIdx=ahc.splitIndex(arg->index()[i]);
      if (splitIdx[splitAxis]==sliceIndex) {
        splitIdx.erase(splitIdx.begin()+splitAxis);
        ai[hc.linealIndex(splitIdx)]=i;
      }
    }
    m_index=ai;
    arg_index.resize(ai.size());
    size_t j=0;
    for (auto& i: ai) arg_index[j++]=i.second;
  }
  double Slice::operator[](size_t i) const {
    if (index().empty()) { // dense case
      auto res=div(ssize_t(i), ssize_t(split));
      return arg->atHCIndex(res.quot*stride + sliceIndex*split + res.rem);
    }
    return (*arg)[arg_index[i]];
  }
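  // Dense-case arithmetic, worked through: slicing dims (2,3,4) on the middle
  // axis at sliceIndex=s gives split=2 (product of dims before the axis) and
  // stride=2*3=6. Output cell i yields rem=i%2 and quot=i/2, mapping to
  // argument index quot*6 + s*2 + rem, i.e. argument coordinates (rem,s,quot).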
  void Pivot::setArgument(const TensorPtr& a, const Args&) {
    arg=a;
    vector<string> axes;
    for (auto& i: arg->hypercube().xvectors) axes.push_back(i.name);
    setOrientation(axes);
  }
  void Pivot::setOrientation(const vector<string>& axes) {
    map<string,size_t> pMap;
    map<string,XVector> xVectorMap;
    auto& ahc=arg->hypercube();
    for (size_t i=0; i<ahc.xvectors.size(); ++i) {
      pMap[ahc.xvectors[i].name]=i;
      xVectorMap[ahc.xvectors[i].name]=ahc.xvectors[i];
    }
    // permutation[k] is the argument axis at output position k;
    // invPermutation maps argument axes back to output positions
    Hypercube hc;
    permutation.clear();
    set<string> axisSet;
    map<size_t, size_t> invPermutation;
    for (auto& i: axes) {
      axisSet.insert(i);
      auto v=pMap.find(i);
      if (v==pMap.end())
        throw runtime_error("axis "+i+" not found in argument");
      invPermutation[v->second]=permutation.size();
      permutation.push_back(v->second);
      hc.xvectors.push_back(xVectorMap[i]);
    }
    // append the axes not named in the orientation, in argument order
    for (size_t i=0; i<ahc.xvectors.size(); ++i)
      if (!axisSet.count(ahc.xvectors[i].name)) {
        invPermutation[i]=permutation.size();
        permutation.push_back(i);
        hc.xvectors.push_back(ahc.xvectors[i]);
      }
    assert(hc.rank()==arg->rank());
    hypercube(move(hc));
    // permute the sparse index
    map<size_t, size_t> pi;
    for (size_t i=0; i<arg->index().size(); ++i) {
      auto idx=arg->hypercube().splitIndex(arg->index()[i]);
      auto pidx=idx;
      for (size_t j=0; j<idx.size(); ++j) {
        assert(invPermutation.count(j));
        assert(invPermutation[j]<pidx.size());
        pidx[invPermutation[j]]=idx[j];
      }
      auto l=hypercube().linealIndex(pidx);
      assert(pi.count(l)==0);
      pi[l]=i;
    }
    m_index=pi;
    permutedIndex.clear();
    for (auto& i: pi) permutedIndex.push_back(i.second);
    if (!permutedIndex.empty()) permutation.clear(); // sparse case uses permutedIndex instead
  }
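  // Usage sketch (hypothetical tensor and axis names): transpose a 2-D tensor
  // by listing its axes in the desired output order:
  //
  //   auto pivot=make_shared<Pivot>();
  //   pivot->setArgument(someTensor);
  //   pivot->setOrientation({"country","year"}); // someTensor is year x country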
  size_t Pivot::pivotIndex(size_t i) const {
    auto idx=hypercube().splitIndex(i);
    auto pidx=idx;
    for (size_t j=0; j<idx.size(); ++j)
      pidx[permutation[j]]=idx[j];
    return arg->hypercube().linealIndex(pidx);
  }
  double Pivot::operator[](size_t i) const {
    if (index().empty())
      return arg->atHCIndex(pivotIndex(i));
    return (*arg)[permutedIndex[i]];
  }
  TensorPtr createReductionOp(ravel::Op::ReductionOp op) {
    switch (op) {
      case ravel::Op::sum: return make_shared<Sum>();
      case ravel::Op::prod: return make_shared<Product>();
      case ravel::Op::av: return make_shared<civita::Average>();
      case ravel::Op::stddev: return make_shared<civita::StdDeviation>();
      case ravel::Op::min: return make_shared<Min>();
      case ravel::Op::max: return make_shared<Max>();
      default: throw runtime_error("Reduction "+to_string(op)+" not understood");
    }
  }
  void PermuteAxis::setArgument(const TensorPtr& a, const Args& args) {
    arg=a;
    hypercube(arg->hypercube());
    m_index=arg->index();
    for (m_axis=0; m_axis<m_hypercube.xvectors.size(); ++m_axis)
      if (m_hypercube.xvectors[m_axis].name==args.dimension)
        break;
    if (m_axis==m_hypercube.xvectors.size())
      throw runtime_error("axis "+args.dimension+" not found");
    // identity permutation by default
    for (size_t i=0; i<m_hypercube.xvectors[m_axis].size(); ++i)
      m_permutation.push_back(i);
  }
  void PermuteAxis::setPermutation(vector<size_t>&& p) {
    m_permutation=move(p);
    auto& xv=m_hypercube.xvectors[m_axis];
    xv.clear(); // rebuild the axis labels in permuted order
    auto& axv=arg->hypercube().xvectors[m_axis];
    for (auto i: m_permutation) xv.push_back(axv[i]);
    map<unsigned,unsigned> reverseIndex;
    for (size_t i=0; i<m_permutation.size(); ++i)
      reverseIndex[m_permutation[i]]=i;
    map<size_t,size_t> indices;
    for (size_t i=0; i<arg->index().size(); ++i) {
      auto splitted=arg->hypercube().splitIndex(arg->index()[i]);
      auto ri=reverseIndex.find(splitted[m_axis]);
      // deliberate assignment: remap the coordinate along the permuted axis
      if (ri!=reverseIndex.end() && (splitted[m_axis]=ri->second)<axv.size())
        indices[hypercube().linealIndex(splitted)]=i;
    }
    m_index=indices;
    permutedIndex.clear();
    for (auto& i: indices) permutedIndex.push_back(i.second);
  }
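  // The permutation need not be a bijection: calipers pass a shortened one.
  // E.g. with axis labels [a,b,c], setPermutation({2,0}) relabels the axis to
  // [c,a], and sparse elements whose coordinate was dropped simply fall out
  // of `indices`.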
  double PermuteAxis::operator[](size_t i) const {
    if (index().empty()) {
      auto splitted=hypercube().splitIndex(i);
      splitted[m_axis]=m_permutation[splitted[m_axis]];
      return arg->atHCIndex(arg->hypercube().linealIndex(splitted));
    }
    return (*arg)[permutedIndex[i]];
  }
  void SortByValue::computeTensor() const {
    assert(arg->rank()==1);
    vector<size_t> idx; idx.reserve(arg->size());
    vector<double> tmp; tmp.reserve(arg->size());
    for (size_t i=0; i<arg->size(); ++i) {
      idx.push_back(i);
      tmp.push_back((*arg)[i]);
    }
    switch (order) {
      case ravel::HandleSort::forward:
        sort(idx.begin(), idx.end(), [&](size_t i, size_t j){return tmp[i]<tmp[j];});
        break;
      case ravel::HandleSort::reverse:
        sort(idx.begin(), idx.end(), [&](size_t i, size_t j){return tmp[i]>tmp[j];});
        break;
      default: break;
    }
    // relabel the axis to match the sorted order
    auto hc=arg->hypercube();
    XVector xv(hc.xvectors[0]);
    for (size_t i=0; i<idx.size(); ++i)
      xv[i]=hc.xvectors[0][idx[i]];
    hc.xvectors[0].swap(xv);
    cachedResult.hypercube(move(hc));
    for (size_t i=0; i<idx.size(); ++i)
      cachedResult[i]=tmp[idx[i]];
  }
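  // SortByValue is restricted to rank-1 arguments (hence the assert): sorting
  // by value along one axis of a higher-rank tensor is ill-defined, since
  // each slice could demand a different ordering.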
  vector<TensorPtr> createRavelChain(const ravel::RavelState& state, const TensorPtr& arg) {
    set<string> outputHandles(state.outputHandles.begin(), state.outputHandles.end());
    vector<TensorPtr> chain{arg};
    for (auto& i: state.handleStates) {
      if (i.order!=ravel::HandleSort::none || i.displayFilterCaliper) {
        // sort and/or caliper this axis via an axis permutation
        auto permuteAxis=make_shared<PermuteAxis>();
        permuteAxis->setArgument(chain.back(), {i.description,0});
        auto& xv=chain.back()->hypercube().xvectors[permuteAxis->axis()];
        vector<size_t> perm;
        for (size_t i=0; i<xv.size(); ++i) perm.push_back(i);
        switch (i.order) {
          case ravel::HandleSort::none: break;
          case ravel::HandleSort::forward:
          case ravel::HandleSort::numForward:
          case ravel::HandleSort::timeForward:
            sort(perm.begin(), perm.end(),
                 [&](size_t i, size_t j) {return diff(xv[i],xv[j])<0;});
            break;
          case ravel::HandleSort::reverse:
          case ravel::HandleSort::numReverse:
          case ravel::HandleSort::timeReverse:
            sort(perm.begin(), perm.end(),
                 [&](size_t i, size_t j) {return diff(xv[i],xv[j])>0;});
            break;
          case ravel::HandleSort::custom: {
            map<string, size_t> offsets;
            for (size_t i=0; i<xv.size(); ++i)
              offsets[str(xv[i], xv.dimension.units)]=i;
            perm.clear();
            for (auto& j: i.customOrder)
              if (offsets.count(j)) perm.push_back(offsets[j]);
            break;
          }
          default: break;
        }
        if (i.displayFilterCaliper) {
          // trim the permutation to the caliper range
          if (!i.minLabel.empty())
            for (auto j=perm.begin(); j!=perm.end(); ++j)
              if (str(xv[*j],xv.dimension.units) == i.minLabel) {
                perm.erase(perm.begin(), j);
                break;
              }
          if (!i.maxLabel.empty())
            for (auto j=perm.begin(); j!=perm.end(); ++j)
              if (str(xv[*j],xv.dimension.units) == i.maxLabel) {
                perm.erase(j+1, perm.end());
                break;
              }
        }
        permuteAxis->setPermutation(move(perm));
        chain.push_back(permuteAxis);
      }
      if (!outputHandles.count(i.description)) {
        // a handle not on output is either reduced over or sliced
        auto arg=chain.back();
        if (i.collapsed) {
          chain.emplace_back(createReductionOp(i.reductionOp));
          chain.back()->setArgument(arg, {i.description,0});
        }
        else {
          chain.emplace_back(new Slice);
          auto& xv=arg->hypercube().xvectors;
          auto axisIt=find_if(xv.begin(), xv.end(),
                              [&](const XVector& j){return j.name==i.description;});
          if (axisIt==xv.end())
            throw runtime_error("axis "+i.description+" not found");
          auto sliceIt=find_if(axisIt->begin(), axisIt->end(),
                               [&](const boost::any& j)
                               {return str(j,axisIt->dimension.units)==i.sliceLabel;});
          size_t sliceIdx=0;
          if (sliceIt!=axisIt->end()) sliceIdx=sliceIt-axisIt->begin();
          chain.back()->setArgument(arg, {i.description, double(sliceIdx)});
        }
      }
    }
    if (chain.back()->rank()>1) {
      // pivot so the output axes appear in the requested order
      auto finalPivot=make_shared<Pivot>();
      finalPivot->setArgument(chain.back());
      finalPivot->setOrientation(state.outputHandles);
      chain.push_back(finalPivot);
    }
    return chain;
  }
}
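  // Usage sketch (hypothetical state and input): evaluate a Ravel
  // configuration against a source tensor and read the result off the end of
  // the chain:
  //
  //   auto chain=createRavelChain(ravelState, inputTensor);
  //   auto& result=*chain.back();
  //   for (size_t i=0; i<result.size(); ++i) process(result[i]);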