#include <classdesc.h>

template <> Factory<ITensor, minsky::OperationType::Type>::Factory() {}
const char* what() const throw() {return "Derivative not defined";}
size_t size() const override {return 1;}
double operator[](size_t) const override {return EvalOpBase::t;}
double dFlow(std::size_t, std::size_t) const override {return 0;}
double dStock(std::size_t, std::size_t) const override {return 0;}
if (state) state->throw_error(msg);
else throw runtime_error(msg);
{if (!a.empty()) setArgument(a[0],{"",0});}
double dFlow(size_t ti, size_t fi) const override {
  if (const double df=deriv->dFlow(ti,fi))
    return eo.d1((*arg)[ti])*df;
  return 0;
}
double dStock(size_t ti, size_t si) const override {
  if (const double ds=deriv->dStock(ti,si))
    return eo.d1((*arg)[ti])*ds;
  return 0;
}
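// Chain rule for an elementwise unary op f(a): the partial derivative of
// component ti with respect to a flow or stock variable is f'(a[ti]) times
// the argument's own partial derivative; a zero argument derivative
// short-circuits the multiplication.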
// conformant or scalar arguments need no alignment (fast path elided)
if (!a1 || a1->rank()==0 || !a2 || a2->rank()==0 || a1->hypercube()==a2->hypercube())
  // ...

auto pivotArg1=make_shared<Pivot>(), pivotArg2=make_shared<Pivot>();
pivotArg1->setArgument(a1,{});
pivotArg2->setArgument(a2,{});
set<string> a2Axes;
for (auto& xv: a2->hypercube().xvectors) a2Axes.insert(xv.name);
std::vector<string> a1Order, common;
Hypercube hcSpread1, hcSpread2;
for (auto& i: a1->hypercube().xvectors)
  if (a2Axes.contains(i.name))
    { // axis shared by both arguments
      common.push_back(i.name);
      a2Axes.erase(i.name);
    }
  else
    { // axis unique to a1, to be spread into a2
      a1Order.push_back(i.name);
      hcSpread2.xvectors.push_back(i);
    }
for (auto& xv: a2->hypercube().xvectors)
  if (a2Axes.contains(xv.name))
    hcSpread1.xvectors.push_back(xv);
const size_t numCommonAxes=common.size();
a1Order.insert(a1Order.end(), common.begin(), common.end());
pivotArg1->setOrientation(a1Order);
common.insert(common.end(), a2Axes.begin(), a2Axes.end());
pivotArg2->setOrientation(common);
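// After pivoting, arg1 carries its unshared axes first with the common
// axes last, and arg2 carries the common axes first with its unshared
// axes last, so the spread arguments line up axis-for-axis.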
auto spread1=make_shared<SpreadLast>();
spread1->setArgument(pivotArg1,{});
spread1->setSpreadDimensions(hcSpread1);
auto spread2=make_shared<SpreadFirst>();
spread2->setArgument(pivotArg2,{});
spread2->setSpreadDimensions(hcSpread2);
auto& xv1=spread1->hypercube().xvectors;
auto& xv2=spread2->hypercube().xvectors;
assert(xv1.size()==xv2.size());
for (size_t i=0; i<xv1.size(); ++i)
  {
    assert(xv1[i].name==xv2[i].name);
    assert(xv1[i].dimension.type==xv2[i].dimension.type);
  }
if (spread1->hypercube()==spread2->hypercube())
  { // hypercubes already agree: no interpolation needed
    setArguments(spread1, spread2);
    // compute the sparsity pattern of the result
    map<size_t,set<size_t>> p2i;
    auto& xv=pivotArg2->hypercube().xvectors;
    size_t commonElements=1;
    for (size_t i=0; i<numCommonAxes; ++i) commonElements*=xv[i].size();
    const size_t p1NumElements=pivotArg1->hypercube().numElements();
    const size_t p1ExtraElements=p1NumElements/commonElements;
    const size_t p2ExtraElements=pivotArg2->hypercube().numElements()/commonElements;
    for (auto i: pivotArg2->index())
      {
        auto r=lldiv(i,commonElements);
        p2i[r.rem].insert(r.quot);
      }
    for (auto i: pivotArg1->index())
      for (size_t j=0; j<p2ExtraElements; ++j)
        {
          auto s=p2i.find(i/p1ExtraElements);
          if (s!=p2i.end() && s->second.contains(j))
            index.insert(i+p1NumElements*j);
        }
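    // Index arithmetic sketch: with the common axes innermost in
    // pivotArg2, a sparse offset decomposes via lldiv(i,commonElements)
    // into a common-axis offset (rem) and an arg2-only offset (quot), so
    // p2i records which arg2-only slices are populated at each shared
    // position. A result element at i+p1NumElements*j exists only when
    // both arguments have data on the shared slice.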
  }
else
  { // hypercubes differ: interpolate both arguments onto their union
    Hypercube unionHC=spread1->hypercube();
    civita::unionHypercube(unionHC,spread2->hypercube());
    if (unionHC!=spread1->hypercube())
      {
        auto interpolate=make_shared<PivotedInterpolateHC>();
        interpolate->hypercube(unionHC);
        interpolate->setArgument(spread1,{});
        // ... the interpolated tensor stands in for spread1 below
      }
    if (unionHC!=spread2->hypercube())
      {
        auto interpolate=make_shared<PivotedInterpolateHC>();
        interpolate->hypercube(unionHC);
        interpolate->setArgument(spread2,{});
        // ... the interpolated tensor stands in for spread2 below
      }
  }
double dFlow(size_t ti, size_t fi) const override {
  double r=0;
  if (const double df=deriv1->dFlow(ti,fi))
    r += eo.d1((*arg1)[ti])*df;
  if (const double df=deriv2->dFlow(ti,fi))
    r += eo.d2((*arg2)[ti])*df;
  return r;
}
double dStock(size_t ti, size_t si) const override {
  double r=0;
  if (const double ds=deriv1->dStock(ti,si))
    r += eo.d1((*arg1)[ti])*ds;
  if (const double ds=deriv2->dStock(ti,si))
    r += eo.d2((*arg2)[ti])*ds;
  return r;
}
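// Binary chain rule: ∂f(a1,a2)/∂v = f₁(a1)·∂a1/∂v + f₂(a2)·∂a2/∂v, where
// d1 and d2 are the partial derivatives of the elementwise operation with
// respect to its first and second argument.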
void setArguments(const std::vector<TensorPtr>& a1,
                  const std::vector<TensorPtr>& a2, const Args&) override
{
  pa1 = make_shared<AccumArgs<op>>();
  pa1->setArguments(a1,{"",0});
  pa2 = make_shared<AccumArgs<op>>();
  pa2->setArguments(a2,{"",0});
  // ...
}
template <int I, int J> struct is_equal {const static bool value=I==J;};

template <template<OperationType::Type> class T, int op, int to>
typename classdesc::enable_if<Not<is_equal<op, to>>, void>::T
registerOps(TensorOpFactory& tensorOpFactory)
{
  tensorOpFactory.registerType<T<OperationType::Type(op)>>(OperationType::Type(op));
  registerOps<T, op+1, to>(tensorOpFactory);
}

template <template<OperationType::Type> class T, int op, int to>
typename classdesc::enable_if<is_equal<op, to>, void>::T
registerOps(TensorOpFactory&) {} // recursion terminator
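// Compile-time recursion: each instantiation registers T<op> and recurses
// with op+1 until op==to selects the empty terminator, e.g.
// registerOps<TensorBinOp, OperationType::log, OperationType::copy>(f)
// registers a TensorBinOp for every operation in the half-open range
// [log, copy).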
TensorOpFactory::TensorOpFactory()
{
  registerType<TimeOp>(OperationType::time);
  registerOps<MinskyTensorOp, OperationType::euler, OperationType::add>(*this);
  registerOps<MultiWireBinOp, OperationType::add, OperationType::log>(*this);
  registerOps<TensorBinOp, OperationType::log, OperationType::copy>(*this);
  registerOps<MinskyTensorOp, OperationType::copy, OperationType::sum>(*this);
  registerOps<GeneralTensorOp, OperationType::sum, OperationType::numOps>(*this);
}
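// The half-open ranges [euler,add), [add,log), [log,copy), [copy,sum) and
// [sum,numOps) tile the OperationType enum, so every operation receives
// exactly one tensor implementation.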
if (dimension>=rank())
  errorMsg="axis name needs to be specified in difference operator";
// ...
auto dimSize=arg->hypercube().xvectors[dimension].size();
if (size_t(abs(delta))>=dimSize)
  errorMsg="Δ ("+to_string(abs(delta))+") larger than dimension size ("+to_string(dimSize)+")";
auto hc=arg->hypercube();
auto& xv=hc.xvectors[dimension];
if (size_t(abs(delta))>=xv.size()) return;
// trim the leading (delta>0) or trailing (delta<0) coordinates for which
// no difference is defined
if (delta>0)
  xv.erase(xv.begin(), xv.begin()+delta);
else
  xv.erase(xv.end()+delta, xv.end());
cachedResult.hypercube(std::move(hc));
auto dims=arg->hypercube().dims();
if (dimension<dims.size())
  {
    for (size_t i=0; i<dimension; ++i)
      innerStride*=dims[i];
    outerStride=innerStride*dims[dimension];
  }
else
  outerStride=arg->hypercube().numElements();
auto idx=arg->index();
const set<size_t> idxSet(idx.begin(),idx.end());
const size_t hcSize=cachedResult.hypercube().numElements();
for (auto i: idx)
  {
    auto t=ssize_t(i)-delta;
    if (t>=0 && t<ssize_t(arg->hypercube().numElements()) && idxSet.contains(t) && sameSlice(t,i))
      {
        auto linealIndex=hypercube().linealIndex(arg->hypercube().splitIndex(delta>0? t: i));
        if (linealIndex<hcSize)
          {
            argIndices.push_back(i);
            newIdx.insert(linealIndex);
            assert(argIndices.size()==newIdx.size());
          }
      }
  }
cachedResult.index(Index(newIdx));
assert(i%innerStride==j%innerStride);
return abs(ssize_t(i)-ssize_t(j))<outerStride;
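// Two hypercube offsets lie on the same slice of the differenced axis when
// they agree modulo innerStride (same position on the inner axes) and
// differ by less than outerStride (no wrap into the next outer block).
// E.g. with dims {4,3} and dimension 1: innerStride=4, outerStride=12, so
// offsets 2 and 6 are exactly one step apart along axis 1.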
if (!arg) throw_error("input unwired");
if (!errorMsg.empty()) throw_error(errorMsg);
if (!argIndices.empty())
  { // sparse case: difference only the precomputed argument indices
    assert(argIndices.size()==size());
    size_t idx=0;
    for (auto i: argIndices)
      cachedResult[idx++]=arg->atHCIndex(i)-arg->atHCIndex(i-delta);
  }
else // dense case
  for (size_t i=0; i<cachedResult.size(); checkCancel(), ++i)
    {
      auto ai=arg->hypercube().linealIndex(cachedResult.hypercube().splitIndex(i));
      auto t=ai+delta*innerStride;
      if (sameSlice(t, ai))
        cachedResult[i]=arg->atHCIndex(t)-arg->atHCIndex(ai);
      else
        cachedResult[i]=nan("");
    }
for (size_t i=0; i<size(); checkCancel(), ++i)
  {
    auto ai=arg->hypercube().linealIndex(cachedResult.hypercube().splitIndex(i));
    // ... compute the offset t of the neighbouring slice
    if (sameSlice(t, ai))
      cachedResult[i]=arg->atHCIndex(ai)-arg->atHCIndex(t);
    else
      cachedResult[i]=nan("");
  }
std::shared_ptr<ITensor> arg1, arg2;
// ...
if (!arg1 || !arg2) return;
if (arg1->rank()!=0 && arg2->rank()!=0 &&
    arg1->hypercube().dims()[arg1->rank()-1]!=arg2->hypercube().dims()[0])
  throw_error("inner dimensions of tensors do not match");
size_t m=1, n=1;
for (size_t i=0; i<arg1->rank()-1; i++)
  m*=arg1->hypercube().dims()[i];
for (size_t i=1; i<arg2->rank(); i++)
  n*=arg2->hypercube().dims()[i];
const size_t stride=arg2->rank()>0? arg2->hypercube().dims()[0]: 1;
for (size_t i=0; i< m; i++)
  for (size_t j=0; j< n; j++)
    {
      double tmpSum=0;
      for (size_t k=0; k<stride; checkCancel(), k++)
        {
          auto v1=m>1? arg1->atHCIndex(k*m+i) : (*arg1)[k];
          auto v2=n>1? arg2->atHCIndex(j*stride + k) : (*arg2)[k];
          if (!isnan(v1) && !isnan(v2)) tmpSum+=v1*v2; // skip missing data (assumed guard)
        }
      cachedResult[i+m*j]=tmpSum;
    }
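// Worked example: a 2×3 matrix A (axes {i,k}) times a length-3 vector v
// gives m=2, n=1, stride=3, and result[i] = Σ_k A(i,k)·v(k)
//   = Σ_k arg1->atHCIndex(k*2+i) * (*arg2)[k].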
void setArguments(const TensorPtr& a1, const TensorPtr& a2, const Args&) override
{
  arg1=a1; arg2=a2;
  if (!arg1 || arg1->rank()==0 || !arg2 || arg2->rank()==0) return;
  auto xv1=arg1->hypercube().xvectors, xv2=arg2->hypercube().xvectors;
  Hypercube hc;
  hc.xvectors.insert(hc.xvectors.begin(), xv2.begin()+1, xv2.end());
  hc.xvectors.insert(hc.xvectors.begin(), xv1.begin(), xv1.end()-1);
  cachedResult.hypercube(std::move(hc));
}
std::shared_ptr<ITensor> arg1, arg2;
void computeTensor() const override
{
  if (!arg1 || !arg2) return;
  const size_t m=arg1->size(), n=arg2->size();
  assert(cachedResult.size()==m*n);
  for (size_t i=0; i< m; i++)
    {
      auto v1=(*arg1)[i];
      for (size_t j=0; j< n; checkCancel(), j++)
        {
          auto v2=(*arg2)[j];
          cachedResult[i+j*m]=v1*v2;
        }
    }
}
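// Result element (i,j) lives at offset i+j*m: the first argument's axes
// vary fastest, matching the hypercube assembled in setArguments below.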
Timestamp timestamp() const override
{return max(arg1? arg1->timestamp(): Timestamp(), arg2? arg2->timestamp(): Timestamp());}
void setArguments(const TensorPtr& a1, const TensorPtr& a2, const Args&) override
{
  arg1=a1; arg2=a2;
  if (!arg1 || !arg2) return;
  const size_t stride=arg1->hypercube().numElements();
  vector<size_t> idx1(arg1->index().begin(), arg1->index().end()), idx2(arg2->index().begin(), arg2->index().end());
  if (!idx1.empty() || !idx2.empty())
    { // at least one argument is sparse: compute the result's index
      if (idx1.empty()) // dense arg1: enumerate all offsets
        for (size_t i=0; i<arg1->size(); ++i)
          idx1.push_back(i);
      if (idx2.empty()) // dense arg2: enumerate all offsets
        for (size_t i=0; i<arg2->size(); ++i)
          idx2.push_back(i);
      set<size_t> newIdx;
      for (auto i: idx1)
        for (auto j: idx2)
          checkCancel(), newIdx.insert(i+stride*j);
      cachedResult.index(Index(newIdx));
    }
  auto xv1=arg1->hypercube().xvectors, xv2=arg2->hypercube().xvectors;
  Hypercube hc;
  hc.xvectors.insert(hc.xvectors.begin(), xv2.begin(), xv2.end());
  hc.xvectors.insert(hc.xvectors.begin(), xv1.begin(), xv1.end());
  cachedResult.hypercube(std::move(hc));
}
std::shared_ptr<ITensor> arg;
// ...
for (; i<arg->size(); checkCancel(), ++i)
  ; // ... (copy loop body elided in source)
for (; j<cachedResult.size(); checkCancel(), ++j)
  cachedResult[j]=nan(""); // pad any remaining cells with NaN
// ...
arg=a; cachedResult.index(a->index()); cachedResult.hypercube(a->hypercube());
std::shared_ptr<ITensor> arg1, arg2;
size_t dimension=numeric_limits<size_t>::max();

double interpolateString(double idx, size_t stride, size_t offset) const
{
  if (!arg1->size())
    throw_error("No data to interpolate");
  auto maxIdx=arg1->rank()==1? arg1->size()-1: arg1->hypercube().xvectors[dimension].size()-1;
  if (idx<=-1 || idx>maxIdx)
    return nan("");
  if (idx==maxIdx) // exactly on the last entry: no bracketing needed
    return arg1->atHCIndex(idx*stride+offset);
  unsigned lesser=idx, greater=idx+1;
  double lv=arg1->atHCIndex(lesser*stride+offset), gv=arg1->atHCIndex(greater*stride+offset);
  // walk outwards past NaN entries
  for (; lesser>0 && isnan(lv); checkCancel(), --lesser, lv=arg1->atHCIndex(lesser*stride+offset));
  for (; greater<maxIdx && isnan(gv); checkCancel(), ++greater, gv=arg1->atHCIndex(greater*stride+offset));
  const double s=(idx-lesser)/(greater-lesser);
  // ...
  return (1-s)*lv + s*gv;
}
double interpolateAny(const XVector& xv, const civita::any& x, size_t stride, size_t offset) const
{
  if (xv.size()<2 || diff(x,xv.front())<0 || diff(x,xv.back())>0)
    return nan("");
  auto i=xv.begin();
  for (; diff(x,*(i+1))>0; checkCancel(), ++i); // find the bracketing interval
  auto greater=i+1;
  double lv=arg1->atHCIndex((i-xv.begin())*stride+offset);
  double gv=arg1->atHCIndex((greater-xv.begin())*stride+offset);
  for (; i>xv.begin() && isnan(lv); checkCancel(), --i, lv=arg1->atHCIndex((i-xv.begin())*stride+offset));
  for (; greater<xv.end()-1 && isnan(gv); checkCancel(), ++greater, gv=arg1->atHCIndex((greater-xv.begin())*stride+offset));
  const double s=diff(x,*i)/diff(*greater,*i);
  // ...
  return (1-s)*lv + s*gv;
}
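// Standard linear interpolation: with s the fractional position of x
// between the bracketing coordinates, the result is (1−s)·lv + s·gv. The
// preceding loops widen the bracket past NaN entries so missing data is
// skipped rather than propagated.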
if (arg1->rank()==0)
  throw_error("Cannot apply gather to a scalar");
if (dimension>=arg1->rank())
  throw_error("Need to specify which dimension to gather");
// ...
if (arg1 && d>=arg1->rank()) d=0;
if (!arg1 || arg1->hypercube().xvectors.size()<=d) return;
auto& xv=arg1->hypercube().xvectors[d];
Hypercube hc=cachedResult.hypercube();
auto& xvToGather=arg1->hypercube().xvectors[dimension];
for (size_t i=0; !xv.empty() && i<arg2->size() && i<hc.xvectors[0].size(); ++i)
  {
    double intPart;
    const double fracPart=std::modf((*arg2)[i], &intPart);
    if (intPart>=0)
      if (intPart<xvToGather.size()-1)
        hc.xvectors[0][i]=interpolate(xvToGather[intPart],xvToGather[intPart+1], fracPart);
      else if (intPart<xvToGather.size())
        hc.xvectors[0][i]=xvToGather[intPart];
      else // beyond the last coordinate
        hc.xvectors[0][i]=any(xvToGather.dimension.type);
    else if (intPart==-1)
      hc.xvectors[0][i]=xvToGather[0];
    else // more than one step before the first coordinate
      hc.xvectors[0][i]=any(xvToGather.dimension.type);
  }
cachedResult.hypercube(std::move(hc));
function<double(double,size_t)> interpolate;
// ...
auto dims=arg1->hypercube().dims();
size_t stride=1;
for (size_t i=0; i<d; ++i)
  stride*=dims[i];
switch (xv.dimension.type)
  {
  case Dimension::string:
    interpolate=[&](double x, size_t offset)
      {return interpolateString(x,stride,offset);};
    break;
  case Dimension::time:
    interpolate=[&](double x, size_t offset){
      // interpret x as a fractional year
      const int year=x;
      const int daysInYear=(date(year+1,Jan,1)-date(year,Jan,1)).days();
      const double dayInYearF=daysInYear*(x-year);
      const int dayInYear=dayInYearF;
      const ptime xtime(date(year,Jan,1)+date_duration(dayInYear),
                        seconds(int(3600*24*(dayInYearF-dayInYear))));
      return interpolateAny(xv, xtime, stride, offset);
    };
    break;
  case Dimension::value:
    interpolate=[&](double x, size_t offset)
      {return interpolateAny(xv,x,stride,offset);};
    break;
  }
for (size_t j=0; j<offsets.size(); ++j)
  for (size_t i=0; i<arg2->size(); checkCancel(), ++i)
    {
      auto idx=(*arg2)[i];
      if (isfinite(idx)) // guard assumed from context
        {
          assert(i+arg2->size()*j<cachedResult.size());
          cachedResult[i+arg2->size()*j]=interpolate(idx, offsets[j]);
        }
      else
        cachedResult[i]=nan("");
    }
void setArguments(const TensorPtr& a1, const TensorPtr& a2, const Args& args) override
{
  arg1=a1; arg2=a2;
  if (!arg1 || !arg2) return;
  auto& xv=arg1->hypercube().xvectors;
  dimension=find_if(xv.begin(), xv.end(), [&](const XVector& i)
                    {return i.name==args.dimension;})-xv.begin();
  switch (arg1->rank())
    {
      // ... (rank-specific handling elided)
    }
  if (dimension>=arg1->rank()) return;
auto arg1Dims=arg1->hypercube().dims();
size_t lowerStride=1;
for (size_t i=0; i<dimension; ++i)
  lowerStride*=arg1Dims[i];
size_t upperStride=1;
for (size_t i=dimension+1; i<arg1Dims.size(); ++i)
  upperStride*=arg1Dims[i];
// compute the offset of each gathered slice
set<size_t> offsetSet;
if (arg1->index().empty())
  { // dense case
    for (size_t i=0; i<upperStride; ++i)
      for (size_t j=0; j<lowerStride; checkCancel(), ++j)
        offsetSet.insert(lowerStride*i*arg1Dims[dimension]+j);
  }
else
  for (auto i: arg1->index())
    { // sparse case: zero out the gathered dimension of each index
      auto splitted=arg1->hypercube().splitIndex(i);
      splitted[dimension]=0;
      offsetSet.insert(arg1->hypercube().linealIndex(splitted));
    }
offsets.clear(); offsets.insert(offsets.end(), offsetSet.begin(), offsetSet.end());
const size_t arg2NumElements=arg2->hypercube().numElements();
Hypercube hc;
if (arg2NumElements>1)
  {
    hc.xvectors.push_back(arg1->hypercube().xvectors[dimension]);
    hc.xvectors[0].resize(arg2NumElements);
  }
hc.xvectors.insert(hc.xvectors.end(), arg1->hypercube().xvectors.begin(), arg1->hypercube().xvectors.begin()+dimension);
hc.xvectors.insert(hc.xvectors.end(), arg1->hypercube().xvectors.begin()+dimension+1, arg1->hypercube().xvectors.end());
if (!arg1->index().empty() || !arg2->index().empty())
  { // compute the sparse index of the result
    vector<size_t> arg1Idx(arg1->index().begin(), arg1->index().end()),
      arg2Idx(arg2->index().begin(), arg2->index().end());
    if (arg1Idx.empty()) // dense arg1: enumerate all slice offsets
      for (size_t i=0; i<upperStride; ++i)
        for (size_t j=0; j<lowerStride; checkCancel(), ++j)
          arg1Idx.push_back(i*lowerStride*arg1Dims[dimension]+j);
    if (arg2Idx.empty()) // dense arg2: enumerate all positions
      for (size_t i=0; i<arg2->size(); checkCancel(), ++i) arg2Idx.push_back(i);
    set<size_t> resultantIndex;
    size_t lastOuter=numeric_limits<size_t>::max();
    for (auto i: arg1Idx)
      {
        auto splitIdx=arg1->hypercube().splitIndex(i);
        size_t outerIdx=0, stride=1;
        for (size_t j=0; j<arg1->rank(); checkCancel(), ++j)
          if (j!=dimension)
            {
              outerIdx+=stride*splitIdx[j];
              stride*=arg1Dims[j];
            }
        if (outerIdx==lastOuter) continue;
        lastOuter=outerIdx;
        for (auto j: arg2Idx)
          checkCancel(), resultantIndex.insert(outerIdx*arg2NumElements+j);
      }
    cachedResult.index(resultantIndex);
  }
// ensure all axis names are distinct
set<string> axisNames;
for (auto& xv: hc.xvectors)
  if (!axisNames.insert(xv.name).second)
    { // duplicated axis name: substitute a unique numeric name
      while (!axisNames.insert(to_string(++axisName)).second);
      // ...
    }
cachedResult.hypercube(std::move(hc));
([this](double& r,double x,size_t i){
   if (i==0 || x>maxValue) { maxValue=x; r=i; } // track the running maximum (body assumed)
 });
// ...
([this](double& r,double x,size_t i){
   if (i==0 || x<minValue) { minValue=x; r=i; } // track the running minimum (body assumed)
 });
size_t dimension=numeric_limits<size_t>::max();
// ...
for (size_t i=0; i<arg->rank(); ++i)
  if (arg->hypercube().xvectors[i].name==args.dimension)
    dimension=i;
// ... (when the named dimension is absent, reduce over the whole tensor)
dimension=numeric_limits<size_t>::max();

size_t size() const override
{return dimension<arg->rank()? arg->hypercube().xvectors[dimension].size(): arg->size();}
hypercube({arg->rank()});
// ...
double operator[](size_t i) const override
{return arg? arg->hypercube().xvectors[i].size(): nan("");}
if (!arg1 || !arg2) return;
Hypercube hc;
int dimension1=-1, dimension2=-1;
set<string> dimNames;
switch (arg1->rank())
  {
  case 0:
    errorMsg="covariance or ρ needs at least rank 1 arguments";
    return;
  // ...
  default:
    for (auto& xv:arg1->hypercube().xvectors)
      if (xv.name==args.dimension)
        dimension1=&xv-arg1->hypercube().xvectors.data();
      else
        {
          hc.xvectors.push_back(xv);
          dimNames.insert(xv.name);
        }
  }
switch (arg2->rank())
  {
  case 0:
    errorMsg="covariance or ρ needs at least rank 1 arguments";
    return;
  // ...
  default:
    for (auto& xv:arg2->hypercube().xvectors)
      if (xv.name==args.dimension)
        dimension2=&xv-arg2->hypercube().xvectors.data();
      else
        {
          hc.xvectors.push_back(xv);
          if (!dimNames.insert(xv.name).second)
            hc.xvectors.back().name+="'"; // disambiguate a clashing axis name
        }
  }
if (dimension1<0 || dimension2<0)
  {
    errorMsg="dimension "+args.dimension+" not found";
    return;
  }
if (arg1->hypercube().xvectors[dimension1].size() != arg2->hypercube().xvectors[dimension2].size())
  {
    errorMsg="arguments not conformant";
    return;
  }
if (!errorMsg.empty()) throw_error(errorMsg);
auto splitted=hypercube().splitIndex(idx);
auto splitIndexIterator=splitted.begin();
// map the result index into a lineal index and stride for each argument
auto computeIndexAndStride=[&](size_t& lineal, size_t& stride,
                               size_t dimension, const vector<unsigned>& dims) {
  lineal=0; stride=1;
  for (size_t i=0, s=1; i<dims.size(); s*=dims[i], ++i)
    if (i!=dimension)
      lineal+=*splitIndexIterator++ * s;
    else
      stride=s;
};
size_t arg1Lineal, arg1Stride, arg2Lineal, arg2Stride;
computeIndexAndStride(arg1Lineal, arg1Stride, dimension1, arg1->hypercube().dims());
computeIndexAndStride(arg2Lineal, arg2Stride, dimension2, arg2->hypercube().dims());
for (size_t i=0; i<arg1->hypercube().xvectors[dimension1].size(); ++i)
  {
    auto x=arg1->atHCIndex(arg1Lineal+i*arg1Stride);
    auto y=arg2->atHCIndex(arg2Lineal+i*arg2Stride);
    f(x,y); // accumulate over the shared dimension (NaN handling elided)
  }

Timestamp timestamp() const override
{return max(arg1->timestamp(), arg2->timestamp());}
if (!arg1 || !arg2) return nan("");
double sumXY=0, sumX=0, sumY=0;
size_t count=0;
auto f=[&](double x, double y)
  {
    ++count;
    sumX+=x; sumY+=y; sumXY+=x*y;
  };
performSum(f,i);
return (sumXY-sumX*sumY/count)/(count-1);
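// Sample covariance via the single-pass identity
//   cov(x,y) = (Σxy − Σx·Σy/n) / (n−1),
// which equals Σ(x−x̄)(y−ȳ)/(n−1) algebraically.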
if (!arg1 || !arg2) return nan("");
double sumXY=0, sumX=0, sumY=0, sumXsq=0, sumYsq=0;
size_t count=0;
auto f=[&](double x, double y)
  {
    ++count;
    sumX+=x; sumY+=y; sumXY+=x*y;
    sumXsq+=x*x; sumYsq+=y*y;
  };
performSum(f,i);
const double invCount=1.0/count;
return (sumXY-sumX*sumY*invCount)/
  sqrt((sumXsq-sumX*sumX*invCount)*(sumYsq-sumY*sumY*invCount));
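// Pearson correlation ρ = cov(x,y)/(σ_x·σ_y); the (n−1) normalisations
// cancel between numerator and denominator, so raw sums suffice.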
for (size_t i=0; i<scale.size(); checkCancel(), ++i)
  {
    // ... (n = pair count and sx = Σx for this slice)
    scale[i]=(n*sumxy[i] - sx*sumy[i])/(n*sumxx[i]-sx*sx);
    offset[i]=sumy[i]/n - scale[i]*sx/n;
  }
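// Ordinary least squares per slice: scale = (n·Σxy − Σx·Σy)/(n·Σx² − (Σx)²)
// and offset = ȳ − scale·x̄.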
if (state && scale.size()==1 &&
    (state->tooltip().empty()||state->tooltip().starts_with("y=")))
  {
    // ... display the fitted line "y=<scale>x+<offset>" as the tooltip
  }
m_timestamp=timestamp();
hypercube(y->hypercube());
// ...
auto& xv=m_hypercube.xvectors;
dimension=rank()>1? rank(): 0;
for (auto i=xv.begin(); i!=xv.end(); ++i)
  if (i->name==args.dimension)
    dimension=i-xv.begin();
spreadX=make_shared<SpreadY>();
spreadX->setArguments(y,x,{});
if (rank()>1 && dimension>=rank()) return;
// with no explicit x argument, use the y argument's dimension coordinates
auto tv=make_shared<TensorVal>();
tv->index(y->index());
auto& hc=y->hypercube();
tv->hypercube(y->hypercube());
auto& xv=hc.xvectors[dimension];
for (size_t i=0; i<tv->size(); checkCancel(), ++i)
  {
    auto slice=hc.splitIndex(tv->index()[i])[dimension];
    switch (xv.dimension.type)
      {
      case Dimension::string:
        (*tv)[i]=slice; // ordinal position along the axis (assumed)
        break;
      case Dimension::value:
        (*tv)[i]=xv[slice].value;
        break;
      case Dimension::time:
        (*tv)[i]=(xv[slice].time-ptime(date(1970,Jan,1))).total_microseconds()*1E-6;
        break;
      }
  }
auto fxy=[](double x, double y){return isfinite(x) && isfinite(y)? x*y: 0;};
sumyy.setArgument(make_shared<BinOp>(fxy,y,y),args);
sumxx.setArgument(make_shared<BinOp>(fxy,spreadX,spreadX),args);
sumxy.setArgument(make_shared<BinOp>(fxy,y,spreadX),args);
count.setArgument // count of finite (x,y) pairs; receiver name assumed
  (make_shared<BinOp>([](double x, double y)
     {return isfinite(x)*isfinite(y);}, y, spreadX), args);
offset.index(sumx.index());
if (rank()>1 && dimension>=rank())
  throw_error("Need to specify axis");
if (!x) return nan("");
assert(dimension<rank() || scale.size()==1);
if (timestamp()>m_timestamp) computeScaleAndOffset();
if (dimension<rank())
  { // pick out the scale/offset pair for this slice
    auto splitted=hypercube().splitIndex(i);
    splitted.erase(splitted.begin()+dimension);
    auto hcIdx=scale.hypercube().linealIndex(splitted);
    return scale.atHCIndex(hcIdx) * (*x)[i] + offset.atHCIndex(hcIdx);
  }
return scale[0]* (*x)[i] + offset[0];
if (values.empty()) return nan("");
sort(values.begin(),values.end());
const size_t halfIndex=values.size()/2;
return (values.size()&1)? values[halfIndex]: 0.5*(values[halfIndex-1]+values[halfIndex]);
([this](double& x,double y,std::size_t) {
   x+=pow(y-mean, exponent);
 });
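// Central-moment accumulator: adds (y−ȳ)^k for the configured exponent
// about the precomputed mean.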
if (a && a->rank()>0 && argVal>=1)
  {
    auto hc=a->hypercube();
    if (dimension>a->rank())
      hc.dims({unsigned(argVal)});
    // ...
    auto& xv=hc.xvectors[dimension];
    for (size_t i=0; i<argVal; ++i)
      xv.push_back(i); // provisional bin labels (assumed)
    cachedResult.hypercube(std::move(hc));
  }
double min=numeric_limits<double>::max(), max=-numeric_limits<double>::max();
for (size_t i=0; i<arg->size(); checkCancel(), ++i)
  {
    min=std::min((*arg)[i],min);
    max=std::max((*arg)[i],max);
  }
auto binSize=(max-min)/argVal;
if (arg->rank()==0 || binSize==0)
  { // degenerate input: result collapses to a scalar
    cachedResult.hypercube({});
    // ...
  }
auto hc=arg->hypercube();
if (dimension>=hc.rank())
  hc.xvectors.resize(1); // reduce the whole tensor onto a single histogram axis
// ... (dim = the axis holding the histogram)
auto& xv=hc.xvectors[dim];
xv.name="histogram";
xv.dimension=Dimension(Dimension::value,"");
// ...
for (double x=min+0.5*binSize; x<max; x+=binSize)
  xv.push_back(x); // bin centres
cachedResult.hypercube(std::move(hc));
for (size_t i=0; i<cachedResult.size(); checkCancel(), ++i) cachedResult[i]=0;
auto iBinSize=1/binSize;
if (cachedResult.rank()>1)
  for (size_t i=0; i<arg->size(); checkCancel(), ++i)
    {
      auto splitted=arg->hypercube().splitIndex(i);
      splitted[dim]=((*arg)[i]-min)*iBinSize;
      cachedResult[cachedResult.hypercube().linealIndex(splitted)]+=1;
    }
else
  for (size_t i=0; i<arg->size(); checkCancel(), ++i)
    {
      auto index=((*arg)[i]-min)*iBinSize;
      cachedResult[index]+=1;
    }
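// Each value v is assigned bin ⌊(v−min)/binSize⌋, with bin centres laid
// out at min+(k+0.5)·binSize by the axis construction above.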
Hypercube hc;
for (auto& i: args)
  unionHypercube(hc, i->hypercube(), false);
vector<string> unionDims; unionDims.reserve(hc.xvectors.size());
for (auto& i: hc.xvectors) unionDims.push_back(i.name);
// spread each argument over the union hypercube
for (auto& i: args)
  {
    set<string> argDims;
    for (auto& j: i->hypercube().xvectors) argDims.insert(j.name);
    auto spread=make_shared<SpreadLast>();
    Hypercube spreadHC;
    for (auto& j: hc.xvectors)
      if (!argDims.contains(j.name))
        spreadHC.xvectors.push_back(j);
    spread->setArgument(i,{});
    spread->setSpreadDimensions(spreadHC);
    auto pivot=make_shared<Pivot>();
    pivot->setArgument(spread,{});
    pivot->setOrientation(unionDims);
    if (pivot->hypercube()==hc)
      i=pivot;
    else
      { // axes agree but coordinates differ: spread over the target hypercube
        auto spreadOverHC=make_shared<SpreadOverHC>();
        spreadOverHC->hypercube(hc);
        spreadOverHC->setArgument(pivot,{});
        i=spreadOverHC;
      }
  }
using civita::Meld::setArguments;
void setArguments(const std::vector<TensorPtr>& a1,
                  const std::vector<TensorPtr>& a2, const Args& opArgs) override
{
  if (a1.empty() && a2.empty()) return;
  vector<TensorPtr> args=a1;
  args.insert(args.end(), a2.begin(), a2.end());
  meldArgsIntoCommonHypercube(args);
  civita::Meld::setArguments(args,opArgs);
}
using civita::Merge::setArguments;
void setArguments(const std::vector<TensorPtr>& a1,
                  const std::vector<TensorPtr>& a2, const Args& opArgs) override
{
  if (a1.empty() && a2.empty()) return;
  vector<TensorPtr> args=a1;
  args.insert(args.end(), a2.begin(), a2.end());
  meldArgsIntoCommonHypercube(args);
  civita::Merge::setArguments(args,opArgs);
  if (!hypercube().dimsAreDistinct())
    throw_error("Please use a distinct name for the synthetic dimension produced by this operation");
  // label each slice of the synthetic dimension with the name of the
  // variable wired to the corresponding port, falling back to its ordinal
  size_t stream=0;
  for (size_t i=0; state && i<state->portsSize(); ++i)
    if (auto p=state->ports(i).lock())
      for (auto w:p->wires())
        if (auto from=w->from())
          {
            auto v=from->item().variableCast();
            if (v && !v->name().empty())
              m_hypercube.xvectors.back()[stream]=latexToPango(v->name());
            else
              m_hypercube.xvectors.back()[stream]=to_string(stream);
            ++stream;
          }
const int slice=args.val;
vector<size_t> permutation;
auto argSize=arg->hypercube().xvectors[axis()].size();
// negative slice values count from the end of the axis
for (int i=slice; i<0; ++i)
  permutation.push_back(argSize+i);
// ...
for (int i=0; i<slice; ++i)
  permutation.push_back(i);
setPermutation(std::move(permutation));
if (args.size()<2)
  hypercube(Hypercube());
else
  hypercube(args[1]->hypercube());
set<size_t> indices;
for (auto& i: args)
  {
    for (auto& j: i->index())
      checkCancel(), indices.insert(j);
    if (m_size==1)
      m_size=i->size();
    else if (m_size!=i->size())
      throw_error("nonconformant tensor arguments in switch");
  }
// ...
if (!m_index.empty()) m_size=m_index.size();

size_t size() const override {return m_size;}
auto tt=i->timestamp();
if (args.size()<2) return nan("");
double selector=0;
if (args[0]->rank()==0)
  selector = (*args[0])[0];
else
  selector = (*args[0])[i];
const ssize_t idx = selector+1.5;
if (idx>0 && idx<int(args.size()))
  {
    if (args[idx]->rank()==0)
      return (*args[idx])[0];
    return args[idx]->atHCIndex(index()[i]);
  }
return nan("");
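// idx=selector+1.5 truncates to the nearest integer case: a selector of 0
// picks args[1], 1 picks args[2], and out-of-range selectors yield NaN.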
const_cast<Ravel&>(ravel).populateHypercube(a->hypercube());
// ...
v=(*chain.back())[i];
if (m_timestamp<chain.back()->timestamp())
  {
    const_cast<Ravel&>(ravel).populateHypercube(arg->hypercube());
    // ...
    m_timestamp=Timestamp::clock::now();
  }
size_t size() const override {return chain.empty()? 1: chain.back()->size();}
const Index& index() const override
{return chain.empty()? m_index: chain.back()->index();}
Timestamp timestamp() const override
{return chain.empty()? Timestamp(): chain.back()->timestamp();}
const Hypercube& hypercube() const override
{return chain.empty()? m_hypercube: chain.back()->hypercube();}
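// A RavelTensor delegates its size, index, timestamp and hypercube to the
// last link of its operation chain (the fully sliced and rolled-up view);
// an empty chain behaves as an empty scalar.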
std::shared_ptr<ITensor> TensorOpFactory::create
(const ItemPtr& it, const TensorsFromPort& tfp)
{
  if (auto ravel=dynamic_cast<const Ravel*>(it.get()))
    {
      auto r=make_shared<RavelTensor>(*ravel);
      // ... wire up the ravel's input
      return r;
    }
  if (auto op=it->operationCast())
    try
      {
        auto r=create(op->type()); // underlying tensor op for this operation type
        if (auto ss=dynamic_cast<SetState*>(r.get()))
          ss->setState(dynamic_pointer_cast<OperationBase>(it));
        switch (op->portsSize())
          {
            // ... attach the tensors wired to each input port
          }
        return r;
      }
    catch (const InvalidType&)
      { /* ... */ }
    catch (const TensorOpError&)
      { /* ... */ }
    catch (const std::exception& ex)
      { // rethrow with the offending operation identified
        throw TensorOpError(ex.what());
      }
  else if (auto v=it->variableCast())
    return make_shared<ConstTensorVarVal>(v->vValue(), tfp.ev);
  else if (auto sw=dynamic_cast<const SwitchIcon*>(it.get()))
    {
      auto r=make_shared<SwitchTensor>();
      // ... attach the switch's arguments
      return r;
    }
  else if (auto l=dynamic_cast<const Lock*>(it.get()))
    {
      // when locked, replay the saved Ravel state over the input chain
      if (auto r=l->ravelInput())
        if (auto p=r->ports(1).lock())
          {
            // ...
            return chain.back();
          }
      // otherwise pass the wired input straight through
      if (auto tensors=tfp.tensorsFromPort(*l->ports(1).lock()); !tensors.empty())
        return tensors.front();
    }
  return {};
}
vector<TensorPtr> TensorsFromPort::tensorsFromPort(const Port& p) const
{
  if (!p.input()) return {};
  vector<TensorPtr> r;
  for (auto w: p.wires())
    {
      Item& item=w->from()->item();
      if (auto o=item.operationCast())
        if (o->type()==OperationType::differentiate)
          { // only scalar derivatives are supported
            auto rhs=tensorsFromPort(*o->ports(1).lock());
            if (rhs.empty() || rhs[0]->size()==1)
              continue; // scalar case handled by the regular evaluator (assumed)
            throw std::runtime_error("Tensor derivative not implemented");
          }
      // ... create the tensor op for the upstream item
      r.push_back(tensorOp);
    }
  return r;
}
vector<TensorPtr> TensorsFromPort::tensorsFromPorts(const Item& item) const
{
  vector<TensorPtr> r;
  for (size_t i=0; i<item.portsSize(); ++i)
    if (auto p=item.ports(i).lock())
      if (p->input())
        {
          auto tensorArgs=tensorsFromPort(*p);
          r.insert(r.end(), tensorArgs.begin(), tensorArgs.end());
        }
  return r;
}
TensorEval::TensorEval(const shared_ptr<VariableValue>& dest,
                       const shared_ptr<VariableValue>& src):
  // ...
{
  // ...
  rhs=std::move(copy);
}
void TensorEval::eval(double* fv, std::size_t n, const double* sv)
{
  if (fvIsGlobalFlowVars)
    {
      // ... guard against the global flowVars vector being resized
    }
  // ...
  for (size_t i=0; i<rhs->size(); ++i)
    result[i]=(*rhs)[i]; // copy the evaluated tensor into the flow vars (receiver assumed)
}
void TensorEval::deriv(double* df, std::size_t n, const double* ds,
                       const double* sv, const double* fv)
{
  result.ev->update(const_cast<double*>(fv), n, sv);
  if (auto deriv=dynamic_cast<DerivativeMixin*>(rhs.get()))
    {
      // ...
      for (size_t i=0; i<rhs->size(); ++i)
        {
          // ... accumulate dFlow/dStock contributions into df
        }
    }
}