minskyTensorOps.cc
1 /*
2  @copyright Steve Keen 2020
3  @author Russell Standish
4  This file is part of Minsky.
5 
6  Minsky is free software: you can redistribute it and/or modify it
7  under the terms of the GNU General Public License as published by
8  the Free Software Foundation, either version 3 of the License, or
9  (at your option) any later version.
10 
11  Minsky is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with Minsky. If not, see <http://www.gnu.org/licenses/>.
18 */
19 
20 #include "minsky.h"
21 #include <classdesc.h>
22 #include "minskyTensorOps.h"
23 #include "interpolateHypercube.h"
24 #include "ravelWrap.h"
25 #include "minsky_epilogue.h"
26 
27 using namespace civita;
28 using namespace boost::gregorian;
29 using namespace boost::posix_time;
30 
31 namespace classdesc
32 {
33  // postpone factory definition to TensorOpFactory()
34  template <> Factory<ITensor, minsky::OperationType::Type>::Factory() {}
35 }
36 
37 
38 namespace minsky
39 {
40  using namespace classdesc;
41 
42  TensorOpFactory tensorOpFactory;
43 
44  struct DerivativeNotDefined: public std::exception
45  {
46  const char* what() const throw() {return "Derivative not defined";}
47  };
48 
49  struct TimeOp: public ITensor, public DerivativeMixin
50  {
51  size_t size() const override {return 1;}
52  double operator[](size_t) const override {return EvalOpBase::t;}
53  Timestamp timestamp() const override {return {};}
54  double dFlow(std::size_t, std::size_t) const override {return 0;}
55  double dStock(std::size_t, std::size_t) const override {return 0;}
56  };
57 
58  // insert a setState virtual call for those that need it
59  struct SetState
60  {
61  virtual ~SetState() {}
62  virtual void setState(const OperationPtr&)=0;
63  };
64 
65  struct OpState: public SetState
66  {
67  OperationPtr state;
68  void setState(const OperationPtr& s) override {state=s;}
69  [[noreturn]] void throw_error(const std::string& msg) const {
70  if (state) state->throw_error(msg);
71  else throw runtime_error(msg);
72  }
73  };
74 
75  // Default template calls the regular legacy double function
76  template <OperationType::Type op> struct MinskyTensorOp: public civita::ElementWiseOp, public DerivativeMixin, public SetState
77  {
78  EvalOp<op> eo;
79  MinskyTensorOp(): ElementWiseOp([this](double x){return eo.evaluate(x);}) {}
80  void setState(const OperationPtr& state) override {eo.state=state;}
81  void setArguments(const std::vector<TensorPtr>& a,const Args&) override
82  {if (!a.empty()) setArgument(a[0],{"",0});}
83  double dFlow(size_t ti, size_t fi) const override {
84  auto deriv=dynamic_cast<DerivativeMixin*>(arg.get());
85  if (!deriv) throw DerivativeNotDefined();
86  if (const double df=deriv->dFlow(ti,fi))
87  return eo.d1((*arg)[ti])*df;
88  return 0;
89  }
90  double dStock(size_t ti, size_t si) const override {
91  auto deriv=dynamic_cast<DerivativeMixin*>(arg.get());
92  if (!deriv) throw DerivativeNotDefined();
93  if (const double ds=deriv->dStock(ti,si))
94  return eo.d1((*arg)[ti])*ds;
95  return 0;
96  }
97  };
98 
99  template <OperationType::Type op> struct TensorBinOp: civita::BinOp, public DerivativeMixin, public SetState
100  {
101  EvalOp<op> eo;
102  TensorBinOp(): BinOp([this](double x,double y){return eo.evaluate(x,y);}) {}
103  void setState(const OperationPtr& state) override {eo.state=state;}
104  void setArguments(const TensorPtr& a1, const TensorPtr& a2,
105  const ITensor::Args& args={"",0}) override
106  {
107  if (!a1 || a1->rank()==0 || !a2 || a2->rank()==0 || a1->hypercube()==a2->hypercube())
108  civita::BinOp::setArguments(a1,a2,args);
109  else
110  {
111  // pivot a1, a2 such that the common axes are at the end (resp. beginning)
112  auto pivotArg1=make_shared<Pivot>(), pivotArg2=make_shared<Pivot>();
113  pivotArg1->setArgument(a1,{});
114  pivotArg2->setArgument(a2,{});
115 
116  set <string> a2Axes;
117  for (auto& xv: a2->hypercube().xvectors) a2Axes.insert(xv.name);
118 
119  // compute pivot orders and spread dimensions
120  std::vector<string> a1Order, common;
121  Hypercube hcSpread1, hcSpread2;
122  for (auto& i: a1->hypercube().xvectors)
123  if (a2Axes.contains(i.name))
124  {
125  common.push_back(i.name);
126  a2Axes.erase(i.name);
127  }
128  else
129  {
130  a1Order.push_back(i.name);
131  hcSpread2.xvectors.push_back(i);
132  }
133  for (auto& xv: a2->hypercube().xvectors)
134  if (a2Axes.contains(xv.name))
135  hcSpread1.xvectors.push_back(xv);
136  const size_t numCommonAxes=common.size();
137 
138  // append common dimensions to make up a1 final order
139  a1Order.insert(a1Order.end(), common.begin(), common.end());
140  pivotArg1->setOrientation(a1Order);
141  // add in remaining a2 axes to make up a2 order, reusing common.
142  common.insert(common.end(), a2Axes.begin(), a2Axes.end());
143  pivotArg2->setOrientation(common);
144 
145  // now spread pivoted arguments across remaining dimensions
146  auto spread1=make_shared<SpreadLast>();
147  spread1->setArgument(pivotArg1,{});
148  spread1->setSpreadDimensions(hcSpread1);
149  auto spread2=make_shared<SpreadFirst>();
150  spread2->setArgument(pivotArg2,{});
151  spread2->setSpreadDimensions(hcSpread2);
152 
153 #ifndef NDEBUG
154  {
155  auto& xv1=spread1->hypercube().xvectors;
156  auto& xv2=spread2->hypercube().xvectors;
157  assert(xv1.size()==xv2.size());
158  for (size_t i=0; i<xv1.size(); ++i)
159  {
160  assert(xv1[i].name==xv2[i].name);
161  assert(xv1[i].dimension.type==xv2[i].dimension.type);
162  }
163  }
164 #endif
165 
166  if (spread1->hypercube()==spread2->hypercube())
167  {
168  setArguments(spread1, spread2);
169  // calculate the sparsity pattern of the result
170  map<size_t,set<size_t>> p2i;
171  auto& xv=pivotArg2->hypercube().xvectors;
172  size_t commonElements=1;
173  for (size_t i=0; i<numCommonAxes; ++i) commonElements*=xv[i].size();
174  const size_t p1NumElements=pivotArg1->hypercube().numElements();
175  const size_t p1ExtraElements=p1NumElements/commonElements;
176  const size_t p2ExtraElements=pivotArg2->hypercube().numElements()/commonElements;
177  for (auto i: pivotArg2->index())
178  {
179  checkCancel();
180  auto r=lldiv(i,commonElements);
181  p2i[r.rem].insert(r.quot);
182  }
183  set<size_t> index;
184  for (auto i: pivotArg1->index())
185  for (size_t j=0; j<p2ExtraElements; ++j) // loop over all elements that extend pivot1
186  {
187  checkCancel();
188  auto s=p2i.find(i/p1ExtraElements);
189  if (s!=p2i.end())
190  if (s->second.contains(j))
191  index.insert(i+p1NumElements*j);
192  }
193  m_index=index;
194  }
195  else
196  { // hypercubes not equal, interpolate the second argument
197  Hypercube unionHC=spread1->hypercube();
198  civita::unionHypercube(unionHC,spread2->hypercube());
199  TensorPtr arg1=spread1, arg2=spread2;
200  spread1->setIndex();
201  spread2->setIndex();
202  if (unionHC!=spread1->hypercube())
203  {
204  auto interpolate=make_shared<PivotedInterpolateHC>();
205  interpolate->hypercube(unionHC);
206  interpolate->setArgument(spread1,{});
207  arg1=interpolate;
208  }
209  if (unionHC!=spread2->hypercube())
210  {
211  auto interpolate=make_shared<PivotedInterpolateHC>();
212  interpolate->hypercube(unionHC);
213  interpolate->setArgument(spread2,{});
214  arg2=interpolate;
215  }
216  civita::BinOp::setArguments(arg1, arg2, args);
217  }
218  }
219  }
220  double dFlow(size_t ti, size_t fi) const override {
221  auto deriv1=dynamic_cast<DerivativeMixin*>(arg1.get());
222  auto deriv2=dynamic_cast<DerivativeMixin*>(arg2.get());
223  if (!deriv1 || !deriv2) throw DerivativeNotDefined();
224  double r=0;
225  if (const double df=deriv1->dFlow(ti,fi))
226  r += eo.d1((*arg1)[ti])*df;
227  if (const double df=deriv2->dFlow(ti,fi))
228  r += eo.d2((*arg2)[ti])*df;
229  return r;
230  }
231  double dStock(size_t ti, size_t si) const override {
232  auto deriv1=dynamic_cast<DerivativeMixin*>(arg1.get());
233  auto deriv2=dynamic_cast<DerivativeMixin*>(arg2.get());
234  if (!deriv1 || !deriv2) throw DerivativeNotDefined();
235  double r=0;
236  if (const double ds=deriv1->dStock(ti,si))
237  r += eo.d1((*arg1)[ti])*ds;
238  if (const double ds=deriv2->dStock(ti,si))
239  r += eo.d2((*arg2)[ti])*ds;
240  return r;
241  }
242  };
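// Sketch of the broadcast above, using hypothetical axis names: for a1 over
// (country,year) and a2 over (year,sector), "year" is the common axis. a1 is
// pivoted to (country,year) and spread over "sector"; a2 is pivoted to
// (year,sector) and spread over "country", so both arguments share the
// hypercube (country,year,sector) and BinOp can proceed elementwise. If the
// shared axes' slice labels differ, the arguments are interpolated onto the
// union hypercube instead.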
243 
244  template <OperationType::Type op> struct AccumArgs;
245 
246  template <> struct AccumArgs<OperationType::add>: public civita::ReduceArguments
247  {
248  AccumArgs(): civita::ReduceArguments([](double& x,double y){x+=y;},0) {}
249  };
250  template <> struct AccumArgs<OperationType::subtract>: public AccumArgs<OperationType::add> {};
251 
252  template <> struct AccumArgs<OperationType::multiply>: public civita::ReduceArguments
253  {
254  AccumArgs(): civita::ReduceArguments([](double& x,double y){x*=y;},1) {}
255  };
256  template <> struct AccumArgs<OperationType::divide>: public AccumArgs<OperationType::multiply> {};
257 
258  template <> struct AccumArgs<OperationType::min>: public civita::ReduceArguments
259  {
260  AccumArgs(): civita::ReduceArguments([](double& x,double y){if (y<x) x=y;},std::numeric_limits<double>::max()) {}
261  };
262  template <> struct AccumArgs<OperationType::max>: public civita::ReduceArguments
263  {
264  AccumArgs(): civita::ReduceArguments([](double& x,double y){if (y>x) x=y;},-std::numeric_limits<double>::max()) {}
265  };
266 
267  template <> struct AccumArgs<OperationType::and_>: public civita::ReduceArguments
268  {
269  AccumArgs(): civita::ReduceArguments([](double& x,double y){x*=(y>0.5);},1) {}
270  };
271  template <> struct AccumArgs<OperationType::or_>: public civita::ReduceArguments
272  {
273  AccumArgs(): civita::ReduceArguments([](double& x,double y){if (y>0.5) x=1;},0) {}
274  };
275 
276 
277 
278  template <OperationType::Type op> struct MultiWireBinOp: public TensorBinOp<op>
279  {
280  using TensorBinOp<op>::setArguments;
281  void setArguments(const std::vector<TensorPtr>& a1,
282  const std::vector<TensorPtr>& a2,
283  const ITensor::Args&) override
284  {
285  TensorPtr pa1, pa2;
286  if (a1.size()==1)
287  pa1=a1[0];
288  else
289  {
290  pa1 = make_shared<AccumArgs<op>>();
291  pa1->setArguments(a1,{"",0});
292  }
293 
294  if (a2.size()==1)
295  pa2=a2[0];
296  else
297  {
298  pa2 = make_shared<AccumArgs<op>>();
299  pa2->setArguments(a2,{"",0});
300  }
301 
302  TensorBinOp<op>::setArguments(pa1, pa2,{"",0});
303  }
304  };
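// MultiWireBinOp handles multiple wires on either input port: each port's
// wires are first reduced with the op's AccumArgs rule (e.g. the wires on
// both ports of an add or subtract are summed, those of a multiply or divide
// are multiplied), and the two reduced tensors are then passed to TensorBinOp.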
305 
306  template <OperationType::Type op> struct GeneralTensorOp;
307 
308  namespace
309  {
310  template <int I, int J>
311  struct is_equal {const static bool value=I==J;};
312  }
313 
314  //register factory functions for all binary ops
315  template <template<OperationType::Type> class T, int op, int to>
316  typename classdesc::enable_if<Not<is_equal<op, to>>, void>::T
317  registerOps(TensorOpFactory& tensorOpFactory)
318  {
319  tensorOpFactory.registerType<T<OperationType::Type(op)>>(OperationType::Type(op));
320  registerOps<T, op+1, to>(tensorOpFactory);
321  }
322 
323  template <template<OperationType::Type> class T, int op, int to>
324  typename classdesc::enable_if<is_equal<op, to>, void>::T
325  registerOps(TensorOpFactory& tensorOpFactory)
326  {} //terminates recursion
327 
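// registerOps<T,op,to> registers T<o> for every o in the half-open range
// [op,to) of OperationType values, so the calls below partition the operation
// enum into runs sharing a common tensor implementation.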
328  TensorOpFactory::TensorOpFactory()
329  {
330  registerType<TimeOp>(OperationType::time);
331  registerOps<MinskyTensorOp, OperationType::euler, OperationType::add>(*this);
332  registerOps<MultiWireBinOp, OperationType::add, OperationType::log>(*this);
333  registerOps<TensorBinOp, OperationType::log, OperationType::copy>(*this);
334  registerOps<MinskyTensorOp, OperationType::copy, OperationType::sum>(*this);
335  registerOps<GeneralTensorOp, OperationType::sum, OperationType::numOps>(*this);
336  }
337 
338  template <> struct GeneralTensorOp<OperationType::sum>: public civita::Sum {};
339  template <> struct GeneralTensorOp<OperationType::product>: public civita::Product {};
340  template <> struct GeneralTensorOp<OperationType::infimum>: public civita::Min {};
341  template <> struct GeneralTensorOp<OperationType::supremum>: public civita::Max {};
342  template <>
343  struct GeneralTensorOp<OperationType::any>: public civita::ReductionOp
344  {
345  GeneralTensorOp(): civita::ReductionOp([](double& x, double y,size_t){if (y>0.5) x=1;},0){}
346  };
347  template <>
348  struct GeneralTensorOp<OperationType::all>: public civita::ReductionOp
349  {
350  GeneralTensorOp(): civita::ReductionOp([](double& x, double y,size_t){x*=(y>0.5);},1){}
351  };
352 
353  template <>
354  struct GeneralTensorOp<OperationType::runningSum>: public civita::Scan
355  {
356  GeneralTensorOp(): civita::Scan([](double& x,double y,size_t){x+=y;}) {}
357  };
358 
359  template <>
360  struct GeneralTensorOp<OperationType::runningProduct>: public civita::Scan
361  {
362  GeneralTensorOp(): civita::Scan([](double& x,double y,size_t){x*=y;}) {}
363  };
364 
365  template <>
366  struct GeneralTensorOp<OperationType::difference>: public civita::DimensionedArgCachedOp, public OpState
367  {
368  ssize_t delta=0;
369  size_t innerStride=1, outerStride;
370  vector<size_t> argIndices;
371  string errorMsg;
372  void setArgument(const TensorPtr& a,const ITensor::Args& args) override {
373  civita::DimensionedArgCachedOp::setArgument(a,args);
374  errorMsg="";
375  switch (rank())
376  {
377  case 0: return;
378  case 1:
379  dimension=0;
380  break;
381  default:
382  if (dimension>=rank())
383  {
384  errorMsg="axis name needs to be specified in difference operator";
385  return;
386  }
387  break;
388  }
389 
390  delta=args.val;
391  auto dimSize=arg->hypercube().xvectors[dimension].size();
392  if (size_t(abs(delta))>=dimSize)
393  {
394  errorMsg="Δ ("+to_string(abs(delta))+") larger than dimension size ("+to_string(dimSize)+")";
395  return;
396  }
397 
398  // remove initial slice of hypercube
399  auto hc=arg->hypercube();
400 
401  auto& xv=hc.xvectors[dimension];
402  if (size_t(abs(delta))>=xv.size()) return;
403  if (delta>=0)
404  xv.erase(xv.begin(), xv.begin()+delta);
405  else
406  xv.erase(xv.end()+delta, xv.end());
407  cachedResult.hypercube(std::move(hc));
408 
409  // determine offset in hypercube space
410  auto dims=arg->hypercube().dims();
411  innerStride=1;
412  if (dimension<dims.size())
413  {
414  for (size_t i=0; i<dimension; ++i)
415  {
416  delta*=dims[i];
417  innerStride*=dims[i];
418  }
419  outerStride=innerStride*dims[dimension];
420  }
421  else
422  outerStride=arg->hypercube().numElements();
423  auto idx=arg->index();
424  const set<size_t> idxSet(idx.begin(),idx.end());
425  set<size_t> newIdx;
426  const size_t hcSize=cachedResult.hypercube().numElements();
427  for (auto& i: idx)
428  {
429  checkCancel();
430  // strip off any indices outside the output range
431  auto t=ssize_t(i)-delta;
432  if (t>=0 && t<ssize_t(arg->hypercube().numElements()) && idxSet.contains(t) && sameSlice(t,i))
433  {
434  auto linealIndex=hypercube().linealIndex(arg->hypercube().splitIndex(delta>0? t: i));
435  if (linealIndex<hcSize)
436  {
437  argIndices.push_back(i);
438  newIdx.insert(linealIndex);
439  assert(argIndices.size()==newIdx.size());
440  }
441  }
442  }
443  cachedResult.index(Index(newIdx));
444  }
445 
446  bool sameSlice(size_t i, size_t j) const
447  {
448  // original implementation left in place for reference
449  //return cachedResult.rank()<=1 || (i%innerStride==j%innerStride && i/outerStride==j/outerStride);
450  assert(i%innerStride==j%innerStride); // internally, delta should be a multiple of innerStride
451  return abs(ssize_t(i)-ssize_t(j))<outerStride; // simpler version of above
452  }
453 
454  void computeTensor() const override
455  {
456  if (!arg) throw_error("input unwired");
457  if (!errorMsg.empty()) throw_error(errorMsg);
458  if (!argIndices.empty())
459  {
460  assert(argIndices.size()==size());
461  size_t idx=0;
462  for (auto i: argIndices)
463  {
464  checkCancel();
465  cachedResult[idx++]=arg->atHCIndex(i)-arg->atHCIndex(i-delta);
466  }
467  }
468  else if (delta>=0)
469  for (size_t i=0; i<cachedResult.size(); checkCancel(), ++i)
470  {
471  auto ai=arg->hypercube().linealIndex(cachedResult.hypercube().splitIndex(i));
472  auto t=ai+delta*innerStride;
473  if (sameSlice(t, ai))
474  cachedResult[i]=arg->atHCIndex(t)-arg->atHCIndex(ai);
475  else
476  cachedResult[i]=nan("");
477  }
478  else // with -ve delta, origin of result is shifted
479  for (size_t i=0; i<size(); checkCancel(), ++i)
480  {
481  auto ai=arg->hypercube().linealIndex(cachedResult.hypercube().splitIndex(i));
482  auto t=ai-delta;
483  if (sameSlice(t,ai))
484  cachedResult[i]=arg->atHCIndex(ai)-arg->atHCIndex(t);
485  else
486  cachedResult[i]=nan("");
487  }
488  }
489 
490  };
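// Difference semantics, sketched for a dense rank-1 argument x: with Δ>0 the
// first Δ axis labels are dropped and the value at label l is x[l]-x[l-Δ];
// with Δ<0 the last |Δ| labels are dropped and the value at label l is
// x[l]-x[l+|Δ|]. For sparse arguments, setArgument also filters out index
// pairs that fall outside the output range or straddle a slice boundary
// (sameSlice).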
491 
492  template <>
493  struct GeneralTensorOp<OperationType::differencePlus>: public GeneralTensorOp<OperationType::difference>
494  {
495  void setArgument(const TensorPtr& a,const ITensor::Args& args) override {
496  const ITensor::Args negArg={args.dimension,-args.val};
497  GeneralTensorOp<OperationType::difference>::setArgument(a,negArg);
498  }
499  double operator[](std::size_t i) const override
500  {return -GeneralTensorOp<OperationType::difference>::operator[](i);}
501  };
502 
503  template <>
504  struct GeneralTensorOp<OperationType::innerProduct>: public civita::CachedTensorOp, public OpState
505  {
506  std::shared_ptr<ITensor> arg1, arg2;
507  void computeTensor() const override {
508  if (!arg1 || !arg2) return;
509  if (arg1->rank()!=0 && arg2->rank()!=0 &&
510  arg1->hypercube().dims()[arg1->rank()-1]!=arg2->hypercube().dims()[0])
511  throw_error("inner dimensions of tensors do not match");
512 
513  size_t m=1, n=1;
514  if (arg1->rank()>1)
515  for (size_t i=0; i<arg1->rank()-1; i++)
516  m*=arg1->hypercube().dims()[i];
517 
518  if (arg2->rank()>1)
519  for (size_t i=1; i<arg2->rank(); i++)
520  n*=arg2->hypercube().dims()[i];
521 
522  const size_t stride=arg2->rank()>0? arg2->hypercube().dims()[0]: 1;
523  double tmpSum;
524  for (size_t i=0; i< m; i++)
525  for (size_t j=0; j< n; j++)
526  {
527  tmpSum=0;
528  for (size_t k=0; k<stride; checkCancel(), k++)
529  {
530  auto v1=m>1? arg1->atHCIndex(k*m+i) : (*arg1)[k];
531  auto v2=n>1? arg2->atHCIndex(j*stride + k) : (*arg2)[k];
532  if (!isnan(v1) && !isnan(v2)) tmpSum+=v1*v2;
533  }
534  cachedResult[i+m*j]=tmpSum;
535  }
536  }
537  Timestamp timestamp() const override {return max(arg1? arg1->timestamp(): Timestamp(), arg2? arg2->timestamp(): Timestamp());}
538  void setArguments(const TensorPtr& a1, const TensorPtr& a2,
539  const Args&) override {
540  arg1=a1; arg2=a2;
541  if (!arg1 || arg1->rank()==0 || !arg2 || arg2->rank()==0) return;
542  auto xv1=arg1->hypercube().xvectors, xv2=arg2->hypercube().xvectors;
543  Hypercube hc;
544  hc.xvectors.insert(hc.xvectors.begin(), xv2.begin()+1, xv2.end());
545  hc.xvectors.insert(hc.xvectors.begin(), xv1.begin(), xv1.end()-1);
546  cachedResult.hypercube(std::move(hc));
547  }
548  };
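// For rank-2 arguments this is the ordinary matrix product: arg1 is treated
// as an m×k matrix (k the size of its trailing axis), arg2 as a k×n matrix
// (k its leading axis), and cachedResult[i+m*j] accumulates
// sum_k arg1(i,k)*arg2(k,j), with NaN summands skipped.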
549 
550  template <>
551  struct GeneralTensorOp<OperationType::outerProduct>: public civita::CachedTensorOp
552  {
553  std::shared_ptr<ITensor> arg1, arg2;
554  void computeTensor() const override {
555  if (!arg1 || !arg2) return;
556  const size_t m=arg1->size(), n=arg2->size();
557  assert(cachedResult.size()==m*n);
558 
559  for (size_t i=0; i< m; i++)
560  {
561  auto v1=(*arg1)[i];
562  for (size_t j=0; j< n; checkCancel(), j++)
563  {
564  auto v2=(*arg2)[j];
565  cachedResult[i+j*m]=v1*v2;
566  }
567  }
568  }
569  Timestamp timestamp() const override
570  {return max(arg1? arg1->timestamp(): Timestamp(), arg2? arg2->timestamp(): Timestamp());}
571  void setArguments(const TensorPtr& a1, const TensorPtr& a2,
572  const Args&) override {
573  arg1=a1; arg2=a2;
574  if (!arg1 || !arg2) return;
575 
576  set<size_t> newIdx;
577  const size_t stride=arg1->hypercube().numElements();
578 
579  vector<size_t> idx1(arg1->index().begin(), arg1->index().end()), idx2(arg2->index().begin(), arg2->index().end());
580  if (!idx1.empty() || !idx2.empty())
581  {
582  if (idx1.empty()) // dense arg1, generate a corresponding sparse index vector
583  for (size_t i=0; i<arg1->size(); ++i)
584  idx1.push_back(i);
585  if (idx2.empty()) // dense arg2, generate a corresponding sparse index vector
586  for (size_t i=0; i<arg2->size(); ++i)
587  idx2.push_back(i);
588 
589  for (auto& i: idx1)
590  for (auto& j: idx2)
591  checkCancel(), newIdx.insert(i+stride*j);
592 
593  cachedResult.index(Index(newIdx));
594  }
595 
596  auto xv1=arg1->hypercube().xvectors, xv2=arg2->hypercube().xvectors;
597  Hypercube hc;
598  hc.xvectors.insert(hc.xvectors.begin(), xv2.begin(), xv2.end());
599  hc.xvectors.insert(hc.xvectors.begin(), xv1.begin(), xv1.end());
600  cachedResult.hypercube(std::move(hc));
601  }
602  };
603 
604  template <>
605  struct GeneralTensorOp<OperationType::index>: public civita::CachedTensorOp
606  {
607  std::shared_ptr<ITensor> arg;
608  void computeTensor() const override {
609  size_t i=0, j=0;
610  for (; i<arg->size(); checkCancel(), ++i)
611  if ((*arg)[i]>0.5)
612  cachedResult[j++]=i;
613  for (; j<cachedResult.size(); checkCancel(), ++j)
614  cachedResult[j]=nan("");
615  }
616  void setArgument(const TensorPtr& a, const Args&) override {
617  arg=a; cachedResult.index(a->index()); cachedResult.hypercube(a->hypercube());
618  }
619 
620  Timestamp timestamp() const override {return arg? arg->timestamp(): Timestamp();}
621  };
622 
623  template <>
624  struct GeneralTensorOp<OperationType::gather>: public civita::CachedTensorOp, public OpState
625  {
626  std::shared_ptr<ITensor> arg1, arg2;
627  size_t dimension=numeric_limits<size_t>::max();
628  std::vector<size_t> offsets; // offsets for each 1D slice of the tensor
629 
630  double interpolateString(double idx, size_t stride, size_t offset) const
631  {
632  if (arg1->size()==0)
633  throw_error("No data to interpolate");
634  auto maxIdx=arg1->rank()==1? arg1->size()-1: arg1->hypercube().xvectors[dimension].size()-1;
635  if (idx<=-1 || idx>maxIdx)
636  return nan("");
637  if (idx==maxIdx)
638  return arg1->atHCIndex(idx*stride+offset);
639  // expand range to skip over any nan in arg1
640  unsigned lesser=idx, greater=idx+1;
641  double lv=arg1->atHCIndex(lesser*stride+offset), gv=arg1->atHCIndex(greater*stride+offset);
642  for (; lesser>0 && isnan(lv); checkCancel(), --lesser, lv=arg1->atHCIndex(lesser*stride+offset));
643  for (; greater<maxIdx && isnan(gv); checkCancel(), ++greater, gv=arg1->atHCIndex(greater*stride+offset));
644  const double s=(idx-lesser)/(greater-lesser);
645  // special cases to avoid unnecessarily including nans/infs in the computation
646  if (s==0) return lv;
647  if (s==1) return gv;
648  return (1-s)*lv + s*gv;
649  }
650 
651  double interpolateAny(const XVector& xv, const civita::any& x, size_t stride, size_t offset) const
652  {
653  if (xv.size()<2 || diff(x,xv.front())<0 || diff(x,xv.back())>0)
654  return nan("");
655  auto i=xv.begin();
656  for (; diff(x,*(i+1))>0; checkCancel(), ++i); // loop will terminate b/c diff(x,xv.back())<=0
657  // expand i & i+1 to finite values on the hypercube, if possible
658  auto greater=i+1;
659  double lv=arg1->atHCIndex((i-xv.begin())*stride+offset);
660  double gv=arg1->atHCIndex((greater-xv.begin())*stride+offset);
661  for (; i>xv.begin() && isnan(lv); checkCancel(), --i, lv=arg1->atHCIndex((i-xv.begin())*stride+offset));
662  for (; greater<xv.end()-1 && isnan(gv); checkCancel(), ++greater, gv=arg1->atHCIndex((greater-xv.begin())*stride+offset));
663  const double s=diff(x,*i)/diff(*greater,*i);
664  // special cases to avoid unnecessarily including nans/infs in the computation
665  if (s==0) return lv;
666  if (s==1) return gv;
667  return (1-s)*lv + s*gv;
668  }
669 
670  void computeTensor() const override
671  {
672  if (arg1->rank()==0)
673  throw_error("Cannot apply gather to a scalar");
674  if (dimension>=arg1->rank())
675  throw_error("Need to specify which dimension to gather");
676 
677  size_t d=dimension;
678  if (arg1 && d>=arg1->rank()) d=0;
679  if (!arg1 || arg1->hypercube().xvectors.size()<=d) return;
680  auto& xv=arg1->hypercube().xvectors[d];
681 
682  if (arg2->rank()>0)
683  {
684  // recalculate hypercube
685  Hypercube hc=cachedResult.hypercube();
686  auto& xvToGather=arg1->hypercube().xvectors[dimension];
687  for (size_t i=0; !xv.empty() && i<arg2->size() && i<hc.xvectors[0].size(); ++i)
688  {
689  double intPart;
690  const double fracPart=std::modf((*arg2)[i], &intPart);
691  if (intPart>=0)
692  if (intPart<xvToGather.size()-1)
693  hc.xvectors[0][i]=interpolate(xvToGather[intPart],xvToGather[intPart+1],fracPart);
694  else if (intPart<xvToGather.size())
695  hc.xvectors[0][i]=xvToGather[intPart];
696  else
697  hc.xvectors[0][i]=any(xvToGather.dimension.type);
698  else if (intPart==-1)
699  hc.xvectors[0][i]=xvToGather[0];
700  else
701  hc.xvectors[0][i]=any(xvToGather.dimension.type);
702  }
703  cachedResult.hypercube(std::move(hc));
704  }
705 
706  function<double(double,size_t)> interpolate;
707 
708  size_t stride=1;
709  auto dims=arg1->hypercube().dims();
710  for (size_t i=0; i<d; ++i)
711  stride*=dims[i];
712 
713 
714  switch (xv.dimension.type)
715  {
716  case Dimension::string:
717  interpolate=[&](double x, size_t offset){
718  return interpolateString(x,stride,offset);};
719  break;
720  case Dimension::time:
721  interpolate=[&](double x, size_t offset){
722  // interpret as "year" in a common era date (Gregorian calendar)
723  const int year=x;
724  const int daysInYear=(date(year+1,Jan,1)-date(year,Jan,1)).days();
725  const double dayInYearF=daysInYear*(x-year);
726  const int dayInYear=dayInYearF;
727  const ptime xtime(date(year,Jan,1)+date_duration(dayInYear), seconds(int(3600*24*(dayInYearF-dayInYear))));
728  return interpolateAny(xv, xtime, stride, offset);
729  };
730  break;
731  case Dimension::value:
732  interpolate=[&](double x, size_t offset){
733  return interpolateAny(xv,x,stride,offset);};
734  break;
735  }
736 
737  for (size_t j=0; j<offsets.size(); ++j)
738  for (size_t i=0; i<arg2->size(); checkCancel(), ++i)
739  {
740  auto idx=(*arg2)[i];
741  if (isfinite(idx))
742  {
743  assert(i+arg2->size()*j<cachedResult.size());
744  cachedResult[i+arg2->size()*j]=interpolate(idx, offsets[j]);
745  }
746  else
747  cachedResult[i+arg2->size()*j]=nan("");
748  }
749  }
750  Timestamp timestamp() const override {return max(arg1? arg1->timestamp(): Timestamp(), arg2? arg2->timestamp(): Timestamp());}
751  void setArguments(const TensorPtr& a1, const TensorPtr& a2,
752  const Args& args) override {
753 
754  arg1=a1; arg2=a2;
755  if (!arg1 || !arg2) return;
756  {
757  auto& xv=arg1->hypercube().xvectors;
758  dimension=find_if(xv.begin(), xv.end(), [&](const XVector& i)
759  {return i.name==args.dimension;})-xv.begin();
760  }
761 
762  switch (arg1->rank())
763  {
764  case 0:
765  return;
766  case 1:
767  dimension=0;
768  break;
769  default:
770  if (dimension>=arg1->rank()) return;
771  break;
772  }
773 
774  // find reduced dimensions of arg1
775  auto arg1Dims=arg1->hypercube().dims();
776  size_t lowerStride=1;
777  for (size_t i=0; i<dimension; ++i)
778  lowerStride*=arg1Dims[i];
779  size_t upperStride=1;
780  for (size_t i=dimension+1; i<arg1Dims.size(); ++i)
781  upperStride*=arg1Dims[i];
782 
783  // calculate offsets
784  set<size_t> offsetSet;
785  if (arg1->index().empty()) //dense case
786  {
787  for (size_t i=0; i<upperStride; ++i)
788  for (size_t j=0; j<lowerStride; checkCancel(), ++j)
789  offsetSet.insert(lowerStride*i*arg1Dims[dimension]+j);
790  }
791  else
792  for (auto i: arg1->index())
793  {
794  checkCancel();
795  auto splitted=arg1->hypercube().splitIndex(i);
796  splitted[dimension]=0;
797  offsetSet.insert(arg1->hypercube().linealIndex(splitted));
798  }
799  offsets.clear(); offsets.insert(offsets.end(), offsetSet.begin(), offsetSet.end());
800 
801  // resulting hypercube is a tensor product of arg2 and the reduced arg1.
802  const size_t arg2NumElements=arg2->hypercube().numElements();
803  Hypercube hc;
804  if (arg2NumElements>1)
805  {
806  hc.xvectors.push_back(arg1->hypercube().xvectors[dimension]);
807  hc.xvectors[0].resize(arg2NumElements);
808  }
809 
810  hc.xvectors.insert(hc.xvectors.end(), arg1->hypercube().xvectors.begin(), arg1->hypercube().xvectors.begin()+dimension);
811  hc.xvectors.insert(hc.xvectors.end(), arg1->hypercube().xvectors.begin()+dimension+1, arg1->hypercube().xvectors.end());
812 
813  if (!arg1->index().empty() || !arg2->index().empty())
814  {
815  vector<size_t> arg1Idx(arg1->index().begin(), arg1->index().end()),
816  arg2Idx(arg2->index().begin(), arg2->index().end());
817 
818  if (arg1Idx.empty())
819  // no need to duplicate elements along the reduced dimension
820  for (size_t i=0; i<upperStride; ++i)
821  for (size_t j=0; j<lowerStride; checkCancel(), ++j)
822  arg1Idx.push_back(i*lowerStride*arg1Dims[dimension]+j);
823  if (arg2Idx.empty())
824  for (size_t i=0; i<arg2->size(); checkCancel(), ++i) arg2Idx.push_back(i);
825 
826  set<size_t> resultantIndex;
827  size_t lastOuter=numeric_limits<size_t>::max();
828  for (auto i: arg1Idx)
829  {
830  auto splitIdx=arg1->hypercube().splitIndex(i);
831  size_t outerIdx=0, stride=1;
832  for (size_t j=0; j<arg1->rank(); checkCancel(), ++j)
833  if (j!=dimension)
834  {
835  outerIdx+=stride*splitIdx[j];
836  stride*=arg1Dims[j];
837  }
838  if (outerIdx==lastOuter) continue;
839  lastOuter=outerIdx;
840  for (auto j: arg2Idx)
841  checkCancel(), resultantIndex.insert(outerIdx*arg2NumElements+j);
842  }
843  cachedResult.index(resultantIndex);
844  }
845 
846  // relabel any axis that has a duplicate name
847  int axisName=0;
848  set<string> axisNames;
849  for (auto& xv: hc.xvectors)
850  if (!axisNames.insert(xv.name).second)
851  {
852  while (!axisNames.insert(to_string(++axisName)).second); // find a name that hasn't been used
853  xv.name=to_string(axisName);
854  }
855  cachedResult.hypercube(std::move(hc));
856  }
857 
858  };
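// Gather looks up arg1 along the selected axis at positions supplied by arg2,
// linearly interpolating between neighbouring slices and widening past NaNs.
// On a string axis, arg2 holds fractional slice numbers (2.5 ⇒ midway between
// slices 2 and 3); on value and time axes it is interpreted in the axis' own
// coordinates (a time coordinate being a fractional Gregorian year).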
859 
860  template <>
861  struct GeneralTensorOp<OperationType::supIndex>: public civita::ReductionOp
862  {
863  double maxValue; // scratch register for holding current max
864  GeneralTensorOp(): civita::ReductionOp
865  ([this](double& r,double x,size_t i){
866  if (i==0 || x>maxValue) {
867  maxValue=x;
868  r=i;
869  }
870  },0) {}
871  };
872 
873  template <>
874  struct GeneralTensorOp<OperationType::infIndex>: public civita::ReductionOp
875  {
876  double minValue; // scratch register for holding current min
877  GeneralTensorOp(): civita::ReductionOp
878  ([this](double& r,double x,size_t i){
879  if (i==0 || x<minValue) {
880  minValue=x;
881  r=i;
882  }
883  },0) {}
884  };
885 
886  template <>
887  struct GeneralTensorOp<OperationType::size>: public civita::ITensor
888  {
889  TensorPtr arg;
890  size_t dimension=numeric_limits<size_t>::max();
891  void setArgument(const TensorPtr& a,const ITensor::Args& args) override
892  {
893  arg=a;
894  if (a)
895  {
896  for (size_t i=0; i<arg->rank(); ++i)
897  if (arg->hypercube().xvectors[i].name==args.dimension)
898  {
899  dimension=i;
900  break;
901  }
902  }
903  else
904  dimension=numeric_limits<size_t>::max();
905  }
906  double operator[](std::size_t) const override
907  {return dimension<arg->rank()? arg->hypercube().xvectors[dimension].size(): arg->size();}
908  civita::ITensor::Timestamp timestamp() const override {return arg->timestamp();}
909  };
910 
911  template <>
912  struct GeneralTensorOp<OperationType::shape>: public civita::ITensor
913  {
914  TensorPtr arg;
915  int dimension=-1;
916  void setArgument(const TensorPtr& a,const ITensor::Args& args) override
917  {
918  arg=a;
919  if (a)
920  hypercube({arg->rank()});
921  else
922  hypercube({});
923  }
924  double operator[](std::size_t i) const override
925  {return arg? arg->hypercube().xvectors[i].size(): nan("");}
926  civita::ITensor::Timestamp timestamp() const override {return arg->timestamp();}
927  };
928 
929  struct Correlation: public civita::ITensor, public OpState
930  {
931  int dimension1, dimension2;
932  TensorPtr arg1, arg2;
933  string errorMsg;
934  void setArguments(const TensorPtr& a1, const TensorPtr& a2,
935  const ITensor::Args& args) override
936  {
937  arg1=a1? a1: a2;
938  arg2=a2? a2: a1;
939  if (!arg1 || !arg2) return;
940  errorMsg="";
941 
942  Hypercube hc;
943  set<string> dimNames; // for ensuring dimension names are unique
944  switch (arg1->rank())
945  {
946  case 0:
947  errorMsg="covariance or ρ needs at least rank 1 arguments";
948  return;
949  case 1:
950  dimension1=0;
951  break;
952  default:
953  dimension1=-1;
954  for (auto& xv:arg1->hypercube().xvectors)
955  if (xv.name==args.dimension)
956  dimension1=&xv-arg1->hypercube().xvectors.data();
957  else
958  {
959  hc.xvectors.push_back(xv);
960  dimNames.insert(xv.name);
961  }
962  break;
963  }
964 
965  switch (arg2->rank())
966  {
967  case 0:
968  errorMsg="covariance or ρ needs at least rank 1 arguments";
969  return;
970  case 1:
971  dimension2=0;
972  break;
973  default:
974  dimension2=-1;
975  for (auto& xv:arg2->hypercube().xvectors)
976  if (xv.name==args.dimension)
977  dimension2=&xv-arg2->hypercube().xvectors.data();
978  else
979  {
980  hc.xvectors.push_back(xv);
981  if (!dimNames.insert(xv.name).second)
982  hc.xvectors.back().name+="'"; // ensure dimension names are unique
983  }
984  break;
985  }
986 
987  if (dimension1<0 || dimension2<0)
988  {
989  errorMsg="dimension "+args.dimension+" not found";
990  return;
991  }
992  if (arg1->hypercube().xvectors[dimension1].size() != arg2->hypercube().xvectors[dimension2].size())
993  {
994  errorMsg="arguments not conformant";
995  return;
996  }
997 
998  hypercube(hc);
999 
1000  }
1001 
1002  template <class F> void performSum(F f, size_t idx) const
1003  {
1004  if (!errorMsg.empty()) throw_error(errorMsg);
1005  auto splitted=hypercube().splitIndex(idx);
1006  auto splitIndexIterator=splitted.begin();
1007 
1008  auto computeIndexAndStride=[&](size_t& lineal, size_t& stride, size_t dimension, const vector<unsigned>& dims) {
1009  lineal=0; stride=1;
1010  for (size_t i=0, s=1; i<dims.size(); s*=dims[i], ++i)
1011  if (i!=dimension)
1012  lineal+=*splitIndexIterator++ * s;
1013  else
1014  stride=s;
1015  };
1016 
1017  size_t arg1Lineal, arg1Stride, arg2Lineal, arg2Stride;
1018  computeIndexAndStride(arg1Lineal, arg1Stride, dimension1, arg1->hypercube().dims());
1019  computeIndexAndStride(arg2Lineal, arg2Stride, dimension2, arg2->hypercube().dims());
1020 
1021  for (size_t i=0; i<arg1->hypercube().xvectors[dimension1].size(); ++i)
1022  {
1023  auto x=arg1->atHCIndex(arg1Lineal+i*arg1Stride);
1024  auto y=arg2->atHCIndex(arg2Lineal+i*arg2Stride);
1025  if (isfinite(x) && isfinite(y)) f(x,y);
1026  }
1027 
1028  }
1029  Timestamp timestamp() const override
1030  {return max(arg1->timestamp(), arg2->timestamp());}
1031  };
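// performSum walks the reduced dimension for result slice idx: it splits idx
// into per-axis coordinates, rebuilds each argument's lineal index and stride
// with its reduced dimension skipped, and feeds every finite (x,y) pair to f.
// The covariance and correlation ops below accumulate their moment sums
// through it.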
1032 
1033  template <> struct GeneralTensorOp<OperationType::covariance>: public Correlation
1034  {
1035  double operator[](size_t i) const override
1036  {
1037  if (!arg1 || !arg2) return nan("");
1038  double sumXY=0, sumX=0, sumY=0;
1039  size_t count=0;
1040  auto f=[&](double x, double y)
1041  {
1042  sumXY+=x*y;
1043  sumX+=x;
1044  sumY+=y;
1045  count++;
1046  };
1047  performSum(f,i);
1048  return (sumXY-sumX*sumY/count)/(count-1);
1049  }
1050  };
1051 
1052  template <> struct GeneralTensorOp<OperationType::correlation>: public Correlation
1053  {
1054  double operator[](size_t i) const override
1055  {
1056  if (!arg1 || !arg2) return nan("");
1057  double sumXY=0, sumX=0, sumY=0, sumXsq=0, sumYsq=0;
1058  size_t count=0;
1059  auto f=[&](double x, double y)
1060  {
1061  sumXY+=x*y;
1062  sumX+=x;
1063  sumY+=y;
1064  sumXsq+=x*x;
1065  sumYsq+=y*y;
1066  count++;
1067  };
1068  performSum(f,i);
1069  const double invCount=1.0/count;
1070  return (sumXY-sumX*sumY*invCount)/
1071  sqrt((sumXsq-sumX*sumX*invCount)*(sumYsq-sumY*sumY*invCount));
1072  }
1073  };
1074 
1075  // OperationType template parameter is arbitrary, it's going to be overridden anyway
1076  template <> struct GeneralTensorOp<OperationType::linearRegression>: public ITensor, public OpState
1077  {
1078  TensorPtr x, y;
1079  std::size_t dimension;
1080  Sum sumx, sumy, sumxx, sumyy, sumxy, count;
1081 
1082  // allow these members to be updated by computeScaleAndOffset
1083  mutable TensorVal scale, offset;
1084  mutable Timestamp m_timestamp;
1085 
1086  void computeScaleAndOffset() const {
1087  for (size_t i=0; i<scale.size(); checkCancel(), ++i)
1088  {
1089  auto n=count[i];
1090  auto sx=sumx[i];
1091  scale[i]=(n*sumxy[i] - sx*sumy[i])/(n*sumxx[i]-sx*sx);
1092  offset[i]=sumy[i]/n-scale[i]*sx/n;
1093  }
1094  if (state && scale.size()==1 &&
1095  (state->tooltip().empty()||state->tooltip().starts_with("y=")))
1096  state->tooltip("y="+to_string(scale[0])+"x + "+to_string(offset[0]));
1097  m_timestamp=timestamp();
1098  }
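// This is the closed-form least-squares fit, computed per slice: with
// n=count, scale = (n·Σxy − Σx·Σy)/(n·Σxx − (Σx)²) and offset = ȳ − scale·x̄.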
1099 
1100  // return y spread over x's hypercube. Note OperationType template parameter ignored
1101  struct SpreadY: public TensorBinOp<OperationType::add>
1102  {
1103  SpreadY() {f=[](double x,double y){return y;};}
1104  };
1105 
1106  void setArguments(const TensorPtr& y, const TensorPtr& x,
1107  const ITensor::Args& args) override
1108  {
1109  m_index=y->index();
1110  hypercube(y->hypercube());
1111 
1112  {
1113  auto& xv=m_hypercube.xvectors;
1114  dimension=rank()>1? rank(): 0;
1115  for (auto i=xv.begin(); i!=xv.end(); ++i)
1116  if (i->name==args.dimension)
1117  dimension=i-xv.begin();
1118  }
1119 
1120  sumy.setArgument(y,args);
1121  TensorPtr spreadX;
1122  if (x)
1123  {
1124  spreadX=make_shared<SpreadY>();
1125  spreadX->setArguments(y,x,{});
1126  }
1127  else
1128  {
1129  if (rank()>1 && dimension>=rank()) return;
1130  // construct x from y's x-vector
1131  auto tv=make_shared<TensorVal>();
1132  spreadX=tv;
1133  tv->index(y->index());
1134  auto& hc=y->hypercube();
1135  tv->hypercube(y->hypercube());
1136  auto& xv=hc.xvectors[dimension];
1137  for (size_t i=0; i<tv->size(); checkCancel(), ++i)
1138  {
1139  auto slice=hc.splitIndex(tv->index()[i])[dimension];
1140  switch (xv.dimension.type)
1141  {
1142  case Dimension::string:
1143  (*tv)[i]=slice;
1144  break;
1145  case Dimension::value:
1146  (*tv)[i]=xv[slice].value;
1147  break;
1148  case Dimension::time:
1149  (*tv)[i]=(xv[slice].time-ptime(date(1970,Jan,1))).total_microseconds()*1E-6;
1150  break;
1151  }
1152  }
1153  }
1154  sumx.setArgument(spreadX,args);
1155  auto fxy=[](double x, double y){return isfinite(x) && isfinite(y)? x*y: 0;};
1156  sumyy.setArgument(make_shared<BinOp>(fxy,y,y),args);
1157  sumxx.setArgument(make_shared<BinOp>(fxy,spreadX,spreadX),args);
1158  sumxy.setArgument(make_shared<BinOp>(fxy,y,spreadX),args);
1159  count.setArgument
1160  (make_shared<BinOp>([](double x,double y) {return isfinite(x)*isfinite(y);},y,spreadX),args);
1161 
1162  assert(sumx.hypercube()==sumy.hypercube());
1163  assert(sumx.index()==sumy.index());
1164 
1165  scale.index(sumx.index());
1166  scale.hypercube(sumx.hypercube());
1167  offset.index(sumx.index());
1168  offset.hypercube(sumx.hypercube());
1169 
1170  this->x=spreadX;
1171  this->y=y;
1172  }
1173 
1174  double operator[](size_t i) const override
1175  {
1176  if (rank()>1 && dimension>=rank())
1177  throw_error("Need to specify axis");
1178  if (!x) return nan("");
1179  assert(dimension<rank() || scale.size()==1);
1180 
1181  if (timestamp()>m_timestamp) computeScaleAndOffset();
1182 
1183  if (dimension<rank())
1184  {
1185  auto splitted=hypercube().splitIndex(i);
1186  splitted.erase(splitted.begin()+dimension);
1187  auto hcIdx=scale.hypercube().linealIndex(splitted);
1188  return scale.atHCIndex(hcIdx) * (*x)[i] + offset.atHCIndex(hcIdx);
1189  }
1190  return scale[0]* (*x)[i] + offset[0];
1191  }
1192 
1193  civita::ITensor::Timestamp timestamp() const override {return std::max(x->timestamp(), y->timestamp());}
1194 
1195 
1196  };
1197 
1198 
1199  template <> struct GeneralTensorOp<OperationType::mean>: public civita::Average {};
1200  template <> struct GeneralTensorOp<OperationType::stdDev>: public civita::StdDeviation {};
1201 
1202  template <> struct GeneralTensorOp<OperationType::median>: public civita::ReductionOp
1203  {
1204  mutable vector<double> values;
1205  GeneralTensorOp(): civita::ReductionOp([this](double&,double y,std::size_t) {values.push_back(y);},0) {}
1206  double operator[](size_t i) const override
1207  {
1208  values.clear();
1209  civita::ReductionOp::operator[](i);
1210  if (values.empty()) return nan("");
1211  sort(values.begin(),values.end());
1212  const size_t halfIndex=values.size()/2;
1213  return (values.size()&1)? values[halfIndex]: 0.5*(values[halfIndex-1]+values[halfIndex]);
1214  }
1215  };
1216 
1217  template <> struct GeneralTensorOp<OperationType::moment>: public civita::ReductionOp
1218  {
1219  double exponent;
1220  mutable double mean;
1221  mutable size_t count;
1222  civita::Average average;
1223  GeneralTensorOp(): civita::ReductionOp
1224  ([this](double& x,double y,std::size_t) {
1225  ++count;
1226  x+=pow(y-mean, exponent);
1227  },0) {}
1228  void setArgument(const TensorPtr& arg, const ITensor::Args& args) override {
1229  civita::ReductionOp::setArgument(arg,args);
1230  average.setArgument(arg,args);
1231  exponent=args.val;
1232  }
1233  double operator[](size_t i) const override
1234  {
1235  count=0;
1236  mean=average[i];
1237  auto sum=civita::ReductionOp::operator[](i);
1238  return sum/count;
1239  }
1240  };
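// Computes the central moment of order args.val: the mean comes from the
// cached Average, and sum((y−mean)^exponent)/count is returned.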
1241 
1242  template <> struct GeneralTensorOp<OperationType::histogram>: public civita::DimensionedArgCachedOp
1243  {
1244  void setArgument(const TensorPtr& a, const ITensor::Args& args) override
1245  {
1246  civita::DimensionedArgCachedOp::setArgument(a,args);
1247  Hypercube hc;
1248  if (a && a->rank()>0 && argVal>=1)
1249  {
1250  // fake the hypercube for now, recomputed in computeTensor
1251  if (dimension>a->rank()) // result is a vector
1252  hc.dims({unsigned(argVal)});
1253  else // rewrite the named dimension
1254  {
1255  hc=a->hypercube();
1256  auto& xv=hc.xvectors[dimension];
1257  xv.clear();
1258  for (size_t i=0; i<argVal; ++i)
1259  xv.push_back(i);
1260  }
1261  }
1262  cachedResult.hypercube(std::move(hc));
1263  }
1264 
1265  void computeTensor() const override
1266  {
1267  // first compute max/min over the whole dataset
1268  double min=numeric_limits<double>::max(), max=-numeric_limits<double>::max();
1269  for (size_t i=0; i<arg->size(); checkCancel(), ++i)
1270  {
1271  min=std::min((*arg)[i],min);
1272  max=std::max((*arg)[i],max);
1273  }
1274  max*=1.01; // ensure that actual maximum value is mapped to a bin.
1275 
1276  auto binSize=(max-min)/argVal;
1277  if (arg->rank()==0 || binSize==0)
1278  {
1279  cachedResult.hypercube({});
1280  cachedResult[0]=1;
1281  return;
1282  }
1283 
1284  // adjust the hypercube
1285  auto hc=arg->hypercube();
1286  auto dim=dimension;
1287  if (dimension>=hc.rank()) // global histogram over all dimensions
1288  {
1289  hc.xvectors.resize(1);
1290  dim=0;
1291  }
1292 
1293  auto& xv=hc.xvectors[dim];
1294  xv.name="histogram";
1295  xv.dimension=Dimension(Dimension::value,"");
1296  xv.clear();
1297  for (double x=min+0.5*binSize; x<max; x+=binSize)
1298  xv.push_back(x);
1299  cachedResult.hypercube(std::move(hc));
1300  for (size_t i=0; i<cachedResult.size(); checkCancel(), ++i) cachedResult[i]=0;
1301 
1302  auto iBinSize=1/binSize;
1303  if (cachedResult.rank()>1) // histogram along a particular dimension
1304  for (size_t i=0; i<arg->size(); checkCancel(), ++i)
1305  {
1306  auto splitted=arg->hypercube().splitIndex(i);
1307  splitted[dim]=((*arg)[i]-min)*iBinSize;
1308  cachedResult[cachedResult.hypercube().linealIndex(splitted)]+=1;
1309  }
1310  else // histogram over the lot
1311  for (size_t i=0; i<arg->size(); checkCancel(), ++i)
1312  {
1313  auto index=((*arg)[i]-min)*iBinSize;
1314  cachedResult[index]+=1;
1315  }
1316  }
1317  };
1318 
1319 
1320  namespace {
1321  // arrange for arguments to be expanded over a common hypercube
1322  void meldArgsIntoCommonHypercube(vector<TensorPtr>& args) {
1323  Hypercube hc;
1324  for (auto& i: args)
1325  unionHypercube(hc, i->hypercube(), false);
1326 
1327  // list of final dimension names in order
1328  vector<string> unionDims; unionDims.reserve(hc.xvectors.size());
1329  for (auto& i: hc.xvectors) unionDims.push_back(i.name);
1330 
1331  // create tensor chains for each argument to permute it into the common hypercube
1332  for (auto& i: args)
1333  {
1334  set <string> argDims;
1335  for (auto& j: i->hypercube().xvectors) argDims.insert(j.name);
1336  auto spread=make_shared<SpreadLast>();
1337  Hypercube spreadHC;
1338  for (auto& j: hc.xvectors)
1339  if (!argDims.contains(j.name))
1340  spreadHC.xvectors.push_back(j);
1341  spread->setArgument(i,{});
1342  spread->setSpreadDimensions(spreadHC);
1343 
1344  auto pivot=make_shared<Pivot>();
1345  pivot->setArgument(spread,{});
1346  pivot->setOrientation(unionDims);
1347 
1348  if (pivot->hypercube()==hc)
1349  i=pivot;
1350  else
1351  {
1352  auto spreadOverHC=make_shared<SpreadOverHC>();
1353  spreadOverHC->hypercube(hc);
1354  spreadOverHC->setArgument(pivot,{});
1355  i=spreadOverHC;
1356  }
1357  }
1358  }
1359  }
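// E.g. melding a tensor over (year) with one over (sector): the union
// hypercube is (year,sector); each argument is spread over its missing axis,
// pivoted into the union's axis order, and padded onto the union hypercube
// if its slice labels don't already match.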
1360 
1361  template <>
1362  struct GeneralTensorOp<OperationType::meld>: public civita::Meld
1363  {
1364  using civita::Meld::setArguments;
1365  void setArguments(const std::vector<TensorPtr>& a1,
1366  const std::vector<TensorPtr>& a2,
1367  const Args& opArgs) override
1368  {
1369  if (a1.empty() && a2.empty()) return;
1370  vector<TensorPtr> args=a1;
1371  args.insert(args.end(), a2.begin(), a2.end());
1372  meldArgsIntoCommonHypercube(args);
1373  civita::Meld::setArguments(args,opArgs);
1374  }
1375  };
1376 
1377  template <>
1378  struct GeneralTensorOp<OperationType::merge>: public civita::Merge, public OpState
1379  {
1380  using civita::Merge::setArguments;
1381  void setArguments(const std::vector<TensorPtr>& a1,
1382  const std::vector<TensorPtr>& a2,
1383  const Args& opArgs) override
1384  {
1385  if (a1.empty() && a2.empty()) return;
1386  vector<TensorPtr> args=a1;
1387  args.insert(args.end(), a2.begin(), a2.end());
1388  meldArgsIntoCommonHypercube(args);
1389  civita::Merge::setArguments(args,opArgs);
1390 
1391  if (!hypercube().dimsAreDistinct())
1392  throw_error("Please use a distinct name for the synthetic dimension produced by this operation");
1393 
1394  // relabel slices along new dimension with variable names if available
1395  int stream=0;
1396  for (size_t i=0; state && i<state->portsSize(); ++i)
1397  if (auto p=state->ports(i).lock())
1398  if (p->input())
1399  for (auto w:p->wires())
1400  if (auto from=w->from())
1401  {
1402  auto v=from->item().variableCast();
1403  if (v && !v->name().empty())
1404  m_hypercube.xvectors.back()[stream]=latexToPango(v->name());
1405  else
1406  m_hypercube.xvectors.back()[stream]=to_string(stream);
1407  stream++;
1408  }
1409  }
1410 
1411  };
1412 
1413  template <> struct GeneralTensorOp<OperationType::slice>: public civita::PermuteAxis {
1414  public:
1415  void setArgument(const TensorPtr& arg, const Args& args) override
1416  {
1417  civita::PermuteAxis::setArgument(arg,args);
1418  // now construct the permutation corresponding to the slice
1419  const int slice=args.val;
1420  vector<size_t> permutation;
1421  auto argSize=arg->hypercube().xvectors[axis()].size();
1422  if (slice<0) // negative slices refer to the end (python style)
1423  for (int i=slice; i<0; ++i)
1424  permutation.push_back(argSize+i);
1425  else
1426  for (int i=0; i<slice; ++i)
1427  permutation.push_back(i);
1428  setPermutation(std::move(permutation));
1429  }
1430  };
1431 
1432  class SwitchTensor: public ITensor, public OpState
1433  {
1434  size_t m_size=1;
1435  vector<TensorPtr> args;
1436  public:
1437  void setArguments(const std::vector<TensorPtr>& a,const Args& av={"",0}) override {
1438  args=a;
1439  if (args.size()<2)
1440  hypercube(Hypercube());
1441  else
1442  hypercube(args[1]->hypercube());
1443 
1444  m_size=1;
1445  set<size_t> indices;
1446  for (auto& i: args)
1447  {
1448  for (auto& j: i->index())
1449  checkCancel(), indices.insert(j);
1450  if (i->size()>1)
1451  {
1452  if (m_size==1)
1453  m_size=i->size();
1454  else if (m_size!=i->size())
1455  // TODO - should we check and throw on nonconformant hypercubes?
1456  throw_error("nonconformant tensor arguments in switch");
1457  }
1458  }
1459  m_index=indices;
1460  if (!m_index.empty()) m_size=m_index.size();
1461  }
1462  size_t size() const override {return m_size;}
1463  Timestamp timestamp() const override {
1464  Timestamp t;
1465  for (auto& i: args)
1466  {
1467  auto tt=i->timestamp();
1468  if (tt>t) t=tt;
1469  }
1470  return t;
1471  }
1472  double operator[](size_t i) const override {
1473  if (args.size()<2) return nan("");
1474 
1475  double selector=0;
1476  if (args[0])
1477  {
1478  if (args[0]->rank()==0) // scalar selector, so broadcast
1479  selector = (*args[0])[0];
1480  else
1481  selector = (*args[0])[i];
1482  }
1483  const ssize_t idx = selector+1.5; // selector selects between args 1..n
1484 
1485  if (idx>0 && idx<int(args.size()))
1486  {
1487  if (args[idx]->rank()==0)
1488  return (*args[idx])[0];
1489  return args[idx]->atHCIndex(index()[i]);
1490  }
1491  return nan("");
1492  }
1493  };
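// The selector is offset and truncated (idx=selector+1.5), so selector values
// in [-0.5,0.5) choose args[1], [0.5,1.5) choose args[2], and so on; anything
// out of range yields NaN.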
1494 
1495  class RavelTensor: public civita::ITensor
1496  {
1497  const Ravel& ravel;
1498  mutable vector<TensorPtr> chain;
1499  TensorPtr arg;
1500  mutable Timestamp m_timestamp;
1501 
1503  public:
1504  RavelTensor(const Ravel& ravel): ravel(ravel) {}
1505 
1506  void setArgument(const TensorPtr& a,const Args&) override {
1507  // not sure how to avoid this const cast here
1508  arg=a;
1509  const_cast<Ravel&>(ravel).populateHypercube(a->hypercube());
1510  chain=ravel::createRavelChain(ravel.getState(), a);
1511  }
1512 
1513  double operator[](size_t i) const override {
1514  double v=0;
1515  if (!chain.empty())
1516  {
1517  v=(*chain.back())[i];
1518  if (m_timestamp<chain.back()->timestamp())
1519  { // update hypercube if argument has changed
1520  const_cast<Ravel&>(ravel).populateHypercube(arg->hypercube());
1521  chain=ravel::createRavelChain(ravel.getState(), arg);
1522  m_timestamp=Timestamp::clock::now();
1523  }
1524  }
1525  return v;
1526  }
1527  size_t size() const override {return chain.empty()? 1: chain.back()->size();}
1528  const Index& index() const override
1529  {return chain.empty()? m_index: chain.back()->index();}
1530  Timestamp timestamp() const override
1531  {return chain.empty()? Timestamp(): chain.back()->timestamp();}
1532  const Hypercube& hypercube() const override {return chain.empty()? m_hypercube: chain.back()->hypercube();}
1533  };
1534 
1535  namespace
1536  {
1537  // used to mark the exception as already dealt with, in terms of displayErrorItem
1538  struct TensorOpError: public runtime_error
1539  {
1540  TensorOpError(const string& msg): runtime_error(msg) {}
1541  };
1542  }
1543 
1544  std::shared_ptr<ITensor> TensorOpFactory::create
1545  (const ItemPtr& it, const TensorsFromPort& tfp)
1546  {
1547  if (auto ravel=dynamic_cast<const Ravel*>(it.get()))
1548  {
1549  auto r=make_shared<RavelTensor>(*ravel);
1550  r->setArguments(tfp.tensorsFromPorts(*it));
1551  return r;
1552  }
1553  if (auto op=it->operationCast())
1554  try
1555  {
1556  TensorPtr r{create(op->type())};
1557  if (auto ss=dynamic_cast<SetState*>(r.get()))
1558  ss->setState(dynamic_pointer_cast<OperationBase>(it));
1559  switch (op->portsSize())
1560  {
1561  case 2:
1562  r->setArguments(tfp.tensorsFromPort(*op->ports(1).lock()),{op->axis,op->arg});
1563  break;
1564  case 3:
1565  r->setArguments(tfp.tensorsFromPort(*op->ports(1).lock()), tfp.tensorsFromPort(*op->ports(2).lock()),{op->axis,op->arg});
1566  break;
1567  }
1568  return r;
1569  }
1570  catch (const InvalidType&)
1571  {return {};}
1572  catch (const TensorOpError&)
1573  {throw;}
1574  catch (const FallBackToScalar&)
1575  {throw;}
1576  catch (const std::exception& ex)
1577  {
1578  // mark op on canvas, and rethrow as TensorOpError to indicate op is marked
1579  minsky().displayErrorItem(*op);
1580  throw TensorOpError(ex.what());
1581  }
1582  else if (auto v=it->variableCast())
1583  return make_shared<ConstTensorVarVal>(v->vValue(), tfp.ev);
1584  else if (auto sw=dynamic_cast<const SwitchIcon*>(it.get()))
1585  {
1586  auto r=make_shared<SwitchTensor>();
1587  r->setArguments(tfp.tensorsFromPorts(*it));
1588  return r;
1589  }
1590  else if (auto l=dynamic_cast<const Lock*>(it.get()))
1591  {
1592  if (l->locked())
1593  {
1594  if (auto r=l->ravelInput())
1595  if (auto p=r->ports(1).lock())
1596  {
1597  assert(!tfp.tensorsFromPort(*p).empty());
1598  auto chain=createRavelChain(l->lockedState, tfp.tensorsFromPort(*p)[0]);
1599  if (!chain.empty())
1600  return chain.back();
1601  }
1602  }
1603  else
1604  if (auto tensors=tfp.tensorsFromPort(*l->ports(1).lock()); !tensors.empty())
1605  return tensors.front();
1606  }
1607  return {};
1608  }
1609 
1610  vector<TensorPtr> TensorsFromPort::tensorsFromPort(const Port& p) const
1611  {
1612  if (!p.input()) return {};
1613  vector<TensorPtr> r;
1614  for (auto w: p.wires())
1615  {
1616  Item& item=w->from()->item();
1617  if (auto o=item.operationCast())
1618  {
1619  if (o->type()==OperationType::differentiate)
1620  {
1621  // check if we're differentiating a scalar or tensor
1622  // expression, and throw accordingly
1623  auto rhs=tensorsFromPort(*o->ports(1).lock());
1624  if (rhs.empty() || rhs[0]->size()==1)
1625  throw FallBackToScalar();
1626  // TODO - implement symbolic differentiation of
1627  // tensor operations
1628  throw std::runtime_error("Tensor derivative not implemented");
1629  }
1630  }
1631  if (auto tensorOp=tensorOpFactory.create(item.itemPtrFromThis(), *this))
1632  r.push_back(tensorOp);
1633  }
1634  return r;
1635  }
1636 
1637 
1638  vector<TensorPtr> TensorsFromPort::tensorsFromPorts(const Item& item) const
1639  {
1640  vector<TensorPtr> r;
1641  for (size_t i=0; i<item.portsSize(); ++i)
1642  if (auto p=item.ports(i).lock())
1643  if (p->input())
1644  {
1645  auto tensorArgs=tensorsFromPort(*p);
1646  r.insert(r.end(), tensorArgs.begin(), tensorArgs.end());
1647  }
1648  return r;
1649  }
1650 
1651  TensorEval::TensorEval(const shared_ptr<VariableValue>& dest, const shared_ptr<VariableValue>& src):
1652  result(dest,make_shared<EvalCommon>())
1653  {
1654  result.index(src->index());
1655  result.hypercube(src->hypercube());
1656  const OperationPtr tmp(OperationType::copy);
1657  auto copy=dynamic_pointer_cast<ITensor>(tensorOpFactory.create(tmp));
1658  copy->setArgument(make_shared<ConstTensorVarVal>(src,result.ev));
1659  rhs=std::move(copy);
1660  assert(result.size()==rhs->size());
1661  }
1662 
1663  void TensorEval::eval(double* fv, size_t n, const double* sv)
1664  {
1665  if (rhs)
1666  {
1667  assert(result.idx()>=0);
1668  const bool fvIsGlobalFlowVars=fv==ValueVector::flowVars.data();
1669  result.index(rhs->index());
1670  result.hypercube(rhs->hypercube());
1671  if (fvIsGlobalFlowVars) // hypercube operation may have resized flowVars, invalidating fv
1672  {
1673  fv=ValueVector::flowVars.data();
1674  n=ValueVector::flowVars.size();
1675  }
1676  else if (n!=ValueVector::flowVars.size())
1677  throw FlowVarsResized();
1678  result.ev->update(fv, n, sv);
1679  assert(result.size()==rhs->size());
1680  for (size_t i=0; i<rhs->size(); ++i)
1681  {
1682  auto v=(*rhs)[i];
1683  result[i]=v;
1684  assert(!isfinite(result[i]) || fv[result.idx()+i]==v);
1685  }
1686  }
1687  }
1688 
1689  void TensorEval::deriv(double* df, size_t n, const double* ds,
1690  const double* sv, const double* fv)
1691  {
1692  if (result.idx()<0) return;
1693  if (rhs)
1694  {
1695  result.ev->update(const_cast<double*>(fv), n, sv);
1696  if (auto deriv=dynamic_cast<DerivativeMixin*>(rhs.get()))
1697  {
1698  assert(result.idx()+rhs->size()<=n);
1699  for (size_t i=0; i<rhs->size(); ++i)
1700  {
1701  df[result.idx()+i]=0;
1702  for (int j=0; j<result.idx(); ++j)
1703  df[result.idx()+i] += df[j]*deriv->dFlow(i,j);
1704  // skip self variables
1705  for (size_t j=result.idx()+result.size(); j<ValueVector::flowVars.size(); ++j)
1706  df[result.idx()+i] += df[j]*deriv->dFlow(i,j);
1707  for (size_t j=0; j<ValueVector::stockVars.size(); ++j)
1708  df[result.idx()+i] += ds[j]*deriv->dStock(i,j);
1709  }
1710  }
1711  }
1712  }
1713 }
1714 
void setState(const OperationPtr &state) override
double dFlow(size_t ti, size_t fi) const override
std::vector< TensorPtr > tensorsFromPort(const Port &) const
returns vector of tensor ops for all wires attach to port. Port must be an input port ...
void meldArgsIntoCommonHypercube(vector< TensorPtr > &args)
A place to store common data shared between TensorVarVals within a give calculation.
Creation and access to the minskyTCL_obj object, which has code to record whenever Minsky&#39;s state cha...
Definition: constMap.h:22
void setArguments(const TensorPtr &a1, const TensorPtr &a2, const ITensor::Args &args={"", 0}) override
void setArguments(const TensorPtr &a1, const TensorPtr &a2, const Args &) override
void setArgument(const TensorPtr &a, const ITensor::Args &) override
Definition: tensorOp.cc:184
std::chrono::time_point< std::chrono::high_resolution_clock > Timestamp
Timestamp timestamp() const override
timestamp indicating how old the dependendent data might be. Used in CachedTensorOp to determine when...
double dFlow(size_t ti, size_t fi) const override
void setArguments(const TensorPtr &a1, const TensorPtr &a2, const ITensor::Args &) override
Definition: tensorOp.cc:28
void setArguments(const TensorPtr &a1, const TensorPtr &a2, const ITensor::Args &args) override
double operator[](size_t i) const override
support for partial derivatives needed for implicit method
calculate the minimum along an axis or whole tensor
Definition: tensorOp.h:161
arguments relevant for tensor expressions, not always meaningful. Exception thrown if not...
const Index & index(Index &&x) override
ItemPtr itemPtrFromThis() const
return a shared_ptr to this
Definition: item.cc:447
#define CLASSDESC_ACCESS(type)
perform a binary operation elementwise over two tensor arguments. Arguments need to be conformal: at ...
Definition: tensorOp.h:49
size_t size() const override
return number of elements in tensor - maybe less than hypercube.numElements if sparse ...
void setArgument(const TensorPtr &arg, const Args &args) override
const Minsky & cminsky()
const version to help in const correctness
Definition: minsky.h:549
calculate the maximum along an axis or whole tensor
Definition: tensorOp.h:167
virtual void setArgument(const TensorPtr &, const ITensor::Args &args={"", 0})
RavelTensor(const Ravel &ravel)
void setArguments(const std::vector< TensorPtr > &a, const Args &) override
void setArgument(const TensorPtr &a, const ITensor::Args &args) override
double operator[](std::size_t i) const override
return or compute data at a location
std::shared_ptr< ITensor > TensorPtr
Timestamp timestamp() const override
timestamp indicating how old the dependendent data might be. Used in CachedTensorOp to determine when...
double operator[](std::size_t i) const override
return or compute data at a location
double interpolateString(double idx, size_t stride, size_t offset) const
static std::vector< double, CIVITA_ALLOCATOR< double > > flowVars
variables defined as a simple function of the stock variables, also known as lhs variables. These variables appear in the body of the Godley table
Timestamp timestamp() const override
timestamp indicating how old the dependendent data might be. Used in CachedTensorOp to determine when...
void setArguments(const TensorPtr &y, const TensorPtr &x, const ITensor::Args &args) override
void setArguments(const TensorPtr &a1, const TensorPtr &a2, const Args &) override
void computeTensor() const override
computeTensor updates the above two mutable fields, but is logically const
void setArguments(const std::vector< TensorPtr > &a, const Args &av={"", 0}) override
void computeTensor() const override
computeTensor updates the above two mutable fields, but is logically const
void eval(double *fv, std::size_t, const double *sv) override
TensorOpFactory tensorOpFactory
std::size_t size() const override
double dStock(size_t ti, size_t si) const override
elementwise reduction over a vector of arguments
Definition: tensorOp.h:82
std::vector< TensorPtr > tensorsFromPorts(const Item &) const
returns vector of tensor ops for all wires attached to item
civita::ITensor::Timestamp timestamp() const override
timestamp indicating how old the dependendent data might be. Used in CachedTensorOp to determine when...
vector< TensorPtr > args
calculates the standard deviation along an axis or whole tensor
Definition: tensorOp.h:184
void setState(const OperationPtr &s) override
Timestamp timestamp() const override
timestamp indicating how old the dependendent data might be. Used in CachedTensorOp to determine when...
virtual std::weak_ptr< Port > ports(std::size_t i) const
callback to be run when item deleted from group
Definition: item.h:180
void setArguments(const std::vector< TensorPtr > &a1, const std::vector< TensorPtr > &a2, const ITensor::Args &) override
double operator[](size_t i) const override
void setArgument(const TensorPtr &a, const ITensor::Args &args) override
string to_string(CONST84 char *x)
Definition: minskyTCLObj.h:33
void setArgument(const TensorPtr &arg, const ITensor::Args &args) override
civita::ITensor::Timestamp timestamp() const override
timestamp indicating how old the dependendent data might be. Used in CachedTensorOp to determine when...
calculate the product along an axis or whole tensor
Definition: tensorOp.h:154
double evaluate(double in1=0, double in2=0) const override
double interpolateAny(const XVector &xv, const civita::any &x, size_t stride, size_t offset) const
void computeTensor() const override
computeTensor updates the above two mutable fields, but is logically const
const Hypercube & hypercube() const override
information describing the axes, types and labels of this tensor
void setArgument(const TensorPtr &a, const Args &) override
exception throw if flowVars vector unexpectedly resized during evalEquations
Definition: evalOp.h:190
void setArgument(const TensorPtr &a, const ITensor::Args &) override
Definition: tensorOp.cc:101
double d2(double x1=0, double x2=0) const override