50         (
"missing argument "+to_string(a1)+
","+to_string(a2)+
    51          (s?(
" on operation "+
OperationType::typeName(s->type())):string()))
    67             auto v=values.find(result->valueId());
    69               v->second->init(value);
    81     assert(result->idx()>=0);
    90       if (
auto state=nodeOp.
state)
    93             auto ec=make_shared<EvalCommon>();
    95             if (!rhs) 
return false;
    96             result->index(rhs->index());
   106   bool VariableDAG::tensorEval(std::set<const Node*>&)
 const   114       if (
auto nodeOp=dynamic_cast<OperationDAGBase*>(rhs.payload))
   131             if ((!tensorEval() && !rhs->tensorEval()) || !
addTensorOp(ev))
   133                 if (result->idx()<0) result->allocValue();
   134                 rhs->addEvalOps(ev, result);
   138           if (result->idx()<0) result->allocValue();
   142     assert(result->idx()>=0);
   159             result->allocValue();
   162           rhs->addEvalOps(ev, result);
   164           throw runtime_error(
"integral not defined for "+name);
   166     assert(result->idx()>=0);
   167     if (r && r->isFlowVar() && (r!=result || !result->isFlowVar()))
   185   int OperationDAGBase::order(
unsigned maxOrder)
 const   187     if (type()==integrate)
   189     if (cachedOrder>=0) 
return cachedOrder;
   192       throw error(
"maximum order recursion reached");
   197     int order=type()==constant? 1: 0;
   198     for (
size_t i=0; i<arguments.size(); ++i)
   199       for (
size_t j=0; j<arguments[i].size(); ++j)
   202           order=std::max(order, arguments[i][j]->order(maxOrder-1));
   204     return cachedOrder=order;
   207   bool OperationDAGBase::tensorEval(std::set<const Node*>& visited)
 const    209     if (!visited.insert(
this).second)
   213       case reduction: 
case scan: 
case tensor: 
case statistics:
   215       case general: 
case binop: 
case constop: 
case function:
   216         for (
auto& i: arguments)
   218             if (j && j->tensorEval(visited)) 
return true;
   227   void OperationDAGBase::checkArg(
unsigned i, 
unsigned j)
 const   229     if (arguments.size()<=i || arguments[i].size()<=j || !arguments[i][j])
   230       throw NoArgument(state, i, j);
   237                   const vector<vector<VariableValuePtr> >& argIdx,
   243       for (
auto& i: argIdx)
   247             r->hypercube(i[0]->hypercube());
   251         throw runtime_error(
"Scalar processing not supported in tensor code, try adding an intermediate variable");
   252       if (r->idx()==-1) r->allocValue();
   258             for (
auto& i: argIdx[1])
   260                 throw runtime_error(
"divide by constant zero");
   261           for (
auto& i: argIdx)
   273             while (!argIdx.empty() && i<argIdx[0].size() && argIdx[0][i]->isZero())
   275           if (!argIdx.empty() && i<argIdx[0].size())
   278               for (++i; i<argIdx[0].size(); ++i)
   280                   ev.push_back(
EvalOpPtr(accum, state, r, *r, *argIdx[0][i]));
   292           if (argIdx[1].size()==1)
   294             ev.push_back(
EvalOpPtr(
op, state, r, *r, *argIdx[1][0]));
   295           else if (argIdx[1].size()>1)
   299               tmp->hypercube(r->hypercube());
   302                 while (i<argIdx[1].size() && argIdx[1][i]->isZero()) i++;
   303               if (i<argIdx[1].size())
   306                   for (++i; i<argIdx[1].size(); ++i)
   308                       ev.push_back(
EvalOpPtr(accum, state, tmp, *tmp, *argIdx[1][i]));
   322         assert(!dynamic_cast<IntOp*>(state.get()));
   328             result->allocValue();
   330         if (tensorEval() && 
addTensorOp(result, *
this, ev))
   333         assert(result->idx()>=0);
   336         vector<vector<VariableValuePtr> > argIdx(arguments.size());
   337         for (
size_t i=0; type()!=integrate && i<arguments.size(); ++i)
   338           for (
size_t j=0; j<arguments[i].size(); ++j)
   340               argIdx[i].push_back(arguments[i][j]->addEvalOps(ev));
   344                 argIdx[i].back()->allocValue();
   353                 cumulate(ev, state, result, argIdx, add, add, 0);
   356                 cumulate(ev, state, result, argIdx, subtract, add, 0);
   359                 cumulate(ev, state, result, argIdx, multiply, multiply, 1);
   362                 cumulate(ev, state, result, argIdx, divide, multiply, 1);
   365                 cumulate(ev, state, result, argIdx, min, min, numeric_limits<double>::max());
   368                 cumulate(ev, state, result, argIdx, max, max, -numeric_limits<double>::max());
   371                 cumulate(ev, state, result, argIdx, and_, and_, 1);
   374                 cumulate(ev, state, result, argIdx, or_, or_, 0);
   378                 throw error(
"Constant deprecated");
   379               case lt: 
case le: 
case eq:
   380                 for (
size_t i=0; i<arguments.size(); ++i)
   381                   if (arguments[i].empty())
   384                       argIdx[i].back()->allocValue();
   387                         argIdx[i][0]->units=argIdx[i-1][0]->units;
   388                       else if (arguments.size()>1 && !argIdx[1].empty())
   389                         argIdx[0][0]->units=argIdx[1][0]->units;
   391                 ev.push_back(
EvalOpPtr(type(), state, result, *argIdx[0][0], *argIdx[1][0])); 
   394                 for (
size_t i=0; i<arguments.size(); ++i)
   395                   if (arguments[i].empty())
   398                       argIdx[i].back()->allocValue();
   400                 ev.push_back(
EvalOpPtr(type(), state, result, *argIdx[0][0], *argIdx[1][0])); 
   403                 if (!argIdx.empty() && argIdx[0].size()==1)
   404                   ev.push_back(
EvalOpPtr(type(), state, result, *argIdx[0][0])); 
   406                   throw error(
"inputs for highlighted operations incorrectly wired");
   409                 switch (classify(type()))
   411                   case reduction: 
case scan: 
case tensor:
   412                     if (result->idx()==-1) result->allocValue();
   417                       bool correctlyWired=
true;
   418                       for (
size_t i=0; i<arguments.size(); ++i)
   419                         correctlyWired &= arguments[i].size()==1;
   421                         throw error(
"inputs for highlighted operations incorrectly wired");
   423                       switch (arguments.size())
   426                           ev.push_back(
EvalOpPtr(type(), state, result));
   429                           ev.push_back(
EvalOpPtr(type(), state, result, *argIdx[0][0]));
   432                           ev.push_back(
EvalOpPtr(type(), state, result, *argIdx[0][0], *argIdx[1][0]));
   435                           throw error(
"Too many arguments");
   441         catch (
const std::exception&)
   449     if (state && state->portsSize()>0)
   450       if (
auto statePort=state->ports(0).lock()) 
   451         statePort->setVariableValue(result);
   452     assert(result->idx()>=0);
   468     map<string, VariableDAG*> integVarMap;
   470     vector<pair<VariableDAGPtr,Wire*>> integralInputs;
   476        [&](
const Items&, Items::const_iterator it){
   477          if (
auto v=(*it)->variableCast())
   480              if (v->inputWired() && v!=
minsky.definingVar(v->valueId()).
get())
   482                  minsky.displayErrorItem(*v);
   483                  throw runtime_error(
"Multiply defined");
   486              if (
auto vv=v->vValue())
   487                if (vv->type() != v->type())
   489                    minsky.displayErrorItem(*v);
   493          else if (
IntOp* i=dynamic_cast<IntOp*>(it->get()))
   502                  if (!i->ports(1).lock()->wires().empty())
   507                      input->name=iv->name();
   516                          integralInputs.emplace_back(
input,i->ports(1).lock()->wires()[0]);
   522                  if (!i->ports(2).lock()->wires().empty())
   532                      if (
auto v=dynamic_cast<VariableDAG*>(init.get()))
   534                      else if (
auto c=dynamic_cast<ConstantDAG*>(init.get()))
   538                          if (
auto vv=iv->vValue()) vv->adjustSliderBounds();
   541                        throw error(
"only constants, parameters and variables can be connected to the initial value port");
   546          else if (
auto fn=dynamic_cast<UserFunction*>(it->get()))
   556        [&](
const Groups&, Groups::const_iterator it){
   557          if (!(*it)->name().empty())
   562            catch (
const std::exception&)
   570        [&](
const Items&, Items::const_iterator it){
   575               input->name=i->name();
   584     for (
auto& i: integralInputs)
   589     map<string, GodleyColumnDAG> godleyVars;
   592        [&](
const Items&, Items::const_iterator i)
   594          if (
auto g=dynamic_cast<GodleyIcon*>(i->get()))
   599     for (
auto& g: godleyVars)
   615               i.first->rhs=ii->rhs;
   620         throw runtime_error(
"Unable to differentiate "+i.second);
   640     for (
auto& v: integVarMap)
   646           if (v.second->isFlowVar())
   647             if (
auto vv=dynamic_cast<VariableDAG*>
   648                 (
makeDAG(v.first, v.second->name, v.second->type()).
get()))
   660         set<VariableDAG*,VariableDefOrder> variableSet(variableDefOrder);
   663            [&](
const Items&, Items::const_iterator it){
   664              if (
auto v=(*it)->variableCast())
   665                if (
auto vv=v->vValue())
   666                  if (
auto dag=dynamic_cast<VariableDAG*>
   667                      (
makeDAG(v->valueId(), vv->name, vv->type()).
get()))
   668                    variableSet.insert(dag);
   682       throw runtime_error(
"Invalid valueId: "+
valueId);
   695     for (
unsigned i=0; 
varNames.contains(nm); ++i)
   696       nm=name+
"_"+to_string(i);
   715         assert(
op.portsSize()==2);
   717         if (
op.ports(1).lock()->wires().empty() || !(expr=
getNodeFromWire(*
op.ports(1).lock()->wires()[0])))
   718           op.throw_error(
"derivative not wired");
   722               (
op, expr->derivative(*
this));
   734         r->state=
minsky.model->findItem(
op);
   737         r->arguments.resize(
op.numPorts()-1);
   738         for (
size_t i=1; i<
op.portsSize(); ++i)
   739           if (
auto p=
op.ports(i).lock())
   740             for (
auto w: p->wires())
   742         if (
auto uf=dynamic_cast<const UserFunction*>(&
op))
   745             r->arguments.emplace_back();
   746             for (
auto& i: uf->symbolNames())
   749                 if (vv!=
minsky.variableValues.end())
   751                     r->arguments.back().emplace_back(
makeDAG(vv->first,vv->second->name,vv->second->type()));
   763     for (
unsigned i=1; i<sw.portsSize(); ++i)
   765         auto& w=sw.ports(i).lock()->wires();
   768             minsky.displayErrorItem(sw);
   769             throw error(
"input port not wired");
   773         wires.push_back(w[0]);
   776     assert(wires.size()==sw.numCases()+1);
   779     auto r=make_shared<OperationDAG<OperationType::add>>();
   780     r->state=
minsky.model->findItem(sw);
   783     r->arguments[0].resize(sw.numCases());
   787     for (
unsigned i=1; i<sw.numCases()-1; ++i)
   791       (*wires[sw.numCases()])*(1-(
input<sw.numCases()-1));
   800         if (r && r->isFlowVar())
   814           if (chain.empty()) 
return {};
   815           result->index(chain.back()->index());
   816           result->hypercube(chain.back()->hypercube());
   820       catch (
const std::exception& ex)
   830     auto r=make_shared<LockDAG>(l);
   835         if (
auto p=ravel->ports(1).lock())
   836           if (!p->wires().empty())
   840       if (
auto p=l.
ports(1).lock())
   841         if (!p->wires().empty())
   848     if (
auto p=
wire.from())
   850         auto& item=p->item();
   851         if (
auto o=item.operationCast())
   858         if (
auto v=item.variableCast())
   866         else if (
auto s=dynamic_cast<SwitchIcon*>(&item))
   872         else if (
auto l=dynamic_cast<Lock*>(&item))
   904     o << 
"\\begin{eqnarray*}\n";
   907       o<<i.first<<
"(x,y)&=&"<<i.second<<
"\\\\\n";
   910         if (dynamic_cast<const IntegralInputVariableDAG*>(i) ||
   912         o << i->latex() << 
"&=&";
   923         o << 
"\\frac{ d " << 
mathrm(i->name) << 
   927           input->rhs->latex(o);
   930     return o << 
"\\end{eqnarray*}\n";
   935     o << 
"\\begin{dgroup*}\n";
   938         if (dynamic_cast<const IntegralInputVariableDAG*>(i)) 
continue;
   940         o<<
"\\begin{dmath*}\n";
   941         o << i->latex() << 
"=";
   946         o << 
"\n\\end{dmath*}\n";
   951         o<<
"\\begin{dmath*}\n";
   953         o << 
"\\begin{dmath*}\n\\frac{ d " << 
mathrm(i->name) << 
   957           input->rhs->latex(o);
   958         o << 
"\\end{dmath*}\n";
   960     return o << 
"\\end{dgroup*}\n";
   968         o<<
"function f="<<i.first<<
"\n";
   969         o<<
"f="<<i.second<<
"\nendfunction;\n\n";
   971     o<<
"function f=f(x,t)\n";
   975       o<<i->matlab()<<
"=x("<<j++<<
");\n";
   979         if (dynamic_cast<const IntegralInputVariableDAG*>(i) ||
   981         o << i->matlab() << 
"=";
   983           o << i->rhs->matlab();
   992         o << 
"f("<<j++<<
")=";
   995           input->rhs->matlab(o);
  1000     o<<
"endfunction;\n\n";
  1005       o << 
"x0("<<j++<<
")="<<
matlabInit(i->init)<<
";\n";
  1018         i->addEvalOps(equations,i->result);
  1019         assert(
minsky.variableValues.validEntries());
  1024         const string vid=i->valueId;
  1027         assert(
minsky.variableValues.count(vid));
  1028         integrals.back().stock=
minsky.variableValues[vid];
  1029         integrals.back().operation=
dynamic_cast<IntOp*
>(i->intOp);
  1030         const VariableDAGPtr iInput=expressionCache.getIntegralInput(vid);
  1031         if (iInput && iInput->rhs)
  1032           integrals.back().setInput(iInput->rhs->addEvalOps(equations));
  1034     assert(
minsky.variableValues.validEntries());
  1040     minsky.model->recursiveDo
  1042        [&](
Items&, Items::iterator i)
  1044          if (
auto v=(*i)->variableCast())
  1047                throw error(
"variable %s has undefined type",v->name().c_str());
  1048              assert(
minsky.variableValues.count(v->valueId()));
  1049              if (v->portsSize()>0)
  1050                v->ports(0).lock()->setVariableValue(
minsky.variableValues[v->valueId()]);
  1052          else if (
auto pw=(*i)->plotWidgetCast())
  1053            for (
size_t port=0; port<pw->portsSize(); ++port)
  1054              for (
auto w: pw->ports(port).lock()->wires())
  1056                w->from()->setVariableValue(
getNodeFromWire(*w)->addEvalOps(equations));
  1057          else if (
auto s=dynamic_cast<Sheet*>(i->get()))
  1059              for (
auto w: s->ports(0).lock()->wires())
  1061                w->from()->setVariableValue(
getNodeFromWire(*w)->addEvalOps(equations));
  1064          else if (
auto r=dynamic_cast<Ravel*>(i->get()))
  1065            for (
auto w: r->ports(1).lock()->wires())
  1067                w->from()->setVariableValue(
getNodeFromWire(*w)->addEvalOps(equations));
  1074   (map<string, GodleyColumnDAG>& godleyVariables, 
const GodleyIcon& gi)
  1077     for (
size_t c=1; c<
godley.cols(); ++c)
  1080         if (
uqName(colName).empty())
  1083         colName=
valueId(gi.group.lock(), colName);
  1084         if (processedColumns.contains(colName)) 
continue; 
  1085         processedColumns.insert(colName);
  1089         const vector<WeakNodePtr>& arguments=gd.
arguments[0];
  1090         for (
size_t r=1; r<
godley.rows(); ++r)
  1092             if (
godley.initialConditionRow(r)) 
continue;
  1094             if (fc.name.empty()) 
continue;
  1099             if (abs(fc.coef)==1)
  1106                   (expressionCache.insertAnonymous(
NodePtr(term)));
 virtual VariableValuePtr addEvalOps(EvalOpVector &, const VariableValuePtr &result={})=0
adds EvalOps to an EvalOpVector representing this node. 
 
bool addTensorOp(const shared_ptr< VariableValue > &result, OperationDAGBase &nodeOp, EvalOpVector &ev)
 
void doOneEvent(bool idleTasksOnly)
checks if any GUI events are waiting, and processes an event if so 
 
Ravel * ravelInput() const
Ravel this is connected to. nullptr if not connected to a Ravel. 
 
A helper to evaluate a variable value. 
 
void displayErrorItem(const Item &op) const
indicate that the operation item has an error, if visible, otherwise its containing group 
 
void resetValue(VariableValue &) const
reset a given variable value to its initial condition, in this context 
 
vector< TensorPtr > createRavelChain(const ravel::RavelState &state, const TensorPtr &arg)
creates a chain of tensor operations that represents a Ravel in state state, operating on arg ...
 
NodePtr makeDAG(const string &valueId, const string &name, VariableType::Type type)
create a variable DAG. returns cached value if previously called 
 
ostream & latexWrapped(ostream &) const
 
NoArgument(const OperationPtr &s, unsigned a1, unsigned a2)
 
VariableValues variableValues
 
An integral is an additional stock variable, that integrates its flow variable. 
 
shared_ptr class for polymorphic operation objects. Note, you may assume that this pointer is always ...
 
const NodePtr & insert(const T &x, const NodePtr &n)
 
void populateEvalOpVector(EvalOpVector &equations, std::vector< Integral > &integrals)
 
SubexpressionCache expressionCache
 
SystemOfEquations(const Minsky &, const Group &g)
construct the system of equations 
 
bool operator()(const VariableDAG *x, const VariableDAG *y) const
 
std::shared_ptr< Item > ItemPtr
 
VariableValuePtr result
reference to where this node's value is stored 
 
std::string uqName(const std::string &name)
extract unqualified portion of name 
 
vector< VariableDAG * > integrationVariables
 
string valueId(const string &name)
construct a valueId from a fully qualified name; the name should not be canonicalised 
 
a shared_ptr that default constructs a default target, and is always valid 
 
NodePtr zero
useful constants to share 
 
std::shared_ptr< ITensor > create(const ItemPtr &, const TensorsFromPort &tp={})
create a tensor representation of the expression rooted at op. If expression doesn't contain any refe...
 
string latexInit(const string &)
convert an initialisation string into a LaTeX expression 
 
VariableDefOrder(unsigned maxOrder)
 
ravel::RavelState lockedState
 
GodleyTable table
table data. Must be declared before editor 
 
ItemPtr itemIndicator
for drawing error indicator on the canvas 
 
OperationFactory< OperationDAGBase, OperationDAG, OperationType::numOps-1 > operationDAGFactory
 
std::vector< ItemPtr > Items
 
std::string trimWS(const std::string &s)
 
void processGodleyTable(map< string, GodleyColumnDAG > &godleyVariables, const GodleyIcon &)
 
string matlabInit(const string &)
convert an initialisation string into a matlab expression 
 
virtual std::string valueId() const
string used to link to the VariableValue associated with this 
 
VariableDAGPtr getIntegralInput(const string &name) const
 
void updatePortVariableValue(EvalOpVector &equations)
 
std::vector< GroupPtr > Groups
 
VariableValuePtr addEvalOps(EvalOpVector &, const VariableValuePtr &result={}) override
adds EvalOps to an EvalOpVector representing this node. 
 
static std::string typeName(int t)
 
void cumulate(EvalOpVector &ev, const ItemPtr &state, VariableValuePtr &r, const vector< vector< VariableValuePtr > > &argIdx, OperationType::Type op, OperationType::Type accum, double groupIdentity)
 
NodePtr getNodeFromWire(const Wire &wire)
returns cached subexpression node representing what feeds the wire, creating a new one if necessary ...
 
factory class template, for creating objects of type T<O>. N is the maximum value of O ...
 
const Minsky & cminsky()
const version to help in const correctness 
 
ostream & matlab(ostream &) const
render as MatLab code create equations suitable for Runge-Kutta solver 
 
void throw_error(const std::string &) const
mark item on canvas, then throw 
 
VariableDAGPtr getNodeFromVar(const VariableBase &v)
 
weak reference into subexpression cache 
 
represents a numerical coefficient times a variable (a "flow") 
 
std::shared_ptr< ITensor > TensorPtr
 
std::shared_ptr< Node > NodePtr
 
bool isValueId(const string &name)
check that name is a valid valueId (useful for assertions) 
 
int order(unsigned maxOrder) const override
returns evaluation order in sequence of variable definitions 
 
NodePtr insertAnonymous(NodePtr x)
 
void insertIntegralInput(const string &name, const VariableDAGPtr &n)
 
TensorOpFactory tensorOpFactory
 
vector< vector< WeakNodePtr > > arguments
 
std::set< std::string > varNames
used to rename ambiguous variables in different scopes 
 
represents a Godley column 
 
vector< pair< VariableDAGPtr, string > > derivInputs
 
std::map< std::string, std::string > userDefinedFunctions
table of user defined functions and their definitions 
 
vector< VariableDAG * > variables
 
virtual std::weak_ptr< Port > ports(std::size_t i) const
callback to be run when item deleted from group 
 
shared_ptr< VariableDAG > VariableDAGPtr
 
Minsky & minsky()
global minsky object 
 
string mathrm(const string &nm)
wraps nm in \mathrm{} if it has more than one letter — and also takes into account LaTeX subscript/superscripts ...
 
ostringstream getDefFromIntVar(const VariableBase &v)
 
static Group classify(Type t)
 
static OperationDAGBase * create(Type type, const string &name="")
factory method 
 
ostream & latex(ostream &) const
render as a LaTeX eqnarray Use LaTeX brqn environment to wrap long lines