def _build_gate_inftau(g, q10tempadjustmentName, neuroml_dt):
    """Build NeuroUnits equation strings for a gate defined by inf/tau curves.

    Expects exactly one time-course and one steady-state expression on *g*;
    returns the list of equation strings (tau, inf, state ODE, initial cond).
    """
    assert len(g.time_courses) == 1 and len(g.steady_states) == 1

    gate = g.name
    inf_term = '%s_inf' % gate
    tau_term = '%s_tau' % gate

    def _to_vgate(expr):
        # Voltage symbols may be offset later on, so rename them to a
        # placeholder symbol first:
        expr = re.sub(r"""\bV\b""", '__VGate__', expr)
        return re.sub(r"""\bv\b""", '__VGate__', expr)

    tau_src = SeqUtils.expect_single(g.time_courses)
    inf_src = SeqUtils.expect_single(g.steady_states)

    eqns = [
        '%s = ( %s) * (%s)' % (tau_term, _to_vgate(tau_src.getEqn()), neuroml_dt),
        '%s = %s' % (inf_term, _to_vgate(inf_src.getEqn())),
        "%s' = (%s-%s)/(%s) " % (gate, inf_term, gate, tau_term),
        # Initialise the state variable at its steady-state value:
        '<=> INITIAL %s:%s' % (gate, inf_term),
    ]
    return eqns
def _build_gate_inftau(g, q10tempadjustmentName, neuroml_dt):
    """Produce the NeuroUnits equation strings for an inf/tau specified gate.

    Requires exactly one time-course and one steady-state on *g*.
    """
    assert len(g.time_courses) == 1 and len(g.steady_states) == 1

    state = g.name
    tau_name = '%s_tau' % state
    inf_name = '%s_inf' % state

    def hide_voltage(eqn_text):
        # The voltage symbol is renamed so an offset can be applied later:
        for pattern in (r"""\bV\b""", r"""\bv\b"""):
            eqn_text = re.sub(pattern, '__VGate__', eqn_text)
        return eqn_text

    time_course = SeqUtils.expect_single(g.time_courses)
    steady_state = SeqUtils.expect_single(g.steady_states)

    equations = []
    equations.append('%s = ( %s) * (%s)' % (tau_name, hide_voltage(time_course.getEqn()), neuroml_dt))
    equations.append('%s = %s' % (inf_name, hide_voltage(steady_state.getEqn())))
    equations.append("%s' = (%s-%s)/(%s) " % (state, inf_name, state, tau_name))
    # Start the state variable at its steady-state value:
    equations.append('<=> INITIAL %s:%s' % (state, inf_name))
    return equations
def parse_expr(text, parse_type, start_symbol=None, debug=False, backend=None,
               working_dir=None, options=None, library_manager=None, name=None):
    """Parse *text* at the requested ParseTypes level and return the result.

    Returns the parsed unit/quantity for L1-L3, the single eqnset/library for
    L4/L5, or the whole LibraryManager for L6.  Propagates parser exceptions
    after logging what was being parsed.
    """
    # Are we parsing a complex expression? Then we need a library manager.
    # BUGFIX: the original condition was `ParseTypes is not ParseTypes.L1_Unit`,
    # which compares the enum class itself and is therefore always True; the
    # intent is clearly to test the `parse_type` argument.
    if library_manager is None and parse_type is not ParseTypes.L1_Unit:
        library_manager = LibraryManager(backend=backend,
                                         working_dir=working_dir,
                                         options=options, name=name,
                                         src_text=text)

    # First, preprocess the text:
    text = preprocess_string(text, parse_type=parse_type)

    # Now, we can parse the expression using PLY:
    try:
        (pRes, library_manager) = parse_eqn_block(text,
                parse_type=parse_type, debug=debug,
                library_manager=library_manager)
    except Exception:
        # Log what was being parsed before propagating the failure:
        print('')
        print('Error Parsing: %s' % text)
        print('Parsing as %s' % parse_type)
        raise

    # If it is a level-3 expression, we need to evaluate it:
    if parse_type == ParseTypes.L3_QuantityExpr:
        from neurounits.writers.writer_ast_to_simulatable_object import FunctorGenerator
        ev = FunctorGenerator().visit(pRes)
        pRes = ev()

    # And return the correct type of object:
    ret = {
        ParseTypes.L1_Unit: lambda: pRes,
        ParseTypes.L2_QuantitySimple: lambda: pRes,
        ParseTypes.L3_QuantityExpr: lambda: pRes,
        ParseTypes.L4_EqnSet: lambda: SeqUtils.expect_single(library_manager.eqnsets),
        ParseTypes.L5_Library: lambda: SeqUtils.expect_single(library_manager.libraries),
        ParseTypes.L6_TextBlock: lambda: library_manager,
    }
    return ret[parse_type]()
def _build_gate_alphabetainftau(g, q10tempadjustmentName, neuroml_dt):
    """Build NeuroUnits equations for a gate given alpha/beta transition rates.

    Requires one open state, one closed state and exactly the alpha/beta
    transitions; inf/tau fall back to the standard alpha-beta formulas when
    not given explicitly.
    """
    if len(g.openstates) != 1 or len(g.closedstates) != 1 or len(g.transitions) != 2:
        raise NeuroUnitsImportNeuroMLNotImplementedException("Non-Standard Gate/Transtions setup")

    gate = g.name
    alpha_tr = SeqUtils.filter_expect_single(g.transitions, lambda s: s.name == "alpha")
    beta_tr = SeqUtils.filter_expect_single(g.transitions, lambda s: s.name == "beta")

    t_alpha = '%s_alpha' % gate
    t_beta = '%s_beta' % gate
    t_inf = '%s_inf' % gate
    t_tau = '%s_tau' % gate

    def _remap(expr):
        # alpha/beta used inside inf/tau formulas need their units restored:
        expr = expr.replace("alpha", "((%s) * (%s) )" % (t_alpha, neuroml_dt))
        expr = expr.replace("beta", "((%s) * (%s) )" % (t_beta, neuroml_dt))
        # Voltage symbols become a placeholder (they may be offset later):
        expr = re.sub(r"""\bV\b""", '__VGate__', expr)
        return re.sub(r"""\bv\b""", '__VGate__', expr)

    # The alpha/beta rate terms:
    e1 = "%s = (%s) * (1/%s)" % (t_alpha, _remap(alpha_tr.getEqn()), neuroml_dt)
    e2 = "%s = (%s) * (1/%s)" % (t_beta, _remap(beta_tr.getEqn()), neuroml_dt)

    # Time courses should always be divided by the rate-adjustment term!
    if len(g.time_courses) != 0:
        tc = SeqUtils.expect_single(g.time_courses)
        tc_eqn = "%s = %s * (1/%s) *(%s)" % (t_tau, _remap(tc.getEqn()),
                                             q10tempadjustmentName, neuroml_dt)
    else:
        tc_eqn = "%s = 1/(%s* (%s+%s))" % (t_tau, q10tempadjustmentName,
                                           t_alpha, t_beta)

    if len(g.steady_states) != 0:
        ss = SeqUtils.expect_single(g.steady_states)
        ss_eqn = "%s = %s" % (t_inf, _remap(ss.getEqn()))
    else:
        ss_eqn = "%s = %s/(%s+%s)" % (t_inf, t_alpha, t_alpha, t_beta)

    # The state equation:
    state_eqn = "%s' = (%s-%s)/(%s) " % (gate, t_inf, gate, t_tau)
    # Initialise the state variable at its steady-state value:
    initial_cond = "<=> INITIAL %s:%s" % (gate, t_inf)

    return [e1, e2, tc_eqn, ss_eqn, state_eqn, initial_cond]
def parse_expr(text, parse_type, start_symbol=None, debug=False, backend=None,
               working_dir=None, options=None, library_manager=None, name=None):
    """Parse *text* at the requested ParseTypes level and return the result.

    For L4/L5/L6 parses, first preprocesses the text: strips '#' comments,
    joins backslash-continued lines, drops blanks and terminates each
    statement with ';'.  Returns the parsed object appropriate to
    *parse_type* (see the dispatch table at the bottom).
    """
    original_text = text

    # Some initial preprocessing (this is a bit hacky):
    if parse_type in [ParseTypes.L4_EqnSet, ParseTypes.L5_Library, ParseTypes.L6_TextBlock]:
        # Strip '#' comments:
        text = "\n".join([l.split("#")[0] for l in text.split("\n")])

        lines = []
        for l in text.split('\n'):
            if len(lines) != 0 and lines[-1].endswith('\\'):
                # Previous line ended with a continuation backslash:
                # join this (raw) line onto it.
                assert l
                lines[-1] = (lines[-1])[:-1] + l
            else:
                l = l.strip()
                if not l:
                    continue
                # BUGFIX: a line ending in a continuation backslash must not
                # be given a ';' terminator - otherwise it would end in ';'
                # and the join branch above (which tests for a trailing '\\')
                # could never fire.
                if not l[-1] in ('{', ';', '\\'):
                    l = l + ';'
                lines.append(l)
        text = '\n'.join(lines)
    else:
        text = text.strip()

    # BUGFIX: the original condition was `ParseTypes is not ParseTypes.L1_Unit`,
    # which compares the enum class itself and is therefore always True; the
    # intent is clearly to test the `parse_type` argument.
    if library_manager is None and parse_type is not ParseTypes.L1_Unit:
        library_manager = LibraryManager(backend=backend,
                                         working_dir=working_dir,
                                         options=options, name=name,
                                         src_text=original_text)

    (pRes, library_manager) = parse_eqn_block(text, parse_type=parse_type,
            debug=debug, library_manager=library_manager)

    # A level-3 expression must be evaluated to yield its quantity:
    if parse_type == ParseTypes.L3_QuantityExpr:
        from neurounits.writers.writer_ast_to_simulatable_object import FunctorGenerator
        ev = FunctorGenerator().visit(pRes)
        pRes = ev()

    # Return the object appropriate for the requested parse level:
    ret = {
        ParseTypes.L1_Unit: lambda: pRes,
        ParseTypes.L2_QuantitySimple: lambda: pRes,
        ParseTypes.L3_QuantityExpr: lambda: pRes,
        ParseTypes.L4_EqnSet: lambda: SeqUtils.expect_single(library_manager.eqnsets),
        ParseTypes.L5_Library: lambda: SeqUtils.expect_single(library_manager.libraries),
        ParseTypes.L6_TextBlock: lambda: library_manager,
    }
    return ret[parse_type]()
def _res_assignments(self, o, **kwargs):
    # Fold assignments whose RHS reduces to a constant into SymbolicConstants,
    # replacing every use of the assigned symbol in the AST and deleting the
    # now-redundant assignment.
    # NOTE(review): this mutates o._eqn_assignment while iterating over
    # .keys() - relies on .keys() returning a snapshot list (Python 2
    # semantics); confirm before porting to Python 3.
    removed = []
    for aKey in o._eqn_assignment.keys():
        a = o._eqn_assignment[aKey]
        alhs = a.lhs
        # Try to reduce the RHS to a fixed value (None/falsy => not constant).
        # NOTE(review): a constant that evaluates falsy (e.g. 0) is skipped
        # here - confirm whether that is intentional.
        fixed_value = self.visit(a.rhs_map)
        if fixed_value:
            sym_suffix = '_as_symconst'
            sym_suffix = ''
            s = ast.SymbolicConstant(symbol=aKey.symbol + sym_suffix, value=fixed_value)
            #ReplaceNode(a.lhs, s).visit(o)
            # Swap every use of the assignment's LHS for the new constant:
            ReplaceNode.replace_and_check(srcObj=a.lhs, dstObj=s, root = o)
            # Register the constant, then delete the original assignment:
            o._symbolicconstants[aKey.symbol] = s
            from neurounits.misc import SeqUtils
            old_ass = SeqUtils.filter_expect_single( o._eqn_assignment, lambda o:o.symbol == aKey.symbol )
            del o._eqn_assignment[ old_ass ]  #o.get_terminal_obj(aKey.symbol) ]
            #del o._eqn_assignment[ o.get_terminal_obj(aKey.symbol) ]
            removed.append(alhs)
    # Double check they have gone:
    for a in removed:
        nc = EqnsetVisitorNodeCollector()
        nc.visit(o)
        assert not a in nc.all()
def VisitRegimeDispatchMap(self, o, **kwargs):
    """Compile a regime-dispatch map into a single functor.

    Visits each regime's RHS to build one functor per regime, then returns a
    closure that looks up the currently-active regime in the state data and
    evaluates the matching functor (falling back to the nameless default
    regime, registered under the key None, when present).

    BUGFIX/cleanup: removed unreachable statements that followed the
    `return f3` in the original (`assert len(o.rhs_map) == 1; return ...`).
    """
    rt_graph = o.get_rt_graph()

    # One compiled functor per regime:
    rhs_functors = dict([(regime, self.visit(rhs)) for (regime, rhs) in o.rhs_map.items()])

    # If a nameless 'default' regime exists, register its functor under the
    # key None so it acts as the fallback in the dispatcher below:
    from neurounits.misc import SeqUtils
    try:
        default = SeqUtils.filter_expect_single(rhs_functors.keys(), lambda r: r.name is None)
        assert not None in rhs_functors
        rhs_functors[None] = rhs_functors[default]
    except ValueError:
        # No default regime defined; every regime must be listed explicitly.
        pass

    def f3(state_data, **kw):
        # Find the active regime for this RT graph, then dispatch:
        regime_states = state_data.rt_regimes
        curr_state = regime_states[rt_graph]
        if curr_state in rhs_functors:
            rhs_functor = rhs_functors[curr_state]
        else:
            rhs_functor = rhs_functors[None]
        return rhs_functor(state_data=state_data, **kw)

    return f3
def get_library(self, libname):
    """Return the single library named *libname* (local or stdlib-cached)."""
    candidates = [lib
                  for lib in chain(self.libraries, self._stdlib_cache.libraries)
                  if lib.name == libname]
    return SeqUtils.expect_single(candidates)
def get_library(self, libname):
    """Look up *libname* among this manager's libraries and the stdlib cache.

    Exactly one match must exist.
    """
    all_libs = chain(self.libraries, self._stdlib_cache.libraries)
    matches = [entry for entry in all_libs if entry.name == libname]
    return SeqUtils.expect_single(matches)
def get(self, name, include_stdlibs=True):
    """Find the single eqnset or library called *name*.

    Searches this manager's eqnsets and libraries, plus the stdlib cache
    unless *include_stdlibs* is False (or the cache is mid-load).
    """
    # Never consult the stdlib cache while it is itself being loaded:
    if LibraryManager._stdlib_cache_loading:
        include_stdlibs = False

    if include_stdlibs:
        search_space = chain(self.eqnsets, self.libraries, self._stdlib_cache.libraries)
    else:
        search_space = chain(self.eqnsets, self.libraries)

    return SeqUtils.expect_single([item for item in search_space if item.name == name])
def parse_expr(text, parse_type, start_symbol=None, debug=False, backend=None,
               working_dir=None, options=None, library_manager=None, name=None):
    """Parse *text* at the requested ParseTypes level and return the result.

    Creates a LibraryManager when one is needed, preprocesses the text, runs
    the PLY parser, evaluates L3 quantity expressions, and dispatches on
    *parse_type* to return the appropriate object.
    """
    # Are we parsing a complex expression? Then we need a library manager.
    # BUGFIX: the original condition was `ParseTypes is not ParseTypes.L1_Unit`,
    # which compares the enum class itself and is therefore always True; the
    # intent is clearly to test the `parse_type` argument.
    if library_manager is None and parse_type is not ParseTypes.L1_Unit:
        library_manager = LibraryManager(backend=backend,
                                         working_dir=working_dir,
                                         options=options, name=name,
                                         src_text=text)

    # First, preprocess the text:
    text = preprocess_string(text, parse_type=parse_type)

    # Now, we can parse the expression using PLY:
    try:
        (pRes, library_manager) = parse_eqn_block(text,
                parse_type=parse_type, debug=debug,
                library_manager=library_manager)
    except Exception:
        # Log what was being parsed before propagating the failure:
        print('')
        print('Error Parsing: %s' % text)
        print('Parsing as %s' % parse_type)
        raise

    # If it is a level-3 expression, we need to evaluate it:
    if parse_type == ParseTypes.L3_QuantityExpr:
        from neurounits.writers.writer_ast_to_simulatable_object import FunctorGenerator
        ev = FunctorGenerator().visit(pRes)
        pRes = ev()

    # And return the correct type of object:
    ret = {
        ParseTypes.L1_Unit: lambda: pRes,
        ParseTypes.L2_QuantitySimple: lambda: pRes,
        ParseTypes.L3_QuantityExpr: lambda: pRes,
        ParseTypes.L4_EqnSet: lambda: SeqUtils.expect_single(library_manager.eqnsets),
        ParseTypes.L5_Library: lambda: SeqUtils.expect_single(library_manager.libraries),
        ParseTypes.L6_TextBlock: lambda: library_manager,
    }
    return ret[parse_type]()
def get_eqnset(self, libname):
    """Return the single eqnset whose name is *libname*."""
    matching = [es for es in self.eqnsets if es.name == libname]
    return SeqUtils.expect_single(matching)
def get_eqnset(self, libname):
    """Fetch the eqnset called *libname*; exactly one must exist."""
    return SeqUtils.expect_single([candidate
                                   for candidate in self.eqnsets
                                   if candidate.name == libname])
def _build_gate_alphabetainftau(g, q10tempadjustmentName, neuroml_dt):
    """Emit NeuroUnits equations for a gate specified via alpha/beta rates.

    The gate must have one open state, one closed state and exactly the
    alpha/beta transitions; missing inf/tau expressions are synthesised from
    the alpha/beta terms.
    """
    standard_shape = (len(g.openstates) == 1 and len(g.closedstates) == 1
                      and len(g.transitions) == 2)
    if not standard_shape:
        raise NeuroUnitsImportNeuroMLNotImplementedException('Non-Standard Gate/Transtions setup')

    state = g.name
    tr_alpha = SeqUtils.filter_expect_single(g.transitions, lambda t: t.name == "alpha")
    tr_beta = SeqUtils.filter_expect_single(g.transitions, lambda t: t.name == "beta")

    name_alpha, name_beta, name_inf, name_tau = [
        '%s_%s' % (state, suffix) for suffix in ('alpha', 'beta', 'inf', 'tau')]

    def rewrite(eqn_text):
        # alpha/beta appearing inside inf/tau formulas get their units back:
        eqn_text = eqn_text.replace('alpha', "((%s) * (%s) )" % (name_alpha, neuroml_dt))
        eqn_text = eqn_text.replace('beta', "((%s) * (%s) )" % (name_beta, neuroml_dt))
        # Voltage symbols become a placeholder (possibly offset later):
        eqn_text = re.sub(r"""\bV\b""", '__VGate__', eqn_text)
        eqn_text = re.sub(r"""\bv\b""", '__VGate__', eqn_text)
        return eqn_text

    # The alpha/beta rate terms:
    eq_alpha = '%s = (%s) * (1/%s)' % (name_alpha, rewrite(tr_alpha.getEqn()), neuroml_dt)
    eq_beta = '%s = (%s) * (1/%s)' % (name_beta, rewrite(tr_beta.getEqn()), neuroml_dt)

    # Time courses should always be divided by the rate-adjustment term!
    if len(g.time_courses) != 0:
        tau_src = SeqUtils.expect_single(g.time_courses)
        eq_tau = '%s = %s * (1/%s) *(%s)' % (name_tau, rewrite(tau_src.getEqn()),
                                             q10tempadjustmentName, neuroml_dt)
    else:
        eq_tau = '%s = 1/(%s* (%s+%s))' % (name_tau, q10tempadjustmentName,
                                           name_alpha, name_beta)

    if len(g.steady_states) != 0:
        inf_src = SeqUtils.expect_single(g.steady_states)
        eq_inf = '%s = %s' % (name_inf, rewrite(inf_src.getEqn()))
    else:
        eq_inf = '%s = %s/(%s+%s)' % (name_inf, name_alpha, name_alpha, name_beta)

    # The state equation:
    eq_state = "%s' = (%s-%s)/(%s) " % (state, name_inf, state, name_tau)
    # Start the state variable at its steady-state value:
    eq_initial = '<=> INITIAL %s:%s' % (state, name_inf)

    return [eq_alpha, eq_beta, eq_tau, eq_inf, eq_state, eq_initial]
def EqnSet(cls, text, **kwargs):
    """Parse *text* as a complete file and return its single eqnset."""
    manager = cls.File(text=text, **kwargs)
    eqnset_name = SeqUtils.expect_single(manager.get_eqnset_names())
    return manager.get_eqnset(eqnset_name)