def _build_gate_alphabetainftau(g, q10tempadjustmentName, neuroml_dt):
    """Build NeuroUnits equation strings for a single HH-style gate.

    Expects a gate *g* with exactly one open state, one closed state and
    two transitions named 'alpha' and 'beta'.  Emits the alpha/beta rate
    equations, the time-constant (tau) and steady-state (inf) equations,
    the first-order state equation and an initial condition.

    :param g: gate object (reads .name, .openstates, .closedstates,
        .transitions, .time_courses, .steady_states).
    :param q10tempadjustmentName: symbol of the Q10 rate-adjustment term.
    :param neuroml_dt: unit-carrying time term used to make NeuroML's
        dimensionless expressions unit-consistent.
    :return: list of six equation strings.
    :raises NeuroUnitsImportNeuroMLNotImplementedException: for any other
        gate/transition layout.
    """
    if not (len(g.openstates) == 1 and len(g.closedstates) == 1
            and len(g.transitions) == 2):
        raise NeuroUnitsImportNeuroMLNotImplementedException("Non-Standard Gate/Transtions setup")

    gate = g.name
    tr_alpha = SeqUtils.filter_expect_single(g.transitions, lambda t: t.name == "alpha")
    tr_beta = SeqUtils.filter_expect_single(g.transitions, lambda t: t.name == "beta")

    # Derived term names: <gate>_alpha, <gate>_beta, <gate>_inf, <gate>_tau
    terms = {}
    for suffix in ("alpha", "beta", "inf", "tau"):
        terms[suffix] = "%s_%s" % (gate, suffix)

    def _rewrite(expr):
        # When alpha/beta appear inside inf/tau expressions, rescale them
        # back into NeuroML's time units:
        expr = expr.replace("alpha", "((%s) * (%s)  )" % (terms["alpha"], neuroml_dt))
        expr = expr.replace("beta", "((%s) * (%s)  )" % (terms["beta"], neuroml_dt))
        # Voltage symbols become __VGate__, so a voltage offset can be
        # applied later on:
        for pat in (r"\bV\b", r"\bv\b"):
            expr = re.sub(pat, "__VGate__", expr)
        return expr

    # Alpha/beta rate equations (divided by dt to give them rate units):
    e1 = "%s =  (%s) * (1/%s)" % (terms["alpha"], _rewrite(tr_alpha.getEqn()), neuroml_dt)
    e2 = "%s =  (%s) * (1/%s)" % (terms["beta"], _rewrite(tr_beta.getEqn()), neuroml_dt)

    # Time courses should always be divided by the rate-adjustment term!
    if g.time_courses:
        tc = SeqUtils.expect_single(g.time_courses)
        tc_eqn = "%s =  %s * (1/%s) *(%s)" % (terms["tau"], _rewrite(tc.getEqn()),
                                              q10tempadjustmentName, neuroml_dt)
    else:
        tc_eqn = "%s =  1/(%s* (%s+%s))" % (terms["tau"], q10tempadjustmentName,
                                            terms["alpha"], terms["beta"])

    if g.steady_states:
        ss = SeqUtils.expect_single(g.steady_states)
        ss_eqn = "%s =  %s" % (terms["inf"], _rewrite(ss.getEqn()))
    else:
        ss_eqn = "%s =  %s/(%s+%s)" % (terms["inf"], terms["alpha"],
                                       terms["alpha"], terms["beta"])

    # First-order relaxation of the state variable towards its steady state:
    state_eqn = "%s' = (%s-%s)/(%s) " % (gate, terms["inf"], gate, terms["tau"])

    # The state variable starts at its steady-state value:
    initial_cond = "<=> INITIAL %s:%s" % (gate, terms["inf"])

    return [e1, e2, tc_eqn, ss_eqn, state_eqn, initial_cond]
    def _res_assignments(self, o, **kwargs):
        """Fold constant-valued assignments in *o* into symbolic constants.

        For each assignment whose RHS the visitor can reduce to a fixed
        value, a SymbolicConstant node replaces the assignment's LHS
        everywhere in *o*, the constant is registered in
        o._symbolicconstants, and the assignment entry is deleted.
        Finally asserts that none of the removed LHS nodes remain
        reachable from *o*.
        """
        removed = []
        # NOTE(review): entries are deleted from o._eqn_assignment inside
        # this loop; iterating .keys() here relies on Python 2 returning a
        # list copy — under Python 3 this would raise RuntimeError.
        for aKey in o._eqn_assignment.keys():
            a = o._eqn_assignment[aKey]
            alhs = a.lhs
            fixed_value = self.visit(a.rhs_map)
            # NOTE(review): truthiness test — a legitimately falsy fixed
            # value (e.g. zero) would be skipped here; confirm intended.
            if fixed_value:

                sym_suffix = '_as_symconst'
                sym_suffix = ''
                # Build the replacement constant (suffix currently disabled,
                # so it keeps the original symbol name):
                s = ast.SymbolicConstant(symbol=aKey.symbol
                        + sym_suffix, value=fixed_value)

                # Swap the assignment's LHS node for the constant everywhere
                # under o, verifying the replacement took effect:
                #ReplaceNode(a.lhs, s).visit(o)
                ReplaceNode.replace_and_check(srcObj=a.lhs, dstObj=s, root = o)



                o._symbolicconstants[aKey.symbol] = s

                # Remove the now-redundant assignment entry.
                # NOTE(review): the lambda parameter 'o' shadows the outer
                # 'o' (the eqnset) — it works, but is confusing; presumably
                # it iterates assignment objects with a .symbol attribute.
                from neurounits.misc import SeqUtils
                old_ass = SeqUtils.filter_expect_single( o._eqn_assignment, lambda o:o.symbol == aKey.symbol )
                del o._eqn_assignment[ old_ass ] #o.get_terminal_obj(aKey.symbol) ]

                #del o._eqn_assignment[ o.get_terminal_obj(aKey.symbol) ]

                removed.append(alhs)

        # Double check they have gone:
        for a in removed:
            nc = EqnsetVisitorNodeCollector()
            nc.visit(o)
            assert not a in nc.all()
# NOTE(review): removed non-Python scraping artifact ("Esempio n. 3" / "0")
# that sat between two method bodies and broke the file's syntax.
    def VisitRegimeDispatchMap(self, o, **kwargs):
        """Compile a regime-dispatch node into a runtime functor.

        Visits each regime's RHS once to build a functor per regime.
        If a 'default' regime (one whose name is None) exists, it is also
        aliased under the key ``None`` so that regimes without an explicit
        RHS fall back to it at evaluation time.

        :param o: the regime-dispatch-map AST node (reads .get_rt_graph(),
            .rhs_map).
        :return: a functor ``f(state_data, **kw)`` that looks up the
            current regime of this node's RT-graph in
            ``state_data.rt_regimes`` and evaluates the matching RHS.
        """
        rt_graph = o.get_rt_graph()

        # One compiled functor per regime:
        rhs_functors = dict((regime, self.visit(rhs))
                            for (regime, rhs) in o.rhs_map.items())

        from neurounits.misc import SeqUtils
        try:
            # Locate the default regime (name is None), if any;
            # filter_expect_single raises ValueError when there is none.
            default = SeqUtils.filter_expect_single(
                rhs_functors.keys(), lambda r: r.name is None)
            assert None not in rhs_functors
            rhs_functors[None] = rhs_functors[default]
        except ValueError:
            pass

        def f3(state_data, **kw):
            # Dispatch on the current regime of this RT-graph; regimes
            # without an explicit RHS use the default (None) functor.
            regime_states = state_data.rt_regimes
            curr_state = regime_states[rt_graph]
            if curr_state in rhs_functors:
                rhs_functor = rhs_functors[curr_state]
            else:
                rhs_functor = rhs_functors[None]

            return rhs_functor(state_data=state_data, **kw)

        return f3
def _build_gate_alphabetainftau(g, q10tempadjustmentName, neuroml_dt):
    """Build NeuroUnits equation strings for a single HH-style gate.

    Expects a gate *g* with exactly one open state, one closed state and
    two transitions named 'alpha' and 'beta'.  Returns a list of six
    equation strings: alpha rate, beta rate, tau, inf, the first-order
    state equation, and the initial condition.

    NOTE(review): this redefines (and therefore shadows) the identical
    function at the top of this file — the two copies should be
    de-duplicated.

    :param g: gate object (reads .name, .openstates, .closedstates,
        .transitions, .time_courses, .steady_states).
    :param q10tempadjustmentName: symbol of the Q10 rate-adjustment term.
    :param neuroml_dt: unit-carrying time term used to make NeuroML's
        dimensionless expressions unit-consistent.
    :raises NeuroUnitsImportNeuroMLNotImplementedException: for any other
        gate/transition layout.
    """
    if len(g.openstates) != 1 or len(g.closedstates) != 1 or len(
            g.transitions) != 2:
        raise NeuroUnitsImportNeuroMLNotImplementedException(
            'Non-Standard Gate/Transtions setup')

    state_name = g.name
    alphaTr = SeqUtils.filter_expect_single(g.transitions,
                                            lambda s: s.name == "alpha")
    betaTr = SeqUtils.filter_expect_single(g.transitions,
                                           lambda s: s.name == "beta")
    # Derived term names: <gate>_alpha, <gate>_beta, <gate>_inf, <gate>_tau
    term_name_alpha = "%s_%s" % (state_name, 'alpha')
    term_name_beta = "%s_%s" % (state_name, 'beta')
    term_name_inf = "%s_%s" % (state_name, 'inf')
    term_name_tau = "%s_%s" % (state_name, 'tau')

    def remap_gate_eqn(s):

        # Remap Alpha/Beta terms into the appropriate Units
        # when they are used in inf/tau equations:
        alpha_repl = "((%s) * (%s)  )" % (term_name_alpha, neuroml_dt)
        beta_repl = "((%s) * (%s)  )" % (term_name_beta, neuroml_dt)
        s = s.replace('alpha', alpha_repl).replace('beta', beta_repl)

        # Remap voltage terms to __VGATE__ (since they might be subject
        # to offset later on):
        s = re.sub(r"""\bV\b""", '__VGate__', s)
        s = re.sub(r"""\bv\b""", '__VGate__', s)
        return s

    # Write the alpha/beta terms (divided by dt to give them rate units):
    e1 = '%s =  (%s) * (1/%s)' % (term_name_alpha,
                                  remap_gate_eqn(alphaTr.getEqn()), neuroml_dt)
    e2 = '%s =  (%s) * (1/%s)' % (term_name_beta,
                                  remap_gate_eqn(betaTr.getEqn()), neuroml_dt)

    # Time courses should always be divided by rate_adjustment term!
    if len(g.time_courses) != 0:
        tc = SeqUtils.expect_single(g.time_courses)
        tc_eqn = '%s =  %s * (1/%s) *(%s)' % (
            term_name_tau, remap_gate_eqn(
                tc.getEqn()), q10tempadjustmentName, neuroml_dt)
    else:
        # No explicit time-course: tau = 1 / (Q10 * (alpha + beta))
        tc_eqn = '%s =  1/(%s* (%s+%s))' % (term_name_tau,
                                            q10tempadjustmentName,
                                            term_name_alpha, term_name_beta)

    if len(g.steady_states) != 0:
        ss = SeqUtils.expect_single(g.steady_states)
        ss_eqn = '%s =  %s' % (term_name_inf, remap_gate_eqn(ss.getEqn()))
    else:
        # No explicit steady-state: inf = alpha / (alpha + beta)
        ss_eqn = '%s =  %s/(%s+%s)' % (term_name_inf, term_name_alpha,
                                       term_name_alpha, term_name_beta)

    # The state-equations (first-order relaxation towards inf):
    state_eqn = "%s' = (%s-%s)/(%s) " % (state_name, term_name_inf, state_name,
                                         term_name_tau)

    # Set the initial value of the state-variable to be the same
    # as the steady-state value:
    initial_cond = "<=> INITIAL %s:%s" % (state_name, term_name_inf)

    return [e1, e2, tc_eqn, ss_eqn, state_eqn, initial_cond]