def extract_spike_variable(description):
    "Extracts the spike condition, its dependencies and the reset statements of a spiking neuron."
    conditions = prepare_string(description['raw_spike'])
    if len(conditions) > 1:
        _print(description['raw_spike'])
        _error('The spike condition must be a single expression')

    translator = Equation('raw_spike_cond', conditions[0].strip(), description)
    raw_spike_code = translator.parse()
    # Also store the variables used in the condition, as it may be needed for CUDA generation
    spike_code_dependencies = translator.dependencies()

    reset_desc = []
    if description.get('raw_reset'):
        reset_desc = process_equations(description['raw_reset'])
        for var in reset_desc:
            eq_translator = Equation(var['name'], var['eq'], description)
            var['cpp'] = eq_translator.parse()
            var['dependencies'] = eq_translator.dependencies()

    return {
        'spike_cond': raw_spike_code,
        'spike_cond_dependencies': spike_code_dependencies,
        'spike_reset': reset_desc,
    }
def eventdriven(self, expression):
    """Generates event-driven C code for a linear first-order ODE.

    The equation is first standardized into tau*dV/dt + V = S; the exact
    solution between two events is then emitted as C code.
    """
    # Standardize the equation
    real_tau, stepsize, steadystate = self.standardize_ODE(expression)

    # fix: comparing a sympy expression with `== None` triggers symbolic
    # equality and is always False; identity comparison is required.
    if real_tau is None: # the equation can not be standardized
        _print(self.expression)
        _error('The equation is not a linear ODE and can not be evaluated exactly.')
        exit(0)

    # Check the steady state is not dependent on other variables
    for var in self.variables:
        if self.local_dict[var] in steadystate.atoms():
            _print(self.expression)
            _error('The equation can not depend on other variables (' + var + ') to be evaluated exactly.')
            exit(0)

    # Obtain C code
    variable_name = self.c_code(self.local_dict[self.name])
    steady = self.c_code(steadystate)
    if steady == '0':
        code = variable_name + '*= exp(dt*(_last_event[i][j] - (t))/(' + self.c_code(real_tau) + '));'
    else:
        code = variable_name + ' = ' + steady + ' + (' + variable_name + ' - ' + steady + ')*exp(dt*(_last_event[i][j] - (t))/(' + self.c_code(real_tau) + '));'
    return code
def extract_randomdist(description):
    """Extracts RandomDistribution objects from all variables.

    Each occurrence of a known distribution (e.g. Uniform(0, 1)) in an
    equation is replaced by a temporary name rand_N, and a descriptor is
    returned for each of them. The transformed equation is stored in
    variable['transformed_eq'].
    """
    rk_rand = 0
    random_objects = []
    for variable in description['variables']:
        eq = variable['eq']
        # Search for all distributions
        for dist in available_distributions:
            matches = re.findall('(?P<pre>[^\w.])'+dist+'\(([^()]+)\)', eq)
            # fix: the original compared the list `matches` to the string ' '
            # (always False); test emptiness instead.
            if not matches:
                continue
            for l, v in matches:
                # Check the arguments
                arguments = v.split(',')
                # Check the number of provided arguments
                if len(arguments) < distributions_arguments[dist]:
                    _print(eq)
                    # fix: missing space before 'parameters' in the message
                    _error('The distribution ' + dist + ' requires ' + str(distributions_arguments[dist]) + ' parameters')
                elif len(arguments) > distributions_arguments[dist]:
                    _print(eq)
                    _error('Too many parameters provided to the distribution ' + dist)
                # Process the arguments
                processed_arguments = ""
                for idx in range(len(arguments)):
                    try:
                        arg = float(arguments[idx])
                    except:
                        # A global parameter (both neuron and synapse objects
                        # use the parameter name verbatim)
                        if arguments[idx].strip() in description['global']:
                            arg = arguments[idx].strip()
                        else:
                            _error(arguments[idx] + ' is not a global parameter of the neuron/synapse. It can not be used as an argument to the random distribution ' + dist + '(' + v + ')')
                    processed_arguments += str(arg)
                    if idx != len(arguments)-1: # not the last one
                        processed_arguments += ', '
                definition = distributions_equivalents[dist] + '(' + processed_arguments + ')'
                # Store its definition
                desc = {
                    'name': 'rand_' + str(rk_rand),
                    'dist': dist,
                    'definition': definition,
                    'args': processed_arguments,
                    'template': distributions_equivalents[dist],
                    'locality': variable['locality'],
                }
                rk_rand += 1
                random_objects.append(desc)
                # Replace its definition by its temporary name
                # Problem: when one uses twice the same RD in a single equation (perverse...)
                eq = eq.replace(dist+'('+v+')', desc['name'])
                # Add the new variable to the vocabulary
                description['attributes'].append(desc['name'])
                if variable['name'] in description['local']:
                    description['local'].append(desc['name'])
                else:
                    # Why not on a population-wide variable?
                    description['global'].append(desc['name'])
        variable['transformed_eq'] = eq
    return random_objects
def idx_target(val):
    "Replaces post.sum(target) by a temporary name and records the dependency."
    target = val.group(1).strip()
    if not target:
        _print(eq)
        _error('post.sum() requires one argument.')
        exit(0)
    # Register the dependency on the post-synaptic weighted sum
    dependencies['post'].append('sum(' + target + ')')
    placeholder = '_post_sum_' + target
    untouched[placeholder] = '%(post_prefix)s_sum_' + target + '%(post_index)s'
    return placeholder
def idx_target(val):
    "Replaces pre.sum(target) by a temporary name and records the dependency."
    target = val.group(1).strip()
    if not target:
        _print(eq)
        _error('pre.sum() requires one argument.')
        exit(0)
    placeholder = '_pre_sum_' + target
    # Register the dependency on the pre-synaptic weighted sum
    dependencies['pre'].append('sum(' + target + ')')
    untouched[placeholder] = '_sum_' + target + '%(pre_index)s'
    return placeholder
def parse_expression(self, expression, local_dict):
    "Parses a string with respect to the vocabulary defined in local_dict."
    # ^ is interpreted as exponentiation through convert_xor
    transformations = standard_transformations + (convert_xor,)
    try:
        parsed = parse_expr(transform_condition(expression),
                            local_dict=local_dict,
                            transformations=transformations)
    except Exception as e:
        _print(e)
        _error('Can not analyse the expression :' + str(expression))
        exit(0)
    return parsed
def parse_expression(self, expression, local_dict):
    "Parses a string with respect to the vocabulary defined in local_dict."
    try:
        result = parse_expr(
            transform_condition(expression),
            local_dict=local_dict,
            # ^ is interpreted as exponentiation through convert_xor
            transformations=(standard_transformations + (convert_xor,)),
        )
    except Exception as err:
        _print(err)
        _error('Can not analyse the expression :' + str(expression))
        exit(0)
    else:
        return result
def parse(self):
    """Analyses the equation according to its type and returns the generated code.

    Returns None if the analysis fails and _error() does not abort.
    """
    # fix: `code` was referenced before assignment when self.type matched no
    # branch (or when the except path fell through), raising NameError.
    code = None
    try:
        if self.type == 'ODE':
            code = self.analyse_ODE(self.expression)
        elif self.type == 'cond':
            code = self.analyse_condition(self.expression)
        elif self.type == 'inc':
            code = self.analyse_increment(self.expression)
        elif self.type == 'return':
            code = self.analyse_return(self.expression)
        elif self.type == 'simple':
            code = self.analyse_assignment(self.expression)
        else:
            # Previously this silently fell through and crashed at `return code`
            _error('can not analyse', self.expression)
    except Exception as e:
        _print(e)
        _error('can not analyse', self.expression)
    return code
def extract_targets(variables):
    "Returns the list of all targets used in the equations (sum(target) or g_target)."
    sum_pattern = re.compile('(?P<pre>[^\w.])sum\(\s*([^()]+)\s*\)')
    conductance_pattern = re.compile('([^\w.])g_([\w]+)')
    targets = []
    for var in variables:
        # Rate-coded neurons: sum(target)
        for _, target in sum_pattern.findall(var['eq']):
            if target.strip() == '':
                _print(var['eq'])
                _error('sum() must have one argument.')
                exit(0)
            targets.append(target.strip())
        # Spiking neurons: g_target
        for _, target in conductance_pattern.findall(var['eq']):
            targets.append(target.strip())
    # Remove duplicates
    return list(set(targets))
def report(filename="./report.tex", standalone=True, gather_subprojections=False, net_id=0):
    """
    Generates a .tex file describing the network according to:

    Nordlie E, Gewaltig M-O, Plesser HE (2009). Towards Reproducible Descriptions of Neuronal Network Models. PLoS Comput Biol 5(8): e1000456.

    **Parameters:**

    * *filename*: name of the .tex file where the report will be written (default: "./report.tex")
    * *standalone*: tells if the generated file should be directly compilable or only includable (default: True)
    * *gather_subprojections*: if a projection between two populations has been implemented as a multiple of projections between sub-populations, this flag allows to group them in the summary (default: False).
    * *net_id*: id of the network to be used for reporting (default: 0, everything that was declared)
    """
    # stdout
    _print('Generating report in', filename)

    # Generate every section of the report, in document order
    sections = [
        _generate_summary(net_id),
        _generate_populations(net_id),
        _generate_projections(net_id, gather_subprojections),
        _generate_neuron_models(net_id),
        _generate_synapse_models(net_id),
        _generate_population_parameters(net_id),
        _generate_projection_parameters(net_id, gather_subprojections),
    ]

    with open(filename, 'w') as tex:
        # The LaTeX header/footer are only needed for a compilable document
        if standalone:
            tex.write(header)
        tex.write(preamble)
        for section in sections:
            tex.write(section)
        if standalone:
            tex.write(footer)
def extract_spike_variable(description):
    "Extracts the spike condition and the reset equations of a spiking neuron."
    cond = prepare_string(description['raw_spike'])
    if len(cond) > 1:
        _error('The spike condition must be a single expression')
        _print(description['raw_spike'])
        exit(0)

    # Translate the condition into C++ code
    raw_spike_code = Equation('raw_spike_cond', cond[0].strip(), description).parse()

    # Translate each reset equation, if any
    reset_desc = []
    if description.get('raw_reset'):
        reset_desc = process_equations(description['raw_reset'])
        for var in reset_desc:
            var['cpp'] = Equation(var['name'], var['eq'], description).parse()

    return {'spike_cond': raw_spike_code, 'spike_reset': reset_desc}
def implicit(self, expression):
    "Full implicit method, linearising for example (V - E)^2, but this is not desired."
    next_name = '_' + self.name
    # Turn the gradient into a finite difference (_V - V)  TODO: more robust...
    transformed = expression.replace('d' + self.name, '_t_gradient_')
    transformed = re.sub(r'([^\w]+)' + self.name + r'([^\w]+)',
                         r'\1_' + self.name + r'\2', transformed)
    transformed = transformed.replace('_t_gradient_',
                                      '(' + next_name + ' - ' + self.name + ')')

    # Register a symbol for the next value of the variable
    new_var = Symbol(next_name)
    self.local_dict[next_name] = new_var

    # Parse the transformed expression
    analysed = parse_expr(transformed,
                          local_dict=self.local_dict,
                          transformations=(standard_transformations + (convert_xor,)))
    self.analysed = analysed

    # Solve for the next value of the variable
    solutions = solve(analysed, new_var)
    if len(solutions) > 1:
        _print(self.expression)
        _error('the ODE is not linear, can not use the implicit method.')
        exit(0)
    equation = simplify(collect(solutions[0], self.local_dict['dt']))

    # Obtain C code
    variable_name = self.c_code(self.local_dict[self.name])
    explicit_code = 'double ' + next_name + ' = ' + self.c_code(equation) + ';'
    switch = variable_name + ' = ' + next_name + ' ;'
    return [explicit_code, switch]
def _analyse_equation(orig, eq, local_dict, tex_dict):
    "Converts one equation to LaTeX, expanding augmented assignments (+=, -=, *=, /=)."
    parts = eq.split('=')
    left = parts[0]
    if left[-1] in ['+', '-', '*', '/']:
        # Augmented assignment: r += x  becomes  r = r + x
        aug = left[-1]
        try:
            left = _analyse_part(left[:-1], local_dict, tex_dict)
        except Exception as e:
            _print(e)
            _warning('can not transform the left side of ' + orig +' to LaTeX, you have to do it by hand...')
            left = left[:-1]
        # *= and /= wrap the right side in parentheses
        operator = " = " + left + " " + aug + (" (" if aug != '+' else '')
    else:
        try:
            left = _analyse_part(left, local_dict, tex_dict)
        except Exception as e:
            _print(e)
            _warning('can not transform the left side of ' + orig +' to LaTeX, you have to do it by hand...')
        operator = " = "
    try:
        right = _analyse_part(parts[1], local_dict, tex_dict)
    except Exception as e:
        _print(e)
        _warning('can not transform the right side of ' + orig +' to LaTeX, you have to do it by hand...')
        right = parts[1]
    closing = " )" if operator.endswith('(') else ""
    return left + operator + right + closing
def implicit(self, expression):
    "Full implicit method, linearising for example (V - E)^2, but this is not desired."
    # Transform the gradient into a difference  TODO: more robust...
    expr = expression.replace('d' + self.name, '_t_gradient_')
    expr = re.sub(r'([^\w]+)' + self.name + r'([^\w]+)',
                  r'\1_' + self.name + r'\2', expr)
    expr = expr.replace('_t_gradient_', '(_' + self.name + ' - ' + self.name + ')')

    # Add a symbol for the next value of the variable
    future = Symbol('_' + self.name)
    self.local_dict['_' + self.name] = future

    # Parse the string
    self.analysed = parse_expr(expr,
                               local_dict=self.local_dict,
                               transformations=(standard_transformations + (convert_xor,)))

    # Solve the equation for the next value
    candidates = solve(self.analysed, future)
    if len(candidates) > 1:
        _print(self.expression)
        _error('the ODE is not linear, can not use the implicit method.')
        exit(0)
    update = simplify(collect(candidates[0], self.local_dict['dt']))

    # Obtain C code: an intermediate value plus the final switch
    variable_name = self.c_code(self.local_dict[self.name])
    explicit_code = 'double _' + self.name + ' = ' + self.c_code(update) + ';'
    switch = variable_name + ' = _' + self.name + ' ;'
    return [explicit_code, switch]
def extract_spike_variable(description):
    "Extracts the spike condition and the reset statements of a spiking neuron."
    cond = prepare_string(description['raw_spike'])
    if len(cond) > 1:
        _error('The spike condition must be a single expression')
        _print(description['raw_spike'])
        exit(0)

    translator = Equation('raw_spike_cond', cond[0].strip(), description)
    raw_spike_code = translator.parse()

    reset_desc = []
    has_reset = 'raw_reset' in description.keys() and description['raw_reset']
    if has_reset:
        reset_desc = process_equations(description['raw_reset'])
        for var in reset_desc:
            translator = Equation(var['name'], var['eq'], description)
            var['cpp'] = translator.parse()

    return {
        'spike_cond': raw_spike_code,
        'spike_reset': reset_desc,
    }
def extract_globalops_neuron(name, eq, description):
    """
    Replaces global operations (mean(r), etc) with arbitrary names and
    returns a dictionary of changes.
    """
    untouched = {}
    globs = []
    # Supported global operations
    operations = ['min', 'max', 'mean', 'norm1', 'norm2']
    for op in operations:
        for pre, var in re.findall('([^\w]*)'+op+'\(([\s\w]*)\)', eq):
            stripped = var.strip()
            if stripped in description['local']:
                globs.append({'function': op, 'variable': stripped})
                # Replace e.g. mean(r) by the placeholder _mean_r
                newname = '_' + op + '_' + stripped
                eq = eq.replace(op + '(' + var + ')', newname)
                untouched[newname] = '_' + op + '_' + stripped
            else:
                _print(eq)
                _error('There is no local attribute '+var+'.')
    return eq, untouched, globs
def eventdriven(self, expression):
    """Generates the exact event-driven update code for a linear first-order ODE."""
    # Standardize the equation into tau*dV/dt + V = S
    real_tau, stepsize, steadystate = self.standardize_ODE(expression)

    # fix: `real_tau == None` builds a symbolic equality on a sympy object
    # (always falsy); identity comparison is the correct check.
    if real_tau is None: # the equation can not be standardized
        _print(self.expression)
        _error('The equation is not a linear ODE and can not be evaluated exactly.')
        exit(0)

    # Check the steady state is not dependent on other variables
    for var in self.variables:
        if self.local_dict[var] in steadystate.atoms():
            _print(self.expression)
            _error('The equation can not depend on other variables ('+var+') to be evaluated exactly.')
            exit(0)

    # Obtain C code
    variable_name = self.c_code(self.local_dict[self.name])
    steady = self.c_code(steadystate)
    # Exponential decay since the last event
    decay = 'exp(dt*(_last_event[i][j] - (t))/(' + self.c_code(real_tau) + '));'
    if steady == '0':
        code = variable_name + '*= ' + decay
    else:
        code = variable_name + ' = ' + steady + ' + (' + variable_name + ' - ' + steady + ')*' + decay
    return code
def process_equations(equations):
    """
    Takes a multi-string describing equations and returns a list of dictionaries, where:

    * 'name' is the name of the variable
    * 'eq' is the equation
    * 'constraints' is all the constraints given after the last :. _extract_flags() should be called on it.

    Warning: one equation can now be on multiple lines, without needing the ... newline symbol.

    TODO: should this be used for other arguments as equations? pre_event and so on
    """
    def is_constraint(eq):
        " Internal method to determine if a string contains reserved keywords."
        eq = ',' + eq.replace(' ', '') + ','
        for key in authorized_keywords:
            pattern = '([,]+)' + key + '([=,]+)'
            if re.match(pattern, eq):
                return True
        return False

    # All equations will be stored there, in the order of their definition
    variables = []
    try:
        equations = equations.replace(';', '\n').split('\n')
    except: # equations is empty
        return variables

    # Iterate over all lines
    for line in equations:
        # Skip empty lines
        definition = line.strip()
        if definition == '':
            continue
        # Remove comments
        com = definition.split('#')
        if len(com) > 1:
            definition = com[0]
            if definition.strip() == '':
                continue
        # Process the line
        try:
            equation, constraint = definition.rsplit(':', 1)
        except ValueError: # There is no :, only the equation is concerned
            # fix: use `definition` (comment-stripped) instead of the raw
            # `line`, otherwise trailing comments leaked into the equation
            equation = definition
            constraint = ''
        else: # there is a :
            # Check if the constraint contains the reserved keywords
            has_constraint = is_constraint(constraint)
            # If the right part of : is a constraint, just store it
            # Otherwise, it is an if-then-else statement
            if has_constraint:
                equation = equation.strip()
                constraint = constraint.strip()
            else:
                equation = definition.strip() # there are no constraints
                constraint = ''
        # Split the equation around operators = += -= *= /=, but not ==
        split_operators = re.findall('([\s\w\+\-\*\/\)]+)=([^=])', equation)
        if len(split_operators) == 1: # definition of a new variable
            # Retrieve the name
            eq = split_operators[0][0]
            if eq.strip() == "":
                # fix: _print shows the offending equation, _error the message
                # (the arguments were swapped), consistent with other call sites
                _print(equation)
                _error('The equation can not be analysed, check the syntax.')
                exit(0)
            name = extract_name(eq, left=True)
            if name in ['_undefined', '']:
                _error('No variable name can be found in ' + equation)
                exit(0)
            # Append the result
            variables.append({
                'name': name,
                'eq': equation.strip(),
                'constraint': constraint.strip()
            })
        elif len(split_operators) == 0:
            # Continuation of the equation on a new line: append the equation to the previous variable
            variables[-1]['eq'] += ' ' + equation.strip()
            variables[-1]['constraint'] += constraint
        else:
            _print('Error: only one assignement operator is allowed per equation.')
            exit(0)
    return variables
def standardize_ODE(self, expression):
    """
    Transform any first-order ODE into the standardized form:

    tau * dV/dt + V = S

    Non-linear functions of V are left in the steady-state argument.

    Returns:

    * tau : the time constant associated to the standardized equation.
    * stepsize: a simplified version of dt/tau.
    * steadystate: the right term of the equation after standardization.
    """
    # Replace the gradient dV/dt with a temporary symbol
    expression = expression.replace('d' + self.name + '/dt', '_gradvar_') # TODO: robust to spaces
    grad_var = Symbol('_gradvar_')

    # Parse the string
    analysed = self.parse_expression(expression, local_dict=self.local_dict)
    self.analysed = analysed

    # Expand in order to read off the factors in A*dV/dt + B*V = C
    expanded = analysed.expand(modulus=None, power_base=False, power_exp=False,
                               mul=True, log=False, multinomial=False)

    # Make sure the expansion went well
    collected_var = collect(expanded, self.local_dict[self.name],
                            evaluate=False, exact=False)
    if self.method == 'exponential':
        # The exponential method only makes sense for linear first-order ODEs
        if not self.local_dict[self.name] in collected_var.keys() or len(collected_var) > 2:
            _print(self.expression)
            _error('The exponential method is reserved for linear first-order ODEs of the type tau*d'+ self.name+'/dt + '+self.name+' = f(t). Use the explicit method instead.')
            exit(0)
    factor_var = collected_var[self.local_dict[self.name]]

    collected_gradient = collect(expand(analysed, grad_var), grad_var,
                                 evaluate=False, exact=True)
    factor_gradient = collected_gradient.get(grad_var, S(1.0))

    # Real time constant when using the form tau*dV/dt + V = A
    real_tau = factor_gradient / factor_var

    # Normalized equation tau*dV/dt + V = A
    normalized = analysed / factor_var

    # Steady state A
    steadystate = together(real_tau * grad_var + self.local_dict[self.name] - normalized)

    # Stepsize dt/tau
    stepsize = together(self.local_dict['dt'] / real_tau)

    return real_tau, stepsize, steadystate
def standardize_ODE(self, expression):
    """
    Transform any first-order ODE into the standardized form:

    tau * dV/dt + V = S

    Non-linear functions of V are left in the steady-state argument.

    Returns:

    * tau: the time constant associated to the standardized equation.
    * stepsize: a simplified version of dt/tau.
    * steadystate: the right term of the equation after standardization.
    """
    var_symbol = self.local_dict[self.name]

    # Replace the gradient dV/dt with a temporary symbol
    expression = expression.replace('d' + self.name + '/dt', '_gradvar_') # TODO: robust to spaces
    grad_var = Symbol('_gradvar_')

    # Parse the string
    analysed = self.parse_expression(expression, local_dict=self.local_dict)
    self.analysed = analysed

    # Collect factors on the gradient and main variable: A*dV/dt + B*V = C
    expanded = analysed.expand(modulus=None, power_base=False, power_exp=False,
                               mul=True, log=False, multinomial=False)
    collected_var = collect(expanded, var_symbol, evaluate=False, exact=False)

    # The exponential method is restricted to linear first-order ODEs
    if self.method == 'exponential':
        if not var_symbol in collected_var.keys() or len(collected_var) > 2:
            _print(self.expression)
            _error('The exponential method is reserved for linear first-order ODEs of the type tau*d' + self.name + '/dt + ' + self.name + ' = f(t). Use the explicit method instead.')
            exit(0)
    factor_var = collected_var[var_symbol]

    collected_gradient = collect(expand(analysed, grad_var), grad_var,
                                 evaluate=False, exact=True)
    if grad_var in collected_gradient.keys():
        factor_gradient = collected_gradient[grad_var]
    else:
        factor_gradient = S(1.0)

    # Real time constant when using the form tau*dV/dt + V = A
    real_tau = factor_gradient / factor_var
    # Normalized equation tau*dV/dt + V = A
    normalized = analysed / factor_var
    # Steady state A
    steadystate = together(real_tau * grad_var + var_symbol - normalized)
    # Stepsize dt/tau
    stepsize = together(self.local_dict['dt'] / real_tau)

    return real_tau, stepsize, steadystate
def extract_structural_plasticity(statement, description):
    """Analyses a creating/pruning statement for structural plasticity.

    Returns a dictionary with the original equation, the generated C++
    condition, the bounds/flags, and the random distribution (if any).
    """
    # Extract flags
    try:
        eq, constraint = statement.rsplit(':', 1)
        bounds, flags = extract_flags(constraint)
    except:
        eq = statement.strip()
        bounds = {}
        flags = []

    # Extract RD
    rd = None
    for dist in available_distributions:
        matches = re.findall('(?P<pre>[^\w.])'+dist+'\(([^()]+)\)', eq)
        for l, v in matches:
            # Check the arguments
            arguments = v.split(',')
            # Check the number of provided arguments
            if len(arguments) < distributions_arguments[dist]:
                _print(eq)
                # fix: missing space before 'parameters' in the message
                _error('The distribution ' + dist + ' requires ' + str(distributions_arguments[dist]) + ' parameters')
            elif len(arguments) > distributions_arguments[dist]:
                _print(eq)
                _error('Too many parameters provided to the distribution ' + dist)
            # Process the arguments
            processed_arguments = ""
            for idx in range(len(arguments)):
                try:
                    arg = float(arguments[idx])
                except:
                    # A global parameter
                    _print(eq)
                    # fix: typo 'foxed' -> 'fixed'
                    _error('Random distributions for creating/pruning synapses must use fixed values.')
                processed_arguments += str(arg)
                if idx != len(arguments)-1: # not the last one
                    processed_arguments += ', '
            definition = distributions_equivalents[dist] + '(' + processed_arguments + ')'
            # Store its definition
            if rd:
                _print(eq)
                _error('Only one random distribution per equation is allowed.')
            rd = {
                'name': 'rand_' + str(0),
                'origin': dist+'('+v+')',
                'dist': dist,
                'definition': definition,
                'args': processed_arguments,
                'template': distributions_equivalents[dist],
            }
    if rd:
        eq = eq.replace(rd['origin'], 'rd(rng)')

    # Extract pre/post dependencies
    eq, untouched, dependencies = extract_prepost('test', eq, description)

    # Parse code
    translator = Equation('test', eq, description, method='cond', untouched={})
    code = translator.parse()

    # Replace untouched variables with their original name
    for prev, new in untouched.items():
        code = code.replace(prev, new)

    # Add new dependencies
    for dep in dependencies['pre']:
        description['dependencies']['pre'].append(dep)
    for dep in dependencies['post']:
        description['dependencies']['post'].append(dep)

    return {'eq': eq, 'cpp': code, 'bounds': bounds, 'flags': flags, 'rd': rd}
def check_equation(equation):
    "Makes a formal check on the equation (matching parentheses, etc)"
    # Matching parentheses
    opening = equation.count('(')
    closing = equation.count(')')
    if opening != closing:
        _print(equation)
        _error('The number of parentheses does not match.')
def extract_ite(name, eq, description, split=True):
    """
    Extracts if-then-else statements and processes them.

    If-then-else statements must be of the form:

    .. code-block:: python

        variable = if condition: ... val1 ... else: ... val2

    Conditional statements can be nested, but they should return only one value!
    """
    def tokenize(code):
        "Flattens the conditional into a list of 'if cond' / value / 'else' tokens."
        items = [chunk.strip() for chunk in code.split(':')]
        tokens = []
        for item in items:
            if item.startswith('if '):
                tokens.append(item)
            elif item.endswith('else'):
                tokens.append(item.split('else')[0].strip())
                tokens.append('else')
            else: # the last then
                tokens.append(item)
        return tokens

    def parse(lines):
        "Recursive analysis of if-else statements."
        result = []
        while lines:
            if lines[0].startswith('if'):
                block = [lines.pop(0).split('if')[1], parse(lines)]
                if lines[0].startswith('else'):
                    lines.pop(0)
                    block.append(parse(lines))
                result.append(block)
            elif not lines[0].startswith(('else')):
                result.append(lines.pop(0))
            else:
                break
        return result[0]

    # If no if, not a conditional
    if not 'if ' in eq:
        return eq, []

    condition = []
    # Eventually split around =
    if split:
        left, right = eq.split('=', 1)
    else:
        left, right = '', eq

    nb_then = len(re.findall(':', right))
    nb_else = len(re.findall('else', right))
    # The equation contains a conditional statement
    if nb_then > 0:
        # A if must be right after the equal sign
        if not right.strip().startswith('if'):
            _error(eq, '\nThe right term must directly start with a if statement.')
        # It must have the same number of : and of else
        if not nb_then == 2*nb_else:
            _error(eq, '\nConditional statements must use both : and else.')
        condition = parse(tokenize(right))
        right = ' __conditional__0 ' # only one conditional allowed in that case
        eq = left + '=' + right if split else right
    else:
        _print(eq)
        _error('Conditional statements must define "then" and "else" values.\n var = if condition: a else: b')
    return eq, [condition]
def extract_ite(name, eq, description, split=True):
    """
    Extracts if-then-else statements and processes them.

    If-then-else statements must be of the form:

    .. code-block:: python

        variable = if condition: ... val1 ... else: ... val2

    Conditional statements can be nested, but they should return only one value!
    """
    def transform(code):
        " Transforms the code into a list of lines."
        res = []
        items = []
        for arg in code.split(':'):
            items.append(arg.strip())
        for i in range(len(items)):
            if items[i].startswith('if '):
                res.append(items[i].strip())
            elif items[i].endswith('else'):
                res.append(items[i].split('else')[0].strip())
                res.append('else')
            else: # the last then
                res.append(items[i].strip())
        return res

    def parse(lines):
        " Recursive analysis of if-else statements"
        result = []
        while lines:
            if lines[0].startswith('if'):
                block = [lines.pop(0).split('if')[1], parse(lines)]
                if lines[0].startswith('else'):
                    lines.pop(0)
                    block.append(parse(lines))
                result.append(block)
            elif not lines[0].startswith('else'):
                result.append(lines.pop(0))
            else:
                break
        return result[0]

    # Not a conditional at all
    if 'if ' not in eq:
        return eq, []

    condition = []
    # Eventually split around =
    if split:
        left, right = eq.split('=', 1)
    else:
        left = ''
        right = eq

    # Count ':' and 'else' occurrences (they must come in pairs of 2:1)
    nb_then = right.count(':')
    nb_else = right.count('else')

    if nb_then == 0:
        _print(eq)
        _error('Conditional statements must define "then" and "else" values.\n var = if condition: a else: b')
    else:
        # A if must be right after the equal sign
        if not right.strip().startswith('if'):
            _error(eq, '\nThe right term must directly start with a if statement.')
        # It must have the same number of : and of else
        if nb_then != 2 * nb_else:
            _error(eq, '\nConditional statements must use both : and else.')
        multilined = transform(right)
        condition = parse(multilined)
        right = ' __conditional__0 ' # only one conditional allowed in that case
        if split:
            eq = left + '=' + right
        else:
            eq = right
    return eq, [condition]
def process_equations(equations):
    """
    Takes a multi-string describing equations and returns a list of dictionaries, where:

    * 'name' is the name of the variable
    * 'eq' is the equation
    * 'constraints' is all the constraints given after the last :. _extract_flags() should be called on it.

    Warning: one equation can now be on multiple lines, without needing the ... newline symbol.

    TODO: should this be used for other arguments as equations? pre_event and so on
    """
    def is_constraint(eq):
        " Internal method to determine if a string contains reserved keywords."
        candidate = ',' + eq.replace(' ', '') + ','
        return any(re.match('([,]+)' + key + '([=,]+)', candidate)
                   for key in authorized_keywords)

    # All equations, in the order of their definition
    variables = []
    try:
        lines = equations.replace(';', '\n').split('\n')
    except: # equations is empty
        return variables

    for line in lines:
        # Skip empty lines
        definition = line.strip()
        if not definition:
            continue
        # Strip comments
        com = definition.split('#')
        if len(com) > 1:
            definition = com[0]
            if not definition.strip():
                continue
        # Separate the equation from a possible constraint
        try:
            equation, constraint = definition.rsplit(':', 1)
        except ValueError: # no ':' at all, only an equation
            equation = definition
            constraint = ''
        else:
            # If the right part of : is a constraint, just store it.
            # Otherwise the ':' belongs to an if-then-else statement.
            if is_constraint(constraint):
                equation = equation.strip()
                constraint = constraint.strip()
            else:
                equation = definition.strip() # there are no constraints
                constraint = ''
        # Split the equation around operators = += -= *= /=, but not ==
        assignments = re.findall('([\s\w\+\-\*\/\)]+)=([^=])', equation)
        if len(assignments) == 1: # definition of a new variable
            lhs = assignments[0][0]
            if lhs.strip() == "":
                _print(equation)
                _error('The equation can not be analysed, check the syntax.')
            name = extract_name(lhs, left=True)
            if name in ['_undefined', '']:
                _error('No variable name can be found in ' + equation)
            # Store the parsed variable
            variables.append({
                'name': name,
                'eq': equation.strip(),
                'constraint': constraint.strip()
            })
        elif len(assignments) == 0:
            # Continuation of the previous equation on a new line
            variables[-1]['eq'] += ' ' + equation.strip()
            variables[-1]['constraint'] += constraint
        else:
            _error('Only one assignement operator is allowed per equation.')
    return variables