class Max(NumpyAggregate):
    """Aggregate function computing the maximum of its argument."""
    np_func = np.amax
    # nan-aware variant (bottleneck's nanmax) used when NaN handling is needed
    nan_func = (nanmax, )
    # result dtype is the dtype of the first argument
    dtype = firstarg_dtype
    # manually defined argspec so that it works with bottleneck (which is a
    # builtin function)
    argspec = argspec('a, axis=None', **NumpyAggregate.kwonlyargs)
class Where(NumexprFunction):
    """where(cond, iftrue, iffalse): element-wise conditional expression.

    Maps to numexpr's 'if' function. When simplified, the context filter is
    narrowed with the condition (and its negation) so that each branch is
    only evaluated where it applies.
    """
    funcname = 'if'
    argspec = argspec('cond, iftrue, iffalse')

    @property
    def cond(self):
        # condition expression (first positional argument)
        return self.args[0]

    @property
    def iftrue(self):
        # expression evaluated where the condition holds
        return self.args[1]

    @property
    def iffalse(self):
        # expression evaluated where the condition does not hold
        return self.args[2]

    def as_simple_expr(self, context):
        cond = as_simple_expr(self.cond, context)

        # filter is stored as an unevaluated expression
        context_filter = context.filter_expr

        local_ctx = context.clone()
        if context_filter is None:
            local_ctx.filter_expr = self.cond
        else:
            # filter = filter and cond
            local_ctx.filter_expr = LogicalOp('&', context_filter,
                                              self.cond)
        iftrue = as_simple_expr(self.iftrue, local_ctx)

        if context_filter is None:
            local_ctx.filter_expr = UnaryOp('~', self.cond)
        else:
            # filter = filter and not cond
            local_ctx.filter_expr = LogicalOp('&', context_filter,
                                              UnaryOp('~', self.cond))
        iffalse = as_simple_expr(self.iffalse, local_ctx)
        return Where(cond, iftrue, iffalse)

    def as_string(self):
        args = as_string((self.cond, self.iftrue, self.iffalse))
        return 'where(%s)' % self.format_args_str(args, [])

    def dtype(self, context):
        # the condition must evaluate to a boolean; the result type is the
        # coercion of the two branch types
        assert getdtype(self.cond, context) == bool
        return coerce_types(context, self.iftrue, self.iffalse)
class Choice(NumpyRandom):
    """Random choice among a list of outcomes, with optional probabilities.

    Unlike np.random.choice, probabilities may vary per individual (p given
    as a sequence of arrays, one per outcome).
    """
    np_func = np.random.choice
    # choice(a, size=None, replace=True, p=None)
    argspec = argspec('choices, p=None, size=None, replace=True',
                      **NumpyRandom.kwonlyargs)

    # TODO: document the change in behavior for the case where the sum of
    # probabilities is != 1
    # random.choice only checks that the error is < 1e-8 but always
    # divides probabilities by sum(p). It is probably a better choice
    # because it distributes the error to all bins instead of only
    # adjusting the probability of the last choice.

    # We override _eval_args only to change the order of arguments because we
    # do not use the same order than numpy
    def _eval_args(self, context):
        (a, p, size, replace), kwargs = NumpyRandom._eval_args(self, context)
        return (a, size, replace, p), kwargs

    def compute(self, context, a, size=None, replace=True, p=None):
        # per-individual probabilities: p is a sequence of non-scalar
        # elements (one array of probabilities per outcome)
        if isinstance(p, (list, np.ndarray)) and len(p) and not np.isscalar(p[0]):
            assert len(p) == len(a)
            assert all(len(px) == size for px in p)
            assert len(a) >= 2

            # I have not found a way to do this without an explicit loop as
            # np.digitize only supports a 1d array for bins. What we do is
            # worse than a linear "search" since we always evaluate all
            # possibilities (there is no shortcut when the value is found).
            # It might be faster to rewrite this using numba + np.digitize
            # for each individual (assuming it has a low setup overhead).

            # if isinstance(p, list) and any(isinstance(px, la.LArray) for px in p):
            #     p = [np.asarray(px) for px in p]
            ap = np.asarray(p)
            # cumulative probabilities per individual (axis 0 = outcomes)
            cdf = ap.cumsum(axis=0)

            # copied & adapted from numpy/random/mtrand/mtrand.pyx
            atol = np.sqrt(np.finfo(np.float64).eps)
            if np.issubdtype(ap.dtype, np.floating):
                atol = max(atol, np.sqrt(np.finfo(ap.dtype).eps))

            if np.any(np.abs(cdf[-1] - 1.) > atol):
                raise ValueError("probabilities do not sum to 1")

            # normalize so the last cumulative value is exactly 1, which
            # spreads any residual error across all bins
            cdf /= cdf[-1]

            # the goal is to build something like:
            # if(u < proba1, outcome1,
            #    if(u < proba2, outcome2,
            #       outcome3))
            data = {'u': np.random.uniform(size=size)}
            # innermost/default outcome is the last one
            expr = a[-1]
            # iterate in reverse and skip last
            pairs = zip(cdf[-2::-1], a[-2::-1])
            for i, (proba_x, outcome_x) in enumerate(pairs):
                data['p%d' % i] = proba_x
                expr = Where(ComparisonOp('<', Variable(None, 'u'),
                                          Variable(None, 'p%d' % i)),
                             outcome_x,
                             expr)
            local_ctx = context.clone(fresh_data=True, entity_data=data)
            return expr.evaluate(local_ctx)
        else:
            # scalar (or absent) probabilities: defer to np.random.choice
            return NumpyRandom.compute(self, context, a, size, replace, p)

    # result dtype is the dtype of the choices argument
    dtype = firstarg_dtype
class Exp(NumexprFunction):
    """exp(expr): element-wise exponential; always evaluates to float."""
    argspec = argspec('expr')
    dtype = always(float)
def parse_expressions(self, items, context, functions_only=False):
    """Parse a list of (name, definition) pairs into Process objects.

    items -- a list of tuples (name, process_string)
    context -- parsing context
               a dict of all symbols available for all entities
    functions_only -- whether non-functions processes are allowed

    Returns a list of (name, process) tuples. Raises SyntaxError/ValueError
    for deprecated or reserved constructs ('while', 'return', 'predictor').
    """
    processes = []
    for k, v in items:
        if k == 'while':
            if isinstance(v, dict):
                # old dict-based while syntax: show a migration message
                raise SyntaxError("""
This syntax for while is not supported anymore:
  - while:
      cond: {cond_expr}
      code:
          - ...
Please use this instead:
  - while {cond_expr}:
      - ...
""".format(cond_expr=v['cond']))
            else:
                raise ValueError("while is a reserved keyword")
        elif k is not None and k.startswith('while '):
            if not isinstance(v, list):
                raise SyntaxError("while is a reserved keyword")
            # strip the leading 'while ' (6 chars) to get the condition
            cond = parse(k[6:].strip(), context)
            assert isinstance(cond, Expr)
            code = self.parse_process_group("while_code", v, context,
                                            purge=False)
            process = While(k, self, cond, code)
        elif k == 'return':
            e = SyntaxError("return is a reserved keyword. To return "
                            "from a function, use 'return expr' "
                            "instead of 'return: expr'")
            e.liam2context = "while parsing: return: {}".format(v)
            raise e
        elif k is None and isinstance(v, str) and v.startswith('return'):
            # bare 'return' or 'return <expr>' (must be followed by a space)
            assert len(v) == 6 or v[6] == ' '
            if len(v) > 6:
                result_def = v[7:].strip()
            else:
                result_def = None
            result_expr = parse(result_def, context)
            process = Return(None, self, result_expr)
        else:
            process = self.parse_expr(k, v, context)
            if process is not None and functions_only:
                # a non-function process at a place where only functions
                # are allowed: warn with a migration recipe
                if k in self.fields.names:
                    msg = """defining a process outside of a function is
deprecated because it is ambiguous. You should:
 * wrap the '{name}: {expr}' assignment inside a function like this:
        compute_{name}:  # you can name it any way you like but simply \
'{name}' is not recommended !
            - {name}: {expr}
 * update the simulation.processes list to use 'compute_{name}' (the function \
name) instead of '{name}'.
"""
                else:
                    msg = """defining a process outside of a function is \
deprecated because it is ambiguous.
1) If '{name}: {expr}' is an assignment ('{name}' stores the result of \
'{expr}'), you should:
 * wrap the assignment inside a function, for example, like this:
        compute_{name}:  # you can name it any way you like but simply \
'{name}' is not recommended !
            - {name}: {expr}
 * update the simulation.processes list to use 'compute_{name}' (the function \
name) instead of '{name}'.
 * add '{name}' in the entities fields with 'output: False'
2) otherwise if '{expr}' is an expression which does not return any value, you \
can simply transform it into a function, like this:
        {name}:
            - {expr}
"""
                warnings.warn(msg.format(name=k, expr=v),
                              UserDeprecationWarning)
            if process is None:
                if self.ismethod(v):
                    if isinstance(v, dict):
                        # old dict-based function syntax: build a migration
                        # message mirroring the user's definition
                        args = v.get('args', '')
                        code = v.get('code', '')
                        result = v.get('return', '')
                        oldargs = "\n        args: {}".format(args) \
                            if args else ''
                        oldcode = "\n        code:\n            - ..." \
                            if code else ''
                        newcode = "\n    - ..." if code else ''
                        oldresult = "\n        return: " + result \
                            if result else ''
                        newresult = "\n    - return " + result \
                            if result else ''
                        template = """
This syntax for defining functions with arguments or a return value is not
supported anymore:
{funcname}:{oldargs}{oldcode}{oldresult}

Please use this instead:
{funcname}({newargs}):{newcode}{newresult}"""
                        msg = template.format(funcname=k, oldargs=oldargs,
                                              oldcode=oldcode,
                                              oldresult=oldresult,
                                              newargs=args, newcode=newcode,
                                              newresult=newresult)
                        raise SyntaxError(msg)

                    assert isinstance(v, list)
                    # v should be a list of dicts (assignments) or
                    # strings (actions)
                    if "(" in k:
                        k, args = split_signature(k)
                        argnames = argspec(args).args
                        code_def, result_def = v, None
                    else:
                        argnames, code_def, result_def = [], v, None
                    method_context = self.get_group_context(
                        context, argnames)
                    code = self.parse_process_group(k + "_code", code_def,
                                                    method_context,
                                                    purge=False)
                    # TODO: use code.predictors instead (but it currently
                    # fails for some reason) or at least factor this out
                    # with the code in parse_process_group
                    # note: next(iter(...)) instead of .items()[0] so that
                    # this works on Python 3 where dict views are not
                    # subscriptable
                    group_expressions = [
                        next(iter(elem.items())) if isinstance(elem, dict)
                        else (None, elem)
                        for elem in code_def
                    ]
                    group_predictors = \
                        self.collect_predictors(group_expressions)
                    method_context = self.get_group_context(
                        method_context, group_predictors)
                    result_expr = parse(result_def, method_context)
                    assert result_expr is None or \
                        isinstance(result_expr, Expr)
                    process = Function(k, self, argnames, code, result_expr)
                elif isinstance(v, dict) and 'predictor' in v:
                    raise ValueError("Using the 'predictor' keyword is "
                                     "not supported anymore. "
                                     "If you need several processes to "
                                     "write to the same variable, you "
                                     "should rather use functions.")
                elif k is None and v is None:
                    raise ValueError("empty process found ('-')")
                else:
                    raise Exception("unknown expression type for "
                                    "%s: %s (%s)" % (k, v, type(v)))
        processes.append((k, process))
    return processes
def parse_expressions(self, items, context, functions_only=False):
    """Parse a list of (name, definition) pairs into Process objects.

    items -- a list of tuples (name, process_string)
    context -- parsing context
               a dict of all symbols available for all entities
    functions_only -- whether non-functions processes are allowed

    Returns a list of (name, process) tuples. Raises SyntaxError/ValueError
    for deprecated or reserved constructs ('while', 'return', 'predictor').
    """
    processes = []
    for k, v in items:
        if k == 'while':
            if isinstance(v, dict):
                # old dict-based while syntax: show a migration message
                raise SyntaxError("""
This syntax for while is not supported anymore:
  - while:
      cond: {cond_expr}
      code:
          - ...
Please use this instead:
  - while {cond_expr}:
      - ...
""".format(cond_expr=v['cond']))
            else:
                raise ValueError("while is a reserved keyword")
        elif k is not None and k.startswith('while '):
            if not isinstance(v, list):
                raise SyntaxError("while is a reserved keyword")
            # strip the leading 'while ' (6 chars) to get the condition
            cond = parse(k[6:].strip(), context)
            assert isinstance(cond, Expr)
            code = self.parse_process_group("while_code", v, context,
                                            purge=False)
            process = While(k, self, cond, code)
        elif k == 'return':
            e = SyntaxError("return is a reserved keyword. To return "
                            "from a function, use 'return expr' "
                            "instead of 'return: expr'")
            e.liam2context = "while parsing: return: {}".format(v)
            raise e
        elif k is None and isinstance(v, str) and v.startswith('return'):
            # bare 'return' or 'return <expr>' (must be followed by a space)
            assert len(v) == 6 or v[6] == ' '
            if len(v) > 6:
                result_def = v[7:].strip()
            else:
                result_def = None
            result_expr = parse(result_def, context)
            process = Return(None, self, result_expr)
        else:
            process = self.parse_expr(k, v, context)
            if process is not None and functions_only:
                # a non-function process at a place where only functions
                # are allowed: warn with a migration recipe
                if k in self.fields.names:
                    msg = """defining a process outside of a function is
deprecated because it is ambiguous. You should:
 * wrap the '{name}: {expr}' assignment inside a function like this:
        compute_{name}:  # you can name it any way you like but simply \
'{name}' is not recommended !
            - {name}: {expr}
 * update the simulation.processes list to use 'compute_{name}' (the function \
name) instead of '{name}'.
"""
                else:
                    msg = """defining a process outside of a function is \
deprecated because it is ambiguous.
1) If '{name}: {expr}' is an assignment ('{name}' stores the result of \
'{expr}'), you should:
 * wrap the assignment inside a function, for example, like this:
        compute_{name}:  # you can name it any way you like but simply \
'{name}' is not recommended !
            - {name}: {expr}
 * update the simulation.processes list to use 'compute_{name}' (the function \
name) instead of '{name}'.
 * add '{name}' in the entities fields with 'output: False'
2) otherwise if '{expr}' is an expression which does not return any value, you \
can simply transform it into a function, like this:
        {name}:
            - {expr}
"""
                warnings.warn(msg.format(name=k, expr=v),
                              UserDeprecationWarning)
            if process is None:
                if self.ismethod(v):
                    if isinstance(v, dict):
                        # old dict-based function syntax: build a migration
                        # message mirroring the user's definition
                        args = v.get('args', '')
                        code = v.get('code', '')
                        result = v.get('return', '')
                        oldargs = "\n        args: {}".format(args) \
                            if args else ''
                        oldcode = "\n        code:\n            - ..." \
                            if code else ''
                        newcode = "\n    - ..." if code else ''
                        oldresult = "\n        return: " + result \
                            if result else ''
                        newresult = "\n    - return " + result \
                            if result else ''
                        template = """
This syntax for defining functions with arguments or a return value is not
supported anymore:
{funcname}:{oldargs}{oldcode}{oldresult}

Please use this instead:
{funcname}({newargs}):{newcode}{newresult}"""
                        msg = template.format(funcname=k, oldargs=oldargs,
                                              oldcode=oldcode,
                                              oldresult=oldresult,
                                              newargs=args, newcode=newcode,
                                              newresult=newresult)
                        raise SyntaxError(msg)

                    assert isinstance(v, list)
                    # v should be a list of dicts (assignments) or
                    # strings (actions)
                    if "(" in k:
                        k, args = split_signature(k)
                        argnames = argspec(args).args
                        code_def, result_def = v, None
                    else:
                        argnames, code_def, result_def = [], v, None
                    method_context = self.get_group_context(context, argnames)
                    code = self.parse_process_group(k + "_code", code_def,
                                                    method_context,
                                                    purge=False)
                    # TODO: use code.predictors instead (but it currently
                    # fails for some reason) or at least factor this out
                    # with the code in parse_process_group
                    # note: next(iter(...)) instead of .items()[0] so that
                    # this works on Python 3 where dict views are not
                    # subscriptable
                    group_expressions = [
                        next(iter(elem.items())) if isinstance(elem, dict)
                        else (None, elem)
                        for elem in code_def
                    ]
                    group_predictors = \
                        self.collect_predictors(group_expressions)
                    method_context = self.get_group_context(
                        method_context, group_predictors)
                    result_expr = parse(result_def, method_context)
                    assert result_expr is None or \
                        isinstance(result_expr, Expr)
                    process = Function(k, self, argnames, code, result_expr)
                elif isinstance(v, dict) and 'predictor' in v:
                    raise ValueError("Using the 'predictor' keyword is "
                                     "not supported anymore. "
                                     "If you need several processes to "
                                     "write to the same variable, you "
                                     "should rather use functions.")
                elif k is None and v is None:
                    raise ValueError("empty process found ('-')")
                else:
                    raise Exception("unknown expression type for "
                                    "%s: %s (%s)" % (k, v, type(v)))
        processes.append((k, process))
    return processes
class FuncClass(baseclass):
    # NOTE(review): this class body references names (baseclass, evalfunc,
    # name, args, dtypefunc) that are not defined in this chunk -- presumably
    # they are closure variables of an enclosing factory function that
    # generates one wrapper class per numpy function; confirm in full file.
    np_func = evalfunc
    funcname = name
    argspec = argspec(args, **baseclass.kwonlyargs)
    # only override the base dtype when an explicit dtype function was given
    if dtypefunc is not None:
        dtype = dtypefunc