def from_expressions(cls, exprs):
    inputs = sorted(
        reduce(__or__, map(flip(getattr)('free_symbols'), exprs), frozenset()),
        key=flip(getattr)('name'))
    calls = tuple()
    outputs = tuple(exprs)
    return Function(inputs, calls, outputs)
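# A minimal demo of the curried flip(getattr) idiom used throughout these
# snippets, assuming a curried flip such as the one shipped by toolz: flip
# swaps getattr's argument order, so flip(getattr)('name') becomes a
# one-argument accessor equivalent to lambda obj: getattr(obj, 'name').
from toolz import flip


class _Sym(object):
    # Illustrative stand-in for a symbol object with a .name attribute.
    def __init__(self, name):
        self.name = name


name_of = flip(getattr)('name')
assert name_of(_Sym('x')) == 'x'
assert [s.name for s in sorted([_Sym('b'), _Sym('a')], key=name_of)] == ['a', 'b']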
def free_symbols(self):
    # Union the free symbols of every key and value in the mapping with the
    # free symbols of the argument expression.
    return reduce(
        __or__,
        map(
            compose(
                curry(reduce)(__or__),
                tupfun(
                    flip(getattr)('free_symbols'),
                    flip(getattr)('free_symbols'))),
            self.mapping.items())) | self.arg.free_symbols
def __init__(self, *pairs):
    self.pairs = tuple(pairs)
    ExprType = self.outtype
    if not all(
            map(
                compose(
                    all,
                    tupfun(
                        flip(isinstance)(ExprType),
                        flip(isinstance)(BooleanExpression))),
                self.pairs)):
        raise TypeError('Arguments to Piecewise have incorrect type.')
def sym_predict_random_forest_regressor(estimator):
    inputs = syms(estimator)
    Var = VariableFactory(existing=inputs)
    subs = tuple(map(sym_predict, estimator.estimators_))
    calls = tuple(
        (tuple(Var() for _ in range(len(sub.outputs))), (sub, inputs))
        for sub in subs)
    outputs = tuple(
        map(flip(__truediv__)(RealNumber(len(subs))),
            map(curry(reduce)(__add__),
                zip(*map(flip(getitem)(0), calls)))))
    return Function(inputs, calls, outputs)
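# A plain-Python sketch of what the combinator pipeline above computes
# (_average_outputs_sketch is an illustrative name, not from the source):
# collect the variables assigned by each per-tree call, sum them
# positionally across trees, and divide by the tree count to average the
# per-tree predictions.
from functools import reduce
from operator import __add__


def _average_outputs_sketch(calls, n_trees):
    assigned_groups = [assigned for assigned, _ in calls]
    return tuple(reduce(__add__, group) / n_trees
                 for group in zip(*assigned_groups))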
def trim(self, used=None):
    '''
    Remove unused computation.
    '''
    if used is None:
        used_ = frozenset(range(len(self.outputs)))
    else:
        used_ = frozenset(used)
    trimmed_outputs = tuple(map(self.outputs.__getitem__, sorted(used_)))
    used_symbols = reduce(
        __or__,
        map(flip(getattr)('free_symbols'), trimmed_outputs),
        frozenset())
    trimmed_calls = tuple()
    # Walk the calls in reverse so that symbols needed by later calls are
    # already in used_symbols when earlier calls are examined.
    for assigned, (fun, arguments) in reversed(self.calls):
        argmap = dict(zip(fun.inputs, arguments))
        trimmed_assigned = tuple(
            filter(used_symbols.__contains__, assigned))
        if not trimmed_assigned:
            # Nothing this call assigns is used downstream; drop it.
            continue
        trimmed_fun = fun.trim(
            frozenset(i for i in range(len(assigned))
                      if assigned[i] in used_symbols))
        trimmed_arguments = tuple(
            map(argmap.__getitem__, trimmed_fun.inputs))
        trimmed_calls = (
            (trimmed_assigned, (trimmed_fun, trimmed_arguments)),
        ) + trimmed_calls
        used_symbols = used_symbols | frozenset(trimmed_arguments)
    trimmed_inputs = tuple(filter(used_symbols.__contains__, self.inputs))
    return Function(trimmed_inputs, trimmed_calls, trimmed_outputs)
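# Hypothetical usage of trim (the Function value and symbols x, y below are
# illustrative, not from the source): given
#     f = Function(inputs=(x, y), calls=(), outputs=(x + y, x))
# f.trim(used={1}) keeps only the second output, so y is no longer
# referenced and the result has inputs == (x,) and outputs == (x,).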
def get_tenant_metrics(tenant_id, scaling_groups, grouped_servers,
                       _print=False):
    """
    Produce per-group metrics for all the groups of a tenant.

    :param list scaling_groups: Tenant's scaling groups as dicts from CASS
    :param dict grouped_servers: Servers from Nova grouped based on
        scaling group ID.
    :return: generator of (tenantId, groupId, desired, actual) GroupMetrics
    """
    if _print:
        print('processing tenant {} with groups {} and servers {}'.format(
            tenant_id, len(scaling_groups), len(grouped_servers)))
    groups = {g['groupId']: g for g in scaling_groups}
    # Union of group IDs known to CASS and group IDs seen on Nova servers.
    for group_id in set(groups) | set(grouped_servers):
        servers = grouped_servers.get(group_id, [])
        if group_id in groups:
            group = groups[group_id]
        else:
            group = {'groupId': group_id_from_metadata(servers[0]['metadata']),
                     'desired': 0}
        # Materialize so the list can be filtered repeatedly and len() taken.
        servers = list(map(NovaServer.from_server_details_json, servers))
        _len = compose(len, list, flip(filter, servers))
        active = _len(lambda s: s.state == ServerState.ACTIVE)
        bad = _len(lambda s: s.state in (ServerState.SHUTOFF,
                                         ServerState.ERROR,
                                         ServerState.DELETED))
        yield GroupMetrics(tenant_id, group['groupId'], group['desired'],
                           active, len(servers) - bad - active)
def all_variables(self):
    result = set()
    result |= set(self.inputs)
    result |= reduce(
        __or__,
        map(compose(set, flip(__getitem__)(0)), self.calls),
        set())
    return result
def __init__(self, mapping, arg):
    self.mapping = frozendict(mapping)
    if not all(
            map(
                flip(isinstance)(Constant),
                chain(mapping.keys(), mapping.values()))):
        raise TypeError(
            'Keys and values of FiniteMap must be Constants. Got %s.' %
            str(tuple(map(type, chain(mapping.keys(), mapping.values())))))
    self.arg = arg
    self.outtype = get_common_type(map(type, self.mapping.values()))
def disjoint_union(self, *others, **kwargs):
    assert set(kwargs.keys()) <= {'levels', 'name', 'names'}
    pieces = (self,) + others
    union_size = max(map(flip(getattr)('key_size'), pieces)) + 1
    offsets = [1] * len(pieces)
    if 'names' in kwargs:
        names = kwargs['names']
        # The original deleted kwargs['name'] here, which raises KeyError
        # whenever only 'names' is passed; consuming 'names' is presumably
        # what was intended.
        del kwargs['names']
    else:
        names = [piece.name for piece in pieces]

    def filler(i, j):
        # 'ind' is assumed to be defined at module scope in the source.
        return names[i] if j == 0 else ind

    items = self._unionize(union_size, pieces, offsets, filler)
    return self.__class__(*items, **kwargs)
def bin_window_(*args):
    n = common_size(*args)
    remainder = n % n_bins
    quotient = n // n_bins
    start = 0
    while start < n:
        size = quotient
        if remainder > 0:
            size += 1
            remainder -= 1
        end = start + size
        yield tuple(
            map(flip(safe_rows_select)(np.arange(start, end, dtype=int)),
                args))
        start = end
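# A standalone sketch of how the loop above sizes its bins (_bin_sizes is an
# illustrative helper name, not from the source): the remainder is spread
# one row at a time over the leading bins.
def _bin_sizes(n, n_bins):
    quotient, remainder = divmod(n, n_bins)
    return [quotient + 1 if i < remainder else quotient
            for i in range(n_bins)]


# With 10 rows and 3 bins the yielded row ranges are [0, 4), [4, 7), [7, 10).
assert _bin_sizes(10, 3) == [4, 3, 3]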
def get_in(keys, coll, default=None, no_default=False):
    """
    Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.

    If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
    ``no_default`` is specified, then it raises KeyError or IndexError.

    ``get_in`` is a generalization of ``operator.getitem`` for nested data
    structures such as dictionaries and lists.

    >>> transaction = {'name': 'Alice',
    ...                'purchase': {'items': ['Apple', 'Orange'],
    ...                             'costs': [0.50, 1.25]},
    ...                'credit card': '5555-1234-1234-1234'}
    >>> get_in(['purchase', 'items', 0], transaction)
    'Apple'
    >>> get_in(['name'], transaction)
    'Alice'
    >>> get_in(['purchase', 'total'], transaction)
    >>> get_in(['purchase', 'items', 'apple'], transaction)
    >>> get_in(['purchase', 'items', 10], transaction)
    >>> get_in(['purchase', 'total'], transaction, 0)
    0
    >>> get_in(['y'], {}, no_default=True)
    Traceback (most recent call last):
        ...
    KeyError: 'y'
    >>> class C:
    ...     def __init__(self, x):
    ...         self.x = x
    >>> a = C(C(1))
    >>> get_in(['x', 'x'], a)
    1
    >>> get_in(['x', 'b'], a, 2)
    2

    See Also:
        itertoolz.get
        operator.getitem
    """
    reducer = flip(
        partial(get, default=(utils.no_default if no_default else default)))
    return reduce(reducer, keys, coll)
def __exit__(self, exc_type, exc_value, exc_traceback):
    if isinstance(exc_value, ModuleCacheValid) or \
            exc_type is ModuleCacheValid or \
            exc_value is ModuleCacheValid:
        # Cache is valid: restore the cached module data into the caller's
        # globals and suppress the signalling exception.
        inspect.stack()[1][0].f_globals.update(self.moduledata)
        return True
    elif exc_value is None:
        # Normal exit: snapshot the caller's globals, minus modules and
        # suppressed names, and write them to the cache.
        new_moduledata = valfilter(
            complement(flip(isinstance)(ModuleType)),
            dissoc(inspect.stack()[1][0].f_globals, *self.suppress))
        # Check that all objects can be cached
        for _ in starmap(self._check_cachability, new_moduledata.items()):
            pass
        new_metadata = self.invalidator.new_metadata(new_moduledata)
        self._put_in_cache(new_metadata, new_moduledata)
        return True
    else:
        return False
def _is_concrete_key(self, key):
    return isinstance(key, tuple) and all(
        map(flip(isinstance)(string_types), key))
def __init__(self, *args):
    if not all(map(flip(isinstance)(self.argtype), args)):
        raise TypeError(
            'Attempt to create %s with arguments of incorrect output type. '
            'Should be %s. Got %s.' %
            (self.__class__.__name__, self.argtype.__name__,
             str(tuple(map(lambda x: x.__class__.__name__, args)))))
def free_symbols(self):
    return reduce(__or__, map(flip(getattr)('free_symbols'), self.args), set())
def test_flip():
    def f(a, b):
        return a, b

    assert flip(f, 'a', 'b') == ('b', 'a')
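# A minimal flip consistent with the test above; toolz ships an equivalent,
# curried version as toolz.functoolz.flip. This sketch covers only the
# two-argument form the test exercises.
def flip_sketch(func, a, b):
    # Call func with its two positional arguments swapped.
    return func(b, a)


assert flip_sketch(lambda a, b: (a, b), 'a', 'b') == ('b', 'a')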
def from_expression(cls, expr):
    inputs = sorted(expr.free_symbols, key=flip(getattr)('name'))
    calls = tuple()
    outputs = (expr,)
    return Function(inputs, calls, outputs)
    if len(losses) <= self.n:
        return False
    return all(
        map(
            curry(__lt__)(-self.threshold),
            starmap(self.stat, sliding_window(2, losses[-(self.n + 1):]))))


@curry
def stop_after_n_iterations_without_stat_improvement_over_threshold(
        stat, n, threshold=0.):
    return NIterationsWithoutImprovementOverThreshold(stat, n, threshold)


stop_after_n_iterations_without_improvement_over_threshold = \
    stop_after_n_iterations_without_stat_improvement_over_threshold(
        flip(__sub__))


def percent_reduction(before, after):
    return 100 * (after - before) / float(before)


stop_after_n_iterations_without_percent_improvement_over_threshold = \
    stop_after_n_iterations_without_stat_improvement_over_threshold(
        percent_reduction)


class BoosterFitRecord(object):
    def __init__(self, losses, times, stopping_condition=None):
        self.losses = losses
        self.times = times
        self.stopping_condition = stopping_condition
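# Worked example of the stopping rule defined above: with
# stat = flip(__sub__) (so stat(before, after) == after - before), n == 2
# and threshold == 0.1, losses [5.0, 4.99, 4.95] give deltas
# (-0.01, -0.04). Both exceed -0.1, meaning no recent iteration improved
# the loss by more than the threshold, so the condition signals a stop.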
def moving_window_(*args):
    n = common_size(*args)
    window = np.arange(window_size, dtype=int)
    for i in range(n - window_size):
        # Select rows [i, i + window_size) from each argument; map over args
        # (the original's *args would zip the arguments together), matching
        # bin_window_ and order_by_ elsewhere in this collection.
        yield tuple(map(flip(safe_rows_select)(window + i), args))
def free_symbols(self):
    # Note: flip must wrap getattr itself; the original's
    # flip(getattr('free_symbols')) is a TypeError.
    return reduce(
        __or__,
        map(flip(getattr)('free_symbols'), self.weights),
        set()) | super(WeightedStatistic, self).free_symbols
    if len(losses) <= self.n:
        return False
    return all(
        map(
            curry(__lt__)(-self.threshold),
            starmap(self.stat, sliding_window(2, losses[-(self.n + 1):]))))


@curry
def stop_after_n_iterations_without_stat_improvement_over_threshold(
        stat, n, threshold=0.):
    return NIterationsWithoutImprovementOverThreshold(stat, n, threshold)


def reduction(before, after):
    return after - before


stop_after_n_iterations_without_improvement_over_threshold = \
    stop_after_n_iterations_without_stat_improvement_over_threshold(
        flip(__sub__))


def percent_reduction(before, after):
    return 100 * (after - before) / float(before)


stop_after_n_iterations_without_percent_improvement_over_threshold = \
    stop_after_n_iterations_without_stat_improvement_over_threshold(
        percent_reduction)
def free_symbols(self):
    # Note: flip must wrap getattr itself; the original's
    # flip(getattr('free_symbols')) is a TypeError.
    return reduce(__or__, map(flip(getattr)('free_symbols'), self.data), set())
def union(self, *others, **kwargs):
    pieces = (self,) + others
    union_size = max(map(flip(getattr)('key_size'), pieces))
    items = self._unionize(union_size, pieces)
    return self.__class__(*items, **kwargs)
def order_by_(*args):
    return list(map(flip(safe_rows_select)(order), args))