def plan_SimProcess(self, all_ops):
    """Plan SimProcess operators, grouped by process class.

    For each process class, the class's MRO is searched for a dedicated
    ``_plan_<Class>`` planner method on ``self``; matching ops are planned
    in batches. Processes with no planner fall back to stepping their
    ``make_step`` function as a generic Python-function plan.

    Returns the combined list of plans (specialized plans first).
    """
    class_groups = groupby(all_ops, lambda op: type(op.process))
    plan_groups = defaultdict(list)
    step_plans = []

    def shape(sig):
        # A missing signal (no input/output) is treated as zero-size.
        # Hoisted out of the loop: this was previously a lambda re-created
        # for every op in the fallback branch.
        return sig.shape if sig is not None else (0,)

    for process_class, ops in class_groups:
        for cls in process_class.__mro__:
            attrname = '_plan_' + cls.__name__
            if hasattr(self, attrname):
                plan_groups[attrname].extend(ops)
                break
        else:
            # No specialized planner anywhere in the MRO: run each
            # process's step function as plain Python.
            for op in ops:
                fn = op.process.make_step(
                    shape(op.input), shape(op.output), self.model.dt,
                    rng=op.process.get_rng(self.rng))
                step_plans.extend(
                    self._plan_python_fn(fn, [op.t], [op.input], [op.output]))

    process_plans = [p for attr, ops in iteritems(plan_groups)
                     for p in getattr(self, attr)(ops)]
    return process_plans + step_plans
def test_groupby(hashable, force_list, rng):
    """Shuffle key/value pairs and check groupby recovers each group."""
    if hashable:
        keys = list(range(1, 5))
    else:
        keys = [[0, 0], [0, 1], [1, 0], [1, 1]]
    keys = sorted(keys)

    # one random-length group of values per key
    groups = [rng.randn(rng.randint(5, 10)) for _ in keys]
    pairs = [(key, value)
             for key, group in zip(keys, groups)
             for value in group]

    # shuffle the pairs so groupby must re-collect them
    pairs = [pairs[i] for i in rng.permutation(len(pairs))]

    # call groupby and verify the recovered keys
    keygroups = groupby(pairs, lambda p: p[0], force_list=force_list)
    keys2 = sorted(kg[0] for kg in keygroups)
    assert keys2 == keys

    # verify each recovered group matches the values generated for its key
    for key2, keygroup2 in keygroups:
        group = groups[keys.index(key2)]
        group2 = [pair[1] for pair in keygroup2]
        assert sorted(group2) == sorted(group)
def plan_SimPyFunc(self, ops):
    """Plan SimPyFunc operators, batching ops that share a function.

    NOTE: ``groupby`` groups functions by equality (`==`), not identity
    (`is`). The previous per-op identity assertion was stricter than the
    grouping itself and could fail spuriously for callables that compare
    equal without being the same object, so it has been removed.
    """
    groups = groupby(ops, lambda op: op.fn)
    plans = []
    for fn, group in groups:
        plans.extend(self._plan_python_fn(fn,
                                          [op.t for op in group],
                                          [op.x for op in group],
                                          [op.output for op in group]))
    return plans
def plan_SimPyFunc(self, ops):
    """Plan SimPyFunc operators, batching ops that share a function."""
    # ``groupby`` collects ops whose functions compare equal (`==`), not
    # identical (`is`); that is believed to be the desired behavior here.
    plans = []
    for fn, group in groupby(ops, lambda op: op.fn):
        ts = [op.t for op in group]
        xs = [op.x for op in group]
        ys = [op.output for op in group]
        plans.extend(self._plan_python_fn(fn, ts=ts, xs=xs, ys=ys))
    return plans
def plan_SimNeurons(self, all_ops):
    """Plan neuron ops via the ``_plan_<NeuronClass>`` method on ``self``.

    Raises ValueError for neuron classes without a matching planner.
    """
    plans = []
    for neuron_class, ops in groupby(all_ops,
                                     lambda op: op.neurons.__class__):
        attr_name = '_plan_%s' % neuron_class.__name__
        # Guard clause: bail out early on unsupported neuron types.
        if not hasattr(self, attr_name):
            raise ValueError("Unsupported neuron type '%s'"
                             % neuron_class.__name__)
        plans.extend(getattr(self, attr_name)(ops))
    return plans
def plan_SimPyFunc(self, ops):
    """Plan SimPyFunc operators, one batch per distinct function."""
    # ^ NOTE: Groups functions based on equality `==`, not identity `is`.
    # I think this is what we want in all cases.
    return [plan
            for fn, group in groupby(ops, lambda op: op.fn)
            for plan in self._plan_python_fn(
                fn,
                ts=[op.t for op in group],
                xs=[op.x for op in group],
                ys=[op.output for op in group])]
def plan_SimProcess(self, all_ops):
    """Plan process ops via the ``_plan_<ProcessClass>`` method on ``self``.

    Raises NotImplementedError for process classes without a planner.
    """
    plans = []
    for process_class, ops in groupby(all_ops,
                                      lambda op: op.process.__class__):
        attrname = '_plan_' + process_class.__name__
        # Guard clause: unsupported process types fail fast.
        if not hasattr(self, attrname):
            raise NotImplementedError("Unsupported process type '%s'"
                                      % process_class.__name__)
        plans.extend(getattr(self, attrname)(ops))
    return plans
def plan_SimNeurons(self, all_ops):
    """Plan neuron ops; only LIF and LIFRate neuron types are supported."""
    plans = []
    for neuron_class, ops in groupby(all_ops,
                                     lambda op: op.neurons.__class__):
        # Select the planner by exact class identity, then call it once.
        if neuron_class is LIF:
            planner = self._plan_LIF
        elif neuron_class is LIFRate:
            planner = self._plan_LIFRate
        else:
            raise ValueError("Unsupported neuron type '%s'"
                             % neuron_class.__name__)
        plans.extend(planner(ops))
    return plans
def _plan_python_fn(self, fn, ts, xs, ys):
    """Plan calls to the Python function ``fn`` on signal lists.

    ``ts``, ``xs``, ``ys`` are parallel lists of time, input, and output
    signals (entries may be None, but each list must be homogeneously
    None or non-None). Signals are grouped by input size; each group is
    first tried as OCL code via ``_plan_fn_in_ocl``, and any groups that
    cannot be converted fall back to a single Python plan.
    """
    assert len(ts) == len(xs) == len(ys)
    # Each list is all-None or all-non-None; mixing is not supported.
    assert all(t is None for t in ts) or all(t is not None for t in ts)
    assert all(x is None for x in xs) or all(x is not None for x in xs)
    assert all(y is None for y in ys) or all(y is not None for y in ys)
    if ts[0] is not None:
        # Time signals must all be the model's shared time signal.
        assert all(t is self.model.time for t in ts)
    signal_size = lambda sig: sig.size if sig is not None else None
    # Use the function's name for plain functions, the class name for
    # callable objects.
    fn_name = fn.__name__ if inspect.isfunction(fn) else type(fn).__name__
    # group by number of x dims
    signals = zip(ts, xs, ys)
    groups = groupby(signals, lambda s: signal_size(s[1]))
    # --- try to turn Python function into OCL code
    plans = []
    unplanned_signals = []
    for x_dim, group in groups:
        tt, xx, yy = zip(*group)
        # if any functions have no output, must do them in Python
        y_dim = signal_size(yy[0])
        if y_dim is None:
            self._found_python_code(
                "Function %r could not be converted to OCL "
                "since it has no outputs." % (fn_name))
            unplanned_signals.extend(zip(tt, xx, yy))
            continue
        # try to get OCL code
        if self.if_python_code == 'error':
            # In 'error' mode, let conversion failures propagate to the
            # caller rather than silently falling back to Python.
            plans.append(self._plan_fn_in_ocl(fn, tt, xx, yy, fn_name))
        else:
            try:
                plans.append(self._plan_fn_in_ocl(fn, tt, xx, yy, fn_name))
            except Exception as e:
                self._found_python_code(
                    "Function %r could not be converted to OCL due to %s%s"
                    % (fn_name, type(e).__name__, e.args))
                unplanned_signals.extend(zip(tt, xx, yy))
    # --- do remaining unplanned signals in Python
    if len(unplanned_signals) > 0:
        tt, xx, yy = zip(*unplanned_signals)
        plans.append(self._plan_fn_in_python(fn, tt, xx, yy, fn_name))
    return plans
def validate_ops(sets, ups, incs):
    """Validate operator access constraints on signal views.

    ``sets``, ``ups``, and ``incs`` map signal views to the lists of
    operators that set, update, and increment them, respectively
    (presumably defaultdict(list), since ``sets[node] + ups[node]`` is
    indexed for incremented nodes — TODO confirm at the call site).
    Raises AssertionError when a constraint is violated.
    """
    # -- assert that only one op sets any particular view
    for node in sets:
        assert len(sets[node]) == 1, (node, sets[node])
    # -- assert that only one op updates any particular view
    for node in ups:
        assert len(ups[node]) == 1, (node, ups[node])
    # --- assert that any node that is incremented is also set/updated
    for node in incs:
        # Fixed: message was written ``(node)``, which is not a tuple —
        # made a real 1-tuple for consistency with the asserts above.
        assert len(sets[node] + ups[node]) > 0, (node,)
    # -- assert that no two views are both set and aliased
    for _, base_group in groupby(sets, lambda x: x.base, hashable=True):
        for node, other in itertools.combinations(base_group, 2):
            assert not node.shares_memory_with(other), (
                "%s shares memory with %s" % (node, other))
    # -- assert that no two views are both updated and aliased
    for _, base_group in groupby(ups, lambda x: x.base, hashable=True):
        for node, other in itertools.combinations(base_group, 2):
            assert not node.shares_memory_with(other), (
                "%s shares memory with %s" % (node, other))
def plan_SimProcess(self, all_ops):
    """Plan process ops; supports WhiteNoise, FilteredNoise, WhiteSignal."""
    plans = []
    for process_class, ops in groupby(all_ops,
                                      lambda op: op.process.__class__):
        # Pick the planner by exact class identity, then call it once.
        if process_class is WhiteNoise:
            planner = self._plan_WhiteNoise
        elif process_class is FilteredNoise:
            planner = self._plan_FilteredNoise
        elif process_class is WhiteSignal:
            planner = self._plan_WhiteSignal
        else:
            raise NotImplementedError("Unsupported process type '%s'"
                                      % process_class.__name__)
        plans.extend(planner(ops))
    return plans
def plan_SimProcess(self, all_ops):
    """Plan process ops, searching each class's MRO for a planner method.

    Raises NotImplementedError when no ``_plan_<Class>`` method exists
    anywhere along a process class's MRO.
    """
    plan_groups = defaultdict(list)
    for process_class, ops in groupby(all_ops,
                                      lambda op: op.process.__class__):
        for cls in process_class.__mro__:
            attrname = '_plan_' + cls.__name__
            if hasattr(self, attrname):
                plan_groups[attrname].extend(ops)
                break
        else:
            raise NotImplementedError("Unsupported process type '%s'"
                                      % process_class.__name__)
    # Run each planner once over all ops collected for it.
    plans = []
    for attr, ops in iteritems(plan_groups):
        plans.extend(getattr(self, attr)(ops))
    return plans
def plan_SimProcess(self, all_ops):
    """Plan process ops; unmatched classes fall back to Python plans."""
    # Partition ops: those with a specialized planner (found via the MRO)
    # versus those that must run as generic Python processes.
    plan_groups = defaultdict(list)
    python_ops = []
    for process_class, ops in groupby(all_ops, lambda op: type(op.process)):
        attrname = next(('_plan_' + cls.__name__
                         for cls in process_class.__mro__
                         if hasattr(self, '_plan_' + cls.__name__)), None)
        if attrname is not None:
            plan_groups[attrname].extend(ops)
        else:
            python_ops.extend(ops)
    process_plans = [p for attr, ops in iteritems(plan_groups)
                     for p in getattr(self, attr)(ops)]
    python_plans = [p for op in python_ops
                    for p in self._plan_python_process(op)]
    return process_plans + python_plans
def plan_SimProcess(self, all_ops):
    """Plan process ops; classes without a planner run as Python processes."""
    plan_groups = defaultdict(list)
    python_ops = []
    for process_class, ops in groupby(all_ops, lambda op: type(op.process)):
        for cls in process_class.__mro__:
            attrname = '_plan_' + cls.__name__
            if hasattr(self, attrname):
                plan_groups[attrname].extend(ops)
                break
        else:
            # No planner found anywhere in the MRO.
            python_ops.extend(ops)
    # Specialized plans first, then the per-op Python fallbacks.
    plans = []
    for attr, ops in iteritems(plan_groups):
        plans.extend(getattr(self, attr)(ops))
    for op in python_ops:
        plans.extend(self._plan_python_process(op))
    return plans
def plan_SimProcess(self, all_ops):
    """Plan process ops; classes lacking a planner get per-op step plans."""
    plan_groups = defaultdict(list)
    step_plans = []
    for process_class, ops in groupby(all_ops, lambda op: type(op.process)):
        for cls in process_class.__mro__:
            attrname = '_plan_' + cls.__name__
            if hasattr(self, attrname):
                plan_groups[attrname].extend(ops)
                break
        else:
            # No specialized planner: step each process with a plain
            # Python function instead.
            for op in ops:
                in_shape = op.input.shape if op.input is not None else (0,)
                out_shape = op.output.shape if op.output is not None else (0,)
                fn = op.process.make_step(
                    in_shape, out_shape, self.model.dt,
                    rng=op.process.get_rng(self.rng))
                step_plans.extend(self._plan_python_fn(
                    fn, [op.t], [op.input], [op.output]))
    process_plans = [p for attr, ops in iteritems(plan_groups)
                     for p in getattr(self, attr)(ops)]
    return process_plans + step_plans
def build_axons(n2core, core, group, all_axons):  # noqa C901
    """Configure output axons on ``n2core`` from ``all_axons`` entries.

    Each entry in ``all_axons`` is a 6-tuple
    ``(cx_id, atom, pop_type, tchip_id, tcore_id, taxon_id)``.
    A ``pop_type`` of 0 means discrete axons; 16 and 32 are population
    axon types occupying one and two axon-config slots respectively.
    All axons must be discrete, or none may be. ``core`` and ``group``
    are unused here — presumably kept for a uniform builder signature;
    TODO confirm against the callers.
    """
    if len(all_axons) == 0:
        return
    pop_type0 = all_axons[0][2]
    if pop_type0 == 0:
        # Discrete axons: configure each one directly, no dedup needed.
        for cx_id, atom, pop_type, tchip_id, tcore_id, taxon_id in all_axons:
            assert pop_type == 0, "All axons must be discrete, or none"
            assert atom == 0
            n2core.createDiscreteAxon(
                srcCxId=cx_id,
                dstChipId=tchip_id, dstCoreId=tcore_id, dstSynMapId=taxon_id)
        return
    else:
        assert all(axon[2] != 0 for axon in all_axons), (
            "All axons must be discrete, or none")
    axons_by_cx = groupby(all_axons, key=lambda x: x[0])  # group by cx_id
    # axon_id counts config slots used so far; axon_map deduplicates
    # identical target lists so compartments can share config entries.
    axon_id = 0
    axon_map = {}
    for cx_id, cx_axons in axons_by_cx:
        if len(cx_axons) == 0:
            continue
        # cx_axon -> (cx, atom, type, tchip_id, tcore_id, taxon_id)
        assert all(cx_axon[0] == cx_id for cx_axon in cx_axons)
        atom = cx_axons[0][1]
        assert all(cx_axon[1] == atom for cx_axon in cx_axons), (
            "cx atom must be the same for all axons")
        # Sort by (type, targets) so equal target sets produce equal keys.
        cx_axons = sorted(cx_axons, key=lambda a: a[2:])
        key = tuple(cx_axon[2:] for cx_axon in cx_axons)
        if key not in axon_map:
            # First time seeing this target list: allocate config slots.
            axon_id0 = axon_id
            axon_len = 0
            for cx_axon in cx_axons:
                pop_type, tchip_id, tcore_id, taxon_id = cx_axon[2:]
                if pop_type == 0:  # discrete
                    assert False, "Should have been handled in code above"
                elif pop_type == 16:  # pop16
                    n2core.axonCfg[axon_id].pop16.configure(
                        coreId=tcore_id, axonId=taxon_id)
                    axon_id += 1
                    axon_len += 1
                elif pop_type == 32:  # pop32
                    # pop32 entries span two consecutive config slots.
                    n2core.axonCfg[axon_id].pop32_0.configure(
                        coreId=tcore_id, axonId=taxon_id)
                    n2core.axonCfg[axon_id + 1].pop32_1.configure()
                    axon_id += 2
                    axon_len += 2
                else:
                    raise ValueError("Unrecognized pop_type: %d" % (pop_type,))
            axon_map[key] = (axon_id0, axon_len)
        # Point this compartment's axon map at the (possibly shared) slots.
        axon_ptr, axon_len = axon_map[key]
        n2core.axonMap[cx_id].configure(ptr=axon_ptr, len=axon_len, atom=atom)