def simulation(dt, N, output_n, nsteps=10000, output=None):
    start_time = time.time()
    dur = nsteps * dt
    steps = nsteps
    man = core.Manager()

    G = create_graph(N)
    print("Creating graph completed in {} seconds.".format(time.time() - start_time))

    start_time = time.time()
    #comp_dict, conns = LPU.graph_to_dicts(G, remove_edge_id=False)
    fl_input_processor = StepInputProcessor(
        'I', ['neuron_{}'.format(i) for i in range(N)], 20.0, 0.0, dur)

    fl_output_processor = []
    if output == 'disk':
        fl_output_processor.append(
            FileOutputProcessor([('V', None), ('spike_state', None)],
                                'neurodriver_output_{}.h5'.format(output_n),
                                sample_interval=1, cache_length=2000))
    elif output == 'memory':
        fl_output_processor.append(
            OutputRecorder([('spike_state', None), ('V', None)],
                           sample_interval=1))
def add_LPU(config, manager):
    config_photor = config['Photoreceptor']
    gexf_file = config_photor['gexf_file']
    input_file = config_photor['input_file']
    output_file = config_photor['output_file']

    G = generate_gexf(config_photor, gexf_file)
    comp_dict, conns = LPU.graph_to_dicts(G)

    LPU_id = 'photoreceptor'
    debug = config_photor['debug']

    dt = config['General']['dt']
    extra_comps = [PhotoreceptorModel]

    input_processor = StepInputProcessor('photon', G.nodes(), 10000, 0.2, 1)
    output_processor = FileOutputProcessor([('V', G.nodes())], output_file,
                                           sample_interval=1)

    manager.add(LPU, LPU_id, dt, comp_dict, conns,
                device=0,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug, time_sync=False,
                extra_comps=extra_comps)
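# A minimal usage sketch (an assumption, not part of the original script):
# `config` is assumed to be the parsed configuration dict (e.g. obtained via
# get_config_obj().conf as in launch() further below), and the run pattern
# mirrors the spawn/start/wait sequence used elsewhere in this code base.
manager = core.Manager()
add_LPU(config, manager)
manager.spawn()
manager.start(steps=config['General']['steps'])
manager.wait()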
def simulation(dt, N, output_n):
    start_time = time.time()
    dur = 0.01
    steps = int(np.round(dur / dt))

    G = create_graph(N)
    print("Creating graph completed in {} seconds.".format(time.time() - start_time))

    start_time = time.time()
    #comp_dict, conns = LPU.graph_to_dicts(G, remove_edge_id=False)
    fl_input_processor = StepInputProcessor(
        'I', ['neuron_{}'.format(i) for i in range(N)], 20.0, 0.0, dur)
    #fl_output_processor = [FileOutputProcessor(
    #    [('V', None), ('g', ['synapse_neuron_{}_to_neuron_1'.format(i) for i in range(N)])],
    #    # ('spike_state', None), ('g', None), ('E', None)],
    #    'neurodriver_output_{}.h5'.format(output_n), sample_interval=10, cache_length=2000)]
    fl_output_processor = []  # temporarily suppress generating output
    #fl_output_processor = [OutputRecorder([('spike_state', None), ('V', None), ('g', None), ('E', None)],
    #                                      dur, dt, sample_interval=1)]

    lpu = LPU(dt, 'obj', G,
              device=args.gpu_dev, id='ge',
              input_processors=[fl_input_processor],
              output_processors=fl_output_processor,
              debug=args.debug, manager=False,
              print_timing=False, time_sync=False,
              extra_comps=[])
    print("Instantiating LPU completed in {} seconds.".format(time.time() - start_time))

    start_time1 = time.time()
    # LPU.run includes pre_run, run_steps and post_run
    lpu.run(steps=steps)
    execution_time = time.time() - start_time1
    compile_and_execute_time = time.time() - start_time
    print("LPUs Compilation and Execution Completed in {} seconds.".format(
        compile_and_execute_time))

    return compile_and_execute_time, execution_time
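# Illustrative driver for simulation() above (not from the original source):
# `args` must expose gpu_dev and debug, as referenced inside the function; here
# a bare argparse.Namespace stands in for a parsed command line, and the dt/N
# values are placeholders.
import argparse
args = argparse.Namespace(gpu_dev=0, debug=False)

if __name__ == '__main__':
    compile_and_execute_time, execution_time = simulation(dt=1e-4, N=1024, output_n=0)
    print("Total (compile + execute): {} s, execution only: {} s".format(
        compile_and_execute_time, execution_time))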
def loadExperimentSettings(X):
    inList = []
    for a in X:
        if a['name'] == 'InIGaussianNoise':
            inList.append(
                InIGaussianNoise(a['node_id'], a['mean'], a['std'],
                                 a['t_start'], a['t_end']))
        elif a['name'] == 'InISinusoidal':
            inList.append(
                InISinusoidal(a['node_id'], a['amplitude'], a['frequency'],
                              a['t_start'], a['t_end'], a['mean'], a['shift'],
                              a['frequency_sweep'], a['frequency_sweep_frequency'],
                              a['threshold_active'], a['threshold_value']))
        elif a['name'] == 'InIBoxcar':
            inList.append(
                InIBoxcar(a['node_id'], a['I_val'], a['t_start'], a['t_end']))
        else:
            inList.append(StepInputProcessor(*a['args'], **a['kwargs']))
    return inList
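# Hypothetical example of the settings list loadExperimentSettings() consumes.
# Field names follow the constructor arguments used above; the node ids and
# numeric values are illustrative assumptions only.
example_settings = [
    {'name': 'InIBoxcar', 'node_id': 'neuron_0',
     'I_val': 20.0, 't_start': 0.0, 't_end': 1.0},
    # Any other name falls through to StepInputProcessor(*args, **kwargs):
    {'name': 'StepInputProcessor',
     'args': ('I', ['neuron_1'], 40.0, 0.2, 0.8), 'kwargs': {}},
]
input_processors = loadExperimentSettings(example_settings)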
G = nx.MultiDiGraph()
G.add_node(
    'neuron0', **{
        'class': 'LeakyIAF',
        'name': 'LeakyIAF',
        'resting_potential': -70.0,
        'threshold': -45.0,
        'capacitance': 0.07,  # in mS
        'resistance': 0.2,  # in Ohm
    })

comp_dict, conns = LPU.graph_to_dicts(G)

fl_input_processor = StepInputProcessor('I', ['neuron0'], 40, 0.2, 0.8)
fl_output_processor = FileOutputProcessor([('spike_state', None), ('V', None)],
                                          'new_output.h5', sample_interval=1)

man.add(LPU, 'ge', dt, comp_dict, conns,
        device=args.gpu_dev,
        input_processors=[fl_input_processor],
        output_processors=[fl_output_processor],
        debug=args.debug)
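# A minimal sketch of actually running the single-neuron example above,
# assuming `man` is the core.Manager() instance the LPU was added to and that
# the step count comes from an assumed 1 s duration; the spawn/start/wait
# sequence mirrors the one used in launch() below.
steps = int(1.0 / dt)
man.spawn()
man.start(steps=steps)
man.wait()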
G.add_node(
    'neuron{}'.format(i), **{
        'class': 'LeakyIAF',
        'name': 'LeakyIAF',
        'resting_potential': -70.0,  # (mV)
        'threshold': -40.0,  # Firing Threshold (mV)
        'reset_potential': -70.0,  # Potential to be reset to after a spike (mV)
        'capacitance': 1,  # (\mu F/cm^2)
        'resistance': 0.007  # (k\Omega cm.^2)
    })

comp_dict, conns = LPU.graph_to_dicts(G)

fl_input_processor = StepInputProcessor(
    'I', ['neuron{}'.format(i) for i in range(N)], 20.0, 0.0, dur)
fl_output_processor = [
    FileOutputProcessor([('spike_state', None), ('V', None)],
                        'output.h5', sample_interval=1)
]
#fl_output_processor = [OutputRecorder([('spike_state', None), ('V', None)], dur, dt, sample_interval = 1)]

man.add(LPU, 'ge', dt, comp_dict, conns,
        device=args.gpu_dev,
        input_processors=[fl_input_processor],
        output_processors=fl_output_processor,
        debug=args.debug)
def launch(self, user_id, task):
    neuron_uid_list = [str(a) for a in task['neuron_list']]

    conf_obj = get_config_obj()
    config = conf_obj.conf

    if config['Retina']['intype'] == 'Natural':
        coord_file = config['InputType']['Natural']['coord_file']
        tmp = os.path.splitext(coord_file)
        config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
            tmp[0], user_id, tmp[1])

    setup_logger(file_name='neurokernel_' + user_id + '.log', screen=True)

    manager = core.Manager()

    lpus = {}
    patterns = {}
    G = task['data']
    with open('G.pickle', 'wb') as f:
        pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
    print(G)
    print(G.keys())
    print(G['LPU'])
    print(G['LPU'].keys())

    # get graph and output_uid_list for each LPU
    for k, lpu in G['LPU'].items():
        lpus[k] = {}
        g_lpu_na = create_graph_from_database_returned(lpu)
        lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
        lpus[k]['graph'] = lpu_nk_graph
        lpus[k]['output_uid_list'] = list(
            set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
        lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

    # unpack any 'attr_dict' wrappers left on nodes/edges
    for kkey, lpu in lpus.items():
        graph = lpu['graph']
        for uid, comp in graph.nodes.items():
            if 'attr_dict' in comp:
                print('Found attr_dict; fixing...')
                nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                # print('changed',uid)
                graph.nodes[uid].pop('attr_dict')
        for i, j, k, v in graph.edges(keys=True, data=True):
            if 'attr_dict' in v:
                for key in v['attr_dict']:
                    nx.set_edge_attributes(
                        graph, {(i, j, k): {key: v['attr_dict'][key]}})
                graph.edges[(i, j, k)].pop('attr_dict')
        lpus[kkey]['graph'] = graph

    # get graph for each Pattern
    for k, pat in G['Pattern'].items():
        l1, l2 = k.split('-')
        if l1 in lpus and l2 in lpus:
            g_pattern_na = create_graph_from_database_returned(pat)
            pattern_nk = nk.na_pat_to_nk(g_pattern_na)
            print(lpus[l1]['graph'].nodes(data=True))
            lpu_ports = [node[1]['selector'] \
                         for node in lpus[l1]['graph'].nodes(data=True) \
                         if node[1]['class'] == 'Port'] + \
                        [node[1]['selector'] \
                         for node in lpus[l2]['graph'].nodes(data=True) \
                         if node[1]['class'] == 'Port']
            pattern_ports = pattern_nk.nodes()
            patterns[k] = {}
            patterns[k]['graph'] = pattern_nk.subgraph(
                list(set(lpu_ports).intersection(set(pattern_ports))))

    dt = config['General']['dt']
    if 'dt' in task:
        dt = task['dt']
        print(dt)

    # add LPUs to manager
    for k, lpu in lpus.items():
        lpu_name = k
        graph = lpu['graph']
        for uid, comp in graph.nodes.items():
            if 'attr_dict' in comp:
                nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                # print('changed',uid)
                graph.nodes[uid].pop('attr_dict')
        for i, j, ko, v in graph.edges(keys=True, data=True):
            if 'attr_dict' in v:
                for key in v['attr_dict']:
                    nx.set_edge_attributes(
                        graph, {(i, j, ko): {key: v['attr_dict'][key]}})
                graph.edges[(i, j, ko)].pop('attr_dict')
        nx.write_gexf(graph, 'name.gexf')
        with open(lpu_name + '.pickle', 'wb') as f:
            pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
        comps = graph.nodes.items()

        #for uid, comp in comps:
        #    if 'attr_dict' in comp:
        #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
        #        print('changed',uid)
        #    if 'class' in comp:

        if k == 'retina':
            prs = [node for node in graph.nodes(data=True) \
                   if node[1]['class'] == 'PhotoreceptorModel']
            for pr in prs:
                graph.nodes[pr[0]]['num_microvilli'] = 3000
            input_processors = [RetinaInputIndividual(config, prs, user_id)]
            extra_comps = [PhotoreceptorModel]
            retina_input_uids = [a[0] for a in prs]
        elif k == 'EB':
            input_processor = StepInputProcessor(
                'I', [node[0] for node in graph.nodes(data=True) \
                      if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
            input_processors = [input_processor]
            extra_comps = [BufferVoltage]
        else:
            input_processors = []
            extra_comps = [BufferVoltage]
        if 'inputProcessors' in task:
            input_processors = loadExperimentSettings(task['inputProcessors'])
        output_processor = FileOutputProcessor(
            [('V', lpu['output_uid_list'])], lpu['output_file'],
            sample_interval=10)

        (comp_dict, conns) = LPU.graph_to_dicts(graph)
        # print(comp_dict)
        # print(conns)
        print(k)
        manager.add(LPU, k, dt, comp_dict, conns,
                    device=0, input_processors=input_processors,
                    output_processors=[output_processor],
                    extra_comps=extra_comps, debug=True)

    # connect LPUs by Patterns
    for k, pattern in patterns.items():
        l1, l2 = k.split('-')
        if l1 in lpus and l2 in lpus:
            print('Connecting {} and {}'.format(l1, l2))
            pat, key_order = Pattern.from_graph(nx.DiGraph(pattern['graph']),
                                                return_key_order=True)
            print(l1, l2)
            print(key_order)
            with Timer('update of connections in Manager'):
                manager.connect(l1, l2, pat,
                                int_0=key_order.index(l1),
                                int_1=key_order.index(l2))

    # start simulation
    steps = config['General']['steps']
    ignored_steps = config['General']['ignored_steps']
    if 'steps' in task:
        steps = task['steps']
    if 'ignored_steps' in task:
        ignored_steps = task['ignored_steps']
    # ignored_steps = 0
    # steps = 100
    manager.spawn()
    manager.start(steps=steps)
    manager.wait()

    time.sleep(5)
    print(task)

    # post-processing inputs (hard coded, can be better organized)
    inputs = {
        u'ydomain': 1.0,
        u'xdomain': dt * (steps - ignored_steps),
        u'dt': dt * 10,
        u'data': {}
    }

    if 'retina' in lpus:
        input_array = si.read_array('{}_{}.h5'.format(
            config['Retina']['input_file'], user_id))
        inputs[u'ydomain'] = input_array.max()
        for i, item in enumerate(retina_input_uids):
            inputs['data'][item] = np.hstack(
                (np.arange(int((steps - ignored_steps) / 10)).reshape(
                    (-1, 1)) * dt * 10,
                 input_array[ignored_steps::10, i:i + 1])).tolist()
        del input_array

    # post-processing outputs from all LPUs and combine them into one dictionary
    result = {
        u'ydomain': 1,
        u'xdomain': dt * (steps - ignored_steps),
        u'dt': dt * 10,
        u'data': {}
    }

    for k, lpu in lpus.items():
        with h5py.File(lpu['output_file'], 'r') as output_file:
            uids = output_file['V']['uids'][:]
            output_array = output_file['V']['data'][:]
            for i, item in enumerate(uids):
                output = output_array[int(ignored_steps / 10):, i:i + 1]
                # tmp = output.max()-output.min()
                # if tmp <= 0.01: #mV
                #     output = (output - output.min()) + 0.5
                # else:
                #     output = (output - output.min())/tmp*0.9+0.1
                result['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, output)).tolist()

    return inputs, result