def generate_graphs(self, dur):
    """Read simulation output files and render summary plots.

    Plots the input light intensity of one neuron and, depending on the
    recording flags, its membrane potential/current and the seven
    microvillus state variables (G, G*, PLC*, D*, C*, T*, M*), then saves
    the figure to ``<outputfile>_plot.png``.

    Parameters
    ----------
    dur : float
        Duration of the time window to plot, in the same time units as
        ``self.run_dt``.
    """
    outputfile = self.outputfile
    dt = self.run_dt  # check if other dt is needed
    Nt = int(dur / dt)
    t = np.arange(0, dt * Nt, dt)

    neuron = 0  # index of the neuron (column) to plot

    # Renamed from `input`, which shadowed the builtin.
    input_intensity = si.read_array(self.inputfile)
    if self.record_neuron:
        outputV = si.read_array(outputfile + 'V.h5')
        outputIall = si.read_array(outputfile + 'I.h5')
    if self.record_microvilli:
        # One file per state variable of the transduction cascade.
        outputX0 = si.read_array(outputfile + 'X0.h5')
        outputX1 = si.read_array(outputfile + 'X1.h5')
        outputX2 = si.read_array(outputfile + 'X2.h5')
        outputX3 = si.read_array(outputfile + 'X3.h5')
        outputX4 = si.read_array(outputfile + 'X4.h5')
        outputX5 = si.read_array(outputfile + 'X5.h5')
        outputX6 = si.read_array(outputfile + 'X6.h5')

    # 5x2 grid of subplots sharing the time axis.
    fig, ax = plt.subplots(5, 2, sharex=True)
    ax[0, 0].plot(t, input_intensity[:, neuron])
    ax[0, 0].set_title('Input Light Intensity per second', fontsize=12)
    if self.record_neuron:
        ax[0, 1].plot(t, outputV[:, neuron])
        ax[0, 1].set_title('Output Potential', fontsize=12)
        ax[1, 0].plot(t, outputIall[:, neuron])
        ax[1, 0].set_title('Output Current', fontsize=12)
    if self.record_microvilli:
        # NOTE(review): the X arrays are indexed by `neuron`, not by a
        # microvillus id — confirm the file layout matches this.
        ax[1, 1].plot(t, outputX0[:, neuron])
        ax[1, 1].set_title('G state of a microvillus', fontsize=12)
        ax[2, 0].plot(t, outputX1[:, neuron])
        ax[2, 0].set_title('G star state of a microvillus', fontsize=12)
        ax[2, 1].plot(t, outputX2[:, neuron])
        ax[2, 1].set_title('PLC star state of a microvillus', fontsize=12)
        ax[3, 0].plot(t, outputX3[:, neuron])
        ax[3, 0].set_title('D star state of a microvillus', fontsize=12)
        ax[3, 1].plot(t, outputX4[:, neuron])
        ax[3, 1].set_title('C star state of a microvillus', fontsize=12)
        ax[4, 0].plot(t, outputX5[:, neuron])
        ax[4, 0].set_title('T star state of a microvillus', fontsize=12)
        ax[4, 1].plot(t, outputX6[:, neuron])
        ax[4, 1].set_title('M star state of a microvillus', fontsize=12)

    fig.canvas.draw()
    fig.savefig(outputfile + '_plot.png', bbox_inches='tight')
def launch(self, user_id, task):
    """Build, run, and post-process a Neurokernel simulation for one task.

    Constructs LPUs and inter-LPU Patterns from the graph data embedded in
    ``task``, runs the simulation via a ``core.Manager``, and returns two
    plot-ready dictionaries (inputs, outputs) keyed by neuron uid.

    Parameters
    ----------
    user_id : str
        Used to namespace log and output file names.
    task : dict
        Must contain ``'neuron_list'`` and ``'success'['data']`` with
        ``'LPU'`` and ``'Pattern'`` graph descriptions.

    Returns
    -------
    tuple of dict
        ``(inputs, result)``, each with ``xdomain``/``ydomain``/``dt``
        metadata and a ``data`` mapping uid -> [[t, value], ...].
    """
    neuron_uid_list = [str(a) for a in task['neuron_list']]

    conf_obj = get_config_obj()
    config = conf_obj.conf

    # Natural input stimuli get a per-user coordinate file.
    if config['Retina']['intype'] == 'Natural':
        coord_file = config['InputType']['Natural']['coord_file']
        tmp = os.path.splitext(coord_file)
        config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
            tmp[0], user_id, tmp[1])

    setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

    manager = core.Manager()

    lpus = {}
    patterns = {}
    G = task['success']['data']

    # Get graph and output_uid_list for each LPU.
    # NOTE: was `.iteritems()` (Python 2 only); changed to `.items()` for
    # consistency with the rest of the file, which uses Python 3 idioms.
    for k, lpu in G['LPU'].items():
        lpus[k] = {}
        g_lpu_na = create_graph_from_database_returned(lpu)
        lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
        lpus[k]['graph'] = lpu_nk_graph
        # Only record neurons that were actually requested.
        lpus[k]['output_uid_list'] = list(
            set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
        lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

    # Get graph for each Pattern connecting two known LPUs.
    for k, pat in G['Pattern'].items():
        l1, l2 = k.split('-')
        if l1 in lpus and l2 in lpus:
            g_pattern_na = create_graph_from_database_returned(pat)
            pattern_nk = nk.na_pat_to_nk(g_pattern_na)
            # Selectors of all Port nodes on either side of the pattern.
            lpu_ports = [node[1]['selector'] \
                         for node in lpus[l1]['graph'].nodes(data=True) \
                         if node[1]['class'] == 'Port'] + \
                        [node[1]['selector'] \
                         for node in lpus[l2]['graph'].nodes(data=True) \
                         if node[1]['class'] == 'Port']
            pattern_ports = pattern_nk.nodes()
            patterns[k] = {}
            # Keep only the pattern ports that exist in the LPU graphs.
            patterns[k]['graph'] = pattern_nk.subgraph(
                list(set(lpu_ports).intersection(set(pattern_ports))))

    dt = config['General']['dt']

    # Add LPUs to manager.
    for k, lpu in lpus.items():
        graph = lpu['graph']
        if k == 'retina':
            prs = [node for node in graph.nodes(data=True) \
                   if node[1]['class'] == 'PhotoreceptorModel']
            for pr in prs:
                graph.node[pr[0]]['num_microvilli'] = 3000
            input_processors = [RetinaInputIndividual(config, prs, user_id)]
            extra_comps = [PhotoreceptorModel]
            retina_input_uids = [a[0] for a in prs]
        else:
            input_processors = []
            extra_comps = [BufferVoltage]
        output_processor = FileOutputProcessor(
            [('V', lpu['output_uid_list'])], lpu['output_file'],
            sample_interval=10)
        (comp_dict, conns) = LPU.graph_to_dicts(graph)
        manager.add(LPU, k, dt, comp_dict, conns, device=0,
                    input_processors=input_processors,
                    output_processors=[output_processor],
                    extra_comps=extra_comps)

    # Connect LPUs by Patterns.
    for k, pattern in patterns.items():
        l1, l2 = k.split('-')
        if l1 in lpus and l2 in lpus:
            print('Connecting {} and {}'.format(l1, l2))
            pat, key_order = Pattern.from_graph(
                nx.DiGraph(pattern['graph']))
            with Timer('update of connections in Manager'):
                manager.connect(l1, l2, pat,
                                int_0=key_order.index(l1),
                                int_1=key_order.index(l2))

    # Start simulation.
    steps = config['General']['steps']
    ignored_steps = config['General']['ignored_steps']
    manager.spawn()
    manager.start(steps=steps)
    manager.wait()

    time.sleep(5)

    # Post-processing inputs (hard coded, can be better organized).
    inputs = {
        u'ydomain': 1.0,
        u'xdomain': dt * (steps - ignored_steps),
        u'dt': dt * 10,
        u'data': {}
    }
    if 'retina' in lpus:
        input_array = si.read_array('{}_{}.h5'.format(
            config['Retina']['input_file'], user_id))
        inputs[u'ydomain'] = input_array.max()
        for i, item in enumerate(retina_input_uids):
            # Pair every 10th sample after the ignored prefix with its time.
            inputs['data'][item] = np.hstack(
                (np.arange(int((steps - ignored_steps) / 10)).reshape(
                    (-1, 1)) * dt * 10,
                 input_array[ignored_steps::10, i:i + 1])).tolist()
        del input_array

    # Post-processing outputs from all LPUs, combined into one dictionary.
    result = {
        u'ydomain': 1,
        u'xdomain': dt * (steps - ignored_steps),
        u'dt': dt * 10,
        u'data': {}
    }
    for k, lpu in lpus.items():
        # Explicit read-only mode; required by recent h5py versions.
        with h5py.File(lpu['output_file'], 'r') as output_file:
            uids = output_file['V']['uids'][:]
            output_array = output_file['V']['data'][:]
            for i, item in enumerate(uids):
                output = output_array[int(ignored_steps / 10):, i:i + 1]
                # Normalize each trace into roughly [0.1, 1.0] for display;
                # nearly flat traces (<= 0.01 mV span) are just offset.
                tmp = output.max() - output.min()
                if tmp <= 0.01:  # mV
                    output = (output - output.min()) + 0.5
                else:
                    output = (output - output.min()) / tmp * 0.9 + 0.1
                result['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, output)).tolist()

    return inputs, result
def launch(self, user_id, task):
    """Build, run, and post-process a Neurokernel simulation for one task.

    Debug-instrumented variant: dumps the task graph and each LPU graph to
    pickle/GEXF files, prints progress, and honors per-task overrides for
    ``dt``, ``steps``, ``ignored_steps``, and ``inputProcessors``.

    Parameters
    ----------
    user_id : str
        Used to namespace log and output file names.
    task : dict
        Must contain ``'neuron_list'`` and ``'data'`` with ``'LPU'`` and
        ``'Pattern'`` graph descriptions; may carry the overrides above.

    Returns
    -------
    tuple of dict
        ``(inputs, result)``, each with ``xdomain``/``ydomain``/``dt``
        metadata and a ``data`` mapping uid -> [[t, value], ...].
    """
    neuron_uid_list = [str(a) for a in task['neuron_list']]

    conf_obj = get_config_obj()
    config = conf_obj.conf

    # Natural input stimuli get a per-user coordinate file.
    if config['Retina']['intype'] == 'Natural':
        coord_file = config['InputType']['Natural']['coord_file']
        tmp = os.path.splitext(coord_file)
        config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
            tmp[0], user_id, tmp[1])

    setup_logger(file_name='neurokernel_' + user_id + '.log', screen=True)

    manager = core.Manager()

    lpus = {}
    patterns = {}
    G = task['data']

    # Debug dump of the raw task graph.
    with open('G.pickle', 'wb') as f:
        pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
    print(G)
    print(G.keys())
    print(G['LPU'])
    print(G['LPU'].keys())

    # get graph and output_uid_list for each LPU
    for k, lpu in G['LPU'].items():
        lpus[k] = {}
        g_lpu_na = create_graph_from_database_returned(lpu)
        lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
        lpus[k]['graph'] = lpu_nk_graph
        # Only record neurons that were actually requested.
        lpus[k]['output_uid_list'] = list(
            set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
        lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

    # Flatten legacy 'attr_dict' wrappers into plain node/edge attributes
    # (presumably left over from a networkx 1.x -> 2.x conversion — TODO
    # confirm).
    for kkey, lpu in lpus.items():
        graph = lpu['graph']
        for uid, comp in graph.node.items():
            if 'attr_dict' in comp:
                print('Found attr_dict; fixing...')
                nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                # print('changed',uid)
                graph.nodes[uid].pop('attr_dict')
        for i, j, k, v in graph.edges(keys=True, data=True):
            if 'attr_dict' in v:
                for key in v['attr_dict']:
                    nx.set_edge_attributes(
                        graph, {(i, j, k): {
                            key: v['attr_dict'][key]
                        }})
                graph.edges[(i, j, k)].pop('attr_dict')
        lpus[kkey]['graph'] = graph

    # get graph for each Pattern
    for k, pat in G['Pattern'].items():
        l1, l2 = k.split('-')
        if l1 in lpus and l2 in lpus:
            g_pattern_na = create_graph_from_database_returned(pat)
            pattern_nk = nk.na_pat_to_nk(g_pattern_na)
            print(lpus[l1]['graph'].nodes(data=True))
            # Selectors of all Port nodes on either side of the pattern.
            lpu_ports = [node[1]['selector'] \
                         for node in lpus[l1]['graph'].nodes(data=True) \
                         if node[1]['class']=='Port'] + \
                        [node[1]['selector'] \
                         for node in lpus[l2]['graph'].nodes(data=True) \
                         if node[1]['class']=='Port']
            pattern_ports = pattern_nk.nodes()
            patterns[k] = {}
            # Keep only the pattern ports that exist in the LPU graphs.
            patterns[k]['graph'] = pattern_nk.subgraph(
                list(set(lpu_ports).intersection(set(pattern_ports))))

    dt = config['General']['dt']
    # Per-task override of the integration step.
    if 'dt' in task:
        dt = task['dt']
    print(dt)

    # add LPUs to manager
    for k, lpu in lpus.items():
        lpu_name = k
        graph = lpu['graph']
        # Same attr_dict flattening as above, repeated per LPU before
        # serialization (second pass is presumably redundant — TODO confirm).
        for uid, comp in graph.node.items():
            if 'attr_dict' in comp:
                nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                # print('changed',uid)
                graph.nodes[uid].pop('attr_dict')
        for i, j, ko, v in graph.edges(keys=True, data=True):
            if 'attr_dict' in v:
                for key in v['attr_dict']:
                    nx.set_edge_attributes(
                        graph, {(i, j, ko): {
                            key: v['attr_dict'][key]
                        }})
                graph.edges[(i, j, ko)].pop('attr_dict')
        # Debug dumps of the cleaned LPU graph. NOTE(review): 'name.gexf'
        # is overwritten on every iteration — only the last LPU survives.
        nx.write_gexf(graph, 'name.gexf')
        with open(lpu_name + '.pickle', 'wb') as f:
            pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
        comps = graph.node.items()

        #for uid, comp in comps:
        #    if 'attr_dict' in comp:
        #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
        #        print('changed',uid)
        #    if 'class' in comp:

        # Per-LPU input processors and extra component classes.
        if k == 'retina':
            prs = [node for node in graph.nodes(data=True) \
                   if node[1]['class'] == 'PhotoreceptorModel']
            for pr in prs:
                graph.node[pr[0]]['num_microvilli'] = 3000
            input_processors = [
                RetinaInputIndividual(config, prs, user_id)
            ]
            extra_comps = [PhotoreceptorModel]
            retina_input_uids = [a[0] for a in prs]
        elif k == 'EB':
            # Constant-step current injection into all LeakyIAF neurons.
            input_processor = StepInputProcessor(
                'I', [node[0] for node in graph.nodes(data=True) \
                      if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
            input_processors = [input_processor]
            extra_comps = [BufferVoltage]
        else:
            input_processors = []
            extra_comps = [BufferVoltage]
        # Task-level override replaces whatever was chosen above.
        if 'inputProcessors' in task:
            input_processors = loadExperimentSettings(
                task['inputProcessors'])
        output_processor = FileOutputProcessor(
            [('V', lpu['output_uid_list'])], lpu['output_file'],
            sample_interval=10)
        (comp_dict, conns) = LPU.graph_to_dicts(graph)
        # print(comp_dict)
        # print(conns)
        print(k)
        manager.add(LPU, k, dt, comp_dict, conns, device=0,
                    input_processors=input_processors,
                    output_processors=[output_processor],
                    extra_comps=extra_comps, debug=True)

    # connect LPUs by Patterns
    for k, pattern in patterns.items():
        l1, l2 = k.split('-')
        if l1 in lpus and l2 in lpus:
            print('Connecting {} and {}'.format(l1, l2))
            pat, key_order = Pattern.from_graph(nx.DiGraph(
                pattern['graph']), return_key_order=True)
            print(l1, l2)
            print(key_order)
            with Timer('update of connections in Manager'):
                manager.connect(l1, l2, pat,
                                int_0=key_order.index(l1),
                                int_1=key_order.index(l2))

    # start simulation
    steps = config['General']['steps']
    ignored_steps = config['General']['ignored_steps']
    # Per-task overrides of the step counts.
    if 'steps' in task:
        steps = task['steps']
    if 'ignored_steps' in task:
        ignored_steps = task['ignored_steps']
    # ignored_steps = 0
    # steps = 100
    manager.spawn()
    manager.start(steps=steps)
    manager.wait()

    time.sleep(5)
    print(task)

    # post-processing inputs (hard coded, can be better organized)
    inputs = {
        u'ydomain': 1.0,
        u'xdomain': dt * (steps - ignored_steps),
        u'dt': dt * 10,
        u'data': {}
    }
    if 'retina' in lpus:
        input_array = si.read_array('{}_{}.h5'.format(
            config['Retina']['input_file'], user_id))
        inputs[u'ydomain'] = input_array.max()
        for i, item in enumerate(retina_input_uids):
            # Pair every 10th sample after the ignored prefix with its time.
            inputs['data'][item] = np.hstack(
                (np.arange(int((steps - ignored_steps) / 10)).reshape(
                    (-1, 1)) * dt * 10,
                 input_array[ignored_steps::10, i:i + 1])).tolist()
        del input_array

    # post-processing outputs from all LPUs and combine them into one dictionary
    result = {
        u'ydomain': 1,
        u'xdomain': dt * (steps - ignored_steps),
        u'dt': dt * 10,
        u'data': {}
    }
    for k, lpu in lpus.items():
        with h5py.File(lpu['output_file']) as output_file:
            uids = output_file['V']['uids'][:]
            output_array = output_file['V']['data'][:]
            for i, item in enumerate(uids):
                output = output_array[int(ignored_steps / 10):, i:i + 1]
                # Normalization disabled in this variant; raw values pass
                # through.
                # tmp = output.max()-output.min()
                # if tmp <= 0.01: #mV
                #     output = (output - output.min()) + 0.5
                # else:
                #     output = (output - output.min())/tmp*0.9+0.1
                result['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, output)).tolist()

    return inputs, result
def gen_input(config):
    """Generate photoreceptor input and geometry files for each eye.

    For every configured eye, renders screen intensities, filters them
    through the retina's receptive fields into ``retina_input<i>.h5``,
    writes a subsampled ``intensities<suffix><i>.h5``, and dumps the
    ommatidia/grid geometry arrays to per-eye HDF5 files.

    Parameters
    ----------
    config : mapping
        Nested configuration with ``'General'`` and ``'Retina'`` sections.
        ``config['Retina']['screen_write_step']`` is temporarily forced to
        1 while rendering and restored before returning.
    """
    # Acquire a CUDA context; release it automatically at interpreter exit.
    cuda.init()
    ctx = cuda.Device(0).make_context()
    atexit.register(ctx.pop)

    suffix = config['General']['file_suffix']
    eye_num = config['General']['eye_num']
    eulerangles = config['Retina']['eulerangles']
    radius = config['Retina']['radius']
    rings = config['Retina']['rings']
    steps = config['General']['steps']

    # Record every screen frame during generation; the configured write
    # step is applied afterwards when the intensities file is rewritten.
    screen_write_step = config['Retina']['screen_write_step']
    config['Retina']['screen_write_step'] = 1
    screen_type = config['Retina']['screentype']
    screen_cls = cls_map.get_screen_cls(screen_type)

    try:
        for i in range(eye_num):
            screen = screen_cls(config)
            screen_file = 'intensities_tmp{}.h5'.format(i)
            screen.setup_file(screen_file)

            # Per-eye output file names.
            retina_elev_file = 'retina_elev{}.h5'.format(i)
            retina_azim_file = 'retina_azim{}.h5'.format(i)
            screen_dima_file = 'grid_dima{}.h5'.format(i)
            screen_dimb_file = 'grid_dimb{}.h5'.format(i)
            retina_dima_file = 'retina_dima{}.h5'.format(i)
            retina_dimb_file = 'retina_dimb{}.h5'.format(i)
            input_file = 'retina_input{}.h5'.format(i)

            # Hexagonal ommatidia grid rotated by this eye's Euler angles.
            transform = AlbersProjectionMap(
                radius, eulerangles[3*i:3*(i+1)]).invmap
            hexagon = hx.HexagonArray(num_rings=rings, radius=radius,
                                      transform=transform)
            retina = ret.RetinaArray(hexagon, config)
            print('Acceptance angle: {}'.format(retina.acceptance_angle))
            print('Neurons: {}'.format(retina.num_photoreceptors))

            elev_v, azim_v = retina.get_ommatidia_pos()
            rfs = _get_receptive_fields(retina, screen, screen_type)

            # Stream intensities through the receptive fields in batches
            # of at most 100 steps, appending to the input file.
            steps_count = steps
            write_mode = 'w'
            while (steps_count > 0):
                steps_batch = min(100, steps_count)
                im = screen.get_screen_intensity_steps(steps_batch)
                photor_inputs = rfs.filter(im)
                sio.write_array(photor_inputs, filename=input_file,
                                mode=write_mode)
                steps_count -= steps_batch
                write_mode = 'a'

            # Rewrite the full-rate intensities subsampled by the
            # originally configured step, then drop the temporary file.
            tmp = sio.read_array(screen_file)
            sio.write_array(tmp[::screen_write_step],
                            'intensities{}{}.h5'.format(suffix, i),
                            complevel=9)
            del tmp
            os.remove(screen_file)

            for data, filename in [(elev_v, retina_elev_file),
                                   (azim_v, retina_azim_file),
                                   (screen.grid[0], screen_dima_file),
                                   (screen.grid[1], screen_dimb_file),
                                   (rfs.refa, retina_dima_file),
                                   (rfs.refb, retina_dimb_file)]:
                sio.write_array(data, filename)
    finally:
        # Fix: the original leaked the temporary override, leaving the
        # caller's config with screen_write_step == 1.
        config['Retina']['screen_write_step'] = screen_write_step
def gen_input(config):
    """Generate photoreceptor input and geometry files for each eye.

    For every configured eye, renders screen intensities, filters them
    through the retina's receptive fields into the configured input file,
    writes a subsampled intensities file, and dumps the ommatidia/grid
    geometry arrays to per-eye HDF5 files.
    """
    # Bind a CUDA context for the rendering/filtering work; pop it on exit.
    cuda.init()
    ctx = cuda.Device(0).make_context()
    atexit.register(ctx.pop)

    suffix = config['General']['file_suffix']
    eye_num = config['General']['eye_num']
    rings = config['Retina']['rings']
    steps = config['General']['steps']
    input_filename = config['Retina']['input_file']

    # Record every screen frame while generating; the configured step is
    # applied later when the intensities file is rewritten.
    screen_write_step = config['Retina']['screen_write_step']
    config['Retina']['screen_write_step'] = 1

    screen_type = config['Retina']['screentype']
    screen_cls = cls_map.get_screen_cls(screen_type)

    eulerangles = config['Retina']['eulerangles']
    radius = config['Retina']['radius']

    for eye in range(eye_num):
        screen = screen_cls(config)
        screen_file = 'intensities_tmp{}.h5'.format(eye)
        screen.setup_file(screen_file)

        # Per-eye output file names.
        retina_elev_file = 'retina_elev{}.h5'.format(eye)
        retina_azim_file = 'retina_azim{}.h5'.format(eye)
        screen_dima_file = 'grid_dima{}.h5'.format(eye)
        screen_dimb_file = 'grid_dimb{}.h5'.format(eye)
        retina_dima_file = 'retina_dima{}.h5'.format(eye)
        retina_dimb_file = 'retina_dimb{}.h5'.format(eye)
        input_file = '{}{}{}.h5'.format(input_filename, eye, suffix)

        # Hexagonal ommatidia grid rotated by this eye's Euler angles.
        transform = AlbersProjectionMap(
            radius, eulerangles[3 * eye:3 * (eye + 1)]).invmap
        hexagon = hx.HexagonArray(num_rings=rings, radius=radius,
                                  transform=transform)
        retina = ret.RetinaArray(hexagon, config=config)
        print('Acceptance angle: {}'.format(retina.acceptance_angle))
        print('Neurons: {}'.format(retina.num_photoreceptors))

        elev_v, azim_v = retina.get_ommatidia_pos()
        rfs = _get_receptive_fields(retina, screen, screen_type)

        # Stream intensities through the receptive fields in batches of
        # at most 100 steps, appending to the input file after the first.
        remaining = steps
        mode = 'w'
        while remaining > 0:
            batch = min(100, remaining)
            frames = screen.get_screen_intensity_steps(batch)
            filtered = rfs.filter(frames)
            sio.write_array(filtered, filename=input_file, mode=mode)
            remaining -= batch
            mode = 'a'

        # Rewrite the full-rate intensities subsampled by the configured
        # step, then drop the temporary file.
        full_intensities = sio.read_array(screen_file)
        sio.write_array(full_intensities[::screen_write_step],
                        'intensities{}{}.h5'.format(suffix, eye),
                        complevel=9)
        del full_intensities
        os.remove(screen_file)

        geometry_outputs = [(elev_v, retina_elev_file),
                            (azim_v, retina_azim_file),
                            (screen.grid[0], screen_dima_file),
                            (screen.grid[1], screen_dimb_file),
                            (rfs.refa, retina_dima_file),
                            (rfs.refb, retina_dimb_file)]
        for data, filename in geometry_outputs:
            sio.write_array(data, filename)