def plot(datafiles, output_file, extra_annotation=None):
    """Read one Neo block per data file and save a comparison plot.

    datafiles        -- list of file paths readable by Neo's get_io
    output_file      -- path the figure is written to
    extra_annotation -- optional extra line appended to the figure annotations
    """
    print(datafiles)
    print(output_file)

    # load data
    blocks = [get_io(path).read_block() for path in datafiles]
    # note: Neo needs a pretty printer that is not tied to IPython
    # for block in blocks:
    #     print(block.describe())

    # for now take only the first segment
    segments = [blk.segments[0] for blk in blocks]
    labels = [blk.annotations['simulator'] for blk in blocks]

    # build annotations; all blocks must come from the same generating script
    script_name = blocks[0].annotations.get('script_name', '')
    if script_name:
        for blk in blocks[1:]:
            assert blk.annotations['script_name'] == script_name

    # also consider adding metadata to PNG file - see
    # http://stackoverflow.com/questions/10532614/can-matplotlib-add-metadata-to-saved-figures
    context = [
        "Generated by: %s" % __file__,
        "Working directory: %s" % os.getcwd(),
        "Timestamp: %s" % datetime.now().strftime("%Y-%m-%d %H:%M:%S%z"),
        "Output file: %s" % output_file,
        "Input file(s): %s" % "\n ".join(datafiles),
    ]
    if extra_annotation:
        context.append(extra_annotation)
    annotations = "\n".join(context)

    # create and save plot
    fig = comparison_plot(segments, labels, title=script_name,
                          annotations=annotations)
    fig.save(output_file)
def get(self, request, format=None, **kwargs):
    """Return JSON metadata for one segment of the Neo file stored in the session.

    Reads the file path from ``request.session['na_file']`` and the segment
    index from the ``segment_id`` query parameter.
    """
    na_file = request.session['na_file']
    block = get_io(na_file).read_block()
    id_segment = int(request.GET['segment_id'])
    segment = block.segments[id_segment]
    seg_data = {
        'name': segment.name or "",
        'description': segment.description or "",
        'file_origin': segment.file_origin or "",
        'annotations': segment.annotations,
        # 'spiketrains': segment.spiketrains,
        'analogsignals': [{} for a in segment.analogsignals],
        'as_prop': [{
            'size': signal.size,
            # Fix: signal names may be bytes (legacy IO code path) or already
            # str (or None); calling .decode unconditionally raised
            # AttributeError on str/None names.
            'name': (signal.name.decode('cp1252')
                     if isinstance(signal.name, bytes)
                     else (signal.name or ""))
        } for signal in segment.analogsignals]
    }
    return JsonResponse(seg_data, safe=False)
def get(self, request, format=None, **kwargs):
    """Return one analog signal (values and times) from the session's Neo file as JSON."""
    na_file = request.session['na_file']
    block = get_io(na_file).read_block()
    segment_index = int(request.GET['segment_id'])
    signal_index = int(request.GET['analog_signal_id'])
    analogsignal = block.segments[segment_index].analogsignals[signal_index]

    values = []
    for sample in analogsignal:
        try:
            # TODO find a better solution
            values.append(sample.item())
        except ValueError:
            # non-scalar sample: fall back to element 1 — TODO confirm intent
            values.append(sample[1].item())

    times = [t.item() for t in analogsignal.times]

    graph_data = {
        "values": values,
        "values_units": str(analogsignal.units.dimensionality),
        "times": times,
        "times_dimensionality": str(analogsignal.t_start.units.dimensionality),
        "t_start": analogsignal.t_start.item(),
        "t_stop": analogsignal.t_stop.item()
    }
    return JsonResponse(graph_data)
def end(compatible_output=True):
    """Do any necessary cleaning up before exiting.

    Flushes all recordings registered for write-at-exit, then writes the
    network out as NeuroML 2 (XML or HDF5, per the module-level
    ``save_format``) plus a LEMS simulation file.

    Raises:
        ValueError: if ``save_format`` is neither 'xml' nor 'hdf5'.
    """
    for (population, variables, filename) in simulator.state.write_on_end:
        io = get_io(filename)
        population.write_data(io, variables)
    simulator.state.write_on_end = []

    nml_doc = simulator._get_nml_doc()
    import neuroml.writers as writers
    if save_format == 'xml':
        nml_file = '%s.net.nml' % nml_doc.id
        writers.NeuroMLWriter.write(nml_doc, nml_file)
    elif save_format == 'hdf5':
        nml_file = '%s.net.nml.h5' % nml_doc.id
        writers.NeuroMLHdf5Writer.write(nml_doc, nml_file)
    else:
        # Fix: an unrecognised save_format previously fell through and
        # produced an obscure NameError on `nml_file` below.
        raise ValueError("Unsupported save_format: %r" % (save_format,))
    logger.info("Written NeuroML 2 file out to: " + nml_file)

    lems_sim = simulator._get_lems_sim()
    lems_sim.include_neuroml2_file("PyNN.xml", include_included=False)
    lems_sim.include_neuroml2_file(nml_file)
    lems_file = lems_sim.save_to_file()
    logger.info("Written LEMS file (to simulate NeuroML file) to: " + lems_file)
def write_data(self, io, variables='all', gather=True, clear=False, annotations=None):
    """Write this population's recorded data to file.

    io          -- a Neo IO instance, or a filename from which a suitable IO
                   is constructed via get_io.  NOTE: the `basestring` check
                   means this code path is Python 2.
    variables   -- which recorded variables to write (default: all of them)
    gather      -- passed through to get_data (MPI gather flag)
    clear       -- passed through to get_data
    annotations -- passed through to get_data
    """
    if isinstance(io, basestring):
        io = get_io(io)
    data = self.get_data(variables, gather, clear, annotations)
    # NOTE(review): the rank check is disabled — every MPI rank currently
    # writes. The intended guard is kept below for when MPI handling lands.
    #if self._simulator.state.mpi_rank == 0 or gather is False:
    if True:  # tmp. Need to handle MPI
        io.write(data)
def get(self, request, format=None, **kwargs):
    """Return top-level metadata (block plus per-segment summaries) for a Neo file.

    The file is located via ``_get_file_from_url``; an explicit Neo IO class
    may be requested with the ``type`` query parameter.
    """
    na_file = _get_file_from_url(request)

    if 'type' in request.GET and request.GET.get('type'):
        # the client named a specific Neo IO class to use
        iotype = request.GET.get('type')
        reader = getattr(io, iotype)(filename=na_file)
        block = reader.read_block()
    else:
        try:
            block = get_io(na_file).read_block()
        except IOError as err:
            # todo: need to be more fine grained. There could be other reasons
            # for an IOError
            return JsonResponse(
                {
                    'error': 'incorrect file type',
                    'message': str(err)
                },
                status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    segments = [
        {
            'name': seg.name or "",
            'annotations': _handle_dict(seg.annotations),
            'description': seg.description or "",
            'rec_datetime': seg.rec_datetime,
            'file_origin': seg.file_origin or "",
            'analogsignals': [],
        }
        for seg in block.segments
    ]
    block_data = {
        'block': [{
            'annotations': _handle_dict(block.annotations),
            'description': block.description or "",
            'file_origin': block.file_origin or "",
            'name': block.name or "",
            'rec_datetime': block.rec_datetime,
            'segments': segments,
        }]
    }
    return JsonResponse(block_data)
def plot(datafiles, output_file, annotation=None):
    """Plot all analog signal arrays from several Neo data files as stacked panels.

    datafiles   -- list of file paths readable by Neo's get_io
    output_file -- path of the figure file to write
    annotation  -- optional extra line appended to the figure footer
    """
    # Fix: use the function-call form of print (valid in both Python 2 and 3,
    # and consistent with the print() calls elsewhere in this codebase).
    print(datafiles)
    print(output_file)
    blocks = [get_io(datafile).read_block() for datafile in datafiles]
    # note: Neo needs a pretty printer that is not tied to IPython
    for block in blocks:
        print(block.describe())
    # for now take only the first segment
    segments = [block.segments[0] for block in blocks]
    labels = [block.annotations['simulator'] for block in blocks]
    variables_to_plot = set.union(*(variable_names(s) for s in segments))
    print("Plotting the following variables: %s" % ", ".join(variables_to_plot))
    n_panels = sum(a.shape[1] for a in segments[0].analogsignalarrays)  #+ bool(segments[0].spiketrains)
    script_name = blocks[0].annotations.get('script_name', '')
    if script_name:
        for block in blocks[1:]:
            assert block.annotations['script_name'] == script_name
    fig_settings = {  # pass these in a configuration file?
        'lines.linewidth': 0.5,
        'axes.linewidth': 0.5,
        'axes.labelsize': 'small',
        'legend.fontsize': 'small',
        'font.size': 8,
    }
    plt.rcParams.update(fig_settings)
    width, height = 6, 3 * n_panels + 1.2
    plt.figure(1, figsize=(width, height))
    gs = gridspec.GridSpec(n_panels, 1)
    gs.update(bottom=1.2 / height)  # leave space for annotations
    panels = [plt.subplot(gs[i, 0]) for i in reversed(range(n_panels))]
    n_seg = len(segments)
    for k, (segment, label) in enumerate(zip(segments, labels)):
        panel = 0
        lw = 2 * (n_seg - k) - 1   # earlier datasets drawn thicker, underneath
        col = 'rbgmck'[k % 6]
        for array in segment.analogsignalarrays:
            sorted_channels = sorted(array.channel_index)
            for channel in sorted_channels:
                i = array.channel_index.tolist().index(channel)
                print("plotting '%s' for %s in panel %d" % (array.name, label, panel))
                plot_signal(panels[panel], array, i, colour=col,
                            linewidth=lw, label=label)
                panel += 1
    for panel in panels:
        panel.legend()
    # NOTE: `array` deliberately refers to the last signal from the loop above
    plt.xlabel("time (%s)" % array.times.units._dimensionality.string)
    plt.setp(plt.gca().get_xticklabels(), visible=True)
    plt.title(script_name)
    # also consider adding metadata to PNG file - see
    # http://stackoverflow.com/questions/10532614/can-matplotlib-add-metadata-to-saved-figures
    context = ["Generated by: %s" % __file__,
               "Working directory: %s" % os.getcwd(),
               "Timestamp: %s" % datetime.now().strftime("%Y-%m-%d %H:%M:%S%z"),
               "Output file: %s" % output_file,
               "Input file(s): %s" % "\n ".join(datafiles)]
    if annotation:
        context.append(annotation)
    plt.figtext(0.01, 0.01, "\n".join(context),
                fontsize=6, verticalalignment='bottom')
    plt.savefig(output_file)
def end(compatible_output=True):
    """Do any necessary cleaning up before exiting."""
    # flush every recording that was registered to be written at exit
    for population, variables, filename in simulator.state.write_on_end:
        population.write_data(get_io(filename), variables)
    simulator.state.write_on_end = []
    # should have common implementation of end()
    simulator.state.net.to_nineml().write(simulator.state.output_filename)
def get(self, request, format=None, **kwargs):
    """Return block/segment metadata for a Neo file, fetching it into the session first if needed."""
    if 'na_file' not in request.session:
        url = request.GET.get('url')
        request = _get_file_from_url(request, url)
    na_file = request.session['na_file']

    if 'type' in request.GET and request.GET.get('type'):
        # the client named a specific Neo IO class to use
        iotype = request.GET.get('type')
        reader = getattr(io, iotype)(filename=na_file)
        block = reader.read_block()
    else:
        try:
            block = get_io(na_file).read_block()
        except IOError:
            return JsonResponse(
                {'block': [{
                    'error': 'incorrect file type'
                }]})

    segments = [
        {
            'name': seg.name or "",
            'annotations': _handle_dict(seg.annotations),
            'description': seg.description or "",
            'rec_datetime': seg.rec_datetime,
            'file_origin': seg.file_origin or "",
            'analogsignals': [],
        }
        for seg in block.segments
    ]
    return JsonResponse({
        'block': [{
            'annotations': _handle_dict(block.annotations),
            'description': block.description or "",
            'file_origin': block.file_origin or "",
            'name': block.name or "",
            'rec_datetime': block.rec_datetime,
            'segments': segments,
        }]
    })
def test_mix_procedural_and_oo(sim):
    # cf Issues #217, #234
    # Record the same population through the procedural API and the OO API,
    # then check both output files contain identical data.
    fn_proc = "test_write_procedural.pkl"
    fn_oo = "test_write_oo.pkl"
    sim.setup(timestep=0.1, min_delay=0.1)
    cells = sim.Population(5, sim.IF_cond_exp(i_offset=0.2))
    sim.record('v', cells, fn_proc)
    sim.run(10.0)
    cells.write_data(fn_oo)  # explicitly write data
    sim.end()                # implicitly write data using filename provided previously
    proc_block = get_io(fn_proc).read()[0]
    oo_block = get_io(fn_oo).read()[0]
    assert_array_equal(proc_block.segments[0].analogsignals[0],
                       oo_block.segments[0].analogsignals[0])
    for path in (fn_proc, fn_oo):
        os.remove(path)
def custom_get_io(filename):
    """Open *filename* with Neo's get_io, retrying with Spike2IO and
    try_signal_grouping=False when signal grouping makes get_io fail."""
    try:
        return get_io(filename)
    except AssertionError as err:
        if "try_signal_grouping" not in str(err):
            raise
        return neo.io.Spike2IO(filename, try_signal_grouping=False)
def test_mix_procedural_and_oo(sim):
    # cf Issues #217, #234
    # Write data twice (procedural record() + OO write_data()) and check the
    # two result files hold the same analog signal arrays.
    fn_proc = "test_write_procedural.pkl"
    fn_oo = "test_write_oo.pkl"
    sim.setup(timestep=0.1, min_delay=0.1)
    cells = sim.Population(5, sim.IF_cond_exp(i_offset=0.2))
    sim.record('v', cells, fn_proc)
    sim.run(10.0)
    cells.write_data(fn_oo)  # explicitly write data
    sim.end()                # implicitly write data using filename provided previously
    proc_block = get_io(fn_proc).read()[0]
    oo_block = get_io(fn_oo).read()[0]
    assert_arrays_equal(proc_block.segments[0].analogsignalarrays[0],
                        oo_block.segments[0].analogsignalarrays[0])
    for path in (fn_proc, fn_oo):
        os.remove(path)
def end(compatible_output=True):
    """Do any necessary cleaning up before exiting.

    Writes out all recordings registered for write-at-exit, creating any
    missing parent directories first.
    """
    for (population, variables, filename) in simulator.state.write_on_end:
        # Make directories if necessary.
        # Fix: renamed from `dir`, which shadowed the builtin.
        output_dir = os.path.dirname(filename)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Get NEO IO for filename
        io = get_io(filename)
        population.write_data(io, variables)
    simulator.state.write_on_end = []
def plot(datafiles, output_file, sort_by='simulator', annotation=None):
    """Build a 4-row comparison figure (Vm traces, rasters, ISI and CV-ISI
    histograms) from the given data files and save it to *output_file*."""
    blocks = [get_io(path).read_block() for path in datafiles]
    # note: Neo needs a pretty printer that is not tied to IPython
    #for block in blocks:
    #    print(block.describe())
    script_name = blocks[0].annotations['script_name']
    for blk in blocks[1:]:
        assert blk.annotations['script_name'] == script_name

    plt.rcParams.update({  # pass these in a configuration file?
        'lines.linewidth': 0.5,
        'axes.linewidth': 0.5,
        'axes.labelsize': 'small',
        'legend.fontsize': 'small',
        'font.size': 8,
        'savefig.dpi': 200,
    })
    CM = 1 / 2.54
    n_blocks = len(blocks)
    plt.figure(1, figsize=(15 * CM * n_blocks, 20 * CM))
    gs = gridspec.GridSpec(4, 2 * n_blocks, hspace=0.25, wspace=0.25)

    hide_axis_labels = False
    for k, (label, block_list) in enumerate(sort_by_annotation(sort_by, blocks).items()):
        # pick the excitatory / inhibitory segment of each block by its name
        segments = {}
        for blk in block_list:
            for name in ("exc", "inh"):
                if name in blk.name.lower():
                    segments[name] = blk.segments[0]
        left = 2 * k
        right = left + 1
        # Plot membrane potential traces
        plot_vm_traces(plt.subplot(gs[0, left:right + 1]), segments['exc'], label, hide_axis_labels)
        # Plot spike rasters
        plot_spiketrains(plt.subplot(gs[1, left:right + 1]), segments['exc'], label, hide_axis_labels)
        # Inter-spike-interval histograms
        # Histograms of coefficients of variation of ISI
        plot_isi_hist(plt.subplot(gs[2, left]), segments['exc'], 'exc', hide_axis_labels)
        plot_cvisi_hist(plt.subplot(gs[3, left]), segments['exc'], 'exc', hide_axis_labels)
        hide_axis_labels = True
        plot_isi_hist(plt.subplot(gs[2, right]), segments['inh'], 'inh', hide_axis_labels)
        plot_cvisi_hist(plt.subplot(gs[3, right]), segments['inh'], 'inh', hide_axis_labels)
    plt.savefig(output_file)
def get(self, request, format=None, **kwargs):
    """Return block/segment metadata for a Neo file located via URL or session."""
    if 'na_file' not in request.session:
        url = request.GET.get('url')
        request = _get_file_from_url(request, url)
    na_file = request.session['na_file']
    try:
        # read neo file from hd
        block = get_io(na_file).read_block()
    except IOError:
        return Response({'error': 'incorrect file type'},
                        status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    segments = [
        {
            'name': seg.name or "",
            'annotations': seg.annotations,
            'description': seg.description or "",
            'rec_datetime': seg.rec_datetime,
            'file_origin': seg.file_origin or "",
            'analogsignals': [],
        }
        for seg in block.segments
    ]
    return JsonResponse({
        'block': [{
            'annotations': block.annotations,
            'description': block.description or "",
            'file_origin': block.file_origin or "",
            'name': block.name or "",
            'rec_datetime': block.rec_datetime,
            'segments': segments,
        }]
    })
def plot(datafiles, output_file, sort_by='simulator', annotation=None):
    """Compare several simulation result files in one figure and save it.

    Rows: membrane potential traces, spike rasters, ISI histograms,
    CV-of-ISI histograms; one column pair per group of blocks.
    """
    blocks = [get_io(f).read_block() for f in datafiles]
    # note: Neo needs a pretty printer that is not tied to IPython
    #for block in blocks:
    #    print(block.describe())
    script_name = blocks[0].annotations['script_name']
    for other in blocks[1:]:
        assert other.annotations['script_name'] == script_name

    fig_settings = {  # pass these in a configuration file?
        'lines.linewidth': 0.5,
        'axes.linewidth': 0.5,
        'axes.labelsize': 'small',
        'legend.fontsize': 'small',
        'font.size': 8,
        'savefig.dpi': 200,
    }
    plt.rcParams.update(fig_settings)
    CM = 1 / 2.54
    plt.figure(1, figsize=(15 * CM * len(blocks), 20 * CM))
    gs = gridspec.GridSpec(4, 2 * len(blocks), hspace=0.25, wspace=0.25)
    sorted_blocks = sort_by_annotation(sort_by, blocks)

    hide_axis_labels = False
    for k, (label, block_list) in enumerate(sorted_blocks.items()):
        segments = {}
        for blk in block_list:
            for name in ("exc", "inh"):
                if name in blk.name.lower():
                    segments[name] = blk.segments[0]
        col = 2 * k
        # Plot membrane potential traces
        plot_vm_traces(plt.subplot(gs[0, col:col + 2]), segments['exc'], label, hide_axis_labels)
        # Plot spike rasters
        plot_spiketrains(plt.subplot(gs[1, col:col + 2]), segments['exc'], label, hide_axis_labels)
        # Inter-spike-interval histograms
        # Histograms of coefficients of variation of ISI
        plot_isi_hist(plt.subplot(gs[2, col]), segments['exc'], 'exc', hide_axis_labels)
        plot_cvisi_hist(plt.subplot(gs[3, col]), segments['exc'], 'exc', hide_axis_labels)
        hide_axis_labels = True
        plot_isi_hist(plt.subplot(gs[2, col + 1]), segments['inh'], 'inh', hide_axis_labels)
        plot_cvisi_hist(plt.subplot(gs[3, col + 1]), segments['inh'], 'inh', hide_axis_labels)
    plt.savefig(output_file)
def get(self, request, format=None, **kwargs):
    """Return JSON metadata for one segment of a Neo file located via URL."""
    na_file = _get_file_from_url(request)
    block = get_io(na_file).read_block()
    # todo, catch MultiValueDictKeyError in case segment_id isn't given, and return a 400 Bad Request response
    id_segment = int(request.GET['segment_id'])
    # todo, catch IndexError, and return a 404 response
    segment = block.segments[id_segment]
    seg_data = {
        'name': segment.name or "",
        'description': segment.description or "",
        'file_origin': segment.file_origin or "",
        'annotations': _handle_dict(segment.annotations),
        # 'spiketrains': segment.spiketrains,
        'analogsignals': [{} for _ in segment.analogsignals],
        'as_prop': [
            {'size': sig.size, 'name': sig.name}
            for sig in segment.analogsignals
        ]
    }
    return JsonResponse(seg_data, safe=False)
def end(compatible_output=True):
    """Do any necessary cleaning up before exiting.

    Flushes recordings registered for write-at-exit, then exports the network
    as NeuroML 2 (format chosen by the module-level ``save_format``) and a
    LEMS simulation file.

    Raises:
        ValueError: if ``save_format`` is neither 'xml' nor 'hdf5'.
    """
    for (population, variables, filename) in simulator.state.write_on_end:
        io = get_io(filename)
        population.write_data(io, variables)
    simulator.state.write_on_end = []

    nml_doc = simulator._get_nml_doc()
    import neuroml.writers as writers
    if save_format == 'xml':
        nml_file = '%s.net.nml' % nml_doc.id
        writers.NeuroMLWriter.write(nml_doc, nml_file)
    elif save_format == 'hdf5':
        nml_file = '%s.net.nml.h5' % nml_doc.id
        writers.NeuroMLHdf5Writer.write(nml_doc, nml_file)
    else:
        # Fix: an unrecognised save_format previously fell through to an
        # obscure NameError on `nml_file` below.
        raise ValueError("Unsupported save_format: %r" % (save_format,))
    logger.info("Written NeuroML 2 file out to: " + nml_file)

    lems_sim = simulator._get_lems_sim()
    lems_sim.include_neuroml2_file("PyNN.xml", include_included=False)
    lems_sim.include_neuroml2_file(nml_file)
    lems_file = lems_sim.save_to_file()
    logger.info("Written LEMS file (to simulate NeuroML file) to: " + lems_file)
def end(compatible_output=True):
    """Do any necessary cleaning up before exiting."""
    # flush every recording that was registered to be written at exit
    for population, variables, filename in simulator.state.write_on_end:
        population.write_data(get_io(filename), variables)
    simulator.state.write_on_end = []
def plot(datafiles, output_file, annotation=None):
    """Plot all analog signal arrays from several Neo data files as stacked panels.

    datafiles   -- list of file paths readable by Neo's get_io
    output_file -- path of the figure file to write
    annotation  -- optional extra line appended to the figure footer
    """
    # Fix: use the function-call form of print (valid in both Python 2 and 3,
    # and consistent with the print() calls elsewhere in this codebase).
    print(datafiles)
    print(output_file)
    blocks = [get_io(datafile).read_block() for datafile in datafiles]
    # note: Neo needs a pretty printer that is not tied to IPython
    for block in blocks:
        print(block.describe())
    # for now take only the first segment
    segments = [block.segments[0] for block in blocks]
    labels = [block.annotations['simulator'] for block in blocks]
    variables_to_plot = set.union(*(variable_names(s) for s in segments))
    print("Plotting the following variables: %s" % ", ".join(variables_to_plot))
    n_panels = sum(a.shape[1] for a in segments[0].analogsignalarrays)  #+ bool(segments[0].spiketrains)
    script_name = blocks[0].annotations.get('script_name', '')
    if script_name:
        for block in blocks[1:]:
            assert block.annotations['script_name'] == script_name
    fig_settings = {  # pass these in a configuration file?
        'lines.linewidth': 0.5,
        'axes.linewidth': 0.5,
        'axes.labelsize': 'small',
        'legend.fontsize': 'small',
        'font.size': 8,
    }
    plt.rcParams.update(fig_settings)
    width, height = 6, 3 * n_panels + 1.2
    plt.figure(1, figsize=(width, height))
    gs = gridspec.GridSpec(n_panels, 1)
    gs.update(bottom=1.2 / height)  # leave space for annotations
    panels = [plt.subplot(gs[i, 0]) for i in reversed(range(n_panels))]
    n_seg = len(segments)
    for k, (segment, label) in enumerate(zip(segments, labels)):
        panel = 0
        lw = 2 * (n_seg - k) - 1   # earlier datasets drawn thicker, underneath
        col = 'rbgmck'[k % 6]
        for array in segment.analogsignalarrays:
            sorted_channels = sorted(array.channel_index)
            for channel in sorted_channels:
                i = array.channel_index.tolist().index(channel)
                print("plotting '%s' for %s in panel %d" % (array.name, label, panel))
                plot_signal(panels[panel], array, i, colour=col,
                            linewidth=lw, label=label)
                panel += 1
    for panel in panels:
        panel.legend()
    # NOTE: `array` deliberately refers to the last signal from the loop above
    plt.xlabel("time (%s)" % array.times.units._dimensionality.string)
    plt.setp(plt.gca().get_xticklabels(), visible=True)
    plt.title(script_name)
    # also consider adding metadata to PNG file - see
    # http://stackoverflow.com/questions/10532614/can-matplotlib-add-metadata-to-saved-figures
    context = [
        "Generated by: %s" % __file__,
        "Working directory: %s" % os.getcwd(),
        "Timestamp: %s" % datetime.now().strftime("%Y-%m-%d %H:%M:%S%z"),
        "Output file: %s" % output_file,
        "Input file(s): %s" % "\n ".join(datafiles)
    ]
    if annotation:
        context.append(annotation)
    plt.figtext(0.01, 0.01, "\n".join(context),
                fontsize=6, verticalalignment='bottom')
    plt.savefig(output_file)
def get(self, request, format=None, **kwargs):
    """Return one analog signal (values, times, sampling info) as JSON.

    Expects ``segment_id`` and ``analog_signal_id`` in the query string;
    the file itself is located via ``_get_file_from_url``.
    """
    na_file = _get_file_from_url(request)
    block = get_io(na_file).read_block()
    # todo, catch MultiValueDictKeyError in case segment_id or analog_signal_id
    # aren't given, and return a 400 Bad Request response
    id_segment = int(request.GET['segment_id'])
    id_analog_signal = int(request.GET['analog_signal_id'])
    # todo, catch any IndexErrors, and return a 404 response
    segment = block.segments[id_segment]
    analogsignal = segment.analogsignals[id_analog_signal]
    # Fix: removed leftover debug print statements that dumped the entire
    # signal (values, times, metadata) to stdout on every request.

    analog_signal_values = []
    for item in analogsignal:
        try:
            # TODO find a better solution
            analog_signal_values.append(item.item())
        except ValueError:
            # non-scalar sample: falls back to item[1] — TODO confirm intent
            analog_signal_values.append(item[1].item())

    analog_signal_times = [item.item() for item in analogsignal.times]

    graph_data = {
        "name": analogsignal.name,
        "values": analog_signal_values,
        "values_units": str(analogsignal.units.dimensionality),
        "times": analog_signal_times,
        "times_dimensionality": str(analogsignal.t_start.units.dimensionality),
        "t_start": analogsignal.t_start.item(),
        "t_stop": analogsignal.t_stop.item(),
        "sampling_rate": float(analogsignal.sampling_rate.magnitude),
        "sampling_rate_units": str(analogsignal.sampling_rate.units.dimensionality)
    }
    return JsonResponse(graph_data)
from binascii import hexlify repo = hg.repository(ui.ui(), "..") ctx = repo.parents()[0] return hexlify(ctx.node()[:6]) except ImportError: return "unknown" __version__ = get_version() spikes_and_vm_file, spike_counts_file, neuron_id = sys.argv[1:] fig = plt.figure(figsize=(8, 3)) fig.dpi = 120 # Plot spike times data = get_io(spikes_and_vm_file).read()[0].segments[0] ax = fig.add_axes((0.1, 0.12, 0.6, 0.55), frameon=False) ax.set_xlim([0, TSTOP]) for i, spiketrain in enumerate(data.spiketrains): ax.plot(spiketrain.times, i*numpy.ones_like(spiketrain), 'b.', markersize=0.2) ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.set_xlabel("Time (ms)") ax.set_ylabel("Cell number") # Plot firing rate histogram with open(spike_counts_file) as fp: fp.readline() # first line is metadata n_spikes = numpy.loadtxt(fp) * 1000.0/TSTOP # calculate firing rate
def get(self, request, format=None, **kwargs):
    """Return one analog signal (values, times, sampling info) as JSON.

    The file is fetched into the session (via ``_get_file_from_url``) if not
    already there; ``segment_id`` and ``analog_signal_id`` select the signal.
    """
    if 'na_file' not in request.session:
        url = request.GET.get('url')
        request = _get_file_from_url(request, url)
    na_file = request.session['na_file']
    block = get_io(na_file).read_block()
    id_segment = int(request.GET['segment_id'])
    id_analog_signal = int(request.GET['analog_signal_id'])
    segment = block.segments[id_segment]
    analogsignal = segment.analogsignals[id_analog_signal]
    # Fix: removed leftover debug print statements that dumped the entire
    # signal (values, times, metadata) to stdout on every request.

    analog_signal_values = []
    for item in analogsignal:
        try:
            # TODO find a better solution
            analog_signal_values.append(item.item())
        except ValueError:
            # non-scalar sample: falls back to item[1] — TODO confirm intent
            analog_signal_values.append(item[1].item())

    analog_signal_times = [item.item() for item in analogsignal.times]

    graph_data = {
        "name": analogsignal.name,
        "values": analog_signal_values,
        "values_units": str(analogsignal.units.dimensionality),
        "times": analog_signal_times,
        "times_dimensionality": str(analogsignal.t_start.units.dimensionality),
        "t_start": analogsignal.t_start.item(),
        "t_stop": analogsignal.t_stop.item(),
        "sampling_rate": float(analogsignal.sampling_rate.magnitude),
        "sampling_rate_units": str(analogsignal.sampling_rate.units.dimensionality)
    }
    return JsonResponse(graph_data)