def import_spiketrains(epoch, protocol, segment):
    """Import every spike train in *segment* as an analysis record on *epoch*.

    For each `neo` spike train, an analysis record is created (with all of the
    epoch's measurements as inputs and *protocol* as its protocol), and a
    numeric artifact holding the spike times and spike waveforms is attached.

    :param epoch: Ovation Epoch that receives the analysis records
    :param protocol: protocol associated with each created record
    :param segment: `neo` Segment whose ``spiketrains`` are imported
    """
    for index, train in enumerate(segment.spiketrains):
        # Record-level parameters: timing metadata rescaled to fixed units.
        params = {
            't_start_ms': train.t_start.rescale(pq.ms).item(),
            't_stop_ms': train.t_stop.rescale(pq.ms).item(),
            'sampling_rate_hz': train.sampling_rate.rescale(pq.Hz).item(),
            'description': train.description,
            'file_origin': train.file_origin,
        }

        # Fall back to a positional name when the train is unnamed.
        name = train.name or "spike train {}".format(index + 1)

        # Every measurement on the epoch becomes an input of the record.
        inputs = Maps.newHashMap()
        for measurement in iterable(epoch.getMeasurements()):
            inputs.put(measurement.getName(), measurement)

        record = epoch.addAnalysisRecord(name, inputs, protocol, to_map(params))

        # Annotate the arrays so the artifact writer can label each axis.
        train.sampling_rates = [train.sampling_rate for _ in train.shape]
        train.waveforms.labels = ['channel index', 'time', 'spike']
        train.waveforms.sampling_rates = [0, train.sampling_rate, 0] * pq.Hz

        insert_numeric_analysis_artifact(record,
                                         name,
                                         {'spike times': train,
                                          'spike waveforms': train.waveforms})
def should_round_trip_multi_element_data_frame(self):
    """A data frame with two named arrays should round-trip through an
    analysis-record artifact: what we insert is what `as_data_frame` reads back.
    """
    def _annotated(units):
        # Build a 10x10 random quantity array carrying the metadata that
        # insert_numeric_analysis_artifact serializes alongside the values.
        data = np.random.randn(10, 10) * units
        data.labels = [u'volts', u'other']
        data.name = u'name'
        data.sampling_rates = [1.0 * pq.Hz, 10.0 * pq.Hz]
        return data

    seconds_frame = _annotated(pq.s)
    volts_frame = _annotated(pq.V)

    epoch = self.expt.insertEpoch(DateTime(), DateTime(), self.protocol, None, None)
    record = epoch.addAnalysisRecord("record", to_map({}), self.protocol, to_map({}))

    expected = {'result': seconds_frame, 'other-result': volts_frame}
    record_name = "record-name"

    artifact = insert_numeric_analysis_artifact(record, record_name, expected)
    assert artifact is not None

    # Give the backing store a moment to persist before reading back.
    sleep(0.5)

    actual = as_data_frame(record.getOutputs().get(record_name))
    assert_data_frame_equals(expected, actual)