def _handle_epochs_group(self, block):
    # Note that an NWB Epoch corresponds to a Neo Segment, not to a Neo Epoch.
    epochs = self._file.get('epochs')
    # todo: handle epochs.attrs.get('tags')
    for name, epoch in epochs.items():
        # todo: handle epoch.attrs.get('links')
        timeseries = []
        for key, value in epoch.items():
            if key == 'start_time':
                t_start = value * pq.second
            elif key == 'stop_time':
                t_stop = value * pq.second
            else:
                # todo: handle value['count']
                # todo: handle value['idx_start']
                timeseries.append(self._handle_timeseries(key, value.get('timeseries')))
        segment = Segment(name=name)
        for obj in timeseries:
            obj.segment = segment
            if isinstance(obj, AnalogSignal):
                segment.analogsignals.append(obj)
            elif isinstance(obj, IrregularlySampledSignal):
                segment.irregularlysampledsignals.append(obj)
            elif isinstance(obj, Event):
                segment.events.append(obj)
            elif isinstance(obj, Epoch):
                segment.epochs.append(obj)
        segment.block = block
        block.segments.append(segment)
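A minimal sketch of the traversal this method performs, assuming self._file is an h5py handle onto an NWB 1.x file (the file name and layout below are hypothetical):

import h5py

# hypothetical NWB 1.x file: each member of the 'epochs' group becomes
# one Neo Segment, exactly as _handle_epochs_group assumes above
with h5py.File("example.nwb", "r") as f:
    for name, epoch in f["epochs"].items():
        print(name, epoch.get("start_time"), epoch.get("stop_time"))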
Example #2
    def test_roundtrip_with_json_metadata(self):
        sample_data = np.random.uniform(size=(200, 3))
        filename = "test_roundtrip_with_json_metadata.txt"
        metadata_filename = "test_roundtrip_with_json_metadata_about.json"
        signal1 = AnalogSignal(sample_data,
                               units="pA",
                               sampling_rate=2 * pq.kHz)
        seg1 = Segment()
        block1 = Block()
        seg1.analogsignals.append(signal1)
        seg1.block = block1
        block1.segments.append(seg1)

        iow = AsciiSignalIO(filename, metadata_filename=metadata_filename)
        iow.write_block(block1)
        self.assertTrue(os.path.exists(metadata_filename))

        ior = AsciiSignalIO(filename)
        block2 = ior.read_block()
        assert len(block2.segments[0].analogsignals) == 3
        signal2 = block2.segments[0].analogsignals[1]

        assert_array_almost_equal(signal1.magnitude[:, 1],
                                  signal2.magnitude.reshape(-1),
                                  decimal=7)
        self.assertEqual(signal1.units, signal2.units)
        self.assertEqual(signal1.sampling_rate, signal2.sampling_rate)
        assert_array_equal(signal1.times, signal2.times)

        os.remove(filename)
        os.remove(metadata_filename)
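The side-car JSON written here records the parameters needed to reinterpret the plain-text samples. A hedged sketch of inspecting it; the exact key names depend on the neo version and are assumptions:

import json

with open("test_roundtrip_with_json_metadata_about.json") as f:
    metadata = json.load(f)
# "sampling_rate" and "units" are assumed keys, not guaranteed by the API
print(metadata.get("sampling_rate"), metadata.get("units"))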
Example #3
    def _read_segment(self, node, parent):
        attributes = self._get_standard_attributes(node)
        segment = Segment(**attributes)

        signals = []
        for name, child_node in node['analogsignals'].items():
            if "AnalogSignal" in name:
                signals.append(self._read_analogsignal(child_node, parent=segment))
        if signals and self.merge_singles:
            segment.unmerged_analogsignals = signals  # signals will be merged later
            signals = []
        for name, child_node in node['analogsignalarrays'].items():
            if "AnalogSignalArray" in name:
                signals.append(self._read_analogsignalarray(child_node, parent=segment))
        segment.analogsignals = signals

        irr_signals = []
        for name, child_node in node['irregularlysampledsignals'].items():
            if "IrregularlySampledSignal" in name:
                irr_signals.append(self._read_irregularlysampledsignal(child_node, parent=segment))
        if irr_signals and self.merge_singles:
            segment.unmerged_irregularlysampledsignals = irr_signals
            irr_signals = []
        segment.irregularlysampledsignals = irr_signals

        epochs = []
        for name, child_node in node['epochs'].items():
            if "Epoch" in name:
                epochs.append(self._read_epoch(child_node, parent=segment))
        if self.merge_singles:
            epochs = self._merge_data_objects(epochs)
        for name, child_node in node['epocharrays'].items():
            if "EpochArray" in name:
                epochs.append(self._read_epocharray(child_node, parent=segment))
        segment.epochs = epochs

        events = []
        for name, child_node in node['events'].items():
            if "Event" in name:
                events.append(self._read_event(child_node, parent=segment))
        if self.merge_singles:
            events = self._merge_data_objects(events)
        for name, child_node in node['eventarrays'].items():
            if "EventArray" in name:
                events.append(self._read_eventarray(child_node, parent=segment))
        segment.events = events

        spiketrains = []
        if len(node['spikes']) > 0:
            raise NotImplementedError('Spike objects not yet handled.')
        for name, child_node in node['spiketrains'].items():
            if "SpikeTrain" in name:
                spiketrains.append(self._read_spiketrain(child_node, parent=segment))
        segment.spiketrains = spiketrains

        segment.block = parent
        return segment
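The merge_singles option defers single-channel AnalogSignal objects so they can be combined later. A minimal sketch of what such a merge amounts to (not the IO's actual _merge_data_objects implementation): single-channel signals with identical t_start and sampling_rate are stacked column-wise into one multi-channel signal.

import numpy as np
import quantities as pq
from neo import AnalogSignal

singles = [AnalogSignal(np.random.randn(100, 1), units="mV",
                        sampling_rate=1 * pq.kHz) for _ in range(3)]
merged = AnalogSignal(np.hstack([s.magnitude for s in singles]),
                      units="mV", sampling_rate=1 * pq.kHz)
assert merged.shape == (100, 3)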
Example #4
    def read_block(self, lazy=False, **kwargs):
        # natural sort, so that e.g. "frame2.tif" sorts before "frame10.tif"
        def natural_sort(file_list):
            def convert(text):
                return int(text) if text.isdigit() else text.lower()

            def alphanum_key(key):
                return [convert(c) for c in re.split('([0-9]+)', key)]

            return sorted(file_list, key=alphanum_key)

        # find all the images in the given directory
        file_name_list = []
        # name of extensions to track
        types = ["*.tif", "*.tiff"]
        for file in types:
            file_name_list.append(glob.glob(self.filename + "/" + file))
        # flatten list
        file_name_list = [
            item for sublist in file_name_list for item in sublist
        ]
        # delete path in the name of file
        file_name_list = [
            file_name[len(self.filename) + 1::] for file_name in file_name_list
        ]
        # sorting file
        file_name_list = natural_sort(file_name_list)
        list_data_image = []
        for file_name in file_name_list:
            list_data_image.append(
                np.array(Image.open(self.filename + "/" + file_name),
                         dtype=float))  # np.float was removed in NumPy 1.24
        list_data_image = np.array(list_data_image)
        if len(list_data_image.shape) == 4:  # colour (RGB/RGBA) frames: re-read as grayscale
            list_data_image = []
            for file_name in file_name_list:
                list_data_image.append(
                    np.array(Image.open(self.filename + "/" +
                                        file_name).convert('L'),
                             dtype=float))

        print("read block")
        image_sequence = ImageSequence(np.stack(list_data_image),
                                       units=self.units,
                                       sampling_rate=self.sampling_rate,
                                       spatial_scale=self.spatial_scale)
        print("creating segment")
        segment = Segment(file_origin=self.filename)
        segment.annotate(tiff_file_names=file_name_list)
        segment.imagesequences = [image_sequence]

        block = Block(file_origin=self.filename)
        segment.block = block
        block.segments.append(segment)
        print("returning block")
        return block
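This method resembles neo's TiffIO, which scans a directory of TIFF frames into a single ImageSequence. A hypothetical usage sketch; the directory path and parameter values are made up:

import quantities as pq
from neo.io import TiffIO

io = TiffIO("path/to/frames", units="V",
            sampling_rate=25 * pq.Hz, spatial_scale=1.0 * pq.um)
block = io.read_block()
frames = block.segments[0].imagesequences[0]  # one ImageSequence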
Example #5
    def create_segment(self, parent=None, name='Segment'):
        segment = Segment()

        segment.block = parent

        self._assign_basic_attributes(segment, name=name)
        self._assign_datetime_attributes(segment)
        self._assign_index_attribute(segment)

        self._create_segment_children(segment)

        self._assign_annotations(segment)

        return segment
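create_segment relies on the reciprocal parent/child links used throughout these examples; distilled to its core:

from neo import Block, Segment

block = Block(name="demo")
segment = Segment(name="Segment")
segment.block = block            # child -> parent link
block.segments.append(segment)   # parent -> child link
assert segment.block is block and segment in block.segments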
Example #6
def _get_segment(self, block_name, segment_name):
    # If we've already created a Block with the given name return it,
    #   otherwise create it now and store it in self._blocks.
    # If we've already created a Segment in the given block, return it,
    #   otherwise create it now and return it.
    if block_name in self._blocks:
        block = self._blocks[block_name]
    else:
        block = Block(name=block_name, **self.global_block_metadata)
        self._blocks[block_name] = block
    segment = None
    for seg in block.segments:
        if segment_name == seg.name:
            segment = seg
            break
    if segment is None:
        segment = Segment(name=segment_name)
        segment.block = block
        block.segments.append(segment)
    return segment
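A module-level sketch of the same get-or-create pattern, handy for seeing the contract outside the class: asking twice with the same names returns the same objects.

from neo import Block, Segment

blocks = {}

def get_segment(block_name, segment_name):
    # same get-or-create logic as _get_segment above, without the class
    block = blocks.setdefault(block_name, Block(name=block_name))
    for seg in block.segments:
        if seg.name == segment_name:
            return seg
    segment = Segment(name=segment_name)
    segment.block = block
    block.segments.append(segment)
    return segment

assert get_segment("b0", "s0") is get_segment("b0", "s0")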
Example #7
    def read_block(self, lazy=False, **kwargs):

        file = open(self.filename, 'r')
        data = file.read()
        print("read block")
        liste_value = []
        record = []
        for i in range(len(data)):

            if data[i] == "\n" or data[i] == "\t":
                t = "".join(str(e) for e in record)
                liste_value.append(t)
                record = []
            else:
                record.append(data[i])

        data = []
        nb = 0
        for i in range(self.nb_frame):
            data.append([])
            for y in range(self.nb_row):
                data[i].append([])
                for x in range(self.nb_column):
                    data[i][y].append(liste_value[nb])
                    nb += 1

        image_sequence = ImageSequence(np.array(data, dtype='float'),
                                       units=self.units,
                                       sampling_rate=self.sampling_rate,
                                       spatial_scale=self.spatial_scale)
        file.close()
        print("creating segment")
        segment = Segment(file_origin=self.filename)
        segment.imagesequences = [image_sequence]

        block = Block(file_origin=self.filename)
        segment.block = block
        block.segments.append(segment)
        print("returning block")

        return block
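A hypothetical input file for the reader above: values separated by tabs or newlines, nb_frame * nb_row * nb_column numbers in total. Note that the parser only emits a value when it meets a separator, so the file must end with one.

import numpy as np

values = np.arange(12, dtype=float)  # e.g. 2 frames of 2x3 pixels
with open("demo_frames.txt", "w") as fp:
    fp.write("\t".join(str(v) for v in values) + "\n")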
Example #8
    def read_block(self, lazy=False, **kargs):
        def read(name, type, nb, dictionary, file):

            if type == 'int32':
                # dictionary[name] = int.from_bytes(file.read(4), byteorder=sys.byteorder, signed=True)
                dictionary[name] = struct.unpack("i", file.read(4))[0]
            if type == 'float32':
                dictionary[name] = struct.unpack('f', file.read(4))[0]
            if type == 'uint8':
                l = []
                for i in range(nb):
                    l.append(chr(struct.unpack('B', file.read(1))[0]))
                dictionary[name] = l
            if type == 'uint16':
                l = []
                for i in range(nb):
                    l.append((struct.unpack('H', file.read(2)))[0])
                dictionary[name] = l
            if type == 'short':
                dictionary[name] = struct.unpack('h', file.read(2))[0]

            return dictionary

        def read_header(file_name):

            file = open(file_name, "rb")

            i = [['file_size', 'int32', 1], ['checksum_header', 'int32', 1],
                 ['check_data', 'int32', 1], ['lenheader', 'int32', 1],
                 ['versionid', 'float32', 1], ['filetype', 'int32', 1],
                 ['filesubtype', 'int32', 1], ['datatype', 'int32', 1],
                 ['sizeof', 'int32', 1], ['framewidth', 'int32', 1],
                 ['frameheight', 'int32', 1], ['nframesperstim', 'int32', 1],
                 ['nstimuli', 'int32', 1], ['initialxbinfactor', 'int32', 1],
                 ['initialybinfactor', 'int32', 1], ['xbinfactor', 'int32', 1],
                 ['ybinfactor', 'int32', 1], ['username', 'uint8', 32],
                 ['recordingdate', 'uint8', 16], ['x1roi', 'int32', 1],
                 ['y1roi', 'int32', 1], ['x2roi', 'int32', 1],
                 ['y2roi', 'int32', 1], ['stimoffs', 'int32', 1],
                 ['stimsize', 'int32', 1], ['frameoffs', 'int32', 1],
                 ['framesize', 'int32', 1], ['refoffs', 'int32', 1],
                 ['refsize', 'int32', 1], ['refwidth', 'int32', 1],
                 ['refheight', 'int32', 1], ['whichblocks', 'uint16', 16],
                 ['whichframe', 'uint16', 16], ['loclip', 'int32', 1],
                 ['hiclip', 'int32', 1], ['lopass', 'int32', 1],
                 ['hipass', 'int32', 1], ['operationsperformed', 'uint8', 64],
                 ['magnification', 'float32', 1], ['gain', 'uint16', 1],
                 ['wavelength', 'uint16', 1], ['exposuretime', 'int32', 1],
                 ['nrepetitions', 'int32', 1],
                 ['acquisitiondelay', 'int32', 1],
                 ['interstiminterval', 'int32', 1],
                 ['creationdate', 'uint8', 16], ['datafilename', 'uint8', 64],
                 ['orareserved', 'uint8', 256]]

            dic = {}
            for x in i:
                dic = read(name=x[0],
                           type=x[1],
                           nb=x[2],
                           dictionary=dic,
                           file=file)

            if dic['filesubtype'] == 13:
                i = [["includesrefframe", "int32", 1], ["temp", "uint8", 128],
                     ["ntrials", "int32", 1], ["scalefactors", "int32", 1],
                     ["cameragain", "short", 1], ["ampgain", "short", 1],
                     ["samplingrate", "short", 1], ["average", "short", 1],
                     ["exposuretime", "short", 1],
                     ["samplingaverage", "short", 1],
                     ["presentaverage", "short", 1],
                     ["framesperstim", "short", 1],
                     ["trialsperblock", "short", 1],
                     ["sizeofanalogbufferinframes", "short", 1],
                     ["cameratrials", "short", 1], ["filler", "uint8", 106],
                     ["dyedaqreserved", "uint8", 106]]
                for x in i:
                    dic = read(name=x[0],
                               type=x[1],
                               nb=x[2],
                               dictionary=dic,
                               file=file)
                # not tested
                # MATLAB: p.listofstimuli=temp(1:max(find(temp~=0)))';  % up to last non-zero stimulus
                temp = np.array([ord(c) for c in dic["temp"]])
                dic["listofstimuli"] = dic["temp"][:temp.nonzero()[0].max() + 1]
            else:
                i = [["includesrefframe", "int32", 1],
                     ["listofstimuli", "uint8", 256],
                     ["nvideoframesperdataframe", "int32", 1],
                     ["ntrials", "int32", 1], ["scalefactor", "int32", 1],
                     ["meanampgain", "float32",
                      1], ["meanampdc", "float32", 1],
                     ["vdaqreserved", "uint8", 256]]
                for x in i:
                    dic = read(name=x[0],
                               type=x[1],
                               nb=x[2],
                               dictionary=dic,
                               file=file)
            i = [["user", "uint8", 256], ["comment", "uint8", 256],
                 ["refscalefactor", "int32", 1]]
            for x in i:
                dic = read(name=x[0],
                           type=x[1],
                           nb=x[2],
                           dictionary=dic,
                           file=file)
            dic["actuallength"] = os.stat(file_name).st_size
            file.close()

            return dic

        # start of the reading process
        nblocks = 1
        print("reading the header")
        header = read_header(self.filename)
        nstim = header['nstimuli']
        ni = header['framewidth']
        nj = header['frameheight']
        nfr = header['nframesperstim']
        lenh = header['lenheader']
        framesize = header['framesize']
        filesize = header['file_size']
        dtype = header['datatype']
        gain = header['meanampgain']
        dc = header['meanampdc']
        scalefactor = header['scalefactor']

        # [["dtype","nbytes","datatype","type_out"],[...]]
        l = [[11, 1, "uchar", "uint8", "B"], [12, 2, "ushort", "uint16", "H"],
             [13, 4, "ulong", "uint32", "I"], [14, 4, "float", "single", "f"]]

        for i in l:
            if dtype == i[0]:
                nbytes, datatype, type_out, struct_type = i[1], i[2], i[3], i[
                    4]

        if framesize != ni * nj * nbytes:
            print(
                "BAD HEADER!!! framesize does not match framewidth*frameheight*nbytes!"
            )
            framesize = ni * nj * nbytes
        if (filesize - lenh) > (framesize * nfr * nstim):
            nfr2 = nfr + 1
            includesrefframe = True
        else:
            nfr2 = nfr
            includesrefframe = False

        nbin = nblocks
        conds = [i for i in range(1, nstim + 1)]
        ncond = len(conds)
        data = [[[np.zeros((ni, nj, nfr), type_out)] for x in range(ncond)]
                for i in range(nbin)]
        for k in range(1, nbin + 1):
            print("reading block")
            # partition the trials of this block into nbin bins
            bin = np.arange(math.floor((k - 1) / nbin * nblocks + 1),
                            math.floor(k / nbin * nblocks + 1))
            sbin = bin.size
            for j in range(1, sbin + 1):
                file = open(self.filename, 'rb')
                for i in range(1, ncond + 1):

                    framestart = conds[i - 1] * nfr2 - nfr
                    offset = framestart * ni * nj * nbytes + lenh
                    file.seek(offset, 0)

                    a = [(struct.unpack(struct_type, file.read(nbytes)))[0]
                         for m in range(ni * nj * nfr)]
                    a = np.reshape(np.array(a, dtype=type_out, order='F'),
                                   (ni * nj, nfr),
                                   order='F')
                    a = np.reshape(a, (ni, nj, nfr), order='F')

                    if includesrefframe:
                        # not tested
                        framestart = (conds[i - 1] - 1) * nfr2
                        offset = framestart * ni * nj * nbytes + lenh

                        file.seek(offset)

                        # read the reference frame (it is stored but, as in
                        # the original loop, not combined with the data)
                        ref = [(struct.unpack(struct_type,
                                              file.read(nbytes)))[0]
                               for m in range(ni * nj)]
                        ref = np.reshape(np.array(ref, dtype=type_out) * scalefactor,
                                         (ni, nj))
                        # rescale the data frames by the amplifier gain and
                        # subtract the scaled DC offset (vectorized form of
                        # the original element-wise loop)
                        a = (a / gain) - (scalefactor * dc / gain)
                    if sbin == 1:
                        data[k - 1][i - 1] = a
                    else:
                        # not tested: accumulate the average across the
                        # trials in this bin
                        data[k - 1][i - 1] = data[k - 1][i - 1] + a / sbin

                file.close()

        # data format [block][stim][width][height][frame]]
        # data structure should be [block][stim][frame][width][height] in order to be easy to use with neo
        # each file is a block
        # each stim could be a segment
        # then an image sequence [frame][width][height]
        # image need to be rotated

        # changing order of data for compatibility
        # [block][stim][width][height][frame]]
        # to
        # [block][stim][frame][width][height]

        for block in range(len(data)):
            for stim in range(len(data[block])):
                # reorder [width][height][frame] -> [frame][width][height]
                a = np.transpose(np.asarray(data[block][stim]), (2, 0, 1))
                # rotate each frame to match Thomas Deneux's screenshots
                data[block][stim] = np.array([np.rot90(np.fliplr(frame))
                                              for frame in a])

        block = Block(file_origin=self.filename)
        for key in header:
            block.annotations[key] = header[key]
        for stim in range(len(data[0])):
            image_sequence = ImageSequence(data[0][stim],
                                           units=self.units,
                                           sampling_rate=self.sampling_rate,
                                           spatial_scale=self.spatial_scale)
            segment = Segment(file_origin=self.filename,
                              description=("stim nb:" + str(stim)))
            segment.imagesequences = [image_sequence]
            segment.block = block
            block.segments.append(segment)

        print("returning block")

        return block
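This reader resembles neo's BlkIO for VDAQ .blk optical-imaging files. A hypothetical usage sketch with made-up file name and parameters; one Segment per stimulus comes back, each holding a single ImageSequence:

import quantities as pq
from neo.io import BlkIO

io = BlkIO("recording.blk", units="V",
           sampling_rate=1.0 * pq.Hz, spatial_scale=1.0 * pq.um)
block = io.read_block()
print(len(block.segments), "stimuli")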
Example #9
    def test_roundtrip(self):

        annotations = {"session_start_time": datetime.now()}
        # Define Neo blocks
        bl0 = Block(name='First block', **annotations)
        bl1 = Block(name='Second block', **annotations)
        bl2 = Block(name='Third block', **annotations)
        original_blocks = [bl0, bl1, bl2]

        num_seg = 4  # number of segments
        num_chan = 3  # number of channels

        for blk in original_blocks:

            for ind in range(num_seg):  # number of Segments
                seg = Segment(index=ind)
                seg.block = blk
                blk.segments.append(seg)

            for seg in blk.segments:  # AnalogSignal objects

                # 3 Neo AnalogSignals
                a = AnalogSignal(np.random.randn(44, num_chan) * pq.nA,
                                 sampling_rate=10 * pq.kHz,
                                 t_start=50 * pq.ms)
                b = AnalogSignal(np.random.randn(64, num_chan) * pq.mV,
                                 sampling_rate=8 * pq.kHz,
                                 t_start=40 * pq.ms)
                c = AnalogSignal(np.random.randn(33, num_chan) * pq.uA,
                                 sampling_rate=10 * pq.kHz,
                                 t_start=120 * pq.ms)

                # 1 Neo IrregularlySampledSignal
                d = IrregularlySampledSignal(
                    np.arange(7.0) * pq.ms,
                    np.random.randn(7, num_chan) * pq.mV)

                # 2 Neo SpikeTrains
                train = SpikeTrain(times=[1, 2, 3] * pq.s,
                                   t_start=1.0,
                                   t_stop=10.0)
                train2 = SpikeTrain(times=[4, 5, 6] * pq.s, t_stop=10.0)
                # todo: add waveforms

                # 1 Neo Event
                evt = Event(times=np.arange(0, 30, 10) * pq.ms,
                            labels=np.array(['ev0', 'ev1', 'ev2']))

                # 2 Neo Epochs
                epc = Epoch(times=np.arange(0, 30, 10) * pq.s,
                            durations=[10, 5, 7] * pq.ms,
                            labels=np.array(['btn0', 'btn1', 'btn2']))

                epc2 = Epoch(times=np.arange(10, 40, 10) * pq.s,
                             durations=[9, 3, 8] * pq.ms,
                             labels=np.array(['btn3', 'btn4', 'btn5']))

                seg.spiketrains.append(train)
                seg.spiketrains.append(train2)

                seg.epochs.append(epc)
                seg.epochs.append(epc2)

                seg.analogsignals.append(a)
                seg.analogsignals.append(b)
                seg.analogsignals.append(c)
                seg.irregularlysampledsignals.append(d)
                seg.events.append(evt)
                a.segment = seg
                b.segment = seg
                c.segment = seg
                d.segment = seg
                evt.segment = seg
                train.segment = seg
                train2.segment = seg
                epc.segment = seg
                epc2.segment = seg

        # write to file
        test_file_name = "test_round_trip.nwb"
        iow = NWBIO(filename=test_file_name, mode='w')
        iow.write_all_blocks(original_blocks)

        ior = NWBIO(filename=test_file_name, mode='r')
        retrieved_blocks = ior.read_all_blocks()

        self.assertEqual(len(retrieved_blocks), 3)
        self.assertEqual(len(retrieved_blocks[2].segments), num_seg)

        original_signal_22b = original_blocks[2].segments[2].analogsignals[1]
        retrieved_signal_22b = retrieved_blocks[2].segments[2].analogsignals[1]
        for attr_name in ("name", "units", "sampling_rate", "t_start"):
            retrieved_attribute = getattr(retrieved_signal_22b, attr_name)
            original_attribute = getattr(original_signal_22b, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(retrieved_signal_22b.magnitude,
                           original_signal_22b.magnitude)

        original_issignal_22d = original_blocks[2].segments[2].irregularlysampledsignals[0]
        retrieved_issignal_22d = retrieved_blocks[2].segments[2].irregularlysampledsignals[0]
        for attr_name in ("name", "units", "t_start"):
            retrieved_attribute = getattr(retrieved_issignal_22d, attr_name)
            original_attribute = getattr(original_issignal_22d, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_issignal_22d.times.rescale('ms').magnitude,
            original_issignal_22d.times.rescale('ms').magnitude)
        assert_array_equal(retrieved_issignal_22d.magnitude,
                           original_issignal_22d.magnitude)

        original_event_11 = original_blocks[1].segments[1].events[0]
        retrieved_event_11 = retrieved_blocks[1].segments[1].events[0]
        for attr_name in ("name", ):
            retrieved_attribute = getattr(retrieved_event_11, attr_name)
            original_attribute = getattr(original_event_11, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_event_11.rescale('ms').magnitude,
            original_event_11.rescale('ms').magnitude)
        assert_array_equal(retrieved_event_11.labels, original_event_11.labels)

        original_spiketrain_131 = original_blocks[1].segments[1].spiketrains[1]
        retrieved_spiketrain_131 = retrieved_blocks[1].segments[1].spiketrains[1]
        for attr_name in ("name", "t_start", "t_stop"):
            retrieved_attribute = getattr(retrieved_spiketrain_131, attr_name)
            original_attribute = getattr(original_spiketrain_131, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_spiketrain_131.times.rescale('ms').magnitude,
            original_spiketrain_131.times.rescale('ms').magnitude)

        original_epoch_11 = original_blocks[1].segments[1].epochs[0]
        retrieved_epoch_11 = retrieved_blocks[1].segments[1].epochs[0]
        for attr_name in ("name", ):
            retrieved_attribute = getattr(retrieved_epoch_11, attr_name)
            original_attribute = getattr(original_epoch_11, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(
            retrieved_epoch_11.rescale('ms').magnitude,
            original_epoch_11.rescale('ms').magnitude)
        assert_allclose(
            retrieved_epoch_11.durations.rescale('ms').magnitude,
            original_epoch_11.durations.rescale('ms').magnitude)
        assert_array_equal(retrieved_epoch_11.labels, original_epoch_11.labels)
        os.remove(test_file_name)
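Distilled to its core, the round trip exercised above is just write-then-read; note the session_start_time annotation, which NWB requires on every Block. A minimal sketch:

from datetime import datetime
from neo import Block, Segment
from neo.io import NWBIO

block = Block(name="demo", session_start_time=datetime.now())
block.segments.append(Segment(index=0))
NWBIO(filename="demo.nwb", mode="w").write_all_blocks([block])
retrieved = NWBIO(filename="demo.nwb", mode="r").read_all_blocks()[0]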
Example #10
    def test_roundtrip_with_annotations(self):
        # test with NWB-specific annotations

        original_block = Block(name="experiment",
                               session_start_time=datetime.now())
        segment = Segment(name="session 1")
        original_block.segments.append(segment)
        segment.block = original_block

        electrode_annotations = {
            "name": "electrode #1",
            "description": "intracellular electrode",
            "device": {
                "name": "electrode #1"
            }
        }
        stimulus_annotations = {
            "nwb_group": "stimulus",
            "nwb_neurodata_type":
            ("pynwb.icephys", "CurrentClampStimulusSeries"),
            "nwb_electrode": electrode_annotations,
            "nwb:sweep_number": 1,
            "nwb:gain": 1.0
        }
        response_annotations = {
            "nwb_group": "acquisition",
            "nwb_neurodata_type": ("pynwb.icephys", "CurrentClampSeries"),
            "nwb_electrode": electrode_annotations,
            "nwb:sweep_number": 1,
            "nwb:gain": 1.0,
            "nwb:bias_current": 1e-12,
            "nwb:bridge_balance": 70e6,
            "nwb:capacitance_compensation": 1e-12
        }
        stimulus = AnalogSignal(np.random.randn(100, 1) * pq.nA,
                                sampling_rate=5 * pq.kHz,
                                t_start=50 * pq.ms,
                                name="stimulus",
                                **stimulus_annotations)
        response = AnalogSignal(np.random.randn(100, 1) * pq.mV,
                                sampling_rate=5 * pq.kHz,
                                t_start=50 * pq.ms,
                                name="response",
                                **response_annotations)
        segment.analogsignals = [stimulus, response]
        stimulus.segment = response.segment = segment

        test_file_name = "test_round_trip_with_annotations.nwb"
        iow = NWBIO(filename=test_file_name, mode='w')
        iow.write_all_blocks([original_block])

        nwbfile = pynwb.NWBHDF5IO(test_file_name, mode="r").read()

        self.assertIsInstance(nwbfile.acquisition["response"],
                              pynwb.icephys.CurrentClampSeries)
        self.assertIsInstance(nwbfile.stimulus["stimulus"],
                              pynwb.icephys.CurrentClampStimulusSeries)
        self.assertEqual(nwbfile.acquisition["response"].bridge_balance,
                         response_annotations["nwb:bridge_balance"])

        ior = NWBIO(filename=test_file_name, mode='r')
        retrieved_block = ior.read_all_blocks()[0]

        original_response = original_block.segments[0].filter(
            name="response")[0]
        retrieved_response = retrieved_block.segments[0].filter(
            name="response")[0]
        for attr_name in ("name", "units", "sampling_rate", "t_start"):
            retrieved_attribute = getattr(retrieved_response, attr_name)
            original_attribute = getattr(original_response, attr_name)
            self.assertEqual(retrieved_attribute, original_attribute)
        assert_array_equal(retrieved_response.magnitude,
                           original_response.magnitude)

        os.remove(test_file_name)
Example #11
    def read_block(self,
                   lazy=False,
                   cascade=True,
                   signal_names=None,
                   signal_units=None):
        block = Block(file_origin=self.filename)
        segment = Segment(name="default")
        block.segments.append(segment)
        segment.block = block

        spike_times = defaultdict(list)
        spike_file = self.filename + ".dat"
        print("SPIKEFILE: {}".format(spike_file))
        if os.path.exists(spike_file):
            print("Loading data from {}".format(spike_file))
            with open(spike_file, 'r') as fp:
                for line in fp:
                    if line[0] != '#':
                        entries = line.strip().split()
                        if len(entries) > 1:
                            time = float(entries[0])
                            for id in entries[1:]:
                                spike_times[id].append(time)
                t_stop = float(entries[0])
            if spike_times:
                min_id = min(map(int, spike_times))
                segment.spiketrains = [
                    SpikeTrain(times,
                               t_stop=t_stop,
                               units="ms",
                               id=int(id),
                               source_index=int(id) - min_id)
                    for id, times in spike_times.items()
                ]
        signal_files = glob("{}_state.*.dat".format(self.filename))
        print(signal_files)
        for signal_file in signal_files:
            print("Loading data from {}".format(signal_file))
            population = os.path.basename(signal_file).split(".")[1]
            try:
                data = np.loadtxt(signal_file, delimiter=", ")
            except ValueError:
                print("Couldn't load data from file {}".format(signal_file))
                continue
            t_start = data[0, 1]
            ids = data[:, 0]
            unique_ids = np.unique(ids)
            for column in range(2, data.shape[1]):
                if signal_names is None:
                    signal_name = "signal{}".format(column - 2)
                else:
                    signal_name = signal_names[column - 2]
                if signal_units is None:
                    units = "mV"  # seems like a reasonable default
                else:
                    units = signal_units[column - 2]
                signals_by_id = {}
                for id in unique_ids:
                    times = data[ids == id, 1]
                    unique_times, idx = np.unique(
                        times, return_index=True
                    )  # some time points are represented twice
                    signals_by_id[id] = data[ids == id, column][idx]
                    assert signals_by_id[id].shape == signals_by_id[
                        unique_ids[0]].shape
                channel_ids = np.array(list(signals_by_id.keys()))
                sampling_period = unique_times[1] - unique_times[0]
                assert sampling_period != 0.0, sampling_period
                signal = AnalogSignal(np.vstack(signals_by_id.values()).T,
                                      units=units,
                                      t_start=t_start * pq.ms,
                                      sampling_period=sampling_period * pq.ms,
                                      name=signal_name,
                                      population=population)
                #signal.channel_index = ChannelIndex(np.arange(signal.shape[1], int),
                #                                    channel_ids=channel_ids)
                signal.channel_index = channel_ids
                segment.analogsignals.append(signal)

        return block
Example #12
    def read_block(self, lazy=False, cascade=True, signal_names=None, signal_units=None):
        block = Block(file_origin=self.filename)
        segment = Segment(name="default")
        block.segments.append(segment)
        segment.block = block

        spike_times = defaultdict(list)
        spike_file = self.filename + ".dat"
        print("SPIKEFILE: {}".format(spike_file))
        if os.path.exists(spike_file):
            print("Loading data from {}".format(spike_file))
            with open(spike_file, 'r') as fp:
                for line in fp:
                    if line[0] != '#':
                        entries = line.strip().split()
                        if len(entries) > 1:
                            time = float(entries[0])
                            for id in entries[1:]:
                                spike_times[id].append(time)
                t_stop = float(entries[0])
            if spike_times:
                min_id = min(map(int, spike_times))
            segment.spiketrains = [SpikeTrain(times, t_stop=t_stop, units="ms",
                                              id=int(id), source_index=int(id) - min_id)
                                   for id, times in spike_times.items()]
        signal_files = glob("{}_state.*.dat".format(self.filename))
        print(signal_files)
        for signal_file in signal_files:
            print("Loading data from {}".format(signal_file))
            population = os.path.basename(signal_file).split(".")[1]
            try:
                data = np.loadtxt(signal_file, delimiter=", ")
            except ValueError:
                print("Couldn't load data from file {}".format(signal_file))
                continue
            t_start = data[0, 1]
            ids = data[:, 0]
            unique_ids = np.unique(ids)
            for column in range(2, data.shape[1]):
                if signal_names is None:
                    signal_name = "signal{}".format(column - 2)
                else:
                    signal_name = signal_names[column - 2]
                if signal_units is None:
                    units = "mV"  # seems like a reasonable default
                else:
                    units = signal_units[column - 2]
                signals_by_id = {}
                for id in unique_ids:
                    times = data[ids == id, 1]
                    unique_times, idx = np.unique(times, return_index=True)  # some time points are represented twice
                    signals_by_id[id] = data[ids == id, column][idx]
                channel_ids = np.array(list(signals_by_id.keys()))
                if len(unique_times) > 1:
                    sampling_period = unique_times[1] - unique_times[0]
                    assert sampling_period != 0.0, sampling_period
                    signal_lengths = np.array([s.size for s in signals_by_id.values()])
                    min_length = signal_lengths.min()
                    if not (signal_lengths == signal_lengths[0]).all():
                        print("Warning: signals have different sizes: min={}, max={}".format(min_length,
                                                                                             signal_lengths.max()))
                        print("Truncating to length {}".format(min_length))
                    signal = AnalogSignal(np.vstack([s[:min_length] for s in signals_by_id.values()]).T,
                                          units=units,
                                          t_start=t_start * pq.ms,
                                          sampling_period=sampling_period*pq.ms,
                                          name=signal_name,
                                          population=population)
                    #signal.channel_index = ChannelIndex(np.arange(signal.shape[1], int),
                    #                                    channel_ids=channel_ids)
                    signal.channel_index = channel_ids
                    segment.analogsignals.append(signal)

        return block
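A hypothetical spike file accepted by the parsing loop in both variants above: lines starting with '#' are comments, each data line is a time followed by the ids of the cells that spiked at that time, and the time on the last line is taken as t_stop.

with open("demo.dat", "w") as fp:
    fp.write("# time ids\n")
    fp.write("0.5 3 7\n")
    fp.write("1.0 3\n")
    fp.write("10.0\n")  # a trailing time-only line sets t_stop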