def test_block_verify(self):
        # check that file verifies against itself for single block
        filename = self.get_filename_path(
            'Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs')
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
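        # each memmap entry is one structured NCS record: a timestamp, channel
        # and sampling-rate fields, nb_valid, and a fixed-size block of samples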
        hdr0 = NlxHeader(filename)
        nb0 = NcsSectionsFactory.build_for_ncs_file(data0, hdr0)

        self.assertTrue(NcsSectionsFactory._verifySectionsStructure(
            data0, nb0))

        # check that verification fails against a file with two blocks
        filename = self.get_filename_path(
            'BML_unfilledsplit/original_data/unfilledSplitRecords.Ncs')
        data1 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        hdr1 = NlxHeader(filename)
        nb1 = NcsSectionsFactory.build_for_ncs_file(data1, hdr1)

        self.assertFalse(
            NcsSectionsFactory._verifySectionsStructure(data1, nb0))

        # check that the two-block structure verifies against itself
        self.assertTrue(NcsSectionsFactory._verifySectionsStructure(
            data1, nb1))

    def test_recording_types(self):

        for typeTest in self.ncsTypeTestFiles:

            filename = self.get_filename_path(typeTest[0])
            hdr = NlxHeader(filename)
            self.assertEqual(hdr.type_of_recording(), typeTest[1])

    def test_build_using_header_and_scanning(self):

        # Test early files where the frequency listed in the header is
        # floor(1e6 / (actual number of microseconds between samples))
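        # e.g. 35 µs between samples: the header reports floor(1e6 / 35) = 28571 Hz,
        # while the actual rate recovered below is 1e6 / 35 ≈ 28571.43 Hz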
        filename = self.get_filename_path(
            'Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs')
        hdr = NlxHeader(filename)
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)

        self.assertEqual(nb.sampFreqUsed, 1 / 35e-6)
        self.assertEqual(nb.microsPerSampUsed, 35)
        self.assertEqual(len(nb.sects), 1)
        self.assertEqual(nb.sects[0].startRec, 0)
        self.assertEqual(nb.sects[0].endRec, 9)

        # test Cheetah 5.5.1, which is DigitalLynxSX and has two blocks of records
        # with a fairly large gap
        filename = self.get_filename_path(
            'Cheetah_v5.5.1/original_data/Tet3a.ncs')
        hdr = NlxHeader(filename)
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)
        self.assertEqual(nb.sampFreqUsed, 32000)
        self.assertEqual(nb.microsPerSampUsed, 31.25)
        self.assertEqual(len(nb.sects), 2)
        self.assertListEqual([blk.startRec for blk in nb.sects], [0, 2498])
        self.assertListEqual([blk.endRec for blk in nb.sects], [2497, 3331])
    def test_block_start_and_end_times(self):
        # DigitalLynxSX version to exercise the _parseForMaxGap function with multiple blocks
        filename = self.get_local_path(
            'neuralynx/Cheetah_v6.3.2/incomplete_blocks/CSC1_reduced.ncs')
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        hdr = NlxHeader(filename)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)
        self.assertListEqual([blk.startTime for blk in nb.sects],
                             [8408806811, 8427832053, 8487768561])
        self.assertListEqual([blk.endTime for blk in nb.sects],
                             [8427831990, 8487768498, 8515816549])

        # DigitalLynxSX with single block of records to exercise path in _buildForMaxGap
        filename = self.get_local_path(
            'neuralynx/Cheetah_v1.1.0/original_data/CSC67_trunc.Ncs')
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        hdr = NlxHeader(filename)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)
        self.assertEqual(len(nb.sects), 1)
        self.assertEqual(nb.sects[0].startTime, 253293161778)
        self.assertEqual(nb.sects[0].endTime, 253293349278)

        # PRE4 version with single block of records to exercise path in _buildGivenActualFrequency
        filename = self.get_local_path(
            'neuralynx/Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs')
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        hdr = NlxHeader(filename)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)
        self.assertEqual(len(nb.sects), 1)
        self.assertEqual(nb.sects[0].startTime, 266982936)
        self.assertEqual(nb.sects[0].endTime, 267162136)

        # BML style with two blocks of records and one partially filled record to exercise
        # _parseGivenActualFrequency
        filename = self.get_local_path(
            'neuralynx/BML_unfilledsplit/original_data/unfilledSplitRecords.Ncs'
        )
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        hdr = NlxHeader(filename)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)
        self.assertEqual(len(nb.sects), 2)
        self.assertListEqual([blk.startTime for blk in nb.sects],
                             [1837623129, 6132625241])
        self.assertListEqual([blk.endTime for blk in nb.sects],
                             [1837651009, 6132642649])

    def test_ncsblocks_partial(self):
        filename = self.get_filename_path(
            'Cheetah_v6.3.2/incomplete_blocks/CSC1_reduced.ncs')
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)
        self.assertEqual(data0.shape[0], 6690)
        self.assertEqual(data0['timestamp'][6689],
                         8515800549)  # timestamp of last record

        hdr = NlxHeader(filename)
        nb = NcsSectionsFactory.build_for_ncs_file(data0, hdr)
        self.assertEqual(nb.sampFreqUsed, 32000.012813673042)
        self.assertEqual(nb.microsPerSampUsed, 31.249987486652431)
        self.assertListEqual([blk.startRec for blk in nb.sects],
                             [0, 1190, 4937])
        self.assertListEqual([blk.endRec for blk in nb.sects],
                             [1189, 4936, 6689])
    def _get_file_map(self, filename):
        """
        Create a memory map for the given file when needed.
        See also https://github.com/numpy/numpy/issues/19340
        """
        filename = pathlib.Path(filename)
        suffix = filename.suffix.lower()[1:]

        if suffix == 'ncs':
            return np.memmap(filename,
                             dtype=self._ncs_dtype,
                             mode='r',
                             offset=NlxHeader.HEADER_SIZE)

        elif suffix in ['nse', 'ntt']:
            info = NlxHeader(filename)
            dtype = get_nse_or_ntt_dtype(info, suffix)

            # return empty map if file does not contain data
            if os.path.getsize(filename) <= NlxHeader.HEADER_SIZE:
                self._empty_nse_ntt.append(filename)
                return np.zeros((0, ), dtype=dtype)

            return np.memmap(filename,
                             dtype=dtype,
                             mode='r',
                             offset=NlxHeader.HEADER_SIZE)

        elif suffix == 'nev':
            return np.memmap(filename,
                             dtype=nev_dtype,
                             mode='r',
                             offset=NlxHeader.HEADER_SIZE)

        else:
            raise ValueError(f'Unknown file suffix {suffix}')
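
    # Usage sketch (illustrative only; assumes a constructed reader and an
    # existing file at the path shown):
    #
    #     data = reader._get_file_map('session_dir/CSC1.ncs')
    #     first_ts = data['timestamp'][0]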
    def _parse_header(self):

        sig_channels = []
        unit_channels = []
        event_channels = []

        self.ncs_filenames = OrderedDict()  # (chan_name, chan_id): filename
        self.nse_ntt_filenames = OrderedDict()  # (chan_name, chan_id): filename
        self.nev_filenames = OrderedDict()  # chan_id: filename

        self._nev_memmap = {}
        self._spike_memmap = {}
        self.internal_unit_ids = []  # channel_index > ((channel_name, channel_id), unit_id)
        self.internal_event_ids = []
        self._empty_ncs = []  # this list contains filenames of empty files
        self._empty_nev = []
        self._empty_nse_ntt = []

        # Explore the directory looking for ncs, nev, nse and ntt
        # and construct channels headers.
        signal_annotations = []
        unit_annotations = []
        event_annotations = []

        for filename in sorted(os.listdir(self.dirname)):
            filename = os.path.join(self.dirname, filename)

            _, ext = os.path.splitext(filename)
            ext = ext[1:]  # remove dot
            ext = ext.lower()  # make lower case for comparisons
            if ext not in self.extensions:
                continue

            # Skip Ncs files that contain only a header. Other empty file types
            # will have an empty dataset constructed later.
            if os.path.getsize(filename) <= NlxHeader.HEADER_SIZE and ext == 'ncs':
                self._empty_ncs.append(filename)
                continue

            # All files have more or less the same header structure
            info = NlxHeader(filename)
            chan_names = info['channel_names']
            chan_ids = info['channel_ids']

            for idx, chan_id in enumerate(chan_ids):
                chan_name = chan_names[idx]

                chan_uid = (chan_name, chan_id)
                if ext == 'ncs':
                    # a sampled signal channel
                    units = 'uV'
                    gain = info['bit_to_microVolt'][idx]
                    if info.get('input_inverted', False):
                        gain *= -1
                    offset = 0.
                    group_id = 0
                    sig_channels.append(
                        (chan_name, chan_id, info['sampling_rate'], 'int16',
                         units, gain, offset, group_id))
                    self.ncs_filenames[chan_uid] = filename
                    keys = [
                        'DspFilterDelay_µs',
                        'recording_opened',
                        'FileType',
                        'DspDelayCompensation',
                        'recording_closed',
                        'DspLowCutFilterType',
                        'HardwareSubSystemName',
                        'DspLowCutNumTaps',
                        'DSPLowCutFilterEnabled',
                        'HardwareSubSystemType',
                        'DspHighCutNumTaps',
                        'ADMaxValue',
                        'DspLowCutFrequency',
                        'DSPHighCutFilterEnabled',
                        'RecordSize',
                        'InputRange',
                        'DspHighCutFrequency',
                        'input_inverted',
                        'NumADChannels',
                        'DspHighCutFilterType',
                    ]
                    d = {k: info[k] for k in keys if k in info}
                    signal_annotations.append(d)

                elif ext in ('nse', 'ntt'):
                    # nse and ntt are pretty similar except for the waveform shape.
                    # A file can contain several unit_ids (so several unit channels).
                    assert chan_uid not in self.nse_ntt_filenames, \
                        'Several nse or ntt files have the same channel uid!'
                    self.nse_ntt_filenames[chan_uid] = filename

                    dtype = get_nse_or_ntt_dtype(info, ext)

                    if os.path.getsize(filename) <= NlxHeader.HEADER_SIZE:
                        self._empty_nse_ntt.append(filename)
                        data = np.zeros((0, ), dtype=dtype)
                    else:
                        data = np.memmap(filename,
                                         dtype=dtype,
                                         mode='r',
                                         offset=NlxHeader.HEADER_SIZE)

                    self._spike_memmap[chan_uid] = data

                    unit_ids = np.unique(data['unit_id'])
                    for unit_id in unit_ids:
                        # a spike channel for each (chan_id, unit_id)
                        self.internal_unit_ids.append((chan_uid, unit_id))

                        unit_name = "ch{}#{}#{}".format(
                            chan_name, chan_id, unit_id)
                        unit_id = str(unit_id)
                        wf_units = 'uV'
                        wf_gain = info['bit_to_microVolt'][idx]
                        if info.get('input_inverted', False):
                            wf_gain *= -1
                        wf_offset = 0.
                        wf_left_sweep = -1  # NOT KNOWN
                        wf_sampling_rate = info['sampling_rate']
                        unit_channels.append((unit_name, unit_id,
                                              wf_units, wf_gain, wf_offset,
                                              wf_left_sweep, wf_sampling_rate))
                        unit_annotations.append(dict(file_origin=filename))

                elif ext == 'nev':
                    # an event channel
                    # each ('event_id',  'ttl_input') give a new event channel
                    self.nev_filenames[chan_id] = filename

                    if os.path.getsize(filename) <= NlxHeader.HEADER_SIZE:
                        self._empty_nev.append(filename)
                        data = np.zeros((0, ), dtype=nev_dtype)
                        internal_ids = []
                    else:
                        data = np.memmap(filename,
                                         dtype=nev_dtype,
                                         mode='r',
                                         offset=NlxHeader.HEADER_SIZE)
                        internal_ids = np.unique(
                            data[['event_id', 'ttl_input']]).tolist()
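                    # e.g. internal_ids == [(0, 1), (0, 2)]: one entry per distinct
                    # (event_id, ttl_input) pair in the file (illustrative values)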
                    for internal_event_id in internal_ids:
                        if internal_event_id not in self.internal_event_ids:
                            event_id, ttl_input = internal_event_id
                            name = '{} event_id={} ttl={}'.format(
                                chan_name, event_id, ttl_input)
                            event_channels.append((name, chan_id, 'event'))
                            self.internal_event_ids.append(internal_event_id)

                    self._nev_memmap[chan_id] = data

        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
        event_channels = np.array(event_channels, dtype=_event_channel_dtype)

        # require all sampled signals, ncs files, to have same sampling rate
        if sig_channels.size > 0:
            sampling_rate = np.unique(sig_channels['sampling_rate'])
            assert sampling_rate.size == 1
            self._sigs_sampling_rate = sampling_rate[0]

        # set 2 attributes needed later for header in case there are no ncs files in dataset,
        #   e.g. Pegasus
        self._timestamp_limits = None
        self._nb_segment = 1

        # Read ncs files for gap detection and nb_segment computation.
        self._sigs_memmaps, ncsSegTimestampLimits = self.scan_ncs_files(
            self.ncs_filenames)
        if ncsSegTimestampLimits:
            self._ncs_seg_timestamp_limits = ncsSegTimestampLimits  # save copy
            self._nb_segment = ncsSegTimestampLimits.nb_segment
            self._sigs_length = ncsSegTimestampLimits.length.copy()
            self._timestamp_limits = ncsSegTimestampLimits.timestamp_limits.copy()
            self._sigs_t_start = ncsSegTimestampLimits.t_start.copy()
            self._sigs_t_stop = ncsSegTimestampLimits.t_stop.copy()

        # Determine timestamp limits in nev, nse file by scanning them.
        ts0, ts1 = None, None
        for _data_memmap in (self._spike_memmap, self._nev_memmap):
            for _, data in _data_memmap.items():
                ts = data['timestamp']
                if ts.size == 0:
                    continue
                if ts0 is None:
                    ts0 = ts[0]
                    ts1 = ts[-1]
                ts0 = min(ts0, ts[0])
                ts1 = max(ts1, ts[-1])
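
        # ts0/ts1 now span the earliest and latest spike/event timestamps across
        # all files, or remain None when no spike or event data is present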

        # decide on segment and global start and stop times based on files available
        if self._timestamp_limits is None:
            # case  NO ncs but HAVE nev or nse
            self._timestamp_limits = [(ts0, ts1)]
            self._seg_t_starts = [ts0 / 1e6]
            self._seg_t_stops = [ts1 / 1e6]
            self.global_t_start = ts0 / 1e6
            self.global_t_stop = ts1 / 1e6
        elif ts0 is not None:
            # case  HAVE ncs AND HAVE nev or nse
            self.global_t_start = min(ts0 / 1e6, self._sigs_t_start[0])
            self.global_t_stop = max(ts1 / 1e6, self._sigs_t_stop[-1])
            self._seg_t_starts = list(self._sigs_t_start)
            self._seg_t_starts[0] = self.global_t_start
            self._seg_t_stops = list(self._sigs_t_stop)
            self._seg_t_stops[-1] = self.global_t_stop
        else:
            # case HAVE ncs but  NO nev or nse
            self._seg_t_starts = self._sigs_t_start
            self._seg_t_stops = self._sigs_t_stop
            self.global_t_start = self._sigs_t_start[0]
            self.global_t_stop = self._sigs_t_stop[-1]

        if self.keep_original_times:
            self.global_t_stop = self.global_t_stop - self.global_t_start
            self.global_t_start = 0

        # fill header dictionary
        self.header = {}
        self.header['nb_block'] = 1
        self.header['nb_segment'] = [self._nb_segment]
        self.header['signal_channels'] = sig_channels
        self.header['unit_channels'] = unit_channels
        self.header['event_channels'] = event_channels

        # Annotations
        self._generate_minimal_annotations()
        bl_annotations = self.raw_annotations['blocks'][0]

        for seg_index in range(self._nb_segment):
            seg_annotations = bl_annotations['segments'][seg_index]

            for c in range(sig_channels.size):
                sig_ann = seg_annotations['signals'][c]
                sig_ann.update(signal_annotations[c])

            for c in range(unit_channels.size):
                unit_ann = seg_annotations['units'][c]
                unit_ann.update(unit_annotations[c])

            for c in range(event_channels.size):
                # annotations for channel events
                event_id, ttl_input = self.internal_event_ids[c]
                chan_id = event_channels[c]['id']

                ev_ann = seg_annotations['events'][c]
                ev_ann['file_origin'] = self.nev_filenames[chan_id]
    def scan_ncs_files(self, ncs_filenames):
        """
        Given a dict of ncs files, read their basic structure.

        PARAMETERS:
        ------
        ncs_filenames - dict mapping chan_uid to the ncs filename to scan.

        RETURNS:
        ------
        memmaps
            [ {} for seg_index in range(self._nb_segment) ][chan_uid]
        seg_time_limits
            SegmentTimeLimits for sections in the scanned Ncs files

        Files are scanned to determine the sections of records. If a file contains a
        single section of records the scan is brief; otherwise each record is
        checked, which may take some time.
        """

        # :TODO: Needs to account for gaps and start and end times potentially
        #    being different in different groups of channels. These groups typically
        #    correspond to the channels collected by a single ADC card.
        if len(ncs_filenames) == 0:
            return None, None

        # Build dictionary of chan_uid to associated NcsSections, NlxHeader and memmap.
        # Only construct new NcsSections when they differ from those of the preceding file.
        chanSectMap = dict()
        for chan_uid, ncs_filename in ncs_filenames.items():

            data = np.memmap(ncs_filename,
                             dtype=self._ncs_dtype,
                             mode='r',
                             offset=NlxHeader.HEADER_SIZE)
            nlxHeader = NlxHeader(ncs_filename)

            if not chanSectMap or not NcsSectionsFactory._verifySectionsStructure(
                    data, lastNcsSections):
                lastNcsSections = NcsSectionsFactory.build_for_ncs_file(
                    data, nlxHeader)

            chanSectMap[chan_uid] = [lastNcsSections, nlxHeader, data]

        # Construct an inverse dictionary from NcsSections to list of associated chan_uids
        revSectMap = dict()
        for k, v in chanSectMap.items():
            revSectMap.setdefault(v[0], []).append(k)
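        # e.g. revSectMap == {ncsSections: [('CSC1', 1), ('CSC2', 2)]} when both
        # channels share a single sections structure (illustrative values)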

        # If there is only one NcsSections structure in the set of ncs files, there should only
        # be one entry. Otherwise this is presently unsupported.
        if len(revSectMap) > 1:
            raise IOError(
                'ncs files have {} different sections structures. Unsupported.'
                .format(len(revSectMap)))

        seg_time_limits = SegmentTimeLimits(
            nb_segment=len(lastNcsSections.sects),
            t_start=[],
            t_stop=[],
            length=[],
            timestamp_limits=[])
        memmaps = [{} for seg_index in range(seg_time_limits.nb_segment)]

        # create segment with subdata block/t_start/t_stop/length for each channel
        for i, fileEntry in enumerate(ncs_filenames.items()):
            chan_uid = fileEntry[0]
            data = chanSectMap[chan_uid][2]

            # create a memmap for each record section of the current file
            curSects = chanSectMap[chan_uid][0]
            for seg_index in range(len(curSects.sects)):

                curSect = curSects.sects[seg_index]
                subdata = data[curSect.startRec:(curSect.endRec + 1)]
                memmaps[seg_index][chan_uid] = subdata

                # create segment timestamp limits based on the single NcsSections structure in use
                if i == 0:
                    numSampsLastSect = subdata[-1]['nb_valid']
                    ts0 = subdata[0]['timestamp']
                    ts1 = NcsSectionsFactory.calc_sample_time(
                        curSects.sampFreqUsed, subdata[-1]['timestamp'],
                        numSampsLastSect)
                    seg_time_limits.timestamp_limits.append((ts0, ts1))
                    t_start = ts0 / 1e6
                    seg_time_limits.t_start.append(t_start)
                    t_stop = ts1 / 1e6
                    seg_time_limits.t_stop.append(t_stop)
                    # :NOTE: This should really be the total of nb_valid over all
                    #  records, but this allows the last record of a section to be
                    #  shorter, the most common case. Sections with partially filled
                    #  records before the last have not been observed.
                    length = (subdata.size -
                              1) * NcsSection._RECORD_SIZE + numSampsLastSect
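                    # e.g. 10 records of 512 samples with 200 valid in the last:
                    # (10 - 1) * 512 + 200 = 4808 samples (assuming the usual
                    # 512-sample NCS record)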
                    seg_time_limits.length.append(length)

        return memmaps, seg_time_limits
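
    # Usage sketch (illustrative; assumes _parse_header has populated
    # reader.ncs_filenames):
    #
    #     memmaps, limits = reader.scan_ncs_files(reader.ncs_filenames)
    #     # memmaps[seg_index][chan_uid] -> records of that channel in that segment
    #     # limits.t_start[i] / limits.t_stop[i] -> segment start/stop in seconds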