def test_block_verify(self):
        # a single-section file must verify against sections built from itself
        filename = self.get_filename_path(
            'Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs')
        hdr_single = NlxHeader(filename)
        data_single = np.memmap(filename,
                                dtype=NeuralynxRawIO._ncs_dtype,
                                mode='r',
                                offset=NlxHeader.HEADER_SIZE)
        sects_single = NcsSectionsFactory.build_for_ncs_file(
            data_single, hdr_single)

        self.assertTrue(
            NcsSectionsFactory._verifySectionsStructure(
                data_single, sects_single))

        # the single-section structure must NOT verify against a file
        # containing two sections of records
        filename = self.get_filename_path(
            'BML_unfilledsplit/original_data/unfilledSplitRecords.Ncs')
        hdr_split = NlxHeader(filename)
        data_split = np.memmap(filename,
                               dtype=NeuralynxRawIO._ncs_dtype,
                               mode='r',
                               offset=NlxHeader.HEADER_SIZE)
        sects_split = NcsSectionsFactory.build_for_ncs_file(
            data_split, hdr_split)

        self.assertFalse(
            NcsSectionsFactory._verifySectionsStructure(
                data_split, sects_single))

        # the two-section file still verifies against its own structure
        self.assertTrue(
            NcsSectionsFactory._verifySectionsStructure(
                data_split, sects_split))
    def test_build_using_header_and_scanning(self):
        # Early files list floor(1e6 / (actual microseconds between samples))
        # as the frequency in the header; building sections must recover the
        # actual sampling rate.
        filename = self.get_filename_path(
            'Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs')
        header = NlxHeader(filename)
        records = np.memmap(filename,
                            dtype=NeuralynxRawIO._ncs_dtype,
                            mode='r',
                            offset=NlxHeader.HEADER_SIZE)
        sections = NcsSectionsFactory.build_for_ncs_file(records, header)

        self.assertEqual(sections.sampFreqUsed, 1 / 35e-6)
        self.assertEqual(sections.microsPerSampUsed, 35)
        self.assertEqual(len(sections.sects), 1)
        self.assertEqual(sections.sects[0].startRec, 0)
        self.assertEqual(sections.sects[0].endRec, 9)

        # Cheetah 5.5.1 (DigitalLynxSX): two sections of records separated
        # by a fairly large gap
        filename = self.get_filename_path(
            'Cheetah_v5.5.1/original_data/Tet3a.ncs')
        header = NlxHeader(filename)
        records = np.memmap(filename,
                            dtype=NeuralynxRawIO._ncs_dtype,
                            mode='r',
                            offset=NlxHeader.HEADER_SIZE)
        sections = NcsSectionsFactory.build_for_ncs_file(records, header)

        self.assertEqual(sections.sampFreqUsed, 32000)
        self.assertEqual(sections.microsPerSampUsed, 31.25)
        self.assertEqual(len(sections.sects), 2)
        self.assertListEqual([s.startRec for s in sections.sects], [0, 2498])
        self.assertListEqual([s.endRec for s in sections.sects], [2497, 3331])
    def test_block_start_and_end_times(self):
        def build_sections(path):
            # memmap the records past the file header and build the sections
            recs = np.memmap(path,
                             dtype=NeuralynxRawIO._ncs_dtype,
                             mode='r',
                             offset=NlxHeader.HEADER_SIZE)
            return NcsSectionsFactory.build_for_ncs_file(recs, NlxHeader(path))

        # digitallynxsx version to exercise the _parseForMaxGap function
        # with multiple blocks
        sections = build_sections(self.get_local_path(
            'neuralynx/Cheetah_v6.3.2/incomplete_blocks/CSC1_reduced.ncs'))
        self.assertListEqual([s.startTime for s in sections.sects],
                             [8408806811, 8427832053, 8487768561])
        self.assertListEqual([s.endTime for s in sections.sects],
                             [8427831990, 8487768498, 8515816549])

        # digitallynxsx with a single block of records to exercise the
        # single-section path in _buildForMaxGap
        sections = build_sections(self.get_local_path(
            'neuralynx/Cheetah_v1.1.0/original_data/CSC67_trunc.Ncs'))
        self.assertEqual(len(sections.sects), 1)
        self.assertEqual(sections.sects[0].startTime, 253293161778)
        self.assertEqual(sections.sects[0].endTime, 253293349278)

        # PRE4 version with a single block of records to exercise the path
        # in _buildGivenActualFrequency
        sections = build_sections(self.get_local_path(
            'neuralynx/Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs'))
        self.assertEqual(len(sections.sects), 1)
        self.assertEqual(sections.sects[0].startTime, 266982936)
        self.assertEqual(sections.sects[0].endTime, 267162136)

        # BML style with two blocks of records and one partially filled
        # record to exercise _parseGivenActualFrequency
        sections = build_sections(self.get_local_path(
            'neuralynx/BML_unfilledsplit/original_data/unfilledSplitRecords.Ncs'
        ))
        self.assertEqual(len(sections.sects), 2)
        self.assertListEqual([s.startTime for s in sections.sects],
                             [1837623129, 6132625241])
        self.assertListEqual([s.endTime for s in sections.sects],
                             [1837651009, 6132642649])
    def test_build_given_actual_frequency(self):
        # Test early files where the frequency listed in the header is
        # floor(1e6/(actual number of microseconds between samples));
        # here the actual frequency is passed directly to the builder.
        filename = self.get_filename_path(
            'Cheetah_v4.0.2/original_data/CSC14_trunc.Ncs')
        data0 = np.memmap(filename,
                          dtype=NeuralynxRawIO._ncs_dtype,
                          mode='r',
                          offset=NlxHeader.HEADER_SIZE)

        # the actual sampling frequency: one sample every 35 microseconds.
        # (The original built a throwaway NcsSections just to carry this
        # value; pass it directly instead.)
        actualSampFreq = 1 / 35e-6
        # 27789 is the header-listed frequency for this file
        # (floor of the actual rate) -- TODO confirm against NlxHeader
        ncsBlocks = NcsSectionsFactory._buildGivenActualFrequency(
            data0, actualSampFreq, 27789)

        # truncated file is a single section covering records 0-9
        self.assertEqual(len(ncsBlocks.sects), 1)
        self.assertEqual(ncsBlocks.sects[0].startRec, 0)
        self.assertEqual(ncsBlocks.sects[0].endRec, 9)
    def test_ncsblocks_partial(self):
        filename = self.get_filename_path(
            'Cheetah_v6.3.2/incomplete_blocks/CSC1_reduced.ncs')
        records = np.memmap(filename,
                            dtype=NeuralynxRawIO._ncs_dtype,
                            mode='r',
                            offset=NlxHeader.HEADER_SIZE)
        self.assertEqual(records.shape[0], 6690)
        # timestamp of last record
        self.assertEqual(records['timestamp'][6689], 8515800549)

        header = NlxHeader(filename)
        sections = NcsSectionsFactory.build_for_ncs_file(records, header)

        self.assertEqual(sections.sampFreqUsed, 32000.012813673042)
        self.assertEqual(sections.microsPerSampUsed, 31.249987486652431)
        self.assertListEqual([s.startRec for s in sections.sects],
                             [0, 1190, 4937])
        self.assertListEqual([s.endRec for s in sections.sects],
                             [1189, 4936, 6689])
    def scan_ncs_files(self, ncs_filenames):
        """
        Given a dict of ncs files, read their basic structure.

        PARAMETERS:
        ------
        ncs_filenames - dict of chan_uid to ncs filename to scan.
            (Previously documented as a list, but ``.items()`` is used.)

        RETURNS:
        ------
        memmaps
            [ {} for seg_index in range(self._nb_segment) ][chan_uid]
        seg_time_limits
            SegmentTimeLimits for sections in scanned Ncs files

        Files will be scanned to determine the sections of records. If file is a single
        section of records, this scan is brief, otherwise it will check each record which may
        take some time.
        """

        # :TODO: Needs to account for gaps and start and end times potentially
        #    being different in different groups of channels. These groups typically
        #    correspond to the channels collected by a single ADC card.
        if len(ncs_filenames) == 0:
            return None, None

        # Build dictionary of chan_uid to associated NcsSections, memmap and NlxHeaders. Only
        # construct new NcsSections when it is different from that for the preceding file.
        # NOTE(review): this previously iterated self.ncs_filenames, silently
        # ignoring the ncs_filenames argument that was just checked for
        # emptiness; the parameter is now used consistently.
        chanSectMap = dict()
        for chan_uid, ncs_filename in ncs_filenames.items():

            data = np.memmap(ncs_filename,
                             dtype=self._ncs_dtype,
                             mode='r',
                             offset=NlxHeader.HEADER_SIZE)
            nlxHeader = NlxHeader(ncs_filename)

            # Reuse the previous file's sections when this file's records
            # match that structure; otherwise build a fresh NcsSections.
            # (On the first file, short-circuit avoids referencing the
            # not-yet-bound lastNcsSections.)
            if not chanSectMap or not \
                    NcsSectionsFactory._verifySectionsStructure(
                        data, lastNcsSections):
                lastNcsSections = NcsSectionsFactory.build_for_ncs_file(
                    data, nlxHeader)

            chanSectMap[chan_uid] = [lastNcsSections, nlxHeader, data]

        # Construct an inverse dictionary from NcsSections to list of associated chan_uids
        revSectMap = dict()
        for k, v in chanSectMap.items():
            revSectMap.setdefault(v[0], []).append(k)

        # If there is only one NcsSections structure in the set of ncs files, there should only
        # be one entry. Otherwise this is presently unsupported.
        if len(revSectMap) > 1:
            raise IOError(
                'ncs files have {} different sections structures. Unsupported.'
                .format(len(revSectMap)))

        seg_time_limits = SegmentTimeLimits(nb_segment=len(
            lastNcsSections.sects),
                                            t_start=[],
                                            t_stop=[],
                                            length=[],
                                            timestamp_limits=[])
        memmaps = [{} for seg_index in range(seg_time_limits.nb_segment)]

        # create segment with subdata block/t_start/t_stop/length for each channel
        # (chanSectMap preserves the iteration order of ncs_filenames)
        for i, chan_uid in enumerate(chanSectMap):
            data = chanSectMap[chan_uid][2]

            # create a memmap for each record section of the current file
            curSects = chanSectMap[chan_uid][0]
            for seg_index in range(len(curSects.sects)):

                curSect = curSects.sects[seg_index]
                subdata = data[curSect.startRec:(curSect.endRec + 1)]
                memmaps[seg_index][chan_uid] = subdata

                # create segment timestamp limits based on only NcsSections structure in use
                if i == 0:
                    numSampsLastSect = subdata[-1]['nb_valid']
                    ts0 = subdata[0]['timestamp']
                    # end time = last record's timestamp plus the duration of
                    # its valid samples at the section's sampling frequency
                    ts1 = NcsSectionsFactory.calc_sample_time(
                        curSects.sampFreqUsed, subdata[-1]['timestamp'],
                        numSampsLastSect)
                    seg_time_limits.timestamp_limits.append((ts0, ts1))
                    t_start = ts0 / 1e6
                    seg_time_limits.t_start.append(t_start)
                    t_stop = ts1 / 1e6
                    seg_time_limits.t_stop.append(t_stop)
                    # :NOTE: This should really be the total of nb_valid in records, but this
                    #  allows the last record of a section to be shorter, the most common case.
                    #  Have never seen a section of records with not full records before the last.
                    length = (subdata.size -
                              1) * NcsSection._RECORD_SIZE + numSampsLastSect
                    seg_time_limits.length.append(length)

        return memmaps, seg_time_limits