Example 1
    def assess_noise(self):
        """
        Assess noise in the raw AP data.
        """
        for rec_num, recording in enumerate(self.files):
            print(
                f">>>>> Assessing noise for recording {rec_num + 1} of {len(self.files)}"
            )
            output = self.processed / f'noise_{rec_num}.json'
            if output.exists():
                continue

            data_file = self.find_file(recording['spike_data'])
            num_chans = int(self.spike_meta[rec_num]['nSavedChans'])
            data = ioutils.read_bin(data_file, num_chans)

            # sample some points -- it takes too long to use them all
            length = data.shape[0]
            points = np.random.choice(length, length // 60, replace=False)
            sample = data[points, :]
            # per-channel SD after subtracting the common median across channels
            median = np.median(sample, axis=1)
            SDs = []
            for i in range(num_chans):
                SDs.append(np.std(sample[:, i] - median))

            results = dict(
                median=np.median(SDs),
                SDs=SDs,
            )

            with open(output, 'w') as fd:
                json.dump(results, fd)
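The noise summaries written above can then be consumed downstream. A minimal sketch, assuming the file layout used by assess_noise; load_noise and its default cutoff are hypothetical, not part of the pipeline:

    import json
    from pathlib import Path

    def load_noise(processed_dir, rec_num, threshold=None):
        """Read noise_<rec_num>.json and flag channels whose SD exceeds a cutoff."""
        with (Path(processed_dir) / f"noise_{rec_num}.json").open() as fd:
            results = json.load(fd)
        if threshold is None:
            threshold = 2 * results["median"]  # hypothetical cutoff: twice the median SD
        noisy = [chan for chan, sd in enumerate(results["SDs"]) if sd > threshold]
        return results, noisy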
Example 2
    def process_lfp(self):
        """
        Process the LFP data from the raw neural recording data.
        """
        for rec_num, recording in enumerate(self.files):
            print(
                f">>>>> Processing LFP for recording {rec_num + 1} of {len(self.files)}"
            )

            data_file = self.find_file(recording['lfp_data'])
            orig_rate = int(self.lfp_meta[rec_num]['imSampRate'])
            num_chans = int(self.lfp_meta[rec_num]['nSavedChans'])

            print("> Mapping LFP data")
            data = ioutils.read_bin(data_file, num_chans)

            print(f"> Downsampling to {self.sample_rate} Hz")
            data = signal.resample(data, orig_rate, self.sample_rate)

            if self._lag[rec_num] is None:
                self.sync_data(rec_num, sync_channel=data[:, -1])
            lag_start, lag_end = self._lag[rec_num]

            output = self.processed / recording['lfp_processed']
            print(f"> Saving data to {output}")
            if lag_end < 0:
                data = data[:lag_end]
            if lag_start < 0:
                data = data[-lag_start:]
            data = pd.DataFrame(data[:, :-1])  # drop the sync channel
            ioutils.write_hdf5(output, data)
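Note that signal.resample here is the project's own helper with the signature (data, orig_rate, target_rate), not scipy.signal.resample. Its implementation is not shown; a plausible sketch, assuming integer sample rates, would be a polyphase resampler:

    from fractions import Fraction

    import scipy.signal

    def resample(data, orig_rate, target_rate):
        """Resample along axis 0 from orig_rate to target_rate with a polyphase filter."""
        ratio = Fraction(int(target_rate), int(orig_rate))  # reduced up/down factors
        return scipy.signal.resample_poly(data, ratio.numerator, ratio.denominator, axis=0)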
Example 3
    def _get_neuro_raw(self, kind):
        """
        Load the raw neural data of the given kind ('spike' or 'lfp') for all
        recordings, trimmed to align with the behavioural data.
        """
        raw = []
        meta = getattr(self, f"{kind}_meta")
        for rec_num, recording in enumerate(self.files):
            data_file = self.find_file(recording[f'{kind}_data'])
            orig_rate = int(meta[rec_num]['imSampRate'])
            num_chans = int(meta[rec_num]['nSavedChans'])
            factor = orig_rate / self.sample_rate

            data = ioutils.read_bin(data_file, num_chans)

            if self._lag[rec_num] is None:
                self.sync_data(rec_num, sync_channel=data[:, -1])
            lag_start, lag_end = self._lag[rec_num]

            # scale the lags (computed at self.sample_rate) up to the original rate
            lag_start = int(lag_start * factor)
            lag_end = int(lag_end * factor)
            if lag_end < 0:
                data = data[:lag_end]
            if lag_start < 0:
                data = data[-lag_start:]
            raw.append(pd.DataFrame(data[:, :-1]))  # drop the sync channel

        # NB: assumes all recordings share the same original sample rate
        return raw, orig_rate
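A hypothetical usage sketch, where exp stands in for an instance of the class these methods belong to:

    # Each element of raw_frames is a (samples, channels) DataFrame for one
    # recording, already trimmed and with the sync channel dropped.
    raw_frames, orig_rate = exp._get_neuro_raw("lfp")
    for rec_num, frame in enumerate(raw_frames):
        print(f"recording {rec_num}: {frame.shape[0]} samples at {orig_rate} Hz")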
Example 4
    def process_spikes(self):
        """
        Process the spike data from the raw neural recording data.
        """
        for rec_num, recording in enumerate(self.files):
            print(
                f">>>>> Processing spike data for recording {rec_num + 1} of {len(self.files)}"
            )

            data_file = self.find_file(recording['spike_data'])
            orig_rate = int(self.spike_meta[rec_num]['imSampRate'])
            num_chans = int(self.spike_meta[rec_num]['nSavedChans'])

            print("> Mapping spike data")
            data = ioutils.read_bin(data_file, num_chans)

            #print("> Performing median subtraction across rows")  # TODO: fix
            #data = signal.median_subtraction(data, axis=0)
            #print("> Performing median subtraction across columns")
            #data = signal.median_subtraction(data, axis=1)

            print(f"> Downsampling to {self.sample_rate} Hz")
            data = signal.resample(data, orig_rate, self.sample_rate)

            if self._lag[rec_num] is None:
                self.sync_data(rec_num, sync_channel=data[:, -1])
            lag_start, lag_end = self._lag[rec_num]

            output = self.processed / recording['spike_processed']
            print(f"> Saving data to {output}")
            if lag_end < 0:
                data = data[:lag_end]
            if lag_start < 0:
                data = data[-lag_start:]
            data = pd.DataFrame(data[:, :-1])  # drop the sync channel
            ioutils.write_hdf5(output, data)
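The median-subtraction step above is commented out pending a fix. A sketch of what signal.median_subtraction presumably does (an assumption, not the project's actual implementation): subtract the median along one axis, broadcasting it back over the data:

    import numpy as np

    def median_subtraction(data, axis):
        """Subtract the median along `axis`; with (samples, channels) data,
        axis=1 removes the common-mode signal across channels at each sample."""
        return data - np.median(data, axis=axis, keepdims=True)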
Example 5
    def sync_data(self, rec_num, behavioural_data=None, sync_channel=None):
        """
        Calculate the lag between the behavioural data and the neuropixels data
        for the given recording, saving it both to file and to self._lag.

        If behavioural_data or sync_channel are not provided, they are loaded
        from file and downsampled; if provided, they must already be at the same
        sampling frequency.

        Parameters
        ----------
        rec_num : int
            The recording number, i.e. the index into self.files used to find file paths.

        behavioural_data : pandas.DataFrame, optional
            The unprocessed behavioural data loaded from the TDMS file.

        sync_channel : np.ndarray, optional
            The sync channel from either the spike or LFP data.

        """
        print("    Finding lag between sync channels")
        recording = self.files[rec_num]

        if behavioural_data is None:
            print("    Loading behavioural data")
            data_file = self.find_file(recording['behaviour'])
            behavioural_data = ioutils.read_tdms(data_file,
                                                 groups=["NpxlSync_Signal"])
            behavioural_data = signal.resample(behavioural_data.values,
                                               BEHAVIOUR_HZ, self.sample_rate)

        if sync_channel is None:
            print("    Loading neuropixels sync channel")
            data_file = self.find_file(recording['lfp_data'])
            num_chans = int(self.lfp_meta[rec_num]['nSavedChans'])
            # the sync signal is stored in the last saved channel (index 384)
            sync_channel = ioutils.read_bin(data_file, num_chans, channel=384)
            orig_rate = int(self.lfp_meta[rec_num]['imSampRate'])
            #sync_channel = sync_channel[:120 * orig_rate * 2]  # 2 mins, rec Hz, back/forward
            sync_channel = signal.resample(sync_channel, orig_rate,
                                           self.sample_rate)

        behavioural_data = signal.binarise(behavioural_data)
        sync_channel = signal.binarise(sync_channel)

        print("    Finding lag")
        plot = self.processed / f'sync_{rec_num}.png'
        lag_start, match = signal.find_sync_lag(
            behavioural_data,
            sync_channel,
            plot=plot,
        )

        # behavioural samples remaining after the end of the aligned neural data
        lag_end = len(behavioural_data) - (lag_start + len(sync_channel))
        self._lag[rec_num] = (lag_start, lag_end)

        if match < 95:
            print(
                "    The sync channels did not match very well. Check the plot."
            )
        print(f"    Calculated lag: {(lag_start, lag_end)}")

        lag_json = []
        for lag in self._lag:
            if lag is None:
                lag_json.append(dict(lag_start=None, lag_end=None))
            else:
                lag_start, lag_end = lag
                lag_json.append(dict(lag_start=lag_start, lag_end=lag_end))
        with (self.processed / 'lag.json').open('w') as fd:
            json.dump(lag_json, fd)
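signal.find_sync_lag is not shown here. A minimal sketch of the idea, assuming 1-D binarised 0/1 arrays and ignoring the plot argument (note the pipeline expects a possibly negative lag_start):

    import numpy as np
    import scipy.signal

    def find_sync_lag(behavioural, neural, plot=None):
        """Return the (possibly negative) offset of `neural` within `behavioural`,
        plus a 0-100 score for how well the overlapping samples agree."""
        a = behavioural - behavioural.mean()
        b = neural - neural.mean()
        corr = scipy.signal.correlate(a, b, mode="full", method="fft")
        lag = int(np.argmax(corr)) - (len(neural) - 1)
        # score the overlap: percentage of samples where the two signals agree
        start = max(lag, 0)
        end = min(len(behavioural), lag + len(neural))
        match = np.mean(behavioural[start:end] == neural[start - lag:end - lag]) * 100
        return lag, float(match)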