def register_aligned_track(probe_id, xyz_channels, chn_coords=None, one=None, overwrite=False,
                           channels=True, brain_atlas=None):
    """
    Register an ephys-aligned trajectory and the channel locations to Alyx.

    Alyx models are updated on the database in 2 steps:
    1) the trajectory is computed from the final electrode channel locations
    2) the channel locations are attached to that trajectory

    :param probe_id: Alyx probe insertion UUID
    :param xyz_channels: (nchannels, 3) array of channel locations in meters (ml, ap, dv)
    :param chn_coords: (nchannels, 2) array of local channel coordinates (lateral, axial) in um;
     defaults to the Neuropixel 1 geometry
    :param one: an instance of one.api.ONE (required)
    :param overwrite: if True, delete any pre-existing aligned trajectory before registering
    :param channels: if True, also register the channel locations
    :param brain_atlas: an atlas.BrainAtlas instance; defaults to the 25 um Allen atlas
    """
    assert one
    brain_atlas = brain_atlas or atlas.AllenAtlas(25)
    if chn_coords is None:
        geometry = trace_header(version=1)
        chn_coords = np.c_[geometry['x'], geometry['y']]

    insertion = atlas.Insertion.from_track(xyz_channels, brain_atlas)
    tdict = create_trajectory_dict(probe_id, insertion, provenance='Ephys aligned histology track')

    hist_traj = one.alyx.rest('trajectories', 'list',
                              probe_insertion=probe_id,
                              provenance='Ephys aligned histology track',
                              no_cache=True)
    # If the trajectory already exists, remove it; this cascade-deletes existing channel locations
    if len(hist_traj):
        if overwrite:
            one.alyx.rest('trajectories', 'delete', id=hist_traj[0]['id'])
        else:
            raise FileExistsError('The session already exists, however overwrite is set to False. '
                                  'If you want to overwrite, set overwrite=True.')
    hist_traj = one.alyx.rest('trajectories', 'create', data=tdict)

    if channels:
        brain_regions = brain_atlas.regions.get(brain_atlas.get_labels(xyz_channels))
        brain_regions['xyz'] = xyz_channels
        brain_regions['lateral'] = chn_coords[:, 0]
        brain_regions['axial'] = chn_coords[:, 1]
        # all fields must have the same number of entries, one per channel
        assert np.unique([len(brain_regions[k]) for k in brain_regions]).size == 1
        channel_dict = create_channel_dict(hist_traj, brain_regions)
        one.alyx.rest('channels', 'create', data=channel_dict)
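
# Example usage (a minimal sketch, not executed at import time): register the channel
# locations of an alignment for a probe insertion. The insertion UUID is a hypothetical
# placeholder and the xyz_channels array would normally come from a resolved alignment,
# not from random numbers. Requires a configured Alyx connection.
def _example_register_aligned_track():
    import numpy as np
    from one.api import ONE

    one = ONE()
    probe_id = '00000000-0000-0000-0000-000000000000'  # hypothetical insertion UUID
    # 384 channel locations in meters (ml, ap, dv); random values for illustration only
    xyz_channels = np.random.uniform(-3e-3, 0, size=(384, 3))
    register_aligned_track(probe_id, xyz_channels, one=one, overwrite=True)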
def get_brain_regions(xyz, channels_positions=None, brain_atlas=None):
    """
    :param xyz: numpy array of 3D coordinates corresponding to a picked track or a trajectory;
     the deepest point is assumed to be the tip
    :param channels_positions: (nchannels, 2) array of local channel coordinates
     (lateral, axial) in um; defaults to the Neuropixel 1 geometry
    :param brain_atlas: an atlas.BrainAtlas instance; defaults to the 25 um Allen atlas
    :return: brain_regions (associated to each channel),
             insertion (atlas.Insertion object defining the 2 points of entry:
             tip and end of probe)
    """
    brain_atlas = brain_atlas or atlas.AllenAtlas(25)
    if channels_positions is None:
        geometry = trace_header(version=1)
        channels_positions = np.c_[geometry['x'], geometry['y']]

    # Compute the depth along the probe from the first point, which is the deepest labeled point.
    # Due to the blockiness of the atlas, depths may not be unique along the track,
    # so duplicates have to be removed before interpolating.
    xyz = xyz[np.argsort(xyz[:, 2]), :]
    d = atlas.cart2sph(xyz[:, 0] - xyz[0, 0], xyz[:, 1] - xyz[0, 1], xyz[:, 2] - xyz[0, 2])[0]
    indsort = np.argsort(d)
    xyz = xyz[indsort, :]
    d = d[indsort]
    iduplicates = np.where(np.diff(d) == 0)[0]
    xyz = np.delete(xyz, iduplicates, axis=0)
    d = np.delete(d, iduplicates, axis=0)
    assert np.all(np.diff(d) > 0), "Depths should be strictly increasing"

    # Get the probe insertion from the coordinates
    insertion = atlas.Insertion.from_track(xyz, brain_atlas)

    # Interpolate channel positions along the probe depth and get brain locations
    TIP_SIZE_UM = 200
    xyz_channels = interpolate_along_track(xyz, (channels_positions[:, 1] + TIP_SIZE_UM) / 1e6)

    # Get the brain regions
    brain_regions = brain_atlas.regions.get(brain_atlas.get_labels(xyz_channels))
    brain_regions['xyz'] = xyz_channels
    brain_regions['lateral'] = channels_positions[:, 0]
    brain_regions['axial'] = channels_positions[:, 1]
    # all fields must have the same number of entries, one per channel
    assert np.unique([len(brain_regions[k]) for k in brain_regions]).size == 1

    return brain_regions, insertion
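
# Example usage (a minimal sketch): resolve the brain regions traversed by a picked track.
# The picks below form a fabricated straight 4 mm track for illustration; real picks come
# from histology tracing, in meters relative to bregma. Requires the Allen atlas to be
# available locally (it is downloaded on first use).
def _example_get_brain_regions():
    import numpy as np

    # fixed ml / ap position, dv spanning 4 mm down from bregma level
    xyz_picks = np.c_[np.full(50, -2e-3), np.full(50, -3e-3), np.linspace(-4e-3, 0, 50)]
    brain_regions, insertion = get_brain_regions(xyz_picks)
    print(brain_regions['acronym'])  # one acronym per channel
    print(insertion.tip, insertion.entry)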
def get_channels(self, alf_object, collection):
    """
    Load the channel locations for the insertion. Depending on the histology status, the
    locations are either loaded from disk or computed from the stored alignment or from
    the histology track.
    """
    electrodes = {}

    try:
        electrodes = self.one.load_object(self.eid, alf_object, collection=collection)
        electrodes['axial_um'] = electrodes['localCoordinates'][:, 1]
    except ALFObjectNotFound:
        _logger.warning(f'{alf_object} does not yet exist')

    if self.hist_lookup[self.histology_status] == 3:
        try:
            electrodes['atlas_id'] = electrodes['brainLocationIds_ccf_2017']
            electrodes['mlapdv'] = electrodes['mlapdv'] / 1e6
        except KeyError:
            _logger.warning('Insertion resolved but brainLocationIds_ccf_2017 attribute does not exist')

    if self.hist_lookup[self.histology_status] > 0 and 'atlas_id' not in electrodes.keys():
        if not self.brain_atlas:
            self.brain_atlas = AllenAtlas()
            self.brain_regions = self.brain_regions or self.brain_atlas.regions
        if 'localCoordinates' not in electrodes.keys():
            geometry = trace_header(version=1)
            electrodes['localCoordinates'] = np.c_[geometry['x'], geometry['y']]
            electrodes['axial_um'] = electrodes['localCoordinates'][:, 1]

        depths = electrodes['localCoordinates'][:, 1]
        xyz = np.array(self.ins['json']['xyz_picks']) / 1e6

        if self.hist_lookup[self.histology_status] >= 2:
            traj = self.one.alyx.rest('trajectories', 'list', provenance='Ephys aligned histology track',
                                      probe_insertion=self.pid)[0]
            align_key = self.ins['json']['extended_qc']['alignment_stored']
            feature = traj['json'][align_key][0]
            track = traj['json'][align_key][1]
            ephysalign = EphysAlignment(xyz, depths, track_prev=track, feature_prev=feature,
                                        brain_atlas=self.brain_atlas, speedy=True)
            electrodes['mlapdv'] = ephysalign.get_channel_locations(feature, track)
            electrodes['atlas_id'] = self.brain_atlas.regions.get(
                self.brain_atlas.get_labels(electrodes['mlapdv']))['id']

        if self.hist_lookup[self.histology_status] == 1:
            xyz = xyz[np.argsort(xyz[:, 2]), :]
            electrodes['mlapdv'] = interpolate_along_track(xyz, (depths + TIP_SIZE_UM) / 1e6)
            electrodes['atlas_id'] = self.brain_atlas.regions.get(
                self.brain_atlas.get_labels(electrodes['mlapdv']))['id']

    return electrodes
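
# Example usage (a sketch; the enclosing class is not shown in this excerpt, so `loader`
# stands for an already-initialised instance holding an eid, a pid, the insertion record
# and a ONE instance). The collection name is a typical, hypothetical example:
#
#     channels = loader.get_channels('channels', 'alf/probe00/pykilosort')
#     print(channels['atlas_id'], channels['mlapdv'], channels['axial_um'])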
def make_synthetic_data(ns=10000, nc=384, nss=121, ncs=21, nspikes=1200, tr=None, sample=None):
    """
    Create a synthetic raster (ns samples x nc traces) by pasting spike templates at random
    (or provided) trace / sample positions, with an amplitude decay over neighbouring traces.
    """
    if tr is None:
        tr = np.random.randint(np.ceil(ncs / 2), nc - np.ceil(ncs / 2), nspikes)
    if sample is None:
        sample = np.random.randint(np.ceil(nss / 2), ns - np.ceil(nss / 2), nspikes)
    h = neuropixel.trace_header(1)
    icsmid = int(np.floor(ncs / 2))
    issmid = int(np.floor(nss / 2))
    template = a_little_spike(nss)  # template length must match the sample span nss
    data = np.zeros((ns, nc))
    for m in np.arange(tr.size):
        itr = np.arange(tr[m] - icsmid, tr[m] + icsmid + 1)
        iss = np.arange(sample[m] - issmid, sample[m] + issmid + 1)
        # amplitude decays with the distance between each trace and the centre trace
        offset = np.abs(h['x'][itr[icsmid]] + 1j * h['y'][itr[icsmid]] - h['x'][itr] - 1j * h['y'][itr])
        ampfac = 1 / (offset + 10) ** 1.3
        ampfac = ampfac / np.max(ampfac)
        tmp = template[:, np.newaxis] * ampfac[np.newaxis, :]
        data[slice(iss[0], iss[-1] + 1), slice(itr[0], itr[-1] + 1)] += tmp
    return data
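
# Example usage (a minimal sketch): generate a short synthetic raster with the default
# Neuropixel 1 geometry and check its shape. nc must stay at 384 here because the
# amplitude decay indexes into the 384-channel trace header.
def _example_make_synthetic_data():
    import numpy as np

    np.random.seed(42)
    data = make_synthetic_data(ns=2000, nc=384, nspikes=50)
    assert data.shape == (2000, 384)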
def test_spike_detection(self):
    """
    Creates a synthetic dataset with spikes and an amplitude decay function matching the
    probe geometry, pastes spikes all around the dataset, then detects and de-duplicates
    them.
    The detections are fed into a new round of simulation; the test computes the zero-lag
    cross-correlation between input and simulated output and asserts on the similarity.
    """
    fs = 30000
    nspikes = 1200
    h = neuropixel.trace_header(version=1)
    ns, nc = (10000, len(h['x']))
    nss, ncs = (121, 21)
    np.random.seed(973)
    display = False

    data = make_synthetic_data(ns, nc, nss, ncs, nspikes)
    detects = spikes.detection(data, fs=fs, h=h, detect_threshold=-0.8, time_tol=.0006)

    sample_out = (detects.time * fs + nss / 2 - 4).astype(np.int32)
    tr_out = detects.trace.astype(np.int32)
    data_out = make_synthetic_data(ns, nc, nss, ncs, tr=tr_out, sample=sample_out)

    if display:
        from easyqc.gui import viewseis
        eqc = viewseis(data, si=1 / 30000 * 1e3, taxis=0, title='data')
        eqc.ctrl.add_scatter(detects.time * 1e3, detects.trace)
        eqco = viewseis(data_out, si=1 / 30000 * 1e3, taxis=0, title='data_out')  # noqa

    xcor = np.zeros(nc)
    for tr in np.arange(nc):
        if np.all(data[:, tr] == 0):
            xcor[tr] = 1
            continue
        xcor[tr] = np.corrcoef(data[:, tr], data_out[:, tr])[1, 0]

    assert np.mean(xcor > .8) > .95
    assert np.nanmedian(xcor) > .99
def tests_headers(self):
    th = neuropixel.trace_header()
    assert set(th.keys()) == {'x', 'y', 'row', 'col', 'ind', 'adc', 'sample_shift', 'shank'}
def upload_channels(self, alignment_key, upload_alyx, upload_flatiron):
    """
    Upload channels to Alyx and FlatIron based on the alignment specified by the alignment key.

    :param alignment_key: key of the alignment to upload from self.alignments
    :param upload_alyx: if True, update the trajectory and channels on Alyx
    :param upload_flatiron: if True, write the channel datasets and register them to FlatIron
    :return: list of files registered to FlatIron (empty if upload_flatiron is False)
    """
    feature = np.array(self.alignments[alignment_key][0])
    track = np.array(self.alignments[alignment_key][1])

    try:
        meta_dset = self.one.list_datasets(self.insertion['session'], '*ap.meta',
                                           collection=f'raw_ephys_data/{self.insertion["name"]}')
        meta_file = self.one.load_dataset(self.insertion['session'], meta_dset[0].split('/')[-1],
                                          collection=f'raw_ephys_data/{self.insertion["name"]}',
                                          download_only=True)
        geometry = spikeglx.read_geometry(meta_file)
        chns = np.c_[geometry['x'], geometry['y']]
    except Exception as err:
        self.log.warning(f"Could not compute channel locations from meta file, errored with message: {err}. "
                         f"Will use default Neuropixel 1 channels")
        geometry = trace_header(version=1)
        chns = np.c_[geometry['x'], geometry['y']]

    ephysalign = EphysAlignment(self.xyz_picks, chns[:, 1], track_prev=track,
                                feature_prev=feature, brain_atlas=self.brain_atlas)
    channels_mlapdv = np.int32(ephysalign.get_channel_locations(feature, track) * 1e6)
    channels_atlas_id = ephysalign.get_brain_locations(channels_mlapdv / 1e6)['id']

    # The channels stored on Alyx also need to be updated, as the stored key is not the
    # same as the latest key
    if upload_alyx:
        if alignment_key != self.align_keys_sorted[0]:
            histology.register_aligned_track(self.eid, channels_mlapdv / 1e6,
                                             chn_coords=chns, one=self.one,
                                             overwrite=True, channels=self.channels_flag,
                                             brain_atlas=self.brain_atlas)
            ephys_traj = self.one.alyx.get(f'/trajectories?&probe_insertion={self.eid}'
                                           '&provenance=Ephys aligned histology track',
                                           clobber=True)
            patch_dict = {'json': self.alignments}
            self.one.alyx.rest('trajectories', 'partial_update', id=ephys_traj[0]['id'],
                               data=patch_dict)

    files_to_register = []
    if upload_flatiron:
        ftp_patcher = FTPPatcher(one=self.one)

        alf_path = self.one.eid2path(self.insertion['session']).joinpath('alf', self.insertion["name"])
        alf_path.mkdir(exist_ok=True, parents=True)

        f_name = alf_path.joinpath('electrodeSites.mlapdv.npy')
        np.save(f_name, channels_mlapdv)
        files_to_register.append(f_name)

        f_name = alf_path.joinpath('electrodeSites.brainLocationIds_ccf_2017.npy')
        np.save(f_name, channels_atlas_id)
        files_to_register.append(f_name)

        f_name = alf_path.joinpath('electrodeSites.localCoordinates.npy')
        np.save(f_name, chns)
        files_to_register.append(f_name)

        probe_collections = self.one.list_collections(self.insertion['session'], filename='channels*',
                                                      collection=f'alf/{self.insertion["name"]}*')

        for collection in probe_collections:
            chns = self.one.load_dataset(self.insertion['session'], 'channels.localCoordinates',
                                         collection=collection)
            ephysalign = EphysAlignment(self.xyz_picks, chns[:, 1], track_prev=track,
                                        feature_prev=feature, brain_atlas=self.brain_atlas)
            channels_mlapdv = np.int32(ephysalign.get_channel_locations(feature, track) * 1e6)
            channels_atlas_id = ephysalign.get_brain_locations(channels_mlapdv / 1e6)['id']

            alf_path = self.one.eid2path(self.insertion['session']).joinpath(collection)
            alf_path.mkdir(exist_ok=True, parents=True)

            f_name = alf_path.joinpath('channels.mlapdv.npy')
            np.save(f_name, channels_mlapdv)
            files_to_register.append(f_name)

            f_name = alf_path.joinpath('channels.brainLocationIds_ccf_2017.npy')
            np.save(f_name, channels_atlas_id)
            files_to_register.append(f_name)

        self.log.info("Writing datasets to FlatIron")
        ftp_patcher.create_dataset(path=files_to_register, created_by=self.one.alyx.user)

    return files_to_register
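
# Example usage (a sketch; `align_qc` stands for an already-initialised instance of the
# enclosing class, loaded with an insertion, its xyz picks and its stored alignments; the
# class itself is not shown in this excerpt). Alignment keys look like
# '2023-01-01T00:00:00_username':
#
#     files = align_qc.upload_channels(alignment_key='2023-01-01T00:00:00_username',
#                                      upload_alyx=True, upload_flatiron=False)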
def run(self, update: bool = False, overwrite: bool = True, stream: bool = None, **kwargs) -> list:
    """
    Run QC on samples of the .ap file, and on the entire file for .lf data if it is present.

    :param update: bool, whether to update the qc json fields for this probe. Defaults to False.
    :param overwrite: bool, whether to overwrite locally existing outputs of this function.
     Defaults to True.
    :param stream: bool, whether to stream the samples of the .ap data if not locally available.
     Defaults to the value set in the class init (True if none set).
    :return: A list of QC output files. A complete run yields five files for .ap (see the
     `files` dict below) plus the RMS-map outputs for .lf.
    """
    # If stream is explicitly given in run, overwrite value from init
    if stream is not None:
        self.stream = stream

    # Load data
    self.load_data()
    qc_files = []

    # If ap meta file present, calculate median RMS per channel before and after destriping
    # NB: ideally this should go in a separate function once we have a spikeglx.Streamer
    # that behaves like the Reader
    if self.data.ap_meta:
        files = {'rms': self.probe_path.joinpath("_iblqc_ephysChannels.apRMS.npy"),
                 'spike_rate': self.probe_path.joinpath("_iblqc_ephysChannels.rawSpikeRates.npy"),
                 'channel_labels': self.probe_path.joinpath("_iblqc_ephysChannels.labels.npy"),
                 'ap_freqs': self.probe_path.joinpath("_iblqc_ephysSpectralDensityAP.freqs.npy"),
                 'ap_power': self.probe_path.joinpath("_iblqc_ephysSpectralDensityAP.power.npy"),
                 }
        if all([files[k].exists() for k in files]) and not overwrite:
            _logger.warning(f'RMS map already exists for .ap data in {self.probe_path}, skipping. '
                            f'Use overwrite option.')
            results = {k: np.load(files[k]) for k in files}
        else:
            sr = self.data['ap']
            nc = sr.nc - sr.nsync
            # verify that the channel layout is correct according to IBL layout
            h = neuropixel.trace_header(sr.major_version)
            th = sr.geometry
            if not (np.all(h['x'] == th['x']) and np.all(h['y'] == th['y'])):
                _logger.critical("Channel geometry seems incorrect")
                raise ValueError("Wrong Neuropixel channel mapping used - ABORT")

            t0s = np.arange(TMIN, sr.rl - SAMPLE_LENGTH, BATCHES_SPACING)
            all_rms = np.zeros((2, nc, t0s.shape[0]))
            all_srs, channel_ok = (np.zeros((nc, t0s.shape[0])) for _ in range(2))
            psds = np.zeros((nc, fourier.fscale(WELCH_WIN_LENGTH_SAMPLES, 1, one_sided=True).size))

            _logger.info(f'Computing RMS samples for .ap data {self.probe_path}')
            for i, t0 in enumerate(t0s):
                sl = slice(int(t0 * sr.fs), int((t0 + SAMPLE_LENGTH) * sr.fs))
                raw = sr[sl, :-sr.nsync].T
                all_rms[0, :, i], all_rms[1, :, i], all_srs[:, i], channel_ok[:, i], psd = \
                    self._compute_metrics_array(raw, sr.fs, h)
                psds += psd
            # Calculate the median RMS across all samples per channel
            results = {'rms': np.median(all_rms, axis=-1),
                       'spike_rate': np.median(all_srs, axis=-1),
                       'channel_labels': stats.mode(channel_ok, axis=1)[0],
                       'ap_freqs': fourier.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sr.fs, one_sided=True),
                       'ap_power': psds.T / len(t0s),  # shape: (nfreqs, nchannels)
                       }
            for k in files:
                np.save(files[k], results[k])
        qc_files.extend([files[k] for k in files])
        for p in [10, 90]:
            self.metrics[f'apRms_p{p}_raw'] = np.format_float_scientific(
                np.percentile(results['rms'][0, :], p), precision=2)
            self.metrics[f'apRms_p{p}_proc'] = np.format_float_scientific(
                np.percentile(results['rms'][1, :], p), precision=2)
        if update:
            self.update_extended_qc(self.metrics)

    # If lf meta and bin file present, run the old qc on LF data
    if self.data.lf_meta and self.data.lf:
        qc_files.extend(extract_rmsmap(self.data.lf, out_folder=self.probe_path, overwrite=overwrite))

    return qc_files
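
# Example usage (a sketch; `qc` stands for an already-initialised instance of the enclosing
# QC class, pointing at a probe with local or streamable .ap data; the class constructor is
# not shown in this excerpt):
#
#     out_files = qc.run(update=True, overwrite=False)
#     print(qc.metrics['apRms_p90_proc'])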