Example #1
def rmsmap(fbin, spectra=True):
    """
    Computes the RMS map in the time domain and the power spectra for each channel of a
    Neuropixels probe.

    :param fbin: binary file in SpikeGLX format (will look for the attached metadata)
    :type fbin: str, pathlib.Path or spikeglx.Reader
    :param spectra: whether to compute the power spectrum (only needed for LFP data)
    :type spectra: bool
    :return: a dictionary with RMS amplitudes in channel/time space, spectral density in
     channel/frequency space, and the corresponding time and frequency scales
    """
    if isinstance(fbin, spikeglx.Reader):
        sglx = fbin
    else:
        sglx = spikeglx.Reader(fbin)
    rms_win_length_samples = int(2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS)))
    # the window generator yields the sample indices of each window
    wingen = dsp.WindowGenerator(ns=sglx.ns,
                                 nswin=rms_win_length_samples,
                                 overlap=0)
    # pre-allocate output dictionary of numpy arrays
    win = {
        'TRMS': np.zeros((wingen.nwin, sglx.nc)),
        'nsamples': np.zeros((wingen.nwin, )),
        'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES,
                             1 / sglx.fs,
                             one_sided=True),
        'tscale': wingen.tscale(fs=sglx.fs)
    }
    win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc))
    # loop through the whole session
    for first, last in wingen.firstlast:
        D = sglx.read_samples(first_sample=first,
                              last_sample=last)[0].transpose()
        # remove low frequency noise below 1 Hz
        D = dsp.hp(D, 1 / sglx.fs, [0, 1])
        iw = wingen.iw
        win['TRMS'][iw, :] = dsp.rms(D)
        win['nsamples'][iw] = D.shape[1]
        if spectra:
            # the last window may be shorter than the Welch segment length
            if last - first < WELCH_WIN_LENGTH_SAMPLES:
                continue
            # compute a smoothed spectrum using Welch's method
            _, w = signal.welch(D,
                                fs=sglx.fs,
                                window='hann',
                                nperseg=WELCH_WIN_LENGTH_SAMPLES,
                                detrend='constant',
                                return_onesided=True,
                                scaling='density',
                                axis=-1)
            win['spectral_density'] += w.T
        # print at least every 20 windows
        if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0:
            print_progress(iw, wingen.nwin)
    return win
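A minimal usage sketch for rmsmap, assuming the module-level constants (RMS_WIN_LENGTH_SECS, WELCH_WIN_LENGTH_SAMPLES) and imports (numpy as np, scipy.signal, spikeglx, dsp) of the surrounding module are available; the file path below is a placeholder, not taken from the source.

# Hypothetical usage: the path is a placeholder for a SpikeGLX .lf.bin file
qc = rmsmap('/data/session/raw_ephys_data/probe00/ephysData.imec0.lf.bin', spectra=True)
print(qc['TRMS'].shape)   # (n_windows, n_channels): RMS per window and channel
print(qc['tscale'][-1])   # time of the last RMS window (s)
print(qc['fscale'].size)  # number of frequency bins in the spectral density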
Example #2
def http_download_file(full_link_to_file,
                       *,
                       clobber=False,
                       offline=False,
                       username='',
                       password='',
                       cache_dir=''):
    """
    :param full_link_to_file: http link to the file.
    :type full_link_to_file: str
    :param clobber: [False] If True, force overwrite the existing file.
    :type clobber: bool
    :param offline: [False] If True, do not attempt any download and return the expected
     local file path.
    :type offline: bool
    :param username: [''] authentication for password protected file server.
    :type username: str
    :param password: [''] authentication for password protected file server.
    :type password: str
    :param cache_dir: [''] directory in which files are cached; defaults to the user's
     Downloads directory.
    :type cache_dir: str

    :return: (str) the local full path of the downloaded file.
    """
    if not full_link_to_file:
        return ''

    # default cache directory is the user's Downloads folder
    if not cache_dir:
        cache_dir = str(Path.home().joinpath("Downloads"))

    # This is the local file name
    file_name = cache_dir + os.sep + os.path.basename(full_link_to_file)

    # do not overwrite an existing file unless specified
    if not clobber and os.path.exists(file_name):
        return file_name
    elif offline:
        return file_name

    # Base url of the file server, used to register credentials with the password manager
    baseurl = os.path.split(str(full_link_to_file))[0]

    # Create a password manager
    manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    if username and password:
        manager.add_password(None, baseurl, username, password)

    # Create an authentication handler using the password manager
    auth = urllib.request.HTTPBasicAuthHandler(manager)

    # Create an opener that will replace the default urlopen method on further calls
    opener = urllib.request.build_opener(auth)
    urllib.request.install_opener(opener)

    # Open the url and get the length
    u = urllib.request.urlopen(full_link_to_file)
    file_size = int(u.getheader('Content-length'))

    print(f"Downloading: {file_name} Bytes: {file_size}")
    file_size_dl = 0
    block_sz = 8192 * 64 * 8
    f = open(file_name, 'wb')
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        print_progress(file_size_dl, file_size, prefix='', suffix='')
    f.close()

    return file_name
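A short usage sketch for this download helper; the URL, credentials and cache directory below are placeholders rather than real endpoints.

# Hypothetical usage: URL and credentials are placeholders
local_path = http_download_file('https://example.org/data/session.bin',
                                username='myuser',
                                password='mypassword',
                                cache_dir='/tmp/downloads',
                                clobber=True)
print(local_path)  # /tmp/downloads/session.bin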
Example #3
def http_download_file(full_link_to_file,
                       chunks=None,
                       *,
                       clobber=False,
                       username='',
                       password='',
                       cache_dir='',
                       return_md5=False,
                       headers=None):
    """
    :param full_link_to_file: http link to the file.
    :type full_link_to_file: str
    :param chunks: [None] optional (first_byte, n_bytes) tuple to download only part of the file.
    :type chunks: tuple
    :param clobber: [False] If True, force overwrite the existing file.
    :type clobber: bool
    :param username: [''] authentication for password protected file server.
    :type username: str
    :param password: [''] authentication for password protected file server.
    :type password: str
    :param cache_dir: [''] directory in which files are cached; defaults to the user's
     Downloads directory.
    :type cache_dir: str
    :param return_md5: [False] If True, also return the md5 checksum of the downloaded data.
    :type return_md5: bool
    :param headers: [{}] additional headers to add to the request (auth tokens etc.)
    :type headers: dict

    :return: (str) the local full path of the downloaded file, or a (file_name, md5) tuple
     if return_md5 is True.
    """
    from ibllib.io import hashfile
    if not full_link_to_file:
        return ''

    # default cache directory is the user's Downloads folder
    if not cache_dir:
        cache_dir = str(Path.home().joinpath("Downloads"))

    # This is the local file name
    file_name = str(cache_dir) + os.sep + os.path.basename(full_link_to_file)

    # do not overwrite an existing file unless specified
    if not clobber and os.path.exists(file_name):
        return (file_name,
                hashfile.md5(file_name)) if return_md5 else file_name

    # Base url of the file server, used to register credentials with the password manager
    baseurl = os.path.split(str(full_link_to_file))[0]

    # Create a password manager
    manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    if username and password:
        manager.add_password(None, baseurl, username, password)

    # Create an authentication handler using the password manager
    auth = urllib.request.HTTPBasicAuthHandler(manager)

    # Create an opener that will replace the default urlopen method on further calls
    opener = urllib.request.build_opener(auth)
    urllib.request.install_opener(opener)

    # Support for partial download.
    req = urllib.request.Request(full_link_to_file)
    if chunks is not None:
        first_byte, n_bytes = chunks
        req.add_header("Range",
                       "bytes=%d-%d" % (first_byte, first_byte + n_bytes - 1))

    # add additional headers
    if headers is not None:
        for k in headers:
            req.add_header(k, headers[k])

    # Open the url and get the length
    try:
        u = urllib.request.urlopen(req)
    except urllib.error.HTTPError as e:
        _logger.error(f"{str(e)} {full_link_to_file}")
        raise e

    file_size = int(u.getheader('Content-length'))

    print(f"Downloading: {file_name} Bytes: {file_size}")
    file_size_dl = 0
    block_sz = 8192 * 64 * 8

    md5 = hashlib.md5()
    f = open(file_name, 'wb')
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        if return_md5:
            md5.update(buffer)
        print_progress(file_size_dl, file_size, prefix='', suffix='')
    f.close()

    return (file_name, md5.hexdigest()) if return_md5 else file_name
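A sketch of the partial-download path in the variant above: the chunks argument becomes an HTTP Range header and the md5 is accumulated while the file is written. The URL, token and byte range are illustrative assumptions.

# Hypothetical usage: download the first megabyte of a file and get the md5 of what was written
path, md5_hex = http_download_file('https://example.org/data/session.bin',
                                   chunks=(0, 1024 * 1024),  # Range: bytes=0-1048575
                                   return_md5=True,
                                   headers={'Authorization': 'Bearer <token>'},  # placeholder token
                                   cache_dir='/tmp/downloads')
print(path, md5_hex)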
Example #4
def test_simple_print(self):
    print('waitbar')
    for p in range(10):
        time.sleep(0.05)
        print_progress(p, 9)
Example #5
def populate_dj_with_phy(probe_label,
                         eid=None,
                         subj=None,
                         date=None,
                         sess_no=None,
                         one=None):
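    """
    Populate DataJoint cluster tables with manually curated phy output (cluster merges,
    labels and notes) for a given probe of a session.

    :param probe_label: name of the probe folder under the session alf directory, e.g. 'probe00'
    :param eid: experiment id of the session; if None, it is looked up from subj, date and sess_no
    :param subj: subject nickname, used only when eid is None
    :param date: session date, used only when eid is None
    :param sess_no: session number, used only when eid is None
    :param one: an instantiated ONE object; a new one is created if None
    """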
    if one is None:
        one = ONE()

    if eid is None:
        eid = one.search(subject=subj, date=date, number=sess_no)[0]

    sess_path = one.path_from_eid(eid)
    alf_path = sess_path.joinpath('alf', probe_label)

    cluster_path = Path(alf_path, 'spikes.clusters.npy')
    template_path = Path(alf_path, 'spikes.templates.npy')

    # Compare spikes.clusters with spikes.templates to find which clusters have been merged
    phy_clusters = np.load(cluster_path)
    id_phy = np.unique(phy_clusters)
    orig_clusters = np.load(template_path)
    id_orig = np.unique(orig_clusters)

    uuid_list = alf.io.load_file_content(
        alf_path.joinpath('clusters.uuids.csv'))

    # First deal with merged clusters and make sure they have cluster uuids assigned
    # Find the original cluster ids that have been merged into a new cluster
    merged_idx = np.setdiff1d(id_orig, id_phy)

    # See if any clusters have been merged, if not skip to the next bit
    if np.any(merged_idx):
        # Make association between original cluster and new cluster id and save in dict
        merge_list = {}
        for m in merged_idx:
            idx = phy_clusters[np.where(orig_clusters == m)[0][0]]
            if idx in merge_list:
                merge_list[idx].append(m)
            else:
                merge_list[idx] = [m]

        # Create a dataframe from the dict
        merge_clust = pd.DataFrame(
            columns=['cluster_idx', 'merged_uuid', 'merged_idx'])
        for key, value in merge_list.items():
            value_uuid = uuid_list['uuids'][value]
            merge_clust = merge_clust.append(
                {
                    'cluster_idx': key,
                    'merged_uuid': tuple(value_uuid),
                    'merged_idx': tuple(value)
                },
                ignore_index=True)

        # Get the dj table that has previously stored merged clusters and store in frame
        merge = cluster_table.MergedClusters()
        merge_dj = pd.DataFrame(columns=['cluster_uuid', 'merged_uuid'])
        merge_dj['cluster_uuid'] = merge.fetch('cluster_uuid').astype(str)
        merge_dj['merged_uuid'] = tuple(map(tuple, merge.fetch('merged_uuid')))

        # Merge the two dataframe to see if any merge combinations already have a cluster_uuid
        merge_comb = pd.merge(merge_dj,
                              merge_clust,
                              on=['merged_uuid'],
                              how='outer')

        # Find the merged clusters that do not have a uuid assigned
        no_uuid = np.where(pd.isnull(merge_comb['cluster_uuid']))[0]

        # Assign new uuid to new merge pairs and add to the merge table
        for nid in no_uuid:
            new_uuid = str(uuid.uuid4())
            merge_comb['cluster_uuid'].iloc[nid] = new_uuid
            merge.insert1(dict(
                cluster_uuid=new_uuid,
                merged_uuid=merge_comb['merged_uuid'].iloc[nid]),
                          allow_direct_insert=True)

        # Add all the uuids to the cluster_uuid frame with index according to cluster id from phy
        for idx, c_uuid in zip(merge_comb['cluster_idx'].values,
                               merge_comb['cluster_uuid'].values):
            uuid_list.loc[idx] = c_uuid

        csv_path = Path(alf_path, 'merge_info.csv')
        merge_comb = merge_comb.reindex(columns=[
            'cluster_idx', 'cluster_uuid', 'merged_idx', 'merged_uuid'
        ])

        try:
            merge_comb.to_csv(csv_path, index=False)
        except Exception as err:
            print(err)
            print('Close merge_info.csv file and then relaunch script')
            sys.exit(1)
    else:
        print('No merges detected, continuing...')

    # Now populate datajoint with cluster labels
    user = one._par.ALYX_LOGIN
    current_date = datetime.now().replace(microsecond=0)

    try:
        cluster_group = alf.io.load_file_content(
            alf_path.joinpath('cluster_group.tsv'))
    except Exception as err:
        print(err)
        print('Could not find cluster group file output from phy')
        sys.exit(1)

    try:
        cluster_notes = alf.io.load_file_content(
            alf_path.joinpath('cluster_notes.tsv'))
        cluster_info = pd.merge(cluster_group,
                                cluster_notes,
                                on=['cluster_id'],
                                how='outer')
    except Exception as err:
        cluster_info = cluster_group
        cluster_info['notes'] = None

    cluster_info = cluster_info.where(cluster_info.notnull(), None)
    cluster_info['cluster_uuid'] = uuid_list['uuids'][
        cluster_info['cluster_id']].values

    # dj table that holds data
    cluster = cluster_table.ClusterLabel()

    # Find clusters that have already been labelled by user
    old_clust = cluster & cluster_info & {'user_name': user}

    dj_clust = pd.DataFrame()
    dj_clust['cluster_uuid'] = (old_clust.fetch('cluster_uuid')).astype(str)
    dj_clust['cluster_label'] = old_clust.fetch('cluster_label')

    # First find the new clusters to insert into datajoint
    idx_new = np.where(
        np.isin(cluster_info['cluster_uuid'],
                dj_clust['cluster_uuid'],
                invert=True))[0]
    cluster_uuid = cluster_info['cluster_uuid'][idx_new].values
    cluster_label = cluster_info['group'][idx_new].values
    cluster_note = cluster_info['notes'][idx_new].values

    if idx_new.size != 0:
        print('Populating dj with ' + str(idx_new.size) + ' new labels')
    else:
        print('No new labels to add')
    for iIter, (iClust, iLabel, iNote) in enumerate(
            zip(cluster_uuid, cluster_label, cluster_note)):
        cluster.insert1(dict(cluster_uuid=iClust,
                             user_name=user,
                             label_time=current_date,
                             cluster_label=iLabel,
                             cluster_note=iNote),
                        allow_direct_insert=True)
        print_progress(iIter, cluster_uuid.size, '', '')

    # Next look through clusters already on datajoint and check if any labels have
    # been changed
    comp_clust = pd.merge(cluster_info, dj_clust, on='cluster_uuid')
    idx_change = np.where(
        comp_clust['group'] != comp_clust['cluster_label'])[0]

    cluster_uuid = comp_clust['cluster_uuid'][idx_change].values
    cluster_label = comp_clust['group'][idx_change].values
    cluster_note = comp_clust['notes'][idx_change].values

    # Populate table
    if idx_change.size != 0:
        print('Replacing label of ' + str(idx_change.size) + ' clusters')
    else:
        print('No labels to change')
    for iIter, (iClust, iLabel, iNote) in enumerate(
            zip(cluster_uuid, cluster_label, cluster_note)):
        prev_clust = cluster & {'user_name': user} & {'cluster_uuid': iClust}
        cluster.insert1(dict(*prev_clust.proj(),
                             label_time=current_date,
                             cluster_label=iLabel,
                             cluster_note=iNote),
                        allow_direct_insert=True,
                        replace=True)
        print_progress(iIter, cluster_uuid.size, '', '')

    print('Upload to datajoint complete')
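A usage sketch for populate_dj_with_phy; the probe label and session identifiers below are placeholders, and a configured ONE / DataJoint environment is assumed.

# Hypothetical usage: identifiers are placeholders
populate_dj_with_phy('probe00',
                     subj='subject_001',
                     date='2020-01-01',
                     sess_no=1,
                     one=ONE())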
Example #6
def print_progress(self):
    """
    Prints progress using a terminal progress bar
    """
    print_progress(self.iw, self.nwin)
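The snippets above all call print_progress(iteration, total, prefix='', suffix=''). The library's own implementation is not shown on this page; the following is a minimal sketch of a terminal progress bar consistent with those call sites, given as an assumption rather than the actual ibllib code.

import sys

# Sketch only: signature inferred from the call sites above, not the library implementation
def print_progress(iteration, total, prefix='', suffix='', length=40, fill='#'):
    """Print or refresh a single-line terminal progress bar."""
    fraction = 0 if total == 0 else min(iteration / total, 1.0)
    filled = int(length * fraction)
    bar = fill * filled + '-' * (length - filled)
    sys.stdout.write(f'\r{prefix} |{bar}| {fraction:.1%} {suffix}')
    sys.stdout.flush()
    if fraction >= 1.0:
        sys.stdout.write('\n')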