Example #1
def open_klusters(filename):
    indices = find_indices(filename)
    triplet = filename_to_triplet(filename)
    filenames_shanks = {}
    for index in indices:
        filenames_shanks[index] = triplet_to_filename(triplet[:2] + (index, ))
    klusters_data = {
        index: open_klusters_oneshank(filename)
        for index, filename in filenames_shanks.items()
    }
    shanks = list(filenames_shanks.keys())

    # Find the dataset filenames and load the metadata.
    filenames = find_filenames(filename)
    # Metadata common to all shanks.
    metadata = read_xml(filenames['xml'], 1)
    # Metadata specific to each shank.
    metadata.update(
        {shank: read_xml(filenames['xml'], shank)
         for shank in shanks})
    metadata['shanks'] = sorted(shanks)
    metadata['has_masks'] = (
        ('mask' in filenames and filenames['mask'] is not None)
        or ('fmask' in filenames and filenames['fmask'] is not None))

    klusters_data['name'] = triplet[0]
    klusters_data['metadata'] = metadata
    klusters_data['shanks'] = shanks
    klusters_data['filenames'] = filenames

    # Load probe file.
    filename_probe = filenames['probe']
    # If no probe file exists, create a default, linear probe with the right
    # number of channels per shank.
    if not filename_probe:
        # Generate a probe filename.
        filename_probe = find_filename_or_new(filename,
                                              'default.probe',
                                              have_file_index=False)
        shanks = {
            shank: klusters_data[shank]['nchannels']
            for shank in filenames_shanks.keys()
        }
        probe_python = generate_probe(shanks, 'complete')
        # with open(filename_probe, 'w') as f:
        # f.write(probe_python)
        # save_probe(filename_probe, probe_python)
        klusters_data['prb'] = probe_python
    else:
        probe_ns = {}
        # Execute the probe definition file into its own namespace.
        with open(filename_probe) as f:
            exec(f.read(), {}, probe_ns)
        klusters_data['probe'] = probe_ns

    return klusters_data
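
The shank loop above relies on filename helpers such as filename_to_triplet and triplet_to_filename that are defined elsewhere in the loader. As a rough, hypothetical sketch of what they might do, assuming the usual Klusters naming convention of basename.extension.shank (e.g. rat01.fet.1); these are stand-ins, not the library's real implementations:

def filename_to_triplet_sketch(filename):
    # Split "rat01.fet.1" into ('rat01', 'fet', 1).
    base, extension, index = filename.rsplit('.', 2)
    return (base, extension, int(index))

def triplet_to_filename_sketch(triplet):
    # Rebuild "rat01.fet.2" from ('rat01', 'fet', 2).
    return '.'.join(str(part) for part in triplet)

triplet = filename_to_triplet_sketch('rat01.fet.1')
assert triplet == ('rat01', 'fet', 1)
assert triplet_to_filename_sketch(triplet[:2] + (2,)) == 'rat01.fet.2'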
Example #2
def open_klusters(filename):
    indices = find_indices(filename)
    triplet = filename_to_triplet(filename)
    filenames_shanks = {}
    for index in indices:
        filenames_shanks[index] = triplet_to_filename(triplet[:2] + (index,))
    klusters_data = {index: open_klusters_oneshank(filename) 
        for index, filename in filenames_shanks.items()}
    shanks = list(filenames_shanks.keys())
           
    # Find the dataset filenames and load the metadata.
    filenames = find_filenames(filename)
    # Metadata common to all shanks.
    metadata = read_xml(filenames['xml'], 1)
    # Metadata specific to each shank.
    metadata.update({shank: read_xml(filenames['xml'], shank)
        for shank in shanks})
    metadata['shanks'] = sorted(shanks)
    metadata['has_masks'] = (('mask' in filenames 
                                    and filenames['mask'] is not None) or (
                                  'fmask' in filenames 
                                    and filenames['fmask'] is not None
                                  ))
    
    klusters_data['name'] = triplet[0]
    klusters_data['metadata'] = metadata
    klusters_data['shanks'] = shanks
    klusters_data['filenames'] = filenames
    
    # Load probe file.
    filename_probe = filenames['probe']
    # If no probe file exists, create a default, linear probe with the right
    # number of channels per shank.
    if not filename_probe:
        # Generate a probe filename.
        filename_probe = find_filename_or_new(filename, 'default.probe',
            have_file_index=False)
        shanks = {shank: klusters_data[shank]['nchannels']
            for shank in filenames_shanks.keys()}
        probe_python = generate_probe(shanks, 'complete')
        # with open(filename_probe, 'w') as f:
            # f.write(probe_python)
        # save_probe(filename_probe, probe_python)
        klusters_data['prb'] = probe_python
    else:
        probe_ns = {}
        # Execute the probe definition file into its own namespace.
        with open(filename_probe) as f:
            exec(f.read(), {}, probe_ns)
        klusters_data['probe'] = probe_ns
    
    return klusters_data
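
The else branch above executes the probe file as Python source into a fresh namespace dictionary. A self-contained sketch of that technique; the toy probe contents below are invented for illustration and are not the real probe file format:

import tempfile

# Write a toy probe definition to a temporary file.
probe_source = "probes = {1: [(0, 1), (1, 2)]}\n"
with tempfile.NamedTemporaryFile('w', suffix='.probe', delete=False) as f:
    f.write(probe_source)
    filename_probe = f.name

# Execute it into its own namespace, as the loader does for the probe file.
probe_ns = {}
with open(filename_probe) as f:
    exec(f.read(), {}, probe_ns)
print(probe_ns['probes'])  # {1: [(0, 1), (1, 2)]}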
Example #3
import os

import numpy as np

# Helpers such as find_filenames, read_xml, read_clusters, MemMappedText and
# MemMappedBinary come from the rest of the loader module.
def open_klusters_oneshank(filename):
    filenames = find_filenames(filename)
    fileindex = find_index(filename)

    # Open small Klusters files.
    data = {}
    metadata = read_xml(filenames['xml'], fileindex)
    data['clu'] = read_clusters(filenames['clu'])

    # Read .aclu data.
    if 'aclu' in filenames and os.path.exists(filenames['aclu']):
        data['aclu'] = read_clusters(filenames['aclu'])
    else:
        data['aclu'] = data['clu']

    # Read .acluinfo data.
    if 'acluinfo' in filenames and os.path.exists(filenames['acluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['acluinfo'])
    # If the ACLUINFO does not exist, try CLUINFO (older file extension)
    elif 'cluinfo' in filenames and os.path.exists(filenames['cluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['cluinfo'])
    else:
        data['acluinfo'] = default_cluster_info(np.unique(data['aclu']))

    # Read group info.
    if 'groupinfo' in filenames and os.path.exists(filenames['groupinfo']):
        data['groupinfo'] = read_group_info(filenames['groupinfo'])
    else:
        data['groupinfo'] = default_group_info()

    # Find out the number of columns in the .fet file.
    with open(filenames['fet'], 'r') as f:
        f.readline()
        # Get the number of non-empty columns in the .fet file.
        data['fetcol'] = len(
            [col for col in f.readline().split(' ') if col.strip() != ''])

    metadata['nspikes'] = len(data['clu'])
    data['fileindex'] = fileindex

    # Open big Klusters files.
    data['fet'] = MemMappedText(filenames['fet'], np.int64, skiprows=1)
    if 'spk' in filenames and os.path.exists(filenames['spk'] or ''):
        data['spk'] = MemMappedBinary(filenames['spk'],
                                      np.int16,
                                      rowsize=metadata['nchannels'] *
                                      metadata['nsamples'])
    if 'uspk' in filenames and os.path.exists(filenames['uspk'] or ''):
        data['uspk'] = MemMappedBinary(filenames['uspk'],
                                       np.int16,
                                       rowsize=metadata['nchannels'] *
                                       metadata['nsamples'])
    if 'mask' in filenames and os.path.exists(filenames['mask'] or ''):
        data['mask'] = MemMappedText(filenames['mask'], np.float32, skiprows=1)

    # data['metadata'] = metadata
    data.update(metadata)

    return data
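
The number of feature columns is taken from the second line of the .fet file; the first line is skipped, consistent with the skiprows=1 used for the memory-mapped load further down. A self-contained sketch of that counting step, with an in-memory stand-in for the .fet file:

import io

# Stand-in for a small .fet file: a header line, then one
# space-separated row of features per spike.
fake_fet = io.StringIO("4\n12 7 -3 150\n9 2 -1 151\n")

fake_fet.readline()  # skip the header line
fetcol = len([col for col in fake_fet.readline().split(' ') if col.strip() != ''])
print(fetcol)  # 4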
Example #4
import os

import numpy as np

# Helpers such as find_filenames, read_xml, read_clusters, MemMappedText and
# MemMappedBinary come from the rest of the loader module.
def open_klusters_oneshank(filename):
    filenames = find_filenames(filename)
    fileindex = find_index(filename)
    
    # Open small Klusters files.
    data = {}
    metadata = read_xml(filenames['xml'], fileindex)
    data['clu'] = read_clusters(filenames['clu'])
    
    # Read .aclu data.
    if 'aclu' in filenames and os.path.exists(filenames['aclu']):
        data['aclu'] = read_clusters(filenames['aclu'])
    else:
        data['aclu'] = data['clu']
        
    # Read .acluinfo data.
    if 'acluinfo' in filenames and os.path.exists(filenames['acluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['acluinfo'])
    # If the ACLUINFO does not exist, try CLUINFO (older file extension)
    elif 'cluinfo' in filenames and os.path.exists(filenames['cluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['cluinfo'])
    else:
        data['acluinfo'] = default_cluster_info(np.unique(data['aclu']))
        
    # Read group info.
    if 'groupinfo' in filenames and os.path.exists(filenames['groupinfo']):
        data['groupinfo'] = read_group_info(filenames['groupinfo'])
    else:
        data['groupinfo'] = default_group_info()
    
    # Find out the number of columns in the .fet file.
    with open(filenames['fet'], 'r') as f:
        f.readline()
        # Get the number of non-empty columns in the .fet file.
        data['fetcol'] = len([col for col in f.readline().split(' ') if col.strip() != ''])
    
    metadata['nspikes'] = len(data['clu'])
    data['fileindex'] = fileindex

    # Open big Klusters files.
    data['fet'] = MemMappedText(filenames['fet'], np.int64, skiprows=1)
    if 'spk' in filenames and os.path.exists(filenames['spk'] or ''):
        data['spk'] = MemMappedBinary(filenames['spk'], np.int16, 
            rowsize=metadata['nchannels'] * metadata['nsamples'])
    if 'uspk' in filenames and os.path.exists(filenames['uspk'] or ''):
        data['uspk'] = MemMappedBinary(filenames['uspk'], np.int16, 
            rowsize=metadata['nchannels'] * metadata['nsamples'])
    if 'mask' in filenames and os.path.exists(filenames['mask'] or ''):
        data['mask'] = MemMappedText(filenames['mask'], np.float32, skiprows=1)

    # data['metadata'] = metadata
    data.update(metadata)
    
    return data
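
MemMappedText and MemMappedBinary are the loader's own wrappers and are not shown here. As a rough, hypothetical stand-in for the binary case, numpy's memmap can expose a flat int16 .spk-style file as one row per spike, similar in spirit to the rowsize argument used above; this is only an approximation, not the wrapper's actual implementation:

import os
import tempfile

import numpy as np

# Build a toy .spk-like file: 3 spikes, 2 channels, 4 samples each, int16.
nchannels, nsamples, nspikes = 2, 4, 3
waveforms = np.arange(nspikes * nchannels * nsamples, dtype=np.int16)
path = os.path.join(tempfile.mkdtemp(), 'toy.spk.1')
waveforms.tofile(path)

# Memory-map the raw file and view it as one row per spike.
spk = np.memmap(path, dtype=np.int16, mode='r').reshape(-1, nchannels * nsamples)
print(spk.shape)  # (3, 8)

Memory mapping keeps these large per-spike arrays on disk and loads rows lazily, which matches the "Open big Klusters files" step in the function above.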