Example #1
import os  # assumed import for the os.path calls below


def load_keys(pathlist, keys=None):
    '''
    Load the given keys from each hdf5 file in pathlist.
    If no keys are passed in, all keys are loaded.
    '''
    if isinstance(pathlist, str):
        pathlist = [pathlist]
    data = dict()

    for path in pathlist:
        assert os.path.isfile(path), 'File was invalid: {0}'.format(path)
        name = os.path.basename(path).replace('.hdf5', '').replace('_ica', '')
        print('Loading File:', name)

        try:
            f = hdf5manager(path, create=False)
            filedata = f.load(keys)
        except Exception:  # avoid a bare except so KeyboardInterrupt still propagates
            filedata = None

        data[name] = filedata

    return data
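
A minimal usage sketch (the file names and keys below are hypothetical; each file must be a valid hdf5 file):

data = load_keys(['mouse1_ica.hdf5', 'mouse2_ica.hdf5'],
                 keys=['mean', 'n_components'])
for name, filedata in data.items():
    if filedata is None:
        print(name, 'failed to load')
    else:
        print(name, '->', list(filedata))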
Example #2
    def ica_project(self,
                    movie=None,
                    savedata=True,
                    calc_dfof=True,
                    del_movie=True,
                    n_components=None,
                    svd_multiplier=None,
                    suffix='',
                    output_folder=None,
                    mean_filter_method='wavelet',
                    low_cutoff=0.5):
        '''
        Apply an ICA decomposition to the experiment.  If rois and/or a bounding box have been defined, these are used to crop the movie before decomposition.

        By default, results are all saved to a [experiment]_[parameters]_ica.hdf5 file in the same directory as the original video files.

        Arguments:
            movie:
                The movie to apply ICA decomposition to.  If left blank, the movie cropped by the roimask and bounding box is used.
            savedata:
                Whether to save components to a file, or just return them as a variable.
            calc_dfof:
                If True, calculate the dFoF before applying ICA decomposition.  If False, ICA is computed on the raw movie.
            del_movie:
                If True, delete the original full movie array before decomposition to save memory.
            n_components:
                A specified number of components to project.  If left as None, the svd_multiplier auto component selection is used.
            svd_multiplier:
                The factor to multiply by the detected SVD noise threshold while estimating the number of ICA components to identify.  When left blank, the automatic value set in seas.ica.project is used.
            suffix:
                Optional suffix to append to the ICA processed file.
            output_folder:
                By default, the results are saved to an [experiment]_ica.hdf5 file in the same folder as the original video.  If a different folder is specified by output_folder, the ica file is saved there instead.
            mean_filter_method:
                Which method to use while filtering the mean.  Default is a highpass wavelet filter.
            low_cutoff:
                The lower cutoff for the highpass filter.  Default is 0.5 Hz.

        Returns:
            components: A dictionary containing all the results, metadata, and information regarding the filter applied.

                mean: 
                    the original video mean
                roimask: 
                    the mask applied to the video before decomposing
                shape: 
                    the original shape of the movie array
                eig_mix: 
                    the ICA mixing matrix
                timecourses: 
                    the ICA component time series
                eig_vec: 
                    the eigenvectors
                n_components:
                    the number of components in eig_vec (reduced to only have 25% of total components as noise)
                project_meta:
                    The metadata for the ica projection
                expmeta:
                    All metadata created for this class
                lag1: 
                    the lag-1 autocorrelation
                noise_components: 
                    a vector (n components long) to store binary representation of which components were detected as noise 
                cutoff: 
                    the signal-noise cutoff value
                mean_filtered: 
                    the filtered mean
                mean_filter_meta: 
                    metadata on how the mean filter was applied

            If n_components was set automatically, the following additional keys are also returned in components:

                svd_cutoff: 
                    the number of components originally decomposed
                lag1_full: 
                    the lag-1 autocorrelation of the full set of components decomposed before cropping to only 25% noise components
                svd_multiplier: 
                    the svd multiplier value used to determine cutoff
        '''
        print('\nICA Projecting\n-----------------------')

        if savedata:
            suffix_list = []
            if len(suffix) > 0:
                suffix_list.append(suffix)

            if self.downsample:
                suffix_list.append(str(self.downsample) + 'xds')

            if self.downsample_t:
                suffix_list.append(str(self.downsample_t) + 'xtds')

            if not calc_dfof:
                suffix_list.append('raw')

            if svd_multiplier is not None:
                suffix_list.append(str(svd_multiplier) + 'svdmult')

            if output_folder is None:
                output_folder = os.path.dirname(self.path[0])

            suffix_list.append('ica.hdf5')

            suffix = '_'.join(suffix_list)

            savepath = os.path.join(output_folder, self.name + '_' + suffix)
            print('Saving ICA data to:', savepath)
        else:
            savepath = None

        if savedata:
            f = hdf5manager(savepath)
            components = f.load()  # should be empty if it didn't exist yet.
        else:
            components = {}

        # Load all attributes of experiment class into expmeta dictionary
        # to keep info in ica and filtered files.
        ignore = ['movie', 'filtered', 'notifications']
        expdict = self.__dict__
        expmeta = {}
        for key in expdict:
            if key not in ignore:
                expmeta[key] = expdict[key]
        components['expmeta'] = expmeta
        print('Saving keys under expmeta in components:')
        for key in expmeta:
            print(key)

        if savedata:
            f.save(components)

        # calculate decomposition:
        if ('eig_vec' in components) and ('eig_mix' in components):
            # if data was already in the save path, use it
            print('Found ICA decomposition in components')
        else:

            if hasattr(self, 'roimask'):
                roimask = self.bound_mask()
            else:
                roimask = None

            if movie is None:
                movie = self.bound_movie()

                if calc_dfof:
                    movie = dfof(movie)

            if del_movie:
                print('Deleting original movie to save memory...')
                del self.movie

            # drop a dimension and transpose to prepare timecourses for ICA
            shape = movie.shape
            t, x, y = shape
            vector = movie.reshape(t, x * y)
            vector = vector.T  # now vector is (x*y, t) for ICA along x*y dimension
            print('M has been reshaped from {0} to {1}\n'.format(
                movie.shape, vector.shape))

            # run ICA projection
            ica_project_kwargs = {'vector': vector, 'shape': shape}

            if svd_multiplier is not None:
                ica_project_kwargs['svd_multiplier'] = svd_multiplier

            if roimask is not None:
                ica_project_kwargs['roimask'] = roimask

            if n_components is not None:
                ica_project_kwargs['n_components'] = n_components

            components = project(**ica_project_kwargs)
            components['expmeta'] = expmeta

            if savedata:
                f.save(components)

        # Calculate other relevant parameters
        components['mean_filtered'] = filter_mean(
            components['mean'],
            filter_method=mean_filter_method,
            low_cutoff=low_cutoff)
        components['mean_filter_meta'] = {
            'mean_filter_method': mean_filter_method,
            'low_cutoff': low_cutoff
        }

        if savedata:
            f.save({
                'noise_components': components['noise_components'],
                'cutoff': components['cutoff'],
                'lag1': components['lag1'],
                'mean_filtered': components['mean_filtered'],
                'mean_filter_meta': components['mean_filter_meta'],
            })
            print('Saved all data to file:')
            f.print()

        return components
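
A hedged usage sketch: the experiment class name, constructor arguments, and file path below are assumptions; only the ica_project arguments come from the signature above.

exp = Experiment(['/data/mouse1_vid.tif'])  # hypothetical class and path
components = exp.ica_project(calc_dfof=True,
                             mean_filter_method='wavelet',
                             low_cutoff=0.5)
print('components found:', components['n_components'])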
Example #3
    help='hdf5 file to load with alternate map to rebuild timecourses')
ap.add_argument('--rotate',
                nargs=1,
                type=int,
                help='rotate movies before saving')
args = vars(ap.parse_args())

path = args['input'][0].name
print('Input file found:', path)

savepath = path.replace('.hdf5', '_')
savepath = savepath.replace('_reduced', '')

# Load relevant objects from processed file
# -----------------------------------------------
f = hdf5manager(path)
f.print()

if args['rotate'] is not None:
    rotate = args['rotate'][0]
else:
    rotate = 0

if ('domain_ROIs' not in f.keys()) or args['force']:

    if 'artifact_components' not in f.keys():
        if not args['force']:
            print('PCA gui must be run before parcellation analyses')
            raise KeyError('Artifact components not found!')
        else:
            print('Artifact components were not found.')
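
The fragment gates recomputation on which keys are already stored in the file. A minimal sketch of that pattern, using only the hdf5manager methods shown in these examples (keys, load, save); the path and helper function are hypothetical:

f = hdf5manager('experiment_ica.hdf5')  # hypothetical path
if 'domain_ROIs' not in f.keys():
    eig_vec = f.load('eig_vec')
    domain_ROIs = compute_domain_rois(eig_vec)  # hypothetical helper
    f.save({'domain_ROIs': domain_ROIs})
else:
    domain_ROIs = f.load('domain_ROIs')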
Example #4
def filter_comparison(components,
                      downsample=4,
                      savepath=None,
                      filtered_path=None,
                      include_noise=True,
                      t_start=None,
                      t_stop=None,
                      apply_mean_filter=True,
                      n_rotations=0):
    '''
    Create a filter comparison movie, displaying the original movie, the removed artifacts, and the filtered movie side by side.

    Arguments:
        components: 
            The ICA components returned by ica.project
        downsample:
            The factor to downsample by before writing the video
        savepath:
            The path to save the video at (mp4)
        filtered_path:
            The hdf5 path to save the filtered movie to. 
        include_noise:
            Whether noise components should be included in the filtered video
        t_start: 
            The frame to start rebuilding the movie at.  If none is provided, the rebuilt movie starts at the first frame
        t_stop: 
            The frame to stop rebuilding the movie at.  If none is provided, the rebuilt movie ends at the last frame
        apply_mean_filter:
            Whether to filter the mean before re-adding it
        n_rotations:
            The number of CCW rotations to apply before saving the video

    Returns:
        Nothing.
    '''
    print('\n-----------------------', '\nBuilding Filter Comparison Movies',
          '\n-----------------------')

    print('\nFiltered Movie\n-----------------------')
    filtered = rebuild(components,
                       include_noise=include_noise,
                       t_start=t_start,
                       t_stop=t_stop,
                       apply_mean_filter=apply_mean_filter)

    if filtered_path is not None:
        print('Saving filtered movie to:', filtered_path)
        f = hdf5manager(filtered_path)
        f.save({'filtered_movie': filtered})

    filtered = scale_video(filtered, downsample)
    filtered = rotate(filtered, n_rotations)

    print('\nArtifact Movie\n-----------------------')
    artifact_index = np.where(components['artifact_components'] == 1)[0]
    components['artifact_components'] = np.ones(
        components['artifact_components'].shape)
    components['artifact_components'][artifact_index] = 0
    if not include_noise:
        components['artifact_components'][np.where(
            components['noise_components'] == 1)] = 0
    artifact_movie = rebuild(components, t_start=t_start, t_stop=t_stop)
    print('rescaling video...')
    artifact_movie = scale_video(artifact_movie, downsample)
    artifact_movie = rotate(artifact_movie, n_rotations)

    print('\nOriginal Movie\n-----------------------')
    components['artifact_components'] = np.zeros(
        components['artifact_components'].shape)
    raw_movie = rebuild(components,
                        t_start=t_start,
                        t_stop=t_stop,
                        apply_mean_filter=apply_mean_filter)
    print('rescaling video...')
    raw_movie = scale_video(raw_movie, downsample)
    raw_movie = rotate(raw_movie, n_rotations)

    movies = np.concatenate((raw_movie, artifact_movie, filtered), axis=2)

    if 'roimask' in components:
        roimask = components['roimask']
        overlay = (roimask == 0).astype('uint8')
        overlay = rotate(overlay, n_rotations)

        overlay = scale_video(overlay[None, :, :], downsample)[0]
        overlay = np.concatenate((overlay, overlay, overlay), axis=1)

    else:
        overlay = None

    if overlay is not None:
        print('overlay', overlay.shape)
    print('movies', movies.shape)

    save(movies,
         savepath,
         rescale_range=True,
         resize_factor=1 / 2,
         save_cbar=True,
         overlay=overlay)
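
A usage sketch (paths are hypothetical): components can be loaded from a saved ica.hdf5 file, and must already contain an artifact_components key for the rebuild calls above to work.

components = hdf5manager('experiment_ica.hdf5').load()  # hypothetical path
filter_comparison(components,
                  downsample=4,
                  savepath='filter_comparison.mp4',
                  t_start=0,
                  t_stop=500)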
Example #5
def rebuild(components,
            artifact_components=None,
            t_start=None,
            t_stop=None,
            apply_mean_filter=True,
            filter_method='wavelet',
            include_noise=True):
    '''
    Rebuild the original vector space from a subset of the independent
    components of the data.  Components to use are specified where
    artifact_components == 0.  Returns a matrix data_r, the reconstructed
    vector projected back into its original dimensions.

    The mean (filtered, unless apply_mean_filter is False) is re-added
    to the reconstruction by this function.
    Arguments:
        components: 
            The components from ica_project.  artifact_components must be assigned to components before rebuilding, or passed in explicitly
        artifact_components:
            Overrides the artifact_components key in components, to rebuild all components except those specified
        t_start: 
            The frame to start rebuilding the movie at.  If none is provided, the rebuilt movie starts at the first frame
        t_stop: 
            The frame to stop rebuilding the movie at.  If none is provided, the rebuilt movie ends at the last frame
        apply_mean_filter:
            Whether to apply a filter to the mean signal.
        filter_method:
            The filter method to apply (see filter_mean function).
        include_noise:
            Whether to include noise components when rebuilding.  If noise_components should not be included in the rebuilt movie, set this to False

    Returns:
        data_r: The ICA filtered video.
    '''
    print('\nRebuilding Data from Selected ICs\n-----------------------')

    if isinstance(components, str):
        f = hdf5manager(components)
        components = f.load()

    assert isinstance(components, dict), 'Components were not in format expected'

    eig_vec = components['eig_vec']
    roimask = components['roimask']
    shape = components['shape']
    mean = components['mean']
    n_components = components['n_components']
    dtype = np.float32

    t, x, y = shape

    if mean.ndim > 1:  # some saved means carry an extra singleton dimension
        mean = mean.flatten()

    if artifact_components is None:
        artifact_components = components['artifact_components']
    elif artifact_components == 'none':
        print('including all components')
        artifact_components = np.zeros(n_components)

    if (not include_noise) and ('noise_components' in components):
        print('Not rebuilding noise components')
        # copy rather than += so the array stored in components is not mutated
        artifact_components = artifact_components + components['noise_components']
        artifact_components[artifact_components > 1] = 1

    reconstruct_indices = np.where(artifact_components == 0)[0]

    if reconstruct_indices.size == 0:
        print('No indices were selected for reconstruction.')
        print('Returning empty matrix...')
        data_r = np.zeros((t, x, y), dtype='uint8')
        data_r = data_r[t_start:t_stop]
        return data_r

    n_components = reconstruct_indices.size

    # make sure vector extracted properly matches the roimask given
    if roimask is None:
        assert eig_vec[:, 0].size == x * y, (
            "Eigenvector size isn't compatible with the shape of the output "
            'matrix')
    else:
        maskind = np.where(roimask.flat == 1)
        assert eig_vec[:,0].size == maskind[0].size, \
        "Eigenvector size is not compatible with the masked region's size"

    eig_mix = components['eig_mix']

    if t_start is None:
        t_start = 0

    if t_stop is None:
        t_stop = eig_mix.shape[0]

    if (t_stop - t_start) != shape[0]:
        shape = (t_stop - t_start, shape[1], shape[2])

    t = t_stop - t_start

    print('\nRebuilding ICA...')
    print('number of elements included:', n_components)
    print('eig_vec:', eig_vec.shape)
    print('eig_mix:', eig_mix.shape)

    print('\nReconstructing....')
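    # eig_vec[:, idx] is (pixels, k) and eig_mix[t_start:t_stop, idx] is (t, k),
    # so the dot product is (pixels, t) and the trailing .T yields (t, pixels).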
    data_r = np.dot(eig_vec[:, reconstruct_indices],
                    eig_mix[t_start:t_stop, reconstruct_indices].T).T

    if apply_mean_filter:
        mean_filtered = filter_mean(mean, filter_method)
        data_r += mean_filtered[t_start:t_stop, None]

    else:
        print('Not filtering mean')
        mean_filtered = None
        data_r += mean[t_start:t_stop, None]

    print('Done!')

    if roimask is None:
        data_r = data_r.reshape(shape)
    else:
        reconstructed = np.zeros((x * y, t), dtype=dtype)
        reconstructed[maskind] = data_r.swapaxes(0, 1)
        reconstructed = reconstructed.swapaxes(0, 1)
        data_r = reconstructed.reshape(t, x, y)

    return data_r
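
A usage sketch (the file name is hypothetical): since the string case is handled at the top of the function, rebuild accepts either the components dict or the path to a saved ica.hdf5 file.

filtered = rebuild('experiment_ica.hdf5',
                   include_noise=False,
                   t_start=0,
                   t_stop=1000)
print('rebuilt movie shape:', filtered.shape)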
Example #6
def main():
    '''
    If called directly from the command line, take the argument(s) passed and
    try to read the contents of each .hdf5 file.
    '''

    print('\nHDF5 Manager\n-----------------------')

    ap = argparse.ArgumentParser()
    ap.add_argument('file',
                    type=argparse.FileType('r'),
                    nargs='+',
                    help='path to the hdf5 file(s)')
    ap.add_argument('-e',
                    '--extract',
                    type=str,
                    nargs='+',
                    help='key(s) to be extracted')
    ap.add_argument('-m',
                    '--merge',
                    type=argparse.FileType('r'),
                    help='merges keys in merge file into main file(s).')
    ap.add_argument('-c',
                    '--copy',
                    action='store_true',
                    help='make copy of hdf5 file')
    ap.add_argument('-d', '--delete', type=str, nargs='+', help='delete key')
    ap.add_argument('-r', '--rename', type=str, nargs=2, help='rename key')
    ap.add_argument('-i',
                    '--ignore',
                    type=str,
                    help='key to ignore while loading.  For use with copy.')
    ap.add_argument('--read',
                    type=str,
                    nargs='+',
                    help='key(s) to read to terminal.')

    args = vars(ap.parse_args())

    if len(args['file']) == 1:
        path = args['file'][0].name

        assert path.endswith('.hdf5'), 'Not a valid hdf5 file.\nExiting.\n'

        print('Found hdf5 file:', path)
        f = hdf5manager(path, verbose=True)
        # f.print()

        if args['extract'] is not None:
            print('extracting keys:', ', '.join(args['extract']), '\n')

            for key in args['extract']:
                assert key in f.keys(), '{0} was not a valid key!'.format(key)

            loaded = f.load(args['extract'])

            if type(loaded) is not dict:
                loaded = {args['extract'][0]: loaded}

            newpath = f.path.replace(
                '.hdf5', '_extract_{0}.hdf5'.format('-'.join(args['extract'])))

            print('new path:', newpath)

            hdf5manager(newpath).save(loaded)

        elif args['merge'] is not None:
            mergepath = args['merge'].name
            assert mergepath.endswith('.hdf5'), 'merge file was not valid'
            print('merging hdf5 file:', mergepath)

            mergedict = hdf5manager(mergepath).load()
            # print(mergedict)

            for key in mergedict.keys():
                print(key)

                if key in f.keys():
                    print(key, 'found in main file.',
                          'Are you sure you want to merge? (y/n)')

                    loop = True

                    while loop:
                        response = input().lower().strip()
                        if response in ('y', 'yes'):
                            loop = False
                            f.save({key: mergedict[key]})
                        elif response in ('n', 'no'):
                            print('not saving', key)
                            loop = False
                        else:
                            print('invalid answer!')

                else:
                    print(key, 'not in main file.  No merge conflicts')
                    f.save({key: mergedict[key]})

        elif args['copy']:
            ignore = args['ignore']

            if ignore is not None:
                assert ignore in f.keys(), '{0} not a valid key!'.format(
                    ignore)

            data = f.load(ignore=ignore)

            newpath = f.path.replace('.hdf5', '_copy.hdf5')
            g = hdf5manager(newpath)
            g.save(data)

        elif args['delete']:

            f.delete(args['delete'])

            print('Note: deleting keys from hdf5 file may not free up space.')
            print('Make a copy with --copy command to free up space.')

        elif args['rename']:
            print('renaming', args['rename'][0], 'to', args['rename'][1], '\n')

            key = args['rename'][0]
            assert key in f.keys(), \
                    'key was not valid: ' + key
            f.verbose = False

            data = f.load(key)
            print('data loaded:', data)

            f.save({args['rename'][1]: data})

            f.open()
            try:
                del f.f[key]
            except KeyError:  # key may be stored as an attribute instead
                del f.f.attrs[key]
            f.close()
            f.print()

        elif args['read']:
            f.verbose = False
            for key in args['read']:
                if key in f.keys():
                    print('key found:', key)
                    print(key + ':', f.load(key))
                else:
                    print('key not found:', key)

        else:
            print('no additional commands found')

    elif len(args['file']) > 1:
        pathlist = [file.name for file in args['file']]

        print('\nFound multiple files:')
    for path in pathlist:
        print('\t', path)
        print('')

        if args['extract'] is not None:
            print('extracting keys:', ', '.join(args['extract']), '\n')

            data = load_keys(pathlist, args['extract'])

            directory = os.path.dirname(pathlist[0])
            directory = os.path.abspath(directory)
            filename = 'hdf5extract-' + '-'.join(args['extract']) + '.hdf5'

            path = os.path.join(directory, filename)
            print('path', path)

            f = hdf5manager(path)
            f.save(data)

        elif args['merge'] is not None:
            mergepath = args['merge'].name
            assert mergepath.endswith('.hdf5'), 'merge file was not valid'
            print('merging hdf5 file:', mergepath)

            mergedict = hdf5manager(mergepath).load()
            key = list(mergedict.keys())[0]
            # get a random key, test if subdict structure or one key extracted

            if type(mergedict[key]) is dict:
                subdict = True
            else:
                subdict = False
                mergekey = os.path.basename(mergepath).replace(
                    '.hdf5', '').replace('hdf5extract-', '')

            for path in pathlist:
                name = os.path.basename(path).replace('.hdf5',
                                                      '').replace('_ica', '')
                print('Merging file:', name)

                if name in mergedict.keys():
                    f = hdf5manager(path, verbose=False)

                    if subdict:
                        conflict = []
                        for key in mergedict[name]:
                            if key in f.keys():
                                conflict.append(key)

                        if len(conflict) > 0:
                            print('replace old dict with new?')

                            print('\tOriginal:')
                            print('\t', f.load(conflict))
                            print('\n\tNew:')
                            print('\t', mergedict[name])
                            print('\n\treplace?')

                            loop = True
                            while loop:
                                response = input().lower().strip()
                                if response in ('y', 'yes'):
                                    loop = False
                                    f.save(mergedict[name])
                                elif response in ('n', 'no'):
                                    print('not saving')
                                    loop = False
                                else:
                                    print('invalid answer!')
                        else:
                            f.save(mergedict[name])

                    else:
                        if mergekey in f.keys():
                            print(mergekey, 'was in original file.')
                            print('Are you sure you want to replace this?')

                            print('\tOriginal:')
                            print('\t', f.load(mergekey))
                            print('\n\tNew:')
                            print('\t', mergedict[name])
                            print('\n\treplace?')

                            loop = True
                            while loop:
                                response = input().lower().strip()
                                if response in ('y', 'yes'):
                                    loop = False
                                    f.save({mergekey: mergedict[name]})
                                elif response in ('n', 'no'):
                                    print('not saving', mergekey)
                                    loop = False
                                else:
                                    print('invalid answer!')
                        else:
                            f.save({mergekey: mergedict[name]})

                else:
                    print(name, 'not found in merge dictionary')
                    print('skipping...')

        elif args['read']:
            for path in pathlist:
                print('\n', path)
                f = hdf5manager(path, verbose=False)
                for key in args['read']:
                    if key in f.keys():
                        print('key found:', key)
                        print(key + ':', f.load(key))
                    else:
                        print('key not found:', key)

        else:
            print('Command not defined for multiple files.')

    else:
        print('No hdf5 file found')
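
For reference, a few hypothetical invocations of this CLI (the script name is an assumption; the flags match the parser defined above):

# python hdf5manager.py experiment_ica.hdf5                    # print file contents
# python hdf5manager.py experiment_ica.hdf5 -e mean roimask    # extract keys to a new file
# python hdf5manager.py experiment_ica.hdf5 -r old_key new_key
# python hdf5manager.py a_ica.hdf5 b_ica.hdf5 --read n_components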