Example #1
import numpy as np

from brainiak import io
from brainiak.fcma.preprocessing import prepare_fcma_data

# data_dir, suffix, mask_file, epoch_file, expected_dir and expected_labels are
# module-level fixtures defined elsewhere in the test module (not shown here).


def test_prepare_fcma_data():
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
    expected_raw_data = np.load(expected_dir / 'expected_raw_data.npy')
    assert len(raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    for idx in range(len(raw_data)):
        assert np.allclose(raw_data[idx], expected_raw_data[idx]), \
            'raw data do not match in test_prepare_fcma_data'
    assert np.array_equal(labels, expected_labels), \
        'the labels do not match in test_prepare_fcma_data'
    from brainiak.fcma.preprocessing import RandomType
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    random_raw_data, _, _ = prepare_fcma_data(images,
                                              conditions,
                                              mask,
                                              random=RandomType.REPRODUCIBLE)
    assert len(random_raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    random_raw_data, _, _ = prepare_fcma_data(images,
                                              conditions,
                                              mask,
                                              random=RandomType.UNREPRODUCIBLE)
    assert len(random_raw_data) == len(expected_raw_data), \
        'numbers of epochs do not match in test_prepare_fcma_data'
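The test above leans on module-level fixtures that the listing does not show. A minimal sketch of how they might be defined, with purely hypothetical paths and filenames (none of these values come from the original test module):

from pathlib import Path

import numpy as np

data_dir = Path('test_data')                 # directory of per-subject NIfTI files
suffix = '.nii.gz'                           # filename suffix to match
mask_file = data_dir / 'mask.nii.gz'         # boolean brain mask
epoch_file = data_dir / 'epoch_labels.npy'   # per-subject epoch/condition arrays
expected_dir = data_dir / 'expected'         # precomputed reference outputs
expected_labels = np.load(expected_dir / 'expected_labels.npy')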
Example #4
import logging
import sys

import numpy as np
from mpi4py import MPI

from brainiak import io
from brainiak.fcma.preprocessing import prepare_fcma_data

logger = logging.getLogger(__name__)
"""
example running command in run_voxel_selection.sh
"""
if __name__ == '__main__':
    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.info('programming starts in %d process(es)' %
                    MPI.COMM_WORLD.Get_size())
    data_dir = sys.argv[1]
    suffix = sys.argv[2]
    mask_file = sys.argv[3]
    epoch_file = sys.argv[4]
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
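    # prepare_fcma_data returns one preprocessed array per epoch in raw_data and a
    # matching condition label per epoch in labels, which is the correspondence the
    # test snippet above checks.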

    # setting the random argument produces random voxel selection results
    # for non-parametric statistical analysis.
    # There are three random options:
    # RandomType.NORANDOM is the default
    # RandomType.REPRODUCIBLE permutes the voxels in the same way every run
    # RandomType.UNREPRODUCIBLE permutes the voxels differently across runs
    # example:
    # from brainiak.fcma.preprocessing import RandomType
    # raw_data, _, labels = prepare_fcma_data(images, conditions, mask,
    #                                         random=RandomType.REPRODUCIBLE)

    # if providing two masks, just append the second mask as the last input argument
    # and specify raw_data2
    # example:
    # images = io.load_images_from_dir(data_dir, suffix=suffix)
    # mask2 = io.load_boolean_mask(second_mask_file)  # second_mask_file: placeholder path
    # raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
    #                                                 mask2)

    if len(sys.argv) != 5:
        logger.error('the number of input argument is not correct')
        sys.exit(1)

    data_dir = sys.argv[1]
    extension = sys.argv[2]
    mask_file = sys.argv[3]
    epoch_file = sys.argv[4]

    epoch_list = np.load(epoch_file)
    num_subjects = len(epoch_list)
    num_epochs_per_subj = epoch_list[0].shape[1]
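    # Each item of epoch_list is assumed to follow BrainIAK's FCMA epoch layout,
    # a binary array of shape (n_conditions, n_epochs, n_TRs), so shape[1] above
    # gives the number of epochs per subject.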

    images = io.load_images_from_dir(data_dir, extension)
    mask = io.load_boolean_mask(mask_file)
    conditions = io.load_labels(epoch_file)
    raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
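
    # The example_of_* helpers called below are defined earlier in the source
    # script and are not shown in this snippet.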

    example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj)

    example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj)

    example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj)

    # test of two different components for correlation computation
    # images = io.load_images_from_dir(data_dir, extension)
    # mask2 = io.load_boolean_mask('face_scene/visual_top_mask.nii.gz')
    # raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
    #                                                 mask2)
    # example_of_correlating_two_components(raw_data, raw_data2, labels,
    #                                       num_subjects, num_epochs_per_subj)
    # example_of_correlating_two_components_aggregating_sim_matrix(
    #     raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
Example #6
    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.info(
            'Testing for participant %d.\nProgramming starts in %d process(es)' %
            (int(left_out_subj), MPI.COMM_WORLD.Get_size())
        )
    
    # Load in the volumes, mask and labels
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    top_n_mask = io.load_boolean_mask(top_n_mask_file)
    epoch_list = io.load_labels(epoch_file)

    # Parse the epoch data for useful dimensions
    epochs_per_subj = epoch_list[0].shape[1]
    num_subjs = len(epoch_list)
    
    # Prepare the data
    int_data, _, labels = prepare_fcma_data(images, epoch_list, top_n_mask)
    
    # What indexes pick out the left out participant?
    start_idx = int(int(left_out_subj) * epochs_per_subj)
    end_idx = int(start_idx + epochs_per_subj)
    
    # Take out the idxs corresponding to all participants but this one
    training_idx = list(set(range(len(labels))) - set(range(start_idx, end_idx)))
    testing_idx = list(range(start_idx, end_idx))
    
    # Pull out the data
    int_data_training = [int_data[i] for i in training_idx]
    int_data_testing = [int_data[i] for i in testing_idx]
    
    # Pull out the labels
    labels_training = [labels[i] for i in training_idx]
    labels_testing = [labels[i] for i in testing_idx]

    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.info(
            'Testing for participant %d.\nProgramming starts in %d process(es)'
            % (int(left_out_subj), MPI.COMM_WORLD.Get_size()))

    # Load in the volumes, mask and labels
    images = io.load_images_from_dir(data_dir, suffix=suffix)
    mask = io.load_boolean_mask(mask_file)
    epoch_list = io.load_labels(epoch_file)

    # Parse the epoch data for useful dimensions
    epochs_per_subj = epoch_list[0].shape[1]
    num_subjs = len(epoch_list)

    # Preprocess the data and prepare for FCMA
    raw_data, _, labels = prepare_fcma_data(images, epoch_list, mask)

    # enforce left one out
    file_str = output_dir + '/fc_no' + str(left_out_subj) + '_'
    start_idx = int(int(left_out_subj) * epochs_per_subj)
    end_idx = int(start_idx + epochs_per_subj)

    # Take out the idxs corresponding to all participants but this one
    subsampled_idx = list(
        set(range(len(labels))) - set(range(start_idx, end_idx)))
    labels_subsampled = [labels[i] for i in subsampled_idx]
    raw_data_subsampled = [raw_data[i] for i in subsampled_idx]

    # Set up the voxel selection object for fcma
    vs = VoxelSelector(labels_subsampled, epochs_per_subj, num_subjs - 1,
                       raw_data_subsampled)
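In BrainIAK's FCMA examples, a voxel-selection snippet like this typically continues by handing the selector a classifier and running the cross-validated search. The sketch below follows that pattern; the classifier settings and the result handling are illustrative assumptions rather than part of this source snippet.

    # Run cross-validated voxel selection; the SVM hyperparameters below are
    # placeholder choices, not taken from the snippet above.
    from sklearn.svm import SVC

    clf = SVC(kernel='precomputed', shrinking=False, C=1)
    results = vs.run(clf)  # typically a list of (voxel index, score) pairs

    if MPI.COMM_WORLD.Get_rank() == 0:
        logger.info('voxel selection finished; top result: %s' % str(results[0]))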