def test_decode_continuous_from_file():
    """Acceptance test of continuous image-based decoding with str input."""
    data_dir = get_test_data_path()
    model = Model.load(join(data_dir, 'gclda_model.pkl'))
    # Pass the image path (str), not a loaded image object.
    decoded_df, _ = decode_continuous(model, join(data_dir, 'continuous.nii.gz'))
    # One decoded row per word label in the model's vocabulary.
    assert decoded_df.shape[0] == model.n_word_labels
def test_decode_roi_from_file():
    """Acceptance test of ROI-based decoding with str input."""
    data_dir = get_test_data_path()
    model = Model.load(join(data_dir, 'gclda_model.pkl'))
    # Pass the ROI mask path (str), not a loaded image object.
    decoded_df, _ = decode_roi(model, join(data_dir, 'roi.nii.gz'))
    # One decoded row per word label in the model's vocabulary.
    assert decoded_df.shape[0] == model.n_word_labels
def test_decode_roi_with_priors():
    """Acceptance test of ROI-based decoding with topic priors."""
    data_dir = get_test_data_path()
    roi_file = join(data_dir, 'roi.nii.gz')
    model = Model.load(join(data_dir, 'gclda_model.pkl'))
    # First decode yields topic priors; feed them back into a second decode.
    _, priors = decode_roi(model, roi_file)
    decoded_df, _ = decode_roi(model, roi_file, topic_priors=priors)
    # One decoded row per word label in the model's vocabulary.
    assert decoded_df.shape[0] == model.n_word_labels
def test_decode_continuous_with_priors():
    """Acceptance test of continuous image-based decoding with topic priors."""
    data_dir = get_test_data_path()
    continuous_file = join(data_dir, 'continuous.nii.gz')
    model = Model.load(join(data_dir, 'gclda_model.pkl'))
    # First decode yields topic priors; feed them back into a second decode.
    _, priors = decode_continuous(model, continuous_file)
    decoded_df, _ = decode_continuous(model, continuous_file,
                                      topic_priors=priors)
    # One decoded row per word label in the model's vocabulary.
    assert decoded_df.shape[0] == model.n_word_labels
def test_save_model2():
    """Test gclda.model.Model.save with gzipped file."""
    data_dir = get_test_data_path()
    out_file = join(data_dir, 'temp.pklz')
    # Load a gzipped model and save it back out under a temporary name.
    Model.load(join(data_dir, 'gclda_model.pklz')).save(out_file)
    assert isfile(out_file)
    # Clean up the temporary file.
    remove(out_file)
def test_save_dataset2():
    """Test gclda.dataset.Dataset.save with gzipped file."""
    data_dir = get_test_data_path()
    out_file = join(data_dir, 'temp.pklz')
    # Load a gzipped dataset and save it back out under a temporary name.
    Dataset.load(join(data_dir, 'gclda_dataset.pklz')).save(out_file)
    assert isfile(out_file)
    # Clean up the temporary file.
    remove(out_file)
def test_save_topic_figures():
    """Writes out images for topics."""
    data_dir = get_test_data_path()
    temp_dir = join(data_dir, 'temp')
    model = Model.load(join(data_dir, 'gclda_model.pkl'))
    model.save_topic_figures(temp_dir, n_top_words=5)
    # Expect exactly one PNG figure per topic in the output directory.
    assert len(glob(join(temp_dir, '*.png'))) == model.n_topics
    # Clean up the temporary directory.
    rmtree(temp_dir)
def test_save_model_params():
    """Ensure appropriate files are created."""
    data_dir = get_test_data_path()
    temp_dir = join(data_dir, 'temp')
    model = Model.load(join(data_dir, 'gclda_model.pkl'))
    model.save_model_params(temp_dir, n_top_words=2)
    # All three parameter dumps must be written.
    expected_files = ('Topic_X_Word_Probs.csv',
                      'Topic_X_Word_CountMatrix.csv',
                      'ActivationAssignments.csv')
    assert all(isfile(join(temp_dir, fname)) for fname in expected_files)
    # Clean up the temporary directory.
    rmtree(temp_dir)
def test_run_iteration():
    """Test functions needed to run each iteration."""
    model = Model.load(join(get_test_data_path(), 'gclda_model.pkl'))
    start_iter = model.iter
    model.run_complete_iteration()
    # A complete iteration must advance the counter by exactly one.
    assert model.iter == start_iter + 1
def test_encode_from_list():
    """Acceptance test of text-to-image encoding with list input."""
    model = Model.load(join(get_test_data_path(), 'gclda_model.pkl'))
    words = ['anterior', 'insula', 'was', 'analyzed']
    encoded_img, _ = encode(model, words)
    # Encoded image must match the dataset mask geometry.
    assert encoded_img.shape == model.dataset.mask_img.shape
def test_encode_with_priors():
    """Acceptance test of text-to-image encoding with topic priors."""
    model = Model.load(join(get_test_data_path(), 'gclda_model.pkl'))
    text = 'anterior insula was analyzed'
    # First encode yields topic priors; feed them back into a second encode.
    _, priors = encode(model, text)
    encoded_img, _ = encode(model, text, topic_priors=priors)
    # Encoded image must match the dataset mask geometry.
    assert encoded_img.shape == model.dataset.mask_img.shape
def test_display_model_summary():
    """Prints model information to the console.

    Redirects stdout to a StringIO buffer and asserts that the summary
    produced some output.
    """
    model_file = join(get_test_data_path(), 'gclda_model.pkl')
    model = Model.load(model_file)
    captured_output = StringIO()  # Create StringIO object
    sys.stdout = captured_output  # and redirect stdout.
    try:
        model.display_model_summary()  # Call unchanged function.
    finally:
        # Always restore stdout, even if the call raises; otherwise every
        # subsequent test's output would silently go into the buffer.
        sys.stdout = sys.__stdout__
    assert len(captured_output.getvalue()) > 0
def test_view_peak_indices():
    """Prints dataset information to the console.

    Redirects stdout to a StringIO buffer and asserts that the peak-index
    listing produced some output.
    """
    dataset_file = join(get_test_data_path(), 'gclda_dataset.pkl')
    dset = Dataset.load(dataset_file)
    captured_output = StringIO()  # Create StringIO object
    sys.stdout = captured_output  # and redirect stdout.
    try:
        dset.view_peak_indices(n_peak_indices=5)  # Call unchanged function.
    finally:
        # Always restore stdout, even if the call raises; otherwise every
        # subsequent test's output would silently go into the buffer.
        sys.stdout = sys.__stdout__
    assert len(captured_output.getvalue()) > 0
def test_plot_brain():
    """Ensure that utils.plot_brain returns a matplotlib figure."""
    from gclda.utils import plot_brain
    ns_dset = get_test_dataset()
    underlay = ns_dset.masker.volume
    test_file = join(get_test_data_path(), 'continuous.nii.gz')
    # get_fdata() replaces get_data(), which is deprecated and removed in
    # nibabel 5. It always returns floating-point data, which is fine for
    # plotting.
    test_data = nib.load(test_file).get_fdata()
    fig = plot_brain(test_data, underlay)
    assert isinstance(fig, matplotlib.figure.Figure)
def test_import_from_email():
    """Ensure that Dataset files can be generated using email."""
    from gclda.dataset import import_neurosynth
    email = '*****@*****.**'
    data_dir = get_test_data_path()
    temp_dir = join(data_dir, 'temp')
    ns_dset = neurosynth.Dataset.load(join(data_dir, 'neurosynth_dataset.pkl'))
    # 'temp' is the dataset label; files land in out_dir/temp.
    import_neurosynth(ns_dset, 'temp', out_dir=data_dir, email=email)
    # All four dataset text files must be generated.
    expected_files = ('pmids.txt', 'peak_indices.txt',
                      'word_labels.txt', 'word_indices.txt')
    assert all(isfile(join(temp_dir, fname)) for fname in expected_files)
    # Clean up the temporary directory.
    rmtree(temp_dir)
def test_import_from_abstracts():
    """Ensure that Dataset files can be generated using abstracts file."""
    from gclda.dataset import import_neurosynth
    data_dir = get_test_data_path()
    abstracts_file = join(data_dir, 'abstracts.csv')
    temp_dir = join(data_dir, 'temp')
    ns_dset = neurosynth.Dataset.load(join(data_dir, 'neurosynth_dataset.pkl'))
    # NOTE(review): the email variant passes the bare label 'temp' here while
    # this test passes the full temp_dir path — both resolve to the same
    # location via os.path.join, but confirm which form the API expects.
    import_neurosynth(ns_dset, temp_dir, out_dir=data_dir,
                      abstracts_file=abstracts_file)
    # All four dataset text files must be generated.
    expected_files = ('pmids.txt', 'peak_indices.txt',
                      'word_labels.txt', 'word_indices.txt')
    assert all(isfile(join(temp_dir, fname)) for fname in expected_files)
    # Clean up the temporary directory.
    rmtree(temp_dir)
def test_init():
    """Smoke test for Model class."""
    dset = Dataset.load(join(get_test_data_path(), 'gclda_dataset.pkl'))
    # Construct a model with a single (asymmetric) region per topic.
    model = Model(dset, n_topics=50, n_regions=1, symmetric=False,
                  alpha=.1, beta=.01, gamma=.01, delta=1.,
                  dobs=25, roi_size=10., seed_init=1)
    assert isinstance(model, Model)
def test_symmetric():
    """Test running a model with symmetric ROIs."""
    dset = Dataset.load(join(get_test_data_path(), 'gclda_dataset.pkl'))
    # Two mirrored regions per topic exercises the symmetric code path.
    model = Model(dset, n_topics=50, n_regions=2, symmetric=True,
                  alpha=.1, beta=.01, gamma=.01, delta=1.,
                  dobs=25, roi_size=10., seed_init=1)
    start_iter = model.iter
    model.run_complete_iteration()
    # A complete iteration must advance the counter by exactly one.
    assert model.iter == start_iter + 1
def test_load_model():
    """Test gclda.model.Model.load."""
    loaded = Model.load(join(get_test_data_path(), 'gclda_model.pkl'))
    assert isinstance(loaded, Model)
def test_load_dataset2():
    """Test gclda.dataset.Dataset.load with gzipped file."""
    loaded = Dataset.load(join(get_test_data_path(), 'gclda_dataset.pklz'))
    assert isinstance(loaded, Dataset)
def test_init():
    """Smoke test for Dataset class."""
    # NOTE(review): this function shares its name with the Model smoke test
    # defined earlier; if both live in the same module, the later definition
    # shadows the earlier one and pytest collects only one of them — confirm
    # these originate from separate test modules, or rename one.
    dset = Dataset('dataset_files', get_test_data_path())
    assert isinstance(dset, Dataset)