def test_setup_env():
    """Parse the default test command line and seed ``estimator_env``.

    Stores the parsed spike times and settings on ``estimator_env`` so
    later tests can compare their own parse results against them.
    """
    cli_args = [
        spike_times_file_name,
        '--settings-file',
        'tests/settings/test_utils.yaml',
    ]

    # parse_arguments returns a 10-tuple; only the spike times (index 1)
    # and the settings dict (index 9) are needed here.
    parsed = parse_arguments(cli_args,
                             defined_tasks,
                             defined_estimation_methods)

    estimator_env.spike_times = parsed[1]
    estimator_env.settings = parsed[9]
# Example #2
def test_argument_parser():
    """Test ``parse_arguments`` on valid and invalid command lines.

    A valid invocation must return the expected task, spike times, open
    analysis/CSV files and settings; the parsed objects are then stored on
    ``estimator_env`` for later tests. Malformed argument lists must make
    ``parse_arguments`` exit (``SystemExit``) with the appropriate code.
    """

    def _assert_parse_exits(bad_arguments, expected_code):
        # Invalid input must terminate via sys.exit; check both the
        # exception type and the exit code it carries.
        with pytest.raises(SystemExit) as pytest_e:
            parse_arguments(bad_arguments,
                            defined_tasks,
                            defined_estimation_methods)
        assert pytest_e.type == SystemExit
        assert pytest_e.value.code == expected_code

    arguments = [
        spike_times_file_name, '--settings-file',
        'tests/settings/test_utils.yaml'
    ]

    task, spike_times, spike_times_optimization, spike_times_validation, \
        analysis_file, csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file, \
        analysis_num, settings = parse_arguments(arguments,
                                                 defined_tasks,
                                                 defined_estimation_methods)

    assert task == "full-analysis"
    # Spike times must match the ones parsed in test_setup_env, both
    # numerically and by hash.
    assert np.isclose(spike_times, estimator_env.spike_times).all()
    assert utl.get_hash(spike_times) == utl.get_hash(estimator_env.spike_times)
    # isinstance instead of `type(x) == T`: subclasses of h5py.File qualify too.
    assert isinstance(analysis_file, h5py.File)
    for f in [csv_stats_file, csv_histdep_data_file, csv_auto_MI_data_file]:
        # Each CSV output handle must point at an existing file on disk.
        assert Path(f.name).is_file()

    # Settings loaded from the YAML file must match the expected values.
    assert settings["embedding_past_range_set"] == exp.embedding_past_range_set
    assert settings[
        "embedding_number_of_bins_set"] == exp.embedding_number_of_bins_set
    assert settings[
        "embedding_scaling_exponent_set"] == exp.embedding_scaling_exponent_set
    assert settings["estimation_method"] == exp.estimation_method

    # Keep the parsed objects around for use by later tests.
    estimator_env.spike_times_optimization = spike_times_optimization
    estimator_env.spike_times_validation = spike_times_validation
    estimator_env.analysis_file = analysis_file
    estimator_env.csv_stats_file = csv_stats_file
    estimator_env.csv_histdep_data_file = csv_histdep_data_file
    estimator_env.csv_auto_MI_data_file = csv_auto_MI_data_file
    estimator_env.analysis_num = analysis_num
    estimator_env.settings = settings

    # assert error catching
    # no arguments: argparse exits with code 2
    _assert_parse_exits([], 2)  # might want to remove the code check

    # wrong task (non-existing spike times file)
    _assert_parse_exits(['tests/asdfg.dat'], EXIT_FAILURE)

    # wrong task (too short -> not unique, could be confidence-intervals or csv-files)
    _assert_parse_exits([
        spike_times_file_name, '-t', 'c', '--settings-file',
        'tests/settings/test_utils.yaml'
    ], EXIT_FAILURE)

    # max number of bins d too large, has to be < 63
    _assert_parse_exits([
        spike_times_file_name, '--settings-file',
        'tests/settings/test_max_num_bins.yaml'
    ], EXIT_FAILURE)