Example #1
def test_should_return_user_run_selection(mocker):
    from estimator.training.integration.test_integration_training import FakeExperimentLauncher
    model = FakeModel()
    launcher = FakeExperimentLauncher(
        [FakeModel(), FakeModel(), model,
         FakeModel()])
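    # simulate the user entering '2', i.e. picking the third run (index 2)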
    mocker.patch('builtins.input', return_value='2')
    assert utils.user_run_selection(launcher).model == model
Example #2
def test_should_provide_single_run_data_for_experiment_launcher(mocker):
    launcher = FakeExperimentLauncher([FakeModel(), FakeModel()])
    run_data = gen.run_data()
    mocker.patch('src.estimator.launcher.providing_launcher.provide_launcher',
                 return_value=launcher)
    mocker.patch('src.utils.utils.user_run_selection', return_value=run_data)

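    # provide_single_run_data should return whatever the (mocked) user selection yields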
    result = providing_launcher.provide_single_run_data()
    assert result == run_data
Example #3
def test_should_train_with_each_model(mocker):
    cnn_model_uno = FakeModel()
    cnn_model_dos = FakeModel()
    launcher = FakeExperimentLauncher([cnn_model_uno, cnn_model_dos])

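    # prepare_mocks returns mocks standing in for the before-run hook and in-memory training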
    before_run_mock, in_memory_training_mock = prepare_mocks(launcher, mocker)
    import sys

    training.main(sys.argv)

    assert before_run_mock.call_count == 2
    assert in_memory_training_mock.call_count == 2
    model_arguments = get_model_called_with(before_run_mock)
    assert_that(model_arguments, contains(*launcher.runs_data))
Example #4
def test_should_log_when_flag_not_defined_during_training(mocker):
    launcher = DefaultLauncher([FakeModel()])
    mocker.patch('src.utils.image_summaries.create_pair_summaries')
    mocker.patch('src.estimator.training.training.train')
    mocker.patch('src.estimator.launcher.providing_launcher.provide_launcher',
                 return_value=launcher)

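    # 'foo' is not a defined flag, BATCH_SIZE is; the undefined one should be reported in the text log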
    undefined_commandline_flag = ("foo", 34)
    defined_commandline_flag = (consts.BATCH_SIZE, 42)
    pass_cli_arg([undefined_commandline_flag, defined_commandline_flag])

    testing_utils.testing_helpers.run_app()

    log_file = list(
        filenames.get_run_text_logs_dir(launcher.runs_data[0]).iterdir())[0]
    lines = log_file.read_text().splitlines()
    line_in_question = [
        x for x in lines if 'Undefined commandline flags' in x
    ][0]
    assert "--foo=34" in line_in_question
Example #5
def test_should_provide_single_run_data_for_default_launcher(mocker):
    model = FakeModel()
    launcher = DefaultLauncher([model])
    mocker.patch('src.estimator.launcher.providing_launcher.provide_launcher',
                 return_value=launcher)
    result = providing_launcher.provide_single_run_data()
    assert result == launcher.runs_data[0]
    assert result.model == model
Example #6
def test_should_create_correct_text_log_name(mocker, run_dir_mock):
    mocker.patch('time.strftime', return_value=mocked_strftime)
    summary = "summary"
    get_run_summary_mock = mocker.patch('src.utils.utils.get_run_summary',
                                        return_value=summary)

    model = FakeModel()
    log_name = filenames.create_text_log_name(model)
    assert_that(str(log_name),
                ends_with(summary + '_' + mocked_strftime + '.log'))
    get_run_summary_mock.assert_called_once_with(model)
Example #7
def test_create_pair_summaries(patched_dataset_reading):
    provider = patched_dataset_reading.param
    run_data = gen.run_data(model=FakeModel(data_provider=provider()))
    dir_with_pair_summaries = filenames.get_run_logs_data_dir(
        run_data) / 'features'
    assert utils.check_filepath(dir_with_pair_summaries, exists=False)

    image_summaries.create_pair_summaries(run_data)

    assert utils.check_filepath(dir_with_pair_summaries,
                                is_directory=True,
                                is_empty=False)
    assert len(list(dir_with_pair_summaries.iterdir())) == 1
Example #8
def test_training_in_memory_only(mocker):
    launcher = DefaultLauncher([FakeModel()])
    before_run_mock, in_memory_training_mock = prepare_mocks(launcher, mocker)

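    # patch distributed training so the test can assert it is never invoked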
    distributed_training_mock = mocker.patch(
        'src.estimator.training.training.distributed_train_eval',
        autospec=True)
    import sys

    training.main(sys.argv)

    before_run_mock.assert_called_once()
    in_memory_training_mock.assert_called_once()
    distributed_training_mock.assert_not_called()
Example #9
def test_should_create_run_dir_for_default_launcher_ignoring_global_suffix(
        mocker, patched_params):
    launcher = DefaultLauncher([FakeModel()])
    run_data: RunData = launcher.runs_data[0]

    summary = "run_summary"
    mocker.patch('src.estimator.launcher.providing_launcher.provide_launcher',
                 return_value=launcher)
    get_run_summary_mock = mocker.patch('src.utils.utils.get_run_summary',
                                        return_value=summary)

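    # a DefaultLauncher run dir should not include the global suffix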
    dir_name = filenames.get_run_dir(run_data)

    assert_that(
        str(dir_name),
        ends_with('/tf/runs/{}/{}/{}'.format(run_data.runs_directory_name,
                                             run_data.launcher_name, summary)))
    get_run_summary_mock.assert_called_once_with(run_data.model)
Example #10
def test_should_create_run_dir_for_experiment_launcher(mocker,
                                                       patched_global_suffix):
    launcher = FakeExperimentLauncher([FakeModel()])
    run_data: RunData = launcher.runs_data[0]

    summary = "run_summary"
    mocker.patch('src.estimator.launcher.providing_launcher.provide_launcher',
                 return_value=launcher)
    get_run_summary_mock = mocker.patch('src.utils.utils.get_run_summary',
                                        return_value=summary)

    dir_name = filenames.get_run_dir(run_data)

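    # when a global suffix is set it should be appended to the launcher name in the run dir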
    expected_launcher_dir = run_data.launcher_name
    if patched_global_suffix is not None:
        expected_launcher_dir += '_' + patched_global_suffix
    assert_that(
        str(dir_name),
        ends_with('/tf/runs/{}/{}/{}'.format(run_data.runs_directory_name,
                                             expected_launcher_dir, summary)))
    get_run_summary_mock.assert_called_once_with(run_data.model)
Example #11
def test_should_create_correct_run_summary(suffix, patched_global_suffix,
                                           patched_excluded):
    model = FakeModel()

    run_summary = utils.get_run_summary(model)
    assert run_summary.endswith(suffix)
Example #12
        inference.single_run_inference(run_data=run_data, show=False)


def test_should_not_throw_if_model_has_more_checkpoints():
    run_data = gen.run_data(with_model_dir=True)

    model_dir = filenames.get_run_logs_data_dir(run_data)
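    # create a couple of dummy checkpoint files so the model dir appears to hold checkpoints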
    empty_checkpoints = [(model_dir / ("model.ckpt-{}.foobar".format(x)))
                         for x in range(2)]
    for checkpoint in empty_checkpoints:
        checkpoint.write_text("this is sparta")
    inference.single_run_inference(run_data=run_data, show=False)


@pytest.mark.integration
@pytest.mark.parametrize('launcher', [
    DefaultLauncher([FakeModel()]),
    FakeExperimentLauncher([FakeModel(), FakeModel()]),
])
@pytest.mark.parametrize('patched_params',
                         [{
                             consts.IS_INFER_CHECKPOINT_OBLIGATORY: False
                         }],
                         indirect=True)
def test_should_run_inference_for_different_launchers(mocker, launcher,
                                                      patched_params):
    mocker.patch('src.estimator.launcher.providing_launcher.provide_launcher',
                 return_value=launcher)
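    # the run-selection prompt should only be shown for experiment launchers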
    input_mock = mocker.patch('builtins.input', return_value='0')
    inference.infer()
    if launcher.is_experiment:
        input_mock.assert_called_once()
Example #13
def test_should_pass_model_dir_to_estimator():
    model = FakeModel()
    run_data = gen.run_data(model)
    estimator = training.create_estimator(run_data)
    model_dir = estimator.params[consts.MODEL_DIR]
    assert model_dir == str(filenames.get_run_logs_data_dir(run_data))