def test_dmlrunner_same_train_job_with_split_1(runner, mnist_filepath):
    """Train on the full dataset (split=1) and verify the result payload.

    Splits the MNIST data with a split fraction of 1, initializes model
    weights, runs a training job through the runner, and asserts the job
    succeeds with well-formed weights/omega/train_stats results.
    """
    split_job = make_split_job(mnist_filepath)
    split_job.hyperparams['split'] = 1
    job_results = runner.run_job(split_job)
    session_filepath = job_results.results['session_filepath']
    datapoint_count = job_results.results['datapoint_count']
    initialize_job = make_initialize_job()
    initial_weights = runner.run_job(initialize_job).results['weights']
    train_job = make_train_job(
        initial_weights,
        session_filepath,
        datapoint_count,
    )
    result = runner.run_job(train_job)
    assert result.status == 'successful'
    results = result.results
    new_weights = results['weights']
    omega = results['omega']
    train_stats = results['train_stats']
    # FIX: the original used `is` to compare against a string (.name),
    # which relies on CPython string interning; equality is the correct
    # and portable check.
    assert result.job.job_type == JobTypes.JOB_TRAIN.name
    # FIX: isinstance instead of exact-type equality (idiomatic, and
    # tolerant of subclasses such as numpy scalar types).
    assert isinstance(new_weights, list)
    assert isinstance(new_weights[0], np.ndarray)
    assert isinstance(omega, (int, float))
    assert isinstance(train_stats, dict)
def train_dmlresult_obj(runner, split_dmlresult_obj, init_dmlresult_obj, small_filepath):
    """Build a training job from prior init/split results and run it.

    Note: `small_filepath` is a fixture dependency kept in the signature
    even though this body does not read it directly.
    """
    weights = init_dmlresult_obj.results['weights']
    session = split_dmlresult_obj.results['session_filepath']
    count = split_dmlresult_obj.results['datapoint_count']
    return runner.run_job(make_train_job(weights, session, count))
def train_dmlresult_obj(config_manager, split_dmlresult_obj, init_dmlresult_obj):
    """Run a training job (split=1) on a fresh DMLRunner and return the result."""
    job_runner = DMLRunner(config_manager)
    train_job = make_train_job(
        make_model_json(),
        init_dmlresult_obj.results['weights'],
        make_hyperparams(split=1),
        split_dmlresult_obj.results['session_filepath'],
        split_dmlresult_obj.results['datapoint_count'],
    )
    return job_runner.run_job(train_job)
def test_dmlrunner_same_train_job_with_split_1(config_manager, mnist_filepath):
    """Train on the full dataset (split=1) and verify the result payload.

    Initializes model weights, splits the MNIST data with a split
    hyperparameter of 1, runs a training job on a fresh DMLRunner, and
    asserts the job succeeds with well-formed weights/omega/train_stats.
    """
    model_json = make_model_json()
    hyperparams = make_hyperparams(split=1)
    runner = DMLRunner(config_manager)
    initialize_job = make_initialize_job(model_json)
    initial_weights = runner.run_job(initialize_job).results['weights']
    split_job = make_split_job(model_json, mnist_filepath)
    job_results = runner.run_job(split_job)
    session_filepath = job_results.results['session_filepath']
    datapoint_count = job_results.results['datapoint_count']
    train_job = make_train_job(
        model_json,
        initial_weights,
        hyperparams,
        session_filepath,
        datapoint_count,
    )
    result = runner.run_job(train_job)
    assert result.status == 'successful'
    results = result.results
    new_weights = results['weights']
    omega = results['omega']
    train_stats = results['train_stats']
    # FIX: the original used `is` to compare against a string (.name),
    # which relies on CPython string interning; equality is the correct
    # and portable check.
    assert result.job.job_type == JobTypes.JOB_TRAIN.name
    # FIX: isinstance instead of exact-type equality (idiomatic, and
    # tolerant of subclasses such as numpy scalar types).
    assert isinstance(new_weights, list)
    assert isinstance(new_weights[0], np.ndarray)
    assert isinstance(omega, (int, float))
    assert isinstance(train_stats, dict)