def load_best_model(experiment_label, step):
    """ Load the blueprints generated during an experiment step
    and build the Keras model from the top scoring blueprint """
    # Note: this must call the step-level loader; load_experiment_best_blueprint
    # (defined below) does not take a step argument.
    blueprint = load_experiment_step_best_blueprint(
        experiment_label,
        step,
        Environment())
    return ModelBuilder().build(
        blueprint,
        cpu_device(),
        compile_model=False)
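# Hedged usage sketch, not part of the library: rebuild the best Keras model
# found during step 0 of a finished experiment. The experiment label below is
# a hypothetical placeholder; any completed experiment label works.
def example_load_best_model():
    model = load_best_model('test__reuters_experiment', step=0)
    model.summary()  # the model is built but not compiled (compile_model=False)
    return model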
def load_experiment_step_best_blueprint(experiment_label, step,
                                        environment=Environment()):
    blueprints = load_experiment_blueprints(
        experiment_label,
        step,
        environment)
    if len(blueprints) == 0:
        return None
    return list(sorted(blueprints, key=lambda b: -b.score[0]))[0]
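# Minimal sketch, assuming the functions above are in scope: inspect the top
# score reached at a given step. Blueprints are ranked by the first element of
# their score sequence, highest first. The label, step and data_dir are
# hypothetical placeholders.
def example_inspect_step_score():
    blueprint = load_experiment_step_best_blueprint(
        'test__reuters_experiment',
        0,
        environment=Environment(data_dir='/tmp/example'))
    if blueprint is not None:
        print('best step score: %f' % blueprint.score[0])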
def __init__(self, label,
             layout=None,
             training=None,
             batch_iterator=None,
             test_batch_iterator=None,
             environment=None,
             parameters=None,
             resume=False):
    self.label = label
    self.layout = layout
    self.training = training
    self.batch_iterator = batch_iterator
    self.test_batch_iterator = test_batch_iterator
    self.environment = environment or Environment()
    self.parameters = parameters or ExperimentParameters()
    # Store the resume flag so search runners can check whether to restart
    # from an existing checkpoint.
    self.resume = resume
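# Minimal construction sketch, assuming Layout, Training and CpuEnvironment
# are in scope as in the test below; the label, n_jobs and data_dir values
# are illustrative only.
def example_create_experiment(layout, training, batch_iterator,
                              test_batch_iterator):
    return Experiment(
        'example_experiment',
        layout=layout,
        training=training,
        batch_iterator=batch_iterator,
        test_batch_iterator=test_batch_iterator,
        environment=CpuEnvironment(n_jobs=1, data_dir='/tmp/example'),
        resume=False)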
def load_experiment_best_blueprint(experiment_label, environment=Environment()):
    experiment = Experiment(experiment_label, environment=environment)
    last_step, _ = load_experiment_checkpoint(experiment)
    blueprints = list()
    # last_step is the index of the last completed step (see the checkpoint
    # assertion in the test below), so include it in the scan.
    for step in range(last_step + 1):
        blueprint = load_experiment_step_best_blueprint(
            experiment_label,
            step,
            environment=environment)
        if blueprint:
            blueprints.append(blueprint)
    if len(blueprints) == 0:
        return None
    return list(sorted(blueprints, key=lambda b: -b.score[0]))[0]
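# Minimal sketch, assuming a completed experiment: the call below scans every
# checkpointed step and returns the single highest scoring blueprint across
# all of them, or None when no step produced one. The label and data_dir are
# hypothetical placeholders.
def example_load_overall_best():
    blueprint = load_experiment_best_blueprint(
        'test__reuters_experiment',
        environment=Environment(data_dir='/tmp/example'))
    if blueprint is None:
        return None
    return ModelBuilder().build(blueprint, cpu_device(), compile_model=False)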
def test_ga_search(self):
    with tempfile.TemporaryDirectory() as tmp_dir:
        epoch = 3
        generations = 2
        batch_size = 50
        batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(
            batch_size,
            1000)
        layout = Layout(
            input_size=1000,
            output_size=nb_classes,
            output_activation='softmax')
        training = Training(
            objective=Objective('categorical_crossentropy'),
            optimizer=Optimizer(optimizer='Adam'),
            metric=Metric('categorical_accuracy'),
            stopping=EpochStoppingCondition(epoch),
            batch_size=batch_size)
        experiment_parameters = ExperimentParameters(use_default_values=False)
        experiment_parameters.layout_parameter('rows', 1)
        experiment_parameters.layout_parameter('blocks', 1)
        experiment_parameters.layout_parameter('layers', 1)
        experiment_parameters.layer_parameter(
            'Dense.output_dim',
            int_param(10, 500))
        experiment_parameters.all_search_parameters(True)
        experiment_label = 'test__reuters_experiment'
        experiment = Experiment(
            experiment_label,
            layout,
            training,
            batch_iterator,
            test_batch_iterator,
            CpuEnvironment(n_jobs=2, data_dir=tmp_dir),
            parameters=experiment_parameters)
        run_ga_search_experiment(
            experiment,
            population_size=2,
            generations=generations)
        self.assertTrue(
            isfile(experiment.get_log_filename()),
            'Should have written the experiment log')
        self.assertTrue(
            isfile(experiment.get_step_data_filename(0)),
            'Should have written the step data')
        self.assertTrue(
            isfile(experiment.get_step_log_filename(0)),
            'Should have written the step log')
        blueprints = load_experiment_blueprints(
            experiment_label,
            0,
            Environment(data_dir=tmp_dir))
        self.assertTrue(
            len(blueprints) > 0,
            'Should have saved/loaded blueprints')
        model = ModelBuilder().build(blueprints[0], cpu_device())
        disable_sysout()
        model.fit_generator(
            generator=batch_iterator,
            samples_per_epoch=batch_iterator.samples_per_epoch,
            nb_epoch=5,
            validation_data=test_batch_iterator,
            nb_val_samples=test_batch_iterator.sample_count)
        score = model.evaluate_generator(
            test_batch_iterator,
            val_samples=test_batch_iterator.sample_count)
        self.assertTrue(score[1] > 0, 'Should have valid score')
        step, population = load_experiment_checkpoint(experiment)
        self.assertEqual(
            generations - 1,
            step,
            'Should have loaded checkpoint')
        self.assertIsNotNone(population, 'Should have loaded checkpoint')
        blueprint = load_experiment_best_blueprint(
            experiment.label,
            environment=CpuEnvironment(n_jobs=2, data_dir=tmp_dir))
        model = ModelBuilder().build(
            blueprint,
            cpu_device(),
            compile_model=False)
        self.assertIsNotNone(
            model,
            'Should have loaded and built best model from experiment')
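# To run this test on its own (module and class names are assumptions about
# where this TestCase lives, e.g. a GaSearchTest class in ga_test.py):
#
#     python -m unittest ga_test.GaSearchTest.test_ga_search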
def load_experiment_blueprints(experiment_label, step, environment=Environment()):
    experiment = Experiment(experiment_label, environment=environment)
    data_filename = experiment.get_step_data_filename(step)
    # Step data is a pickled list of the blueprints evaluated during that step.
    with open(data_filename, 'rb') as data_file:
        return pickle.load(data_file)
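# Minimal sketch: load every blueprint evaluated at step 0 and print their
# ranking scores, best first. The label and data_dir are hypothetical
# placeholders.
def example_list_step_blueprints():
    blueprints = load_experiment_blueprints(
        'test__reuters_experiment',
        0,
        environment=Environment(data_dir='/tmp/example'))
    for blueprint in sorted(blueprints, key=lambda b: -b.score[0]):
        print(blueprint.score[0])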