# --- Example 1 (score: 0) ---
 def fit(cls, space=dflt, max_evals=100, mongo=None):
     """Run a TPE hyper-parameter search over *space* and return the best point.

     If *mongo* is given it is a mapping with 'url' and 'exp' keys selecting a
     MongoDB-backed trial store (for distributed hyperopt workers); otherwise
     hyperopt keeps trials in memory (trials=None).
     """
     trials = mongoexp.MongoTrials(mongo['url'], exp_key=mongo['exp']) if mongo else None
     # fmin drives the search, calling cls.objective once per candidate.
     return hpo.fmin(cls.objective,
                     space,
                     algo=hpo.tpe.suggest,
                     max_evals=max_evals,
                     trials=trials)
    # NOTE(review): fragment of an objective function — the enclosing `def` is
    # not visible here; `model`, `x_train`, `params`, etc. presumably come
    # from it.
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    # hyperopt minimizes 'loss', so accuracy is negated to maximize it.
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    # Serialize the trained model via a temporary .h5 file so its bytes can be
    # stored in the MongoDB trials document.
    # NOTE(review): tempfile._get_candidate_names() is a private API and the
    # temp file is never removed — consider tempfile.mkstemp instead.
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out


if __name__ == "__main__":
    # Shared MongoDB trial store; credentials are redacted placeholders.
    trials = mongoexp.MongoTrials('mongo://*****:*****@mongodb.host:27017/jobs/jobs', exp_key='mnist_test')
    # Original author flagged keep_temp=True as important — presumably so
    # remote hyperopt workers can read the hyperas-generated temp module.
    search_kwargs = dict(
        model=create_model,
        data=data,
        algo=tpe.suggest,
        max_evals=10,
        trials=trials,
        keep_temp=True,
    )
    best_run, best_model = optim.minimize(**search_kwargs)
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
# --- Example 3 (score: 0) ---
    # Write a human-readable report of the best hyperopt run to `outf`
    # (opened earlier in the enclosing script).
    outf.write("Evaluation of best performing model:\n")  # fixed typo: was "Evalutation"
    outf.write(str(best_eval) + "\n")
    #outf.write("Best performing model chosen hyper-parameters:\n")
    #outf.write(str(real_param_values) + "\n")
    outf.write("Best performing model chosen hyper-parameters index:\n")
    outf.write(str(best_run) + "\n")
    outf.write("Summary of best model:\n")
    # Redirect Keras' summary printer into the report file line by line.
    best_model.summary(print_fn=lambda x: outf.write(x + '\n'))
 
if __name__=="__main__":
    # Entry point: load data, run a MongoDB-backed hyperas search, then
    # evaluate the best model on the held-out test set.
    #x_train,y_train,x_validate,y_validate,x_test,y_test=data_from_bed()
    x_train,y_train,x_validate,y_validate,x_test,y_test=data()
    #model = create_model0(x_train,y_train,x_validate,y_validate,x_test,y_test)

    print("seed=1234")
    # Trials are stored in MongoDB so multiple hyperopt-mongo-worker
    # processes can evaluate candidates in parallel.
    trials = mongoexp.MongoTrials('mongo://localhost:12345/foo_db/jobs', exp_key='exp1')

    # rseed fixes the TPE random seed; keep_temp preserves the generated
    # hyperas module so remote workers can import it.
    best_run,best_model=optim.minimize(model=create_model,
                                       data=data,
                                       algo=tpe.suggest,
                                       max_evals=100,
                                       trials=trials,
                                       rseed=1234,
                                       keep_temp=True)
    #from hyperas.utils import eval_hyperopt_space
    #space = get_space()
    #real_param_values = eval_hyperopt_space(space, best_run)

    best_eval = best_model.evaluate(x_test, y_test)

    # NOTE(review): file is opened without a context manager and is not
    # closed in this fragment — the report writes presumably follow
    # elsewhere; prefer `with open(...)`.
    outf=open('hyperas.results.txt','w')
# --- Example 4 (score: 0) ---
    # NOTE(review): fragment of an objective function — the enclosing `def` is
    # not visible; `model`, `X_test_aug`, `params` presumably come from it.
    score = model.evaluate(X_test_aug, y_test)

    # score[0] is the loss; score[2] presumably a metric (e.g. accuracy),
    # negated because hyperopt minimizes 'loss' — TODO confirm metric index.
    out = {
        'loss': -score[2],
        'score': score[0],
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    # NOTE(review): tempfile._get_candidate_names() is a private API and the
    # temp file is never deleted — consider tempfile.mkstemp instead.
    temp_name = tempfile.gettempdir() + '/' + next(
        tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out


if __name__ == "__main__":
    # Connect to the shared MongoDB job store used by hyperopt workers.
    trials = mongoexp.MongoTrials('mongo://localhost:27017/jobs/jobs',
                                  exp_key='exp1')
    # Original author flagged keep_temp=True as important — presumably so
    # distributed workers can locate the hyperas-generated temp module.
    search_kwargs = dict(
        model=create_model,
        data=data,
        algo=tpe.suggest,
        max_evals=10,
        trials=trials,
        keep_temp=True,
    )
    best_run, best_model = optim.minimize(**search_kwargs)
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
            # NOTE(review): fragment — the opening of this dump call (and the
            # enclosing `def`) is not visible in this view.
            'pred_y': pred_y
        }, pkl_out)
    # Assumes metric_results is an ordered mapping whose first two values are
    # (rmse, duration) — TODO confirm the ordering is guaranteed.
    print("RMSE: {} in {}s".format(*metric_results.values()))

    # Result document stored by hyperopt in MongoDB; binary payloads are
    # wrapped in bson Binary so the figure/predictions survive serialization.
    return {
        'loss': metric_results['rmse'],
        'status': STATUS_OK,
        'model': params,
        'metrics': metric_results,
        'figure': Binary(bytes_out.getvalue()),
        'pred_data': Binary(pkl_out.getvalue())
    }


if __name__ == '__main__':
    import sys

    print(sys.argv)
    # First CLI argument is the MongoDB connection URI for the trial store.
    trials = mongoexp.MongoTrials(sys.argv[1], exp_key='htm_sm_115_proper')

    minimize_args = dict(model=create_model, data=data, algo=tpe.suggest,
                         max_evals=200, keep_temp=True, trials=trials)
    best_run, best_model = optim.minimize(**minimize_args)
    print("Best performing model chosen hyper-parameters:")

    print(best_run)
    # json_util (bson) handles Mongo-specific types in the best-model document.
    print(json_util.dumps(best_model, indent=4))
# --- Example 6 (score: 0) ---
        # NOTE(review): fragment — these keys extend a result dict whose
        # opening lines (e.g. 'loss') are outside this view.
        'status': STATUS_OK,
        'model': model._updated_config(),
        'metrics': {
            'rmse': rmse_result,
            # 'geh': geh(unscaled_pred, unscaled_test_y)[0],
            # Wall-clock duration of the trial in seconds.
            'duration': (finish - start).total_seconds()
        },
        'figure': fig_b64,
        'params': params
    }


if __name__ == '__main__':
    # trials = Trials()
    import sys
    # sys.argv[1] is the MongoDB URI of the shared hyperopt trial store.
    trials = mongoexp.MongoTrials(sys.argv[1], exp_key='lstm_3_layer_115_2')

    best_run, best_model = optim.minimize(model=create_model, data=data,
                                          algo=tpe.suggest, max_evals=300,
                                          trials=trials)
    # Report the best trial's result document, minus the bulky binary fields.
    summary = {key: value
               for key, value in trials.best_trial['result'].items()
               if key not in ('model', 'figure')}
    print("BEST_RUN", json.dumps(summary, indent=4))