def evaluate(config, output):
    """
    """

    m = load_model(get_model(config))

    data_set = get_input_data(config)
    x = load_data(data_set, 'data_x')
    y = load_data(data_set, 'data_y') if 'data_y' in data_set else None

    assert 'scoring' in config, "config must define 'scoring'"
    scoring = config['scoring']

    if y is None:
        results = {s: get_scorer(s)(m, x) for s in scoring}
    else:
        results = {s: get_scorer(s)(m, x, y) for s in scoring}

    # Residual ACF only makes sense when true targets are available.
    if 'errors_acf' in config and y is not None:
        errs = y - m.predict(x)
        acf_params = config['errors_acf']
        # acf is assumed to return the autocorrelations together with
        # the confidence level.
        acfs, conf_lvl = acf(errs, **acf_params)
        results['errors_acf'] = {
            'acfs': acfs,
            'conf_lvl': conf_lvl
        }

    save_json(results, os.path.join(output, m.name + '_eval.json'))
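The helpers used above are not defined in these snippets. A minimal sketch of the two data helpers, assuming the 'data_set' structure produced by the input-file builders below (paths to .npy arrays):

import numpy as np

def get_input_data(config):
    # Return the 'data_set' mapping from a loaded input file.
    return config['data_set']

def load_data(data_set, key):
    # Load the .npy array referenced under data_set[key]['path'].
    return np.load(data_set[key]['path'])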
Example #2
def create_cv_inputs(model, dt):
    """
    5-fold sliding window CV

    window = 0.8
    horizon = 0.2
    """

    n_folds = 5
    train_y = load_npy('data/%s_train_y.npy' % dt)
    train_len = len(train_y)

    inp = {
        "model": "results/optimize/%s_%s.json" % (model, dt),
        "cv": {
            "method": "time_series",
            "params": calc_cv_params(train_len, n_folds),
            "max_threads": n_folds,
            "scoring": ["r2", "rmse", "mse", "mae"]
        },
        'data_set': {
            "data_x": {
                "path": "data/%s_train_x.npy" % dt,
            },
            "data_y": {
                "path": "data/%s_train_y.npy" % dt,
            }
        }
    }
    save_json(inp, 'inputs/%s_%s_cv.json' % (model, dt))
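calc_cv_params is assumed here; a hypothetical sketch consistent with the docstring, where each fold trains on a window covering 80% of its span, is scored on the following 20% (the horizon), and consecutive folds slide forward so they tile the training set:

def calc_cv_params(train_len, n_folds, window=0.8, horizon=0.2):
    # Hypothetical: size one fold span so that n_folds sliding windows,
    # each stepping forward by the horizon length, exactly cover
    # train_len (assumes window + horizon == 1).
    span = int(train_len / (1 + (n_folds - 1) * horizon))
    window_len = int(span * window)
    horizon_len = span - window_len
    return [
        {
            "train": [i * horizon_len, i * horizon_len + window_len],
            "test": [i * horizon_len + window_len,
                     i * horizon_len + window_len + horizon_len],
        }
        for i in range(n_folds)
    ]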
Example #3
def create_opt_inputs(model, dt):
    """
    Build the hyper-parameter optimization input file for a
    model/dataset pair.
    """
    inp = {
        "hp_space": load_json('models/%s_%s.json' % (model, dt)),
        "opt": {
            "method": {
                "class": "cmaes",
                "params": opt_params[dt]
            },
            "obj_fn": {
                "cv": {
                    "method": "split",
                    "params": {
                        "test_size": 0.2
                    },
                    "scoring": "mse"
                }
            },
            "max_threads": 24
        },
        'data_set': {
            "data_x": {
                "path": "data/%s_train_x.npy" % dt,
            },
            "data_y": {
                "path": "data/%s_train_y.npy" % dt,
            }
        }
    }
    save_json(inp, 'inputs/%s_%s_optimize.json' % (model, dt))
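opt_params is a module-level dict not shown in these snippets; presumably it maps each dataset tag to CMA-ES settings. A purely illustrative placeholder (keys and values are assumptions):

opt_params = {
    # Illustrative only: per-dataset CMA-ES settings.
    'daily': {'max_iters': 100, 'pop_size': 16},
    'hourly': {'max_iters': 200, 'pop_size': 24},
}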
Example #4
def create_pred_inputs(model, dt):
    """
    Build the prediction input file (test features only).
    """
    inp = {
        "model": "results/fit/%s_%s.json" % (model, dt),
        'data_set': {
            "data_x": {
                "path": "data/%s_test_x.npy" % dt,
            }
        }
    }
    save_json(inp, 'inputs/%s_%s_predict.json' % (model, dt))
Example #5
def create_fit_inputs(model, dt):
    """
    Build the final fit input file from the optimization result.
    """
    inp = {
        'model': "results/optimize/%s_%s.json" % (model, dt),
        'data_set': {
            'train_x': {
                'path': "data/%s_train_x.npy" % dt,
            },
            'train_y': {
                'path': "data/%s_train_y.npy" % dt,
            }
        }
    }
    save_json(inp, 'inputs/%s_%s_fit.json' % (model, dt))
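save_json and load_json are used throughout but not shown. A minimal sketch, assuming plain JSON files whose parent directories may not exist yet:

import json
import os

def save_json(obj, path):
    # Pretty-print obj as JSON, creating parent directories as needed.
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    with open(path, 'w') as f:
        json.dump(obj, f, indent=2)

def load_json(path):
    with open(path) as f:
        return json.load(f)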
Example #6
def score(config, output):
    """
    Score a fitted model on a data set with the model's own loss
    function and save the result as JSON.
    """

    m = load_model(get_model(config))

    data_set = get_input_data(config)
    x = load_data(data_set, 'data_x')
    y = load_data(data_set, 'data_y') if 'data_y' in data_set else None

    if y is None:
        s = m.score(x)
    else:
        s = m.score(x, y)

    result = {m.get_loss_func(): s}

    save_json(result, os.path.join(output, m.name + '_score.json'))
Example #7
def create_eval_inputs(model, dt):
    """
    Build the evaluation input file: test data, scoring metrics, and
    residual ACF settings.
    """
    inp = {
        "model": "results/fit/%s_%s.json" % (model, dt),
        'data_set': {
            "data_x": {
                "path": "data/%s_test_x.npy" % dt,
            },
            "data_y": {
                "path": "data/%s_test_y.npy" % dt,
            }
        },
        'scoring': ['rmse', 'mse', 'mae', 'r2'],
        'errors_acf': {
            "nlags": 30
        }
    }

    save_json(inp, 'inputs/%s_%s_eval.json' % (model, dt))
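Taken together, the builders above generate one input file per pipeline stage (optimize, cross-validate, fit, evaluate, predict). A usage sketch with hypothetical model and dataset names:

for model in ('mlp', 'svr'):          # hypothetical model names
    for dt in ('daily', 'hourly'):    # hypothetical dataset tags
        create_opt_inputs(model, dt)
        create_cv_inputs(model, dt)
        create_fit_inputs(model, dt)
        create_eval_inputs(model, dt)
        create_pred_inputs(model, dt)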