def prepare_model(series):
    # Convert series into supervised learning problem
    X = series.values
    supervised = timeseries_to_supervised(X, 1)
    print("*** Supervised Learning ***")
    print(supervised.head())

    # Convert time series to stationary
    differenced = difference(series, 1)
    print("*** Stationary Data Set ***")
    print(differenced.head())
    # invert transform
    inverted = list()
    for i in range(len(differenced)):
        value = inverse_difference(series, differenced[i], len(series) - i)
        inverted.append(value)
    inverted = pd.Series(inverted)
    print(inverted.head())

    # Scale time series
    scaler, scaled_X = scale(series)
    scaled_series = pd.Series(scaled_X[:, 0])
    print("*** Scaled Time Series ***")
    print(scaled_series.head())
    # invert transform
    inverted_X = scaler.inverse_transform(scaled_X)
    inverted_series = pd.Series(inverted_X[:, 0])
    print(inverted_series.head())
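
# The example above relies on a project-specific scale() helper that is not
# shown on this page. A minimal sketch of what it likely looks like, assuming
# the usual MinMaxScaler pattern (it must return the fitted scaler plus a 2-D
# scaled array so that scaled_X[:, 0] and scaler.inverse_transform() work as
# used above); the project's real implementation may differ:
from sklearn.preprocessing import MinMaxScaler

def scale(series):
    # reshape to a single-column 2-D array, as MinMaxScaler expects
    X = series.values.reshape(len(series), 1)
    # fit the scaler on the full series and map values into [-1, 1]
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(X)
    scaled_X = scaler.transform(X)
    return scaler, scaled_X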
Example #2
def get_org_img(img):
    # resize so the longer side is 1280, then convert to a normalized tensor
    scaled_img = scale(img, long_size=1280)
    scaled_img = Image.fromarray(scaled_img)
    scaled_img = scaled_img.convert('RGB')
    scaled_img = transforms.ToTensor()(scaled_img)
    scaled_img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])(scaled_img)
    return scaled_img
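
# scale(img, long_size=1280) above is another project-specific helper. A
# minimal sketch under the assumption that it resizes the image so its longer
# side equals long_size while preserving the aspect ratio (a common
# preprocessing step in text-detection pipelines); the original may differ:
import cv2

def scale(img, long_size=1280):
    # ratio that maps the longer side to long_size
    h, w = img.shape[:2]
    ratio = float(long_size) / max(h, w)
    # scale both dimensions by the same ratio to keep the aspect ratio
    return cv2.resize(img, None, fx=ratio, fy=ratio)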
Example #3
def _run(cache_path, clf_class, **best_params):

    # Load any previously computed results from the cache so finished
    # tile pairs are not recomputed
    if os.path.exists(cache_path):
        print("Cached")
        res = joblib.load(cache_path)
    else:
        res = {}

    # Every pairwise combination of the four tiles
    names = ["b261", "b277", "b278", "b360"]
    combs = list(it.combinations(names, 2))
    print(combs)

    data = dataset.load_tile_clf()

    # Fit one selector per tile pair, skipping pairs already in the cache
    for t0, t1 in combs:
        if (t0, t1) in res:
            print("!!! Skip", t0, t1)
            continue

        print(dt.datetime.now(), t0, t1)

        # Stack the two tiles into one frame and scale the features
        df = pd.concat([data[t0], data[t1]])
        unscaled, df, scl = dataset.scale(df)

        # Map each tile name to an integer class label
        cls = {name: idx for idx, name in enumerate(df.tile.unique())}
        print(cls)
        df["cls"] = df.tile.apply(cls.get)

        X = df[dataset.FEATURES].values
        y = df.cls.values

        # Recursive feature elimination with 5-fold cross-validation
        clf = clf_class(**best_params)
        sel = RFECV(clf, n_jobs=-1, cv=5)

        print("fit")
        sel.fit(X, y)

        print("storing")
        res[(t0, t1)] = {
            "selector": sel,
            "unscaled": unscaled,
            "scaled": df,
            "scl": scl
        }
        joblib.dump(res, cache_path, compress=3)

        print("-----------------------------")

    return res
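
# A hypothetical invocation of _run(); the cache path and the classifier
# hyper-parameters below are placeholders, not values taken from the project:
from sklearn.ensemble import RandomForestClassifier

res = _run("rfecv_tiles.pkl", RandomForestClassifier,
           n_estimators=500, random_state=42, n_jobs=-1)
for (t0, t1), entry in res.items():
    # support_ marks which of dataset.FEATURES the RFECV selector kept
    print(t0, t1, "selected features:", entry["selector"].support_.sum())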
def multiple_repeats(series):
    # transform data to be stationary
    raw_values = series.values
    diff_values = difference(raw_values, 1)
    # transform data to be supervised learning
    supervised = timeseries_to_supervised(diff_values, 1)
    supervised_values = supervised.values
    # split data into train and test-sets
    train, test = supervised_values[0:-12], supervised_values[-12:]
    # transform the scale of the data
    scaler, train_scaled, test_scaled = scale(train, test)

    # repeat experiment
    repeats = 30
    error_scores = []
    for r in range(repeats):
        # fit the model
        lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
        # forecast the entire training dataset to build up state for forecasting
        train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
        lstm_model.predict(train_reshaped, batch_size=1)
        # walk-forward validation on the test data
        predictions = []
        for i in range(len(test_scaled)):
            # make one-step forecast
            X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
            yhat = forecast_lstm(lstm_model, 1, X)
            # invert scaling
            yhat = invert_scale(scaler, X, yhat)
            # invert differencing
            yhat = inverse_difference(raw_values, yhat,
                                      len(test_scaled) + 1 - i)
            # store forecast
            predictions.append(yhat)
        # report performance
        rmse = sqrt(mse(raw_values[-12:], predictions))
        print('%d) Test RMSE: %.3f' % (r + 1, rmse))
        error_scores.append(rmse)

    # summarize results
    results = pd.DataFrame()
    results['rmse'] = error_scores
    print(results.describe())
    results.boxplot()
    plt.show()
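
# multiple_repeats() above and model() below both call scale(train, test) and
# invert_scale(scaler, X, yhat), which are not shown on this page. A minimal
# sketch of the pair they appear to assume, following the common MinMaxScaler
# walk-forward pattern; the original implementations may differ:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def scale(train, test):
    # fit the scaler on the training split only, then transform both splits
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    return scaler, scaler.transform(train), scaler.transform(test)

def invert_scale(scaler, X, yhat):
    # rebuild a full row [X..., yhat] so inverse_transform sees the same
    # number of columns the scaler was fitted on, then return the last value
    new_row = [x for x in X] + [yhat]
    array = np.array(new_row).reshape(1, len(new_row))
    inverted = scaler.inverse_transform(array)
    return inverted[0, -1]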
def model(series):
    # transform data to be stationary
    raw_values = series.values
    diff_values = difference(raw_values, 1)
    # transform data to be supervised learning
    supervised = timeseries_to_supervised(diff_values, 1)
    supervised_values = supervised.values
    # split data into train and test-sets
    train, test = supervised_values[0:-12], supervised_values[-12:]
    # transform the scale of the data
    scaler, train_scaled, test_scaled = scale(train, test)

    # fit the model
    lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
    # forecast the entire training dataset to build up state for forecasting
    train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
    lstm_model.predict(train_reshaped, batch_size=1)

    # walk-forward validation on the test data
    predictions = []
    for i in range(len(test_scaled)):
        # make one-step forecast
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
        yhat = forecast_lstm(lstm_model, 1, X)
        # invert scaling
        yhat = invert_scale(scaler, X, yhat)
        # invert differencing
        yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
        # store forecast
        predictions.append(yhat)
        expected = raw_values[len(train) + i + 1]
        print('Month=%d, Predicted=%f, Expected=%f' % (i + 1, yhat, expected))

    # report performance
    rmse = sqrt(mse(raw_values[-12:], predictions))
    print('Test RMSE: %.3f' % rmse)
    # line plot of observed vs predicted
    plt.plot(raw_values[-12:])
    plt.plot(predictions)
    plt.show()
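
# A hypothetical driver for model(); the CSV filename and column layout are
# placeholders for any monthly univariate series with a date column followed
# by a single value column:
import pandas as pd

df = pd.read_csv("monthly-sales.csv", header=0, parse_dates=[0], index_col=0)
series = df.iloc[:, 0]   # single value column -> pandas Series
model(series)            # walk-forward evaluation over the last 12 observations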