Example #1
import copy
import itertools

import pytest

from creme import datasets
from creme import linear_model as lm
from creme import optim


@pytest.mark.parametrize(
    'lm, dataset',
    [
        pytest.param(
            lm(optimizer=copy.deepcopy(optimizer), initializer=copy.deepcopy(initializer)),
            dataset,
            id=f'{lm.__name__} - {optimizer} - {initializer}'
        )
        for lm, dataset in [
            (lm.LinearRegression, datasets.TrumpApproval()),
            (lm.LogisticRegression, datasets.Bananas())
        ]
        for optimizer, initializer in itertools.product(
            [
                optim.AdaBound(),
                optim.AdaDelta(),
                optim.AdaGrad(),
                optim.AdaMax(),
                optim.Adam(),
                optim.AMSGrad(),
                # TODO: check momentum optimizers
                # optim.Momentum(),
                # optim.NesterovMomentum(),
                optim.RMSProp(),
                optim.SGD()
            ],
            [
                optim.initializers.Zeros(),
                optim.initializers.Normal(mu=0, sigma=1, seed=42)
            ]
        )
    ]
)
@pytest.mark.slow
def test_finite_differences(lm, dataset):
    """Checks the gradient of a linear model via finite differences.
    References
    ----------
    [^1]: [How to test gradient implementations](https://timvieira.github.io/blog/post/2017/04/21/how-to-test-gradient-implementations/)
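The test body is truncated above. Per the linked reference, the technique is to compare the analytic gradient against a central finite difference, (loss(w + eps) - loss(w - eps)) / (2 * eps). Here is a standalone sketch of that check on a one-weight squared loss; it illustrates the idea only and is not the creme test body:

# Central finite difference vs. analytic gradient for a squared loss.
def loss(w, x, y):
    return (w * x - y) ** 2

def gradient(w, x, y):
    # Analytic derivative of the loss with respect to w.
    return 2 * (w * x - y) * x

w, x, y, eps = 0.5, 3.0, 1.0, 1e-6
numeric = (loss(w + eps, x, y) - loss(w - eps, x, y)) / (2 * eps)
assert abs(numeric - gradient(w, x, y)) < 1e-6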
Example #2
FTRL_l2 = config['FTRL_l2']

if (opt == "AdaBound"):
    optimizer = optim.AdaBound(lr, beta_1, beta_2, eps, gamma, final_lr)
elif (opt == "AdaDelta"):
    optimizer = optim.AdaDelta(rho, eps)
elif (opt == "AdaGrad"):
    optimizer = optim.AdaGrad(lr, eps)
elif (opt == "Adam"):
    optimizer = optim.Adam(lr, beta_1, beta_2, eps)
elif (opt == "FTRLProximal"):
    optimizer = optim.FTRLProximal(alpha, beta, l1, l2)
elif (opt == "Momentum"):
    optimizer = optim.Momentum(lr, rho)
elif (opt == "RMSProp"):
    optimizer = optim.RMSProp(lr, rho, eps)
elif (opt == "VanillaSGD"):
    optimizer = optim.VanillaSGD(lr)
elif (opt == "NesterovMomentum"):
    optimizer = optim.NesterovMomentum(lr, rho)
else:
    optimizer = None

output = {}

while True:

    # Wait for the next request on stdin.
    data = input()

    if init == 0:
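The if/elif chain above maps a configuration string to a creme optimizer. A table-driven sketch of the same dispatch follows; the sample hyper-parameter values stand in for the `config` lookups and are illustrative assumptions, not values from the original script:

from creme import optim

# Illustrative values; the script above reads these from `config`.
lr, rho, eps = 0.01, 0.9, 1e-8
beta_1, beta_2 = 0.9, 0.999
gamma, final_lr = 1e-3, 0.1
alpha, beta, l1, l2 = 0.05, 1.0, 0.0, 1.0

# Zero-argument factories, so no optimizer is built until the name is resolved.
OPTIMIZERS = {
    "AdaBound": lambda: optim.AdaBound(lr, beta_1, beta_2, eps, gamma, final_lr),
    "AdaDelta": lambda: optim.AdaDelta(rho, eps),
    "AdaGrad": lambda: optim.AdaGrad(lr, eps),
    "Adam": lambda: optim.Adam(lr, beta_1, beta_2, eps),
    "FTRLProximal": lambda: optim.FTRLProximal(alpha, beta, l1, l2),
    "Momentum": lambda: optim.Momentum(lr, rho),
    "RMSProp": lambda: optim.RMSProp(lr, rho, eps),
    "VanillaSGD": lambda: optim.VanillaSGD(lr),
    "NesterovMomentum": lambda: optim.NesterovMomentum(lr, rho),
}

optimizer = OPTIMIZERS.get("RMSProp", lambda: None)()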
Example #3
    def __init__(self, data_collector):
        dc = data_collector
        data = dc.get_data_frame()
        metric = metrics.MAE()

        # delete NA examples
        data = data.dropna()

        # shuffle data
        X_y = data.sample(frac=1).reset_index(drop=True)

        data = X_y[['x', 'y', 'theta']].to_dict('records')
        target_1 = X_y[['sensor_1']]
        target_2 = X_y[['sensor_3']]
        target_3 = X_y[['sensor_5']]
        target_4 = X_y[['sensor_7']]

        print('constructing models')

        # Each sensor gets its own copy of the same pipeline: standard scaling
        # followed by a hedge ensemble of linear regressions, one per optimizer.
        def make_model():
            return Pipeline([
                ("scale", StandardScaler()),
                ("learn",
                 ensemble.HedgeRegressor([
                     linear_model.LinearRegression(optim.SGD()),
                     linear_model.LinearRegression(optim.RMSProp()),
                     linear_model.LinearRegression(optim.Adam())
                 ]))
            ])

        model_1 = make_model()
        model_2 = make_model()
        model_3 = make_model()
        model_4 = make_model()

        print('start training')

        for x, y_1, y_2, y_3, y_4 in zip(
                data,
                target_1.values,
                target_2.values,
                target_3.values,
                target_4.values,
        ):
            model_1, y_pred_1 = self._update_model(model_1, x, y_1)
            model_2, y_pred_2 = self._update_model(model_2, x, y_2)
            model_3, y_pred_3 = self._update_model(model_3, x, y_3)
            model_4, y_pred_4 = self._update_model(model_4, x, y_4)

        self.models = [model_1, model_2, model_3, model_4]

        print('done...')
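The `_update_model` helper called in the training loop is not shown in this snippet. A plausible minimal version under creme's test-then-train API is sketched below; the body is an assumption inferred from the call sites, which expect both the updated model and a prediction back, and from the length-1 target arrays produced by `.values`:

    def _update_model(self, model, x, y):
        # Hypothetical sketch: score the incoming example, then learn from it.
        y_pred = model.predict_one(x)
        model = model.fit_one(x, float(y[0]))  # fit_one returns the model
        return model, y_pred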
Example #4
import matplotlib.pyplot as plt

from creme import compose
from creme import datasets
from creme import linear_model
from creme import metrics
from creme import optim
from creme import preprocessing

models = {
    'Logistic regression w/ VanillaSGD': compose.Pipeline([
        preprocessing.StandardScaler(),
        linear_model.LogisticRegression(
            optimizer=optim.VanillaSGD(
                lr=optim.OptimalLR()
            )
        )
    ]),
    'Logistic regression w/ Adam': compose.Pipeline([
        preprocessing.StandardScaler(),
        linear_model.LogisticRegression(optim.Adam(optim.OptimalLR()))
    ]),
    'Logistic regression w/ AdaGrad': compose.Pipeline([
        preprocessing.StandardScaler(),
        linear_model.LogisticRegression(optim.AdaGrad(optim.OptimalLR()))
    ]),
    'Logistic regression w/ RMSProp': compose.Pipeline([
        preprocessing.StandardScaler(),
        linear_model.LogisticRegression(optim.RMSProp(optim.OptimalLR()))
    ])
}

fig, ax = plt.subplots(figsize=(10, 6))

for name, model in models.items():
    print(name)
    metric, train_duration, pred_duration = evaluate_model(
        X_y=datasets.fetch_electricity(),
        model=model,
        metric=metrics.Accuracy()
    )
    print(metric)
    print(f'Training duration: {train_duration}')
    print(f'Predicting duration: {pred_duration}')
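`evaluate_model` is not defined in the snippet. Judging from the call site, it runs progressive validation (predict on each incoming example, update the metric, then train on that example) and times the two phases separately. The sketch below is an assumption built on that reading; only the name, arguments, and return order come from the code above:

import time

def evaluate_model(X_y, model, metric):
    # Hypothetical progressive validation: test, score, then train, per example.
    train_duration = 0.
    pred_duration = 0.

    for x, y in X_y:
        tic = time.perf_counter()
        y_pred = model.predict_one(x)
        pred_duration += time.perf_counter() - tic

        metric.update(y_true=y, y_pred=y_pred)

        tic = time.perf_counter()
        model.fit_one(x, y)
        train_duration += time.perf_counter() - tic

    return metric, train_duration, pred_duration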