Example #1
def run_experiment(trX, trY, teX, teY, degree=2, degree2=2):
    x, y = torch.Tensor(trX), torch.Tensor(trY)
    tx, ty = torch.Tensor(teX), torch.Tensor(teY)
    sortedx, idxX = torch.sort(tx)
    sortedy = ty[idxX]

    laplace = Laplace()
    adaptive = Adaptive()
    gaussian = Gaussian()

    # linear regression
    stats = []

    # # robust linear regression
    # lr = PolyRegression(degree)
    # fit = train_regular(lr, x, y, gaussian, epoch=1000, learning_rate=1e-2, verbose=False)
    # res1 = fit(sortedx).detach().numpy().flatten() - sortedy.numpy().flatten()
    # data = dict()
    # data['model'] = 'LR+' + str(degree)
    # data['MSE'] = (res1 ** 2).mean()
    # data['MAE'] = (np.abs(res1)).mean()
    # data['likelihood'] = gaussian.loglikelihood(res1)
    # stats.append(data)

    # adaptive linear regression
    lr = PolyRegression(degree)
    fit, alpha, scale = train_adaptive(lr, x, y, epoch=1000, learning_rate=1e-2, verbose=False)
    res = fit(sortedx).view(-1) - sortedy
    data = dict()
    data['model'] = 'Adaptive+' + str(degree)
    data['MSE'] = (res ** 2).mean().item()
    data['MAE'] = res.abs().mean().item()
    data['likelihood'] = adaptive.loglikelihood(res, alpha, scale)
    stats.append(data)

    # locally adaptive linear regression
    lr = PolyRegression(degree)
    alpha_model = PolyRegression(degree2, init_zeros=True)
    scale_model = PolyRegression(degree2, init_zeros=True)
    fit, alpha_reg, scale_reg = train_locally_adaptive(lr, alpha_model, scale_model, x, y,
                                                       epoch=1000, learning_rate=1e-2, verbose=False)
    res = fit(sortedx).view(-1) - sortedy
    alphas = torch.exp(alpha_reg(sortedx).view(-1))
    scales = torch.exp(scale_reg(sortedx).view(-1))

    data = dict()
    data['model'] = 'LocalAdaptive+' + str(degree)
    data['MSE'] = (res ** 2).mean().item()
    data['MAE'] = res.abs().mean().item()
    data['likelihood'] = adaptive.loglikelihood(res, alphas, scales)
    stats.append(data)

    return pd.DataFrame(stats)
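
A hypothetical driver for Example #1, shown only as a usage sketch: the synthetic data, shapes, and noise model are assumptions, and run_experiment still relies on the project's PolyRegression / train_adaptive / train_locally_adaptive helpers being importable.

import numpy as np

# Synthetic 1-D heteroscedastic data (assumption), split into interleaved train/test halves.
rng = np.random.default_rng(0)
X = np.sort(rng.uniform(-3, 3, size=400)).astype(np.float32)
noise = rng.normal(scale=0.1 + 0.3 * np.abs(X))      # noise grows with |x|
Y = (0.5 * X ** 2 - X + noise).astype(np.float32)

trX, teX = X[::2], X[1::2]
trY, teY = Y[::2], Y[1::2]

stats = run_experiment(trX, trY, teX, teY, degree=2)
print(stats)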
Example #2
def run_experiment(trX, trY, teX, teY, degree=2):
    x, y = torch.Tensor(trX), torch.Tensor(trY)
    tx, ty = torch.Tensor(teX), torch.Tensor(teY)
    sortedx, idxX = torch.sort(x)
    sortedy = y[idxX]

    gaussian = Gaussian()
    laplace = Laplace()
    adaptive = Adaptive()

    # linear regression
    stats = []

    lr = PolyRegression(degree)
    fit = train_regular(lr,
                        x,
                        y,
                        gaussian,
                        epoch=1000,
                        learning_rate=1e-2,
                        verbose=False)
    res1 = fit(sortedx).detach().numpy().flatten() - sortedy.numpy().flatten()
    data = dict()
    data['model'] = 'LR+' + str(degree)
    data['likelihood'] = gaussian.loglikelihood(res1)
    stats.append(data)

    # robust linear regression
    lr = PolyRegression(degree)
    fit = train_regular(lr,
                        x,
                        y,
                        laplace,
                        epoch=1000,
                        learning_rate=1e-2,
                        verbose=False)
    res1 = fit(sortedx).detach().numpy().flatten() - sortedy.numpy().flatten()
    data = dict()
    data['model'] = 'RobustLR+' + str(degree)
    data['likelihood'] = laplace.loglikelihood(res1)
    stats.append(data)

    # adaptive linear regression
    lr = PolyRegression(degree)
    fit, alpha, scale = train_adaptive(lr,
                                       x,
                                       y,
                                       epoch=1000,
                                       learning_rate=1e-2,
                                       verbose=False)
    res = fit(sortedx).view(-1) - sortedy
    data = dict()
    data['model'] = 'Adaptive+' + str(degree)
    data['likelihood'] = adaptive.loglikelihood(res, alpha, scale)
    stats.append(data)

    # locally adaptive linear regression
    lr = PolyRegression(degree)
    alpha_model = PolyRegression(2, init_zeros=True)
    scale_model = PolyRegression(2, init_zeros=True)
    fit, alpha_reg, scale_reg = train_locally_adaptive(lr,
                                                       alpha_model,
                                                       scale_model,
                                                       x,
                                                       y,
                                                       epoch=1000,
                                                       learning_rate=1e-2,
                                                       verbose=False)
    res = fit(sortedx).view(-1) - sortedy
    alphas = torch.exp(alpha_reg(sortedx).view(-1))
    scales = torch.exp(scale_reg(sortedx).view(-1))

    data = dict()
    data['model'] = 'LocalAdaptive+' + str(degree)
    data['likelihood'] = adaptive.loglikelihood(res, alphas, scales)
    stats.append(data)

    # gaussian process regression
    likelihood = gpytorch.likelihoods.GaussianLikelihood()
    model = ExactGPModel(x, y, likelihood)

    model.train()
    likelihood.train()

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
    training_iter = 100
    for _ in tqdm(range(training_iter)):
        optimizer.zero_grad()
        output = model(x)
        loss = -mll(output, y)
        loss.backward()
        optimizer.step()

    model.eval()
    likelihood.eval()
    with torch.no_grad(), gpytorch.settings.fast_pred_var():
        observed_pred = likelihood(model(sortedx))
    # ExactMarginalLogLikelihood averages over the inputs -> multiply by len to recover the total MLL
    data = dict()
    data['model'] = 'GPR'
    data['likelihood'] = (mll(observed_pred, sortedy) *
                          len(sortedy)).detach().numpy()
    stats.append(data)

    return pd.DataFrame(stats)
    # # 2-D linear regression + laplace
    robust_lr2 = PolyRegression(2)
    robust_fit2 = train_regular(robust_lr2,
                                x,
                                y,
                                laplace,
                                epoch=1000,
                                learning_rate=1e-2,
                                verbose=False)

    # 2-D linear regression + adaptive
    ada_lr2 = PolyRegression(2)
    ada_fit2, alpha, scale = train_adaptive(ada_lr2,
                                            x,
                                            y,
                                            epoch=1000,
                                            learning_rate=1e-2,
                                            verbose=False)

    ada_lr2 = PolyRegression(2)
    alpha_model = PolyRegression(2, init_zeros=True)
    scale_model = PolyRegression(2, init_zeros=True)

    ada_fit22, alpha_reg, scale_reg = train_locally_adaptive(
        ada_lr2,
        alpha_model,
        scale_model,
        x,
        y,
        epoch=1000,
        learning_rate=1e-2,
        verbose=False)
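
Example #2 assumes an ExactGPModel class defined elsewhere in the project. A minimal gpytorch sketch of such a model follows; the constant mean and scaled RBF kernel are assumptions, not necessarily the project's actual choices.

import gpytorch

class ExactGPModel(gpytorch.models.ExactGP):
    # Hypothetical sketch: exact GP with a constant mean and a scaled RBF kernel.
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)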
Example #4
def run_experiment(trX, trY, teX, teY, degree=2):
    x, y = torch.Tensor(trX), torch.Tensor(trY)
    tx, ty = torch.Tensor(teX), torch.Tensor(teY)
    sortedx, idxX = torch.sort(tx)
    sortedy = ty[idxX]

    gaussian = Gaussian()
    laplace = Laplace()
    adaptive = Adaptive()
    # linear regression
    stats = []

    lr = PolyRegression(degree)
    fit = train_regular(lr, x, y, gaussian, epoch=100, learning_rate=1e-2, verbose=False)
    data = dict()
    data['model'] = 'LR+' + str(degree)
    param = fit.beta.weight.data.numpy().flatten()
    bias = fit.beta.bias.data.numpy().flatten()
    data['param0'] = param[0]
    data['param1'] = param[1]
    data['param2'] = bias[0]

    stats.append(data)

    # robust linear regression
    lr = PolyRegression(degree)
    fit = train_regular(lr, x, y, laplace, epoch=100, learning_rate=1e-2, verbose=False)
    data = dict()
    data['model'] = 'RobustLR+' + str(degree)
    param = fit.beta.weight.data.numpy().flatten()
    bias = fit.beta.bias.data.numpy().flatten()
    data['param0'] = param[0]
    data['param1'] = param[1]
    data['param2'] = bias[0]

    stats.append(data)

    # adaptive linear regression
    lr = PolyRegression(degree)
    fit, alpha, scale = train_adaptive(lr, x, y, epoch=100, learning_rate=1e-2, verbose=False)
    data = dict()
    data['model'] = 'Adaptive+' + str(degree)
    param = fit.beta.weight.data.numpy().flatten()
    bias = fit.beta.bias.data.numpy().flatten()
    data['param0'] = param[0]
    data['param1'] = param[1]
    data['param2'] = bias[0]
    stats.append(data)

    # locally adaptive linear regression
    lr = PolyRegression(degree)
    alpha_model = PolyRegression(2, init_zeros=True)
    scale_model = PolyRegression(2, init_zeros=True)
    fit, alpha_reg, scale_reg = train_locally_adaptive(lr, alpha_model, scale_model, x, y,
                                                       epoch=500, learning_rate=1e-2, verbose=False)
    data = dict()
    data['model'] = 'LocalAdaptive+' + str(degree)
    param = fit.beta.weight.data.numpy().flatten()
    bias = fit.beta.bias.data.numpy().flatten()
    data['param0'] = param[0]
    data['param1'] = param[1]
    data['param2'] = bias[0]
    stats.append(data)

    # modal regression
    ml = ModalLinearRegression(kernel="gaussian", poly=degree, bandwidth=1)
    ml.fit(x.numpy().reshape(len(x), -1), y.numpy().reshape(-1))
    yml = ml.predict(sortedx.detach().numpy().reshape(len(sortedx), -1))
    data = dict()
    data['model'] = 'Modal'
    param = ml.coef_.flatten()
    bias = ml.intercept_.flatten()
    data['param0'] = param[0]
    data['param1'] = param[1]
    data['param2'] = bias[0]
    stats.append(data)

    return pd.DataFrame(stats)
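
The examples read fitted coefficients through fit.beta.weight and fit.beta.bias, which suggests PolyRegression wraps a single linear layer over powers of x. A hypothetical sketch consistent with that usage is shown below; the project's actual feature construction may differ.

import torch
import torch.nn as nn

class PolyRegression(nn.Module):
    # Hypothetical sketch: linear layer over the first `degree` powers of x,
    # exposed as `self.beta` so weight/bias can be read as in the examples above.
    def __init__(self, degree, init_zeros=False):
        super().__init__()
        self.degree = degree
        self.beta = nn.Linear(degree, 1)
        if init_zeros:
            nn.init.zeros_(self.beta.weight)
            nn.init.zeros_(self.beta.bias)

    def forward(self, x):
        x = x.view(-1, 1)
        feats = torch.cat([x ** (i + 1) for i in range(self.degree)], dim=1)
        return self.beta(feats)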