# Example #1
            'params': model.gp_layer.variational_parameters(),
            'lr': lr * 100
        },
        {
            'params': likelihood.parameters(),
            'lr': lr * 100
        },
    ],
                    lr=lr,
                    momentum=0.9,
                    nesterov=True,
                    weight_decay=0)

    # Variational ELBO objective for the GP layer; num_data is the total
    # number of training segments — presumably each training video (abnormal
    # and normal) contributes `no_segments` segments. TODO confirm against
    # the data-preparation code.
    mll = mlls.VariationalELBO(
        likelihood,
        model.gp_layer,
        num_data=int(
            (len(X_train_abnormal) + len(X_train_normal)) * no_segments))

    # Training diagnostics, populated later in the (not shown) training loop.
    losses = []             # total loss per iteration/epoch
    kl_z_loss = []          # presumably the KL(q(z) || p(z)) component — confirm in loop body
    kl_u_loss = []          # presumably the KL over inducing variables u — confirm in loop body
    exp_log_prob_loss = []  # presumably the expected log-likelihood ELBO term — confirm
    best_auc_eval = 0       # best AUC observed so far on the eval split
    best_auc_test = 0       # best AUC observed so far on the test split
    aucs_eval = []          # per-evaluation AUC history (eval split)
    aucs_test = []          # per-evaluation AUC history (test split)

    # Combined training pool: abnormal samples first, then normal, with the
    # matching video names concatenated in the same order.
    X_train = np.concatenate([X_train_abnormal, X_train_normal])
    videos = np.concatenate(
        [video_names_abnormal_train, video_names_normal_train])
    idx = list(range(len(X_train)))  # index list into X_train / videos (e.g. for shuffling)
# Example #2
# If you run this example without CUDA, I hope you like waiting!
# Move both the model and the likelihood onto the GPU when one is present.
if torch.cuda.is_available():
    model = model.cuda()
    likelihood = likelihood.cuda()

n_epochs = 1
lr = 0.1

# Parameter groups: the feature extractor gets weight decay, the GP
# hyperparameters train at 1% of the base rate, and the variational
# parameters and likelihood use the base learning rate unchanged.
param_groups = [
    {'params': model.feature_extractor.parameters(), 'weight_decay': 1e-4},
    {'params': model.gp_layer.hyperparameters(), 'lr': lr * 0.01},
    {'params': model.gp_layer.variational_parameters()},
    {'params': likelihood.parameters()},
]
optimizer = SGD(
    param_groups,
    lr=lr,
    momentum=0.9,
    nesterov=True,
    weight_decay=0,
)

# Drop the learning rate 10x at 50% and 75% of training.
milestones = [0.5 * n_epochs, 0.75 * n_epochs]
scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=0.1)

# Variational ELBO objective; num_data is the full training-set size so the
# KL term is scaled correctly under minibatching.
mll = mlls.VariationalELBO(likelihood, model.gp_layer, num_data=len(train_loader.dataset))

#
#def train(epoch):
#    model.train()
#    likelihood.train()
#
#    minibatch_iter = tqdm.tqdm(train_loader, desc=f"(Epoch {epoch}) Minibatch")
#    with gpytorch.settings.num_likelihood_samples(8):
#        for data, target in minibatch_iter:
#            if torch.cuda.is_available():
#                data, target = data.cuda(), target.cuda()
#            optimizer.zero_grad()
#            output = model(data)
#            loss = -mll(output, target)
#            loss.backward()