Example #1
import torch
from tqdm import tqdm

train_loss = []
train_acc = []
val_loss = []
val_acc = []
best_acc = 0
best_model = None
best_generator = None

for epoch in range(num_epochs):
    print("Epoch: {}/{}".format(epoch, num_epochs))

    # train
    model.train()
    pbar = tqdm(train_dataloader)  # progress bar over the training batches
    for batch in pbar:
        optim.zero_grad()
        x, y = batch
        x, y = x.to(device), y.to(device)
        model_output = model(x)
        # Build class prototypes from the support embeddings and gather the
        # query embeddings (moved to CPU, so the distance math runs there).
        prototype, query_samples = get_proto_query(model_output.cpu(),
                                                   y.cpu(),
                                                   n_aug=sample_M,
                                                   n_support=N_shot)
        # loss, acc = loss_fn(query_samples, prototype, n_aug=sample_M)
        loss, acc = cal_loss(query_samples, prototype, n_aug=sample_M)

        loss.backward()
        optim.step()

        train_loss.append(loss.item())
        train_acc.append(acc.item())
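The helpers `get_proto_query` and `cal_loss` are not shown above. As a rough guide, here is a minimal sketch of what they typically compute in a prototypical-networks loop (Snell et al., 2017): each class prototype is the mean of that class's support embeddings, and queries are scored by a softmax over negative squared distances to the prototypes. Every name, shape, and assumption below (balanced classes, the role of `n_aug`, i.e. the snippet's `sample_M`) is guessed for illustration, not the snippet's actual implementation.

import torch
import torch.nn.functional as F

def get_proto_query(embeddings, targets, n_aug, n_support):
    # ASSUMED behaviour: for each class, average the first n_support
    # embeddings into a prototype and keep the rest as queries.
    # n_aug is accepted only to match the call site; its real meaning in
    # the original code is unknown and it is unused here.
    classes = torch.unique(targets)
    protos, queries = [], []
    for c in classes:
        idx = (targets == c).nonzero(as_tuple=True)[0]
        protos.append(embeddings[idx[:n_support]].mean(dim=0))
        queries.append(embeddings[idx[n_support:]])
    return torch.stack(protos), torch.cat(queries)

def cal_loss(query_samples, prototype, n_aug):
    # ASSUMED behaviour: cross-entropy over negative squared Euclidean
    # distances from each query to each class prototype.
    n_classes = prototype.size(0)
    n_query = query_samples.size(0) // n_classes  # assumes balanced classes
    dists = torch.cdist(query_samples, prototype) ** 2
    log_p = F.log_softmax(-dists, dim=1)
    labels = torch.arange(n_classes).repeat_interleave(n_query)
    loss = F.nll_loss(log_p, labels)
    acc = (log_p.argmax(dim=1) == labels).float().mean()
    return loss, acc

Note that the snippet moves the embeddings to `.cpu()` before this computation; autograd still propagates gradients back through the device transfer, so `loss.backward()` reaches the model, but the distance math itself runs on the CPU.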
Example #2
    # Step the LR scheduler once per epoch and log any change it makes.
    old_lr = optimizer.param_groups[0]['lr']
    scheduler.step()
    new_lr = optimizer.param_groups[0]['lr']

    if epoch == 0:
        print_log('\tInitial lr is {:.8f}\n'.format(old_lr), opts.log_file)
    if new_lr != old_lr:
        print_log(
            '\tLR changes from {:.8f} to {:.8f} at epoch {:d}\n'.format(
                old_lr, new_lr, epoch), opts.log_file)

    net.train()
    for batch in tqdm(train_db):
        x, y = batch[0].to(opts.device), batch[1].to(opts.device)
        # TODO use k_query or not?
        # Episodic few-shot loss: n_support samples per class form the
        # support set; 'distance' selects the metric used for scoring.
        loss, acc = loss_fn(net(x),
                            target=y,
                            n_support=opts.k_shot,
                            distance=opts.distance,
                            device=opts.device)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())
        train_acc.append(acc.item())
    # ONE EPOCH ENDS
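One thing to watch in Example #2: `scheduler.step()` runs at the top of the epoch, before any `optimizer.step()` call. Since PyTorch 1.1 the documented order is the reverse (optimizer steps first, the scheduler once per epoch afterwards), and stepping the scheduler first effectively skips the first value of the schedule. A minimal self-contained illustration of the recommended ordering, using a placeholder parameter and `StepLR` rather than the snippet's actual model and schedule:

import torch

# Placeholder setup: one parameter, SGD, and StepLR stand in for the
# snippet's real net/optimizer/scheduler.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

for epoch in range(30):
    old_lr = optimizer.param_groups[0]['lr']
    loss = (param ** 2).sum()          # stand-in for the batch loop
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()                   # after optimizer.step(), once per epoch
    new_lr = optimizer.param_groups[0]['lr']
    if new_lr != old_lr:
        print('LR changed from {:.8f} to {:.8f} at epoch {:d}'.format(
            old_lr, new_lr, epoch))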