Example #1
def train(net, loader, ep, scheduler=None, writer=None):
    global n_iter
    if scheduler:
        scheduler.step()  # epoch-start stepping follows the pre-1.1 PyTorch convention

    net.train()
    loss_all = []  # per-batch losses, averaged at epoch end
    train_iter = tqdm(loader)
    for images, labels in train_iter:
        n_iter += 1

        images, labels = images.cuda(), labels.cuda()
        embedding = net(images)
        loss = criterion(embedding, labels)
        loss_all.append(loss.item())

        if writer:
            writer.add_scalar('loss/train', loss.item(), n_iter)
        print(torch.cuda.memory_allocated(torch.cuda.current_device()))  # debug: GPU memory in use

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_iter.set_description("[Train][Epoch %d] Loss: %.5f" % (ep, loss.item()))
    print('[Epoch %d] Loss: %.5f\n' % (ep, torch.Tensor(loss_all).mean()))
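This variant reads criterion, optimizer, and n_iter from module scope instead of taking them as arguments. A minimal sketch of the surrounding setup it assumes, with a placeholder model, loss, and data (every name below is illustrative, not the original repository's, and a CUDA device is required):

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm

# Module-level state train() relies on; all stand-ins for the real setup.
net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 128)).cuda()
criterion = nn.CrossEntropyLoss()              # placeholder for the metric loss
optimizer = optim.SGD(net.parameters(), lr=0.01)
n_iter = 0

loader = DataLoader(
    TensorDataset(torch.randn(64, 3, 32, 32), torch.randint(0, 10, (64,))),
    batch_size=16)

for ep in range(1, 4):
    train(net, loader, ep)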
Example #2
def train(ep, writer=None):
    global n_iter
    lr_scheduler.step()

    model.train()
    sample_net.train()

    loss_all = []
    train_iter = tqdm(zip(loader_train_sample, loader_eval_sample),
                      total=len(loader_train_sample))

    for (images, labels), (val_images, val_labels) in train_iter:
        n_iter += 1
        images, labels = images.cuda(), labels.cuda()
        val_images, val_labels = val_images.cuda(), val_labels.cuda()
        # weight = torch.ones((len(images), len(labels)), device=images.device, requires_grad=True)
        embedding = model(images)

        if opts.no_maml:
            weight = torch.ones((len(images), len(labels)),
                                device=images.device)
        else:
            # inner step: predict per-example weights from the current embedding
            weight = sample_net(embedding)
            loss = criterion(embedding, labels, weight)

            optimizer.zero_grad()
            # torch.autograd.grad; create_graph=True keeps the inner update differentiable
            model_gradients = grad(loss, model.parameters(), create_graph=True)

            updated_param = maml_optimizer.update(model.parameters(),
                                                  model_gradients)
            updated_param = model.format_parameters(updated_param)

            updated_embedding = model(val_images, updated_param)
            sample_loss = criterion(updated_embedding, val_labels)

            sampler_optimizer.zero_grad()
            sample_loss.backward(retain_graph=True)
            sampler_optimizer.step()

            weight = sample_net(embedding)  # recompute weights with the updated sampler

        loss = criterion(embedding, labels, weight)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_all.append(loss.item())
        if writer:
            writer.add_scalar('loss/train', loss.item(), n_iter)

        train_iter.set_description("[Train, Embedding][Epoch %d] Loss: %.5f" %
                                   (ep, loss.item()))
    print('[Epoch %d] Loss: %.5f\n' % (ep, torch.Tensor(loss_all).mean()))
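This example trains a sample-weighting network with a MAML-style inner step: torch.autograd.grad(..., create_graph=True) yields gradients that stay differentiable, maml_optimizer.update applies a virtual parameter update, and sampler_optimizer then trains sample_net on the validation loss of the virtually updated model. The update rule itself lives elsewhere in the repository; a minimal sketch of what such a differentiable one-step SGD could look like (the class name and learning rate are assumptions):

import torch

class InnerSGD:
    """Hypothetical stand-in for maml_optimizer: one SGD step computed
    out-of-place, so the validation loss can backpropagate through the
    update and into sample_net."""

    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, grads):
        return [p - self.lr * g for p, g in zip(params, grads)]

# tiny check that gradients flow through the virtual step
p = torch.ones(3, requires_grad=True)
g = 2 * p                                   # a gradient that depends on p
new_p = InnerSGD(lr=0.1).update([p], [g])[0]
new_p.sum().backward()
print(p.grad)                               # tensor([0.8000, 0.8000, 0.8000])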
Example #3
def train(net, loader, ep, scheduler=None):
    if scheduler is not None:
        scheduler.step()
    net.train()
    loss_all = []

    train_iter = tqdm(loader)
    for images, labels in train_iter:
        images, labels = images.cuda(), labels.cuda()
        loss = criterion(net(images), labels)
        loss_all.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_iter.set_description("[Train][Epoch %d] Loss: %.5f" %
                                   (ep, loss.item()))
    print('[Epoch %d] Loss: %.5f\n' % (ep, torch.Tensor(loss_all).mean()))
Example #4
def train(net, loader, ep):
    lr_scheduler.step()
    net.train()
    loss_all = []
    train_iter = tqdm(loader, ncols=80)
    for images, labels in train_iter:
        images, labels = images.cuda(), labels.cuda()
        pool, embedding = net(images)
        loss = criterion(embedding, labels)
        loss_all.append(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_iter.set_description("[Train][Epoch %d] Loss: %.5f" %
                                   (ep, loss.item()))
    print('[Epoch %d] MeanLoss: %.5f\n' % (ep, torch.Tensor(loss_all).mean()))
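Unlike the other examples, this network returns a pair, pool, embedding = net(images): the pooled backbone feature alongside the projected embedding, with only the embedding feeding the loss. A minimal sketch of a module with that return signature (the architecture and sizes are illustrative, not the original's):

import torch
from torch import nn

class EmbedNet(nn.Module):
    def __init__(self, dim=128):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten())
        self.fc = nn.Linear(64, dim)

    def forward(self, x):
        pool = self.backbone(x)        # pooled backbone feature
        return pool, self.fc(pool)     # (pool, embedding) pair

pool, embedding = EmbedNet()(torch.randn(2, 3, 32, 32))
print(pool.shape, embedding.shape)     # torch.Size([2, 64]) torch.Size([2, 128])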
Example #5
def train(net, loader, ep, scheduler=None, writer=None):
    global n_iter
    if scheduler:
        scheduler.step()
        beta_scheduler.step()

    net.train()
    loss_all = []
    train_iter = tqdm(loader)
    for images, labels in train_iter:
        n_iter += 1

        images, labels = images.cuda(), labels.cuda()
        embedding = net(images)

        if embedding.dim() == 3:  # multi-head embedding: (batch, heads, dim)
            loss = sum([
                criterion(embedding[:, i], labels)
                for i in range(embedding.size(1))
            ])
        else:
            loss = criterion(embedding, labels)
        loss_all.append(loss.item())

        if writer:
            writer.add_scalar('loss/train', loss.item(), n_iter)

        optimizer.zero_grad()
        beta_optimizer.zero_grad()
        loss.backward()

        optimizer.step()
        beta_optimizer.step()
        train_iter.set_description("[Train][Epoch %d] Loss: %.5f" %
                                   (ep, loss.item()))
    print('[Epoch %d] Loss: %.5f\n' % (ep, torch.Tensor(loss_all).mean()))
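The dim() == 3 branch handles a multi-head embedding, read here as shape (batch, heads, dim), and sums one loss term per head; the separate beta_optimizer/beta_scheduler pair suggests learnable loss parameters (such as a margin or temperature) trained alongside the network, though both are defined elsewhere. A small shape demo of the per-head dispatch, using a toy stand-in criterion:

import torch

def toy_criterion(embedding, labels):
    # toy stand-in for the metric loss; only the shapes matter here
    return embedding.norm(dim=-1).mean()

batch, heads, dim = 8, 4, 64
embedding = torch.randn(batch, heads, dim)
labels = torch.randint(0, 5, (batch,))

# same dispatch as in train(): one loss term per head, summed
if embedding.dim() == 3:
    loss = sum(toy_criterion(embedding[:, i], labels)
               for i in range(embedding.size(1)))
else:
    loss = toy_criterion(embedding, labels)
print(float(loss))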