# Example 1
def net_config_classify(image, label, model, args):
    """Build the classification network, loss, and accuracy metrics.

    Args:
        image: input image variable fed to the backbone via ``createmodel``.
        label: ground-truth label variable.
        model: backbone model spec forwarded to ``createmodel``.
        args: parsed CLI arguments; must provide ``loss_name`` and
            ``class_dim``, plus ``arc_margin``/``arc_scale``/``arc_easy_margin``
            when ``loss_name == "arcmargin"``.

    Returns:
        list: ``[avg_cost, acc_top1, acc_top5]`` fluid variables.

    Raises:
        ValueError: if ``args.loss_name`` is not a supported loss.
    """
    out = createmodel(image, model, args)

    if args.loss_name == "softmax":
        metricloss = SoftmaxLoss(class_dim=args.class_dim)
    elif args.loss_name == "arcmargin":
        metricloss = ArcMarginLoss(
            class_dim=args.class_dim,
            margin=args.arc_margin,
            scale=args.arc_scale,
            easy_margin=args.arc_easy_margin,
        )
    else:
        # Previously an unrecognized loss_name fell through and the next
        # line raised an opaque UnboundLocalError; fail fast with a clear
        # message instead.
        raise ValueError(
            "Unsupported loss_name: {!r} (expected 'softmax' or 'arcmargin')"
            .format(args.loss_name))
    cost, logit = metricloss.loss(out, label)
    avg_cost = fluid.layers.mean(x=cost)
    acc_top1 = fluid.layers.accuracy(input=logit, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=logit, label=label, k=5)
    return [avg_cost, acc_top1, acc_top5]
def net_config(image, label, model, args):
    """Build the metric-learning network, loss, and accuracy metrics.

    Args:
        image: input image variable fed to ``model.net``.
        label: ground-truth label variable.
        model: backbone object exposing ``net(input, embedding_size)``.
        args: parsed CLI arguments; must provide ``model``, ``loss_name``,
            ``embedding_size`` and ``class_dim``, plus
            ``arc_margin``/``arc_scale``/``arc_easy_margin`` when
            ``loss_name == "arcmargin"``.

    Returns:
        tuple: ``(avg_cost, acc_top1, acc_top5, out)`` where ``out`` is the
        embedding produced by the backbone.

    Raises:
        AssertionError: if ``args.model`` is not in ``model_list``.
        ValueError: if ``args.loss_name`` is not a supported loss.
    """
    assert args.model in model_list, "{} is not in lists: {}".format(args.model,
                                                                     model_list)

    out = model.net(input=image, embedding_size=args.embedding_size)

    if args.loss_name == "softmax":
        metricloss = SoftmaxLoss(class_dim=args.class_dim)
    elif args.loss_name == "arcmargin":
        metricloss = ArcMarginLoss(
            class_dim=args.class_dim,
            margin=args.arc_margin,
            scale=args.arc_scale,
            easy_margin=args.arc_easy_margin,
        )
    else:
        # Previously an unrecognized loss_name fell through and the next
        # line raised an opaque UnboundLocalError; fail fast with a clear
        # message instead.
        raise ValueError(
            "Unsupported loss_name: {!r} (expected 'softmax' or 'arcmargin')"
            .format(args.loss_name))
    cost, logit = metricloss.loss(out, label)
    avg_cost = fluid.layers.mean(x=cost)
    acc_top1 = fluid.layers.accuracy(input=logit, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=logit, label=label, k=5)
    return avg_cost, acc_top1, acc_top5, out
# Example 3
def main():
    """Train an embedding model on MNIST with a selectable metric loss.

    Parses CLI arguments, trains ``Net`` for ``--epochs`` epochs with the
    chosen loss, validates after each epoch, saves a 2-D embedding
    visualization per epoch, and finally stitches the frames into a gif.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--loss', default='softmax_loss',
                        choices=['softmax_loss', 'center_loss', 'sphere_face_loss', 'cos_face_loss', 'arc_face_loss'])
    parser.add_argument('--viz', default='vizs')  # output directory for per-epoch plots / gif
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--lr', type=float, default=0.001)
    args = parser.parse_args()

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # DataLoader worker/pinning options only help when copying to a GPU.
    kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}

    # Standard MNIST mean/std normalization.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    train_loader = DataLoader(
        datasets.MNIST('../data', train=True, download=True, transform=transform),
        batch_size=args.batch_size, shuffle=True, **kwargs)

    test_loader = DataLoader(
        datasets.MNIST('../data', train=False, transform=transform),
        batch_size=512, shuffle=True, **kwargs)

    model = Net().to(device)

    # Select the loss module. argparse 'choices' guarantees one branch
    # matches, so 'criterion' is always bound. center_loss additionally
    # optimizes its learned class centers with a separate SGD optimizer.
    if args.loss == 'center_loss':
        criterion = CenterLoss().to(device)
        center_optimizer = optim.SGD([criterion.centers], lr=args.lr, momentum=0.9)
    elif args.loss == 'sphere_face_loss':
        criterion = SphereFaceLoss().to(device)
    elif args.loss == 'cos_face_loss':
        criterion = CosFaceLoss(s=7, m=0.2).to(device)
    elif args.loss == 'softmax_loss':
        criterion = SoftmaxLoss().to(device)
    elif args.loss == 'arc_face_loss':
        criterion = ArcFaceLoss().to(device)

    # Jointly optimize the backbone and the loss's classification layer.
    # NOTE(review): assumes every criterion class exposes a '.fc' module —
    # verify against the loss implementations.
    optimizer = optim.SGD([{'params': model.parameters()}, {'params': criterion.fc.parameters()}],
                          lr=args.lr, momentum=0.9)

    for epoch in range(1, args.epochs + 1):
        model.train()
        embeddings = []  # per-batch embeddings, collected for visualization
        labels = []
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)

            # zero_grad -> forward -> backward -> step; the center-loss
            # optimizer is stepped in lockstep with the main one.
            optimizer.zero_grad()
            if args.loss == 'center_loss':
                center_optimizer.zero_grad()
            embedding = model(data)

            loss = criterion(embedding, target)
            loss.backward()
            optimizer.step()
            if args.loss == 'center_loss':
                center_optimizer.step()

            embeddings.append(embedding)
            labels.append(target)

            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                           100. * batch_idx / len(train_loader), loss.item()))

        # Move collected tensors to CPU numpy for plotting.
        embeddings = torch.cat(embeddings, 0).cpu().detach().numpy()
        labels = torch.cat(labels, 0).cpu().detach().numpy()
        acc = val(model, criterion, device, test_loader)
        visualize(args.viz, args.loss, embeddings, labels, epoch, acc)

    # Assemble the per-epoch frames into an animated gif (0.2s per frame).
    print('Creating gif...')
    create_gif('./%s/gifs/%s.gif' % (args.viz, args.loss),
               './%s/%s' % (args.viz, args.loss), 0.2)
    print('Done')