Example #1
def test(epoch, test_loader, save=True):
    global best_acc
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(
                batch_idx, len(test_loader),
                'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                    losses.avg, top1.avg, top5.avg))

    if save:
        writer.add_scalar('loss/test', losses.avg, epoch)
        writer.add_scalar('acc/test_top1', top1.avg, epoch)
        writer.add_scalar('acc/test_top5', top5.avg, epoch)

        is_best = False
        if top1.avg > best_acc:
            best_acc = top1.avg
            is_best = True

        print('Current best acc: {}'.format(best_acc))
        save_checkpoint(
            {
                'epoch': epoch,
                'model': args.model,
                'dataset': args.dataset,
                'state_dict': net.module.state_dict()
                if isinstance(net, nn.DataParallel) else net.state_dict(),
                'acc': top1.avg,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint_dir=log_dir)
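
The examples on this page all rely on the same helpers without defining them: AverageMeter, accuracy, and progress_bar. A minimal sketch of the first two, modeled on the PyTorch ImageNet example (progress_bar is assumed to be any per-batch console logger):

class AverageMeter:
    """Tracks the running sum and average of a scalar metric."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes top-k accuracy (in percent) for each k in topk."""
    maxk = max(topk)
    batch_size = target.size(0)
    # indices of the maxk highest logits per sample, shape (batch, maxk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()  # shape (maxk, batch)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res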
Example #2
def test(epoch, test_loader, save=False):
    global best_acc
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    device0 = 'cuda'
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            if use_cuda:
                # inputs, targets = inputs.cuda(), targets.cuda()
                inputs, targets = inputs.to(device0), targets.to(device0)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(
                batch_idx, len(test_loader),
                'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                    losses.avg, top1.avg, top5.avg))

    return top1.avg
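
Unlike Example #1, this variant does no checkpointing of its own and returns the top-1 accuracy instead. A sketch of how a caller might track the best model; the epoch range and checkpoint path are placeholders:

best_acc = 0.0
for epoch in range(n_epochs):  # n_epochs is a placeholder
    acc = test(epoch, test_loader, save=False)
    if acc > best_acc:
        best_acc = acc
        torch.save(net.state_dict(), 'best.pth')  # hypothetical path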
Example #3
def train(epoch, trainloader, net, use_cuda, learning_rate):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=learning_rate,
                          momentum=0.9,
                          weight_decay=5e-4)
    #optimizer = optim.Adam(net.parameters(), lr = learning_rate, weight_decay=5e-4)
    for batch_idx, samples in enumerate(trainloader):
        n_iter = (epoch * len(trainloader)) + batch_idx
        inputs = samples[0]
        targets = samples[1]
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        #reset grad
        net.zero_grad()
        optimizer.zero_grad()
        # get data batch
        # Variable is deprecated since PyTorch 0.4; operate on tensors directly
        inputs = inputs.float().requires_grad_()
        targets = targets.long()

        #forward
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        #backward
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum().item()
        #logging

        writer.add_scalars("data/scalars_group",
                           {"tr_loss": (train_loss / (batch_idx + 1))}, epoch)
        writer.add_scalars("data/scalars_group", {"lr": (learning_rate)},
                           epoch)

        utils.progress_bar(
            batch_idx, len(trainloader),
            'Loss: %.3f | Tr_Acc: %.3f%% (%d/%d)|lr: %.5f' %
            (train_loss / (batch_idx + 1), 100. * correct / total, correct,
             total, learning_rate))
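
Note that this function rebuilds the criterion and optimizer on every call, so SGD momentum and weight-decay state are discarded at each epoch boundary. A sketch of the alternative, assuming the signature is changed so the caller owns both objects:

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate,
                      momentum=0.9, weight_decay=5e-4)
for epoch in range(n_epochs):  # n_epochs is a placeholder
    # hypothetical variant of train() that accepts criterion and optimizer
    train(epoch, trainloader, net, use_cuda, criterion, optimizer)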
Example #4
def evaluate():
    # build dataset
    val_loader, n_class = get_dataset()
    # build model
    net = get_model(n_class)

    criterion = nn.CrossEntropyLoss()

    if use_cuda:
        net = net.cuda()
        net = torch.nn.DataParallel(net, list(range(args.n_gpu)))
        cudnn.benchmark = True

    # begin eval
    net.eval()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(val_loader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # timing
            batch_time.update(time.time() - end)
            end = time.time()

            progress_bar(
                batch_idx, len(val_loader),
                'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                    losses.avg, top1.avg, top5.avg))
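
get_dataset and get_model are repo-specific and not shown. A hypothetical stand-in for get_dataset, returning a CIFAR-10 validation loader via torchvision; the batch size, worker count, and normalization constants are assumptions:

import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

def get_dataset(batch_size=256, n_worker=4):
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2470, 0.2435, 0.2616)),
    ])
    val_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False,
                            num_workers=n_worker)
    return val_loader, 10  # loader and number of classes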
Example #5
def train(epoch, train_loader):
    print('\nEpoch: %d' % epoch)
    net.train()

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    for batch_idx, (inputs, targets) in enumerate(train_loader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)

        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        # timing
        batch_time.update(time.time() - end)
        end = time.time()

        progress_bar(
            batch_idx, len(train_loader),
            'Loss: {:.3f} | Acc1: {:.3f}% | Acc5: {:.3f}%'.format(
                losses.avg, top1.avg, top5.avg))
    writer.add_scalar('loss/train', losses.avg, epoch)
    writer.add_scalar('acc/train_top1', top1.avg, epoch)
    writer.add_scalar('acc/train_top5', top5.avg, epoch)
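
This train pairs naturally with the test from Example #1. A sketch of a driver loop; the epoch count and the step-decay schedule are assumptions, not part of the original:

scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150, 225],
                                           gamma=0.1)
for epoch in range(n_epochs):  # n_epochs is a placeholder
    train(epoch, train_loader)
    test(epoch, test_loader, save=True)
    scheduler.step()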
Example #6
def test(epoch, testloader, net, use_cuda, learning_rate):
    import shutil
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0

    def save_checkpoint(state,
                        is_best,
                        filename="./checkpoint/checkpoint.pth.tar"):
        torch.save(state, filename)
        if is_best:
            shutil.copyfile(filename, "best_model.pth.tar")

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=learning_rate,
                          momentum=0.9,
                          weight_decay=5e-4)

    with torch.no_grad():
        for batch_idx, samples in enumerate(testloader):

            inputs = samples[0]
            targets = samples[1]

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            #correct += predicted.eq(targets.data).cpu().sum().item()
            correct += (predicted == targets.data).sum().item()

            Te_Acc = 100. * correct / total
            utils.progress_bar(
                batch_idx, len(testloader),
                "Loss: %.3f | Te_Acc: %.3f (%d,%d)" %
                (test_loss / (batch_idx + 1), Te_Acc, correct, total))

            writer.add_scalars("data/scalars_group",
                               {"te_loss":
                                (test_loss / (batch_idx + 1))}, epoch)
            writer.add_scalars("data/scalars_group",
                               {"te_acc": (Te_Acc / (batch_idx + 1))}, epoch)

            #save checkpoint
            is_best = Te_Acc > best_acc
            best_acc = max(Te_Acc, best_acc)
            save_checkpoint(
                {
                    "epoch": epoch + 1,
                    "args": args,
                    "net": net.module if use_cuda else net,
                    "best_acc": best_acc,
                    "optimizer": optimizer.state_dict()
                }, is_best)

        print("Saving model..:")
        classes = data_loader.create_class()

        dataiter = iter(testloader)
        img, lbl = next(dataiter)
        _out = net(img.cuda() if use_cuda else img)
        _, predicted = torch.max(_out, 1)

        #GroundTruth
        print('GroundTruth: ',
              ' '.join('%9s' % classes[lbl[j]] for j in range(9)))
        #Predicted
        print('Predicted: ',
              ' '.join('%9s' % classes[predicted[j]] for j in range(9)))

        # Embeddings
        metadata = [classes[i] for i in lbl]
        writer.add_embedding(_out.data,
                             metadata=metadata,
                             label_img=img.data,
                             global_step=epoch)

        # plot the confusion matrix for the sampled batch
        df = utils.confusion(lbl, predicted.cpu())
        print("---------------Printing Confusion Matrix---------------")
        print(df["confusion"])