Example No. 1
def train(training_set_loader, model, criterion, optimizer, epoch):
    """train a epoch, return average loss and accuracy"""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()
    model.train()

    for i, (images, targets) in enumerate(training_set_loader):

        # process image data
        images = Variable(images.cuda())
        targets = Variable(targets.cuda())

        t0 = time.time()

        out = model(images)
        optimizer.zero_grad()
        loss = criterion(out, targets)
        loss.backward()
        optimizer.step()


        t_accuracy = accuracy(out.float().data, targets.float().data)
        losses.update(loss.item(), images.size(0))
        accuracies.update(t_accuracy, images.size(0))

        t1 = time.time()

        if i % 16 == 0:
            print('Timer: %.4f sec.' % (t1 - t0))
            print('iter ' + repr(i) + ' || Loss: %.4f ||' % (loss.item()), end=' ')

    return losses.avg, accuracies.avg
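These snippets all rely on an AverageMeter helper that is never shown. A minimal sketch, following the well-known pattern from the PyTorch ImageNet example; the optional name argument also covers the AverageMeter("train-avg") calls in Example No. 2:

class AverageMeter:
    """Track the latest value, running sum, count, and average of a metric."""
    def __init__(self, name=""):
        self.name = name
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val        # most recent value
        self.sum += val * n   # weighted by the batch size n
        self.count += n
        self.avg = self.sum / self.count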
Example No. 2
def train(model, trainloader, device, optimizer, loss_function, epoch):
    """
    Trains the model for one epoch.
    Args:
        model: the model to train
        trainloader: Dataloader for the training data
        device: the compute device
        optimizer
        loss_function
    """
    global train_losses
    model.train()
    train_iter = 0
    loss_meter = AverageMeter("train-avg")
    for x, _ in trainloader:
        x = x.to(device)
        z, logdet, _, logp = model(preprocess(x))
        loss = loss_function(logp, logdet, x.size())

        # code for rosalinty model
        # log_p_sum, logdet, z_outs = model(preprocess(x))
        # loss = loss_function(log_p_sum, logdet, x.size())

        if train_iter % 10 == 0:
            print(f"iteration: {train_iter}, loss: {loss.item()}", end="\r")
        
        model.zero_grad()
        loss_meter.update(loss.item())
        loss.backward()
        optimizer.step()
        train_iter += 1
    print(f"epoch complete, mean loss: {loss_meter.avg}")
    train_losses.append({"epoch": epoch, "avg_loss": loss_meter.avg})
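preprocess() is not defined in this example. Given the bits/dim objective here and the save_image range of (-0.5, 0.5) in Example No. 8, this looks like Glow-style training, where a common choice is uniform dequantization into [-0.5, 0.5]; a minimal sketch under that assumption (N_BITS is a guess, and the repository's actual preprocess() may differ):

import torch

N_BITS = 8  # assumed quantization level

def preprocess(x):
    """Map images in [0, 1] to dequantized values in roughly [-0.5, 0.5]."""
    n_bins = 2.0 ** N_BITS
    x = torch.floor(x * 255.0 / (256.0 / n_bins))  # quantize to n_bins levels
    x = x / n_bins - 0.5                           # rescale to [-0.5, 0.5]
    return x + torch.rand_like(x) / n_bins         # add dequantization noise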
def validate(val_loader, model, criterion, print_freq=10000):
    model.eval()
    losses = AverageMeter()
    percent_acc = AverageMeter()
    with torch.no_grad():
        time_now = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            data = data.cuda()
            target = target.cuda()
            if not Config.ss:
                output = model(data)
                loss = criterion(output, target)
            else:
                layer2_output, layer3_output, output = model(data)
                loss = criterion(output, target) + 0.5 * criterion(layer2_output, target) + 0.5 * criterion(
                    layer3_output, target)
            losses.update(loss.item(), data.size(0))

            acc = accuracy(output, target)
            percent_acc.update(acc, data.size(0))

            time_end = time.time() - time_now
            if batch_idx % print_freq == 0:
                print('Validation Round: {}, Time: {}'.format(batch_idx, np.round(time_end, 2)))
                print('Validation Loss: val:{} avg:{} Acc: val:{} avg:{}'.format(losses.val, losses.avg,
                                                                                 percent_acc.val, percent_acc.avg))
    return losses, percent_acc
def train_epoch(data_loader, model, criterion, optimizer, epoch, print_freq=10000):
    losses = AverageMeter()
    percent_acc = AverageMeter()
    model.train()
    time_now = time.time()

    for batch_idx, (data, target) in enumerate(data_loader):
        data = data.cuda()
        target = target.cuda()
        if not Config.ss:
            output = model(data)
            loss = criterion(output, target)
        else:
            layer2_output, layer3_output, output = model(data)
            loss = criterion(output, target) + 0.5 * criterion(layer2_output, target) + 0.5 * criterion(layer3_output,
                                                                                                        target)
        losses.update(loss.item(), data.size(0))

        acc = accuracy(output, target)
        percent_acc.update(acc, data.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        time_end = time.time() - time_now
        if batch_idx % print_freq == 0:
            print('Training Round: {}, Time: {}'.format(batch_idx, np.round(time_end, 2)))
            print('Training Loss: val:{} avg:{} Acc: val:{} avg:{}'.format(losses.val, losses.avg,
                                                                           percent_acc.val, percent_acc.avg))
    return losses, percent_acc
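The accuracy helper is also left undefined, and its signature varies across these examples: the functions above expect a single percentage, while Examples No. 11 and 12 unpack top-1/top-5 values from topk=(1, 5) (Example No. 12 even passes extra path/minibatch arguments). A sketch of the common top-k version from the PyTorch ImageNet example; callers that want one number would take the first element of the result:

import torch

def accuracy(output, target, topk=(1,)):
    """Return top-k accuracy (in percent) for each k, given logits and labels."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()  # shape (maxk, batch_size)
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res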
Example No. 5
def validate(val_loader,
             model,
             criterion,
             _WEIGHT_DECAY=5e-4,
             print_freq=10000):
    model.eval()
    losses = AverageMeter()
    percent_acc = AverageMeter()
    with torch.no_grad():
        time_now = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            data = data.cuda()
            target = target.cuda().long()

            output = model(data)
            # print("output.shape: {}".format(output.shape))
            ################## main parts of lgm loss
            loss = criterion(output, target)

            losses.update(loss.item(), data.size(0))

            acc = accuracy(output, target)
            percent_acc.update(acc, data.size(0))

            time_end = time.time() - time_now
            """
            if batch_idx % print_freq == 0:
                print('Validation Round: {}, Time: {}'.format(batch_idx, np.round(time_end, 2)))
                print('Validation Loss: val:{} avg:{} Acc: val:{} avg:{}'.format(losses.val, losses.avg,
                                                                      percent_acc.val, percent_acc.avg))
                                                                      """
    return losses, percent_acc
def validate(val_loader,
             model,
             criterion,
             _WEIGHT_DECAY=5e-4,
             print_freq=10000):
    model.eval()
    losses = AverageMeter()
    percent_acc = AverageMeter()
    with torch.no_grad():
        time_now = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            target = target.long()
            target_backup = target
            if torch.cuda.is_available():
                data = data.cuda()
                target = target.cuda()

            output = model(data, target)
            # print("output.shape: {}".format(output.shape))
            ################## main parts of lgm loss
            if args.loss == 'lgm':
                logits, likelihood_regloss, means = criterion(output, target)
                ################## main parts of lgm loss

                ################## l2 regularization loss for loss
                # l2_criterion = nn.MSELoss(size_average=False)
                """
                l2_loss = 0
                for param in model.parameters():
                    l2_loss += torch.norm(param)

                target_onehot = one_hot(target_backup)
                if torch.cuda.is_available():
                    target_onehot = target_onehot.cuda()
                mean_loss = torch.sum(- target_onehot * torch.nn.functional.log_softmax(logits, -1), -1)
                mean_loss = torch.mean(mean_loss.float())
                """
                loss = logits  #+ _WEIGHT_DECAY * l2_loss + likelihood_regloss
            elif args.loss == 'ce':
                loss = criterion(output, target)

            losses.update(loss.item(), data.size(0))

            acc = accuracy(output, target)
            percent_acc.update(acc, data.size(0))

            time_end = time.time() - time_now
            """
            if batch_idx % print_freq == 0:
                print('Validation Round: {}, Time: {}'.format(batch_idx, np.round(time_end, 2)))
                print('Validation Loss: val:{} avg:{} Acc: val:{} avg:{}'.format(losses.val, losses.avg,
                                                                      percent_acc.val, percent_acc.avg))
                                                                      """

    return losses, percent_acc
Example No. 7
def train_epoch(data_loader,
                model,
                criterion,
                optimizer,
                mean_optimizer=None,
                _WEIGHT_DECAY=5e-4,
                print_freq=args.print_freq):
    losses = AverageMeter()
    percent_acc = AverageMeter()
    means_param = AverageMeter()
    model.train()
    time_now = time.time()

    for batch_idx, (data, target) in enumerate(data_loader):
        target = target.long()
        target_backup = target
        if torch.cuda.is_available():
            data = data.cuda()
            target = target.cuda()
        #print('target: {}'.format(target))
        #print('onehot_target: {}'.format(one_hot(target)))
        output = model(data)
        #print("output.shape: {}".format(output.shape))
        ################## main parts of lgm loss
        loss = criterion(output, target)
        ################## main parts of lgm loss

        losses.update(loss.item(), data.size(0))

        acc = accuracy(output, target)
        percent_acc.update(acc, data.size(0))

        # compute gradient and do SGD step
        #means_param.update(means.item(), data.size(0))

        if args.mean == 'true':
            mean_optimizer.zero_grad()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if args.mean == 'true':
            mean_optimizer.step()
        time_end = time.time() - time_now
        if batch_idx % print_freq == 0:
            print('Training Round: {}, Time: {}'.format(
                batch_idx, np.round(time_end, 2)))
            print('loss: {}'.format(loss))
    return losses, percent_acc
Example No. 8
def test(model, testloader, device, loss_function, epoch, generate_imgs, levels, dataset_name, n_samples):
    global best_loss  # keep track of best loss
    global test_losses
    model.eval()
    loss = 0
    num_samples = n_samples
    # TODO: add average loss checker here, they use that in code for checkpointing
    loss_meter = AverageMeter('test-avg')
    for x, y in testloader:
        x = x.to(device)
        z, logdet, _, logp = model(preprocess(x))
        loss = loss_function(logp, logdet, x.size())

        # code for rosalinty model
        # log_p_sum, logdet, z_outs = model(x)
        # loss = loss_function(log_p_sum, logdet, x.size())

        loss_meter.update(loss.item())

    if loss_meter.avg < best_loss:
        print(f"New best model found, average loss {loss_meter.avg}")
        checkpoint_state = {
            "model": model.state_dict(),
            "test_loss": loss_meter.avg,
            "epoch": epoch
        }
        os.makedirs("new_checkpoints", exist_ok=True)
        # save the model
        torch.save(checkpoint_state, f"new_checkpoints/best_{dataset_name.lower()}.pth.tar")
        best_loss = loss_meter.avg
    print(f"test epoch complete, result: {loss_meter.avg} bits/dim")
    test_losses.append({"epoch": epoch, "avg_loss": loss_meter.avg})
    x = next(iter(testloader))[0]  # extract the first batch to get the channel dimensions
    # generate samples after each test
    if generate_imgs:
        sample_images = generate(model, num_samples, device, shape=x.shape, levels=levels)
        os.makedirs('generated_imgs', exist_ok=True)
        grid = torchvision.utils.make_grid(sample_images, nrow=int(num_samples ** 0.5))
        torchvision.utils.save_image(grid, f"generated_imgs/epoch_{epoch}.png", normalize=True, nrow=10,
                                     range=(-0.5, 0.5))
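Restoring the checkpoint saved above is symmetric; a hypothetical helper that matches the dict keys used in the torch.save call:

import torch

def load_best(model, dataset_name, device="cpu"):
    """Load the best checkpoint written by test() into model."""
    state = torch.load(f"new_checkpoints/best_{dataset_name.lower()}.pth.tar",
                       map_location=device)
    model.load_state_dict(state["model"])
    return state["epoch"], state["test_loss"]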
Example No. 9
def train_epoch(data_loader, model, criterion, optimizer, print_freq=1000):
    losses = AverageMeter()
    percent_acc = AverageMeter()
    means_param = AverageMeter()
    model.train()
    time_now = time.time()

    for batch_idx, (data, target) in enumerate(data_loader):
        data = data.cuda()
        target = target.cuda().long()

        output = model(data)
        #print("output.shape: {}".format(output.shape))
        ################## main parts of lgm loss
        loss = criterion(output, target)
        ################## l2 regularization loss

        ################## softmax using logits.
        #print("target.shape: {}, logits.shape: {}".format(target.shape, logits.shape))
        #logits = torch.max(logits, 0)
        #print("max logits.shape ", logits.shape)
        losses.update(loss.item(), data.size(0))

        acc = accuracy(output, target)
        percent_acc.update(acc, data.size(0))

        # compute gradient and do SGD step

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        time_end = time.time() - time_now
        if batch_idx % print_freq == 0:
            print('Training Round: {}, Time: {}'.format(
                batch_idx, np.round(time_end, 2)))
            print('Training Loss: val:{} avg:{} Acc: val:{} avg:{}'.format(
                losses.val, losses.avg, percent_acc.val, percent_acc.avg))
    return losses, percent_acc
Example No. 10
def validate(val_set_loader, model, criterion):
    losses = AverageMeter()
    accuracies = AverageMeter()

    model.eval()
    for i, (images, targets) in enumerate(val_set_loader):
        # process image data
        images = Variable(images.cuda())
        targets = Variable(targets.cuda())

        t0 = time.time()
        out = model(images)
        loss = criterion(out, targets)
        t_accuracy = accuracy(out.float().data, targets.float().data)
        losses.update(loss.item(), images.size(0))
        accuracies.update(t_accuracy, images.size(0))

        t1 = time.time()

        if i % 16 == 0:
            print('Timer: %.4f sec.' % (t1 - t0))
            print('EVAL iter ' + repr(i) + ' || Loss: %.4f ||' % (loss.item()), end=' ')
    # return average loss and accuracy
    return losses.avg, accuracies.avg
Example No. 11
def validate(val_loader, model, criterion, epoch=0):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    #    class_correct = list(0. for i in range(NUM_CLASSES))
    #    class_total = list(0. for i in range(NUM_CLASSES))

    # switch to evaluate mode
    model.eval()

    end = time.time()

    for i, (input, target) in enumerate(val_loader):
        if USE_CUDA:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        with torch.no_grad():
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)

        # compute output
        if MODEL_ID == 6:
            ##flatten input for logistic
            input_var = input_var.view(-1, INPUT_SIZE * INPUT_SIZE * 3)

        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            print('Test: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch,
                      i,
                      len(val_loader),
                      batch_time=batch_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))
        if i == (len(val_loader) - 1):
            with open(filename_dev, 'a') as a:
                a.write('{0}\t'
                        '{batch_time.avg:16.3f}\t'
                        '{loss.avg:16.4f}\t'
                        '{top1.avg:16.3f}\t'
                        '{top5.avg:16.3f}\n'.format(epoch,
                                                    batch_time=batch_time,
                                                    loss=losses,
                                                    top1=top1,
                                                    top5=top5))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1,
                                                                  top5=top5))

    return top1.avg
Example No. 12
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    # for i, (input, target) in enumerate(train_loader):
    for i, data in enumerate(train_loader):
        (input, target), (path, _) = data
        # measure data loading time
        if USE_CUDA:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

        data_time.update(time.time() - end)

        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        if MODEL_ID == 6:
            ##flatten input for logistic
            input_var = input_var.view(-1, INPUT_SIZE * INPUT_SIZE * 3)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data,
                                target,
                                topk=(1, 5),
                                path=path,
                                minibatch=i)
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))

        if i == (len(train_loader) - 1):
            with open(filename_train, 'a') as a:
                a.write('{0}\t'
                        '{batch_time.avg:16.3f}\t'
                        '{data_time.avg:16.3f}\t'
                        '{loss.avg:16.4f}\t'
                        '{top1.avg:16.3f}\t'
                        '{top5.avg:16.3f}\n'.format(epoch,
                                                    batch_time=batch_time,
                                                    data_time=data_time,
                                                    loss=losses,
                                                    top1=top1,
                                                    top5=top5))
def validate(val_set_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    model.eval()
    end = time.time()

    for i, (input, target) in enumerate(val_set_loader):
        input_cuda_var, target_cuda_var = Variable(input.cuda()), Variable(
            target.cuda())

        output_cuda_var = model(input_cuda_var)
        loss = criterion(output_cuda_var, target_cuda_var)

        # transfer Variable to float Variable
        output_cuda_var_float = output_cuda_var.float()
        loss = loss.float()

        # measure accuracy
        prec1 = accuracy(output_cuda_var_float.data, target_cuda_var.data)[0]
        losses.update(loss.item(), input.size(0))
        accuracies.update(prec1, input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.print_allow and i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i,
                      len(val_set_loader),
                      batch_time=batch_time,
                      loss=losses,
                      top1=accuracies))

    print('val * Prec@1 {top1.avg: .3f}'.format(top1=accuracies))

    # return average loss and accuracy
    return losses.avg, accuracies.avg
def train(training_set_loader, model, criterion, optimizer, epoch):
    """train a epoch, return average loss and accuracy"""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    model.train()
    end = time.time()

    for i, (input, target) in enumerate(training_set_loader):
        data_time.update(time.time() - end)

        input_cuda_var, target_cuda_var = Variable(input.cuda()), Variable(
            target.cuda())
        optimizer.zero_grad()
        output_cuda_var = model(input_cuda_var)
        loss = criterion(output_cuda_var, target_cuda_var)
        loss.backward()
        optimizer.step()

        # convert the Variable to a float Variable
        output_cuda_var_float = output_cuda_var.float()
        loss = loss.float()

        # measure accuracy using the Variables' underlying tensors
        prec1 = accuracy(output_cuda_var_float.data, target_cuda_var.data)[0]
        losses.update(loss.item(), input.size(0))
        accuracies.update(prec1, input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.print_allow and i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(training_set_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=accuracies))

    # return average loss and accuracy
    return losses.avg, accuracies.avg
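The train/validate pair above is normally driven by an outer epoch loop; a hypothetical sketch, assuming model, train_loader, val_loader, criterion, optimizer, and num_epochs are set up elsewhere:

best_acc = 0.0
for epoch in range(num_epochs):
    train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch)
    val_loss, val_acc = validate(val_loader, model, criterion)
    if val_acc > best_acc:  # keep the best model seen so far
        best_acc = val_acc
        torch.save(model.state_dict(), "best_model.pth")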
def train_epoch(data_loader,
                model,
                criterion,
                optimizer,
                mean_optimizer=None,
                _WEIGHT_DECAY=5e-4,
                print_freq=args.print_freq):
    losses = AverageMeter()
    percent_acc = AverageMeter()
    means_param = AverageMeter()
    model.train()
    time_now = time.time()

    for batch_idx, (data, target) in enumerate(data_loader):
        target = target.long()
        target_backup = target
        data_backup = data
        if torch.cuda.is_available():
            data = data.cuda()
            target = target.cuda()
        #print('target: {}'.format(target))
        #print('onehot_target: {}'.format(one_hot(target)))
        output = model(data, target)
        #print("output.shape: {}".format(output.shape))
        ################## main parts of lgm loss
        if args.loss == 'lgm':
            logits, likelihood_regloss, means = criterion(output, target)
            ################## main parts of lgm loss

            ################## l2 regularization loss for loss
            # l2_criterion = nn.MSELoss(size_average=False)
            """
            l2_loss = 0
            for param in model.parameters():
                l2_loss += torch.norm(param)
            """
            ################## l2 regularization loss

            ################## softmax using logits.
            # print("target.shape: {}, logits.shape: {}".format(target.shape, logits.shape))
            # logits = torch.max(logits, 0)
            # print("max logits.shape ", logits.shape)

            # below is some code for debug purpose
            """
            print('softmax_logits: {}'.format(torch.nn.functional.softmax(logits)))
            print('log_softmax_logits: {}'.format(torch.log(torch.nn.functional.softmax(logits))))
            """
            # print('torch.nn.functional.log_softmax: {}'.format(torch.nn.functional.log_softmax(logits)))
            """
            target_onehot = one_hot(target_backup)
            if torch.cuda.is_available():
                target_onehot = target_onehot.cuda()
            mean_loss = torch.sum(- target_onehot * torch.nn.functional.log_softmax(logits, -1), -1)
            mean_loss = torch.mean(mean_loss.float())
            """
            ##################
            # total loss
            # print('mean: {}'.format(means))
            # print('Logits: {}, Cross_entropy_logits: {}'.format(logits, mean_loss))
            # print('Likelihood Regloss: {}, l2_norm: {}'.format(likelihood_regloss, l2_loss))

            loss = logits  # + likelihood_regloss#+ _WEIGHT_DECAY * l2_loss + likelihood_regloss

        elif args.loss == 'ce':
            loss = criterion(output, target)

        losses.update(loss.item(), data.size(0))

        acc = accuracy(output, target)
        percent_acc.update(acc, data.size(0))

        # compute gradient and do SGD step
        #means_param.update(means.item(), data.size(0))

        if args.mean == 'true':
            mean_optimizer.zero_grad()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if args.mean == 'true':
            mean_optimizer.step()

        time_end = time.time() - time_now

        if batch_idx % print_freq == 0:
            print('Training Round: {}, Time: {}'.format(
                batch_idx, np.round(time_end, 2)))
            #print('mean: {}'.format(means))
            #print('Logits: {}, Cross_entropy_logits: {}'.format(logits, mean_loss))
            if args.loss == 'lgm':
                print('logit: {}'.format(logits))
                #print('Likelihood Regloss: {}, l2_norm: {}, mean_loss: {}'.format(likelihood_regloss, _WEIGHT_DECAY * l2_loss, mean_loss))
            if args.loss == 'ce':
                print('Loss: {}, acc: {}'.format(loss, acc))

    #if args.loss == 'lgm':
    #    print("last batch mean: {}".format(means))
    return losses, percent_acc
Example No. 16
def test(test_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(test_loader):
        if USE_CUDA:
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        with torch.no_grad():
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        value, predicted = torch.max(output.data, 1)
        # the per-sample report below assumes a batch size of 1
        #        print('\nGroundTruth: ', ' '.join('%5s' % classes[target_var.item()]))
        print(
            'Species: ',
            ''.join('%5s' % classes[predicted.item()]),
            'Confidence: %0.2f%%' % value.item(),
        )
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    return top1.avg
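Because of the predicted.item() and value.item() calls, this test() prints per-sample predictions and therefore expects batches of size 1; a hypothetical call, assuming test_dataset, model, and criterion exist:

import torch

test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)
top1 = test(test_loader, model, criterion)
print('Top-1 accuracy: %.3f' % top1)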