def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs = inputs.cuda()
            # `async` is a reserved word since Python 3.7; the kwarg is now non_blocking
            targets = targets.cuda(non_blocking=True)
        # tensors are autograd-aware since PyTorch 0.4; no Variable wrapper needed

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))  # loss.data[0] fails on PyTorch >= 0.5
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
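The snippets above and below all lean on a few helpers that are imported elsewhere: time, torch, the Bar progress bar from the "progress" package, and an AverageMeter that tracks running averages. A minimal sketch of those assumed pieces, modeled on the common PyTorch ImageNet-example meter (a sketch, not the projects' actual definitions):

import time

import torch
from progress.bar import Bar  # pip install progress


class AverageMeter(object):
    """Tracks the latest value, running sum, count, and average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        # val is a per-batch average; n is the number of samples it covers
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count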
Example #2
def train(trainloader, model, model_index, criterion, optimizer, epoch,
          use_cuda):
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        # tensors are autograd-aware since PyTorch 0.4; no Variable wrapper needed

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # print('Train for model {}: {}/{}'.format(model_index + 1, batch_idx + 1, len(trainloader)))

    return (losses.avg, top1.avg)
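For context, a typical driver for the train() above and the test() shown next in Example #3 just loops over epochs and logs the returned averages. A hypothetical sketch (loader and model construction are assumed to happen elsewhere):

import torch

def run(trainloader, testloader, model, n_epochs, lr, use_cuda):
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    for epoch in range(n_epochs):
        # model_index is only used for logging, so 0 is fine for a single model
        train_loss, train_acc = train(trainloader, model, 0, criterion,
                                      optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, 0, criterion, epoch,
                                   use_cuda)
        print('epoch {}: train loss {:.4f} acc {:.2f} | test loss {:.4f} acc {:.2f}'
              .format(epoch, train_loss, train_acc, test_loss, test_acc))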
Example #3
def test(testloader, model, model_index, criterion, epoch, use_cuda):
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(testloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()

        # compute output (volatile=True was removed; torch.no_grad() is its replacement)
        with torch.no_grad():
            outputs = model(inputs)
            loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # print('Test for model {}: {}/{}'.format(model_index + 1, batch_idx + 1, len(testloader)))
    return (losses.avg, top1.avg)
Example #4
def compute_accuracy(outputs, targets):
    acc = accuracy(outputs, targets, topk=(1, ))
    return acc[0].item()  # acc[0][0] breaks on PyTorch >= 0.5 (0-dim tensor)
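The accuracy helper these routines call is the standard top-k precision function from the PyTorch ImageNet example; a sketch for reference (the real definition lives elsewhere in these projects):

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    # top-k predicted class indices, transposed to shape (maxk, batch_size)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # a sample counts as correct@k if the target is among its top k guesses
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res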
Example #5
File: run.py Project: tund/HydraNet
def test(testloader, model, criterion, epoch, use_cuda):
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output: average the logits over all heads
            # (head outputs are already on the model's device; the extra
            #  .cuda() calls would break CPU-only runs)
            outputs = model(inputs)
            outputs_sum = outputs[0]
            for i in range(1, args.n_heads):
                outputs_sum = outputs_sum + outputs[i]
            outputs = outputs_sum / args.n_heads
            loss = criterion(outputs, targets)
            loss = torch.mean(loss)  # criterion presumably returns per-sample losses

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            # crude version gate: PyTorch < 0.5 indexed 0-dim tensors with [0]
            if float(torch.__version__[:3]) < 0.5:
                losses.update(loss.data[0], inputs.size(0))
                top1.update(prec1[0], inputs.size(0))
                top5.update(prec5[0], inputs.size(0))
            else:
                losses.update(loss.item(), inputs.size(0))
                top1.update(prec1.item(), inputs.size(0))
                top5.update(prec5.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    wandb.log(
        {
            "top1 test": top1.avg,
            "top5 test": top5.avg,
            "losses test": losses.avg
        },
        step=epoch)
    return (losses.avg, top1.avg)
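Examples #5 and #6 assume a HydraNet-style model whose forward pass returns one logit tensor per head (args.n_heads of them) computed over a shared trunk. A minimal, hypothetical sketch of such a module, for illustration only:

import torch.nn as nn

class MultiHeadNet(nn.Module):
    # Hypothetical multi-head classifier: shared trunk, n_heads linear heads.
    def __init__(self, trunk, feat_dim, num_classes, n_heads):
        super(MultiHeadNet, self).__init__()
        self.trunk = trunk
        self.heads = nn.ModuleList(
            [nn.Linear(feat_dim, num_classes) for _ in range(n_heads)])

    def forward(self, x):
        features = self.trunk(x)
        # one logit tensor per head, as the train()/test() routines expect
        return [head(features) for head in self.heads]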
Example #6
File: run.py Project: tund/HydraNet
def train(trainloader, model, criterion, optimizer, epoch, sample_wts,
          use_cuda):
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = [AverageMeter() for i in range(args.n_heads)]
    losses_avg = AverageMeter()
    top1 = [AverageMeter() for i in range(args.n_heads)]
    top5 = [AverageMeter() for i in range(args.n_heads)]
    top1_avg = AverageMeter()
    top5_avg = AverageMeter()
    end = time.time()

    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs = inputs.cuda()
            # async= is invalid syntax on Python 3.7+; use non_blocking instead
            targets = targets.cuda(non_blocking=True)

        # compute output
        outputs = model(inputs)
        optimizer.zero_grad()
        for head_idx in range(args.n_heads):
            loss = criterion(outputs[head_idx], targets)
            loss = (loss * sample_wts[head_idx][:loss.shape[0]] /
                    sample_wts[head_idx][:loss.shape[0]].sum()).sum()

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs[head_idx].data,
                                    targets.data,
                                    topk=(1, 5))
            # crude version gate, as in test() above
            if float(torch.__version__[:3]) < 0.5:
                losses[head_idx].update(loss.data[0], inputs.size(0))
                top1[head_idx].update(prec1[0], inputs.size(0))
                top5[head_idx].update(prec5[0], inputs.size(0))
            else:
                losses[head_idx].update(loss.item(), inputs.size(0))
                top1[head_idx].update(prec1.item(), inputs.size(0))
                top5[head_idx].update(prec5.item(), inputs.size(0))

            # accumulate gradients; the shared trunk's graph must be retained
            # until the last head has backpropagated through it
            loss.backward(retain_graph=(head_idx < args.n_heads - 1))
        losses_avg.update(
            sum([h.avg for h in losses]) / len(losses), inputs.size(0))
        top1_avg.update(sum([h.avg for h in top1]) / len(top1), inputs.size(0))
        top5_avg.update(sum([h.avg for h in top5]) / len(top5), inputs.size(0))
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_avg: {loss:.4f} | top1_avg: {top1: .4f} | top5_avg: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses_avg.avg,
            top1=top1_avg.avg,
            top5=top5_avg.avg,
        )
        bar.next()
    bar.finish()
    wandb.log(
        {
            "top1": [h.avg for h in top1],
            "top1_avg": top1_avg.avg,
            "top5": [h.avg for h in top5],
            "top5_avg": top5_avg.avg,
            "losses": [h.avg for h in losses],
            "losses_avg": losses_avg.avg
        },
        step=epoch)
    return (losses_avg.avg, top1_avg.avg)
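Note that sample_wts[head_idx][:loss.shape[0]] slices per-head weight vectors by batch position, so each vector must be at least as long as the largest batch, and criterion must be built with reduction='none' for the elementwise product to make sense. One plausible way to build such weights is bagging-style resampling per head; a hypothetical sketch, not taken from the project:

import torch

def make_sample_wts(n_heads, max_batch_size, use_cuda):
    # Hypothetical: Poisson(1) bootstrap weights, one vector per head, matching
    # the sample_wts[head_idx][:batch_size] indexing used in train() above.
    wts = [torch.poisson(torch.ones(max_batch_size)).clamp(min=1e-3)
           for _ in range(n_heads)]
    if use_cuda:
        wts = [w.cuda() for w in wts]
    return wts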