Example 1
def infer(valid_queue, model, criterion):
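    # NOTE: this snippet relies on module-level names from its source file:
    # torch, args (parsed CLI options), utils (an accuracy helper), and a
    # Timer class whose tic()/toc() split-timing semantics are inferred
    # from how they are used below.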
    from taowei.torch2.utils.classif import ProgressMeter
    progress = ProgressMeter(iters_per_epoch=len(valid_queue),
                             epoch=args.epoch, split='val',
                             writer=args.writer, batch_size=args.batch_size)

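    # evaluation mode: disables dropout and freezes BatchNorm running statistics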
    model.eval()

    timer = Timer()
    timer.tic()
    for step, (input, target) in enumerate(valid_queue):
        # measure data loading time
        progress.update('data_time', timer.toc(from_last_toc=True))
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        with torch.no_grad():
            logits, _ = model(input)
            loss = criterion(logits, target)
        progress.update('forward_time', timer.toc(from_last_toc=True))

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        progress.update('loss', loss.item(), n)
        progress.update('top1', prec1.item(), n)
        progress.update('top5', prec5.item(), n)
        progress.update('batch_time', timer.toctic())

        if step % args.report_freq == 0:
            progress.log_iter_stats(iter=step, batch_size=n)

    progress.log_epoch_stats()
    return progress.stats['top1'].avg, progress.stats['top5'].avg, progress.stats['loss'].avg
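
A minimal usage sketch for Example 1 (not part of the original source): the
model constructor, the loader builder, and the global `args` are assumptions.

import torch
import torch.nn as nn

model = build_model().cuda()            # hypothetical; must return (logits, logits_aux)
criterion = nn.CrossEntropyLoss().cuda()
valid_queue = make_val_loader(args.batch_size)  # hypothetical DataLoader

top1, top5, val_loss = infer(valid_queue, model, criterion)
print('valid top1 {:.2f} top5 {:.2f} loss {:.4f}'.format(top1, top5, val_loss))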
Example 2
def train(train_queue, model, criterion, optimizer):
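    # NOTE: relies on module-level names from the source file: torch, nn,
    # args, utils, and Timer.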
    from taowei.torch2.utils.classif import ProgressMeter
    progress = ProgressMeter(iters_per_epoch=len(train_queue),
                             epoch=args.epoch,
                             epochs=args.epochs,
                             split='train',
                             writer=args.writer,
                             batch_size=args.batch_size)

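    # training mode: enables dropout and BatchNorm statistic updates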
    model.train()

    timer = Timer()
    timer.tic()
    for step, (input, target) in enumerate(train_queue):
        # measure data loading time
        progress.update('data_time', timer.toc(from_last_toc=True))

        # the deprecated torch.autograd.Variable wrappers from the original
        # are dropped; plain tensors carry autograd state since PyTorch 0.4
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
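        # optionally include the auxiliary classifier's loss,
        # scaled by args.auxiliary_weight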
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        progress.update('forward_time', timer.toc(from_last_toc=True))

        loss.backward()
        progress.update('backward_time', timer.toc(from_last_toc=True))
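        # clip the global gradient norm to args.grad_clip to stabilize training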
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        progress.update('update_time', timer.toc(from_last_toc=True))

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        progress.update('loss', loss.item(), n)
        progress.update('top1', prec1.item(), n)
        progress.update('top5', prec5.item(), n)

        # measure elapsed time
        progress.update('batch_time', timer.toctic())

        if step % args.report_freq == 0:
            progress.log_iter_stats(iter=step,
                                    batch_size=n,
                                    lr=optimizer.param_groups[0]['lr'])

    progress.log_epoch_stats(lr=optimizer.param_groups[0]['lr'])
    return progress.stats['top1'].avg, progress.stats['loss'].avg
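
A hypothetical driver loop for Example 2 (not in the original): `scheduler`
and the data loaders are assumed to be constructed elsewhere.

for epoch in range(args.epochs):
    args.epoch = epoch  # ProgressMeter reads the current epoch from args
    train_top1, train_loss = train(train_queue, model, criterion, optimizer)
    scheduler.step()
    valid_top1, valid_top5, valid_loss = infer(valid_queue, model, criterion)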
Example 3
def train(train_queue, valid_queue, model, architect, criterion, optimizer,
          lr):
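    # NOTE: like the previous examples, this snippet relies on module-level
    # names (torch, nn, args, utils, Timer); `architect` is expected to be a
    # DARTS-style Architect that updates the architecture parameters.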
    from taowei.torch2.utils.classif import ProgressMeter
    progress = ProgressMeter(iters_per_epoch=len(train_queue),
                             epoch=args.epoch,
                             epochs=args.epochs,
                             split='train',
                             writer=args.writer,
                             batch_size=args.batch_size)

    timer = Timer()
    timer.tic()
    for step, (input, target) in enumerate(train_queue):
        # measure data loading time
        progress.update('data_time', timer.toc(from_last_toc=True))

        model.train()
        n = input.size(0)

        # deprecated Variable wrappers dropped, as in the previous example
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        # get a random minibatch from the search queue with replacement;
        # note that next(iter(...)) rebuilds the DataLoader iterator on every
        # step, which is correct but expensive for multi-worker loaders
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.cuda()
        target_search = target_search.cuda(non_blocking=True)

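        # one architecture-parameter update on the held-out search batch
        # (second-order when args.unrolled is set, following DARTS)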
        architect.step(input,
                       target,
                       input_search,
                       target_search,
                       lr,
                       optimizer,
                       unrolled=args.unrolled)
        progress.update('arch_step_time', timer.toc(from_last_toc=True))

        optimizer.zero_grad()
        logits = model(input)
        loss = criterion(logits, target)
        progress.update('forward_time', timer.toc(from_last_toc=True))

        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        progress.update('backward_time', timer.toc(from_last_toc=True))
        optimizer.step()
        progress.update('update_time', timer.toc(from_last_toc=True))

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        progress.update('loss', loss.item(), n)
        progress.update('top1', prec1.item(), n)
        progress.update('top5', prec5.item(), n)

        # measure elapsed time
        progress.update('batch_time', timer.toctic())

        if step % args.report_freq == 0:
            progress.log_iter_stats(iter=step,
                                    batch_size=n,
                                    lr=optimizer.param_groups[0]['lr'])

    progress.log_epoch_stats(lr=optimizer.param_groups[0]['lr'])
    return progress.stats['top1'].avg, progress.stats['loss'].avg
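
A hypothetical driver for the search phase of Example 3 (not in the original):
the Architect, scheduler, and loaders follow assumed DARTS-style conventions.

for epoch in range(args.epochs):
    args.epoch = epoch
    lr = scheduler.get_last_lr()[0]  # current learning rate for the architect
    train_top1, train_loss = train(train_queue, valid_queue, model, architect,
                                   criterion, optimizer, lr)
    scheduler.step()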