def infer(valid_queue, model, criterion, drop_path_prob):
    """Run one full validation pass and return (top-1 accuracy avg, loss avg).

    Args:
        valid_queue: iterable of (input, target) mini-batches (a DataLoader).
        model: network; called as model(input, drop_path_prob) and expected to
            return (logits, aux_logits) — the auxiliary head is ignored here.
        criterion: loss function applied to (logits, target).
        drop_path_prob: drop-path probability forwarded to the model.

    Returns:
        (top1.avg, objs.avg): running-average top-1 accuracy and loss over
        the whole validation set.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    # torch.no_grad() replaces the removed Variable(..., volatile=True)
    # idiom: no autograd graph is built during evaluation.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
            logits, _ = model(input, drop_path_prob)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # .item() replaces the pre-0.4 `loss.data[0]`, which raises on
            # 0-dim tensors in modern PyTorch.
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            if step % 50 == 0:
                print('valid {} {} {} {}'.format(step, objs.avg, top1.avg, top5.avg))
    return top1.avg, objs.avg
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch and return (top-1 accuracy avg, loss avg).

    Args:
        train_queue: iterable of (input, target) mini-batches (a DataLoader).
        model: network; called as model(input) and expected to return
            (logits, aux_logits).
        criterion: loss function applied to (logits, target).
        optimizer: optimizer stepping model.parameters().

    Returns:
        (top1.avg, objs.avg): running-average top-1 accuracy and loss over
        the epoch.

    Note:
        Reads the module-level flag AUXILIARY; when truthy, the auxiliary
        head's loss is added with a fixed weight of 0.4.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda()
        # non_blocking=True replaces `async=True`, which is a SyntaxError on
        # Python >= 3.7 (`async` became a keyword).
        target = target.cuda(non_blocking=True)
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if AUXILIARY:
            loss_aux = criterion(logits_aux, target)
            loss += 0.4 * loss_aux
        loss.backward()
        # clip_grad_norm_ is the in-place-named replacement for the
        # deprecated nn.utils.clip_grad_norm.
        nn.utils.clip_grad_norm_(model.parameters(), 5)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        # .item() replaces the pre-0.4 `loss.data[0]` 0-dim indexing.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        if step % 50 == 0:
            print(loss.item())
            print('train {} {} {} {}'.format(step, objs.avg, top1.avg, top5.avg))
    return top1.avg, objs.avg