Code example #1
def train(train_loader, model, reglog, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # keep the model in eval mode so batch-norm running statistics stay frozen
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        learning_rate_decay(optimizer, len(train_loader) * epoch + i, args.lr)

        # `async` became a reserved word in Python 3.7; use non_blocking instead
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input.cuda())
        target_var = torch.autograd.Variable(target)

        # compute output
        output = forward(input_var, model, reglog.conv)
        output = reglog(output)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and i % 100 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))

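        # cap each epoch at roughly 2000 mini-batches to bound training time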
        if i > 2000:
            break
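Example #1 relies on helpers defined elsewhere in its project (`AverageMeter`, `accuracy`, `forward`, `learning_rate_decay`, and a module-level `args`). As a rough sketch only, following the widely used PyTorch ImageNet-example pattern rather than the project's exact code, `AverageMeter` and `accuracy` might look like:

class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy in percent; returns one 1-element tensor per k."""
    maxk = max(topk)
    batch_size = target.size(0)
    # indices of the k highest-scoring classes for each sample
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # keepdim=True keeps a 1-element tensor so callers can index prec1[0]
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res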
Code example #2
def train(train_loader, model, reglog, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # keep the model in eval mode so batch-norm running statistics stay frozen
    model.eval()

    end = time.time()
    prec1 = 0
    train_total = 0
    for i, (input, target) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        learning_rate_decay(optimizer, len(train_loader) * epoch + i, args.lr)

        target = target.cuda()
        input_var = torch.autograd.Variable(input.cuda())
        target_var = torch.autograd.Variable(target)

        # compute output
        output = forward(input_var, model, reglog.conv)
        output = reglog(output)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        _, train_predicted = torch.max(output.data, 1)
        prec1 += (train_predicted == target_var.data).sum()
        train_total += target.size(0)
        # loss.data[0] indexing was removed; item() gives the scalar loss
        losses.update(loss.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    # prec1 is an integer tensor; convert before dividing to get a float ratio
    print(prec1.item() / train_total)
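Examples #1 and #2 both call `learning_rate_decay`, which is not shown above. A minimal sketch, modeled on the 1/sqrt(t) schedule from DeepCluster's eval_linear.py (an assumption; these projects may decay the rate differently):

import numpy as np

def learning_rate_decay(optimizer, t, lr_0):
    # lr_t = lr_0 / sqrt(1 + lr_0 * weight_decay * t), per parameter group
    for param_group in optimizer.param_groups:
        lr = lr_0 / np.sqrt(1 + lr_0 * param_group['weight_decay'] * t)
        param_group['lr'] = lr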
Code example #3
File: eval_linear.py  Project: GG-yuki/bugs
def train(epochs, train_loader, model, criterion, optimizer, val_loader):
    model_name = model.__class__.__name__
    max_acc = 0
    # keep the model in eval mode so batch-norm running statistics stay frozen
    model.eval()

    for epoch in range(epochs):
        train_correct = 0
        train_total = 0
        running_loss = 0.0
        for i, (train_input, target) in enumerate(train_loader):
            # adjust learning rate
            learning_rate_decay(optimizer,
                                len(train_loader) * epoch + i, args.lr)

            input_var = torch.autograd.Variable(train_input.cuda())
            target_var = torch.autograd.Variable(target.cuda())
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            # measure accuracy and record loss
            _, train_predicted = torch.max(output.data, 1)
            train_correct += (train_predicted == target_var.data).sum()
            running_loss += loss.item()
            train_total += target.size(0)

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        train_acc = 100.0 * train_correct.item() / train_total
        print('train %d epoch loss: %.3f  acc: %.3f' %
              (epoch + 1, running_loss / train_total, train_acc))
        with open('./experiment_record(first)/' + model_name + '/result.txt',
                  'a') as f:
            f.write('train %d epoch loss: %.3f  acc: %.3f\n' %
                    (epoch + 1, running_loss / train_total, train_acc))

        model.eval()
        test_total = 0
        test_loss = 0
        test_correct = 0

        with torch.no_grad():
            for i, (test_input, target) in enumerate(val_loader):
                input_var = torch.autograd.Variable(test_input.cuda())
                target_var = torch.autograd.Variable(target.cuda())
                output = model(input_var)
                _, test_predicted = torch.max(output.data, 1)
                loss = criterion(output, target_var)
                test_loss += loss.item()
                test_total += target.size(0)
                test_correct += (test_predicted == target_var.data).sum()

        # convert the correct-count tensor to a Python float before dividing
        acc = 100.0 * test_correct.item() / test_total
        if max_acc < acc:
            max_acc = acc

        print('test  %d epoch loss: %.3f  acc: %.3f' %
              (epoch + 1, test_loss / test_total, acc))
        with open('./experiment_record(first)/' + model_name + '/result.txt',
                  'a') as f:
            f.write('test  %d epoch loss: %.3f  acc: %.3f\n' %
                    (epoch + 1, test_loss / test_total, acc))

    return max_acc
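A hypothetical call site for example #3. Everything below (CIFAR-10, ResNet-18, batch size, learning rate) is an illustrative assumption, not taken from GG-yuki/bugs; it also presumes `learning_rate_decay` and a module-level `args.lr` exist, as sketched earlier:

import os
from argparse import Namespace

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

args = Namespace(lr=0.01)  # train() reads args.lr at module scope

transform = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.CIFAR10('./data', train=True,
                                         download=True, transform=transform)
val_set = torchvision.datasets.CIFAR10('./data', train=False,
                                       download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=256,
                                           shuffle=True, num_workers=2)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=256,
                                         num_workers=2)

model = torchvision.models.resnet18(num_classes=10).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                            momentum=0.9, weight_decay=1e-4)

# train() appends to ./experiment_record(first)/<class name>/result.txt,
# so the directory must exist before the first epoch completes
os.makedirs('./experiment_record(first)/' + model.__class__.__name__,
            exist_ok=True)

best_acc = train(10, train_loader, model, criterion, optimizer, val_loader)
print('best test acc: %.3f' % best_acc)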