Example #1
def test(test_loader, model, criterion, model_type):
    model.eval()
    # Prepare value counters and timers
    batch_time, losses = AverageMeter(), AverageMeter()

    end = time.time()
    c = 0
    for i, (X_masked, Mask, Y_unmasked) in enumerate(test_loader):
        X_masked = X_masked.permute(0, 3, 2, 1)
        Mask = Mask.permute(0, 3, 2, 1)
        Y_unmasked = Y_unmasked.permute(0, 3, 2, 1)

        if use_gpu:
            X_masked, Mask, Y_unmasked = (
                X_masked.cuda(), Mask.cuda(), Y_unmasked.cuda())

        X_masked = X_masked.float()
        Mask = Mask.float()
        Y_unmasked = Y_unmasked.float()

        if model_type == 'PartialConv':
            output = model((X_masked, Mask))
        else:
            output = model(X_masked)

        loss = criterion(output, Y_unmasked)
        losses.update(loss.item(), X_masked.size(0))

        # Save images to file

        for j in range(min(10, output.size(0))):  # save at most 10 images per batch
            img1 = output[j]
            img2 = X_masked[j]

            save_name = 'img-{}.jpg'.format(c)
            if model_type == 'PartialConv':
                save_image(img1, "PredictedOutput_PartialConv/" + save_name)
            else:
                save_image(img1, "PredictedOutput_VanillaCNN/" + save_name)

            save_image(img2, "InputImage/" + save_name)
            c += 1
        # Record time to do forward passes and save images
        batch_time.update(time.time() - end)
        end = time.time()

        # Print model accuracy -- in the code below, val refers to both value and validation
        if i % 10 == 9:
            print(model_type + ' Testing: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      i, len(test_loader), batch_time=batch_time, loss=losses))

    print('Finished testing.')
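
Every example here leans on an AverageMeter helper that none of the excerpts define. Below is a minimal sketch consistent with how it is used (.val, .avg, .sum, .count, update(value, n), reset()), assuming the common PyTorch-examples implementation; note that Examples #12, #14, and #21 construct AverageMeter(epochs, len(dataloader)) and therefore use a different variant.

class AverageMeter(object):
    """Tracks the latest value and a running average of a statistic."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # val is the per-batch statistic; n is how many samples it covers
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count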
Example #2
def trainCNN(train_loader, model, criterion, optimizer, epoch):
    print('Starting Training Epoch {}'.format(epoch))
    model.train()

    # Prepare value counters and timers
    batch_time, losses = AverageMeter(), AverageMeter()
    end = time.time()

    for i, (x, y) in enumerate(train_loader):
        x = x.unsqueeze(1)
        y = y.unsqueeze(1)

        # x = x.permute(0, 3, 1, 2)
        # y = y.permute(0, 3, 1, 2)
        # Use GPU if available
        if use_gpu:
            x, y = x.cuda(), y.cuda()

        # Run Forward Pass
        output_y = model(x)
        # print(y)
        # print(output_y)
        # break
        loss = criterion(output_y, y)
        losses.update(loss.item(), x.size(0))

        # Compute gradient and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Record time to do backward and forward passes
        batch_time.update(time.time() - end)
        end = time.time()

        # Print model accuracy
        if i % 5 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      loss=losses))

    print('Finished training epoch {}'.format(epoch))
    return losses.avg
Example #3
def validation(valid_loader, model, criterion, epoch, model_type):
    model.eval()
    # Prepare value counters and timers
    batch_time, losses = AverageMeter(), AverageMeter()

    end = time.time()

    for i, (X_masked, Mask, Y_unmasked) in enumerate(valid_loader):
        X_masked = X_masked.permute(0, 3, 2, 1)
        Mask = Mask.permute(0, 3, 2, 1)
        Y_unmasked = Y_unmasked.permute(0, 3, 2, 1)

        if use_gpu:
            X_masked, Mask, Y_unmasked = (
                X_masked.cuda(), Mask.cuda(), Y_unmasked.cuda())

        X_masked = X_masked.float()
        Mask = Mask.float()
        Y_unmasked = Y_unmasked.float()

        if model_type == 'PartialConv':
            output = model((X_masked, Mask))
        else:
            output = model(X_masked)

        loss = criterion(output, Y_unmasked)
        losses.update(loss.item(), X_masked.size(0))

        # Record time to do forward passes and save images
        batch_time.update(time.time() - end)
        end = time.time()

        # Print model accuracy -- in the code below, val refers to both value and validation
        if i % 10 == 0:
            print(model_type + ' Validate: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch,
                      i,
                      len(valid_loader),
                      batch_time=batch_time,
                      loss=losses))

    print('Finished validation.')
    return losses.avg
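
The validation function above and the train function in Example #4 are typically driven by an outer loop that tracks the best validation loss; a minimal sketch, where the epoch count and checkpoint path are assumptions:

best_loss = float('inf')
for epoch in range(num_epochs):  # num_epochs is an assumed hyperparameter
    train(train_loader, model, criterion, optimizer, epoch, model_type)
    with torch.no_grad():
        val_loss = validation(valid_loader, model, criterion, epoch, model_type)
    if val_loss < best_loss:
        best_loss = val_loss
        # hypothetical checkpoint path
        torch.save(model.state_dict(), 'best-{}.pth'.format(model_type))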
Example #4
def train(train_loader, model, criterion, optimizer, epoch, model_type):
    print('Starting training epoch {}'.format(epoch))
    model.train()

    batch_time, losses = AverageMeter(), AverageMeter()
    end = time.time()

    for i, (X_masked, Mask, Y_unmasked) in enumerate(train_loader):
        X_masked = X_masked.permute(0, 3, 2, 1)
        Mask = Mask.permute(0, 3, 2, 1)
        Y_unmasked = Y_unmasked.permute(0, 3, 2, 1)

        if use_gpu:
            X_masked, Mask, Y_unmasked = (
                X_masked.cuda(), Mask.cuda(), Y_unmasked.cuda())

        X_masked = X_masked.float()
        Mask = Mask.float()
        Y_unmasked = Y_unmasked.float()

        if model_type == 'PartialConv':
            output = model((X_masked, Mask))
        else:
            output = model(X_masked)

        loss = criterion(output, Y_unmasked)
        losses.update(loss.item(), X_masked.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Record time for the forward and backward passes
        batch_time.update(time.time() - end)
        end = time.time()

        # Print model accuracy -- in the code below, val refers to value, not validation
        if i % 20 == 0:
            print(model_type + ' Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      loss=losses))

    print('Finished training epoch {}'.format(epoch))
Example #5
    def eval_epoch(self, loader, args, writer):
        """
        Test the model using the given data loader
        """
        batch_time = AverageMeter()
        losses = AverageMeter()

        criterion = nn.NLLLoss()

        # switch to evaluate mode
        self.eval()
        step = 0  # single standalone evaluation pass, no epoch offset

        total = 0
        correct = 0

        with torch.no_grad():
            end = time.time()
            for i, (x_batch, y_batch) in enumerate(loader):
                x_batch = x_batch.to(self.device)
                y_batch = y_batch.to(self.device)

                prediction = self.forward(x_batch)

                loss = criterion(prediction, y_batch)

                _, predicted = torch.max(prediction.data, 1)
                total += y_batch.size(0)
                correct += (predicted == y_batch).sum().item()

                # measure accuracy and record loss
                losses.update(loss.item(), args.batch_size)

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i % args.print_freq == 0 or i == len(loader) - 1:
                    writer.add_scalar('eval/loss', losses.val, step + i)
                    print('Test: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                              i,
                              len(loader) - 1,
                              batch_time=batch_time,
                              loss=losses),
                          flush=True)

        print('Accuracy of the network on the test images: {accuracy:.4f}%'.
              format(accuracy=100 * correct / total))
Example #6
    def validate(val_loader,
                 model,
                 criterion,
                 save_images,
                 epoch,
                 user_input=False):
        model.eval()

        # Prepare value counters and timers
        batch_time, data_time, losses = (
            AverageMeter(), AverageMeter(), AverageMeter())

        end = time.time()
        already_saved_images = False
        for i, (input_gray, input_ab, target,
                input_ab_mean) in enumerate(val_loader):
            data_time.update(time.time() - end)

            # Use GPU
            if use_gpu:
                input_gray, input_ab, target, input_ab_mean = (
                    input_gray.cuda(), input_ab.cuda(),
                    target.cuda(), input_ab_mean.cuda())

            # Run model and record loss
            output_ab = model(input_gray)  # throw away class predictions
            loss = criterion(output_ab, input_ab)
            losses.update(loss.item(), input_gray.size(0))

            # Save val images to file
            if save_images and not already_saved_images:
                already_saved_images = True
                for j in range(min(len(output_ab),
                                   10)):  # save at most 10 images
                    save_path = {
                        'grayscale':
                        '../blue_cis6930/rishab.lokray/outputs/gray/',
                        'colorized':
                        '../blue_cis6930/rishab.lokray/outputs/color/'
                    }
                    save_name = 'img-{}-epoch-{}.jpg'.format(
                        i * val_loader.batch_size + j, epoch)
                    to_rgb(input_gray[j].cpu(),
                           ab_input=output_ab[j].detach().cpu(),
                           save_path=save_path,
                           save_name=save_name)

            # Record time to do forward passes and save images
            batch_time.update(time.time() - end)
            end = time.time()

            # Print model accuracy -- in the code below, val refers to both value and validation

            print('Validate: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses))

        print('Finished validation.')
        return losses.avg
Example #7
def evaluate(loader, model, criterion, epoch, writer):
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()
    step = epoch * len(loader)
    with torch.no_grad():
        end = time.time()
        for i, (img, speed, target, mask) in enumerate(loader):
            img = img.cuda(args.gpu, non_blocking=True)
            speed = speed.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            mask = mask.cuda(args.gpu, non_blocking=True)

            branches_out, pred_speed = model(img, speed)

            mask_out = branches_out * mask
            branch_loss = criterion(mask_out, target) * 4
            speed_loss = criterion(pred_speed, speed)

            loss = args.branch_weight * branch_loss + \
                args.speed_weight * speed_loss

            # measure accuracy and record loss
            losses.update(loss.item(), args.batch_size)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0 or i == len(loader) - 1:
                writer.add_scalar('eval/loss', losses.val, step+i)
                output_log(
                  'Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  .format(
                      i, len(loader), batch_time=batch_time,
                      loss=losses), logging)
    return losses.avg
Example #8
    def train(train_loader, model, criterion, optimizer, epoch):
        print('Starting training epoch {}'.format(epoch))
        model.train()

        # Prepare value counters and timers
        batch_time, data_time, losses = (
            AverageMeter(), AverageMeter(), AverageMeter())

        end = time.time()
        for i, (input_gray, input_ab, target,
                input_ab_mean) in enumerate(train_loader):

            # Use GPU if available
            if use_gpu:
                input_gray, input_ab, target, input_ab_mean = (
                    input_gray.cuda(), input_ab.cuda(),
                    target.cuda(), input_ab_mean.cuda())

            # Record time to load data (above)
            data_time.update(time.time() - end)
            print("USEGPu", use_gpu)
            print(type(input_gray))
            print(type(input_ab_mean))
            # Run forward pass
            output_ab = model(input_gray)
            loss = criterion(output_ab, input_ab_mean)
            losses.update(loss.item(), input_gray.size(0))

            # Compute gradient and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Record time to do forward and backward passes
            batch_time.update(time.time() - end)
            end = time.time()

            # Print model accuracy -- in the code below, val refers to value, not validation
            if i % 5 == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          epoch,
                          i,
                          len(train_loader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses))
        print('Finished training epoch {}'.format(epoch))
        return losses.avg
Example #9
def validate(val_loader, model, criterion):
    model.eval()
    all_acc = []
    all_acc_top5 = []
    all_loss = []

    if args.model == "Elastic_InceptionV3":
        for ix in range((num_outputs - 1)):
            all_loss.append(AverageMeter())
            all_acc.append(AverageMeter())
            all_acc_top5.append(AverageMeter())
    else:
        for ix in range(num_outputs):
            all_loss.append(AverageMeter())
            all_acc.append(AverageMeter())
            all_acc_top5.append(AverageMeter())

    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        losses = 0

        outputs = model(input_var)
        with torch.no_grad():
            for ix in range(len(outputs)):
                loss = criterion(outputs[ix], target_var)
                all_loss[ix].update(loss.item(), input.size(0))
                losses += loss

                prec1 = accuracy(outputs[ix].data, target)
                all_acc[ix].update(prec1[0].item(), input.size(0))

                # top 5 accuracy
                prec5 = accuracy(outputs[ix].data, target, topk=(5, ))
                all_acc_top5[ix].update(prec5[0].item(), input.size(0))
    accs = []
    ls = []
    accs_top5 = []
    for i, j, k in zip(all_acc, all_loss, all_acc_top5):
        accs.append(float(100 - i.avg))
        ls.append(j.avg)
        accs_top5.append(float(100 - k.avg))
    print("validation top 5 error: ", accs_top5)
    return accs, ls, accs_top5
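
Several examples (e.g. #9, #18, #22) call an accuracy helper that returns top-k precision as percentages. A sketch matching the usual PyTorch ImageNet-example signature these excerpts appear to assume:

def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the specified values of k, as percentages."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)  # indices of the top-k classes
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res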
Example #10
    def validate(val_loader,
                 model,
                 criterion,
                 save_images,
                 epoch,
                 user_input=False):
        model.eval()

        # Prepare value counters and timers
        batch_time, data_time, losses = (
            AverageMeter(), AverageMeter(), AverageMeter())

        end = time.time()
        already_saved_images = False
        for i, (input_gray, input_ab, target,
                input_ab_mean) in enumerate(val_loader):
            data_time.update(time.time() - end)

            # Use GPU
            if use_gpu:
                input_gray, input_ab, target, input_ab_mean = (
                    input_gray.cuda(), input_ab.cuda(),
                    target.cuda(), input_ab_mean.cuda())

            # Run model and record loss
            output_ab = model(input_gray)  # throw away class predictions
            loss = criterion(output_ab, input_ab_mean)
            losses.update(loss.item(), input_gray.size(0))

            # Record time to do forward passes and save images
            batch_time.update(time.time() - end)
            end = time.time()

            # Print model accuracy -- in the code below, val refers to both value and validation

            print('Validate: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses))

        print('Finished validation.')
        return losses.avg
Example #11
def train(loader, model, optimizer, epoch, writer):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    iou = AverageMeter()   # semantic IoU
    iou_c = AverageMeter() # contour IoU
    iou_m = AverageMeter() # marker IoU
    print_freq = config['train'].getfloat('print_freq')
    only_contour = config['contour'].getboolean('exclusive')
    weight_map = config['param'].getboolean('weight_map')
    model_name = config['param']['model']
    with_contour = config.getboolean(model_name, 'branch_contour')
    with_marker = config.getboolean(model_name, 'branch_marker')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Sets the module in training mode.
    model.train()
    end = time.time()
    n_step = len(loader)
    for i, data in enumerate(loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # split sample data
        inputs = data['image'].to(device)
        labels = data['label'].to(device)
        labels_c = data['label_c'].to(device)
        labels_m = data['label_m'].to(device)
        # get loss weight
        weights = None
        if weight_map and 'weight' in data:
            weights = data['weight'].to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward step
        outputs = model(inputs)
        if with_contour and with_marker:
            outputs, outputs_c, outputs_m = outputs
        elif with_contour:
            outputs, outputs_c = outputs
        # compute loss
        if only_contour:
            loss = contour_criterion(outputs, labels_c)
        else:
            # weight_criterion equals to segment_criterion if weights is none
            loss = focal_criterion(outputs, labels, weights)
            if with_contour:
                loss += focal_criterion(outputs_c, labels_c, weights)
            if with_marker:
                loss += focal_criterion(outputs_m, labels_m, weights)
        # compute gradient and do backward step
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # measure accuracy and record loss
        # NOT instance-level IoU in training phase, for better speed & instance separation handled in post-processing
        losses.update(loss.item(), inputs.size(0))
        if only_contour:
            batch_iou = iou_mean(outputs, labels_c)
        else:
            batch_iou = iou_mean(outputs, labels)
        iou.update(batch_iou, inputs.size(0))
        if with_contour:
            batch_iou_c = iou_mean(outputs_c, labels_c)
            iou_c.update(batch_iou_c, inputs.size(0))
        if with_marker:
            batch_iou_m = iou_mean(outputs_m, labels_m)
            iou_m.update(batch_iou_m, inputs.size(0))
        # log to summary
        #step = i + epoch * n_step
        #writer.add_scalar('training/loss', loss.item(), step)
        #writer.add_scalar('training/batch_elapse', batch_time.val, step)
        #writer.add_scalar('training/batch_iou', iou.val, step)
        #writer.add_scalar('training/batch_iou_c', iou_c.val, step)
        #writer.add_scalar('training/batch_iou_m', iou_m.val, step)
        if (i + 1) % print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time: {batch_time.avg:.2f} (io: {data_time.avg:.2f})\t'
                'Loss: {loss.val:.4f} (avg: {loss.avg:.4f})\t'
                'IoU: {iou.avg:.3f} (Contour: {iou_c.avg:.3f}, Marker: {iou_m.avg:.3f})\t'
                .format(
                    epoch, i, n_step, batch_time=batch_time,
                    data_time=data_time, loss=losses, iou=iou, iou_c=iou_c, iou_m=iou_m
                )
            )
    # end of loop, dump epoch summary
    writer.add_scalar('training/epoch_loss', losses.avg, epoch)
    writer.add_scalar('training/epoch_iou', iou.avg, epoch)
    writer.add_scalar('training/epoch_iou_c', iou_c.avg, epoch)
    writer.add_scalar('training/epoch_iou_m', iou_m.avg, epoch)
    return iou.avg # return epoch average iou
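
iou_mean is not defined in this excerpt. A plausible minimal sketch for binary masks, assuming the model outputs per-pixel probabilities that can be thresholded at 0.5:

def iou_mean(outputs, labels, threshold=0.5, eps=1e-7):
    # outputs: (N, 1, H, W) probabilities; labels: (N, 1, H, W) binary masks
    preds = (outputs > threshold).float()
    inter = (preds * labels).sum(dim=(1, 2, 3))
    union = preds.sum(dim=(1, 2, 3)) + labels.sum(dim=(1, 2, 3)) - inter
    return ((inter + eps) / (union + eps)).mean().item()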
Example #12
c = cfg.getfloat('default', 'c')
lag = cfg.getint('default', 'lag')

data_src = cfg.get('default', 'data_src')
log_src = cfg.get('default', 'log_src')

# Dataset
dataset = IndexDataset(os.path.join(data_src, 'futures', 'train'), lag)
dataloader = DataLoader(dataset, shuffle=False, batch_size=1)

# Models
ddrl = DDRL(lag)

# Tools
optimizer = torch.optim.Adam(ddrl.parameters())
reward_meter = AverageMeter(epochs, len(dataloader))

# Training Phase
for e in range(epochs):
    with tqdm(total=len(dataloader)) as progress_bar:
        for i, (returns, fragments) in enumerate(dataloader):
            # Computing actions by using FDDR
            delta = ddrl(fragments).double().squeeze(-1)

            # Computing reward
            pad_delta = F.pad(delta, [1, 0])
            delta_diff = (pad_delta[:, 1:] - pad_delta[:, :-1])
            reward = torch.sum(delta * returns - c * torch.abs(delta_diff))

            # Updating FDDR
            optimizer.zero_grad()
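
The excerpt is cut off after optimizer.zero_grad(). A sketch of the presumable rest of the update, assuming the objective is gradient ascent on the reward (minimize its negative):

            loss = -reward  # maximize the reward by minimizing its negative
            loss.backward()
            optimizer.step()
            progress_bar.update()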
Example #13
def validate(args, epoch, model, loader, criterion, logger):
    steps = len(loader)
    local_loss = AverageMeter()
    local_acc = AverageMeter()
    local_recall = AverageMeter()
    aver_loss = AverageMeter()
    aver_acc = AverageMeter()
    aver_recall = AverageMeter()

    model.eval()
    if args.verbose:
        logger.info("Validating")

    with torch.no_grad():
        for i, (images, targets) in enumerate(loader, start=1):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                targets = targets.cuda(args.gpu, non_blocking=True)

            outputs = model(images)
            if args.multi:
                outputs = torch.sigmoid(outputs)
            loss = criterion(outputs, targets)

            if args.multi:
                precision, recall = calculate_metrics(
                    outputs.detach().cpu().numpy(),
                    targets.detach().cpu().numpy(), args.threshold)
            else:
                precision = accuracy(outputs, targets)[0].item()
                recall = precision

            local_loss.update(loss.item(), images.size(0))
            local_acc.update(precision, images.size(0))
            local_recall.update(recall, images.size(0))

            if args.distributed:
                running_metrics = torch.FloatTensor(
                    [loss.item(), precision, recall]).cuda(args.gpu)
                running_metrics /= args.world_size
                dist.all_reduce(running_metrics, op=dist.ReduceOp.SUM)
                aver_loss.update(running_metrics[0].item())
                aver_acc.update(running_metrics[1].item())
                aver_recall.update(running_metrics[2].item())
            else:
                aver_loss.update(loss.item(), images.size(0))
                aver_acc.update(precision, images.size(0))
                aver_recall.update(recall, images.size(0))

            if args.verbose and i % args.log_interval == 0:
                logger.info("Epoch: [{}] [{}]/[{}]({:.2%}) "
                            "Loss: {:.4f} / {:.4f} / {:.4f} "
                            "Acc: {:.2f} / {:.2f} / {:.2f} "
                            "Recall: {:.2f} / {:.2f} / {:.2f}".format(
                                epoch, i, steps, i / steps, loss,
                                local_loss.avg, aver_loss.avg, precision,
                                local_acc.avg, aver_acc.avg, recall,
                                local_recall.avg, aver_recall.avg))

    return aver_loss.avg, aver_acc.avg, aver_recall.avg
Example #14
# streamer.transform('./Data/futures/train', './Data/fuzzy_futures/train')

# Dataset
train_dataset = FuzzyIndexDataset(
    os.path.join(data_src, 'fuzzy_futures', 'train'), lag)
train_dataloader = DataLoader(train_dataset, shuffle=False, batch_size=1)
test_dataset = FuzzyIndexDataset(
    os.path.join(data_src, 'fuzzy_futures', 'test'), lag)
test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=1)

# Models
fddr = FDDR(lag, fuzzy_degree)

# Tools
optimizer = torch.optim.Adam(fddr.parameters())
train_reward_meter = AverageMeter(epochs, len(train_dataloader))
test_reward_meter = AverageMeter(epochs, len(test_dataloader))

# Training Phase
for e in range(epochs):
    with tqdm(total=len(train_dataloader), ncols=130) as progress_bar:
        fddr.train()
        for i, (returns, fragments, mean, var) in enumerate(train_dataloader):
            # Computing actions by using FDDR
            delta = fddr(fragments, running_mean=mean,
                         running_var=var).double().squeeze(-1)

            # Computing reward
            pad_delta = F.pad(delta, [1, 0])
            delta_diff = (pad_delta[:, 1:] - pad_delta[:, :-1])
            reward = torch.sum(delta * returns - c * torch.abs(delta_diff))
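
As in Example #12, the excerpt is cut off here; the reward-ascent update sketched there (negate the reward, backward, step) presumably follows.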
Example #15
def train(train_loader, model, criterion, optimizer, epoch, print_freq):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()
    #model.apply(freeze_bn)
    dslen = len(train_loader)
    end = time.time()
    for k, (input, target) in enumerate(train_loader):
        if epoch < args.warm:
            warmup_scheduler.step()

        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec[0].item(), input.size(0))
        top5.update(prec[1].item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if 'mean' in args.weight:
            model.apply(meanweigh)  #(model)

        n_iter = (epoch) * len(train_loader) + k
        if n_iter == 1:
            os.system('nvidia-smi')
        if n_iter % print_freq == 1 and epoch < args.start_epoch + 5:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'LR: {3:.5f}\t'
                  'Time {batch_time.avg:.3f}\t'
                  'Data {data_time.avg:.3f}\t'
                  'LossEpoch {loss.avg:.4f}\t'
                  'Prec@1 {top1.avg:.3f}\t'
                  'Prec@5 {top5.avg:.3f}\t'.format(
                      epoch,
                      n_iter,
                      dslen,
                      optimizer.param_groups[0]['lr'],
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5),
                  flush=True)
            batch_time.reset()
            data_time.reset()
            losses.reset()
            top1.reset()
            top5.reset()
            #validatetrain(val_loader, model, criterion)
        elif k == dslen - 1:
            model.apply(inspect_bn)
            print('Epoch: [{0}][{1}/{2}]\t'
                  'LR: {3:.5f}\t'
                  'Time {batch_time.avg:.3f}\t'
                  'Data {data_time.avg:.3f}\t'
                  'LossEpoch {loss.avg:.4f}\t'
                  'Prec@1 {top1.avg:.3f}\t'
                  'Prec@5 {top5.avg:.3f}\t'.format(
                      epoch,
                      n_iter,
                      dslen,
                      optimizer.param_groups[0]['lr'],
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5),
                  flush=True)
Example #16
def validatetrain(val_loader, model, criterion):
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()

    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)
        with torch.no_grad():
            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec[0].item(), input.size(0))
            top5.update(prec[1].item(), input.size(0))
    model.train()
    print(' *TRAINMODE Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(
        top1=top1, top5=top5))
Example #17
def validate(val_loader, model, criterion, print_freq, colorization=False):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # target = target.cuda()
        # input = input.cuda()
        input = input.to(device, dtype=torch.float32)
        target = target.to(device, dtype=torch.float32)

        if colorization:
            input = transforms.Resize(500)(input)
            target = transforms.Resize(500)(target)
            input = input.repeat(1,3,1,1)

        with torch.no_grad():
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            if not colorization:
                prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
                top1.update(prec1, input.size(0))
                top5.update(prec5, input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
    if not colorization:
        print('Test: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
              'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
            i, len(val_loader), batch_time=batch_time, loss=losses,
            top1=top1, top5=top5))
    else:
        print('Test: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
            i, len(val_loader), batch_time=batch_time, loss=losses))
    return losses.avg, top1.avg, top5.avg
Example #18
def validate(val_loader, model, criterion, print_freq):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
        input = input.cuda(non_blocking=True)
        with torch.no_grad():
            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1,
                          top5=top5))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1,
                                                                  top5=top5))

    return top1.avg, top5.avg
Example #19
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()
    # switch to train mode
    model.train()

    end = time()
    for i, s in enumerate(train_loader):
        if len(s) == 5:
            data, label, mid, leng, quat = s
        else:
            data, label, mid, leng = s

        # measure data loading time
        data_time.update(time() - end)
        batch_size = data.size(0)
        input_var = torch.autograd.Variable(data.cuda().float())
        # input_quat_var = torch.autograd.Variable(quat.cuda())
        target_var = torch.autograd.Variable(label.cuda())
        output = model(input_var)
        # record loss
        # leng is voxel length
        leng = leng * (NUM_VOXEL / NUM_GT_SIZE)
        mid = mid - leng.repeat(1, 3) * (NUM_GT_SIZE / 2 - 0.5)
        leng = leng.repeat(1, JOINT_LEN * 3)
        base = mid.repeat(1, JOINT_LEN)

        for j in range(len(output)):
            output[j] = (output[j].mul(leng.cuda())).add(base.cuda())
        loss = criterion(output[0], target_var)
        for k in range(1, nSTACK):
            loss += criterion(output[k], target_var)
        losses.update(loss.item() / batch_size, 1)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        err_t = float(mean_error(output[-1].cpu(), label)[0])
        # print err_t
        errors.update(err_t, batch_size)

        # measure elapsed time
        batch_time.update(time() - end)
        end = time()

        if i % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t'
                'Acc {err_t.val:.2f} ({err_t.avg:.2f})\t'
                'Loss {loss.val:.2f} ({loss.avg:.2f})\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                    epoch,
                    i,
                    len(train_loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    err_t=errors))
    return losses.avg, errors.avg
Example #20
    def train_iters(self):
        # Initialize search module
        searcher = GreedySearchDecoder(self.encoder,
                                       self.decoder, self.device)
        start = time.time()
        nepoch_no_imprv = 0
        self.train_loss = AverageMeter()
        total_loss      = []
        total_bleu    = []
        total_rouge_l = []
        total_rouge_1 = []
        total_rouge_2 = []
        self.best_bleu  = .0
        for epoch in tqdm(range(self.num_epoch)):
            # Ensure dropout layers are in train mode
            self.encoder.train()
            self.decoder.train()

            print_loss_total = 0
            epoch_loss       = []
            for i_train, batch in enumerate(self.train_loader):
                src_input = batch.src[0]
                per_input = batch.per[0]
                src_length = batch.src[1]
                trg_input = batch.trg[0][:, :-1]
                trg_length = batch.trg[1]

                max_target_length = max(len(ele) for ele in trg_input.tolist())
                max_src_length = max(len(ele) for ele in src_input.tolist())

                loss = self.train(src_input, trg_input, per_input,
                                  self.encoder, self.decoder,
                                  self.encoder_optimizer,
                                  self.decoder_optimizer, self.criterion,
                                  max_target_length)
                self.train_loss.update(loss)
                epoch_loss.append(loss)
                print_loss_total += loss
            total_loss.append(np.average(epoch_loss))

            ######################################################################
            # Run the overfitting
            # ~~~~~~~~~~~~~~

            if (epoch % self.evaluate_every == 0) and (epoch != 0):
                # Ensure dropout layers are in evaluation mode
                self.encoder.eval()
                self.decoder.eval()
                self.val_bleu    = AverageMeter()
                self.val_rouge_1 = AverageMeter()
                self.val_rouge_2 = AverageMeter()
                self.val_rouge_l = AverageMeter()
                epoch_bleu = []
                epoch_rouge_l = []
                epoch_rouge_1 = []
                epoch_rouge_2 = []
                print("evaluating")
                for i, batch in enumerate(self.val_loader):
                    src_input = batch.src[0]
                    per_input = batch.per[0]
                    trg_output = batch.trg[0][:,:]
                    preds = []

                    for src_input_, per_input_ in zip(src_input, per_input):

                        lengths = []
                        ctr = 0
                        for ele in src_input_:
                            if ele == self.PAD_word:
                                break
                            ctr += 1
                        lengths.append(ctr)

                        src_input_ = torch.LongTensor([src_input_.tolist()]).transpose(0,1).to(self.device)
                        per_input_ = torch.LongTensor([per_input_.tolist()]).transpose(0,1).to(self.device)
                        lengths = torch.tensor(lengths).to(self.device)
                        #trg_output_ = torch.LongTensor([trg_output_.tolist()]).transpose(0,1).to(self.device)

                        pred, scores = searcher(src_input_, per_input_, lengths, self.max_length, self.trg_soi)
                        preds.append(pred)

                    # Compute BLEU and ROUGE scores and loss
                    pred_sents = []
                    trg_sents  = []
                    pred_STR   = []
                    trg_STR    = []

                    print(trg_output.t().size())
                    for j in range(trg_output.t().size()[1]):

                        pred_sent = self.get_sentence(tensor2np(preds[j]), 'trg')
                        trg_sent = self.get_sentence(tensor2np(trg_output[j]), 'trg')
                        pred_sents.append(pred_sent)
                        pred_STR.append(" ".join(pred_sent))
                        trg_sents.append(trg_sent)
                        trg_STR.append(" ".join(trg_sent))


                    rouge = Rouge()
                    hyps, refs = pred_STR, trg_STR  # lists of hypothesis and reference strings

                    rouge_scores = rouge.get_scores(hyps, refs, avg=True)

                    bleu_score = get_bleu(pred_sents, trg_sents)
                    epoch_bleu.append(bleu_score)

                    epoch_rouge_1.append(rouge_scores["rouge-1"]["f"])
                    epoch_rouge_2.append(rouge_scores["rouge-2"]["f"])
                    epoch_rouge_l.append(rouge_scores["rouge-l"]["f"])

                    self.val_bleu.update(bleu_score)
                    self.val_rouge_1.update(rouge_scores["rouge-1"]["f"])
                    self.val_rouge_2.update(rouge_scores["rouge-2"]["f"])
                    self.val_rouge_l.update(rouge_scores["rouge-l"]["f"])

                total_bleu.append(np.average(epoch_bleu))
                total_rouge_1.append(np.average(epoch_rouge_1))
                total_rouge_2.append(np.average(epoch_rouge_2))
                total_rouge_l.append(np.average(epoch_rouge_l))


                print('epochs: ' + str(epoch))
                print('average train loss: ' + str(self.train_loss.avg))
                print('average validation bleu score: ' + str(self.val_bleu.avg))
                print('average validation rouge-l score: ' + str(self.val_rouge_l.avg))
                # early stopping
                # Save model if bleu score is higher than the best
                if self.best_bleu < self.val_bleu.avg:
                    self.best_bleu = self.val_bleu.avg
                    checkpoint = {
                        'encoder': self.encoder,
                        'decoder': self.decoder,
                        'epoch': epoch
                    }
                    torch.save(checkpoint, self.log_path + '/Model_e%d_bleu%.3f.pt' % (epoch, self.val_bleu.avg))
                else:
                    nepoch_no_imprv += 1
                    if nepoch_no_imprv >= self.nepoch_no_imprv:
                        print("- early stopping {} epochs without " \
                              "improvement".format(nepoch_no_imprv))
                        break

        pandas_bleu = pd.DataFrame.from_dict({"bleu_validation": total_bleu})
        pandas_loss = pd.DataFrame.from_dict({"loss_train": total_loss})
        pandas_rouge_1 = pd.DataFrame.from_dict({"rouge_1_val": total_rouge_1})
        pandas_rouge_2 = pd.DataFrame.from_dict({"rouge_2_val": total_rouge_2})
        pandas_rouge_l = pd.DataFrame.from_dict({"rouge_l_val": total_rouge_l})

        pandas_loss.to_csv("./loss_train.csv", sep="\t", index=False)
        pandas_bleu.to_csv("./bleu_validation.csv", sep="\t", index=False)
        pandas_rouge_1.to_csv("./rouge_1_validation.csv", sep="\t", index=False)
        pandas_rouge_2.to_csv("./rouge_2_validation.csv", sep="\t", index=False)
        pandas_rouge_l.to_csv("./rouge_l_validation.csv", sep="\t", index=False)
Example #21
c = cfg.getfloat('default', 'c')
lag = cfg.getint('default', 'lag')

data_src = cfg.get('default', 'data_src')
log_src = cfg.get('default', 'log_src')

# Dataset
dataset = IndexDataset(os.path.join(data_src, 'futures', 'train'), lag)
dataloader = DataLoader(dataset, shuffle=False, batch_size=1)

# Models
drl = DRL(lag)

# Tools
optimizer = torch.optim.Adam(drl.parameters())
reward_meter = AverageMeter(epochs, len(dataloader))

# Training Phase
for e in range(epochs):
    with tqdm(total=len(dataloader)) as progress_bar:
        for i, (returns, fragments) in enumerate(dataloader):
            # Computing actions by using FDDR
            delta = drl(fragments).double().squeeze(-1)

            # Computing reward
            pad_delta = F.pad(delta, [1, 0])
            delta_diff = (pad_delta[:, 1:] - pad_delta[:, :-1])
            reward = torch.sum(delta * returns - c * torch.abs(delta_diff))

            # Updating FDDR
            optimizer.zero_grad()
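
The excerpt is again cut off after optimizer.zero_grad(); see the reward-ascent sketch after Example #12 for the presumable remainder.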
Example #22
def train(train_loader, model, criterion, optimizers, epoch):

    model.train()

    lr = None
    all_acc = []
    all_acc_top5 = []
    all_loss = []

    for ix in range(num_outputs):
        all_loss.append(AverageMeter())
        all_acc.append(AverageMeter())
        all_acc_top5.append(AverageMeter())

    LOG("==> train ", logFile)
    # print("num_outputs: ", num_outputs)

    for i, (input, target) in enumerate(train_loader):
        # print("input: ", input, input.shape)
        # print("target: ", target, target.shape)

        # bp_1
        if args.backpropagation == 1:
            # LOG("enter backpropagation method : " + str(args.backpropagation) +"\n", logFile)

            target = target.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)

            for ix in range(num_outputs):
                outputs = model(input_var)
                #
                optimizers[ix].zero_grad()

                loss = criterion(outputs[ix], target_var)

                loss.backward()

                optimizers[ix].step()

                # optimizer.zero_grad()
                # if ix == (num_outputs - 1):
                #     loss.backward()
                # else:
                #     loss.backward(retain_graph=True)

                # optimizer.step()
                all_loss[ix].update(loss.item(), input.size(0))

                # top 1 accuracy
                prec1 = accuracy(outputs[ix].data, target)
                all_acc[ix].update(prec1[0].item(), input.size(0))

                # # top 5 accuracy
                prec5 = accuracy(outputs[ix].data, target, topk=(5, ))
                # print("prec top 5-1: ", prec5)
                # print("prec top 5-2: ", prec5[0])
                # print("prec top 5-3: ", prec5[0].data[0].item())
                all_acc_top5[ix].update(prec5[0].item(), input.size(0))

        # elif args.backpropagation == 2:
        #     # LOG("enter backpropagation method : " + str(args.backpropagation) +"\n", logFile)
        #     # bp_2
        #     for ix in range(num_outputs):

        #         target = target.cuda(async=True)
        #         input_var = torch.autograd.Variable(input)
        #         target_var = torch.autograd.Variable(target)
        #         optimizer.zero_grad()
        #         outputs = model(input_var)
        #         loss = criterion(outputs[ix], target_var)
        #         loss.backward()
        #         optimizer.step()

        #         all_loss[ix].update(loss.item(), input.size(0))

        #         # top 1 accuracy
        #         prec1 = accuracy(outputs[ix].data, target)
        #         all_acc[ix].update(prec1[0].data[0].item(), input.size(0))

        #         # top 5 accuracy
        #         prec5 = accuracy(outputs[ix].data, target, topk=(5,))
        #         all_acc_top5[ix].update(prec5[0].data[0].item(), input.size(0))

        # elif args.backpropagation == 3:
        #     # LOG("enter backpropagation method : " + str(args.backpropagation) +"\n", logFile)
        #     # bp_3
        #     target = target.cuda(async=True)
        #     input_var = torch.autograd.Variable(input)
        #     target_var = torch.autograd.Variable(target)

        #     optimizer.zero_grad()
        #     outputs = model(input_var)
        #     losses = 0
        #     for ix in range(len(outputs)):
        #         # print("outputs[ix]: ", outputs[ix])
        #         loss = criterion(outputs[ix], target_var)
        #         losses += loss

        #         all_loss[ix].update(loss.item(), input.size(0))

        #         # top 1 accuracy
        #         prec1 = accuracy(outputs[ix].data, target)
        #         all_acc[ix].update(prec1[0].data[0].item(), input.size(0))

        #         # top 5 accuracy
        #         prec5 = accuracy(outputs[ix].data, target, topk=(5,))
        #         all_acc_top5[ix].update(prec5[0].data[0].item(), input.size(0))

        #     # losses = losses/len(outputs)
        #     losses.backward()
        #     optimizer.step()
        else:
            raise NotImplementedError

    accs = []
    accs_top5 = []
    ls = []
    for i, j, k in zip(all_acc, all_loss, all_acc_top5):
        accs.append(float(100 - i.avg))
        ls.append(j.avg)
        accs_top5.append(float(100 - k.avg))

    try:
        lr = float(str(optimizers[-1]).split("\n")[-5].split(" ")[-1])
    except Exception:
        lr = 100

    print("train epoch top 5 error: ", accs_top5)
    return accs, ls, lr, accs_top5
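
The try/except above recovers the learning rate by parsing the optimizer's repr, which is fragile. A sketch of the idiomatic equivalent:

lr = optimizers[-1].param_groups[0]['lr']  # read the LR directly off the param group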
Example #23
def valid(loader, model, epoch, writer, n_step):
    iou = AverageMeter()   # semantic IoU
    iou_c = AverageMeter() # contour IoU
    iou_m = AverageMeter() # marker IoU
    losses = AverageMeter()
    only_contour = config['contour'].getboolean('exclusive')
    weight_map = config['param'].getboolean('weight_map')
    model_name = config['param']['model']
    with_contour = config.getboolean(model_name, 'branch_contour')
    with_marker = config.getboolean(model_name, 'branch_marker')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Sets the model in evaluation mode.
    model.eval()
    for i, data in enumerate(loader):
        # get the inputs
        inputs = data['image'].to(device)
        labels = data['label'].to(device)
        labels_c = data['label_c'].to(device)
        labels_m = data['label_m'].to(device)
        # get loss weight
        weights = None
        if weight_map and 'weight' in data:
            weights = data['weight'].to(device)
        # forward step
        outputs = model(inputs)
        if with_contour and with_marker:
            outputs, outputs_c, outputs_m = outputs
        elif with_contour:
            outputs, outputs_c = outputs
        # compute loss
        if only_contour:
            loss = contour_criterion(outputs, labels_c)
        else:
            # weight_criterion equals to segment_criterion if weights is none
            loss = focal_criterion(outputs, labels, weights)
            if with_contour:
                loss += focal_criterion(outputs_c, labels_c, weights)
            if with_marker:
                loss += focal_criterion(outputs_m, labels_m, weights)
        # measure accuracy and record loss (Non-instance level IoU)
        losses.update(loss.item(), inputs.size(0))
        if only_contour:
            batch_iou = iou_mean(outputs, labels_c)
        else:
            batch_iou = iou_mean(outputs, labels)
        iou.update(batch_iou, inputs.size(0))
        if with_contour:
            batch_iou_c = iou_mean(outputs_c, labels_c)
            iou_c.update(batch_iou_c, inputs.size(0))
        if with_marker:
            batch_iou_m = iou_mean(outputs_m, labels_m)
            iou_m.update(batch_iou_m, inputs.size(0))
    # end of loop, dump epoch summary
    writer.add_scalar('CV/epoch_loss', losses.avg, epoch)
    writer.add_scalar('CV/epoch_iou', iou.avg, epoch)
    writer.add_scalar('CV/epoch_iou_c', iou_c.avg, epoch)
    writer.add_scalar('CV/epoch_iou_m', iou_m.avg, epoch)
    print(
        'Epoch: [{0}]\t\tcross-validation\t'
        'Loss: N/A    (avg: {loss.avg:.4f})\t'
        'IoU: {iou.avg:.3f} (Contour: {iou_c.avg:.3f}, Marker: {iou_m.avg:.3f})\t'
        .format(
            epoch, loss=losses, iou=iou, iou_c=iou_c, iou_m=iou_m
        )
    )
    return iou.avg # return epoch average iou
Example #24
def validate(val_loader, model, criterion, print_freq, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    # model.apply(unfreeze_bn)

    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)
        with torch.no_grad():
            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec[0].item(), input.size(0))
            top5.update(prec[1].item(), input.size(0))

            # measure elapsed time

            if i % print_freq == 0:
                print('Test: [{0}][{1}/{2}]\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          epoch,
                          i,
                          len(val_loader),
                          loss=losses,
                          top1=top1,
                          top5=top5))
        # n_iter is only consumed by the commented-out writer calls below
        # n_iter = epoch * len(train_loader) + i + 1

        # if args.record:
        #     writer.add_scalar('Test/Average loss', losses.val, n_iter)
        #     writer.add_scalar('Test/Accuracy', top1.val, n_iter)

    model.train()
    f_loss.write('\n epoch {} test with loss {:.4f} \n'.format(
        epoch, losses.avg))
    f_acc.write('\n epoch {} test with top1 {:.3f} and top5 {:.3f} \n'.format(
        epoch, top1.avg, top5.avg))
    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1,
                                                                  top5=top5))

    return top1.avg, top5.avg
Example #25
def test(args, model, device, test_loader):
    if test_loader.dataset.train:
        print("test on validation set\r\n")
    else:
        print("test on test set\r\n")

    # validate
    model.eval()
    # test_loss = 0
    # correct = 0
    # num_samples = 0
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    with torch.no_grad():
        for data in test_loader:
            inputs, labels = data[0].to(device), data[1].to(device)
            outputs = model(inputs)
            test_loss = criterion(outputs, labels).item()
            # pred = outputs.argmax(dim=1, keepdim=True)
            # correct += pred.eq(labels.view_as(pred)).sum().item()
            # # _, pred = torch.max(outputs, 1)
            # # correct += (pred == labels).sum().item()
            # num_samples += pred.size(0)
            prec1, prec5 = accuracy(outputs, labels, topk=(1, 5))
            losses.update(test_loss, labels.size(0))
            top1.update(prec1[0], labels.size(0))
            top5.update(prec5[0], labels.size(0))

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: Prec@1:{}/{} ({:.2f}%) Prec@5:{}/{} ({:.2f}%)\n'
        .format(losses.avg, top1.sum // 100, top1.count, top1.avg,
                top5.sum // 100, top1.count, top5.avg))
    return top1.avg, top5.avg
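
A note on the print above: update() accumulates percentage × batch size into sum, so top1.sum // 100 recovers the count of correctly classified samples (assuming the AverageMeter sketched after Example #1).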
Example #26
def train(train_loader, model, criterion, optimizer, epoch, print_freq, colorization=False, scheduler=None):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    global cur_itrs

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        
        cur_itrs+=1
        # measure data loading time
        data_time.update(time.time() - end)
        # if torch.cuda.is_available():
        #     target = target.cuda()
        #     input = input.cuda()
        target = target.to(device, dtype=torch.float32)
        input = input.to(device, dtype=torch.float32)

        if colorization:
            input = transforms.Resize(500)(input)
            target = transforms.Resize(500)(target)
            input = input.repeat(1,3,1,1)

        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        if not colorization:
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            top1.update(prec1, input.size(0))
            top5.update(prec5, input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if scheduler:
            scheduler.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            if not colorization:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'.format(
                    epoch, i, len(train_loader), batch_time=batch_time,
                    data_time=data_time, loss=losses, top1=top1, top5=top5))
            else:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                    epoch, i, len(train_loader), batch_time=batch_time,
                    data_time=data_time, loss=losses))

    # return loss, top1, top5 corresponding to each epoch
    return losses.avg, top1.avg, top5.avg
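
Note that this train() calls scheduler.step() after every optimizer step, so it expects an iteration-level schedule rather than an epoch-level one. A hypothetical wiring; the SGD optimizer, the OneCycleLR choice, and every hyper-parameter below are assumptions, not taken from the original:

import torch

optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = torch.optim.lr_scheduler.OneCycleLR(
    optimizer, max_lr=0.1,
    steps_per_epoch=len(train_loader), epochs=num_epochs)

for epoch in range(num_epochs):
    # scheduler is stepped inside train(), once per batch
    train(train_loader, model, criterion, optimizer, epoch,
          print_freq=10, scheduler=scheduler)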
Ejemplo n.º 27
0
class Trainer(object):
    def __init__(self, train_loader, val_loader, test_loader, vocabs, args):

        self.use_cuda = torch.cuda.is_available()  # was hard-coded True, crashing on CPU-only machines
        self.max_length = args.max_len

        # Data Loader
        self.train_loader = train_loader
        self.val_loader   = val_loader
        self.test_loader  = test_loader

        # Hyper-parameters
        self.lr             = args.lr
        self.grad_clip      = args.grad_clip
        self.embed_dim      = args.embed_dim
        self.hidden_dim     = args.hidden_dim
        self.num_layer      = args.num_layer
        self.dropout        = args.dropout
        self.attn_model     = args.attn_model
        self.decoder_lratio = args.decoder_lratio
        self.teacher_forcing = args.teacher_forcing
        self.nepoch_no_imprv = args.early_stoping
        self.evaluate_every = args.evaluate_every

        # Training setting
        self.batch_size     = args.batch_size
        self.num_epoch      = args.num_epoch
        self.iter_per_epoch = len(train_loader)

        self.device = torch.device("cuda" if self.use_cuda else "cpu")


        # TODO: fix build (original note: "ARRUMAR BUILD")
        self.build_model(vocabs)
        self.log_path = os.path.join('./logs/' + args.log)

    def build_model(self, vocabs):
        self.src_vocab = vocabs['src_vocab']
        self.trg_vocab = vocabs['trg_vocab']
        self.per_vocab = vocabs['per_vocab']
        self.src_inv_vocab = vocabs['src_inv_vocab']
        self.trg_inv_vocab = vocabs['trg_inv_vocab']
        self.per_inv_vocab = vocabs['per_inv_vocab']

        self.trg_soi = self.trg_vocab[SOS_WORD]
        self.PAD_word = self.src_vocab[PAD_WORD]

        self.src_nword = len(self.src_vocab)
        self.trg_nword = len(self.trg_vocab)
        self.per_nword = len(self.per_vocab)


        print('Building encoder and decoder ...')
        # Initialize word embeddings
        embedding_trg = nn.Embedding(self.trg_nword, self.embed_dim)
        embedding_src = nn.Embedding(self.src_nword, self.embed_dim)
        embedding_per = nn.Embedding(self.per_nword, 600)

        # Load pre-trained 600-d embeddings. nn.Embedding.from_pretrained is a
        # classmethod that returns a new module, so its result must be assigned
        # back (the original called it on an instance, which silently does
        # nothing). This assumes embed_dim matches the 600-d vectors in pt.txt;
        # from_pretrained also freezes the weights by default, which matches
        # the requires_grad filters in the optimizers below.
        w_trg = get_word_embeddings(600, "./data/embeddings/pt.txt", self.trg_vocab)
        w_src = get_word_embeddings(600, "./data/embeddings/pt.txt", self.src_vocab)
        embedding_trg = nn.Embedding.from_pretrained(torch.FloatTensor(w_trg))
        embedding_src = nn.Embedding.from_pretrained(torch.FloatTensor(w_src))

        # Initialize encoder & decoder models
        self.encoder = EncoderRNN(self.hidden_dim, embedding_src, embedding_per, self.num_layer, self.dropout)
        self.decoder = LuongAttnDecoderRNN(self.attn_model, embedding_trg, self.hidden_dim, self.trg_nword, self.num_layer, self.dropout)

        if self.use_cuda:
            self.encoder = self.encoder.cuda()
            self.decoder = self.decoder.cuda()

        # set the criterion and optimizer
        self.encoder_optimizer = optim.Adam(filter(lambda x: x.requires_grad, self.encoder.parameters()),
                                      lr=self.lr)
        self.decoder_optimizer = optim.Adam(filter(lambda x: x.requires_grad, self.decoder.parameters()),
                                      lr=self.lr * self.decoder_lratio)

        self.criterion = nn.NLLLoss()

        print(self.encoder)
        print(self.decoder)
        print(self.criterion)
        print(self.encoder_optimizer)
        print(self.decoder_optimizer)

    def get_mask(self, output_batch):
        # build a 0/1 mask over PAD positions (original note: "pad value is one")
        mask = binaryMatrix(output_batch, self.PAD_word)
        mask = torch.ByteTensor(mask)
        return mask

    def get_sentence(self, sentence, side):
        def _eos_parsing(sentence):
            if EOS_WORD in sentence:
                return sentence[:sentence.index(EOS_WORD) + 1]
            else:
                return sentence

        # index sentence to word sentence
        if side == 'trg':
            sentence = [self.trg_inv_vocab[x] for x in sentence]
        else:
            sentence = [self.src_inv_vocab[x] for x in sentence]

        return _eos_parsing(sentence)

    def train(self, input_variable, target_variable, per_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,
          max_length):

        # Zero gradients
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()

        lengths = []
        for input_ in input_variable:
            ctr = 0
            for ele in input_:
                if ele == self.PAD_word:
                    break
                ctr += 1
            lengths.append(ctr)

        mask = self.get_mask(target_variable)

        # Set device options
        input_variable = input_variable.t().to(self.device)
        per_variable   = per_variable.t().to(self.device)
        lengths = torch.tensor(lengths).to(self.device)
        target_variable = target_variable.t().to(self.device)
        mask = mask.t().to(self.device)

        """
        temos que checar o tamanhoo dp padding e fazer o padding na mão...
        RuntimeError: Expected
        hidden
        size(2, 40, 100), got(2, 3, 100)
        """
        # Initialize variables
        loss = 0
        print_losses = []
        n_totals = 0

        # Forward pass through encoder
        encoder_outputs, encoder_hidden = encoder(input_variable, lengths, per_variable)

        # Create initial decoder input (start with SOS tokens for each sentence)
        decoder_input = torch.LongTensor([[self.trg_soi for _ in range(target_variable.size()[1])]])
        decoder_input = decoder_input.to(self.device)

        # Set initial decoder hidden state to the encoder's final hidden state
        decoder_hidden = encoder_hidden[:decoder.n_layers]

        # Determine if we are using teacher forcing this iteration
        use_teacher_forcing = random.random() < self.teacher_forcing

        # Forward batch of sequences through decoder one time step at a time
        if use_teacher_forcing:
            for t in range(max_length):
                decoder_output, decoder_hidden = decoder(
                    decoder_input, decoder_hidden, encoder_outputs
                )
                # Teacher forcing: next input is current target
                decoder_input = target_variable[t].view(1, -1)
                # Calculate and accumulate loss
                mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t], self.device)
                loss += mask_loss
                print_losses.append(mask_loss.item() * nTotal)
                n_totals += nTotal
        else:
            for t in range(max_length):
                decoder_output, decoder_hidden = decoder(
                    decoder_input, decoder_hidden, encoder_outputs
                )
                # No teacher forcing: next input is decoder's own current output
                _, topi = decoder_output.topk(1)
                # `batch_size` was undefined here; derive it from the batch itself
                decoder_input = torch.LongTensor([[topi[i][0] for i in range(target_variable.size(1))]])
                decoder_input = decoder_input.to(self.device)
                # Calculate and accumulate loss
                mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t], self.device)
                loss += mask_loss
                print_losses.append(mask_loss.item() * nTotal)
                n_totals += nTotal

        # Perform backpropagation
        loss.backward()

        # Clip gradients: gradients are modified in place
        _ = torch.nn.utils.clip_grad_norm_(encoder.parameters(), self.grad_clip)
        _ = torch.nn.utils.clip_grad_norm_(decoder.parameters(), self.grad_clip)

        # Adjust model weights
        encoder_optimizer.step()
        decoder_optimizer.step()

        return sum(print_losses) / n_totals


    def train_iters(self):
        # Initialize search module
        searcher = GreedySearchDecoder(self.encoder,
                                       self.decoder, self.device)
        start = time.time()
        nepoch_no_imprv = 0
        self.train_loss = AverageMeter()
        total_loss      = []
        total_bleu    = []
        total_rouge_l = []
        total_rouge_1 = []
        total_rouge_2 = []
        self.best_bleu  = 0.0
        for epoch in tqdm(range(self.num_epoch)):
            # Ensure dropout layers are in train mode
            self.encoder.train()
            self.decoder.train()

            print_loss_total = 0
            epoch_loss       = []
            for i_train, batch in enumerate(self.train_loader):
                src_input = batch.src[0]
                per_input = batch.per[0]
                src_length = batch.src[1]
                trg_input = batch.trg[0][:, :-1]
                trg_length = batch.trg[1]

                max_target_length = max([len(ele) for ele in trg_input.tolist()])
                max_src_length = max([len(ele) for ele in src_input.tolist()])

                loss = self.train(src_input, trg_input, per_input, self.encoder,
                                  self.decoder, self.encoder_optimizer, self.decoder_optimizer, self.criterion, max_target_length)
                self.train_loss.update(loss)
                epoch_loss.append(loss)
                print_loss_total += loss
            total_loss.append(np.average(epoch_loss))

            ######################################################################
            # Evaluate on the validation set every `evaluate_every` epochs
            ######################################################################

            if (epoch % self.evaluate_every == 0) and (epoch != 0):
                # Ensure dropout layers are in evaluation mode
                self.encoder.eval()
                self.decoder.eval()
                self.val_bleu    = AverageMeter()
                self.val_rouge_1 = AverageMeter()
                self.val_rouge_2 = AverageMeter()
                self.val_rouge_l = AverageMeter()
                epoch_bleu = []
                epoch_rouge_l = []
                epoch_rouge_1 = []
                epoch_rouge_2 = []
                print("evaluating")
                for i, batch in enumerate(self.val_loader):
                    src_input = batch.src[0]
                    per_input = batch.per[0]
                    trg_output = batch.trg[0][:,:]
                    preds = []

                    for src_input_, per_input_ in zip(src_input, per_input):

                        lengths = []
                        ctr = 0
                        for ele in src_input_:
                            if ele == self.PAD_word:
                                break
                            ctr += 1
                        lengths.append(ctr)

                        src_input_ = torch.LongTensor([src_input_.tolist()]).transpose(0,1).to(self.device)
                        per_input_ = torch.LongTensor([per_input_.tolist()]).transpose(0,1).to(self.device)
                        lengths = torch.tensor(lengths).to(self.device)
                        #trg_output_ = torch.LongTensor([trg_output_.tolist()]).transpose(0,1).to(self.device)

                        pred, scores = searcher(src_input_, per_input_, lengths, self.max_length, self.trg_soi)
                        preds.append(pred)

                    # Compute BLEU and ROUGE scores
                    pred_sents = []
                    trg_sents  = []
                    pred_STR   = []
                    trg_STR    = []

                    print(trg_output.t().size())
                    for j in range(trg_output.t().size()[1]):

                        pred_sent = self.get_sentence(tensor2np(preds[j]), 'trg')
                        trg_sent = self.get_sentence(tensor2np(trg_output[j]), 'trg')
                        pred_sents.append(pred_sent)
                        pred_STR.append(" ".join(pred_sent))
                        trg_sents.append(trg_sent)
                        trg_STR.append(" ".join(trg_sent))


                    rouge = Rouge()
                    # get_scores expects parallel lists of hypothesis and
                    # reference strings; the original zip gymnastics only
                    # paired the first two sentences of each list
                    hyps, refs = pred_STR, trg_STR

                    rouge_scores = rouge.get_scores(hyps, refs, avg=True)

                    bleu_score = get_bleu(pred_sents, trg_sents)
                    epoch_bleu.append(bleu_score)

                    epoch_rouge_1.append(rouge_scores["rouge-1"]["f"])
                    epoch_rouge_2.append(rouge_scores["rouge-2"]["f"])
                    epoch_rouge_l.append(rouge_scores["rouge-l"]["f"])

                    self.val_bleu.update(bleu_score)
                    self.val_rouge_1.update(rouge_scores["rouge-1"]["f"])
                    self.val_rouge_2.update(rouge_scores["rouge-2"]["f"])
                    self.val_rouge_l.update(rouge_scores["rouge-l"]["f"])

                total_bleu.append(np.average(epoch_bleu))
                total_rouge_1.append(np.average(epoch_rouge_1))
                total_rouge_2.append(np.average(epoch_rouge_2))
                total_rouge_l.append(np.average(epoch_rouge_l))


                print('epochs: ' + str(epoch))
                print('average train loss: ' + str(self.train_loss.avg))
                print('average validation bleu score: ' + str(self.val_bleu.avg))
                print('average validation rouge-l score: ' + str(self.val_rouge_l.avg))
                # early stopping
                # Save model if bleu score is higher than the best
                if self.best_bleu < self.val_bleu.avg:
                    self.best_bleu = self.val_bleu.avg
                    checkpoint = {
                        'encoder': self.encoder,
                        'decoder': self.decoder,
                        'epoch': epoch
                    }
                    torch.save(checkpoint, self.log_path + '/Model_e%d_bleu%.3f.pt' % (epoch, self.val_bleu.avg))
                else:
                    nepoch_no_imprv += 1
                    if nepoch_no_imprv >= self.nepoch_no_imprv:
                        print("- early stopping {} epochs without " \
                              "improvement".format(nepoch_no_imprv))
                        break

        pandas_bleu = pd.DataFrame.from_dict({"bleu_validation": total_bleu})
        pandas_loss = pd.DataFrame.from_dict({"loss_train": total_loss})
        pandas_rouge_1 = pd.DataFrame.from_dict({"rouge_1_val": total_rouge_1})
        pandas_rouge_2 = pd.DataFrame.from_dict({"rouge_2_val": total_rouge_2})
        pandas_rouge_l = pd.DataFrame.from_dict({"rouge_l_val": total_rouge_l})

        pandas_loss.to_csv("./loss_train.csv", sep="\t", index=False)
    pandas_bleu.to_csv("./bleu_validation.csv", sep="\t", index=False)
        pandas_rouge_1.to_csv("./rouge_1_validation.csv", sep="\t", index=False)
        pandas_rouge_2.to_csv("./rouge_2_validation.csv", sep="\t", index=False)
        pandas_rouge_l.to_csv("./rouge_l_validation.csv", sep="\t", index=False)
    def train_epoch(self, loader, args, epoch, optimizer, writer):
        """
        Train for a single epoch
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()

        criterion = torch.nn.NLLLoss()
        self.train()
        end = time.time()
        step = epoch * len(loader)

        for i, (x_batch, y_batch) in enumerate(loader):
            data_time.update(time.time() - end)

            x_batch = x_batch.to(self.device)
            y_batch = y_batch.to(self.device)

            prediction = self.forward(x_batch)
            loss = criterion(prediction, y_batch)

            losses.update(loss.item(), x_batch.size(0))  # use the actual batch size; the last batch may be smaller than args.batch_size

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0 or i == len(loader) - 1:
                writer.add_scalar('train/loss', losses.val, step + i)
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          epoch,
                          i,
                          len(loader) - 1,
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses),
                      flush=True)
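
The Trainer above leans on several helpers that are never shown. get_mask depends on a binaryMatrix function; the surrounding code closely follows the PyTorch chatbot tutorial, where that helper emits 0 at PAD positions and 1 elsewhere, so a sketch under that assumption:

def binaryMatrix(batch, pad_value):
    # one row of 0/1 flags per sequence: 0 where the token is padding
    return [[0 if token == pad_value else 1 for token in seq]
            for seq in batch]

maskNLLLoss is likewise assumed to follow the chatbot-tutorial pattern: negative log-likelihood of the gathered target probabilities, averaged over unmasked positions (this presumes decoder_output holds probabilities, not log-probabilities):

import torch

def maskNLLLoss(inp, target, mask, device):
    # inp: (batch, vocab) probabilities; target: (batch,); mask: (batch,) 0/1
    nTotal = mask.sum()
    crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1))
    loss = crossEntropy.masked_select(mask.bool()).mean()
    return loss.to(device), nTotal.item()

Finally, tensor2np and get_bleu are assumed utilities; one plausible reconstruction flattens predictions to token-id arrays and scores corpus BLEU with NLTK (both bodies, and the NLTK choice, are assumptions):

from nltk.translate.bleu_score import corpus_bleu

def tensor2np(t):
    # detach a tensor and flatten it to a 1-D numpy array of token ids
    return t.detach().cpu().numpy().reshape(-1)

def get_bleu(hypotheses, references):
    # corpus_bleu expects a list of reference *lists* per hypothesis
    return corpus_bleu([[ref] for ref in references], hypotheses)
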
Ejemplo n.º 29
0
def test(test_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    errors = AverageMeter()

    # switch to evaluate mode
    model.eval()
    end = time()
    result = np.empty(shape=(0, JOINT_POS_LEN), dtype=np.float32)
    label_full = np.empty(shape=(0, JOINT_POS_LEN), dtype=np.float32)

    for i, s in enumerate(test_loader):
        if len(s) == 5:
            data, label, mid, leng, quat = s
        else:
            data, label, mid, leng = s

        batch_size = data.size(0)
        input_var = data.cuda().float()
        target_var = label.cuda()
        output = model(input_var)
        # record loss
        leng = leng.cuda() * (NUM_VOXEL / NUM_GT_SIZE)
        base = mid.cuda() - leng.repeat(1, 3) * (NUM_GT_SIZE / 2 - 0.5)
        leng = leng.repeat(1, JOINT_LEN * 3)
        base = base.repeat(1, JOINT_LEN)
        for j in range(len(output)):
            output[j] = (output[j].mul(leng)).add(base)
        loss = criterion(output[0], target_var)
        for k in range(1, nSTACK):
            loss += criterion(output[k], target_var)
        losses.update(loss.item() / batch_size, 1)
        output = output[-1].cpu().detach()
        # quat = quat.numpy().reshape(-1,IMU_NUM,4)
        # joints = label.reshape(-1,JOINT_LEN,3)[0]
        # es_joints = output.reshape(-1,JOINT_LEN,3)[0]
        # print joints[10]
        # right_forearm = quaternion.from_float_array(quat[0][Bone.R_UpArm])
        # vec = np.array([152.6,0,0],dtype=np.float32)
        # diff_gt = joints[Joint.RightArm]-joints[Joint.RightShoulder]
        # diff_es = joints[Joint.RightArm]-es_joints[Joint.RightShoulder]
        # diff = quaternion.rotate_vectors(right_forearm,vec).astype(np.float32)
        # print diff, joints[10]-joints[9]
        # diff_gt = (diff-diff_gt).norm(2)
        # diff_es = (diff-diff_es).norm(2)

        # diff = diff.norm(2)

        r = mean_error(output, label)
        err_t = float(r[0])
        # print err_t,diff_gt,diff_es, 'es',r[1].item(),r[1]-diff_es
        # if err_t>50:
        # from visualization import plot_voxel_label
        # print i,err_t
        # data = data[0]
        # data[data<1] = 0
        # plot_voxel_label(data,(label[0]-base.cpu())/(leng.cpu()/(NUM_VOXEL/NUM_GT_SIZE)))
        # plot_voxel_label(data,(output[0]-base.cpu())/(leng.cpu()/(NUM_VOXEL/NUM_GT_SIZE)))
        errors.update(err_t, batch_size)

        # measure elapsed time
        batch_time.update(time() - end)
        end = time()

        if i % TEST_PRINT == 0:
            print(
                '[{0}/{1}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Loss {loss.val:.5f} ({loss.avg:.5f})\t'
                'acc_in_t {err_t.val:.3f} ({err_t.avg:.3f})'.format(
                    i,
                    len(test_loader),
                    batch_time=batch_time,
                    loss=losses,
                    err_t=errors))

        # measure accuracy
        result = np.append(result, output.numpy(), axis=0)
        label_full = np.append(label_full, label.numpy(), axis=0)

    return result, label_full, errors.avg
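
mean_error returns a tuple whose first element is the scalar mean joint error (err_t above) and whose second holds per-sample errors. A hypothetical reconstruction, assuming coordinates are laid out as (batch, JOINT_LEN * 3) in the label's units:

def mean_error(output, label):
    # mean Euclidean distance per joint, plus per-sample means
    pred = output.reshape(-1, JOINT_LEN, 3)
    gt = label.reshape(-1, JOINT_LEN, 3).float()
    dist = (pred - gt).norm(dim=2)        # (batch, JOINT_LEN)
    return dist.mean(), dist.mean(dim=1)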
Ejemplo n.º 30
0
def train(train_loader, model, criterion, optimizer, epoch, print_freq):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # `async` is a reserved word in Python 3.7+; PyTorch renamed the argument
        target = target.cuda(non_blocking=True)
        input = input.cuda(non_blocking=True)

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))
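
Both classification loops above index the result of accuracy() as prec1[0], which implies the helper returns one one-element tensor per requested k. A sketch matching the standard PyTorch ImageNet example (again an assumption about the original):

import torch

def accuracy(output, target, topk=(1,)):
    # precision@k as a percentage, one 1-element tensor per k
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res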