Example #1
0
File: train.py — Project: ml-lab/DepthNet
def validate(val_loader, model, epoch, logger, output_writers=[]):
    """Run one validation epoch over ``val_loader``.

    Logs sample images to ``output_writers`` (ground truth and inputs on the
    first epoch, the network output every epoch) and accumulates the metric
    and normalized depth errors from ``metric_loss``.

    Returns:
        tuple: ``(avg_metric_error, avg_normalized_error)`` over the loader.

    NOTE: the mutable default ``output_writers=[]`` is kept for interface
    compatibility; it is only read, never mutated, so sharing is harmless.
    """
    batch_time = AverageMeter()
    depth2_metric_errors = AverageMeter()
    depth2_norm_errors = AverageMeter()
    log_outputs = len(output_writers) > 0
    # switch to evaluate mode (fixes batch-norm statistics, disables dropout)
    model.eval()

    end = time.time()

    # `cuda(async=True)` is a SyntaxError since Python 3.7 (`async` became a
    # keyword) and `Variable(..., volatile=True)` was removed in PyTorch 0.4;
    # use non-blocking transfers inside a `torch.no_grad()` context instead.
    with torch.no_grad():
        for i, (input, target, _) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            input_var = torch.cat([t.cuda() for t in input], 1)
            # compute output
            output = model(input_var)
            if log_outputs and i < len(
                    output_writers):  # log first output of 3 first batches
                if epoch == 0:
                    output_writers[i].add_image(
                        'GroundTruth',
                        util.tensor2array(target[0].cpu(), max_value=100), 0)
                    output_writers[i].add_image(
                        'Inputs', util.tensor2array(input[0][0].cpu()), 0)
                    output_writers[i].add_image(
                        'Inputs', util.tensor2array(input[1][0].cpu()), 1)
                output_writers[i].add_image(
                    'DepthNet Outputs',
                    util.tensor2array(output[0].cpu(), max_value=100), epoch)
            depth2_norm_error = metric_loss(output, target, normalize=True)
            depth2_metric_error = metric_loss(output, target, normalize=False)
            # record depth error (`.item()` replaces the removed `.data[0]`)
            depth2_norm_errors.update(depth2_norm_error.item(), target.size(0))
            depth2_metric_errors.update(depth2_metric_error.item(),
                                        target.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            logger.test_bar.update(i + 1)
            if i % args.print_freq == 0:
                logger.test_writer.write(
                    'Validation: '
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Depth error {depth2_error.val:.3f} ({depth2_error.avg:.3f})'.
                    format(batch_time=batch_time,
                           depth2_error=depth2_metric_errors))

    return depth2_metric_errors.avg, depth2_norm_errors.avg
Example #2
0
def validate(val_loader, model, epoch, logger, output_writers=[]):
    """Run one validation epoch over ``val_loader``.

    Logs sample images to ``output_writers`` (ground truth and the two input
    halves on the first epoch, the network output every epoch) and accumulates
    the metric and normalized depth errors from ``metric_loss``.

    Returns:
        tuple: ``(avg_metric_error, avg_normalized_error)`` over the loader.

    NOTE: the mutable default ``output_writers=[]`` is kept for interface
    compatibility; it is only read, never mutated, so sharing is harmless.
    """
    batch_time = AverageMeter()
    depth2_metric_errors = AverageMeter()
    depth2_norm_errors = AverageMeter()
    log_outputs = len(output_writers) > 0
    # switch to evaluate mode (fixes batch-norm statistics, disables dropout)
    model.eval()

    end = time.time()

    # Validation does not need autograd graphs; without no_grad() every
    # forward pass keeps activations alive for a backward that never happens,
    # wasting GPU memory.
    with torch.no_grad():
        for i, (input, target, _) in enumerate(val_loader):
            logging.info("val_loader loop = %d" % i)
            target = target.to(device)
            input = torch.cat(input, 1).to(device)
            # NOTE(review): redundant if `device` is already a CUDA device;
            # kept for compatibility with the rest of this script — confirm.
            input = input.cuda()
            # compute output
            output = model(input)
            if log_outputs and i < len(
                    output_writers):  # log first output of 3 first batches
                if epoch == 0:
                    output_writers[i].add_image(
                        'GroundTruth', util.tensor2array(target[0], max_value=100),
                        0)
                    output_writers[i].add_image('Inputs',
                                                util.tensor2array(input[0, :3]), 0)
                    output_writers[i].add_image('Inputs',
                                                util.tensor2array(input[0, 3:]), 1)
                output_writers[i].add_image(
                    'DepthNet Outputs', util.tensor2array(output[0],
                                                          max_value=100), epoch)
            depth2_norm_error = metric_loss(output, target, normalize=True)
            depth2_metric_error = metric_loss(output, target, normalize=False)
            # record depth error
            depth2_norm_errors.update(depth2_norm_error.item(), target.size(0))
            depth2_metric_errors.update(depth2_metric_error.item(), target.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            logger.test_bar.update(i + 1)
            if i % args.print_freq == 0:
                logger.test_writer.write(
                    'Validation: '
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Depth error {depth2_error.val:.3f} ({depth2_error.avg:.3f})'.
                    format(batch_time=batch_time,
                           depth2_error=depth2_metric_errors))

    return depth2_metric_errors.avg, depth2_norm_errors.avg
Example #3
0
def train(train_loader, model, optimizer, epoch_size, term_logger,
          train_writer):
    """Run one training epoch (at most ``epoch_size`` batches).

    For each batch: forward pass (data-parallel across GPUs when more than
    one is visible), multi-scale ``metric_loss``, SGD step, and logging of
    the loss/error meters to the terminal, TensorBoard writer, and a
    tab-separated CSV at ``args.log_full``.

    Returns:
        tuple: ``(avg_loss, avg_metric_error, avg_normalized_error)``.
    """
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    depth2_metric_errors = AverageMeter()
    depth2_normalized_errors = AverageMeter()

    # switch to train mode
    logging.info("switch to train mode : start")
    model.train()
    logging.info("switch to train mode : end")
    end = time.time()

    for i, (input, target, _) in enumerate(train_loader):
        # measure data loading time
        logging.info("train_loader=%d" % i)
        data_time.update(time.time() - end)
        target = target.to(device)
        input = torch.cat(input, 1).to(device)
        input, target = input.cuda(), target.cuda()
        # compute output
        if torch.cuda.device_count() > 1:
            logging.info("%s %s" % ("*" * 100, "data_parallel line 207"))
            # (redundant `input = input.cuda()` removed — the batch was
            # already moved to CUDA above)
            output = torch.nn.parallel.data_parallel(model,
                                                     input,
                                                     device_ids=device_ids)
        else:
            output = model(input)

        # multi-scale loss: one weight per pyramid level of the output
        loss = metric_loss(output,
                           target,
                           weights=(0.32, 0.08, 0.02, 0.01, 0.005),
                           loss=args.loss)
        # errors are reported on the finest scale (output[0]) only
        depth2_norm_error = metric_loss(output[0], target, normalize=True)
        depth2_metric_error = metric_loss(output[0], target, normalize=False)
        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        depth2_metric_errors.update(depth2_metric_error.item(), target.size(0))
        depth2_normalized_errors.update(depth2_norm_error.item(),
                                        target.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # append per-batch loss/error to the full log (opened per iteration
        # so a crash mid-epoch loses at most one row)
        with open(os.path.join(args.save_path, args.log_full), 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t')
            writer.writerow([loss.item(), depth2_metric_error.item()])
        term_logger.train_bar.update(i + 1)
        if i % args.print_freq == 0:
            term_logger.train_writer.write(
                'Train: Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Loss {loss.val:.4f} ({loss.avg:.4f}) '
                'Depth error {depth2_error.val:.3f} ({depth2_error.avg:.3f})\r'
                .format(batch_time=batch_time,
                        data_time=data_time,
                        loss=losses,
                        depth2_error=depth2_metric_errors))
        if i >= epoch_size - 1:
            break
        n_iter += 1
    logging.info("leave train function")
    return losses.avg, depth2_metric_errors.avg, depth2_normalized_errors.avg
Example #4
0
File: train.py — Project: ml-lab/DepthNet
def train(train_loader, model, optimizer, epoch_size, term_logger,
          train_writer):
    """Run one training epoch (at most ``epoch_size`` batches; 0 means the
    whole loader).

    For each batch: forward pass, multi-scale ``metric_loss``, SGD step, and
    logging of the loss/error meters to the terminal, TensorBoard writer, and
    a tab-separated CSV at ``args.log_full``.

    Returns:
        tuple: ``(avg_loss, avg_metric_error, avg_normalized_error)``.
    """
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    depth2_metric_errors = AverageMeter()
    depth2_normalized_errors = AverageMeter()

    if epoch_size == 0:
        epoch_size = len(train_loader)
    # switch to train mode
    model.train()

    end = time.time()

    # `cuda(async=True)` is a SyntaxError since Python 3.7 (`async` became a
    # keyword), and `torch.autograd.Variable` / `.data[0]` were deprecated in
    # PyTorch 0.4: tensors carry autograd state directly, and `.item()`
    # extracts a Python scalar.
    for i, (input, target, _) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda(non_blocking=True)
        input_var = torch.cat([t.cuda() for t in input], 1)

        # compute output
        output = model(input_var)

        # multi-scale loss: one weight per pyramid level of the output
        loss = metric_loss(output,
                           target,
                           weights=(0.32, 0.08, 0.02, 0.01, 0.005),
                           loss=args.loss)
        # errors are reported on the finest scale (output[0]) only
        depth2_norm_error = metric_loss(output[0], target, normalize=True)
        depth2_metric_error = metric_loss(output[0],
                                          target,
                                          normalize=False)
        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        depth2_metric_errors.update(depth2_metric_error.item(),
                                    target.size(0))
        depth2_normalized_errors.update(depth2_norm_error.item(),
                                        target.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # append per-batch loss/error to the full log (opened per iteration
        # so a crash mid-epoch loses at most one row)
        with open(os.path.join(args.save_path, args.log_full), 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter='\t')
            writer.writerow([loss.item(), depth2_metric_error.item()])
        term_logger.train_bar.update(i + 1)
        if i % args.print_freq == 0:
            term_logger.train_writer.write(
                'Train: Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Loss {loss.val:.4f} ({loss.avg:.4f}) '
                'Depth error {depth2_error.val:.3f} ({depth2_error.avg:.3f})\r'
                .format(batch_time=batch_time,
                        data_time=data_time,
                        loss=losses,
                        depth2_error=depth2_metric_errors))
        if i >= epoch_size - 1:
            break
        n_iter += 1

    return losses.avg, depth2_metric_errors.avg, depth2_normalized_errors.avg