def validate(val_loader, model, epoch, write_to_file=True):
    """Evaluate ``model`` over ``val_loader`` for one epoch.

    Prints running metrics every ``args.print_freq`` batches, builds an
    8-row comparison mosaic that is written to ``output_directory``, and
    optionally appends the averaged metrics to ``test_csv``.

    Args:
        val_loader: iterable of ``(input, target)`` CUDA-movable batches.
        model: network under evaluation.
        epoch: epoch index, used only in the saved image filename.
        write_to_file: when True, append averaged metrics to ``test_csv``.

    Returns:
        Tuple ``(avg, img_merge)`` — averaged metrics from the
        AverageMeter and the merged visualization image (``None`` when no
        image was built, e.g. depth-only modality or an empty loader).
    """
    average_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()

    # Pre-initialize so the return below cannot raise NameError when the
    # loader yields no batches (the original left img_merge unbound then).
    img_merge = None

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        # torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        # torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization: one row every `skip` batches,
        # flushed to disk once 8 rows have been collected
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                # rgbd inputs carry RGB in channels 0-2 and depth in channel 3
                rgb = input[:, :3, :, :]
                depth = input[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(
                        rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target,
                                                       pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f})\n\t'
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        # append (not overwrite) so results accumulate across epochs
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
Beispiel #2
0
def validate(val_loader, model, epoch, logger):
    """Evaluate ``model`` on ``val_loader`` and log averaged metrics.

    Prints running metrics every ``args.print_freq`` batches, saves a
    comparison mosaic of up to 8 rows to ``output_directory``, and writes
    the epoch averages to the TensorBoard-style ``logger``.

    Returns:
        Tuple ``(avg, img_merge)`` — averaged metrics and the merged
        visualization image (``None`` if the loader was empty).
    """
    average_meter = AverageMeter()

    model.eval()  # switch to evaluate mode

    end = time.time()

    # Save an image row every `skip` iterations. Clamp to >= 1: for
    # loaders with fewer than 9 batches the integer division yields 0,
    # which would silently disable all visualization (and `8 * skip`
    # would collide with iteration 0).
    skip = max(1, len(val_loader) // 9)

    # Avoid NameError on the final return if the loader yields nothing.
    img_merge = None

    for i, (input, target) in enumerate(val_loader):

        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)

        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization; kitti batches are trimmed to
        # their first sample before merging
        if args.dataset == 'kitti':
            rgb = input[0]
            pred = pred[0]
            target = target[0]
        else:
            rgb = input

        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'RML={result.absrel:.2f}({average.absrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'Rel={average.absrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    logger.add_scalar('Test/rmse', avg.rmse, epoch)
    logger.add_scalar('Test/Rel', avg.absrel, epoch)
    logger.add_scalar('Test/log10', avg.lg10, epoch)
    logger.add_scalar('Test/Delta1', avg.delta1, epoch)
    logger.add_scalar('Test/Delta2', avg.delta2, epoch)
    logger.add_scalar('Test/Delta3', avg.delta3, epoch)
    return avg, img_merge
Beispiel #3
0
                    rand_image, rand_depth = image.cuda(), depth.cuda()

                    rgb0 = model(real_image, real_depth, real_depth)
                    rgb1 = model(real_image, real_depth, rand_depth)
                    rgb2 = model(rand_image, rand_depth, real_depth)

                    rgb_ori = utils.output_rgb(real_image)
                    dep_ori = utils.output_depth(real_depth)
                    out_ori = utils.output_rgb(rgb0)
                    dep_ran = utils.output_depth(rand_depth)
                    out_dep = utils.output_rgb(rgb1)
                    rgb_ran = utils.output_rgb(rand_image)
                    out_rgb = utils.output_rgb(rgb2)

                    img_merge = utils.merge_into_row([
                        rgb_ori, dep_ori, out_ori, dep_ran, out_dep, rgb_ran,
                        out_rgb
                    ])

                elif (i < 8 * skip) and (i % skip == 0):
                    image, depth = batch['image'], batch['depth']
                    real_image, real_depth = image.cuda(), depth.cuda()

                    rgb0 = model(real_image, real_depth, real_depth)
                    rgb1 = model(real_image, real_depth, rand_depth)
                    rgb2 = model(rand_image, rand_depth, real_depth)

                    rgb_ori = utils.output_rgb(real_image)
                    dep_ori = utils.output_depth(real_depth)
                    out_ori = utils.output_rgb(rgb0)
                    dep_ran = utils.output_depth(rand_depth)
                    out_dep = utils.output_rgb(rgb1)
Beispiel #4
0
def validation(device,
               data_loader,
               model,
               ord_loss,
               output_dir,
               epoch,
               logger,
               PRINT_FREQ,
               BETA,
               GAMMA,
               ORD_NUM=80.0):
    """Evaluate an ordinal-regression depth model on ``data_loader``.

    Computes metrics at two depth caps (80 m and 50 m), saves a small
    comparison mosaic to ``output_dir``, prints summaries, and logs the
    cap-80/cap-50 averages to ``logger``.

    Args:
        device: torch device for inputs and predictions.
        data_loader: yields ``(_input, _sparse_depth, _dense_depth)``.
        ord_loss: ordinal loss callable evaluated on the probability map.
        BETA, GAMMA, ORD_NUM: SID label-to-depth decoding parameters.
    """
    avg80 = AverageMeter()
    avg50 = AverageMeter()
    model.eval()

    end = time.time()
    skip = 1
    img_list = []

    evalbar = tqdm(total=len(data_loader))

    for i, (_input, _sparse_depth, _dense_depth) in enumerate(data_loader):
        _input, _sparse_depth, _dense_depth = _input.to(
            device), _sparse_depth.to(device), _dense_depth.to(device)
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            _pred_prob, _pred_label = model(_input)
            loss = ord_loss(_pred_prob, _dense_depth)

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # decode ordinal labels back to metric depth (SID discretization)
        pred_depth = utils.label2depth_sid(_pred_label,
                                           K=ORD_NUM,
                                           alpha=1.0,
                                           beta=BETA,
                                           gamma=GAMMA)

        abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_errors(
            _sparse_depth, pred_depth.to(device))

        # measure accuracy and record loss at both evaluation caps
        result80 = Result()
        result80.evaluate(pred_depth, _sparse_depth.data, cap=80)
        result50 = Result()
        result50.evaluate(pred_depth, _sparse_depth.data, cap=50)

        avg80.update(result80, gpu_time, data_time, _input.size(0))
        avg50.update(result50, gpu_time, data_time, _input.size(0))
        end = time.time()

        # save images for visualization
        if i == 0:
            img_merge = utils.merge_into_row(_input, _dense_depth, pred_depth)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(_input, _dense_depth, pred_depth)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = os.path.join(output_dir,
                                    'eval_{}.png'.format(int(epoch)))
            print('save validation figures at {}'.format(filename))
            utils.save_image(img_merge, filename)

        if (i + 1) % PRINT_FREQ == 0:
            # NOTE: fixed — the original referenced len(val_loader), a name
            # that does not exist in this function (parameter is data_loader),
            # which raised NameError on the first periodic print.
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'AbsRel={result.absrel:.2f}({average.absrel:.2f}) '
                  'SqRel={result.sqrel:.2f}({average.sqrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'RMSE_log={result.rmse_log:.3f}({average.rmse_log:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      i + 1,
                      len(data_loader),
                      gpu_time=gpu_time,
                      result=result80,
                      average=avg80.average()))

        # update progress bar and show loss ("SqRe;" typo in the label fixed)
        evalbar.set_postfix(
            ORD_LOSS=
            '{:.2f},RMSE/log= {:.2f}/{:.2f},delta={:.2f}/{:.2f}/{:.2f},AbsRel/SqRel={:.2f}/{:.2f}'
            .format(loss, rmse, rmse_log, a1, a2, a3, abs_rel, sq_rel))
        evalbar.update(1)

    # release the progress bar's output handle
    evalbar.close()

    print('\n**** CAP=80 ****\n'
          'RMSE={average.rmse:.3f}\n'
          'RMSE_log={average.rmse_log:.3f}\n'
          'AbsRel={average.absrel:.3f}\n'
          'SqRel={average.sqrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          'iRMSE={average.irmse:.3f}\n'
          'iMAE={average.imae:.3f}\n'
          't_GPU={average.gpu_time:.3f}\n'.format(average=avg80.average()))

    print('\n**** CAP=50 ****\n'
          'RMSE={average.rmse:.3f}\n'
          'RMSE_log={average.rmse_log:.3f}\n'
          'AbsRel={average.absrel:.3f}\n'
          'SqRel={average.sqrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          'iRMSE={average.irmse:.3f}\n'
          'iMAE={average.imae:.3f}\n'
          't_GPU={average.gpu_time:.3f}\n'.format(average=avg50.average()))

    logger.add_scalar('VAL_CAP80/RMSE', avg80.average().rmse, epoch)
    logger.add_scalar('VAL_CAP80/RMSE_log', avg80.average().rmse_log, epoch)
    logger.add_scalar('VAL_CAP80/AbsRel', avg80.average().absrel, epoch)
    logger.add_scalar('VAL_CAP80/SqRel', avg80.average().sqrel, epoch)
    logger.add_scalar('VAL_CAP80/Delta1', avg80.average().delta1, epoch)
    logger.add_scalar('VAL_CAP80/Delta2', avg80.average().delta2, epoch)
    logger.add_scalar('VAL_CAP80/Delta3', avg80.average().delta3, epoch)

    logger.add_scalar('VAL_CAP50/RMSE', avg50.average().rmse, epoch)
    logger.add_scalar('VAL_CAP50/RMSE_log', avg50.average().rmse_log, epoch)
    logger.add_scalar('VAL_CAP50/AbsRel', avg50.average().absrel, epoch)
    logger.add_scalar('VAL_CAP50/SqRel', avg50.average().sqrel, epoch)
    logger.add_scalar('VAL_CAP50/Delta1', avg50.average().delta1, epoch)
    logger.add_scalar('VAL_CAP50/Delta2', avg50.average().delta2, epoch)
    logger.add_scalar('VAL_CAP50/Delta3', avg50.average().delta3, epoch)
Beispiel #5
0
def validate(val_loader, model, epoch, write_to_file=True):
    """Evaluate ``model`` with PnP-Depth test-time refinement.

    For each batch the model is run once normally, then its intermediate
    feature map is iteratively nudged (signed-gradient steps against a
    masked L1 loss on the sparse depth channel) before the final
    prediction is taken. Metrics, visualization mosaic, and optional CSV
    logging mirror the plain ``validate``.

    Returns:
        Tuple ``(avg, img_merge)``.
    """
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode

    # Avoid NameError on the return when the loader yields no batches.
    img_merge = None

    # Loop-invariant: construct the PnP criterion once instead of
    # re-instantiating a CUDA loss module on every batch.
    criterion = criteria.MaskedL1Loss().cuda()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        ##############################################################
        ##             Start of PnP-Depth modification              ##
        ##############################################################
        # Original inference
        with torch.no_grad():
            ori_pred = model.pnp_forward_front(model.pnp_forward_rear(
                input))  # equivalent to `ori_pred = model(input)`

        # Inference with PnP
        sparse_target = input[:, -1:]  # NOTE: written for rgbd input
        pnp_iters = 5  # number of iterations
        pnp_alpha = 0.01  # update/learning rate
        pnp_z = model.pnp_forward_front(input)
        for pnp_i in range(pnp_iters):
            if pnp_i != 0:
                # iFGM step: move features against the sign of the gradient
                pnp_z = pnp_z - pnp_alpha * torch.sign(pnp_z_grad)  # iFGM
            pnp_z = Variable(pnp_z, requires_grad=True)
            pred = model.pnp_forward_rear(pnp_z)
            if pnp_i < pnp_iters - 1:
                pnp_loss = criterion(pred, sparse_target)
                pnp_z_grad = Grad([pnp_loss], [pnp_z], create_graph=True)[0]
        ##############################################################
        ##              End of PnP-Depth modification               ##
        ##############################################################
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss (on the PnP-refined prediction)
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]
                depth = input[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(
                        rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target,
                                                       pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })
    return avg, img_merge
Beispiel #6
0
def demo(val_loader, model, epoch, write_to_file=True):
    """Live-evaluation demo: streams predictions and metrics to a visdom
    server (`viz`) every `skip` batches, then optionally appends the
    averaged metrics to ``test_csv``.

    Returns (avg, img_merge) like the other validate variants.

    NOTE(review): `rgb` is only assigned when ``args.modality == 'rgb'``;
    for any other modality the ``utils.merge_into_row`` call below would
    raise NameError — presumably this demo is rgb-only; confirm.
    """
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()

    #viz.line([[0.,0.]],[0],win='demo2',opts=dict(title='resluts2',legend=['RMSE','MAE']))
    # Initialize the visdom line plot with a zero point for all six metrics.
    viz.line([[0., 0., 0., 0., 0., 0.]], [0],
             win='demo1',
             opts=dict(
                 title='resluts1',
                 legend=['t_GPU', 'Delta1', 'REL', 'lG10', 'RMSE', 'MAE']))

    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        # torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        # torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # push a frame to visdom every `skip` batches
        skip = 10

        if args.modality == 'rgb':
            rgb = input

        img_merge = utils.merge_into_row(rgb, target, pred)
        #row = utils.merge_into_row(rgb, target, pred)
        #img_merge = utils.add_row(img_merge, row)
        #filename = output_directory + '/comparison_' + str(epoch) + '.png'
        #utils.save_image(img_merge, filename)
        #print(img_merge)
        if i % skip == 0:
            # HWC numpy image -> CHW tensor, as visdom expects
            img = np.transpose(img_merge, (2, 0, 1))
            img = torch.from_numpy(img)
            viz.images(img, win='depth_estimation')

            # Round each metric through its display format so the plot
            # matches the printed precision.
            t_GPU = float('{gpu_time:.3f}'.format(gpu_time=gpu_time))
            RMSE = float('{result.rmse:.2f}'.format(result=result))
            MAE = float('{result.mae:.2f}'.format(result=result))
            Delta1 = float('{result.delta1:.3f}'.format(result=result))
            REL = float('{result.absrel:.3f}'.format(result=result))
            Lg10 = float('{result.lg10:.3f}'.format(result=result))

            #print(t_GPU)
            #viz.line([[RMSE,MAE]],[i],win='demo2',update='append')
            viz.line([[t_GPU, Delta1, REL, Lg10, RMSE, MAE]], [i],
                     win='demo1',
                     update='append')
            # brief pause so the visdom UI can keep up with updates
            time.sleep(0.2)

    avg = average_meter.average()

    # NOTE(review): `result` and `gpu_time` here are from the LAST batch;
    # this raises NameError if the loader was empty — confirm intended.
    viz.text('\n*\n'
             'RMSE={result.rmse:.2f}({average.rmse:.3f})\n\t'
             'MAE={result.mae:.2f}({average.mae:.3f})\n\t'
             'Delta1={result.delta1:.3f}({average.delta1:.3f})\n\t'
             'REL={result.absrel:.3f}({average.absrel:.3f})\n\t'
             'Lg10={result.lg10:.3f}({average.lg10:.3f})\n\t'
             't_GPU={gpu_time:.3f}{time:.3f}\n'.format(gpu_time=gpu_time,
                                                       average=avg,
                                                       time=avg.gpu_time,
                                                       result=result))

    if write_to_file:
        # append averaged metrics for this run to the shared CSV
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })
    return avg, img_merge
def validate(val_loader, model, write_to_file=True):
    """Evaluate ``model`` over ``val_loader`` (no epoch argument).

    Saves the first prediction as ``pred.png``, builds a comparison
    mosaic of up to 8 rows (returned, not written to disk), prints
    running metrics every 10 batches, and optionally appends the epoch
    averages to ``test_csv``.

    Returns:
        Tuple ``(avg, img_merge)`` — averaged metrics and the merged
        visualization image (``None`` if the loader was empty).
    """
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode

    # Pre-initialize so the return below cannot raise NameError when the
    # loader yields no batches.
    img_merge = None

    end = time.time()
    print_freq = 10
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization (one row every `skip` batches)
        skip = 10
        rgb = input

        if i == 0:
            # local import: matplotlib is only needed for this one dump
            import matplotlib.pyplot as plt
            plt.imsave('pred.png', np.squeeze(pred.cpu().numpy()))
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)

        if (i + 1) % print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
def validate(val_loader, model, epoch, write_to_file=True):
    """Evaluate ``model`` with optional interactive matplotlib previews.

    When the module-level ``visualize`` flag is set, each batch blocks on
    a matplotlib figure (input / target / prediction panels) until a
    button press. Afterwards the usual 8-row comparison mosaic, periodic
    prints, and optional CSV append are performed.

    NOTE(review): `visualize` and `args` are module-level names not
    visible in this chunk — confirm they are defined before this runs.

    Returns (avg, img_merge).
    """
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # io.imshow(np.squeeze(input[:,3:,:,:].cpu().numpy()), interpolation='nearest')
        # io.show()
        #print(input.size())
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # Interactive preview: blocks per batch until a button press.
        if visualize:
            if args.modality == 'd':
                # depth-only input: 3 panels (sparse input, GT, prediction)
                fig = plt.figure()
                fig.suptitle(
                    'Error Percentage ' + str(round(result.absrel * 100, 2)) +
                    ' GPU TIME ' + str(round(gpu_time, 2)) + '   FPS ' +
                    str(round(60.0 / (gpu_time + data_time), 2)),
                    fontsize=16)

                plt.subplot(131)
                plt.title("SPARSE (Input)")
                plt.axis('off')
                plt.imshow(np.squeeze(input.cpu().numpy()),
                           interpolation='nearest')

                plt.subplot(132)
                plt.title("TARGET (Ground Truth)")
                plt.axis('off')
                plt.imshow(np.squeeze(target.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                plt.subplot(133)
                plt.title("PREDICTED")
                plt.axis('off')
                plt.imshow(np.squeeze(pred.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                # plt.waitforbuttonpress(timeout=2)
                # plt.close()

                # blocks until a key/mouse press, then closes the figure
                plt.waitforbuttonpress()
                plt.close()

            if args.modality == 'rgbd':
                # sparse = np.squeeze(input[:, 3:, :, :].cpu().numpy())
                # print(sparse.shape)
                # sleep(3)

                # rgbd input: 4 panels (RGB, sparse depth, GT, prediction)
                fig = plt.figure()

                fig.suptitle(
                    'Error Percentage ' + str(round(result.absrel * 100, 2)) +
                    ' GPU TIME ' + str(round(gpu_time, 2)) + '   FPS ' +
                    str(round(60.0 / (gpu_time + data_time), 2)),
                    fontsize=16)

                # channels 0-2 are RGB in [0,1]; scale to uint8 for display
                rgb1 = 255 * np.transpose(
                    np.squeeze(input[:, :3, :, :].cpu().numpy()),
                    (1, 2, 0))  # H, W, C
                rgb1 = Image.fromarray(rgb1.astype('uint8'))
                plt.subplot(221)
                plt.title("RGB (Input)")
                plt.axis('off')
                plt.imshow(rgb1)

                plt.subplot(222)
                plt.title("SPARSE (Input)")
                plt.axis('off')
                plt.imshow(np.squeeze(input[:, 3:, :, :].cpu().numpy()),
                           interpolation='nearest')

                plt.subplot(223)
                plt.title("TARGET (Ground Truth)")
                plt.axis('off')
                plt.imshow(np.squeeze(target.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                plt.subplot(224)
                plt.title("PREDICTED")
                plt.axis('off')
                plt.imshow(np.squeeze(pred.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                # plt.waitforbuttonpress(timeout=2)
                # plt.close()

                plt.waitforbuttonpress()
                plt.close()


# save 8 images for visualization (one mosaic row every `skip` batches;
# odd indentation of this comment preserved from the original layout)
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]
                depth = input[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(
                        rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target,
                                                       pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        # append averaged metrics to the shared CSV
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
Beispiel #9
0
def validate(val_loader, model, epoch, write_to_file=True, logger=None):
    """Evaluate ``model`` on ``val_loader`` with per-condition breakdowns.

    Besides the overall metrics, results are accumulated separately for
    day/night and sun/rain combinations (read from the per-sample
    ``daynight_info`` field), so batch size must be 1.

    Args:
        val_loader: validation loader. Non-"nuscenes" datasets yield
            ``(inputs, labels)`` tuples; "nuscenes" yields dicts.
            NOTE(review): the condition bookkeeping below indexes ``data``
            by string key, so it only works with the dict-style loader —
            confirm against callers.
        model: network under evaluation (switched to eval mode here).
        epoch: epoch number, used for summaries and output filenames.
        write_to_file: if True, append the averaged metrics to ``test_csv``.
        logger: optional summary writer. When None, qualitative results
            are instead dumped to ``<output_directory>/results.h5``.

    Returns:
        Tuple ``(avg, img_merge)``: averaged ``Result`` over the loader and
        the stacked comparison image (None when ``args.modality == 'd'``).
    """
    average_meter = AverageMeter()
    if args.arch in multistage_group:
        average_meter_stage1 = AverageMeter()

    # Condition-specific meters: day/night, sun/rain, and their products.
    avg_meter_day = AverageMeter()
    avg_meter_night = AverageMeter()
    avg_meter_day_sun = AverageMeter()
    avg_meter_day_rain = AverageMeter()
    avg_meter_night_sun = AverageMeter()
    avg_meter_night_rain = AverageMeter()
    avg_meter_sun = AverageMeter()
    avg_meter_rain = AverageMeter()

    model.eval()  # switch to evaluate mode
    end = time.time()

    # Without a logger, qualitative samples are written to an HDF5 file.
    if logger is None:
        import h5py
        output_path = os.path.join(output_directory, "results.h5")
        h5_writer = h5py.File(output_path, "w", libver="latest", swmr=True)

    for i, data in enumerate(val_loader):
        # Compatibility for nuscenes (dict batches) vs. tuple batches.
        if args.data != "nuscenes":
            inputs, target = data[0].cuda(), data[1].cuda()
        else:
            inputs, target = data["inputs"].cuda(), data["labels"].cuda()

        torch.cuda.synchronize()
        data_time = time.time() - end

        # Compute output; multistage networks return a dict of stages.
        end = time.time()
        with torch.no_grad():
            if args.arch in multistage_group:
                pred_ = model(inputs)
                pred1 = pred_["stage1"]
                pred = pred_["stage2"]
            else:
                pred = model(inputs)
                pred_ = None

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # Dump every 5th sample for qualitative inspection.
        if (logger is None) and (i % 5 == 0):
            pred_np = {}
            if pred_ is None:
                pred_np = pred.cpu().numpy()
            else:
                for stage in pred_.keys():
                    pred_np[stage] = pred_[stage][0, ...].cpu().numpy()
            res = {
                "inputs": data["inputs"][0, ...].cpu().numpy(),
                "lidar_depth": data["lidar_depth"][0, ...].cpu().numpy(),
                "radar_depth": data["radar_depth"][0, ...].cpu().numpy(),
                "pred": pred_np
            }
            file_key = "%05d" % (i)
            f_group = h5_writer.create_group(file_key)
            # Store data. Nested dicts are flattened one level; a "*"
            # suffix avoids dataset-name collisions with top-level keys.
            # (The original rebound the outer loop variable ``key`` inside
            # the inner loop — distinct names fix that shadowing.)
            for key, output_data in res.items():
                if isinstance(output_data, dict):
                    for sub_key, sub_data in output_data.items():
                        name = sub_key + "*" if sub_key in res.keys() else sub_key
                        f_group.create_dataset(name, data=sub_data,
                                               compression="gzip")
                elif output_data is None:
                    pass
                else:
                    f_group.create_dataset(key, data=output_data,
                                           compression="gzip")

        # Measure accuracy and record loss.
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, inputs.size(0))
        if args.arch in multistage_group:
            result_stage1 = Result()
            result_stage1.evaluate(pred1.data, target.data)
            average_meter_stage1.update(result_stage1, gpu_time, data_time,
                                        inputs.size(0))
        end = time.time()

        # Route the sample into the matching day/night x sun/rain meters.
        # Requires batch size 1 so a single tag describes a single result.
        assert inputs.size(0) == 1
        daynight_info = data["daynight_info"][0]
        if ("day" in daynight_info) and ("rain" in daynight_info):
            avg_meter_day_rain.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_day.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_rain.update(result, gpu_time, data_time, inputs.size(0))
        elif "day" in daynight_info:
            avg_meter_day_sun.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_day.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_sun.update(result, gpu_time, data_time, inputs.size(0))

        if ("night" in daynight_info) and ("rain" in daynight_info):
            avg_meter_night_rain.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_night.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_rain.update(result, gpu_time, data_time, inputs.size(0))
        elif "night" in daynight_info:
            avg_meter_night_sun.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_night.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_sun.update(result, gpu_time, data_time, inputs.size(0))

        # Save 8 images (one every ``skip`` iterations) for visualization.
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = inputs
            elif args.modality == 'rgbd':
                rgb = inputs[:, :3, :, :]
                depth = inputs[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target, pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1, len(val_loader), gpu_time=gpu_time,
                      result=result, average=average_meter.average()))

    # Close the qualitative-results file, if one was opened.
    if logger is None:
        h5_writer.close()
    avg = average_meter.average()
    if args.arch in multistage_group:
        avg_stage1 = average_meter_stage1.average()
        if logger is not None:
            record_test_scalar_summary(avg_stage1, epoch, logger, "Test_stage1")

    # NOTE(review): the summary is printed twice below with slightly
    # different field sets; both prints are kept for log compatibility.
    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'Rel={average.absrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if logger is not None:
        # Record summaries to the logger.
        record_test_scalar_summary(avg, epoch, logger, "Test")

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel,
                'lg10': avg.lg10, 'mae': avg.mae, 'delta1': avg.delta1,
                'delta2': avg.delta2, 'delta3': avg.delta3,
                'data_time': avg.data_time, 'gpu_time': avg.gpu_time
            })

    return avg, img_merge
Beispiel #10
0
def validate(val_loader, model, epoch):
    """Evaluate the multi-head model (depth + mask + 3-way classifier).

    Tracks depth metrics via ``average_meter`` and a running classification
    accuracy computed from the thresholded heads ``c1``/``c2``/``c3``
    against ``label``.

    Args:
        val_loader: loader yielding ``(input, target, label, mask)``.
        model: network returning ``(pred, pred_mask, c1, c2, c3)``.
        epoch: epoch number, used in printouts and the comparison filename.

    Returns:
        Tuple ``(avg, img_merge)``.
    """
    average_meter = AverageMeter()

    model.eval()  # switch to evaluate mode

    end = time.time()

    # Save 8 rows of images, sampled evenly across the loader. Guard
    # against loaders shorter than 8 batches: skip would otherwise be 0
    # and ``i % skip`` below would raise ZeroDivisionError.
    skip = max(len(val_loader) // 8, 1)
    count_b = 0  # correctly classified samples
    count = 0    # total samples seen
    for i, (input, target, label, mask) in enumerate(val_loader):

        input, target = input.cuda(), target.cuda()
        input = torch.squeeze(input, 0)
        mask = mask.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred, pred_mask, c1, c2, c3 = model(input)
        b, c, h, w = pred.size()

        # Threshold the three classifier heads at 0.5.
        # (Assumes c1, c2 and c3 share the same shape — TODO confirm.)
        temp0_2 = torch.zeros_like(c1)
        temp1_2 = torch.ones_like(c1)
        c1_2 = torch.where(c1 > 0.5, temp1_2, temp0_2)
        c2_2 = torch.where(c2 > 0.5, temp1_2, temp0_2)
        c3_2 = torch.where(c3 > 0.5, temp1_2, temp0_2)

        torch.cuda.synchronize()

        gpu_time = time.time() - end

        target = torch.squeeze(target, 1)
        # measure accuracy and record loss
        c1_2 = c1_2.cpu().numpy()
        c2_2 = c2_2.cpu().numpy()
        c3_2 = c3_2.cpu().numpy()
        labels_np = label.numpy()
        # One-hot (c1, c2, c3) against the ground-truth label:
        # (0,0,0) -> -1, (1,0,0) -> 0, (0,1,0) -> 1, (0,0,1) -> 2.
        for k in range(labels_np.shape[0]):
            if c1_2[k] == 0 and c2_2[k] == 0 and c3_2[k] == 0 and labels_np[k] == -1:
                count_b += 1
            if c1_2[k] == 1 and c2_2[k] == 0 and c3_2[k] == 0 and labels_np[k] == 0:
                count_b += 1
            if c1_2[k] == 0 and c2_2[k] == 1 and c3_2[k] == 0 and labels_np[k] == 1:
                count_b += 1
            if c1_2[k] == 0 and c2_2[k] == 0 and c3_2[k] == 1 and labels_np[k] == 2:
                count_b += 1
        count += labels_np.shape[0]
        result = Result()
        result.evaluate(pred, target, label)

        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        rgb = input
        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred, target, label)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred, target, label)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print("acc: %f" % (count_b * 1.0 / count))
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'MAE={result.mae:.2f}({average.mae:.2f}) '.format(
                      i + 1, len(val_loader), gpu_time=gpu_time,
                      result=result, average=average_meter.average()))
    avg = average_meter.average()

    print("epoch: %d, acc: %f" % (epoch, count_b * 1.0 / count))
    print('\n*\n'
          'MAE={average.mae:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    return avg, img_merge
Beispiel #11
0
def validate(val_loader, model, epoch, write_to_file=True):
    """Evaluate ``model`` on ``val_loader`` and report depth metrics.

    Also builds a comparison image from 8 evenly spaced samples and,
    optionally, appends the averaged metrics to ``test_csv``.

    Args:
        val_loader: loader yielding ``(input, target)`` batches.
        model: network mapping ``input`` to a depth prediction.
        epoch: epoch number, used in the comparison-image filename.
        write_to_file: if True, append averaged metrics to the CSV.

    Returns:
        Tuple ``(avg, img_merge)``.

    NOTE(review): only ``args.modality == 'rgb'`` is handled here; any
    other modality leaves ``rgb`` unbound and raises NameError on the
    first iteration — confirm against callers.
    """
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)  # e.g. (1, 1, 224, 224)
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50

        if args.modality == 'rgb':
            rgb = input

        # Debug aid: inspect the per-sample depth maps. (Removed the dead
        # in-loop ``import scipy.misc`` and the commented-out image dumps;
        # scipy.misc no longer exists in modern SciPy.)
        gt, predicted = utils.get_depth_map(rgb, target, pred)
        print("Predicted shape ", predicted.shape)

        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1, len(val_loader), gpu_time=gpu_time,
                      result=result, average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel,
                'lg10': avg.lg10, 'mae': avg.mae, 'delta1': avg.delta1,
                'delta2': avg.delta2, 'delta3': avg.delta3,
                'data_time': avg.data_time, 'gpu_time': avg.gpu_time
            })
    return avg, img_merge
Beispiel #12
0
def validate_coarse(val_loader, model, epoch):
    """Evaluate a coarse (low-resolution) depth model.

    Predictions and targets are bilinearly upsampled to the RGB input
    resolution before being merged into the comparison image; metrics are
    computed at the model's native output resolution.

    Args:
        val_loader: loader yielding ``(input, target)`` batches.
        model: coarse depth network.
        epoch: epoch number, used in the comparison-image filename.

    Returns:
        Tuple ``(avg, img_merge)``.
    """
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss (at native output resolution)
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            rgb = input
            # Upsample target and prediction to the RGB resolution for a
            # fair side-by-side comparison. Uses the functional API
            # instead of constructing an nn.Upsample module (original
            # "upsmaple" typo) on every iteration.
            upsample_size = (rgb.size()[2], rgb.size()[3])
            target = torch.nn.functional.interpolate(
                target, size=upsample_size, mode='bilinear',
                align_corners=True)
            pred = torch.nn.functional.interpolate(
                pred, size=upsample_size, mode='bilinear',
                align_corners=True)

            if i == 0:
                img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/coarse_comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1, len(val_loader), gpu_time=gpu_time,
                      result=result, average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    return avg, img_merge
Beispiel #13
0
def validate(val_loader, model, epoch, write_to_file=True):
    """Evaluate ``model`` on ``val_loader``; ``target`` is the depth GT.

    Only channel 0 of the model output is treated as the depth prediction
    when computing metrics.

    Args:
        val_loader: loader yielding ``(input, target)`` batches.
        model: network whose first output channel is the depth prediction.
        epoch: epoch number, used in the comparison-image filename.
        write_to_file: if True, append averaged metrics to ``test_csv``.

    Returns:
        Tuple ``(avg, img_merge)``.
    """
    average_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # Compute output inside no_grad so evaluation does not build
        # autograd graphs. (The original wrapped the tensors in the
        # deprecated torch.autograd.Variable and tracked gradients
        # needlessly.)
        end = time.time()
        with torch.no_grad():
            depth_pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss; select channel 0 (depth).
        result = Result()
        output1 = torch.index_select(
            depth_pred.data, 1, torch.tensor([0], device=depth_pred.device))
        result.evaluate(output1, target)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]

            if i == 0:
                img_merge = utils.merge_into_row(rgb, target, depth_pred)
            # sample one comparison row every ``skip`` images
            elif (i < 8 * skip) and (i % skip == 0):
                row = utils.merge_into_row(rgb, target, depth_pred)
                img_merge = utils.add_row(img_merge, row)  # append a row
            elif i == 8 * skip:  # stop after 8 rows and write the image
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1, len(val_loader), gpu_time=gpu_time,
                      result=result, average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel,
                'lg10': avg.lg10, 'mae': avg.mae, 'delta1': avg.delta1,
                'delta2': avg.delta2, 'delta3': avg.delta3,
                'data_time': avg.data_time, 'gpu_time': avg.gpu_time
            })

    return avg, img_merge
def inference(model, rgb_path, sparse_path, b_gpu):
    """Run RGB-D depth completion over a directory of sparse samples.

    For every ``*.txt`` sparse file under ``sparse_path``, loads the
    matching RGB frame from ``rgb_path`` (same timestamp stem, ``.png``),
    builds a 4-channel RGB-D tensor, runs ``model``, and writes the point
    cloud, a dense depth visualization and a comparison image under
    ``res/<postfix>/``.

    Args:
        model: trained depth-completion network.
        rgb_path: directory prefix containing ``<timestamp>.png`` frames.
        sparse_path: directory prefix containing ``<timestamp>.txt``
            sparse-depth files.
        b_gpu: if True, run the model on CUDA.
    """
    # sparse_files = glob(sparse_path+"*.tiff")
    # print(sparse_files)
    sparse_files = glob(sparse_path + "*.txt")
    print(sparse_files)
    # NOTE(review): assumes at least one sparse file and '/'-separated
    # paths; postfix is the parent directory name of the sparse files.
    postfix = sparse_files[0].split('/')[-2]
    for sparse in sparse_files:
        # extract the frame timestamp from the sparse filename stem
        # time_stamp = sparse.split('/')[-1].split('_')[0]
        time_stamp = sparse.split('/')[-1].split('.')[0]
        print(time_stamp)
        filename_rgb = rgb_path + time_stamp + ".png"
        print(filename_rgb)
        model.eval()
        rgb = cv2.imread(filename_rgb)
        if rgb is None:
            # no matching RGB frame for this timestamp; skip the sample
            continue
        # rgb=cv2.flip( rgb, 1 )
        rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
        # depth = cv2.imread(sparse, cv2.IMREAD_UNCHANGED)
        # get_depth parses the sparse file into a depth map sized to
        # (228, 304) plus the raw point cloud — project helper, TODO confirm.
        depth, cloud = get_depth(sparse, (228,304))
        if depth is None:
            continue
        # Persist the point cloud. NOTE(review): np.save appends ".npy"
        # to names without that extension, so this writes "<ts>.npz.npy".
        np.save("res/"+postfix+"/"+time_stamp+".npz", cloud)
        
        # transform: resize RGB to the network input size (304x228)

        # transform = transforms.Compose([
        #     transforms.Resize(240.0 / 480),
        #     transforms.CenterCrop((228, 304)),
        # ])
        # rgb_np = transform(rgb)
        rgb_np = cv2.resize(rgb,(304,228))

        # cv2.imwrite("res/"+postfix+"/"+time_stamp+"_test.png", rgb_np)

        # normalize RGB to [0, 1]. NOTE: np.asfarray is deprecated in
        # NumPy 2.0; use np.asarray(..., dtype=float) when upgrading.
        rgb_np = np.asfarray(rgb_np, dtype='float') / 255

        # depth_np = transform(depth)
        # depth_np = cv2.resize(depth,(304,228), interpolation=cv2.INTER_NEAREST)

        # depth_np = np.asfarray(depth_np, dtype='float')
        depth_np = depth
        print(rgb_np.shape)
        print(depth_np.shape)

        # stack sparse depth as a 4th channel: HxWx4 RGB-D
        input_np = np.append(rgb_np, np.expand_dims(depth_np, axis=2), axis=2)
        print(input_np.shape)
        
        # input_np = np.transpose(input_np, (1, 2, 0))
        # print(input_np.shape)
        input_tensor = to_tensor(input_np)
        # ensure a 4-D NCHW batch tensor before the forward pass
        while input_tensor.dim() < 4:
            input_tensor = input_tensor.unsqueeze(0)
        if b_gpu:
            input_tensor = input_tensor.cuda()
        with torch.no_grad():
            print(input_tensor.size())
            pred = model(input_tensor)
        if b_gpu:    
            torch.cuda.synchronize()
        depth_pred_cpu = np.squeeze(pred.data.cpu().numpy())
        # print(depth_pred_cpu)
        # print("====")
        # print(np.max(depth_pred_cpu))

        # scale the prediction to 0-255 for visualization, resize to VGA
        res = depth_pred_cpu / np.max(depth_pred_cpu) * 255
        res = cv2.resize(res,(640,480))
        # res = res.astype(np.uint16)
        print(np.unique(res))

        cv2.imwrite("res/"+postfix+"/"+time_stamp+"_dense.png", res)

        # side-by-side comparison: RGB, sparse depth channel, prediction
        img_merge = utils.merge_into_row(input_tensor[:,:3,:,:], input_tensor[:,3,:,:], pred)
        cv2.imwrite("res/"+postfix+"/"+time_stamp+"_comp.png", img_merge)