Example #1
def val_kitti(val_dataloader, model):
    """
    Validate the model.
    """
    smoothed_absRel = SmoothedValue(len(val_dataloader))
    smoothed_silog = SmoothedValue(len(val_dataloader))
    smoothed_silog2 = SmoothedValue(len(val_dataloader))
    smoothed_criteria = {
        'err_absRel': smoothed_absRel,
        'err_silog': smoothed_silog,
        'err_silog2': smoothed_silog2
    }
    for i, data in enumerate(val_dataloader):
        pred_depth = model.module.inference_kitti(data)
        smoothed_criteria = validate_err_kitti(pred_depth['b_fake'],
                                               data['B_raw'],
                                               smoothed_criteria)
        print(
            np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() - (
                smoothed_criteria['err_silog'].GetGlobalAverageValue())**2))
    val_metrics = {
        'abs_rel':
        smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'silog':
        np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                (smoothed_criteria['err_silog'].GetGlobalAverageValue())**2)
    }
    print(val_metrics)
    return val_metrics
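
All of these snippets lean on a SmoothedValue helper that is not shown on this page. A minimal sketch consistent with the three methods used throughout (AddValue, GetMedianValue, GetGlobalAverageValue), assuming a deque-backed sliding window in the style of the Detectron utilities:

from collections import deque

import numpy as np


class SmoothedValue(object):
    """Track a series of values; report the median over a sliding
    window and the average over the whole series (sketch)."""

    def __init__(self, window_size):
        self.deque = deque(maxlen=window_size)
        self.series_total = 0.0
        self.count = 0

    def AddValue(self, value):
        self.deque.append(value)
        self.series_total += value
        self.count += 1

    def GetMedianValue(self):
        return np.median(self.deque)

    def GetGlobalAverageValue(self):
        return self.series_total / self.count
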
Example #2
def val(val_dataloader, model):
    """
    Validate the model.
    """
    smoothed_absRel = SmoothedValue(len(val_dataloader))
    smoothed_whdr = SmoothedValue(len(val_dataloader))
    smoothed_criteria = {
        'err_absRel': smoothed_absRel,
        'err_whdr': smoothed_whdr
    }
    for i, data in enumerate(val_dataloader):
        out = model.module.inference(data)
        pred_depth = torch.squeeze(out['pred'])

        pred_depth_resize = cv2.resize(pred_depth.cpu().numpy(),
                                       (torch.squeeze(data['B_raw']).shape[1],
                                        torch.squeeze(data['B_raw']).shape[0]))
        pred_depth_metric = recover_metric_depth(pred_depth_resize,
                                                 data['B_raw'])
        smoothed_criteria = validate_rel_depth_err(pred_depth_metric,
                                                   data['B_raw'],
                                                   smoothed_criteria,
                                                   scale=1.0)
    return {
        'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'whdr': smoothed_criteria['err_whdr'].GetGlobalAverageValue()
    }
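
recover_metric_depth is also assumed rather than shown. In comparable depth-estimation codebases it aligns a relative prediction to the metric ground truth with a least-squares scale factor over valid pixels; a sketch under that assumption:

import numpy as np
import torch


def recover_metric_depth(pred, gt):
    """Rescale a relative depth map to metric ground truth using a
    least-squares scale factor over valid pixels (sketch)."""
    if isinstance(gt, torch.Tensor):
        gt = gt.cpu().numpy()
    gt = np.squeeze(gt).astype(np.float64)
    pred = np.asarray(pred, dtype=np.float64)
    mask = (gt > 1e-8) & (pred > 1e-8)  # skip holes and invalid depths
    scale = np.sum(gt[mask] * pred[mask]) / np.sum(pred[mask] ** 2)
    return pred * scale
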
Example #3
def val(val_dataloader, model):
    """
    Validate the model.
    """
    smoothed_absRel = SmoothedValue(len(val_dataloader))
    smoothed_whdr = SmoothedValue(len(val_dataloader))
    smoothed_criteria = {
        'err_absRel': smoothed_absRel,
        'err_whdr': smoothed_whdr
    }
    for i, data in enumerate(val_dataloader):
        invalid_side = data['invalid_side'][0]
        out = model.module.inference(data)
        pred_depth = torch.squeeze(out['b_fake'])

        pred_depth = pred_depth[invalid_side[0]:pred_depth.size(0) -
                                invalid_side[1],
                                invalid_side[2]:pred_depth.size(1) -
                                invalid_side[3]]

        pred_depth_resize = resize_image(pred_depth,
                                         torch.squeeze(data['B_raw']).shape)
        pred_depth_metric = recover_metric_depth(pred_depth_resize,
                                                 data['B_raw'])
        smoothed_criteria = validate_rel_depth_err(pred_depth_metric,
                                                   data['B_raw'],
                                                   smoothed_criteria,
                                                   scale=1.0)
    return {
        'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'whdr': smoothed_criteria['err_whdr'].GetGlobalAverageValue()
    }
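
resize_image is likewise not defined on this page; a minimal sketch that brings a 2-D prediction (tensor or array) to the ground-truth (H, W) with OpenCV:

import cv2
import torch


def resize_image(img, target_shape):
    """Resize a 2-D depth map to the (H, W) of the raw ground truth.
    Note that cv2.resize expects (width, height). (sketch)"""
    if isinstance(img, torch.Tensor):
        img = img.detach().cpu().numpy()
    h, w = target_shape[0], target_shape[1]
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
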
Example #4
    def __init__(self, args, log_period=20, tensorboard_logger=None):
        # Output logging period in SGD iterations
        self.args = args
        self.log_period = log_period
        self.tblogger = tensorboard_logger
        self.tb_ignored_keys = ['iter', 'eta', 'epoch', 'time']
        self.iter_timer = Timer()
        # Window size for smoothing tracked values (with median filtering)
        self.filter_size = log_period

        def create_smoothed_value():
            return SmoothedValue(self.filter_size)

        self.smoothed_losses = defaultdict(create_smoothed_value)
        self.smoothed_metrics = defaultdict(create_smoothed_value)
        self.smoothed_total_loss = SmoothedValue(self.filter_size)
Example #5
def val(val_dataloader, model):
    """
    Validate the model.
    """
    smoothed_absRel = SmoothedValue(len(val_dataloader))
    smoothed_criteria = {'err_absRel': smoothed_absRel}
    for i, data in enumerate(val_dataloader):
        invalid_side = data['invalid_side'][0]
        out = model.module.inference(data)
        pred_depth = torch.squeeze(out['b_fake'])
        pred_depth = pred_depth[invalid_side[0]:pred_depth.size(0) - invalid_side[1], :]
        pred_depth = pred_depth / data['ratio'].cuda()
        pred_depth = resize_image(pred_depth, torch.squeeze(data['B_raw']).shape)
        smoothed_criteria = validate_err(pred_depth, data['B_raw'], smoothed_criteria, (45, 471, 41, 601))
    return {'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue()}
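
The mask tuple (45, 471, 41, 601) is the standard Eigen crop for 480x640 NYUv2 depth maps: evaluation is restricted to rows 45:471 and columns 41:601, where the ground truth is reliable. A sketch of how validate_err presumably applies it:

def apply_eigen_crop(depth, crop=(45, 471, 41, 601)):
    """Restrict a 480x640 depth map to the Eigen evaluation crop
    (sketch of what validate_err presumably does with its mask)."""
    top, bottom, left, right = crop
    return depth[top:bottom, left:right]
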
Example #6
def test(model_path):
    test_args = TestOptions().parse()
    test_args.thread = 0
    test_args.batchsize = 1
    merge_cfg_from_file(test_args)

    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))
    # load model
    model = MetricDepthModel()

    model.eval()

    test_args.load_ckpt = model_path

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    # model = torch.nn.DataParallel(model)

    # test
    smoothed_absRel = SmoothedValue(test_datasize)
    smoothed_rms = SmoothedValue(test_datasize)
    smoothed_logRms = SmoothedValue(test_datasize)
    smoothed_squaRel = SmoothedValue(test_datasize)
    smoothed_silog = SmoothedValue(test_datasize)
    smoothed_silog2 = SmoothedValue(test_datasize)
    smoothed_log10 = SmoothedValue(test_datasize)
    smoothed_delta1 = SmoothedValue(test_datasize)
    smoothed_delta2 = SmoothedValue(test_datasize)
    smoothed_delta3 = SmoothedValue(test_datasize)
    smoothed_whdr = SmoothedValue(test_datasize)

    smoothed_criteria = {
        'err_absRel': smoothed_absRel,
        'err_squaRel': smoothed_squaRel,
        'err_rms': smoothed_rms,
        'err_silog': smoothed_silog,
        'err_logRms': smoothed_logRms,
        'err_silog2': smoothed_silog2,
        'err_delta1': smoothed_delta1,
        'err_delta2': smoothed_delta2,
        'err_delta3': smoothed_delta3,
        'err_log10': smoothed_log10,
        'err_whdr': smoothed_whdr
    }

    for i, data in enumerate(data_loader):
        out = model.inference(data)
        pred_depth = torch.squeeze(out['b_fake'])
        img_path = data['A_paths']
        invalid_side = data['invalid_side'][0]
        pred_depth = pred_depth[invalid_side[0]:pred_depth.size(0) -
                                invalid_side[1], :]
        pred_depth = pred_depth / data['ratio'].cuda()  # scale the depth
        pred_depth = resize_image(pred_depth,
                                  torch.squeeze(data['B_raw']).shape)
        smoothed_criteria = evaluate_err(pred_depth,
                                         data['B_raw'],
                                         smoothed_criteria,
                                         mask=(45, 471, 41, 601),
                                         scale=10.)

        # save images
        model_name = test_args.load_ckpt.split('/')[-1].split('.')[0]
        image_dir = os.path.join(cfg.ROOT_DIR, './evaluation',
                                 cfg.MODEL.ENCODER, model_name)
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)
        img_name = img_path[0].split('/')[-1]
        #plt.imsave(os.path.join(image_dir, 'd_' + img_name), pred_depth, cmap='rainbow')
        #cv2.imwrite(os.path.join(image_dir, 'rgb_' + img_name), data['A_raw'].numpy().squeeze())

        # print('processing (%04d)-th image... %s' % (i, img_path))

    # print("###############absREL ERROR: %f", smoothed_criteria['err_absRel'].GetGlobalAverageValue())
    # print("###############silog ERROR: %f", np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() - (
    #     smoothed_criteria['err_silog'].GetGlobalAverageValue()) ** 2))
    # print("###############log10 ERROR: %f", smoothed_criteria['err_log10'].GetGlobalAverageValue())
    # print("###############RMS ERROR: %f", np.sqrt(smoothed_criteria['err_rms'].GetGlobalAverageValue()))
    # print("###############delta_1 ERROR: %f", smoothed_criteria['err_delta1'].GetGlobalAverageValue())
    # print("###############delta_2 ERROR: %f", smoothed_criteria['err_delta2'].GetGlobalAverageValue())
    # print("###############delta_3 ERROR: %f", smoothed_criteria['err_delta3'].GetGlobalAverageValue())
    # print("###############squaRel ERROR: %f", smoothed_criteria['err_squaRel'].GetGlobalAverageValue())
    # print("###############logRms ERROR: %f", np.sqrt(smoothed_criteria['err_logRms'].GetGlobalAverageValue()))

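    # NOTE: 'f' is never defined in this snippet; in the original module it is
    # presumably a results file opened elsewhere, e.g. f = open('results.txt', 'a').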
    f.write("tested model:" + model_path)
    f.write('\n')
    f.write("###############absREL ERROR:" +
            str(smoothed_criteria['err_absRel'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############silog ERROR:" + str(
        np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                (smoothed_criteria['err_silog'].GetGlobalAverageValue())**2)))
    f.write('\n')
    f.write("###############log10 ERROR:" +
            str(smoothed_criteria['err_log10'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############RMS ERROR:" +
            str(np.sqrt(smoothed_criteria['err_rms'].GetGlobalAverageValue())))
    f.write('\n')
    f.write("###############delta_1 ERROR:" +
            str(smoothed_criteria['err_delta1'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############delta_2 ERROR:" +
            str(smoothed_criteria['err_delta2'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############delta_3 ERROR:" +
            str(smoothed_criteria['err_delta3'].GetGlobalAverageValue()))
    f.write('\n')
    f.write("###############squaRel ERROR:" +
            str(smoothed_criteria['err_squaRel'].GetGlobalAverageValue()))
    f.write('\n')
    f.write(
        "###############logRms ERROR:" +
        str(np.sqrt(smoothed_criteria['err_logRms'].GetGlobalAverageValue())))
    f.write('\n')
    f.write(
        '-----------------------------------------------------------------------------'
    )
    f.write('\n')
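
The err_silog / err_silog2 pair tracks the two moments behind the scale-invariant log error: with d = log(pred) - log(gt), the code accumulates the per-image means of d and d**2, and the reported metric sqrt(E[d**2] - E[d]**2) is the standard deviation of the log-depth residuals. A sketch of the per-image update, assuming evaluate_err / validate_err_kitti accumulate exactly these moments:

import numpy as np


def update_silog(pred, gt, smoothed_criteria):
    """Accumulate mean(d) and mean(d**2) for the silog metric (sketch)."""
    mask = (gt > 1e-8) & (pred > 1e-8)  # valid pixels only
    d = np.log(pred[mask]) - np.log(gt[mask])
    smoothed_criteria['err_silog'].AddValue(float(np.mean(d)))
    smoothed_criteria['err_silog2'].AddValue(float(np.mean(d ** 2)))
    return smoothed_criteria
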
Example #7
    data_loader = CustomerDataLoader(test_args)
    test_datasize = len(data_loader)
    logger.info('{:>15}: {:<30}'.format('test_data_size', test_datasize))
    # load model
    model = MetricDepthModel()

    model.eval()

    # load checkpoint
    if test_args.load_ckpt:
        load_ckpt(test_args, model)
    model.cuda()
    model = torch.nn.DataParallel(model)

    # test
    smoothed_absRel = SmoothedValue(test_datasize)
    smoothed_rms = SmoothedValue(test_datasize)
    smoothed_logRms = SmoothedValue(test_datasize)
    smoothed_squaRel = SmoothedValue(test_datasize)
    smoothed_silog = SmoothedValue(test_datasize)
    smoothed_silog2 = SmoothedValue(test_datasize)
    smoothed_log10 = SmoothedValue(test_datasize)
    smoothed_delta1 = SmoothedValue(test_datasize)
    smoothed_delta2 = SmoothedValue(test_datasize)
    smoothed_delta3 = SmoothedValue(test_datasize)
    smoothed_whdr = SmoothedValue(test_datasize)

    smoothed_criteria = {
        'err_absRel': smoothed_absRel,
        'err_squaRel': smoothed_squaRel,
        'err_rms': smoothed_rms,
        'err_silog': smoothed_silog,
        'err_logRms': smoothed_logRms,
        'err_silog2': smoothed_silog2,
        'err_delta1': smoothed_delta1,
        'err_delta2': smoothed_delta2,
        'err_delta3': smoothed_delta3,
        'err_log10': smoothed_log10,
        'err_whdr': smoothed_whdr
    }
Example #8
    def create_smoothed_value():
        return SmoothedValue(self.filter_size)
Example #9
class TrainingStats(object):
    """Track vital training statistics."""
    def __init__(self, args, log_period=20, tensorboard_logger=None):
        # Output logging period in SGD iterations
        self.args = args
        self.log_period = log_period
        self.tblogger = tensorboard_logger
        self.tb_ignored_keys = ['iter', 'eta', 'epoch', 'time']
        self.iter_timer = Timer()
        # Window size for smoothing tracked values (with median filtering)
        self.filter_size = 20

        def create_smoothed_value():
            return SmoothedValue(self.filter_size)

        self.smoothed_losses = defaultdict(create_smoothed_value)
        # self.smoothed_metrics = defaultdict(create_smoothed_value)
        self.smoothed_total_loss = SmoothedValue(self.filter_size)

    def IterTic(self):
        self.iter_timer.tic()

    def IterToc(self):
        return self.iter_timer.toc(average=False)

    def ResetIterTimer(self):
        self.iter_timer.reset()

    def UpdateIterStats(self, loss):
        """Update tracked iteration statistics."""
        for k in loss:
            # accumulate every individual loss; the combined
            # 'total_loss' entry is tracked separately below
            if k != 'total_loss':
                self.smoothed_losses[k].AddValue(float(loss[k]))

        self.smoothed_total_loss.AddValue(float(loss['total_loss']))

    def LogIterStats(self, cur_iter, cur_epoch, optimizer, val_err={}):
        """Log the tracked statistics."""
        if (cur_iter % self.log_period == 0):
            stats = self.GetStats(cur_iter, cur_epoch, optimizer, val_err)
            log_stats(stats, self.args)
            if self.tblogger:
                self.tb_log_stats(stats, cur_iter)

    def tb_log_stats(self, stats, cur_iter):
        """Log the tracked statistics to tensorboard"""
        for k in stats:
            if k not in self.tb_ignored_keys:
                v = stats[k]
                if isinstance(v, dict):
                    self.tb_log_stats(v, cur_iter)
                else:
                    self.tblogger.add_scalar(k, v, cur_iter)

    def GetStats(self, cur_iter, cur_epoch, optimizer, val_err={}):
        eta_seconds = self.iter_timer.average_time * (cfg.TRAIN.MAX_ITER -
                                                      cur_iter)
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        stats = OrderedDict(
            iter=cur_iter,  # 1-indexed
            time=self.iter_timer.average_time,
            eta=eta,
            total_loss=self.smoothed_total_loss.GetMedianValue(),
            epoch=cur_epoch,
        )
        optimizer_state_dict = optimizer.state_dict()
        lr = {}
        for i in range(len(optimizer_state_dict['param_groups'])):
            lr_name = 'group%d_lr' % i
            lr[lr_name] = optimizer_state_dict['param_groups'][i]['lr']

        stats['lr'] = OrderedDict(lr)
        for k, v in self.smoothed_losses.items():
            stats[k] = OrderedDict([(k, v.GetMedianValue())])

        stats['val_err'] = OrderedDict(val_err)
        return stats
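
A typical way to drive TrainingStats from a training loop, matching the methods it exposes (model, optimizer, train_loader, cur_epoch and val_err are placeholders, not names taken from this page):

stats = TrainingStats(args, log_period=20, tensorboard_logger=tblogger)
for cur_iter, data in enumerate(train_loader):
    stats.IterTic()
    loss = model(data)  # assumed to return a dict containing 'total_loss'
    optimizer.zero_grad()
    loss['total_loss'].backward()
    optimizer.step()
    stats.IterToc()
    stats.UpdateIterStats(loss)
    stats.LogIterStats(cur_iter, cur_epoch, optimizer, val_err=val_err)
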
Example #10
def val_kitti(val_dataloader, model):
    """
    Validate the model.
    """
    smoothed_absRel = SmoothedValue(len(val_dataloader))
    smoothed_silog = SmoothedValue(len(val_dataloader))
    smoothed_silog2 = SmoothedValue(len(val_dataloader))
    smoothed_criteria = {
        'err_absRel': smoothed_absRel,
        'err_silog': smoothed_silog,
        'err_silog2': smoothed_silog2
    }
    # rois-level
    rois_smoothed_absRel = SmoothedValue(len(val_dataloader))
    rois_smoothed_silog = SmoothedValue(len(val_dataloader))
    rois_smoothed_silog2 = SmoothedValue(len(val_dataloader))
    rois_smoothed_criteria = {
        'err_absRel': rois_smoothed_absRel,
        'err_silog': rois_smoothed_silog,
        'err_silog2': rois_smoothed_silog2
    }

    # bg-level
    bg_smoothed_absRel = SmoothedValue(len(val_dataloader))
    bg_smoothed_silog = SmoothedValue(len(val_dataloader))
    bg_smoothed_silog2 = SmoothedValue(len(val_dataloader))
    bg_smoothed_criteria = {
        'err_absRel': bg_smoothed_absRel,
        'err_silog': bg_smoothed_silog,
        'err_silog2': bg_smoothed_silog2
    }
    for i, data in enumerate(val_dataloader):
        pred_depth = model.module.inference_kitti(data)
        smoothed_criteria = validate_err_kitti(pred_depth['b_fake'],
                                               data['B_raw'],
                                               smoothed_criteria)
        rois_smoothed_criteria = validate_err_kitti(pred_depth['b_fake'],
                                                    data['B_raw_rois'],
                                                    rois_smoothed_criteria)
        bg_smoothed_criteria = validate_err_kitti(pred_depth['b_fake'],
                                                  data['B_raw_bg'],
                                                  bg_smoothed_criteria)
        #print(np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() - (smoothed_criteria['err_silog'].GetGlobalAverageValue())**2))
    val_metrics = {
        'abs_rel':
        smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'silog':
        np.sqrt(smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                (smoothed_criteria['err_silog'].GetGlobalAverageValue())**2)
    }
    rois_val_metrics = {
        'abs_rel':
        rois_smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'silog':
        np.sqrt(
            rois_smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
            (rois_smoothed_criteria['err_silog'].GetGlobalAverageValue())**2)
    }
    bg_val_metrics = {
        'abs_rel':
        bg_smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'silog':
        np.sqrt(bg_smoothed_criteria['err_silog2'].GetGlobalAverageValue() -
                (bg_smoothed_criteria['err_silog'].GetGlobalAverageValue())**2)
    }
    print("global: ", val_metrics)
    print("roi: ", rois_val_metrics)
    print("bg: ", bg_val_metrics)
    return val_metrics