Example #1
    def save_result(self, i, batch_data, out_depth):
        # Save each predicted map in the batch as a color-mapped PNG.
        b = batch_data['left_im'].size(0)
        for ii in range(b):
            curr_im_name = self.output_dir + '{:05d}_{}.png'.format(i, ii)
            depth = out_depth[ii, :, :]
            depth = 1.0 / depth  # invert the prediction before color-mapping
            depth_3 = self.gray2jet(depth)
            torchvision.utils.save_image(depth_3, curr_im_name)

    def gray2jet(self, dmap):
        # Convert a single-channel depth map to a 3-channel color image using matplotlib's 'magma' colormap.
        cmap = plt.get_cmap('magma')
        if len(dmap.size()) == 4:
            dmap_0 = dmap[0, 0, :, :].cpu().numpy()
        elif len(dmap.size()) == 3:
            dmap_0 = dmap[0, :].cpu().numpy()
        elif len(dmap.size()) == 2:
            dmap_0 = dmap.cpu().numpy()
        else:
            raise ValueError('Wrong dimensions of depth: {}'.format(dmap.size()))
        dmap_norm = (dmap_0 - dmap_0.min()) / (dmap_0.max() - dmap_0.min())
        dmap_col = cmap(dmap_norm)
        dmap_col = dmap_col[:, :, 0:3]
        dmap_col = np.transpose(dmap_col, (2, 0, 1))
        return torch.tensor(dmap_col).float().to(self.device)


if __name__ == '__main__':
    opts = Options().opts
    TestFaster(opts)
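
# A minimal standalone sketch of the colormap conversion used by gray2jet above,
# assuming only torch, numpy and matplotlib are available. 'colorize_depth' and
# 'demo_depth' are hypothetical names used for illustration, not part of the
# original code.
import matplotlib.pyplot as plt
import numpy as np
import torch


def colorize_depth(dmap_2d):
    # Normalize a 2-D map to [0, 1], apply the 'magma' colormap and return a
    # (3, H, W) float tensor, mirroring what gray2jet produces.
    cmap = plt.get_cmap('magma')
    arr = dmap_2d.cpu().numpy()
    arr = (arr - arr.min()) / (arr.max() - arr.min() + 1e-8)
    colored = cmap(arr)[:, :, :3]  # drop the alpha channel
    return torch.tensor(np.transpose(colored, (2, 0, 1))).float()


demo_depth = torch.rand(64, 128)         # dummy inverse-depth map
print(colorize_depth(demo_depth).shape)  # torch.Size([3, 64, 128])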
Example #2
        
    def get_online_train_categories(self):
        # Recover the category tags seen during online training from the 'Disp' checkpoint filenames in eval_dir.
        model_names = sorted(x for x in os.listdir(self.eval_dir) if x.endswith('.pth'))
        model_names = [x for x in model_names if 'Disp' in x]
        model_tags = [x.split('Disp')[0] for x in model_names]
        if self.dataset_tag == 'nyu':
            model_tags = [x[3:-1] for x in model_tags]
            model_tags = [x[:-1] if x.endswith('_') else x for x in model_tags]
        elif self.dataset_tag == 'kitti':
            model_tags = [x.split('kitti_')[1][:-1] if 'kitti_' in x else x for x in model_tags]
            model_tags = [x.split('_')[0] for x in model_tags]
        return model_tags


if __name__ == '__main__':
    opts = Options().opts 
    if opts.dataset_tag == 'nyu':
        train_opts = OptionsPretrain().opts 
        train_opts.root = '/hdd/local/sdb/umar/nyu_indoor/rectified_nyu/'
        # getting the pretrain
        nyu_cat_file_name = train_opts.nyu_cat_file_name 
        train_index_end = train_opts.nyu_train_index_end 
        nyu_cat_name_dict = np.load(nyu_cat_file_name, allow_pickle=True).item()
        categories = sorted(nyu_cat_name_dict.keys())
        train_categories = categories[:categories.index(train_index_end)]
        opts.nyu_pretrain_categories = [x[:-1] for x in train_categories] 
        # print(opts.nyu_pretrain_categories)
        Evaluator = EvaluateResults(opts)
        print('NYU results: {}'.format(Evaluator.complete_evaluation()))
        
    elif opts.dataset_tag == 'kitti':
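
# A small hedged sketch of the tag parsing done in get_online_train_categories,
# applied to hypothetical 'kitti' checkpoint filenames (the real names are read
# from eval_dir, so the values below are illustrative only).
example_names = sorted(['00_kitti_city_Disp_000_01234.pth',
                        '01_kitti_road_Disp_000_05678.pth'])
example_tags = [x.split('Disp')[0] for x in example_names if x.endswith('.pth')]
example_tags = [x.split('kitti_')[1][:-1] if 'kitti_' in x else x for x in example_tags]
example_tags = [x.split('_')[0] for x in example_tags]
print(example_tags)  # ['city', 'road']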
Example #3
from dir_options.test_options import Options 
from test_directory import EvalDirectory

import time 
import os 


if __name__ == '__main__':
    opts = Options().opts 
    runs = opts.runs 
    
    list_eval_dir_kitti = ['trained_models/online_models_kitti/']
    
    list_eval_dir_nyu = ['trained_models/online_models_nyu/']
    
    list_results_dir_kitti = ['results/online_test_loss/kitti_online/']
    
    list_results_dir_nyu = ['results/online_test_loss/nyu_online/']
    
    for run in runs:
        list_eval_dir_kitti.append('trained_models/online_models_kitti_replay_reg_run' + run + '/')
        list_eval_dir_nyu.append('trained_models/online_models_nyu_replay_reg_run' + run + '/')
        list_results_dir_kitti.append('results/replay_reg_test_loss_run' + run + '/kitti_online/')
        list_results_dir_nyu.append('results/replay_reg_test_loss_run' + run + '/nyu_online/')
        
    assert len(list_eval_dir_kitti) == len(list_eval_dir_nyu) == \
        len(list_results_dir_kitti) == len(list_results_dir_nyu), \
        'Check the number of elements'
                        
    for i in range(len(list_eval_dir_kitti)): 
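
    # The assert above keeps the four lists in lockstep; a hedged sketch of how
    # the directories could be paired per configuration (the actual loop body is
    # not shown in this excerpt, so the pairing below is illustrative only).
    for eval_kitti, eval_nyu, res_kitti, res_nyu in zip(list_eval_dir_kitti,
                                                        list_eval_dir_nyu,
                                                        list_results_dir_kitti,
                                                        list_results_dir_nyu):
        print(eval_kitti, '->', res_kitti)
        print(eval_nyu, '->', res_nyu)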
Example #4
    # Aggregate each metric across runs: mean and standard deviation.
    metric_mean = {}
    metric_std = {}
    for metric in metrics:
        metric_mean[metric] = np.mean(metric_lists[metric])
        metric_std[metric] = np.std(metric_lists[metric])
        print(metric_std[metric])
    data = np.reshape(np.array(list(metric_mean.values())), (1, -1))
    df = pd.DataFrame(data)
    print(df.to_latex(float_format="%0.4f"))
    data = np.reshape(np.array(list(metric_std.values())), (1, -1))
    df = pd.DataFrame(data)
    print(df.to_latex(float_format="%0.6f"))


if __name__ == '__main__':
    opts = Options().opts
    metrics = opts.metrics
    models = [
        'trained_models/online_models_vkitti/05_Scene20_Disp_000_15287.pth',
        'trained_models/online_models_vkitti_replay_reg_run1/05_Scene20_Disp_000_15287.pth',
        'trained_models/online_models_vkitti_replay_reg_run2/05_Scene20_Disp_000_15287.pth',
        'trained_models/online_models_vkitti_replay_reg_run3/05_Scene20_Disp_000_15287.pth',
        'trained_models/online_models_vkitti_fog_rain/05_Scene20_Disp_000_19109.pth',
        'trained_models/online_models_vkitti_fog_rain_replay_reg_run1/05_Scene20_Disp_000_19109.pth',
        'trained_models/online_models_vkitti_fog_rain_replay_reg_run2/05_Scene20_Disp_000_19109.pth',
        'trained_models/online_models_vkitti_fog_rain_replay_reg_run3/05_Scene20_Disp_000_19109.pth'
    ]

    tags = ['kitti']
    kitti_dataset = Datasets.KittiDepthTestDataset(opts)
    kitti_dataloader = data.DataLoader(kitti_dataset,
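
    # A self-contained sketch of the mean/std aggregation and LaTeX printing
    # used above, with made-up metric values; 'demo_lists' is a hypothetical
    # stand-in for metric_lists and is not part of the original script.
    import numpy as np
    import pandas as pd

    demo_lists = {'abs_rel': [0.11, 0.12, 0.10], 'rmse': [4.2, 4.5, 4.3]}
    demo_mean = {m: np.mean(v) for m, v in demo_lists.items()}
    demo_std = {m: np.std(v) for m, v in demo_lists.items()}
    print(pd.DataFrame(np.reshape(np.array(list(demo_mean.values())), (1, -1)),
                       columns=list(demo_lists.keys())).to_latex(float_format="%0.4f"))
    print(pd.DataFrame(np.reshape(np.array(list(demo_std.values())), (1, -1)),
                       columns=list(demo_lists.keys())).to_latex(float_format="%0.6f"))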
Example #5

def display_latex_style_w_std(mean, std):
    # Print one LaTeX table row per model: 'mean + std & ' for every column,
    # terminated with '\\'.
    h = len(mean)
    w = len(mean[0])
    for r in range(h):
        for c in range(w):
            mean_str = f"{mean[r][c]:0.4f}"
            std_str = f"{std[r][c]:0.4f}"
            complete_string = mean_str + ' + ' + std_str + ' & '
            print(complete_string, end="")
        print('\\\\')


if __name__ == '__main__':
    opts = Options().opts
    pre_opts = OptionsPretrain().opts
    metrics = opts.metrics
    runs = opts.runs
    nyu_train_index_end = pre_opts.nyu_train_index_end
    nyu_categories = list(NYUCategorySplit(pre_opts)().keys())
    nyu_pretrain_categories = nyu_categories[:nyu_categories.index(nyu_train_index_end)]
    opts.nyu_pretrain_categories = [x[:-1] for x in nyu_pretrain_categories]

    list_eval_dir_kitti = ['trained_models/online_models_kitti/']
    list_eval_dir_nyu = ['trained_models/online_models_nyu/']
    list_results_dir_kitti = ['results/online_test_loss/kitti_online/']
    list_results_dir_nyu = ['results/online_test_loss/nyu_online/']

    for run in runs:
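
    # A quick hedged usage sketch for display_latex_style_w_std with made-up
    # 2x2 mean/std tables (the values are illustrative only).
    demo_mean = [[0.1123, 4.2310], [0.1251, 4.5012]]
    demo_std = [[0.0021, 0.0834], [0.0034, 0.0912]]
    display_latex_style_w_std(demo_mean, demo_std)
    # prints: 0.1123 + 0.0021 & 4.2310 + 0.0834 & \\  (one row per model)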
Example #6
import torch.utils.data as data
import os

from test import TestFaster
from dir_options.test_options import Options
from dir_dataset import Datasets

if __name__ == '__main__':
    opts = Options().opts
    opts.qual_results = True

    kitti_online_model_dir = 'trained_models/online_models_kitti/'
    kitti_rep_reg_model_dir = 'trained_models/online_models_kitti_rep_reg_run3/'
    vkitti_online_model_dir = 'trained_models/online_models_vkitti/'
    vkitti_rep_reg_model_dir = 'trained_models/online_models_vkitti_rep_reg_run3/'
    kitti_online_models = sorted(
        [x for x in os.listdir(kitti_online_model_dir) if x.endswith('.pth')])
    kitti_rep_reg_models = sorted(
        [x for x in os.listdir(kitti_rep_reg_model_dir) if x.endswith('.pth')])
    vkitti_online_models = sorted(
        [x for x in os.listdir(vkitti_online_model_dir) if x.endswith('.pth')])
    vkitti_rep_reg_models = sorted(
        [x for x in os.listdir(vkitti_rep_reg_model_dir) if x.endswith('.pth')])
    kitti_online_model = kitti_online_model_dir + kitti_online_models[-1]
    kitti_rep_reg_model = kitti_rep_reg_model_dir + kitti_rep_reg_models[-1]
    vkitti_online_model = vkitti_online_model_dir + vkitti_online_models[-1]
    vkitti_rep_reg_model = vkitti_rep_reg_model_dir + vkitti_rep_reg_models[-1]
    model_paths = [
        kitti_online_model, kitti_rep_reg_model, vkitti_online_model,
        vkitti_rep_reg_model
    ]
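
    # A small hedged sanity check (not part of the original script): verify that
    # each assembled checkpoint path exists before it is used downstream.
    for p in model_paths:
        assert os.path.isfile(p), 'missing checkpoint: ' + p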
Example #7

def display_latex_style_w_std(mean, std):
    h = len(mean)
    w = len(mean[0])
    for r in range(h):
        for c in range(w):
            mean_str = f"{mean[r][c]:0.4f}"
            std_str = f"{std[r][c]:0.4f}"
            complete_string = mean_str + ' + ' + std_str + ' & '
            print(complete_string, end="")
        print('\\\\')


if __name__ == '__main__':
    opts = Options().opts
    metrics = opts.metrics
    runs = opts.runs

    list_eval_dir_kitti = ['trained_models/online_models_kitti/']
    list_eval_dir_vkitti = ['trained_models/online_models_vkitti/']
    list_results_dir_kitti = ['results/online_test_loss/kitti_online/']
    list_results_dir_vkitti = ['results/online_test_loss/vkitti_online/']

    for run in runs:
        list_eval_dir_kitti.append(
            'trained_models/online_models_kitti_rep_reg_run' + run + '/')
        list_eval_dir_vkitti.append(
            'trained_models/online_models_vkitti_rep_reg_run' + run + '/')
        list_results_dir_kitti.append('results/rep_reg_online_test_loss_run' +
                                      run + '/kitti_online/')