Example #1
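All of the snippets on this page rely on roughly the same set of imports. The block below is a minimal sketch of the standard-library and PyTorch imports they need; the project-specific names (mtr, RgbdSodDataset, utils, and the model classes) come from the D3NetBenchmark code base, and their exact module paths are not visible in the snippets, so they are only noted in a comment.

import os
import shutil

import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter  # or tensorboardX, depending on the repo
from tqdm import tqdm

# mtr (the transform module), RgbdSodDataset, utils and the model classes
# (MyNet, RgbNet, RgbdNet, DepthNet) are project-specific imports from
# D3NetBenchmark; their module paths are not shown in the snippets.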
def make_val_data_loader(config):
    transform_val = torchvision.transforms.Compose([
        mtr.Resize(tuple(config['data_size'])),
        mtr.ToTensor(),
        mtr.Normalize(config['data_normalize_mean'],
                      config['data_normalize_std'],
                      elems_do=['img']),
    ])

    val_loaders = list()

    for val_dataset in config['val_datasets']:
        val_set = RgbdSodDataset(val_dataset,
                                 transform=transform_val,
                                 max_num=0,
                                 if_memory=False)
        val_loaders.append(
            DataLoader(val_set, batch_size=1, shuffle=False, pin_memory=True))

    return val_loaders
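For orientation, here is a minimal usage sketch for this helper. The key names mirror the lookups in make_val_data_loader, but every value and dataset path below is an illustrative placeholder, not taken from the repository.

# Hypothetical configuration; values and dataset paths are placeholders.
config = {
    'data_size': [224, 224],
    'data_normalize_mean': [0.485, 0.456, 0.406],   # ImageNet statistics
    'data_normalize_std': [0.229, 0.224, 0.225],
    'val_datasets': ['data/NJU2K_test', 'data/NLPR_test'],   # placeholder paths
}

val_loaders = make_val_data_loader(config)
for loader in val_loaders:
    for sample_batched in loader:
        pass   # evaluate the model on each validation sample here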
Example #2
def make_train_data_loader(config):
    transform_train = torchvision.transforms.Compose([
        mtr.RandomFlip(),
        mtr.Resize(tuple(config['data_size'])),
        mtr.ToTensor(),
        mtr.Normalize(config['data_normalize_mean'],
                      config['data_normalize_std'],
                      elems_do=['img']),
    ])

    # TODO: should max_num and num_workers come from the config?

    train_set = RgbdSodDataset(datasets=config['train_datasets'],
                               transform=transform_train,
                               max_num=0,
                               if_memory=False)
    train_loader = DataLoader(train_set,
                              batch_size=config['batch_size'],
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)

    return train_loader
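A hedged usage sketch for the training loader, extending the config dict sketched under Example #1; the batch size and training split path are placeholder assumptions.

# Additional keys needed by the training loader.
config.update({
    'train_datasets': ['data/NJU2K_train'],   # placeholder path
    'batch_size': 8,                          # placeholder batch size
})

train_loader = make_train_data_loader(config)
for sample_batched in train_loader:
    pass   # forward pass, loss computation and optimizer step would go here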
Example #3
    def __init__(self, p):
        self.p = p
        os.makedirs(p['snapshot_path'], exist_ok=True)
        shutil.copyfile(
            os.path.join('model_zoo/D3NetBenchmark/model', p['model'] + '.py'),
            os.path.join(p['snapshot_path'], p['model'] + '.py'))
        SetLogFile('{}/log.txt'.format(p['snapshot_path']))
        if p['if_use_tensorboard']:
            self.writer = SummaryWriter(p['snapshot_path'])

        transform_train = torchvision.transforms.Compose([
            mtr.RandomFlip(),
            mtr.Resize(p['size']),
            mtr.ToTensor(),
            mtr.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225],
                          elems_do=['img']),
        ])

        transform_val = torchvision.transforms.Compose([
            mtr.Resize(p['size']),
            mtr.ToTensor(),
            mtr.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225],
                          elems_do=['img']),
        ])

        self.train_set = RgbdSodDataset(datasets=p['train_datasets'],
                                        transform=transform_train,
                                        max_num=p['max_num'],
                                        if_memory=p['if_memory'])
        self.train_loader = DataLoader(self.train_set,
                                       batch_size=p['bs'],
                                       shuffle=True,
                                       num_workers=p['num_workers'],
                                       pin_memory=True)

        self.val_loaders = []
        for val_dataset in p['val_datasets']:
            val_set = RgbdSodDataset(val_dataset,
                                     transform=transform_val,
                                     max_num=p['max_num'],
                                     if_memory=p['if_memory'])
            self.val_loaders.append(
                DataLoader(val_set,
                           batch_size=1,
                           shuffle=False,
                           pin_memory=True))

        self.model = MyNet().cuda()

        self.optimizer = utils.get_optimizer(p['optimizer'][0],
                                             self.model.parameters(), p['lr'],
                                             p['optimizer'][1])
        self.scheduler = utils.get_scheduler(p['scheduler'][0], self.optimizer,
                                             p['scheduler'][1])

        self.best_metric = None

        # Optionally resume training state from a saved checkpoint.
        if p['resume'] is not None:
            print('Load checkpoint from [{}]'.format(p['resume']))
            checkpoint = torch.load(p['resume'])
            self.p['start_epoch'] = checkpoint['current_epoch'] + 1
            self.best_metric = checkpoint['best_metric']
            self.model.load_state_dict(checkpoint['model'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
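For reference, the constructor expects a parameter dictionary p containing at least the keys read above. The sketch below lists them with placeholder values; the (name, kwargs) format of the optimizer and scheduler entries is an assumption about what utils.get_optimizer / utils.get_scheduler accept, and 'Trainer' is a placeholder name for the class that owns this __init__.

p = {
    'snapshot_path': 'snapshots/run1',          # logs, copied model file, checkpoints
    'model': 'D3Net',                           # hypothetical file under model_zoo/D3NetBenchmark/model/
    'if_use_tensorboard': True,
    'size': (224, 224),
    'train_datasets': ['data/train'],           # placeholder paths
    'val_datasets': ['data/val1', 'data/val2'],
    'max_num': 0,
    'if_memory': False,
    'bs': 8,
    'num_workers': 4,
    'lr': 1e-4,
    'optimizer': ('Adam', {}),                  # (name, extra kwargs) -- assumed format
    'scheduler': ('StepLR', {'step_size': 30}), # (name, extra kwargs) -- assumed format
    'start_epoch': 0,
    'resume': None,                             # or a path to a checkpoint file
}

trainer = Trainer(p)   # 'Trainer' is a hypothetical class name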
Example #4
    # Create an output directory for each test dataset.
    for test_dataset in test_datasets:
        os.makedirs(os.path.join(result_path, tmp, test_dataset),
                    exist_ok=True)

model_rgb = RgbNet().cuda()
model_rgbd = RgbdNet().cuda()
model_depth = DepthNet().cuda()

model_rgb.load_state_dict(torch.load(pretrained_models['RgbNet'])['model'])
model_rgbd.load_state_dict(torch.load(pretrained_models['RgbdNet'])['model'])
model_depth.load_state_dict(torch.load(pretrained_models['DepthNet'])['model'])

model_rgb.eval()
model_rgbd.eval()
model_depth.eval()

transform_test = torchvision.transforms.Compose([
    mtr.Resize(size),
    mtr.ToTensor(),
    mtr.Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225],
                  elems_do=['img'])
])

test_loaders = []
for test_dataset in test_datasets:
    val_set = RgbdSodDataset(datasets_path + test_dataset,
                             transform=transform_test)
    test_loaders.append(
        DataLoader(val_set, batch_size=1, shuffle=False, pin_memory=True))

for index, test_loader in enumerate(test_loaders):
    dataset = test_datasets[index]
Example #5
    for test_dataset in test_datasets:
        os.makedirs(os.path.join(result_path, tmp, test_dataset),
                    exist_ok=True)

model_rgb = RgbNet().cuda()
model_rgbd = RgbdNet().cuda()
model_depth = DepthNet().cuda()

model_rgb.load_state_dict(torch.load(pretrained_models['RgbNet'])['model'])
model_rgbd.load_state_dict(torch.load(pretrained_models['RgbdNet'])['model'])
model_depth.load_state_dict(torch.load(pretrained_models['DepthNet'])['model'])

model_rgb.eval()
model_rgbd.eval()
model_depth.eval()

transform_test = torchvision.transforms.Compose([
    mtr.Resize(size),
    mtr.ToTensor(),
    mtr.Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225],
                  elems_do=['img'])
])

test_loaders = []
for test_dataset in test_datasets:
    val_set = RgbdSodDataset(datasets_path + test_dataset,
                             transform=transform_test)
    test_loaders.append(
        DataLoader(val_set, batch_size=1, shuffle=False, pin_memory=True))

for index, test_loader in enumerate(test_loaders):
    dataset = test_datasets[index]
    print('Test [{}]'.format(dataset))

    for i, sample_batched in enumerate(tqdm(test_loader)):
        input, gt = (model_rgb.get_input(sample_batched),
                     model_rgb.get_gt(sample_batched))

        with torch.no_grad():
            output_rgb = model_rgb(input)
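The listing stops after the first forward pass. Purely as an illustration of the usual post-processing step in saliency detection (nothing below is taken from the snippet), a single-channel output is typically squashed with a sigmoid and written out as a grayscale map; the helper name and the assumed 1x1xHxW output shape are hypothetical.

import numpy as np
import torch
from PIL import Image

def save_saliency_map(output, save_path):
    # `output` is assumed to be a 1x1xHxW tensor of logits from one of the models.
    pred = torch.sigmoid(output)[0, 0].detach().cpu().numpy()
    Image.fromarray((pred * 255).astype(np.uint8)).save(save_path)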