Example #1
def main():
    # data files
    test_files, _ = data_util.get_train_files(args.input_data_path,
                                              args.test_file_list, '')
    if len(test_files) > args.max_to_vis:
        test_files = test_files[:args.max_to_vis]
    else:
        args.max_to_vis = len(test_files)
    random.seed(42)
    random.shuffle(test_files)
    print('#test files = ', len(test_files))
    test_dataset = scene_dataloader.SceneDataset(test_files, args.input_dim,
                                                 args.truncation,
                                                 args.num_hierarchy_levels,
                                                 args.max_input_height, 0,
                                                 args.target_data_path)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=2,
        collate_fn=scene_dataloader.collate)

    if os.path.exists(args.output):
        input(
            'warning: output dir %s exists, press key to overwrite and continue'
            % args.output)
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    # start testing
    print('starting testing...')
    loss_weights = np.ones(args.num_hierarchy_levels + 1, dtype=np.float32)
    test(loss_weights, test_dataloader, args.output, args.max_to_vis)
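
Every example on this page revolves around data_util.get_train_files. Its source is not included here, but from the call sites it appears to read file names from a list file and join them with a data directory, returning train and val lists (Example #3 calls a variant with an extra argument and three return values). A minimal sketch of that assumed behavior, not the actual implementation:

import os

def get_train_files(data_path, train_file_list, val_file_list):
    # Assumed behavior: each list file holds one relative file name per
    # line; an empty list argument yields an empty result.
    def read_list(file_list):
        if not file_list:
            return []
        with open(file_list) as f:
            return [os.path.join(data_path, line.strip())
                    for line in f if line.strip()]

    return read_list(train_file_list), read_list(val_file_list)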
Example #2
    def __init__(self):
        rospy.init_node('net_visualizer', anonymous=True)
        self.init_param()

        self.size_init = False
        # self.model = model_dense_simple.GenModel(8, 1, 16, 16, self.num_hierarchy_levels, True, True, True, True)
        # keep the model on the CPU at construction; it is moved to the GPU
        # below only when use_cpu is False
        self.model = model_dense_nosurf.GenModel(8, 1, 16, 16,
                                                 self.num_hierarchy_levels,
                                                 True, True, True, True)
        # map_location='cpu' lets the checkpoint load on CPU-only machines
        checkpoint = torch.load(self.model_path, map_location='cpu')
        self.model.load_state_dict(checkpoint['state_dict'])
        print('loaded model:', self.model_path)
        self.model.eval()
        if not self.use_cpu:
            self.model = self.model.cuda()
        self.sigmoid_func = torch.nn.Sigmoid()
        rospy.loginfo("net node ready!")

        val_files, _ = data_util.get_train_files(self.data_path,
                                                 self.file_list, '')
        print('#val files = ', len(val_files))
        if len(val_files) > 0:
            val_dataset = scene_dataloader.DenseSceneDataset(val_files, 2.0, 2)
            self.val_dataloader = torch.utils.data.DataLoader(
                val_dataset,
                batch_size=1,
                shuffle=True,
                num_workers=0,
                collate_fn=scene_dataloader.collate_dense,
                drop_last=True)  # collate_fn=scene_dataloader.collate
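
The use_cpu branching above can also be written device-agnostically; a sketch with hypothetical names (load_model is not part of the original code):

import torch

def load_model(model, model_path):
    # pick the device once, then route both the checkpoint tensors and
    # the model parameters through it
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    model.to(device)
    model.eval()
    return model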
Example #3
def main():
    # data files
    test_files, _, _ = data_util.get_train_files(args.input_data_path,
                                                 args.test_file_list, '', 0)
    if len(test_files) > args.max_to_process:
        test_files = test_files[:args.max_to_process]
    else:
        args.max_to_process = len(test_files)
    random.seed(42)
    random.shuffle(test_files)
    print('#test files = ', len(test_files))
    test_dataset = scene_dataloader.SceneDataset(
        test_files, args.input_dim, args.truncation, True,
        args.augment_rgb_scaling,
        (args.augment_scale_min, args.augment_scale_max),
        args.color_truncation, args.color_space,
        target_path=args.target_data_path,
        max_input_height=args.max_input_height)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=2,
        collate_fn=scene_dataloader.collate_voxels)

    if os.path.exists(args.output):
        if args.vis_only:
            print('warning: output dir %s exists, will overwrite any existing files'
                  % args.output)
        else:
            input('warning: output dir %s exists, press key to delete and continue' % args.output)
            shutil.rmtree(args.output)
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    output_vis_path = os.path.join(args.output, 'vis')
    if not os.path.exists(output_vis_path):
        os.makedirs(output_vis_path)
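    # note: on Python >= 3.2 each exists/makedirs pair above collapses to a
    # single race-free call: os.makedirs(output_vis_path, exist_ok=True)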

    # start testing
    print('starting testing...')
    test(test_dataloader, output_vis_path, args.num_to_vis)
Example #4
    def _get_train_files(self):
        # single underscores: '__data_path' would be name-mangled inside the
        # class, so hasattr(self, '__data_path') would always be False and
        # the cache would never take effect (see the demonstration below)
        if not hasattr(self, '_data_path'):

            data_path = self.hparams.data_path
            train_file_list = self.hparams.train_file_list
            val_file_list = self.hparams.val_file_list

            train_files, val_files = data_util.get_train_files(
                data_path, train_file_list, val_file_list)

            self._data_path = data_path
            self._train_files = train_files
            self._val_files = val_files

        return self._data_path, self._train_files, self._val_files
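
The single-underscore fix above matters because of Python's name mangling: inside a class body, __data_path is rewritten to _ClassName__data_path, so hasattr(self, '__data_path') is always False and the cached value is recomputed on every call. A minimal demonstration:

class Cache:
    def fill(self):
        self.__value = 42               # stored as _Cache__value

c = Cache()
c.fill()
print(hasattr(c, '__value'))            # False: the plain name never exists
print(hasattr(c, '_Cache__value'))      # True: the mangled name does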
Example #5
    def _get_train_files(self):
        # print("CD:", pathlib.Path().absolute())
        # print("CD:", )
        root_dir = Path(utils.get_original_cwd())

        # single underscore: '__data_path' would be name-mangled and never
        # found (see Example #4)
        if not hasattr(self, '_data_path'):

            data_path = root_dir / self.hparams.data.data_path
            train_file_list = root_dir / self.hparams.data.train_file_list
            val_file_list = root_dir / self.hparams.data.val_file_list

            train_files, val_files = data_util.get_train_files(
                data_path, train_file_list, val_file_list)

            self._data_path = data_path
            self._train_files = train_files
            self._val_files = val_files

        return self._data_path, self._train_files, self._val_files
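
On Python 3.8+ the hasattr-based caching in Examples #4 and #5 can be expressed with functools.cached_property, which runs the body once and stores the result on the instance. A sketch under the same assumptions about self.hparams (the class name here is illustrative):

from functools import cached_property

class DataModule:
    @cached_property
    def train_val_files(self):
        # computed on first access only; later accesses return the
        # stored tuple
        data_path = self.hparams.data_path
        train_files, val_files = data_util.get_train_files(
            data_path, self.hparams.train_file_list,
            self.hparams.val_file_list)
        return data_path, train_files, val_files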
Example #6
def main():
    # data files
    test_files, _ = data_util.get_train_files(args.input_data_path,
                                              args.test_file_list, '')
    if len(test_files) > args.max_to_vis:
        test_files = test_files[:args.max_to_vis]
    else:
        args.max_to_vis = len(test_files)
    random.seed(42)
    random.shuffle(test_files)
    print('#test files = ', len(test_files))

    test_dataset = scene_dataloader.DenseSceneDataset(
        test_files,
        args.input_dim,
        args.truncation,
        args.num_hierarchy_levels,
        0,
        0,
        flipped=args.flipped,
        # trans=transform.MyTransforms([transform.AddPepperNoise(0.90),
        #     transform.AddRandomFlip()],
        #     random_lift=transform.RandomliftFloor(), max_lift=2)
    )
    # test_dataset = scene_dataloader.DenseSceneDataset(test_files, args.input_dim, args.truncation, 1, args.max_input_height, 0, args.target_data_path, test = True)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=1,
        collate_fn=scene_dataloader.collate_dense)

    # if os.path.exists(args.output):
    #     raw_input('warning: output dir %s exists, press key to overwrite and continue' % args.output)
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    # start testing
    print('starting testing...')
    loss_weights = torch.ones(args.num_hierarchy_levels + 1).float()
    test(loss_weights, test_dataloader, args.output, args.max_to_vis)
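
Note that random.seed(42) only fixes the Python-level file shuffle; the DataLoader above uses shuffle=True, which draws from torch's own RNG. For fully deterministic runs the usual pattern seeds all three libraries (a sketch, not part of the original script):

import random
import numpy as np
import torch

def seed_everything(seed=42):
    # seed the three RNGs these scripts draw from
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)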
Example #7
if args.retrain:
    print('loading model:', args.retrain)
    checkpoint = torch.load(args.retrain)
    args.start_epoch = (args.start_epoch if args.start_epoch != 0
                        else checkpoint['epoch'])
    model.load_state_dict(checkpoint['state_dict'])  #, strict=False)
    optimizer.load_state_dict(checkpoint['optimizer'])
last_epoch = -1 if not args.retrain else args.start_epoch - 1
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=args.decay_lr,
                                            gamma=0.5,
                                            last_epoch=last_epoch)
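# With gamma=0.5, StepLR halves the learning rate every decay_lr epochs;
# last_epoch=start_epoch - 1 makes a resumed schedule continue where it
# left off instead of restarting from the initial rate. A small standalone
# check (the optimizer and parameter here are illustrative only):
#   params = [torch.nn.Parameter(torch.zeros(1))]
#   opt = torch.optim.SGD(params, lr=0.1)
#   sched = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.5)
#   for epoch in range(6):
#       print(epoch, sched.get_last_lr())  # [0.1], [0.1], [0.05], [0.05], ...
#       opt.step()
#       sched.step()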

# data files
train_files, val_files = data_util.get_train_files(args.data_path,
                                                   args.train_file_list,
                                                   args.val_file_list)
_OVERFIT = False
if len(train_files) == 1:
    _OVERFIT = True
    args.use_loss_masking = False
num_overfit_train = 0 if not _OVERFIT else 640
num_overfit_val = 0 if not _OVERFIT else 160
print('#train files = ', len(train_files))
print('#val files = ', len(val_files))
train_dataset = scene_dataloader.SceneDataset(train_files, args.input_dim,
                                              args.truncation,
                                              args.num_hierarchy_levels, 0,
                                              num_overfit_train)
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    # the remaining arguments were truncated in the original snippet; the
    # values below follow the pattern of the other examples and are assumptions
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=2,
    collate_fn=scene_dataloader.collate)
Example #8
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
UP_AXIS = 0  # z is 0th
POINTER = torch.zeros((args.dimx, args.dimx, args.dimz, 3)).float()
for x in range(POINTER.shape[0]):
    POINTER[x, :, :, 0] = x
for y in range(POINTER.shape[1]):
    POINTER[:, y, :, 1] = y
for z in range(POINTER.shape[2]):
    POINTER[:, :, z, 2] = z
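
# The three fill loops above build a dense (x, y, z) coordinate grid; the
# same tensor can be produced in one call (a behavior-preserving sketch;
# indexing='ij' needs PyTorch >= 1.10):
#   xs, ys, zs = torch.meshgrid(torch.arange(args.dimx),
#                               torch.arange(args.dimx),
#                               torch.arange(args.dimz),
#                               indexing='ij')
#   POINTER = torch.stack((xs, ys, zs), dim=-1).float()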

SUM = args.dimx * args.dimx * args.dimz

if not args.cpu:
    POINTER = POINTER.cuda()

test_files, _ = data_util.get_train_files(args.input_data_path,
                                          args.test_file_list, '')
if len(test_files) > args.max_to_vis:
    test_files = test_files[:args.max_to_vis]
else:
    args.max_to_vis = len(test_files)

print('#test files = ', len(test_files))
test_dataset = scene_dataloader.DenseSceneDataset(test_files,
                                                  args.input_dim,
                                                  args.truncation,
                                                  1,
                                                  args.max_input_height,
                                                  0,
                                                  args.target_data_path,
                                                  test=True)
test_dataloader = torch.utils.data.DataLoader(
    test_dataset,
    # the remaining arguments were truncated in the original snippet; the
    # values below follow the pattern of the other examples and are assumptions
    batch_size=1,
    shuffle=False,
    num_workers=2,
    collate_fn=scene_dataloader.collate_dense)
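
A typical consumer then iterates once over the loader with autograd disabled; the loop body is left open here because the sample structure produced by scene_dataloader.collate_dense is not shown in these snippets:

with torch.no_grad():  # inference only, no gradient bookkeeping
    for sample in test_dataloader:
        ...  # run the model on `sample` and write out predictions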