Example #1
def test(from_latest=False):
    ckpt_list = []
    while True:
        saved_models = [
            fn for fn in os.listdir(cfg.LOGDIR) if fn.endswith(".ckpt")
        ]
        saved_models = sorted(
            saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))

        if from_latest:
            saved_models = saved_models[-1:]
        for ckpt in saved_models:
            if ckpt not in ckpt_list:
                # use the latest checkpoint file
                loadckpt = os.path.join(cfg.LOGDIR, ckpt)
                logger.info("resuming " + str(loadckpt))
                state_dict = torch.load(loadckpt)
                model.load_state_dict(state_dict['model'])
                epoch_idx = state_dict['epoch']

                TestImgLoader.dataset.tsdf_cashe = {}

                avg_test_scalars = DictAverageMeter()
                save_mesh_scene = SaveScene(cfg)
                batch_len = len(TestImgLoader)
                for batch_idx, sample in enumerate(TestImgLoader):
                    for n in sample['fragment']:
                        logger.info(n)
                    # save mesh if SAVE_SCENE_MESH and is the last fragment
                    save_scene = cfg.SAVE_SCENE_MESH and batch_idx == batch_len - 1

                    start_time = time.time()
                    loss, scalar_outputs, outputs = test_sample(
                        sample, save_scene)
                    logger.info(
                        'Epoch {}, Iter {}/{}, test loss = {:.3f}, time = {:.3f}'
                        .format(epoch_idx, batch_idx, len(TestImgLoader), loss,
                                time.time() - start_time))
                    avg_test_scalars.update(scalar_outputs)
                    del scalar_outputs

                    if batch_idx % 100 == 0:
                        logger.info("Iter {}/{}, test results = {}".format(
                            batch_idx, len(TestImgLoader),
                            avg_test_scalars.mean()))

                    # save mesh
                    if cfg.SAVE_SCENE_MESH:
                        save_mesh_scene(outputs, sample, epoch_idx)
                save_scalars(tb_writer, 'fulltest', avg_test_scalars.mean(),
                             epoch_idx)
                logger.info("epoch {} avg_test_scalars:".format(epoch_idx),
                            avg_test_scalars.mean())

                ckpt_list.append(ckpt)

        time.sleep(10)
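
The loop above keeps polling cfg.LOGDIR for new checkpoints, sorts them by the epoch index embedded in the filename, and evaluates only files it has not seen before. Below is a minimal, self-contained sketch of just that polling pattern, assuming checkpoints are named like model_000007.ckpt; the directory argument and sleep interval are placeholders, not values taken from the example:

import os
import time

def poll_new_checkpoints(log_dir, interval=10):
    """Yield .ckpt paths as they appear, ordered by the epoch index in the filename."""
    seen = set()
    while True:
        ckpts = [fn for fn in os.listdir(log_dir) if fn.endswith(".ckpt")]
        # 'model_000007.ckpt' -> 7; same sort key as the example above
        ckpts.sort(key=lambda fn: int(fn.split('_')[-1].split('.')[0]))
        for fn in ckpts:
            if fn not in seen:
                seen.add(fn)
                yield os.path.join(log_dir, fn)
        time.sleep(interval)

Each yielded path can then be passed to torch.load and evaluated exactly as Example #1 does.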
Example #2
def test(from_latest=False):
    ckpt_list = []
    saved_models = [
        fn for fn in os.listdir(cfg.LOGDIR) if fn.endswith(".ckpt")
    ]
    saved_models = sorted(saved_models,
                          key=lambda x: int(x.split('_')[-1].split('.')[0]))

    if from_latest:
        saved_models = saved_models[-1:]
    for ckpt in saved_models:
        if ckpt not in ckpt_list:
            # use the latest checkpoint file
            loadckpt = os.path.join(cfg.LOGDIR, ckpt)
            logger.info("resuming " + str(loadckpt))
            state_dict = torch.load(loadckpt)
            model.load_state_dict(state_dict['model'], strict=False)
            optimizer.param_groups[0]['initial_lr'] = state_dict['optimizer'][
                'param_groups'][0]['lr']
            optimizer.param_groups[0]['lr'] = state_dict['optimizer'][
                'param_groups'][0]['lr']
            epoch_idx = state_dict['epoch']

            TestImgLoader.dataset.tsdf_cashe = {}

            avg_test_scalars = DictAverageMeter()
            save_mesh_scene = SaveScene(cfg)
            for batch_idx, sample in enumerate(TestImgLoader):
                for n in sample['fragment']:
                    logger.info(n)
                start_time = time.time()
                loss, scalar_outputs, outputs = test_sample(sample)
                logger.info(
                    'Epoch {}, Iter {}/{}, test loss = {:.3f}, time = {:.3f}'.
                    format(epoch_idx, batch_idx, len(TestImgLoader), loss,
                           time.time() - start_time))
                scalar_outputs.update({'time': time.time() - start_time})
                avg_test_scalars.update(scalar_outputs)
                del scalar_outputs

                if batch_idx % 100 == 0:
                    logger.info("Iter {}/{}, test results = {}".format(
                        batch_idx, len(TestImgLoader),
                        avg_test_scalars.mean()))

                # save mesh
                if cfg.SAVE_SCENE_MESH or cfg.SAVE_INCREMENTAL:
                    save_mesh_scene(outputs, sample, epoch_idx)
            logger.info("epoch {} avg_test_scalars:".format(epoch_idx),
                        avg_test_scalars.mean())

            ckpt_list.append(ckpt)

    time.sleep(10)
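
Compared with Example #1, this variant also copies the learning rate out of the saved optimizer state so evaluation or resumed training continues with the same LR; setting initial_lr matters if an LR scheduler is later constructed with last_epoch != -1. A minimal runnable sketch of just that restore step follows; the stand-in model and the hand-built checkpoint dict (used in place of torch.load) are illustrative assumptions:

import torch

# stand-in model/optimizer purely for illustration
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

# pretend this came from torch.load(<ckpt>); only the fields read in Example #2 are shown
state_dict = {'optimizer': {'param_groups': [{'lr': 2.5e-4}]}, 'epoch': 7}

saved_lr = state_dict['optimizer']['param_groups'][0]['lr']
for group in optimizer.param_groups:
    group['initial_lr'] = saved_lr  # read by LR schedulers created with last_epoch != -1
    group['lr'] = saved_lr

print(optimizer.param_groups[0]['lr'])  # 0.00025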
Example #3
data_loader = DataLoader(test_dataset, cfg.BATCH_SIZE, shuffle=False, num_workers=cfg.TEST.N_WORKERS, drop_last=False)

# model
logger.info("Initializing the model on GPU...")
model = NeuralRecon(cfg).cuda().eval()
model = torch.nn.DataParallel(model, device_ids=[0])

# use the latest checkpoint file
saved_models = [fn for fn in os.listdir(cfg.LOGDIR) if fn.endswith(".ckpt")]
saved_models = sorted(saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))
loadckpt = os.path.join(cfg.LOGDIR, saved_models[-1])
logger.info("Resuming from " + str(loadckpt))
state_dict = torch.load(loadckpt)
model.load_state_dict(state_dict['model'], strict=False)
epoch_idx = state_dict['epoch']
save_mesh_scene = SaveScene(cfg)

logger.info("Start inference..")
duration = 0.
gpu_mem_usage = []
frag_len = len(data_loader)
with torch.no_grad():
    for sample in tqdm(data_loader):
        start_time = time.time()
        outputs, loss_dict = model(sample)
        duration += time.time() - start_time
        if cfg.REDUCE_GPU_MEM:
            # will slow down the inference
            torch.cuda.empty_cache()
        # save mesh
        if cfg.SAVE_SCENE_MESH or cfg.SAVE_INCREMENTAL:
            save_mesh_scene(outputs, sample, epoch_idx)
Example #4
data_loader = DataLoader(test_dataset, cfg.BATCH_SIZE, shuffle=False, num_workers=cfg.TEST.N_WORKERS, drop_last=False)

# model
logger.info("Initializing the model on GPU...")
model = NeuralRecon(cfg).cuda().eval()
model = torch.nn.DataParallel(model, device_ids=[0])

# use the latest checkpoint file
saved_models = [fn for fn in os.listdir(cfg.LOGDIR) if fn.endswith(".ckpt")]
saved_models = sorted(saved_models, key=lambda x: int(x.split('_')[-1].split('.')[0]))
loadckpt = os.path.join(cfg.LOGDIR, saved_models[-1])
logger.info("Resuming from " + str(loadckpt))
state_dict = torch.load(loadckpt)
model.load_state_dict(state_dict['model'], strict=False)
epoch_idx = state_dict['epoch']
save_mesh_scene = SaveScene(cfg)

logger.info("Start inference..")
duration = 0.
gpu_mem_usage = []
frag_len = len(data_loader)
with torch.no_grad():
    for frag_idx, sample in enumerate(tqdm(data_loader)):
        # save mesh if: 1. SAVE_SCENE_MESH and is the last fragment, or
        #               2. SAVE_INCREMENTAL, or
        #               3. VIS_INCREMENTAL
        save_scene = (cfg.SAVE_SCENE_MESH and frag_idx == frag_len - 1) or cfg.SAVE_INCREMENTAL or cfg.VIS_INCREMENTAL

        start_time = time.time()
        outputs, loss_dict = model(sample, save_scene)
        duration += time.time() - start_time
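
Example #4 (like #3) accumulates duration across fragments and pre-declares gpu_mem_usage and frag_len, but the snippet ends before they are reported. The sketch below shows one plausible way such counters are filled and summarized; the use of torch.cuda.max_memory_allocated and the exact summary format are assumptions, not taken from the snippet:

duration = 0.0      # incremented by time.time() - start_time per fragment, as above
gpu_mem_usage = []  # e.g. append torch.cuda.max_memory_allocated() / 1024 ** 3 each fragment
frag_len = 1        # len(data_loader) in the real script

# after the loop
print('average time per fragment: {:.3f}s'.format(duration / max(frag_len, 1)))
if gpu_mem_usage:
    print('peak GPU memory: {:.2f} GiB'.format(max(gpu_mem_usage)))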