# NOTE(review): this chunk's newlines were lost in extraction; the single line below
# is not valid Python as-is (the inline `#` comments swallow the rest of the line)
# and is kept byte-identical. In order it contains: the tail of a DataLoader call
# (pin_memory/drop_last kwargs) whose opening — and the `if` matching the `else:` —
# lie before this chunk; the non-distributed `else:` branch building TrainImgLoader
# (drop_last=True) and TestImgLoader (drop_last=False); NeuralRecon model creation,
# wrapped in DistributedDataParallel (broadcast_buffers=False, per the inline note,
# until BatchNorm stats are synced; find_unused_parameters=True) when
# cfg.DISTRIBUTED, else single-GPU DataParallel; and the start of an Adam optimizer
# whose argument list is truncated at the end of the line (a later chunk shows it
# continuing with weight_decay=cfg.TRAIN.WD).
pin_memory=True, drop_last=False) else: TrainImgLoader = DataLoader(train_dataset, cfg.BATCH_SIZE, shuffle=False, num_workers=cfg.TRAIN.N_WORKERS, drop_last=True) TestImgLoader = DataLoader(test_dataset, cfg.BATCH_SIZE, shuffle=False, num_workers=cfg.TEST.N_WORKERS, drop_last=False) # model, optimizer model = NeuralRecon(cfg) if cfg.DISTRIBUTED: model.cuda() model = DistributedDataParallel( model, device_ids=[cfg.LOCAL_RANK], output_device=cfg.LOCAL_RANK, # this should be removed if we update BatchNorm stats broadcast_buffers=False, find_unused_parameters=True) else: model = torch.nn.DataParallel(model, device_ids=[0]) model.cuda() optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR, betas=(0.9, 0.999),
# Inference-time setup: build the data pipeline, load the newest checkpoint,
# and prepare the mesh writer. (Original newlines were lost in extraction;
# reformatted here with behavior unchanged.)
logger.info("Running NeuralRecon...")

# Test-time transform pipeline — spatial augmentation is disabled
# (random_rotation/random_translation False, zero padding).
transform = [transforms.ResizeImage((640, 480)),
             transforms.ToTensor(),
             transforms.RandomTransformSpace(
                 cfg.MODEL.N_VOX, cfg.MODEL.VOXEL_SIZE,
                 random_rotation=False, random_translation=False,
                 paddingXY=0, paddingZ=0, max_epoch=cfg.TRAIN.EPOCHS),
             transforms.IntrinsicsPoseToProjection(cfg.TEST.N_VIEWS, 4)]
# NOTE(review): rebinds the module name `transforms` to the composed pipeline.
# Kept as-is since code after this chunk may rely on the rebound name.
transforms = transforms.Compose(transform)

ARKitDataset = find_dataset_def(cfg.DATASET)
test_dataset = ARKitDataset(cfg.TEST.PATH, "test", transforms,
                            cfg.TEST.N_VIEWS, len(cfg.MODEL.THRESHOLDS) - 1)
data_loader = DataLoader(test_dataset, cfg.BATCH_SIZE, shuffle=False,
                         num_workers=cfg.TEST.N_WORKERS, drop_last=False)

# model
logger.info("Initializing the model on GPU...")
model = NeuralRecon(cfg).cuda().eval()
model = torch.nn.DataParallel(model, device_ids=[0])

# Resume from the latest checkpoint: filenames end in "<...>_<epoch>.ckpt",
# so sorting on the integer between the last '_' and the extension picks the
# highest epoch. (Raises IndexError if LOGDIR holds no .ckpt files.)
saved_models = [fn for fn in os.listdir(cfg.LOGDIR) if fn.endswith(".ckpt")]
saved_models = sorted(saved_models,
                      key=lambda x: int(x.split('_')[-1].split('.')[0]))
loadckpt = os.path.join(cfg.LOGDIR, saved_models[-1])
logger.info("Resuming from " + str(loadckpt))
state_dict = torch.load(loadckpt)
# strict=False tolerates missing/unexpected keys in the checkpoint.
model.load_state_dict(state_dict['model'], strict=False)
epoch_idx = state_dict['epoch']

save_mesh_scene = SaveScene(cfg)

logger.info("Start inference..")
duration = 0.       # accumulated per-batch runtime
gpu_mem_usage = []  # per-batch GPU memory samples
# NOTE(review): this chunk's newlines were lost in extraction; the single line below
# is not valid Python as-is (the inline `#` comments swallow the rest of the line)
# and is kept byte-identical. It is a larger window over the same setup code as an
# earlier chunk. In order it contains: the distributed-branch TestImgLoader built
# with an explicit `test_sampler` and pin_memory=True (the matching `if` header is
# before this chunk); the `else:` branch building plain TrainImgLoader
# (drop_last=True) and TestImgLoader (drop_last=False); NeuralRecon model creation,
# wrapped in DistributedDataParallel (broadcast_buffers=False, per the inline note,
# until BatchNorm stats are synced; find_unused_parameters=True) when
# cfg.DISTRIBUTED, else single-GPU DataParallel; and the Adam optimizer with
# lr=cfg.TRAIN.LR, betas=(0.9, 0.999), weight_decay=cfg.TRAIN.WD.
TestImgLoader = torch.utils.data.DataLoader( test_dataset, batch_size=cfg.BATCH_SIZE, sampler=test_sampler, num_workers=cfg.TEST.N_WORKERS, pin_memory=True, drop_last=False ) else: TrainImgLoader = DataLoader(train_dataset, cfg.BATCH_SIZE, shuffle=False, num_workers=cfg.TRAIN.N_WORKERS, drop_last=True) TestImgLoader = DataLoader(test_dataset, cfg.BATCH_SIZE, shuffle=False, num_workers=cfg.TEST.N_WORKERS, drop_last=False) # model, optimizer model = NeuralRecon(cfg) if cfg.DISTRIBUTED: model.cuda() model = DistributedDataParallel( model, device_ids=[cfg.LOCAL_RANK], output_device=cfg.LOCAL_RANK, # this should be removed if we update BatchNorm stats broadcast_buffers=False, find_unused_parameters=True ) else: model = torch.nn.DataParallel(model, device_ids=[0]) model.cuda() optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR, betas=(0.9, 0.999), weight_decay=cfg.TRAIN.WD) # main function