Example #1
import sys
import os.path as osp

import utils
# assumed import path for define_dataset; adjust to the project layout
from datasets import define_dataset


def setup_inversion():
    # model and config paths are taken from the command line
    model_path = sys.argv[1]
    config_path = sys.argv[2]
    # build the config, generator, LiDAR model, and device from the
    # checkpoint, using EMA weights with fixed noise buffers
    cfg, G, lidar, device = utils.setup(model_path,
                                        config_path,
                                        ema=True,
                                        fix_noise=True)
    # the test split is only built when the dataset root exists locally
    if osp.exists(cfg.dataset.root):
        dataset = define_dataset(cfg.dataset, phase="test")
    else:
        dataset = None
    return cfg, G, lidar, device, dataset
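A hypothetical invocation of this helper (script and file names are made up for illustration):

if __name__ == "__main__":
    # e.g. python invert.py logs/model.pt configs/kitti.yaml
    cfg, G, lidar, device, dataset = setup_inversion()
    print("test split available:", dataset is not None)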
Example #2
    def __init__(self, cfg, local_cfg):
        self.cfg = cfg
        self.local_cfg = local_cfg
        self.device = torch.device(self.local_cfg.gpu)

        # setup models
        self.cfg.model.gen.shape = self.cfg.dataset.shape
        self.cfg.model.dis.shape = self.cfg.dataset.shape
        self.G = define_G(self.cfg)
        self.D = define_D(self.cfg)
        self.G_ema = define_G(self.cfg)
        self.G_ema.eval()
        ema_inplace(self.G_ema, self.G, 0.0)
        self.A = DiffAugment(policy=self.cfg.solver.augment)
        # LiDAR coordinate model; ring/point resolution and depth range come
        # from the dataset config
        self.lidar = LiDAR(
            num_ring=self.cfg.dataset.shape[0],
            num_points=self.cfg.dataset.shape[1],
            min_depth=self.cfg.dataset.min_depth,
            max_depth=self.cfg.dataset.max_depth,
            angle_file=osp.join(self.cfg.dataset.root, "angles.pt"),
        )
        self.lidar.eval()

        self.G.to(self.device)
        self.D.to(self.device)
        self.G_ema.to(self.device)
        self.A.to(self.device)
        self.lidar.to(self.device)

        self.G = DDP(self.G,
                     device_ids=[self.local_cfg.gpu],
                     broadcast_buffers=False)
        self.D = DDP(self.D,
                     device_ids=[self.local_cfg.gpu],
                     broadcast_buffers=False)

        if dist.get_rank() == 0:
            print("minibatch size per gpu:", self.local_cfg.batch_size)
            print("number of gradient accumulation steps:",
                  self.cfg.solver.num_accumulation)

        self.ema_decay = 0.5**(self.cfg.solver.batch_size /
                               (self.cfg.solver.smoothing_kimg * 1000))
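        # e.g. with a global batch size of 64 and smoothing_kimg=10 (assumed
        # values), ema_decay = 0.5 ** (64 / 10000) ≈ 0.99557, i.e. the EMA
        # half-life is smoothing_kimg * 1000 = 10k samples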

        # training dataset
        self.dataset = define_dataset(self.cfg.dataset, phase="train")
        self.loader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.local_cfg.batch_size,
            shuffle=False,
            num_workers=self.local_cfg.num_workers,
            pin_memory=self.cfg.pin_memory,
            sampler=torch.utils.data.distributed.DistributedSampler(
                self.dataset),
            drop_last=True,
        )
        self.loader = cycle(self.loader)
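        # cycle() is assumed to wrap the loader in an endless generator, e.g.:
        #   def cycle(iterable):
        #       while True:
        #           yield from iterable
        # (with DistributedSampler, set_epoch() would normally be called
        # between passes to reshuffle across ranks)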

        # validation dataset
        self.val_dataset = define_dataset(self.cfg.dataset, phase="val")
        self.val_loader = torch.utils.data.DataLoader(
            self.val_dataset,
            batch_size=self.local_cfg.batch_size,
            shuffle=True,
            num_workers=self.local_cfg.num_workers,
            pin_memory=self.cfg.pin_memory,
            drop_last=False,
        )

        # loss criterion
        self.loss_weight = dict(self.cfg.solver.loss)
        self.criterion = {}
        self.criterion["gan"] = GANLoss(self.cfg.solver.gan_mode).to(
            self.device)
        if "gp" in self.loss_weight and self.loss_weight["gp"] > 0.0:
            self.criterion["gp"] = True
        if "pl" in self.loss_weight and self.loss_weight["pl"] > 0.0:
            self.criterion["pl"] = True
            self.pl_ema = torch.tensor(0.0).to(self.device)
        if dist.get_rank() == 0:
            print("loss: {}".format(tuple(self.criterion.keys())))

        # optimizer
        self.optim_G = optim.Adam(
            params=self.G.parameters(),
            lr=self.cfg.solver.lr.alpha.gen,
            betas=(self.cfg.solver.lr.beta1, self.cfg.solver.lr.beta2),
        )
        self.optim_D = optim.Adam(
            params=self.D.parameters(),
            lr=self.cfg.solver.lr.alpha.dis,
            betas=(self.cfg.solver.lr.beta1, self.cfg.solver.lr.beta2),
        )

        # automatic mixed precision
        self.enable_amp = cfg.enable_amp
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.enable_amp)
        if dist.get_rank() == 0 and self.enable_amp:
            print("amp enabled")

        # resume from checkpoints
        self.start_iteration = 0
        if self.cfg.resume is not None:
            state_dict = torch.load(self.cfg.resume, map_location="cpu")
            # "step" counts processed samples, so divide by the global batch
            # size to recover the iteration index
            self.start_iteration = state_dict["step"] // self.cfg.solver.batch_size
            self.G.module.load_state_dict(state_dict["G"])
            self.D.module.load_state_dict(state_dict["D"])
            self.G_ema.load_state_dict(state_dict["G_ema"])
            self.optim_G.load_state_dict(state_dict["optim_G"])
            self.optim_D.load_state_dict(state_dict["optim_D"])
            if "pl" in self.criterion:
                # criterion["pl"] is a plain bool flag, so restore the running
                # path-length average onto the trainer itself
                self.pl_ema = state_dict["pl_ema"].to(self.device)

        # for visual validation
        self.fixed_noise = torch.randn(self.local_cfg.batch_size,
                                       cfg.model.gen.in_ch,
                                       device=self.device)
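For orientation, here is a minimal sketch of how the pieces initialized above could combine into a single training step. It is a hypothetical method, not part of the original snippet: it assumes a pix2pix-style GANLoss(prediction, target_is_real) interface, that loader batches carry a "depth" key (as in Example #3), and it omits gradient accumulation and the gp/pl regularizers.

    def step(self):
        # fetch a real batch and sample latents
        reals = next(self.loader)["depth"].to(self.device)
        latent = torch.randn(reals.shape[0], self.cfg.model.gen.in_ch,
                             device=self.device)

        # discriminator update
        with torch.cuda.amp.autocast(enabled=self.enable_amp):
            fakes = self.G(latent)
            loss_D = (self.criterion["gan"](self.D(self.A(reals)), True) +
                      self.criterion["gan"](self.D(self.A(fakes.detach())), False))
        self.optim_D.zero_grad(set_to_none=True)
        self.scaler.scale(loss_D).backward()
        self.scaler.step(self.optim_D)

        # generator update
        with torch.cuda.amp.autocast(enabled=self.enable_amp):
            loss_G = self.criterion["gan"](self.D(self.A(self.G(latent))), True)
        self.optim_G.zero_grad(set_to_none=True)
        self.scaler.scale(loss_G).backward()
        self.scaler.step(self.optim_G)
        self.scaler.update()

        # track the generator with an exponential moving average
        ema_inplace(self.G_ema, self.G.module, self.ema_decay)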
Example #3
    # the snippet begins mid-call; reconstructed here to match the
    # utils.setup() call in Example #1 (argument sources are assumed)
    cfg, G, lidar, device = utils.setup(
        args.model_path,
        args.config_path,
        ema=True,
        fix_noise=True,
    )

    utils.set_requires_grad(G, False)
    G = DP(G)  # DP: assumed alias for torch.nn.DataParallel

    # hyperparameters
    num_step = 1000
    perturb_latent = True
    noise_ratio = 0.75
    noise_sigma = 1.0
    lr_rampup_ratio = 0.05
    lr_rampdown_ratio = 0.25
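    # the ramp ratios are typically consumed by a cosine warmup/decay schedule
    # as in the StyleGAN2 projector (assumed sketch, with t = step / num_step):
    #   ramp = min(1.0, (1.0 - t) / lr_rampdown_ratio)
    #   ramp = 0.5 - 0.5 * math.cos(ramp * math.pi)
    #   lr = base_lr * ramp * min(1.0, t / lr_rampup_ratio)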

    # prepare reference
    dataset = define_dataset(cfg.dataset, phase="test")
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=cfg.num_workers,
        drop_last=False,
    )

    # -------------------------------------------------------------------
    # utilities
    # -------------------------------------------------------------------
    def preprocess_reals(raw_batch):
        xyz = raw_batch["xyz"].to(device)
        depth = raw_batch["depth"].to(device)
        mask = raw_batch["mask"].to(device).float()
Example #4
        points = xyz.flatten(2).transpose(1, 2)  # (B,N,3)
        points = downsample_point_clouds(points, args.num_points)
        return points
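    # downsample_point_clouds is assumed to subsample each cloud to a fixed
    # size; a minimal sketch (random selection, the real helper may differ):
    #   def downsample_point_clouds(points, k):
    #       # points: (B, N, 3) -> (B, k, 3)
    #       B, N, _ = points.shape
    #       idx = torch.stack([
    #           torch.randperm(N, device=points.device)[:k] for _ in range(B)
    #       ])
    #       return torch.gather(points, 1, idx.unsqueeze(-1).expand(-1, -1, 3))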

    # -------------------------------------------------------------------
    # real data
    # -------------------------------------------------------------------
    reals = {}
    for subset in ("train", "test"):
        cache_path = f"data/cache_{cfg.dataset.name}_{subset}_{args.num_points}.pt"
        if osp.exists(cache_path):
            reals[subset] = torch.load(cache_path, map_location="cpu")
            print("loaded:", cache_path)
        else:
            loader = torch.utils.data.DataLoader(
                define_dataset(cfg.dataset, phase=subset),
                batch_size=cfg.solver.batch_size,
                shuffle=False,
                num_workers=cfg.num_workers,
                drop_last=False,
            )
            reals[subset] = defaultdict(list)
            for data in tqdm(
                    loader,
                    desc=f"real data ({subset})",
                    dynamic_ncols=True,
                    leave=False,
            ):
                inv, mask, points = preprocess_reals(data)
                reals[subset]["2d"].append(inv)
                points = downsample_point_clouds(points, k=args.num_points)