Example #1
def test():
    with torch.no_grad():
        for i in trange(len(perspectives)):
            R, T = perspectives[i]
            cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
            if isinstance(bsdf, ComposeSpatialVarying):
                got, _ = pt.pathtrace(
                    shape,
                    size=SIZE,
                    chunk_size=SIZE,
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=BasisBRDF(bsdf),
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                    background=0,
                )
                f, axes = plt.subplots(r, c)
                f.set_figheight(10)
                f.set_figwidth(10)
                got = got.unsqueeze(-1).expand(got.shape + (3, ))
                for k, img in enumerate(got.split(1, dim=-2)):
                    img = img.squeeze(-2).cpu().numpy()
                    axes[unroll(k, c)].imshow(img)
                    axes[unroll(k, c)].axis('off')
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.savefig(f"outputs/weights_{i:04}.png", bbox_inches="tight")
                plt.clf()
                plt.close(f)
            #normals, _ = pt.pathtrace(
            #  shape,
            #  size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=Debug(),
            #  cameras=cameras, lights=lights, device=device, silent=True,
            #  background=0,
            #)
            #save_image(f"outputs/normals_{i:04}.png", normals)
            #illum, _ = pt.pathtrace(
            #  shape,
            #  size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=Illumination(),
            #  cameras=cameras, lights=lights, device=device, silent=True,
            #)
            #save_image(f"outputs/illum_{i:04}.png", illum)

            # NOTE: 'and False' keeps this final render permanently disabled.
            if (integrator is not None) and False:
                got, _ = pt.pathtrace(
                    shape,
                    size=SIZE,
                    chunk_size=SIZE,
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=integrator,
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                    background=0,
                )
                save_image(f"outputs/got_{i:04}.png", got)
Example #2
def test():
  with torch.no_grad():
    for i, (c2w, lp) in enumerate(zip(tqdm(cam_to_worlds), light_locs)):
      exp = exp_imgs[i].clamp(min=0, max=1)
      cameras = NeRFCamera(cam_to_world=c2w.unsqueeze(0), focal=focal, device=device)
      lights = PointLights(intensity=[1,1,1], location=lp[None,...], scale=100, device=device)

      if isinstance(bsdf, ComposeSpatialVarying):
        got = pt.pathtrace(
          shape,
          size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=BasisBRDF(bsdf),
          cameras=cameras, lights=lights, device=device, silent=True,
        )[0].clamp(min=0, max=1)
        f, axes = plt.subplots(r, c)
        f.set_figheight(10)
        f.set_figwidth(10)
        got = got.unsqueeze(-1).expand(got.shape + (3,))
        wm_0 = None
        wm_1 = None
        for k, img in enumerate(got.split(1, dim=-2)):
          img = img.squeeze(-2).cpu().numpy()
          axes[unroll(k, c)].imshow(img)
          axes[unroll(k, c)].axis('off')
          if k == 0: wm_0 = img
          if k == 1: wm_1 = img
        plt.subplots_adjust(wspace=0, hspace=0)
        plt.savefig(f"outputs/nerv_weights_{i:04}.png", bbox_inches="tight")
        plt.clf()
        plt.close(f)

        # render the first two weight maps, normalized, for an easy comparison figure
        f, axes = plt.subplots(2)
        f.set_figheight(10)
        f.set_figwidth(10)
        # The small epsilon guards against division by zero where both weight
        # maps vanish.
        total = wm_0 + wm_1 + 1e-8
        wm_0 = wm_0 / total
        wm_1 = wm_1 / total
        axes[0].imshow(wm_0)
        axes[0].axis('off')
        axes[1].imshow(wm_1)
        axes[1].axis('off')
        plt.subplots_adjust(wspace=0, hspace=0)
        plt.savefig(f"outputs/nerv_wm01_{i:04}.png", bbox_inches="tight")
        plt.clf()
        plt.close(f)
      normals = pt.pathtrace(
        shape,
        size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=Debug(),
        cameras=cameras, lights=lights, device=device, silent=True,
      )[0]
      save_image(f"outputs/nerv_normals_{i:04}.png", normals)

      # NOTE: 'and False' keeps this final render permanently disabled.
      if (integrator is not None) and False:
        got = pt.pathtrace(
          shape,
          size=SIZE, chunk_size=SIZE, bundle_size=1, bsdf=bsdf, integrator=integrator,
          cameras=cameras, lights=lights, device=device, silent=True,
        )[0].clamp(min=0, max=1)
        save_image(f"outputs/got_{i:04}.png", got)
Example #3
def sphere_examples(bsdf, device="cuda", size=256, chunk_size=128, scale=100):
    from pytorch3d.pathtracer.shapes import Sphere
    from pytorch3d.pathtracer import pathtrace
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
    )
    import pytorch3d.pathtracer.integrators as integrators
    sphere = Sphere([0, 0, 0], 1, device=device)
    R, T = look_at_view_transform(dist=2., elev=0, azim=0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    lights = PointLights(device=device, location=[[0., 1., 4.]], scale=scale)
    out = []

    for basis in bsdf.bsdfs:
        expected = pathtrace(
            sphere,
            cameras=cameras,
            lights=lights,
            chunk_size=chunk_size,
            size=size,
            bsdf=basis,
            integrator=integrators.Direct(),
            device=device,
            silent=True,
        )[0]
        out.append(expected)
    return out
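A hypothetical call site for sphere_examples; learned_bsdf is a placeholder
for any composed BSDF exposing the .bsdfs list the loop above iterates over:

renders = sphere_examples(learned_bsdf, device="cuda", size=256, chunk_size=128)
for idx, img in enumerate(renders):
    # save_image as sketched after Example #2.
    save_image(f"outputs/basis_{idx:02}.png", img)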
Example #4
def sphere_render_bsdf(bsdf,
                       integrator=None,
                       device="cuda",
                       size=256,
                       chunk_size=128,
                       scale=100):
    from pytorch3d.pathtracer.shapes import Sphere
    from pytorch3d.pathtracer import pathtrace
    from pytorch3d.renderer import (
        look_at_view_transform,
        OpenGLPerspectiveCameras,
        PointLights,
    )
    import pytorch3d.pathtracer.integrators as integrators
    sphere = Sphere([0, 0, 0], 1, device=device)
    R, T = look_at_view_transform(dist=2., elev=0, azim=0)
    cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
    lights = PointLights(device=device, location=[[0., 1., 4.]], scale=scale)
    if integrator is None:
        integrator = integrators.Direct()
    return pathtrace(
        sphere,
        cameras=cameras,
        lights=lights,
        chunk_size=chunk_size,
        size=size,
        bsdf=bsdf,
        integrator=integrator,
        device=device,
        silent=True,
    )[0]
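Usage is analogous; passing integrator=None falls back to integrators.Direct()
inside the function. A sketch with a placeholder BSDF:

img = sphere_render_bsdf(my_bsdf, size=128, chunk_size=64)  # my_bsdf: placeholder
save_image("outputs/sphere_direct.png", img)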
Example #5
def run_tests(
  num_samples=32,
):
  l1_losses = []
  l2_losses = []
  psnr_losses = []
  gots = []
  num = 100  # evaluate on the first 100 views
  with torch.no_grad():
    for i, (c2w, lp) in enumerate(zip(tqdm(cam_to_worlds[:num]), light_locs)):
      exp = exp_imgs[i].clamp(min=0, max=1)
      cameras = NeRFCamera(cam_to_world=c2w.unsqueeze(0), focal=focal, device=device)
      lights = PointLights(intensity=[1,1,1], location=lp[None,...], scale=300, device=device)
      got = None
      # Average num_samples stochastic renders to reduce path-tracing noise.
      for _ in range(num_samples):
        sample = pt.pathtrace(
          density_field,
          size=SIZE, chunk_size=min(SIZE, 100), bundle_size=1, bsdf=learned_bsdf,
          integrator=integrator,
          # 0 is for comparison, 1 is for display
          background=0,
          cameras=cameras, lights=lights, device=device, silent=True,
          w_isect=True,
        )[0]
        if got is None: got = sample
        else: got += sample
      got /= num_samples
      got = got.clamp(min=0, max=1)
      save_plot(
        exp ** (1/2.2), got ** (1/2.2),
        f"outputs/path_nerv_armadillo_{i:03}.png",
      )
      l1_losses.append(F.l1_loss(exp, got).item())
      mse = F.mse_loss(exp, got)
      l2_losses.append(mse.item())
      psnr = mse2psnr(mse).item()
      psnr_losses.append(psnr)
      gots.append(got)
  print("Avg l1 loss", np.mean(l1_losses))
  print("Avg l2 loss", np.mean(l2_losses))
  print("Avg PSNR loss", np.mean(psnr_losses))
  with torch.no_grad():
    gots = torch.stack(gots, dim=0).permute(0, 3, 1, 2)
    exps = torch.stack(exp_imgs[:num], dim=0).permute(0, 3, 1, 2)
    # takes a lot of memory
    torch.cuda.empty_cache()
    ssim_loss = ms_ssim(gots, exps, data_range=1, size_average=True).item()
    print("MS-SSIM", ssim_loss)

    ssim_loss = ssim(gots, exps, data_range=1, size_average=True).item()
    print("SSIM", ssim_loss)
  return
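mse2psnr is assumed from the surrounding codebase; for images with a peak
value of 1 it is the standard conversion PSNR = -10 * log10(MSE), e.g.:

import torch

def mse2psnr(mse: torch.Tensor) -> torch.Tensor:
    # PSNR = -10 * log10(MSE) when the peak signal value is 1.
    return -10.0 * torch.log10(mse)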
Example #6
def test():
    with torch.no_grad():
        for i, (pose, intrinsic) in enumerate(zip(tqdm(poses), intrinsics)):
            cameras = DTUCamera(pose=pose[None, ...],
                                intrinsic=intrinsic[None, ...],
                                device=device)
            if isinstance(bsdf, ComposeSpatialVarying):
                got, _ = pt.pathtrace(
                    shape,
                    size=SIZE,
                    chunk_size=SIZE,
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=BasisBRDF(bsdf),
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                )
                f, axes = plt.subplots(r, c)
                f.set_figheight(10)
                f.set_figwidth(10)
                got = got.unsqueeze(-1).expand(got.shape + (3, ))
                for k, img in enumerate(got.split(1, dim=-2)):
                    img = img.squeeze(-2).cpu().numpy()
                    axes[unroll(k, c)].imshow(img)
                    axes[unroll(k, c)].axis('off')
                plt.subplots_adjust(wspace=0, hspace=0)
                plt.savefig(f"outputs/weights_{i:04}.png", bbox_inches="tight")
                plt.clf()
                plt.close(f)
            normals, _ = pt.pathtrace(
                shape,
                size=SIZE,
                chunk_size=SIZE,
                bundle_size=1,
                bsdf=bsdf,
                integrator=Debug(),
                cameras=cameras,
                lights=lights,
                device=device,
                silent=True,
                background=1,
            )
            save_image(f"outputs/normals_{i:04}.png", normals)

            if (integrator is not None):
                got = pt.pathtrace(
                    shape,
                    size=SIZE,
                    chunk_size=SIZE,
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=integrator,
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                    background=1,
                )[0].clamp(min=0, max=1)
                save_image(f"outputs/got_{i:04}.png", got)
Example #7
      # Fragment: the tail of a training loop -- optimizer step, loss logging,
      # and a periodic validation render.
      opt.step()
      loss = loss.detach().item()
      losses.append(loss)
      update(loss, i)

      #if ((i % ckpt_freq) == 0) and (i != 0): save_fn(i)

      if (i % valid_freq) == 0:
        with torch.no_grad():
          R = R[0].unsqueeze(0)
          T = T[0].unsqueeze(0)
          cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
          light_update(cameras, lights)
          validate, _ = pt.pathtrace(
            shape, size=size, chunk_size=min(size, max_valid_size),
            bundle_size=1,
            bsdf=bsdf, integrator=integrator,
            cameras=cameras, lights=lights, device=device, silent=True,
          )
          save_image(valid_name_fn(i), validate)
    return losses

  losses = train_sample(
    nerfle,
    integrator=integrator,
    lights=lights,
    Rs=Rs, Ts=Ts,
    exp_imgs=exp_imgs,
    opt=opt,
    size=SIZE,
    crop_size=16,
    save_freq=20000,
  )
Example #8
def train_gan(
    nerf,
    nerf_optim,
    disc,
    disc_optim,
    dataloader,
    batch_size=3,
    iters=80,
    device="cuda",
    valid_freq=250,
):
    integrator = NeRFReproduce()
    with trange(iters * len(dataloader)) as t:
        for j in range(iters):
            for i, (data, _tgt) in enumerate(dataloader):
                if data.shape[0] != batch_size:
                    t.update()
                    continue
                data = data.to(device)

                # train discriminator
                # real data:
                disc.zero_grad()

                pred = disc(data)
                label = torch.ones(batch_size, device=device)
                real_loss = F.binary_cross_entropy_with_logits(pred, label)
                real_loss.backward()
                real_loss = real_loss.item()
                # fake data:
                nerf.assign_latent(
                    torch.randn(batch_size, latent_size, device=device))
                v = random.sample(range(Rs.shape[0]), batch_size)
                R, T = Rs[v], Ts[v]
                cameras = OpenGLPerspectiveCameras(R=R, T=T, device=device)
                fake = pt.pathtrace(nerf,
                                    size=64,
                                    chunk_size=8,
                                    bundle_size=1,
                                    integrator=integrator,
                                    cameras=cameras,
                                    background=1,
                                    bsdf=None,
                                    lights=None,
                                    silent=True,
                                    with_noise=False,
                                    device=device)[0].permute(0, 3, 1, 2)

                pred = disc(fake.detach().clone())
                label = torch.zeros(batch_size, device=device)
                fake_loss = F.binary_cross_entropy_with_logits(pred, label)
                fake_loss.backward()
                fake_loss = fake_loss.item()

                disc_optim.step()

                # train generator/nerf
                nerf.zero_grad()
                pred = disc(fake)
                gen_loss = F.binary_cross_entropy_with_logits(
                    pred, torch.ones_like(label))
                gen_loss.backward()
                gen_loss = gen_loss.item()
                nerf_optim.step()

                t.set_postfix(Dreal=f"{real_loss:.05}",
                              Dfake=f"{fake_loss:.05}",
                              G=f"{gen_loss:.05}")
                t.update()

                ij = i + j * len(dataloader)
                if ij % valid_freq == 0:
                    save_image(f"outputs/gan_valid_{ij:05}.png",
                               fake[0].permute(1, 2, 0))
                    #save_image(f"outputs/ref_{ij:05}.png", data[0].permute(1,2,0))
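A hypothetical driver for train_gan; nerf, disc, and dataloader stand in for
objects constructed elsewhere in the original script, and the Adam
hyperparameters are assumptions:

import torch

nerf_optim = torch.optim.Adam(nerf.parameters(), lr=1e-4)
disc_optim = torch.optim.Adam(disc.parameters(), lr=1e-4)
train_gan(nerf, nerf_optim, disc, disc_optim, dataloader,
          batch_size=3, iters=80, device="cuda", valid_freq=250)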
Example #9
def test_nerv_ptl(
    density_field,
    bsdf,
    integrator,
    light_locs,
    cam_to_worlds,
    focal,
    light_weights,
    exp_imgs,
    size,
    name_fn=lambda i: f"outputs/test_{i:03}.png",
    w_isect=True,
):
    device = exp_imgs[0].device
    l1_losses = []
    l2_losses = []
    psnr_losses = []
    gots = []
    with torch.no_grad():
        for i, (c2w, lp) in enumerate(zip(tqdm(cam_to_worlds), light_locs)):
            exp = exp_imgs[i].clamp(min=0, max=1)
            cameras = NeRFCamera(cam_to_world=c2w.unsqueeze(0),
                                 focal=focal,
                                 device=device)
            got = None
            # One render per light; the samples are summed, then clipped to [0, 1].
            for j, lw in enumerate(light_weights[i]):
                # NOTE: o_i is not defined in this snippet; it presumably comes
                # from the enclosing scope of the original source.
                scale = 100 if j == 0 else o_i
                lights = PointLights(intensity=lw[:3],
                                     location=lp[j].unsqueeze(0),
                                     scale=scale,
                                     device=device)
                sample = pt.pathtrace(
                    density_field,
                    size=size,
                    chunk_size=min(size, 100),
                    bundle_size=1,
                    bsdf=bsdf,
                    integrator=integrator,
                    # 0 is for comparison, 1 is for display
                    background=0,
                    cameras=cameras,
                    lights=lights,
                    device=device,
                    silent=True,
                    w_isect=w_isect,
                )[0].clamp(min=0, max=1)
                if got is None: got = sample
                else: got = got + sample
            got = got.clip(min=0, max=1)
            save_plot(exp**(1 / 2.2), got**(1 / 2.2), name_fn(i))
            l1_losses.append(F.l1_loss(exp, got).item())
            mse = F.mse_loss(exp, got)
            l2_losses.append(mse.item())
            psnr = mse2psnr(mse).item()
            psnr_losses.append(psnr)
            gots.append(got)
    print("Avg l1 loss", np.mean(l1_losses))
    print("Avg l2 loss", np.mean(l2_losses))
    print("Avg PSNR loss", np.mean(psnr_losses))
    with torch.no_grad():
        # takes a lot of memory
        gots = torch.stack(gots, dim=0).permute(0, 3, 1, 2)
        # Reinhard-style tone mapping (x / (1 + x)) keeps values in [0, 1) for SSIM.
        tm_gots = gots / (1 + gots)
        exps = torch.stack(exp_imgs, dim=0).permute(0, 3, 1, 2)
        tm_exps = exps / (1 + exps)
        torch.cuda.empty_cache()
        ssim_loss = ms_ssim(tm_gots, tm_exps, data_range=1,
                            size_average=True).item()
        print("MS-SSIM loss", ssim_loss)

        ssim_loss = ssim(tm_gots, tm_exps, data_range=1,
                         size_average=True).item()
        print("SSIM loss", ssim_loss)
    return
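save_plot, used in Examples #5 and #9 to write the gamma-corrected
expected/rendered pair, is also assumed; a minimal side-by-side sketch:

import matplotlib.pyplot as plt

def save_plot(expected, got, path):
    # Show the reference and the render side by side, then save the figure.
    f, axes = plt.subplots(1, 2)
    axes[0].imshow(expected.cpu().numpy())
    axes[0].set_title("expected")
    axes[0].axis("off")
    axes[1].imshow(got.cpu().numpy())
    axes[1].set_title("got")
    axes[1].axis("off")
    plt.savefig(path, bbox_inches="tight")
    plt.close(f)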