def __init__(self,
             dir: str,
             rasterization_settings: dict,
             znear: float = 1.0,
             zfar: float = 1000.0,
             scale_min: float = 0.5,
             scale_max: float = 2.0,
             device: str = 'cuda'):
    """Dataset that loads every ``.obj`` in *dir* and builds a flat-shaded renderer.

    Args:
        dir: directory scanned (non-recursively) for ``.obj`` files.
        rasterization_settings: kwargs forwarded to ``RasterizationSettings``.
        znear: near clipping plane of the FoV perspective camera.
        zfar: far clipping plane of the FoV perspective camera.
        scale_min: lower bound of the scale range kept on the instance.
        scale_max: upper bound of the scale range kept on the instance.
        device: torch device string, e.g. ``'cuda'``.
    """
    super(ToyNeuralGraphicsDataset, self).__init__()
    torch_device = torch.device(device)
    self.device = torch_device
    self.scale_min = scale_min
    self.scale_max = scale_max
    self.scale_range = scale_max - scale_min
    # Collect every .obj file in the directory (os.listdir order preserved).
    obj_paths = []
    for entry in os.listdir(dir):
        if entry.endswith('.obj'):
            obj_paths.append(os.path.join(dir, entry))
    self.meshes = load_objs_as_meshes(obj_paths, device=torch_device)
    # Identity view transform: camera at the origin, no rotation.
    R, T = look_at_view_transform(0, 0, 0)
    self.cameras = FoVPerspectiveCameras(R=R,
                                         T=T,
                                         znear=znear,
                                         zfar=zfar,
                                         device=torch_device)
    rasterizer = MeshRasterizer(
        cameras=self.cameras,
        raster_settings=RasterizationSettings(**rasterization_settings))
    shader = HardFlatShader(device=torch_device, cameras=self.cameras)
    self.renderer = MeshRenderer(rasterizer=rasterizer, shader=shader)
def project_mesh(mesh, angle):
    """Render *mesh* with a hard flat shader from azimuth *angle*.

    The camera orbits at distance 1.75 with elevation -45 degrees, looking at
    a point slightly below the origin. Rasterization settings, lights and the
    device come from the project ``Metadata`` object.

    Args:
        mesh: a pytorch3d ``Meshes`` object; its textures are replaced with a
            uniform white per-vertex texture as a side effect.
        angle: azimuth angle (degrees) passed to ``look_at_view_transform``.

    Returns:
        The rendered image tensor produced by the ``MeshRenderer``.
    """
    # Removed an unused `start = time.time()` local and dead commented-out code.
    m = Metadata()
    R, T = look_at_view_transform(1.75,
                                  -45,
                                  angle,
                                  up=((0, 1, 0), ),
                                  at=((0, -0.25, 0), ))
    cameras = OpenGLPerspectiveCameras(device=m.device, R=R, T=T)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=m.raster_settings),
        shader=HardFlatShader(cameras=cameras,
                              device=m.device,
                              lights=m.lights))

    # Paint every vertex white so the flat shader has a texture to sample.
    verts = mesh.verts_list()[0]
    verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
    mesh.textures = Textures(verts_rgb=verts_rgb.to(m.device))
    # Textures expects the per-mesh counts as plain Python lists.
    mesh.textures._num_faces_per_mesh = mesh._num_faces_per_mesh.tolist()
    mesh.textures._num_verts_per_mesh = mesh._num_verts_per_mesh.tolist()

    return renderer(mesh)
# --- Example no. 3 (scraping artifact; original separator: "Ejemplo n.º 3" / "0") ---
  def __init__(self, opt):
    """Set up a differentiable BFM face renderer.

    Args:
      opt: options namespace; must provide ``input_res`` (square image size),
        ``BFM`` (model path relative to the package parent directory) and
        ``batch_size``.
    """
    super(PtRender, self).__init__()
    self.opt = opt
    self.input_res = opt.input_res
    model_path = os.path.join(os.path.dirname(__file__), '..', opt.BFM)
    self.BFM = BFM(model_path)

    # Pinhole intrinsics: fixed focal length, principal point at image center.
    f = 1015.
    self.f = f
    c = self.input_res / 2

    K = [[f,  0., c],
         [0., f,  c],
         [0., 0., 1.]]
    self.register_buffer('K', torch.FloatTensor(K))
    self.register_buffer('inv_K', torch.inverse(self.K).unsqueeze(0))
    # NOTE(review): this reassignment replaces the registered 'K' buffer with a
    # plain (1, 3, 3) tensor; only 'inv_K' remains a module buffer.
    self.K = self.K.unsqueeze(0)
    self.set_Illu_consts()

    # for pytorch3d
    self.t = torch.zeros([1, 3], dtype=torch.float32)
    self.pt = torch.zeros([1, 2], dtype=torch.float32)
    # BUG FIX: the original line ended with a stray comma, which made the NDC
    # focal length a 1-tuple instead of a scalar; store the scalar.
    self.fl = f * 2 / self.input_res
    # Negate the x axis — presumably to convert between the BFM and pytorch3d
    # camera conventions; confirm against the projection code.
    ptR = [[[-1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]]]
    self.ptR = torch.FloatTensor(ptR)

    blend_params = BlendParams(sigma=1e-4, gamma=1e-4, background_color=(0, 0, 0))
    raster_settings = RasterizationSettings(
      image_size=self.input_res,
      blur_radius=0,
      faces_per_pixel=1,
      max_faces_per_bin=1000000,
    )

    # renderer
    cameras = SfMPerspectiveCameras(focal_length=self.fl,
                                    R=self.ptR.expand(opt.batch_size, -1, -1),
                                    device='cuda')
    rasterizer = MeshRasterizer(raster_settings=raster_settings)
    shader_rgb = HardFlatShader(blend_params=blend_params)

    self.renderer = Renderer(rasterizer, shader_rgb, SoftSilhouetteShader(), cameras)
# --- Example no. 4 (scraping artifact; original separator: "Ejemplo n.º 4" / "0") ---
def flat_renderer(img_size: tuple, device: str):
    """Build a hard-flat-shaded mesh renderer lit by a single point light.

    Args:
        img_size: output image size passed to ``RasterizationSettings``.
        device: torch device string used for the light and shader.

    Returns:
        A ``MeshRenderer`` rendering one face per pixel with no blur.
    """
    # One face per pixel, no blur: the simple/fast rasterization setup.
    settings = RasterizationSettings(
        image_size=img_size,
        blur_radius=0,
        faces_per_pixel=1,
        max_faces_per_bin=5000,
    )
    # A single point light placed in front of the object.
    point_light = PointLights(
        device=device,
        location=[[-3, 4, -3]],
        diffuse_color=((0.5, 0.5, 0.5), ),
        specular_color=((0.5, 0.5, 0.5), ),
    )
    return MeshRenderer(
        rasterizer=MeshRasterizer(raster_settings=settings),
        shader=HardFlatShader(device=device, lights=point_light),
    )
# --- Example no. 5 (scraping artifact; original separator: "Ejemplo n.º 5" / "0") ---
            camera_params=camera_params)

        # Define the settings for rasterization and shading.
        # Refer to raster_points.py for explanations of these parameters.
        raster_settings = RasterizationSettings(
            image_size=opt.image_size,
            blur_radius=0.0,
            faces_per_pixel=5,
            # this setting controls whether naive or coarse-to-fine rasterization is used
            bin_size=None,
            max_faces_per_bin=None  # this setting is for coarse rasterization
        )

        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(raster_settings=raster_settings),
            shader=HardFlatShader(device=device))

        if opt.point_lights:
            template_lights = PointLights()
        else:
            template_lights = DirectionalLights()

        # pcl_dict = {'points': pointclouds.points_padded[0].cpu().numpy()}
        data_dict = {
            "cameras_type":
            '.'.join([
                camera_sampler.camera_type.__module__,
                camera_sampler.camera_type.__name__
            ]),
            "cameras_params":
            camera_params,
# --- Example no. 6 (scraping artifact; original separator: "Ejemplo n.º 6" / "0") ---
def main():
    """Entry point: train and evaluate a view-based mesh classifier.

    Builds a differentiable flat-shaded renderer, splits ClassificationData
    into train/val/test subsets, trains with Adam + cross-entropy (logging to
    wandb), then reloads the best checkpoint and logs test statistics.
    """
    args = parser.parse_args()
    # Fix RNG seeds so the dataset splits below are reproducible.
    torch.manual_seed(0)
    np.random.seed(0)

    # Set the cuda device
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")

    # Two network variants live in separate modules; select via CLI flag.
    if args.net_version == 1:
        from view_net import Model
    else:
        from view_net2 import Model
    # Initialize an OpenGL perspective camera.
    cameras = OpenGLPerspectiveCameras(device=device)

    # To blend the 100 faces we set a few parameters which control the opacity and the sharpness of
    # edges. Refer to blending.py for more details.
    # blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
    raster_settings = RasterizationSettings(
        image_size=224,
        blur_radius=0.0,  # np.log(1. / 1e-4 - 1.) * blend_params.sigma,
        faces_per_pixel=args.faces_per_pixel,  # 100
    )

    # We can add a point light in front of the object.
    # lights = PointLights(device=device)
    # lights = DirectionalLights(device=device, direction=self.camera_position)
    phong_renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=HardFlatShader(device=device,
                              cameras=cameras)  # , lights=lights)
    )

    dataset = ClassificationData(args.data, "train", device)

    # Carve a 20% test split off the full dataset, then a 20% val split off
    # the remaining training indices.
    test_idx, train_idx = dataset.train_test_split(dataset.paths,
                                                   range(len(dataset)), 20,
                                                   True)
    val_idx, train_idx = dataset.train_test_split(
        [dataset.paths[i] for i in train_idx], train_idx, 20, True)

    test_set = torch.utils.data.Subset(dataset, test_idx)
    val_set = torch.utils.data.Subset(dataset, val_idx)
    train_set = torch.utils.data.Subset(dataset, train_idx)

    # NOTE(review): batch_size is divided by nviews — presumably each sample
    # expands into nviews rendered views downstream; shuffle=False even for
    # training looks deliberate (view grouping?) but confirm.
    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size // args.nviews,
                              shuffle=False,
                              num_workers=0,
                              pin_memory=True,
                              drop_last=True,
                              collate_fn=my_collate)
    val_loader = DataLoader(val_set,
                            batch_size=args.batch_size // args.nviews,
                            shuffle=False,
                            num_workers=1,
                            pin_memory=True,
                            drop_last=True,
                            collate_fn=my_collate)
    test_loader = DataLoader(test_set,
                             batch_size=args.batch_size // args.nviews,
                             shuffle=False,
                             num_workers=1,
                             pin_memory=True,
                             drop_last=True,
                             collate_fn=my_collate)

    model = Model(device, phong_renderer, dataset.nclasses, args)
    args.num_classes = dataset.nclasses
    # Multi GPUs
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model = model.to(device=device)

    # Camera positions get their own learning rate; both sub-networks use the
    # optimizer-wide default lr.
    optimizer = Adam(
        [
            {
                'params': model.camera_position,
                'lr': args.learning_rate_camera
            },
            # {'params': model.light_position, 'lr': args.learning_rate_camera},
            {
                'params': model.net_1.parameters()
            },
            {
                'params': model.net_2.parameters()
            }
        ],
        lr=args.learning_rate,
        weight_decay=args.weight_decay)

    criterion = nn.CrossEntropyLoss().to(device=device)

    wandb.init(project="views_net", config=args)

    # Timestamped checkpoint filename for the best model.
    args.fname_best = 'views_net{}_model_best{}.pth.tar'.format(
        args.nviews,
        datetime.now().strftime("%d_%b_%Y_%H_%M_%S"))
    args.device = device

    args.obj_path = train_set.dataset.paths[0][0]

    train(model, criterion, optimizer, train_loader, val_loader, args)

    # Reload the best checkpoint saved during training before final eval.
    load_model(model, args.fname_best)

    val_statistics = validate(test_loader, model, criterion, device)

    log_summary(val_statistics, "val", test_loader.dataset.dataset.classes)
                                      j,
                                      up=((0, 1, 0), ),
                                      at=((0, 0.75, 0), ))
        cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)

        raster_settings = RasterizationSettings(
            image_size=512,
            blur_radius=0.0,
            faces_per_pixel=1,
        )

        lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])

        renderer = MeshRenderer(rasterizer=MeshRasterizer(
            cameras=cameras, raster_settings=raster_settings),
                                shader=HardFlatShader(device=device,
                                                      lights=lights))

        renderers[j] = renderer

    # print(renderers)

    for i, (mesh, file) in enumerate(tqdm(zip(meshes, files))):
        for j in [0, 90, 180, 270]:

            verts = mesh.verts_list()[0]
            # faces = meshes.faces_list()[0]

            verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
            textures = Textures(verts_rgb=verts_rgb.to(device))

            mesh.textures = textures
# --- Example no. 8 (scraping artifact; original separator: "Ejemplo n.º 8" / "0") ---
def batch_render(
    verts,
    faces,
    faces_per_pixel=10,
    K=None,
    rot=None,
    trans=None,
    colors=None,
    color=(0.53, 0.53, 0.8),  # light_purple
    ambient_col=0.5,
    specular_col=0.2,
    diffuse_col=0.3,
    face_colors=None,
    # color = (0.74117647, 0.85882353, 0.65098039),  # light_blue
    image_sizes=None,
    out_res=512,
    bin_size=0,
    shading="soft",
    mode="rgb",
    blend_gamma=1e-4,
    blend_sigma=1e-4,
    min_depth=None,
):
    """Render a batch of meshes on cuda:0 with a pytorch3d perspective camera.

    Args:
        verts: batched vertex tensor, indexed as (N, V, 3) — confirm at caller.
        faces: batched face-index tensor.
        faces_per_pixel: rasterizer K for soft blending.
        K: (N, 3, 3) pinhole intrinsics; required (``K.to(device)`` is called
            unconditionally).
        rot, trans: optional camera extrinsics; identity / zero when omitted.
        colors: optional per-vertex colors; derived from *color* when None.
        color, ambient_col, specular_col, diffuse_col: material and lighting.
        face_colors: per-face colors for the "facecolor" shaders.
        image_sizes: [(width, height)]; only the first entry is used for the
            whole batch.
        out_res: unused — the output size is driven by ``image_sizes``.
        bin_size: rasterizer binning (0 = naive rasterization).
        shading: "soft" | "hard" | "flat" (rgb mode), or "faceidx".
        mode: "rgb" | "silh" | "facecolor".
        blend_gamma, blend_sigma: soft-blending parameters.
        min_depth: optional lower clamp applied to the z coordinate.

    Returns:
        Rendered image tensor, flipped on both spatial axes and cropped to
        remove the square-canvas padding.

    Raises:
        ValueError: on an unsupported mode/shading combination.
    """
    device = torch.device("cuda:0")
    K = K.to(device)
    width, height = image_sizes[0]
    # Render on a square canvas sized to the larger image dimension.
    out_size = int(max(image_sizes[0]))
    raster_settings = RasterizationSettings(
        image_size=out_size,
        blur_radius=0.0,
        faces_per_pixel=faces_per_pixel,
        bin_size=bin_size,
    )

    fx = K[:, 0, 0]
    fy = K[:, 1, 1]
    focals = torch.stack([fx, fy], 1)
    px = K[:, 0, 2]
    py = K[:, 1, 2]
    # The principal point is mirrored because the final image is flipped below.
    principal_point = torch.stack([width - px, height - py], 1)
    if rot is None:
        rot = torch.eye(3).unsqueeze(0).to(device)
    if trans is None:
        trans = torch.zeros(3).unsqueeze(0).to(device)
    cameras = PerspectiveCameras(
        device=device,
        focal_length=focals,
        principal_point=principal_point,
        image_size=[(out_size, out_size) for _ in range(len(verts))],
        R=rot,
        T=trans,
    )
    if mode == "rgb":
        # Fixed-direction light. (Removed a dead PointLights assignment that
        # was immediately overwritten by this DirectionalLights.)
        lights = DirectionalLights(
            device=device,
            direction=((0.6, -0.6, -0.6), ),
            ambient_color=((ambient_col, ambient_col, ambient_col), ),
            diffuse_color=((diffuse_col, diffuse_col, diffuse_col), ),
            specular_color=((specular_col, specular_col, specular_col), ),
        )
        if shading == "soft":
            blend_params = BlendParams(sigma=blend_sigma, gamma=blend_gamma)
            shader = SoftPhongShader(device=device,
                                     cameras=cameras,
                                     lights=lights,
                                     blend_params=blend_params)
        elif shading == "hard":
            shader = HardPhongShader(device=device,
                                     cameras=cameras,
                                     lights=lights)
        elif shading == "flat":
            shader = HardFlatShader(device=device,
                                    cameras=cameras,
                                    lights=lights)
        else:
            # BUG FIX: message previously read "[sort|hard]" and omitted flat.
            raise ValueError(
                f"Shading {shading} for mode rgb not in [soft|hard|flat]")
    elif mode == "silh":
        blend_params = BlendParams(sigma=blend_sigma, gamma=blend_gamma)
        shader = SoftSilhouetteShader(blend_params=blend_params)
    # NOTE(review): this branch tests `shading`, unlike its siblings that test
    # `mode` — possibly intended as `mode == "faceidx"`. Kept as-is to
    # preserve behavior; confirm with callers.
    elif shading == "faceidx":
        shader = FaceIdxShader()
    elif (mode == "facecolor") and (shading == "hard"):
        shader = FaceColorShader(face_colors=face_colors)
    elif (mode == "facecolor") and (shading == "soft"):
        shader = SoftFaceColorShader(face_colors=face_colors,
                                     blend_gamma=blend_gamma,
                                     blend_sigma=blend_sigma)
    else:
        raise ValueError(
            f"Unhandled mode {mode} and shading {shading} combination")

    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(cameras=cameras,
                                  raster_settings=raster_settings),
        shader=shader,
    )
    if min_depth is not None:
        # Clamp z so no vertex ends up in front of the near plane.
        verts = torch.cat([verts[:, :, :2], verts[:, :, 2:].clamp(min_depth)],
                          2)
    if mode == "rgb":
        if colors is None:
            colors = get_colors(verts, color)
        tex = textures.TexturesVertex(verts_features=colors)

        meshes = Meshes(verts=verts, faces=faces, textures=tex)
    elif mode in ["silh", "facecolor"]:
        meshes = Meshes(verts=verts, faces=faces)
    else:
        # BUG FIX: message previously omitted the accepted "facecolor" mode.
        raise ValueError(f"Render mode {mode} not in [rgb|silh|facecolor]")

    square_images = renderer(meshes, cameras=cameras)
    # Undo the square padding: flip back to the source convention and crop the
    # rows added to make the canvas square.
    height_off = int(width - height)
    images = torch.flip(square_images, (1, 2))[:, height_off:]
    return images