Example #1
    def calculate_rhos(self, Ls, Vs, normals, parameters):
        Hs, NdotHs = BrdfParametrization._calculate_NdotHs(Ls, Vs, normals)
        NdotLs = inner_product(normals, Ls)
        NdotVs = inner_product(normals, Vs)
        VdotHs = inner_product(Vs, Hs)

        p_diffuse = parameters['diffuse'].view(1, -1, 3)
        p_specular = parameters['specular']['albedo'].view(1, -1, 3)
        # somewhat non-standard: we parametrize roughness by its square root,
        # which yields better resolution around zero
        p_roughness = parameters['specular']['roughness'].view(1, -1, 1)**2

        # fresnel term -- optional
        if 'eta' in parameters['specular']:
            p_eta = parameters['specular']['eta'].view(1, -1, 1)
            Fs = Fresnel(VdotHs, p_eta)
        else:
            Fs = 1.

        # microfacet distribution
        Ds = GTR(NdotHs, p_roughness)
        # Smith's shadow-masking function
        Gs = SmithG1(NdotLs, p_roughness) * SmithG1(NdotVs, p_roughness)

        denominator = 4 * np.pi * NdotLs * NdotVs
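        # a 1 is added to the denominator wherever it is exactly zero (grazing
        # or back-facing configurations), avoiding a division by zero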
        CTs = p_specular * (Fs * Ds * Gs) / (denominator +
                                             (denominator == 0).float())

        rhos = p_diffuse / np.pi + CTs

        return rhos, NdotHs
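
For reference, a minimal self-contained sketch of the same Cook-Torrance structure: the GTR, SmithG1 and Fresnel helpers are not shown in this snippet, so the common GGX forms stand in for them here (with Fs = 1) and may differ in detail from the originals; all names and sample values are illustrative only.

import math
import torch

def _dot(a, b):
    # inner product along the last dimension, kept as a singleton channel
    return (a * b).sum(dim=-1, keepdim=True)

def cook_torrance_rho(Ls, Vs, normals, diffuse, specular, roughness):
    Hs = torch.nn.functional.normalize(Ls + Vs, dim=-1)
    NdotH, NdotL, NdotV = _dot(normals, Hs), _dot(normals, Ls), _dot(normals, Vs)
    a2 = roughness**2
    Ds = a2 / (math.pi * (NdotH**2 * (a2 - 1) + 1)**2)              # GGX distribution
    G1 = lambda c: 2 * c / (c + torch.sqrt(a2 + (1 - a2) * c**2))   # Smith G1
    Gs = G1(NdotL.clamp(min=1e-6)) * G1(NdotV.clamp(min=1e-6))
    denominator = 4 * math.pi * NdotL * NdotV
    cts = specular * (Ds * Gs) / (denominator + (denominator == 0).float())
    return diffuse / math.pi + cts

n = torch.tensor([[0., 0., 1.]])
l_dir = torch.nn.functional.normalize(torch.tensor([[0.3, 0.1, 1.0]]), dim=-1)
v_dir = torch.nn.functional.normalize(torch.tensor([[-0.2, 0.0, 1.0]]), dim=-1)
rho = cook_torrance_rho(l_dir, v_dir, n, torch.tensor([0.8]),
                        torch.tensor([0.2]), torch.tensor([0.15]))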
Example #2
    def implied_normal_image(self):
        """
        Returns the normal image implied by the current state of the LocationParametrization.
        
        Outputs:
            normals         HxWx3 torch.tensor containing the normal vectors
        """
        location_image = self.create_image(self.location_vector())  # HxWx3
        if self.mask[-1, :].sum() + self.mask[:, -1].sum() > 0:
            error(
                "The mask should not reach the bottom and right image edges.")
        down_vectors = normalize(location_image[:-1, :-1] -
                                 location_image[1:, :-1])
        right_vectors = normalize(location_image[:-1, :-1] -
                                  location_image[:-1, 1:])
        normal_image = normalize(cross_product(down_vectors, right_vectors))
        camloc = self.invRt[:3, 3:]
        # make sure the normal is the one that points towards the camera
        reorientations = inner_product(
            normal_image,
            camloc.view(1, 1, 3) - location_image[:-1, :-1]).sign()
        normal_image = normal_image * reorientations  # H-1 x W-1 x 3
        # extend the normals to the edges of the mask
        local_normal_mean = normalize(
            torch.nn.functional.conv2d(normal_image.permute(2, 0, 1)[:, None],
                                       torch.ones((1, 1, 5, 5),
                                                  device=normal_image.device),
                                       padding=2)[:, 0].permute(1, 2, 0))

        invalid_normals = norm(normal_image, keepdim=False) == 0
        replacement_mask = (
            self.mask[:-1, :-1] *
            (invalid_normals + ~self.mask[1:, :-1] + ~self.mask[:-1, 1:] > 0))
        normal_image[replacement_mask] = local_normal_mean[replacement_mask]
        # for badly connected masks, some of the pixels are currently still not filled
        # fill them just with a normal pointing roughly in the right direction
        invalid_normals = norm(normal_image) == 0
        replacement_mask = invalid_normals[:, :, 0] * self.mask[:-1, :-1]
        mean_normal = normalize(
            local_normal_mean.view(-1, 3).sum(dim=0, keepdim=True))
        normal_image[replacement_mask] = mean_normal
        normal_image = torch.cat((normal_image,
                                  torch.zeros(1,
                                              normal_image.shape[1],
                                              3,
                                              dtype=normal_image.dtype,
                                              device=normal_image.device)),
                                 dim=0)
        normal_image = torch.cat((normal_image,
                                  torch.zeros(normal_image.shape[0],
                                              1,
                                              3,
                                              dtype=normal_image.dtype,
                                              device=normal_image.device)),
                                 dim=1)

        return normal_image
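
The core of the construction above is a finite-difference cross product: neighbouring 3D locations give a "down" and a "right" tangent vector whose cross product is the surface normal. A minimal self-contained illustration of just that step, without the masking, reorientation and hole-filling of the full method:

import torch

def normals_from_locations(locations):
    # locations: HxWx3 world-space points; returns (H-1)x(W-1)x3 unit normals
    down = locations[:-1, :-1] - locations[1:, :-1]    # towards the next row
    right = locations[:-1, :-1] - locations[:-1, 1:]   # towards the next column
    n = torch.cross(down, right, dim=-1)
    return torch.nn.functional.normalize(n, dim=-1)

# a flat plane z = 5 yields a constant normal along the z-axis
ys, xs = torch.meshgrid(torch.arange(4.), torch.arange(4.), indexing='ij')
plane = torch.stack((xs, ys, torch.full_like(xs, 5.)), dim=-1)
print(normals_from_locations(plane)[0, 0])  # tensor([0., 0., -1.])
# (the full method would flip such normals to face the camera)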
Example #3
    def _calculate_NdotHs(Ls, Vs, normals):
        """
        Internal function for calculation of half-vectors and their inner products
        with the surface normals.

        Inputs:
            Ls              NxLx3 torch.tensor with the directions between
                                the points and the scene lights
            Vs              Nx3 torch.tensor with the directions between
                                the points and the camera
            normals         Nx3 torch.tensor with the surface normals
        
        Outputs:
            Hs              NxLx3 torch.tensor with the normalized half-vectors between
                                viewing and light directions.
            NdotHs          NxLx1 torch.tensor containing the inner products between
                                the surface normals and the view-light half vectors
        """
        Hs = (Ls + Vs)
        Hs = normalize(Hs)
        NdotHs = inner_product(normals, Hs)
        return Hs, NdotHs
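
A quick shape check with minimal stand-ins for the normalize and inner_product helpers (assumed to operate along the last dimension); the Nx3 inputs from the docstring get a singleton light dimension here so that the addition broadcasts against the NxLx3 light directions:

import torch

normalize = lambda x: torch.nn.functional.normalize(x, dim=-1)
inner_product = lambda a, b: (a * b).sum(dim=-1, keepdim=True)

N, L = 5, 3
Ls = normalize(torch.randn(N, L, 3))  # N points x L lights
Vs = normalize(torch.randn(N, 1, 3))  # broadcasts over the L lights
normals = normalize(torch.randn(N, 1, 3))

Hs = normalize(Ls + Vs)               # NxLx3 half-vectors
NdotHs = inner_product(normals, Hs)   # NxLx1
print(Hs.shape, NdotHs.shape)         # torch.Size([5, 3, 3]) torch.Size([5, 3, 1])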
Example #4
    def evaluate(self, simulations, observations, experiment_state,
                 data_adapter):
        """
        Evaluate the cosine distance between the normals implied by the depth
        and the independently estimated normals.
        """
        normals_from_depth = experiment_state.locations.implied_normal_vector()
        normals_estimated = experiment_state.normals.normals()
        inner_products = inner_product(normals_from_depth, normals_estimated)
        return 1 - inner_products
Example #5
def closed_form_lambertian_solution(experiment_state,
                                    data_adapter,
                                    sample_radius=7,
                                    shadows_occlusions=True,
                                    verbose=True):
    """
    Calculate the closed form solution of the materials and normals, given a Lambertian assumption.

    Inputs:
        experiment_state
        data_adapter
        [sample_radius]             Observations can optionally be box-blurred before extraction. Defaults to 7.
        [shadows_occlusions]        Whether to simulate obstructions in the rendering. Defaults to True.
        [verbose]                   Whether to be verbose about the progress. Defaults to True.

    Outputs:
        normals                     Nx3 torch.tensor containing the normal estimates
        albedos                     Nx3 torch.tensor containing the diffuse albedo estimates
        inliers                     NxC containing, for each point and each observation, whether it is an inlier in the RANSAC.
        residuals                   NxCx3 containing, for each point and each observation, the residual modelling error.
    """
    device = torch.device(general_settings.device_name)
    # calculates the closed-form solution given the current state and the measurements
    with torch.no_grad():
        light_intensities = []
        light_directions = []
        shadows = []
        observations = []
        occlusions = []

        training_indices_batches, training_light_infos_batches = data_adapter.get_training_info(
        )

        for batch_indices, batch_light_infos in zip(
                training_indices_batches, training_light_infos_batches):
            batch_light_intensities, batch_light_directions, batch_shadows = experiment_state.light_parametrization.get_light_intensities(
                experiment_state.locations,
                experiment_state.observation_poses.Rts(batch_indices),
                light_infos=batch_light_infos,
                calculate_shadowing=shadows_occlusions)
            light_intensities.append(batch_light_intensities.transpose(0, 1))
            light_directions.append(batch_light_directions.transpose(0, 1))
            shadows.append(batch_shadows.transpose(0, 1))

            batch_observations, batch_occlusions = experiment_state.extract_observations(
                data_adapter,
                batch_indices,
                smoothing_radius=sample_radius,
                calculate_occlusions=shadows_occlusions)
            batch_occlusions = (
                batch_occlusions +
                (batch_observations[..., 1] == OBSERVATION_OUT_OF_BOUNDS)) > 0
            observations.append(batch_observations.transpose(0, 1))
            occlusions.append(batch_occlusions.transpose(0, 1))

        light_intensities = torch.cat(light_intensities, dim=1)
        light_directions = torch.cat(light_directions, dim=1)
        shadows = torch.cat(shadows, dim=1)
        observations = torch.cat(observations, dim=1)
        occlusions = torch.cat(occlusions, dim=1)

        incident_light = (  # points x views x color channels x direction coordinates
            light_intensities[:, :, :, None] * light_directions[:, :, None, :])

        shadowing_mask = (shadows == 0).float()[:, :, None]
        occlusion_mask = (occlusions == 0).float()[:, :, None]
        valid_mask = (shadowing_mask * occlusion_mask)
        invalids = (valid_mask.sum(dim=1) <= 2).float().view(-1, 1, 1)
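        # points with too few valid observations get this huge diagonal added
        # to their normal equations, keeping the 3x3 systems invertible and
        # driving the corresponding solutions towards zero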
        eye = torch.eye(3, device=invalids.device).view(1, 3, 3) * 1e8

        # normals are estimated from a gray-scale version. However, here we aim
        # for maximum SNR rather than a visually pleasing grayscale, so no
        # color-dependent weighting is applied
        gray_light_intensities = incident_light.mean(
            dim=-2, keepdim=False) * valid_mask
        gray_observations = observations.mean(dim=-1,
                                              keepdim=True) * valid_mask

        # a RANSAC approach
        # at every iteration, we take a small subset to calculate the solution from
        # and count the number of inliers
        subset_size = min(observations.shape[1], 6)
        threshold = 1e4
        # we aim for 10 successful RANSAC samples (i.e. outlier-free subsets),
        # not taking the sample set size into account
        estimated_outlier_ratio = 0.20
        targeted_success_chance = 1 - 1e-4
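        # standard RANSAC iteration count: log(1 - p) / log(1 - (1 - e)^s),
        # with p the success chance, e the outlier ratio and s the subset size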
        nr_its = 10 * np.log(1 - targeted_success_chance) / np.log(
            1 - (1 - estimated_outlier_ratio)**subset_size
        ) if subset_size < observations.shape[1] else 1
        best_inlier_counts = torch.zeros(observations.shape[0], 1, 1).to(
            observations.device) - 1
        best_inliers = torch.zeros(*observations.shape[:2],
                                   1).to(observations.device)
        ransac_loop = tqdm(range(int(nr_its))) if verbose else range(
            int(nr_its))
        for iteration in ransac_loop:
            subset = np.random.choice(observations.shape[1],
                                      size=[subset_size],
                                      replace=False)
            subset = torch.tensor(subset,
                                  dtype=torch.long,
                                  device=observations.device)
            intensities_subset = gray_light_intensities.index_select(
                dim=1, index=subset)
            observations_subset = gray_observations.index_select(dim=1,
                                                                 index=subset)
            shadowing_subset = shadowing_mask.index_select(dim=1, index=subset)
            occlusion_subset = occlusion_mask.index_select(dim=1, index=subset)
            valid_subset = shadowing_subset * occlusion_subset
            invalids_subset = (valid_subset.sum(dim=1) <= 2).float().view(
                -1, 1, 1)
            intensities_subset_pinv = (
                intensities_subset.transpose(1, 2) @ intensities_subset + eye *
                invalids_subset).inverse() @ intensities_subset.transpose(
                    1, 2)
            solution_subset = intensities_subset_pinv @ observations_subset

            residuals = gray_light_intensities @ solution_subset - gray_observations
            inliers = ((residuals.abs() < threshold) * valid_mask).float()
            inlier_counts = inliers.sum(dim=1, keepdim=True)
            better_subset = (inlier_counts > best_inlier_counts).float()

            best_inlier_counts = best_inlier_counts * (
                1 - better_subset) + inlier_counts * better_subset
            best_inliers = best_inliers * (
                1 - better_subset) + inliers * better_subset

            if verbose:
                ransac_loop.set_description(
                    desc=
                    "Lambertian closed form | RANSAC total energy | avg inliers %8.5f"
                    % best_inlier_counts.mean())

        # now calculate the normal estimate for the best inlier group
        inlier_intensities = gray_light_intensities * best_inliers
        inlier_observations = gray_observations * best_inliers
        inlier_invalids = ((valid_mask * best_inliers).sum(dim=1) <=
                           2).float().view(-1, 1, 1)
        inlier_intensities_pinv = (
            inlier_intensities.transpose(1, 2) @ inlier_intensities + eye *
            inlier_invalids).inverse() @ inlier_intensities.transpose(1, 2)
        inlier_normals = normalize(
            (inlier_intensities_pinv @ inlier_observations).view(-1, 3))

        albedos = []
        residuals = []
        inlier_mask = best_inliers * valid_mask
        for c in range(3):
            c_light_intensities = inner_product(inlier_normals.view(
                -1, 1, 3), incident_light[:, :, c]) * inlier_mask
            c_observations = observations[:, :, c, None] * inlier_mask
            c_light_intensities_pinv = (
                c_light_intensities.transpose(1, 2) @ c_light_intensities +
                inlier_invalids).inverse() @ c_light_intensities.transpose(
                    1, 2)
            c_albedo = (c_light_intensities_pinv @ c_observations)
            albedos.append(np.pi * c_albedo.view(-1, 1))
            c_residuals = (c_light_intensities @ c_albedo - c_observations)
            residuals.append(c_residuals)
        albedos = torch.cat(albedos, dim=1)
        residuals = torch.cat(residuals, dim=2)

    return inlier_normals, albedos, best_inliers, residuals
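
Per point, the closed-form step above solves the Lambertian system S·g ≈ b in the least-squares sense, where the rows of S are intensity-scaled light directions and g = (albedo / π) · n: the direction of g gives the normal, and π times its length the albedo (hence the np.pi factor on the albedos above). A minimal sketch of that per-point solve with a synthetic sanity check; the helper name and data are illustrative, and the inlier/validity weighting of the full routine is omitted:

import math
import torch

def lambertian_solve(S, b):
    # S: Lx3 intensity-scaled light directions, b: Lx1 gray observations.
    # Least-squares solution g of S @ g ~= b; its direction is the normal,
    # pi times its length is the albedo.
    g = torch.linalg.lstsq(S, b).solution.view(3)
    albedo = math.pi * g.norm()
    return g / g.norm().clamp(min=1e-12), albedo

normal = torch.nn.functional.normalize(torch.tensor([0.2, -0.1, 1.0]), dim=0)
S = torch.randn(8, 3)
b = (S @ normal).view(-1, 1) * (0.6 / math.pi)  # synthesized with albedo 0.6
print(lambertian_solve(S, b))  # recovers the normal and albedo 0.6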
Example #6
    def simulate(self,
                 observation_indices,
                 light_infos,
                 shadow_cache=None,
                 override=None,
                 calculate_shadowing=True):
        """
        Simulate the result of the current experiment state for a given set of observation indices and light infos.
        The shadow mask is included if requested (True by default).
        Shadows can be cached persistently by passing a cache dictionary to shadow_cache.
        """
        if shadow_cache is None:
            shadow_cache = {}

        surface_points = self.locations.location_vector()
        obs_Rt = self.observation_poses.Rts(observation_indices)
        shadow_cache_miss = light_infos not in shadow_cache
        light_intensities, light_directions, calculated_shadowing = self.light_parametrization.get_light_intensities(
            self.locations,
            obs_Rt,
            light_infos=light_infos,
            calculate_shadowing=calculate_shadowing and shadow_cache_miss)

        if calculate_shadowing:
            if shadow_cache_miss:
                shadow_mask = calculated_shadowing
                shadow_cache[light_infos] = shadow_mask
            else:
                shadow_mask = shadow_cache[light_infos]
        else:
            shadow_mask = None

        obs_camloc = self.observation_poses.camlocs(observation_indices)
        view_directions = normalize(
            obs_camloc.view(-1, 1, 3) - surface_points[None])
        if override is not None and "geometry" in override:
            surface_normals = NormalParametrizationFactory("hard linked")(
                self.locations).normals()
            brdf = BrdfParametrizationFactory("cook torrance F1")()
            N, device = surface_normals.shape[0], surface_normals.device
            surface_materials = {
                "diffuse": torch.empty(N, 3, device=device).fill_(0.8),
                "specular": {
                    "albedo":
                    torch.empty(N, 3, device=device).fill_(0.2),
                    "roughness":
                    torch.empty(N, 1, device=device).fill_(0.15 - 0.10 *
                                                           ("1" in override)),
                }
            }
        else:
            surface_materials = self.materials.get_brdf_parameters()
            if override is not None:
                surface_materials = dict(surface_materials)
                if override == "diffuse":
                    surface_materials['specular'] = dict(
                        surface_materials['specular'])
                    surface_materials['specular']['albedo'] = torch.zeros_like(
                        surface_materials['specular']['albedo'])
                elif override == "specular":
                    surface_materials['diffuse'] = torch.zeros_like(
                        surface_materials['diffuse'])
            surface_normals = self.normals.normals().view(1, -1, 3)
            brdf = self.brdf

        surface_rhos, NdotHs = brdf.calculate_rhos(light_directions,
                                                   view_directions,
                                                   surface_normals,
                                                   surface_materials)

        incident_light = light_intensities * inner_product(
            light_directions, surface_normals)

        incident_light.clamp_(min=0.)
        if shadow_mask is not None:
            incident_light[shadow_mask] = 0.

        simulation = surface_rhos * incident_light

        # TODO: this is where vignetting would normally come in

        simulation[incident_light <= 0] = SIMULATION_SHADOWED

        return simulation
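
A hedged usage sketch to close: experiment_state, observation_indices and light_infos are assumed to come from the surrounding training setup and are not constructed here. Passing the same dictionary as shadow_cache across calls lets simulate reuse shadow masks for light configurations it has already computed:

# hypothetical usage, assuming an initialized experiment_state and the
# observation_indices / light_infos produced by the data adapter
shadow_cache = {}  # persists shadow masks across calls, keyed by light_infos
full = experiment_state.simulate(observation_indices, light_infos,
                                 shadow_cache=shadow_cache)
# re-render with the specular albedo zeroed; the cached shadow mask is reused
diffuse_only = experiment_state.simulate(observation_indices, light_infos,
                                         shadow_cache=shadow_cache,
                                         override="diffuse")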