Code example #1
    def test_to_angle(self):
        """Verify that the output of the to_angle(unit_vectors) function is correct on common inputs.
           Function: reflector_problem.raytracing.utils.to_angle(unit_vectors)
        """
        unit_vectors = torch.Tensor([
            [1., 0.],
            [3**(1 / 2) / 2, 1 / 2],
            [2**(1 / 2) / 2, 2**(1 / 2) / 2],
            [1 / 2, 3**(1 / 2) / 2],
            [0., 1.],
            [-1 / 2, 3**(1 / 2) / 2],
            [-2**(1 / 2) / 2, 2**(1 / 2) / 2],
            [-3**(1 / 2) / 2, 1 / 2],
            [-1., 0.],
            [-3**(1 / 2) / 2, -1 / 2],
            [-2**(1 / 2) / 2, -2**(1 / 2) / 2],
            [-1 / 2, -3**(1 / 2) / 2],
            [0., -1.],
            [1 / 2, -3**(1 / 2) / 2],
            [2**(1 / 2) / 2, -2**(1 / 2) / 2],
            [3**(1 / 2) / 2, -1 / 2],
        ])
        angles = torch.Tensor([
            0., pi / 6, pi / 4, pi / 3, pi / 2, 2 * pi / 3, 3 * pi / 4,
            5 * pi / 6, pi, 7 * pi / 6, 5 * pi / 4, 4 * pi / 3, 3 * pi / 2,
            5 * pi / 3, 7 * pi / 4, 11 * pi / 6
        ])

        output = utils.to_angle(unit_vectors)

        self.assertTrue(torch.allclose(output, angles))
        self.assertEqual(angles.dtype, output.dtype)
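For reference, here is a minimal sketch of a to_angle implementation that is consistent with the expectations encoded in this test (input of shape (..., 2) holding (x, y), angles in [0, 2*pi), dtype preserved); the actual implementation in reflector_problem.raytracing.utils may differ:

import math

import torch


def to_angle(unit_vectors: torch.Tensor) -> torch.Tensor:
    """Map unit vectors of shape (..., 2) to angles in [0, 2*pi)."""
    # atan2 returns values in (-pi, pi]; remainder wraps them into [0, 2*pi).
    return torch.atan2(unit_vectors[..., 1],
                       unit_vectors[..., 0]).remainder(2 * math.pi)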
Code example #2
def design_reflector_golds(extended_source_target,
                           extended_angular_support,
                           initial_target,
                           initial_angular_support,
                           input_measure_vector,
                           input_angular_support,
                           raytracer,
                           binning,
                           history,
                           n_steps=20,
                           lr=1.,
                           lr_multiplier=1.):
    history.save_vars(optimization="golds")
    history.save_vars(raytracer=str(raytracer))
    history.save_vars(binning=str(binning))

    modified_target = initial_target.clone()
    modified_angular_support = initial_angular_support.clone()

    input_measure_vector = input_measure_vector / input_measure_vector.sum()
    input_angular_support = input_angular_support.to(
        input_measure_vector.device)
    modified_target = modified_target.to(input_angular_support.device)
    modified_angular_support = modified_angular_support.to(
        input_angular_support.device)

    for i in range(n_steps):
        sinkhorn_result = compute_point_source_reflector(
            input_measure_vector.view(-1).to(input_measure_vector.device),
            input_angular_support.view(-1, 1),
            modified_target.view(-1).to(input_measure_vector.device),
            modified_angular_support.view(-1, 1))

        rays, weights = raytracer.raytrace_reflector(sinkhorn_result)
        centers, dist = binning(to_angle(rays), weights)

        # Multiplicative fixed-point correction: boost under-lit bins and
        # damp over-lit ones, with lr as a softening exponent.
        modified_target = modified_target * (
            extended_source_target / dist.view(*modified_target.shape))**lr

        # Renormalize so the modified target remains a probability vector.
        modified_target = modified_target / modified_target.sum(
            dim=-1, keepdim=True)

        history.save_step(
            i,
            modified_target=modified_target.detach().cpu().clone(),
            modified_angular_support=modified_angular_support.detach().cpu(
            ).clone(),
            rays=rays.detach().cpu().clone(),
            weights=weights.detach().cpu().clone(),
            lr=lr)

        lr = lr * lr_multiplier

    return modified_target, modified_angular_support, history
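The core of design_reflector_golds is the multiplicative fixed-point update: each bin of the current target is scaled by the ratio of desired to achieved intensity, raised to the learning rate. A toy illustration of one such update with made-up tensors (not the actual reflector quantities):

import torch

desired = torch.tensor([0.5, 0.3, 0.2])   # distribution we want to produce
achieved = torch.tensor([0.4, 0.4, 0.2])  # distribution the raytracer produced
target = desired.clone()                  # current modified target
lr = 1.0

# Under-lit bins (desired > achieved) are boosted, over-lit bins are damped;
# lr < 1 softens the correction.
target = target * (desired / achieved)**lr
target = target / target.sum(dim=-1, keepdim=True)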
Code example #3
    def test_to_angle_batch(self):
        """Verify the shapes of outputs of the function to_angle() function with multiple batch size.
           Function: reflector_problem.raytracing.utils.to_angle(unit_vectors)
        """
        B1, B2, B3, B4 = 4, 5, 6, 7  # Batch dimensions
        xs = torch.linspace(-1, 1, B1 * B2 * B3 * B4).view(B1, B2, B3, B4)
        ys = torch.sqrt(1 - xs**2)
        unit_vectors = torch.stack([xs, ys], dim=-1)

        angles = utils.to_angle(unit_vectors)

        self.assertEqual(angles.shape, (B1, B2, B3, B4))
        self.assertEqual(angles.dtype, unit_vectors.dtype)
Code example #4
def design_reflector_lbfgs(extended_source_target,
                           extended_angular_support,
                           initial_target,
                           initial_angular_support,
                           input_measure_vector,
                           input_angular_support,
                           raytracer,
                           loss,
                           history,
                           cost_normalization=True,
                           n_steps=20,
                           n_eval_steps=20,
                           line_search="strong_wolfe",
                           lr=1.):
    history.save_vars(optimization="lbfgs")
    history.save_vars(raytracer=str(raytracer))
    history.save_vars(loss=str(loss))

    modified_target = initial_target.clone()
    modified_angular_support = initial_angular_support.clone()

    input_angular_support = input_angular_support.to(
        input_measure_vector.device)
    modified_angular_support = modified_angular_support.to(
        input_angular_support.device)

    # Parameterize the target by unconstrained logits; softmax recovers a
    # probability vector (the additive logsumexp term is only a constant
    # shift, which softmax ignores).
    modified_target_log = modified_target.log() + modified_target.logsumexp(
        dim=-1, keepdim=False)
    # Move to the target device *before* enabling gradients and building the
    # optimizer, so the optimizer holds the same leaf tensor that the closure
    # differentiates.
    modified_target_log = modified_target_log.to(input_angular_support.device)
    modified_target_log.requires_grad_(True)
    optim = torch.optim.LBFGS([modified_target_log],
                              lr=lr,
                              max_iter=n_steps,
                              max_eval=n_eval_steps,
                              tolerance_change=1e-13,
                              tolerance_grad=1e-13,
                              line_search_fn=line_search)

    cost_normalizer = 1.

    if cost_normalization:
        sinkhorn_result = compute_point_source_reflector(
            input_measure_vector.view(-1).to(input_measure_vector.device),
            input_angular_support.view(-1, 1),
            modified_target_log.softmax(dim=-1).view(-1).to(
                input_measure_vector.device),
            modified_angular_support.view(-1, 1))

        rays, weights = raytracer.raytrace_reflector(sinkhorn_result)
        cost_normalizer = loss(weights, to_angle(rays), extended_source_target,
                               extended_angular_support)
        cost_normalizer = cost_normalizer.detach()

    def optim_closure():
        optim.zero_grad()
        sinkhorn_result = compute_point_source_reflector(
            input_measure_vector.view(-1).to(input_measure_vector.device),
            input_angular_support.view(-1, 1),
            modified_target_log.softmax(dim=-1).view(-1).to(
                input_measure_vector.device),
            modified_angular_support.view(-1, 1))

        rays, weights = raytracer.raytrace_reflector(sinkhorn_result)

        cost = loss(weights, to_angle(rays), extended_source_target,
                    extended_angular_support)
        cost = cost / cost_normalizer

        cost.backward()

        # Skip saving when this step is already in the history, as happens
        # with LBFGS evaluation steps.
        if optim.state[optim._params[0]]["n_iter"] not in history.step_numbers:
            history.save_step(
                optim.state[optim._params[0]]["n_iter"],
                modified_target=modified_target_log.softmax(
                    dim=-1).detach().cpu().clone(),
                modified_angular_support=modified_angular_support.detach().cpu(
                ).clone(),
                rays=rays.detach().cpu().clone(),
                weights=weights.detach().cpu().clone(),
                cost=cost.detach().cpu().clone(),
                lr=lr)

        return cost

    optim.step(optim_closure)

    return (modified_target_log.softmax(dim=-1), modified_angular_support,
            history)
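The optimization pattern above, unconstrained logits updated by LBFGS inside a closure with softmax recovering a probability vector, can be reduced to a self-contained sketch (toy least-squares objective, not the reflector loss):

import torch

target = torch.tensor([0.5, 0.3, 0.2])       # desired distribution
logits = torch.zeros(3, requires_grad=True)  # unconstrained parameters

optim = torch.optim.LBFGS([logits], max_iter=50,
                          line_search_fn="strong_wolfe")


def closure():
    # LBFGS may evaluate the closure several times per step, so it must
    # recompute the loss and gradients from scratch on every call.
    optim.zero_grad()
    probs = logits.softmax(dim=-1)  # always a valid probability vector
    cost = ((probs - target)**2).sum()
    cost.backward()
    return cost


optim.step(closure)
print(logits.softmax(dim=-1))  # approximately [0.5, 0.3, 0.2]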
Code example #5
def design_reflector_gd(extended_source_target,
                        extended_angular_support,
                        initial_target,
                        initial_angular_support,
                        input_measure_vector,
                        input_angular_support,
                        raytracer,
                        loss,
                        optimizer,
                        history,
                        cost_normalization=True,
                        n_steps=20,
                        lr=1.,
                        lr_multiplier=1.):
    history.save_vars(optimization="gradient_descent")
    history.save_vars(raytracer=str(raytracer))
    history.save_vars(loss=str(loss))

    modified_target = initial_target.clone()
    modified_angular_support = initial_angular_support.clone()

    input_angular_support = input_angular_support.to(
        input_measure_vector.device)
    modified_angular_support = modified_angular_support.to(
        input_angular_support.device)

    # Parameterize the target by unconstrained logits; softmax recovers a
    # probability vector (the additive logsumexp term is only a constant
    # shift, which softmax ignores).
    modified_target_log = modified_target.log() + modified_target.logsumexp(
        dim=-1, keepdim=False)
    # Move to the target device *before* enabling gradients and building the
    # optimizer, so the optimizer updates the same leaf tensor that the loss
    # differentiates.
    modified_target_log = modified_target_log.to(input_angular_support.device)
    modified_target_log.requires_grad_(True)
    optim = optimizer([modified_target_log], lr=lr)
    scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
        optim, lr_lambda=lambda step: lr_multiplier)

    cost_normalizer = 1.

    if cost_normalization:
        sinkhorn_result = compute_point_source_reflector(
            input_measure_vector.view(-1).to(input_measure_vector.device),
            input_angular_support.view(-1, 1),
            modified_target_log.softmax(dim=-1).view(-1).to(
                input_measure_vector.device),
            modified_angular_support.view(-1, 1))

        rays, weights = raytracer.raytrace_reflector(sinkhorn_result)
        cost_normalizer = loss(weights, to_angle(rays), extended_source_target,
                               extended_angular_support)
        cost_normalizer = cost_normalizer.detach()

    for i in range(n_steps):
        optim.zero_grad()
        sinkhorn_result = compute_point_source_reflector(
            input_measure_vector.view(-1).to(input_measure_vector.device),
            input_angular_support.view(-1, 1),
            modified_target_log.softmax(dim=-1).view(-1).to(
                input_measure_vector.device),
            modified_angular_support.view(-1, 1))

        rays, weights = raytracer.raytrace_reflector(sinkhorn_result)

        cost = loss(weights, to_angle(rays), extended_source_target,
                    extended_angular_support)
        cost = cost / cost_normalizer

        cost.backward()

        optim.step()

        history.save_step(i,
                          modified_target=modified_target_log.softmax(
                              dim=-1).detach().cpu().clone(),
                          modified_angular_support=modified_angular_support.
                          detach().cpu().clone(),
                          rays=rays.detach().cpu().clone(),
                          weights=weights.detach().cpu().clone(),
                          cost=cost.detach().cpu().clone(),
                          lr=scheduler.get_last_lr())

        scheduler.step()

    return (modified_target_log.softmax(dim=-1), modified_angular_support,
            history)
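The MultiplicativeLR scheduler used here multiplies the learning rate by a constant factor after every step, i.e. a geometric decay controlled by lr_multiplier. A minimal illustration using standard PyTorch:

import torch

params = [torch.zeros(3, requires_grad=True)]
optim = torch.optim.SGD(params, lr=1.0)
scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
    optim, lr_lambda=lambda step: 0.9)  # multiply the lr by 0.9 each step

for _ in range(3):
    optim.step()
    scheduler.step()
    print(scheduler.get_last_lr())  # [0.9], then [0.81], then [0.729]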
Code example #6
def design_reflector_gd_direct(extended_source_target,
                               extended_angular_support,
                               initial_reflector_potential,
                               initial_reflector_potential_gradients,
                               initial_reflector_angular_support,
                               raytracer,
                               loss,
                               optimizer,
                               history,
                               cost_normalization=True,
                               n_steps=20,
                               lr=1.,
                               lr_multiplier=1.):
    history.save_vars(optimization="gradient_descent")
    history.save_vars(raytracer=str(raytracer))
    history.save_vars(loss=str(loss))
    
    modified_potential = initial_reflector_potential.clone()
    modified_potential_gradients = initial_reflector_potential_gradients.clone()
    modified_angular_support = initial_reflector_angular_support.clone()

    modified_potential.requires_grad_(True)
    modified_potential_gradients.requires_grad_(True)

    optim = optimizer([modified_potential, modified_potential_gradients],
                      lr=lr)
    scheduler = torch.optim.lr_scheduler.MultiplicativeLR(
        optim, lr_lambda=lambda step: lr_multiplier)

    cost_normalizer = 1.

    if cost_normalization:
        rays, weights = raytracer.raytrace_reflector_raw(
            modified_angular_support, modified_potential,
            modified_potential_gradients)
        cost_normalizer = loss(weights, to_angle(rays), extended_source_target,
                               extended_angular_support)
        cost_normalizer = cost_normalizer.detach()

    for i in range(n_steps):
        optim.zero_grad()
        rays, weights = raytracer.raytrace_reflector_raw(
            modified_angular_support, modified_potential,
            modified_potential_gradients)

        cost = loss(weights, to_angle(rays), extended_source_target,
                    extended_angular_support)
        cost = cost / cost_normalizer

        cost.backward()

        optim.step()

        history.save_step(
            i,
            modified_potential=modified_potential.detach().cpu().clone(),
            modified_potential_gradients=modified_potential_gradients.detach(
            ).cpu().clone(),
            rays=rays.detach().cpu().clone(),
            weights=weights.detach().cpu().clone(),
            cost=cost.detach().cpu().clone(),
            lr=scheduler.get_last_lr())

        scheduler.step()

    return (modified_potential, modified_potential_gradients,
            modified_angular_support, history)
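Throughout these routines, cost_normalization divides every loss value by the detached initial cost, so optimization starts at a cost of 1.0 and learning rates transfer more easily between problems of different scale. A stripped-down sketch of the idea on a toy objective:

import torch

x = torch.zeros(3, requires_grad=True)
target = torch.tensor([1., 2., 3.])


def raw_cost():
    return ((x - target)**2).sum()


# Detach so the normalizer is a constant, not part of the autograd graph.
cost_normalizer = raw_cost().detach()

optim = torch.optim.SGD([x], lr=0.1)
for _ in range(100):
    optim.zero_grad()
    cost = raw_cost() / cost_normalizer  # equals 1.0 on the first iteration
    cost.backward()
    optim.step()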