Example #1
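These snippets are test functions and are not self-contained; they assume `pytest` and the `optplan` module are imported at module level. A minimal sketch of the needed imports, assuming the spins-b package layout:

import pytest

# Assumed import path for `optplan` (spins-b layout); adjust if the package
# layout differs.
from spins.invdes.problem_graph import optplan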
def test_prod_mul_bad_node_raise_type_error():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    prod1 = optplan.Product(functions=[var1, var2])

    with pytest.raises(TypeError, match="multiply a node"):
        prod1 * optplan.SimulationSpace()
Example #2
def test_prod_two_funs():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    prod1 = var1 * var2

    assert isinstance(prod1, optplan.Product)
    assert prod1.functions == [var1, var2]
Example #3
def test_sum_add_bad_node_raise_type_error():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    sum1 = optplan.Sum(functions=[var1, var2])

    with pytest.raises(TypeError, match="add a node"):
        sum1 + optplan.SimulationSpace()
Example #4
def test_sum_two_funs():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    sum1 = var1 + var2

    assert isinstance(sum1, optplan.Sum)
    assert sum1.functions == [var1, var2]
Example #5
def test_sum_add_to_fun():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    var3 = optplan.Parameter()
    sum1 = optplan.Sum(functions=[var1, var2])
    sum2 = sum1 + var3

    assert isinstance(sum2, optplan.Sum)
    assert sum2.functions == [var1, var2, var3]
Example #6
def test_prod_mul_to_fun():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    var3 = optplan.Parameter()
    prod1 = optplan.Product(functions=[var1, var2])
    prod2 = prod1 * var3

    assert isinstance(prod2, optplan.Product)
    assert prod2.functions == [var1, var2, var3]
Example #7
def test_prod_mul_to_prod():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    var3 = optplan.Parameter()
    var4 = optplan.Parameter()
    prod1 = optplan.Product(functions=[var1, var2])
    prod2 = optplan.Product(functions=[var3, var4])
    prod3 = prod1 * prod2

    assert isinstance(prod3, optplan.Product)
    assert prod3.functions == [var1, var2, var3, var4]
Example #8
def test_prod_mul_to_value():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    prod1 = optplan.Product(functions=[var1, var2])
    prod2 = prod1 * 3

    assert isinstance(prod2, optplan.Product)
    assert var1 in prod2.functions
    assert var2 in prod2.functions
    assert prod2.functions[-1].value.real == 3
    assert prod2.functions[-1].value.imag == 0
Example #9
def test_sum_add_to_sum():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    var3 = optplan.Parameter()
    var4 = optplan.Parameter()
    sum1 = optplan.Sum(functions=[var1, var2])
    sum2 = optplan.Sum(functions=[var3, var4])
    sum3 = sum1 + sum2

    assert isinstance(sum3, optplan.Sum)
    assert sum3.functions == [var1, var2, var3, var4]
Example #10
def test_sum_add_to_value():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    sum1 = optplan.Sum(functions=[var1, var2])
    sum2 = sum1 + 3

    assert isinstance(sum2, optplan.Sum)
    assert var1 in sum2.functions
    assert var2 in sum2.functions
    assert sum2.functions[-1].value.real == 3
    assert sum2.functions[-1].value.imag == 0
Example #11
def test_sum_fun_and_const_reverse():
    var1 = optplan.Parameter()
    sum1 = 2 + var1
    assert isinstance(sum1, optplan.Sum)
    assert len(sum1.functions) == 2
    assert sum1.functions[0] == var1
    assert sum1.functions[1].value.real == 2
    assert sum1.functions[1].value.imag == 0
Example #12
def test_sum_fun_and_const_obj():
    var1 = optplan.Parameter()
    const1 = optplan.make_constant(2)
    sum1 = var1 + const1
    assert isinstance(sum1, optplan.Sum)
    assert len(sum1.functions) == 2
    assert sum1.functions[0] == var1
    assert sum1.functions[1] == const1
Example #13
def test_prod_fun_and_const_reverse():
    var1 = optplan.Parameter()
    prod1 = 2 * var1
    assert isinstance(prod1, optplan.Product)
    assert len(prod1.functions) == 2
    assert prod1.functions[0] == var1
    assert prod1.functions[1].value.real == 2
    assert prod1.functions[1].value.imag == 0
Example #14
def test_prod_fun_and_const_obj():
    var1 = optplan.Parameter()
    const1 = optplan.make_constant(2)
    prod1 = var1 * const1
    assert isinstance(prod1, optplan.Product)
    assert len(prod1.functions) == 2
    assert prod1.functions[0] == var1
    assert prod1.functions[1] == const1
Example #15
def test_prod_fun_and_bad_obj_raises_type_error():
    with pytest.raises(TypeError, match="multiply node"):
        optplan.Parameter() * optplan.SimulationSpace()
Example #16
def test_div():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    div = var2 / var1
    assert isinstance(div, optplan.Product)
Example #17
def test_sub():
    var1 = optplan.Parameter()
    var2 = optplan.Parameter()
    diff = var2 - var1
    assert isinstance(diff, optplan.Sum)
Example #18
def test_power_constant():
    var1 = optplan.Parameter()
    power1 = var1**optplan.make_constant(2)
    assert isinstance(power1, optplan.Power)
    assert power1.function == var1
    assert power1.exp == 2
Example #19
def test_sum_fun_and_bad_obj_raises_type_error():
    with pytest.raises(TypeError, match="add node"):
        optplan.Parameter() + optplan.SimulationSpace()
Example #20
def test_power_number():
    var1 = optplan.Parameter()
    power1 = var1**2
    assert isinstance(power1, optplan.Power)
    assert power1.function == var1
    assert power1.exp == 2
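The tests above exercise the `+`, `-`, `*`, `/`, and `**` overloads on optplan nodes individually. A small sketch of how they compose into a single expression graph (illustrative only; the parameters and the resulting node types follow the behavior verified by the tests above, not a real objective):

from spins.invdes.problem_graph import optplan  # assumed import path

f1 = optplan.Parameter()
f2 = optplan.Parameter()

# 2 * f1 builds a Product, adding f2 wraps it in a Sum, and **2 wraps the
# Sum in a Power node.
obj = (2 * f1 + f2)**2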
Example #21
def create_transformations(
    obj: optplan.Function,
    monitors: List[optplan.Monitor],
    cont_iters: int,
    disc_iters: int,
    sim_space: optplan.SimulationSpaceBase,
    min_feature: float = 100,
    cont_to_disc_factor: float = 1.1,
) -> List[optplan.Transformation]:
    """Creates a list of transformations for the optimization.

    The grating coupler optimization proceeds as follows:
    1) Continuous optimization whereby each pixel can vary between device and
       background permittivity.
    2) Discretization whereby the continuous pixel parametrization is
       transformed into a discrete grating (Note that L2D is implemented here).
    3) Further optimization of the discrete grating by moving the grating
       edges.

    Args:
        obj: The objective function to minimize.
        monitors: List of monitors to keep track of.
        cont_iters: Number of iterations to run in continuous optimization.
        disc_iters: Number of iterations to run in discrete optimization.
        sim_space: Simulation space to use.
        min_feature: Minimum feature size in nanometers.
        cont_to_disc_factor: Discretize the continuous grating with feature size
            constraint of `min_feature * cont_to_disc_factor`.
            `cont_to_disc_factor > 1` gives discrete optimization more wiggle
            room.

    Returns:
        A list of transformations.
    """
    # Set up an empty transformation list.
    trans_list = []

    # First do continuous relaxation optimization.
    cont_param = optplan.PixelParametrization(
        simulation_space=sim_space,
        init_method=optplan.UniformInitializer(min_val=0, max_val=1))
    trans_list.append(
        optplan.Transformation(
            name="opt_cont",
            parametrization=cont_param,
            transformation=optplan.ScipyOptimizerTransformation(
                optimizer="L-BFGS-B",
                objective=obj,
                monitor_lists=optplan.ScipyOptimizerMonitorList(
                    callback_monitors=monitors,
                    start_monitors=monitors,
                    end_monitors=monitors),
                optimization_options=optplan.ScipyOptimizerOptions(
                    maxiter=cont_iters),
            ),
        ))

    # If `DISCRETENESS_PENALTY` is set, do another round of continuous
    # optimization with a discreteness bias.
    if DISCRETENESS_PENALTY:
        # Define the parameters needed to normalize the discrete penalty term.
        obj_val_param = optplan.Parameter(name="param_obj_final_val",
                                          initial_value=1.0)
        obj_val_param_abs = optplan.abs(obj_val_param)

        discrete_penalty_val = optplan.Parameter(
            name="param_discrete_penalty_val", initial_value=1.0)
        discrete_penalty_val_abs = optplan.abs(discrete_penalty_val)

        # The initial scaling value is arbitrary and tuned for this specific problem.
        disc_scaling = optplan.Parameter(name="discrete_scaling",
                                         initial_value=5)

        normalization = disc_scaling * obj_val_param_abs / discrete_penalty_val_abs

        obj_disc = obj + optplan.DiscretePenalty() * normalization

        trans_list.append(
            optplan.Transformation(
                name="opt_cont_disc",
                parameter_list=[
                    optplan.SetParam(parameter=obj_val_param,
                                     function=obj,
                                     parametrization=cont_param),
                    optplan.SetParam(parameter=discrete_penalty_val,
                                     function=optplan.DiscretePenalty(),
                                     parametrization=cont_param)
                ],
                parametrization=cont_param,
                transformation=optplan.ScipyOptimizerTransformation(
                    optimizer="L-BFGS-B",
                    objective=obj_disc,
                    monitor_lists=optplan.ScipyOptimizerMonitorList(
                        callback_monitors=monitors,
                        start_monitors=monitors,
                        end_monitors=monitors),
                    optimization_options=optplan.ScipyOptimizerOptions(
                        maxiter=cont_iters),
                )))

    # Discretize. Note that we discretize with a slightly larger feature size
    # than our target (by a factor of `cont_to_disc_factor`) to give the
    # subsequent discrete optimization a bit more wiggle room.
    disc_param = optplan.GratingParametrization(simulation_space=sim_space,
                                                inverted=True)
    trans_list.append(
        optplan.Transformation(
            name="cont_to_disc",
            parametrization=disc_param,
            transformation=optplan.GratingEdgeFitTransformation(
                parametrization=cont_param,
                min_feature=cont_to_disc_factor * min_feature)))

    # Discrete optimization.
    trans_list.append(
        optplan.Transformation(
            name="opt_disc",
            parametrization=disc_param,
            transformation=optplan.ScipyOptimizerTransformation(
                optimizer="SLSQP",
                objective=obj,
                constraints_ineq=[
                    optplan.GratingFeatureConstraint(
                        min_feature_size=min_feature,
                        simulation_space=sim_space,
                        boundary_constraint_scale=1.0,
                    )
                ],
                monitor_lists=optplan.ScipyOptimizerMonitorList(
                    callback_monitors=monitors,
                    start_monitors=monitors,
                    end_monitors=monitors),
                optimization_options=optplan.ScipyOptimizerOptions(
                    maxiter=disc_iters),
            ),
        ))
    return trans_list
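A minimal usage sketch (not part of the original file): it assumes the surrounding grating-coupler script has already built the objective `obj`, the `monitors` list, and the `sim_space`, and that the resulting transformations are wrapped in an `optplan.OptimizationPlan`, as the spins-b examples do.

# Hypothetical call site; `obj`, `monitors`, and `sim_space` are assumed to be
# constructed elsewhere in the script.
trans_list = create_transformations(
    obj=obj,
    monitors=monitors,
    cont_iters=100,
    disc_iters=100,
    sim_space=sim_space,
    min_feature=100,
)
plan = optplan.OptimizationPlan(transformations=trans_list)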