Code example #1
File: test_core.py  Project: vsandinh/qoc
def test_strip_slap():
    import numpy as np
    from qoc.models.dummy import Dummy
    from qoc.core.common import (
        slap_controls,
        strip_controls,
    )

    big = 100
    pstate = Dummy()
    pstate.complex_controls = True
    shape_range = np.arange(big) + 1
    for step_count in shape_range:
        for control_count in shape_range:
            pstate.controls_shape = controls_shape = (step_count,
                                                      control_count)
            pstate.max_control_norms = np.ones(control_count) * 2
            controls = np.random.rand(
                *controls_shape) + 1j * np.random.rand(*controls_shape)
            stripped_controls = strip_controls(pstate.complex_controls, controls)
            assert (stripped_controls.ndim == 1)
            assert (not (stripped_controls.dtype
                         in (np.complex64, np.complex128)))
            transformed_controls = slap_controls(pstate.complex_controls,
                                                 stripped_controls,
                                                 pstate.controls_shape)
            assert (np.allclose(controls, transformed_controls))
            assert (controls.shape == transformed_controls.shape)
        #ENDFOR
    #ENDFOR

    pstate.complex_controls = False
    for step_count in shape_range:
        for control_count in shape_range:
            pstate.controls_shape = controls_shape = (step_count,
                                                      control_count)
            pstate.max_control_norms = np.ones(control_count)
            controls = np.random.rand(*controls_shape)
            stripped_controls = strip_controls(pstate.complex_controls,
                                               controls)
            assert (stripped_controls.ndim == 1)
            assert (not (stripped_controls.dtype
                         in (np.complex64, np.complex128)))
            transformed_controls = slap_controls(pstate.complex_controls,
                                                 stripped_controls,
                                                 pstate.controls_shape)
            assert (np.allclose(controls, transformed_controls))
            assert (controls.shape == transformed_controls.shape)
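
The roundtrip exercised by this test can be illustrated with a self-contained sketch. The helpers below are a hypothetical re-implementation (strip_controls_sketch / slap_controls_sketch are not qoc functions): they flatten a complex (step_count, control_count) array into a real 1-D vector by concatenating its real and imaginary parts, then reverse the mapping. The assertions above only require that qoc's strip_controls / slap_controls perform some such invertible, real-valued flattening.

import numpy as np

def strip_controls_sketch(complex_controls, controls):
    # Flatten to a real 1-D vector; a complex array contributes its
    # real and imaginary parts as two concatenated halves.
    if complex_controls:
        return np.concatenate((controls.real.ravel(), controls.imag.ravel()))
    return controls.ravel()

def slap_controls_sketch(complex_controls, flat_controls, controls_shape):
    # Invert the flattening: rebuild the (complex) array from the flat vector.
    if complex_controls:
        half = flat_controls.size // 2
        return (flat_controls[:half] + 1j * flat_controls[half:]).reshape(controls_shape)
    return flat_controls.reshape(controls_shape)

controls = np.random.rand(3, 2) + 1j * np.random.rand(3, 2)
flat = strip_controls_sketch(True, controls)
assert flat.ndim == 1 and not np.iscomplexobj(flat)
assert np.allclose(controls, slap_controls_sketch(True, flat, controls.shape))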
Code example #2
def _esdj_wrap(controls, pstate, reporter, result):
    """
    Do intermediary work between the optimizer feeding controls to 
    the jacobian of _evaluate_schroedinger_discrete.

    Args:
    controls
    pstate
    reporter
    result

    Returns:
    grads
    terminate
    """
    # Convert the controls from optimizer format to cost function format.
    controls = slap_controls(pstate.complex_controls, controls,
                             pstate.controls_shape)
    # Rescale the controls to their maximum norm.
    clip_control_norms(controls, pstate.max_control_norms)
    # Impose user boundary conditions.
    if pstate.impose_control_conditions is not None:
        controls = pstate.impose_control_conditions(controls)

    # Evaluate the jacobian.
    error, grads = (ans_jacobian(_evaluate_schroedinger_discrete,
                                 0)(controls, pstate, reporter))
    # Autograd defines the derivative of a function of complex inputs as
    # df_dz = du_dx - i * du_dy for z = x + iy, f(z) = u(x, y) + iv(x, y).
    # For optimization, we care about df_dz = du_dx + i * du_dy.
    if pstate.complex_controls:
        grads = np.conjugate(grads)

    # The states need to be unwrapped from their autograd box.
    if isinstance(reporter.final_states, Box):
        final_states = reporter.final_states._value
    else:
        final_states = reporter.final_states

    # Update best configuration.
    if error < result.best_error:
        result.best_controls = controls
        result.best_error = error
        result.best_final_states = final_states
        result.best_iteration = reporter.iteration

    # Save and log optimization progress.
    pstate.log_and_save(controls, error, final_states, grads,
                        reporter.iteration)
    reporter.iteration += 1

    # Convert the gradients from cost function to optimizer format.
    grads = strip_controls(pstate.complex_controls, grads)

    # Determine if optimization should terminate.
    if error <= pstate.min_error:
        terminate = True
    else:
        terminate = False

    return grads, terminate
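
The conjugation applied above follows the convention quoted in the comments: for a real-valued cost u of a complex control z = x + iy, autograd reports du_dx - i * du_dy, while the direction useful for optimization is du_dx + i * du_dy. A minimal numpy-only sketch with a toy cost (not qoc's Schroedinger cost) shows, via finite differences, that stepping against the conjugated gradient decreases the cost.

import numpy as np

def cost(z):
    # Toy real-valued cost of a single complex control.
    return np.abs(z - (1.0 + 2.0j)) ** 2

z, h = 0.5 + 0.5j, 1e-6
du_dx = (cost(z + h) - cost(z - h)) / (2 * h)
du_dy = (cost(z + 1j * h) - cost(z - 1j * h)) / (2 * h)
autograd_style = du_dx - 1j * du_dy               # convention quoted above
descent_direction = np.conjugate(autograd_style)  # du_dx + i * du_dy
assert cost(z - 1e-3 * descent_direction) < cost(z)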
Code example #3
def _esd_wrap(controls, pstate, reporter, result):
    """
    Do intermediary work between the optimizer feeding controls
    to _evaluate_schroedinger_discrete.

    Args:
    controls
    pstate
    reporter
    result

    Returns:
    error
    terminate
    """
    # Convert the controls from optimizer format to cost function format.
    controls = slap_controls(pstate.complex_controls, controls,
                             pstate.controls_shape)
    # Rescale the controls to their maximum norm.
    clip_control_norms(controls, pstate.max_control_norms)
    # Impose user boundary conditions.
    if pstate.impose_control_conditions:
        controls = pstate.impose_control_conditions(controls)

    # Evaluate the cost function.
    error = _evaluate_schroedinger_discrete(controls, pstate, reporter)

    # Determine if optimization should terminate.
    if error <= pstate.min_error:
        terminate = True
    else:
        terminate = False

    return error, terminate
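
Both _esd_wrap and _esdj_wrap expose the same contract to the optimizer: they take flat, real-valued controls and return a value together with a terminate flag. The loop below is a schematic, hypothetical driver (gradient_descent is not qoc's optimizer API) showing how a pair of wrappers with that contract could be consumed; a toy quadratic cost stands in for the Schroedinger evaluation.

import numpy as np

def gradient_descent(cost_wrap, jacobian_wrap, controls, learning_rate=1e-3, iterations=100):
    # cost_wrap(controls) -> (error, terminate); jacobian_wrap(controls) -> (grads, terminate),
    # mirroring the contracts of _esd_wrap and _esdj_wrap with pstate/reporter/result bound.
    for _ in range(iterations):
        error, terminate = cost_wrap(controls)
        if terminate:
            break
        grads, terminate = jacobian_wrap(controls)
        controls = controls - learning_rate * grads
        if terminate:
            break
    return controls

# Toy usage: minimize a quadratic over flat real controls.
target = np.array([1.0, -2.0, 0.5])
cost_wrap = lambda c: (np.sum((c - target) ** 2), np.sum((c - target) ** 2) < 1e-8)
jacobian_wrap = lambda c: (2 * (c - target), False)
print(gradient_descent(cost_wrap, jacobian_wrap, np.zeros(3), learning_rate=0.1))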
Code example #4
File: lindbladdiscrete.py  Project: jmbaker94/qoc
def _eldj_wrap(controls, pstate, reporter, result):
    """
    Do intermediary work between the optimizer feeding controls to 
    the jacobian of _evaluate_lindblad_discrete.

    Args:
    controls
    pstate
    reporter
    result

    Returns:
    grads
    """
    # Convert the controls from optimizer format to cost function format.
    controls = slap_controls(pstate.complex_controls, controls,
                             pstate.controls_shape)
    clip_control_norms(pstate.max_control_norms, controls)

    # Evaluate the jacobian.
    total_error, grads = (ans_jacobian(_evaluate_lindblad_discrete,
                                       0)(controls, pstate, reporter))
    # Autograd defines the derivative of a function of complex inputs as
    # df_dz = du_dx - i * du_dy for z = x + iy, f(z) = u(x, y) + iv(x, y).
    # For optimization, we care about df_dz = du_dx + i * du_dy.
    if pstate.complex_controls:
        grads = conjugate(grads)

    # The states need to be unwrapped from their autograd box.
    if isinstance(reporter.final_densities, Box):
        final_densities = reporter.final_densities._value
    else:
        final_densities = reporter.final_densities

    # Update best configuration.
    if total_error < result.best_total_error:
        result.best_controls = controls
        result.best_final_densities = final_densities
        result.best_iteration = reporter.iteration
        result.best_total_error = total_error

    # Save and log optimization progress.
    pstate.log_and_save(
        controls,
        final_densities,
        total_error,
        grads,
        reporter.iteration,
    )
    reporter.iteration += 1

    # Convert the gradients from cost function to optimizer format.
    grads = strip_controls(pstate.complex_controls, grads)

    return grads
Code example #5
def test_adam():
    import numpy as np

    from qoc.core.common import (strip_controls, slap_controls)
    from qoc.models.dummy import Dummy
    from qoc.standard.optimizers.adam import Adam

    # Check that the update method was implemented correctly
    # using hand-checked values.
    adam = Adam()
    grads = np.array([[0, 1], [2, 3]])
    params = np.array([[0, 1], [2, 3]], dtype=np.float64)
    params1 = np.array([[0, 0.999], [1.999, 2.999]])
    params2 = np.array([[0, 0.99900003], [1.99900001, 2.99900001]])

    adam.run(None, 0, params, None, None)
    params1_test = adam.update(params, grads)
    params2_test = adam.update(params1, grads)

    assert (np.allclose(params1_test, params1))
    assert (np.allclose(params2_test, params2))

    # Check that complex mapping works and params
    # without gradients are unaffected.
    gstate = Dummy()
    gstate.complex_controls = True
    grads = np.array([[1 + 1j, 0 + 0j], [0 + 0j, -1 - 1j]])
    params = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]])
    gstate.controls_shape = params.shape
    gstate.max_param_norms = np.ones(gstate.controls_shape[0]) * 10

    flat_controls = strip_controls(gstate.complex_controls, params)
    flat_grads = strip_controls(gstate.complex_controls, grads)

    adam.run(None, 0, flat_controls, None, None)
    params1 = adam.update(flat_grads, flat_controls)
    params1 = slap_controls(gstate.complex_controls, params1,
                            gstate.controls_shape)

    assert (np.allclose(params1[0][1], params[0][1]))
    assert (np.allclose(params1[1][0], params[1][0]))
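
The first hand-checked value can be reproduced from the standard Adam update rule, assuming qoc's Adam uses the common defaults (learning_rate=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-8); with zero-initialized moments, the bias-corrected first step reduces to roughly learning_rate * sign(grads), which yields the params1 array above.

import numpy as np

learning_rate, beta_1, beta_2, epsilon = 1e-3, 0.9, 0.999, 1e-8
grads = np.array([[0., 1.], [2., 3.]])
params = np.array([[0., 1.], [2., 3.]])

# One Adam step (t = 1) starting from zero-initialized moments.
m = (1 - beta_1) * grads
v = (1 - beta_2) * grads ** 2
m_hat = m / (1 - beta_1 ** 1)  # bias correction -> m_hat == grads
v_hat = v / (1 - beta_2 ** 1)  # bias correction -> v_hat == grads ** 2
step = learning_rate * m_hat / (np.sqrt(v_hat) + epsilon)
print(params - step)  # ~ [[0, 0.999], [1.999, 2.999]], matching params1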
Code example #6
File: lindbladdiscrete.py  Project: jmbaker94/qoc
def _eld_wrap(controls, pstate, reporter, result):
    """
    Do intermediary work between the optimizer feeding controls
    to _evaluate_lindblad_discrete.

    Args:
    controls
    pstate
    reporter
    result

    Returns:
    total_error
    """
    # Convert the controls from optimizer format to cost function format.
    controls = slap_controls(pstate.complex_controls, controls,
                             pstate.controls_shape)
    clip_control_norms(pstate.max_control_norms, controls)

    # Evaluate the cost function.
    return _evaluate_lindblad_discrete(controls, pstate, reporter)