Example #1
import sys

import numpy as np

import graph          # project-local modules (assumed importable)
import simulation
import trait_matrix
import utils

# Placeholder sizes and settings; the original script defines these elsewhere.
num_nodes, num_species, num_traits = 3, 2, 4
num_simulations = 10
max_rate = 2.
t = 5.  # integration horizon (the script integrates up to 2 * t)

g = graph.Graph(num_nodes, True)

# Illustrative distributions (rows: nodes, columns: species); the per-species
# totals match so that X_final is reachable from X_init.
X_init = np.array([[10., 0.], [0., 10.], [0., 0.]])
X_final = np.array([[0., 5.], [5., 5.], [5., 0.]])
Q = trait_matrix.CreateRandomQ(num_species, num_traits)
Y_desired = X_final.dot(Q)
A = g.AdjacencyMatrix()

# Hand-built transition-rate matrices for the 3x3 case (the original comment
# reads "BUILD MATHER K MATRIX"). Placeholder values, assuming one rate matrix
# per species stacked along the first axis: off-diagonal entries are transition
# rates along graph edges, and each column sums to zero so that the number of
# robots is conserved.
K = np.stack([np.array([[-2., 1., 1.],
                        [1., -2., 1.],
                        [1., 1., -2.]])] * num_species, axis=0)


sys.stdout.write('Integrating...\t')
sys.stdout.flush()
times = np.linspace(0., t * 2., 100)
Ys = simulation.ComputeY(times, K, X_init, Q)
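# Fraction of misallocated traits over time: if the total trait mass is
# conserved, each misplaced unit appears once as a surplus and once as a
# deficit, hence the division by 2 * sum(Y_desired).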
error_macro = np.sum(np.abs(Y_desired - Ys), axis=(1, 2)) / (np.sum(Y_desired) * 2.)
sys.stdout.write(utils.Highlight('[DONE]\n', utils.GREEN, bold=True))

error_micro = []
for i in range(num_simulations):
  sys.stdout.write('Simulating (%d/%d)...\t' % (i + 1, num_simulations))
  sys.stdout.flush()
  Ys, timesteps = simulation.SimulateY(np.max(times), K, X_init, Q, dt=0.1 / max_rate)
  error_micro.append(np.sum(np.abs(Y_desired - Ys), axis=(1, 2)) / (np.sum(Y_desired) * 2.))
  sys.stdout.write(utils.Highlight('[DONE]\n', utils.GREEN, bold=True))
error_micro = np.stack(error_micro, axis=0)
mean_error_micro = np.mean(error_micro, axis=0)
std_error_micro = np.std(error_micro, axis=0)
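
# Illustrative only: compare the macroscopic (ODE) error with the mean and
# spread of the microscopic (stochastic) runs. Assumes matplotlib is available;
# the original script may plot or log these results differently.
import matplotlib.pyplot as plt
plt.plot(times, error_macro, label='macroscopic')
plt.plot(timesteps, mean_error_micro, label='microscopic (mean)')
plt.fill_between(timesteps,
                 mean_error_micro - std_error_micro,
                 mean_error_micro + std_error_micro,
                 alpha=0.3)
plt.xlabel('time')
plt.ylabel('trait mismatch error')
plt.legend()
plt.show()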

error_rhc = []
Example #2
def Optimize(Y_desired,
             A,
             X_init,
             Q,
             var_Q,
             max_rate,
             gamma=0,
             warm_start_parameters=None,
             specified_time=None,
             minimize_convergence_time=True,
             stabilize_robot_distribution=True,
             allow_trait_overflow=False,
             norm_order=2,
             analytical_gradients=True,
             verify_gradients=False,
             minimize_variance=False,
             max_meta_iteration=200,
             max_error=1e3,
             verbose=False):
    assert norm_order in (1, 2)
    assert (specified_time is None) == minimize_convergence_time

    global current_iteration
    current_iteration = 0

    if norm_order == 1 and allow_trait_overflow:
        mode = ABSOLUTE_AT_LEAST
    elif norm_order == 1 and not allow_trait_overflow:
        mode = ABSOLUTE_EXACT
    elif norm_order == 2 and allow_trait_overflow:
        mode = QUADRATIC_AT_LEAST
    elif norm_order == 2 and not allow_trait_overflow:
        mode = QUADRATIC_EXACT

    # Set base parameters. They should work for most input values.
    alpha = 1. if minimize_convergence_time else 0.
    if minimize_variance:
        beta = 10. if stabilize_robot_distribution else 0.
    else:
        beta = 5. if stabilize_robot_distribution else 0.
    gamma = gamma if minimize_variance else 0.
    nu = 1. if stabilize_robot_distribution else None
    margin = 0. if allow_trait_overflow else None
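    # The weights above are gated by the corresponding flags: alpha weights the
    # convergence-time term, beta the distribution-stabilization term, gamma the
    # trait-variance term, while nu and margin parameterize the stabilization
    # and overflow constraints (roles inferred from how the flags enable them).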

    # Normalize input parameters.
    sum_X = float(np.sum(X_init))
    X_init = X_init.astype(float) / sum_X * 800.
    Y_desired = Y_desired.astype(float) / sum_X * 800.
    old_max_rate = max_rate
    max_rate = 2.
    expected_convergence_time = 1.

    # Initial parameters (only where the adjacency matrix has a 1).
    num_species = X_init.shape[1]
    num_nonzero_elements = np.sum(A)
    if warm_start_parameters is None:
        init_parameters = np.random.rand(
            num_nonzero_elements * num_species) * max_rate
        if minimize_convergence_time:
            init_parameters = np.concatenate([
                init_parameters,
                np.array([np.random.rand() * expected_convergence_time * 2.])
            ],
                                             axis=0)
    else:
        init_parameters = warm_start_parameters

    # Bound by max rate (enforced both via the L-BFGS-B bounds and via
    # basinhopping's accept_test).
    bound_fun = lambda *args, **kargs: BoundParameters(
        num_nonzero_elements * num_species, max_rate,
        minimize_convergence_time, *args, **kargs)
    bounds = [(0., max_rate)] * num_nonzero_elements * num_species
    if minimize_convergence_time:
        bounds.append((0., None))

    if analytical_gradients:
        cost_fun = lambda x: Cost(x,
                                  Y_desired,
                                  A,
                                  X_init,
                                  Q,
                                  var_Q,
                                  alpha=alpha,
                                  specified_time=specified_time,
                                  beta=beta,
                                  gamma=gamma,
                                  nu=nu,
                                  mode=mode,
                                  margin=margin)
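        # With 'jac': True, L-BFGS-B expects cost_fun to return a
        # (cost, gradient) pair, which Cost() presumably provides when
        # analytical gradients are enabled.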
        minimizer_kwargs = {
            'method': 'L-BFGS-B',
            'bounds': bounds,
            'jac': True,
            'options': {
                'disp': False,
                'ftol': 1e-3,
                'maxiter': 100
            }
        }
    else:
        cost_fun = lambda x: cost_without_grad(x,
                                               Y_desired,
                                               A,
                                               X_init,
                                               Q,
                                               var_Q,
                                               alpha=alpha,
                                               specified_time=specified_time,
                                               beta=beta,
                                               gamma=gamma,
                                               nu=nu,
                                               mode=mode,
                                               margin=margin)
        minimizer_kwargs = {
            'method': 'L-BFGS-B',
            'bounds': bounds,
            'jac': False,
            'options': {
                'disp': False,
                'ftol': 1e-3,
                'maxiter': 100
            }
        }

    # Check gradient if requested.
    if verify_gradients and analytical_gradients:
        for i in range(10):
            gradient_parameters = np.random.rand(*init_parameters.shape)
            CheckGradient(
                lambda x: Cost(x,
                               Y_desired,
                               A,
                               X_init,
                               Q,
                               var_Q,
                               alpha=alpha,
                               specified_time=specified_time,
                               beta=beta,
                               nu=nu,
                               mode=mode,
                               margin=margin), gradient_parameters)

    # Basinhopping function.
    success = False
    global meta_iteration
    meta_iteration = 0
    while not success:
        meta_iteration += 1
        if meta_iteration > max_meta_iteration:
            break
        # print('\nMeta iteration %i...' % meta_iteration)
        # It happens very rarely that the eigenvector matrix becomes close to singular and
        # cannot be inverted. In that case, we simply restart the optimization.
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', RuntimeWarning)
                ret = scipy.optimize.basinhopping(
                    cost_fun,
                    init_parameters,
                    accept_test=bound_fun,
                    minimizer_kwargs=minimizer_kwargs,
                    niter=50,
                    niter_success=6,
                    callback=Print if verbose else None)

            constraints_satisfied = check_solution(ret.x, max_rate,
                                                   minimize_convergence_time)
            success = (ret.fun < max_error) and constraints_satisfied
            if (ret.fun < 1e3) and not constraints_satisfied:
                # Debugging hook: low cost but constraints still violated.
                dummy = 1
        except (ValueError, np.linalg.LinAlgError):
            # Make completely new random elements.
            init_parameters = np.random.rand(
                num_nonzero_elements * num_species) * max_rate
            if minimize_convergence_time:
                init_parameters = np.concatenate([
                    init_parameters,
                    np.array(
                        [np.random.rand() * expected_convergence_time * 2.])
                ],
                                                 axis=0)
            success = False

    final_parameters = np.copy(ret.x)

    # Remove the optimized t.
    if minimize_convergence_time:
        optimal_t = ret.x[-1]
        K = UnflattenParameters(ret.x[:-1], A, num_species)
    else:
        optimal_t = specified_time
        K = UnflattenParameters(ret.x, A, num_species)

    # Renormalize.
    optimal_t *= max_rate / old_max_rate
    K *= old_max_rate / max_rate
    X_init *= sum_X
    Y_desired *= sum_X

    if verbose:
        Y = simulation.ComputeY(optimal_t, K, X_init, Q)
        if allow_trait_overflow:
            error = np.sum(np.maximum(Y_desired - Y, np.zeros(
                Y.shape))) / np.sum(Y_desired)
        else:
            error = np.sum(np.abs(Y_desired - Y)) / (np.sum(Y_desired) * 2.)

        print('\nConverged after %i meta iterations' % meta_iteration)
        print('\nConstraints satisfied' if success else
              '\nConstraints NOT satisfied')
        print('\nTrait mismatch error (at time %.2f): %.3f%%' %
              (optimal_t, error * 100.))
        print('Final cost:', ret.fun)

    # Return transition matrices (3D matrix for all species)
    return K, optimal_t, final_parameters, success
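
# A minimal sketch of what the BoundParameters accept_test used above might
# look like; the actual helper is not shown in this example, so the names and
# structure below are assumptions. scipy's basinhopping passes the candidate
# point via the x_new keyword argument and expects a boolean in return.
def BoundParameters(num_rate_parameters, max_rate, minimize_convergence_time,
                    **kwargs):
    x = kwargs['x_new']
    rates_ok = np.all(x[:num_rate_parameters] >= 0.) and np.all(
        x[:num_rate_parameters] <= max_rate)
    time_ok = (not minimize_convergence_time) or x[-1] >= 0.
    return bool(rates_ok and time_ok)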
Example #3
# NOTE: this snippet begins mid-call in the original; the opening below is
# reconstructed from the Optimize() signature shown in Example #2.
K_opt, t_opt, _, success = Optimize(
    Y_desired,
    A,
    X_init,
    Q,
    var_Q,
    max_rate,
    allow_trait_overflow=min_trait_matching,
    minimize_variance=minimize_variance,
    analytical_gradients=True,
    verify_gradients=True,
    verbose=True)

sys.stdout.write(
    utils.Highlight('[OPTIMIZATION DONE]\n', utils.GREEN, bold=True))

# Simulate the optimized system
sys.stdout.write(utils.Highlight('[Simulating...]\n', utils.BLUE, bold=True))
sys.stdout.flush()
sim_time_steps = np.linspace(
    0., t_opt * 2.,
    100)  # simulate for twice as long as the optimal settling time
Y_seq = simulation.ComputeY(
    sim_time_steps, K_opt, X_init,
    Q)  # time-series evolution of actual trait distribution
Y_ss = Y_seq[-1]  # steady-state Y
sys.stdout.write(utils.Highlight('[SIMULATION DONE]\n', utils.GREEN,
                                 bold=True))
print('\n--------------\n')
print('Desired Y:\n', Y_desired)
print('\n')
print('Achieved Y:\n', Y_ss)
print('\n--------------\n')

# [Insert code to save the results as needed]
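
# A minimal sketch of saving the results, assuming NumPy's .npz format is an
# acceptable target and using an illustrative file name; the original script
# may store them differently.
np.savez('optimization_results.npz',
         K_opt=K_opt,
         t_opt=t_opt,
         Y_desired=Y_desired,
         Y_achieved=Y_ss)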