Example #1
0
def CheckGradient(f, x, h=1e-4, max_reldiff=1e-4):
    """Verify an analytical gradient against a central finite difference.

    Args:
        f: Callable taking ``x`` and returning a tuple ``(value, gradient)``
            where ``gradient`` has the same shape as ``x``.
        x: numpy array at which the gradient is checked. It is perturbed in
            place during the sweep but restored to its original values.
        h: Step size for the central finite difference.
        max_reldiff: Maximum tolerated relative difference between the
            analytical and numerical gradients.

    Returns:
        The numerically estimated gradient, same shape as ``x``.
    """
    fx, grad = f(x)  # Evaluate function value and analytical gradient at x.
    assert x.shape == grad.shape, 'Variable and gradient must have the same shape'
    passed = True
    numerical_grad = np.empty_like(x)
    # Iterate over all indexes in x.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        # Central difference: (f(x + h) - f(x - h)) / (2h).
        x[ix] -= h
        y1 = f(x)[0]
        x[ix] += 2 * h
        y2 = f(x)[0]
        numgrad = (y2 - y1) / (2 * h)
        x[ix] -= h  # Restore the original value of x[ix].
        numerical_grad[ix] = numgrad
        # Compare gradients with a scale-aware relative difference
        # (denominator is clamped to at least 1 to avoid amplifying
        # errors near zero).
        reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))
        if reldiff > max_reldiff and passed:
            # Only print the first error.
            with warnings.catch_warnings():
                # NOTE(review): np.ComplexWarning was removed from the top-level
                # numpy namespace in NumPy 2.0 (now np.exceptions.ComplexWarning)
                # — confirm the pinned numpy version before upgrading.
                warnings.simplefilter(
                    'ignore', np.ComplexWarning)  # Ignore complex warning.
                print(utils.Highlight('Gradient check failed.',
                                      utils.RED,
                                      bold=True))
                print('First gradient error found at index %s' % str(ix))
                print('Your gradient: %f \t Numerical gradient: %f' % (
                    grad[ix], numgrad))
            passed = False
        it.iternext()  # Step to next dimension.
    if passed:
        print(utils.Highlight('Gradient check passed!', utils.GREEN, bold=True))
    return numerical_grad
Example #2
0
# --- Example #2: compare macroscopic integration vs. microscopic simulation. ---
# NOTE(review): this snippet is incomplete — X_init, X_final and K are left
# blank as exercise placeholders, and it relies on names defined elsewhere
# (num_species, num_traits, g, t, max_rate, num_simulations,
# num_simulations_rhc, trait_matrix, simulation, utils, np, sys).
X_init =   # TODO(exercise): initial species distribution — intentionally blank.
X_final =   # TODO(exercise): final species distribution — intentionally blank.
Q = trait_matrix.CreateRandomQ(num_species, num_traits)  # Random species-to-trait matrix.
Y_desired = X_final.dot(Q)  # Trait distribution implied by the final species distribution.
A = g.AdjacencyMatrix()  # Adjacency matrix of graph g — presumably the robot/species transition graph; verify against caller.

K = # BUILD MATHER K MATRIX for 3x3 case — TODO(exercise): intentionally blank.


# Macroscopic: integrate the mean-field dynamics over twice the settling time t.
sys.stdout.write('Integrating...\t')
sys.stdout.flush()
times = np.linspace(0., t * 2., 100)
Ys = simulation.ComputeY(times, K, X_init, Q)
# Normalized L1 error per time step between desired and achieved trait distributions.
error_macro = np.sum(np.abs(Y_desired - Ys), axis=(1, 2)) / (np.sum(Y_desired) * 2.)
sys.stdout.write(utils.Highlight('[DONE]\n', utils.GREEN, bold=True))

# Microscopic: repeat stochastic simulations and aggregate the same error metric.
error_micro = []
for i in range(num_simulations):
  sys.stdout.write('Simulating (%d/%d)...\t' % (i + 1, num_simulations))
  sys.stdout.flush()
  # dt is scaled by the fastest transition rate to keep the simulation stable.
  Ys, timesteps = simulation.SimulateY(np.max(times), K, X_init, Q, dt=0.1 / max_rate)
  error_micro.append(np.sum(np.abs(Y_desired - Ys), axis=(1, 2)) / (np.sum(Y_desired) * 2.))
  sys.stdout.write(utils.Highlight('[DONE]\n', utils.GREEN, bold=True))
# Stack runs into (num_simulations, num_timesteps) and summarize across runs.
error_micro = np.stack(error_micro, axis=0)
mean_error_micro = np.mean(error_micro, axis=0)
std_error_micro = np.std(error_micro, axis=0)

# Receding-horizon control runs — NOTE(review): loop body appears truncated
# in this snippet (only the progress label is built).
error_rhc = []
for i in range(num_simulations_rhc):
  desc = 'Simulating RHC (%d/%d)' % (i + 1, num_simulations_rhc)
Example #3
0
# Remnant of the full Optimize(...) signature kept for reference:
#              minimize_variance=False, max_meta_iteration=200, max_error=1e3, verbose=False)
# Optimize the transition-rate matrix K so the trait distribution reaches
# Y_desired. Relies on names defined elsewhere (Y_desired, A, X_init, Q,
# var_Q, max_rate, min_trait_matching, minimize_variance,
# optimization_STRATA, simulation, utils, np, sys).
# NOTE(review): allow_trait_overflow is passed min_trait_matching — confirm
# this mapping is intentional and not a swapped keyword.
K_opt, t_opt, _, _ = optimization_STRATA.Optimize(
    Y_desired,
    A,
    X_init,
    Q,
    var_Q,
    max_rate,
    allow_trait_overflow=min_trait_matching,
    minimize_variance=minimize_variance,
    analytical_gradients=True,
    verify_gradients=True,
    verbose=True)

sys.stdout.write(
    utils.Highlight('[OPTIMIZATION DONE]\n', utils.GREEN, bold=True))

# Simulate the optimized system
sys.stdout.write(utils.Highlight('[Simulating...]\n', utils.BLUE, bold=True))
sys.stdout.flush()
sim_time_steps = np.linspace(
    0., t_opt * 2.,
    100)  # simulate for twice as long as the optimal settling time
Y_seq = simulation.ComputeY(
    sim_time_steps, K_opt, X_init,
    Q)  # time-series evolution of actual trait distribution
Y_ss = Y_seq[-1]  # steady-state Y
sys.stdout.write(utils.Highlight('[SIMULATION DONE]\n', utils.GREEN,
                                 bold=True))
print('\n--------------\n')
print('Desired Y:\n', Y_desired)