def multivariateNewton(f, x0, tol, N):
    x0 = jnp.asarray(x0).T  # Convert Input Array to Jax Array

    def J_inv(x):  # Create Inverse Jacobian Function
        jacobian = jax.jacfwd(
            f
        )  # Calculate the jacobian function from the provided systems with Forward Auto-differentiation
        J = jacobian(x)  # Calculate the Jacobian at x
        J_inv = jnp.linalg.inv(J)  # Calculate the Inverse Jacobian
        return jnp.asarray(
            J_inv)  # Return Inverse Jacobian at x as a Jax Array

    for k in range(1, N):  # Start Loop for Maximum Iterations
        x = jnp.subtract(x0, jnp.matmul(
            J_inv(x0),
            f(x0).T))  # Perform Newton Iteration: x_{n+1} = x_n-J^(-1)*f
        # reltol = jnp.divide(jnp.linalg.norm(jnp.subtract(x,x0), np.inf),jnp.linalg.norm(x, np.inf)) # Calculate: ||x_{n+1}-x_n||/||x_{n+1}||
        atol = jnp.linalg.norm(jnp.subtract(
            x, x0), np.inf)  # Calculate: ||x_{n+1}-x_n||
        # print(i, tol)             # Print iteration and relTol
        if atol < tol:  # Check for convergence
            # print(x)              # Print Result
            return x  # Return Result
        x0 = x  # Update x0 for Next iteration
    print("Failed to converge")  # Print Message if Convergence did not occur
Example no. 2
def test_function():

    rng = jax.random.PRNGKey(42)

    y_true = jax.random.randint(rng, shape=(2, 3), minval=0, maxval=2)
    y_pred = jax.random.uniform(rng, shape=(2, 3))

    loss = elegy.losses.huber(y_true, y_pred, delta=1.0)
    assert loss.shape == (2,)

    y_pred = y_pred.astype(float)
    y_true = y_true.astype(float)
    delta = 1.0
    error = jnp.subtract(y_pred, y_true)
    abs_error = jnp.abs(error)
    quadratic = jnp.minimum(abs_error, delta)
    linear = jnp.subtract(abs_error, quadratic)
    assert jnp.array_equal(
        loss,
        jnp.mean(
            jnp.add(
                jnp.multiply(0.5, jnp.multiply(quadratic, quadratic)),
                jnp.multiply(delta, linear),
            ),
            axis=-1,
        ),
    )
Example no. 3
def troe_falloff_correction(
    T: float, lPr: np.ndarray, troe_coeffs: np.ndarray, troe_indices: np.ndarray
) -> np.ndarray:
    """
    modify rate constants use TROE falloff parameters
    returns: np.ndarray of F(T,P) 
    """
    troe_coeffs = troe_coeffs[troe_indices]
    F_cent = (
        np.multiply(
            np.subtract(1, troe_coeffs[:, 0]), np.exp(np.divide(-T, troe_coeffs[:, 3]))
        )
        + np.multiply(troe_coeffs[:, 0], np.exp(np.divide(-T, troe_coeffs[:, 1])))
        + np.exp(np.divide(-troe_coeffs[:, 2], T))
    )
    lF_cent = np.log10(F_cent)
    C = np.subtract(-0.4, np.multiply(0.67, lF_cent))
    N = np.subtract(0.75, np.multiply(1.27, lF_cent))
    f1_numerator = lPr + C
    f1_denominator_1 = N
    f1_denominator_2 = np.multiply(0.14, f1_numerator)
    f1 = np.divide(f1_numerator, np.subtract(f1_denominator_1, f1_denominator_2))
    F = np.power(10.0, np.divide(lF_cent, (1.0 + np.square(f1))))
    # F = 10**(lF_cent / (1. + f1**2.))
    return F
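A small usage sketch (not from the source) with clearly labeled placeholder coefficients, laid out in the column order the function indexes them: `[alpha, T1, T2, T3]`, so that `F_cent = (1 - alpha)*exp(-T/T3) + alpha*exp(-T/T1) + exp(-T2/T)`.

T = 1000.0                                        # temperature, K
troe_coeffs = np.array([[0.6, 1000.0, 70.0, 2000.0],
                        [0.7, 1500.0, 80.0, 3000.0]])  # placeholder values, not real kinetics data
troe_indices = np.array([0, 1])                   # coefficient row used by each falloff reaction
lPr = np.array([-0.5, 0.3])                       # log10 of the reduced pressure per falloff reaction
F = troe_falloff_correction(T, lPr, troe_coeffs, troe_indices)  # broadening factors F(T, P), shape (2,)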
Example no. 4
def huber_loss(labels, predictions, weights=1.0, delta=1.0):
    """Adds a Huber Loss term to the training procedure.

  For each value x in `error=labels-predictions`, the following is calculated:
  ```
    0.5 * x^2                  if |x| <= d
    0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```
  where d is `delta`.
  See: https://en.wikipedia.org/wiki/Huber_loss
  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.
  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    delta: `float`, the point where the huber loss function
      changes from a quadratic to linear.
  Returns:
    Weighted loss float `Tensor` with the same shape as `labels`.
  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or
     `predictions` is None.
  """
    if labels is None:
        raise ValueError('labels must not be None.')
    if predictions is None:
        raise ValueError('predictions must not be None.')
    predictions = predictions.astype(jnp.float32)
    labels = labels.astype(jnp.float32)
    # TODO(deveci): check for shape compatibility
    # predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = jnp.subtract(predictions, labels)
    abs_error = jnp.abs(error)
    quadratic = jnp.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already a
    # nonzero contribution to the gradient from the quadratic term.
    linear = jnp.subtract(abs_error, quadratic)
    losses = jnp.add(jnp.multiply(0.5, jnp.multiply(quadratic, quadratic)),
                     jnp.multiply(delta, linear))

    input_dtype = losses.dtype
    losses = losses.astype(jnp.float32)
    weights = jnp.asarray(weights, jnp.float32)
    weighted_losses = jnp.multiply(losses, weights)
    loss = weighted_losses.astype(input_dtype)
    return loss
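A short usage sketch (not from the source), assuming `jnp` is `jax.numpy` as in the snippet; it exercises both branches of the piecewise definition.

labels = jnp.array([0.0, 1.0, 4.0])
predictions = jnp.array([0.5, 1.0, 1.0])
# |error| = [0.5, 0.0, 3.0]; with delta = 1.0 the first two values fall in the
# quadratic branch (0.5 * x^2) and the last in the linear branch (0.5 * d^2 + d * (|x| - d)).
losses = huber_loss(labels, predictions, weights=1.0, delta=1.0)
# losses ≈ [0.125, 0.0, 2.5]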
Example no. 5
def sum_f(var):
    ret = 0
    for i in range(10**4):
        # x = 10 / 10**4 * i - 5
        x = np.subtract(np.multiply(np.divide(10, np.power(10, 4)), i), 5)
        # ret -= (x - var[1])**2 / var[0]**2
        ret -= np.divide(np.power(np.subtract(x, var[1]), 2),
                         np.power(var[0], 2))
    return np.dot(np.log(np.divide(1, var[0])), ret)
Example no. 6
def func(var):
    # sin((var[0]-args[:,0])**3)
    temp = np.sin(np.power(np.subtract(var[0], args[:, 0]), 3))
    ele1 = np.sum(temp)
    # sin(var[1]**3 - var[2]**2) * args[:,1]
    temp = np.sin(np.subtract(np.power(var[1], 3), np.power(var[2], 2)))
    ele2 = np.sum(np.dot(temp, args[:, 1]))
    # log(1 + |args[:,1]*args[:,2]*var[0]*var[1]*var[2]|)
    temp = np.multiply(args[:, 1], args[:, 2])
    ele3 = np.sum(np.log(1 + np.abs(np.dot(var[0] * var[1] * var[2], temp))))
    # print("\n\n", ele1, "\n", ele2, "\n", ele3)
    # print(res, type(res))
    return ele1 + ele2 + ele3
Example no. 7
def generate_data_01():
    batch_size = 8
    input_shape = (batch_size, 4)

    def synth_batches():
        while True:
            images = npr.rand(*input_shape).astype("float32")
            yield images

    batches = synth_batches()
    inputs = next(batches)

    init_func, predict_func = stax.serial(
        HomotopyDense(out_dim=4, W_init=glorot_uniform(), b_init=normal()),
        HomotopyDense(out_dim=1, W_init=glorot_uniform(), b_init=normal()),
        Sigmoid,
    )

    ae_shape, ae_params = init_func(random.PRNGKey(0), input_shape)
    # assert ae_shape == input_shape
    bparam = [np.array([0.0], dtype=np.float64)]
    logits = predict_func(ae_params,
                          inputs,
                          bparam=bparam[0],
                          activation_func=sigmoid)
    loss = np.mean(
        (np.subtract(logits, logits))) + l2_norm(ae_params) + l2_norm(bparam)

    return inputs, logits, ae_params, bparam, init_func, predict_func
Example no. 8
    def objective(params, bparam) -> float:
        logits = predict_fun(params,
                             inputs,
                             bparam=bparam[0],
                             activation_func=sigmoid)
        loss = np.mean((np.subtract(logits, outputs)))
        loss += l2_norm(params) + l2_norm(bparam)
        return loss
Example no. 9
def func(var=np.array([0.5, 1.])):
    temp = np.subtract(arg, var[1])
    temp = np.power(temp, 2)
    temp_sum = np.sum(temp)
    divider = np.power(var[0], 2)
    res = np.divide(temp_sum, divider)
    common = np.log(np.divide(1, var[0]))
    return np.dot(res, common)
Example no. 10
        def np_fn(input_np, v_current, gamma, tau_m, Vth, dt):
            v_current = ((input_np - v_current) / tau_m) * dt
            spike = np.greater_equal(
                v_current + np.multiply(
                    np.divide(np.subtract(input_np, v_current), tau_m), dt),
                Vth).astype('float32')

            gamma += np.where(spike >= Vth, 1, 0)
            return spike, v_current, gamma
Example no. 11
    def forward(self, x, v_current):
        dV_tau = jnp.multiply(jnp.subtract(x, v_current), self.dt)
        dV = jnp.divide(dV_tau, self.tau_m)
        v_current = index_add(v_current, index[:], dV)
        spike_list = jnp.greater_equal(v_current, self.Vth).astype('int32')
        v_current = jnp.where(v_current >= self.Vth, 0,
                              v_current * jnp.exp(-1 / self.tau_m))

        return spike_list, v_current
Example no. 12
    def objective(params, bparam, batch) -> float:
        x, _ = batch
        x = np.reshape(x, (x.shape[0], -1))
        logits = predict_fun(params, x, bparam=bparam[0], rng=key)
        keep = random.bernoulli(key, bparam[0], x.shape)
        inputs_d = np.where(keep, x, 0)

        loss = np.mean(np.square((np.subtract(logits, inputs_d))))
        # loss += 0.1*(l2_norm(params) + l2_norm(bparam))
        return loss
Example no. 13
    def objective(params, bparam, batch) -> float:
        x, _ = batch
        x = np.reshape(x, (x.shape[0], -1))
        logits = predict_fun(params,
                             x,
                             bparam=bparam[0],
                             activation_func=sigmoid)
        loss = np.mean(np.square((np.subtract(logits, x))))
        # loss += 5e-6 * (l2_norm(params))  # + l2_norm(bparam)
        return loss
Example no. 14
    def backward(self, time, spike_list, weights, e_gradient):
        gamma = spike_list[0]
        t_Tk_divby_tau_m = jnp.divide(
            jnp.subtract(time, spike_list[1]), -self.tau_m)
        f_prime_t = jnp.multiply(jnp.exp(t_Tk_divby_tau_m), (-1 / self.tau_m))
        aLIFnet = jnp.multiply(
            1 / self.Vth, (1 + jnp.multiply(jnp.divide(1, gamma), f_prime_t)))
        d_w = jnp.matmul(weights, e_gradient)

        return jnp.multiply(d_w, aLIFnet)
Example no. 15
def gaussian_kernel(x, y, sigma):
    """Gaussian Function to be used in Kernel learning
        :param x, y are two datapoints, in best scenario 1D
        :param sigma: width of the function
        :type sigma: float
    """
    #print("type x:", type(x))
    distance = jnp.subtract(x, y)
    absolute = jnp.linalg.norm(distance)
    k = jnp.exp(-(absolute**2) / (2 * sigma**2))
    return (k)
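A quick usage sketch (not from the source), assuming `jnp` is `jax.numpy`:

x = jnp.array([1.0, 2.0])
y = jnp.array([1.0, 0.0])
k = gaussian_kernel(x, y, sigma=1.0)
# ||x - y|| = 2, so k = exp(-4 / 2) = exp(-2) ≈ 0.135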
Example no. 16
def target_critic_inference(actor_model, target_critic_model, next_state, reward, done, alpha, key):
    next_actions, next_entropies, _ = actor_model(next_state, key=key, sample=True)
    next_state_action               = jnp.concatenate((next_state, next_actions), axis=-1)
    next_q1, next_q2                = target_critic_model(next_state_action)
    min_q                           = jnp.min([next_q1, next_q2], axis=0)
    next_q                          = min_q+alpha*next_entropies
    rewards                         = jnp.reshape(reward, (reward.shape[0], 1))
    dones                           = jnp.subtract(1.0, done)
    dones                           = jnp.reshape(dones, (dones.shape[0], 1))
    target_q                        = rewards+gamma*next_q*dones
    return target_q
Example no. 17
def shooting_error(
    state_guesses, pos_start, pos_end, metric_fn, metric_fn_kwargs, times
):
    def ode_fn(state, t):
        print("inside ode_fn")
        return geodesic_ode(t, state, metric_fn, metric_fn_kwargs)

    def integrate_guesses(ode_fn, state_init, times):
        states_integrated = odeint(ode_fn, state_init, times)
        end_state_integrated = np.array(states_integrated)[-1, :]
        return end_state_integrated

    input_dim = pos_start.shape[0]
    state_guesses = state_guesses.reshape([-1, 2 * input_dim])
    state_guesses = jax.ops.index_update(
        state_guesses, jax.ops.index[0, :input_dim], pos_start
    )
    state_guesses = jax.ops.index_update(
        state_guesses, jax.ops.index[-1, :input_dim], pos_end
    )
    # print('state guesses after update')
    # print(state_guesses)
    ode_args = [metric_fn, metric_fn_kwargs]
    states_integrated = integrate_multiple_guesses(
        state_guesses, times, ode_args
    )
    # print('states integrated')
    # print(states_integrated)
    states_integrated = jax.ops.index_update(
        states_integrated, jax.ops.index[0, :], state_guesses[0, :]
    )
    # print(states_integrated)
    # states_integrated = integrate_guesses(ode_fn, state_guesses[0, :], times)
    # states_integrated = jax.vmap(integrate_guesses,
    #                              (None, 0, 0))(ode_fn, state_guesses, times)
    # pos_guesses = state_guesses[:, 0:input_dim]
    # pos_guesses = pos_guesses[1:, :]
    # pos_integrated = states_integrated[:, 0:input_dim]

    error = np.subtract(states_integrated, state_guesses)
    # print('error')
    # print(error.shape)
    # error = jax.ops.index_update(error, jax.ops.index[-1, :],
    #                              np.zeros([2 * input_dim]))
    # error = jax.ops.index_update(error, jax.ops.index[-1, input_dim:],
    #                              np.zeros([input_dim]))

    print("Target pos error: ", error)
    error = error.reshape([-1])
    return error
Example no. 18
def get_equilibirum_constants(
    T: float,
    P: float,
    R: float,
    sdivR: np.ndarray,
    hdivRT: np.ndarray,
    gas_info: GasInfo,
) -> np.ndarray:
    """
    Calculate equilibrium constants
    returns: np.ndarray of equilibrium constants
    """
    vk = -gas_info.reactant_stioch_coeffs + gas_info.product_stioch_coeffs
    delta_entropies = np.matmul(vk.T, sdivR)
    delta_enthalpies = np.matmul(vk.T, hdivRT)
    Ka = np.exp(np.subtract(delta_entropies, delta_enthalpies))
    Kc = np.multiply(Ka, np.power(P / (R * T), vk.sum(0)))
    return Kc
Example no. 19
def get_production_rates(kf: np.ndarray, kr: np.ndarray, C: np.ndarray,
                         gas_info: GasInfo) -> ProductionRates:
    """
    Calculate net production rates from rate constants and concentrations.
    """
    vk = -gas_info.reactant_stioch_coeffs + gas_info.product_stioch_coeffs
    forward_rates_of_progress = kf * np.exp(
        np.matmul(np.log(C + 1e-300), gas_info.reactant_stioch_coeffs))
    reverse_rates_of_progress = kr * np.exp(
        np.matmul(np.log(C + 1e-300), gas_info.product_stioch_coeffs))
    qdot = np.subtract(forward_rates_of_progress, reverse_rates_of_progress)
    wdot = np.matmul(qdot, vk.T)
    return ProductionRates(
        forward_rates_of_progress=forward_rates_of_progress,
        reverse_rates_of_progress=reverse_rates_of_progress,
        qdot=qdot,
        wdot=wdot,
    )
Example no. 20
    def apply(self, x, noise_rng, n_actions):
        dense_layer_1 = flax.nn.Dense(x, 64)
        activation_layer_1 = flax.nn.relu(dense_layer_1)
        noisy_layer = NoisyDense(activation_layer_1, noise_rng, 64)
        activation_layer_2 = flax.nn.relu(noisy_layer)

        noisy_value = NoisyDense(activation_layer_2, noise_rng, 64)
        value = flax.nn.relu(noisy_value)
        value = NoisyDense(value, noise_rng, 1)

        noisy_advantage = NoisyDense(activation_layer_2, noise_rng, 64)
        advantage = flax.nn.relu(noisy_advantage)
        advantage = NoisyDense(advantage, noise_rng, n_actions)

        advantage_average = jnp.mean(advantage, keepdims=True)

        q_values_layer = jnp.subtract(jnp.add(advantage, value),
                                      advantage_average)
        return q_values_layer
Example no. 21
    def apply(self, x, n_actions):
        dense_layer_1 = flax.nn.Dense(x, 64)
        activation_layer_1 = flax.nn.relu(dense_layer_1)
        dense_layer_2 = flax.nn.Dense(activation_layer_1, 64)
        activation_layer_2 = flax.nn.relu(dense_layer_2)

        value_dense = flax.nn.Dense(activation_layer_2, 64)
        value = flax.nn.relu(value_dense)
        value = flax.nn.Dense(value, 1)

        advantage_dense = flax.nn.Dense(activation_layer_2, 64)
        advantage = flax.nn.relu(advantage_dense)
        advantage = flax.nn.Dense(advantage, n_actions)

        advantage_average = jnp.mean(advantage, keepdims=True)

        q_values_layer = jnp.subtract(jnp.add(advantage, value),
                                      advantage_average)
        return q_values_layer
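For reference, a hedged numeric sketch (not from the source) of the dueling aggregation Q(s, a) = V(s) + A(s, a) - mean_a A(s, a) that both `apply` methods compute. The sketch averages explicitly over the action axis (`axis=-1`); the snippets above call `jnp.mean` with `keepdims=True` only, which averages over every element and therefore coincides with the per-state mean only for a single state.

value = jnp.array([[1.0]])                   # V(s), shape (1, 1)
advantage = jnp.array([[0.5, -0.5, 1.0]])    # A(s, a), shape (1, n_actions)
q_values = jnp.subtract(jnp.add(advantage, value),
                        jnp.mean(advantage, axis=-1, keepdims=True))
# q_values ≈ [[1.167, 0.167, 1.667]]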
Example no. 22
def backpropagate_actor(optimizer, critic_model, props):
    # props[0] - states
    # props[1] - discounted_rewards
    # props[2] - actions
    values = jax.lax.stop_gradient(critic_model(props[0]))
    values = jnp.reshape(values, (values.shape[0], ))
    advantages = jnp.subtract(props[1], values)

    def loss_fn(model):
        action_probabilities = model(props[0])
        probabilities = gather(action_probabilities, props[2])
        log_probabilities = -jnp.log(probabilities)
        alpha = 0.4  # Entropy temperature
        entropies = -jnp.sum(jnp.multiply(action_probabilities,
                                          jnp.log(action_probabilities)),
                             axis=1) * alpha
        advantages_with_entropies = jnp.add(advantages, entropies)
        return jnp.mean(
            jnp.multiply(log_probabilities, advantages_with_entropies))

    loss, gradients = jax.value_and_grad(loss_fn)(optimizer.target)
    optimizer = optimizer.apply_gradient(gradients)
    return optimizer, loss
Example no. 23
    def backward(ctx):
        args = ctx.args
        spike_list = args[0][0]
        weight = args[1]
        fn, c, fh, fw = weight.shape
        gamma = spike_list[0]

        tau = jnp.divide(jnp.subtract(timestep, spike_list[1]), -self.tau)
        prime = jnp.multiply(jnp.exp(tau), (-1 / self.tau))
        aLIFnet = jnp.multiply(1 / self.Vth,
                               (1 + jnp.multiply(jnp.divide(1, gamma), prime)))
        self.grad = jnp.multiply(weight, aLIFnet)

        self.grad = self.grad.transpose(1, 0).reshape(fn, c, fh, fw)

        d_col = jnp.multiply(weight, aLIFnet)
        dx = col2im_indices(d_col, self.con.shape, fh, fw)

        return LIF_backward(self.tau,
                            self.Vth,
                            dx,
                            spike_list=self.spike_list,
                            e_grad=e_grad,
                            time=timestep), self.weight
Example no. 24
    def _sub(a, b):
        return jnp.subtract(a, b)
Example no. 25
    def call(self, y_true, y_preds):
        loss = self.reduction(jnp.abs(jnp.subtract(y_preds, y_true)))
        return loss
Example no. 26
    def second_step(self, lr, grads):
        for g, p in zip(grads, self.train_vars):
            p.value = jn.subtract(p.value, self.state[str(p.ref)]["e_w"])

        self.base_optimizer(lr, grads)
Example no. 27
def subtract(x1, x2):
  if isinstance(x1, JaxArray): x1 = x1.value
  if isinstance(x2, JaxArray): x2 = x2.value
  return JaxArray(jnp.subtract(x1, x2))
Example no. 28
    def forward(self, x, v_current, time_step):
        dV_tau = jnp.multiply(jnp.subtract(x, v_current), self.dt)
        dV = jnp.divide(dV_tau, self.tau_m)
        v_current = index_add(v_current, index[:], dV)
        return jnp.divide(v_current * jnp.exp(-1 / self.tau_m),
                          time_step * self.dt)
Example no. 29
    def objective(params, bparam, batch_input) -> float:
        targets = np.multiply(0.5, params[0])
        logits = np.divide(
            1, 1 + np.exp(-(np.multiply(5.0, params[0]) + bparam[0])))
        loss = np.mean(np.square(np.subtract(logits, targets)))
        return loss
Example no. 30
def subtract(a: Numeric, b: Numeric):
    return jnp.subtract(a, b)