Example #1
 def apply(
     self,
     x,
     noise_rng,
     features,
     sigma_init=0.017,
     use_bias=True,
     kernel_initializer=jax.nn.initializers.orthogonal(),
     bias_initializer=jax.nn.initializers.zeros,
 ):
     input_features = x.shape[-1]
     kernel_shape = (input_features, features)
     kernel = self.param('kernel', kernel_shape, kernel_initializer)
     sigma_kernel = self.param('sigma_kernel', kernel_shape,
                               sigma_initializer(value=sigma_init))
     perturbed_kernel = jnp.add(
         kernel,
         jnp.multiply(sigma_kernel,
                      jax.random.uniform(noise_rng, kernel_shape)))
     outputs = jnp.dot(x, perturbed_kernel)
     if use_bias:
         bias = self.param('bias', (features, ), bias_initializer)
         sigma_bias = self.param('sigma_bias', (features, ),
                                 sigma_initializer(value=sigma_init))
         perturbed_bias = jnp.add(
             bias,
             jnp.multiply(sigma_bias,
                          jax.random.uniform(noise_rng, (features, ))))
         outputs = jnp.add(outputs, perturbed_bias)
     return outputs
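The `sigma_initializer` used above is not defined in this excerpt; a minimal sketch of what such a constant-valued initializer could look like (an assumption, not the source's definition):

import jax.numpy as jnp

def sigma_initializer(value=0.017):
    # Hypothetical helper (not shown in the source): returns an initializer
    # that fills the parameter with the constant `value`, matching how it is
    # called for the sigma parameters above.
    def init(key, shape, dtype=jnp.float32):
        return jnp.full(shape, value, dtype)
    return init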
Example #2
def test_function():

    rng = jax.random.PRNGKey(42)

    y_true = jax.random.randint(rng, shape=(2, 3), minval=0, maxval=2)
    y_pred = jax.random.uniform(rng, shape=(2, 3))

    loss = elegy.losses.huber(y_true, y_pred, delta=1.0)
    assert loss.shape == (2,)

    y_pred = y_pred.astype(float)
    y_true = y_true.astype(float)
    delta = 1.0
    error = jnp.subtract(y_pred, y_true)
    abs_error = jnp.abs(error)
    quadratic = jnp.minimum(abs_error, delta)
    linear = jnp.subtract(abs_error, quadratic)
    assert jnp.array_equal(
        loss,
        jnp.mean(
            jnp.add(
                jnp.multiply(0.5, jnp.multiply(quadratic, quadratic)),
                jnp.multiply(delta, linear),
            ),
            axis=-1,
        ),
    )
Example #3
def lstm_step(params: dict, carry: tuple, x_t: np.ndarray) -> tuple:
    """
    One step of the LSTM.

    :param params: Dictionary of parameters.
    :param carry: h_t and c_t from the previous step.
        h_t is the hidden state vector,
        while c_t is the cell state vector.
    :param x_t: One row from the input data.
    :return: ((h_t, c_t), h_t) -- the new carry and the hidden state output.
    """
    # transpose x
    h_t, c_t = carry
    x_t = np.transpose(x_t)
    # concatenate the previous hidden state with new input
    h_t = np.concatenate([h_t, x_t])

    i_t = relu(np.dot(params["W_i"], h_t) + params["b_i"])
    ctilde_t = tanh(np.dot(params["W_c"], h_t) + params["b_c"])
    f_t = relu(np.dot(params["W_f"], h_t) + params["b_f"])
    c_t = np.multiply(f_t, c_t) + np.multiply(i_t, ctilde_t)  # forget gate scales the previous cell state

    o_t = relu(np.dot(params["W_o"], h_t) + params["b_o"])
    h_t = np.multiply(o_t, tanh(c_t))

    return (h_t, c_t), h_t
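A minimal usage sketch (not part of the original source): driving `lstm_step` over a sequence with `jax.lax.scan`, assuming `np` is `jax.numpy`, that `relu`/`tanh` helpers are in scope, and the hypothetical sizes below.

import jax
import jax.numpy as np

relu = jax.nn.relu  # assumed activation helpers used by lstm_step above
tanh = np.tanh

def init_lstm_params(key, n_hidden, n_input):
    # Each gate maps the concatenated [h_t; x_t] vector to an n_hidden vector.
    keys = jax.random.split(key, 4)
    params = {}
    for k, name in zip(keys, ["i", "c", "f", "o"]):
        params["W_" + name] = 0.1 * jax.random.normal(k, (n_hidden, n_hidden + n_input))
        params["b_" + name] = np.zeros(n_hidden)
    return params

key = jax.random.PRNGKey(0)
params = init_lstm_params(key, n_hidden=8, n_input=4)
xs = jax.random.normal(key, (16, 4))              # a sequence of 16 input rows
carry0 = (np.zeros(8), np.zeros(8))               # initial (h_t, c_t)
(h_T, c_T), hs = jax.lax.scan(lambda c, x: lstm_step(params, c, x), carry0, xs)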
Example #4
    def apply(self, x, features, bias=True, kernel_init=None):
        def sample_noise(shape):
            noise = jax.random.normal(random.PRNGKey(0), shape)
            return noise

        def f(x):
            return jnp.multiply(jnp.sign(x), jnp.power(jnp.abs(x), 0.5))

        # Initializer of \mu and \sigma

        def mu_init(key, shape):
            low = -1 * 1 / jnp.power(x.shape[1], 0.5)
            high = 1 * 1 / jnp.power(x.shape[1], 0.5)
            return onp.random.uniform(low, high, shape)

        def sigma_init(key, shape, dtype=jnp.float32):
            return jnp.ones(shape, dtype) * (0.1 / onp.sqrt(x.shape[1]))

        # Sample noise from gaussian
        p = sample_noise([x.shape[1], 1])
        q = sample_noise([1, features])
        f_p = f(p)
        f_q = f(q)
        w_epsilon = f_p * f_q
        b_epsilon = jnp.squeeze(f_q)
        w_mu = self.param('kernel', (x.shape[1], features), mu_init)
        w_sigma = self.param('kernell', (x.shape[1], features), sigma_init)
        w = w_mu + jnp.multiply(w_sigma, w_epsilon)
        ret = jnp.matmul(x, w)

        b_mu = self.param('bias', (features, ), mu_init)
        b_sigma = self.param('biass', (features, ), sigma_init)
        b = b_mu + jnp.multiply(b_sigma, b_epsilon)
        return jnp.where(bias, ret + b, ret)
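A shape-check sketch (an assumption, not from the source) of the factorized noise construction used above: per-input and per-output noise vectors are passed through f(x) = sign(x) * sqrt(|x|) and combined into a full weight-noise matrix by broadcasting.

import jax
import jax.numpy as jnp

def f(x):
    # mirrors the f() helper above
    return jnp.sign(x) * jnp.sqrt(jnp.abs(x))

key_p, key_q = jax.random.split(jax.random.PRNGKey(0))
p = jax.random.normal(key_p, (5, 1))   # per-input noise, shape (in_features, 1)
q = jax.random.normal(key_q, (1, 3))   # per-output noise, shape (1, out_features)
w_epsilon = f(p) * f(q)                # broadcasts to (in_features, out_features)
b_epsilon = jnp.squeeze(f(q))          # (out_features,)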
Example #5
def lstm_torch(params, state, x_t):
    """Implement the GRU equations.

    Arguments:
    params: dictionary of GRU parameters
    h: np array of  hidden state
    x: np array of input
    bfg: bias on forget gate (useful for learning if > 0.0)

    Returns:
    np array of hidden state after GRU update"""
    w = params['w']
    b = params['b']

    h_tm1, c_tm1 = np.split(state, 2)
    args = np.concatenate((x_t, h_tm1))

    out = np.matmul(args, w) + b

    i, j, f, o = np.split(out, 4)

    g = np.tanh(j)
    sigmoid_f = sigmoid(f + 1.)
    c_t = np.multiply(c_tm1, sigmoid_f) + np.multiply(sigmoid(i), g)
    h_t = np.multiply(np.tanh(c_t), sigmoid(o))

    return np.concatenate((h_t, c_t))
Example #6
    def T_seperate(params_all, x, dx, coeff_mask, z0):
        params_psi = params_all[:hyper_params['n_psi']]
        sindy_coeff = params_all[-1][0]

        z_opt = phi_vec(params_psi, x, z0)
        Theta = sindy_library(z_opt)

        dz = dz_func_vec(params_psi, x, dx, z0)

        dz_pred = dz_pred_vec(Theta, coeff_mask, sindy_coeff)
        x_rec = psi_vec(params_psi, z_opt)
        dx_rec = dx_network_vec(params_psi, z_opt, dz_pred)

        results = {}
        results['x_loss'] = jnp.mean(jnp.power(x - x_rec, 2))
        results['dx_loss'] = jnp.mean(jnp.power(dx - dx_rec, 2))
        results['dz_loss'] = jnp.mean(jnp.power(dz - dz_pred, 2))
        results['regul'] = jnp.mean(jnp.abs(sindy_coeff))

        dx_loss = jnp.multiply(hyper_params['eta1'], results['dx_loss'])
        dz_loss = jnp.multiply(hyper_params['eta2'], results['dz_loss'])
        regul = jnp.multiply(hyper_params['eta3'], results['regul'])

        results['loss'] = results['x_loss'] + dx_loss + dz_loss + regul
        results['x_rec'] = x_rec
        results['dx_rec'] = dx_rec
        results['z'] = z_opt
        results['dz'] = dz
        results['dz_pred'] = dz_pred
        return results
Example #7
def RBFKernel(X, Z, params: dict):
    """
    RBF Kernel - K(X,Z)
    :param X: Array of data points (x_i).
    :param Z: Array of data points (z_i).
    :param params: Parameters of the RBF Kernel.
    :return: RBFKernel of X and Z.
    """

    assert "length" in params
    assert "var" in params

    lengthscale = params["length"]
    var = params["var"]

    scaled_X = X / lengthscale
    scaled_Z = Z / lengthscale
    X2 = np.sum(np.multiply(scaled_X, scaled_X), 1,
                keepdims=True)  # row-wise sum of squares
    Z2 = np.sum(np.multiply(scaled_Z, scaled_Z), 1, keepdims=True)
    XZ = np.matmul(scaled_X, scaled_Z.T)

    K0 = X2 + Z2.T - 2 * XZ

    K = var * np.exp(-0.5 * K0)
    return K
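A quick usage sketch (an assumption, not from the source); the code above evaluates K[i, j] = var * exp(-0.5 * ||x_i - z_j||^2 / length^2).

import jax.numpy as np

# usage sketch (assumption): two 2-D inputs against one reference point
X = np.array([[0.0, 0.0], [1.0, 0.0]])
Z = np.array([[0.0, 1.0]])
params = {"length": 1.0, "var": 2.0}
K = RBFKernel(X, Z, params)   # shape (2, 1); K[0, 0] = 2 * exp(-0.5)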
Example #8
    def __next__(self) -> types.LabelledData:
        mnist_batch = next(self._iterator)
        mnist_image = mnist_batch['data']
        # Colors are produced by turning RGB channels on and off.
        # The possible colors are
        # [black (excluded), red, green, yellow, blue, magenta, cyan, white];
        # color_id takes values in [1, 8).
        color_id = np.random.randint(7, size=self._batch_size) + 1
        red_channel_bool = np.mod(color_id, 2)
        red_channel_bool = jnp.reshape(red_channel_bool, [-1, 1, 1, 1])
        blue_channel_bool = np.floor_divide(color_id, 4)
        blue_channel_bool = jnp.reshape(blue_channel_bool, [-1, 1, 1, 1])
        green_channel_bool = np.mod(np.floor_divide(color_id, 2), 2)
        green_channel_bool = jnp.reshape(green_channel_bool, [-1, 1, 1, 1])

        color_mnist_image = jnp.stack([
            jnp.multiply(red_channel_bool, mnist_image),
            jnp.multiply(blue_channel_bool, mnist_image),
            jnp.multiply(green_channel_bool, mnist_image)
        ],
                                      axis=3)
        color_mnist_image = jnp.reshape(color_mnist_image, [-1, 28, 28, 3])
        # color_id takes values in [1, 8); to make the classification code easier,
        # the `color` label attached to the data takes values in [0, 7)
        # (by subtracting 1 from the color id).
        return types.LabelledData(data=color_mnist_image,
                                  label=mnist_batch['label'])
Example #9
        def sample_scan(params, tup, x):
            """ Perform single step update of the network """
            _, (update_W, update_U,
                update_b), (reset_W, reset_U,
                            reset_b), (out_W, out_U, out_b), (sm_W,
                                                              sm_b) = params
            hidden = tup[3]
            logP = tup[2]
            key = tup[0]
            inp = tup[1]

            update_gate = sigmoid(
                np.dot(inp, update_W) + np.dot(hidden, update_U) + update_b)
            reset_gate = sigmoid(
                np.dot(inp, reset_W) + np.dot(hidden, reset_U) + reset_b)
            output_gate = np.tanh(
                np.dot(inp, out_W) +
                np.dot(np.multiply(reset_gate, hidden), out_U) + out_b)
            output = np.multiply(update_gate, hidden) + np.multiply(
                1 - update_gate, output_gate)
            hidden = output
            logits = np.dot(hidden, sm_W) + sm_b

            key, subkey = random.split(key)

            samples = random.categorical(
                subkey, logits, axis=1, shape=None)  # sampling the conditional
            samples = one_hot(
                samples, sm_b.shape[0])  # convert to one hot encoded vector
            log_P_new = np.sum(samples * log_softmax(logits), axis=1)
            log_P_new = log_P_new + logP  # update the value of the logP of the sample

            return (key, samples, log_P_new, output), samples
Example #10
def _momentum_angle(inverse_mass_matrix, r_left, r_right, r_sum):
    if isinstance(inverse_mass_matrix, dict):
        left_angle, right_angle = jnp.zeros(()), jnp.zeros(())
        for site_names, inverse_mm in inverse_mass_matrix.items():
            r_left_b = tuple(r_left[k] for k in site_names)
            r_right_b = tuple(r_right[k] for k in site_names)
            r_sum_b = tuple(r_sum[k] for k in site_names)
            left_a, right_a = _momentum_angle(inverse_mm, r_left_b, r_right_b,
                                              r_sum_b)
            left_angle = left_angle + left_a
            right_angle = right_angle + right_a
        return left_angle, right_angle

    r_left, _ = ravel_pytree(r_left)
    r_right, _ = ravel_pytree(r_right)
    r_sum, _ = ravel_pytree(r_sum)

    if inverse_mass_matrix.ndim == 2:
        v_left = jnp.matmul(inverse_mass_matrix, r_left)
        v_right = jnp.matmul(inverse_mass_matrix, r_right)
    elif inverse_mass_matrix.ndim == 1:
        v_left = jnp.multiply(inverse_mass_matrix, r_left)
        v_right = jnp.multiply(inverse_mass_matrix, r_right)
    else:
        raise ValueError("inverse_mass_matrix should have 1 or 2 dimensions.")

    # This implements dynamic termination criterion (ref [2], section A.4.2).
    r_sum = r_sum - (r_left + r_right) / 2
    return jnp.dot(v_left, r_sum), jnp.dot(v_right, r_sum)
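A minimal call sketch (an assumption, not from the source): with a diagonal (1-D) inverse mass matrix the returned angles are dot(inv_m * r_left, r_sum - (r_left + r_right) / 2) and the analogue for the right end of the trajectory.

import jax.numpy as jnp

# call sketch (assumption): diagonal inverse mass matrix and toy momenta
inv_m = jnp.ones(3)
r_left = jnp.array([1.0, 0.0, 0.0])
r_right = jnp.array([0.0, 1.0, 0.0])
left_angle, right_angle = _momentum_angle(inv_m, r_left, r_right, r_left + r_right)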
Example #11
    def weightmask_multiply(g):
        g['change'] = ops.index_update(g['change'], ops.index[:, :],
                                       np.multiply(g['change'], weightfreeze_mask['change']))
        g['predict'] = ops.index_update(g['predict'], ops.index[:, :],
                                        np.multiply(g['predict'], weightfreeze_mask['predict']))

        return g
Example #12
def helmholtz(array,
              k,
              step=1.0,
              aspect_ratio=1.0,
              mask_f=make_mask,
              mask_f_dual=make_mask_dual):
    """Finite difference approx of the helmholtz operator in 2D."""
    if array.ndim == 2:
        kernel = np.array([[0, 1, 0], [1, -4 + np.sign(k) * k**2 * step**2, 1],
                           [0, 1, 0]])
    else:
        raise NotImplementedError
    mask = mask_f(array.shape[0], aspect_ratio)
    array_masked = np.multiply(array, mask)
    mask_dual = mask_f_dual(array.shape[0], aspect_ratio)
    arr2 = np.multiply(array, mask_dual)
    lhs = array_masked[np.newaxis, np.newaxis, Ellipsis]
    rhs = kernel[np.newaxis, np.newaxis, Ellipsis] / step**2
    result = jax.lax.conv(lhs,
                          rhs,
                          window_strides=(1, ) * array.ndim,
                          padding='SAME')
    squeezed = np.squeeze(result, axis=(0, 1))
    squeezed = np.multiply(squeezed, mask)
    return squeezed + arr2
Example #13
    def to_quantized(self, x, *, dtype):
        """Quantizes the argument to the target format.

    integer: "upscales", rounds or floors and clips.
    floating-point: optionally upscales, then downcasts to target precision.

    Args:
      x: Argument to be quantized.
      dtype: Type of returned quantized value of x. If quantized x is an input
        to a matmul, we might want to set it to jnp.int8. If quantized x is
        weights stored in memory, same applies. In fake_quant style we might
        prefer to set dtype=SCALE_DTYPE, since quantized x might get constant
        folded with rescale op (`from_quantized`). Please take a look at the
        comment on SCALE_DTYPE.

    Returns:
      Quantized value of x.
    """
        if isinstance(self._prec, _FloatQuant):
            if self._prec.is_scaled:
                x = jnp.multiply(x, self._scale).astype(x.dtype)
            fp_spec = self._prec.fp_spec
            return fp_cast.downcast_sat_ftz(
                x,
                fp_spec.exp_min,
                fp_spec.exp_max,
                fp_spec.sig_bits,
            )
        else:
            if self._symmetric:
                quantize = primitives.round_and_clip_to_signed_int
            else:
                quantize = primitives.floor_and_clip_to_unsigned_int
            scaled_x = jnp.multiply(x, self._scale)
            return quantize(scaled_x, prec=self._prec, dtype=dtype)
Example #14
def troe_falloff_correction(
    T: float, lPr: np.ndarray, troe_coeffs: np.ndarray, troe_indices: np.ndarray
) -> np.ndarray:
    """
    modify rate constants use TROE falloff parameters
    returns: np.ndarray of F(T,P) 
    """
    troe_coeffs = troe_coeffs[troe_indices]
    F_cent = (
        np.multiply(
            np.subtract(1, troe_coeffs[:, 0]), np.exp(np.divide(-T, troe_coeffs[:, 3]))
        )
        + np.multiply(troe_coeffs[:, 0], np.exp(np.divide(-T, troe_coeffs[:, 1])))
        + np.exp(np.divide(-troe_coeffs[:, 2], T))
    )
    lF_cent = np.log10(F_cent)
    C = np.subtract(-0.4, np.multiply(0.67, lF_cent))
    N = np.subtract(0.75, np.multiply(1.27, lF_cent))
    f1_numerator = lPr + C
    f1_denominator_1 = N
    f1_denominator_2 = np.multiply(0.14, f1_numerator)
    f1 = np.divide(f1_numerator, np.subtract(f1_denominator_1, f1_denominator_2))
    F = np.power(10.0, np.divide(lF_cent, (1.0 + np.square(f1))))
    # F = 10**(lF_cent / (1. + f1**2.))
    return F
Example #15
def huber_loss(labels, predictions, weights=1.0, delta=1.0):
    """Adds a Huber Loss term to the training procedure.

  For each value x in `error=labels-predictions`, the following is calculated:
  ```
    0.5 * x^2                  if |x| <= d
    0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```
  where d is `delta`.
  See: https://en.wikipedia.org/wiki/Huber_loss
  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.
  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    delta: `float`, the point where the huber loss function
      changes from a quadratic to linear.
  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.
  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or
     `predictions` is None.
  """
    if labels is None:
        raise ValueError('labels must not be None.')
    if predictions is None:
        raise ValueError('predictions must not be None.')
    predictions = predictions.astype(jnp.float32)
    labels = labels.astype(jnp.float32)
    # TODO(deveci): check for shape compatibility
    # predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = jnp.subtract(predictions, labels)
    abs_error = jnp.abs(error)
    quadratic = jnp.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already a
    # nonzero contribution to the gradient from the quadratic term.
    linear = jnp.subtract(abs_error, quadratic)
    losses = jnp.add(jnp.multiply(0.5, jnp.multiply(quadratic, quadratic)),
                     jnp.multiply(delta, linear))

    input_dtype = losses.dtype
    losses = losses.astype(jnp.float32)
    weights = jnp.asarray(weights, jnp.float32)
    weighted_losses = jnp.multiply(losses, weights)
    loss = weighted_losses.astype(input_dtype)
    return loss
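A worked check (not from the source): with delta = 1.0 an error of 0.5 stays in the quadratic branch (0.5 * 0.5^2 = 0.125), while an error of 2.0 falls in the linear branch (0.5 * 1^2 + 1 * (2 - 1) = 1.5).

import jax.numpy as jnp

# worked example (assumption): per-element losses, no reduction
labels = jnp.array([0.0, 0.0])
predictions = jnp.array([0.5, 2.0])
losses = huber_loss(labels, predictions, weights=1.0, delta=1.0)  # ~[0.125, 1.5]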
Example #16
 def grad_fn(grad_outputs, s_time_list, time, tau_m, gamma, Vth):
     return jnp.multiply(
         grad_outputs, 1 / Vth * (1 + jnp.multiply(
             1 / gamma,
             jnp.sum(
                 jnp.multiply(
                     -1 / tau_m,
                     jnp.exp((-1 / tau_m) * (time - s_time_list)))))))
Example #17
def simple_energy(conf, charge_params, exclusion_idxs, charge_scales, cutoff):
    """
    Numerically stable implementation of the pairwise term:
    
    eij = qi*qj/dij

    """

    box = None
    # charges = params[param_idxs]
    charges = charge_params
    qi = np.expand_dims(charges, 0)  # (1, N)
    qj = np.expand_dims(charges, 1)  # (N, 1)
    qij = np.multiply(qi, qj)
    ri = np.expand_dims(conf, 0)
    rj = np.expand_dims(conf, 1)

    assert box is None

    dij = distance(ri, rj, box)

    # (ytz): trick used to avoid nans in the diagonal due to the 1/dij term.
    keep_mask = 1 - np.eye(conf.shape[0])
    qij = np.where(keep_mask, qij, np.zeros_like(qij))
    dij = np.where(keep_mask, dij, np.zeros_like(dij))
    eij = np.where(keep_mask, qij / dij,
                   np.zeros_like(dij))  # zero out diagonals

    # print(dij)

    if cutoff is not None:
        # sw = switch_fn(dij, cutoff)
        # eij = eij*sw
        eij = np.where(dij > cutoff, np.zeros_like(eij), eij)

    src_idxs = exclusion_idxs[:, 0]
    dst_idxs = exclusion_idxs[:, 1]
    ri = conf[src_idxs]
    rj = conf[dst_idxs]
    dij = distance(ri, rj, box)

    qi = charges[src_idxs]
    qj = charges[dst_idxs]
    qij = np.multiply(qi, qj)

    scale_ij = charge_scales
    eij_exc = scale_ij * qij / dij

    if cutoff is not None:
        # sw = switch_fn(dij, cutoff)
        # eij_exc = eij_exc*sw
        eij_exc = np.where(dij > cutoff, np.zeros_like(eij_exc), eij_exc)
        eij_exc = np.where(src_idxs == dst_idxs, np.zeros_like(eij_exc),
                           eij_exc)

    return np.sum(eij / 2) - np.sum(eij_exc)
Example #18
    def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
        if not inputs.shape:
            raise ValueError("Input must not be scalar.")

        input_size = self.input_size = inputs.shape[-1]
        output_size = self.output_size
        dtype = inputs.dtype

        w_init = self.w_init
        if w_init is None:
            stddev = 1. / np.sqrt(self.input_size)
            w_init = hk.initializers.TruncatedNormal(stddev=stddev)
        w = hk.get_parameter("w", [input_size, output_size],
                             dtype,
                             init=w_init)
        out = jnp.dot(inputs, w)

        if self.with_bias:
            b = hk.get_parameter("b", [self.output_size],
                                 dtype,
                                 init=self.b_init)
            b = jnp.broadcast_to(b, out.shape)
            out = out + b

        w_mu_init = self.w_mu_init
        if w_mu_init is None:
            stddev = 1. / np.sqrt(self.input_size)
            w_mu_init = hk.initializers.TruncatedNormal(stddev=stddev)
        w_mu = hk.get_parameter("w_mu", [input_size, output_size],
                                dtype,
                                init=w_mu_init)
        w_sigma_init = self.w_sigma_init
        if w_sigma_init is None:
            stddev = 1. / np.sqrt(self.input_size)
            w_sigma_init = hk.initializers.TruncatedNormal(stddev=stddev)
        w_sigma = hk.get_parameter("w_sigma", [input_size, output_size],
                                   dtype,
                                   init=w_sigma_init)
        w_noise = jax.random.normal(next(self.rng), w_sigma.shape)
        out_noisy = jnp.dot(inputs,
                            jnp.add(w_mu, jnp.multiply(w_sigma, w_noise)))

        if self.with_bias:
            b_mu = hk.get_parameter("b_mu", [self.output_size],
                                    dtype,
                                    init=self.b_mu_init)
            b_sigma = hk.get_parameter("b_sigma", [self.output_size],
                                       dtype,
                                       init=self.b_sigma_init)
            b_mu = jnp.broadcast_to(b_mu, out.shape)
            b_sigma = jnp.broadcast_to(b_sigma, out.shape)
            b_noise = jax.random.normal(next(self.rng), b_sigma.shape)
            out_noisy = out_noisy + jnp.add(b_mu, jnp.multiply(
                b_sigma, b_noise))

        return out + out_noisy
Example #19
    def backward(self, time, spike_list, weights, e_gradient):
        gamma = spike_list[0]
        t_Tk_divby_tau_m = jnp.divide(
            jnp.subtract(time, spike_list[1]), -self.tau_m)
        f_prime_t = jnp.multiply(jnp.exp(t_Tk_divby_tau_m), (-1 / self.tau_m))
        aLIFnet = jnp.multiply(
            1 / self.Vth, (1 + jnp.multiply(jnp.divide(1, gamma), f_prime_t)))
        d_w = jnp.matmul(weights, e_gradient)

        return jnp.multiply(d_w, aLIFnet)
Example #20
def single_matrix_correlators(n, alpha, g, t1, t2):
    """This is a the function the we want to abstract, so we can explore more
  interesting models.

  This function is jitted for all but the final arg.

  """
    s1 = np.multiply(t1, alpha)
    s2 = np.multiply(t2, np.square(alpha))
    xs = initial_state(s1, s2, n)
    return correlators(g, alpha, xs)
Example #21
 def loss_fn(model):
     action_probabilities = model(props[0])
     probabilities = gather(action_probabilities, props[2])
     log_probabilities = -jnp.log(probabilities)
     alpha = 0.4  # Entropy temperature
     entropies = -jnp.sum(jnp.multiply(action_probabilities,
                                       jnp.log(action_probabilities)),
                          axis=1) * alpha
     advantages_with_entropies = jnp.add(advantages, entropies)
     return jnp.mean(
         jnp.multiply(log_probabilities, advantages_with_entropies))
Example #22
def H_ising_1(grid: np.array) -> np.float32:
    """Calculates Hamiltonian for an Ising model with first-order neighbors

    :param grid: grid with spins
    :type grid: np.array
    :return: value of Hamiltonian
    :rtype: np.float32
    """
    x = np.roll(grid, 1, axis=1)
    y = np.roll(grid, 1, axis=0)
    x = np.sum(np.multiply(grid, x))  # Ising
    y = np.sum(np.multiply(grid, y))
    return -(x+y).astype(np.float32)
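A worked check (not from the source): for an all-up 2x2 grid every rolled product is +1, so each direction contributes 4 and H = -(4 + 4) = -8.

import jax.numpy as np

# worked check (assumption): uniform spin-up grid
grid = np.ones((2, 2))
print(H_ising_1(grid))  # -8.0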
Example #23
def is_u_turn(
    initial_position: np.DeviceArray,
    position: np.DeviceArray,
    inverse_mass_matrix: np.DeviceArray,
    momentum: np.DeviceArray,
) -> bool:
    """Detect when the trajectory starts turning back towards the point
    where it started.
    """
    v = np.multiply(inverse_mass_matrix, momentum)
    position_vec = position - initial_position
    # The U-turn criterion projects the displacement onto the (mass-weighted)
    # momentum; a dot product (rather than an elementwise product) yields the
    # scalar boolean promised by the signature.
    projection = np.dot(position_vec, v)
    return projection < 0
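A quick sanity check (an assumption, not from the source): moving away from the starting point along the mass-weighted momentum is not a U-turn.

import jax.numpy as np

# sanity check (assumption): displacement aligned with the momentum
initial_position = np.zeros(2)
position = np.array([1.0, 0.0])
inverse_mass_matrix = np.ones(2)
momentum = np.array([1.0, 0.0])
print(is_u_turn(initial_position, position, inverse_mass_matrix, momentum))  # False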
Example #24
    def __init__(self,
                 sampler,
                 snrTol=2,
                 svdTol=1e-14,
                 makeReal='imag',
                 rhsPrefactor=1.j,
                 diagonalShift=0.):

        self.sampler = sampler
        self.snrTol = snrTol
        self.svdTol = svdTol
        self.diagonalShift = diagonalShift
        self.rhsPrefactor = rhsPrefactor

        self.makeReal = realFun
        if makeReal == 'imag':
            self.makeReal = imagFun

        if global_defs.usePmap:
            self.subtract_helper_Eloc = global_defs.pmap_for_my_devices(
                lambda x, y: x - y, in_axes=(0, None))
            self.subtract_helper_grad = global_defs.pmap_for_my_devices(
                lambda x, y: x - y, in_axes=(0, None))
            self.get_EO = global_defs.pmap_for_my_devices(
                lambda f, Eloc, grad: -f * jnp.multiply(
                    Eloc[:, None], jnp.conj(grad)),
                in_axes=(None, 0, 0),
                static_broadcasted_argnums=(0))
            self.get_EO_p = global_defs.pmap_for_my_devices(
                lambda f, p, Eloc, grad: -f * jnp.multiply(
                    (p * Eloc)[:, None], jnp.conj(grad)),
                in_axes=(None, 0, 0, 0),
                static_broadcasted_argnums=(0))
            self.transform_EO = global_defs.pmap_for_my_devices(
                lambda eo, v: jnp.matmul(eo, jnp.conj(v)), in_axes=(0, None))
        else:
            self.subtract_helper_Eloc = global_defs.jit_for_my_device(
                lambda x, y: x - y)
            self.subtract_helper_grad = global_defs.jit_for_my_device(
                lambda x, y: x - y)
            self.get_EO = global_defs.jit_for_my_device(
                lambda f, Eloc, grad: -f * jnp.multiply(
                    Eloc[:, None], jnp.conj(grad)),
                static_argnums=(0))
            self.get_EO_p = global_defs.jit_for_my_device(
                lambda f, p, Eloc, grad: -f * jnp.multiply(
                    (p * Eloc)[:, None], jnp.conj(grad)),
                static_argnums=(0))
            self.transform_EO = global_defs.jit_for_my_device(
                lambda eo, v: jnp.matmul(eo, jnp.conj(v)))
Example #25
def signed_torsion_angle(ci, cj, ck, cl):
    """
    Batch compute the signed angle of a torsion angle.  The torsion angle
    between two planes should be periodic but not necessarily symmetric.

    Parameters
    ----------
    ci: shape [num_torsions, 3] np.array
        coordinates of the 1st atom in the 1-4 torsion angle

    cj: shape [num_torsions, 3] np.array
        coordinates of the 2nd atom in the 1-4 torsion angle

    ck: shape [num_torsions, 3] np.array
        coordinates of the 3rd atom in the 1-4 torsion angle

    cl: shape [num_torsions, 3] np.array
        coordinates of the 4th atom in the 1-4 torsion angle

    Returns
    -------
    shape [num_torsions,] np.array
        array of torsion angles.

    """

    # Taken from the wikipedia arctan2 implementation:
    # https://en.wikipedia.org/wiki/Dihedral_angle

    # We use an identical but numerically stable arctan2
    # implementation as opposed to the OpenMM energy function to
    # avoid a singularity when the angle is zero.

    rij = delta_r(cj, ci)
    rkj = delta_r(cj, ck)
    rkl = delta_r(cl, ck)

    n1 = np.cross(rij, rkj)
    n2 = np.cross(rkj, rkl)

    lhs = np.linalg.norm(n1, axis=-1)
    rhs = np.linalg.norm(n2, axis=-1)
    bot = lhs * rhs

    y = np.sum(np.multiply(np.cross(n1, n2),
                           rkj / np.linalg.norm(rkj, axis=-1, keepdims=True)),
               axis=-1)
    x = np.sum(np.multiply(n1, n2), -1)

    return np.arctan2(y, x)
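A usage sketch (an assumption, not from the source; it relies on the `delta_r` displacement helper being in scope): a planar trans arrangement of four atoms gives a dihedral of magnitude pi.

import jax.numpy as np

# usage sketch (assumption): a single planar trans geometry
ci = np.array([[0.0, 1.0, 0.0]])
cj = np.array([[0.0, 0.0, 0.0]])
ck = np.array([[1.0, 0.0, 0.0]])
cl = np.array([[1.0, -1.0, 0.0]])
angles = signed_torsion_angle(ci, cj, ck, cl)   # magnitude ~pi for this geometry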
Example #26
def MSAWeight_PB(msa):
    gap_idx = msa.abc.charmap['-']
    q = msa.abc.q
    ax = msa.ax
    (N, L) = ax.shape

    ## step 1: get counts:

    c = np.sum(msa.ax_1hot, axis=0)

    # set gap counts to 0
    c = index_update(c, index[:, gap_idx], 0)

    # get N x L array with count value for corresponding residue in alignment
    # first, get  N x L "column id" array (convenient for vmap)
    # col_id[n,i] = i
    col_id = np.int16(np.tensordot(np.ones(N), np.arange(L), axes=0))
    # ax_c[n, i] = c[i, ax[n,i]]
    ax_c = Get_Henikoff_Counts_Residue(col_id, ax, c)

    ## step 2: get number of unique characters in each column
    r = np.float32(np.sum(np.array(c > 0), axis=1))

    # transform r from an L-length array to an NxL array, where r2[n,i] = r[i]
    # will allow for easy elementwise operations with ax_c
    r2 = np.tensordot(np.ones(N), r, axes=0)

    ## step 3: get ungapped seq lengths
    nongap = np.array(ax != gap_idx)
    l = np.float32(np.sum(nongap, axis=1))

    ## step 4: calculate unnormalized weights
    # get array of main terms in the Henikoff sum
    # wgt_un[n,i] = 1 / (r[i] * c[i, ax[n,i]])
    wgt_un = np.reciprocal(np.multiply(ax_c, r2))

    # set all terms involving  gap to zero
    wgt_un = np.nan_to_num(np.multiply(wgt_un, nongap))

    # sum across all positions to get the preliminary unnormalized weight for each sequence
    wgt_un = np.sum(wgt_un, axis=1)

    # divide by gapless sequence length
    wgt_un = np.divide(wgt_un, l)

    ## step 5: normalize sequence weights
    wgt = (wgt_un * np.float32(N)) / np.sum(wgt_un)
    msa.wgt = wgt

    return
Example #27
    def __call__(self, x):
        def sample_noise(rng_input, shape):
            noise = jax.random.normal(rng_input, shape)
            return noise

        def f(x):
            return jnp.multiply(jnp.sign(x), jnp.power(jnp.abs(x), 0.5))

        # Initializer of \mu and \sigma

        def mu_init(key, shape, rng):
            low = -1 * 1 / jnp.power(x.shape[-1], 0.5)
            high = 1 * 1 / jnp.power(x.shape[-1], 0.5)
            return random.uniform(rng,
                                  shape=shape,
                                  dtype=jnp.float32,
                                  minval=low,
                                  maxval=high)

        def sigma_init(key, shape, dtype=jnp.float32):
            return jnp.ones(shape, dtype) * (0.1 / jnp.sqrt(x.shape[-1]))

        rng, rng2, rng3, rng4, rng5 = jax.random.split(self.rng, 5)

        if prn_inf["count"] == 0:
            prn_inf["rng2_"] = rng2
            prn_inf["rng3_"] = rng3
            prn_inf["count"] = prn_inf["count"] + 1

        # Sample noise from gaussian
        p = sample_noise(prn_inf["rng2_"], [x.shape[-1], 1])
        q = sample_noise(prn_inf["rng3_"], [1, self.features])
        f_p = f(p)
        f_q = f(q)

        w_epsilon = f_p * f_q
        b_epsilon = jnp.squeeze(f_q)
        w_mu = self.param('kernel', mu_init, (x.shape[-1], self.features),
                          rng4)
        w_sigma = self.param('kernell', sigma_init,
                             (x.shape[-1], self.features))
        w = w_mu + jnp.multiply(w_sigma, w_epsilon)
        ret = jnp.matmul(x, w)

        b_mu = self.param('bias', mu_init, (self.features, ), rng5)
        b_sigma = self.param('biass', sigma_init, (self.features, ))
        b = b_mu + jnp.multiply(b_sigma, b_epsilon)

        return jnp.where(self.bias_in, ret + b, ret)
Example #28
    def body_fun(i, val):
        s_q, s_u = val

        t_Q = q_operationA + jnp.multiply(q_operationB, s_q)
        t_U = u_operationA + jnp.multiply(u_operationB, s_u)
        # in E, B representation
        t_E, t_B = ks93(t_Q, t_U)
        s_E = (scov_ft_E / (scov_ft_E + tcov_ft)) * jnp.fft.fft2(t_E)
        s_B = (scov_ft_B / (scov_ft_B + tcov_ft)) * jnp.fft.fft2(t_B)
        s_E = jnp.fft.ifft2(s_E)
        s_B = jnp.fft.ifft2(s_B)
        # in Q, U representation
        s_q, s_u = ks93inv(s_E, s_B)

        return (s_q, s_u)
Example #29
def _V_Cycle(x, f, num_cycle, shapebc='R', k=0, aspect_ratio=1.0):
    # https://en.wikipedia.org/wiki/Multigrid_method
    # Pre-Smoothing
    # bc are not included

    h = 1.0 / (x.shape[0] + 1)
    if shapebc == 'R':
        mask_f = equations.make_mask
        mask_f_dual = equations.make_mask_dual
    elif shapebc == 'L':
        mask_f = equations.make_mask_L
        mask_f_dual = equations.make_mask_L_dual
    x = smoothing_helmholtz(f, h, x, k, aspect_ratio, shapebc)

    # Compute Residual Errors

    # no bc here because we assume they are 0
    r = f - equations.helmholtz(x,
                                k,
                                step=h,
                                aspect_ratio=aspect_ratio,
                                mask_f=mask_f,
                                mask_f_dual=mask_f_dual)
    # Restriction from h to 2h
    rhs = restriction(r)
    eps = np.zeros(rhs.shape)
    mask = mask_f(eps.shape[0], aspect_ratio)
    eps = np.multiply(eps, mask)
    # stop recursion after 3 cycles
    if num_cycle == 3:

        eps = smoothing_helmholtz(rhs, 2 * h, eps, k, aspect_ratio, shapebc)
    else:
        eps = _V_Cycle(eps,
                       rhs,
                       num_cycle + 1,
                       shapebc,
                       k=k,
                       aspect_ratio=aspect_ratio)

    # Prolongation and Correction
    x = x + prolongation(eps)
    mask = mask_f(x.shape[0], aspect_ratio)
    x = np.multiply(x, mask)
    # Post-Smoothing
    x = smoothing_helmholtz(f, h, x, k, aspect_ratio, shapebc)

    return x
Example #30
def _is_turning(inverse_mass_matrix, r_left, r_right, r_sum):
    r_left, _ = ravel_pytree(r_left)
    r_right, _ = ravel_pytree(r_right)
    r_sum, _ = ravel_pytree(r_sum)

    if inverse_mass_matrix.ndim == 2:
        v_left = np.matmul(inverse_mass_matrix, r_left)
        v_right = np.matmul(inverse_mass_matrix, r_right)
    elif inverse_mass_matrix.ndim == 1:
        v_left = np.multiply(inverse_mass_matrix, r_left)
        v_right = np.multiply(inverse_mass_matrix, r_right)

    # This implements dynamic termination criterion (ref [2], section A.4.2).
    turning_at_left = np.dot(v_left, r_sum - r_left) <= 0
    turning_at_right = np.dot(v_right, r_sum - r_right) <= 0
    return turning_at_left | turning_at_right