Example #1
def make_event(xarr):
    """Generate event kinematics"""
    shat, jac, x1, x2 = get_x1x2(xarr)

    mV = tf.sqrt(shat * x1 * x2)
    mV2 = mV*mV
    ecmo2 = mV/2
    zeros = tf.zeros_like(ecmo2, dtype=DTYPE)

    p0 = tf.stack([ecmo2, zeros, zeros, ecmo2])
    p1 = tf.stack([ecmo2, zeros, zeros,-ecmo2])

    pV = p0 + p1
    YV = 0.5 * tf.math.log(tf.abs((pV[0] + pV[3])/(pV[0] - pV[3])))
    pVt2 = tf.square(pV[1]) + tf.square(pV[2])
    phi = 2 * np.pi * xarr[:, 3]
    ptmax = 0.5 * mV2 / (tf.sqrt(mV2 + pVt2) - (pV[1]*tf.cos(phi) + pV[2]*tf.sin(phi)))
    pta = ptmax * xarr[:, 2]
    pt = tf.stack([zeros, pta*tf.cos(phi), pta*tf.sin(phi), zeros])
    Delta = (mV2 + 2 * (pV[1]*pt[1] + pV[2]*pt[2]))/2.0/pta/tf.sqrt(mV2 + pVt2)
    y = YV - tf.acosh(Delta)
    kallenF = 2.0 * ptmax/tf.sqrt(mV2 + pVt2)/tf.abs(tf.sinh(YV-y))

    p2 = tf.stack([pta*tf.cosh(y), pta*tf.cos(phi), pta*tf.sin(phi),pta*tf.sinh(y)])
    p3 = pV - p2

    psw = kallenF / (8 * np.pi)  # phase-space weight
    psw *= jac  # jacobian for tau and y
    flux = 1 / (2 * mV2)  # flux factor

    return psw, flux, p0, p1, p2, p3, x1, x2
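
For context, a minimal standalone sketch (assuming TensorFlow 2.x; the values below are illustrative, not part of the example) of the massless-particle kinematics used above: E = pT * cosh(y), pz = pT * sinh(y), and the YV line inverts them via 0.5 * log((E + pz) / (E - pz)).

import tensorflow as tf

pt = tf.constant(40.0, dtype=tf.float64)   # transverse momentum
y = tf.constant(1.3, dtype=tf.float64)     # rapidity
E = pt * tf.cosh(y)                        # energy of a massless particle
pz = pt * tf.sinh(y)                       # longitudinal momentum
y_back = 0.5 * tf.math.log((E + pz) / (E - pz))
# y_back ≈ y, and E**2 - pz**2 ≈ pt**2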
Example #2
def sample_gauss_pd(shape, oversample=5, radius=0.85):
    # rejection sample in Poincaré disk
    d = shape[1]
    n_samples = tf.reduce_prod(shape) * oversample
    phi = tf.random_uniform([n_samples]) * np.pi
    p = tf.random_uniform([n_samples])
    r = tf.acosh(1 + p *
                 (tf.cosh(radius - 1e-5) - 1))  # support (-3sigma, 3sigma)
    unif_samples = tf.stack(
        [tf.sinh(r) * tf.cos(phi),
         tf.sinh(r) * tf.sin(phi)], axis=1)

    # zero mean, unit sigma in half plane
    mean = tf.constant(np.array([[0.0, 0.0]]), tf.float32)
    sigma = 1.0

    p_samples = gauss_prob_pd(unif_samples, mean, sigma)
    # accept in proportion to highest
    max_value = tf.reduce_max(p_samples)
    accepted = tf.squeeze(
        tf.where(tf.random_uniform([n_samples]) < (p_samples / max_value)), 1)
    # select the samples - make sure it's enough
    u = tf.boolean_mask(unif_samples[:, 0], accepted)
    v = tf.boolean_mask(unif_samples[:, 1], accepted)
    # transform samples using cayley mapping z = (w-i)/(w+i) - project to unit disk
    disk_samples = tf.stack([u, v], axis=1)
    idx = tf.cast(tf.range(tf.reduce_prod(shape) / 2), tf.int32)
    return tf.reshape(tf.gather(disk_samples, idx), shape)
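
Aside (not part of the function above): the line r = tf.acosh(1 + p * (tf.cosh(radius - 1e-5) - 1)) is inverse-CDF sampling of the hyperbolic radial density sinh(r) dr on [0, radius], whose CDF is (cosh(r) - 1) / (cosh(radius) - 1). A quick standalone check, assuming plain TensorFlow 2.x:

import tensorflow as tf

R = 0.85
u = tf.random.uniform([100000])
r = tf.acosh(1.0 + u * (tf.cosh(R) - 1.0))
empirical = tf.reduce_mean(tf.cast(r <= R / 2.0, tf.float32))
expected = (tf.cosh(R / 2.0) - 1.0) / (tf.cosh(R) - 1.0)
# empirical ≈ expected, up to Monte Carlo noise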
Example #3
def displaced_squeezed(alpha, r, phi, D, pure=True, batched=False, eps=1e-12):
    """creates a single mode input displaced squeezed state"""
    alpha = tf.cast(alpha, def_type)
    r = tf.cast(r, def_type) + eps  # prevent NaNs when r == 0 (the default eps is minuscule)
    phi = tf.cast(phi, def_type)

    phase = tf.exp(1j * phi)
    sinh = tf.sinh(r)
    cosh = tf.cosh(r)
    tanh = tf.tanh(r)

    # create Hermite polynomials
    gamma = alpha * cosh + tf.conj(alpha) * phase * sinh
    hermite_arg = gamma / tf.sqrt(phase * tf.sinh(2 * r))

    prefactor = tf.expand_dims(
        tf.exp(-0.5 * alpha * tf.conj(alpha) -
               0.5 * tf.conj(alpha)**2 * phase * tanh), -1)
    coeff = tf.stack(
        [
            _numer_safe_power(0.5 * phase * tanh, n / 2.0) /
            tf.sqrt(factorial(n) * cosh) for n in range(D)
        ],
        axis=-1,
    )
    hermite_terms = tf.stack(
        [tf.cast(H(n, hermite_arg), def_type) for n in range(D)], axis=-1)
    squeezed_coh = prefactor * coeff * hermite_terms

    if not pure:
        squeezed_coh = mixed(squeezed_coh, batched)
    return squeezed_coh
Example #4
def get_infos2Convection_5D(input_dim=1,
                            out_dim=1,
                            mesh_number=2,
                            intervalL=0.0,
                            intervalR=1.0,
                            equa_name=None):
    if equa_name == 'Boltzmann1':
        lam = 2
        mu = 30
        f = lambda x, y: (lam * lam + mu * mu) * (tf.sin(mu * x) + tf.sin(mu * y))
        A_eps = lambda x, y: 1.0 * tf.ones_like(x)
        kappa = lambda x, y: lam * lam * tf.ones_like(x)
        u = lambda x, y: -1.0 * (np.sin(mu) / np.sinh(lam)) * tf.sinh(lam * x) + tf.sin(mu * x) \
            - 1.0 * (np.sin(mu) / np.sinh(lam)) * tf.sinh(lam * y) + tf.sin(mu * y)
        u_00 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_01 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_10 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_11 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_20 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_21 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_30 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_31 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_40 = lambda x, y, z, s, t: tf.zeros_like(x)
        u_41 = lambda x, y, z, s, t: tf.zeros_like(x)

    return A_eps, kappa, u, f, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31, u_40, u_41
Example #5
def get_infos2Convection_1D(in_dim=1,
                            out_dim=1,
                            region_a=0.0,
                            region_b=1.0,
                            index2p=2,
                            eps=0.01,
                            eqs_name=None):
    if eqs_name == 'Boltzmann1':
        llam = 20
        mu = 50
        f = lambda x: (llam * llam + mu * mu) * tf.sin(x)
        Aeps = lambda x: 1.0 * tf.ones_like(x)
        kappa = lambda x: llam * llam * tf.ones_like(x)
        utrue = lambda x: -1.0 * (np.sin(mu) / np.sinh(llam)) * tf.sinh(
            llam * x) + tf.sin(mu * x)
        ul = lambda x: tf.zeros_like(x)
        ur = lambda x: tf.zeros_like(x)
        return Aeps, kappa, utrue, ul, ur, f
    elif eqs_name == 'Boltzmann2':
        kappa = lambda x: tf.ones_like(x)
        Aeps = lambda x: 1.0 / (2 + tf.cos(2 * np.pi * x / eps))

        utrue = lambda x: x - tf.square(x) + (eps / (4 * np.pi)) * tf.sin(
            np.pi * 2 * x / eps)

        ul = lambda x: tf.zeros_like(x)

        ur = lambda x: tf.zeros_like(x)

        if index2p == 2:
            f = lambda x: 2.0 / (2 + tf.cos(2 * np.pi * x / eps)) \
                + (4 * np.pi * x / eps) * tf.sin(2 * np.pi * x / eps) / tf.square(2 + tf.cos(2 * np.pi * x / eps)) \
                + x - tf.square(x) + (eps / (4 * np.pi)) * tf.sin(2 * np.pi * x / eps)

        return Aeps, kappa, utrue, ul, ur, f
Example #6
def nac_complex_single_layer(x_in, out_units, epsilon = 0.000001):

    '''
    :param x_in: input feature vector
    :param out_units: number of output units of the cell
    :param epsilon: small value to avoid log(0); unused in this asinh-based variant
    :return: associated weight matrix and output tensor
    '''

    in_shape = x_in.shape[1]

    W_hat = tf.get_variable(shape=[in_shape, out_units],
                            initializer= tf.initializers.random_uniform(minval=-2, maxval=2),
                            trainable=True, name="W_hat2")

    M_hat = tf.get_variable(shape=[in_shape, out_units],
                            initializer=tf.initializers.random_uniform(minval=-2, maxval=2),
                            trainable=True, name="M_hat2")

    W = tf.nn.tanh(W_hat) * tf.nn.sigmoid(M_hat)

    # Map the inputs through asinh (a log-like transform defined for all reals)
    # so that multiplicative interactions become approximately additive
    x_modified = tf.asinh(x_in)

    m = tf.sinh( tf.matmul(x_modified, W) )

    return m, W
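
Why asinh/sinh instead of log/exp (an illustration, not part of the cell): asinh(x) = log(x + sqrt(x^2 + 1)) behaves like log(2x) for large positive x but stays finite for x <= 0, so sinh(matmul(asinh(x), W)) can approximate products of powers of the inputs without the log(0) problem. A quick TF2 check of the approximation:

import tensorflow as tf

x = tf.constant([10.0, 100.0, 1000.0])
print(tf.asinh(x) - tf.math.log(2.0 * x))  # small, shrinking roughly like 1/(4*x**2)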
Example #7
def get_unary_op(x, option):
    unary_ops = {
        'log': tf.log(x),
        'exp': tf.exp(x),
        'neg': tf.negative(x),
        'ceil': tf.ceil(x),
        'floor': tf.floor(x),
        'log1p': tf.log1p(x),
        'sqrt': tf.sqrt(x),
        'square': tf.square(x),
        'abs': tf.abs(x),
        'relu': tf.nn.relu(x),
        'elu': tf.nn.elu(x),
        'selu': tf.nn.selu(x),
        'leakyRelu': tf.nn.leaky_relu(x),
        'sigmoid': tf.sigmoid(x),
        'sin': tf.sin(x),
        'cos': tf.cos(x),
        'tan': tf.tan(x),
        'asin': tf.asin(x),
        'acos': tf.acos(x),
        'atan': tf.atan(x),
        'sinh': tf.sinh(x),
        'cosh': tf.cosh(x),
        'tanh': tf.tanh(x),
    }

    assert option in unary_ops, 'Unary option not found: ' + option
    return unary_ops[option]
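
A hypothetical usage sketch (TensorFlow 1.x graph mode is assumed, since top-level aliases such as tf.log, tf.log1p and tf.ceil used above were removed in TF2); note that the dictionary builds every listed op before the lookup, which is harmless when only graph nodes are being constructed:

x = tf.placeholder(tf.float32, shape=[None])
y = get_unary_op(x, 'sinh')   # graph node computing tf.sinh(x)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [0.0, 1.0, -2.0]}))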
Example #8
def squeezer_matrix(r, theta, D, batched=False):
    """creates the single mode squeeze matrix"""
    r = tf.cast(r, tf.float64)
    if not batched:
        r = tf.expand_dims(r, 0)  # introduce artificial batch dimension
    r = tf.reshape(r, [-1, 1, 1, 1])
    theta = tf.cast(theta, def_type)
    theta = tf.reshape(theta, [-1, 1, 1, 1])

    rng = np.arange(D)
    n = np.reshape(rng, [-1, D, 1, 1])
    m = np.reshape(rng, [-1, 1, D, 1])
    k = np.reshape(rng, [-1, 1, 1, D])

    phase = tf.exp(1j * theta * (n - m) / 2)
    signs = squeeze_parity(D).reshape([1, D, 1, D])
    mask = np.logical_and((m + n) % 2 == 0, k <= np.minimum(
        m, n))  # kills off terms where the sum index k goes past min(m,n)
    k_terms = (
        signs
        * tf.pow(tf.sinh(r) / 2, mask * (n + m - 2 * k) / 2) * mask
        / tf.pow(tf.cosh(r), (n + m + 1) / 2)
        * tf.exp(
            0.5 * tf.lgamma(tf.cast(m + 1, tf.float64))
            + 0.5 * tf.lgamma(tf.cast(n + 1, tf.float64))
            - tf.lgamma(tf.cast(k + 1, tf.float64))
            - tf.lgamma(tf.cast((m - k) / 2 + 1, tf.float64))
            - tf.lgamma(tf.cast((n - k) / 2 + 1, tf.float64))
        )
    )
    output = tf.reduce_sum(phase * tf.cast(k_terms, def_type), axis=-1)

    if not batched:
        # remove extra batch dimension
        output = tf.squeeze(output, 0)
    return output
Example #9
	def exponential_mapping( self, p, x ):

		def normalise_to_hyperboloid(x):
			return x / K.sqrt( -minkowski_dot(x, x) )

		norm_x = K.sqrt( K.maximum(np.float64(0.), minkowski_dot(x, x) ) ) 
		####################################################
		exp_map_p = tf.cosh(norm_x) * p
		
		idx = tf.cast( tf.where(norm_x > K.cast(0., K.floatx()), )[:,0], tf.int64)
		non_zero_norm = tf.gather(norm_x, idx)
		z = tf.gather(x, idx) / non_zero_norm

		updates = tf.sinh(non_zero_norm) * z
		dense_shape = tf.cast( tf.shape(p), tf.int64)
		exp_map_x = tf.scatter_nd(indices=idx[:,None], updates=updates, shape=dense_shape)
		
		exp_map = exp_map_p + exp_map_x 
		#####################################################
		# z = x / K.maximum(norm_x, K.epsilon()) # unit norm 
		# exp_map = tf.cosh(norm_x) * p + tf.sinh(norm_x) * z
		#####################################################
		exp_map = normalise_to_hyperboloid(exp_map) # account for floating point imprecision

		return exp_map
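
For reference, a self-contained sketch of the exponential map computed above, assuming a Minkowski product with the last coordinate time-like (one common convention; the class's own minkowski_dot may differ):

import tensorflow as tf

def mdot(u, v):  # hypothetical Minkowski dot product
    return tf.reduce_sum(u[:-1] * v[:-1]) - u[-1] * v[-1]

p = tf.constant([0.0, 0.0, 1.0])   # base point on the hyperboloid, mdot(p, p) = -1
x = tf.constant([0.3, -0.2, 0.0])  # tangent vector at p, mdot(p, x) = 0
r = tf.sqrt(mdot(x, x))
exp_map = tf.cosh(r) * p + tf.sinh(r) * x / r
print(mdot(exp_map, exp_map))      # ≈ -1: the image stays on the hyperboloid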
Example #10
def displaced_squeezed(r_d,
                       phi_d,
                       r_s,
                       phi_s,
                       cutoff,
                       pure=True,
                       batched=False,
                       eps=1e-12,
                       dtype=tf.complex64):
    """creates a single mode input displaced squeezed state"""
    alpha = tf.cast(r_d, dtype) * tf.exp(1j * tf.cast(phi_d, dtype))
    r_s = tf.cast(r_s, dtype) + eps  # prevent NaNs when r_s == 0 (the default eps is minuscule)
    phi_s = tf.cast(phi_s, dtype)

    phase = tf.exp(1j * phi_s)
    sinh = tf.sinh(r_s)
    cosh = tf.cosh(r_s)
    tanh = tf.tanh(r_s)

    # create Hermite polynomials
    gamma = alpha * cosh + tf.math.conj(alpha) * phase * sinh
    hermite_arg = gamma / tf.sqrt(phase * tf.sinh(2 * r_s))

    prefactor = tf.expand_dims(
        tf.exp(-0.5 * alpha * tf.math.conj(alpha) -
               0.5 * tf.math.conj(alpha)**2 * phase * tanh),
        -1,
    )
    coeff = tf.stack(
        [
            _numer_safe_power(0.5 * phase * tanh, n / 2.0, dtype) /
            tf.sqrt(factorial(n) * cosh) for n in range(cutoff)
        ],
        axis=-1,
    )
    hermite_terms = tf.stack(
        [tf.cast(H(n, hermite_arg, dtype), dtype) for n in range(cutoff)],
        axis=-1)
    squeezed_coh = prefactor * coeff * hermite_terms

    if not pure:
        squeezed_coh = mix(squeezed_coh, batched)
    return squeezed_coh
Example #11
    def max_entropy_loss(y, mu, k, T):
        """Compute the maximum entropy loss.

        Args:
            y: Ground-truth fiber direction vectors.
            mu: Predicted mean vectors.
            k: Concentration parameters.
            T: Temperature parameter.

        Returns:
            loss: The maximum entropy loss.

        """
        dot_products = tf.reduce_sum(tf.multiply(mu, y), axis=1)
        cost = -tf.multiply(
            (tf.cosh(k) / tf.sinh(k) - tf.reciprocal(k)), dot_products)
        entropy = 1 - k / tf.tanh(k) - tf.log(k / (4 * np.pi * tf.sinh(k)))
        loss = cost - T * entropy
        loss = tf.reduce_mean(loss)
        return loss
Example #12
def exp_map_0(x):
    r = norm(x)
    r = K.maximum(r, K.epsilon())

    # unit norm
    x = x / r

    x = tf.sinh(r) * x
    t = tf.cosh(r)

    return K.concatenate([x, t], axis=-1)
Example #13
    def __call__(self, input, reuse=False, is_training=False):

        with tf.variable_scope(self.name):
            # setup layer
            x = input
            input_size = input.shape[1].value

            g = tf.get_variable(
                "_w_g_", [input_size, self.output_size],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=.01),
                trainable=is_training)

            wt = tf.get_variable(
                "_w_wt_", [input_size, self.output_size],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=.01),
                trainable=is_training)

            mt = tf.get_variable(
                "_w_mt_", [input_size, self.output_size],
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(stddev=.01),
                trainable=is_training)

            with tf.variable_scope('nac_w'):
                w = tf.multiply(tf.tanh(wt), tf.sigmoid(mt))

            with tf.variable_scope('simple_nac'):
                a = tf.matmul(x, w)

            with tf.variable_scope('complex_nac'):
                # m  = tf.exp( self._mult_div_nac( tf.log( tf.abs( x ) + 1e-10 ) ) )
                m = tf.sinh(tf.matmul(tf.asinh(x), w))

            with tf.variable_scope('math_gate'):
                gc = tf.sigmoid(tf.matmul(x, g))

            with tf.variable_scope('result'):
                x = (gc * a) + ((1 - gc) * m)

            # activation
            if self.act is not None:
                x = self.act(x)

            # setup dropout
            if self.dropout > 0 and is_training:
                x = tf.layers.dropout(inputs=x, rate=self.dropout)

            if not reuse: self.layer = x

            print(x)
            return x
Example #14
    def __call__(self, input):
        """
        Performs forward propagation for the NAC cell

        :param input: a tensorflow input tensor
        :return: the outputs of the forward propagation
        """
        g = tf.sigmoid(tf.matmul(self._g, input))
        a = self._add_sub_nac(input)
        m = tf.sinh(self._mult_div_nac(tf.asinh(input)))
        y = tf.multiply(g, a) + tf.multiply(1 - g, m)

        return y
Example #15
def exponential_mapping(p, x):

    # minkowski unit norm
    r = minkowski_norm(x)

    x = x / K.maximum(r, K.epsilon())

    ####################################################

    r = K.minimum(r, 1e-0)

    # idx = (r > 1e-7)[:,0]

    # updates = tf.cosh(r) * p + tf.sinh(r) * x
    # updates = normalise_to_hyperboloid(updates)

    # return tf.where(idx, updates, p)

    ####################################################

    idx = tf.where(r > 0)[:, 0]

    # clip
    # r = K.minimum(r, 1e-0)

    cosh_r = tf.cosh(r)
    exp_map_p = cosh_r * p

    non_zero_norm = tf.gather(r, idx)

    z = tf.gather(x, idx)

    updates = tf.sinh(non_zero_norm) * z

    dense_shape = tf.shape(p, out_type=tf.int64)
    exp_map_x = tf.scatter_nd(indices=idx[:, None],
                              updates=updates,
                              shape=dense_shape)

    exp_map = exp_map_p + exp_map_x

    #####################################################
    # z = x / K.maximum(r, K.epsilon()) # unit norm
    # exp_map = tf.cosh(r) * p + tf.sinh(r) * x
    #####################################################
    exp_map = normalise_to_hyperboloid(
        exp_map)  # account for floating point imprecision

    return exp_map
Example #16
def call_with_evaluator(
    num_scalars,
    problem,
    f,
    punish_scalars_beyond=2.0,
    problem_args=(), problem_kwargs={},
    f_args=(), f_kwargs={}):
  """Calls `f` with potential-stationarity-gradient-evaluator in TF context.

  Sets up the TensorFlow graph that implements a wrapped-branes potential,
  such as (3.20) of arXiv:1906.08900.

  Args:
    num_scalars: The number of scalars.
    problem: The function that specifies the scalar potential computation
      for the problem under study. This must have the following signature:
      problem(scalars) -> potential.
    f: The function to call with an evaluator (and optionally extra arguments).
    punish_scalars_beyond: Threshold numerical magnitude of scalar parameters
      beyond which a regularizing term drives optimization back to a physically
      plausible region.
    problem_args: Extra positional arguments for `problem`.
    problem_kwargs: Extra keyword arguments for `problem`.
    f_args: Extra positional arguments for `f`.
    f_kwargs: Extra keyword arguments for `f`.

  Returns:
    The result of f(evaluator, *f_args, **f_kwargs) as evaluated in a TensorFlow
    session context set up as required by the evaluator.
  """
  graph = tf.Graph()
  with graph.as_default():
    t_input = tf.Variable(numpy.zeros(num_scalars), dtype=tf.float64)
    t_potential = problem(t_input, *problem_args, **problem_kwargs)
    t_grad_potential = tf.gradients(t_potential, [t_input])[0]
    t_stationarity = tf.reduce_sum(tf.square(t_grad_potential))
    # Punish large scalars.
    # This drives the search away from 'unphysical' regions.
    t_eff_stationarity = t_stationarity + tf.reduce_sum(
        tf.sinh(  # Make stationarity-violation grow rapidly for far-out scalars.
            tf.nn.relu(tf.abs(t_input) - punish_scalars_beyond)))
    t_grad_stationarity = tf.gradients(t_eff_stationarity, [t_input])[0]
    with tf.compat.v1.Session() as session:
      session.run([tf.compat.v1.global_variables_initializer()])
      def evaluator(scalars):
        return session.run(
            (t_potential, t_stationarity, t_grad_stationarity),
            feed_dict={t_input: scalars})
      return f(evaluator, *f_args, **f_kwargs)
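
A hypothetical usage sketch with a toy quadratic potential (the actual wrapped-branes potentials are far more involved); it relies on the same TF1-style graph/session semantics as the function itself:

import numpy
import tensorflow as tf

def toy_problem(scalars):
    # Stand-in scalar potential: sum of (s_i - 1)^2.
    return tf.reduce_sum(tf.square(scalars - 1.0))

def report(evaluator):
    potential, stationarity, grad_stationarity = evaluator(numpy.zeros(3))
    return potential, stationarity

print(call_with_evaluator(3, toy_problem, report))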
Example #17
def calogero_moser(x, type, omegasq=1., gsq=1., mu=1.):
    """
    Notation: https://www1.maths.leeds.ac.uk/~siru/papers/p38.pdf (2.38)
    and http://www.scholarpedia.org/article/Calogero-Moser_system
    
    H = \frac{1}{2}\sum_{i=1}^n (p_i^2 + \omega^2 q_i^2) + g^2 \sum_{1\le j < k \le n} V(q_j - q_k)
    
    V(x) = 
    'rational':      1/x^2
    'hyperbolic':    \mu^2/4\sinh(\mu x/2)
    'trigonometric': \mu^2/4\sin(\mu x/2)
    """
    assert (x.shape[1] == 1)

    if type == 'rational':
        V = lambda x: 1 / x**2
    elif type == 'hyperbolic':
        V = lambda x: mu**2 / 4. / tf.sinh(mu / 2. * x)
    elif type == 'trigonometric':
        V = lambda x: mu**2 / 4. / tf.sin(mu / 2. * x)
    else:
        raise NotImplementedError

    q, p = extract_q_p(x)
    h_free = 0.5 * tf.reduce_sum(tf.square(p) + omegasq * tf.square(q),
                                 axis=[1, 2, 3])  # (batch,)

    # Compute the matrix of differences q[i] - q[j] and extract its upper-triangular part (triu)
    q = tf.squeeze(q, 1)  # (N,n,1)
    deltaq = tf.transpose(q, [0, 2, 1]) - q  # (N,n,n)
    n = tf.shape(deltaq)[1]
    ones = tf.ones([n, n])
    triu_mask = tf.cast(tf.matrix_band_part(ones, 0, -1) - \
                        tf.matrix_band_part(ones, 0, 0), dtype=tf.bool)
    triu = tf.boolean_mask(deltaq, triu_mask, axis=1)
    eps = 1e-5  # regularizer for the inverse
    h_int = gsq * tf.reduce_sum(V(triu + eps), axis=1)  # (batch,)

    return h_free + h_int


# sum_{i<j} V(x(i) - x(j)) :
# e.g.:
# x = np.arange(10)
# x = np.expand_dims(x, 1)
# diffs = np.transpose(x) - x
# idx = np.triu_indices(np.shape(diffs)[1],k=1)
# np.sum(V(diffs[idx]))
Example #18
    def call(self, X):

        log_energy = tf.expand_dims(tf.math.log(X[:, :, 4]+1.0), axis=-1)

        #X[:, :, 0] - categorical index of the element type
        Xid = tf.cast(tf.one_hot(tf.cast(X[:, :, 0], tf.int32), self.num_input_classes), dtype=X.dtype)
        #Xpt = tf.expand_dims(tf.math.log1p(X[:, :, 1]), axis=-1)
        Xpt = tf.expand_dims(tf.math.log(X[:, :, 1] + 1.0), axis=-1)

        Xpt_0p5 = tf.math.sqrt(Xpt)
        Xpt_2 = tf.math.pow(Xpt, 2)

        Xeta1 = tf.clip_by_value(tf.expand_dims(tf.sinh(X[:, :, 2]), axis=-1), -10, 10)
        Xeta2 = tf.clip_by_value(tf.expand_dims(tf.cosh(X[:, :, 2]), axis=-1), -10, 10)
        Xabs_eta = tf.expand_dims(tf.math.abs(X[:, :, 2]), axis=-1)
        Xphi1 = tf.expand_dims(tf.sin(X[:, :, 3]), axis=-1)
        Xphi2 = tf.expand_dims(tf.cos(X[:, :, 3]), axis=-1)

        #Xe = tf.expand_dims(tf.math.log1p(X[:, :, 4]), axis=-1)
        Xe = log_energy
        Xe_0p5 = tf.math.sqrt(log_energy)
        Xe_2 = tf.math.pow(log_energy, 2)

        Xe_transverse = log_energy - tf.math.log(Xeta2)

        Xlayer = tf.expand_dims(X[:, :, 5]*10.0, axis=-1)
        Xdepth = tf.expand_dims(X[:, :, 6]*10.0, axis=-1)

        Xphi_ecal1 = tf.expand_dims(tf.sin(X[:, :, 10]), axis=-1)
        Xphi_ecal2 = tf.expand_dims(tf.cos(X[:, :, 10]), axis=-1)
        Xphi_hcal1 = tf.expand_dims(tf.sin(X[:, :, 12]), axis=-1)
        Xphi_hcal2 = tf.expand_dims(tf.cos(X[:, :, 12]), axis=-1)

        return tf.concat([
            Xid,
            Xpt, Xpt_0p5, Xpt_2,
            Xeta1, Xeta2,
            Xabs_eta,
            Xphi1, Xphi2,
            Xe, Xe_0p5, Xe_2,
            Xe_transverse,
            Xlayer, Xdepth,
            Xphi_ecal1, Xphi_ecal2,
            Xphi_hcal1, Xphi_hcal2,
            X], axis=-1
        )
Example #19
    def call(self, X):
        # X[:, :, 0] - categorical index of the element type
        Xid = tf.cast(tf.one_hot(tf.cast(X[:, :, 0], tf.int32), self.num_input_classes), dtype=X.dtype)
        Xpt = tf.expand_dims(tf.math.log(X[:, :, 1] + 1.0), axis=-1)
        Xe = tf.expand_dims(tf.math.log(X[:, :, 4] + 1.0), axis=-1)

        Xpt_0p5 = tf.math.sqrt(Xpt)
        Xpt_2 = tf.math.pow(Xpt, 2)

        Xeta1 = tf.clip_by_value(tf.expand_dims(tf.sinh(X[:, :, 2]), axis=-1), -10, 10)
        Xeta2 = tf.clip_by_value(tf.expand_dims(tf.cosh(X[:, :, 2]), axis=-1), -10, 10)
        Xabs_eta = tf.expand_dims(tf.math.abs(X[:, :, 2]), axis=-1)
        Xphi1 = tf.expand_dims(tf.sin(X[:, :, 3]), axis=-1)
        Xphi2 = tf.expand_dims(tf.cos(X[:, :, 3]), axis=-1)

        Xe_0p5 = tf.math.sqrt(Xe)
        Xe_2 = tf.math.pow(Xe, 2)

        Xphi_ecal1 = tf.expand_dims(tf.sin(X[:, :, 10]), axis=-1)
        Xphi_ecal2 = tf.expand_dims(tf.cos(X[:, :, 10]), axis=-1)
        Xphi_hcal1 = tf.expand_dims(tf.sin(X[:, :, 12]), axis=-1)
        Xphi_hcal2 = tf.expand_dims(tf.cos(X[:, :, 12]), axis=-1)

        return tf.concat(
            [
                Xid,
                Xpt,
                Xpt_0p5,
                Xpt_2,
                Xeta1,
                Xeta2,
                Xabs_eta,
                Xphi1,
                Xphi2,
                Xe,
                Xe_0p5,
                Xe_2,
                Xphi_ecal1,
                Xphi_ecal2,
                Xphi_hcal1,
                Xphi_hcal2,
                X,
            ],
            axis=-1,
        )
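
Context for the sinh/cosh features of column 2 (an aside, not part of the layer): for a nearly massless particle with transverse momentum pT and pseudorapidity eta, pz = pT * sinh(eta) and |p| = pT * cosh(eta), so these features let downstream layers recover Cartesian kinematics from the (pT, eta, phi) inputs. A quick check in plain TF2:

import tensorflow as tf

pt, eta = tf.constant(25.0), tf.constant(1.2)
pz = pt * tf.sinh(eta)
p_abs = pt * tf.cosh(eta)
print(p_abs**2 - (pz**2 + pt**2))  # ≈ 0, since cosh^2 - sinh^2 = 1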
Example #20
def calc_tf(x):
    x_init = x
    list1 = []
    for i in tf.range(n_loops):
        # for i in range(n_loops):
        x = tf.sqrt(tf.abs(x_init * (tf.cast(i, dtype=tf.float64) + 1.)))
        print(x)
        x = tf.cos(x - 0.3)
        x = tf.pow(x, tf.cast(i + 1, tf.float64))
        x = tf.sinh(x + 0.4)
        # print("calc_tf is being traced")
        x = x**2
        x += tf.random.normal(shape=size, dtype=tf.float64)
        x /= tf.reduce_mean(x)
        x = tf.abs(x)
        list1.append(x)
    x = tf.reduce_sum(x, axis=0)
    x = tf.reduce_mean(tf.math.log(x))
    # tf.py_function(dummy, [], Tout=[])
    return x
Example #21
    def exp(self, a: tf.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> tf.Tensor:
        """Returns the exponential of the passed geometric algebra tensor.
        Only works for multivectors that square to scalars.

        Args:
            a: Geometric algebra tensor to return exponential for
            square_scalar_tolerance: Tolerance to use for the square scalar check
                or None if the check should be skipped

        Returns:
            `exp(a)`
        """
        # See https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm
        # for an explanation of how to exponentiate multivectors.

        self_sq = self.geom_prod(a, a)

        if square_scalar_tolerance is not None:
            tf.Assert(tf.reduce_all(
                tf.abs(self_sq[..., 1:]) < square_scalar_tolerance
            ), [self_sq])

        scalar_self_sq = self_sq[..., :1]

        # "Complex" square root (argument can be negative)
        s_sqrt = tf.sign(scalar_self_sq) * tf.sqrt(tf.abs(scalar_self_sq))

        # Square to +1: cosh(sqrt(||a||)) + a / sqrt(||a||) sinh(sqrt(||a||))
        # Square to -1: cos(sqrt(||a||)) + a / sqrt(||a||) sin(sqrt(||a||))
        # TODO: Does this work for values other than 1 too? E.g. square to +0.5?
        # TODO: Find a solution that doesn't require calculating all possibilities
        #       first.
        non_zero_result = tf.where(
            scalar_self_sq < 0,
            (self.from_tensor(tf.cos(s_sqrt), [0]) +
                a / s_sqrt * tf.sin(s_sqrt)),
            (self.from_tensor(tf.cosh(s_sqrt), [0]) +
                a / s_sqrt * tf.sinh(s_sqrt))
        )

        return tf.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result)
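
A standalone check of the "square to +1" branch referenced in the comments above (illustrative only, outside the geometric algebra class): for a matrix j with j @ j equal to the identity, the matrix exponential of theta * j equals cosh(theta) * I + sinh(theta) * j.

import tensorflow as tf

theta = 0.8
j = tf.constant([[0.0, 1.0], [1.0, 0.0]])   # j @ j == identity
lhs = tf.linalg.expm(theta * j)
rhs = tf.cosh(theta) * tf.eye(2) + tf.sinh(theta) * j
print(tf.reduce_max(tf.abs(lhs - rhs)))     # ≈ 0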
Example #22
def build_SGPA_graph(X, layers_width, n_samples, n_basis):
    KL = 0
    Z = tf.expand_dims(tf.tile(tf.expand_dims(X, 0), [n_samples, 1, 1]), 2)
    for h, n_out in enumerate(layers_width[1:]):
        # Hidden layer
        if h < len(layers_width) - 2:
            # Perform affine mapping at each layer of the neural network
            Z = tf.layers.dense(Z, n_basis//2)
            # Define variational parameters
            alpha_mean = tf.get_variable('alpha_mean_layer'+str(h),
                shape=[1, 1, n_basis, n_out],
                initializer=tf.random_normal_initializer())
            alpha_logstd = tf.get_variable('alpha_logstd_layer'+str(h),
                shape=[1, 1, n_basis, n_out],
                initializer=tf.random_normal_initializer())
            alpha_std = tf.exp(alpha_logstd)
            # Draw epsilon for the {n_samples} samples (uniform here; a standard
            # Gaussian draw is left commented out below)
            # epsilon = tf.random_normal([n_samples, 1, n_out*2, n_out])
            epsilon = tf.random_uniform([n_samples, 1, n_basis, n_out])
            hyp_params = tf.get_variable('hyp_params_layer'+str(h),
                shape=[2],
                initializer=tf.random_normal_initializer())
            l1, l2 = tf.nn.sigmoid(hyp_params[0]), tf.exp(hyp_params[1])
            epsilon = tf.sinh(epsilon*l2)/tf.cosh(epsilon*l2)**l1/l2
            # Compute A_{h+1}
            A = tf.tile(alpha_mean+epsilon*alpha_std, [1, tf.shape(X)[0], 1, 1])
            # Compute z_{h}A_{h+1}
            Z1 = tf.matmul(Z, A[:,:,:n_basis//2,:])/tf.sqrt(n_basis*.5)
            Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5)
            # Compute u_{h+1} and v_{h+1}
            U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2)
            Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
            KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2.
        # Output layer
        else:
            F = tf.squeeze(tf.layers.dense(Z, n_out), [2])
    return F, KL
Example #23
def lorentz(num_moments, l, precision=32, name_scope=None):
    """
    This function generates the Lorentz kernel for a given number of
    Chebyshev moments and a positive real number, l

    Parameters
    ----------
        num_moments: (int)
            positive integer, number of Chebyshev moments
        l: (float)
            positive number
        precision: (int) (default=32)
            floating point precision to use, either 32 or 64
        name_scope: (str) (default="lorentz_kernel")
            scope name for tensorflow

    Return
    ------
        kernel: Tensor(shape=(num_moments,), dtype=tf_float)

    Note
    ----
        See .. _The Kernel Polynomial Method:
        https://arxiv.org/pdf/cond-mat/0504627.pdf for more details
    """
    tf_float = tf.float64
    if precision == 32:
        tf_float = tf.float32

    with tf.name_scope(name_scope, "lorentz_kernel"):

        kernel_moments = tf.range(0, num_moments, dtype=tf_float)
        phases = 1. - kernel_moments / num_moments

        kernel = tf.math.divide(tf.sinh(l * phases), tf.math.sinh(l))
        return kernel
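
A hypothetical usage sketch (graph-mode TF 1.x is assumed, since tf.name_scope is called with the two-argument TF1 signature); the returned kernel implements g_n = sinh(l * (1 - n/N)) / sinh(l):

kernel = lorentz(num_moments=64, l=4.0)   # Tensor of shape (64,), dtype tf.float32
with tf.Session() as sess:
    print(sess.run(kernel)[:5])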
Example #24
def dtfcosh(y, x):
    d[x] = d[y] * tf.sinh(x)
Example #25
 def _inverse(self, y):
     return tf.sinh(tf.asinh(y) / self.tailweight - self.skewness)
Example #26
 def _forward(self, x):
     return tf.sinh((tf.asinh(x) + self.skewness) * self.tailweight)
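
The _inverse and _forward methods above are the two directions of the sinh-arcsinh transform used by this bijector; a quick standalone round-trip check on plain tensors (outside the bijector class):

import tensorflow as tf

skewness, tailweight = 0.5, 1.5
x = tf.constant([-1.0, 0.0, 2.0])
y = tf.sinh((tf.asinh(x) + skewness) * tailweight)      # forward
x_back = tf.sinh(tf.asinh(y) / tailweight - skewness)   # inverse
print(tf.reduce_max(tf.abs(x_back - x)))                # ≈ 0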
Example #27
def sinh(x):
    return tf.sinh(x)
Example #28
 def _forward(self, x):
   return tf.sinh((tf.asinh(x) + self.skewness) * self.tailweight)
Example #29
#make weights
first_weight = tf.Variable(
    tf.random_uniform([num_in, num_hidden_per_layer], -1, 1))
second_weight = tf.Variable(
    tf.random_uniform([num_hidden_per_layer, num_hidden_per_layer], -1, 1))
third_weight = tf.Variable(
    tf.random_uniform([num_hidden_per_layer, num_hidden_per_layer], -1, 1))
fourth_weight = tf.Variable(
    tf.random_uniform([num_hidden_per_layer, num_out], -1, 1))

#make layers
#each layer is a mathematical operation: multiply the inputs by the weights, then pass the result through an activation function, i.e. y = activation(Wx). We'll worry about what an activation function is and why it's used later; the important part is that each layer computes y = Wx + b (W = weights, x = features, b = bias, and the bias is 0 here).

#used 4 layers because that's what I found worked best through experimentation
first_hidden_layer = tf.sigmoid(tf.matmul(input_label, first_weight))
second_hidden_layer = tf.sinh(tf.matmul(first_hidden_layer, second_weight))
third_hidden_layer = tf.sigmoid(tf.matmul(second_hidden_layer, third_weight))
dropout = tf.layers.dropout(inputs=third_hidden_layer)
output_layer = tf.sigmoid(tf.matmul(dropout, fourth_weight),
                          name="output_layer")
#note - one layer's output is the next layer's input.

#make the loss function
loss = tf.reduce_mean(output_layer - output_label)
opt = tf.train.GradientDescentOptimizer(.001).minimize(loss)

init = tf.initialize_all_variables()
save = tf.train.Saver(max_to_keep=3)
#6. train the model
with tf.Session() as sess:
    sess.run(init)
Example #30
 def _inverse(self, y):
   return tf.sinh(tf.asinh(y) / self.tailweight - self.skewness)
Example #31
def tfe_sinh(t):
  return tf.sinh(t)
Example #32
def ttfcosh(y, x):
    d[y] = d[x] * tf.sinh(x)
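
Helpers like this one and dtfcosh in Example #24 encode the identity d/dx cosh(x) = sinh(x); the d dictionary they write into belongs to whatever source-transformation autodiff framework these snippets come from. A standalone TF2 check of the identity:

import tensorflow as tf

x = tf.constant(0.7)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.cosh(x)
print(tape.gradient(y, x) - tf.sinh(x))  # ≈ 0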