Code example #1
def get_filters(R, filter_size, P=None, n_rings=None):
    """Perform single-frequency DFT on each ring of a polar-resampled patch"""
    k = filter_size
    filters = {}
    N = n_samples(k)
    from scipy.linalg import dft
    for m, r in R.items():
        rsh = r.get_shape().as_list()
        # Get the basis matrices
        weights = get_interpolation_weights(k, m, n_rings=n_rings)
        DFT = dft(N)[m,:]
        LPF = np.dot(DFT, weights).T

        cosine = np.real(LPF).astype(np.float32)
        sine = np.imag(LPF).astype(np.float32)
        # Reshape for multiplication with radial profile
        cosine = tf.constant(cosine)
        sine = tf.constant(sine)
        # Project taps on to rotational basis
        r = tf.reshape(r, tf.stack([rsh[0],rsh[1]*rsh[2]]))
        ucos = tf.reshape(tf.matmul(cosine, r), tf.stack([k, k, rsh[1], rsh[2]]))
        usin = tf.reshape(tf.matmul(sine, r), tf.stack([k, k, rsh[1], rsh[2]]))
        if P is not None:
            # Rotate basis matrices
            ucos_ = tf.cos(P[m])*ucos + tf.sin(P[m])*usin
            usin = -tf.sin(P[m])*ucos + tf.cos(P[m])*usin
            ucos = ucos_
        filters[m] = (ucos, usin)
    return filters
Code example #2
File: sin_function_iter.py (project: chengyake/karch)
    def tl_net(self, inputs):


        layer = self.genes[0][:,0]*tf.sin(0.01*inputs+self.genes[0][:,1])
        for i in range(1, jishu):
            layer = tf.add(layer, self.genes[i][:,0]*tf.sin(0.01*i+0.01*inputs+self.genes[i][:,1]))

        return layer
Code example #3
File: poisson_models.py (project: KordingLab/spykes)
    def call(self, inputs):
        k1 = tf.matmul(tf.cos(inputs), self.k1 * tf.cos(self.mu))
        k2 = tf.matmul(tf.sin(inputs), self.k2 * tf.sin(self.mu))

        # Defines the two model formulations: "glm" vs "gvm".
        if self.model_type == 'glm':
            return tf.exp(k1 + k2 + self.k0)
        else:
            return tf.nn.softplus(self.b) + self.g * tf.exp(k1 + k2)
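
Side note (not from the original project): when the two gain vectors are equal, the pair of matmul terms above collapses to the classic von Mises tuning curve k*cos(x - mu), since cos(x)cos(mu) + sin(x)sin(mu) = cos(x - mu). A minimal NumPy sketch with hypothetical parameter values:

import numpy as np

x = np.linspace(-np.pi, np.pi, 7)[:, None]       # input angles, shape (7, 1)
k, mu, k0 = 2.0, np.array([[0.3]]), -1.0         # hypothetical gain, preferred angle, baseline
glm_rate = np.exp(np.cos(x) @ (k * np.cos(mu)) + np.sin(x) @ (k * np.sin(mu)) + k0)
tuning_rate = np.exp(k * np.cos(x - mu.T) + k0)  # identical by the angle-difference identity
assert np.allclose(glm_rate, tuning_rate)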
Code example #4
File: project.py (project: pcm17/models)
def _euler2mat(z, y, x):
  """Converts euler angles to rotation matrix.

   From:
   https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174

   TODO: Remove the dimension for 'N' (deprecated for converting all source
   poses altogether).

  Args:
    z: rotation angle along z axis (in radians) -- size = [B, n]
    y: rotation angle along y axis (in radians) -- size = [B, n]
    x: rotation angle along x axis (in radians) -- size = [B, n]

  Returns:
    Rotation matrix corresponding to the euler angles, with shape [B, n, 3, 3].
  """
  batch_size = tf.shape(z)[0]
  n = 1
  z = tf.clip_by_value(z, -np.pi, np.pi)
  y = tf.clip_by_value(y, -np.pi, np.pi)
  x = tf.clip_by_value(x, -np.pi, np.pi)

  # Expand to B x N x 1 x 1
  z = tf.expand_dims(tf.expand_dims(z, -1), -1)
  y = tf.expand_dims(tf.expand_dims(y, -1), -1)
  x = tf.expand_dims(tf.expand_dims(x, -1), -1)

  zeros = tf.zeros([batch_size, n, 1, 1])
  ones = tf.ones([batch_size, n, 1, 1])

  cosz = tf.cos(z)
  sinz = tf.sin(z)
  rotz_1 = tf.concat([cosz, -sinz, zeros], axis=3)
  rotz_2 = tf.concat([sinz, cosz, zeros], axis=3)
  rotz_3 = tf.concat([zeros, zeros, ones], axis=3)
  zmat = tf.concat([rotz_1, rotz_2, rotz_3], axis=2)

  cosy = tf.cos(y)
  siny = tf.sin(y)
  roty_1 = tf.concat([cosy, zeros, siny], axis=3)
  roty_2 = tf.concat([zeros, ones, zeros], axis=3)
  roty_3 = tf.concat([-siny, zeros, cosy], axis=3)
  ymat = tf.concat([roty_1, roty_2, roty_3], axis=2)

  cosx = tf.cos(x)
  sinx = tf.sin(x)
  rotx_1 = tf.concat([ones, zeros, zeros], axis=3)
  rotx_2 = tf.concat([zeros, cosx, -sinx], axis=3)
  rotx_3 = tf.concat([zeros, sinx, cosx], axis=3)
  xmat = tf.concat([rotx_1, rotx_2, rotx_3], axis=2)

  return tf.matmul(tf.matmul(xmat, ymat), zmat)
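
For reference (not part of the snippet), the same R = Rx * Ry * Rz composition can be reproduced for a single set of angles with plain NumPy; the helper name below is made up:

import numpy as np

def euler2mat_np(z, y, x):
    """Single-sample NumPy sketch of the same composition (angles in radians)."""
    cz, sz = np.cos(z), np.sin(z)
    cy, sy = np.cos(y), np.sin(y)
    cx, sx = np.cos(x), np.sin(x)
    zmat = np.array([[cz, -sz, 0.], [sz, cz, 0.], [0., 0., 1.]])
    ymat = np.array([[cy, 0., sy], [0., 1., 0.], [-sy, 0., cy]])
    xmat = np.array([[1., 0., 0.], [0., cx, -sx], [0., sx, cx]])
    return xmat @ ymat @ zmat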
Code example #5
File: kernels.py (project: vincentadam87/GPflow)
 def _J(self, theta):
     """
     Implements the order dependent family of functions defined in equations
     4 to 7 in the reference paper.
     """
     if self.order == 0:
         return np.pi - theta
     elif self.order == 1:
         return tf.sin(theta) + (np.pi - theta) * tf.cos(theta)
     elif self.order == 2:
         return 3. * tf.sin(theta) * tf.cos(theta) + \
                (np.pi - theta) * (1. + 2. * tf.cos(theta) ** 2)
Code example #6
File: tljs_function.py (project: chengyake/karch)
    def tl_net(self, inputs):
        for i in range(jishu):
            if i < 8:
                self.arg[i]=tf.Variable(tf.random_normal((self.data.num_input,2)), trainable=True)
            else:
                self.arg[i]=tf.Variable(tf.zeros((self.data.num_input,2)), trainable=False)


        layer = self.arg[0][:,0]*tf.sin(0.1*inputs+self.arg[0][:,1])
        for i in range(1, jishu):
            layer = tf.add(layer, self.arg[i][:,0]*tf.sin((0.1*i+0.1)*inputs+self.arg[i][:,1]))
        return layer
Code example #7
File: quantile.py (project: RomainBrault/Thesis)
def phigrad(X, omegas, D):
    Z = tf.matmul(X, omegas)
    Zc = tf.cos(Z)
    Zs = tf.sin(Z)
    phiX = tf.concat([Zc, Zs], 1) / np.sqrt(D)
    phiXg = tf.concat([-omegas * Zs, omegas * Zc], 1) / np.sqrt(D)
    return phiX, phiXg
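
This is a random Fourier feature map: with frequencies omegas drawn from N(0, I/sigma^2), the inner product of two feature vectors approximates the Gaussian kernel. A hedged NumPy check (sizes and bandwidth are arbitrary):

import numpy as np

rng = np.random.default_rng(0)
d, D, sigma = 3, 4000, 1.0                          # hypothetical dimensions and bandwidth
omegas = rng.normal(0.0, 1.0 / sigma, size=(d, D))
X = rng.normal(size=(5, d))

Z = X @ omegas
phiX = np.concatenate([np.cos(Z), np.sin(Z)], axis=1) / np.sqrt(D)

approx = phiX @ phiX.T                              # Monte-Carlo kernel estimate
exact = np.exp(-((X[:, None] - X[None, :]) ** 2).sum(-1) / (2.0 * sigma ** 2))
print(np.abs(approx - exact).max())                 # small; shrinks as D grows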
Code example #8
File: audio_demo.py (project: jlewi/tensorboard)
def bisine_wahwah_wave(frequency):
  """Emit two sine waves with balance oscillating left and right."""
  #
  # This is clearly intended to build on the bisine wave defined above,
  # so we can start by generating that.
  waves_a = bisine_wave(frequency)
  #
  # Then, by reversing axis 2, we swap the stereo channels. By mixing
  # this with `waves_a`, we'll be able to create the desired effect.
  waves_b = tf.reverse(waves_a, axis=[2])
  #
  # Let's have the balance oscillate from left to right four times.
  iterations = 4
  #
  # Now, we compute the balance for each sample: `ts` has values
  # in [0, 1] that indicate how much we should use `waves_a`.
  xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
  thetas = xs / _samples() * iterations
  ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2
  #
  # Finally, we can mix the two together, and we're done.
  wave = ts * waves_a + (1.0 - ts) * waves_b
  #
  # Alternately, we can make the effect more pronounced by exaggerating
  # the sample data. Let's emit both variations.
  exaggerated_wave = wave ** 3.0
  return tf.concat([wave, exaggerated_wave], axis=0)
Code example #9
    def __init__(self, args):
        with tf.device(args.device):
            def circle(x):
                spherenet = tf.square(x)
                spherenet = tf.reduce_sum(spherenet, 1)
                lam = tf.sqrt(spherenet)
                return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])

            def modes(x):
                shape = x.get_shape()
                return tf.round(x*2)/2.0#+tf.random_normal(shape, 0, 0.04)

            if args.distribution == 'circle':
                x = tf.random_normal([args.batch_size, 2])
                x = circle(x)
            elif args.distribution == 'modes':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                x = modes(x)
            elif args.distribution == 'modal-gaussian':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                y = tf.random_normal([args.batch_size, 2], stddev=0.04, mean=0.15)
                x = tf.round(x) + y
            elif args.distribution == 'sin':
                x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
                x = tf.transpose(x)
                r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
                xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
                x = tf.concat([xy,x], 1)/16.0

            elif args.distribution == 'static-point':
                x = tf.ones([args.batch_size, 2])

            self.x = x
            self.xy = tf.zeros_like(self.x)
Code example #10
File: model_util.py (project: donrv/frustum-pointnets)
def get_box3d_corners_helper(centers, headings, sizes):
    """ TF layer. Input: (N,3), (N,), (N,3), Output: (N,8,3) """
    #print '-----', centers
    N = centers.get_shape()[0].value
    l = tf.slice(sizes, [0,0], [-1,1]) # (N,1)
    w = tf.slice(sizes, [0,1], [-1,1]) # (N,1)
    h = tf.slice(sizes, [0,2], [-1,1]) # (N,1)
    #print l,w,h
    x_corners = tf.concat([l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2], axis=1) # (N,8)
    y_corners = tf.concat([h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2], axis=1) # (N,8)
    z_corners = tf.concat([w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2], axis=1) # (N,8)
    corners = tf.concat([tf.expand_dims(x_corners,1), tf.expand_dims(y_corners,1), tf.expand_dims(z_corners,1)], axis=1) # (N,3,8)
    #print x_corners, y_corners, z_corners
    c = tf.cos(headings)
    s = tf.sin(headings)
    ones = tf.ones([N], dtype=tf.float32)
    zeros = tf.zeros([N], dtype=tf.float32)
    row1 = tf.stack([c,zeros,s], axis=1) # (N,3)
    row2 = tf.stack([zeros,ones,zeros], axis=1)
    row3 = tf.stack([-s,zeros,c], axis=1)
    R = tf.concat([tf.expand_dims(row1,1), tf.expand_dims(row2,1), tf.expand_dims(row3,1)], axis=1) # (N,3,3)
    #print row1, row2, row3, R, N
    corners_3d = tf.matmul(R, corners) # (N,3,8)
    corners_3d += tf.tile(tf.expand_dims(centers,2), [1,1,8]) # (N,3,8)
    corners_3d = tf.transpose(corners_3d, perm=[0,2,1]) # (N,8,3)
    return corners_3d
Code example #11
File: model_utils.py (project: 812864539/models)
def get_position_encoding(
    length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
  """Return positional encoding.

  Calculates the position encoding as a mix of sine and cosine functions with
  geometrically increasing wavelengths.
  Defined and formulized in Attention is All You Need, section 3.5.

  Args:
    length: Sequence length.
    hidden_size: Size of the hidden layer (the encoding dimension).
    min_timescale: Minimum scale that will be applied at each position
    max_timescale: Maximum scale that will be applied at each position

  Returns:
    Tensor with shape [length, hidden_size]
  """
  position = tf.to_float(tf.range(length))
  num_timescales = hidden_size // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
  return signal
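
The same encoding is easy to sanity-check outside a TF graph; a NumPy sketch of the formula above (function name made up):

import numpy as np

def position_encoding_np(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
    """NumPy sketch of the sinusoidal encoding; returns shape [length, hidden_size]."""
    position = np.arange(length, dtype=np.float32)
    num_timescales = hidden_size // 2
    log_increment = np.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)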
Code example #12
  def testVonMisesSampleMoments(self):
    locs_v = np.array([-2., -1., 0.3, 2.3])
    concentrations_v = np.array([0.1, 1.0, 2.0, 10.0])
    von_mises = tfd.VonMises(
        self.make_tensor(locs_v), self.make_tensor(concentrations_v))

    n = 10000
    samples = von_mises.sample(n, seed=12345)

    expected_mean = von_mises.mean()
    actual_mean = tf.atan2(
        tf.reduce_mean(tf.sin(samples), 0), tf.reduce_mean(tf.cos(samples), 0))

    expected_variance = von_mises.variance()
    standardized_samples = samples - tf.expand_dims(von_mises.mean(), 0)
    actual_variance = 1. - tf.reduce_mean(tf.cos(standardized_samples), axis=0)

    [
        expected_mean_val, expected_variance_val, actual_mean_val,
        actual_variance_val
    ] = self.evaluate(
        [expected_mean, expected_variance, actual_mean, actual_variance])

    self.assertAllClose(expected_mean_val, actual_mean_val, rtol=0.1)
    self.assertAllClose(expected_variance_val, actual_variance_val, rtol=0.1)
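
The atan2-of-averaged-sines-and-cosines estimator used here is the standard circular mean; the same check can be written against NumPy's sampler (independent of this test):

import numpy as np

samples = np.random.vonmises(mu=0.3, kappa=2.0, size=200000)
circ_mean = np.arctan2(np.sin(samples).mean(), np.cos(samples).mean())
circ_var = 1.0 - np.cos(samples - circ_mean).mean()
print(circ_mean, circ_var)   # mean close to 0.3; variance close to 1 - I1(2)/I0(2)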
Code example #13
File: util.py (project: ALISCIFP/models)
def tf_cheating_contcartpole(state, action):
    gravity = 9.8
    masscart = 1.0
    masspole = 0.1
    total_mass = (masspole + masscart)
    length = 0.5 # actually half the pole's length
    polemass_length = (masspole * length)
    force_mag = 10.0
    tau = 0.02  # seconds between state updates

    # Angle at which to fail the episode
    theta_threshold_radians = 12 * 2 * math.pi / 360
    x_threshold = 2.4

    x, x_dot, theta, theta_dot = tf.split(state, 4, axis=-1)
    done =  tf.logical_or(x < -x_threshold,
                          tf.logical_or(x > x_threshold,
                          tf.logical_or(theta < -theta_threshold_radians,
                                        theta > theta_threshold_radians)))

    force = force_mag * action
    costheta = tf.cos(theta)
    sintheta = tf.sin(theta)
    temp = old_div((force + polemass_length * theta_dot * theta_dot * sintheta), total_mass)
    thetaacc = old_div((gravity * sintheta - costheta* temp), (length * (old_div(4.0,3.0) - masspole * costheta * costheta / total_mass)))
    xacc  = temp - polemass_length * thetaacc * costheta / total_mass
    x  = x + tau * x_dot
    x_dot = x_dot + tau * xacc
    theta = theta + tau * theta_dot
    theta_dot = theta_dot + tau * thetaacc
    state = tf.concat([x,x_dot,theta,theta_dot], -1)
    done = tf.squeeze(tf.cast(done, tf.float32), -1)
    reward = 1.0 - done
    done *= 0.
    return state, reward, done
Code example #14
    def test_cwise_unary_grad(self):
        """
        Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
        """
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            arg_np = np.random.random(100)
            grad_above = tf.constant(np.random.random(100))

            arg = tf.constant(arg_np)

            def test_grad(fcn, tf_fcn):
                ovl_out = as_tensorflow(fcn(arg))
                tf_out = tf_fcn(arg)

                ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = s.run([ovl_out, tf_out, ovl_grad, tf_grad])

                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)

            test_grad(lambda x: neg(x), lambda x: tf.neg(x))
            test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
            test_grad(lambda x: sin(x), lambda x: tf.sin(x))
            test_grad(lambda x: cos(x), lambda x: tf.cos(x))
            test_grad(lambda x: tan(x), lambda x: tf.tan(x))
            test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
Code example #15
File: networkarch.py (project: hedgefair/DeepKoopman)
def FormLStack(omega_output, deltat):
        # encoded_layer is [None, 2]
        # omega_output is [None, 1]
        if omega_output.shape[1] == 1:
                entry11 = tf.cos(omega_output*deltat)
                entry12 = tf.sin(omega_output*deltat)
                row1 = tf.concat([entry11, -entry12], axis=1) # [None, 2]
                row2 = tf.concat([entry12, entry11], axis=1) # [None, 2]

        elif omega_output.shape[1] == 2:
                scale = tf.exp(omega_output[:,1] * deltat)
                entry11 = tf.multiply(scale, tf.cos(omega_output[:,0]*deltat))
                entry12 = tf.multiply(scale, tf.sin(omega_output[:,0]*deltat))
                row1 = tf.stack([entry11, -entry12], axis=1) # [None, 2]
                row2 = tf.stack([entry12, entry11], axis=1) # [None, 2]
        Lstack = tf.stack([row1, row2], axis=2) # [None, 2, 2] put one row below other
        return Lstack
Code example #16
def gabor(n_values=32, sigma=1.0, mean=0.0):
	x = tf.linspace(-3.0, 3.0, n_values)
	z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0)/ (2.0 * tf.pow(sigma, 2.0)))) * (1.0 / (sigma * tf.sqrt(2.0 * 3.145))))
	gauss_kernel = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z,[1, n_values]))
	x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
	y = tf.reshape(tf.ones_like(x), [1, n_values])
	gabor_kernel = tf.multiply(tf.matmul(x ,y), gauss_kernel)
	return gabor_kernel
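
The kernel is an outer product of a 1-D Gaussian with itself, modulated by a vertical sine grating. A NumPy sketch of the same construction (using np.pi rather than the 3.145 approximation above):

import numpy as np

def gabor_np(n_values=32, sigma=1.0, mean=0.0):
    x = np.linspace(-3.0, 3.0, n_values)
    z = np.exp(-((x - mean) ** 2) / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2.0 * np.pi))
    gauss_kernel = np.outer(z, z)                     # separable 2-D Gaussian
    grating = np.outer(np.sin(x), np.ones(n_values))  # sine varies along one axis only
    return grating * gauss_kernel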
Code example #17
File: adv_cppn_model.py (project: lukemetz/cppn)
def sin_bank(x, bank_size, length, scope=None):
    with tf.variable_op_scope([x], scope, "SinBank") as scope:
        bank = tf.get_variable("bank", dtype=tf.float32, shape=[bank_size, ],
                        initializer=tf.random_uniform_initializer(0.0, length))
        shift = tf.get_variable("shift", dtype=tf.float32, shape=[bank_size, ],
                        initializer=tf.random_uniform_initializer(0.0, length))
        if not tf.get_variable_scope().reuse:
            tf.histogram_summary(bank.name, bank)
        return tf.sin(x*bank+shift)
Code example #18
 def get_timing_signal_1d(self, length, channels):
     position = tf.to_float(tf.range(length))
     num_timescales = channels // 2
     log_timescale_increment = (math.log(float(self.max_timescale) / float(self.min_timescale)) / (tf.to_float(num_timescales) - 1))
     inv_timescales = self.min_timescale * tf.exp(tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
     scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
     signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
     signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
     signal = tf.reshape(signal, [1, length, channels])
     return signal
Code example #19
File: tljs.py (project: chengyake/karch)
    def fly_net(self, inputs):
        genes={}
        for i in range(100):
            genes[i]=tf.Variable(tf.random_normal((self.data.num_input,2))*10.0, trainable=True)

        layer = genes[0][:,0]*tf.sin(0.1*inputs+genes[0][:,1])
        for i in range(1, 100):
            layer = tf.add(layer, genes[i][:,0]*tf.sin(0.1*i+0.1*inputs+genes[i][:,1]))

        w3 = tf.Variable(tf.random_normal([self.data.num_input, 1]))
        b3 = tf.Variable(tf.zeros([1]) + 0.000001)
        x3 = self.fc_layer("layer3", layer, w3, b3)


        #layer = tf.nn.sigmoid(layer)+0.000001
        #out = tf.reduce_sum(-tf.log(layer), axis=-1)

        #out = tf.reduce_sum(layer, axis=-1)

        return x3
Code example #20
def activation(source):
    # Tanh
    # return tf.nn.tanh(source)
    # Relu
    # x = tf.nn.relu(source)
    # Leaky ReLU
    # alpha = 0.001
    # return tf.maximum(alpha*source, source)
    # My evil slide of doom activation:
    alpha = 0.02
    beta = 1.1
    return tf.maximum(alpha * source, tf.sin(source) + (beta * source))
Code example #21
def times_diag_tf(input_matrix, n_hidden, diag):
    input_re = input_matrix[:, :n_hidden] #okay so the first left half of the matrix is real numbers
    input_im = input_matrix[:, n_hidden:] #the right half is the imaginary numbers that correspond
    Re = tf.diag(tf.cos(diag))
    Im = tf.diag(tf.sin(diag))
    input_re_times_Re = tf.matmul(input_re, Re) #matmul is the equivalent of dot
    input_re_times_Im = tf.matmul(input_re, Im)
    input_im_times_Re = tf.matmul(input_im, Re)
    input_im_times_Im = tf.matmul(input_im, Im)

    return tf.concat(1, [input_re_times_Re - input_im_times_Im,
                          input_re_times_Im + input_im_times_Re]) #this will combine the two matrices
Code example #22
def Position_Embedding(inputs, position_size):
    batch_size,seq_len = tf.shape(inputs)[0],tf.shape(inputs)[1]
    position_j = 1. / tf.pow(10000., \
                             2 * tf.range(position_size / 2, dtype=tf.float32 \
                            ) / position_size)
    position_j = tf.expand_dims(position_j, 0)
    position_i = tf.range(tf.cast(seq_len, tf.float32), dtype=tf.float32)
    position_i = tf.expand_dims(position_i, 1)
    position_ij = tf.matmul(position_i, position_j)
    position_ij = tf.concat([tf.cos(position_ij), tf.sin(position_ij)], 1)
    position_embedding = tf.expand_dims(position_ij, 0) \
                         + tf.zeros((batch_size, seq_len, position_size))
    return position_embedding
Code example #23
File: kernels.py (project: fujiisoup/GPflow)
    def K(self, X, X2=None):
        X, X2 = self._slice(X, X2)
        if X2 is None:
            X2 = X

        # Introduce dummy dimension so we can use broadcasting
        f = tf.expand_dims(X, 1)  # now N x 1 x D
        f2 = tf.expand_dims(X2, 0)  # now 1 x M x D

        r = np.pi * (f-f2) / self.period
        r = tf.reduce_sum(tf.square(tf.sin(r)/self.lengthscales), 2)

        return self.variance * tf.exp(-0.5 * r)
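
This is the exp-sine-squared (periodic) kernel. For a single lengthscale it can be sketched in NumPy as follows (names made up):

import numpy as np

def periodic_kernel_np(X, X2, variance=1.0, lengthscale=1.0, period=1.0):
    """NumPy sketch of the periodic kernel for X of shape (N, D) and X2 of shape (M, D)."""
    r = np.pi * (X[:, None, :] - X2[None, :, :]) / period
    r = np.sum(np.square(np.sin(r) / lengthscale), axis=2)
    return variance * np.exp(-0.5 * r)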
Code example #24
File: icp_util.py (project: ALISCIFP/models)
def get_transformation_matrix(transform):
  """Converts [tx, ty, tz, rx, ry, rz] to a transform matrix."""
  rx = transform[3]
  ry = transform[4]
  rz = transform[5]

  rz = tf.clip_by_value(rz, -np.pi, np.pi)
  ry = tf.clip_by_value(ry, -np.pi, np.pi)
  rx = tf.clip_by_value(rx, -np.pi, np.pi)

  cos_rx = tf.cos(rx)
  sin_rx = tf.sin(rx)
  rotx_1 = tf.stack([1.0, 0.0, 0.0])
  rotx_2 = tf.stack([0.0, cos_rx, -sin_rx])
  rotx_3 = tf.stack([0.0, sin_rx, cos_rx])
  xmat = tf.stack([rotx_1, rotx_2, rotx_3])

  cos_ry = tf.cos(ry)
  sin_ry = tf.sin(ry)
  roty_1 = tf.stack([cos_ry, 0.0, sin_ry])
  roty_2 = tf.stack([0.0, 1.0, 0.0])
  roty_3 = tf.stack([-sin_ry, 0.0, cos_ry])
  ymat = tf.stack([roty_1, roty_2, roty_3])

  cos_rz = tf.cos(rz)
  sin_rz = tf.sin(rz)
  rotz_1 = tf.stack([cos_rz, -sin_rz, 0.0])
  rotz_2 = tf.stack([sin_rz, cos_rz, 0.0])
  rotz_3 = tf.stack([0.0, 0.0, 1.0])
  zmat = tf.stack([rotz_1, rotz_2, rotz_3])

  rotate = tf.matmul(tf.matmul(xmat, ymat), zmat)

  translate = transform[:3]
  mat = tf.concat([rotate, tf.expand_dims(translate, 1)], axis=1)

  hom_filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 4], dtype=tf.float32)
  mat = tf.concat([mat, hom_filler], axis=0)
  return mat
Code example #25
 def test_toco(self):
   """Run a couple of TensorFlow graphs against TOCO through the python bin."""
   with tf.Session() as sess:
     img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
     val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
     out = tf.identity(val, name="out")
     out2 = tf.sin(val, name="out2")
      # This is a valid model.
     self._run(sess, img, out, True)
     # This uses an invalid function.
     # TODO(aselle): Check to make sure a warning is included.
     self._run(sess, img, out2, True)
     # This is an identity graph, which doesn't work
     self._run(sess, img, img, False)
Code example #26
def rotate_points(orig_points, angle, w, h):
    """Return rotated points

    Args:
        orig_points: 'Tensor' with shape [N,2], each entry is point (x,y)
        angle: rotate radians

    Returns:
        'Tensor' with shape [N,2], with rotated points
    """

    # rotation
    rotate_mat = tf.stack([[tf.cos(angle) / w, tf.sin(angle) / h],
                           [-tf.sin(angle) / w, tf.cos(angle) / h]])

    # shift coord
    orig_points = tf.subtract(orig_points, 0.5)

    orig_points = tf.stack([orig_points[:, 0] * w,
                            orig_points[:, 1] * h], axis=1)
    print(orig_points)
    rotated_points = tf.matmul(orig_points, rotate_mat) + 0.5

    return rotated_points
Code example #27
def gaussian(config, gan, net):
    z_dim = net.get_shape().as_list()[-1]
    net = (net + 1) / 2

    if len(gan.ops.shape(net)) == 4:
        za = tf.slice(net, [0,0,0,0], [gan.batch_size(), -1, -1, z_dim//2])
        zb = tf.slice(net, [0,0,0,z_dim//2], [gan.batch_size(), -1, -1, z_dim//2])
    else:
        za = tf.slice(net, [0,0], [gan.batch_size(), z_dim//2])
        zb = tf.slice(net, [0,z_dim//2], [gan.batch_size(), z_dim//2])

    pi = np.pi
    ra = tf.sqrt(-2 * tf.log(za+TINY))*tf.cos(2*pi*zb)
    rb = tf.sqrt(-2 * tf.log(za+TINY))*tf.sin(2*pi*zb)

    return tf.reshape(tf.concat(axis=len(net.get_shape())-1, values=[ra, rb]), net.get_shape())
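
This is the Box-Muller transform: two uniform halves of the latent vector become two independent standard normals. A minimal NumPy version of the same idea:

import numpy as np

u1 = np.random.uniform(1e-12, 1.0, size=100000)     # keep away from log(0)
u2 = np.random.uniform(0.0, 1.0, size=100000)
ra = np.sqrt(-2.0 * np.log(u1)) * np.cos(2.0 * np.pi * u2)
rb = np.sqrt(-2.0 * np.log(u1)) * np.sin(2.0 * np.pi * u2)
print(ra.mean(), ra.std(), rb.mean(), rb.std())     # both roughly N(0, 1)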
Code example #28
def test_node(Simulator):
    minibatch_size = 3
    with nengo.Network() as net:
        node0 = TensorNode(lambda t: tf.tile(tf.reshape(t, (1, -1)),
                                             (minibatch_size, 1)))
        node1 = TensorNode(lambda t, x: tf.sin(x), size_in=1)
        nengo.Connection(node0, node1, synapse=None)

        p0 = nengo.Probe(node0)
        p1 = nengo.Probe(node1)

    with Simulator(net, minibatch_size=minibatch_size) as sim:
        sim.run_steps(10)

    assert np.allclose(sim.data[p0], sim.trange()[None, :, None])
    assert np.allclose(sim.data[p1], np.sin(sim.trange()[None, :, None]))
Code example #29
File: tf_utils.py (project: 812864539/models)
def get_flow(t, theta, map_size, name_scope='gen_flow'):
  """
  Rotates the map by theta and translates the rotated map by t.
  
  Assume that the robot rotates by an angle theta and then moves forward by
  translation t. This function returns the flow field. For every pixel in
  the new image it tells us which pixel in the original image it came from:
  NewI(x, y) = OldI(flow_x(x,y), flow_y(x,y)).

  Assume there is a point p in the original image. The robot rotates by R and
  moves forward by t, so p1 = Rt*p and p2 = p1 - t (the world moves in the
  opposite direction). Thus p2 = Rt*p - t, i.e. p2 came from R*(p2 + t), which
  is what this function calculates.

    t:      ... x 2 (translation for B batches of N motions each).
    theta:  ... x 1 (rotation for B batches of N motions each).
    
    Output: ... x map_size x map_size x 2
  """

  with tf.name_scope(name_scope):
    tx, ty = tf.unstack(tf.reshape(t, shape=[-1, 1, 1, 1, 2]), axis=4)
    theta = tf.reshape(theta, shape=[-1, 1, 1, 1])
    c = tf.constant((map_size-1.)/2., dtype=tf.float32)

    x, y = np.meshgrid(np.arange(map_size), np.arange(map_size))
    x = tf.constant(x[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='x', 
                    shape=[1, map_size, map_size, 1])
    y = tf.constant(y[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='y',
                    shape=[1,map_size, map_size, 1])

    x = x-(-tx+c)
    y = y-(-ty+c)

    sin_theta = tf.sin(theta)
    cos_theta = tf.cos(theta)
    xr = cos_theta*x - sin_theta*y
    yr = sin_theta*x + cos_theta*y

    xr = xr + c
    yr = yr + c
    
    flow = tf.stack([xr, yr], axis=-1)
    sh = tf.unstack(tf.shape(t), axis=0)
    sh = tf.stack(sh[:-1]+[tf.constant(_, dtype=tf.int32) for _ in [map_size, map_size, 2]])
    flow = tf.reshape(flow, shape=sh)
    return flow
Code example #30
File: adv_cppn_model.py (project: lukemetz/cppn)
def cppn_func(inp, context, z):
    with arg_scope([fc], batch_norm_params=batch_norm_params, stddev=0.02):
        z = z*2 - 1
        #n = 64
        n = 32
        h = inp[:, :, 0:1]
        w = inp[:, :, 1:2]

        r_h = sin_bank(h, 64, length=3)
        fc_h = three_fc(r_h, num_units_out=n)

        r_w = sin_bank(w, 64, length=3)
        fc_w = three_fc(r_w, num_units_out=n)

        d = tf.sqrt((h-0.5)**2 + (w-0.5)**2)
        r_d = sin_bank(d, 64, length=3)
        fc_d = three_fc(r_d, num_units_out=n)

        #fc_inp = three_fc(inp-0.5, num_units_out=n)

        pi = 3.1415 / 2.0
        wh = tf.cos(pi) * h - tf.sin(w)
        r_wh = sin_bank(wh, 64, length=3)
        fc_wh = three_fc(r_wh, num_units_out=n)


        context_proc = fc(flatten(context), num_units_out=n)
        context_proc = tf.expand_dims(context_proc, 1)

        z_comb = fc(z, num_units_out=n)
        z_comb = tf.expand_dims(z_comb, 1)

        #res = (fc_h + fc_w + fc_d) * context_proc + z_comb
        res = (fc_h + fc_w + fc_d + fc_wh) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = (fc_h + fc_w + fc_d) + z_comb
        #res = fc_h + fc_w
        z_mul = fc(z, num_units_out=n)
        z_mul = tf.expand_dims(z_mul, 1)

        #res *= z_mul

        h = three_fc(res, num_units_out=n)
        h2 = three_fc(h, num_units_out=n)
        h3 = three_fc(h2, num_units_out=n)
        return three_fc(h3, num_units_out=1, batch_norm_params=None)
Code example #31
        def body(elems):
            screan_x, vertexes = elems
            vs = vertexes
            sx = screan_x
            maxes = tf.floor(tf.reduce_max(sx * self.img_scale, axis=0),
                             name="maxes")
            mins = tf.floor(tf.reduce_min(sx * self.img_scale, axis=0),
                            name="mins")
            rows = tf.range(mins[1], maxes[1], name="rows")
            columns = tf.range(mins[0], maxes[0], name="columns")
            # (ij, h, w)
            ij = tf.meshgrid(rows, columns, indexing="ij", name="meshgrid_ij")
            ij = tf.transpose(ij, (1, 2, 0))
            # (h*w, ij)
            ij = tf.reshape(ij, (-1, 2))
            scale = tf.cast(self.img_scale, dtype=tf.float64)
            ji = ij[:, ::-1]
            xy = tf.divide(ji + 0.5, scale, name="xy")

            x_vector = tf.stack([sx[1] - sx[0], sx[2] - sx[1], sx[0] - sx[2]],
                                name="x_vector")
            x_vector = tf.divide(x_vector,
                                 tf.norm(x_vector, axis=1, keep_dims=True),
                                 name="x_vector_norm")
            horizon = tf.constant([[1], [0]], dtype=tf.float64)
            theta = tf.acos(tf.matmul(x_vector, horizon))
            theta = tf.reshape(theta, (-1, ))
            theta = tf.multiply(theta, (-1)**tf.cast((x_vector[:, 1] < 0),
                                                     tf.float64),
                                name="theta")
            rot_y = tf.stack([-tf.sin(theta), tf.cos(theta)],
                             axis=0,
                             name="rot_y")
            rot_y = tf.expand_dims(rot_y, axis=0)
            slided = tf.expand_dims(xy, axis=2) -\
                tf.expand_dims(tf.transpose(sx, (1, 0)), axis=0, name="slided")

            adopt = tf.multiply(slided, rot_y, name="rotated")
            adopt = tf.reduce_sum(adopt, axis=1)
            adopt = adopt <= 0
            adopt = tf.reduce_all(adopt, axis=1, name="adopt")
            indices = tf.where(adopt)[:, 0]
            bbox_pixels = tf.concat([ij, xy], axis=1)
            bbox_pixels = tf.gather(bbox_pixels, indices)
            # surface normal
            normal = tf.cross(vs[0], vs[1])
            normal = normal / tf.norm(normal)
            # numerator
            numerator = tf.matmul(tf.reshape(normal, (1, -1)),
                                  tf.reshape(vs[0], (-1, 1))) * input_screan_z
            screan_xy = bbox_pixels[:, 2:4]
            screan_xyz = tf.concat([
                screan_xy,
                tf.tile([[input_screan_z]], (tf.shape(bbox_pixels)[0], 1))
            ],
                                   axis=1)
            denominator = tf.matmul(screan_xyz, tf.reshape(normal, (-1, 1)))
            z = numerator / denominator

            color = tf.constant([[255., 255., 255.]], tf.float64)
            color = tf.tile(color, (tf.shape(bbox_pixels)[0], 1))
            bbox_pixels = tf.concat([bbox_pixels, z, color],
                                    axis=1,
                                    name="bbox_pixels")
            gap = self.image_size[0] * self.image_size[1] - tf.shape(
                bbox_pixels)[0]
            padding = [(0, gap), (0, 0)]
            pixels = tf.pad(bbox_pixels,
                            padding,
                            mode="CONSTANT",
                            constant_values=outside_indices)
            return pixels
Code example #32
File: audio_demo.py (project: yunpeng-ma/agent)
def sine_wave(frequency):
    """Emit a sine wave at the given frequency."""
    xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
    ts = xs / FLAGS.sample_rate
    return tf.sin(2 * math.pi * frequency * ts)
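
Outside the TF graph the same waveform is a one-liner; a NumPy sketch with arbitrary sample rate and pitch:

import numpy as np

sample_rate, frequency, seconds = 44100, 440.0, 1.0     # hypothetical values
ts = np.arange(int(sample_rate * seconds)) / sample_rate
wave = np.sin(2.0 * np.pi * frequency * ts)             # mono signal in [-1, 1]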
Code example #33
File: GA_width.py (project: AlexDreaming/Utah)
 def g_fun(x, t, u):
     return t * (-1.0 - x) * (1.0 - x) * u + tf.sin(np.pi * x)
Code example #34
 def map_preds_fn(self, x):
     amplitudes_wo_bg = tf.abs(x)
     intensities = amplitudes_wo_bg**2 + self._background_level
     amplitudes_bg = intensities**0.5
     phases = tf.angle(x)
     return tf.math.complex(amplitudes_bg * tf.cos(phases), amplitudes_bg * tf.sin(phases))
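
The cos/sin pair is just the polar-to-rectangular conversion amp * exp(i * phase); a NumPy sketch of the same background-aware renormalization (the background level is arbitrary):

import numpy as np

x = np.array([3.0 + 4.0j, -1.0 + 1.0j])
background = 0.5                                   # hypothetical background level
amplitudes = np.sqrt(np.abs(x) ** 2 + background)
preds = amplitudes * np.exp(1j * np.angle(x))      # equals amp*cos(phase) + i*amp*sin(phase)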
Code example #35
 def objective(input_data: TensorType) -> TensorType:
     x, y = input_data[..., -2], input_data[..., -1]
     z = tf.cos(2.0 * x) * tf.cos(y) + tf.sin(x)
     return z[:, None]
Code example #36
File: Iterative_method.py (project: wmorning/MagNet)
def pol2cart(r, th):
    '''
    convert from polar to cartesian coordinates (using tensors)
    '''
    return tf.multiply(r, tf.cos(th)), tf.multiply(r, tf.sin(th))
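
A NumPy round trip with the inverse conversion makes the convention explicit (helper names made up):

import numpy as np

def pol2cart_np(r, th):
    return r * np.cos(th), r * np.sin(th)

def cart2pol_np(x, y):
    return np.hypot(x, y), np.arctan2(y, x)

x, y = pol2cart_np(2.0, np.pi / 3)
r, th = cart2pol_np(x, y)          # recovers (2.0, pi/3)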
Code example #37
def inverse_smoothstep(image):
    """Approximately inverts a global tone mapping curve."""
    image = tf.clip_by_value(image, 0.0, 1.0)
    return 0.5 - tf.sin(tf.asin(1.0 - 2.0 * image) / 3.0)
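
The closed form works because the smoothstep curve 3x^2 - 2x^3 reduces, after the substitution x = 0.5 + sin(a), to the triple-angle identity sin(3a) = 3 sin(a) - 4 sin^3(a). A NumPy round-trip check (the smoothstep helper is added here only for illustration):

import numpy as np

def smoothstep(x):
    return 3.0 * x ** 2 - 2.0 * x ** 3

def inverse_smoothstep_np(y):
    y = np.clip(y, 0.0, 1.0)
    return 0.5 - np.sin(np.arcsin(1.0 - 2.0 * y) / 3.0)

x = np.linspace(0.0, 1.0, 11)
assert np.allclose(inverse_smoothstep_np(smoothstep(x)), x)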
Code example #38
File: train.py (project: yashenkoxciv/semantic-vision)
def train():
    # Prepare Training Data
    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()

    X_train = prepare_mnist(X_train)
    X_test = prepare_mnist(X_test)

    # Initialize Models
    real_data = tf.placeholder(tf.float32, (None, *IMG_DIM), name="input_img")
    real_data_rot = tf.placeholder(tf.float32, (None, *IMG_DIM))
    angles_tf = tf.placeholder(tf.float32, (None, ), name="input_angles")

    # convert angles to acosi
    aco = tf.cos(angles_tf * math.pi)
    asi = tf.sin(angles_tf * math.pi)
    acosi = tf.stack((aco, asi), axis=-1)

    # W_control -> (-1,N,N,1,1,2)
    W_control = tf.layers.dense(acosi, N * N * 2, activation=None)
    W_control = tf.reshape(W_control, (-1, N, N, 1, 1, 2))

    # [-1,1] - > [0,N-1] (to index space)
    W_control = (W_control + 1) * (N - 1) / 2.0

    # fixed address on the images (in index space)
    NI = fixed_address(N)
    NI = tf.constant(NI, dtype=W_control.dtype, name="NI")

    # NI -> (1,1,1,N,N,2)
    NI = tf.reshape(NI, (1, 1, 1, N, N, 2))

    x = tf.nn.relu(1 - tf.abs(NI - W_control))
    x = tf.reduce_prod(x, axis=-1)  # (1-dx) * (1-dy)

    layer1 = tf.layers.conv2d(real_data,
                              16,
                              5,
                              2,
                              padding='same',
                              activation=tf.nn.tanh)
    layer2 = tf.layers.conv2d(layer1,
                              32,
                              5,
                              2,
                              padding='same',
                              activation=tf.nn.relu)

    layerControl = tf.einsum('bijc,bijxy->bxyc', layer2, x)

    layer3 = tf.layers.conv2d_transpose(layerControl,
                                        16,
                                        5,
                                        2,
                                        padding='same',
                                        activation=tf.nn.relu)
    layer4 = tf.layers.conv2d_transpose(layer3,
                                        1,
                                        5,
                                        2,
                                        padding='same',
                                        activation=tf.nn.tanh)
    rec_data = tf.identity(layer4, "output_img")

    r_cost = tf.losses.mean_squared_error(rec_data, real_data_rot)

    r_train_op = tf.train.AdamOptimizer(
        learning_rate=LEARNING_RATE).minimize(r_cost)

    saver = tf.train.Saver(max_to_keep=20)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    f_train_stat = open("train_log.txt", "w", buffering=1)
    f_test_stat = open("test_log.txt", "w", buffering=1)

    os.system("mkdir -p figs_rec")
    for it in range(ITERS):
        start_time = time.time()

        # first reconstruction phase
        angles, Xb, Xb_rot = get_batch(X_train, Y_train)
        r_cost_rez, _ = sess.run([r_cost, r_train_op],
                                 feed_dict={
                                     real_data: Xb,
                                     real_data_rot: Xb_rot,
                                     angles_tf: angles
                                 })

        f_train_stat.write("%i %g\n" % (it, r_cost_rez))

        print(it, (time.time() - start_time))

        if ((it + 1) % 500 == 0):

            angles, Xb, Xb_rot = get_batch(X_train, Y_train)
            samples = sess.run([rec_data],
                               feed_dict={
                                   real_data: Xb,
                                   real_data_rot: Xb_rot,
                                   angles_tf: angles
                               })
            plot_pair_samples(Xb_rot, samples,
                              'figs_rec/samples_%.6i_seen.png' % (it))

            angles, Xb, Xb_rot = get_batch(X_test, Y_test)
            samples = sess.run([rec_data],
                               feed_dict={
                                   real_data: Xb,
                                   real_data_rot: Xb_rot,
                                   angles_tf: angles
                               })
            plot_pair_samples(Xb_rot, samples,
                              'figs_rec/samples_%.6i_unseen.png' % (it))

            r_cost_rez = sess.run(r_cost,
                                  feed_dict={
                                      real_data: Xb,
                                      real_data_rot: Xb_rot,
                                      angles_tf: angles
                                  })
            f_test_stat.write("%i %g\n" % (it, r_cost_rez))

        if ((it + 1) % 10000 == 0):
            saver.save(sess, 'save/model', global_step=it)

    saver.save(sess, 'save/final-model')
Code example #39
File: activations.py (project: xiaobaile/Nlp)
def minsin(x, name="minsin_act"):
    with tf.variable_scope(name):
        return tf.minimum(x, tf.sin(x))
Code example #40
    def __init__(
        self,
        conf,
        video=None,
        poses=None,
        reuse_scope=None,
    ):
        """
        :param conf:
        :param video:
        :param actions:
        :param states:
        :param lt_states: latent states
        :param test:
        :param ltprop:   whether to propagate latent state forward
        """
        poses = tf.squeeze(poses)

        self.iter_num = tf.placeholder(tf.float32, [])
        summaries = []

        first_row = tf.reshape(np.arange(conf['batch_size']),
                               shape=[conf['batch_size'], 1])
        rand_ind = np.random.randint(0,
                                     conf['sequence_length'],
                                     size=[conf['batch_size'], 1])

        self.num_ind_0 = num_ind_0 = tf.concat(1, [first_row, rand_ind])
        self.image = image = tf.gather_nd(video, num_ind_0)
        self.true_pose = true_pose = tf.gather_nd(poses, num_ind_0)

        if reuse_scope is None:
            is_training = True
        else:
            is_training = False

        if reuse_scope is None:
            inferred_pose = construct_model(conf,
                                            image,
                                            is_training=is_training)
        else:
            # If it's a validation or test model.
            if 'nomoving_average' in conf:
                is_training = True
                print('valmodel with is_training: ', is_training)

            with tf.variable_scope(reuse_scope, reuse=True):
                inferred_pose = construct_model(conf,
                                                image,
                                                is_training=is_training)

        self.inferred_pose = inferred_pose

        inferred_pos = tf.slice(inferred_pose, [0, 0], [-1, 2])
        true_pos = tf.slice(true_pose, [0, 0], [-1, 2])
        pos_cost = tf.reduce_sum(tf.square(inferred_pos - true_pos))

        inferred_ori = tf.slice(inferred_pose, [0, 2], [-1, 1])
        true_ori = tf.slice(true_pose, [0, 2], [-1, 1])

        c1 = tf.cos(inferred_ori)
        s1 = tf.sin(inferred_ori)
        c2 = tf.cos(true_ori)
        s2 = tf.sin(true_ori)
        ori_cost = tf.reduce_sum(tf.square(c1 - c2) + tf.square(s1 - s2))

        total_cost = pos_cost + ori_cost

        self.prefix = prefix = tf.placeholder(tf.string, [])
        summaries.append(tf.scalar_summary(prefix + 'pos_cost', pos_cost))
        summaries.append(tf.scalar_summary(prefix + 'ori_cost', ori_cost))
        summaries.append(tf.scalar_summary(prefix + 'total_cost', total_cost))
        self.loss = loss = total_cost
        self.lr = tf.placeholder_with_default(conf['learning_rate'], ())

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            with tf.control_dependencies([updates]):
                self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
        else:
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)

        self.summ_op = tf.merge_summary(summaries)
Code example #41
File: generation_process.py (project: tgaudin/gryffin)
    def construct_comp_graph(self):

        tf.compat.v1.reset_default_graph()
        self.tf_descs = tf.compat.v1.placeholder(tf.float32,
                                                 [None, self.num_descs])
        self.tf_objs = tf.compat.v1.placeholder(tf.float32, [None, 1])

        with tf.name_scope('auto_desc_gen'):

            self.weights_0 = tf.compat.v1.get_variable(
                'weights_0', [self.num_descs, self.num_descs],
                initializer=tf.initializers.identity())
            self.biases_0 = tf.compat.v1.get_variable(
                'biases_0', [self.num_descs],
                initializer=tf.initializers.zeros())

            self.weights_0 = self.weights_0 + tf.random.normal(
                [self.num_descs, self.num_descs], 0., 1e-5)
            self.biases_0 = self.biases_0 + tf.random.normal([self.num_descs],
                                                             0., 1e-5)

            activation = lambda x: tf.nn.softsign(x)
            regressor = lambda x: activation(
                tf.matmul(x, self.weights_0) + self.biases_0)

            gen_descs = regressor(self.tf_descs)
            self.gen_descs = gen_descs

            # compute correlation coefficients between descriptors and objectives
            gen_descs_mean, gen_descs_var = tf.nn.moments(gen_descs, axes=0)
            objs_mean, objs_var = tf.nn.moments(self.tf_objs, axes=0)

            gen_descs_var += 1e-6
            objs_var += 1e-6

            numerator = tf.reduce_mean(
                (self.tf_objs - objs_mean) * (gen_descs - gen_descs_mean),
                axis=0)
            denominator = tf.sqrt(gen_descs_var * objs_var)
            corr_coeffs = numerator / denominator
            self.corr_coeffs = corr_coeffs

            # compute correlation coefficients among descriptors
            gen_descs_expand = tf.expand_dims(gen_descs - gen_descs_mean, -1)
            gen_descs_transpose = tf.transpose(gen_descs_expand,
                                               perm=[0, 2, 1])

            gen_descs_var_expand = tf.expand_dims(gen_descs_var, -1)
            gen_descs_var_transpose = tf.transpose(gen_descs_var_expand,
                                                   perm=[1, 0])

            cov_gen_descs = tf.reduce_mean(tf.matmul(gen_descs_expand,
                                                     gen_descs_transpose),
                                           axis=0)
            cov_gen_descs /= tf.sqrt(
                tf.matmul(gen_descs_var_expand, gen_descs_var_transpose))
            self.cov_gen_descs = cov_gen_descs

            # compute loss for deviating from target binary matrix
            #			min_corr         = 2. * 2. / (3. * np.sqrt(self.num_samples - 1) )    # corresponds to 95 % confidence interval
            min_corr = 1. / np.sqrt(self.num_samples - 2)
            self.min_corr = min_corr
            norm_corr_coeffs = tf.nn.leaky_relu(
                (tf.abs(corr_coeffs) - min_corr) / (1. - min_corr), 0.01)

            loss_0 = tf.reduce_mean(tf.square(tf.sin(np.pi *
                                                     norm_corr_coeffs)))
            loss_1 = (1. - tf.reduce_max(tf.abs(norm_corr_coeffs)))
            #			loss_1  = tf.square( tf.cos( np.pi * norm_corr_coeffs[0] ) ) + tf.square( tf.cos( corr_coeffs[0] * np.pi / 2. ) )
            #           loss_1 /= self.num_descs

            # compute loss for non-zero correlations in generated descriptors
            norm_cov_x = tf.nn.leaky_relu(
                (tf.abs(cov_gen_descs) - min_corr) / (1. - min_corr), 0.01)
            loss_2 = tf.reduce_sum(tf.square(tf.sin(
                np.pi * norm_cov_x / 2.))) / (self.num_descs**2 -
                                              self.num_descs)

            # weight regularization
            loss_3 = 1e-2 * tf.reduce_mean(tf.abs(self.weights_0))

            self.loss_0 = loss_0
            self.loss_1 = loss_1
            self.loss_2 = loss_2
            self.loss_3 = loss_3

            # register training operation
            self.loss = loss_0 + loss_1 + loss_2 + loss_3
            optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=self.eta)
            self.train_op = optimizer.minimize(self.loss)

#		print('initializing graph variables')
        init_op = tf.group(tf.compat.v1.global_variables_initializer(),
                           tf.compat.v1.local_variables_initializer())
        config = tf.compat.v1.ConfigProto(inter_op_parallelism_threads=1,
                                          intra_op_parallelism_threads=1)
        self.sess = tf.compat.v1.Session(config=config)
        #		print('running init_op')
        self.sess.run(init_op)
Code example #42
 def sin_network(self, x):
     z1 = tf.sin(x, name="z1")
     return z1
Code example #43
# Open graph session
sess = tf.Session()

# div() vs truediv() vs floordiv()
print(sess.run(tf.div(3, 4)))
print(sess.run(tf.truediv(3, 4)))
print(sess.run(tf.floordiv(3.0, 4.0)))

# Mod function
print(sess.run(tf.mod(22.0, 5.0)))

# Cross Product
print(sess.run(tf.cross([1., 0., 0.], [0., 1., 0.])))

# Trig functions
print(sess.run(tf.sin(3.1416)))
print(sess.run(tf.cos(3.1416)))
# Tangent
print(sess.run(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.))))

# Custom operation
test_nums = range(15)


#from tensorflow.python.ops import math_ops
#print(sess.run(tf.equal(test_num, 3)))
def custom_polynomial(x_val):
    # Return 3x^2 - x + 10
    return (tf.subtract(3 * tf.square(x_val), x_val) + 10)
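
The excerpt stops after the definition; one plausible way to exercise it over test_nums (not shown in the original) would be:

for num in test_nums:
    print(sess.run(custom_polynomial(num)))   # evaluates 3x^2 - x + 10 for x = 0..14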

Code example #44
    def timeflow(self, t=0.0):
        self.setalpha(t)
        Momentum = tf.matmul(tf.matmul(
            self.body.wb, self.body.Ib), self.body.Q) + tf.scalar_mul(
                self.body.m, tf.cross(self.body.rs, self.body.vs))
        Feqc = tf.scalar_mul(Mtot, g)
        Feqa = tf.diag([Mtot, Mtot, Mtot])
        Crossvec = tf.zeros((1, 3), dtype=tf.float32)
        Teqalpha = tf.zeros((3, 3), dtype=tf.float32)
        Teqc = tf.zeros((1, 3), dtype=tf.float32)
        mlsum = tf.zeros((1, 3), dtype=tf.float32)
        sumDs = tf.zeros((3, 3), dtype=tf.float32)
        wbs = tf.matmul(self.body.wb, self.body.Q)  #[1,3] matrix
        tot_lbtomots = []
        for p in range(numLeg):
            for i in range(numsubleg):
                self.leg[p].sub[i].omega += self.leg[p].sub[
                    i].alpha * dtime  # update omega over time
                self.leg[p].sub[i].theta += self.leg[p].sub[
                    i].omega * dtime  # update theta over time
                self.leg[p].sub[i].Q = tf.scalar_mul(tf.cos(self.leg[p].sub[i].theta), tf.eye(3, dtype=tf.float32)) + \
                tf.scalar_mul(1.-tf.cos(self.leg[p].sub[i].theta), tf.matmul(self.leg[p].sub[i].axis, self.leg[p].sub[i].axis, transpose_a = True)) + \
                tf.scalar_mul(tf.sin(self.leg[p].sub[i].theta), tf.cross(tf.tile(self.leg[p].sub[i].axis,[3,1]), tf.eye(3, dtype=tf.float32)))
            Qs = [tf.matmul(self.leg[p].sub[0].Q,
                            self.body.Q)]  # Qs[i]: coordinate transform from the i-th subleg frame to the space frame
            #List of rotation matrices of each sublegs in space frame
            #Type : list of [3,3] Tensor
            for i in range(1, numsubleg):
                Qs.append(tf.matmul(self.leg[p].sub[i].Q, Qs[i - 1]))

            Is = [
                tf.matmul(
                    tf.matmul(Qs[i], self.leg[p].sub[i].Ib, transpose_a=True),
                    Qs[i]) for i in range(numsubleg)
            ]

            e = [
                tf.matmul(self.leg[p].sub[i].axis, Qs[i])
                for i in range(numsubleg)
            ]
            #List of axes of each sublegs in space frame
            #Type : list of [None,3] Tensor

            Qalpha = [
                tf.scalar_mul(self.leg[p].sub[i].alpha, e[i])
                for i in range(numsubleg)
            ]

            Qalphasum = [Qalpha[0]]
            for i in range(1, numsubleg):
                Qalphasum.append(Qalphasum[i - 1] + Qalpha[i])

            Qw = [
                tf.scalar_mul(self.leg[p].sub[i].omega, e[i])
                for i in range(numsubleg)
            ]

            ws = [wbs + Qw[0]]
            for i in range(1, numsubleg):
                ws.append(ws[i - 1] + Qw[i])

            w = [
                tf.matmul(ws[i], Qs[i], transpose_b=True)
                for i in range(numsubleg)
            ]

            ls = [[
                tf.matmul(self.leg[p].sub[i].l[0], Qs[i]),
                tf.matmul(self.leg[p].sub[i].l[1], Qs[i])
            ] for i in range(numsubleg)]  #ls = 2Dtensor

            lbtomotbs = tf.matmul(self.body.lbtomot[p],
                                  self.body.Q)  # lbtomotbs = 2Dtensor

            lbtomots = [lbtomotbs + ls[0][0]]  # lbtomots = 2Dtensor

            for i in range(1, numsubleg):
                lbtomots.append(lbtomots[i - 1] + ls[i - 1][1] + ls[i][0])
            for i in range(numsubleg):
                mlsum += tf.scalar_mul(self.leg[p].sub[i].m, lbtomots[i])
            # for angular-momentum debugging
            vmotbs = [tf.cross(wbs, lbtomotbs) + tf.cross(ws[0], ls[0][0])]
            for i in range(1, numsubleg):
                vmotbs.append(vmotbs[i - 1] +
                              tf.cross(ws[i - 1], ls[i - 1][1]) +
                              tf.cross(ws[i], ls[i][0]))

            #Calculating External Forces
            vs = self.body.vs
            for i in range(numsubleg):
                #Collisiontemp = tf.cast(tf.less(lbtomots[i]+self.body.rs+ls[i][1],tf.zeros((1,3),dtype=tf.float32)),tf.float32)
                Collisiontemp = Offset - tf.multiply(
                    Offset,
                    tf.sigmoid(
                        tf.scalar_mul(Sigscale,
                                      lbtomots[i] + self.body.rs + ls[i][1])))
                Collisionz = tf.multiply(
                    Collisiontemp, tf.constant([[0, 0, 1]], dtype=tf.float32))
                Collisionxy = tf.matmul(
                    Collisionz,
                    tf.constant([[0, 0, 0], [0, 0, 0], [1, 1, 0]],
                                tf.float32))  ## could probably reduce this computation further; find a better way
                vs += tf.cross(ws[i], ls[i][0] + ls[i][1])
                vCollision = Offset - tf.multiply(
                    Offset, tf.sigmoid(tf.scalar_mul(Sigscale, vs)))
                #vCollision = tf.cast(tf.less( vs , tf.zeros((1,3),dtype=tf.float32) ),tf.float32)
                Ftemp = tf.multiply(
                    Collisionz, Fadded + tf.multiply(
                        (vCollision - Offset), Fsubed))
                Feqc += Ftemp
                Teqc += tf.cross(lbtomots[i] + ls[i][1], Ftemp)
                FrictionTemp = -tf.multiply(tf.scalar_mul(
                    Fricscale, vs), Collisionxy)  # ugh... the forces here are all wrong
                Feqc += FrictionTemp
                Teqc += tf.cross(lbtomots[i] + ls[i][1], FrictionTemp)

            A = [
                tf.cross(wbs, tf.cross(wbs, lbtomotbs)) +
                tf.cross(Qalphasum[0], ls[0][0]) +
                tf.cross(ws[0], tf.cross(ws[0], ls[0][0]))
            ]

            for i in range(1, numsubleg):
                A.append(
                    tf.cross(Qalphasum[i - 1], ls[i - 1][1]) +
                    tf.cross(Qalphasum[i], ls[i][0]) +
                    tf.cross(ws[i - 1], tf.cross(ws[i - 1], ls[i - 1][1])) +
                    tf.cross(ws[i], tf.cross(ws[i], ls[i][0])))

            mlsquare = tf.zeros((1), dtype=tf.float32)
            for i in range(numsubleg):
                mlsquare += tf.scalar_mul(
                    self.leg[p].sub[i].m,
                    tf.matmul(lbtomots[i], lbtomots[i], transpose_b=True))
            mlsquare = tf.reshape(mlsquare, [-1])
            Dya = tf.zeros([3, 3], dtype=tf.float32)
            for i in range(numsubleg):
                Dya += tf.scalar_mul(
                    self.leg[p].sub[i].m,
                    tf.matmul(lbtomots[i], lbtomots[i], transpose_a=True))
            ###############
            Ds = tf.diag(tf.concat([mlsquare, mlsquare, mlsquare],
                                   axis=0)) - Dya
            Teqalpha += Ds
            sumDs += Ds
            #Qb * Ib * Qb.transpose()

            for i in range(numsubleg):
                Feqc -= tf.scalar_mul(self.leg[p].sub[i].m, A[i])
                Crossvec += tf.scalar_mul(self.leg[p].sub[i].m, lbtomots[i])
                Teqc += tf.matmul(
                    tf.cross(tf.matmul(w[i], self.leg[p].sub[i].Ib), w[i]),
                    Qs[i])
                Teqc -= tf.matmul(Qalphasum[i], Is[i])
                Teqalpha += Is[i]
                #Qs_i * I_i * Qs_i^T
            for i in range(numsubleg):
                Momentum += tf.matmul(tf.matmul(w[i], self.leg[p].sub[i].Ib),
                                      Qs[i])
                Momentum += tf.scalar_mul(
                    self.leg[p].sub[i].m,
                    tf.cross(lbtomots[i] + self.body.rs,
                             vmotbs[i] + self.body.vs))
            #leg update
            #float32 -> float32 conversion : 171013 Fine
            #update 'Q's of leg - 20171012 fine
            tot_lbtomots += lbtomots
        Teqalpha += tf.matmul(
            tf.matmul(self.body.Q, self.body.Ib, transpose_a=True),
            self.body.Q)
        Teqc += tf.matmul(
            tf.cross(tf.matmul(self.body.wb, self.body.Ib), self.body.wb),
            self.body.Q)
        Teqc += tf.cross(mlsum, g)
        Teqanorm = tf.reshape(tf.matmul(mlsum, mlsum, transpose_b=True), [-1])
        alphabs = tf.matmul(
            Teqc - tf.scalar_mul(1. / Mtot, tf.cross(mlsum, Feqc)),
            tf.matrix_inverse(Teqalpha + tf.scalar_mul(
                1. / Mtot,
                tf.diag(tf.concat([Teqanorm, Teqanorm, Teqanorm], axis=0)) -
                tf.matmul(mlsum, mlsum, transpose_a=True))  # this part is very confusing
                              ))
        asb = tf.scalar_mul(1. / Mtot, Feqc - tf.cross(mlsum, alphabs))
        alphab = tf.matmul(alphabs, self.body.Q, transpose_b=True)
        self.body.wb += tf.scalar_mul(dtime, alphab)
        self.body.Q += tf.scalar_mul(
            dtime, tf.cross(tf.concat([wbs, wbs, wbs], axis=0), self.body.Q))
        self.body.vs += tf.scalar_mul(dtime, asb)
        self.body.rs += tf.scalar_mul(dtime, self.body.vs)

        # Q to quaternion

        qw = tf.scalar_mul(
            0.5, tf.sqrt(tf.reduce_sum(tf.diag_part(self.body.Q)) + 1.))
        qv = tf.reduce_sum(tf.cross(self.body.Q, tf.eye(3, dtype=tf.float32)),
                           axis=0) / tf.scalar_mul(4., qw)

        # quaternion normalization

        qvsquare = tf.reduce_sum(tf.square(qv))
        qnorm = tf.sqrt(tf.square(qw) + qvsquare)
        qw /= qnorm
        qv /= qnorm
        # quaternion to Q

        self.body.Q = tf.scalar_mul(qw*qw-qvsquare,tf.eye(3, dtype = tf.float32))\
            + 2 * tf.matmul(tf.reshape(qv, [3, 1]), tf.reshape(qv, [1, 3]))\
            - 2 * qw * tf.cross(tf.tile(tf.reshape(qv, [1,3]), [3,1]), tf.eye(3, dtype = tf.float32))

        return Momentum, [x + self.body.rs for x in tot_lbtomots]
Code example #45
File: common.py (project: liuxinren/pyramidpoints)
    def tf_augment_input(self, stacked_points, batch_inds, config):

        # Parameter
        num_batches = batch_inds[-1] + 1

        ##########
        # Rotation
        ##########

        if config.augment_rotation == 'vertical':

            # Choose a random angle for each element
            theta = tf.random_uniform((num_batches, ),
                                      minval=0,
                                      maxval=2 * np.pi)

            # Rotation matrices
            c, s = tf.cos(theta), tf.sin(theta)
            cs0 = tf.zeros_like(c)
            cs1 = tf.ones_like(c)
            R = tf.stack([c, -s, cs0, s, c, cs0, cs0, cs0, cs1], axis=1)
            R = tf.reshape(R, (-1, 3, 3))

            # Create N x 3 x 3 rotation matrices to multiply with stacked_points
            stacked_rots = tf.gather(R, batch_inds)

            # Apply rotations
            stacked_points = tf.reshape(
                tf.matmul(tf.expand_dims(stacked_points, axis=1),
                          stacked_rots), [-1, 3])

        elif config.augment_rotation == 'none':
            R = tf.eye(3, batch_shape=(num_batches, ))

        else:
            raise ValueError('Unknown rotation augmentation : ' +
                             config.augment_rotation)

        #######
        # Scale
        #######

        # Choose random scales for each example
        min_s = config.augment_scale_min
        max_s = config.augment_scale_max

        if config.augment_scale_anisotropic:
            s = tf.random_uniform((num_batches, 3), minval=min_s, maxval=max_s)
        else:
            s = tf.random_uniform((num_batches, 1), minval=min_s, maxval=max_s)

        symmetries = []
        for i in range(3):
            if config.augment_symmetries[i]:
                symmetries.append(
                    tf.round(tf.random_uniform((num_batches, 1))) * 2 - 1)
            else:
                symmetries.append(tf.ones([num_batches, 1], dtype=tf.float32))
        s *= tf.concat(symmetries, 1)

        # Create N x 3 vector of scales to multiply with stacked_points
        stacked_scales = tf.gather(s, batch_inds)

        # Apply scales
        stacked_points = stacked_points * stacked_scales

        #######
        # Noise
        #######

        noise = tf.random_normal(tf.shape(stacked_points),
                                 stddev=config.augment_noise)
        stacked_points = stacked_points + noise

        return stacked_points, s, R
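
The flattened [c, -s, 0, s, c, 0, 0, 0, 1] stack above reshapes row-major into the usual rotation about the vertical (z) axis, and the points are multiplied as row vectors. A quick NumPy check of that layout (illustrative only, not part of the original file):

import numpy as np

theta = 0.3
c, s = np.cos(theta), np.sin(theta)
R = np.array([c, -s, 0.0, s, c, 0.0, 0.0, 0.0, 1.0]).reshape(3, 3)
# Row-major reshape gives the standard z-axis rotation matrix:
# [[cos t, -sin t, 0],
#  [sin t,  cos t, 0],
#  [ 0,      0,    1]]
p = np.array([[1.0, 0.0, 0.0]])  # a single point as a row vector
rotated = p @ R                  # row-vector convention, as in tf.matmul above
print(rotated)                   # -> [[ cos t, -sin t, 0 ]]
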
コード例 #46
0
ファイル: test_golden_search.py プロジェクト: zeroyou/neupy
 def f(x):
     return tf.sin(x) * x**-0.5
コード例 #47
0
def sin(x):
    '''Computes sin of x element-wise.
    '''
    return tf.sin(x)
コード例 #48
0
ファイル: main.py プロジェクト: zhouweiti/DaNet-Tensorflow
    def build(self):
        # create sub-modules
        encoder = hparams.get_encoder()(self, 'encoder')
        # ===================
        # build the model

        input_shape = [
            hparams.BATCH_SIZE, hparams.MAX_N_SIGNAL, None,
            hparams.FEATURE_SIZE
        ]

        s_src_signals = tf.placeholder(hparams.COMPLEXX,
                                       input_shape,
                                       name='source_signal')
        s_dropout_keep = tf.placeholder(hparams.FLOATX, [],
                                        name='dropout_keep')
        reger = hparams.get_regularizer()
        with tf.variable_scope('global', regularizer=reger):
            # TODO add mixing coeff ?

            # get mixed signal
            s_mixed_signals = tf.reduce_sum(s_src_signals, axis=1)

            s_src_signals_pwr = tf.abs(s_src_signals)
            s_mixed_signals_phase = tf.atan2(tf.imag(s_mixed_signals),
                                             tf.real(s_mixed_signals))
            s_mixed_signals_power = tf.abs(s_mixed_signals)
            s_mixed_signals_log = tf.log1p(s_mixed_signals_power)
            # int[B, T, F]
            # float[B, T, F, E]
            s_embed = encoder(s_mixed_signals_log)
            s_embed_flat = tf.reshape(
                s_embed, [hparams.BATCH_SIZE, -1, hparams.EMBED_SIZE])

            # TODO make attractor estimator a submodule ?
            estimator = hparams.get_estimator(hparams.TRAIN_ESTIMATOR_METHOD)(
                self, 'train_estimator')
            s_attractors = estimator(s_embed,
                                     s_src_pwr=s_src_signals_pwr,
                                     s_mix_pwr=s_mixed_signals_power)

            using_same_method = (hparams.INFER_ESTIMATOR_METHOD ==
                                 hparams.TRAIN_ESTIMATOR_METHOD)

            if using_same_method:
                s_valid_attractors = s_attractors
            else:
                valid_estimator = hparams.get_estimator(
                    hparams.INFER_ESTIMATOR_METHOD)(self, 'infer_estimator')
                assert not valid_estimator.USE_TRUTH
                s_valid_attractors = valid_estimator(s_embed)

            separator = hparams.get_separator(hparams.SEPARATOR_TYPE)(
                self, 'separator')
            s_separated_signals_pwr = separator(s_mixed_signals_power,
                                                s_attractors, s_embed_flat)

            if using_same_method:
                s_separated_signals_pwr_valid = s_separated_signals_pwr
            else:
                s_separated_signals_pwr_valid = separator(
                    s_mixed_signals_power, s_valid_attractors, s_embed_flat)

            # use mixture phase and estimated power to get separated signal
            s_mixed_signals_phase = tf.expand_dims(s_mixed_signals_phase, 1)
            s_separated_signals = tf.complex(
                tf.cos(s_mixed_signals_phase) * s_separated_signals_pwr,
                tf.sin(s_mixed_signals_phase) * s_separated_signals_pwr)

            # loss and SNR for training
            # s_train_loss, v_perms, s_perm_sets = ops.pit_mse_loss(
            # s_src_signals_pwr, s_separated_signals_pwr)
            s_train_loss, v_perms, s_perm_sets = ops.pit_mse_loss(
                s_src_signals, s_separated_signals)

            # resolve permutation
            s_perm_idxs = tf.stack([
                tf.tile(tf.expand_dims(tf.range(hparams.BATCH_SIZE), 1),
                        [1, hparams.MAX_N_SIGNAL]),
                tf.gather(v_perms, s_perm_sets)
            ],
                                   axis=2)
            s_perm_idxs = tf.reshape(
                s_perm_idxs, [hparams.BATCH_SIZE * hparams.MAX_N_SIGNAL, 2])
            s_separated_signals = tf.gather_nd(s_separated_signals,
                                               s_perm_idxs)
            s_separated_signals = tf.reshape(s_separated_signals, [
                hparams.BATCH_SIZE, hparams.MAX_N_SIGNAL, -1,
                hparams.FEATURE_SIZE
            ])

            s_train_snr = tf.reduce_mean(
                ops.batch_snr(s_src_signals, s_separated_signals))

            # ^ for validation / inference
            s_valid_loss, v_perms, s_perm_sets = ops.pit_mse_loss(
                s_src_signals_pwr, s_separated_signals_pwr_valid)
            s_perm_idxs = tf.stack([
                tf.tile(tf.expand_dims(tf.range(hparams.BATCH_SIZE), 1),
                        [1, hparams.MAX_N_SIGNAL]),
                tf.gather(v_perms, s_perm_sets)
            ],
                                   axis=2)
            s_perm_idxs = tf.reshape(
                s_perm_idxs, [hparams.BATCH_SIZE * hparams.MAX_N_SIGNAL, 2])
            s_separated_signals_pwr_valid_pit = tf.gather_nd(
                s_separated_signals_pwr_valid, s_perm_idxs)
            s_separated_signals_pwr_valid_pit = tf.reshape(
                s_separated_signals_pwr_valid_pit, [
                    hparams.BATCH_SIZE, hparams.MAX_N_SIGNAL, -1,
                    hparams.FEATURE_SIZE
                ])

            s_separated_signals_valid = tf.complex(
                tf.cos(s_mixed_signals_phase) *
                s_separated_signals_pwr_valid_pit,
                tf.sin(s_mixed_signals_phase) *
                s_separated_signals_pwr_valid_pit)
            s_separated_signals_infer = tf.complex(
                tf.cos(s_mixed_signals_phase) * s_separated_signals_pwr_valid,
                tf.sin(s_mixed_signals_phase) * s_separated_signals_pwr_valid)
            s_valid_snr = tf.reduce_mean(
                ops.batch_snr(s_src_signals, s_separated_signals_valid))

        # ===============
        # prepare summary
        # TODO add impl & summary for word error rate
        with tf.name_scope('train_summary'):
            s_loss_summary_t = tf.summary.scalar('loss', s_train_loss)
            s_snr_summary_t = tf.summary.scalar('SNR', s_train_snr)
            s_lr_summary_t = tf.summary.scalar('LR', self.v_learn_rate)

        with tf.name_scope('valid_summary'):
            s_loss_summary_v = tf.summary.scalar('loss', s_valid_loss)
            s_snr_summary_v = tf.summary.scalar('SNR', s_valid_snr)
            s_lr_summary_v = tf.summary.scalar('LR', self.v_learn_rate)

        # apply optimizer
        ozer = hparams.get_optimizer()(learn_rate=self.v_learn_rate,
                                       lr_decay=hparams.LR_DECAY)

        v_params_li = tf.trainable_variables()
        r_apply_grads = ozer.compute_gradients(s_train_loss, v_params_li)
        if hparams.GRAD_CLIP_THRES is not None:
            r_apply_grads = [(tf.clip_by_value(g, -hparams.GRAD_CLIP_THRES,
                                               hparams.GRAD_CLIP_THRES), v)
                             for g, v in r_apply_grads if g is not None]
        self.op_sgd_step = ozer.apply_gradients(r_apply_grads)

        self.op_init_params = tf.variables_initializer(v_params_li)
        self.op_init_states = tf.variables_initializer(
            list(self.s_states_di.values()))

        self.train_feed_keys = [s_src_signals, s_dropout_keep]
        train_summary = tf.summary.merge(
            [s_loss_summary_t, s_snr_summary_t, s_lr_summary_t])
        self.train_fetches = [
            train_summary,
            dict(loss=s_train_loss, SNR=s_train_snr, LR=self.v_learn_rate),
            self.op_sgd_step
        ]

        self.valid_feed_keys = self.train_feed_keys
        valid_summary = tf.summary.merge(
            [s_loss_summary_v, s_snr_summary_v, s_lr_summary_v])
        self.valid_fetches = [
            valid_summary,
            dict(loss=s_valid_loss, SNR=s_valid_snr)
        ]

        self.infer_feed_keys = [s_mixed_signals, s_dropout_keep]
        self.infer_fetches = dict(signals=s_separated_signals_infer)

        if hparams.DEBUG:
            self.debug_feed_keys = [s_src_signals, s_dropout_keep]
            self.debug_fetches = dict(embed=s_embed,
                                      attrs=s_attractors,
                                      input=s_src_signals,
                                      output=s_separated_signals)
            self.debug_fetches.update(encoder.debug_fetches)
            self.debug_fetches.update(separator.debug_fetches)
            if estimator is not None:
                self.debug_fetches.update(estimator.debug_fetches)

        self.saver = tf.train.Saver(var_list=v_params_li)
コード例 #49
0
 def constraint(input_data: TensorType) -> TensorType:
     x, y = input_data[:, -2], input_data[:, -1]
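     # by the angle-addition identity, cos(x)cos(y) - sin(x)sin(y) == cos(x + y)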
     z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
     return z[:, None]
コード例 #50
0
def get_transform_matrix_tf_(theta, phi, invert_rot=False, invert_focal=False):
    #INPUT IN DEGREES

    #extrinsic matrix:
    #
    # RRRD
    # RRRD
    # RRRD
    # 000D
    # scale = 1
    fx = W / 2.0 * 1.0 / math.tan(fov * math.pi / 180 / 2)
    fy = fx
    focal_length = fx / (W / 2.0)
    FOCAL_LENGTH_X = focal_length
    FOCAL_LENGTH_Y = focal_length
    print(fov)
    sin_phi = tf.sin(phi / 180 * np.pi)
    cos_phi = tf.cos(phi / 180 * np.pi)
    # st()
    sin_theta = tf.sin(theta / 180.0 * np.pi)  #why is theta negative???
    cos_theta = tf.cos(theta / 180.0 * np.pi)

    #these are inverted from normal!
    rotation_azimuth_flat = [
        cos_theta, 0.0, -sin_theta, 0.0, 1.0, 0.0, sin_theta, 0.0, cos_theta
    ]

    rotation_elevation_flat = [
        cos_phi, sin_phi, 0.0, -sin_phi, cos_phi, 0.0, 0.0, 0.0, 1.0
    ]

    f = lambda x: tf.reshape(tf.stack(x), (3, 3))
    rotation_azimuth = f(rotation_azimuth_flat)
    rotation_elevation = f(rotation_elevation_flat)

    rotation_matrix = tf.matmul(rotation_azimuth, rotation_elevation)
    if invert_rot:
        rotation_matrix = tf.linalg.inv(rotation_matrix)

    displacement = np.zeros((3, 1), dtype=np.float32)
    displacement[0, 0] = RADIUS
    displacement = tf.constant(displacement, dtype=np.float32)
    displacement = tf.matmul(rotation_matrix, displacement)

    bottom_row = np.zeros((1, 4), dtype=np.float32)
    bottom_row[0, 3] = 1.0
    bottom_row = tf.constant(bottom_row)

    #print rotation_matrix
    #print bottom_row
    #print displacement

    extrinsic_matrix = tf.concat(
        [tf.concat([rotation_matrix, -displacement], axis=1), bottom_row],
        axis=0)

    if invert_focal:
        intrinsic_diag = [
            1.0, float(FOCAL_LENGTH_X),
            float(FOCAL_LENGTH_Y), 1.0
        ]
    else:
        intrinsic_diag = [
            1.0, 1.0 / float(FOCAL_LENGTH_X), 1.0 / float(FOCAL_LENGTH_Y), 1.0
        ]
    intrinsic_matrix = tf.linalg.tensor_diag(
        tf.constant(intrinsic_diag, dtype=tf.float32))

    camera_matrix = tf.matmul(extrinsic_matrix, intrinsic_matrix)
    return camera_matrix
コード例 #51
0
def get_unit_variable_c(name, scope, shape):
    theta = tf.get_variable(name,
                            shape=shape,
                            initializer=tf.random_uniform_initializer(-pi, pi))
    return tf.complex(tf.cos(theta), tf.sin(theta))
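
Since tf.complex(tf.cos(theta), tf.sin(theta)) is e^{i*theta}, every entry of the returned variable lies on the complex unit circle. A possible usage sketch (TF1-style; it assumes the defining module provides the module-level pi used in the initializer, e.g. pi = np.pi):

import tensorflow as tf

u = get_unit_variable_c('u', scope=None, shape=[4])  # `scope` is unused by the helper
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.abs(u)))  # every entry has magnitude ~1.0
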
コード例 #52
0
ファイル: basic3.py プロジェクト: ThienPhucNguyen/Tensorflow
"""
Some examples of standard and
non-standard operations on tensors
"""
import tensorflow as tf

session = tf.Session()

# integer inputs produce floored (integer) division
print("3 / 4 =", session.run(tf.div(3, 4)))

# the true result of division
print("3 / 4 =", session.run(tf.truediv(3, 4)))

# the floor of division
print("3.0 / 4.0 =", session.run(tf.floordiv(3., 4.)))

# mod operation
print("22.0 mod 5.0 =", session.run(tf.mod(22.0, 5.0)))

# cross product operation
print("cross product of [1., 0., 0.] and [0., 1., 0.]\n",
      session.run(tf.cross([1., 0., 0.], [0., 1., 0.])), "\n")

# self-defined tangent function (tan(pi / 4) = 1)
print("tan(pi / 4) =",
      session.run(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.))))
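
For reference, these prints are deterministic; the script should report roughly the following (exact for the integer and modulo ops, approximate for the trigonometric one since 3.1416 only approximates pi):

# 3 / 4 = 0            (integer division truncates)
# 3 / 4 = 0.75
# 3.0 / 4.0 = 0.0
# 22.0 mod 5.0 = 2.0
# cross product of [1., 0., 0.] and [0., 1., 0.] -> [0. 0. 1.]
# tan(pi / 4) ~ 1.0
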
コード例 #53
0
ファイル: RVFF_IV_1D.py プロジェクト: AndreasICL/MSc_Project
    def innerProduct(intervalLen, omegas, phis, kernelVar, lambda_):
        angle1 = intervalLen * (
            omegas[:, None] - omegas[None, :]
        ) + omegas[:, None] * phis[:, None] - omegas[None, :] * phis[None, :]
        angle2 = intervalLen * (
            omegas[:, None] + omegas[None, :]
        ) + omegas[:, None] * phis[:, None] + omegas[None, :] * phis[None, :]
        angle3 = omegas[:, None] * phis[:,
                                        None] - omegas[None, :] * phis[None, :]
        angle4 = omegas[:, None] * phis[:,
                                        None] + omegas[None, :] * phis[None, :]
        angle5 = omegas * (2 * intervalLen + phis[:, None] + phis[None, :])
        angle6 = omegas * (phis[:, None] + phis[None, :])

        denom1 = tf.Variable(omegas[:, None] - omegas[None, :])
        denom2 = tf.Variable(omegas[:, None] + omegas[None, :])
        denom3 = tf.Variable(2 * omegas)

        denom1 = tf.where(denom1 == 0, 1.0, denom1)
        denom2 = tf.where(denom2 == 0, 1.0, denom2)
        denom3 = tf.where(denom3 == 0, 1.0, denom3)

        coeff1 = (omegas[:, None] * omegas[None, :] +
                  lambda_ * lambda_) / denom1
        coeff2 = (omegas[:, None] * omegas[None, :] - lambda_ *
                  lambda_) / denom2  #( omegas[:, None] + omegas[None, :] )
        coeff3 = (omegas * omegas - lambda_ * lambda_) / denom3
        coeff4 = -coeff3
        coeff5 = lambda_ * lambda_ + omegas * omegas

        denom = 4 * kernelVar * lambda_

        firstTerm = (coeff1 * tf.sin(angle1) + coeff2 * tf.sin(angle2) -
                     lambda_ * tf.cos(angle2) + lambda_ * tf.cos(angle1) -
                     coeff1 * tf.sin(angle3) - coeff2 * tf.sin(angle4) +
                     lambda_ * tf.cos(angle4) -
                     lambda_ * tf.cos(angle3)) / denom

        firstTermForEqualOmegas = (
            (omegas * omegas + lambda_ * lambda_) *
            tf.cos(omegas * (phis[:, None] - phis[None, :])) * intervalLen +
            coeff3 * tf.sin(angle5) - (lambda_ * tf.cos(angle5)) -
            (coeff3 * tf.sin(angle6)) + lambda_ * tf.cos(angle6)) / denom

        firstTermForZeroBothOmegas = tf.reshape(tf.Variable(
            intervalLen * lambda_ / kernelVar / 2),
                                                shape=(-1))

        firstTermForZeroOmegas = 0.0

        firstTermForOppositeOmegas = (
            coeff4 * tf.sin(angle5) -
            coeff5 * tf.cos(omegas * (phis[:, None] - phis[None, :])) +
            lambda_ * tf.cos(angle5) - coeff4 * tf.sin(angle6) -
            lambda_ * tf.cos(angle6)) / denom

        firstTerm = tf.where(denom1 == 1.0, firstTermForEqualOmegas, firstTerm)
        firstTerm = tf.where(denom3 == 1.0, firstTermForZeroOmegas, firstTerm)
        firstTerm = tf.where(denom2 == 1.0, firstTermForOppositeOmegas,
                             firstTerm)
        firstTerm = tf.where(
            denom2 == 1.0,
            tf.where(denom1 == 1.0, firstTermForZeroBothOmegas, firstTerm),
            firstTerm)

        # firstTerm = tf.where( denom1 == 1.0, firstTermForEqualOmegas, firstTerm )
        # firstTerm = tf.where( denom3 == 1.0, 0.0, firstTerm )
        # firstTerm = tf.where( denom2 == 1.0, firstTermForZeroOmegas, firstTerm )

        secondTermfactors = tf.where(denom3 != 1.0, tf.sin(omegas * phis),
                                     tf.sin(phis))

        secondTerm = secondTermfactors[:, None] * secondTermfactors[
            None, :] / kernelVar

        res = firstTerm + secondTerm
        res = 0.5 * (res + tf.transpose(res))

        if inducing_variable.jitter is not None:
            res = res + tf.cast(
                tf.linalg.diag(inducing_variable.jitter *
                               tf.ones(res.shape[0])), default_float())

        return res
コード例 #54
0
def sample_homography(
        shape, perspective=True, scaling=True, rotation=True, translation=True,
        n_scales=5, n_angles=25, scaling_amplitude=0.1, perspective_amplitude_x=0.1,
        perspective_amplitude_y=0.1, patch_ratio=0.5, max_angle=pi/2,
        allow_artifacts=False, translation_overflow=0.):
    """Sample a random valid homography.

    Computes the homography transformation between a random patch in the original image
    and a warped projection with the same image size.
    As in `tf.contrib.image.transform`, it maps the output point (warped patch) to a
    transformed input point (original patch).
    The original patch, which is initialized with a simple half-size centered crop, is
    iteratively projected, scaled, rotated and translated.

    Arguments:
        shape: A rank-2 `Tensor` specifying the height and width of the original image.
        perspective: A boolean that enables the perspective and affine transformations.
        scaling: A boolean that enables the random scaling of the patch.
        rotation: A boolean that enables the random rotation of the patch.
        translation: A boolean that enables the random translation of the patch.
        n_scales: The number of tentative scales that are sampled when scaling.
        n_angles: The number of tentative angles that are sampled when rotating.
        scaling_amplitude: Controls the amount of scale.
        perspective_amplitude_x: Controls the perspective effect in x direction.
        perspective_amplitude_y: Controls the perspective effect in y direction.
        patch_ratio: Controls the size of the patches used to create the homography.
        max_angle: Maximum angle used in rotations.
        allow_artifacts: A boolean that enables artifacts when applying the homography.
        translation_overflow: Amount of border artifacts caused by translation.

    Returns:
        A `Tensor` of shape `[1, 8]` corresponding to the flattened homography transform.
    """

    # Corners of the output image
    margin = (1 - patch_ratio) / 2
    pts1 = margin + tf.constant([[0, 0], [0, patch_ratio],
                                 [patch_ratio, patch_ratio], [patch_ratio, 0]],
                                tf.float32)
    # Corners of the input patch
    pts2 = pts1

    # Random perspective and affine perturbations
    if perspective:
        if not allow_artifacts:
            perspective_amplitude_x = min(perspective_amplitude_x, margin)
            perspective_amplitude_y = min(perspective_amplitude_y, margin)
        perspective_displacement = tf.truncated_normal([1], 0., perspective_amplitude_y/2)
        h_displacement_left = tf.truncated_normal([1], 0., perspective_amplitude_x/2)
        h_displacement_right = tf.truncated_normal([1], 0., perspective_amplitude_x/2)
        pts2 += tf.stack([tf.concat([h_displacement_left, perspective_displacement], 0),
                          tf.concat([h_displacement_left, -perspective_displacement], 0),
                          tf.concat([h_displacement_right, perspective_displacement], 0),
                          tf.concat([h_displacement_right, -perspective_displacement],
                                    0)])

    # Random scaling
    # sample several scales, check collision with borders, randomly pick a valid one
    if scaling:
        scales = tf.concat(
                [[1.], tf.truncated_normal([n_scales], 1, scaling_amplitude/2)], 0)
        center = tf.reduce_mean(pts2, axis=0, keepdims=True)
        scaled = tf.expand_dims(pts2 - center, axis=0) * tf.expand_dims(
                tf.expand_dims(scales, 1), 1) + center
        if allow_artifacts:
            valid = tf.range(n_scales)  # all scales are valid except scale=1
        else:
            valid = tf.where(tf.reduce_all((scaled >= 0.) & (scaled < 1.), [1, 2]))[:, 0]
        idx = valid[tf.random_uniform((), maxval=tf.shape(valid)[0], dtype=tf.int32)]
        pts2 = scaled[idx]

    # Random translation
    if translation:
        t_min, t_max = tf.reduce_min(pts2, axis=0), tf.reduce_min(1 - pts2, axis=0)
        if allow_artifacts:
            t_min += translation_overflow
            t_max += translation_overflow
        pts2 += tf.expand_dims(tf.stack([tf.random_uniform((), -t_min[0], t_max[0]),
                                         tf.random_uniform((), -t_min[1], t_max[1])]),
                               axis=0)

    # Random rotation
    # sample several rotations, check collision with borders, randomly pick a valid one
    if rotation:
        angles = tf.lin_space(tf.constant(-max_angle), tf.constant(max_angle), n_angles)
        angles = tf.concat([[0.], angles], axis=0)  # in case no rotation is valid
        center = tf.reduce_mean(pts2, axis=0, keepdims=True)
        rot_mat = tf.reshape(tf.stack([tf.cos(angles), -tf.sin(angles), tf.sin(angles),
                                       tf.cos(angles)], axis=1), [-1, 2, 2])
        rotated = tf.matmul(
                tf.tile(tf.expand_dims(pts2 - center, axis=0), [n_angles+1, 1, 1]),
                rot_mat) + center
        if allow_artifacts:
            valid = tf.range(n_angles)  # all angles are valid, except angle=0
        else:
            valid = tf.where(tf.reduce_all((rotated >= 0.) & (rotated < 1.),
                                           axis=[1, 2]))[:, 0]
        idx = valid[tf.random_uniform((), maxval=tf.shape(valid)[0], dtype=tf.int32)]
        pts2 = rotated[idx]

    # Rescale to actual size
    shape = tf.to_float(shape[::-1])  # different convention [y, x]
    pts1 *= tf.expand_dims(shape, axis=0)
    pts2 *= tf.expand_dims(shape, axis=0)

    def ax(p, q): return [p[0], p[1], 1, 0, 0, 0, -p[0] * q[0], -p[1] * q[0]]

    def ay(p, q): return [0, 0, 0, p[0], p[1], 1, -p[0] * q[1], -p[1] * q[1]]

    a_mat = tf.stack([f(pts1[i], pts2[i]) for i in range(4) for f in (ax, ay)], axis=0)
    p_mat = tf.transpose(tf.stack(
        [[pts2[i][j] for i in range(4) for j in range(2)]], axis=0))
    homography = tf.transpose(tf.matrix_solve_ls(a_mat, p_mat, fast=True))
    return homography
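
Since the returned [1, 8] tensor already follows the parameterization of tf.contrib.image.transform (output point mapped back to input point), it can be fed to that op directly. A minimal usage sketch (the placeholder image and the keyword values are illustrative, not part of the original file):

import tensorflow as tf

image = tf.placeholder(tf.float32, [240, 320, 3], name='image')  # H x W x C, illustrative size
shape = tf.shape(image)[:2]                                      # [height, width]

H = sample_homography(shape, patch_ratio=0.7, max_angle=0.5)     # [1, 8] flattened homography
warped = tf.contrib.image.transform(image, H, interpolation='BILINEAR')
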
コード例 #55
0
# Final
g = f / e

session = tf.Session()
out = session.run(g)
session.close()
print(out)

# Question 2:
a = tf.constant(40)
b = tf.constant(20)

# Op1:
c = tf.multiply(a, b)
print(type(c))

# Op2
d = tf.sin(tf.cast(c, tf.float32))

# Op3
e = d / tf.cast(b, tf.float32)

# creating the session
session = tf.Session()
out = session.run(e)
print(out)
session.close()


コード例 #56
0
# In[ ]:

# %% Let's multiply the two to get a 2d gaussian
z_2d = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))


# In[ ]:

# %% Execute the graph and store the value that `out` represents in `result`.
plt.imshow(z_2d.eval())


# In[ ]:

# %% For fun let's create a gabor patch:
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
z = tf.multiply(tf.matmul(x, y), z_2d)
plt.imshow(z.eval())


# In[ ]:

# %% We can also list all the operations of a graph:
ops = tf.get_default_graph().get_operations()
print([op.name for op in ops])


# In[ ]:

# %% Let's try creating a generic function for computing the same thing:
コード例 #57
0
def gauss_random_step(data, decay_chain, r_name, sigma, dat_order):
    angle = cal_helicity_angle(data["particle"],
                               decay_chain.standard_topology())
    decay_chain.standard_topology()
    tp_map = decay_chain.topology_map()

    r_particle = tp_map[get_particle(r_name)]

    mass = {}
    for i in data["particle"]:
        mi = data["particle"][i]["m"]
        if i == r_particle:
            mi = mi + tf.random.normal(mi.shape, 0, sigma, dtype=mi.dtype)
        mass[i] = mi

    mask = True
    p4_all = {}
    for i in decay_chain:
        phi = angle[tp_map[i]][tp_map[i.outs[0]]]["ang"]["alpha"]
        theta = angle[tp_map[i]][tp_map[i.outs[0]]]["ang"]["beta"]

        m0 = mass[tp_map[i.core]]
        m1 = mass[tp_map[i.outs[0]]]
        m2 = mass[tp_map[i.outs[1]]]

        mask = mask & (m0 >= m1 + m2)

        p_square = get_relative_p2(m0, m1, m2)

        p = tf.sqrt(tf.where(p_square > 0, p_square, 0))
        pz = p * tf.cos(theta)
        px = p * tf.sin(theta) * tf.cos(phi)
        py = p * tf.sin(theta) * tf.sin(phi)
        E1 = tf.sqrt(m1 * m1 + p * p)
        E2 = tf.sqrt(m2 * m2 + p * p)
        p1 = tf.stack([E1, px, py, pz], axis=-1)
        p2 = tf.stack([E2, -px, -py, -pz], axis=-1)
        p4_all[i.outs[0]] = p1
        p4_all[i.outs[1]] = p2

    core_boost = {}
    for i in decay_chain:
        if i.core != decay_chain.top:
            core_boost[i.outs[0]] = i.core
            core_boost[i.outs[1]] = i.core
    ret = {}
    for i in decay_chain.outs:
        tmp = i
        ret[i] = p4_all[i]
        while tmp in core_boost:
            tmp = core_boost[tmp]
            # print(i, tmp)
            ret[i] = lv.rest_vector(lv.neg(p4_all[tmp]), ret[i])

    ret2 = {}
    mask = tf.expand_dims(mask, -1)
    for i in ret:
        ret2[i] = tf.where(mask, ret[i], data["particle"][tp_map[i]]["p"])
    # print(ret)
    # print({i: data["particle"][tp_map[i]]["p"] for i in decay_chain.outs})

    pi = np.stack([ret2[i] for i in dat_order], axis=1)
    return pi
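
get_relative_p2 is not shown in the snippet; it presumably returns the squared two-body breakup momentum, which for a parent of mass m0 decaying to masses m1 and m2 is the standard Källén-function expression. A sketch under that assumption (the formula attributed to the helper is an assumption, not confirmed by the source):

import numpy as np

def relative_p2(m0, m1, m2):
    """Squared momentum of either daughter in the rest frame of the parent
    (assumed form of `get_relative_p2`)."""
    return ((m0**2 - (m1 + m2)**2) * (m0**2 - (m1 - m2)**2)) / (4.0 * m0**2)

# example: rho(770) -> pi pi
print(np.sqrt(relative_p2(0.775, 0.1396, 0.1396)))  # ~0.36 GeV
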
コード例 #58
0
ファイル: common.py プロジェクト: liuxinren/pyramidpoints
    def tf_augment_input_bbox(self, stacked_points, bboxes, batch_inds,
                              config):

        # Parameter
        num_batches = batch_inds[-1] + 1

        ##########
        # Rotation
        ##########

        if config.augment_rotation == 'vertical':

            # Choose a random angle for each element
            theta = tf.random_uniform((num_batches, ),
                                      minval=0,
                                      maxval=2 * np.pi)

            # Rotation matrices
            c, s = tf.cos(theta), tf.sin(theta)
            cs0 = tf.zeros_like(c)
            cs1 = tf.ones_like(c)
            R = tf.stack([c, -s, cs0, s, c, cs0, cs0, cs0, cs1], axis=1)
            R = tf.reshape(R, (-1, 3, 3))

            # Create N x 3 x 3 rotation matrices to multiply with stacked_points
            stacked_rots = tf.gather(R, batch_inds)

            # Apply rotations
            stacked_points = tf.reshape(
                tf.matmul(tf.expand_dims(stacked_points, axis=1),
                          stacked_rots), [-1, 3])

            # Apply rotations to bboxes
            new_centers = tf.expand_dims(bboxes[:, :, :3], axis=2)
            tmp_R = tf.tile(tf.expand_dims(R, axis=1),
                            tf.shape(new_centers[:1, :, :1, :1]))
            new_centers = tf.matmul(new_centers, tmp_R)
            bboxes = tf.concat((tf.squeeze(new_centers), bboxes[:, :, 3:]),
                               axis=2)

        elif config.augment_rotation == 'none':
            R = tf.eye(3, batch_shape=(num_batches, ))

        else:
            raise ValueError('Unknown rotation augmentation : ' +
                             config.augment_rotation)

        #######
        # Scale
        #######

        # Choose random scales for each example
        min_s = config.augment_scale_min
        max_s = config.augment_scale_max

        if config.augment_scale_anisotropic:
            s = tf.random_uniform((num_batches, 3), minval=min_s, maxval=max_s)
            raise ValueError(
                "Applying anisotropic scale augmentation to cylinders is not advised."
            )
        else:
            s = tf.random_uniform((num_batches, 1), minval=min_s, maxval=max_s)

        # Apply scale to height and radius before symmetries
        new_hr = bboxes[:, :, 3:] * tf.expand_dims(s, axis=2)

        if config.augment_symmetries:
            symmetries = tf.round(tf.random_uniform((num_batches, 3))) * 2 - 1
            s = s * symmetries

        # Create N x 3 vector of scales to multiply with stacked_points
        stacked_scales = tf.gather(s, batch_inds)

        # Apply scales
        stacked_points = stacked_points * stacked_scales

        # Apply scale to bboxes
        new_centers = bboxes[:, :, :3] * tf.expand_dims(s, axis=1)
        bboxes = tf.concat((new_centers, new_hr), axis=2)

        #######
        # Noise
        #######

        noise = tf.random_normal(tf.shape(stacked_points),
                                 stddev=config.augment_noise)
        stacked_points = stacked_points + noise

        return stacked_points, bboxes, s, R
コード例 #59
0
# Import the deep learning library
import tensorflow as tf

# Define our computational graph
W1 = tf.constant(5.0, name="x")
W2 = tf.constant(3.0, name="y")
W3 = tf.cos(W1, name="cos")
W4 = tf.sin(W2, name="sin")
W5 = tf.multiply(W3, W4, name="mult")
W6 = tf.divide(W1, W2, name="div")
W7 = tf.add(W5, W6, name="add")

# Open the session
with tf.Session() as sess:

    cos = sess.run(W3)
    sin = sess.run(W4)
    mult = sess.run(W5)
    div = sess.run(W6)
    add = sess.run(W7)

    # Before running TensorBoard, make sure you have generated summary data in a log directory by creating a summary writer
    writer = tf.summary.FileWriter("./Desktop/ComputationGraph", sess.graph)

    # Once you have event files, run TensorBoard and provide the log directory
    # Command: tensorboard --logdir="path/to/logs"
    # tensorboard --logdir="E:\90.work\python\tensorflow-master\tensorflow\examples\tutorials\mnist\Desktop\ComputationGraph"
コード例 #60
0
 def _sin_fn(x):
     ranger = tf.linspace(tf.reshape(x[0], []),
                          (sequence_length - 1) * increment,
                          sequence_length + 1)
     return tf.sin(ranger)