Example #1
 def step(h, x):
     # simple rnn
     ### Replace tensor[:, i, :] with x. bryan ###
     y = ln(
         tf.matmul(tensor[:, i, :], w) + tf.matmul(h, u) +
         (b if opt.bias else 0))
     return y
Example #2
 def _linear(self, arys):
     # reuse the i2h / h2h weights created earlier in the current variable scope
     scope = tf.get_variable_scope()
     with tf.variable_scope(scope, reuse=True):
         w_i2h = tf.get_variable("w_i2h")
         w_h2h = tf.get_variable("w_h2h")
         w_b = tf.get_variable("w_b") if self._bias else 0
     # input-to-hidden and hidden-to-hidden projections plus optional bias
     i2h = tf.matmul(arys[0], w_i2h)
     h2h = tf.matmul(arys[1], w_h2h)
     out = i2h + h2h + w_b
     return out
Example #3
 def step(h, c, x):
     # forget gate
     f = tf.sigmoid(
         ln(
             tf.matmul(x, w_f) + tf.matmul(h, u_f) +
             (b_f if opt.bias else 0)))
     # input gate
     i = tf.sigmoid(
         ln(
             tf.matmul(x, w_i) + tf.matmul(h, u_i) +
             (b_i if opt.bias else 0)))
     # new cell value
     cc = tf.tanh(
         ln(
             tf.matmul(x, w_c) + tf.matmul(h, u_c) +
             (b_c if opt.bias else 0)))
     # out gate
     o = tf.sigmoid(
         ln(
             tf.matmul(x, w_o) + tf.matmul(h, u_o) +
             (b_o if opt.bias else 0)))
     # cell update
     cell = f * c + i * cc
     # final output
     y = o * tf.tanh(cell)
     return y, cell
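
The step above packs the standard LSTM equations into one function. As a quick, self-contained illustration of the same gating logic, here is a plain NumPy sketch with hypothetical small dimensions and randomly initialized parameters (the ln, opt, and weight names in the original are assumed to be defined in the surrounding project):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.RandomState(0)
in_dim, dim = 3, 4                       # hypothetical sizes
x = rng.randn(1, in_dim)                 # current input
h = np.zeros((1, dim))                   # previous hidden state
c = np.zeros((1, dim))                   # previous cell state

# randomly initialized parameters, one set per gate (assumed shapes)
w_f, w_i, w_c, w_o = [rng.randn(in_dim, dim) for _ in range(4)]
u_f, u_i, u_c, u_o = [rng.randn(dim, dim) for _ in range(4)]
b_f = b_i = b_c = b_o = np.zeros(dim)

f = sigmoid(x @ w_f + h @ u_f + b_f)     # forget gate
i = sigmoid(x @ w_i + h @ u_i + b_i)     # input gate
cc = np.tanh(x @ w_c + h @ u_c + b_c)    # candidate cell value
o = sigmoid(x @ w_o + h @ u_o + b_o)     # output gate
cell = f * c + i * cc                    # cell update
y = o * np.tanh(cell)                    # new hidden state
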
Example #4
def sg_dense(tensor, opt):
    r"""Applies a full connection.
    
    Args:
      tensor: A 2-D tensor (automatically passed by decorator).
      opt:
        in_dim: An `integer`. The size of input dimension.
        dim: An `integer`. The size of output dimension.
        bias: Boolean. If True, biases are added.
        regularizer: A (Tensor -> Tensor or None) function; the result of applying it
          to a newly created variable will be added to the collection
          tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
        summary: If True, summaries are added. The default is True.

      
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # parameter initialize
    w = tf.sg_initializer.he_uniform('W', (opt.in_dim, opt.dim),
                                     regularizer=opt.regularizer, summary=opt.summary)
    b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0

    # apply transform
    out = tf.matmul(tensor, w) + b

    return out
Example #5
def linear(input_, output_size, scope=None):
    '''
    Linear map: output[k] = sum_i(Matrix[k, i] * input_[i]) + Bias[k]

    Args:
        input_: a 2D Tensor of shape [batch x n].
        output_size: int, second dimension of the weight matrix.
        scope: VariableScope for the created subgraph; defaults to "SimpleLinear".

    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        input_ * W^T + Bias, where W is a newly created matrix.

    Raises:
        ValueError: if the argument has an unspecified or wrong shape.
    '''

    shape = input_.get_shape().as_list()
    if len(shape) != 2:
        raise ValueError("Linear is expecting 2D arguments: %s" % str(shape))
    if not shape[1]:
        raise ValueError("Linear expects shape[1] of arguments: %s" %
                         str(shape))
    input_size = shape[1]

    # Now the computation.
    with tf.variable_scope(scope or "SimpleLinear"):
        matrix = tf.get_variable("Matrix", [output_size, input_size],
                                 dtype=input_.dtype)
        bias_term = tf.get_variable("Bias", [output_size], dtype=input_.dtype)

    return tf.matmul(input_, tf.transpose(matrix)) + bias_term
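
A minimal usage sketch for the helper above (hypothetical tensor name and sizes, TF 1.x graph mode):

inputs = tf.placeholder(tf.float32, [None, 128], name="inputs")
projected = linear(inputs, output_size=64, scope="proj")   # shape: [batch, 64]
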
Example #6
def chamfer_loss(A, B):
    # A, B: point clouds of shape [batch, num_points, point_dim]
    # squared norms of every point, shaped for broadcasting
    r = tf.reduce_sum(A * A, 2)
    r = tf.reshape(r, [int(r.shape[0]), int(r.shape[1]), 1])
    r2 = tf.reduce_sum(B * B, 2)
    r2 = tf.reshape(r2, [int(r2.shape[0]), int(r2.shape[1]), 1])
    # pairwise squared distances: ||a||^2 - 2*a.b + ||b||^2
    t = (r - 2 * tf.matmul(A, tf.transpose(B, perm=[0, 2, 1])) +
         tf.transpose(r2, perm=[0, 2, 1]))
    # mean of the two directed nearest-neighbour distances
    return tf.reduce_mean((tf.reduce_min(t, axis=1) + tf.reduce_min(t, axis=2)) / 2.0)
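
chamfer_loss expects two batched point clouds with fully known static shapes; a rough usage sketch under that assumption (hypothetical sizes, TF 1.x session API):

import numpy as np

A = tf.placeholder(tf.float32, [8, 1024, 3])   # hypothetical point clouds
B = tf.placeholder(tf.float32, [8, 1024, 3])
loss = chamfer_loss(A, B)

with tf.Session() as sess:
    value = sess.run(loss, feed_dict={A: np.random.rand(8, 1024, 3),
                                      B: np.random.rand(8, 1024, 3)})
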
Example #7
def sg_dense(tensor, opt):
    # parameter initialize
    w = init.he_uniform('W', (opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply transform
    out = tf.matmul(tensor, w) + (b if opt.bias else 0)

    return out
Example #8
    def step(h, x):

        # layer normalization
        def ln(xx, opt):
            if opt.ln:
                # calc layer mean, variance for final axis
                mean, variance = tf.nn.moments(xx, axes=[len(xx.get_shape()) - 1])

                # apply layer normalization ( explicit broadcasting needed )
                broadcast_shape = [-1] + [1] * (len(xx.get_shape()) - 1)
                xx = (xx - tf.reshape(mean, broadcast_shape)) \
                         / tf.reshape(tf.sqrt(variance + tf.sg_eps), broadcast_shape)

                # apply parameter
                return gamma * xx + beta
            else:
                # layer normalization disabled: pass the input through unchanged
                return xx

        # apply transform
        y = ln(tf.matmul(x, w) + tf.matmul(h, u) + (b if opt.bias else 0), opt)

        return y
Example #9
    def step(h, x):

        # layer normalization
        def ln(xx, opt):
            if opt.ln:
                # calc layer mean, variance for final axis
                mean, variance = tf.nn.moments(xx, axes=[len(xx.get_shape()) - 1])

                # apply layer normalization ( explicit broadcasting needed )
                broadcast_shape = [-1] + [1] * (len(xx.get_shape()) - 1)
                xx = (xx - tf.reshape(mean, broadcast_shape)) \
                         / tf.reshape(tf.sqrt(variance + tf.sg_eps), broadcast_shape)

                # apply parameter
                return gamma * xx + beta
            else:
                # layer normalization disabled: pass the input through unchanged
                return xx

        # update gate
        z = tf.sigmoid(ln(tf.matmul(x, w_z) + tf.matmul(h, u_z) + (b_z if opt.bias else 0), opt))
        # reset gate
        r = tf.sigmoid(ln(tf.matmul(x, w_r) + tf.matmul(h, u_r) + (b_r if opt.bias else 0), opt))
        # h_hat
        hh = tf.sigmoid(ln(tf.matmul(x, w_h) + tf.matmul(r*h, u_h) + (b_h if opt.bias else 0), opt))
        # final output
        y = (1. - z) * h + z * hh

        return y
Example #10
 def step(hh, x):
     # update gate
     z = tf.sigmoid(ln(tf.matmul(x, w_z) + tf.matmul(hh, u_z) + (b_z if opt.bias else 0)))
     # reset gate
     r = tf.sigmoid(ln(tf.matmul(x, w_r) + tf.matmul(hh, u_r) + (b_r if opt.bias else 0)))
     # h_hat
     h_hat = tf.tanh(ln(tf.matmul(x, w_h) + tf.matmul(r * hh, u_h) + (b_h if opt.bias else 0)))
     # final output
     y = (1. - z) * h_hat + z * hh
     return y
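
Up to the optional layer normalization ln(·), the step above is a standard GRU update. In equation form, with h denoting the previous state hh and \odot elementwise multiplication (same symbols as the code):

z = \sigma(x W_z + h U_z + b_z), \qquad r = \sigma(x W_r + h U_r + b_r)
\hat{h} = \tanh(x W_h + (r \odot h) U_h + b_h), \qquad y = (1 - z) \odot \hat{h} + z \odot h
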
Example #11
def sg_dense(tensor, opt):
    r"""Applies a full connection.
    
    Args:
      tensor: A 2-D `Tensor`.
      in_dim: An `integer`. The size of input dimension.
      dim: An `integer`. The size of output dimension.
      bias: Boolean. If True, biases are added. 
      
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # parameter initialize
    w = init.he_uniform('W', (opt.in_dim, opt.dim))
    if opt.bias:
        b = init.constant('b', opt.dim)

    # apply transform
    out = tf.matmul(tensor, w) + (b if opt.bias else 0)

    return out
Example #12
def sg_dense(tensor, opt):
    r"""Applies a full connection.
    
    Args:
      tensor: A 2-D tensor (automatically passed by decorator).
      opt:
        in_dim: An `integer`. The size of input dimension.
        dim: An `integer`. The size of output dimension.
        bias: Boolean. If True, biases are added.
      
    Returns:
      A `Tensor` with the same type as `tensor`.
    """
    # parameter initialize
    w = tf.sg_initializer.he_uniform('W', (opt.in_dim, opt.dim))
    b = tf.sg_initializer.constant('b', opt.dim) if opt.bias else 0

    # apply transform
    out = tf.matmul(tensor, w) + b

    return out
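
For context, sugartensor layer functions like sg_dense are usually not called directly: the decorator fills opt from keyword arguments and exposes the layer as a tensor method. A rough usage sketch under that assumption (hypothetical shapes; sugartensor is conventionally imported as tf):

x = tf.placeholder(tf.float32, [None, 784])          # hypothetical flattened input
logit = x.sg_dense(in_dim=784, dim=10, bias=True)    # -> shape [None, 10]
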
Example #13
        def get_gram_mat(tensor):
            '''
            Args:
              tensor: A 4-D tensor. The first dimension must be 1.

            Returns:
              The 512 x 512 gram matrix. See
              `https://en.wikipedia.org/wiki/Gramian_matrix` for details.
            '''
            assert tensor.get_shape().ndims == 4, \
                "The tensor must be 4-dimensional."

            dim0, dim1, dim2, dim3 = tensor.get_shape().as_list()
            tensor = tensor.sg_reshape(shape=[dim0 * dim1 * dim2,
                                              dim3])  # (1*7*7, 512)

            # normalize: otherwise the raw gram matrix values would be too large
            mean, variance = tf.nn.moments(tensor, [0, 1])
            tensor = (tensor - mean) / tf.sqrt(variance + tf.sg_eps)

            tensor_t = tensor.sg_transpose(perm=[1, 0])  #(512, 1*7*7)
            gram_mat = tf.matmul(tensor_t, tensor)  # (512, 512)

            return gram_mat
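
The same Gram computation without the sugartensor helpers, as a rough plain-TensorFlow sketch (assuming a 1 x 7 x 7 x 512 feature map as in the comments above; 1e-8 stands in for tf.sg_eps):

feat = tf.placeholder(tf.float32, [1, 7, 7, 512])    # hypothetical feature map
flat = tf.reshape(feat, [1 * 7 * 7, 512])
mean, variance = tf.nn.moments(flat, [0, 1])
flat = (flat - mean) / tf.sqrt(variance + 1e-8)      # normalize as above
gram = tf.matmul(flat, flat, transpose_a=True)       # (512, 512)
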
Example #14
 def step(hh, x):
     # simple rnn
     y = ln(tf.matmul(x, w) + tf.matmul(hh, u) + (b if opt.bias else 0))
     return y
Example #15
        batch_s, TF_max_timesteps = shape[0], shape[1]

        with tf.name_scope('outputs'):
            outputs = tf.reshape(outputs, [-1, num_hidden])

        with tf.name_scope('weights'):
            W = tf.Variable(tf.truncated_normal([num_hidden, num_classes],
                                                stddev=0.1),
                            name='weights')
        with tf.name_scope('biases'):
            b = tf.get_variable("b",
                                initializer=tf.constant(0.,
                                                        shape=[num_classes]))

        with tf.name_scope('logits'):
            logits = tf.matmul(outputs, W) + b
            logits = tf.reshape(logits, [batch_s, -1, num_classes])
            logits = tf.transpose(logits, (1, 0, 2), name="out/logits")
        with tf.name_scope('loss'):
            loss = tf.nn.ctc_loss(targets,
                                  logits,
                                  seq_len,
                                  ctc_merge_repeated=True,
                                  preprocess_collapse_repeated=True)
        with tf.name_scope('cost'):
            cost = tf.reduce_mean(loss)
        tf.summary.scalar("cost", cost)
        with tf.name_scope('optimizer'):
            optimizer = tf.train.RMSPropOptimizer(learning_rate,
                                                  decay=decay,
                                                  momentum=momentum,
Example #16
                                act='leaky_relu',
                                bn=False)
    d_p4 = ops.upconv_and_scale(d_p3,
                                dim=1,
                                size=size,
                                stride=stride,
                                act='linear',
                                bn=False)
    disc = d_p4

#
# pull-away term ( PT ) regularizer
#

sample = gen.sg_flatten()
nom = tf.matmul(sample, tf.transpose(sample, perm=[1, 0]))
denom = tf.reduce_sum(tf.square(sample), reduction_indices=[1], keep_dims=True)
pt = tf.square(nom / denom)
pt -= tf.diag(tf.diag_part(pt))
pt = tf.reduce_sum(pt) / (batch_size * (batch_size - 1))
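
In formula form, the pull-away term computed above is, with s_i the i-th flattened generator sample and N the batch size:

PT = \frac{1}{N(N-1)} \sum_{i \ne j} \left( \frac{s_i \cdot s_j}{\lVert s_i \rVert^2} \right)^{2}
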

#
# loss & train ops
#

# mean squared errors
mse = tf.reduce_mean(tf.square(disc - xx), reduction_indices=[1, 2, 3])
mse_real, mse_fake = mse[:batch_size], mse[batch_size:]

loss_disc = mse_real + tf.maximum(margin - mse_fake, 0)  # discriminator loss
loss_gen = mse_fake + pt * pt_weight  # generator loss + PT regularizer
Example #17
 def step(h, x):
     # simple rnn
     y = ln(
         tf.matmul(tensor[:, i, :], w) + tf.matmul(h, u) +
         (b if opt.bias else 0))
     return y
Example #18
File: nn.py Project: jackyzha0/vybe
def t_get_indices(batchsize):
    index = np.arange(batchsize)
    np.random.shuffle(index)
    return index


## Training Loop
sd = 1 / np.sqrt(num_features)
with tf.name_scope('input'):
    X = tf.placeholder(tf.float32, [None, num_features], name="x_inp")
    Y = tf.placeholder(tf.float32, [None, num_classes], name="y_inp")

W_1 = tf.Variable(
    tf.random_normal([num_features, n_hidden_units_one], mean=0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean=0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)

W_2 = tf.Variable(
    tf.random_normal([n_hidden_units_one, n_hidden_units_two],
                     mean=0,
                     stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean=0, stddev=sd))
h_2 = tf.nn.tanh(tf.matmul(h_1, W_2) + b_2)

W_3 = tf.Variable(
    tf.random_normal([n_hidden_units_two, n_hidden_units_three],
                     mean=0,
                     stddev=sd))
b_3 = tf.Variable(tf.random_normal([n_hidden_units_three], mean=0, stddev=sd))
h_3 = tf.nn.sigmoid(tf.matmul(h_2, W_3) + b_3)