Code Example #1
File: ops.py Project: scroix/TecoGAN
def conv2_tran(batch_input,
               kernel=3,
               output_channel=64,
               stride=1,
               use_bias=True,
               scope='conv'):
    # kernel: An integer specifying the width and height of the 2D convolution window
    with tf.variable_scope(scope):
        if use_bias:
            return slim.conv2d_transpose(
                batch_input,
                output_channel, [kernel, kernel],
                stride,
                'SAME',
                data_format='NHWC',
                activation_fn=None,
                weights_initializer=tf.glorot_uniform_initializer())
        else:
            return slim.conv2d_transpose(
                batch_input,
                output_channel, [kernel, kernel],
                stride,
                'SAME',
                data_format='NHWC',
                activation_fn=None,
                weights_initializer=tf.glorot_uniform_initializer(),
                biases_initializer=None)
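These snippets target the TF 1.x graph API. A minimal setup they rely on; the `slim` alias follows the usual convention and is an assumption here:

import tensorflow as tf
import tensorflow.contrib.slim as slim  # TF 1.x only; tf.contrib was removed in TF 2.x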
Code Example #2
def align(encoder_states, decoder_hidden_state, scope="attention"):
    # hidden_size, S (sentence length), window_len, D, and N (batch size)
    # are module-level globals in the original source.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        # pt = S * sigmoid(vp . tanh(Wp . ht)); ht has shape [32, 600]
        Wp = tf.get_variable("Wp", shape=[2 * hidden_size, 128],
                             dtype=tf.float32,
                             trainable=True,
                             initializer=tf.glorot_uniform_initializer())

        Vp = tf.get_variable("Vp", shape=[128, 1],
                             dtype=tf.float32,
                             trainable=True,
                             initializer=tf.glorot_uniform_initializer())

    positions = tf.cast(S - window_len, dtype=tf.float32)  # maximum valid starting position of the attention window

    # Predict the (soft) starting position ps of the attention window.
    sigmoid_values = tf.nn.sigmoid(tf.matmul(tf.tanh(tf.matmul(decoder_hidden_state, Wp)), Vp))
    ps = positions * sigmoid_values

    pt = ps + D  # pt = center of the attention window; the whole window length is 2*D + 1
    pt = tf.reshape(pt, [N])
    # pt is the point of interest, i.e. the window center. The next step is to
    # build a Gaussian distribution centered at pt over the sentence length,
    # so positions near pt receive more weight.

    i = 0
    gaussian_position_based_scores = tf.TensorArray(size=S, dtype=tf.float32)
    sigma = tf.constant(D / 2, dtype=tf.float32)
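    # --- The original snippet is truncated here. What follows is a hedged
    # --- sketch (not the original author's code) of how the Gaussian scoring
    # --- loop might continue, assuming Luong-style local attention with
    # --- weight exp(-(s - pt)^2 / (2 * sigma^2)) at sentence position s:
    def loop_cond(s, scores):
        return s < S

    def loop_body(s, scores):
        weight = tf.exp(-tf.square(tf.cast(s, tf.float32) - pt) / (2 * tf.square(sigma)))
        return s + 1, scores.write(s, weight)

    _, gaussian_position_based_scores = tf.while_loop(
        loop_cond, loop_body, [i, gaussian_position_based_scores])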
Code Example #3
    def _network(self, inputs):
        with tf.variable_scope('network', reuse=tf.AUTO_REUSE):
            input_dim = int(inputs.shape[-1])
            prev_dim = input_dim
            prev_outputs = inputs
            # Hidden layers.
            for layer in range(self._parameters.hidden_layers):
                with tf.variable_scope('layer%d' % layer, reuse=tf.AUTO_REUSE):
                    weight = tf.get_variable(
                        'weight', [prev_dim, self._parameters.hidden_dim],
                        initializer=tf.glorot_uniform_initializer())
                    bias = tf.get_variable('bias',
                                           initializer=tf.zeros(
                                               [self._parameters.hidden_dim]))
                    pre_activation = tf.matmul(prev_outputs, weight) + bias
                    post_activation = self._parameters.activation(
                        pre_activation)
                prev_dim = self._parameters.hidden_dim
                prev_outputs = post_activation

            # Final layer.
            weight = tf.get_variable(
                'weight_final', [prev_dim, 1],
                initializer=tf.glorot_uniform_initializer())
            bias = tf.get_variable('bias_final', [1],
                                   initializer=tf.zeros_initializer())
            output = tf.matmul(prev_outputs, weight) + bias
            return output[Ellipsis, 0]
Code Example #4
def LSTM(x, hidden_state, cell, input_dim, hidden_size, scope):
    # Single LSTM step. The four gates share stacked parameters:
    # w (input-to-hidden), u (hidden-to-hidden), and b (bias), each with a
    # leading axis of 4 for the input, forget, output, and candidate gates.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        w = tf.get_variable("w", shape=[4, input_dim, hidden_size],
                            dtype=tf.float32,
                            trainable=True,
                            initializer=tf.glorot_uniform_initializer())

        u = tf.get_variable("u", shape=[4, hidden_size, hidden_size],
                            dtype=tf.float32,
                            trainable=True,
                            initializer=tf.glorot_uniform_initializer())

        b = tf.get_variable("bias", shape=[4, 1, hidden_size],
                            dtype=tf.float32,
                            trainable=True,
                            initializer=tf.zeros_initializer())

    input_gate = tf.nn.sigmoid(tf.matmul(x, w[0]) + tf.matmul(hidden_state, u[0]) + b[0])
    forget_gate = tf.nn.sigmoid(tf.matmul(x, w[1]) + tf.matmul(hidden_state, u[1]) + b[1])
    output_gate = tf.nn.sigmoid(tf.matmul(x, w[2]) + tf.matmul(hidden_state, u[2]) + b[2])
    cell_ = tf.nn.tanh(tf.matmul(x, w[3]) + tf.matmul(hidden_state, u[3]) + b[3])  # candidate cell state
    cell = forget_gate * cell + input_gate * cell_
    hidden_state = output_gate * tf.tanh(cell)

    return hidden_state, cell
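A hedged usage sketch, unrolling the step function over a fixed-length sequence; the dimensions below are illustrative assumptions:

seq_len, batch_size, input_dim, hidden_size = 20, 32, 100, 256
x_seq = tf.placeholder(tf.float32, [seq_len, batch_size, input_dim])
h = tf.zeros([batch_size, hidden_size])
c = tf.zeros([batch_size, hidden_size])
for t in range(seq_len):
    # AUTO_REUSE in the scope lets every step share the same w, u, b.
    h, c = LSTM(x_seq[t], h, c, input_dim, hidden_size, scope="lstm")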
Code Example #5
File: ops.py Project: scroix/TecoGAN
def conv2_NCHW(batch_input,
               kernel=3,
               output_channel=64,
               stride=1,
               use_bias=True,
               scope='conv_NCHW'):
    # Use NCHW to speed up inference
    # kernel: an integer specifying the width and height of the 2D convolution window
    with tf.variable_scope(scope):
        if use_bias:
            return slim.conv2d(
                batch_input,
                output_channel, [kernel, kernel],
                stride,
                'SAME',
                data_format='NCHW',
                activation_fn=None,
                weights_initializer=tf.glorot_uniform_initializer())
        else:
            return slim.conv2d(
                batch_input,
                output_channel, [kernel, kernel],
                stride,
                'SAME',
                data_format='NCHW',
                activation_fn=None,
                weights_initializer=tf.glorot_uniform_initializer(),
                biases_initializer=None)
Code Example #6
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.
    use_xavier: bool, whether to use xavier initializer

  Returns:
    Variable Tensor
  """
  if use_xavier:
    initializer = tf.glorot_uniform_initializer()
  else:
    initializer = tf.truncated_normal_initializer(stddev=stddev)
  var = _variable_on_cpu(name, shape, initializer)
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
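The snippet depends on _variable_on_cpu, which is not shown; a typical definition in this pattern (borrowed from the TF CIFAR-10 tutorial, so treat it as an assumption about this project):

def _variable_on_cpu(name, shape, initializer):
  """Create a Variable pinned to CPU memory."""
  with tf.device('/cpu:0'):
    var = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)
  return var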
Code Example #7
File: TextCNN.py Project: mu0gua/fish-learing
    def OutputLayer(self, num_filters_total, num_classes):
        '''
        Output layer.
        '''

        # L2 regularization loss
        self.l2_loss = tf.constant(0.0)

        with tf.name_scope("output"):
            weight = tf.get_variable(
                "weight",
                shape=[num_filters_total, num_classes],
                initializer=tf.glorot_uniform_initializer()  # keeps gradient magnitudes roughly equal across layers
            )
            bias = tf.Variable(
                tf.constant(0.1, shape=[num_classes]), name="bias")

            self.l2_loss += tf.nn.l2_loss(weight)
            self.l2_loss += tf.nn.l2_loss(bias)

            # Compute the scores
            self.score = tf.nn.xw_plus_b(
                self.h_drop, weight, bias,
                name="score")  # equivalent to matmul(x, weight) + bias
            self.predictions = tf.argmax(self.score, 1, name="predictions")
Code Example #8
def multihot_embedding(sess, slot_id):
    slotx_emb_table = tf.get_variable(
        name='multi_hot_emb_slot_%s' % str(slot_id),
        shape=(g_dict_len, g_emb_size),
        initializer=tf.glorot_uniform_initializer())
    '''
    slotx_emb_table = tf.constant([[6.4, 1.2, 0.5, 3.3],
                                   [0.3, 0.4, 0.5, 0.8],
                                   [1.5, 0.3, 2.2, 1.9],
                                   [0.4, 0.9, 1.1, 4.3]])
    '''

    # Define a sparse matrix. `indices` gives the positions ([0, 0] means row 0,
    # column 0), which together build the sparse matrix; `values` are indices
    # into the embedding table; `dense_shape` is the matrix height x width.
    # The sparse matrix looks like this: each row is one multi-hot sample, the
    # number of rows is the batch size, and the number of columns is the
    # maximum number of hots allowed. N marks an empty position:
    # [[1, 2, 3, N, N],
    #  [N, N, 2, N, N],
    #  [N, N, 3, 1, N]]
    slotx_idx = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 2],
                                         [2, 2], [2, 3]],
                                values=[1, 2, 3, 2, 3, 1],
                                dense_shape=(10, 5))
    print("slotx_emb_table.shape=", slotx_emb_table.shape)

    slotx_emb = tf.nn.embedding_lookup_sparse(
        slotx_emb_table, slotx_idx, sp_weights=None,
        combiner="sum")  #combiner=sum表示multihot用sum方式聚合
    sess.run(tf.global_variables_initializer())
    #print("emb_table(slot"+str(slot_id)+")=\n", sess.run(slotx_emb_table))
    print("emb(slot" + str(slot_id) + ")=\n", sess.run(slotx_emb))
    return slotx_emb
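A hedged usage sketch; g_dict_len and g_emb_size are module-level globals the snippet assumes, with illustrative values here:

g_dict_len, g_emb_size = 10, 4

with tf.Session() as sess:
    emb = multihot_embedding(sess, slot_id=1)  # prints and returns the pooled embeddings, shape (3, g_emb_size)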
Code Example #9
  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""

    input_layer = tf.to_float(features[FEATURES_KEY])
    kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)
    last_layer = input_layer
    for _ in range(self._num_layers):
      last_layer = tf.layers.dense(
          last_layer,
          units=self._layer_size,
          activation=tf.nn.relu,
          kernel_initializer=kernel_initializer)
    logits = tf.layers.dense(
        last_layer,
        units=logits_dimension,
        kernel_initializer=kernel_initializer)

    shared = {_NUM_LAYERS_KEY: self._num_layers}
    return adanet.Subnetwork(
        last_layer=last_layer,
        logits=logits,
        complexity=self._measure_complexity(),
        shared=shared)
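_measure_complexity is defined elsewhere in the AdaNet tutorial this builder comes from; the usual definition approximates Rademacher complexity as the square root of the depth (included here as an assumption):

  def _measure_complexity(self):
    """Approximates Rademacher complexity as the square root of the depth."""
    return tf.sqrt(tf.to_float(self._num_layers))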
Code Example #10
def dense_layer(hiddenSize, input, scope_name):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE, use_resource=True):
        w = tf.get_variable("weight", shape=[input.shape[-1], hiddenSize],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable("bias", shape=[hiddenSize],
                            initializer=tf.zeros_initializer())
        return tf.nn.relu_layer(input, w, b)
Code Example #11
File: autoencoder_mnist.py Project: ntselepidis/kfac
    def __init__(self,
                 input_size,
                 regularizers=None,
                 initializers=None,
                 custom_getter=None,
                 name='AutoEncoder'):
        super(AutoEncoderManualReg, self).__init__(custom_getter=custom_getter,
                                                   name=name)

        if initializers is None:
            initializers = {
                'w': tf.glorot_uniform_initializer(),
                'b': tf.zeros_initializer()
            }
        if regularizers is None:
            regularizers = {
                'w': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w),
                'b': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w),
            }

        with self._enter_variable_scope():
            self._encoder = MLPManualReg(output_sizes=_ENCODER_SIZES,
                                         regularizers=regularizers,
                                         initializers=initializers,
                                         custom_getter=custom_getter,
                                         activation=_NONLINEARITY,
                                         activate_final=False)
            self._decoder = MLPManualReg(output_sizes=_DECODER_SIZES +
                                         [input_size],
                                         regularizers=regularizers,
                                         initializers=initializers,
                                         custom_getter=custom_getter,
                                         activation=_NONLINEARITY,
                                         activate_final=False)
Code Example #12
 def conv3d_layer(self,
                  inputs,
                  out_dim,
                  name,
                  k_h=4,
                  k_w=4,
                  k_d=4,
                  d_h=2,
                  d_w=2,
                  d_d=2):
     with tf.name_scope('conv_layer'):
         with tf.name_scope('weights'):
             weights = tf.get_variable(
                 name=name + '/weights',
                 shape=[k_d, k_h, k_w,
                        inputs.get_shape()[-1], out_dim],
                 initializer=tf.glorot_uniform_initializer())
             tf.summary.histogram(name + '/weights', weights)
         with tf.name_scope('biases'):
             biases = tf.get_variable(
                 name=name + '/biases',
                 shape=[out_dim],
                 dtype=tf.float32,
                 initializer=tf.constant_initializer(0.0))
             tf.summary.histogram(name + '/biases', biases)
         with tf.name_scope('conv_out'):
             conv = tf.nn.bias_add(
                 tf.nn.conv3d(inputs,
                              weights,
                              strides=[1, d_d, d_h, d_w, 1],
                              padding='SAME'), biases)
     return conv
Code Example #13
    def embedding_layer(self, token_inp: tf.Tensor) -> tf.Tensor:
        """
        Creates embedding layer that is in common between many encoders.

        Args:
            token_inp:  2D tensor that is of shape (batch size, sequence length)

        Returns:
            3D tensor of shape (batch size, sequence length, embedding dimension)
        """

        token_embeddings = tf.get_variable(
            name='token_embeddings',
            initializer=tf.glorot_uniform_initializer(),
            shape=[
                len(self.metadata['token_vocab']),
                self.get_hyper('token_embedding_size')
            ],
        )
        self.__embeddings = token_embeddings

        token_embeddings = tf.nn.dropout(
            token_embeddings, keep_prob=self.placeholders['dropout_keep_rate'])

        return tf.nn.embedding_lookup(params=token_embeddings, ids=token_inp)
Code Example #14
def conv2d(x,
           output_dims,
           kernel_size,
           stride=[1, 1],
           padding='SAME',
           use_bias=True,
           activation=tf.nn.relu,
           bn=False,
           bn_decay=None,
           is_training=None):
    input_dims = x.get_shape()[-1].value
    kernel_shape = kernel_size + [input_dims, output_dims]
    kernel = tf.Variable(tf.glorot_uniform_initializer()(shape=kernel_shape),
                         dtype=tf.float32,
                         trainable=True,
                         name='kernel')
    x = tf.nn.conv2d(x, kernel, [1] + stride + [1], padding=padding)
    if use_bias:
        bias = tf.Variable(tf.zeros_initializer()(shape=[output_dims]),
                           dtype=tf.float32,
                           trainable=True,
                           name='bias')
        x = tf.nn.bias_add(x, bias)
    if activation is not None:
        if bn:
            x = batch_norm(x, is_training=is_training, bn_decay=bn_decay)
        x = activation(x)
    return x
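batch_norm is an external helper here; a minimal TF1-style sketch that matches the call signature above (the moving-average scheme is an assumption):

def batch_norm(x, is_training, bn_decay):
    # Per-channel batch normalization; `is_training` is assumed to be a scalar
    # boolean tensor, and `bn_decay` drives the moving average of statistics.
    n_out = x.get_shape()[-1].value
    beta = tf.Variable(tf.zeros([n_out]), name='beta')
    gamma = tf.Variable(tf.ones([n_out]), name='gamma')
    batch_mean, batch_var = tf.nn.moments(x, axes=[0, 1, 2], name='moments')
    ema = tf.train.ExponentialMovingAverage(decay=bn_decay if bn_decay is not None else 0.9)

    def mean_var_with_update():
        with tf.control_dependencies([ema.apply([batch_mean, batch_var])]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = tf.cond(is_training, mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    return tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)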
Code Example #15
File: model_train.py Project: jack139/ocr-with-ctpn
def Bilstm(net, input_channel, hidden_unit_num, output_channel, scope_name):
    # width--->time step
    with tf.variable_scope(scope_name) as scope:
        shape = tf.shape(net)
        N, H, W, C = shape[0], shape[1], shape[2], shape[3]
        net = tf.reshape(net, [N * H, W, C])
        net.set_shape([None, None, input_channel])

        lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_unit_num,
                                               state_is_tuple=True)
        lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_unit_num,
                                               state_is_tuple=True)

        lstm_out, last_state = tf.nn.bidirectional_dynamic_rnn(
            lstm_fw_cell, lstm_bw_cell, net, dtype=tf.float32)
        lstm_out = tf.concat(lstm_out, axis=-1)

        lstm_out = tf.reshape(lstm_out, [N * H * W, 2 * hidden_unit_num])

        #init_weights = tf.contrib.layers.variance_scaling_initializer(factor=0.01, mode='FAN_AVG', uniform=False)
        init_weights = tf.glorot_uniform_initializer()
        init_biases = tf.constant_initializer(0.0)
        weights = make_var('weights', [2 * hidden_unit_num, output_channel],
                           init_weights)
        biases = make_var('biases', [output_channel], init_biases)

        outputs = tf.matmul(lstm_out, weights) + biases

        outputs = tf.reshape(outputs, [N, H, W, output_channel])
        return outputs
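make_var is a project helper not shown here; a plausible minimal definition (an assumption):

def make_var(name, shape, initializer=None, trainable=True):
    return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)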
Code Example #16
File: aggregators.py Project: youngch12/Cluster_KGCN
    def __init__(self, batch_size, dim, dropout=0., act=tf.nn.relu, name=None):
        super(ConcatAggregator, self).__init__(batch_size, dim, dropout, act, name)

        with tf.variable_scope(self.name):
            self.weights = tf.get_variable(
                shape=[self.dim * 2, self.dim], initializer=tf.glorot_uniform_initializer(), name='weights')
            self.bias = tf.get_variable(shape=[self.dim], initializer=tf.zeros_initializer(), name='bias')
Code Example #17
File: nell995.py Project: yyht/language
 def linear_text_remapper(type_name):
     num_input_dims = FLAGS.num_text_dims
     num_output_dims = c.get_max_id(type_name)
     initializer = tf.glorot_uniform_initializer()(
         [num_input_dims, num_output_dims])
     weight_matrix = tf.Variable(initializer)
     remapped_text = tf.matmul(model.query_encoding, weight_matrix)
     return c.as_nql(remapped_text, type_name)
Code Example #18
 def f(a):
     with tf.variable_scope(variable_scope_name, use_resource=True):
         w = tf.get_variable("w",
                             shape=[64, 64],
                             initializer=tf.glorot_uniform_initializer(
                                 dtype=tf.float32))
     x = tf.matmul(a, w)
     return x
Code Example #19
    def __init__(self, loss_type, feature_space_dimension, margin_in_loss=0.25):
        # self.x1 = tf.placeholder(tf.float32, [None, 49152])
        # self.x1Image = tf.reshape(self.x1, [-1, 128, 128, 3])
        # self.x2 = tf.placeholder(tf.float32, [None, 49152])
        # self.x2Image = tf.reshape(self.x2, [-1, 128, 128, 3])
        # self.x3 = tf.placeholder(tf.float32, [None, 49152])
        # self.x3Image = tf.reshape(self.x3, [-1, 128, 128, 3])

        self.x1 = tf.placeholder(tf.float32, [None, 128, 128, 3])
        self.x1Image = self.x1
        self.x2 = tf.placeholder(tf.float32, [None, 128, 128, 3])
        self.x2Image = self.x2
        self.x3 = tf.placeholder(tf.float32, [None, 128, 128, 3])
        self.x3Image = self.x3

        self.margin_in_loss = margin_in_loss

        # self.loss_type = tf.placeholder(tf.float32, [1, 1])

        # self.weights = {
        #     'wc1': tf.get_variable('W0', shape=(3,3,3,32), initializer=tf.contrib.layers.xavier_initializer()),
        #     'wc2': tf.get_variable('W1', shape=(3,3,32,64), initializer=tf.contrib.layers.xavier_initializer()),
        #     'wc3': tf.get_variable('W2', shape=(3,3,64,128), initializer=tf.contrib.layers.xavier_initializer()),
        #     'wd1': tf.get_variable('W3', shape=(4*4*128,1024), initializer=tf.contrib.layers.xavier_initializer()),
        #     }

        self.weights = {
            'wc1': tf.get_variable('W0', shape=(3, 3, 3, 32), initializer=tf.glorot_uniform_initializer()),
            'wc2': tf.get_variable('W1', shape=(3, 3, 32, 64), initializer=tf.glorot_uniform_initializer()),
            'wc3': tf.get_variable('W2', shape=(3, 3, 64, 128), initializer=tf.glorot_uniform_initializer()),
            'wd1': tf.get_variable('W3', shape=(16 * 16 * 128, 500), initializer=tf.glorot_uniform_initializer()),
            'out': tf.get_variable('W6', shape=(500, feature_space_dimension), initializer=tf.glorot_uniform_initializer()),
        }
        self.biases = {
            'bc1': tf.get_variable('B0', shape=(32), initializer=tf.glorot_uniform_initializer()),
            'bc2': tf.get_variable('B1', shape=(64), initializer=tf.glorot_uniform_initializer()),
            'bc3': tf.get_variable('B2', shape=(128), initializer=tf.glorot_uniform_initializer()),
            'bd1': tf.get_variable('B3', shape=(500), initializer=tf.glorot_uniform_initializer()),
            'out': tf.get_variable('B4', shape=(feature_space_dimension), initializer=tf.glorot_uniform_initializer()),
        }

        self.loss_type = loss_type
        # Create loss
        if self.loss_type == "triplet":
            with tf.variable_scope("siamese") as scope:
                self.o1 = self.conv_net(self.x1Image, self.weights, self.biases)
                self.o2 = self.conv_net(self.x2Image, self.weights, self.biases)
                self.o3 = self.conv_net(self.x3Image, self.weights, self.biases)
            self.loss = self.loss_with_spring()
        elif self.loss_type == "FDA":
            with tf.variable_scope("siamese") as scope:
                self.o1 = self.conv_net_FDA(self.x1Image, self.weights, self.biases, o_index=1)
                self.o2 = self.conv_net_FDA(self.x2Image, self.weights, self.biases, o_index=2)
                self.o3 = self.conv_net_FDA(self.x3Image, self.weights, self.biases, o_index=3)
            self.loss = self.loss_FDA()
Code Example #20
File: nql_test.py Project: xf05888/language
 def test_group_rel_from_ph(self):
   ph = tf.placeholder(
       tf.float32, shape=(None, self.context.get_max_id('place_t')))
   x = self.context.as_nql(ph, 'place_t')
   initializer = tf.glorot_uniform_initializer()(
       [1, self.context.get_max_id('dir_g')])
   dir_tf_var = tf.Variable(initializer)
   dir_nql_exp = self.context.as_nql(dir_tf_var, 'dir_g')
   x.follow(dir_nql_exp)  # This test ensures this doesn't throw an error.
Code Example #21
    def predictor_model(self, features, features_dim):
        with tf.variable_scope("predictor_model"):
            W1 = tf.get_variable('W1', [features_dim, 1], initializer=tf.glorot_uniform_initializer())
            b1 = tf.Variable(tf.zeros(shape=[1]), name='b1')

            pred_logits = tf.matmul(features, W1) + b1
            pred_labels = tf.sigmoid(pred_logits)

        return pred_labels, pred_logits
Code Example #22
 def _get_variable(self):
     self.weight = tf.get_variable(
         self.name + 'kernel',
         shape=self.input_shape,
         initializer=tf.glorot_uniform_initializer(),
         regularizer=tf.keras.regularizers.l2(0.01),
         dtype=self.data_type)
     self.activation = tf.tanh
     self.batch_normal = tf.keras.layers.BatchNormalization()
Code Example #23
File: nql_test.py Project: xf05888/language
 def test_group_rel_from_variable(self):
   x = self.context.one(cell(2, 2), 'place_t')
   initializer = tf.glorot_uniform_initializer()(
       [1, self.context.get_max_id('dir_g')])
   dir_tf_var = tf.Variable(initializer)
   dir_nql_exp = self.context.as_nql(dir_tf_var, 'dir_g')
   y = x.follow(dir_nql_exp)
   self.session.run(dir_tf_var.initializer)
   y.eval(self.session)
Code Example #24
File: mnist_tf.py Project: graphcore/examples
def dense_layer(hiddenSize, x, scope_name):
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE, use_resource=True):
        w = tf.get_variable("weight",
                            shape=[x.shape[-1], hiddenSize],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable("bias",
                            shape=[hiddenSize],
                            initializer=tf.zeros_initializer())
        x = tf.matmul(x, w) + b
        return tf.nn.relu(x)
Code Example #25
 def _get_variable(self):
     self.kernel = tf.get_variable(
         self.name + '_kernel',
         shape=(self.input_dim, self.output_dim),
         initializer=tf.glorot_uniform_initializer(),
         regularizer=tf.keras.regularizers.l2(0.01),
         dtype=self.data_type)
     self.kernel1 = tf.get_variable(
         self.name + '_kernel_1',
         shape=(self.input_dim, self.input_dim),
         initializer=tf.glorot_uniform_initializer(),
         regularizer=tf.keras.regularizers.l2(0.01),
         dtype=self.data_type)
     self.kernel2 = tf.get_variable(
         self.name + '_kernel_2',
         shape=(self.input_dim, self.input_dim),
         initializer=tf.glorot_uniform_initializer(),
         regularizer=tf.keras.regularizers.l2(0.01),
         dtype=self.data_type)
     self.batch_normlization = tf.keras.layers.BatchNormalization()
Code Example #26
def body(x):
    w1 = tf.get_variable(
        "w1",
        shape=[64, 64],
        initializer=tf.glorot_uniform_initializer(dtype=tf.float32))
    w2 = tf.get_variable(
        "w2",
        shape=[64, 64],
        initializer=tf.glorot_uniform_initializer(dtype=tf.float32))

    def func(a, b):
        x = tf.matmul(a, b)
        x = normalization_ops.layer_norm(x)
        x = nn_ops.gelu(x)
        return x

    x = func(x, w1)
    x = func(x, w2)
    outfeed = outfeed_queue.enqueue(x)
    return outfeed
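body relies on an outfeed_queue and IPU op modules created outside the function. On Graphcore's TensorFlow port the setup is typically along these lines, though module paths and the IPUOutfeedQueue constructor vary across SDK versions, so treat this as an assumption:

from tensorflow.python import ipu

# normalization_ops and nn_ops used in body() come from the same ipu namespace.
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()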
Code Example #27
def model(batch):
    @ipu.function
    def func(lhs, rhs):
        x = tf.matmul(lhs, rhs)
        return x

    # Create the variables.
    with tf.variable_scope("vs", use_resource=True):
        w1 = tf.get_variable(
            "w1",
            shape=[64, 64],
            initializer=tf.glorot_uniform_initializer(dtype=tf.float32))
        w2 = tf.get_variable(
            "w2",
            shape=[64, 64],
            initializer=tf.glorot_uniform_initializer(dtype=tf.float32))

    # Pass the variables as inputs to the function.
    partial = func(batch, w1)
    partial = func(partial, w2)
Code Example #28
 def __call__(self, shape, dtype, partition_info=None):
   if self._base_initializer is None:
     # mimic default initialization in tf.get_variable()
     if dtype.is_floating:
       ret = tf.glorot_uniform_initializer()(shape, dtype)
     else:
       ret = tf.zeros(shape, dtype)
   else:
     ret = self._base_initializer(shape, dtype, partition_info=partition_info)
   noise = 0.0  # no random noise in the initializer.
   return tf.cast(self._parameter_encoding.encode(ret, noise), dtype)
Code Example #29
File: ops.py Project: scroix/TecoGAN
def denselayer(inputs, output_size):
    # Rachel todo, put it to Model variable_scope
    denseLayer = tf.layers.Dense(
        output_size,
        activation=None,
        kernel_initializer=tf.glorot_uniform_initializer())
    output = denseLayer.apply(inputs)
    tf.add_to_collection(name=tf.GraphKeys.MODEL_VARIABLES,
                         value=denseLayer.kernel)
    #output = tf.layers.dense(inputs, output_size, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer())

    return output
Code Example #30
    def adversarial_model(self, pred_logits, true_labels):
        with tf.variable_scope("adversary_model"):
            c = tf.get_variable('c', initializer=tf.constant(1.0))
            s = tf.sigmoid((1 + tf.abs(c)) * pred_logits)

            W2 = tf.get_variable('W2', [3, 1], initializer=tf.glorot_uniform_initializer())
            b2 = tf.Variable(tf.zeros(shape=[1]), name='b2')

            pred_protected_attribute_logits = tf.matmul(tf.concat([s, s * true_labels, s * (1.0 - true_labels)], axis=1), W2) + b2
            pred_protected_attribute_labels = tf.sigmoid(pred_protected_attribute_logits)

        return pred_protected_attribute_labels, pred_protected_attribute_logits
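A hedged sketch of how this adversary head pairs with the predictor_model from Code Example #21 in the adversarial-debiasing pattern; model, features, features_dim, and true_labels are assumptions:

pred_labels, pred_logits = model.predictor_model(features, features_dim)
# The adversary tries to recover the protected attribute from the predictor's logits.
adv_labels, adv_logits = model.adversarial_model(pred_logits, true_labels)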