Code example #1
0
    def __init__(self):
        """Build a small fully-connected Q-network.

        Maps a 9-element observation to 5 Q-values (one per action) and
        wires up a sum-of-squared-errors loss with an Adam train op.
        """
        self.input = tf.placeholder(tf.float32, shape=(9))
        self.targetQ = tf.placeholder(tf.float32, shape=(5))
        net = [tf.reshape(self.input, [-1])]

        l2_reg = layers.l2_regularizer(0.01)

        # Two hidden layers of 100 units, each followed by an explicit
        # (regularized) bias layer, then a linear 5-unit output layer.
        for _ in range(2):
            net = layers.fully_connected(net, 100, weights_regularizer=l2_reg)
            net = layers.bias_add(net, regularizer=l2_reg)
        net = layers.fully_connected(net, 5, weights_regularizer=l2_reg)

        # First (only) row of the batch is the prediction vector.
        self.predict = net[0]

        self.loss = tf.reduce_sum(tf.square(self.targetQ - self.predict))

        self.train = tf.train.AdamOptimizer().minimize(self.loss)
Code example #2
0
def conv1D(x, inp, out, kernel, stride, name):
    """1-D convolution (SAME padding) followed by a learned bias.

    Creates a weight variable named `name + "W"` of shape
    [kernel, inp, out] with Xavier initialization.
    """
    weights = tf.get_variable(
        name + "W",
        shape=[kernel, inp, out],
        initializer=tf.contrib.layers.xavier_initializer())
    conv = tf.nn.conv1d(x, weights, stride, 'SAME')
    return layers.bias_add(conv)
Code example #3
0
File: layers.py  Project: yaya325/tensornets
def darkconv(*args, **kwargs):
    """Darknet-style convolution block.

    Runs conv2d (SAME padding, no activation, scaled-variance init,
    5e-4 L2 weight decay) and — unless `onlyconv` is set — follows it
    with batch norm, a separately-scoped bias, and leaky ReLU (0.1).
    When `onlyconv` is True the conv keeps its own default bias instead.
    """
    scope = kwargs.pop('scope', None)
    onlyconv = kwargs.pop('onlyconv', False)
    with tf.variable_scope(scope):
        conv_kwargs = {
            'padding': 'SAME',
            'activation_fn': None,
            'weights_initializer': variance_scaling_initializer(1.53846),
            'weights_regularizer': l2(5e-4),
            'biases_initializer': None,
            'scope': 'conv',
        }
        if onlyconv:
            # Let conv2d fall back to its default bias initializer.
            del conv_kwargs['biases_initializer']
        with arg_scope([conv2d], **conv_kwargs):
            out = conv2d(*args, **kwargs)
            if not onlyconv:
                out = batch_norm(out,
                                 decay=0.99,
                                 center=False,
                                 scale=True,
                                 epsilon=1e-5,
                                 scope='bn')
                out = bias_add(out, scope='bias')
                out = leaky_relu(out, alpha=0.1, name='lrelu')
            return out
Code example #4
0
def conv1D(x,
           inp,
           out,
           kernel,
           stride,
           dilation_rate,
           name,
           use_bias=True,
           activation=None,
           batch_norm=False):
    """Dilated 1-D convolution with optional bias, batch norm and activation.

    Args:
        x: input tensor; the convolution uses NWC data format.
        inp: number of input channels.
        out: number of output filters.
        kernel: kernel width.
        stride: convolution stride.
        dilation_rate: dilation rate of the convolution.
        name: prefix for the shared weight variable (`name + "W"`).
        use_bias: if True, add a learned bias after the convolution.
        activation: optional callable applied last (e.g. tf.nn.relu).
        batch_norm: if True, apply layers.batch_norm before the activation.

    Returns:
        The transformed tensor.
    """
    # tf.get_variable raises ValueError when the variable already exists in a
    # non-reusing scope; retry with reuse=True in that case.  The original
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit and every
    # unrelated error — catch only ValueError.
    try:
        with tf.variable_scope("conv"):
            W = tf.get_variable(
                name + "W",
                shape=[kernel, inp, out],
                initializer=tf.contrib.layers.xavier_initializer())
    except ValueError:
        with tf.variable_scope("conv", reuse=True):
            W = tf.get_variable(
                name + "W",
                shape=[kernel, inp, out],
                initializer=tf.contrib.layers.xavier_initializer())
    x = tf.nn.convolution(input=x,
                          filter=W,
                          strides=(stride, ),
                          dilation_rate=(dilation_rate, ),
                          padding="SAME",
                          data_format="NWC")
    if use_bias:
        x = layers.bias_add(x)
    if batch_norm:
        x = layers.batch_norm(x)
    if activation is not None:
        x = activation(x)
    return x
Code example #5
0
def conv(X, do_activation_first=True):
    """Pre-activation conv block: (activation) -> conv2d -> dropout -> bias.

    Uses the enclosing scope's `activation`, `num_kernels`, `kernel_size`,
    `conv_normalization` and `keep_prob`.
    """
    out = activation(X) if do_activation_first else X
    out = conv2d(out,
                 num_kernels,
                 kernel_size,
                 activation_fn=None,
                 normalizer_fn=conv_normalization)
    out = tf.nn.dropout(out, keep_prob)
    return bias_add(out)
Code example #6
0
    def __init__(self):
        """Build a fully-connected Q-network over a flattened 210x160x3 frame.

        Maps the flattened frame through 550-200-100 hidden layers to 4
        Q-values, with L2-regularized weights and explicit bias layers, and
        wires up a sum-of-squared-errors loss with an Adam train op.
        """
        self.input = tf.placeholder(tf.float32, shape=(210, 160, 3))
        self.targetQ = tf.placeholder(tf.float32, shape=(4))
        features = [tf.reshape(self.input, [-1])]
        # BUG (fixed): the original `features = features[0:210 * 160]` sliced
        # the one-element Python *list*, not the flattened tensor — a no-op
        # copy.  Removed; if truncating the pixels was intended, the slice
        # must be applied to the tensor inside the list instead.

        regularizer = layers.l2_regularizer(0.01)

        # Structure
        features = layers.bias_add(features, regularizer=regularizer)
        features = layers.fully_connected(features,
                                          550,
                                          weights_regularizer=regularizer)
        features = layers.bias_add(features, regularizer=regularizer)
        features = layers.dropout(features)
        # NOTE(review): two bias_add layers in a row here — leftover from the
        # layers that were commented out upstream; kept to preserve the
        # variable layout of existing checkpoints.
        features = layers.bias_add(features, regularizer=regularizer)
        features = layers.fully_connected(features,
                                          200,
                                          weights_regularizer=regularizer)
        features = layers.bias_add(features, regularizer=regularizer)
        features = layers.fully_connected(features,
                                          100,
                                          weights_regularizer=regularizer)
        features = layers.bias_add(features, regularizer=regularizer)
        features = layers.fully_connected(features,
                                          4,
                                          weights_regularizer=regularizer)

        # First (only) row of the batch is the prediction vector.
        self.predict = features[0]

        self.loss = tf.reduce_sum(tf.square(self.targetQ - self.predict))

        trainer = tf.train.AdamOptimizer()
        self.train = trainer.minimize(self.loss)
Code example #7
0
File: network.py  Project: KotoriCANOE/DeblurNet
 def DBlock(self, last, channels, resblocks=1,
     kernel=[3, 3], stride=[2, 2], biases=True, format=DATA_FORMAT,
     activation=ACTIVATION, normalizer=None, regularizer=None, collections=None):
     """Decoder block: residual blocks, pre-activation, then an upscaling
     conv (when the last stride entry > 1) or a plain conv, plus optional bias.

     Args:
         last: input tensor.
         channels: output channel count of the final convolution.
         resblocks: number of ResBlock units applied first (bias-free).
         kernel, stride: per-axis sizes; only the last entry of each is used
             here.  NOTE(review): mutable list defaults — benign since they
             are never mutated, but tuples would be safer.
         biases: if True, append slim.bias_add after the convolution.
         format: data-format string forwarded to the layer helpers.
         activation: pre-activation applied before the convolution.
         normalizer, regularizer, collections: forwarded to ResBlock /
             variable collections.

     Returns:
         The transformed tensor.
     """
     # residual blocks
     for i in range(resblocks):
         with tf.variable_scope('ResBlock_{}'.format(i)):
             last = self.ResBlock(last, self.res_kernel, biases=False, format=format,
                 activation=self.activation_res, normalizer=normalizer,
                 regularizer=regularizer, collections=collections)
     # pre-activation
     if activation: last = activation(last)
     # up-convolution
     if stride[-1] > 1:
         last = layers.upscale2d_conv2d(last, channels, kernel[-1], format=format)
         # last = layers.blur2d(last, format=format)
     else:
         last = layers.conv2d(last, channels, kernel[-1], format=format)
     # bias
     if biases:
         last = slim.bias_add(last, variables_collections=collections, data_format=format)
     return last
Code example #8
0
File: cifar_5.py  Project: 99991/DeepLearningProjects
                # Residual-style stack: plain conv plus a conv->bn->conv branch.
                X = conv(X, num_kernels) + conv(bn(conv(X, num_kernels)),
                                                num_kernels)
            X = tf.nn.dropout(X, drop_keep_prob, name='dropout')
            with tf.name_scope("residual_convolution") as node:
                # Pre-activation residual branch: (bn -> relu -> conv) twice.
                X += conv(relu(bn(conv(relu(bn(X)), num_kernels))),
                          num_kernels)
            X = tf.nn.dropout(X, drop_keep_prob, name='dropout')
            X = pool(X)
# Head: normalize, flatten, one hidden layer with explicit bias, linear output.
with tf.name_scope("normalization") as node:
    X = bn(X)
#X = tf.reduce_mean(X, [1, 2])
with tf.name_scope("flattening") as node:
    X = flatten(X)
with tf.name_scope("hidden") as scope:
    with tf.name_scope('bias') as node:
        X = bias_add(X)
    with tf.name_scope('weights') as node:
        X = linear(X, num_hidden, activation_fn=relu, normalizer_fn=batch_norm)
with tf.name_scope("output-Layer") as scope:
    X = linear(X, num_labels, activation_fn=None)
    Y = X
#######################################################
with tf.name_scope('training'):
    with tf.name_scope('cross_entropy'):
        with tf.name_scope('total'):
            # NOTE(review): positional arguments rely on the old
            # (logits, labels) ordering of softmax_cross_entropy_with_logits;
            # newer TF requires keywords (labels=..., logits=...) — confirm
            # the TF version this targets.
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(Y, labels))
    tf.summary.scalar('loss', loss)

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
Code example #9
0
    def __init__(self, data, training=False):
        """Build the QA scoring graph.

        Embeds question / answer / subtitle token ids with a fixed
        pre-trained matrix, encodes each with convolutional plus
        dilated-convolutional encoders, keeps only the subtitle lines that
        pass a hard temporal-attention threshold, and scores the five
        answers against the mean subtitle summary by dot product.

        Args:
            data: dataset object exposing token-id tensors `ques`, `ans`,
                `subt` and their lengths `ql`, `al`, `sl`.
                # assumes max lengths 25 (question), 29 (subtitle),
                # 34 (answer), mirroring the mask calls below — TODO confirm.
            training: only referenced by the commented-out dropout calls.
        """
        self.data = data
        self.initializer = tf.orthogonal_initializer()
        q_mask = make_mask(self.data.ql, 25)  # (1, L_q, E)
        s_mask = make_mask(self.data.sl, 29)  # (N, L_s, E)
        a_mask = make_mask(self.data.al, 34)  # (5, L_a, E)

        ques_shape = tf.shape(q_mask)
        subt_shape = tf.shape(s_mask)
        ans_shape = tf.shape(a_mask)

        with tf.variable_scope('Embedding'):
            # Frozen embedding matrix loaded from disk (trainable=False).
            self.embedding = tf.get_variable('embedding_matrix',
                                             initializer=np.load(
                                                 _mp.embedding_file),
                                             trainable=False)

            self.ques = tf.nn.embedding_lookup(self.embedding,
                                               self.data.ques)  # (1, L_q, E)
            self.ans = tf.nn.embedding_lookup(self.embedding,
                                              self.data.ans)  # (5, L_a, E)
            self.subt = tf.nn.embedding_lookup(self.embedding,
                                               self.data.subt)  # (N, L_s, E)

            # self.ques = dropout(self.ques, training=training)  # (1, L_q, E)
            # self.ans = dropout(self.ans, training=training)  # (5, L_a, E)
            # self.subt = dropout(self.subt, training=training)  # (N, L_s, E)

        with tf.variable_scope('Embedding_Linear'):
            # (1, L_q, E_t)
            self.ques_embedding = unit_norm(
                mask_dense(self.ques, q_mask, reuse=False))
            # (5, L_a, E_t)
            self.ans_embedding = unit_norm(mask_dense(self.ans, a_mask))
            # (N, L_s, E_t)
            self.subt_embedding = unit_norm(mask_dense(self.subt, s_mask))

        with tf.variable_scope('Language_Encode'):
            # Each encoder sums a plain and a dilated conv encoding,
            # normalized per row (dim=1).
            mask = tf.expand_dims(tf.sequence_mask(self.data.ql, 25), axis=-1)
            # (1, E_t)
            self.ques_enc = unit_norm(
                conv_encode(self.ques_embedding, mask, reuse=False) +
                dilated_conv_encode(self.ques_embedding, mask, reuse=False),
                dim=1)
            mask = tf.expand_dims(tf.sequence_mask(self.data.al, 34), axis=-1)
            # (5, E_t)
            self.ans_enc = unit_norm(
                conv_encode(self.ans_embedding, mask) +
                dilated_conv_encode(self.ans_embedding, mask),
                dim=1)
            mask = tf.expand_dims(tf.sequence_mask(self.data.sl, 29), axis=-1)
            # (N, E_t)
            self.subt_enc = unit_norm(
                conv_encode(self.subt_embedding, mask) +
                dilated_conv_encode(self.subt_embedding, mask),
                dim=1)

        with tf.variable_scope('Temporal_Attention'):
            # Additive attention: project question and subtitle encodings,
            # add, then bias+tanh and a scalar projection.
            # (N, E_t)
            self.temp_attn = dense(self.ques_enc, use_bias=False, activation=None, reuse=False) + \
                             dense(self.subt_enc, use_bias=False, activation=None, reuse=False)

            self.temp_attn = dense(layers.bias_add(self.temp_attn, tf.nn.tanh),
                                   units=1,
                                   use_bias=False,
                                   activation=None,
                                   reuse=False)

            # Question-conditioned fraction of subtitle lines to keep.
            amount = tf.squeeze(
                dense(self.ques_enc, 1, tf.nn.sigmoid, reuse=False))
            # Hard top-k cut: threshold at the nth-largest attention score,
            # then partition out the lines below it.
            nth = nn.nth_element(
                tf.transpose(self.temp_attn),
                tf.cast(tf.cast(subt_shape[0], tf.float32) * amount, tf.int32),
                True)
            # (N, 1)
            attn_mask = tf.cast(
                tf.squeeze(tf.greater_equal(self.temp_attn, nth), axis=1),
                tf.int32)
            _, self.subt_enc = tf.dynamic_partition(self.subt_enc, attn_mask,
                                                    2)
            _, self.temp_attn = tf.dynamic_partition(self.temp_attn, attn_mask,
                                                     2)
            self.subt_enc = self.subt_enc * self.temp_attn

        self.summarize = unit_norm(tf.reduce_mean(self.subt_enc,
                                                  axis=0,
                                                  keepdims=True),
                                   dim=1)  # (1, 4 * E_t)

        # gamma = tf.get_variable('gamma', [1, 1], initializer=tf.zeros_initializer)

        # self.ans_vec = self.summarize * tf.nn.sigmoid(gamma) + \
        #                tf.squeeze(self.ques_enc, axis=0) * (1 - tf.nn.sigmoid(gamma))

        self.ans_vec = unit_norm(self.summarize + self.ques_enc,
                                 dim=1)  # (1, 4 * E_t)

        # Score each of the 5 answers by dot product with the answer vector.
        self.output = tf.matmul(self.ans_vec, self.ans_enc,
                                transpose_b=True)  # (1, 5)
Code example #10
0
File: utils.py  Project: yerman21/modelsTensorFlow
def quantizable_separable_conv2d(inputs,
                                 num_outputs,
                                 kernel_size,
                                 is_quantized=True,
                                 depth_multiplier=1,
                                 stride=1,
                                 activation_fn=tf.nn.relu6,
                                 normalizer_fn=None,
                                 scope=None):
    """Quantization friendly backward compatible separable conv2d.

  This op has the same API as separable_conv2d. The main difference is that an
  additional BiasAdd is manually inserted after the depthwise conv, such that
  the depthwise bias will not have name conflict with pointwise bias. The
  motivation of this op is that the quantization script needs BiasAdd in order
  to recognize the op, which a native call to separable_conv2d does not create
  for the depthwise conv.

  Args:
    inputs: A tensor of size [batch_size, height, width, channels].
    num_outputs: The number of pointwise convolution output filters. If is
      None, then we skip the pointwise convolution stage.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of the
      filters. Can be an int if both values are the same.
    is_quantized: flag to enable/disable quantization.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to num_filters_in * depth_multiplier.
    stride: A list of length 2: [stride_height, stride_width], specifying the
      depthwise convolution stride. Can be an int if both strides are the same.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of biases.
    scope: Optional scope for variable_scope.

  Returns:
    Tensor resulting from concatenation of input tensors
  """
    if is_quantized:
        # Depthwise stage only (num_outputs=None) with biases disabled, so
        # the explicit BiasAdd below carries the depthwise bias under its
        # own, conflict-free name.
        outputs = contrib_layers.separable_conv2d(
            inputs,
            None,
            kernel_size,
            depth_multiplier=depth_multiplier,
            stride=1,
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=None,
            scope=scope)
        # NOTE(review): '%s_bias' % scope yields the literal name 'None_bias'
        # when scope is None — confirm callers always pass a scope.
        outputs = contrib_layers.bias_add(outputs,
                                          trainable=True,
                                          scope='%s_bias' % scope)
        # The spatial stride is applied here at the 1x1 pointwise conv rather
        # than at the depthwise stage as in the non-quantized branch; with a
        # 1x1 kernel this appears arithmetically equivalent, just with extra
        # depthwise compute — TODO confirm padding alignment for SAME.
        outputs = contrib_layers.conv2d(outputs,
                                        num_outputs, [1, 1],
                                        activation_fn=activation_fn,
                                        stride=stride,
                                        normalizer_fn=normalizer_fn,
                                        scope=scope)
    else:
        # Plain fused separable conv, stride applied at the depthwise stage.
        outputs = contrib_layers.separable_conv2d(
            inputs,
            num_outputs,
            kernel_size,
            depth_multiplier=depth_multiplier,
            stride=stride,
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn,
            scope=scope)
    return outputs
Code example #11
0
    def __init__(self, data, training=False):
        """Build a soft temporal-attention QA scoring graph.

        Projects question / answer / subtitle features through a shared
        dense layer, weights subtitle lines by a softmax attention keyed on
        the question, and scores each answer by dot product against the
        summed subtitle summary plus the question.

        Args:
            data: object exposing feature tensors `ques`, `ans`, `subt`.
            training: accepted for interface parity; unused in this graph.
        """
        self.data = data
        self.initializer = layers.xavier_initializer()

        with tf.variable_scope('Embedding_Linear'):
            # (1, L_q, E_t)
            self.ques = l2_norm(
                tf.layers.dense(self.data.ques,
                                hp['emb_dim'],
                                activation=tf.nn.relu,
                                kernel_initializer=self.initializer,
                                bias_initializer=self.initializer))
            # (5, L_a, E_t)
            # NOTE(review): reuse=True with no explicit name relies on all
            # three calls resolving to the same default scope — confirm the
            # weights are actually shared across ques/ans/subt.
            self.ans = l2_norm(
                tf.layers.dense(self.data.ans,
                                hp['emb_dim'],
                                activation=tf.nn.relu,
                                kernel_initializer=self.initializer,
                                bias_initializer=self.initializer,
                                reuse=True))
            # (N, L_s, E_t)
            self.subt = l2_norm(
                tf.layers.dense(self.data.subt,
                                hp['emb_dim'],
                                activation=tf.nn.relu,
                                kernel_initializer=self.initializer,
                                bias_initializer=self.initializer,
                                reuse=True))

            # self.ques = self.data.ques
            # self.ans = self.data.ans
            # self.subt = self.data.subt

        with tf.variable_scope('Temporal_Attention'):
            # Additive attention: separate bias-free projections of question
            # and subtitles, summed, then bias+tanh and a scalar projection.
            # (N, E_t)
            self.temp_attn = tf.layers.dense(self.ques, hp['emb_dim'], use_bias=False, activation=None,
                                             kernel_initializer=self.initializer) + \
                             tf.layers.dense(self.subt, hp['emb_dim'], use_bias=False, activation=None,
                                             kernel_initializer=self.initializer)

            self.temp_attn = tf.layers.dense(
                layers.bias_add(self.temp_attn,
                                tf.nn.tanh,
                                initializer=self.initializer),
                units=1,
                use_bias=False,
                activation=None,
                kernel_initializer=self.initializer)

            # Soft attention over subtitle lines (softmax over axis 1).
            self.temp_attn = tf.nn.softmax(self.temp_attn, axis=1)

            # amount = tf.squeeze(dense(self.ques_enc, 1, tf.nn.sigmoid, reuse=False))
            # nth = nn.nth_element(tf.transpose(self.temp_attn),
            #                      tf.cast(tf.cast(subt_shape[0], tf.float32) * amount, tf.int32), True)
            # # (N, 1)
            # attn_mask = tf.cast(tf.squeeze(tf.greater_equal(self.temp_attn, nth), axis=1), tf.int32)
            # _, self.subt_enc = tf.dynamic_partition(self.subt_enc, attn_mask, 2)
            # _, self.temp_attn = tf.dynamic_partition(self.temp_attn, attn_mask, 2)
            self.subt_temp = self.subt * self.temp_attn

        self.summarize = l2_norm(
            tf.reduce_sum(self.subt_temp, axis=0, keepdims=True))  # (1, E_t)

        # gamma = tf.get_variable('gamma', [1, 1], initializer=tf.zeros_initializer)

        # self.ans_vec = self.summarize * tf.nn.sigmoid(gamma) + \
        #                tf.squeeze(self.ques_enc, axis=0) * (1 - tf.nn.sigmoid(gamma))

        self.ans_vec = l2_norm(self.summarize + self.ques)  # (1, E_t)

        # Score each of the 5 answers by dot product with the answer vector.
        self.output = tf.matmul(self.ans_vec, self.ans,
                                transpose_b=True)  # (1, 5)