Example No. 1
def conv1d(batch_input,
           kernel=3,
           output_channel=64,
           stride=1,
           use_bias=False,
           scope='conv1d'):
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        if use_bias:
            return slim.conv1d(
                batch_input,
                output_channel,
                kernel,
                stride,
                'SAME',
                data_format='NHWC',
                activation_fn=None,
                weights_initializer=tf.contrib.layers.xavier_initializer())
        else:
            # slim.conv1d adds a zero-initialized bias by default, so it must
            # be disabled explicitly when use_bias is False.
            return slim.conv1d(batch_input,
                               output_channel,
                               kernel,
                               stride,
                               'SAME',
                               data_format='NHWC',
                               activation_fn=None,
                               biases_initializer=None)
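A minimal usage sketch (an assumption, not part of the original snippet): assuming import tensorflow as tf and slim = tf.contrib.slim (TF 1.x), slim.conv1d expects a rank-3 [batch, width, channels] input, so the wrapper above could be called like this:

import tensorflow as tf
slim = tf.contrib.slim  # TF 1.x

# Hypothetical input: batch of 8 sequences, length 100, 32 channels.
batch_input = tf.placeholder(tf.float32, shape=[8, 100, 32])

# 64 output channels, kernel width 3, stride 1 -> output shape [8, 100, 64].
features = conv1d(batch_input, kernel=3, output_channel=64, scope='conv1d_demo')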
Example No. 2
def build_wavenet(inputs, num_classes, is_training=False, num_hidden_size=128, num_layers=3, rates=[1, 2, 4, 8, 16],
                  scope=None):
  """
  I don't kown how to implement the he_uniform(the default initializer of orignal code)with tensorflow.
  and I use the tf.contrib.layers.xavier_initializer() as default weights_initializer.

  :param inputs:
  :param num_classes:
  :param num_hidden_size:
  :param num_layers:
  :param rates:
  :param scope:
  :return:
  """

  def get_initializer(name=None):
    if name is None:
      return tf.contrib.layers.xavier_initializer()

  def get_normalizer_params():
    return {'is_training': is_training, 'scale': True}

  outputs = 0
  with tf.variable_scope(scope, default_name='wavenet'):
    with tf.variable_scope('input'):
      nets = slim.conv1d(inputs, num_hidden_size,
                         kernel_size=1,
                         activation_fn=tf.nn.tanh,
                         normalizer_fn=slim.batch_norm,
                         normalizer_params=get_normalizer_params(),
                         weights_initializer=get_initializer(),
                         scope='conv')

    with tf.variable_scope('resnet'):
      for i in range(num_layers):
        for rate in rates:
          nets, output = _resnet_block(nets, num_hidden_size,
                                       kernel_size=7, rate=rate,
                                       normalizer_fn=slim.batch_norm,
                                       normalizer_params=get_normalizer_params(),
                                       weights_initializer=get_initializer(),
                                       scope='block_%d_%d' % (i, rate))
          outputs += output

    with tf.variable_scope('output'):
      outputs = slim.conv1d(outputs, num_hidden_size,
                            kernel_size=1,
                            activation_fn=tf.nn.tanh,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=get_normalizer_params(),
                            weights_initializer=get_initializer(),
                            scope='conv')
      # The logit layer stays linear: without activation_fn=None the slim
      # default (ReLU) would be applied, and normalizer_params alone is
      # ignored unless a normalizer_fn is also given.
      return slim.conv1d(outputs,
                         num_outputs=num_classes,
                         kernel_size=1,
                         activation_fn=None,
                         weights_initializer=get_initializer(),
                         scope='logit')
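The docstring above notes that the original code's he_uniform initializer was not reproduced. A hedged sketch (an assumption, not the author's code): he_uniform corresponds to variance scaling with scale=2.0, fan_in mode and a uniform distribution, so get_initializer could be extended like this in TF 1.x:

def get_initializer(name=None):
  # Sketch: 'he_uniform' maps to variance scaling (scale=2.0, fan_in, uniform),
  # which is also exposed as tf.keras.initializers.he_uniform().
  if name == 'he_uniform':
    return tf.variance_scaling_initializer(
        scale=2.0, mode='fan_in', distribution='uniform')
  return tf.contrib.layers.xavier_initializer()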
Example No. 3
 def encoder_cnn_fn(x, vocab_size, params, is_training=True):
     h = x
     h = embedding_fn(h,
                      vocab_size=vocab_size,
                      dim_out=params.encoder_dim,
                      name='encoder_embedding')
     for i in range(layers):
         h = slim.conv1d(h,
                         num_outputs=params.encoder_dim,
                         kernel_size=kernel_size,
                         activation_fn=tf.nn.leaky_relu,
                         padding=padding,
                         scope='encoder_conv1d_{}'.format(i))
         if bn:
             h = slim.batch_norm(h,
                                 is_training=is_training,
                                 scope='encoder_bn_{}'.format(i))
         # h = slim.batch_norm(h, is_training=is_training)
     mu = slim.fully_connected(h,
                               num_outputs=params.latent_dim,
                               activation_fn=None,
                               scope='encoder_mu')
     logsigma = slim.fully_connected(h,
                                     num_outputs=params.latent_dim,
                                     activation_fn=None,
                                     scope='encoder_logsigma')
     # sigma = tf.nn.softplus(logsigma)
     # rnd = tf.random_normal(shape=tf.shape(logsigma))
     # z = tf.add(mu, rnd * sigma, name='encoder_z')
     # return z
     return mu, logsigma
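The commented-out lines above sketch how the original code sampled z; a caller can apply the same reparameterization trick to the returned (mu, logsigma) pair. A hypothetical usage (names and shapes assumed, not from the original snippet):

mu, logsigma = encoder_cnn_fn(x, vocab_size, params, is_training=True)
sigma = tf.nn.softplus(logsigma)                   # ensure a positive std-dev
rnd = tf.random_normal(shape=tf.shape(logsigma))   # unit Gaussian noise
z = tf.add(mu, rnd * sigma, name='encoder_z')      # z = mu + eps * sigma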
Example No. 4
def inference(input_tensor, regularizer=None, trainable=True, keep_prob=0.5):
    with slim.arg_scope([slim.conv1d, slim.max_pool2d],
                        stride=1,
                        padding='SAME'):

        with tf.variable_scope("layer1-initconv"):

            data = slim.conv1d(input_tensor,
                               CONV_DEEP,
                               15,
                               trainable=trainable)
            # data = slim.max_pool2d(data,[2,2],stride=2)
            data = tf.nn.max_pool1d(input=data,
                                    ksize=3,
                                    strides=2,
                                    padding='SAME')

            with tf.variable_scope("resnet_layer"):

                data = res_block(input_tensor=data,
                                 kshape=CONV_SIZE,
                                 deph=CONV_DEEP,
                                 layer=6,
                                 half=False,
                                 name="layer4-9-conv",
                                 trainable=trainable,
                                 keep_prob=keep_prob)
                data = res_block(input_tensor=data,
                                 kshape=CONV_SIZE,
                                 deph=CONV_DEEP * 2,
                                 layer=8,
                                 half=True,
                                 name="layer10-15-conv",
                                 trainable=trainable,
                                 keep_prob=keep_prob)
                data = res_block(input_tensor=data,
                                 kshape=CONV_SIZE + 4,
                                 deph=CONV_DEEP * 4,
                                 layer=12,
                                 half=True,
                                 name="layer16-27-conv",
                                 trainable=trainable,
                                 keep_prob=keep_prob)
                data = res_block(input_tensor=data,
                                 kshape=CONV_SIZE,
                                 deph=CONV_DEEP * 8,
                                 layer=6,
                                 half=True,
                                 name="layer28-33-conv",
                                 trainable=trainable,
                                 keep_prob=keep_prob)

                # data = slim.avg_pool2d(data,[2,2],stride=2)  # at this point the tensor shape is [10,1,313,512], so it cannot be pooled any further.
                # data = tf.layers.average_pooling1d(inputs=data, pool_size=3, strides=2, padding=1)
                data = tf.layers.average_pooling1d(
                    inputs=data, pool_size=data.shape.as_list()[1],
                    strides=1)  # (batch_size, 1, 512)
                return data
Example No. 5
def res_layer2d(input_tensor,
                kshape=5,
                deph=64,
                conv_stride=1,
                padding='SAME',
                trainable=True,
                keep_prob=0.5):
    data = input_tensor

    # First convolution inside the block
    #data = slim.conv2d(data,num_outputs=deph,kernel_size=kshape,stride=conv_stride,padding=padding, trainable=trainable)
    data = slim.conv1d(data,
                       num_outputs=deph,
                       kernel_size=kshape,
                       stride=conv_stride,
                       padding=padding,
                       trainable=trainable,
                       activation_fn=None)  # no activation function
    data = slim.batch_norm(data, activation_fn=tf.nn.relu, trainable=trainable)

    data = slim.dropout(data, keep_prob=keep_prob, is_training=trainable)
    # # Second convolution inside the block
    # data = slim.conv2d(data,num_outputs=deph,kernel_size=kshape,stride=conv_stride,padding=padding,activation_fn=None, trainable=trainable)
    data = slim.conv1d(data,
                       num_outputs=deph,
                       kernel_size=kshape,
                       stride=conv_stride,
                       padding=padding,
                       activation_fn=None,
                       trainable=trainable)
    data = slim.batch_norm(data, activation_fn=None, trainable=trainable)
    output_deep = input_tensor.get_shape().as_list()[2]

    # When the output depth differs from the input depth, zero-pad the input along the channel dimension
    if output_deep != deph:
        input_tensor = tf.pad(
            input_tensor,
            [[0, 0], [0, 0],
             [abs(deph - output_deep) // 2,
              abs(deph - output_deep) // 2]])  # the depth change here uses option A (zero-padding) from the paper
    data = tf.add(data, input_tensor)
    data = tf.nn.relu(data)
    return data
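For example (a hypothetical call, not from the original repo): feeding a [batch, length, 64] tensor with deph=128 takes the depth-mismatch branch, so the shortcut is zero-padded by (128 - 64) // 2 = 32 channels on each side before the add (ResNet option A):

x = tf.placeholder(tf.float32, shape=[None, 300, 64])   # assumed input shape
y = res_layer2d(x, kshape=5, deph=128)                   # output: [None, 300, 128]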
Example No. 6
def res_layer1d(input_tensor,
                kshape=[5, 5],
                deph=64,
                conv_stride=1,
                padding='SAME',
                trainable=True):
    data = input_tensor
    data = slim.batch_norm(data, activation_fn=tf.nn.relu)

    # First convolution inside the block
    data = slim.conv1d(data,
                       num_outputs=deph,
                       kernel_size=kshape,
                       stride=conv_stride,
                       padding=padding,
                       trainable=trainable)

    # # Second convolution inside the block
    data = slim.conv1d(data,
                       num_outputs=deph,
                       kernel_size=kshape,
                       stride=conv_stride,
                       padding=padding,
                       activation_fn=None,
                       trainable=trainable)
    output_deep = input_tensor.get_shape().as_list()[3]

    # When the output depth differs from the input depth, zero-pad the input along the channel dimension
    if output_deep != deph:
        input_tensor = tf.pad(
            input_tensor,
            [[0, 0], [0, 0], [0, 0],
             [abs(deph - output_deep) // 2,
              abs(deph - output_deep) // 2]])  # the depth change here uses option A (zero-padding) from the paper
    data = tf.add(data, input_tensor)
    data = tf.nn.relu(data)
    return data
Example No. 7
def inference(input_tensor, regularizer=None, trainable=True):
    with slim.arg_scope([slim.conv1d, slim.max_pool2d],
                        stride=1,
                        padding='SAME'):  # slim has no max_pool1d

        with tf.variable_scope("layer1-initconv"):

            data = slim.conv1d(input_tensor,
                               CONV_DEEP, [7],
                               trainable=trainable)
            data = slim.max_pool2d(data, [2, 2], stride=2)

            with tf.variable_scope("resnet_layer"):

                data = res_block(input_tensor=data,
                                 kshape=[CONV_SIZE, CONV_SIZE],
                                 deph=CONV_DEEP,
                                 layer=6,
                                 half=False,
                                 name="layer4-9-conv",
                                 trainable=trainable)
                data = res_block(input_tensor=data,
                                 kshape=[CONV_SIZE, CONV_SIZE],
                                 deph=CONV_DEEP * 2,
                                 layer=8,
                                 half=True,
                                 name="layer10-15-conv",
                                 trainable=trainable)
                data = res_block(input_tensor=data,
                                 kshape=[CONV_SIZE, CONV_SIZE],
                                 deph=CONV_DEEP * 4,
                                 layer=12,
                                 half=True,
                                 name="layer16-27-conv",
                                 trainable=trainable)
                data = res_block(input_tensor=data,
                                 kshape=[CONV_SIZE, CONV_SIZE],
                                 deph=CONV_DEEP * 8,
                                 layer=6,
                                 half=True,
                                 name="layer28-33-conv",
                                 trainable=trainable)

                # data = slim.avg_pool1d(data,[2,2],stride=2)  # at this point the tensor shape is [10,1,313,512], so it cannot be pooled any further.
                return data
Example No. 8
 def decoder_cnn_fn(z, vocab_size, params, is_training=True):
     h = z
     for i in range(layers):
         h = slim.conv1d(h,
                         num_outputs=params.decoder_dim,
                         kernel_size=kernel_size,
                         activation_fn=tf.nn.leaky_relu,
                         padding=padding,
                         scope='decoder_conv1d_{}'.format(i))
         if bn:
             h = slim.batch_norm(h,
                                 is_training=is_training,
                                 scope='decoder_bn_{}'.format(i))
     logits = slim.fully_connected(h,
                                   num_outputs=vocab_size,
                                   activation_fn=None,
                                   scope='decoder_logits')
     return logits
Example No. 9
def _resnet_block(inputs, num_outputs, kernel_size, rate,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  weights_regularizer=None,
                  biases_regularizer=None,
                  scope=None):
  with tf.variable_scope(scope, default_name="block_%d" % rate):
    conv_filter = _aconv1d(inputs,
                           kerner_size=kernel_size,
                           rate=rate,
                           activation_fn=tf.nn.tanh,
                           normalizer_fn=normalizer_fn,
                           normalizer_params=normalizer_params,
                           weights_initializer=weights_initializer,
                           biases_initializer=biases_initializer,
                           weights_regularizer=weights_regularizer,
                           biases_regularizer=biases_regularizer,
                           scope='filter')
    conv_gate = _aconv1d(inputs,
                         kerner_size=kernel_size,
                         rate=rate,
                         activation_fn=tf.nn.sigmoid,
                         normalizer_fn=normalizer_fn,
                         normalizer_params=normalizer_params,
                         weights_initializer=weights_initializer,
                         biases_initializer=biases_initializer,
                         weights_regularizer=weights_regularizer,
                         biases_regularizer=biases_regularizer,
                         scope='gate')
    outputs = conv_filter * conv_gate  # WaveNet-style gated activation: tanh branch * sigmoid gate
    outputs = slim.conv1d(outputs, num_outputs,
                          kernel_size=1,
                          activation_fn=tf.nn.tanh,
                          normalizer_fn=normalizer_fn,
                          normalizer_params=normalizer_params,
                          weights_initializer=weights_initializer,
                          biases_initializer=biases_initializer,
                          weights_regularizer=weights_regularizer,
                          biases_regularizer=biases_regularizer,
                          scope='conv')
    return outputs + inputs, outputs
Example No. 10
def get_half(input_tensor, deph, trainable=True):
    data = input_tensor
    data = slim.conv1d(data, deph // 2, 1, stride=2,
                       trainable=trainable)  # a 1x1 convolution with stride 2 replaces downsampling
    return data
Example No. 11
    def scoring(self, scope):
        with tf.variable_scope(scope):
            self.inputs_tm = tf.placeholder(shape=[None, (TM_SIZE**2)],
                                            dtype=tf.float32,
                                            name='in_tm')
            self.tmIn = tf.reshape(self.inputs_tm,
                                   shape=[-1, TM_SIZE, TM_SIZE, 1],
                                   name='reshape_tm')

            self.correct_value = tf.placeholder(shape=[None, 1],
                                                dtype=tf.float32,
                                                name='final_val')

            TM_conv_0 = slim.conv2d(
                activation_fn=tf.nn.relu,
                inputs=self.tmIn,
                num_outputs=16,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                kernel_size=[4, 4],
                stride=[1, 1],
                padding='VALID',
                scope='conv-0')

            TM_conv_1 = slim.conv2d(
                activation_fn=tf.nn.relu,
                inputs=TM_conv_0,
                num_outputs=32,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                kernel_size=[4, 4],
                stride=[1, 1],
                padding='VALID',
                scope='conv-1')

            self.topo = tf.placeholder(shape=[None, A_SIZE],
                                       dtype=tf.float32,
                                       name='in_nt')
            self.ntIn = tf.reshape(self.topo,
                                   shape=[-1, A_SIZE, 1],
                                   name='reshape_nt')  # [?, A_SIZE, 1]

            TOPO_conv_0 = slim.conv1d(
                activation_fn=tf.nn.relu,
                inputs=self.ntIn,
                num_outputs=16,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                kernel_size=[4],
                stride=[1],
                padding='VALID',
                scope='conv-2')

            TOPO_conv_1 = slim.conv1d(
                activation_fn=tf.nn.relu,
                inputs=TOPO_conv_0,
                num_outputs=32,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                kernel_size=[4],
                stride=[1],
                padding='VALID',
                scope='conv-3')

            combined_input = tf.concat(
                [slim.flatten(TOPO_conv_1),
                 slim.flatten(TM_conv_1)],
                1,
                name='concat')
            # [?, 832]

            # Connect the input and output
            hidden_1 = slim.fully_connected(combined_input,
                                            256,
                                            activation_fn=tf.nn.elu,
                                            scope='fc0')
            hidden_2 = slim.fully_connected(hidden_1,
                                            128,
                                            activation_fn=tf.nn.elu,
                                            scope='fc1')

            self.score = tf.contrib.layers.fully_connected(
                hidden_2,
                1,
                activation_fn=None,
                weights_initializer=normalized_columns_initializer(0.01),
                biases_initializer=None)

            self.loss = (self.correct_value[0][0] - self.score[0][0])**2

            self.optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
            self.train_op = self.optimizer.minimize(self.loss)
Example No. 12
def _density_modulator(density_hist,
                       mod_early_conv,
                       density_channels,
                       init_channels,
                       scope=None,
                       is_training=False,
                       with_conv=False,
                       conv_init_num_outs=64):
    with tf.variable_scope(scope, "mod_density", [density_hist]):
        n_modulator_param = init_channels * (mod_early_conv + 2 + 4 + 8 +
                                             16) * 2

        net = density_hist
        if with_conv:
            net = tf.expand_dims(net, axis=-1)
            net = slim.repeat(net,
                              2,
                              slim.conv1d,
                              conv_init_num_outs,
                              3,
                              scope="conv1")
            net = tf.layers.max_pooling1d(net, 2, 2, padding="same")
            net = slim.repeat(net,
                              2,
                              slim.conv1d,
                              conv_init_num_outs * 2,
                              3,
                              scope="conv2")
            net = tf.layers.max_pooling1d(net, 2, 2, padding="same")
            net = slim.repeat(net,
                              3,
                              slim.conv1d,
                              conv_init_num_outs * 4,
                              3,
                              scope="conv3")
            net = tf.layers.max_pooling1d(net, 2, 2, padding="same")
            net = slim.repeat(net,
                              3,
                              slim.conv1d,
                              conv_init_num_outs * 8,
                              3,
                              scope="conv4")
            net = tf.layers.max_pooling1d(net, 2, 2, padding="same")
            net = slim.repeat(net,
                              3,
                              slim.conv1d,
                              conv_init_num_outs * 16,
                              3,
                              scope="conv5")
            net = tf.layers.max_pooling1d(net, 2, 2, padding="same")
            net = slim.conv1d(net,
                              density_channels,
                              7,
                              padding="VALID",
                              scope="fc6")
            net = slim.dropout(net,
                               0.5,
                               is_training=is_training,
                               scope="dropout1")
            net = slim.conv1d(net, density_channels, 1, scope="fc7")
            net = slim.dropout(net,
                               0.5,
                               is_training=is_training,
                               scope="dropout2")
            modulator_params = slim.conv1d(
                net,
                n_modulator_param,
                1,
                weights_initializer=tf.zeros_initializer(),
                biases_initializer=tf.ones_initializer(),
                activation_fn=None,
                normalizer_fn=None,
                scope="fc8")
            modulator_params = tf.squeeze(modulator_params, axis=1)
        else:
            net = slim.fully_connected(density_hist,
                                       density_channels,
                                       scope="fc1")
            net = slim.dropout(net,
                               0.5,
                               is_training=is_training,
                               scope="dropout1")
            net = slim.fully_connected(net, density_channels, scope="fc2")
            net = slim.dropout(net,
                               0.5,
                               is_training=is_training,
                               scope="dropout2")
            modulator_params = slim.fully_connected(
                net,
                n_modulator_param,
                weights_initializer=tf.zeros_initializer(),
                biases_initializer=tf.ones_initializer(),
                activation_fn=None,
                normalizer_fn=None,
                scope="fc3")
        return modulator_params
Example No. 13
    def wdcnn_network_structure(self, is_trained):
        with slim.arg_scope(
            [slim.conv1d],
                padding="same",
                activation_fn=slim.nn.relu,
                weights_initializer=tf.truncated_normal_initializer(
                    stddev=0.01),
                weights_regularizer=slim.l2_regularizer(0.005)):
            net = slim.conv1d(inputs=self.inputs,
                              num_outputs=16,
                              kernel_size=64,
                              stride=16,
                              scope="conv_1")
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_2")
            net = def_max_pool(net)

            net = slim.conv1d(net,
                              num_outputs=32,
                              kernel_size=3,
                              stride=1,
                              scope="conv_3")
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_4")
            net = def_max_pool(net)

            net = slim.conv1d(net,
                              num_outputs=64,
                              kernel_size=2,
                              stride=1,
                              scope="conv_5")
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_6")
            net = def_max_pool(net)

            net = slim.conv1d(net,
                              num_outputs=64,
                              kernel_size=3,
                              stride=1,
                              scope="conv_7")
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_8")
            net = def_max_pool(net)

            net = slim.conv1d(net,
                              num_outputs=64,
                              kernel_size=3,
                              stride=1,
                              padding="VALID",
                              scope="conv_9")
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_10")
            net = def_max_pool(net)

            net = slim.flatten(net, scope="flatten_11")

            net = slim.fully_connected(net,
                                       num_outputs=100,
                                       activation_fn=tf.nn.sigmoid,
                                       scope="fully_connected_12")

            net = slim.dropout(net,
                               keep_prob=self.keep_prob,
                               is_training=is_trained,
                               scope="dropout_13")

            digits_onehot = slim.fully_connected(
                net,
                num_outputs=self.num_class,
                activation_fn=tf.nn.softmax,
                weights_initializer=tf.truncated_normal_initializer(
                    stddev=0.01),
                weights_regularizer=slim.l2_regularizer(0.005),
                scope="fully_connected_14")
            tf.summary.histogram("fully_connected_14", digits_onehot)
        return digits_onehot
Example No. 14
def bottleneck_block_v2(inputs,
                        filters,
                        training,
                        projection_shortcut,
                        strides,
                        data_format,
                        activation=tf.nn.relu,
                        kernel_size=3):
    """A single block for ResNet v2, with a bottleneck.
    Similar to _building_block_v2(), except using the "bottleneck" blocks
    described in:
      Convolution then batch normalization then ReLU as described by:
        Deep Residual Learning for Image Recognition
        https://arxiv.org/pdf/1512.03385.pdf
        by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Dec 2015.
    Adapted to the ordering conventions of:
      Batch normalization then ReLU then convolution as described by:
        Identity Mappings in Deep Residual Networks
        https://arxiv.org/pdf/1603.05027.pdf
        by Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, Jul 2016.
    Args:
      inputs: A tensor of size [batch, channels, height_in, width_in] or
        [batch, height_in, width_in, channels] depending on data_format.
      filters: The number of filters for the convolutions.
      training: A Boolean for whether the model is in training or inference
        mode. Needed for batch normalization.
      projection_shortcut: The function to use for projection shortcuts
        (typically a 1x1 convolution when downsampling the input).
      strides: The block's stride. If greater than 1, this block will ultimately
        downsample the input.
      data_format: The input format ('channels_last' or 'channels_first').
    Returns:
      The output tensor of the block; shape should match inputs.
    """
    shortcut = inputs
    inputs = slim.batch_norm(inputs,
                             is_training=training,
                             data_format=data_format,
                             scope='resnet_bn_0')
    inputs = activation(inputs)

    # The projection shortcut should come after the first batch norm and ReLU
    # since it performs a 1x1 convolution.
    if projection_shortcut is not None:
        shortcut = projection_shortcut(inputs)

    # slim.conv1d takes num_outputs/stride rather than the tf.layers-style
    # filters/strides keyword names used in the original ResNet model code.
    inputs = slim.conv1d(inputs=inputs,
                         num_outputs=filters,
                         kernel_size=1,
                         stride=1,
                         data_format=data_format,
                         scope='resnet_conv1d_0',
                         activation_fn=None)

    inputs = slim.batch_norm(inputs,
                             is_training=training,
                             data_format=data_format,
                             scope='resnet_bn_1')
    inputs = activation(inputs)
    inputs = slim.conv1d(inputs=inputs,
                         num_outputs=filters,
                         kernel_size=kernel_size,
                         stride=strides,
                         data_format=data_format,
                         scope='resnet_conv1d_1',
                         activation_fn=None)

    inputs = slim.batch_norm(inputs,
                             is_training=training,
                             data_format=data_format,
                             scope='resnet_bn_2')
    inputs = activation(inputs)
    inputs = slim.conv1d(inputs=inputs,
                         num_outputs=4 * filters,
                         kernel_size=1,
                         stride=1,
                         data_format=data_format,
                         scope='resnet_conv1d_2',
                         activation_fn=None)

    return inputs + shortcut
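The docstring above notes that projection_shortcut is typically a 1x1 convolution applied when downsampling. A minimal sketch of such a callable, assuming the same slim.conv1d API and the block's 4 * filters output depth (an illustration, not part of the original model):

def make_projection_shortcut(filters, strides):
    # Returns a callable that projects the shortcut to 4 * filters channels
    # with a strided 1x1 convolution so it matches the block's output shape.
    def projection_shortcut(inputs):
        return slim.conv1d(inputs,
                           num_outputs=4 * filters,
                           kernel_size=1,
                           stride=strides,
                           activation_fn=None,
                           scope='resnet_projection')
    return projection_shortcut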
Example No. 15
    def siamese_base_structure(self, inputs, reuse):
        # left_inputs = tf.placeholder(dtype=tf.float32, shape=[None, 2048, 2])
        # right_inputs = tf.placeholder(dtype=tf.float32, shape=[None, 2048, 2])   nested functions cannot be used inside the class
        with slim.arg_scope(
            [slim.conv1d],
                padding="same",
                activation_fn=slim.nn.relu,
                weights_initializer=tf.truncated_normal_initializer(
                    stddev=0.01),
                weights_regularizer=slim.l2_regularizer(0.005)):
            net = slim.conv1d(inputs=inputs,
                              num_outputs=16,
                              kernel_size=64,
                              stride=16,
                              reuse=reuse,
                              scope="conv_1")
            # tf.summary.histogram("conv_1", net)
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_2")
            net = def_max_pool(net)
            # tf.summary.histogram("max_pool_2", net)

            net = slim.conv1d(net,
                              num_outputs=32,
                              kernel_size=3,
                              stride=1,
                              reuse=reuse,
                              scope="conv_3")
            # tf.summary.histogram("conv_3", net)
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_4")
            net = def_max_pool(net)
            # tf.summary.histogram("max_pool_4", net)

            net = slim.conv1d(net,
                              num_outputs=64,
                              kernel_size=2,
                              stride=1,
                              reuse=reuse,
                              scope="conv_5")
            # tf.summary.histogram("conv_5", net)
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_6")
            net = def_max_pool(net)
            # tf.summary.histogram("max_pool_6", net)

            net = slim.conv1d(net,
                              num_outputs=64,
                              kernel_size=3,
                              stride=1,
                              reuse=reuse,
                              scope="conv_7")
            # tf.summary.histogram("conv_7", net)
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_8")
            net = def_max_pool(net)
            # tf.summary.histogram("max_pool_8", net)

            net = slim.conv1d(net,
                              num_outputs=64,
                              kernel_size=3,
                              stride=1,
                              padding="VALID",
                              reuse=reuse,
                              scope="conv_9")
            # tf.summary.histogram("conv_9", net)
            def_max_pool = tf.layers.MaxPooling1D(pool_size=2,
                                                  strides=2,
                                                  padding="VALID",
                                                  name="max_pool_10")
            net = def_max_pool(net)
            # tf.summary.histogram("max_pool_10", net)

            net = slim.flatten(net, scope="flatten_11")
            # tf.summary.histogram("flatten_11", net)

            output_step_one = slim.fully_connected(
                net,
                num_outputs=100,
                activation_fn=tf.nn.sigmoid,
                reuse=reuse,
                weights_initializer=tf.truncated_normal_initializer(
                    stddev=0.01),
                weights_regularizer=slim.l2_regularizer(0.005),
                scope="fully_connected_12")
            # tf.summary.histogram("fully_connected_12", output_step_one)
        return output_step_one