def _squeezenet1d(flat_input, keep_prob, n_classes):
     """SqueezeNet-style 1-D classifier over flattened input signals.

     Restores channel structure from the flat input, then applies a conv
     stem, stacked fire modules with interleaved max-pooling, dropout, a
     1x1 class convolution, and global average pooling over time.

     Args:
         flat_input: 2-D tensor, presumably [batch, seq_lngth * num_ch]
             (flattened) -- TODO confirm against caller.
         keep_prob: keep probability for the pre-logits dropout
             (TF1 `tf.nn.dropout` semantics: probability of KEEPING a unit).
         n_classes: number of output classes.

     Returns:
         logits tensor of shape [batch, n_classes].
     """
     w_ini, b_ini, r_ini = initializers()
     # Undo the flattening: [-1, time, channels].
     x_multichannel = tf.reshape(flat_input,
                                 [-1, conf.seq_lngth, conf.num_ch])
     net = conv1d(x_multichannel,
                  filters=96,
                  kernel_size=7,
                  name='conv1',
                  kernel_initializer=w_ini,
                  bias_initializer=b_ini,
                  kernel_regularizer=r_ini)
     net = max_pooling1d(net, pool_size=3, strides=2, name='maxpool1')
     # Fire modules: (squeeze filters, expand filters) per SqueezeNet.
     net = fire_module1d(net, 16, 64, name='fire2')
     net = fire_module1d(net, 16, 64, name='fire3')
     net = fire_module1d(net, 32, 128, name='fire4')
     net = max_pooling1d(net, pool_size=3, strides=2, name='maxpool4')
     net = fire_module1d(net, 32, 128, name='fire5')
     net = fire_module1d(net, 48, 192, name='fire6')
     net = fire_module1d(net, 48, 192, name='fire7')
     net = fire_module1d(net, 64, 256, name='fire8')
     net = max_pooling1d(net, pool_size=3, strides=2, name='maxpool8')
     net = fire_module1d(net, 64, 256, name='fire9')
     net = tf.nn.dropout(net, keep_prob=keep_prob, name='dropout9')
     # 1x1 conv produces one channel per class before pooling.
     net = conv1d(net,
                  n_classes,
                  1,
                  1,
                  name='conv10',
                  kernel_initializer=w_ini,
                  bias_initializer=b_ini,
                  kernel_regularizer=r_ini)
     logits = tf.reduce_mean(
         net, axis=1, name='global_avgpool10')  # global average pooling
     return logits
예제 #2
0
def res_block_1d(x, kernel_size, activation, training, batch_norm=True):
    """Residual block for 1-D feature maps: returns x + F(x).

    F is conv -> (BN) -> nonlinearity -> conv -> (BN); 'same' padding and
    stride 1 keep both the temporal length and the channel count unchanged,
    so the skip connection adds cleanly.
    """
    assert len(x.shape) == 3, "Input tensor must be 3-dimensional."

    act_key = activation.lower()
    n_channels = int(x.shape[2])

    def _conv(tensor):
        # Channel-preserving conv so the residual matches x's shape.
        return ly.conv1d(inputs=tensor,
                         filters=n_channels,
                         kernel_size=kernel_size,
                         strides=1,
                         padding='same')

    def _maybe_bn(tensor):
        if batch_norm:
            return ly.batch_normalization(tensor, training=training)
        return tensor

    residual = _maybe_bn(_conv(x))
    residual = nonlinear[act_key](residual)
    residual = _maybe_bn(_conv(residual))

    return tf.add(x, residual)
예제 #3
0
def misconception_model(inputs,
                        filters_list,
                        kernel_size,
                        strides_list,
                        training,
                        objective_functions,
                        sub_filters=128,
                        sub_layers=2,
                        dropout_rate=0.5,
                        virtual_batch_size=None,
                        feature_means=None,
                        feature_stds=None):
    """ A misconception tower feeding one head per objective function.

  Args:
    inputs: a tensor of size [batch_size, width, depth].
    filters_list: output filter count for each misconception layer.
    kernel_size: the width of the conv and pooling filters to apply.
    strides_list: per-layer stride, zipped with filters_list.
    training: whether the network is training (drives BN and dropout).
    objective_functions: a list of objective functions to add to the top of
                         the network; each must expose .build(net).
    sub_filters: channel count of the per-objective 1x1 conv stack.
    sub_layers: number of 1x1 conv layers in each objective head.
    dropout_rate: dropout applied just before each head.
    virtual_batch_size: optional ghost-batch size for batch normalization.
    feature_means: optional per-feature means subtracted from inputs.
    feature_stds: optional per-feature stds used to scale inputs.

  Returns:
    (outputs, layers): one output per objective function, plus the list of
    intermediate tower activations (normalized inputs first).
  """
    layers = []
    net = inputs
    # Optional input standardization; epsilon guards against zero stds.
    if feature_means is not None:
        net = net - tf.constant(feature_means)[None, None, :]
    if feature_stds is not None:
        net = net / (tf.constant(feature_stds) + 1e-6)
    layers.append(net)
    for filters, strides in zip(filters_list, strides_list):
        net = misconception_with_bypass(net,
                                        filters,
                                        kernel_size,
                                        strides,
                                        training,
                                        virtual_batch_size=virtual_batch_size)
        layers.append(net)
    outputs = []
    for ofunc in objective_functions:
        # Each objective gets its own 1x1 conv stack on top of the shared tower.
        onet = net
        for _ in range(sub_layers - 1):
            onet = ly.conv1d(onet,
                             sub_filters,
                             1,
                             activation=None,
                             use_bias=False)
            onet = ly.batch_normalization(
                onet, training=training, virtual_batch_size=virtual_batch_size)
            onet = tf.nn.relu(onet)
        onet = ly.conv1d(onet, sub_filters, 1, activation=tf.nn.relu)
        onet = ly.flatten(onet)
        # Dropout just before the objective head.
        onet = ly.dropout(onet, training=training, rate=dropout_rate)
        outputs.append(ofunc.build(onet))

    return outputs, layers
예제 #4
0
def misconception_fishing(inputs,
                          filters_list,
                          kernel_size,
                          strides_list,
                          objective_function,
                          training,
                          pre_filters=128,
                          post_filters=128,
                          post_layers=1,
                          dropout_rate=0.5,
                          internal_dropout_rate=0.5,
                          other_objectives=(),
                          feature_means=None,
                          feature_stds=None):
    """Misconception tower with a per-timestep fishing head.

    Builds the shared tower via `misconception_model` (whose own heads serve
    `other_objectives`), then projects every intermediate tower layer to
    `pre_filters` channels, upsamples layer i by 2**i with `repeat_tensor`
    (presumably to undo the tower's stride-2 downsampling -- TODO confirm),
    and sums them into a per-timestep embedding. A 1x1 conv reduces that
    embedding to one logit per timestep, which is handed to
    `objective_function.build`.

    Returns:
      Whatever `objective_function.build` produces from the per-timestep
      logits tensor of shape [batch, width, 1].
    """
    # Discard the other objectives' outputs; we only need the tower layers.
    _, layers = misconception_model(inputs,
                                    filters_list,
                                    kernel_size,
                                    strides_list,
                                    training,
                                    other_objectives,
                                    sub_filters=post_filters,
                                    sub_layers=2,
                                    dropout_rate=internal_dropout_rate,
                                    feature_means=feature_means,
                                    feature_stds=feature_stds)

    expanded_layers = []
    for i, lyr in enumerate(layers):
        # Project to a common channel count, then expand back to full width.
        lyr = ly.conv1d(lyr, pre_filters, 1, activation=None)
        lyr = ly.batch_normalization(lyr, training=training)
        lyr = tf.nn.relu(lyr)
        expanded_layers.append(repeat_tensor(lyr, 2**i))

    embedding = tf.add_n(expanded_layers)

    for _ in range(post_layers - 1):
        embedding = ly.conv1d(embedding,
                              post_filters,
                              1,
                              activation=None,
                              use_bias=False)
        embedding = ly.batch_normalization(embedding, training=training)
        embedding = tf.nn.relu(embedding)

    embedding = ly.conv1d(embedding, post_filters, 1, activation=tf.nn.relu)
    embedding = ly.dropout(embedding, training=training, rate=dropout_rate)

    # One raw logit per timestep.
    fishing_outputs = ly.conv1d(embedding, 1, 1, activation=None)

    return objective_function.build(fishing_outputs)
예제 #5
0
def shake2_with_thru_max(inputs,
                         filters,
                         kernel_size,
                         stride,
                         training,
                         scope=None):
    """Shake2 + max-pool branches fused by a 1x1 conv/BN, plus a cropped,
    strided pass-through of the input added as a residual.

    The pass-through is center-cropped by kernel_size // 2 on each side and
    then subsampled by `stride`, intended to align its length with the
    "valid"-padded branches; the TODO below notes this alignment has not
    been worked out fully generally. Channels are zero-padded up to
    `filters` so the addition is shape-compatible.
    """
    with tf.name_scope(scope):

        ss = shake2(inputs, filters, kernel_size, stride, training)

        mp = tf.layers.max_pooling1d(inputs,
                                     kernel_size,
                                     strides=stride,
                                     padding="valid")
        # Concatenate the two branches on the channel axis and fuse.
        concat = tf.concat([ss, mp], 2)

        y = ly.conv1d(concat, filters, 1, activation=None, use_bias=False)
        residual = ly.batch_normalization(y, training=training)

        # crop = (kernel_size - stride // 2) // 2 # TODO: work this out more generally / cleanly
        crop = kernel_size // 2
        thru = inputs
        if crop:
            thru = inputs[:, crop:-crop]
        # Subsample to match the strided branches.
        thru = thru[:, ::stride]
        # if stride > 1:
        #     thru = tf.layers.max_pooling1d(
        #         thru, kernel_size, strides=stride, padding="valid")
        thru = zero_pad_features(thru, filters)

        return thru + residual
예제 #6
0
def misconception_layer(inputs,
                        filters,
                        kernel_size,
                        strides,
                        training,
                        scope=None,
                        virtual_batch_size=None):
    """ A single layer of the misconception convolutional network.

  Concatenates a conv branch (conv -> BN -> ReLU) and a max-pool branch,
  both over symmetrically padded input, then fuses them with a 1x1 conv
  -> BN -> ReLU.

  Args:
    inputs: a tensor of size [batch_size, width, depth].
    filters: the depth of the output tensor.
    kernel_size: the width of the conv and pooling filters to apply.
    strides: the downsampling to apply when filtering.
    training: whether the network is training (drives batch norm).
    scope: optional name scope for the layer.
    virtual_batch_size: optional ghost-batch size for batch normalization.

  Returns:
    a tensor of size [batch_size, width/strides, filters].
  """
    with tf.name_scope(scope):
        # Symmetric manual padding so the "valid" conv/pool below behave
        # like "same" for the chosen kernel/stride combination.
        extra = kernel_size - strides
        p0 = extra // 2
        p1 = extra - p0
        padded = tf.pad(inputs, [[0, 0], [p0, p1], [0, 0]])
        stage_conv = ly.conv1d(padded,
                               filters,
                               kernel_size,
                               strides=strides,
                               padding="valid",
                               activation=None,
                               use_bias=False)
        stage_conv = ly.batch_normalization(
            stage_conv,
            training=training,
            virtual_batch_size=virtual_batch_size)
        stage_conv = tf.nn.relu(stage_conv)
        stage_max_pool_reduce = tf.layers.max_pooling1d(padded,
                                                        kernel_size,
                                                        strides=strides,
                                                        padding="valid")
        concat = tf.concat([stage_conv, stage_max_pool_reduce], 2)

        # 1x1 fusion of the two branches back down to `filters` channels.
        total = ly.conv1d(concat, filters, 1, activation=None, use_bias=False)
        total = ly.batch_normalization(total,
                                       training=training,
                                       virtual_batch_size=virtual_batch_size)
        total = tf.nn.relu(total)
        return total
예제 #7
0
def shakeout(inputs, filters, kernel_size, stride, training, scope=None):
    """Two-branch shake block: each branch runs conv -> BN -> ReLU ->
    1x1 conv -> BN on one of the stochastically masked streams produced by
    `shake_out`, and the branch outputs are summed.

    Args:
      inputs: 3-D feature tensor [batch, width, depth].
      filters: output channel count of both branches.
      kernel_size: width of the first conv in each branch.
      stride: temporal stride of the first conv ("valid" padding).
      training: whether the network is training (drives BN and shake_out).
      scope: optional name scope.

    Returns:
      The elementwise sum of the two branch outputs.
    """
    with tf.name_scope(scope):

        y = tf.nn.relu(inputs)

        # shake_out yields the two stochastically masked copies of y that
        # the parallel branches below consume.
        y1, y2 = shake_out(y, training)

        y1 = ly.conv1d(y1,
                       filters,
                       kernel_size,
                       activation=None,
                       use_bias=False,
                       strides=stride,
                       padding="valid")
        y1 = ly.batch_normalization(y1, training=training)
        y1 = tf.nn.relu(y1)
        y1 = ly.conv1d(y1,
                       filters,
                       1,
                       activation=None,
                       use_bias=False,
                       padding="valid")
        y1 = ly.batch_normalization(y1, training=training)

        # BUG FIX: this branch previously read from the unmasked `y`,
        # silently discarding the second shake_out stream; it now consumes
        # `y2` to mirror the y1 branch.
        y2 = ly.conv1d(y2,
                       filters,
                       kernel_size,
                       activation=None,
                       use_bias=False,
                       strides=stride,
                       padding="valid")
        y2 = ly.batch_normalization(y2, training=training)
        y2 = tf.nn.relu(y2)
        y2 = ly.conv1d(y2,
                       filters,
                       1,
                       activation=None,
                       use_bias=False,
                       padding="valid")
        y2 = ly.batch_normalization(y2, training=training)

        return y1 + y2
 def _squeeze1d(inputs, n_outputs):
     """1x1 'squeeze' convolution of a 1-D fire module (SqueezeNet)."""
     w_ini, b_ini, r_ini = initializers()
     squeeze_kwargs = dict(inputs=inputs,
                           filters=n_outputs,
                           kernel_size=1,
                           strides=1,
                           name='squeeze',
                           activation=tf.nn.relu,
                           kernel_initializer=w_ini,
                           bias_initializer=b_ini,
                           kernel_regularizer=r_ini)
     return conv1d(**squeeze_kwargs)
예제 #9
0
 def _conv1d(inputs, n_filters, size, name, padding='VALID'):
     """wrapper function for 1D convolutional layer"""
     # Shared project initializers/regularizer keep all convs consistent.
     w_ini, b_ini, r_ini = initializers(conf.bias_init, conf.l2_str)
     return conv1d(inputs, n_filters, size,
                   name=name,
                   padding=padding,
                   activation=tf.nn.relu,
                   kernel_initializer=w_ini,
                   bias_initializer=b_ini,
                   kernel_regularizer=r_ini)
예제 #10
0
def conv1d_layer(x,
                 n_out,
                 kernel_size,
                 stride=1,
                 activation=ACTIVATION,
                 regularize=True,
                 use_bias=True,
                 drop_rate=0.0,
                 batch_norm=BATCH_NORM,
                 training=True,
                 name=None,
                 reuse=None):
    """1-D conv layer with optional pre-conv batch norm, weight/bias
    regularization, and post-conv dropout.

    Args:
      x: input tensor [batch, width, channels].
      n_out: number of output filters.
      kernel_size: conv kernel width.
      stride: temporal stride ('same' padding).
      activation: activation applied inside the conv layer.
      regularize: if True, apply WT_REG / BI_REG to kernel and bias.
      use_bias: whether the conv layer has a bias term.
      drop_rate: dropout rate applied to the conv output.
      batch_norm: if True, batch-normalize the *input* before the conv.
      training: training flag for batch norm and dropout.
      name: base name for the batch-norm sub-layer (suffix '_bn').
      reuse: variable-reuse flag forwarded to the batch-norm layer.

    Returns:
      The conv output after dropout.
    """
    if batch_norm:
        # Derive a BN name only when one was supplied.
        bn_name = name + '_bn' if name else None
        x = batch_norm_layer(x, training, name=bn_name, reuse=reuse)

    # Default (Glorot) initializers; truncated-normal variants kept for
    # reference:
    #wt_init = tf.truncated_normal_initializer(stddev=0.2)
    #bi_init = tf.truncated_normal_initializer(mean=BIAS_SHIFT,stddev=0.01)
    wt_init = None
    bi_init = None

    if regularize:
        wt_reg = WT_REG
        bi_reg = BI_REG
    else:
        wt_reg = None
        bi_reg = None

    # NOTE(review): name/reuse are deliberately NOT forwarded to the conv
    # itself (only to batch norm) to preserve the original variable
    # scoping -- confirm before changing.
    y = layers.conv1d(x,
                      n_out,
                      kernel_size,
                      strides=stride,
                      padding='same',
                      data_format='channels_last',
                      dilation_rate=1,
                      activation=activation,
                      use_bias=use_bias,
                      kernel_initializer=wt_init,
                      bias_initializer=bi_init,
                      # BUG FIX: wt_reg/bi_reg were computed above but never
                      # passed (both regularizers were hard-coded to None),
                      # so `regularize=True` previously had no effect.
                      kernel_regularizer=wt_reg,
                      bias_regularizer=bi_reg,
                      activity_regularizer=None,
                      trainable=True,
                      name=None,
                      reuse=None)
    y = layers.dropout(y, rate=drop_rate, training=training)
    return y
 def _expand1d(inputs, n_outputs):
     """Parallel 1x1 and 3x3 'expand' convolutions of a 1-D fire module,
     concatenated along the channel axis (SqueezeNet)."""
     w_ini, b_ini, r_ini = initializers()
     # Settings common to both expand branches.
     shared = dict(inputs=inputs,
                   filters=n_outputs,
                   strides=1,
                   activation=tf.nn.relu,
                   kernel_initializer=w_ini,
                   bias_initializer=b_ini,
                   kernel_regularizer=r_ini)
     with tf.variable_scope('expand'):
         e1x1 = conv1d(kernel_size=1, name='1x1', **shared)
         # 'SAME' padding keeps the 3x3 branch length-aligned with 1x1.
         e3x3 = conv1d(kernel_size=3, name='3x3', padding='SAME', **shared)
     return tf.concat([e1x1, e3x3], -1)
예제 #12
0
def shake2_with_max(inputs,
                    filters,
                    kernel_size,
                    stride,
                    training,
                    scope=None):
    """Shake-shake branch concatenated with a max-pool branch, then fused
    down to `filters` channels by a 1x1 conv followed by batch norm."""
    with tf.name_scope(scope):
        shake_branch = shake2(inputs, filters, kernel_size, stride, training)
        pool_branch = tf.layers.max_pooling1d(inputs,
                                              kernel_size,
                                              strides=stride,
                                              padding="valid")
        # Join on the channel axis, then fuse.
        merged = tf.concat([shake_branch, pool_branch], 2)
        fused = ly.conv1d(merged, filters, 1, activation=None, use_bias=False)
        return ly.batch_normalization(fused, training=training)
예제 #13
0
def conv_block_1d(x,
                  kernel_size,
                  filters,
                  strides,
                  activation,
                  training,
                  batch_norm=True):
    """Conv1D ('same' padding) -> nonlinearity -> optional batch norm.

    Note the nonlinearity is applied *before* batch norm in this block.
    `activation` is a case-insensitive key into the `nonlinear` table.
    """
    act_key = activation.lower()

    out = ly.conv1d(inputs=x,
                    filters=filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding='same')
    out = nonlinear[act_key](out)

    if batch_norm:
        out = ly.batch_normalization(out, training=training)
    return out
예제 #14
0
            test_set.append(
                test_tss_b[:,
                           int(r - 2 * stop_time -
                               inp_size // 2):int(r - 2 * stop_time +
                                                  inp_size // 2)].transpose())
        continue
    for i in np.arange(max(0, r - 1000), min(stop_time * 2, r + 1000)):
        tss_mask[0, int(i)] = 1  #normal(i,std=400)
# Stack the collected windows into one array: [n_windows, inp_size, 255].
test_set = np.stack(test_set)

tss_mask = tss_mask / np.max(tss_mask)
# NOTE(review): the line below immediately overwrites the normalized
# tss_mask computed above with an amplitude-based mask -- confirm which of
# the two definitions is actually intended.
tss_mask = amp.reshape(1, -1) / np.max(amp)
# Placeholders: x holds the input windows, y the per-window scalar target.
x = tf.placeholder(tf.float32, [None, inp_size, 255])
y = tf.placeholder(tf.float32, [None, 1, 1])
relu = tf.nn.relu
# Six identical conv(250, k=10, valid) + max-pool(100, stride 1) stages.
x1 = relu(layers.conv1d(x, 250, [10], padding='valid'))
x1 = layers.max_pooling1d(x1, 100, strides=1, padding='valid')
x1 = relu(layers.conv1d(x1, 250, [10], padding='valid'))
x1 = layers.max_pooling1d(x1, 100, strides=1, padding='valid')
x1 = relu(layers.conv1d(x1, 250, [10], padding='valid'))
x1 = layers.max_pooling1d(x1, 100, strides=1, padding='valid')
x1 = relu(layers.conv1d(x1, 250, [10], padding='valid'))
x1 = layers.max_pooling1d(x1, 100, strides=1, padding='valid')
x1 = relu(layers.conv1d(x1, 250, [10], padding='valid'))
x1 = layers.max_pooling1d(x1, 100, strides=1, padding='valid')
x1 = relu(layers.conv1d(x1, 250, [10], padding='valid'))
x1 = layers.max_pooling1d(x1, 100, strides=1, padding='valid')
# Final single-channel conv produces the raw prediction logits.
x1 = layers.conv1d(x1, 1, [10], padding='valid')
x1_prob = tf.nn.sigmoid(x1)
# loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(y,x1,pos_weight))
# Weighted squared error: positive targets are up-weighted by pos_weight.
loss = tf.reduce_mean(tf.square(y - x1) * (y * pos_weight + 1))
예제 #15
0
    def network(self, x, is_training):
        """Build the character CNN: embedding -> 3 conv+BN blocks -> 2 dense.

        Args:
            x: integer tensor of character ids fed to the embedding lookup
               (presumably [batch, seq_len] -- TODO confirm against caller).
            is_training: bool flag/tensor controlling batch normalization.

        Returns:
            dense2: [batch, output_embedding_size] log-softmax activations.
        """
        # Embedding Lookup 16
        # Embedding table lives on CPU; lookups are cheap there and this
        # avoids duplicating the table on the GPU.
        with tf.device('/cpu:0'), tf.name_scope("embedding"):

            # use_he_uniform:
            self.embedding_W = tf.get_variable(
                name='lookup_W',
                shape=[self.char_dict_size,
                       self.embedding_size],  # TODO bigger embedding?
                initializer=tf.keras.initializers.he_uniform())

            self.embedded_characters = tf.nn.embedding_lookup(
                params=self.embedding_W, ids=x, name='embed_lu')
            print("-" * 20)
            print("Embedded Lookup:", self.embedded_characters.get_shape())
            print("-" * 20)

        # Temp(First) Conv Layer
        with tf.variable_scope("temp_conv"):
            print("-" * 20)
            print("Convolutional Block 1")
            print("-" * 20)

            # NOTE(review): stddev derived from fan-in 1*64, but the convs
            # below use 128 filters -- confirm the intended fan-in.
            he_std = np.sqrt(2 / (1 * 64))

            conv1 = conv1d(
                inputs=self.embedded_characters,
                filters=128,
                padding='valid',
                kernel_size=3,
                strides=2,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(stddev=he_std))
            #conv1 = dropout(conv1, rate=0.5, training=is_training)
            conv1 = tf.layers.batch_normalization(inputs=conv1,
                                                  momentum=0.997,
                                                  epsilon=1e-5,
                                                  center=True,
                                                  scale=True,
                                                  training=is_training)

            print("Conv 1", conv1.get_shape())

            print("-" * 20)
            print("Convolutional Block 2")
            print("-" * 20)

            # Blocks 2 and 3 use the default stride of 1 (only block 1
            # downsamples).
            conv2 = conv1d(
                inputs=conv1,
                filters=128,
                padding='valid',
                kernel_size=3,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(stddev=he_std))
            conv2 = tf.layers.batch_normalization(inputs=conv2,
                                                  momentum=0.997,
                                                  epsilon=1e-5,
                                                  center=True,
                                                  scale=True,
                                                  training=is_training)

            print("Conv 2", conv2.get_shape())

            print("-" * 20)
            print("Convolutional Block 3")
            print("-" * 20)

            conv3 = conv1d(
                inputs=conv2,
                filters=128,
                padding='valid',
                kernel_size=3,
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(stddev=he_std))
            conv3 = tf.layers.batch_normalization(inputs=conv3,
                                                  momentum=0.997,
                                                  epsilon=1e-5,
                                                  center=True,
                                                  scale=True,
                                                  training=is_training)

            print("Conv 3", conv3.get_shape())

        flatten1 = tf.contrib.layers.flatten(conv3)

        print("-" * 20)
        print("Dense1")
        print("-" * 20)

        print("flatten", flatten1.get_shape())

        dense1 = dense(flatten1, 512, activation=tf.nn.relu, name='dense1')
        print("dense1", dense1.get_shape())

        # Log-softmax output head sized to the target embedding.
        dense2 = dense(dense1,
                       self.output_embedding_size,
                       activation=tf.nn.log_softmax,
                       name='dense2')  #
        print("dense2", dense2.get_shape())

        return dense2
예제 #16
0
def vgg_1d(input_tensor, initializer=tf.initializers.he_normal(), reuse=None):
    """VGG-style 1-D conv tower ending in a full-width conv "dense" layer.

    Five stages of three 'same'-padded convolutions each; only the first
    conv of a stage downsamples (stride 2). Layer names (conv11..conv53)
    and histogram-summary names are identical to the original unrolled
    implementation, so checkpoints remain compatible.

    :param input_tensor: [batch_size, M, H]
    :param initializer: kernel initializer shared by all conv layers.
    :param reuse: variable-reuse flag; also suppresses summaries when set.
    :return: logits tensor of shape [batch_size]
    """

    def _stage(net, stage, width_mult, first_kernel, first_stride):
        # Three convs; conv i==1 may widen its kernel and downsample.
        for i in (1, 2, 3):
            net = layers.conv1d(net,
                                width_mult * config.spectral_hidden_dimension,
                                kernel_size=first_kernel if i == 1 else 3,
                                strides=first_stride if i == 1 else 1,
                                padding='same',
                                kernel_initializer=initializer,
                                activation=activation_func,
                                kernel_regularizer=kernel_regularizer,
                                name="conv%d%d" % (stage, i),
                                reuse=reuse)
        if (not reuse) and config.histo_summary_flag:
            tf.summary.histogram("output_conv%d3" % (stage,), net)
        return net

    net = _stage(input_tensor, 1, 1, first_kernel=7, first_stride=2)
    # [2000, 64]
    net = _stage(net, 2, 2, first_kernel=3, first_stride=2)
    # [1000, 128], 0.15 params
    net = _stage(net, 3, 4, first_kernel=3, first_stride=2)
    # [500, 256], 0.6M params
    net = _stage(net, 4, 4, first_kernel=3, first_stride=2)
    # [250, 256], 0.6M
    net = _stage(net, 5, 4, first_kernel=3, first_stride=2)
    # [125, 256], 0.6M

    # finally a fully connected layer, implemented as a valid conv whose
    # kernel spans the whole remaining temporal extent.
    k_size = net.get_shape()[1]
    net = layers.conv1d(net,
                        1,
                        kernel_size=(k_size, ),
                        strides=1,
                        padding='valid',
                        kernel_initializer=initializer,
                        activation=None,
                        kernel_regularizer=kernel_regularizer,
                        name="conv_final",
                        reuse=reuse)
    logits = tf.squeeze(net, axis=[1, 2])

    return logits
예제 #17
0
def misconception_model_2(inputs,
                          filters_list,
                          kernel_size,
                          strides_list,
                          training,
                          objective_functions,
                          sub_filters=128,
                          sub_layers=2,
                          dropout_rate=0.5):
    """ A misconception tower with learned soft-attention pooling heads.

  Like `misconception_model`, but instead of flattening, each objective
  head weights its per-timestep features by a shared softmax `selector`
  computed over the time axis, then average-pools.

  Args:
    inputs: a tensor of size [batch_size, width, depth].
    filters_list: output filter count for each misconception layer.
    kernel_size: the width of the conv and pooling filters to apply.
    strides_list: per-layer stride, zipped with filters_list.
    training: whether the network is training (drives BN and dropout).
    objective_functions: a list of objective functions to add to the top of
                         the network; each must expose .build(net).
    sub_filters: channel count of the per-objective 1x1 conv stack.
    sub_layers: number of 1x1 conv layers in each objective head.
    dropout_rate: dropout applied just before each head.

  Returns:
    (outputs, layers): one output per objective function, plus the list of
    intermediate tower activations (inputs first).
  """
    layers = []
    net = inputs
    layers.append(net)
    for filters, strides in zip(filters_list, strides_list):
        net = misconception_with_bypass(net, filters, kernel_size, strides,
                                        training)
        layers.append(net)
    # Shared attention: reduce to one score per timestep, softmax over time.
    onet = net
    for _ in range(sub_layers - 1):
        onet = ly.conv1d(onet, sub_filters, 1, activation=None, use_bias=False)
        onet = ly.batch_normalization(onet, training=training)
        onet = tf.nn.relu(onet)
    onet = ly.conv1d(onet, sub_filters, 1, activation=tf.nn.relu)
    snet = ly.conv1d(onet, 1, 1, activation=tf.nn.relu)[:, :, 0]
    selector = tf.expand_dims(tf.nn.softmax(snet), 2)

    outputs = []
    for ofunc in objective_functions:
        # Per-objective 1x1 conv stack on top of the shared tower.
        onet = net
        for _ in range(sub_layers - 1):
            onet = ly.conv1d(onet,
                             sub_filters,
                             1,
                             activation=None,
                             use_bias=False)
            onet = ly.batch_normalization(onet, training=training)
            onet = tf.nn.relu(onet)
        onet = ly.conv1d(onet, sub_filters, 1, activation=tf.nn.relu)

        # Attention-weighted global average pooling over the time axis.
        onet = onet * selector
        n = int(onet.get_shape().dims[1])
        onet = ly.average_pooling1d(onet, n, n)
        onet = ly.flatten(onet)
        # Dropout just before the objective head.
        onet = ly.dropout(onet, training=training, rate=dropout_rate)
        outputs.append(ofunc.build(onet))

    return outputs, layers
예제 #18
0
def deepLoco_decoder(x, img_channels=1):
    '''2-D conv encoder + dense layer + 1-D residual head producing
    per-source weights and positions.

    :param x: input tensor, shape [?,nx,ny,img_channels]
    :param img_channels: number of image channels (kept for interface
        compatibility; the convolutions below read `x` directly).
    :return: tensor of shape [?, 256, 3] -- channel 0 holds weights
        (ReLU), channels 1-2 hold positions squashed to (0, 1) by sigmoid.
    '''
    # NOTE(review): the original also reshaped x into `inputs` and computed
    # batch_size/nx/ny, but none of those were ever used (conv1 reads `x`
    # directly); that dead code has been removed.

    print("input shape:",tf.shape(x))
    # Encoder: three conv groups, each ending in a strided downsampling conv.
    conv1 = conv2d(inputs=x, filters=16, kernel_size=5, activation = tf.nn.relu, padding = 'same')
    conv1 = conv2d(inputs=conv1, filters=16, kernel_size=5, activation = tf.nn.relu, padding = 'same')
    conv1 = conv2d(inputs=conv1, filters=64, kernel_size=5, activation = tf.nn.relu, strides= 2, padding = 'same')
    print ("conv1 shape:",tf.shape(conv1))

    conv2 = conv2d(inputs=conv1, filters=64, kernel_size=5, activation = tf.nn.relu, padding = 'same')
    conv2 = conv2d(inputs=conv2, filters=64, kernel_size=5, activation = tf.nn.relu, padding = 'same')
    conv2 = conv2d(inputs=conv2, filters=256, kernel_size=3, activation = tf.nn.relu, strides=2, padding = 'same')
    print ("conv2 shape:",tf.shape(conv2))

    conv3 = conv2d(inputs=conv2, filters=256, kernel_size=3, activation = tf.nn.relu, padding = 'same')
    conv3 = conv2d(inputs=conv3, filters=256, kernel_size=3, activation = tf.nn.relu, padding = 'same')
    conv3 = conv2d(inputs=conv3, filters=256, kernel_size=3, activation = tf.nn.relu, strides=4, padding = 'same')
    print ("conv3 shape:",tf.shape(conv3))

    # Flatten via reshape (requires static spatial dims on conv3).
    tensor_shape = conv3.get_shape()
    print(tensor_shape)
    flat1 = tf.reshape(conv3, shape=[-1, tensor_shape[1]*tensor_shape[2]*tensor_shape[3]])
    print("flat ", flat1.get_shape())
    dense1 = tf.layers.dense(inputs=flat1, units=2048, activation=tf.nn.relu)
    print("dense1 ",dense1.get_shape())

    # Treat the 2048-d embedding as a length-2048, 1-channel sequence.
    reshape1 = tf.reshape(dense1, shape = [-1, 2048, 1])

    # Two residual refinement blocks: conv -> LeakyReLU -> BN, added to the
    # shortcut, then LeakyReLU -> BN again.
    shortcut = reshape1
    res1 = conv1d(inputs=reshape1, filters=1, kernel_size=3, strides=1, padding='same')
    res1 = LeakyReLU()(res1)
    res1 = batch_normalization(inputs=res1)
    add1 = tf.add(shortcut,res1)
    add1 = LeakyReLU()(add1)
    add1 = batch_normalization(inputs=add1)

    shortcut = add1
    res2 = conv1d(inputs=add1, filters=1, kernel_size=3, strides=1, padding='same')
    res2 = LeakyReLU()(res2)
    res2 = batch_normalization(inputs=res2)
    add2 = tf.add(shortcut,res2)
    add2 = LeakyReLU()(add2)
    add2 = batch_normalization(inputs=add2)

    # Stride-8 convs reduce 2048 timesteps to 256 candidate sources.
    weights = conv1d(inputs=add2, filters=1, kernel_size=3, strides=8, padding='same', activation=tf.nn.relu)
    print("weights ", weights.get_shape())

    positions = conv1d(inputs=add2, filters=2, kernel_size=3, strides=8, padding='same', activation=tf.nn.sigmoid)
    print("positions ",positions.get_shape())

    output = tf.concat([weights,positions], 2)
    print(output.get_shape())
    return output