Example #1
    def conv2d_bn(self,
                  x,
                  nb_filter,
                  num_row,
                  num_col,
                  padding='SAME',
                  strides=(1, 1),
                  use_bias=True):
        # l2 and VarianceScaling are assumed to be tf.keras.regularizers.l2 and
        # tf.keras.initializers.VarianceScaling; the initializer keywords are
        # adjusted to that signature (scale / mode / distribution).
        x = tf.layers.Conv2D(nb_filter, (num_row, num_col),
                             strides=strides,
                             padding=padding,
                             use_bias=use_bias,
                             kernel_regularizer=l2(0.00004),
                             kernel_initializer=VarianceScaling(
                                 scale=2.0,
                                 mode='fan_in',
                                 distribution='normal',
                                 seed=None))(x)

        x = tf.layers.batch_normalization(inputs=x,
                                          axis=self.channel_axis,
                                          momentum=0.9997,
                                          scale=False)
        x = tf.nn.relu(x)
        return x
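
A minimal usage sketch for the helper above, assuming it is a method of a model class (called model here purely for illustration) and that a TF 1.x placeholder-based graph is being built; the input shape and filter counts are illustrative:

# Hypothetical call site; instance name, input shape and filter counts are illustrative.
images = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
net = model.conv2d_bn(images, 32, 3, 3, strides=(2, 2), use_bias=False)
net = model.conv2d_bn(net, 64, 3, 3)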
Example #2
def darkconv(*args, **kwargs):
    # Darknet-style block: conv2d, then (unless onlyconv) batch norm, a separate
    # bias, and leaky ReLU. conv2d, batch_norm, bias_add, leaky_relu, arg_scope,
    # l2 and variance_scaling_initializer are assumed to come from
    # tf.contrib.layers / tf.contrib.slim or the surrounding module.
    scope = kwargs.pop('scope', None)
    onlyconv = kwargs.pop('onlyconv', False)
    with tf.variable_scope(scope):
        conv_kwargs = {
            'padding': 'SAME',
            'activation_fn': None,
            'weights_initializer': variance_scaling_initializer(1.53846),
            'weights_regularizer': l2(5e-4),
            'biases_initializer': None,
            'scope': 'conv'
        }
        if onlyconv:
            # Conv-only head: drop the None override so conv2d keeps its
            # default bias (BN and the separate bias_add are skipped below).
            conv_kwargs.pop('biases_initializer')
        with arg_scope([conv2d], **conv_kwargs):
            x = conv2d(*args, **kwargs)
            if onlyconv:
                return x
            x = batch_norm(x,
                           decay=0.99,
                           center=False,
                           scale=True,
                           epsilon=1e-5,
                           scope='bn')
            x = bias_add(x, scope='bias')
            x = leaky_relu(x, alpha=0.1, name='lrelu')
            return x
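
A short usage sketch for darkconv, assuming conv2d follows the slim-style (inputs, num_outputs, kernel_size) signature that arg_scope wraps; the input shape, filter counts and scope names are illustrative:

inputs = tf.placeholder(tf.float32, shape=(None, 416, 416, 3))
net = darkconv(inputs, 32, 3, scope='conv1')                 # conv + BN + bias + leaky ReLU
net = darkconv(net, 255, 1, onlyconv=True, scope='linear')   # bare 1x1 convolution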
Example #3
def darknet_conv2d_bn_leaky(*args, **kwargs):
    # conv2d and batch_normalization are assumed to be tf.layers.conv2d and
    # tf.layers.batch_normalization; l2 is the usual kernel-regularizer factory.
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4), 'use_bias': False}
    darknet_conv_kwargs['padding'] = ('valid' if kwargs.get('strides') == (2, 2)
                                      else 'same')
    darknet_conv_kwargs.update(kwargs)

    conv1 = conv2d(*args, **darknet_conv_kwargs)
    conv1_bn = batch_normalization(conv1)
    conv1_bn_leaky = tf.nn.leaky_relu(conv1_bn, alpha=0.1)
    return conv1_bn_leaky
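
A short usage sketch, assuming conv2d takes tf.layers.conv2d-style (inputs, filters, kernel_size) arguments; shapes and filter counts are illustrative:

x = tf.placeholder(tf.float32, shape=(None, 416, 416, 3))
x = darknet_conv2d_bn_leaky(x, 32, (3, 3))                   # 'same' padding
x = darknet_conv2d_bn_leaky(x, 64, (3, 3), strides=(2, 2))   # stride 2 switches to 'valid' padding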
Example #4
    print("(Again for the sake of expediency, we used a stripped down")
    print("version of our model with only one recurrent layer)")
    tf.reset_default_graph()

    X = tf.placeholder(tf.float32, shape=(None, n_steps, n_inputs))
    y = tf.placeholder(tf.int64, shape=(None,))
    seq_length = tf.placeholder(tf.int64, shape=(None,))

    # GRUCell and DropoutWrapper are assumed to come from tf.nn.rnn_cell
    # (or tf.contrib.rnn); the keep probability is passed explicitly.
    cell_factory = GRUCell(num_units=n_neurons)
    cell_drop = DropoutWrapper(cell_factory, input_keep_prob=k_prob)
    rnn_outputs, states = tf.nn.dynamic_rnn(cell_drop,
                                            X,
                                            dtype=tf.float32,
                                            sequence_length=seq_length)

    # fc and l2 are assumed to be tf.contrib.layers.fully_connected and
    # tf.contrib.layers.l2_regularizer; arg_scope applies the regularizer to both layers.
    with arg_scope([fc], weights_regularizer=l2(reg_param)):
        hidden = fc(states, n_hidden)
        logits = fc(hidden, n_outputs, activation_fn=None)

    # 'softmax' is assumed to alias the sparse cross-entropy op; written out here.
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                              logits=logits)
    base_loss = tf.reduce_mean(xentropy)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    cost = tf.add_n([base_loss] + reg_losses)

    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_op = optimizer.minimize(cost)

    correct = tf.nn.in_top_k(logits, y, 2)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    init = tf.global_variables_initializer()
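
A minimal sketch of running one training step on this graph; X_batch, y_batch and seq_len_batch are illustrative mini-batch arrays, and the hyperparameters used above are assumed to be defined:

with tf.Session() as sess:
    init.run()
    # One gradient step; accuracy is evaluated with the same feeds.
    sess.run(training_op, feed_dict={X: X_batch, y: y_batch,
                                     seq_length: seq_len_batch})
    acc = accuracy.eval(feed_dict={X: X_batch, y: y_batch,
                                   seq_length: seq_len_batch})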