Code example #1
def model(x, y, batch_size, is_training=True, reuse=None):
    with tf.variable_scope('model', reuse=reuse):
        x_tensor = tf.reshape(x, [-1, 28, 28, 1])

        # Localization network: a small fc stack that regresses the 6 affine
        # parameters consumed by the spatial transformer below.
        fc1 = fc(x, 20, is_training, reuse, name='fc1', activation=None)
        fc1 = tf.tanh(fc1)
        fc1 = dropout(fc1, is_training, drop_p=0.5)
        fc2 = fc(fc1, 6, is_training, reuse, use_bias=False, name='fc2')
        # Bias initialized to the identity transform [[1, 0, 0], [0, 1, 0]]
        # so the transformer starts as a no-op.
        initial = np.array([[1., 0, 0], [0, 1., 0]])
        initial = initial.astype('float32').flatten()
        fc2_b = tf.Variable(initial_value=initial, name='fc2/b')

        fc2 = tf.nn.bias_add(fc2, bias=fc2_b)
        fc2 = tf.tanh(fc2)
        h_trans = spatialtransformer(x_tensor, fc2, batch_size=batch_size)

        conv1 = conv2d(h_trans,
                       16,
                       is_training,
                       reuse,
                       activation=prelu,
                       name='conv1')
        conv2 = conv2d(conv1,
                       16,
                       is_training,
                       reuse,
                       stride=(2, 2),
                       activation=prelu,
                       name='conv2')
        fcmain = fc(conv2,
                    1024,
                    is_training,
                    reuse,
                    name='fc',
                    activation=prelu)
        fcmain = dropout(fcmain, is_training, drop_p=0.5)
        logits = fc(fcmain,
                    10,
                    is_training,
                    reuse,
                    name='logits',
                    activation=None)
        prediction = softmax(logits, 'prediction')

        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        opt = tf.train.AdamOptimizer()
        optimizer = opt.minimize(loss)
        # grads = opt.compute_gradients(loss, [fc2_b])

        correct_prediction = tf.equal(tf.argmax(prediction, 1),
                                      tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

        return accuracy, loss, optimizer
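
These snippets lean on a shared set of helper layers (fc, conv2d, dropout, softmax, spatialtransformer) rather than raw TensorFlow ops, apparently in the style of the tefla library. For orientation, a TF1-style training loop over this graph might look like the sketch below; the placeholder shapes and the `mnist` feeder are assumptions for illustration, not part of the original code.

# Usage sketch (assumptions: MNIST-shaped placeholders, hypothetical `mnist` feeder).
import tensorflow as tf

batch_size = 128
x = tf.placeholder(tf.float32, [None, 784], name='x')
y = tf.placeholder(tf.float32, [None, 10], name='y')
accuracy, loss, optimizer = model(x, y, batch_size)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        batch_x, batch_y = mnist.train.next_batch(batch_size)  # hypothetical feeder
        _, loss_val = sess.run([optimizer, loss],
                               feed_dict={x: batch_x, y: batch_y})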
Code example #2
def model(inputs, is_training, reuse, num_classes=2):
    common_args = common_layer_args(is_training, reuse)
    conv1 = conv2d(inputs, 32, name='conv1', activation=prelu, **common_args)
    conv1 = conv2d(conv1, 32, name='conv2', activation=prelu, **common_args)
    fc1 = fc(conv1, num_classes, name='logits', **common_args)
    prediction = softmax(fc1, name='prediction', **common_args)
    return end_points(is_training)
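
Note that `prediction` is never returned directly; like most of the examples here, the function ends with `end_points(is_training)`. The layer helpers evidently register their outputs in a shared collection as a side effect, and `end_points` reads that collection back. A minimal sketch of such a registry, purely as an assumption about what the library does, not its actual code:

# Minimal sketch (assumption) of the layer-registry pattern implied by
# common_layer_args / register_to_collections / end_points.
_TRAIN_POINTS, _EVAL_POINTS = {}, {}

def common_layer_args(is_training, reuse):
    # Shared kwargs threaded through every layer call.
    return {'is_training': is_training, 'reuse': reuse}

def register_to_collections(tensor, name, is_training=True, **kwargs):
    # Layers record their named outputs as a side effect.
    (_TRAIN_POINTS if is_training else _EVAL_POINTS)[name] = tensor
    return tensor

def end_points(is_training):
    # Return everything registered while building this graph.
    return _TRAIN_POINTS if is_training else _EVAL_POINTS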
Code example #3
def model(x, is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x = lstm(x, 34, reuse, is_training)
    logits = fc(x, 2, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Code example #4
def model(x, is_training, reuse, num_classes=2, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x = bidirectional_rnn(x, LSTMCell(128, reuse), LSTMCell(128, reuse),
                          **common_args)
    logits = fc(x, num_classes, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Code example #5
def model(input_data, target, num_hidden=34, is_training=True, reuse=None):
    cell = LSTMCell(num_hidden, reuse)
    val, _ = tf.nn.dynamic_rnn(cell, input_data, dtype=tf.float32)
    # Reorder to [time, batch, features] and take the last time step's output.
    val = tf.transpose(val, [1, 0, 2])
    last = tf.gather(val, int(val.get_shape()[0]) - 1)
    logit = fc(last, int(target.get_shape()[1]), is_training, reuse)
    prediction = tf.nn.softmax(logit)
    # Cross-entropy with clipping to avoid log(0); see the numerically
    # stable variant below.
    loss = -tf.reduce_sum(
        target * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
    optimizer = tf.train.AdamOptimizer()
    train_op = optimizer.minimize(loss)
    error = tf.not_equal(tf.argmax(target, 1), tf.argmax(prediction, 1))
    error = tf.reduce_mean(tf.cast(error, tf.float32))
    return prediction, error, train_op
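
The loss above is a hand-rolled cross-entropy that clips the softmax output to avoid log(0). An equivalent and numerically stable formulation works on the raw logits instead:

# Numerically stable equivalent of the clipped cross-entropy above.
loss = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=target))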
Code example #6
def model(x, is_training, reuse, num_classes=2, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x1 = conv1d(x, 128, name='conv1_1', **common_args)
    x2 = conv1d(x, 128, filter_size=4, name='conv1_2', **common_args)
    x3 = conv1d(x, 128, filter_size=5, name='conv1_3', **common_args)
    x = merge([x1, x2, x3], 'concat', axis=1)
    x = lstm(x, 384, reuse, is_training)
    x = dropout(x, drop_p=0.3, **common_args)
    logits = fc(x, num_classes, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Code example #7
def model(x, is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x1 = conv1d(x, 128, name='conv1_1', **common_args)
    x2 = conv1d(x, 128, filter_size=4, name='conv1_2', **common_args)
    x3 = conv1d(x, 128, filter_size=5, name='conv1_3', **common_args)
    x = merge([x1, x2, x3], 'concat', axis=1)
    x = tf.expand_dims(x, 2)
    x = global_max_pool(x)
    x = dropout(x, drop_p=0.3, **common_args)
    logits = fc(x, 2, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
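
Examples #6 and #7 share the same front end: three conv1d branches with different filter sizes over the embedded sequence, merged by concatenation, in the spirit of Kim-style multi-filter text CNNs. They differ only in the readout: #6 feeds the merged features to an LSTM, while #7 reduces them with a global max pool.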
Code example #8
File: alexnet.py  Project: openAGI/models
def model(inputs,
          is_training,
          reuse,
          num_classes=5,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          name='alexnet_v2',
          **kwargs):
    """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed and change the initializers from
        random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    name: Optional name for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    logit_args = make_args(activation=None,
                           w_init=initz.he_normal(scale=1),
                           **common_args)
    pred_args = make_args(activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)

    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'alexnet_v2', [inputs]):
        net = conv2d(inputs,
                     64,
                     filter_size=(11, 11),
                     stride=(4, 4),
                     name='conv1',
                     **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool1', **pool_args)
        net = conv2d(net, 192, filter_size=(5, 5), name='conv2', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool2', **pool_args)
        net = conv2d(net, 384, name='conv3', **conv_args)
        net = conv2d(net, 384, name='conv4', **conv_args)
        net = conv2d(net, 256, name='conv5', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool5', **pool_args)

        # Use conv2d instead of fully_connected layers.
        net = conv2d(net, 4096, filter_size=(5, 5), name='fc6', **conv_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout6',
                      **common_args)
        net = conv2d(net, 4096, filter_size=(1, 1), name='fc7', **conv_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout7',
                      **common_args)
        net = global_avg_pool(net)
        logits = fc(net, num_classes, name='logits', **logit_args)

        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
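
As elsewhere, the tensors themselves are not returned; a caller would pull them out of the end-points dict. A usage sketch (the 224x224 shape follows the docstring; the dict key is an assumption matching name='predictions'):

# Usage sketch (assumption): build the graph, then fetch the registered
# 'predictions' end point.
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
points = model(images, is_training=False, reuse=None, num_classes=5)
predictions = points['predictions']  # assumed key, from name='predictions'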
Code example #9
def model(height,
          width,
          num_actions,
          is_training=False,
          reuse=None,
          name=None):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    logits_args = make_args(activation=None,
                            w_init=initz.he_normal(scale=1),
                            **common_args)
    fc_args = make_args(activation=prelu,
                        w_init=initz.he_normal(scale=1),
                        **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    with tf.variable_scope(name):
        state = register_to_collections(tf.placeholder(
            shape=[None, 4, height, width], dtype=tf.float32, name='state'),
                                        name='state',
                                        **common_args)
        # NCHW -> NHWC for the conv stack.
        state_perm = tf.transpose(state, perm=[0, 2, 3, 1])
        summary_ops = [
            tf.summary.image("states",
                             state[:, 0, :, :][..., tf.newaxis],
                             max_outputs=10,
                             collections='train')
        ]
        conv1_0 = conv2d(state_perm,
                         32,
                         filter_size=8,
                         stride=(1, 1),
                         name="conv1_0",
                         **conv_args)
        conv1_1 = conv2d(conv1_0,
                         64,
                         filter_size=8,
                         stride=(2, 2),
                         name="conv1_1",
                         **conv_args)
        pool = max_pool(conv1_1, filter_size=2, name="maxpool", **pool_args)
        conv2_0 = conv2d(pool,
                         128,
                         filter_size=4,
                         stride=2,
                         name="conv2_0",
                         **conv_args)
        conv2_1 = conv2d(conv2_0,
                         256,
                         filter_size=3,
                         stride=(2, 2),
                         name="conv2_1",
                         **conv_args)
        conv3_0 = conv2d(conv2_1,
                         256,
                         filter_size=4,
                         stride=1,
                         name="conv3_0",
                         **conv_args)
        conv3_1 = conv2d(conv3_0,
                         512,
                         filter_size=4,
                         stride=2,
                         name="conv3_1",
                         **conv_args)
        # Dueling
        value_hid = fc(conv3_1, 512, name="value_hid", **fc_args)
        adv_hid = fc(conv3_1, 512, name="adv_hid", **fc_args)

        value = fc(value_hid, 1, name="value", **logits_args)
        advantage = fc(adv_hid, num_actions, name="advantage", **logits_args)

        # Average Dueling
        Qs = value + (advantage -
                      tf.reduce_mean(advantage, axis=1, keep_dims=True))

        # action with highest Q values
        a = register_to_collections(tf.argmax(Qs, 1), name='a', **common_args)
        # Q value belonging to selected action
        Q = register_to_collections(tf.reduce_max(Qs, 1),
                                    name='Q',
                                    **common_args)
        summary_ops.append(tf.summary.histogram("Q", Q, collections='train'))
        # For training
        Q_target = register_to_collections(tf.placeholder(shape=[None],
                                                          dtype=tf.float32),
                                           name='Q_target',
                                           **common_args)
        actions = register_to_collections(tf.placeholder(shape=[None],
                                                         dtype=tf.int32),
                                          name='actions',
                                          **common_args)
        actions_onehot = tf.one_hot(actions,
                                    num_actions,
                                    on_value=1.,
                                    off_value=0.,
                                    axis=1,
                                    dtype=tf.float32)

        Q_tmp = tf.reduce_sum(tf.multiply(Qs, actions_onehot), axis=1)
        loss = register_to_collections(tf.reduce_mean(
            tf.square(Q_target - Q_tmp)),
                                       name='loss',
                                       **common_args)
        summary_ops.append(tf.summary.scalar("loss", loss,
                                             collections='train'))
        register_to_collections(summary_ops, name='summary_ops', **common_args)
        return end_points(is_training)
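
The "Average Dueling" step is the identifiable combination from the dueling-network architecture (Wang et al., 2016): Q(s, a) = V(s) + (A(s, a) - mean over a' of A(s, a')), which centers the advantage stream so that V(s) carries the state value. A tiny numeric check of that decomposition:

# Numeric sketch of the average-dueling combination used above:
# Q = V + (A - mean(A)), keeping V and A separately identifiable.
import numpy as np

value = np.array([[2.0]])                 # V(s), shape [batch, 1]
advantage = np.array([[1.0, -1.0, 0.0]])  # A(s, a), shape [batch, actions]
qs = value + (advantage - advantage.mean(axis=1, keepdims=True))
print(qs)  # [[3. 1. 2.]] -- the mean over actions equals V(s) = 2.0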