Example 1
import math

import numpy as np
import tensorflow as tf

# `modeling` (the module under test), `init()` (a variable-initialization
# helper) and `sess` (a shared tf.Session) are assumed to be provided by the
# surrounding test module.


def test_conv_module():
    img_height = 4
    img_width = 4
    x = tf.constant(np.array([[1, 2, 3, 4],
                              [4, 3, 2, 1],
                              [5, 6, 7, 8],
                              [1, 2, 3, 4]]), dtype='float32')
    # Add a batch dimension: shape becomes [1, img_height, img_width]
    x = tf.expand_dims(x, 0)

    filter_size = 2
    depth = 2
    stride = 2
    conv = modeling.conv(x, filter_size, depth, stride)
    conv_shape = conv.get_shape().as_list()
    assert conv_shape == [1,
                          math.ceil(float(img_height) / stride),
                          math.ceil(float(img_width) / stride),
                          depth]

    # Run the graph: init() initializes variables in the shared session `sess`.
    init()
    conv_val = sess.run(conv)
    assert conv_val.shape == (1,
                              math.ceil(float(img_height) / stride),
                              math.ceil(float(img_width) / stride),
                              depth)
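
The test above pins down only the call signature and output shape of modeling.conv; the helper's body is not shown. As a rough sketch (not the project's actual code), a TF 1.x implementation consistent with these assertions might look like the following; the rank-3 input handling, 'SAME' padding, variable names and initializers are all assumptions:

def conv(x, filter_size, depth, stride, name='conv'):
    # Hypothetical sketch; assumes `import tensorflow as tf` (TF 1.x) as above.
    # The callers pass a [batch, height, width] tensor, so add a channels axis.
    x = tf.expand_dims(x, -1)
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size, filter_size, 1, depth],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', [depth], initializer=tf.zeros_initializer())
        # 'SAME' padding gives ceil(height / stride) x ceil(width / stride),
        # which is what the shape assertions above expect.
        out = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(out, b))
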
Example 2
def logits(x):
    # `data`, `modeling` and NUM_CLASSES are assumed to be defined in the
    # surrounding module; `tf` is TensorFlow 1.x.
    hidden_sizes = [500, 350, 250, 230]

    with tf.name_scope('model'):
        x = tf.reshape(x, [data.BATCH_SIZE, data.IMG_HEIGHT, data.IMG_WIDTH])
        conv1 = modeling.conv(x, 5, 32, 2, name='conv')
        conv_shape = conv1.get_shape().as_list()

        # Flatten for fully-connected layers
        input_t = tf.reshape(
                conv1,
                [conv_shape[0],
                 conv_shape[1] * conv_shape[2] * conv_shape[3]])
        for i, hsize in enumerate(hidden_sizes):
            layer_no = i + 1
            input_t = modeling.hidden_layer('hidden%d' % layer_no,
                                            input_t, hsize)
        last_hidden = input_t

        logits = modeling.linear_softmax(last_hidden, NUM_CLASSES)

    # Give the output tensor a stable, well-known name in the graph.
    logits = tf.identity(logits, name='logits')

    return logits
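
Example 2 also relies on modeling.hidden_layer and modeling.linear_softmax, whose bodies are likewise not shown. A minimal TF 1.x sketch consistent with the call sites above (the scope names, initializers, ReLU activation, and the choice to return raw logits rather than apply softmax are assumptions, not the project's code):

def hidden_layer(name, x, size):
    # Hypothetical fully connected layer with ReLU activation.
    in_size = x.get_shape().as_list()[1]
    with tf.variable_scope(name):
        w = tf.get_variable('w', [in_size, size],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', [size], initializer=tf.zeros_initializer())
        return tf.nn.relu(tf.matmul(x, w) + b)


def linear_softmax(x, num_classes):
    # Hypothetical final projection to class logits; the softmax itself would
    # typically be folded into the loss (e.g. softmax cross-entropy on logits).
    in_size = x.get_shape().as_list()[1]
    with tf.variable_scope('linear_softmax'):
        w = tf.get_variable('w', [in_size, num_classes],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', [num_classes], initializer=tf.zeros_initializer())
        return tf.matmul(x, w) + b
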