Example 1
    def g_tf(args, reuse=False):
        """TensorFlow map from image to pi parameters."""
        eps = 1e-3
        shapes = [info[port]['shape'] for port in inv_arrow.param_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)
        from tflearn.layers import conv_2d, fully_connected

        tf.summary.image("g_tf_output", inp)
        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        inp = conv_2d(inp,
                      nb_filter=options['nsteps'],
                      filter_size=1,
                      activation="sigmoid")
        out = []
        for i, shape in enumerate(shapes):
            this_inp = inp[:, :, :, i:i + 1]
            if shape[1] == width * height:
                this_inp = tf.reshape(this_inp,
                                      (options['batch_size'], -1)) + eps
                out.append(this_inp)
            else:
                r_length = int(np.ceil(np.sqrt(shape[1])))
                rs = tf.image.resize_images(this_inp, (r_length, r_length))
                rs = tf.reshape(rs, (options['batch_size'], -1)) + eps
                out.append(rs[:, 0:shape[1]])
        return out
Example 2
from tflearn.layers.conv import conv_2d


def color_transform_layers(inputs):
    # Two 1x1 convolutions, each followed by a maxout activation
    # (`max_out` is not a TFLearn built-in; a sketch follows this example).
    net = conv_2d(inputs, 20, 1)
    net = max_out(net, 10)

    net = conv_2d(net, 6, 1)
    net = max_out(net, 3)

    return net
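TFLearn has no built-in max_out; a minimal sketch of a maxout activation consistent with the calls above is given here. The function name matches the usage, but the implementation and argument names are assumptions, not the original author's code.

import tensorflow as tf

def max_out(inputs, num_units):
    # Maxout (Goodfellow et al., 2013): split the last (channel) axis into
    # `num_units` groups and keep each group's maximum.
    num_channels = inputs.get_shape().as_list()[-1]
    if num_channels % num_units != 0:
        raise ValueError('channels (%d) not divisible by num_units (%d)'
                         % (num_channels, num_units))
    new_shape = tf.concat(
        [tf.shape(inputs)[:-1], [num_units, num_channels // num_units]], axis=0)
    return tf.reduce_max(tf.reshape(inputs, new_shape), axis=-1)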
Example 3
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, flatten, fully_connected
from tflearn.layers.estimator import regression


def conv(classes, input_shape):
    model = input_data(input_shape, name="input")
    model = conv_2d(model, 32, (3, 3), activation='relu')
    model = conv_2d(model, 64, (3, 3), activation='relu')
    model = max_pool_2d(model, (2, 2))
    model = dropout(model, 0.25)
    model = flatten(model)
    model = fully_connected(model, 128, activation='relu')
    model = dropout(model, 0.5)
    model = fully_connected(model, classes, activation='softmax')
    model = regression(model,
                       optimizer='adam',
                       learning_rate=0.001,
                       loss='categorical_crossentropy',
                       name='target')
    # Wrap the graph in a trainable TFLearn model.
    model = tflearn.DNN(model, tensorboard_verbose=3)
    return model
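A minimal training sketch for this builder, assuming 28x28 grayscale inputs and one-hot labels; X, Y and the run id are illustrative placeholders, not part of the original example.

model = conv(classes=10, input_shape=[None, 28, 28, 1])
model.fit({'input': X}, {'target': Y},
          n_epoch=10, validation_set=0.1, batch_size=64,
          show_metric=True, run_id='conv_example')
model.save('conv_example.tflearn')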
Example 4
from tflearn.layers.conv import conv_2d


def conv_2d_layer(input, n_filters, stride):
    # 3x3 convolution with ELU activation and 'same' padding.
    return conv_2d(input,
                   n_filters,
                   3,
                   strides=stride,
                   padding='same',
                   activation='elu',
                   bias_init='zeros',
                   scope=None,
                   name='Conv2D')
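A hypothetical call site, assuming an NHWC placeholder; the shapes are illustrative.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
net = conv_2d_layer(x, n_filters=16, stride=1)
net = conv_2d_layer(net, n_filters=32, stride=2)  # halves the spatial resolution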
Example 5
import tensorflow as tf
from tflearn.activations import relu
from tflearn.layers.conv import conv_2d, residual_block
from tflearn.layers.core import fully_connected
from tflearn.layers.normalization import batch_normalization


def res18_forward(incoming, scope=None, name="resnet_18", reuse=False):
    with tf.variable_scope(scope, default_name=name, reuse=reuse):
        # Stem convolution, then four residual stages of increasing width.
        network = conv_2d(incoming, 32, 5, 2, name="conv1")
        network = residual_block(network, 2, 32, downsample=True, batch_norm=True, name="rb1")
        network = residual_block(network, 2, 64, downsample=True, batch_norm=True, name="rb2")
        network = residual_block(network, 2, 128, downsample=True, batch_norm=True, name="rb3")
        network = residual_block(network, 2, 256, downsample=True, batch_norm=True, name="rb4")
        network = relu(batch_normalization(fully_connected(network, 256, name="fc1")))
        network = fully_connected(network, 5, name="fc2")

    return network
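A usage sketch continuing the example above, under assumed input dimensions (64x64 RGB); only the five-way logits from fc2 are returned, and a second call with reuse=True shares the weights.

images = tf.placeholder(tf.float32, [None, 64, 64, 3], name="images")
logits = res18_forward(images)                    # creates the variables
logits_eval = res18_forward(images, reuse=True)   # reuses the same weights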
Example 6
    def g_tf(args, reuse=False):
        """TensorFlow map from image to pi parameters."""
        eps = 1e-3
        shapes = [info[port]['shape'] for port in fwd.in_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)
        from tflearn.layers import conv_2d, fully_connected
        tf.summary.image("g_tf_output", inp)

        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        ratio = width / options['res']
        inp = conv_2d(inp,
                      nb_filter=options['res'],
                      filter_size=1,
                      strides=int(ratio),
                      activation="sigmoid")
        return [tf.reshape(inp, (options['batch_size'], -1))]
Example 7
import tensorflow as tf
from tflearn.activations import leaky_relu, relu
from tflearn.layers.conv import conv_2d, residual_block
from tflearn.layers.core import dropout, fully_connected


def code_classifier_forward(config,
                            incoming=None,
                            image=None,
                            scope="code_classifier",
                            name=None,
                            reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        code_output = leaky_relu(fully_connected(incoming, 512))

        output = conv_2d(image, 32, 5, 2, name="conv1")
        output = residual_block(output,
                                2,
                                32,
                                downsample=True,
                                batch_norm=True,
                                name="rb1")
        output = residual_block(output,
                                1,
                                64,
                                downsample=True,
                                batch_norm=True,
                                name="rb2")
        output = leaky_relu(
            fully_connected(
                tf.reshape(output, [config.batch_size, 4 * 4 * 64]), 1024))

        prod = tf.matmul(code_output[:, :, None], output[:, None, :])
        prob = tf.nn.softmax(prod)
        prob2 = tf.nn.softmax(tf.transpose(prod, perm=[0, 2, 1]))

        output = tf.concat([
            code_output,
            tf.matmul(prob, output[:, :, None])[:, :, 0],
            tf.matmul(prob2, code_output[:, :, None])[:, :, 0]
        ],
                           axis=-1)
        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 256))
        output = dropout(output, 0.8)

        output = fully_connected(output, 5)

    return output
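A hypothetical call site; the code-vector width and the single-channel 32x32 image size are assumptions (32 -> 16 -> 8 -> 4 matches the 4 * 4 * 64 reshape inside), and config is any object exposing batch_size.

code = tf.placeholder(tf.float32, [config.batch_size, 128])
image = tf.placeholder(tf.float32, [config.batch_size, 32, 32, 1])
logits = code_classifier_forward(config, incoming=code, image=image)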