Example #1
def discriminator_forward(config,
                          labels,
                          incoming,
                          scope="discriminator",
                          name=None,
                          reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        output = leaky_relu(
            batch_normalization(
                conv_2d(incoming, config.dim, 5, 2, name="conv1")), 0.2)
        output = leaky_relu(
            batch_normalization(
                conv_2d(output, 2 * config.dim, 5, 2, name="conv2")), 0.2)
        output = leaky_relu(
            batch_normalization(
                conv_2d(output, 4 * config.dim, 5, 2, name="conv3")), 0.2)
        output = tf.reshape(output, [-1, 4 * 4 * 4 * config.dim])

        output = fully_connected(output, 56 * config.dim, name="fc1_1")
        embed = fully_connected(labels, 8 * config.dim, name="fc1_2")

        output = leaky_relu(
            batch_normalization(tf.concat([output, embed], axis=-1)), 0.2)
        output = fully_connected(output, 8 * config.dim, name="fc2")
        output = batch_normalization(output)
        output = leaky_relu(output, 0.2)
        output = tf.reshape(fully_connected(output, 1, bias=False, name="fc3"),
                            [-1])

    return output
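A usage sketch (not from the original project): the wiring below assumes a hypothetical Config object exposing dim, 32x32 RGB image and one-hot label placeholders, and the tflearn-style helpers already imported for the snippet above. The second call reuses the variables created by the first, the usual pattern when scoring both real and generated batches.

# Hypothetical wiring for discriminator_forward; Config fields and placeholder
# shapes are assumptions, not taken from the source repository.
import tensorflow as tf

class Config:
    dim = 64
    batch_size = 32

config = Config()
images = tf.placeholder(tf.float32, [None, 32, 32, 3], name="images")
labels = tf.placeholder(tf.float32, [None, 10], name="labels")

d_real = discriminator_forward(config, labels, images)               # creates the variables
d_fake = discriminator_forward(config, labels, images, reuse=True)   # reuses them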
Example #2
def code_classifier_forward(config,
                            incoming=None,
                            image=None,
                            scope="code_classifier",
                            name=None,
                            reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        output = relu(fully_connected(incoming, 512))
        output1 = dropout(output, 0.8)

        print(config.batch_size, image.shape)
        output = relu(
            fully_connected(
                tf.reshape(image, [config.batch_size, 32 * 32 * 3]), 512))
        output2 = dropout(output, 0.8)

        output = tf.concat([output1, output2], axis=-1)

        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.5)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.8)

        output = fully_connected(output, 10)

    return output
Example #3
def disc_func(args, reuse=False):
    """Discriminator function"""
    # import pdb; pdb.set_trace()
    with tf.variable_scope("discriminator", reuse=reuse):
        inp = tf.concat(args, axis=2)
        inp = fully_connected(inp, 10, activation='elu')
        inp = fully_connected(inp, n_samples, activation='sigmoid')
        return [inp]
Example #4
def gen_func(args):
    """Generator function"""
    with tf.variable_scope("generator", reuse=False):
        inp = tf.concat(args, axis=1)
        inp = fully_connected(inp, 1, activation='elu')
        inp = batch_normalization(inp)
        inp = fully_connected(inp, 1, activation='elu')
        inp = batch_normalization(inp)
        return [inp]
Example #5
def disc_func(args, reuse=False):
    """Discriminator function"""
    # import pdb; pdb.set_trace()
    with tf.variable_scope("discriminator", reuse=reuse):
        inp = tf.concat(args, axis=2)
        # inp = tf.Print(inp, [inp[0]], message="inp to disc", summarize=100)
        # inp = fully_connected(inp, 20, activation='elu')
        inp = fully_connected(inp, 10, activation='elu')
        inp = fully_connected(inp, n_samples, activation='sigmoid')
        return [inp]
Example #6
def gen_func(args):
    """Generator function"""
    with tf.variable_scope("generator", reuse=False):
        # inp = tf.concat(args, axis=1)
        shapes = [info[port]['shape'] for port in inv.param_ports()]
        inp = args[0]
        inp = fully_connected(inp, 2, activation='elu')
        return [
            fully_connected(inp, shape[1], activation='elu')
            for shape in shapes
        ]
Example #7
def res18_forward(incoming, scope=None, name="resnet_18", reuse=False):
    with tf.variable_scope(scope, default_name=name, reuse=reuse):
        network = conv_2d(incoming, 32, 5, 2, name="conv1")
        network = residual_block(network, 2, 32, downsample=True, batch_norm=True, name="rb1")
        network = residual_block(network, 2, 64, downsample=True, batch_norm=True, name="rb2")
        network = residual_block(network, 2, 128, downsample=True, batch_norm=True, name="rb3")
        network = residual_block(network, 2, 256, downsample=True, batch_norm=True, name="rb4")
        network = relu(batch_normalization(fully_connected(network, 256, name="fc1")))
        network = fully_connected(network, 5, name="fc2")

    return network
Example #8
def disc_func(args):
    """Discriminator function"""
    with tf.variable_scope("discriminator", reuse=False):
        assert len(args) == 1
        inp = args[0]
        inp = fully_connected(inp, 5, activation='elu')
        # inp = batch_normalization(inp)
        inp = fully_connected(inp, 5, activation='elu')
        # inp = batch_normalization(inp)
        inp = fully_connected(inp, n_samples, activation='sigmoid')
        return [inp]
Example #9
    def __init__(self,
                 max_document_length,
                 num_classes=2,
                 num_characters=71,
                 num_blocks=None,
                 char_vec_size=16,
                 weight_decay=2e-4):
        self.input_text = layers.input_data((None, max_document_length))
        self.target_label = tf.placeholder(shape=(None, num_classes),
                                           dtype=tf.float32)

        embeded_text = layers.embedding(self.input_text, num_characters,
                                        char_vec_size)

        top_feature = embeded_text
        filters = 64
        if num_blocks is None or num_blocks[0] == 0:
            self.block = (2, 2, 2, 2)
        else:
            self.block = num_blocks

        for i, num_block in enumerate(self.block):
            if i > 0:
                filters *= 2
                top_feature = layers.max_pool_1d(top_feature,
                                                 3,
                                                 strides=2,
                                                 padding='same')
            for block_i in range(num_block):
                top_feature = self.conv_block(top_feature, filters)

        pooled_feature = layers.flatten(
            layers.custom_layer(top_feature, self.kmax_pool_1d))
        fc1 = layers.fully_connected(pooled_feature,
                                     2048,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=weight_decay)
        fc2 = layers.fully_connected(fc1,
                                     2048,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=weight_decay)
        self.probas = layers.fully_connected(fc2,
                                             num_classes,
                                             activation='softmax',
                                             regularizer='L2',
                                             weight_decay=weight_decay)
        self.train_op = layers.regression(self.probas,
                                          placeholder=self.target_label)
Example #10
def g(y, z):
    """Generator"""
    with tf.name_scope("generator"):
        with tf.variable_scope("generator"):
            # y = tf.expand_dims(y, 1)
            # inp = tf.concat([y, z], axis=1)
            inp = y
            inp = fully_connected(inp, 10, activation='elu')
            # inp = batch_normalization(inp)
            inp = fully_connected(inp, 10, activation='elu')
            # inp = batch_normalization(inp)
            inp = fully_connected(inp, x_len, activation='elu')
            # inp = batch_normalization(inp)
            return inp
Example #11
    def __init__(self, sequence_length, num_classes, embeddings, num_filters, l2_reg_lambda=0.0, dropout=None):
        self.input_text = layers.input_data((None, sequence_length), dtype=tf.int32)

        with tf.variable_scope('Embedding'):
            embeddings_var = tf.Variable(embeddings, name='W', dtype=tf.float32)
            embeddings_var = tf.concat([np.zeros((1, embeddings.shape[1])), embeddings_var[1:]], axis=0)
            self.embeded_text = tf.gather(embeddings_var, self.input_text)

        net = self.embeded_text

        self.mask = tf.expand_dims(tf.cast(tf.not_equal(self.input_text, 0), tf.float32), axis=2)
        if dropout is not None:
            dropout = tuple(map(float, dropout.split(',')))
        for num_filter in num_filters:
            net = layers.lstm(net, num_filter, return_seq=True, dropout=dropout)
            net = tf.transpose(tf.stack(net), (1, 0, 2))

        features = tf.reduce_sum(net * self.mask, axis=1) / (tf.reduce_sum(self.mask, axis=1) + 1e-5)
        
        self.probas = layers.fully_connected(features, num_classes, activation='softmax', regularizer='L2', weight_decay=l2_reg_lambda)
        optimizer = tflearn.optimizers.Adam(learning_rate=0.001)
        self.train_op = layers.regression(
            self.probas, 
            optimizer=optimizer,
            batch_size=128)
Example #12
    def transform_embedded_sequences(self, embedded_sequences):
        drop_1, drop_2 = self.dropout_rates
        net = dropout(embedded_sequences, drop_1)

        conv_blocks = []
        for sz in self.filter_sizes:
            conv = conv_1d(net,
                           nb_filter=self.num_filters,
                           filter_size=sz,
                           padding="valid",
                           activation="relu",
                           regularizer="L2")
            conv_blocks.append(conv)

        net = merge(conv_blocks, mode='concat',
                    axis=1) if len(conv_blocks) > 1 else conv_blocks[0]
        net = tf.expand_dims(net, 2)
        net = global_max_pool(net)
        net = dropout(net, drop_2)

        model_output = fully_connected(net,
                                       self.class_count,
                                       activation="softmax")

        return model_output
Example #13
    def __init__(self, sequence_length, num_classes, embeddings, num_filters, l2_reg_lambda=0.0, dropout=None, bn=False):
        self.input_text = layers.input_data((None, sequence_length), dtype=tf.int32)

        with tf.variable_scope('Embedding'):
            embeddings_var = tf.Variable(embeddings, name='W', dtype=tf.float32)
            embeddings_var = tf.concat([np.zeros((1, embeddings.shape[1])), embeddings_var[1:]], axis=0)
            self.embeded_text = tf.gather(embeddings_var, self.input_text)

        net = self.embeded_text
        for num_filter in num_filters:
            if bn:
                # , weights_init=tflearn.initializations.uniform(minval=-0.001, maxval=0.001)
                net = layers.conv_1d(net, num_filter, 3, padding='valid', activation='linear', bias=False)
                net = layers.batch_normalization(net)
                net = layers.activation(net, 'relu')
            else:
                net = layers.conv_1d(net, num_filter, 3, padding='valid', activation='relu', bias=True, regularizer='L2', weight_decay=l2_reg_lambda)

        if dropout is not None:
            net = layers.dropout(net, float(dropout))

        features = layers.flatten(layers.max_pool_1d(net, net.shape.as_list()[1], padding='valid'))
        self.probas = layers.fully_connected(features, num_classes, activation='softmax', regularizer='L2', weight_decay=l2_reg_lambda)
        # optimizer = tflearn.optimizers.Momentum(learning_rate=0.1, momentum=0.9, lr_decay=0.2, decay_step=1000, staircase=True)
        optimizer = tflearn.optimizers.Adam(learning_rate=0.001)
        self.train_op = layers.regression(
            self.probas,
            optimizer=optimizer,
            batch_size=128)
Example #14
def disc_func(args):
    """Discriminator function"""
    with tf.variable_scope("discriminator", reuse=False):
        assert len(args) == 1
        inp = args[0]
        l1 = fully_connected(inp, n_samples, activation='sigmoid')
        return [l1]
Example #15
def discriminator_forward(config,
                          incoming,
                          labels,
                          scope="discriminator",
                          name=None,
                          reuse=False):

    with tf.variable_scope(scope, name, reuse=reuse):
        output = leaky_relu(
            batch_normalization(
                conv_2d(incoming, config.dim, 5, 2, name="conv1")), 0.2)

        output = leaky_relu(
            batch_normalization(
                conv_2d(output, 2 * config.dim, 5, 2, name="conv2")), 0.2)

        output_shared = conv_2d(output,
                                2 * config.dim,
                                5,
                                2,
                                name="conv3_shared")
        output_cs = [
            conv_2d(output, 2 * config.dim, 5, 2, name="conv3_cs")
            for _ in range(5)
        ]

        output = tf.concat(output_cs + [output_shared], axis=-1)

        output = tf.reshape(output, [-1, 4 * 4 * 4 * config.dim])
        output = tf.reshape(fully_connected(output, 1, bias=False), [-1])

    return output
Example #16
def gen_func(args, reuse=False):
    """Generator function"""
    # import pdb; pdb.set_trace()
    with tf.variable_scope("generator", reuse=reuse):
        inp = tf.concat(args, axis=1)
        inp = fully_connected(inp, nitems, activation='elu')
        inps = tf.split(inp, axis=1, num_or_size_splits=nitems)
        return inps
Example #17
def g_pi(y, z):
    """Parametric Inverse Generator"""
    with tf.name_scope("generator"):
        with tf.variable_scope("generator"):
            theta_len = 1
            # the neural network will take as input z, and output
            # the two parameters for
            inp = z
            inp = fully_connected(inp, 20, activation='elu')
            inp = batch_normalization(inp)
            inp = fully_connected(inp, 20, activation='elu')
            inp = batch_normalization(inp)
            theta = fully_connected(inp, theta_len, activation='elu')
            theta = batch_normalization(theta)
            x_1 = tf.expand_dims(y, 1) - theta
            x_2 = theta
            x = tf.concat([x_1, x_2], 1)
            return x
Example #18
def score_net(inputs):
    state = inputs[0]
    curr_layer = state
    layers = []
    curr_layer = conv_2d_layer(curr_layer, 8, 1)
    curr_layer = batch_normalization(curr_layer)
    curr_layer = conv_2d_layer(curr_layer, 2, 4)
    curr_layer = batch_normalization(curr_layer)
    curr_layer = fully_connected(curr_layer, 1, activation='elu')
    curr_layer = batch_normalization(curr_layer)
    return [curr_layer]
Example #19
def discriminator_net(inputs):
    field = inputs[0]
    curr_layer = field

    layers = []
    curr_layer = tf.reshape(curr_layer, (-1, 16, 16, 1))
    curr_layer = conv_2d_layer(curr_layer, 16, 1)
    curr_layer = batch_normalization(curr_layer)
    curr_layer = fully_connected(curr_layer, 1)
    curr_layer = batch_normalization(curr_layer)
    curr_layer = tflearn.activations.sigmoid(curr_layer)
    return [curr_layer]
Example #20
def disc(x, y, reuse, use_y=False):
    """Discriminator"""
    with tf.name_scope("discriminator"):
        with tf.variable_scope("discriminator", reuse=reuse):
            if use_y:
                inp = tf.concat([x, tf.expand_dims(y, 1)], 1)
            else:
                inp = x
            # import pdb; pdb.set_trace()
            # inp = fully_connected(inp, 3, activation='elu')
            out = fully_connected(inp, 1, activation='sigmoid')
            return out
Example #21
def gen_func(args, reuse=False):
    """Generator function"""
    # import pdb; pdb.set_trace()
    with tf.variable_scope("generator", reuse=reuse):
        inp = tf.concat(args, axis=1)
        # inp = fully_connected(inp, 10, activation='elu')
        inp = fully_connected(inp, inv.num_param_ports(), activation='elu')
        inps = tf.split(inp,
                        axis=1,
                        num_or_size_splits=inv.num_param_ports())
        # inps = [tf.Print(inp, [inp[0]], message="Generated!", summarize=100) for inp in inps]
        return inps
Example #22
def nn_model(input_size):
    # same implementation with keras
    # model = Sequential()
    # model.add(Dense(128, input_shape=size, activation='relu'))

    network = input_data(shape=[None, input_size, 1], name='input')

    network = fully_connected(network, 128, activation='relu')
    network = dropout(network,
                      0.8)  # meaning 0.8 will be kept, opposite in keras

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)

    network = fully_connected(network, 2, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         learning_rate=LR,
                         loss='categorical_crossentropy',
                         name='targets')

    model = tflearn.DNN(network, tensorboard_dir='log')
    return model
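A hedged training sketch for nn_model: LR, input_size, and the random data below are assumptions (the original defines LR elsewhere); the feed-dict keys match the 'input' and 'targets' names used in the network definition above.

# Hypothetical call to nn_model; LR, input_size and the data are assumed.
import numpy as np

LR = 1e-3
input_size = 5
X = np.random.rand(100, input_size, 1)       # 100 samples, one channel each
Y = np.eye(2)[np.random.randint(0, 2, 100)]  # one-hot binary targets

model = nn_model(input_size)
model.fit({'input': X}, {'targets': Y}, n_epoch=3, show_metric=True)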
Example #23
def code_classifier_forward(config, incoming=None, image=None,
                            scope="code_classifier", name=None, reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        code_output = leaky_relu(fully_connected(incoming, 512))

        output = leaky_relu(fully_connected(tf.reshape(image, [config.batch_size, 28 * 28]), 512))
        prod = tf.matmul(code_output[:, :, None], output[:, None, :])

        prob = tf.nn.softmax(prod)
        prob2 = tf.nn.softmax(tf.transpose(prod, perm=[0, 2, 1]))

        output = tf.concat([code_output,
                            tf.matmul(prob, output[:, :, None])[:, :, 0],
                            tf.matmul(prob2, code_output[:, :, None])[:, :, 0]], axis=-1)
        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 256))
        output = dropout(output, 0.8)

        output = fully_connected(output, 10)

    return output
Example #24
def generator_forward(config,
                      labels,
                      noise=None,
                      scope="generator",
                      name=None,
                      reuse=False,
                      num_samples=-1):
    with tf.variable_scope(scope, name, reuse=reuse):
        if noise is None:
            noise = tf.random_normal(
                [config.batch_size if num_samples == -1 else num_samples, 128],
                name="noise")
        embed = fully_connected(labels, 8 * config.dim)
        noise = fully_connected(noise, 56 * config.dim)
        cat = relu(batch_normalization(tf.concat([embed, noise], axis=-1)))
        output = fully_connected(cat, 4 * 4 * 4 * config.dim)
        output = batch_normalization(output)
        output = tf.nn.relu(output)
        output = tf.reshape(output, [-1, 4, 4, 4 * config.dim])

        output = conv_2d_transpose(output,
                                   2 * config.dim,
                                   5, [8, 8],
                                   strides=2)
        output = output[:, :7, :7, :]
        output = batch_normalization(output)
        output = relu(output)

        output = conv_2d_transpose(output, config.dim, 5, [14, 14], strides=2)
        output = batch_normalization(output)
        output = tf.nn.relu(output)

        output = conv_2d_transpose(output, 1, 5, [28, 28], strides=2)

        output = tf.tanh(output)

    return output
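A minimal sketch pairing generator_forward with the discriminator from Example #1; the Config fields and the label placeholder are assumptions, and the 28x28 single-channel output matches the final conv_2d_transpose above.

# Hypothetical GAN wiring; Config fields and label shape are assumptions.
class Config:
    dim = 64
    batch_size = 32

config = Config()
labels = tf.placeholder(tf.float32, [None, 10], name="labels")

fake_images = generator_forward(config, labels)              # (batch, 28, 28, 1)
d_fake = discriminator_forward(config, labels, fake_images)  # score the generated batch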
Example #25
def code_classifier_forward(config,
                            incoming=None,
                            image=None,
                            scope="code_classifier",
                            name=None,
                            reuse=False):
    with tf.variable_scope(scope, name, reuse=reuse):
        code_output = leaky_relu(fully_connected(incoming, 512))

        output = conv_2d(image, 32, 5, 2, name="conv1")
        output = residual_block(output,
                                2,
                                32,
                                downsample=True,
                                batch_norm=True,
                                name="rb1")
        output = residual_block(output,
                                1,
                                64,
                                downsample=True,
                                batch_norm=True,
                                name="rb2")
        output = leaky_relu(
            fully_connected(
                tf.reshape(output, [config.batch_size, 4 * 4 * 64]), 1024))

        prod = tf.matmul(code_output[:, :, None], output[:, None, :])
        prob = tf.nn.softmax(prod)
        prob2 = tf.nn.softmax(tf.transpose(prod, perm=[0, 2, 1]))

        output = tf.concat([
            code_output,
            tf.matmul(prob, output[:, :, None])[:, :, 0],
            tf.matmul(prob2, code_output[:, :, None])[:, :, 0]
        ],
                           axis=-1)
        output = relu(fully_connected(output, 1024))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 512))
        output = dropout(output, 0.6)

        output = relu(fully_connected(output, 256))
        output = dropout(output, 0.8)

        output = fully_connected(output, 5)

    return output
Example #26
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist

X, Y, test_x, test_y = mnist.load_data(one_hot=True)

X = X.reshape([-1, 28, 28, 1])
test_x = test_x.reshape([-1, 28, 28, 1])

convnet = input_data(shape=[None, 28, 28, 1], name='input')

convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)

convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)

convnet = fully_connected(convnet, 10, activation='softmax')

convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='targets')

model = tflearn.DNN(convnet)

model.fit({'input': X}, {'targets': Y},
          n_epoch=3,
          validation_set=({'input': test_x}, {'targets': test_y}))
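A hypothetical follow-up, not part of the original snippet: persisting and querying the trained network uses the standard tflearn DNN methods.

# Hypothetical follow-up: save the model and predict on one test digit.
model.save('mnist_cnn.tflearn')
print(model.predict(test_x[:1]))  # class probabilities for the first test image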
Example #27
    def __init__(self,
                 max_document_length,
                 num_classes=2,
                 num_characters=71,
                 char_vec_size=16,
                 weight_decay=2e-4,
                 optimizer='sgd',
                 dropout=None,
                 num_blocks=None):
        self.input_text = layers.input_data((None, max_document_length))
        self.target_label = tf.placeholder(shape=(None, num_classes),
                                           dtype=tf.float32)

        embeded_text = layers.embedding(self.input_text, num_characters,
                                        char_vec_size)
        mask = tf.cast(tf.not_equal(self.input_text, 0), tf.float32)
        embeded_text = embeded_text * tf.expand_dims(mask, 2)
        self.embeded_text = embeded_text

        top_feature = embeded_text
        filters = 64
        if num_blocks is None or num_blocks[0] == 0:
            self.block = (1, 1, 1, 1)
        else:
            self.block = num_blocks
        for i, num_block in enumerate(self.block):
            if i > 0:
                filters *= 2
                top_feature = layers.max_pool_1d(top_feature,
                                                 3,
                                                 strides=2,
                                                 padding='same')
            for block_i in range(num_block):
                top_feature = self.conv_block(top_feature, filters)

        pooled_feature = layers.flatten(
            layers.custom_layer(top_feature, self.kmax_pool_1d))
        if dropout is not None:
            pooled_feature = layers.dropout(pooled_feature, dropout)
        fc1 = layers.fully_connected(pooled_feature,
                                     2048,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=weight_decay)
        if dropout is not None:
            fc1 = layers.dropout(fc1, dropout)
        fc2 = layers.fully_connected(fc1,
                                     2048,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=weight_decay)
        self.probas = layers.fully_connected(fc2,
                                             num_classes,
                                             activation='softmax',
                                             regularizer='L2',
                                             weight_decay=weight_decay)

        def build_sgd(learning_rate):
            step_tensor = tf.Variable(0.,
                                      name="Training_step",
                                      trainable=False)
            steps = [-1.0, 16000.0, 24000.0]
            lrs = [1e-1, 1e-2, 1e-3]
            lr = tf.reduce_min(
                tf.cast(tf.less(step_tensor, steps), tf.float32) + lrs)
            tflearn.helpers.summarizer.summarize(
                lr, 'scalar', 'lr', 'Optimizer_training_summaries')
            return tf.train.MomentumOptimizer(learning_rate=lr,
                                              momentum=0.9), step_tensor

        if optimizer == 'sgd':
            optimizer = build_sgd
        self.train_op = layers.regression(self.probas,
                                          optimizer=optimizer,
                                          learning_rate=0.001,
                                          placeholder=self.target_label)