Example #1
 def inference_model(self, inp, training, reuse=False, resize=True): # construct the graph of the inference net
     a = time.time()
     
     with tf.variable_scope("inference", reuse=reuse): # define variable scope
         if resize:
             inp = layers.max_pool_layer(inp, pool_size=(2,2), strides=(2,2), padding=(12,12))
             inp = layers.max_pool_layer(inp, pool_size=(2,2), strides=(2,2))
             inp = layers.max_pool_layer(inp, pool_size=(2,2), strides=(2,2))
             inp = layers.max_pool_layer(inp, pool_size=(2,2), strides=(2,2))
             inp = layers.max_pool_layer(inp, pool_size=(2,2), strides=(2,2))
         
         flat = tf.reshape(inp, [self.batch_size, -1])
         dense1 = layers.dense_layer(flat, units=1024, use_bias=True)
         relu1 = tf.nn.softplus(dense1)  # softplus activation
         dense2 = layers.dense_layer(relu1, units=512, use_bias=True)
         relu2 = tf.nn.softplus(dense2)
         dense3 = layers.dense_layer(relu2, units=512, use_bias=True)
         relu3 = tf.nn.softplus(dense3)
         dense4 = layers.dense_layer(relu3, units=2*self.latent_dim, use_bias=True)
         
         mean, logvar = tf.split(dense4, num_or_size_splits=2, axis=1)
         
     print("Built Inference model in {} s".format(time.time()-a))
     list_ops = [flat, dense1, relu1, dense2, relu2, dense3, relu3, dense4]  # list of operations; can be used to run the graph up to a certain op, i.e. get the subgraph
     return inp, mean, logvar, list_ops
 def scorer_head_model(self, features, training, reuse=False):
     with tf.variable_scope("scorer_head", reuse=reuse): # define variable scope
         dense1 = layers.dense_layer(features, units=1024, use_bias=True)
         relu1 = layers.relu_layer(dense1)
         dense2 = layers.dense_layer(relu1, units=512, use_bias=True)
         relu2 = layers.relu_layer(dense2)
         dense3 = layers.dense_layer(relu2, units=256, use_bias=True)
         relu3 = layers.relu_layer(dense3)
         dense4 = layers.dense_layer(relu3, units=1, use_bias=True)
         
         
     return dense4
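The inference net above parameterizes a diagonal Gaussian over the latent code (mean and logvar split from dense4). A minimal sketch of how those two outputs are typically turned into a latent sample via the reparameterization trick, assuming TF 1.x (this helper is not part of the original class):

import tensorflow as tf

def sample_latent(mean, logvar):
    # reparameterization trick: z = mean + sigma * eps, with eps ~ N(0, I)
    # and sigma = exp(0.5 * logvar)
    eps = tf.random_normal(tf.shape(mean))
    return mean + tf.exp(0.5 * logvar) * eps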
Example #3
    def discriminator_model(self, inp, feats, training, reuse=False, resize=False, minibatch=False):  # construct the graph of the discriminator
        a = time.time()
        with tf.variable_scope("discriminator",
                               reuse=reuse):  # define variable scope to easily retrieve vars of the discriminator

            if resize:
                inp = layers.max_pool_layer(inp, pool_size=(2, 2), strides=(2, 2), padding=(12, 12))
                inp = layers.max_pool_layer(inp, pool_size=(2, 2), strides=(2, 2))
                inp = layers.max_pool_layer(inp, pool_size=(2, 2), strides=(2, 2))
                inp = layers.max_pool_layer(inp, pool_size=(2, 2), strides=(2, 2))

            conv1 = layers.conv_block_mcgan(inp, training, momentum=0.8, out_channels=128, filter_size=(4, 4), strides=(2, 2),
                                            padding="same", use_bias=True, batch_norm=False,
                                            alpha=0.3)  # shape=(batch_size, 128, 32, 32)
            conv2 = layers.conv_block_mcgan(conv1, training, momentum=0.8, out_channels=256, filter_size=(4, 4), strides=(2, 2),
                                            padding="same", use_bias=True,
                                            alpha=0.3)  # shape=(batch_size, 256, 16, 16)
            conv3 = layers.conv_block_mcgan(conv2, training, momentum=0.8, out_channels=512, filter_size=(4, 4), strides=(2, 2),
                                            padding="same", use_bias=True, alpha=0.3)  # shape=(batch_size, 512, 8, 8)
            conv4 = layers.conv_block_mcgan(conv3, training, momentum=0.8, out_channels=1024, filter_size=(4, 4), strides=(2, 2),
                                            padding="same", use_bias=True, alpha=0.3)  # shape=(batch_size, 1024, 4, 4)
            flat = tf.reshape(conv4, [-1, 1024 * 4 * 4])

            if minibatch:
                minibatched = layers.minibatch(flat, num_kernels=5, kernel_dim=3)
                dense1 = layers.dense_layer(minibatched, 128, use_bias=True)
            else:
                dense1 = layers.dense_layer(flat, 128, use_bias=True)
            
            drop1 = layers.dropout_layer(dense1, training, dropout_rate=0.3)
            LRU1 = layers.leaky_relu_layer(drop1, alpha=0.3)

            dense2 = layers.dense_layer(feats, units=3, use_bias=True)
            relu2 = layers.relu_layer(dense2)
            bn2 = layers.batch_norm_layer_mcgan(relu2, training, 0.8)
            dense3 = layers.dense_layer(bn2, units=1)

            merged = tf.concat([LRU1, dense3], axis=-1)

            logits = layers.dense_layer(merged, units=2, use_bias=True)
            out = logits
        print("Built Discriminator model in {} s".format(time.time() - a))

        list_ops = {"inp": inp, "conv1": conv1, "conv2": conv2, "conv3": conv3, "conv4": conv4, "flat": flat,
                    "dense1":dense1, "drop1":drop1, "LRU1":LRU1, "dense2":dense2, "relu2":relu2, "bn2":bn2,
                    "dense3":dense3, "logits": logits, "out": out}

        return out, list_ops
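The discriminator ends in a 2-unit dense layer, so its logits are naturally scored with a softmax cross-entropy against real/fake class labels. A hedged sketch of such a loss; real_logits and fake_logits are placeholder names, and this is not the repository's actual training code:

import tensorflow as tf

def discriminator_loss(real_logits, fake_logits):
    # class 1 = real, class 0 = fake, encoded as one-hot over the 2 output units
    real_labels = tf.one_hot(tf.ones_like(real_logits[:, 0], dtype=tf.int32), depth=2)
    fake_labels = tf.one_hot(tf.zeros_like(fake_logits[:, 0], dtype=tf.int32), depth=2)
    loss_real = tf.nn.softmax_cross_entropy_with_logits_v2(labels=real_labels, logits=real_logits)
    loss_fake = tf.nn.softmax_cross_entropy_with_logits_v2(labels=fake_labels, logits=fake_logits)
    return tf.reduce_mean(loss_real) + tf.reduce_mean(loss_fake)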
Example #4
    def generator_model(self, noise, feats, training, reuse=False):  # construct the graph of the generator
        a = time.time()

        with tf.variable_scope("generator",
                               reuse=reuse):  # define variable scope to easily retrieve vars of the generator

            gen_inp = tf.concat([noise, feats], -1)
            with tf.name_scope("preprocess_inp"):
                dense1 = layers.dense_layer(gen_inp, units=4 * 4 * 1024, use_bias=False)
                bn1 = layers.batch_norm_layer_mcgan(dense1, training, 0.8)
                relu1 = layers.relu_layer(bn1)
                reshaped = tf.reshape(relu1, shape=[-1, 1024, 4, 4])  # shape=(batch_size, 1024, 4, 4)

            deconv1 = layers.deconv_block_mcgan(reshaped, training, momentum=0.8, out_channels=512, filter_size=(4, 4),
                                                strides=(2, 2), padding="same",
                                                use_bias=True)  # shape=(batch_size, 512, 8, 8)
            deconv2 = layers.deconv_block_mcgan(deconv1, training, momentum=0.8, out_channels=256, filter_size=(4, 4), strides=(2, 2),
                                                padding="same", use_bias=True)  # shape=(batch_size, 256, 16, 16)
            deconv3 = layers.deconv_block_mcgan(deconv2, training, momentum=0.8, out_channels=128, filter_size=(4, 4), strides=(2, 2),
                                                padding="same", use_bias=True)  # shape=(batch_size, 128, 32, 32)
            deconv4 = layers.deconv_layer(deconv3, out_channels=1, filter_size=(4, 4), strides=(2, 2), padding="same",
                                          use_bias=True)  # shape=(batch_size, 1, 64, 64)

            gen_out = layers.tanh_layer(deconv4)
        print("Built Generator model in {} s".format(time.time() - a))
        list_ops = {"dense1": dense1, "bn1":bn1, "relu1": relu1, "reshaped": reshaped, "deconv1": deconv1, "deconv2": deconv2,
                    "deconv3": deconv3, "deconv4": deconv4,
                    "gen_out": gen_out}  # list of operations, can be used to run the graph up to a certain op
        # i,e get the subgraph
        return gen_out, list_ops
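Assuming this generator and the discriminator from Example #3 live on the same model class, the usual adversarial wiring feeds gen_out back into discriminator_model with reuse=True. A sketch of that call pattern; real_images, noise and feats are illustrative variable names:

real_logits, _ = self.discriminator_model(real_images, feats, training=True)
fake, _ = self.generator_model(noise, feats, training=True)
fake_logits, _ = self.discriminator_model(fake, feats, training=True, reuse=True)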
Example #5
def generator(source,
              target,
              sequence_length,
              vocab_size,
              decoder_fn=None,
              **opts):
    """
    Args:
        source: TensorFlow queue or placeholder tensor for word ids for source 
        target: TensorFlow queue or placeholder tensor for word ids for target
        sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence
        vocab_size: max vocab size determined from data
        decoder_fn: optional custom decoder function; if None, the default dynamic_rnn is used
    """
    tf.logging.info(" Setting up generator")

    embedding_layer = lay.embedding_layer(vocab_size,
                                          opts["embedding_dim"],
                                          name="embedding_matrix")

    # TODO: add batch norm?
    rnn_outputs = (source >> embedding_layer >> lay.word_dropout_layer(
        keep_prob=opts["word_dropout_keep_prob"]) >> lay.recurrent_layer(
            hidden_dims=opts["rnn_hidden_dim"],
            keep_prob=opts["recurrent_dropout_keep_prob"],
            sequence_length=sequence_length,
            decoder_fn=decoder_fn,
            name="rnn_cell"))

    output_projection_layer = lay.dense_layer(hidden_dims=vocab_size,
                                              name="output_projections")

    flat_logits = (rnn_outputs >> lay.reshape_layer(
        shape=(-1, opts["rnn_hidden_dim"])) >> output_projection_layer)

    probs = flat_logits >> lay.softmax_layer()

    embedding_matrix = embedding_layer.get_variables_in_scope()
    output_projections = output_projection_layer.get_variables_in_scope()

    if decoder_fn is not None:
        return GeneratorTuple(rnn_outputs=rnn_outputs,
                              flat_logits=flat_logits,
                              probs=probs,
                              loss=None,
                              embedding_matrix=embedding_matrix[0],
                              output_projections=output_projections)

    loss = (flat_logits >> lay.cross_entropy_layer(target=target) >>
            lay.reshape_layer(shape=tf.shape(target)) >>
            lay.mean_loss_by_example_layer(sequence_length=sequence_length))

    # TODO: add dropout penalty
    return GeneratorTuple(rnn_outputs=rnn_outputs,
                          flat_logits=flat_logits,
                          probs=probs,
                          loss=loss,
                          embedding_matrix=embedding_matrix[0],
                          output_projections=output_projections)
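The lay.* layers here are chained with a >> pipe operator instead of nested calls. A minimal, self-contained sketch of how such an operator can be implemented; this is an assumption about the style of the underlying helper, not the library's actual code:

class Pipeable(object):
    # `value >> layer` applies the layer; `layer >> layer` composes left to right
    def __init__(self, fn):
        self.fn = fn

    def __rrshift__(self, value):
        return self.fn(value)

    def __rshift__(self, other):
        return Pipeable(lambda value: other.fn(self.fn(value)))

double = Pipeable(lambda x: 2 * x)
plus_one = Pipeable(lambda x: x + 1)
print(5 >> double >> plus_one)  # prints 11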
Example #6
def discriminator(input_vectors, sequence_length, is_real=True, **opts):
    """
    Args:
        input_vectors: output of the RNN either from real or generated data
        sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence
        is_real: if True, input_vectors are RNN outputs from real data; if False, from generated data
    """
    tf.logging.info(" Setting up discriminator")

    rnn_final_state = (
        input_vectors >>
        lay.dense_layer(hidden_dims=opts["embedding_dim"]) >>
        lay.recurrent_layer(sequence_length=sequence_length, hidden_dims=opts["rnn_hidden_dim"],
                            return_final_state=True)
    )

    prediction_logits = (
        rnn_final_state >>
        lay.dense_layer(hidden_dims=opts["output_hidden_dim"]) >>
        lay.relu_layer() >>
        lay.dropout_layer(opts["output_dropout_keep_prob"]) >>
        lay.dense_layer(hidden_dims=opts["output_hidden_dim"]) >>
        lay.relu_layer() >>
        lay.dropout_layer(opts["output_dropout_keep_prob"]) >>
        lay.dense_layer(hidden_dims=1)
    )

    if is_real:
        target = tf.ones_like(prediction_logits)
    else:
        target = tf.zeros_like(prediction_logits)

    # TODO: add accuracy
    loss = (
        prediction_logits >>
        lay.sigmoid_cross_entropy_layer(target=target)
    )

    # TODO: return logits in case for WGAN and l2 GANs
    return DiscriminatorTuple(rnn_final_state=rnn_final_state, prediction_logits=prediction_logits, loss=loss)
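Because is_real only switches the target labels, the full discriminator objective is typically obtained by calling the function twice, once on real RNN outputs and once on generated ones, and summing the two losses. A sketch under those assumptions; real_vectors and fake_vectors are placeholder names, and variable reuse between the two calls is assumed to be handled by the surrounding scope:

d_real = discriminator(real_vectors, sequence_length, is_real=True, **opts)
d_fake = discriminator(fake_vectors, sequence_length, is_real=False, **opts)
d_loss = d_real.loss + d_fake.loss  # reduce/average as appropriate for the loss shape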
Example #8
    def generative_model(self, noise, training, reuse=False): # construct the graph of the generative net
        a = time.time()
        
        with tf.variable_scope("generative", reuse=reuse): # define variable scope to easily retrieve vars of the discriminator
        
            gen_inp = noise
            dense1 = layers.dense_layer(noise, units=512, use_bias=True)
            relu1 = tf.nn.softplus(dense1)  # softplus activation
            dense2 = layers.dense_layer(relu1, units=512, use_bias=True)
            relu2 = tf.nn.softplus(dense2)
            dense3 = layers.dense_layer(relu2, units=1024, use_bias=True)
            relu3 = tf.nn.softplus(dense3)
            dense4 = layers.dense_layer(relu3, units=1024, use_bias=True)
            
            reshaped = tf.reshape(dense4, [-1, 1, 32, 32])

            logits = reshaped
            out = tf.sigmoid(logits)
        print("Built Generative model in {} s".format(time.time()-a))
        list_ops = [dense1, relu1, dense2, relu2, dense3, relu3, dense4, reshaped]
        
        return logits, out, list_ops
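Since the net returns both the raw logits and a sigmoid output, the per-pixel values can be read as Bernoulli parameters. A minimal sketch of a matching reconstruction loss computed on the logits, assuming targets in [0, 1] with the same NCHW shape; this is an illustration, not the original training code:

import tensorflow as tf

def bernoulli_recon_loss(logits, targets):
    # numerically stable cross-entropy on the logits, summed over pixels,
    # averaged over the batch
    per_pixel = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
    return tf.reduce_mean(tf.reduce_sum(per_pixel, axis=[1, 2, 3]))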
Example #9
def generator(source, target, sequence_length, vocab_size, decoder_fn=None, **opts):
    """
    Args:
        source: TensorFlow queue or placeholder tensor for word ids for source 
        target: TensorFlow queue or placeholder tensor for word ids for target
        sequence_length: TensorFlow queue or placeholder tensor for number of word ids for each sentence
        vocab_size: max vocab size determined from data
        decoder_fn: optional custom decoder function; if None, the default dynamic_rnn is used
    """
    tf.logging.info(" Setting up generator")

    embedding_layer = lay.embedding_layer(vocab_size, opts["embedding_dim"], name="embedding_matrix")

    # TODO: add batch norm?
    rnn_outputs = (
        source >>
        embedding_layer >>
        lay.word_dropout_layer(keep_prob=opts["word_dropout_keep_prob"]) >>
        lay.recurrent_layer(hidden_dims=opts["rnn_hidden_dim"], keep_prob=opts["recurrent_dropout_keep_prob"],
                            sequence_length=sequence_length, decoder_fn=decoder_fn, name="rnn_cell")
    )

    output_projection_layer = lay.dense_layer(hidden_dims=vocab_size, name="output_projections")

    flat_logits = (
        rnn_outputs >>
        lay.reshape_layer(shape=(-1, opts["rnn_hidden_dim"])) >>
        output_projection_layer
    )

    probs = flat_logits >> lay.softmax_layer()

    embedding_matrix = embedding_layer.get_variables_in_scope()
    output_projections = output_projection_layer.get_variables_in_scope()

    if decoder_fn is not None:
        return GeneratorTuple(rnn_outputs=rnn_outputs, flat_logits=flat_logits, probs=probs, loss=None,
                              embedding_matrix=embedding_matrix[0], output_projections=output_projections)

    loss = (
        flat_logits >>
        lay.cross_entropy_layer(target=target) >>
        lay.reshape_layer(shape=tf.shape(target)) >>
        lay.mean_loss_by_example_layer(sequence_length=sequence_length)
    )

    # TODO: add dropout penalty
    return GeneratorTuple(rnn_outputs=rnn_outputs, flat_logits=flat_logits, probs=probs, loss=loss,
                          embedding_matrix=embedding_matrix[0], output_projections=output_projections)
Example #10
    def __call__(
        self,
        inp,
        training,
        pad=True,
        zero_centered=False
    ):  # pad by 12 pixels on each side to get a 1024x1024 image if the input is 1000x1000
        a = time.time()
        pad_value = 0
        if zero_centered:
            pad_value = -1

        with tf.variable_scope("Scorer"):  # define variable scope

            #            histograms = tf.map_fn(lambda a: tf.cast(tf.histogram_fixed_width((a+1)*128.0 if zero_centered else (a*255.0), value_range=[0.0, 255.0], nbins=10), tf.float32), inp)

            if pad:
                inp = layers.padding_layer(inp,
                                           padding=(12, 12),
                                           pad_values=pad_value)  # 1024x1024

            max_pool1 = layers.max_pool_layer(inp,
                                              pool_size=(2, 2),
                                              strides=(2, 2))  # 512x512
            max_pool2 = layers.max_pool_layer(max_pool1,
                                              pool_size=(2, 2),
                                              strides=(2, 2))  # 256x256
            max_pool3 = layers.max_pool_layer(max_pool2,
                                              pool_size=(2, 2),
                                              strides=(2, 2))  # 128x128
            max_pool4 = layers.max_pool_layer(max_pool3,
                                              pool_size=(2, 2),
                                              strides=(2, 2))  # 64x64

            resized = layers.resize_layer(
                inp,
                new_size=[64, 64],
                resize_method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

            concat1 = tf.concat([max_pool4, resized], axis=1)

            #            conv1 = layers.conv_block_scorer(inp, training, out_channels=8, filter_size=(4, 4), strides=(2, 2), padding=(1, 1), pad_values=pad_value, use_bias=True, alpha=0.2) # 512x512
            #            conv2 = layers.conv_block_scorer(conv1, training, out_channels=16, filter_size=(4, 4), strides=(2, 2), padding=(1, 1), pad_values=pad_value, use_bias=True, alpha=0.2) # 256x256
            #            conv3 = layers.conv_block_scorer(conv2, training, out_channels=32, filter_size=(4, 4), strides=(2, 2), padding=(1, 1), pad_values=pad_value, use_bias=True, alpha=0.2) # 128x128
            #            conv4 = layers.conv_block_scorer(conv3, training, out_channels=64, filter_size=(4, 4), strides=(2, 2), padding=(1, 1), pad_values=pad_value, use_bias=True, alpha=0.2) # 64x64
            #
            #            concat2 = tf.concat([concat1, conv4], axis=1)

            conv5 = layers.conv_block_scorer(concat1,
                                             training,
                                             out_channels=128,
                                             filter_size=(4, 4),
                                             strides=(2, 2),
                                             padding=(1, 1),
                                             pad_values=pad_value,
                                             use_bias=True,
                                             alpha=0.2)  # 32x32
            conv6 = layers.conv_block_scorer(conv5,
                                             training,
                                             out_channels=256,
                                             filter_size=(4, 4),
                                             strides=(2, 2),
                                             padding=(1, 1),
                                             pad_values=pad_value,
                                             use_bias=True,
                                             alpha=0.2)  # 16x16
            conv7 = layers.conv_block_scorer(conv6,
                                             training,
                                             out_channels=512,
                                             filter_size=(4, 4),
                                             strides=(2, 2),
                                             padding=(1, 1),
                                             pad_values=pad_value,
                                             use_bias=True,
                                             alpha=0.2)  # 8x8
            conv8 = layers.conv_block_scorer(conv7,
                                             training,
                                             out_channels=1024,
                                             filter_size=(4, 4),
                                             strides=(2, 2),
                                             padding=(1, 1),
                                             pad_values=pad_value,
                                             use_bias=True,
                                             alpha=0.2)  # 4x4

            flat = tf.reshape(conv8, [-1, 1024 * 4 * 4])
            #            concat3 = tf.concat([flat, histograms], axis=-1)

            dense1 = layers.dense_block_scorer(flat,
                                               training,
                                               units=1024,
                                               use_bias=True,
                                               dropout_rate=0.3)
            #            dense2 = layers.dense_block_scorer(dense1, training, units=256, use_bias=True, dropout_rate=0.3)
            #            dense3 = layers.dense_block_scorer(dense2, training, units=128, use_bias=True, dropout_rate=0.3)
            dense4 = layers.dense_layer(dense1, units=1, use_bias=True)

            #            print(dense4.shape)
            #            sys.exit(0)
            output = dense4

        print("Scorer Model built in {} s".format(time.time() - a))
        return output
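The 12-pixel padding is what makes the pooling arithmetic work out: 1000 + 12 + 12 = 1024, which halves cleanly to 512, 256, 128 and 64 before the concat with the resized input. A short sketch of equivalent padding with plain tf.pad on an NCHW batch; an illustration only, not the layers.padding_layer implementation:

import tensorflow as tf

def pad_to_1024(images_nchw, pad_value=0):
    # pad 12 pixels on each side of H and W: 1000x1000 -> 1024x1024
    return tf.pad(images_nchw,
                  paddings=[[0, 0], [0, 0], [12, 12], [12, 12]],
                  constant_values=pad_value)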
Example #11
    def discriminator_model(
            self,
            inp,
            training,
            reuse=False,
            resize=False,
            minibatch=False):  # construct the graph of the discriminator
        a = time.time()
        with tf.variable_scope(
                "discriminator", reuse=reuse
        ):  # define variable scope to easily retrieve vars of the discriminator

            if resize:
                inp = layers.max_pool_layer(inp,
                                            pool_size=(2, 2),
                                            strides=(2, 2),
                                            padding=(12, 12))
                inp = layers.max_pool_layer(inp,
                                            pool_size=(2, 2),
                                            strides=(2, 2))
                inp = layers.max_pool_layer(inp,
                                            pool_size=(2, 2),
                                            strides=(2, 2))
                inp = layers.max_pool_layer(inp,
                                            pool_size=(2, 2),
                                            strides=(2, 2))

            conv1 = layers.conv_block_dcgan(
                inp,
                training,
                out_channels=128,
                filter_size=(4, 4),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 128, 32, 32)
            conv2 = layers.conv_block_dcgan(
                conv1,
                training,
                out_channels=256,
                filter_size=(4, 4),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 256, 16, 16)
            conv3 = layers.conv_block_dcgan(
                conv2,
                training,
                out_channels=512,
                filter_size=(4, 4),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 512, 8, 8)
            conv4 = layers.conv_block_dcgan(
                conv3,
                training,
                out_channels=1024,
                filter_size=(4, 4),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 1024, 4, 4)
            flat = tf.reshape(conv4, [-1, 1024 * 4 * 4])
            if minibatch:
                minibatched = layers.minibatch(flat,
                                               num_kernels=5,
                                               kernel_dim=3)
                logits = layers.dense_layer(minibatched,
                                            units=2,
                                            use_bias=True)
            else:
                logits = layers.dense_layer(flat, units=2, use_bias=True)

            out = logits
        print("Built Discriminator model in {} s".format(time.time() - a))
        list_ops = {
            "inp": inp,
            "conv1": conv1,
            "conv2": conv2,
            "conv3": conv3,
            "conv4": conv4,
            "flat": flat,
            "logits": logits,
            "out": out
        }

        return out, list_ops
Example #12
    def generator_model(self,
                        noise,
                        y,
                        training,
                        reuse=False):  # construct the graph of the generator
        a = time.time()

        with tf.variable_scope(
                "generator", reuse=reuse
        ):  # define variable scope to easily retrieve vars of the generator

            gen_inp = tf.concat([noise, y], axis=-1)

            with tf.name_scope("processing_inp"):
                dense1 = layers.dense_layer(gen_inp, units=5 * 5 * 256)
                batch_norm1 = layers.batch_norm_layer(dense1,
                                                      training,
                                                      momentum=0.8)
                leaky_relu1 = layers.leaky_relu_layer(batch_norm1)
                reshaped = tf.reshape(leaky_relu1, [-1, 256, 5, 5])

            deconv1 = layers.deconv_block_cgan(
                reshaped,
                training,
                momentum=0.8,
                out_channels=128,
                filter_size=(3, 3),
                strides=(1, 1),
                padding='same',
                use_bias=False)  # shape=(batch_size, 128, 5, 5)

            deconv2 = layers.deconv_block_cgan(
                deconv1,
                training,
                momentum=0.8,
                out_channels=64,
                filter_size=(5, 5),
                strides=(5, 5),
                padding='same',
                use_bias=False)  # shape=(batch_size, 64, 25, 25)

            deconv3 = layers.deconv_block_cgan(
                deconv2,
                training,
                momentum=0.8,
                out_channels=32,
                filter_size=(3, 3),
                strides=(1, 1),
                padding='same',
                use_bias=False)  # shape=(batch_size, 32, 25, 25)

            deconv4 = layers.deconv_block_cgan(
                deconv3,
                training,
                momentum=0.8,
                out_channels=16,
                filter_size=(5, 5),
                strides=(5, 5),
                padding='same',
                use_bias=False)  # shape=(batch_size, 16, 125, 125)

            deconv5 = layers.deconv_block_cgan(
                deconv4,
                training,
                momentum=0.8,
                out_channels=8,
                filter_size=(3, 3),
                strides=(2, 2),
                padding='same',
                use_bias=False)  # shape=(batch_size, 8, 250, 250)

            deconv6 = layers.deconv_block_cgan(
                deconv5,
                training,
                momentum=0.8,
                out_channels=4,
                filter_size=(3, 3),
                strides=(2, 2),
                padding='same',
                use_bias=False)  # shape=(batch_size, 4, 500, 500)

            gen_out = layers.deconv_layer(
                deconv6,
                out_channels=1,
                filter_size=(3, 3),
                strides=(2, 2),
                padding='same',
                use_bias=False,
                activation="tanh")  # shape=(batch_size, 1, 1000, 1000)

        print("Built Generator model in {} s".format(time.time() - a))
        list_ops = [
            dense1, batch_norm1, leaky_relu1, deconv1, deconv2, deconv3,
            deconv4, deconv5, deconv6, gen_out
        ]  # list of operations; can be used to run the graph up to a certain op, i.e. get the subgraph
        return gen_out, list_ops
Example #13
    def discriminator_model(
            self,
            inp,
            y,
            training,
            reuse=False):  # construct the graph of the discriminator
        a = time.time()
        with tf.variable_scope(
                "discriminator", reuse=reuse
        ):  # define variable scope to easily retrieve vars of the discriminator

            y_image = tf.tile(tf.reshape(y, [-1, 1, 1, 1]),
                              [1, 1, tf.shape(inp)[2],
                               tf.shape(inp)[3]])
            discr_inp = tf.concat([inp, y_image],
                                  axis=1)  # concat along channels dimension

            conv1 = layers.conv_block_cgan(
                discr_inp,
                training,
                dropout_rate=0.3,
                out_channels=4,
                filter_size=(3, 3),
                strides=(2, 2),
                padding='same',
                use_bias=True)  # shape=(batch_size, 4, 500, 500)

            conv2 = layers.conv_block_cgan(
                conv1,
                training,
                dropout_rate=0.3,
                out_channels=8,
                filter_size=(3, 3),
                strides=(2, 2),
                padding='same',
                use_bias=True)  # shape=(batch_size, 8, 250, 250)

            conv3 = layers.conv_block_cgan(
                conv2,
                training,
                dropout_rate=0.3,
                out_channels=16,
                filter_size=(3, 3),
                strides=(2, 2),
                padding='same',
                use_bias=True)  # shape=(batch_size, 16, 125, 125)

            conv4 = layers.conv_block_cgan(
                conv3,
                training,
                dropout_rate=0.3,
                out_channels=32,
                filter_size=(5, 5),
                strides=(5, 5),
                padding='same',
                use_bias=True)  # shape=(batch_size, 32, 25, 25)

            conv5 = layers.conv_block_cgan(
                conv4,
                training,
                dropout_rate=0.3,
                out_channels=64,
                filter_size=(3, 3),
                strides=(1, 1),
                padding='same',
                use_bias=True)  # shape=(batch_size, 64, 25, 25)

            conv6 = layers.conv_block_cgan(
                conv5,
                training,
                dropout_rate=0.3,
                out_channels=128,
                filter_size=(5, 5),
                strides=(5, 5),
                padding='same',
                use_bias=True)  # shape=(batch_size, 128, 5, 5)

            conv7 = layers.conv_block_cgan(
                conv6,
                training,
                dropout_rate=0.3,
                out_channels=256,
                filter_size=(3, 3),
                strides=(1, 1),
                padding='same',
                use_bias=True)  # shape=(batch_size, 256, 5, 5)

            flat = tf.reshape(conv7, [tf.shape(conv7)[0], -1])

            dense_block1 = layers.dense_block_cgan(
                flat, training, units=4000, dropout_rate=0.3, use_bias=True
            )  # apply dense_block: dense -> leakyReLU -> dropout

            dense_block2 = layers.dense_block_cgan(
                dense_block1,
                training,
                units=500,
                dropout_rate=0.3,
                use_bias=True
            )  # apply dense_block: dense -> leakyReLU -> dropout

            logits = layers.dense_layer(dense_block2, units=2)

        print("Built Discriminator model in {} s".format(time.time() - a))
        list_ops = [
            conv1, conv2, conv3, conv4, conv5, conv6, conv7, flat,
            dense_block1, dense_block2, logits
        ]

        return logits, list_ops
Example #14
import Base_Symbol as BS
import ActivateFunctions as AF
import layers
import numpy as np
import sklearn.datasets as skdata

X, y = skdata.load_iris(return_X_y=True)
X = X / np.max(X, axis=0)
X = X - np.mean(X, axis=0)
y = np.array([[0 if i != j else 1 for j in range(3)] for i in y])

xs = BS.Placeholder()
ys = BS.Placeholder()

l1 = layers.dense_layer(xs,
                        init_w=(np.random.random([4, 64]) - 0.5),
                        init_b=np.zeros([1, 64]) + 0.1,
                        activation_func=AF.ReLu)

l2 = layers.dense_layer(l1,
                        init_w=(np.random.random([64, 64]) - 0.5),
                        init_b=np.zeros([1, 64]) + 0.1,
                        activation_func=AF.ReLu)

l3 = layers.dense_layer(l2,
                        init_w=(np.random.random([64, 64]) - 0.5),
                        init_b=np.zeros([1, 64]) + 0.1,
                        activation_func=AF.ReLu)

out = layers.dense_layer(l3,
                         init_w=(np.random.random([64, 3]) - 0.5),
                         init_b=np.zeros([1, 3]) + 0.1)
Example #15
    def generator_model(self,
                        noise,
                        training,
                        reuse=False):  # construct the graph of the generator
        a = time.time()

        with tf.variable_scope(
                "generator", reuse=reuse
        ):  # define variable scope to easily retrieve vars of the generator

            gen_inp = noise
            with tf.name_scope("preprocess_inp"):
                dense1 = layers.dense_layer(gen_inp,
                                            units=5 * 5 * 256,
                                            use_bias=True)
                batch1 = layers.batch_norm_layer(dense1,
                                                 training=training,
                                                 momentum=0.8)
                relu1 = layers.leaky_relu_layer(batch1)  # apply the activation to the batch-normalized output rather than the raw dense1
                reshaped = tf.reshape(
                    relu1, shape=[-1, 256, 5,
                                  5])  # shape=(batch_size, 256, 5, 5)

            deconv1 = layers.deconv_block_fullres(
                reshaped,
                training,
                out_channels=128,
                filter_size=(3, 3),
                strides=(1, 1),
                padding="same",
                use_bias=False)  # shape=(batch_size, 128, 5, 5)
            deconv2 = layers.deconv_block_fullres(
                deconv1,
                training,
                out_channels=64,
                filter_size=(5, 5),
                strides=(5, 5),
                padding="same",
                use_bias=False)  # shape=(batch_size, 64, 25, 25)
            deconv3 = layers.deconv_block_fullres(
                deconv2,
                training,
                out_channels=32,
                filter_size=(3, 3),
                strides=(1, 1),
                padding="same",
                use_bias=False)  # shape=(batch_size, 32, 25, 25)
            deconv4 = layers.deconv_block_fullres(
                deconv3,
                training,
                out_channels=16,
                filter_size=(5, 5),
                strides=(5, 5),
                padding="same",
                use_bias=False)  # shape=(batch_size, 16, 125, 125)
            deconv5 = layers.deconv_block_fullres(
                deconv4,
                training,
                out_channels=8,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False)  # shape=(batch_size, 8, 250, 250)
            deconv6 = layers.deconv_block_fullres(
                deconv5,
                training,
                out_channels=4,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False)  # shape=(batch_size, 4, 500, 500)
            deconv7 = layers.deconv_layer(
                deconv6,
                out_channels=1,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False)  # shape=(batch_size, 1, 1000, 1000)

            gen_out = layers.tanh_layer(deconv7)
        print("Built Generator model in {} s".format(time.time() - a))
        list_ops = {
            "dense1": dense1,
            "batch1": batch1,
            "relu1": relu1,
            "reshaped": reshaped,
            "deconv1": deconv1,
            "deconv2": deconv2,
            "deconv3": deconv3,
            "deconv4": deconv4,
            "deconv5": deconv5,
            "deconv6": deconv6,
            "deconv7": deconv7,
            "gen_out": gen_out
        }  # list of operations; can be used to run the graph up to a certain op, i.e. get the subgraph
        return gen_out, list_ops
Example #16
    def discriminator_model(
            self,
            inp,
            training,
            reuse=False,
            minibatch=False):  # construct the graph of the discriminator
        a = time.time()
        with tf.variable_scope(
                "discriminator", reuse=reuse
        ):  # define variable scope to easily retrieve vars of the discriminator

            conv1 = layers.conv_block_fullres(
                inp,
                training,
                out_channels=4,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 4, 500, 500)
            conv2 = layers.conv_block_fullres(
                conv1,
                training,
                out_channels=8,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 8, 250, 250)
            conv3 = layers.conv_block_fullres(
                conv2,
                training,
                out_channels=16,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 16, 125, 125)
            conv4 = layers.conv_block_fullres(
                conv3,
                training,
                out_channels=32,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 32, 63, 63)
            conv5 = layers.conv_block_fullres(
                conv4,
                training,
                out_channels=64,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 64, 32, 32)
            conv6 = layers.conv_block_fullres(
                conv5,
                training,
                out_channels=128,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 128, 16, 16)
            conv7 = layers.conv_block_fullres(
                conv6,
                training,
                out_channels=256,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 256, 8, 8)
            conv8 = layers.conv_block_fullres(
                conv7,
                training,
                out_channels=512,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False,
                alpha=0.3)  # shape=(batch_size, 512, 4, 4)
            flat = tf.reshape(conv8, [-1, 512 * 4 * 4])
            dense1 = layers.dense_block_fullres(flat, training, units=4000)
            dense2 = layers.dense_block_fullres(dense1, training, units=500)

            if minibatch:
                minibatched = layers.minibatch(dense2,
                                               num_kernels=5,
                                               kernel_dim=3)
                logits = layers.dense_layer(minibatched,
                                            units=2,
                                            use_bias=True)
            else:
                logits = layers.dense_layer(dense2, units=2, use_bias=True)  # use the dense stack's output; dense1/dense2 would otherwise be unused in this branch

            out = logits
        print("Built Discriminator model in {} s".format(time.time() - a))
        list_ops = {
            "inp": inp,
            "conv1": conv1,
            "conv2": conv2,
            "conv3": conv3,
            "conv4": conv4,
            "conv5": conv5,
            "conv6": conv6,
            "conv7": conv7,
            "flat": flat,
            "dense1": dense1,
            "dense2": dense2,
            "logits": logits,
            "out": out
        }

        return out, list_ops