Example 1
import tensorflow as tf  # TF 1.x graph-mode API

import layers  # project-local helper module (tanh_layer, INIT_W, etc.)


def gaussian_encoder(name, x, n_hidden, n_output, keep_prob, reuse=False):

    with tf.variable_scope("%s_gaussian_encoder" % name, reuse=reuse):

        if type(n_hidden) is int:
            n_hidden = [x.get_shape()[1], n_hidden, 2 * n_output]
        elif type(n_hidden) is list:
            n_hidden.append(2 * n_output)
            n_hidden = [x.get_shape()[1]] + n_hidden
        else:
            raise TypeError("n_hidden must be an int or a list")

        num_layers = len(n_hidden)

        h = x
        for i in range(num_layers - 1):
            h = layers.tanh_layer(h, n_hidden[i], n_hidden[i + 1],
                                  "enc_l%i" % i, reuse, True, keep_prob)

        # The mean parameter is unconstrained
        mean = h[:, :n_output]
        # The standard deviation must be positive. Parametrize with a softplus and
        # add a small epsilon for numerical stability
        stddev = 1e-6 + tf.nn.softplus(h[:, n_output:])

    return mean, stddev
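For context, here is a minimal usage sketch of the encoder above. It assumes TF 1.x and illustrative sizes (784-dimensional inputs, a 20-dimensional latent); the reparameterization step is the standard way to draw a differentiable sample from the predicted Gaussian:

# Hypothetical usage sketch; names and shapes are illustrative.
x = tf.placeholder(tf.float32, [None, 784])
keep_prob = tf.placeholder_with_default(1.0, shape=[])

mean, stddev = gaussian_encoder("vae", x, n_hidden=[512, 256],
                                n_output=20, keep_prob=keep_prob)

# Reparameterization trick: a differentiable sample from N(mean, stddev**2)
z = mean + stddev * tf.random_normal(tf.shape(mean))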
Example 2
    def generator_model(self, noise, feats, training, reuse=False):  # construct the graph of the generator
        a = time.time()

        with tf.variable_scope("generator",
                               reuse=reuse):  # define variable scope to easily retrieve vars of the generator

            gen_inp = tf.concat([noise, feats], -1)
            with tf.name_scope("preprocess_inp"):
                dense1 = layers.dense_layer(gen_inp, units=4 * 4 * 1024, use_bias=False)
                bn1 = layers.batch_norm_layer_mcgan(dense1, training, 0.8)
                relu1 = layers.relu_layer(bn1)
                reshaped = tf.reshape(relu1, shape=[-1, 1024, 4, 4])  # shape=(batch_size, 1024, 4, 4)

            deconv1 = layers.deconv_block_mcgan(reshaped, training, momentum=0.8, out_channels=512, filter_size=(4, 4),
                                                strides=(2, 2), padding="same",
                                                use_bias=True)  # shape=(batch_size, 512, 8, 8)
            deconv2 = layers.deconv_block_mcgan(deconv1, training, momentum=0.8, out_channels=256, filter_size=(4, 4),
                                                strides=(2, 2), padding="same",
                                                use_bias=True)  # shape=(batch_size, 256, 16, 16)
            deconv3 = layers.deconv_block_mcgan(deconv2, training, momentum=0.8, out_channels=128, filter_size=(4, 4),
                                                strides=(2, 2), padding="same",
                                                use_bias=True)  # shape=(batch_size, 128, 32, 32)
            deconv4 = layers.deconv_layer(deconv3, out_channels=1, filter_size=(4, 4), strides=(2, 2), padding="same",
                                          use_bias=True)  # shape=(batch_size, 1, 64, 64)

            gen_out = layers.tanh_layer(deconv4)
        print("Built Generator model in {} s".format(time.time() - a))
        list_ops = {"dense1": dense1, "bn1":bn1, "relu1": relu1, "reshaped": reshaped, "deconv1": deconv1, "deconv2": deconv2,
                    "deconv3": deconv3, "deconv4": deconv4,
                    "gen_out": gen_out}  # list of operations, can be used to run the graph up to a certain op
        # i,e get the subgraph
        return gen_out, list_ops
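The `deconv_block_mcgan` helper is not shown in this listing. A plausible sketch, assuming TF 1.x and the channels-first layout implied by the shape comments, would bundle a transposed convolution, batch normalization with the given momentum, and a ReLU (the real project-local implementation may differ):

# Hypothetical reconstruction of the helper assumed above.
def deconv_block_mcgan(inp, training, momentum, out_channels, filter_size,
                       strides, padding, use_bias):
    deconv = tf.layers.conv2d_transpose(inp, filters=out_channels,
                                        kernel_size=filter_size,
                                        strides=strides, padding=padding,
                                        use_bias=use_bias,
                                        data_format="channels_first")
    bn = tf.layers.batch_normalization(deconv, axis=1, momentum=momentum,
                                       training=training)
    return tf.nn.relu(bn)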
Example 3
def gen_ffd_logit(name, x, n_hidden, n_output, keep_prob, reuse=False):

    with tf.variable_scope("%s_gen_ffd_logit" % name, reuse=reuse):

        if type(n_hidden) is int:
            n_hidden = [x.get_shape()[1], n_hidden, n_output]
        elif type(n_hidden) is list:
            n_hidden.append(n_output)  # last hidden width; the explicit output layer below maps it to n_output
            n_hidden = [x.get_shape()[1]] + n_hidden
        else:
            raise TypeError("n_hidden must be an int or a list")

        num_layers = len(n_hidden)

        h = x
        for i in range(num_layers - 1):
            h = layers.tanh_layer(h, n_hidden[i], n_hidden[i + 1],
                                  "gen_ffd_logit_l%i" % i, reuse, True,
                                  keep_prob)

        # output layer
        w_out = tf.get_variable('w_out', [n_hidden[-1], n_output],
                                initializer=layers.INIT_W)
        b_out = tf.get_variable('b_out', [n_output], initializer=layers.INIT_B)

        output = tf.nn.sigmoid(tf.matmul(h, w_out) + b_out)
    return output
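Because the output already passes through a sigmoid, a cross-entropy loss must be built from the probabilities directly rather than via tf.nn.sigmoid_cross_entropy_with_logits. A minimal sketch, with illustrative names and shapes:

# Hypothetical usage sketch; names and shapes are illustrative.
x = tf.placeholder(tf.float32, [None, 100])
labels = tf.placeholder(tf.float32, [None, 1])

prob = gen_ffd_logit("clf", x, n_hidden=[64, 32], n_output=1, keep_prob=1.0)

# Binary cross-entropy on probabilities (the epsilon guards against log(0))
loss = -tf.reduce_mean(labels * tf.log(prob + 1e-8)
                       + (1 - labels) * tf.log(1 - prob + 1e-8))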
Example 4
    def __call__(self, inp, training):
        a = time.time()

        with tf.variable_scope("StackedSRM"):  # define variable scope
            inter = inp  # intermediate input
            outputs = []
            for i in range(self.nb_stacks):
                with tf.name_scope("stack"):
                    conv = layers.conv_layer(inter,
                                             out_channels=64,
                                             filter_size=(4, 4),
                                             strides=(2, 2),
                                             padding=(1, 1),
                                             pad_values=0,
                                             use_bias=False)
                    relu = layers.relu_layer(conv)
                    res_module = layers.residual_module_srm(relu,
                                                            training,
                                                            out_channels=64,
                                                            nb_blocks=6,
                                                            pad_values=0,
                                                            use_bias=False)

                    h, w = tf.shape(res_module)[2], tf.shape(res_module)[3]  # spatial dims (channels-first layout)
                    up_sample1 = layers.resize_layer(
                        res_module,
                        new_size=[2 * h, 2 * w],
                        resize_method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
                    )  # nearest neighbor up sampling
                    conv1 = layers.conv_layer(up_sample1,
                                              out_channels=128,
                                              filter_size=(3, 3),
                                              strides=(1, 1),
                                              padding=(1, 1),
                                              pad_values=0,
                                              use_bias=False)

                    h, w = tf.shape(conv1)[2], tf.shape(conv1)[3]
                    up_sample2 = layers.resize_layer(
                        conv1,
                        new_size=[2 * h, 2 * w],
                        resize_method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
                    )  # nearest neighbor up sampling
                    conv2 = layers.conv_layer(up_sample2,
                                              out_channels=1,
                                              filter_size=(3, 3),
                                              strides=(1, 1),
                                              padding=(1, 1),
                                              pad_values=0,
                                              use_bias=False)

                    inter = (
                        layers.tanh_layer(conv2) + 1
                    ) / 2.0  # apply tanh and renormalize so that the output is in the range [0, 1] to prepare it to be inputted to the next stack

                outputs.append(inter)

        print("SRM Model built in {} s".format(time.time() - a))
        return outputs
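Note that the indexing tf.shape(...)[2] and [3] implies a channels-first layout, while TF 1.x image resizing expects channels-last. A plausible sketch of the `resize_layer` helper assumed above would therefore transpose around the resize (the real project-local implementation may differ):

# Hypothetical reconstruction of the helper assumed above.
def resize_layer(inp, new_size, resize_method):
    nhwc = tf.transpose(inp, [0, 2, 3, 1])      # NCHW -> NHWC
    resized = tf.image.resize_images(nhwc, size=new_size, method=resize_method)
    return tf.transpose(resized, [0, 3, 1, 2])  # back to NCHW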
Example 5
def gen_decoder(name, z, n_hidden, n_output, keep_prob, reuse=False,
                output_zero_one=True, output_scalar=False, output_softmax=False):

    with tf.variable_scope("%s_bernoulli_decoder" % name, reuse=reuse):
        if type(n_hidden) is int:
            n_hidden = [z.get_shape()[1], n_hidden]
        elif type(n_hidden) is list:
            n_hidden = [z.get_shape()[1]] + n_hidden
        else:
            raise TypeError("n_hidden must be an int or a list")

        num_layers = len(n_hidden)

        h = z
        for i in range(num_layers - 1):
            h = layers.tanh_layer(h, n_hidden[i], n_hidden[i + 1],
                                  "dec_l%i" % i, reuse, True, keep_prob)

        if output_zero_one:
            h = layers.sigmoid_layer(h, n_hidden[-1], n_output,
                                     "dec_l%i" % (num_layers - 1), reuse, True,
                                     keep_prob)
        elif output_scalar:
            h = layers.linear_layer(h, n_hidden[-1], n_output,
                                    "dec_l%i" % (num_layers - 1), reuse, True,
                                    keep_prob)
        elif output_softmax:
            h = layers.softmax_layer(h, n_hidden[-1], n_output,
                                     "dec_l%i" % (num_layers - 1), reuse, True,
                                     keep_prob)
        else:
            raise ValueError("at least one of output_zero_one, output_scalar, "
                             "or output_softmax must be True")

    return h
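With the default output_zero_one=True the decoder emits per-dimension Bernoulli means, so a binary cross-entropy reconstruction loss is the natural pairing. A minimal sketch, with illustrative names and sizes:

# Hypothetical usage sketch; names and sizes are illustrative.
z = tf.placeholder(tf.float32, [None, 20])
x_target = tf.placeholder(tf.float32, [None, 784])

x_hat = gen_decoder("vae", z, n_hidden=[256, 512], n_output=784, keep_prob=1.0)

recon_loss = -tf.reduce_sum(
    x_target * tf.log(x_hat + 1e-8)
    + (1 - x_target) * tf.log(1 - x_hat + 1e-8), axis=1)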
Example 6
def gen_encoder(name, x, n_hidden, n_output, keep_prob, reuse=False):

    with tf.variable_scope("%s_gen_encoder" % name, reuse=reuse):

        if type(n_hidden) is int:
            n_hidden = [x.get_shape()[1], n_hidden, n_output]
        elif type(n_hidden) is list:
            n_hidden.append(n_output)  # match the int branch: the final layer is the code layer
            n_hidden = [x.get_shape()[1]] + n_hidden
        else:
            raise TypeError("n_hidden must be an int or a list")

        num_layers = len(n_hidden)

        h = x
        for i in range(num_layers - 1):
            h = layers.tanh_layer(h, n_hidden[i], n_hidden[i + 1],
                                  "enc_l%i" % i, reuse, True, keep_prob)

    return h
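The encoder pairs naturally with the gen_decoder of Example 5 to form a plain autoencoder. A minimal sketch, with illustrative names and sizes:

# Hypothetical usage sketch; names and sizes are illustrative.
x = tf.placeholder(tf.float32, [None, 784])

code = gen_encoder("ae", x, n_hidden=[512], n_output=64, keep_prob=1.0)
x_hat = gen_decoder("ae", code, n_hidden=[512], n_output=784, keep_prob=1.0)

loss = tf.reduce_mean(tf.square(x - x_hat))  # MSE reconstruction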
Example 7
    def generator_model(self,
                        noise,
                        training,
                        reuse=False):  # construct the graph of the generator
        a = time.time()

        with tf.variable_scope(
                "generator", reuse=reuse
        ):  # define variable scope to easily retrieve vars of the generator

            gen_inp = noise
            with tf.name_scope("preprocess_inp"):
                dense1 = layers.dense_layer(gen_inp,
                                            units=5 * 5 * 256,
                                            use_bias=True)
                batch1 = layers.batch_norm_layer(dense1,
                                                 training=training,
                                                 momentum=0.8)
                relu1 = layers.leaky_relu_layer(batch1)  # apply the activation to the batch-norm output
                reshaped = tf.reshape(
                    relu1, shape=[-1, 256, 5,
                                  5])  # shape=(batch_size, 256, 5, 5)

            deconv1 = layers.deconv_block_fullres(
                reshaped,
                training,
                out_channels=128,
                filter_size=(3, 3),
                strides=(1, 1),
                padding="same",
                use_bias=False)  # shape=(batch_size, 128, 5, 5)
            deconv2 = layers.deconv_block_fullres(
                deconv1,
                training,
                out_channels=64,
                filter_size=(5, 5),
                strides=(5, 5),
                padding="same",
                use_bias=False)  # shape=(batch_size, 64, 25, 25)
            deconv3 = layers.deconv_block_fullres(
                deconv2,
                training,
                out_channels=32,
                filter_size=(3, 3),
                strides=(1, 1),
                padding="same",
                use_bias=False)  # shape=(batch_size, 32, 25, 25)
            deconv4 = layers.deconv_block_fullres(
                deconv3,
                training,
                out_channels=16,
                filter_size=(5, 5),
                strides=(5, 5),
                padding="same",
                use_bias=False)  # shape=(batch_size, 16, 125, 125)
            deconv5 = layers.deconv_block_fullres(
                deconv4,
                training,
                out_channels=8,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False)  # shape=(batch_size, 8, 250, 250)
            deconv6 = layers.deconv_block_fullres(
                deconv5,
                training,
                out_channels=4,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False)  # shape=(batch_size, 4, 500, 500)
            deconv7 = layers.deconv_layer(
                deconv6,
                out_channels=1,
                filter_size=(3, 3),
                strides=(2, 2),
                padding="same",
                use_bias=False)  # shape=(batch_size, 1, 1000, 1000)

            gen_out = layers.tanh_layer(deconv7)
        print("Built Generator model in {} s".format(time.time() - a))
        list_ops = {
            "dense1": dense1,
            "batch1": batch1,
            "relu1": relu1,
            "reshaped": reshaped,
            "deconv1": deconv1,
            "deconv2": deconv2,
            "deconv3": deconv3,
            "deconv4": deconv4,
            "deconv5": deconv5,
            "deconv6": deconv6,
            "deconv7": deconv7,
            "gen_out": gen_out
        }  # dict of operations, can be used to run the graph up to a certain op,
        # i.e. to get the subgraph
        return gen_out, list_ops
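The returned list_ops dictionary makes it possible to evaluate the graph only up to an intermediate op, as the closing comment notes. A minimal sketch, assuming `model` is an instance of the (unshown) class this method belongs to:

# Hypothetical usage sketch; `model` and the shapes are illustrative.
import numpy as np

noise = tf.placeholder(tf.float32, [None, 100])
training = tf.placeholder(tf.bool, shape=[])
gen_out, ops = model.generator_model(noise, training)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    z = np.random.randn(8, 100).astype(np.float32)
    # Evaluate the subgraph only up to an intermediate op:
    feats = sess.run(ops["deconv4"], feed_dict={noise: z, training: False})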