Example #1
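A fully connected encoder: the input is flattened and passed through ReLU linear blocks of 512, 256, and 128 units, ending in a code of size self.code_size.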
    def encoder(self, Xs, name='encoder'):
        with tf.variable_scope(name):
            stack = Stacker(Xs)
            stack.flatten()
            stack.linear_block(512, relu)
            stack.linear_block(256, relu)
            stack.linear_block(128, relu)
            stack.linear_block(self.code_size, relu)

        return stack.last_layer
Example #2
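A convolutional discriminator: two strided conv blocks (128 and 256 channels) with leaky ReLU, flattened and mapped through a single linear unit to a sigmoid probability.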
    def discriminator(self, X, reuse=False):
        with tf.variable_scope('discriminator', reuse=reuse):
            layer = Stacker(X)
            layer.conv_block(128, CONV_FILTER_5522, lrelu)
            layer.conv_block(256, CONV_FILTER_5522, lrelu)
            layer.reshape([self.batch_size, -1])
            layer.linear(1)
            layer.sigmoid()

        return layer.last_layer
Example #3
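The matching decoder: ReLU linear blocks of 128, 256, and 512 units, a sigmoid output of self.X_flatten_size, and a reshape back to the input image shape.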
    def decoder(self, zs, reuse=False, name='decoder'):
        with tf.variable_scope(name, reuse=reuse):
            stack = Stacker(zs)
            stack.linear_block(128, relu)
            stack.linear_block(256, relu)
            stack.linear_block(512, relu)
            stack.linear_block(self.X_flatten_size, sigmoid)
            stack.reshape(self.Xs_shape)

        return stack.last_layer
Example #4
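A generator whose hidden widths are supplied via net_shapes; the result is projected to the flattened image size, squashed with a sigmoid, and reshaped to self.Xs_shape.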
    def generator(self, z, net_shapes, reuse=False, name='generator'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(z)

            for shape in net_shapes:
                layer.linear(shape)

            layer.linear(self.X_flatten_size)
            layer.sigmoid()
            layer.reshape(self.Xs_shape)

        return layer.last_layer
Example #5
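A label-conditioned (cGAN-style) discriminator: the label Y is projected to an input_h x input_w map, reshaped to a single channel, and concatenated with X along the channel axis before the conv stack.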
    def discriminator(self, X, Y, reuse=False):
        with tf.variable_scope('discriminator', reuse=reuse):
            Y = linear(Y, self.input_h * self.input_w)
            Y = reshape(Y, [self.batch_size, self.input_h, self.input_w, 1])
            layer = Stacker(tf.concat((X, Y), axis=3))
            layer.conv_block(128, CONV_FILTER_5522, lrelu)
            layer.conv_block(256, CONV_FILTER_5522, lrelu)
            layer.reshape([self.batch_size, -1])
            layer.linear(1)
            layer.sigmoid()

        return layer.last_layer
Example #6
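An encoder that predicts both a latent code and a label: the final layer is split into zs and Ys_gen, and the label half is additionally passed through a softmax to give hs.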
    def encoder(self, Xs, reuse=False, name='encoder'):
        with tf.variable_scope(name, reuse=reuse):
            stack = Stacker(Xs)
            stack.flatten()
            stack.linear_block(512, relu)
            stack.linear_block(256, relu)
            stack.linear_block(128, relu)
            stack.linear_block(self.z_size + self.Y_size, relu)
            zs = stack.last_layer[:, :self.z_size]
            Ys_gen = stack.last_layer[:, self.z_size:]

            hs = softmax(Ys_gen)
        return zs, Ys_gen, hs
Example #7
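A VAE-style encoder: the final layer of width 2 * z_size is split into a mean and a softplus-constrained standard deviation.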
    def encoder(self, Xs, name='encoder'):
        with tf.variable_scope(name):
            stack = Stacker(Xs)
            stack.flatten()
            stack.linear_block(512, relu)
            stack.linear_block(256, relu)
            stack.linear_block(128, relu)
            stack.linear_block(self.z_size * 2, relu)

            h = stack.last_layer
            mean = h[:, :self.z_size]
            std = tf.nn.softplus(h[:, self.z_size:])

        return mean, std
Example #8
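A small MLP discriminator over latent codes: two 256-unit ReLU blocks followed by a single sigmoid output.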
    def discriminator_cate(self, zs, reuse=False, name='discriminator_cate'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(zs)
            layer.linear_block(256, relu)
            layer.linear_block(256, relu)
            layer.linear(1)
            layer.sigmoid()

        return layer.last_layer
Example #9
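A configurable encoder: hidden widths come from net_shapes, ending in a ReLU block of self.latent_code_size units.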
    def encoder(self, Xs, net_shapes, reuse=False, name='encoder'):
        with tf.variable_scope(name, reuse=reuse):
            stack = Stacker(Xs)
            stack.flatten()
            for shape in net_shapes:
                stack.linear_block(shape, relu)

            stack.linear_block(self.latent_code_size, relu)

        return stack.last_layer
Example #10
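A configurable MLP discriminator over flattened inputs; note that the hidden layers are plain linear calls with no activation argument, unlike the linear_block calls elsewhere.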
    def discriminator(self, X, net_shapes, reuse=False, name='discriminator'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(flatten(X))

            for shape in net_shapes:
                layer.linear(shape)

            layer.linear(1)
            layer.sigmoid()

        return layer.last_layer
Example #11
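A label-conditioned decoder: the latent code and the label are concatenated along axis 1 before the configurable linear stack.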
    def decoder(self, zs, Ys, net_shapes, reuse=False, name='decoder'):
        with tf.variable_scope(name, reuse=reuse):
            stack = Stacker(concat((zs, Ys), axis=1))

            for shape in net_shapes:
                stack.linear_block(shape, relu)

            stack.linear_block(self.X_flatten_size, sigmoid)
            stack.reshape(self.Xs_shape)

        return stack.last_layer
Example #12
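The Gaussian-code counterpart to Example #8, with hidden widths supplied via net_shapes.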
    def discriminator_gauss(self,
                            zs,
                            net_shapes,
                            reuse=False,
                            name='discriminator_gauss'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(zs)
            for shape in net_shapes:
                layer.linear_block(shape, relu)

            layer.linear(1)
            layer.sigmoid()

        return layer.last_layer
Example #13
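An InfoGAN-style Q network: two 128-unit ReLU blocks, then a 12-way head (10 + 2 code dimensions) returned both as raw logits and as a softmax.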
    def Q_function(self, X_gen, reuse=False):
        with tf.variable_scope('Q_function', reuse=reuse):
            layer = Stacker(X_gen)
            layer.linear_block(128, relu)
            layer.linear_block(128, relu)
            code_logit = layer.linear(10 + 2)
            code = layer.softmax()

        return code, code_logit
Example #14
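A configurable classifier over flattened inputs, returning both the raw logits and their softmax.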
    def classifier(self, Xs, net_shapes, name='classifier'):
        with tf.variable_scope(name):
            layer = Stacker(flatten(Xs))

            for net_shape in net_shapes:
                layer.linear_block(net_shape, relu)

            layer.linear(self.Y_size)
            logit = layer.last_layer
            h = softmax(logit)
        return logit, h
Example #15
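A two-block classifier with dropout after each block and a two-class output; two further blocks are left commented out.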
    def classifier(self, x, dropout_rate):
        with tf.variable_scope('classifier'):
            layer = Stacker(x)

            layer.linear_block(128, lrelu)
            layer.dropout(dropout_rate)

            layer.linear_block(128, relu)
            layer.dropout(dropout_rate)

            # layer.linear_block(32, lrelu)
            # layer.dropout(dropout_rate)
            #
            # layer.linear_block(16, lrelu)
            # layer.dropout(dropout_rate)

            layer.linear(2)
            logit = layer.last_layer

            h = softmax(logit)
        return logit, h
Example #16
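A convolutional generator, likely for 28x28 inputs: z is projected to a 7x7x128 map, upsampled twice (an upscale block, then a transposed convolution to the target shape), refined with a stride-1 convolution, and squashed with a sigmoid.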
    def generator(self, z, reuse=False):
        with tf.variable_scope('generator', reuse=reuse):
            layer = Stacker(z)
            layer.add_layer(linear, 7 * 7 * 128)
            layer.reshape([self.batch_size, 7, 7, 128])
            layer.upscale_2x_block(256, CONV_FILTER_5522, relu)
            layer.conv2d_transpose(self.Xs_shape, CONV_FILTER_5522)
            layer.conv2d(self.input_c, CONV_FILTER_3311)
            layer.sigmoid()

        return layer.last_layer
Example #17
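A three-branch inception classifier: each branch starts from the previous branch's strided conv output, stacks three inception layers, and is flattened; the three flattened outputs are concatenated and projected to self.label_size.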
    def CNN(self, input_):
        with tf.variable_scope('classifier'):
            layer = Stacker(input_, name='seq1')
            layer.add_layer(conv_block, 64, CONV_FILTER_5522, lrelu)
            size16 = layer.last_layer
            layer.add_layer(inception_layer, 32)
            layer.add_layer(inception_layer, 64)
            layer.add_layer(inception_layer, 128)
            layer.add_layer(tf.reshape, [self.batch_size, -1])

            layer2 = Stacker(size16, name='seq2')
            layer2.add_layer(conv_block, 128, CONV_FILTER_5522, lrelu)
            size8 = layer2.last_layer
            layer2.add_layer(inception_layer, 64)
            layer2.add_layer(inception_layer, 128)
            layer2.add_layer(inception_layer, 256)
            layer2.add_layer(tf.reshape, [self.batch_size, -1])

            layer3 = Stacker(size8, name='seq3')
            layer3.add_layer(conv_block, 256, CONV_FILTER_5522, lrelu)
            layer3.add_layer(inception_layer, 128)
            layer3.add_layer(inception_layer, 256)
            layer3.add_layer(inception_layer, 512)
            layer3.add_layer(tf.reshape, [self.batch_size, -1])

            merge = tf.concat(
                [layer.last_layer, layer2.last_layer, layer3.last_layer],
                axis=1)
            after_merge = Stacker(merge, name='after_merge')
            after_merge.add_layer(linear, self.label_size)

            logit = after_merge.last_layer
            h = softmax(logit)

        return logit, h
Example #18
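A DCGAN-style discriminator written with explicit conv2d, batch-norm, and leaky-ReLU layers, returning both the sigmoid output and the raw logit.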
    def discriminator(self, x, reuse=None, name='discriminator'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(x)
            layer.add_layer(conv2d, 64, CONV_FILTER_5522)
            layer.add_layer(bn)
            layer.add_layer(lrelu)

            layer.add_layer(conv2d, 128, CONV_FILTER_5522)
            layer.add_layer(bn)
            layer.add_layer(lrelu)

            layer.add_layer(conv2d, 256, CONV_FILTER_5522)
            layer.add_layer(bn)
            layer.add_layer(lrelu)

            layer.add_layer(conv2d, 256, CONV_FILTER_5522)
            layer.add_layer(bn)
            layer.add_layer(lrelu)

            layer.add_layer(tf.reshape, [self.batch_size, -1])
            out_logit = layer.add_layer(linear, 1)
            out = layer.add_layer(tf.sigmoid)

        return out, out_logit
Example #19
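A DCGAN-style generator: z is projected to a 4x4x512 map, upsampled through three transposed convolutions (8x8, 16x16, then 32x32), refined with a stride-1 convolution, and squashed with a sigmoid.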
    def generator(self, z, reuse=False, name='generator'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(z)
            layer.add_layer(linear, 4 * 4 * 512)
            layer.add_layer(tf.reshape, [self.batch_size, 4, 4, 512])

            layer.add_layer(conv2d_transpose, [self.batch_size, 8, 8, 256],
                            CONV_FILTER_7722)
            layer.add_layer(bn)
            layer.add_layer(relu)

            layer.add_layer(conv2d_transpose, [self.batch_size, 16, 16, 128],
                            CONV_FILTER_7722)
            layer.add_layer(bn)
            layer.add_layer(relu)

            layer.add_layer(conv2d_transpose,
                            [self.batch_size, 32, 32, self.input_c],
                            CONV_FILTER_7722)
            layer.add_layer(conv2d, self.input_c, CONV_FILTER_5511)
            layer.add_layer(tf.sigmoid)
            net = layer.last_layer

        return net
Example #20
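A standalone inception-style block with four branches (average pooling, one conv block, and two deeper conv stacks); note that out2, out3, and out4 are summed elementwise and only then concatenated with the pooled branch, unlike a standard Inception module, which concatenates all branches.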
def inception_layer(input_, channel_size, name='inception_layer'):
    with tf.variable_scope(name):
        with tf.variable_scope('out1'):
            layer = Stacker(input_)
            layer.add_layer(avg_pooling, CONV_FILTER_2211)
            out1 = layer.last_layer

        with tf.variable_scope('out2'):
            layer = Stacker(input_)
            layer.add_layer(conv_block, channel_size, CONV_FILTER_5511, lrelu)
            out2 = layer.last_layer

        with tf.variable_scope('out3'):
            layer = Stacker(input_)
            layer.add_layer(conv_block, channel_size, CONV_FILTER_5511, lrelu)
            layer.add_layer(conv_block, channel_size, CONV_FILTER_5511, relu)
            out3 = layer.last_layer

        with tf.variable_scope('out4'):
            layer = Stacker(input_)
            layer.add_layer(conv_block, channel_size, CONV_FILTER_5511, lrelu)
            layer.add_layer(conv_block, channel_size, CONV_FILTER_5511, lrelu)
            layer.add_layer(conv_block, channel_size, CONV_FILTER_5511, lrelu)
            out4 = layer.last_layer

        out = tf.concat([out1, out2 + out3 + out4], 3)

        return out
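
All of the examples above rely on a small Stacker helper that threads a last_layer tensor through successive ops. The snippets are not self-contained, so what follows is a minimal, hypothetical sketch of such a helper for TF 1.x, reconstructed only from the calls seen in the examples; the real class, the bare helpers (linear, conv_block, avg_pooling), and the CONV_FILTER_* constants may differ in the source repository.

import tensorflow as tf

class Stacker:
    """Minimal sketch: keeps the current head tensor in last_layer."""

    def __init__(self, start_layer, name='stacker'):
        self.name = name
        self.last_layer = start_layer

    def add_layer(self, fn, *args, **kwargs):
        # Apply fn(last_layer, *args, **kwargs) and advance the head.
        self.last_layer = fn(self.last_layer, *args, **kwargs)
        return self.last_layer

    def flatten(self):
        return self.add_layer(tf.layers.flatten)

    def linear(self, output_size):
        return self.add_layer(tf.layers.dense, output_size)

    def linear_block(self, output_size, activation):
        # Assumed to be linear + activation; the real helper may also
        # insert batch normalization.
        self.linear(output_size)
        return self.add_layer(activation)

    def reshape(self, shape):
        return self.add_layer(tf.reshape, shape)

    def sigmoid(self):
        return self.add_layer(tf.sigmoid)

    def softmax(self):
        return self.add_layer(tf.nn.softmax)

    def dropout(self, rate):
        # tf.nn.dropout takes a keep probability in TF 1.x.
        return self.add_layer(tf.nn.dropout, 1.0 - rate)

With this sketch, the purely dense examples (e.g. Example #1 or Example #3, given relu = tf.nn.relu and sigmoid = tf.sigmoid) run as written from a placeholder input; the convolutional helpers (conv_block, upscale_2x_block, conv2d_transpose, avg_pooling, inception_layer) are omitted here.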