Example #1
    def model(X):  # assumes MNIST, flattened to shape (n, d).
        n, d = X.shape

        # The choice of latent distribution differs between MNIST and CIFAR in the original article.
        #g = invtf.Generator(latent.Logistic())
        g = invtf.Generator(latent.Normal())

        # Pre-process steps.
        g.add(UniformDequantize(input_shape=[d]))
        g.add(Normalize(input_shape=[d]))

        # Build model using additive coupling layers.
        for i in range(0, 8):

            ac = AdditiveCoupling(part=i % 2, strategy=EvenOddStrategy())
            ac.add(
                Dense(2000,
                      activation="relu",
                      bias_initializer="zeros",
                      kernel_initializer="zeros"))
            ac.add(
                Dense(2000,
                      activation="relu",
                      bias_initializer="zeros",
                      kernel_initializer="zeros"))
            ac.add(
                Dense(2000,
                      activation="relu",
                      bias_initializer="zeros",
                      kernel_initializer="zeros"))
            ac.add(
                Dense(2000,
                      activation="relu",
                      bias_initializer="zeros",
                      kernel_initializer="zeros"))
            ac.add(Dense(d // 2, bias_initializer="zeros"))

            g.add(ac)

        g.add(Affine(exp=True))

        g.compile(optimizer=keras.optimizers.Adam(
            0.001, beta_1=0.9, beta_2=0.01, epsilon=10**(-4)))

        g.init(X[:100])

        return g
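A minimal driver sketch for this NICE-style example, assuming flattened MNIST input; the commented-out g.fit call is an assumption (Generator appears to subclass keras.Model, but the exact training entry point is not shown in this snippet):

    from tensorflow import keras

    (X, _), _ = keras.datasets.mnist.load_data()     # (60000, 28, 28) uint8
    X = X.reshape(-1, 28 * 28).astype("float32")     # flatten to (n, d), as model() expects

    g = model(X)
    # g.fit(X, batch_size=128, epochs=1)  # assumed API; Generator seems to extend keras.Model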
Example #2
    def model(X, verbose=False):

        default_initializer = keras.initializers.RandomNormal(stddev=0.05)
        width = 128
        c = 12

        input_shape = X.shape[1:]
        d = np.prod(input_shape)

        g = invtf.Generator(latent.Normal(d))

        # Pre-process steps.
        #g.add(UniformDequantize(input_shape=input_shape))

        g.add(keras.layers.InputLayer(input_shape=input_shape))

        # Build model
        g.add(Squeeze())

        vardeq = VariationalDequantize()

        for j in range(2):
            ac = AffineCoupling(part=j % 2, strategy=SplitChannelsStrategy())
            ac.add(
                Conv2D(width,
                       kernel_size=(3, 3),
                       activation="relu",
                       padding="SAME",
                       kernel_initializer=default_initializer,
                       bias_initializer="zeros"))
            ac.add(
                Conv2D(width,
                       kernel_size=(1, 1),
                       activation="relu",
                       padding="SAME",
                       kernel_initializer=default_initializer,
                       bias_initializer="zeros"))
            ac.add(
                Conv2D(c,
                       kernel_size=(3, 3),
                       padding="SAME",
                       kernel_initializer="zeros",
                       bias_initializer="ones")
            )  # the original Glow code adds 2 here and applies a sigmoid (see the sketch after this example).

            vardeq.add(ActNorm())
            #vardeq.add(ac)  # with this enabled, the loss starts around 8, which seems far too high.

        g.add(vardeq)  # print all loss constituents of this?

        g.add(Normalize(input_shape=input_shape))

        for i in range(0, 2):

            for j in range(2):

                g.add(ActNorm())
                #g.add(Conv3DCirc())
                #g.add(Inv1x1Conv())

                ac = AffineCoupling(part=j % 2,
                                    strategy=SplitChannelsStrategy())

                ac.add(
                    Conv2D(width,
                           kernel_size=(3, 3),
                           activation="relu",
                           padding="SAME",
                           kernel_initializer=default_initializer,
                           bias_initializer="zeros"))
                ac.add(
                    Conv2D(width,
                           kernel_size=(1, 1),
                           activation="relu",
                           padding="SAME",
                           kernel_initializer=default_initializer,
                           bias_initializer="zeros"))
                ac.add(
                    Conv2D(c,
                           kernel_size=(3, 3),
                           padding="SAME",
                           kernel_initializer="zeros",
                           bias_initializer="ones")
                )  # the original Glow code adds 2 here and applies a sigmoid.

                #g.add(Inv1x1Conv())
                g.add(ac)

            #g.add(Squeeze())
            #c = c * 4

            #g.add(MultiScale())  # passes half of the channels directly to the output; for simplicity, split off half of the channels.
            #d = d//2

        g.compile(optimizer=keras.optimizers.Adam(0.001))

        g.init_actnorm(X[:1000])  # how much does this change the loss?

        if verbose:
            for layer in g.layers:
                if isinstance(layer, AffineCoupling): layer.summary()

            for layer in g.layers:
                if isinstance(layer, VariationalDequantize): layer.summary()

        return g
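The "adds 2 here and applies a sigmoid" comments above refer to how the original Glow code turns the final convolution's output into the shift and scale of an affine coupling; a rough sketch of that trick follows (illustrative names, not the invtf API):

    import tensorflow as tf

    def glow_shift_and_scale(h):
        # h: output of the last Conv2D, with twice as many channels as the part being transformed.
        shift = h[..., 0::2]
        scale = tf.sigmoid(h[..., 1::2] + 2.0)   # +2 keeps the initial scale near sigmoid(2) ~ 0.88
        return shift, scale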
Example #3
    def model(X, verbose=False):

        # Glow details: https://github.com/openai/glow/blob/master/model.py#L376
        default_initializer = keras.initializers.RandomNormal(stddev=0.05)
        width = 128  # width in Glow is 512, but we lowered it for speed; how much does this hurt performance?
        c = X.shape[-1]

        input_shape = X.shape[1:]
        d = np.prod(input_shape)

        g = invtf.Generator(latent.Normal())

        # Pre-process steps.
        g.add(keras.layers.InputLayer(input_shape=input_shape))
        g.add(UniformDequantize(input_shape=input_shape))
        g.add(Normalize(input_shape=input_shape))

        # Build model using additive coupling layers.
        g.add(Squeeze())
        c = 4 * c

        for i in range(0, 2):

            for j in range(1):

                g.add(ActNorm())
                #g.add(Inv1x1Conv())

                ac = AffineCoupling(part=j % 2,
                                    strategy=SplitChannelsStrategy())

                ac.add(
                    Conv2D(width,
                           kernel_size=(3, 3),
                           activation="relu",
                           padding="SAME",
                           kernel_initializer=default_initializer,
                           bias_initializer="zeros"))
                ac.add(
                    Conv2D(width,
                           kernel_size=(1, 1),
                           activation="relu",
                           padding="SAME",
                           kernel_initializer=default_initializer,
                           bias_initializer="zeros"))
                ac.add(
                    Conv2D(c,
                           kernel_size=(3, 3),
                           padding="SAME",
                           kernel_initializer="zeros",
                           bias_initializer="ones")
                )  # the original Glow code adds 2 here and applies a sigmoid (see the sketch after Example #2).

                g.add(ac)

            #g.add(Squeeze())
            #c = c * 4

            #g.add(MultiScale())  # passes half of the channels directly to the output; for simplicity, split off half of the channels.
            #d = d//2

        g.compile(optimizer=keras.optimizers.Adam(0.001))

        g.init(X[:1000])  # initialize ActNorm data-dependently.

        if verbose:
            for layer in g.layers:
                if isinstance(layer, AffineCoupling): layer.summary()

            for layer in g.layers:
                if isinstance(layer, VariationalDequantize): layer.summary()

        return g
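A possible driver for this Glow-like example, assuming image-shaped input such as CIFAR-10; dequantization and normalization are already part of the model, so raw pixel values are passed in (the data loading below is illustrative):

    from tensorflow import keras

    (X, _), _ = keras.datasets.cifar10.load_data()   # (50000, 32, 32, 3) uint8
    X = X.astype("float32")

    g = model(X, verbose=True)   # builds the flow and prints the coupling-layer summaries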
Example #4
    def model(X):
        input_shape = X.shape[1:]
        d = np.prod(input_shape)
        c = X.shape[-1]

        g = invtf.Generator(latent.Normal())

        # Pre-process steps.
        g.add(UniformDequantize(input_shape=input_shape))
        g.add(Normalize(input_shape=input_shape))

        # Build model using additive coupling layers.
        g.add(Squeeze())
        c = c * 4

        strategy = SplitChannelsStrategy()

        for i in range(0, 3):
            for j in range(2):

                g.add(ActNorm())  # not in the RealNVP model.
                ac = AffineCoupling(part=j % 2, strategy=strategy)
                ac.add(
                    Conv2D(filters=64,
                           kernel_size=3,
                           padding="SAME",
                           bias_initializer="ones",
                           kernel_initializer="zeros"))
                ac.add(
                    Conv2D(filters=512,
                           kernel_size=3,
                           padding="SAME",
                           bias_initializer="ones",
                           kernel_initializer="zeros"))
                ac.add(
                    Conv2D(filters=c,
                           kernel_size=3,
                           padding="SAME",
                           bias_initializer="ones",
                           kernel_initializer="zeros"))

                g.add(ac)

            for j in range(2):
                g.add(ActNorm())  # not in the RealNVP model.

                ac = AffineCoupling(part=j % 2, strategy=strategy)
                ac.add(Flatten())
                ac.add(Dense(200, activation="relu"))
                ac.add(Dense(200, activation="relu"))
                ac.add(
                    Dense(d,
                          bias_initializer="ones",
                          kernel_initializer="zeros"))

                g.add(ac)

            g.add(Squeeze())
            c = c * 4

            g.add(MultiScale())  # passes half of the channels directly to the output; for simplicity, split off half of the channels.
            d = d // 2
            c = c // 2

        ac = AffineCoupling(part=j % 2, strategy=strategy)  # j keeps its value from the last inner loop iteration.
        ac.add(Flatten())
        ac.add(Dense(200, activation="relu"))
        ac.add(Dense(200, activation="relu"))
        ac.add(Dense(d, bias_initializer="ones", kernel_initializer="zeros"))

        g.add(ac)

        g.compile(optimizer=keras.optimizers.Adam(0.0001))

        g.init(X[:1000])

        ac.summary()

        return g
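For orientation, a hand calculation of the shapes this example produces for a 32x32x3 input, following the c and d updates above (a sketch; not verified against invtf's Squeeze/MultiScale internals):

    # Shape bookkeeping for a 32x32x3 input.
    hw, c, d = 32, 3, 32 * 32 * 3
    hw, c = hw // 2, c * 4                       # initial Squeeze: 16x16x12
    for i in range(3):
        print(f"scale {i}: couplings see {hw}x{hw}x{c} ({d} dims)")
        hw, c = hw // 2, c * 4                   # Squeeze inside the loop
        d, c = d // 2, c // 2                    # MultiScale sends half of the dims to the output
    print(f"final dense coupling sees {d} flattened dims")  # 384 for this input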
Example #5
    def __init__(self, latent=latent.Normal()):
        """
			
		"""
        super(Generator, self).__init__()
        self.latent = latent
Example #6
    def __init__(self, latent=latent.Normal(28**2)):  # default latent dimension 28**2 = 784, i.e. flattened MNIST.
        self.latent = latent

        super(Generator, self).__init__()