def decode(z, **kwargs):
    z_interm = kwargs["z_interm"]
    n_out = kwargs["n_out"]
    out_interm = kwargs["out_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)

    z_t = fun(z,
              nout=z_interm,
              act=tf.nn.elu,
              name="z_transformed",
              weight_factor=weight_factor,
              layers_num=layers_num)

    output_t = fun(z_t,
                   nout=out_interm,
                   act=tf.nn.elu,
                   name="out_transform",
                   weight_factor=weight_factor,
                   layers_num=layers_num)
    post_logit = fun(output_t,
                     nout=n_out,
                     act=tf.identity,
                     name="out_mu",
                     weight_factor=weight_factor)
    return post_logit, z_t
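None of these snippets includes the definition of `fun`. Below is a minimal sketch of a compatible implementation, assuming `fun` concatenates its tensor inputs and stacks `layers_num` dense layers of width `nout`; the initializer scaling by `weight_factor` and the handling of `reuse` are guesses, and the `config` keyword accepted by some examples is ignored here.

import tensorflow as tf

def fun(*inputs, **kwargs):
    nout = kwargs["nout"]
    act = kwargs.get("act", tf.identity)
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)

    # Multi-input calls such as fun(x_t, h, ...) suggest feature-wise concatenation.
    h = tf.concat(list(inputs), 1) if len(inputs) > 1 else inputs[0]
    with tf.variable_scope(kwargs["name"], reuse=kwargs.get("reuse", False)):
        for l in range(layers_num):
            n_in = h.get_shape().as_list()[1]
            W = tf.get_variable("W{}".format(l), (n_in, nout),
                                initializer=tf.truncated_normal_initializer(
                                    stddev=0.1 * weight_factor))
            b = tf.get_variable("b{}".format(l), (nout,),
                                initializer=tf.zeros_initializer())
            h = act(tf.matmul(h, W) + b)
    return h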
Example #2
    def generate(self, *conditionals, **kwargs):
        reuse = kwargs.get("reuse", False)
        conditionals = list(reversed(list(conditionals)))
        config = kwargs["config"]

        x_t = fun(*conditionals,
                  nout=config.x_interm,
                  act=tf.nn.elu,
                  name="encode_to_{}".format(self._name),
                  layers_num=config.layers_num,
                  config=config,
                  reuse=reuse)

        rate_not_norm = fun(x_t,
                            nout=self._dim,
                            act=tf.identity,
                            name="{}_rate".format(self._name),
                            reuse=reuse,
                            config=config,
                            layers_num=1)
        rate = tf.nn.sigmoid(self._bias + rate_not_norm)
        U0 = tf.random_uniform((self._batch_size, self._dim))
        res = tf.where(rate > U0,
                       tf.ones((self._batch_size, self._dim)),
                       tf.zeros((self._batch_size, self._dim)),
                       name=self._name)

        return (DistrOutput(res, ds.Bernoulli(p=rate)), )
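The comparison `rate > U0` above is the standard uniform-threshold way of drawing Bernoulli samples: for U ~ Uniform(0, 1), P(U < rate) = rate. A quick NumPy check of that identity:

import numpy as np

rate = 0.3
u = np.random.uniform(size=100000)
samples = (rate > u).astype(np.float32)
print(samples.mean())  # approximately 0.3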
Example #3
def decode(z, **kwargs):
    z_interm = kwargs["z_interm"]
    input_dim = kwargs["input_dim"]
    out_interm = kwargs["out_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)

    z_t = fun(z, nout=z_interm, act=tf.nn.relu, name="z_transformed", weight_factor=weight_factor, layers_num=layers_num)

    output_t = fun(z_t, nout=out_interm, act=tf.nn.relu, name="out_transform", weight_factor=weight_factor, layers_num=layers_num)
    post_mu = fun(output_t, nout=input_dim, act=tf.identity, name="out_mu", weight_factor=weight_factor)
    post_sigma = fun(output_t, nout=input_dim, act=tf.nn.softplus, name="out_sigma", weight_factor=weight_factor)
    post_alpha = fun(output_t, nout=input_dim, act=tf.nn.softmax, name="out_alpha", weight_factor=weight_factor)
    return post_mu, post_sigma, post_alpha
Example #4
def encode(x, **kwargs):
    x_transformed = kwargs["x_transformed"]
    z_dim = kwargs["z_dim"]
    phi_interm = kwargs["phi_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)
    batch_size = x.get_shape().as_list()[0]

    x_t = fun(x, nout=x_transformed, act=tf.nn.relu, name="x_transformed", weight_factor=weight_factor, layers_num=layers_num)

    phi = fun(x_t, nout=phi_interm, act=tf.nn.relu, name="phi", weight_factor=weight_factor, layers_num=layers_num)
    z_mu = fun(phi, nout=z_dim, act=tf.identity, name="z_mu", weight_factor=weight_factor)
    z_sigma = fun(phi, nout=z_dim, act=tf.nn.softplus, name="z_sigma", weight_factor=weight_factor)

    # Reparameterization trick; softplus already makes z_sigma a valid stddev.
    epsilon = tf.random_normal((batch_size, z_dim), name='epsilon')
    z = z_mu + z_sigma * epsilon
    return z, z_mu, z_sigma
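The last two lines apply the reparameterization trick: writing z = z_mu + z_sigma * epsilon expresses the sample as a deterministic, differentiable function of the encoder outputs, so gradients can flow through z_mu and z_sigma. A NumPy check that the draws have the intended moments:

import numpy as np

z_mu, z_sigma = 1.5, 0.5
eps = np.random.normal(size=100000)
z = z_mu + z_sigma * eps
print(z.mean(), z.std())  # approximately 1.5 and 0.5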
Example #5

def decode(z, **kwargs):
    z_interm = kwargs["z_interm"]
    n_out = kwargs["n_out"]
    out_interm = kwargs["out_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)

    z_t = fun(z, nout=z_interm, act=tf.nn.elu, name="z_transformed", weight_factor=weight_factor, layers_num=layers_num)

    output_t = fun(z_t, nout=out_interm, act=tf.nn.elu, name="out_transform", weight_factor=weight_factor, layers_num=layers_num)
    post_logit = fun(output_t, nout=n_out, act=tf.identity, name="out_mu", weight_factor=weight_factor)
    return post_logit, z_t
Example #6
    def decode(self, *z):
        z_t = fun(*z,
                  nout=self._config.z_interm,
                  act=tf.nn.elu,
                  name="decode_z_transformed",
                  layers_num=self._config.layers_num,
                  config=self._config)
        post_mu = fun(z_t,
                      nout=self._config.output_dim,
                      act=tf.identity,
                      name="decode_out_mu",
                      layers_num=1,
                      config=self._config)
        return post_mu
Example #7
def decode(z, **kwargs):
    z_interm = kwargs["z_interm"]
    n_mix = kwargs["n_mix"]
    out_interm = kwargs["out_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)

    z_t = fun(z, nout=z_interm, act=tf.nn.relu, name="z_transformed", weight_factor=weight_factor, layers_num=layers_num)

    output_t = fun(z_t, nout=out_interm, act=tf.nn.relu, name="out_transform", weight_factor=weight_factor, layers_num=layers_num)
    post_mu = fun(output_t, nout=n_mix, act=tf.identity, name="out_mu", weight_factor=weight_factor)
    post_sigma = fun(output_t, nout=n_mix, act=tf.nn.softplus, name="out_sigma", weight_factor=weight_factor)
    post_alpha = fun(output_t, nout=n_mix, act=tf.nn.softmax, name="out_alpha", weight_factor=weight_factor)
    return post_mu, post_sigma, post_alpha, z_t
Example #8
    def generate(self, *conditionals, **kwargs):
        reuse = kwargs.get("reuse", False)
        conditionals = list(reversed(list(conditionals)))
        config = kwargs["config"]

        x_t = fun(*conditionals, nout=config.x_interm, act=tf.nn.elu, name="encode_to_{}".format(self._name), layers_num=config.layers_num, config=config, reuse=reuse)

        rate_not_norm = fun(x_t, nout=self._dim, act=tf.identity, name="{}_rate".format(self._name), reuse=reuse, config=config, layers_num=1)
        rate = tf.nn.sigmoid(self._bias + rate_not_norm)
        U0 = tf.random_uniform((self._batch_size, self._dim))
        res = tf.where(rate > U0, tf.ones((self._batch_size, self._dim)), tf.zeros((self._batch_size, self._dim)), name=self._name)

        return (
            DistrOutput(res, ds.Bernoulli(p=rate)),
        )
Example #9
def encode(x, h, generator, **kwargs):
    x_transformed = kwargs["x_transformed"]
    z_dim = kwargs["z_dim"]
    phi_interm = kwargs["phi_interm"]
    prior_interm = kwargs["prior_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)
    batch_size = x.get_shape().as_list()[0]
    
    x_t = fun(x, nout=x_transformed, act=tf.nn.relu, name="x_transformed", weight_factor=weight_factor, layers_num=layers_num)

    prior = fun(h, nout=prior_interm, act=tf.nn.relu, name="prior", weight_factor=weight_factor, layers_num=layers_num)
    prior_mu = fun(prior, nout=z_dim, act=tf.identity, name="prior_mu", weight_factor=weight_factor)
    prior_sigma = fun(prior, nout=z_dim, act=tf.nn.softplus, name="prior_sigma", weight_factor=weight_factor)

    phi = fun(x_t, h, nout=phi_interm, act=tf.nn.relu, name="phi", weight_factor=weight_factor, layers_num=layers_num)
    z_mu = fun(phi, nout=z_dim, act=tf.identity, name="z_mu", weight_factor=weight_factor)
    z_sigma = fun(phi, nout=z_dim, act=tf.nn.softplus, name="z_sigma", weight_factor=weight_factor)

    epsilon = tf.random_normal((batch_size, z_dim), name='epsilon')

    # Sample from the prior when generating, from the posterior when encoding.
    # The softplus heads already yield valid (positive) stddevs, so no exp is needed.
    z = tf.cond(
        generator,
        lambda: prior_mu + prior_sigma * epsilon,
        lambda: z_mu + z_sigma * epsilon
    )

    return z, z_mu, z_sigma, prior_mu, prior_sigma, x_t
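`generator` is evidently a scalar boolean tensor: tf.cond builds both branches but runs only one, so the same graph samples from the prior when generating and from the approximate posterior when encoding. A self-contained sketch of the same switch; the placeholder and shapes are illustrative only:

import tensorflow as tf

generator = tf.placeholder(tf.bool, shape=(), name="generator")
prior_mu, prior_sigma = tf.zeros((4, 2)), tf.ones((4, 2))
z_mu, z_sigma = tf.ones((4, 2)), 0.5 * tf.ones((4, 2))
epsilon = tf.random_normal((4, 2))
z = tf.cond(generator,
            lambda: prior_mu + prior_sigma * epsilon,  # generation path
            lambda: z_mu + z_sigma * epsilon)          # inference path

with tf.Session() as sess:
    print(sess.run(z, feed_dict={generator: True}))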
Example #10
    def discriminate(self, x, *latent, **kwargs):
        reuse = kwargs.get("reuse", False)

        a_t = fun(x,
                  *latent,
                  nout=self._config.a_interm,
                  act=tf.nn.elu,
                  name="discriminator_transformed",
                  layers_num=self._config.layers_num,
                  config=self._config,
                  reuse=reuse)
        a_out = fun(a_t,
                    nout=self._config.output_dim,
                    act=tf.identity,
                    name="discriminator_out",
                    reuse=reuse,
                    layers_num=1,
                    config=self._config)
        return a_out
Example #11
def encode(x, h, generator, **kwargs):
    x_transformed = kwargs["x_transformed"]
    z_dim = kwargs["z_dim"]
    phi_interm = kwargs["phi_interm"]
    prior_interm = kwargs["prior_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)
    batch_size = x.get_shape().as_list()[0]

    x_t = fun(x,
              nout=x_transformed,
              act=tf.nn.relu,
              name="x_transformed",
              weight_factor=weight_factor,
              layers_num=layers_num)

    prior = fun(h,
                nout=prior_interm,
                act=tf.nn.relu,
                name="prior",
                weight_factor=weight_factor,
                layers_num=layers_num)
    prior_mu = fun(prior,
                   nout=z_dim,
                   act=tf.identity,
                   name="prior_mu",
                   weight_factor=weight_factor)
    prior_sigma = fun(prior,
                      nout=z_dim,
                      act=tf.nn.softplus,
                      name="prior_sigma",
                      weight_factor=weight_factor)

    phi = fun(x_t,
              h,
              nout=phi_interm,
              act=tf.nn.relu,
              name="phi",
              weight_factor=weight_factor,
              layers_num=layers_num)
    z_mu = fun(phi,
               nout=z_dim,
               act=tf.identity,
               name="z_mu",
               weight_factor=weight_factor)
    z_sigma = fun(phi,
                  nout=z_dim,
                  act=tf.nn.softplus,
                  name="z_sigma",
                  weight_factor=weight_factor)

    epsilon = tf.random_normal((batch_size, z_dim), name='epsilon')

    # Prior sample when generating, posterior sample otherwise; the softplus
    # heads already yield valid (positive) stddevs, so no exp is needed.
    z = tf.cond(generator, lambda: prior_mu + prior_sigma * epsilon,
                lambda: z_mu + z_sigma * epsilon)

    return z, z_mu, z_sigma, prior_mu, prior_sigma, x_t
Example #12
    def generate(self, *conditionals, **kwargs):
        reuse = kwargs.get("reuse", False)
        conditionals = list(reversed(list(conditionals)))
        config = kwargs["config"]
        
        logging.info(
            "\t{}: Generating mu and sigma, conditioning on: {}".format(
                self._name,
                ", ".join(["{}".format(c.name) for c in conditionals])))
        
        x_t = fun(*conditionals, nout=config.x_interm, act=tf.nn.elu, name="encode_to_{}".format(self._name), layers_num=config.layers_num, config=config, reuse=reuse)

        mean = fun(x_t, nout=self._dim, act=tf.identity, name="{}_mean".format(self._name), reuse=reuse, config=config, layers_num=1)
        logvar = fun(x_t, nout=self._dim, act=tf.nn.softplus, name="{}_logvar".format(self._name), reuse=reuse, config=config, layers_num=1)

        N0 = tf.random_normal((self._batch_size, self._dim))
        stddev = tf.exp(0.5 * logvar)
        
        return (
            DistrOutput(tf.add(mean, stddev * N0, name=self._name), ds.Normal(mean, stddev)),
        )
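`exp(0.5 * logvar)` is the usual log-variance-to-standard-deviation map: if logvar = log(sigma^2), then exp(0.5 * logvar) = sigma. (As an aside, the softplus activation on the `logvar` head keeps logvar positive and hence stddev > 1; a linear head is the more common choice.) A quick numeric check:

import numpy as np

sigma = 0.7
logvar = np.log(sigma ** 2)
print(np.exp(0.5 * logvar))  # approximately 0.7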
Example #13
    def generate(self, *conditionals, **kwargs):
        reuse = kwargs.get("reuse", False)
        conditionals = list(reversed(list(conditionals)))
        config = kwargs["config"]

        logging.info(
            "\t{}: Generating mu and sigma, conditioning on: {}".format(
                self._name,
                ", ".join(["{}".format(c.name) for c in conditionals])))

        x_t = fun(*conditionals,
                  nout=config.x_interm,
                  act=tf.nn.elu,
                  name="encode_to_{}".format(self._name),
                  layers_num=config.layers_num,
                  config=config,
                  reuse=reuse)

        mean = fun(x_t,
                   nout=self._dim,
                   act=tf.identity,
                   name="{}_mean".format(self._name),
                   reuse=reuse,
                   config=config,
                   layers_num=1)
        logvar = fun(x_t,
                     nout=self._dim,
                     act=tf.nn.softplus,
                     name="{}_logvar".format(self._name),
                     reuse=reuse,
                     config=config,
                     layers_num=1)

        N0 = tf.random_normal((self._batch_size, self._dim))
        stddev = tf.exp(0.5 * logvar)

        return (DistrOutput(tf.add(mean, stddev * N0, name=self._name),
                            ds.Normal(mean, stddev)), )
Example #14
def decode(z, **kwargs):
    z_interm = kwargs["z_interm"]
    n_mix = kwargs["n_mix"]
    out_interm = kwargs["out_interm"]
    weight_factor = kwargs.get("weight_factor", 1.0)
    layers_num = kwargs.get("layers_num", 1)

    z_t = fun(z,
              nout=z_interm,
              act=tf.nn.relu,
              name="z_transformed",
              weight_factor=weight_factor,
              layers_num=layers_num)

    output_t = fun(z_t,
                   nout=out_interm,
                   act=tf.nn.relu,
                   name="out_transform",
                   weight_factor=weight_factor,
                   layers_num=layers_num)
    post_mu = fun(output_t,
                  nout=n_mix,
                  act=tf.identity,
                  name="out_mu",
                  weight_factor=weight_factor)
    post_sigma = fun(output_t,
                     nout=n_mix,
                     act=tf.nn.softplus,
                     name="out_sigma",
                     weight_factor=weight_factor)
    post_alpha = fun(output_t,
                     nout=n_mix,
                     act=tf.nn.softmax,
                     name="out_alpha",
                     weight_factor=weight_factor)
    return post_mu, post_sigma, post_alpha, z_t
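The three heads above parameterize an `n_mix`-component Gaussian mixture: softplus keeps the standard deviations positive, softmax makes the weights sum to one, and the density is p(x) = sum_k alpha_k * N(x; mu_k, sigma_k). A NumPy evaluation of that density, for illustration:

import numpy as np

def mixture_pdf(x, mu, sigma, alpha):
    comp = np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    return np.sum(alpha * comp)

print(mixture_pdf(0.0,
                  mu=np.array([-1.0, 1.0]),
                  sigma=np.array([0.5, 0.5]),
                  alpha=np.array([0.5, 0.5])))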
Example #15

    def __call__(self, x, h, scope=None):
        batch_size = x.get_shape().as_list()[0]

        n_out = self.config["n_out"]
        x_transformed = self.config["x_transformed"]
        weight_factor = self.config.get("weight_factor", 1.0)
        layers_num = self.config.get("layers_num", 1)

        z, z_mu, z_sigma, prior_mu, prior_sigma, x_t = encode(
            x, h, self._generator, **self.config)
        post_logit, z_t = decode(z, **self.config)

        epsilon_gen = tf.random_uniform((batch_size, n_out),
                                        name='epsilon_gen')

        # Bernoulli sample: 1 where the uniform draw falls below sigmoid(post_logit).
        x_sampled = tf.cast(tf.less(epsilon_gen, tf.nn.sigmoid(post_logit)),
                            post_logit.dtype)

        x_t_gen = fun(x_sampled,
                      nout=x_transformed,
                      act=tf.nn.elu,
                      name="x_transformed",
                      weight_factor=weight_factor,
                      layers_num=layers_num,
                      reuse=True)

        # x_c = tf.cond(
        #     self._generator,
        #     lambda: tf.concat_v2([x_t_gen, z_t], 1),
        #     lambda: tf.concat_v2([x_t, z_t], 1)
        # )
        x_c = tf.cond(
            self._generator,
            lambda: x_t_gen,
            lambda: x_t,
        )
        # x_c = tf.concat_v2([x_t, z_t], 1)
        _, new_h = self._base_cell(x_c, h)

        return VAEOutputTuple(prior_mu, prior_sigma, z_mu, z_sigma,
                              post_logit), new_h
Example #16
    def __call__(self, x, h, scope=None):
        batch_size = x.get_shape().as_list()[0]

        n_mix = self.config["n_mix"]
        x_transformed = self.config["x_transformed"]
        weight_factor = self.config.get("weight_factor", 1.0)
        layers_num = self.config.get("layers_num", 1)

        z, z_mu, z_sigma, prior_mu, prior_sigma, x_t = encode(
            x, h, self._generator, **self.config)
        post_mu, post_sigma, post_alpha, z_t = decode(z, **self.config)

        epsilon_gen = tf.random_normal((batch_size, n_mix), name='epsilon_gen')
        # Alpha-weighted average of per-component samples (see the note after
        # this example); keep_dims retains the reduced axis, so x_sampled stays 2-D.
        x_sampled = tf.reduce_sum(post_alpha *
                                  (post_mu + epsilon_gen * post_sigma),
                                  1,
                                  keep_dims=True)

        x_t_gen = fun(x_sampled,
                      nout=x_transformed,
                      act=tf.nn.relu,
                      name="x_transformed",
                      weight_factor=weight_factor,
                      layers_num=layers_num,
                      reuse=True)

        # x_c = tf.cond(
        #     self._generator,
        #     lambda: tf.concat_v2([x_t_gen, z_t], 1),
        #     lambda: tf.concat_v2([x_t, z_t], 1)
        # )
        x_c = tf.cond(
            self._generator,
            lambda: x_t_gen,
            lambda: x_t,
        )
        # x_c = tf.concat_v2([x_t, z_t], 1)
        _, new_h = self._base_cell(x_c, h)

        return VAEOutputTuple(prior_mu, prior_sigma, z_mu, z_sigma, post_mu,
                              post_sigma, post_alpha), new_h
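As the comment above notes, `x_sampled` is an alpha-weighted average of per-component samples, which is not the same as drawing from the mixture. For contrast, a true mixture draw picks one component according to alpha and then samples from it (NumPy sketch; this is not what the cell does):

import numpy as np

def sample_mixture(mu, sigma, alpha, rng=np.random):
    k = rng.choice(len(alpha), p=alpha)  # pick a component by weight
    return rng.normal(mu[k], sigma[k])   # sample from that component

print(sample_mixture(np.array([-1.0, 1.0]),
                     np.array([0.5, 0.5]),
                     np.array([0.3, 0.7])))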
Example #17
    def __call__(self, x, h, scope=None):
        batch_size = x.get_shape().as_list()[0]
    
        n_mix = self.config["n_mix"]
        x_transformed = self.config["x_transformed"]
        weight_factor = self.config.get("weight_factor", 1.0)
        layers_num = self.config.get("layers_num", 1)

        z, z_mu, z_sigma, prior_mu, prior_sigma, x_t = encode(x, h, self._generator, **self.config)
        post_mu, post_sigma, post_alpha, z_t = decode(z, **self.config)

        epsilon_gen = tf.random_normal((batch_size, n_mix), name='epsilon_gen')
        # Alpha-weighted average of per-component samples, not a true mixture draw.
        x_sampled = tf.reduce_sum(post_alpha * (post_mu + epsilon_gen * post_sigma), 1, keep_dims=True)

        x_t_gen = fun(x_sampled, nout=x_transformed, act=tf.nn.relu, name="x_transformed",
                      weight_factor=weight_factor, layers_num=layers_num, reuse=True)

        # x_c = tf.cond(
        #     self._generator,
        #     lambda: tf.concat_v2([x_t_gen, z_t], 1),
        #     lambda: tf.concat_v2([x_t, z_t], 1)
        # )
        x_c = tf.cond(
            self._generator,
            lambda: x_t_gen,
            lambda: x_t,
        )
        # x_c = tf.concat_v2([x_t, z_t], 1)
        _, new_h = self._base_cell(x_c, h)

        return VAEOutputTuple(
            prior_mu,
            prior_sigma,
            z_mu,
            z_sigma,
            post_mu, 
            post_sigma, 
            post_alpha), new_h
Example #18

    def __call__(self, x, h, scope=None):
        batch_size = x.get_shape().as_list()[0]
    
        n_out = self.config["n_out"]
        x_transformed = self.config["x_transformed"]
        weight_factor = self.config.get("weight_factor", 1.0)
        layers_num = self.config.get("layers_num", 1)

        z, z_mu, z_sigma, prior_mu, prior_sigma, x_t = encode(x, h, self._generator, **self.config)
        post_logit, z_t = decode(z, **self.config)

        epsilon_gen = tf.random_uniform((batch_size, n_out), name='epsilon_gen')

        # Bernoulli sample: 1 where the uniform draw falls below sigmoid(post_logit).
        x_sampled = tf.cast(tf.less(epsilon_gen, tf.nn.sigmoid(post_logit)), post_logit.dtype)

        x_t_gen = fun(x_sampled, nout=x_transformed, act=tf.nn.elu, name="x_transformed",
                      weight_factor=weight_factor, layers_num=layers_num, reuse=True)

        # x_c = tf.cond(
        #     self._generator,
        #     lambda: tf.concat_v2([x_t_gen, z_t], 1),
        #     lambda: tf.concat_v2([x_t, z_t], 1)
        # )
        x_c = tf.cond(
            self._generator,
            lambda: x_t_gen,
            lambda: x_t,
        )
        # x_c = tf.concat_v2([x_t, z_t], 1)
        _, new_h = self._base_cell(x_c, h)

        return VAEOutputTuple(
            prior_mu,
            prior_sigma,
            z_mu,
            z_sigma,
            post_logit), new_h
Example #19
def function():
    response = {'str': util.fun()}
    return response
Example #20
    def discriminate(self, x, *latent, **kwargs):
        reuse = kwargs.get("reuse", False)

        a_t = fun(x, *latent, nout=self._config.a_interm, act=tf.nn.elu, name="discriminator_transformed", layers_num=self._config.layers_num, config=self._config, reuse=reuse)
        a_out = fun(a_t, nout=self._config.output_dim, act=tf.identity, name="discriminator_out", reuse=reuse, layers_num=1, config=self._config)
        return a_out
Example #21
    def decode(self, *z):
        z_t = fun(*z, nout=self._config.z_interm, act=tf.nn.elu, name="decode_z_transformed", layers_num=self._config.layers_num, config=self._config)
        post_mu = fun(z_t, nout=self._config.output_dim, act=tf.identity, name="decode_out_mu", layers_num=1, config=self._config)
        return post_mu
Example #22
    def __call__(self, x, h, scope=None):
        # Hyperparameters pulled from self.config, following the other cell examples.
        z_dim = self.config["z_dim"]
        x_transformed = self.config["x_transformed"]
        prior_interm = self.config["prior_interm"]
        phi_interm = self.config["phi_interm"]
        z_interm = self.config["z_interm"]
        out_interm = self.config["out_interm"]
        weight_factor = self.config.get("weight_factor", 1.0)
        layers_num = self.config.get("layers_num", 1)

        x_t = fun(x, nout=x_transformed, act=tf.nn.tanh, name="x_transformed", weight_factor=weight_factor, layers_num=layers_num)

        # prior, depends only on state
        prior_t = fun(h, nout=prior_interm, act=tf.nn.tanh, name="prior", weight_factor=weight_factor, layers_num=layers_num)
        prior_mu_t = fun(prior_t, nout=z_dim, act=tf.identity, name="prior_mu", weight_factor=weight_factor)
        prior_sigma_t = fun(prior_t, nout=z_dim, act=tf.nn.softplus, name="prior_sigma", weight_factor=weight_factor)

        # phi (approximate posterior), conditioned on the input and the state
        phi_t = fun(x_t, h, nout=phi_interm, act=tf.nn.tanh, name="phi", weight_factor=weight_factor, layers_num=layers_num)
        phi_mu_t = fun(phi_t, nout=z_dim, act=tf.identity, name="phi_mu", weight_factor=weight_factor)
        phi_sigma_t = fun(phi_t, nout=z_dim, act=tf.nn.softplus, name="phi_sigma", weight_factor=weight_factor)

        # z generation; the noise must match the (batch_size, z_dim) shape of the heads
        epsilon = tf.random_normal(tf.shape(phi_mu_t), name='epsilon')

        if not self._generator:
            z = phi_mu_t + phi_sigma_t * epsilon
        else:
            z = prior_mu_t + prior_sigma_t * epsilon

        z_t = fun(z, nout=z_interm, act=tf.nn.tanh, name="z_transformed", weight_factor=weight_factor, layers_num=layers_num)

        output_t = fun(z_t, h, nout=out_interm, act=tf.nn.tanh, name="out_transform", weight_factor=weight_factor, layers_num=layers_num)
        output_mu = fun(output_t, nout=1, act=tf.identity, name="out_mu", weight_factor=weight_factor)
        # output_sigma = fun(output_t, nout=1, act=tf.nn.softplus, name="out_sigma", weight_factor=weight_factor)

        if not self._generator:
            x_c = tf.concat([x_t, z_t], 1)
        else:
            # Reuse the "x_transformed" weights to embed the generated output.
            x_t_gen = fun(output_mu, nout=x_transformed, act=tf.nn.tanh, name="x_transformed", weight_factor=weight_factor, layers_num=layers_num, reuse=True)
            x_c = tf.concat([x_t_gen, z_t], 1)

        _, new_h = self._base_cell(x_c, h)

        return VAEOutputTuple(
            prior_mu_t,
            prior_sigma_t,
            phi_mu_t,
            phi_sigma_t,
            z,
            z_t,
            output_mu), new_h
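All of these `__call__(x, h)` methods follow the TF1 RNN-cell convention of mapping (input, state) to (output, new state), so such a cell can be unrolled over a sequence by hand. A minimal sketch with a stock GRUCell standing in for the VAE cell:

import tensorflow as tf

batch_size, seq_len, dim = 4, 10, 1
inputs = tf.random_normal((batch_size, seq_len, dim))
cell = tf.nn.rnn_cell.GRUCell(16)  # stand-in for the VAE cell
state = cell.zero_state(batch_size, tf.float32)

outputs = []
for t in range(seq_len):
    # Reusing the scope after the first step shares weights across time.
    with tf.variable_scope("unroll", reuse=(t > 0)):
        out, state = cell(inputs[:, t, :], state)
    outputs.append(out)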