Example #1
import sugartensor as tf

# MNIST input tensor ( with QueueRunner );
# batch_size, cat_dim, con_dim and rand_dim are hyper-parameters defined elsewhere
data = tf.sg_data.Mnist(batch_size=batch_size)

# input images and label
x = data.train.image
y = data.train.label

# labels for discriminator
y_real = tf.ones(batch_size)
y_fake = tf.zeros(batch_size)

# class labels for the discriminator input ( real labels for the real half, zeros for the generated half )
y_disc = tf.concat([y, y * 0], 0)

# categorical latent variable: one class id per row, sampled from a
# uniform distribution over cat_dim categories
z_cat = tf.multinomial(
    tf.ones((batch_size, cat_dim), dtype=tf.sg_floatx) / cat_dim,
    1).sg_squeeze().sg_int()
# continuous latent variable
z_con = tf.random_normal((batch_size, con_dim))
# unstructured random latent variable
z_rand = tf.random_normal((batch_size, rand_dim))
# latent variable
z = tf.concat([z_cat.sg_one_hot(depth=cat_dim), z_con, z_rand], 1)

#
# Computational graph
#

# generator network ( the generator() builder is defined elsewhere in the example )
gen = generator(z)
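
The snippet ends at the generator. A minimal sketch of how the labels above would typically drive the losses, assuming a discriminator() builder (not shown here) and sugartensor's sg_bce loss:

# --- sketch only: discriminator() and the loss wiring are assumptions ---
disc_real = discriminator(x)    # judge real images
disc_fake = discriminator(gen)  # judge generated images
                                # ( assumes discriminator() reuses its variables )
loss_d = (disc_real.sg_bce(target=y_real) +
          disc_fake.sg_bce(target=y_fake))    # discriminator: real -> 1, fake -> 0
loss_g = disc_fake.sg_bce(target=y_real)      # generator: make fakes look real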
Example #2
import sugartensor as tf
from sugartensor.sg_data import Mnist

# MNIST input tensor ( with QueueRunner );
# batch_size, num_category, num_dim and num_cont are hyper-parameters defined elsewhere
data = Mnist(batch_size=batch_size)
# input images
x = data.train.image
# generator labels ( all ones )
y = tf.ones(batch_size, dtype=tf.sg_floatx)
# discriminator labels ( half 1s, half 0s )
y_disc = tf.concat([y, y * 0], 0)
#
# create generator
#
# random class number
z_cat = tf.multinomial(
    tf.ones((batch_size, num_category), dtype=tf.sg_floatx) / num_category,
    1).sg_squeeze().sg_int()
# random seed = random categorical variable + random uniform
z = z_cat.sg_one_hot(depth=num_category).sg_concat(
    target=tf.random_uniform((batch_size, num_dim - num_category)))
# random continuous variable
z_cont = z[:, num_category:num_category + num_cont]
# category labels ( real labels for the real half, sampled categories for the generated half )
label = tf.concat([data.train.label, z_cat], 0)
# generator network
with tf.sg_context(name='generator', size=4, stride=2, act='relu', bn=True):
    gen = (z.sg_dense(dim=1024)
           .sg_dense(dim=7*7*128)
           .sg_reshape(shape=(-1, 7, 7, 128))
           .sg_upconv(dim=64)
           .sg_upconv(dim=1, act='sigmoid', bn=False))
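
The example cuts off at the generator. As a rough sketch, y_disc and label would feed a two-headed discriminator along these lines (the layer sizes and the two-head layout are assumptions, not part of the original):

# --- sketch only: the discriminator layout below is an assumption ---
xx = tf.concat([x, gen], 0)   # real batch stacked on top of the generated batch
with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu'):
    shared = (xx.sg_conv(dim=64)
                .sg_conv(dim=128)
                .sg_flatten()
                .sg_dense(dim=1024))
disc = shared.sg_dense(dim=1).sg_squeeze()   # real/fake logit per image
cat = shared.sg_dense(dim=num_category)      # class logits per image
loss_d = disc.sg_bce(target=y_disc)          # adversarial loss ( half 1s, half 0s )
loss_c = cat.sg_ce(target=label)             # categorical loss on real + sampled labels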
Example #3
import sugartensor as tf

# time-series input tensor ( TimeSeriesData is a project-specific loader defined elsewhere );
# batch_size, num_category, num_dim and num_cont are hyper-parameters defined elsewhere
data = TimeSeriesData(batch_size=batch_size)
x = data.X

# generator labels ( all ones )
y = tf.ones(batch_size, dtype=tf.sg_floatx)

# discriminator labels ( half 1s, half 0s )
y_disc = tf.concat([y, y * 0], 0)


#
# create generator
#

# random class number
z_cat = tf.multinomial(
    tf.ones((batch_size, num_category), dtype=tf.sg_floatx) / num_category,
    1).sg_squeeze()

# random seed = random categorical variable + random uniform
z = z_cat.sg_one_hot(depth=num_category).sg_concat(
    target=tf.random_uniform((batch_size, num_dim - num_category)))

# random continuous variable
z_cont = z[:, num_category:num_category + num_cont]

# generator network
with tf.sg_context(name='generator', size=(4, 1), stride=(2, 1), act='relu', bn=True):
    gen = (z.sg_dense(dim=1024)
           .sg_dense(dim=48*1*128)
           .sg_reshape(shape=(-1, 48, 1, 128))
           .sg_upconv(dim=64)
           .sg_upconv(dim=32)
           .sg_upconv(dim=2, act='sigmoid', bn=False))
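
After training (for example with sugartensor's tf.sg_train), sampling new series is a single forward pass through gen. A minimal sketch, assuming sugartensor's default checkpoint directory 'asset/train':

# --- sketch only: checkpoint path and session handling are assumptions ---
with tf.Session() as sess:
    tf.sg_init(sess)
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))
    samples = sess.run(gen)   # (batch_size, 384, 1, 2) generated sequences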
Example #4
# excerpt from a class __init__ ; assumes `import sugartensor as tf` at module level
    def __init__(self,
                 x,
                 y,
                 num_batch,
                 vocab_size,
                 emb_dim,
                 hidden_dim,
                 max_ep=240,
                 infer_shape=(1, 1),
                 mode="train"):

        self.num_batch = num_batch
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.max_len_infer = 512
        self.max_ep = max_ep

        # reuse = len([t for t in tf.global_variables() if t.name.startswith('gen')]) > 0
        reuse = (mode == 'infer')

        if mode == "train":
            self.x = x
            self.y = y
        elif mode == "infer":
            self.x = tf.placeholder(tf.int32, shape=infer_shape)
            self.y = tf.placeholder(tf.int32, shape=infer_shape)

        with tf.variable_scope("gen_embs", reuse=reuse):
            self.emb_x = tf.get_variable("emb_x",
                                         [self.vocab_size, self.emb_dim])
            self.emb_y = tf.get_variable("emb_y",
                                         [self.vocab_size, self.emb_dim])
            self.X = tf.nn.embedding_lookup(self.emb_x, self.x)
            self.Y = tf.nn.embedding_lookup(self.emb_y, self.y)

        with tf.sg_context(name='gen', reuse=reuse):
            #     self.emb_x = tf.Variable(tf.random_uniform([self.vocab_size, self.emb_dim], 0.0, 1.0), name="emb_x")
            #     self.emb_y = tf.Variable(tf.random_uniform([self.vocab_size, self.emb_dim], 0.0, 1.0), name="emb_y")
            # self.emb_x = tf.sg_emb(name='emb_x', voca_size=self.vocab_size, dim=self.emb_dim)  # (68,16)
            # self.emb_y = tf.sg_emb(name='emb_y', voca_size=self.vocab_size, dim=self.emb_dim)  # (68,16)
            # self.X = self.x.sg_lookup(emb=self.emb_x)  # (8,63,16)
            # self.Y = self.y.sg_lookup(emb=self.emb_y)  # (8,63,16)

            if mode == "train":
                self.lstm_layer = self.X.sg_lstm(in_dim=self.emb_dim,
                                                 dim=self.vocab_size,
                                                 name="lstm")  # (8, 63, 68)
                self.test = self.lstm_layer.sg_softmax(name="testtt")

                print "mazum??"
                print self.test

            elif mode == "infer":
                self.lstm_layer = self.X.sg_lstm(in_dim=self.emb_dim,
                                                 dim=self.vocab_size,
                                                 last_only=True,
                                                 name="lstm")
                self.log_prob = tf.log(self.lstm_layer)

                # next_token: select by distribution probability, preds: select by argmax

                # sample the next token id from the predicted distribution
                self.multinormed = tf.multinomial(self.log_prob, 1)
                self.next_token = tf.cast(
                    tf.reshape(self.multinormed, [1, infer_shape[0]]),
                    tf.int32)
                self.preds = self.lstm_layer.sg_argmax()

        if mode == "train":
            self.loss = self.lstm_layer.sg_ce(target=self.y)
            self.istarget = tf.not_equal(self.y, 0).sg_float()

            self.reduced_loss = self.loss.sg_sum() / (self.istarget.sg_sum() + 1e-7)
            tf.sg_summary_loss(self.reduced_loss, "reduced_loss")
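
In "infer" mode the placeholders replace the queue inputs, and next_token samples from the predicted distribution. A single sampling step would look roughly like this (the enclosing class name Generator, start_token, and the session setup are assumptions, since only __init__ is shown):

# --- sketch only: class name, start_token and setup are assumptions ---
model = Generator(x=None, y=None, num_batch=1, vocab_size=vocab_size,
                  emb_dim=16, hidden_dim=16, mode="infer")
with tf.Session() as sess:
    tf.sg_init(sess)
    # feed one token id, sample the next from the predicted distribution
    next_tok = sess.run(model.next_token,
                        feed_dict={model.x: [[start_token]]})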