Example #1
def encoder(x, y, phase, scope='enc', reuse=None, internal_update=False):
    with tf.variable_scope(scope, reuse=reuse):
        with arg_scope([conv2d, dense], bn=True, phase=phase, activation=leaky_relu), \
             arg_scope([batch_norm], internal_update=internal_update):

            # Ignore y
            x = conv2d(x, 64, 3, 2)
            x = conv2d(x, 128, 3, 2)
            x = conv2d(x, 256, 3, 2)
            x = dense(x, 1024)

            # Autoregression (5 steps)
            ms = []
            vs = []
            zs = [x]

            for i in xrange(5):
                h = tf.concat(zs, axis=-1)
                h = dense(h, 100)
                m = dense(h, 20, activation=None)
                v = dense(h, 20, activation=tf.nn.softplus) + 1e-5
                z = gaussian_sample(m, v)
                ms += [m]
                vs += [v]
                zs += [z]

            m = tf.concat(ms, 1)
            v = tf.concat(vs, 1)
            z = tf.concat(zs[1:], 1)

    return z, (m, v)
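Example #1 relies on a gaussian_sample helper that is not part of the snippet. The sketch below is a minimal guess at it, assuming the usual reparameterization trick with m as the mean and v as the strictly positive variance produced by the softplus layer above:

import tensorflow as tf

def gaussian_sample(m, v):
    # Draw z ~ N(m, v) so that gradients flow through m and v;
    # v is assumed to already be a positive variance (softplus + 1e-5).
    e = tf.random_normal(tf.shape(m))
    return m + tf.sqrt(v) * e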
Example #2
    def fit(self, x_input, epochs=1000, learning_rate=0.001, batch_size=100, print_size=50, train=True):
        # training setting
        self.DO_SHARE = False
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.print_size = print_size

        self.g = tf.Graph()
        # inference process
        x_ = placeholder((None, self.input_dim))
        x = x_
        depth_inf = len(self.encoding_dims)
        for i in range(depth_inf):
            x = dense(x, self.encoding_dims[i], scope="enc_layer"+"%s" %i, activation=tf.nn.sigmoid)
        h_encode = x
        z_mu = dense(h_encode, self.z_dim, scope="mu_layer")
        z_log_sigma_sq = dense(h_encode, self.z_dim, scope = "sigma_layer")
        e = tf.random_normal(tf.shape(z_mu))
        z = z_mu + tf.sqrt(tf.maximum(tf.exp(z_log_sigma_sq), self.eps)) * e

        # generative process
        if not self.useTranse:
            depth_gen = len(self.decoding_dims)

            # Chain each decoder layer on the previous output instead of
            # re-reading z, so every decoding layer is actually used.
            y = z
            for i in range(depth_gen):
                y = dense(y, self.decoding_dims[i], scope="dec_layer"+"%s" % i, activation=tf.nn.sigmoid)
                # if the last layer should be linear, use depth_gen - 1 here

        else:
            depth_gen = depth_inf
            ## transposed-weight decoder not finished yet...

        x_recons = y

        if self.loss == "cross_entropy":
            loss_recons = tf.reduce_mean(tf.reduce_sum(binary_crossentropy(x_recons, x_, self.eps), axis=1))
            loss_kl = 0.5 * tf.reduce_mean(tf.reduce_sum(tf.square(z_mu) + tf.exp(z_log_sigma_sq) - z_log_sigma_sq - 1, 1))
            loss = loss_recons + loss_kl
        # other cases not finished yet
        train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        ckpt_dir = "pre_model/" + "vae.ckpt"
        if train:
            # num_turn = x_input.shape[0] / self.batch_size
            for i in range(epochs):
                idx = np.random.choice(x_input.shape[0], batch_size, replace=False)
                x_batch = x_input[idx]
                _, l = sess.run((train_op, loss), feed_dict={x_:x_batch})
                if i % self.print_size == 0:
                    print "{:>10s}{:>10s}".format("epoces","loss")
                    print "{:10.2e}{:10.2e}".format(i, l)
            saver.save(sess, ckpt_dir)
        else:
            saver.restore(sess, ckpt_dir)
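The reconstruction loss in Example #2 calls a binary_crossentropy(x_recons, x_, eps) helper that is not defined in the snippet. A plausible minimal version, assuming x_recons holds sigmoid outputs in (0, 1) and eps only guards the logarithms:

import tensorflow as tf

def binary_crossentropy(x_recons, x_true, eps=1e-10):
    # Element-wise Bernoulli negative log-likelihood; clipping with eps
    # keeps the logs finite when the decoder saturates at 0 or 1.
    x_recons = tf.clip_by_value(x_recons, eps, 1.0 - eps)
    return -(x_true * tf.log(x_recons) + (1.0 - x_true) * tf.log(1.0 - x_recons))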
Example #4
def feature_discriminator(x, phase, C=1, reuse=None):
    with tf.variable_scope('disc/feat', reuse=reuse):
        with arg_scope([dense], activation=tf.nn.relu): # Switch to leaky?

            x = dense(x, 100)
            x = dense(x, C, activation=None)

    return x
Example #5
def classifier(x, phase, reuse=None):
    with tf.variable_scope('class', reuse=reuse):
        with arg_scope([conv2d, dense],
                       bn=True,
                       phase=phase,
                       activation=tf.nn.relu):
            for i in range(4):
                x = conv2d(x, 64 + 64 * i, 3, 2)
                x = conv2d(x, 64 + 64 * i, 3, 1)

            x = dense(x, 500)
            x = dense(x, 10, activation=None)

    return x
Example #6
def encoder(x, y, phase, reuse=None):
    with tf.variable_scope('enc', reuse=reuse):
        with arg_scope([conv2d, dense], bn=True, phase=phase, activation=leaky_relu), \
             arg_scope([noise], phase=phase):

            # Ignore y
            x = conv2d(x, 64, 3, 2, bn=False)
            x = conv2d(x, 128, 3, 2)
            x = conv2d(x, 256, 3, 2)
            x = dense(x, 1024)

            m = dense(x, 100, activation=None)
            v = dense(x, 100, activation=tf.nn.softplus) + 1e-5

            return (m, v)
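The encoder above (like the one in Example #9 further down) returns the Gaussian parameters (m, v) with a softplus-positive variance but does not sample. A hedged sketch of how a caller might draw z and form the KL term against a standard normal prior under that parameterization:

import tensorflow as tf

def sample_and_kl(m, v):
    # z ~ N(m, v) via reparameterization, plus KL(N(m, v) || N(0, I)) per example.
    z = m + tf.sqrt(v) * tf.random_normal(tf.shape(m))
    kl = 0.5 * tf.reduce_sum(tf.square(m) + v - tf.log(v) - 1.0, axis=1)
    return z, kl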
Example #7
def discriminator(x, phase, reuse=None, depth=1):
    with tf.variable_scope('disc', reuse=reuse):
        with arg_scope([conv2d, dense], bn=True, phase=phase, activation=lrelu), \
             arg_scope([noise], phase=phase):

            x = dropout(x, rate=0.2, training=phase)
            x = conv2d(x, 64, 3, 2, bn=False)
            x = dropout(x, training=phase)
            x = conv2d(x, 128, 3, 2)
            x = dropout(x, training=phase)
            x = conv2d(x, 256, 3, 2)
            x = dropout(x, training=phase)
            x = dense(x, 1024)
            x = dense(x, depth, activation=None, bn=False)
    return x
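Several of these snippets pass leaky_relu (or the shorter lrelu, as in the discriminator above) as the activation without defining it. A minimal stand-in, assuming the conventional 0.2 slope:

import tensorflow as tf

def leaky_relu(x, alpha=0.2):
    # Identity for positive inputs, a small linear slope for negative ones.
    return tf.maximum(x, alpha * x)

lrelu = leaky_relu  # alias used by some of the snippets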
Example #8
def generator(x, phase, reuse=None):
    with tf.variable_scope('gen', reuse=reuse):
        with arg_scope([dense], bn=True, phase=phase, activation=tf.nn.relu), \
             arg_scope([conv2d_transpose], bn=True, phase=phase, activation=tf.nn.relu):

            if args.dense:
                raise NotImplementedError('dense generator path is not implemented yet')
                # Level 0: 1
                l0 = tf.reshape(x, [-1, 1, 1, 128])

                # Level 1: 1 -> 4, 8, 16
                a1 = conv2d_transpose(l0, 64, 1, 1)
                a2 = conv2d_transpose(l0, 64, 1, 1)
                a3 = conv2d_transpose(l0, 64, 1, 1)
                l1 = a1

                # Level 2: 8 -> 16, 32
                b2 = conv2d_transpose(l1, 64, 3, 2)
                b3 = conv2d_transpose(l1, 64, 5, 4)  # fills in the otherwise-undefined b3, mirroring the encoder's multi-scale skips
                l2 = tf.concat([a2, b2], -1)

                # Level 3: 16 -> 32
                c3 = conv2d_transpose(l2, 64, 3, 2)
                l3 = tf.concat([a3, b3, c3], -1)

            else:
                x = dense(x, 4 * 4 * 512)
                x = tf.reshape(x, [-1, 4, 4, 512])
                x = conv2d_transpose(x, 256, 5, 2)
                x = conv2d_transpose(x, 128, 5, 2)
                x = conv2d_transpose(x, 1, 5, 2, bn=False, activation=tf.nn.tanh)

    return x
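Examples #7 and #8 only define the two networks. The sketch below is one plausible way to wire them into the standard GAN losses; the generator/discriminator signatures come from the snippets, while everything else (inputs, loss choice) is an assumption:

import tensorflow as tf

def gan_losses(x_real, z, phase):
    # Discriminator outputs a single unnormalized logit (depth=1).
    x_fake = generator(z, phase)
    d_real = discriminator(x_real, phase)
    d_fake = discriminator(x_fake, phase, reuse=True)

    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=tf.ones_like(d_real)) +
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.zeros_like(d_fake)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.ones_like(d_fake)))
    return d_loss, g_loss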
Example #9
def encoder(x, y, phase, scope='enc', reuse=None, internal_update=False):
    with tf.variable_scope(scope, reuse=reuse):
        with arg_scope([conv2d, dense], bn=True, phase=phase, activation=leaky_relu), \
             arg_scope([batch_norm], internal_update=internal_update):

            # Ignore y
            x = conv2d(x, 64, 3, 2)
            x = conv2d(x, 128, 3, 2)
            x = conv2d(x, 256, 3, 2)
            x = dense(x, 1024)

            m = dense(x, 100, activation=None)
            v = dense(x, 100, activation=tf.nn.softplus) + 1e-5
            z = gaussian_sample(m, v)

            return z, (m, v)
Example #10
def generator(z, y, phase, scope='gen', reuse=None, internal_update=False):
    with tf.variable_scope(scope, reuse=reuse):
        with arg_scope([dense, conv2d_transpose], bn=True, phase=phase, activation=leaky_relu), \
             arg_scope([batch_norm], internal_update=internal_update):

            x = tf.concat([z, y], 1)
            x = dense(x, 4 * 4 * 512)
            x = tf.reshape(x, [-1, 4, 4, 512])
            x = conv2d_transpose(x, 256, 5, 2)
            x = conv2d_transpose(x, 128, 5, 2)
            x = conv2d_transpose(x, 3, 5, 2, bn=False, activation=tf.nn.tanh)

    return x
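Examples #9 and #10 form a conditional encoder/decoder pair. Below is a hedged sketch of a VAE-style objective built from them; the squared-error reconstruction and the [-1, 1] pixel range implied by the tanh output are assumptions, not part of the original code:

import tensorflow as tf

def vae_loss(x, y, phase):
    # Encode, decode, and combine reconstruction error with the Gaussian KL term.
    z, (m, v) = encoder(x, y, phase)
    x_recon = generator(z, y, phase)
    recon = tf.reduce_sum(tf.square(x - x_recon), axis=[1, 2, 3])
    kl = 0.5 * tf.reduce_sum(tf.square(m) + v - tf.log(v) - 1.0, axis=1)
    return tf.reduce_mean(recon + kl)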
Example #11
def encoder(x, phase, reuse=None):
    with tf.variable_scope('enc', reuse=reuse):
        with arg_scope([dense], bn=True, phase=phase, activation=leaky_relu), \
             arg_scope([conv2d], bn=True, phase=phase, activation=leaky_relu):

            if x.shape.as_list()[-1] == 1:
                x = tf.image.grayscale_to_rgb(x)

            x = conv2d(x, 64, 3, 2)
            x = conv2d(x, 128, 3, 2)
            x = conv2d(x, 256, 3, 2)
            x = dense(x, 128)

    return x
Example #12
def encoder(x, phase, reuse=None):
    with tf.variable_scope('enc', reuse=reuse):
        with arg_scope([dense], bn=True, phase=phase, activation=leaky_relu), \
             arg_scope([conv2d], bn=True, phase=phase, activation=leaky_relu):

            if x.shape.as_list()[-1] == 1:
                x = tf.image.grayscale_to_rgb(x)

            if args.dense:
                # Level 0: 32
                l0 = x

                # Level 1: 32 -> 16, 8, 4
                a1 = conv2d(l0, 64, 3, 2)
                a2 = conv2d(l0, 64, 5, 4)
                a3 = conv2d(l0, 64, 9, 8)
                l1 = a1

                # Level 2: 16 -> 8, 4
                b2 = conv2d(l1, 64, 3, 2)
                b3 = conv2d(l1, 64, 5, 4)
                l2 = tf.concat([a2, b2], -1)

                # Level 3: 8 -> 4
                c3 = conv2d(l2, 64, 3, 2)
                l3 = tf.concat([a3, b3, c3], -1)

                # Level 4: Dense
                x = dense(l3, 128)

            else:
                x = conv2d(x, 64, 3, 2)
                x = conv2d(x, 128, 3, 2)
                x = conv2d(x, 256, 3, 2)
                x = dense(x, 128)

    return x
Example #13
def generator(x, y, phase, reuse=None):
    with tf.variable_scope('gen', reuse=reuse):
        with arg_scope([dense], bn=True, phase=phase, activation=tf.nn.relu), \
             arg_scope([conv2d_transpose], bn=True, phase=phase, activation=tf.nn.relu):

            if y is not None:
                x = tf.concat([x, y], 1)

            x = dense(x, 4 * 4 * 512)
            x = tf.reshape(x, [-1, 4, 4, 512])
            x = conv2d_transpose(x, 256, 5, 2)
            x = conv2d_transpose(x, 128, 5, 2)
            x = wnconv2d_transpose(x, 3, 5, 2, bn=False, activation=tf.nn.tanh, scale=True)

    return x
Example #14
def discriminator(x, phase, reuse=None):
    with tf.variable_scope('disc/gan', reuse=reuse):
        with arg_scope([wnconv2d, wndense], activation=leaky_relu):

            x = dropout(x, rate=0.2, training=phase)
            x = wnconv2d(x, 64, 3, 2)

            x = dropout(x, training=phase)
            x = wnconv2d(x, 128, 3, 2)

            x = dropout(x, training=phase)
            x = wnconv2d(x, 256, 3, 2)

            x = dropout(x, training=phase)
            x = wndense(x, 1024)

            x = dense(x, 1, activation=None, bn=False)

    return x
Example #15
def classify(x, phase, reuse=None):
    z = encoder(x, phase, reuse=reuse)
    with tf.variable_scope('enc/final', reuse=reuse):
        y = dense(z, 10, activation=None, bn=False)
    return y
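classify above returns unnormalized 10-way logits. A minimal training sketch under assumed inputs (32x32 RGB images, one-hot labels, and an arbitrary Adam learning rate):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.float32, [None, 10])
phase = tf.placeholder(tf.bool)

logits = classify(x, phase)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)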
Example #16
    def e_step(self, x_data):
        print "e_step finetuning"
        tf.reset_default_graph()
        self.x_ = placeholder(
            (None, self.input_dim))  # we need these global nodes
        self.v_ = placeholder((None, self.num_factors))

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        # inference process

        x = self.x_
        depth_inf = len(self.encoding_dims)
        for i in range(depth_inf):
            x = dense(x,
                      self.encoding_dims[i],
                      scope="enc_layer" + "%s" % i,
                      activation=tf.nn.sigmoid)
            # print("enc_layer0/weights:0".graph)
        h_encode = x
        z_mu = dense(h_encode, self.z_dim, scope="mu_layer")
        z_log_sigma_sq = dense(h_encode, self.z_dim, scope="sigma_layer")
        e = tf.random_normal(tf.shape(z_mu))
        z = z_mu + tf.sqrt(tf.maximum(tf.exp(z_log_sigma_sq), self.eps)) * e

        # generative process
        depth_gen = len(self.decoding_dims)
        # Chain each decoder layer on the previous output instead of
        # re-reading z, so every decoding layer is actually used.
        y = z
        for i in range(depth_gen):
            y = dense(y,
                      self.decoding_dims[i],
                      scope="dec_layer" + "%s" % i,
                      activation=tf.nn.sigmoid)
            # if the last layer should be linear, use depth_gen - 1 here

        x_recons = y

        if self.loss_type == "cross_entropy":
            loss_recons = tf.reduce_mean(
                tf.reduce_sum(binary_crossentropy(x_recons, self.x_, self.eps),
                              axis=1))
            loss_kl = 0.5 * tf.reduce_mean(
                tf.reduce_sum(
                    tf.square(z_mu) + tf.exp(z_log_sigma_sq) - z_log_sigma_sq -
                    1, 1))
            loss_v = 1.0 * self.params.lambda_v / self.params.lambda_r * tf.reduce_mean(
                tf.reduce_sum(tf.square(self.v_ - z), 1))
            # reg_loss: we don't use a regularization loss for now
        self.loss_e_step = loss_recons + loss_kl + loss_v
        train_op = tf.train.AdamOptimizer(self.params.learning_rate).minimize(
            self.loss_e_step)

        ckpt_file = "pre_model/" + "vae.ckpt"
        self.saver = tf.train.Saver()
        # if init == True:
        self.saver.restore(self.sess, ckpt_file)
        for i in range(self.params.num_iter):
            idx = np.random.choice(self.num_items,
                                   self.params.batch_size,
                                   replace=False)
            x_batch = x_data[idx]
            v_batch = self.V[idx]
            _, l = self.sess.run((train_op, self.loss_e_step),
                                 feed_dict={
                                     self.x_: x_batch,
                                     self.v_: v_batch
                                 })
            if i % 50 == 0:
                print "{:>10s}{:>10s}".format("epochs", "loss_e_step")
                print "{:>10d}{:>10.2e}".format(i, l)

        self.z_mu = z_mu
        self.x_recons = x_recons
        self.saver.save(self.sess, ckpt_file)
        return None
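Examples #2 and #16 build their inputs with a bare placeholder(shape) call rather than tf.placeholder directly. A likely minimal definition of that wrapper, assuming float32 inputs:

import tensorflow as tf

def placeholder(shape, dtype=tf.float32, name=None):
    # Thin convenience wrapper so callers only have to pass the shape.
    return tf.placeholder(dtype, shape, name=name)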