def encoder(self, Xs, reuse=False, name='encoder'):
    """Build a fully-connected encoder: flatten, then 512-256-128 relu
    blocks, ending in a code layer of ``self.code_size`` units.

    Args:
        Xs: input tensor batch; flattened before the dense stack.
        reuse: pass True to reuse the variables of a previously built
            scope of the same name (added for consistency with the other
            encoder variants in this file; the default False preserves
            the original behavior).
        name: variable-scope name for the encoder's parameters.

    Returns:
        The last layer of the stack — presumably the latent code tensor
        of width ``self.code_size`` (Stacker's exact contract is defined
        elsewhere; verify against its implementation).
    """
    with tf.variable_scope(name, reuse=reuse):
        stack = Stacker(Xs)
        stack.flatten()
        stack.linear_block(512, relu)
        stack.linear_block(256, relu)
        stack.linear_block(128, relu)
        stack.linear_block(self.code_size, relu)
        return stack.last_layer
def encoder(self, Xs, net_shapes, reuse=False, name='encoder'):
    """Build a configurable fully-connected encoder.

    Flattens ``Xs``, applies one relu linear block per entry of
    ``net_shapes``, then a final relu block of width
    ``self.latent_code_size``.

    Args:
        Xs: input tensor batch; flattened before the dense stack.
        net_shapes: iterable of hidden-layer widths, applied in order.
        reuse: True to reuse variables of an existing scope of the
            same name.
        name: variable-scope name for the encoder's parameters.

    Returns:
        The stack's final layer — the latent code tensor.
    """
    with tf.variable_scope(name, reuse=reuse):
        net = Stacker(Xs)
        net.flatten()
        for units in net_shapes:
            net.linear_block(units, relu)
        net.linear_block(self.latent_code_size, relu)
        return net.last_layer
def encoder(self, Xs, reuse=False, name='encoder'):
    """Build an encoder with a joint latent/label head.

    Flattens ``Xs``, runs 512-256-128 relu blocks, then a final relu
    block of width ``self.z_size + self.Y_size``. The output is split
    column-wise: the first ``z_size`` columns are the latent code, the
    remaining ``Y_size`` columns are label logits, which are also
    passed through ``softmax``.

    Args:
        Xs: input tensor batch; flattened before the dense stack.
        reuse: True to reuse variables of an existing scope of the
            same name.
        name: variable-scope name for the encoder's parameters.

    Returns:
        Tuple ``(zs, Ys_gen, hs)``: latent code, label logits, and
        softmax of the logits.
    """
    with tf.variable_scope(name, reuse=reuse):
        net = Stacker(Xs)
        net.flatten()
        for width in (512, 256, 128):
            net.linear_block(width, relu)
        net.linear_block(self.z_size + self.Y_size, relu)

        head = net.last_layer
        zs = head[:, :self.z_size]
        Ys_gen = head[:, self.z_size:]
        hs = softmax(Ys_gen)
        return zs, Ys_gen, hs
def encoder(self, Xs, reuse=False, name='encoder'):
    """Build a VAE-style encoder producing a mean and std pair.

    Flattens ``Xs``, runs 512-256-128 relu blocks, then a final relu
    block of width ``2 * self.z_size``. The first ``z_size`` columns
    are returned as the mean; the remaining columns are passed through
    softplus to keep the std strictly positive.

    Args:
        Xs: input tensor batch; flattened before the dense stack.
        reuse: pass True to reuse the variables of a previously built
            scope of the same name (added for consistency with the other
            encoder variants in this file; the default False preserves
            the original behavior).
        name: variable-scope name for the encoder's parameters.

    Returns:
        Tuple ``(mean, std)`` of the approximate posterior parameters.

    NOTE(review): the final linear_block uses relu, which clamps the
    raw mean head to be non-negative — confirm this is intentional for
    this model.
    """
    with tf.variable_scope(name, reuse=reuse):
        stack = Stacker(Xs)
        stack.flatten()
        stack.linear_block(512, relu)
        stack.linear_block(256, relu)
        stack.linear_block(128, relu)
        stack.linear_block(self.z_size * 2, relu)
        h = stack.last_layer
        mean = h[:, :self.z_size]
        std = tf.nn.softplus(h[:, self.z_size:])
        return mean, std