Example #1
    def __init__(self, logger, learning_rate, input_dim, z_dim, ae_h_dims, *args, **kwargs):
        self.scope = "AE"

        self.logger = logger
        self.learning_rate = learning_rate

        self.input_dim = input_dim
        self.z_dim = z_dim
        self.enc_layer_dims = [input_dim, *ae_h_dims, z_dim]
        self.dec_layer_dims = list(reversed(self.enc_layer_dims))  # decoder mirrors the encoder

        self.logger.info("[*] Building AE model")

        with tf.variable_scope(self.scope):
            self.input = tf.placeholder(tf.float32, [None, self.input_dim])

            enc = Encoder(self.enc_layer_dims)
            dec = Decoder(self.dec_layer_dims)

            z_layer = enc.encode(self.input)
            # todo: how to handle output? decode() returns a 3-tuple;
            # only the final reconstruction is used here
            _, _, self.output = dec.decode(z_layer)

            # todo: refactor get_theta() --> get_solver()?
            self.theta = [*enc.get_theta(), *dec.get_theta()]

            # note: no L2 regularization term is included in the objective yet
            self.recon_loss = tf.reduce_mean(tf.square(self.input - self.output))
            self.solver = tf.train.AdamOptimizer(self.learning_rate).minimize(
                self.recon_loss, var_list=self.theta)
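
Both examples depend on `Encoder` and `Decoder` classes that are not shown. The following is a minimal TF 1.x sketch of what they might look like, assuming plain fully connected layers with ReLU on the hidden layers and a linear code layer; the 3-tuple returned by `decode()` is a guess reconstructed from the calling code above, not the original implementation.

import tensorflow as tf

class Encoder:
    def __init__(self, layer_dims):
        self.layer_dims = layer_dims  # [input_dim, h1, ..., z_dim]
        self.theta = []               # trainable variables, filled by encode()

    def encode(self, x):
        h = x
        n_layers = len(self.layer_dims) - 1
        for i in range(n_layers):
            W = tf.get_variable("enc_W%d" % i,
                                [self.layer_dims[i], self.layer_dims[i + 1]],
                                initializer=tf.glorot_uniform_initializer())
            b = tf.get_variable("enc_b%d" % i, [self.layer_dims[i + 1]],
                                initializer=tf.zeros_initializer())
            self.theta += [W, b]
            h = tf.matmul(h, W) + b
            if i < n_layers - 1:      # keep the code layer linear
                h = tf.nn.relu(h)
        return h

    def get_theta(self):
        return self.theta


class Decoder:
    def __init__(self, layer_dims):
        self.layer_dims = layer_dims  # [z_dim, ..., input_dim]
        self.theta = []

    def decode(self, z):
        h = z
        n_layers = len(self.layer_dims) - 1
        for i in range(n_layers):
            W = tf.get_variable("dec_W%d" % i,
                                [self.layer_dims[i], self.layer_dims[i + 1]],
                                initializer=tf.glorot_uniform_initializer())
            b = tf.get_variable("dec_b%d" % i, [self.layer_dims[i + 1]],
                                initializer=tf.zeros_initializer())
            self.theta += [W, b]
            h = tf.matmul(h, W) + b
            if i < n_layers - 1:
                h = tf.nn.relu(h)
        logits = h
        output = tf.nn.sigmoid(logits)
        # the caller unpacks three values; (z, logits, output) is one
        # plausible layout consistent with `_, _, self.output = dec.decode(...)`
        return z, logits, output

    def get_theta(self):
        return self.theta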
Example #2
    def test_encoder_output_shape(self):
        with self.test_session():

            input_dim = 100
            output_dim = 10
            batch_size = 512

            layer_dims = [input_dim, 50, output_dim]

            X = tf.constant(np.random.random(batch_size * input_dim),
                            shape=[batch_size, input_dim],
                            dtype=tf.float32)
            enc = Encoder(layer_dims)

            # the encoded output should have shape [batch_size, output_dim]
            self.assertShapeEqual(np.zeros([batch_size, output_dim]),
                                  enc.encode(X))
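
Neither example shows how the model is driven end to end. Below is a hypothetical training sketch, assuming the class in Example #1 is named `AE` (matching its variable scope) and feeding random stand-in data; the constructor arguments are illustrative values, not ones from the original code.

import logging
import numpy as np
import tensorflow as tf

logging.basicConfig(level=logging.INFO)
ae = AE(logging.getLogger("AE"), learning_rate=1e-3,
        input_dim=784, z_dim=32, ae_h_dims=[256, 64])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        # stand-in data; replace with real minibatches
        batch = np.random.random([128, 784]).astype(np.float32)
        _, loss = sess.run([ae.solver, ae.recon_loss],
                           feed_dict={ae.input: batch})
        if step % 10 == 0:
            print("step %d, recon_loss %.4f" % (step, loss))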