Example #1
  def test_combine_mean_std(self):
    """Test that CombineMeanStd can be invoked."""
    dim = 2
    batch_size = 10
    mean_tensor = np.random.rand(dim)
    std_tensor = np.random.rand(1,)
    with self.session() as sess:
      mean_tensor = tf.convert_to_tensor(mean_tensor, dtype=tf.float32)
      std_tensor = tf.convert_to_tensor(std_tensor, dtype=tf.float32)
      out_tensor = CombineMeanStd()(mean_tensor, std_tensor)
      out_tensor = out_tensor.eval()
      assert out_tensor.shape == (dim,)
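The layer exercised here looks like the reparameterization-style sampling used in variational autoencoders: the output is the mean input plus Gaussian noise scaled by the std input, and (as in Example #3 below) a training_only flag presumably restricts the noise to training time. A minimal NumPy sketch of that assumed behavior; the function and parameter names are illustrative, not the layer's actual signature:

import numpy as np

def combine_mean_std(mean, std, training=True, noise_scale=1.0):
  """Sketch of reparameterization-style sampling: mean + std * noise."""
  if not training:
    return mean  # deterministic pass-through outside training
  noise = np.random.normal(0.0, noise_scale, size=np.shape(mean))
  return mean + std * noise

out = combine_mean_std(np.random.rand(2), np.random.rand(1))
assert out.shape == (2,)  # std broadcasts against mean, as in the test above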
Example #2
# Imports assumed for the DeepChem 2.x TensorGraph API.
from deepchem.models.tensorgraph.layers import CombineMeanStd, Feature
from deepchem.models.tensorgraph.tensor_graph import TensorGraph


def test_CombineMeanStd_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = CombineMeanStd(in_layers=[feature, feature])
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
Example #3
    def build_graph(self):
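        # Build a variational-autoencoder-style graph: an encoder stack
        # produces a latent mean and std, CombineMeanStd samples from them,
        # and a decoder stack reconstructs the input features.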
        print("building")
        features = Feature(shape=(None, self.n_features))
        last_layer = features
        for layer_size in self.encoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.mean = Dense(in_layers=last_layer,
                          activation_fn=None,
                          out_channels=1)
        self.std = Dense(in_layers=last_layer,
                         activation_fn=None,
                         out_channels=1)

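        # Sample the latent value from (mean, std); with training_only=True
        # the stochastic sampling presumably applies only during training,
        # with the mean passed through at inference time.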
        readout = CombineMeanStd([self.mean, self.std], training_only=True)
        last_layer = readout
        for layer_size in self.decoder_layers:
            last_layer = Dense(in_layers=last_layer,
                               activation_fn=tf.nn.elu,
                               out_channels=layer_size)

        self.reconstruction = Dense(in_layers=last_layer,
                                    activation_fn=None,
                                    out_channels=self.n_features)
        weights = Weights(shape=(None, self.n_features))
        reproduction_loss = L2Loss(
            in_layers=[features, self.reconstruction, weights])
        reproduction_loss = ReduceSum(in_layers=reproduction_loss, axis=0)
        global_step = TensorWrapper(self._get_tf("GlobalStep"))
        kl_loss = KLDivergenceLoss(
            in_layers=[self.mean, self.std, global_step],
            annealing_start_step=self.kl_annealing_start_step,
            annealing_stop_step=self.kl_annealing_stop_step)
        loss = Add(in_layers=[kl_loss, reproduction_loss], weights=[0.5, 1])

        self.add_output(self.mean)
        self.add_output(self.reconstruction)
        self.set_loss(loss)
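For context, the graph above pairs the sampled latent value with a KL-divergence penalty whose weight is annealed between kl_annealing_start_step and kl_annealing_stop_step, then adds it to the reconstruction loss via Add(..., weights=[0.5, 1]). A rough sketch of those two ingredients, assuming KLDivergenceLoss uses the standard Gaussian KL term against a unit normal and a linear annealing ramp; the actual formula and schedule in DeepChem may differ:

import numpy as np

def gaussian_kl(mean, std):
  """KL(N(mean, std^2) || N(0, 1)), summed over latent dimensions."""
  return 0.5 * np.sum(np.square(mean) + np.square(std)
                      - 2.0 * np.log(std) - 1.0)

def annealing_weight(step, start_step, stop_step):
  """Linear ramp from 0 to 1 between start_step and stop_step."""
  span = max(stop_step - start_step, 1)
  return float(np.clip((step - start_step) / span, 0.0, 1.0))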