Example #1
 def _image_summaries(self, data, embed, image_pred):
     summary_size = 6  # number of sequences to visualize
     summary_length = 5  # number of observed steps before open-loop prediction ("dreaming")
     if self._c.obs_type in ['image', 'lidar']:
         truth = data[self._c.obs_type][:summary_size] + 0.5  # undo the [-0.5, 0.5] normalization
         recon = image_pred.mode()[:summary_size]
         # Condition the posterior on the first summary_length steps, then
         # predict the remaining steps open-loop from actions alone.
         init, _ = self._dynamics.observe(embed[:summary_size, :summary_length],
                                          data['action'][:summary_size, :summary_length])
         init = {k: v[:, -1] for k, v in init.items()}
         prior = self._dynamics.imagine(data['action'][:summary_size, summary_length:], init)
         openl = self._decode(self._dynamics.get_feat(prior)).mode()
         model = tf.concat([recon[:, :summary_length] + 0.5, openl + 0.5], 1)
         if self._c.obs_type == "lidar":
             truth = tools.lidar_to_image(truth)
             model = tools.lidar_to_image(model)
             error = model - truth
         else:
             error = (model - truth + 1) / 2
         openl = tf.concat([truth, model, error], 2)
     elif self._c.obs_type == 'lidar_occupancy':
         truth = data[self._c.obs_type][:summary_size]
         recon = image_pred.mode()[:summary_size]
         recon = tf.cast(recon, tf.float32)  # concatenation requires same type
         init, _ = self._dynamics.observe(embed[:summary_size, :summary_length],
                                          data['action'][:summary_size, :summary_length])
         init = {k: v[:, -1] for k, v in init.items()}
         prior = self._dynamics.imagine(data['action'][:summary_size, summary_length:], init)
         openl = self._decode(self._dynamics.get_feat(prior)).mode()
         openl = tf.cast(openl, tf.float32)
             model = tf.concat([recon[:, :summary_length], openl], 1)  # recon/openl are already binary (0 or 1), no scaling needed
         error = (model - truth + 1) / 2
         openl = tf.concat([truth, model, error], 2)
     else:
         raise NotImplementedError(f'unsupported obs_type: {self._c.obs_type}')  # avoid an unbound openl below
     tools.graph_summary(self._writer, tools.video_summary,
                         'agent/train/autoencoder', openl, self._step, int(100 / self._c.action_repeat))
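
For reference, the truth/model/error layout built above can be checked shape-by-shape with dummy tensors. This is a standalone sketch, assuming 64x64 RGB observations and the [0, 1] value range implied by the +0.5 offsets above; none of these tensors come from the original code:

    import tensorflow as tf

    B, T, L = 6, 10, 5  # sequences, total steps, observed (closed-loop) steps
    truth = tf.random.uniform((B, T, 64, 64, 3))      # ground truth in [0, 1]
    recon = tf.random.uniform((B, L, 64, 64, 3))      # reconstructions of the first L steps
    openl = tf.random.uniform((B, T - L, 64, 64, 3))  # open-loop predictions for the rest
    model = tf.concat([recon, openl], 1)              # (B, T, 64, 64, 3)
    error = (model - truth + 1) / 2                   # map the [-1, 1] difference into [0, 1]
    video = tf.concat([truth, model, error], 2)       # rows stacked along height: (B, T, 192, 64, 3)
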
Example #2
 def _reward_summaries(self, data, reward_pred):
     summary_size = 6  # number of sequences to visualize
     truth = tools.reward_to_image(data['reward'][:summary_size])
     model = tools.reward_to_image(reward_pred.mode()[:summary_size])
     error = model - truth
     video_image = tf.concat([truth, model, error], 1)  # reward strips have no time axis, so stack them along height (dim 1)
     video_image = tf.expand_dims(video_image, axis=1)  # add a singleton time axis: B,H,W,C -> B,T,H,W,C
     tools.graph_summary(self._writer, tools.video_summary,
                         'agent/train/reward', video_image, self._step, int(100 / self._c.action_repeat))
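
The tools.reward_to_image helper is not shown in these snippets. As a rough illustration of what it plausibly does (turn a (B, T) reward sequence into a grayscale image strip whose brightness tracks the reward), here is a hypothetical stand-in; the name reward_strip, the min-max normalization, and the strip height are all assumptions:

    import tensorflow as tf

    def reward_strip(reward, height=16):
        # reward: (B, T) scalar rewards -> (B, height, T, 1) grayscale strip,
        # with brightness encoding the per-sequence min-max normalized reward.
        lo = tf.reduce_min(reward, axis=1, keepdims=True)
        hi = tf.reduce_max(reward, axis=1, keepdims=True)
        norm = (reward - lo) / (hi - lo + 1e-8)  # (B, T) in [0, 1]
        return tf.tile(norm[:, None, :, None], [1, height, 1, 1])
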
Example #3
 def _image_summaries(self, data, embed, image_pred):
     truth = data["image"][:6] + 0.5
     recon = image_pred.mode()[:6]
     init, _ = self._dynamics.observe(embed[:6, :5], data["action"][:6, :5])
     init = {k: v[:, -1] for k, v in init.items()}
     prior = self._dynamics.imagine(data["action"][:6, 5:], init)
     openl = self._decode(self._dynamics.get_feat(prior)).mode()
     model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1)
     error = (model - truth + 1) / 2
     openl = tf.concat([truth, model, error], 2)
     tools.graph_summary(self._writer, tools.video_summary, "agent/openl", openl)
Example #4
    def _image_summaries(self, data, embed, image_pred):
        # print("data['obs']:", data["obs"].shape) #  (50, 10, 64, 64, 1)

        truth = data["obp1s"][:6] + 0.5
        recon = image_pred.mode()[:6]
        init, _ = self.dynamics.observe(
            embed[:6, :5],
            data["actions"][:6, :5])  # scan over the first 5 steps to get the posterior states
        init = {k: v[:, -1] for k, v in init.items()}
        prior = self.dynamics.imagine(data["actions"][:6, 5:], init)  # open-loop scan from the last posterior
        openl = self.decoder(self.dynamics.get_feat(prior)).mode()
        model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1)
        error = (model - truth + 1) / 2
        openl = tf.concat([truth, model, error], 2)
        tools.graph_summary(self._writer, tools.video_summary, "agent/openl",
                            openl)
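
All four examples route their videos through tools.graph_summary, which is not defined in any of the snippets. Conceptually, it lets a summary function run eagerly even when the training step is traced as a graph. A minimal sketch of such a helper, assuming TF2 summary APIs (this is not the actual tools implementation):

    import tensorflow as tf

    def graph_summary(writer, fn, *args):
        # Defer fn(*args) to an eager numpy_function so it can write
        # summaries from inside a tf.function-compiled training step.
        step = tf.summary.experimental.get_step()
        def inner(*args):
            tf.summary.experimental.set_step(step)
            with writer.as_default():
                fn(*args)
        return tf.numpy_function(inner, args, [])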