Example #1
 def __init__(self, temperature=0.25):
     # constants
     self.TEMPERATURE = temperature
     self.SEQLEN = 99
     self.DT = 0.2  # should be the same as data rnn was trained with
     initial_z_path = os.path.expanduser(
         "~/navrep/datasets/M/toy/000_mus_logvars_robotstates_actions_rewards_dones.npz"
     )
     tcn_model_path = os.path.expanduser("~/navrep/models/M/toytcn.json")
     vae_model_path = os.path.expanduser("~/navrep/models/V/toyvae.json")
     # V + M Models
     reset_graph()
     params = sample_hps_params._replace(max_seq_len=self.SEQLEN + 1)
     self.tcn = MDNTCN(params, gpu_mode=False)
     self.vae = ConvVAE(batch_size=1, is_training=False)
     self.vae.load_json(vae_model_path)
     self.tcn.load_json(tcn_model_path)
     # load initial image encoding
     arrays = np.load(initial_z_path)
     # other tools
     self.rings_def = generate_rings(64, 64)
     self.viewer = None
     # environment state variables
     self.reset()
     # hot-start the tcn state
     self.sequence_z = arrays["mus"][:self.SEQLEN].reshape(
         (1, self.SEQLEN, _Z))
     self.sequence_action = arrays["actions"][:self.SEQLEN].reshape(
         (1, self.SEQLEN, 3))
     self.sequence_restart = arrays["dones"][:self.SEQLEN].reshape(
         (1, self.SEQLEN))
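
Note on the hot-start block above: each stored array is cut to the first SEQLEN steps and reshaped into a batch of one sequence, which is the input layout the TCN expects. A minimal shape sketch, where SEQLEN = 99 matches the constant above but the latent size _Z = 32 is an assumption:

    import numpy as np

    SEQLEN, _Z = 99, 32               # _Z is an illustrative assumption
    mus = np.zeros((1000, _Z))        # stands in for arrays["mus"]
    sequence_z = mus[:SEQLEN].reshape((1, SEQLEN, _Z))
    print(sequence_z.shape)           # (1, 99, 32): batch of one, SEQLEN steps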
Example #2
 def __init__(self, directory, sequence_size,
              file_limit=None,
              channel_first=True, as_torch_tensors=True,
              lidar_mode="rings",
              pre_convert_obs=False,
              regen=None):
     self.pre_convert_obs = pre_convert_obs
     self.regen = regen
     self.regen_prob = 0.1
     self.lidar_mode = lidar_mode
     self.sequence_size = sequence_size
     self.channel_first = channel_first
     self.as_torch_tensors = as_torch_tensors
     self.rings_def = generate_rings(_64, _64)
     self.data = self._load_data(directory, file_limit=file_limit)
     if self.pre_convert_obs:
         self._preconvert_obs()
     self.regen_head_index = 0  # which part of the data array to regen next
     print("data has %d steps." % (len(self.data["scans"])))
Example #3
 def __init__(
         self,
         temperature=0.25,
          initial_z_path=os.path.expanduser(
              "~/navrep/datasets/M/ian/000_mus_logvars_robotstates_actions_rewards_dones.npz"
          ),
         rnn_model_path=os.path.expanduser("~/navrep/models/M/rnn.json"),
         vae_model_path=os.path.expanduser("~/navrep/models/V/vae.json"),
 ):
     # constants
     self.TEMPERATURE = temperature
     self.DT = 0.5  # should be the same as data rnn was trained with
     # V + M Models
     reset_graph()
     self.rnn = MDNRNN(sample_hps_params, gpu_mode=False)
     self.vae = ConvVAE(batch_size=1, is_training=False)
     self.vae.load_json(vae_model_path)
     self.rnn.load_json(rnn_model_path)
     # load initial image encoding
     arrays = np.load(initial_z_path)
     initial_mu = arrays["mus"][0]
     initial_logvar = arrays["logvars"][0]
     initial_robotstate = arrays["robotstates"][0]
     ini_lidar_z = initial_mu + np.exp(
         initial_logvar / 2.0) * np.random.randn(*(initial_mu.shape))
     ini_goal_z = initial_robotstate[:2] / MAX_GOAL_DIST
     self.initial_z = np.concatenate([ini_lidar_z, ini_goal_z], axis=-1)
     # other tools
     self.rings_def = generate_rings(64, 64)
     self.viewer = None
     # environment state variables
     self.reset()
     # hot-start the rnn state
     for i in range(20):
         self.step(np.array([0, 0, 0]), override_next_z=self.initial_z)
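
The ini_lidar_z line above is the VAE reparameterization step: a latent sample is drawn as mu + exp(logvar / 2) * eps with eps ~ N(0, 1), since exp(logvar / 2) is the standard deviation. A standalone sketch of the same computation (the latent size of 32 is an assumption):

    import numpy as np

    mu = np.zeros(32)                    # latent mean (size is an assumption)
    logvar = np.full(32, -2.0)           # latent log-variance
    eps = np.random.randn(*mu.shape)     # standard normal noise
    z = mu + np.exp(logvar / 2.0) * eps  # sample z ~ N(mu, exp(logvar))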
Example #4
    def __init__(self,
                 directory,
                 sequence_size,
                 file_limit=None,
                 channel_first=True,
                 as_torch_tensors=True,
                 lidar_mode="rings",
                 pre_convert_obs=False,
                 regen=None):
        """
        Loads data files into a pytorch-compatible dataset

        arguments
        ------
        directory: a path or list of paths in which to look for data files
        sequence_size: the desired length of RNN sequences
        channel_first: if True, outputs samples in (Sequence, Channel, Width, Height) shape,
                        else (Sequence, Width, Height, Channel)
        as_torch_tensors: outputs data samples as torch tensors for convenience
        lidar_mode: "rings", "scans" or "images". Determines how to interpret the sensor data
        pre_convert_obs: converts observation at load time, instead of at sample time
        regen: "navreptrain" or None, in the first case the dataset will be partially replaced with new data
        """
        self.pre_convert_obs = pre_convert_obs
        self.regen = regen
        self.regen_prob = 0.1
        self.lidar_mode = lidar_mode
        self.sequence_size = sequence_size
        self.channel_first = channel_first
        self.as_torch_tensors = as_torch_tensors
        self.rings_def = generate_rings(_64, _64)
        self.data = self._load_data(directory, file_limit=file_limit)
        if self.pre_convert_obs:
            self._preconvert_obs()
        self.regen_head_index = 0  # which part of the data array to regen next
        print("data has %d steps." % (len(self.data["scans"])))
Example #5
                    values_logs = values_log.copy()
                else:
                    values_logs = values_logs.append(values_log,
                                                     ignore_index=True)
                values_logs.to_csv(log_path)
                with open(log_hyperparams_path, "wb") as f:
                    pickle.dump(hps, f)

            if common_args.render:  # Visually check that the batch is sound
                import matplotlib.pyplot as plt
                from navrep.tools.rings import generate_rings

                reset_graph()
                vae = ConvVAE(z_size=_Z, batch_size=1, is_training=False)
                vae.load_json(vae_model_path)
                rings_def = generate_rings(64, 64)
                rings_pred = vae.decode(
                    batch_z_rs[0, :, :_Z]) * rings_def["rings_to_bool"]
                plt.ion()
                for i, ring in enumerate(rings_pred):
                    rings_def["visualize_rings"](ring, scan=None)
                    plt.scatter(batch_z_rs[0, i, _Z],
                                batch_z_rs[0, i, 33],
                                color='red')
                    plt.ylim([0, 10])
                    plt.title("{:.1f} {:.1f} {:.1f}".format(*batch_action[0,
                                                                          i]))
                    plt.pause(0.5)
                exit()
            if False:  # render all sequences in batch at once
                from navrep.tools.render import render_lidar_batch
Example #6
 def __init__(self):
     self._N = _64*_64 + _RS
     self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
                                         shape=(self._N, 1), dtype=np.float32)
     self.rings_def = generate_rings(_64, _64)
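
The observation size here is the flattened 64x64 rings image plus the robot state vector. A quick sanity check of that arithmetic, where the value of _RS is an assumption (use the project's actual constant):

    import numpy as np
    from gym import spaces

    _64 = 64   # rings image side length, matching generate_rings(_64, _64)
    _RS = 5    # robot-state length -- an assumption

    N = _64 * _64 + _RS  # 4096 + 5 = 4101 with these values
    obs_space = spaces.Box(low=-np.inf, high=np.inf, shape=(N, 1),
                           dtype=np.float32)
    print(obs_space.shape)  # (4101, 1)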
Example #7
 def __init__(self,
              backend, encoding,
              rnn_model_path=os.path.expanduser("~/navrep/models/M/rnn.json"),
              rnn1d_model_path=os.path.expanduser("~/navrep/models/M/rnn1d.json"),
              vae_model_path=os.path.expanduser("~/navrep/models/V/vae.json"),
              vae1d_model_path=os.path.expanduser("~/navrep/models/V/vae1d.json"),
              gpt_model_path=os.path.expanduser("~/navrep/models/W/gpt"),
              gpt1d_model_path=os.path.expanduser("~/navrep/models/W/gpt1d"),
              vae1dlstm_model_path=os.path.expanduser("~/navrep/models/W/vae1dlstm"),
              vaelstm_model_path=os.path.expanduser("~/navrep/models/W/vaelstm"),
              gpu=False,
              encoder_to_share_model_with=None,  # another EnvEncoder
              ):
     LIDAR_NORM_FACTOR = None
     if backend == "GPT":
         from navrep.scripts.train_gpt import _Z, _H
     elif backend == "GPT1D":
         from navrep.scripts.train_gpt1d import _Z, _H
         from navrep.tools.wdataset import LIDAR_NORM_FACTOR
     elif backend == "VAE1DLSTM":
         from navrep.scripts.train_vae1dlstm import _Z, _H
         from navrep.tools.wdataset import LIDAR_NORM_FACTOR
     elif backend == "VAELSTM":
         from navrep.scripts.train_vaelstm import _Z, _H
     elif backend == "VAE_LSTM":
         from navrep.scripts.train_vae import _Z
         from navrep.scripts.train_rnn import _H
     elif backend == "VAE1D_LSTM":
         from navrep.scripts.train_vae1d import _Z
         from navrep.scripts.train_rnn import _H
          from navrep.scripts.train_vae1d import MAX_LIDAR_DIST as LIDAR_NORM_FACTOR
      else:  # unknown backend: fail early instead of a NameError on _Z below
          raise NotImplementedError(backend)
     self._Z = _Z
     self._H = _H
     self.LIDAR_NORM_FACTOR = LIDAR_NORM_FACTOR
     self.encoding = encoding
     self.backend = backend
     if self.encoding == "V_ONLY":
         self.encoding_dim = _Z + _RS
     elif self.encoding == "VM":
         self.encoding_dim = _Z + _H + _RS
     elif self.encoding == "M_ONLY":
         self.encoding_dim = _H + _RS
     else:
         raise NotImplementedError
     self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
                                         shape=(self.encoding_dim,), dtype=np.float32)
     # V + M Models
     if encoder_to_share_model_with is not None:
         self.vae = encoder_to_share_model_with.vae
         self.rnn = encoder_to_share_model_with.rnn
     else:
         # load world model
         if self.backend == "VAE_LSTM":
             reset_graph()
             self.vae = ConvVAE(z_size=_Z, batch_size=1, is_training=False)
             self.vae.load_json(vae_model_path)
             if self.encoding in ["VM", "M_ONLY"]:
                  hps = sample_hps_params._replace(seq_width=_Z+_G, action_width=_A, rnn_size=_H)
                 self.rnn = MDNRNN(hps, gpu_mode=gpu)
                 self.rnn.load_json(rnn_model_path)
         elif self.backend == "VAE1D_LSTM":
             reset_graph()
             self.vae = Conv1DVAE(z_size=_Z, batch_size=1, is_training=False)
             self.vae.load_json(vae1d_model_path)
             if self.encoding in ["VM", "M_ONLY"]:
                  hps = sample_hps_params._replace(seq_width=_Z+_G, action_width=_A, rnn_size=_H)
                 self.rnn = MDNRNN(hps, gpu_mode=gpu)
                 self.rnn.load_json(rnn1d_model_path)
         elif self.backend == "GPT":
             mconf = GPTConfig(BLOCK_SIZE, _H)
             model = GPT(mconf, gpu=gpu)
             load_checkpoint(model, gpt_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         elif self.backend == "GPT1D":
             mconf = GPTConfig(BLOCK_SIZE, _H)
             model = GPT1D(mconf, gpu=gpu)
             load_checkpoint(model, gpt1d_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         elif self.backend == "VAELSTM":
             mconf = VAELSTMConfig(_Z, _H)
             model = VAELSTM(mconf, gpu=gpu)
             load_checkpoint(model, vaelstm_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         elif self.backend == "VAE1DLSTM":
             mconf = VAE1DLSTMConfig(_Z, _H)
             model = VAE1DLSTM(mconf, gpu=gpu)
             load_checkpoint(model, vae1dlstm_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         else:
             raise NotImplementedError
     # other tools
     self.rings_def = generate_rings(_64, _64)
     self.viewer = None
     # environment state variables
     self.reset()
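
The encoding_dim branches above reduce to sums of the latent size _Z, the hidden-state size _H, and the robot-state size _RS. A standalone sketch of that logic, with illustrative constants that are assumptions rather than the project's actual settings:

    _Z, _H, _RS = 32, 512, 5  # illustrative sizes, not the project's settings

    def encoding_dim(encoding):
        if encoding == "V_ONLY":    # VAE latent + robot state
            return _Z + _RS
        elif encoding == "VM":      # latent + RNN hidden state + robot state
            return _Z + _H + _RS
        elif encoding == "M_ONLY":  # RNN hidden state + robot state
            return _H + _RS
        raise NotImplementedError(encoding)

    print(encoding_dim("VM"))  # 32 + 512 + 5 = 549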