Example #1
 def __init__(self, temperature=0.25):
     # constants
     self.TEMPERATURE = temperature
     self.DT = 0.2  # should match the DT used when generating the rnn training data
     initial_z_path = os.path.expanduser(
         "~/navrep/datasets/M/im/corridor_koze_kids_bag_mus_logvars_robotstates_actions_rewards_dones.npz"
     )
     rnn_model_path = os.path.expanduser("~/navrep/models/M/imrnn.json")
     vae_model_path = os.path.expanduser("~/navrep/models/V/imvae.json")
     # V + M Models
     reset_graph()
     self.rnn = MDNRNN(sample_hps_params, gpu_mode=False)
     self.vae = ConvVAE(batch_size=1, is_training=False, channels=3)
     self.vae.load_json(vae_model_path)
     self.rnn.load_json(rnn_model_path)
     # load initial image encoding
     arrays = np.load(initial_z_path)
     initial_mu = arrays["mus"][0]
     initial_logvar = arrays["logvars"][0]
     self.initial_z = initial_mu + np.exp(initial_logvar / 2.0) * np.random.randn(
         *(initial_mu.shape)
     )
     # other tools
     self.viewer = None
     # environment state variables
     self.reset()
     # hot-start the rnn state
     for i in range(20):
         self.step(np.array([0, 0, 0]), override_next_z=self.initial_z)
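
The constructor above can then be driven like any gym-style environment. A minimal usage sketch follows; the class name DreamEnv and the (obs, reward, done, info) return of step() are assumptions for illustration, not taken from the snippet.

import numpy as np

env = DreamEnv(temperature=0.25)  # hypothetical class name wrapping the __init__ above
obs = env.reset()
for _ in range(100):
    action = np.array([0.5, 0.0, 0.0])          # e.g. drive forward in the dream rollout
    obs, reward, done, info = env.step(action)  # gym-style return is assumed here
    if done:
        obs = env.reset()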
Example #2
 def __init__(
         self,
         temperature=0.25,
         initial_z_path=os.path.expanduser(
             "~/navrep/datasets/M/ian/000_mus_logvars_robotstates_actions_rewards_dones.npz"
         ),
         rnn_model_path=os.path.expanduser("~/navrep/models/M/rnn.json"),
         vae_model_path=os.path.expanduser("~/navrep/models/V/vae.json"),
 ):
     # constants
     self.TEMPERATURE = temperature
     self.DT = 0.5  # should match the DT used when generating the rnn training data
     # V + M Models
     reset_graph()
     self.rnn = MDNRNN(sample_hps_params, gpu_mode=False)
     self.vae = ConvVAE(batch_size=1, is_training=False)
     self.vae.load_json(vae_model_path)
     self.rnn.load_json(rnn_model_path)
     # load initial image encoding
     arrays = np.load(initial_z_path)
     initial_mu = arrays["mus"][0]
     initial_logvar = arrays["logvars"][0]
     initial_robotstate = arrays["robotstates"][0]
     ini_lidar_z = initial_mu + np.exp(initial_logvar / 2.0) * np.random.randn(
         *initial_mu.shape)
     ini_goal_z = initial_robotstate[:2] / MAX_GOAL_DIST
     self.initial_z = np.concatenate([ini_lidar_z, ini_goal_z], axis=-1)
     # other tools
     self.rings_def = generate_rings(64, 64)
     self.viewer = None
     # environment state variables
     self.reset()
     # hot-start the rnn state
     for i in range(20):
         self.step(np.array([0, 0, 0]), override_next_z=self.initial_z)
Example #3
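        # NOTE: this fragment starts inside a loop over dataset .npz files;
        # "path" is the current file and "all_data" is a list created before the loop.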
        arrays = np.load(path)
        all_data.append([
            arrays["mus"],
            arrays["logvars"],
            arrays["robotstates"],
            arrays["actions"],
            arrays["dones"],
            arrays["rewards"],
        ])
    n_total_frames = np.sum([mu.shape[0] for mu, _, _, _, _, _ in all_data])
    chunksize = hps.batch_size * hps.max_seq_len  # frames per batch (100'000)
    print("total frames: ", n_total_frames)
    if n_total_frames < chunksize:
        raise ValueError(
            "not enough frames ({}) for a single batch of {}".format(n_total_frames, chunksize))

    reset_graph()
    model = MDNRNN(hps)
    model.print_trainable_params()
    vae = None

    viewer = None
    values_logs = None

    start = time.time()
    for epoch in range(1, N_EPOCHS + 1):
        #     print('preparing data for epoch', epoch)
        batches_start = time.time()
        # flatten all sequences into one
        mu_sequence = np.zeros((n_total_frames, _Z), dtype=np.float32)
        logvar_sequence = np.zeros((n_total_frames, _Z), dtype=np.float32)
        robotstate_sequence = np.zeros((n_total_frames, 5), dtype=np.float32)
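
For context, the flattened mu/logvar/robotstate sequences built above are typically cut into chunks of hps.batch_size * hps.max_seq_len frames before training. Below is a minimal sketch of that reshaping, assuming only the array shapes visible above; the original script's batching code is not shown in this snippet.

import numpy as np

def make_batches(sequence, batch_size, max_seq_len):
    """Reshape a (n_frames, dim) array into (n_batches, batch_size, max_seq_len, dim)."""
    chunksize = batch_size * max_seq_len
    n_batches = sequence.shape[0] // chunksize   # incomplete tail frames are dropped
    usable = sequence[:n_batches * chunksize]
    return usable.reshape(n_batches, batch_size, max_seq_len, -1)

# e.g. mu_batches = make_batches(mu_sequence, hps.batch_size, hps.max_seq_len)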
Example #4
 def __init__(self,
              backend, encoding,
              rnn_model_path=os.path.expanduser("~/navrep/models/M/rnn.json"),
              rnn1d_model_path=os.path.expanduser("~/navrep/models/M/rnn1d.json"),
              vae_model_path=os.path.expanduser("~/navrep/models/V/vae.json"),
              vae1d_model_path=os.path.expanduser("~/navrep/models/V/vae1d.json"),
              gpt_model_path=os.path.expanduser("~/navrep/models/W/gpt"),
              gpt1d_model_path=os.path.expanduser("~/navrep/models/W/gpt1d"),
              vae1dlstm_model_path=os.path.expanduser("~/navrep/models/W/vae1dlstm"),
              vaelstm_model_path=os.path.expanduser("~/navrep/models/W/vaelstm"),
              gpu=False,
              encoder_to_share_model_with=None,  # another EnvEncoder
              ):
     LIDAR_NORM_FACTOR = None
     if backend == "GPT":
         from navrep.scripts.train_gpt import _Z, _H
     elif backend == "GPT1D":
         from navrep.scripts.train_gpt1d import _Z, _H
         from navrep.tools.wdataset import LIDAR_NORM_FACTOR
     elif backend == "VAE1DLSTM":
         from navrep.scripts.train_vae1dlstm import _Z, _H
         from navrep.tools.wdataset import LIDAR_NORM_FACTOR
     elif backend == "VAELSTM":
         from navrep.scripts.train_vaelstm import _Z, _H
     elif backend == "VAE_LSTM":
         from navrep.scripts.train_vae import _Z
         from navrep.scripts.train_rnn import _H
     elif backend == "VAE1D_LSTM":
         from navrep.scripts.train_vae1d import _Z
         from navrep.scripts.train_rnn import _H
         from navrep.scripts.train_vae1d import MAX_LIDAR_DIST as LIDAR_NORM_FACTOR
     self._Z = _Z
     self._H = _H
     self.LIDAR_NORM_FACTOR = LIDAR_NORM_FACTOR
     self.encoding = encoding
     self.backend = backend
     if self.encoding == "V_ONLY":
         self.encoding_dim = _Z + _RS
     elif self.encoding == "VM":
         self.encoding_dim = _Z + _H + _RS
     elif self.encoding == "M_ONLY":
         self.encoding_dim = _H + _RS
     else:
         raise NotImplementedError
     self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
                                         shape=(self.encoding_dim,), dtype=np.float32)
     # V + M Models
     if encoder_to_share_model_with is not None:
         self.vae = encoder_to_share_model_with.vae
         self.rnn = encoder_to_share_model_with.rnn
     else:
         # load world model
         if self.backend == "VAE_LSTM":
             reset_graph()
             self.vae = ConvVAE(z_size=_Z, batch_size=1, is_training=False)
             self.vae.load_json(vae_model_path)
             if self.encoding in ["VM", "M_ONLY"]:
                 hps = sample_hps_params._replace(seq_width=_Z + _G, action_width=_A, rnn_size=_H)
                 self.rnn = MDNRNN(hps, gpu_mode=gpu)
                 self.rnn.load_json(rnn_model_path)
         elif self.backend == "VAE1D_LSTM":
             reset_graph()
             self.vae = Conv1DVAE(z_size=_Z, batch_size=1, is_training=False)
             self.vae.load_json(vae1d_model_path)
             if self.encoding in ["VM", "M_ONLY"]:
                 hps = sample_hps_params._replace(seq_width=_Z + _G, action_width=_A, rnn_size=_H)
                 self.rnn = MDNRNN(hps, gpu_mode=gpu)
                 self.rnn.load_json(rnn1d_model_path)
         elif self.backend == "GPT":
             mconf = GPTConfig(BLOCK_SIZE, _H)
             model = GPT(mconf, gpu=gpu)
             load_checkpoint(model, gpt_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         elif self.backend == "GPT1D":
             mconf = GPTConfig(BLOCK_SIZE, _H)
             model = GPT1D(mconf, gpu=gpu)
             load_checkpoint(model, gpt1d_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         elif self.backend == "VAELSTM":
             mconf = VAELSTMConfig(_Z, _H)
             model = VAELSTM(mconf, gpu=gpu)
             load_checkpoint(model, vaelstm_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         elif self.backend == "VAE1DLSTM":
             mconf = VAE1DLSTMConfig(_Z, _H)
             model = VAE1DLSTM(mconf, gpu=gpu)
             load_checkpoint(model, vae1dlstm_model_path, gpu=gpu)
             self.vae = model
             self.rnn = model
         else:
             raise NotImplementedError
     # other tools
     self.rings_def = generate_rings(_64, _64)
     self.viewer = None
     # environment state variables
     self.reset()
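
The encoding_dim logic above determines the size of the observation vector handed to the policy. A small worked sketch with illustrative sizes; _Z=32, _H=512, _RS=5 are assumptions for the example, not values read from the snippet.

_Z, _H, _RS = 32, 512, 5  # assumed latent, hidden-state and robot-state sizes
encoding_dims = {
    "V_ONLY": _Z + _RS,       # latent features only
    "VM": _Z + _H + _RS,      # latent features + rnn hidden state
    "M_ONLY": _H + _RS,       # rnn hidden state only
}
print(encoding_dims)  # {'V_ONLY': 37, 'VM': 549, 'M_ONLY': 517}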