Code Example #1
# NOTE: assumed imports; VAE is the project's convolutional VAE wrapper and
# the import path below is a guess, not taken from the original source.
import numpy as np
from vae import VAE


def encode(_input, weights, latent, reconstruct):
    # Build a VAE matching the input archive's image shape and load weights.
    conv_vae = VAE()
    conv_vae.make_vae(_input + ".npz", int(latent))
    conv_vae.load_model(weights)

    # Encode every image array stored in the .npz archive into latent vectors.
    latent_vectors = []
    raw_data = np.load(_input + ".npz")
    for f in sorted(raw_data.files):
        images = raw_data[f]
        latent_vectors.append(
            [conv_vae.encode_image(np.array([image]))[0] for image in images])

    np.savez_compressed(_input + "_latent.npz", *latent_vectors)
    if reconstruct:
        # Sanity check: decode the saved latents back into images.
        data = np.load(_input + "_latent.npz")
        reconstructed_images = []
        for f in sorted(data.files):
            latents = data[f]
            reconstructed_images.append(
                [conv_vae.decode_latent(np.array([l]))[0] for l in latents])
        np.savez_compressed(_input + "_recon.npz", *reconstructed_images)
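
A minimal usage sketch, assuming the preprocessed images live in rollouts.npz, the trained VAE weights in vae_weights.h5, and a 32-dimensional latent space (all three names are placeholders, not from the original code):

# Hypothetical call; file names and latent size are assumptions.
encode("rollouts", "vae_weights.h5", latent=32, reconstruct=True)
# Writes rollouts_latent.npz and, because reconstruct=True, rollouts_recon.npz.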
Code Example #2
# NOTE: assumed imports; VAE is the project's convolutional VAE wrapper and
# its import path below is a guess.
import os
import sys

import numpy as np
from vae import VAE


def vae_process_images(images,
                       weights,
                       latent_size,
                       latents=None,
                       encode=True,
                       decode=True,
                       image_size=None):
    # Build the VAE (from an explicit image size if given, otherwise inferred
    # from the data) and load trained weights; stdout is silenced to hide the
    # model-construction logging.
    conv_vae = VAE()
    sys.stdout = open(os.devnull, 'w')
    if image_size:
        conv_vae.make_vae_shape(image_size, image_size, latent_size)
    else:
        conv_vae.make_vae(images + ".npz", latent_size)
    conv_vae.load_model(weights)
    sys.stdout = sys.__stdout__

    latent_vectors = []
    raw_data = np.load(images + ".npz")
    if encode:
        print("Encoding images...")
        # Each archive entry 'arr_i' holds one batch of images; encode every
        # image into a latent vector and save the result.
        for i in range(len(raw_data.files)):
            f = 'arr_' + str(i)
            file_images = raw_data[f]
            latent_vectors.append([
                conv_vae.encode_image(np.array([image]))[0]
                for image in file_images
            ])
        np.savez_compressed(images + "_latent.npz", *latent_vectors)

    if decode:
        print("Decoding latent vectors...")
        # Decode either an explicitly named latent archive or the one written
        # by the encoding step above.
        if latents:
            data = np.load(latents + ".npz")
        else:
            data = np.load(images + "_latent.npz")

        reconstructed_images = []
        for f in sorted(data.files):
            file_latents = data[f]
            reconstructed_images.append([
                conv_vae.decode_latent(np.array([l]))[0] for l in file_latents
            ])
        np.savez_compressed(images + "_recon.npz", *reconstructed_images)
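
A minimal usage sketch, assuming the images were preprocessed into my_rollouts.npz and the VAE was trained with a 32-dimensional latent space (file and weight names are placeholders):

# Hypothetical call; names and latent size are assumptions.
vae_process_images("my_rollouts", "vae_weights.h5", latent_size=32,
                   encode=True, decode=True)
# Writes my_rollouts_latent.npz and my_rollouts_recon.npz.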
Code Example #3
# NOTE: assumed imports; VAE, MDNRNN, ControllerModel, ActionUtils and
# compress_image are project-local modules, and the import paths below are
# guesses rather than the original ones.
import json
import os
import sys

import gym
import numpy as np

from vae import VAE
from mdn_rnn import MDNRNN
from controller import ControllerModel
from utils import ActionUtils, compress_image


class Simulation:
    def __init__(self, path, controller_weights=None):
        # `path` points to a JSON file whose first entry holds the experiment
        # hyperparameters (env name, image/latent/hidden sizes, weight paths).
        with open(path) as f:
            self.params = json.load(f)[0]
        self.load_model(controller_weights=controller_weights)
        self.env = gym.make(self.params['env_name'])

    def load_model(self, controller_weights=None):
        p = self.params
        self.action_utils = ActionUtils(p['env_name'])
        self.action_size = self.action_utils.action_size()

        # Vision model: convolutional VAE that encodes frames to latent vectors.
        self.vae = VAE()
        sys.stdout = open(os.devnull, 'w')  # silence model-construction logging
        self.vae.make_vae_shape(p['img_size'], p['img_size'], p['latent_size'])
        sys.stdout = sys.__stdout__
        self.vae.load_model('../' + p['vae_hps']['weights_path'])

        # Memory model: MDN-RNN configured for single-step inference
        # (batch size 1, sequence length 1, dropout and training mode off).
        # TODO: Make MDN just take in all of params.
        mdn_hps = p['mdn_hps']
        mdn_hps['in_width'] = p['latent_size'] + self.action_size
        mdn_hps['out_width'] = p['latent_size']
        mdn_hps['action_size'] = self.action_size
        mdn_hps['rnn_size'] = p['hidden_size']
        mdn_hps['batch_size'] = 1
        mdn_hps['max_seq_len'] = 1
        mdn_hps['use_recurrent_dropout'] = 0
        mdn_hps['training'] = 0
        self.mdn_rnn = MDNRNN(mdn_hps)
        self.mdn_rnn.load('../' + p['mdn_hps']['weights_path'])

        # Controller: maps the concatenated latent vector and RNN hidden state
        # to an action.
        self.controller = ControllerModel(
            [p['latent_size'] + p['hidden_size'], self.action_size])
        if controller_weights:
            self.controller.load_weights(controller_weights)

    def simulate(self, dreaming=False, render=False):
        rewards = []
        for i in range(1):  # a single evaluation episode
            obs = self.env.reset()

            # initialize hidden + action variables
            state = self.mdn_rnn.rnn_init_state()
            a = self.env.action_space.sample()
            h = np.zeros((1, self.params['hidden_size']))
            c = np.zeros((1, self.params['hidden_size']))

            total_reward = 0
            if dreaming:
                # "Dream" rollout: encode one real frame, then let the MDN-RNN
                # predict subsequent latent vectors instead of re-encoding each
                # environment frame.
                img = self.env.render(mode='rgb_array')
                img = compress_image(img, size=self.params['img_size'])
                z = self.vae.encode_image(np.array([img]))[0]
                for t in range(self.params['max_seq_len']):
                    z_current = z.copy()
                    z, state = self.mdn_rnn.sample_z(
                        z_current, self.action_utils.action_to_input(a), state)
                    z = z[0][0]
                    h, c = state[0], state[1]
                    out = self.controller.get_action(
                        np.concatenate((z_current, h[0])))

                    obs, reward, done, info = self.env.step(
                        self.action_utils.output_to_action(out))
                    total_reward += reward
                    if done:
                        print('Episode finished after {} timesteps'.format(t + 1))
                        break
            else:
                for t in range(self.params['max_seq_len']):
                    img = self.env.render(mode='rgb_array')
                    img = compress_image(img, size=self.params['img_size'])

                    # compute action: encode the frame, advance the RNN state,
                    # then feed [z, h] to the controller
                    z = self.vae.encode_image(np.array([img]))[0]
                    print(a)  # debug: action taken on the previous step
                    state = self.mdn_rnn.rnn_next_state(
                        z, self.action_utils.action_to_input(a), state)
                    h, c = state[0], state[1]
                    out = self.controller.get_action(np.concatenate((z, h[0])))

                    a = self.action_utils.output_to_action(out)
                    obs, reward, done, info = self.env.step(a)
                    total_reward += reward
                    if done:
                        print('Episode finished after {} timesteps'.format(t + 1))
                        break
            rewards.append(total_reward)
        # Negative mean reward, so a minimizing optimizer maximizes reward.
        return -np.mean(rewards)
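
A minimal usage sketch, assuming an experiment-parameter JSON at ../params/config.json and trained controller weights at controller_weights.h5 (both paths are placeholders, not from the original project):

# Hypothetical driver; paths are assumptions.
sim = Simulation('../params/config.json',
                 controller_weights='controller_weights.h5')
neg_reward = sim.simulate(dreaming=False)
print('mean episode reward:', -neg_reward)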