import numpy as np
import tensorflow as tf
from time import perf_counter
from tabulate import tabulate

# raster_2d is assumed to be in scope (imported from the surrounding module).


def main():
    k = 1000
    h = 100
    w = 100
    n = 8192
    headers = ['batch size', f'time to raster {n} images (s)']
    table_data = []
    for exponent in range(1, 12):
        batch_size = 2 ** exponent

        res = tf.convert_to_tensor([0.1] * batch_size, dtype=tf.float32)
        origin = tf.convert_to_tensor([[100.0, 100.0]] * batch_size,
                                      dtype=tf.float32)
        state = tf.convert_to_tensor([np.random.randn(22)] * batch_size,
                                     dtype=tf.float32)
        # warm up once so tracing/compilation cost is excluded from the timing
        raster_2d(state, res, origin, h, w, k, batch_size)

        t0 = perf_counter()
        for _ in range(n // batch_size):
            raster_2d(state, res, origin, h, w, k, batch_size)
        dt = perf_counter() - t0
        table_data.append([batch_size, dt])
    print(
        tabulate(table_data,
                 headers=headers,
                 tablefmt='fancy_grid',
                 floatfmt='.4f'))
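
For reference, here is a minimal sketch of what a differentiable rasterizer like raster_2d might look like. The function itself is not shown on this page, so the coordinate convention (row/col order, origin in pixel units) and the exponential kernel are assumptions, not the library's actual implementation; the idea is to place a smooth bump at each point's pixel location, with k controlling sharpness.

import tensorflow as tf

def raster_2d_sketch(state, res, origin, h, w, k, batch_size):
    """Sketch of a differentiable 2D rasterizer (assumed semantics).

    state:  [B, 2 * n_points] flat point coordinates in meters
    res:    [B] meters per pixel
    origin: [B, 2] pixel coordinates of the world origin
    Returns [B, h, w, n_points]; larger k gives sharper, more binary bumps.
    """
    points = tf.reshape(state, [batch_size, -1, 2])              # [B, P, 2]
    res = tf.reshape(tf.convert_to_tensor(res, tf.float32), [batch_size, 1, 1])
    # World -> pixel coordinates of every point.
    points_px = points / res + origin[:, tf.newaxis, :]          # [B, P, 2]
    rows = tf.range(h, dtype=tf.float32)
    cols = tf.range(w, dtype=tf.float32)
    grid_r, grid_c = tf.meshgrid(rows, cols, indexing='ij')
    grid = tf.stack([grid_r, grid_c], axis=-1)                   # [h, w, 2]
    # Squared pixel distance from every cell to every point: [B, h, w, P]
    d2 = tf.reduce_sum(tf.square(grid[tf.newaxis, :, :, tf.newaxis, :]
                                 - points_px[:, tf.newaxis, tf.newaxis, :, :]),
                       axis=-1)
    # Convert back to world units so k is resolution-independent.
    d2 = d2 * tf.square(tf.reshape(res, [batch_size, 1, 1, 1]))
    # Smooth indicator that approaches a one-hot image as k grows.
    return tf.exp(-k * d2)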
Example #2
    def make_trajectory_images(self,
                               environment,
                               start_states,
                               local_env_center_point,
                               batch_size,
                               ):
        """
        :arg: environment [B, H, W]
        :return: [batch, time, h, w, 1 + n_points]
        """
        # this will produce images even for "null" data,
        # but are masked out in the RNN, and not actually used in the computation
        local_env, local_env_origin = get_local_env(center_point=local_env_center_point,
                                                    full_env=environment['env'],
                                                    full_env_origin=environment['origin'],
                                                    res=environment['res'],
                                                    local_h_rows=self.local_env_h_rows,
                                                    local_w_cols=self.local_env_w_cols)

        concat_args = []
        for planned_state in start_states.values():
            planned_rope_image = raster_2d(state=planned_state,
                                           res=environment['res'],
                                           origin=local_env_origin,
                                           h=self.local_env_h_rows,
                                           w=self.local_env_w_cols,
                                           k=self.rope_image_k,
                                           batch_size=batch_size)
            concat_args.append(planned_rope_image)

        concat_args.append(tf.expand_dims(local_env, axis=3))
        images = tf.concat(concat_args, axis=3)
        return images
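
The channel layout of the returned images follows directly from the concatenation above: one channel per rasterized point for each state key, plus a single local-environment channel at the end. A quick shape check with made-up sizes (two hypothetical state keys of 3 and 5 points):

import tensorflow as tf

batch_size, h, w = 4, 50, 50
rope_a = tf.zeros([batch_size, h, w, 3])   # raster_2d output for one state key
rope_b = tf.zeros([batch_size, h, w, 5])   # raster_2d output for another
local_env = tf.zeros([batch_size, h, w])
images = tf.concat([rope_a, rope_b, tf.expand_dims(local_env, axis=3)], axis=3)
print(images.shape)  # (4, 50, 50, 9), i.e. n_points channels + 1 env channel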
Example #3
    def test_raster_differentiable2(self):
        # A single point at (-0.02, -0.02) m: with res = 0.01 m/px and the
        # origin at pixel (h // 2, w // 2) = (2, 2), it lands exactly on pixel (0, 0)
        state = np.array([[-0.02, -0.02]], dtype=np.float32)
        res = [0.01]
        h = 5
        w = 5
        origin = np.array([[h // 2, w // 2]], dtype=np.float32)
        k = 100000
        batch_size = state.shape[0]

        image = raster_2d(state, res, origin, h, w, k, batch_size)

        # with k this large the raster is nearly binary: ~1 at the point's
        # pixel and ~0 everywhere else
        self.assertAlmostEqual(image[0, 0, 0, 0].numpy(), 1)
        self.assertAlmostEqual(image[0, 1, 1, 0].numpy(), 0)
        self.assertAlmostEqual(image[0, 2, 2, 0].numpy(), 0)
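
Since the test's purpose is differentiability, a natural companion check (not part of the original test, and assuming raster_2d is built entirely from differentiable TF ops) is that gradients of the image actually flow back to the input state:

import numpy as np
import tensorflow as tf

state = tf.Variable([[-0.02, -0.02]], dtype=tf.float32)
origin = np.array([[2.0, 2.0]], dtype=np.float32)
with tf.GradientTape() as tape:
    image = raster_2d(state, [0.01], origin, 5, 5, 100000, batch_size=1)
    loss = tf.reduce_sum(image)
grad = tape.gradient(loss, state)
# grad should be a finite [1, 2] tensor, not None
assert grad is not None
assert bool(tf.reduce_all(tf.math.is_finite(grad)))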
Example #4
    def call(self, dataset_element, training, mask=None):
        input_dict, _ = dataset_element
        actions = input_dict['action']
        input_sequence_length = actions.shape[1]

        # Combine all the states into one big vector, based on which states were listed in the hparams file
        substates_0 = []
        for state_key, _ in self.used_states_description.items():
            substate_0 = input_dict[state_key][:, 0]
            substates_0.append(substate_0)
        s_0 = tf.concat(substates_0, axis=1)

        # Remember: everything here is batched, but to keep things clear, plural variable names are reserved for sequences
        res = input_dict['full_env/res']
        full_env = input_dict['full_env/env']
        full_env_origin = input_dict['full_env/origin']

        pred_states = [s_0]
        for t in range(input_sequence_length):
            s_t = pred_states[-1]

            action_t = actions[:, t]

            if self.hparams['use_full_env']:
                env = full_env
                env_origin = full_env_origin
                env_h_rows = self.full_env_params.h_rows
                env_w_cols = self.full_env_params.w_cols
            else:
                state = self.state_vector_to_state_dict(s_t)
                local_env_center = self.scenario.local_environment_center_differentiable(
                    state)
                # NOTE: we assume same resolution for local and full environment
                env, env_origin = self.get_local_env(local_env_center,
                                                     full_env_origin, full_env,
                                                     res)
                env_h_rows = self.local_env_h_rows
                env_w_cols = self.local_env_w_cols

            rope_image_t = raster_2d(s_t,
                                     res,
                                     env_origin,
                                     env_h_rows,
                                     env_w_cols,
                                     k=self.rope_image_k,
                                     batch_size=self.batch_size)

            env = tf.expand_dims(env, axis=3)

            # CNN
            z_t = self.concat([rope_image_t, env])

            for conv_layer, pool_layer in zip(self.conv_layers,
                                              self.pool_layers):
                z_t = conv_layer(z_t)
                z_t = pool_layer(z_t)
            conv_z_t = self.flatten_conv_output(z_t)
            if self.hparams['mixed']:
                full_z_t = self.concat2([s_t, action_t, conv_z_t])
            else:
                full_z_t = conv_z_t

            # dense layers
            for dense_layer in self.dense_layers:
                full_z_t = dense_layer(full_z_t)

            if self.hparams['residual']:
                # residual prediction: add the network output to the current state;
                # otherwise take the final hidden representation as the next state directly
                residual_t = full_z_t
                s_t_plus_1_flat = s_t + residual_t
            else:
                s_t_plus_1_flat = full_z_t

            pred_states.append(s_t_plus_1_flat)

        pred_states = tf.stack(pred_states, axis=1)

        # Split the stack of state vectors up by state name/dim
        output_states = self.state_vector_to_state_sequence_dict(pred_states)

        return output_states
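
state_vector_to_state_sequence_dict is not shown on this page. A plausible sketch, assuming it simply inverts the tf.concat at the top of call() using the same used_states_description ordering:

def state_vector_to_state_sequence_dict(self, pred_states):
    # pred_states: [B, T, sum(dims)]; split back into one [B, T, dim]
    # sequence per state key, in the same order used to build s_0
    output_states = {}
    start = 0
    for state_key, dim in self.used_states_description.items():
        output_states[state_key] = pred_states[:, :, start:start + dim]
        start += dim
    return output_states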