def generate_vae_dataset(
        N=10000,
        test_p=0.9,
        use_cached=False,
        imsize=84,
        show=False,
        dataset_path=None,
        env_class=SawyerReachTorqueEnv,
        env_kwargs=None,
        init_camera=sawyer_torque_reacher_camera,
):
    """Collect (or load from cache) ``N`` flattened uint8 images from a torque
    reacher env driven by an OU-noise random policy.

    Returns ``(train_dataset, test_dataset, info)``; the train/test split point
    is ``int(N * test_p)``.  ``info['env']`` holds the wrapped env when data is
    freshly generated.
    """
    filename = "/tmp/sawyer_torque_data" + str(N) + ".npy"
    info = {}
    if dataset_path is not None:
        filename = local_path_from_s3_or_local_path(dataset_path)
        dataset = np.load(filename)
    elif use_cached and osp.isfile(filename):
        dataset = np.load(filename)
        print("loaded data from saved file", filename)
    else:
        now = time.time()
        if env_kwargs is None:  # fixed: was `== None`; use identity check for None
            env_kwargs = dict()
        env = env_class(**env_kwargs)
        env = ImageEnv(
            env,
            imsize,
            transpose=True,
            init_camera=init_camera,
            normalize=True,
        )
        info['env'] = env
        policy = RandomPolicy(env.action_space)
        es = OUStrategy(action_space=env.action_space, theta=0)
        exploration_policy = PolicyWrappedWithExplorationStrategy(
            exploration_strategy=es,
            policy=policy,
        )
        dataset = np.zeros((N, imsize * imsize * 3), dtype=np.uint8)
        for i in range(N):
            # Re-randomize the scene every 50 samples.
            if i % 50 == 0:
                print('Reset')
                env.reset_model()
                exploration_policy.reset()
            for _ in range(1):
                # Scale actions down (was `* 1 / 10`, simplified to `/ 10`).
                action = exploration_policy.get_action()[0] / 10
                env.wrapped_env.step(action)
            img = env._get_flat_img()
            dataset[i, :] = unormalize_image(img)
            if show:
                cv2.imshow('img', img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(i)
        print("done making training data", time.time() - now)
        np.save(filename, dataset)
    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset, info
def generate_vae_dataset(
        N=10000,
        test_p=0.9,
        use_cached=True,
        imsize=84,
        show=False,
        dataset_path=None,
        policy_path=None,
        action_space_sampling=False,
        env_class=SawyerDoorEnv,
        env_kwargs=None,
        init_camera=sawyer_door_env_camera_v2,
):
    """Collect (or load from cache) ``N`` flattened uint8 images from a Sawyer
    door env driven by an OU-noise random policy.

    The cache filename encodes which data-collection mode was requested.
    Returns ``(train_dataset, test_dataset, info)``; the split index is
    ``int(N * test_p)``.
    """
    if policy_path is not None:
        filename = "/tmp/sawyer_door_pull_open_oracle+random_policy_data_closer_zoom_action_limited" + str(N) + ".npy"
    elif action_space_sampling:
        filename = "/tmp/sawyer_door_pull_open_zoomed_in_action_space_sampling" + str(N) + ".npy"
    else:
        filename = "/tmp/sawyer_door_pull_open" + str(N) + ".npy"
    info = {}
    if dataset_path is not None:
        filename = local_path_from_s3_or_local_path(dataset_path)
        dataset = np.load(filename)
    elif use_cached and osp.isfile(filename):
        dataset = np.load(filename)
        print("loaded data from saved file", filename)
    else:
        now = time.time()
        if env_kwargs is None:  # fixed: default of None crashed at **env_kwargs
            env_kwargs = dict()
        env = env_class(**env_kwargs)
        env = ImageEnv(
            env,
            imsize,
            transpose=True,
            init_camera=init_camera,
            normalize=True,
        )
        info['env'] = env
        policy = RandomPolicy(env.action_space)
        es = OUStrategy(action_space=env.action_space, theta=0)
        exploration_policy = PolicyWrappedWithExplorationStrategy(
            exploration_strategy=es,
            policy=policy,
        )
        env.wrapped_env.reset()
        dataset = np.zeros((N, imsize * imsize * 3), dtype=np.uint8)
        for i in range(N):
            # Re-randomize the scene every 20 samples.
            if i % 20 == 0:
                env.reset_model()
                exploration_policy.reset()
            for _ in range(10):
                action = exploration_policy.get_action()[0]
                env.wrapped_env.step(
                    action
                )
            # env.set_to_goal_angle(env.get_goal()['state_desired_goal'])
            img = env._get_flat_img()
            dataset[i, :] = unormalize_image(img)
            if show:
                cv2.imshow('img', img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(i)
        print("done making training data", filename, time.time() - now)
        np.save(filename, dataset)
    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset, info
def generate_vae_dataset(
        N=10000,
        test_p=0.9,
        use_cached=True,
        imsize=84,
        show=False,
        dataset_path=None,
        env_class=None,
        env_kwargs=None,
        init_camera=sawyer_door_env_camera,
):
    """Collect (or load from cache) ``N`` flat images for the door push-open
    task: the first half sampled from the goal space, the second half from
    random OU-noise rollouts.

    Returns ``(train_dataset, test_dataset, info)``; the split index is
    ``int(N * test_p)``.
    """
    filename = "/tmp/sawyer_door_push_open_and_reach" + str(N) + ".npy"
    info = {}
    if dataset_path is not None:
        filename = local_path_from_s3_or_local_path(dataset_path)
        dataset = np.load(filename)
    elif use_cached and osp.isfile(filename):
        dataset = np.load(filename)
        print("loaded data from saved file", filename)
    else:
        if env_kwargs is None:  # fixed: default of None crashed at **env_kwargs
            env_kwargs = dict()
        env = env_class(**env_kwargs)
        env = ImageEnv(
            env,
            imsize,
            transpose=True,
            init_camera=init_camera,
            normalize=True,
        )
        oracle_sampled_data = int(N / 2)
        dataset = np.zeros((N, imsize * imsize * 3))
        print('Goal Space Sampling')
        for i in range(oracle_sampled_data):
            goal = env.sample_goal()
            env.set_to_goal(goal)
            img = env._get_flat_img()
            dataset[i, :] = img
            if show:
                cv2.imshow('img', img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(i)
        env._wrapped_env.min_y_pos = .6
        policy = RandomPolicy(env.action_space)
        es = OUStrategy(action_space=env.action_space, theta=0)
        exploration_policy = PolicyWrappedWithExplorationStrategy(
            exploration_strategy=es,
            policy=policy,
        )
        print('Random Sampling')
        for i in range(oracle_sampled_data, N):
            if i % 20 == 0:
                env.reset()
                exploration_policy.reset()
            for _ in range(10):
                action = exploration_policy.get_action()[0]
                env.wrapped_env.step(
                    action
                )
            img = env._get_flat_img()
            dataset[i, :] = img
            if show:
                cv2.imshow('img', img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(i)
        # Fixed: the generated dataset was never written, so `use_cached=True`
        # could never find a cache.  Save it like the sibling generators do.
        np.save(filename, dataset)
    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset, info
def generate_uniform_dataset_reacher(env_class=None, env_kwargs=None,
                                     num_imgs=1000, use_cached_dataset=False,
                                     init_camera=None, imsize=48, show=False,
                                     save_file_prefix=None, env_id=None,
                                     tag='', dataset_path=None):
    """Sample ``num_imgs`` goal images uniformly from a reacher env.

    Prefers `dataset_path`, then a matching cached /tmp file; otherwise
    renders goals one at a time, saves the array, and returns it.
    """
    if dataset_path is not None:
        return load_local_or_remote_file(dataset_path)
    import gym
    from gym.envs import registration  # trigger registration
    import multiworld.envs.pygame
    import multiworld.envs.mujoco
    if env_class and env_kwargs:
        env = env_class(**env_kwargs)
    else:
        env = gym.make(env_id)
    env = ImageEnv(
        env,
        imsize,
        init_camera=init_camera,
        transpose=True,
        normalize=True,
    )
    env.non_presampled_goal_img_is_garbage = True
    if save_file_prefix is None and env_id is not None:
        save_file_prefix = env_id
    filename = "/tmp/{}_N{}_imsize{}uniform_images_{}.npy".format(
        save_file_prefix, str(num_imgs), env.imsize, tag,
    )
    if use_cached_dataset and osp.isfile(filename):
        cached = np.load(filename)
        print("Loaded data from {}".format(filename))
        return cached
    print('Sampling Uniform Dataset')
    dataset = np.zeros((num_imgs, 3 * env.imsize**2), dtype=np.uint8)
    for idx in range(num_imgs):
        env.reset()
        env.set_to_goal(env.get_goal())
        flat_img = env._get_flat_img()
        if show:
            frame = flat_img.reshape(3, env.imsize, env.imsize).transpose()
            frame = frame[::-1, :, ::-1]
            cv2.imshow('img', frame)
            cv2.waitKey(1)
            print(idx)
        dataset[idx, :] = unormalize_image(flat_img)
    np.save(filename, dataset)
    print("Saving file to {}".format(filename))
    return dataset
def generate_uniform_dataset_pick_and_place(env_class=None, env_kwargs=None,
                                            num_imgs=1000,
                                            use_cached_dataset=False,
                                            init_camera=None, imsize=48,
                                            save_file_prefix=None, env_id=None,
                                            tag='', dataset_path=None):
    """Sample ``num_imgs`` goal images from a pick-and-place env.

    Prefers `dataset_path`, then a matching cached /tmp file; otherwise
    presamples image goals in one batch, saves the array, and returns it.
    """
    if dataset_path is not None:
        return load_local_or_remote_file(dataset_path)
    import gym
    from gym.envs import registration  # trigger registration
    import multiworld.envs.pygame
    import multiworld.envs.mujoco
    if env_class and env_kwargs:
        env = env_class(**env_kwargs)
    else:
        env = gym.make(env_id)
    env = ImageEnv(
        env,
        imsize,
        init_camera=init_camera,
        transpose=True,
        normalize=True,
    )
    env.non_presampled_goal_img_is_garbage = True
    if save_file_prefix is None and env_id is not None:
        save_file_prefix = env_id
    filename = "/tmp/{}_N{}_imsize{}uniform_images_{}.npy".format(
        save_file_prefix, str(num_imgs), env.imsize, tag,
    )
    if use_cached_dataset and osp.isfile(filename):
        cached = np.load(filename)
        print("Loaded data from {}".format(filename))
        return cached
    print('Sampling Uniform Dataset')
    presampled = get_image_presampled_goals(env, num_imgs)
    dataset = unormalize_image(presampled['image_desired_goal'])
    np.save(filename, dataset)
    print("Saving file to {}".format(filename))
    return dataset
def load_env():
    """Build a single-object SawyerMultiobjectEnv wrapped in a 48px ImageEnv.

    Goal/object ranges are the workspace box shrunk by 1 cm on each side;
    the mocap range spans the full box.
    """
    inner_low = (x_low + 0.01, y_low + 0.01)
    inner_high = (x_high - 0.01, y_high - 0.01)
    base_env = SawyerMultiobjectEnv(
        fixed_start=True,
        fixed_colors=False,
        num_objects=1,
        object_meshes=None,
        preload_obj_dict=[{'color1': [1, 1, 1], 'color2': [1, 1, 1]}],
        num_scene_objects=[1],
        maxlen=0.1,
        action_repeat=1,
        puck_goal_low=inner_low,
        puck_goal_high=inner_high,
        hand_goal_low=inner_low,
        hand_goal_high=inner_high,
        mocap_low=(x_low, y_low, 0.0),
        mocap_high=(x_high, y_high, 0.5),
        object_low=inner_low + (0.02,),
        object_high=inner_high + (0.02,),
        use_textures=False,
        init_camera=sawyer_init_camera_zoomed_in,
        cylinder_radius=0.05,
    )
    return ImageEnv(
        base_env,
        48,
        init_camera=sawyer_init_camera_zoomed_in,
        transpose=True,
        normalize=True,
        non_presampled_goal_img_is_garbage=False,
    )
def create_image_84_point2d_wall_flappy_bird_v2():
    """84px image version of the flappy-bird-wall Point2D env (v2 layout)."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.pygame.point2d import Point2DWallEnv
    base_env = Point2DWallEnv(
        action_scale=0.25,
        wall_shape='flappy-bird',
        wall_thickness=0.50,
        render_target=False,
        render_size=84,
        images_are_rgb=True,
        sample_realistic_goals=True,
        norm_order=2,
        reward_type='vectorized_dense',
        ball_low=(-3.5, -3.0),
        ball_high=(-3, -0.5),
        goal_low=(3, 0.5),
        goal_high=(3.5, 3.0),
    )
    return ImageEnv(
        base_env,
        84,
        init_camera=None,
        transpose=True,
        normalize=True,
        non_presampled_goal_img_is_garbage=False,
    )
def get_environment_from_params_custom(environment_params):
    """Build a multi-camera depth ImageEnv from a params dict and hand it to
    ``get_environment``.

    ``environment_params['kwargs']`` is forwarded to ``gym.make`` minus the
    keys that only the wrapper understands.
    """
    universe = environment_params['universe']
    task = environment_params['task']
    domain = environment_params['domain']
    gym_kwargs = environment_params.get('kwargs', {}).copy()
    # These keys are consumed by the wrapper layer, not by gym.make.
    for key in ("map3D", "observation_keys"):
        gym_kwargs.pop(key, None)
    base_env = gym.make(f"{domain}-{task}", **gym_kwargs)
    camera_space = {
        'dist_low': 0.7, 'dist_high': 1.5,
        'angle_low': 0, 'angle_high': 180,
        'elev_low': -180, 'elev_high': -90,
    }
    image_env = ImageEnv(
        wrapped_env=base_env,
        imsize=64,
        normalize=True,
        camera_space=camera_space,
        init_camera=(lambda x: init_multiple_cameras(x, camera_space)),
        num_cameras=4,  # four cameras for training
        depth=True,
        cam_info=True,
        reward_type='wrapped_env',
        flatten=False,
    )
    environment_kwargs = environment_params.get('kwargs', {}).copy()
    environment_kwargs["env"] = image_env
    return get_environment(universe, domain, task, environment_kwargs)
def build_env(env_id):
    """Make a gym env by id; the easy push-and-reach task additionally gets
    image observations, a flattened goal-conditioned interface, and a
    50-step episode cap.
    """
    # Fixed: `env_id is not ""` compared identity with a string literal
    # (a SyntaxWarning on modern CPython and semantically wrong); use `!=`.
    assert env_id != "", "Unspecified environment."
    env = gym.make(env_id)
    if env_id == "SawyerPushAndReachEnvEasy-v0":
        env = FlatGoalEnv(
            ImageEnv(env, transpose=True),
            obs_keys=['image_observation'],
            append_goal_to_obs=True,
        )
        env._max_episode_steps = 50
    return env
def create_image_48_sawyer_push_forward_v0():
    """48px push-forward env flattened to image observations with goal appended."""
    from multiworld.core.flat_goal_env import FlatGoalEnv
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
    base_env = gym.make('BaseSawyerPushForwardEnv-v0')
    image_env = ImageEnv(
        wrapped_env=base_env,
        imsize=48,
        init_camera=sawyer_pusher_camera_upright_v2,
        normalize=True,
    )
    return FlatGoalEnv(image_env, obs_keys=['image_observation'])
def create_image_48_sawyer_push_multi_goal_v0():
    """48px image wrapper around the multi-goal push env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
    base_env = gym.make('BaseSawyerPushMultiGoalEnv-v0')
    return ImageEnv(
        wrapped_env=base_env,
        imsize=48,
        init_camera=sawyer_pusher_camera_upright_v2,
        normalize=True,
    )
def create_image_48_sawyer_push_and_reach_arena_env_reset_free_v0(**kwargs):
    """48px image wrapper (no transpose) around the reset-free arena env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
    base_env = gym.make('SawyerPushAndReachArenaResetFreeEnv-v0', **kwargs)
    image_env = ImageEnv(
        base_env,
        48,
        init_camera=sawyer_pusher_camera_upright_v2,
        transpose=False,
        normalize=True,
    )
    return image_env
def create_image_48_sawyer_reach_xy_env_v1(**kwargs):
    """48px image wrapper (no transpose) around SawyerReachXYEnv-v1."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_xyz_reacher_camera_v0
    base_env = gym.make('SawyerReachXYEnv-v1', **kwargs)
    image_env = ImageEnv(
        base_env,
        48,
        init_camera=sawyer_xyz_reacher_camera_v0,
        transpose=False,
        normalize=True,
    )
    return image_env
def create_image_sawyer_three_blocks_shelf_env_v0(image_size=48, **kwargs):
    """Image wrapper (no transpose) around the three-blocks-shelf env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_block_stacking_camera
    base_env = gym.make('SawyerThreeBlocksShelfXYZEnv-v0', **kwargs)
    image_env = ImageEnv(
        base_env,
        image_size,
        init_camera=sawyer_block_stacking_camera,
        transpose=False,
        normalize=True,
    )
    return image_env
def create_Image48SawyerPushAndReacherXYEnv_v0():
    """48px top-down image wrapper around SawyerPushAndReacherXYEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_top_down
    base_env = gym.make('SawyerPushAndReacherXYEnv-v0')
    image_env = ImageEnv(
        base_env,
        48,
        init_camera=sawyer_pusher_camera_top_down,
        transpose=True,
        normalize=True,
    )
    return image_env
def create_image_48_sawyer_reach_and_reach_xy_easy_env_v0():
    """48px image wrapper around SawyerPushAndReachXYEasyEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
    base_env = gym.make('SawyerPushAndReachXYEasyEnv-v0')
    image_env = ImageEnv(
        base_env,
        48,
        init_camera=sawyer_pusher_camera_upright_v2,
        transpose=True,
        normalize=True,
    )
    return image_env
def create_image_84_pointmass_uwall_test_env_big_v1():
    """84px image wrapper around the big U-wall pointmass test env."""
    from multiworld.core.image_env import ImageEnv
    base_env = gym.make('PointmassUWallTestEnvBig-v1')
    image_env = ImageEnv(
        base_env,
        84,
        init_camera=None,
        transpose=True,
        normalize=True,
        non_presampled_goal_img_is_garbage=False,
    )
    return image_env
def create_image_84_sawyer_reach_xy_env_v0():
    """84px image wrapper around SawyerReachXYEnv-v0."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_xyz_reacher_camera
    base_env = gym.make('SawyerReachXYEnv-v0')
    image_env = ImageEnv(
        base_env,
        84,
        init_camera=sawyer_xyz_reacher_camera,
        transpose=True,
        normalize=True,
    )
    return image_env
def point2d_image_v0(**kwargs):
    """Tiny (8px) RGB image version of Point2DEnv with the goal hidden.

    NOTE(review): `kwargs` is accepted but not forwarded anywhere —
    preserved as-is for interface compatibility.
    """
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.pygame.point2d import Point2DEnv
    base_env = Point2DEnv(
        images_are_rgb=True,
        render_onscreen=False,
        show_goal=False,
        ball_radius=2,
        render_size=8,
    )
    return ImageEnv(base_env, imsize=base_env.render_size, transpose=True)
def create_Image48SawyerDoorHookResetFreeEnv_v1():
    """48px image wrapper around the reset-free door-hook env."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_door_env_camera_v3
    base_env = gym.make('SawyerDoorHookResetFreeEnv-v1')
    image_env = ImageEnv(
        base_env,
        48,
        init_camera=sawyer_door_env_camera_v3,
        transpose=True,
        normalize=True,
    )
    return image_env
def create_image_84_pointmass_uwall_train_env_small_v0():
    """84px image wrapper around the small U-wall pointmass training env."""
    from multiworld.core.image_env import ImageEnv
    base_env = gym.make('PointmassUWallTrainEnvSmall-v0')
    image_env = ImageEnv(
        base_env,
        84,
        init_camera=None,
        transpose=True,
        normalize=True,
        non_presampled_goal_img_is_garbage=False,
    )
    return image_env
def create_image_48_pointmass_flappy_bird_train_env_v1():
    """48px image wrapper around the flappy-bird pointmass training env."""
    from multiworld.core.image_env import ImageEnv
    base_env = gym.make('PointmassFlappyBirdTrainEnv-v1')
    image_env = ImageEnv(
        base_env,
        48,
        init_camera=None,
        transpose=True,
        normalize=True,
        non_presampled_goal_img_is_garbage=False,
    )
    return image_env
def get_video_save_func(rollout_function, env, policy, variant):
    """Build and return a `save_video(algo, epoch)` callback for training.

    Behavior depends on `variant['do_state_exp']`:
    - state experiments: wrap `env` in an ImageEnv and dump a plain env video;
    - otherwise: `env` is assumed to already produce images (VAE-wrapped) and
      two videos are dumped per save (env mode and vae mode) via
      `temporary_mode`.  Videos are written every `save_video_period` epochs
      and at the final epoch.
    """
    from multiworld.core.image_env import ImageEnv
    from railrl.core import logger
    from railrl.envs.vae_wrappers import temporary_mode
    from railrl.visualization.video import dump_video
    logdir = logger.get_snapshot_dir()
    save_period = variant.get('save_video_period', 50)
    do_state_exp = variant.get("do_state_exp", False)
    dump_video_kwargs = variant.get("dump_video_kwargs", dict())
    # Rollout length for the video always comes from the variant.
    dump_video_kwargs['horizon'] = variant['max_path_length']
    if do_state_exp:
        imsize = variant.get('imsize')
        dump_video_kwargs['imsize'] = imsize
        # State-based env: add an image layer purely for rendering the video.
        image_env = ImageEnv(
            env,
            imsize,
            init_camera=variant.get('init_camera', None),
            transpose=True,
            normalize=True,
        )

        def save_video(algo, epoch):
            # Save on the configured period and on the very last epoch.
            if epoch % save_period == 0 or epoch == algo.num_epochs:
                filename = osp.join(
                    logdir,
                    'video_{epoch}_env.mp4'.format(epoch=epoch))
                dump_video(image_env, policy, filename, rollout_function,
                           **dump_video_kwargs)
    else:
        # Env already yields images; reuse it directly.
        image_env = env
        dump_video_kwargs['imsize'] = env.imsize

        def save_video(algo, epoch):
            if epoch % save_period == 0 or epoch == algo.num_epochs:
                # Raw-image rollout video.
                filename = osp.join(
                    logdir,
                    'video_{epoch}_env.mp4'.format(epoch=epoch))
                temporary_mode(image_env,
                               mode='video_env',
                               func=dump_video,
                               args=(image_env, policy, filename,
                                     rollout_function),
                               kwargs=dump_video_kwargs)
                # VAE-reconstruction rollout video.
                filename = osp.join(
                    logdir,
                    'video_{epoch}_vae.mp4'.format(epoch=epoch))
                temporary_mode(image_env,
                               mode='video_vae',
                               func=dump_video,
                               args=(image_env, policy, filename,
                                     rollout_function),
                               kwargs=dump_video_kwargs)
    return save_video
def generate_vae_data(variant):
    """Generate a VAE image dataset from an env named in `variant`.

    Only the `oracle_dataset_using_set_to_goal` path fills the dataset;
    other sampling flags are read but currently unused (preserved for
    interface compatibility).  Returns ``(train_dataset, test_dataset)``
    split at ``int(N * test_p)``.
    """
    env_id = variant.get('env_id', None)
    N = variant.get('N', 1000)
    test_p = variant.get('test_p', 0.9)
    image_size = variant.get('image_size', 84)
    num_channels = variant.get('num_channels', 3)
    init_camera = variant.get('init_camera', None)
    oracle_dataset_using_set_to_goal = variant.get(
        'oracle_dataset_using_set_to_goal', False)
    random_rollout_data = variant.get('random_rollout_data', False)
    random_and_oracle_policy_data = variant.get(
        'random_and_oracle_policy_data', False)
    random_and_oracle_policy_data_split = variant.get(
        'random_and_oracle_policy_data_split', 0)
    n_random_steps = variant.get('n_random_steps', 100)
    show = variant.get('show', False)
    import rlkit.torch.pytorch_util as ptu
    info = {}
    env = gym.make(env_id)
    env = ImageEnv(env,
                   image_size,
                   init_camera=init_camera,
                   transpose=True,
                   normalize=True,
                   non_presampled_goal_img_is_garbage=None)
    dataset = np.zeros((N, image_size * image_size * num_channels),
                       dtype=np.uint8)
    for i in range(N):
        if oracle_dataset_using_set_to_goal:
            goal = env.sample_goal()
            env.set_to_goal(goal)
            obs = env._get_obs()
            img = obs['image_observation']
            # Fixed: was `dataset[i:] = ...`, which broadcast the sample into
            # every row from i onward instead of just row i.
            dataset[i, :] = unormalize_image(img)
            if show:
                # Fixed: build the preview from a separate variable; the
                # original reassigned `img` to a PIL Image before storing it.
                preview = img.reshape(3, image_size, image_size).transpose()
                preview = preview[::-1, :, ::-1]
                preview = (preview * 255).astype(np.uint8)
                preview = Image.fromarray(preview, 'RGB')
                # preview.save('/path/to/image_' + str(i) + '.png')
    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset
def get_video_save_func(rollout_function, env, policy, variant):
    """Build and return a `save_video(algo, epoch)` callback.

    With `variant['do_state_exp']` truthy, `env` is wrapped in an ImageEnv
    for rendering and a single env-mode video is dumped; otherwise `env` is
    assumed to already produce images and both an env-mode and a vae-mode
    video are dumped through `temporary_mode`.  Videos are written every
    `save_video_period` epochs and at the final epoch.
    """
    logdir = logger.get_snapshot_dir()
    save_period = variant.get("save_video_period", 50)
    do_state_exp = variant.get("do_state_exp", False)
    dump_video_kwargs = variant.get("dump_video_kwargs", dict())
    if do_state_exp:
        imsize = variant.get("imsize")
        dump_video_kwargs["imsize"] = imsize
        # State-based env: add an image layer purely for the video.
        image_env = ImageEnv(
            env,
            imsize,
            init_camera=variant.get("init_camera", None),
            transpose=True,
            normalize=True,
        )

        def save_video(algo, epoch):
            # Save on the configured period and on the very last epoch.
            if epoch % save_period == 0 or epoch == algo.num_epochs:
                filename = osp.join(
                    logdir,
                    "video_{epoch}_env.mp4".format(epoch=epoch))
                dump_video(image_env, policy, filename, rollout_function,
                           **dump_video_kwargs)
    else:
        # Env already yields images; reuse it directly.
        image_env = env
        dump_video_kwargs["imsize"] = env.imsize

        def save_video(algo, epoch):
            if epoch % save_period == 0 or epoch == algo.num_epochs:
                # Raw-image rollout video.
                filename = osp.join(
                    logdir,
                    "video_{epoch}_env.mp4".format(epoch=epoch))
                temporary_mode(
                    image_env,
                    mode="video_env",
                    func=dump_video,
                    args=(image_env, policy, filename, rollout_function),
                    kwargs=dump_video_kwargs,
                )
                # VAE-reconstruction rollout video.
                filename = osp.join(
                    logdir,
                    "video_{epoch}_vae.mp4".format(epoch=epoch))
                temporary_mode(
                    image_env,
                    mode="video_vae",
                    func=dump_video,
                    args=(image_env, policy, filename, rollout_function),
                    kwargs=dump_video_kwargs,
                )
    return save_video
def create_image_48_sawyer_pickup_easy_v0():
    """48px pickup env using presampled goals from goals/pickup_goals.npy."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera
    import os.path
    import numpy as np
    here = os.path.dirname(os.path.realpath(__file__))
    presampled_goals = np.load(
        os.path.join(here, 'goals/pickup_goals.npy')).item()
    return ImageEnv(
        wrapped_env=gym.make('SawyerPickupEnvYZEasyFewGoals-v0'),
        imsize=48,
        init_camera=sawyer_pick_and_place_camera,
        transpose=True,
        normalize=True,
        presampled_goals=presampled_goals,
    )
def create_image_48_sawyer_door_hook_reset_free_v1():
    """48px door-hook env using presampled goals from goals/door_goals.npy."""
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_door_env_camera_v0
    import os.path
    import numpy as np
    here = os.path.dirname(os.path.realpath(__file__))
    presampled_goals = np.load(
        os.path.join(here, 'goals/door_goals.npy')).item()
    return ImageEnv(
        wrapped_env=gym.make('SawyerDoorHookResetFreeEnv-v1'),
        imsize=48,
        init_camera=sawyer_door_env_camera_v0,
        transpose=True,
        normalize=True,
        presampled_goals=presampled_goals,
    )
def generate_vae_dataset(
        N=10000,
        test_p=0.9,
        use_cached=True,
        imsize=84,
        show=False,
        dataset_path=None,
):
    """Collect (or load from cache) ``N`` flat images from SawyerReachXYEnv
    under random actions.

    Returns ``(train_dataset, test_dataset, info)``; the split index is
    ``int(N * test_p)``.
    """
    filename = "/tmp/sawyer_xy_pos_control_imgs" + str(N) + ".npy"
    info = {}
    if dataset_path is not None:
        filename = local_path_from_s3_or_local_path(dataset_path)
        dataset = np.load(filename)
    elif use_cached and osp.isfile(filename):
        dataset = np.load(filename)
        print("loaded data from saved file", filename)
    else:
        now = time.time()
        env = SawyerReachXYEnv(hide_goal_markers=True)
        env = ImageEnv(
            env,
            imsize,
            transpose=True,
            init_camera=init_sawyer_camera_v1,
            normalize=True,
        )
        info['env'] = env
        dataset = np.zeros((N, imsize * imsize * 3))
        for sample_idx in range(N):
            # Move the goal out of the image
            env.reset()
            for _ in range(50):
                env.wrapped_env.step(env.wrapped_env.action_space.sample())
            obs = env.step(env.action_space.sample())[0]
            flat_img = obs['image_observation']
            dataset[sample_idx, :] = flat_img
            if show:
                cv2.imshow('img', flat_img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(sample_idx)
        print("done making training data", filename, time.time() - now)
        np.save(filename, dataset)
    n = int(N * test_p)
    return dataset[:n, :], dataset[n:, :], info
def create_image_48_sawyer_pick_and_place_v0():
    """48px pick-and-place env flattened to image observations.

    A single fixed state goal is presampled; the matching image goal is a
    zero placeholder of the flat-image size.
    """
    from multiworld.core.flat_goal_env import FlatGoalEnv
    from multiworld.core.image_env import ImageEnv
    from multiworld.envs.mujoco.cameras import sawyer_pick_and_place_camera_zoomed
    base_env = gym.make('BaseSawyerPickAndPlaceEnv-v0')
    fixed_goal = base_env.fixed_goal
    imsize = 48
    goals = {
        'state_desired_goal': fixed_goal.reshape(1, len(fixed_goal)),
        'image_desired_goal': np.zeros((1, imsize * imsize * 3)),
    }
    image_env = ImageEnv(
        wrapped_env=base_env,
        imsize=imsize,
        init_camera=sawyer_pick_and_place_camera_zoomed,
        normalize=True,
        presampled_goals=goals,
    )
    return FlatGoalEnv(image_env, obs_keys=['image_observation'])
def get_env(env_id, init_camera, imsize=48):
    """Make a gym env by id and wrap it in a normalized, transposed ImageEnv.

    (Removed dead locals `render`, `reward_params`, and `vae` that were
    assigned but never used.)
    """
    env = gym.make(env_id)
    image_env = ImageEnv(
        env,
        imsize,
        init_camera=init_camera,
        transpose=True,
        normalize=True,
        # grayscale=True
    )
    return image_env