def demo_get_video_to_watch_gym_render():
    """Run a LunarLanderContinuous-v2 rollout and show it via env.render().

    If ``save_frame_dir`` is set to a directory name, each frame is instead
    captured with ``env.render('rgb_array')``, written as numbered PNG files,
    and finally stitched into ``gym_render.mp4`` with ffmpeg.

    With ``agent is None`` (the default here) actions are sampled randomly;
    otherwise a saved AgentPPO policy is loaded from ``cwd`` and queried.
    No arguments, no return value — output is the render window / video file.
    """
    import cv2  # pip3 install opencv-python
    import gym  # pip3 install gym==0.17 pyglet==1.5.0  (env.render() bug in gym==0.18, pyglet==1.6)
    import torch

    '''parameters'''
    env_name = 'LunarLanderContinuous-v2'
    # NOTE(review): PreprocessEnv and os are assumed to be imported at file
    # level outside this chunk — confirm against the full module.
    env = PreprocessEnv(env=gym.make(env_name))

    '''initialize agent'''
    agent = None  # None means: use random actions
    if agent is None:  # use random action
        device = None
    else:
        from elegantrl2.agent import AgentPPO
        agent = AgentPPO()  # use the policy network saved in cwd
        cwd = f'./{env_name}_{agent.__class__.__name__}/'  # current working directory path
        net_dim = 2 ** 9  # 2 ** 7
        state_dim = env.state_dim
        action_dim = env.action_dim
        agent.init(net_dim, state_dim, action_dim)
        agent.save_load_model(cwd=cwd, if_save=False)
        device = agent.device

    '''initialize evaluate and env.render()'''
    save_frame_dir = ''  # empty means: don't save video, just open env.render()
    # save_frame_dir = 'frames'  # non-empty means: save frames in this directory
    if save_frame_dir:
        os.makedirs(save_frame_dir, exist_ok=True)

    state = env.reset()
    episode_return = 0
    step = 0
    for i in range(2 ** 10):
        if i % 128 == 0:  # periodic progress print
            print(i)
        for j in range(1):
            if agent is None:
                action = env.action_space.sample()
            else:
                s_tensor = torch.as_tensor((state,), dtype=torch.float32, device=device)
                a_tensor = agent.act(s_tensor)
                # if used inside 'with torch.no_grad()', '.detach()' is not needed
                action = a_tensor.detach().cpu().numpy()[0]
            next_state, reward, done, _ = env.step(action)
            episode_return += reward
            step += 1
            if done:
                print(f'{i:>6}, {step:6.0f}, {episode_return:8.3f}, {reward:8.3f}')
                state = env.reset()
                episode_return = 0
                step = 0
            else:
                state = next_state

        if save_frame_dir:
            frame = env.render('rgb_array')
            cv2.imwrite(f'{save_frame_dir}/{i:06}.png', frame)
            cv2.imshow('OpenCV Window', frame)
            cv2.waitKey(1)
        else:
            env.render()
    env.close()

    '''convert frames png/jpg to video mp4/avi using ffmpeg'''
    if save_frame_dir:
        # cv2 image .shape is (height, width, channels)
        frame_shape = cv2.imread(f'{save_frame_dir}/{3:06}.png').shape
        print(f"frame_shape: {frame_shape}")

        save_video = 'gym_render.mp4'
        # bug fix: this was os.system(f"| Convert ...") — an invalid shell
        # command (leading '|'); it was clearly meant as a status message.
        print(f"| Convert frames to video using ffmpeg. Save in {save_video}")
        # bug fix: ffmpeg's -s option takes WIDTHxHEIGHT, but .shape is
        # (height, width, ...) — indices were previously swapped.
        os.system(f'ffmpeg -r 60 -f image2 -s {frame_shape[1]}x{frame_shape[0]} '
                  f'-i ./{save_frame_dir}/%06d.png '
                  f'-crf 25 -vb 20M -pix_fmt yuv420p {save_video}')
def get_video_to_watch_gym_render():
    """Record a PyBullet/gym rollout to PNG frames and build a video.

    Loads a saved AgentPPO policy from ``cwd`` for the chosen environment
    (default: AntBulletEnv-v0), runs 2**9 render steps, crops each
    ``env.render('rgb_array')`` frame, writes it to ``save_frame_dir`` as a
    numbered PNG, and finally calls ffmpeg to assemble ``gym_render.mp4``.
    No arguments, no return value — output is the PNG frames and video file.
    """
    import cv2  # pip3 install opencv-python
    import gym  # pip3 install gym==0.17 pyglet==1.5.0  (env.render() bug in gym==0.18, pyglet==1.6)
    import torch

    '''choose env'''
    import pybullet_envs  # for python-bullet-gym
    dir(pybullet_envs)  # keep the import from being flagged as unused
    # from elegantrl2.env import PreprocessEnv
    env_name = [
        'BipedalWalker-v3',
        'AntBulletEnv-v0',
        'KukaBulletEnv-v0',
        'ReacherBulletEnv-v0',
        'PusherBulletEnv-v0',
        "ThrowerBulletEnv-v0",
        "StrikerBulletEnv-v0",
    ][1]
    # NOTE(review): PreprocessEnv, os and rd (presumably numpy.random) are
    # assumed to be imported at file level outside this chunk — confirm.
    env = PreprocessEnv(env=gym.make(env_name))

    '''initialize agent'''
    agent = None
    from elegantrl2.agent import AgentPPO
    agent = AgentPPO()
    agent.if_use_dn = True
    net_dim = 2 ** 8
    cwd = f'./{env_name}_4/'

    # from elegantrl2.agent import AgentModSAC
    # agent = AgentModSAC()
    # agent.if_use_dn = True
    # net_dim = 2 ** 8
    # cwd = f'./{env_name}_2/'

    device = None
    if agent is not None:
        state_dim = env.state_dim
        action_dim = env.action_dim
        agent.init(net_dim, state_dim, action_dim)
        agent.save_load_model(cwd=cwd, if_save=False)
        device = agent.device

    # fixed seeds so the recorded rollout is reproducible
    rd.seed(194686)
    torch.manual_seed(1942876)

    '''initialize evaluate and env.render()'''
    save_frame_dir = 'frames'
    if save_frame_dir:
        os.makedirs(save_frame_dir, exist_ok=True)

    state = env.reset()
    episode_return = 0
    step = 0
    for i in range(2 ** 9):
        if i % 128 == 0:  # periodic progress print
            print(i)
        for j in range(1):
            if agent is not None:
                s_tensor = torch.as_tensor((state,), dtype=torch.float32, device=device)
                a_tensor = agent.act(s_tensor)
                # if used inside 'with torch.no_grad()', '.detach()' is not needed
                action = a_tensor.detach().cpu().numpy()[0]
            else:
                action = env.action_space.sample()
            next_state, reward, done, _ = env.step(action)
            episode_return += reward
            step += 1
            if done:
                print(f'{i:>6}, {step:6.0f}, {episode_return:8.3f}, {reward:8.3f}')
                state = env.reset()
                episode_return = 0
                step = 0
            else:
                state = next_state

        frame = env.render('rgb_array')
        frame = frame[50:210, 50:270]  # crop of the (240, 320) AntPyBulletEnv-v0 frame
        # frame = cv2.resize(frame[:, :500], (500//2, 720//2))
        cv2.imwrite(f'{save_frame_dir}/{i:06}.png', frame)
        cv2.imshow('', frame)
        cv2.waitKey(1)
    env.close()
    # exit()

    '''convert frames png/jpg to video mp4/avi using ffmpeg'''
    if save_frame_dir:
        # cv2 image .shape is (height, width, channels)
        frame_shape = cv2.imread(f'{save_frame_dir}/{3:06}.png').shape
        print(f"frame_shape: {frame_shape}")

        save_video = 'gym_render.mp4'
        # bug fix: this was os.system(f"| Convert ...") — an invalid shell
        # command (leading '|'); it was clearly meant as a status message.
        print(f"| Convert frames to video using ffmpeg. Save in {save_video}")
        # bug fix: ffmpeg's -s option takes WIDTHxHEIGHT, but .shape is
        # (height, width, ...) — indices were previously swapped.
        os.system(f'ffmpeg -r 60 -f image2 -s {frame_shape[1]}x{frame_shape[0]} '
                  f'-i ./{save_frame_dir}/%06d.png '
                  f'-crf 25 -vb 20M -pix_fmt yuv420p {save_video}')