Code example #1
import gym
from keras.optimizers import Adam

import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

from src.r2d2 import R2D2, Actor
from src.r2d2_callbacks import *
from src.processor import AtariProcessor
from src.image_model import DQNImageModel
from src.memory import *
from src.policy import *
from src.common import InputType, LstmType, DuelingNetwork, seed_everything, LoggerType
from src.callbacks import ConvLayerView, MovieLogger

seed_everything(42)
ENV_NAME = "BreakoutDeterministic-v4"


class MyActor(Actor):
    def getPolicy(self, actor_index, actor_num):
        # Exploration policy used by this actor (fixed epsilon here).
        return EpsilonGreedy(0.1)

    def fit(self, index, agent):
        # Create the environment and run training on this actor's agent.
        env = gym.make(ENV_NAME)
        agent.fit(env, visualize=False, verbose=0)
        env.close()


class MyActor1(MyActor):
    def getPolicy(self, actor_index, actor_num):
        # NOTE: the original listing is cut off here; 0.01 is an assumed value.
        return EpsilonGreedy(0.01)
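The actors above return a fixed-epsilon policy per subclass. For reference, the Ape-X/R2D2 papers instead derive each actor's epsilon from its index. A minimal sketch of that schedule as a getPolicy override is shown below; the class name MyActorApeX and the constants (base epsilon 0.4, alpha 7) come from the Ape-X paper and are not part of this example's code.

class MyActorApeX(MyActor):
    def getPolicy(self, actor_index, actor_num):
        # Ape-X style schedule: eps_i = 0.4 ** (1 + 7 * i / (N - 1))
        # (constants from the Ape-X paper; hypothetical for this example)
        if actor_num <= 1:
            epsilon = 0.4
        else:
            epsilon = 0.4 ** (1 + 7 * actor_index / (actor_num - 1))
        return EpsilonGreedy(epsilon)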
Code example #2
import gym
from keras.optimizers import Adam

import traceback

import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

from src.common import seed_everything
seed_everything(43)

from src.r2d3 import Actor
from src.processor import AcrobotProcessor
from src.policy import EpsilonGreedy, AnnealingEpsilonGreedy
from src.image_model import DQNImageModel
from src.memory import PERRankBaseMemory, PERProportionalMemory
from src.common import InputType, LstmType, DuelingNetwork, LoggerType

from Lib import run_gym_rainbow, run_gym_r2d3, run_play, run_replay

ENV_NAME = "Acrobot-v1"
episode_save_dir = "tmp_{}.".format(ENV_NAME)


def create_parameter():

    env = gym.make(ENV_NAME)

    # Environment information
    print("action_space      : " + str(env.action_space))