def _init(self, config, env_creator):
    """Set up the ARS trainer: policy, optimizer, shared noise table, workers."""
    environment = env_creator(config["env_config"])
    from ray.rllib import models
    obs_preprocessor = models.ModelCatalog.get_preprocessor(environment)

    # Build the TF session plus the policy/optimizer pair that will be
    # updated from the workers' sampled returns.
    self.sess = utils.make_session(single_threaded=False)
    self.policy = policies.GenericPolicy(
        self.sess, environment.action_space, environment.observation_space,
        obs_preprocessor, config["observation_filter"], config["model"])
    self.optimizer = optimizers.SGD(self.policy, config["sgd_stepsize"])

    # Rollout bookkeeping knobs.
    self.rollouts_used = config["rollouts_used"]
    self.num_rollouts = config["num_rollouts"]
    self.report_length = config["report_length"]

    # Create the shared noise table.
    logger.info("Creating shared noise table.")
    shared_noise_id = create_shared_noise.remote(config["noise_size"])
    self.noise = SharedNoiseTable(ray.get(shared_noise_id))

    # Create the actors.
    logger.info("Creating actors.")
    self.workers = [
        Worker.remote(config, env_creator, shared_noise_id)
        for _ in range(config["num_workers"])
    ]

    self.episodes_so_far = 0
    self.reward_list = []
    self.tstart = time.time()
def _init(self, config, env_creator):
    """Set up the ARS trainer, including optional domain randomization.

    Reads ``config["env_config"]["extra_trainer_configs"]`` (if present)
    for a ``"domain_randomization"`` sub-config; an ``"angle"`` entry there
    configures per-dimension random angle ranges.
    """
    # PyTorch check. ARS is TF-only; reject the flag instead of silently
    # clobbering it. (Previously ``config["use_pytorch"] = False`` was
    # assigned *before* this check, making the ValueError unreachable.)
    if config.get("use_pytorch", False):
        raise ValueError(
            "ARS does not support PyTorch yet! Use tf instead."
        )
    # Normalize the flag for any downstream reader of the config.
    config["use_pytorch"] = False

    environment = env_creator(config["env_config"])
    from ray.rllib import models
    preprocessor = models.ModelCatalog.get_preprocessor(environment)
    self.sess = utils.make_session(single_threaded=False)
    self.policy = policies.GenericPolicy(
        self.sess, environment.action_space, environment.observation_space,
        preprocessor, config["observation_filter"], config["model"])
    self.optimizer = optimizers.SGD(self.policy, config["sgd_stepsize"])

    # Rollout bookkeeping knobs.
    self.rollouts_used = config["rollouts_used"]
    self.num_rollouts = config["num_rollouts"]
    self.report_length = config["report_length"]

    # Create the shared noise table.
    logger.info("Creating shared noise table.")
    noise_id = create_shared_noise.remote(config["noise_size"])
    self.noise = SharedNoiseTable(ray.get(noise_id))

    # Getting the configurations of the random environments. Previously
    # ``extra_config["domain_randomization"]`` was indexed *before* the
    # None/key guards below, so a missing key crashed with KeyError (or
    # TypeError when extra_trainer_configs was None). Guard first instead.
    self.extra_config = config["env_config"].get("extra_trainer_configs")
    self.domain_randomization_config = None
    self.domain_randomization_flag = False
    if self.extra_config is not None and \
            "domain_randomization" in self.extra_config:
        self.domain_randomization_config = \
            self.extra_config["domain_randomization"]
        self.domain_randomization_flag = True
        angle_cfg = self.domain_randomization_config.get("angle")
        if angle_cfg is not None:
            # angle_cfg layout: [flags, min_angle, max_angle]; a truthy
            # flag at index i enables randomization for that dimension.
            # NOTE(review): only two dimensions are supported here —
            # presumably matches the target env; confirm against callers.
            self.min_random_angles = [0, 0]
            self.max_random_angles = [0, 0]
            for i, flag in enumerate(angle_cfg[0]):
                if flag:
                    self.min_random_angles[i] = angle_cfg[1]
                    self.max_random_angles[i] = angle_cfg[2]

    # Create the actors.
    # TODO: Change Worker and add the config
    logger.info("Creating actors.")
    self.workers = [
        Worker.remote(config, env_creator, noise_id)
        for _ in range(config["num_workers"])
    ]
    self.episodes_so_far = 0
    self.reward_list = []
    self.tstart = time.time()
def __init__(self, config, env_creator, noise, min_task_runtime=0.2):
    """A rollout worker: owns a private env, TF session and policy copy."""
    self.config = config
    self.min_task_runtime = min_task_runtime
    self.noise = SharedNoiseTable(noise)

    # Each worker builds its own environment instance.
    self.env = env_creator(config["env_config"])
    from ray.rllib import models
    self.preprocessor = models.ModelCatalog.get_preprocessor(self.env)

    # Single-threaded session per worker; parallelism comes from Ray actors.
    self.sess = utils.make_session(single_threaded=True)
    self.policy = policies.GenericPolicy(
        self.sess, self.env.action_space, self.env.observation_space,
        self.preprocessor, config["observation_filter"], config["model"])