def __init__(self, trainable_agents: AbsAgentManager, actor, logger=None):
    """Initialize the learner with its agent manager and roll-out actor.

    Args:
        trainable_agents (AbsAgentManager): manages all trainable agents.
        actor: roll-out component (presumably an Actor or ActorProxy --
            confirm against callers).
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    super().__init__()
    self._trainable_agents = trainable_agents
    self._actor = actor
    # Create the fallback logger per call instead of in the signature, so a
    # single DummyLogger built at definition time is not shared by all
    # instances of this class (mutable-default-argument pitfall).
    self._logger = logger if logger is not None else DummyLogger()
def __init__(self, actor: ParallelActor, trainable_agents: SimpleAgentManger, logger=None):
    """Initialize the learner with a parallel actor and an agent manager.

    NOTE(review): the annotation ``SimpleAgentManger`` looks like a typo for
    ``SimpleAgentManager`` -- it is kept as-is here because annotations are
    evaluated at definition time and the misspelling may be the real class
    name; confirm before renaming.

    Args:
        actor (ParallelActor): roll-out component used to collect experiences.
        trainable_agents (SimpleAgentManger): manages all trainable agents.
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    super().__init__()
    self._actor = actor
    self._trainable_agents = trainable_agents
    # Build the fallback logger per call so instances never share a
    # default-argument DummyLogger created at definition time.
    self._logger = logger if logger is not None else DummyLogger()
def __init__(self, agent_manager: SimpleAgentManager, actor: Union[SimpleActor, ActorProxy], scheduler: Scheduler, logger: Union[Logger, None] = None):
    """Initialize the learner with its agent manager, actor and scheduler.

    Args:
        agent_manager (SimpleAgentManager): manages all agents.
        actor (SimpleActor or ActorProxy): roll-out component.
        scheduler (Scheduler): drives the training schedule (episodes /
            exploration -- confirm against the Scheduler class).
        logger (Logger): used for logging important events. Defaults to a
            fresh DummyLogger when omitted.
    """
    super().__init__()
    self._agent_manager = agent_manager
    self._actor = actor
    self._scheduler = scheduler
    # Resolve the default inside the body so a single DummyLogger created at
    # definition time is not shared across instances.
    self._logger = logger if logger is not None else DummyLogger()
def __init__(self, protocol: str = PROTOCOL, send_timeout: int = SEND_TIMEOUT, receive_timeout: int = RECEIVE_TIMEOUT, logger=None):
    """Initialize ZMQ driver state and open its sockets.

    Args:
        protocol (str): transport protocol for the ZMQ sockets.
        send_timeout (int): send timeout (units per ZMQ convention --
            presumably milliseconds; confirm).
        receive_timeout (int): receive timeout (same units as send_timeout).
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    self._protocol = protocol
    self._send_timeout = send_timeout
    self._receive_timeout = receive_timeout
    # Resolve this machine's address once; sockets bind/advertise with it.
    self._ip_address = socket.gethostbyname(socket.gethostname())
    self._zmq_context = zmq.Context()
    # Per-call fallback avoids sharing one DummyLogger created at definition
    # time across all driver instances.
    self._logger = logger if logger is not None else DummyLogger()
    self._setup_sockets()
def __init__(self, trainable_agents: AgentManager, actor, logger=None, seed: int = None):
    """Initialize a simple learner; requires a genuine AgentManager.

    Args:
        trainable_agents (AgentManager): an AgentManager instance that
            manages all agents.
        actor (Actor or ActorProxy): an Actor or VectorActorProxy instance.
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
        seed (int): initial random seed value for the underlying simulator.
            If None, no seed fixing is done for the underlying simulator.
    """
    assert isinstance(trainable_agents, AgentManager), \
        "SimpleLearner only accepts AgentManager for parameter trainable_agents"
    # Resolve the logger default here (not in the signature) so no shared
    # DummyLogger instance is created at definition time, and so the base
    # initializer always receives a usable logger.
    if logger is None:
        logger = DummyLogger()
    super().__init__(trainable_agents=trainable_agents, actor=actor, logger=logger)
    self._seed = seed
def __init__(self, component_type: str, protocol: str = PROTOCOL, send_timeout: int = SEND_TIMEOUT, receive_timeout: int = RECEIVE_TIMEOUT, logger=None):
    """Initialize ZMQ driver state for a named component type and open sockets.

    Args:
        component_type (str): type name of the owning component; used by the
            socket setup to identify this driver.
        protocol (str): transport protocol for the ZMQ sockets.
        send_timeout (int): send timeout (presumably milliseconds -- confirm).
        receive_timeout (int): receive timeout (same units as send_timeout).
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    self._component_type = component_type
    self._protocol = protocol
    self._send_timeout = send_timeout
    self._receive_timeout = receive_timeout
    # Resolve this machine's address once; sockets bind/advertise with it.
    self._ip_address = socket.gethostbyname(socket.gethostname())
    self._zmq_context = zmq.Context()
    # Tracks peers observed to have disconnected; starts empty.
    self._disconnected_peer_name_list = []
    # Per-call fallback avoids sharing one DummyLogger created at definition
    # time across all driver instances.
    self._logger = logger if logger is not None else DummyLogger()
    self._setup_sockets()
def __init__(self, trainable_agents: AbsAgentManager, actor, logger=None):
    """Initialize the learner with its agent manager and roll-out actor.

    Args:
        trainable_agents (AbsAgentManager): an AgentManager instance that
            manages all agents.
        actor (Actor or ActorProxy): an Actor or VectorActorProxy instance.
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    # NOTE(review): the original docstring documented a `seed` parameter that
    # this signature does not take; that stale text has been removed.
    super().__init__()
    self._trainable_agents = trainable_agents
    self._actor = actor
    # Per-call fallback avoids sharing one DummyLogger created at definition
    # time across all instances of this class.
    self._logger = logger if logger is not None else DummyLogger()
def __init__(self, *, trainable_agents: Union[dict, AgentManager], actor: Union[SimpleActor, ActorProxy], logger=None):
    """Main driver logic for an RL task.

    All arguments are keyword-only.

    Args:
        trainable_agents (dict or AgentManager): a dict of individual agents
            or an AgentManager instance that manages all agents.
        actor (SimpleActor or ActorProxy): an Actor or VectorActorProxy
            instance.
        logger: used for logging important messages. Defaults to a fresh
            DummyLogger when omitted.
    """
    self._trainable_agents = trainable_agents
    self._actor = actor
    # Per-call fallback avoids a single DummyLogger, created once at
    # definition time, being shared by every instance.
    self._logger = logger if logger is not None else DummyLogger()
def __init__(self, group_name: str, component_type: str, expected_peers: dict,
             driver_type: DriverType = DriverType.ZMQ, driver_parameters: dict = None,
             redis_address=(HOST, PORT), max_retries: int = MAX_RETRIES,
             base_retry_interval: float = BASE_RETRY_INTERVAL,
             fault_tolerant: bool = FAULT_TOLERANT, log_enable: bool = True):
    """Initialize a communication proxy and register it with Redis.

    Args:
        group_name (str): name of the communication group this proxy joins.
        component_type (str): type of the owning component; combined with the
            group name to form this proxy's Redis hash name.
        expected_peers (dict): mapping of peer type -> expected peer count.
        driver_type (DriverType): underlying driver implementation.
        driver_parameters (dict): optional keyword arguments for the driver.
        redis_address (tuple): (host, port) of the Redis server.
        max_retries (int): maximum retry count for peer discovery.
        base_retry_interval (float): base interval between retries.
        fault_tolerant (bool): whether to tolerate peer failures.
        log_enable (bool): if True use an InternalLogger, else a DummyLogger.

    Raises:
        RedisConnectionError: if the Redis client cannot be created.
    """
    self._group_name = group_name
    self._component_type = component_type
    self._redis_hash_name = f"{self._group_name}:{self._component_type}"
    # Unique proxy name: component type plus a dash-free UUID.
    unique_id = str(uuid.uuid1()).replace("-", "")
    self._name = f"{self._component_type}_proxy_{unique_id}"
    self._driver_type = driver_type
    self._driver_parameters = driver_parameters
    self._max_retries = max_retries
    self._retry_interval = base_retry_interval
    self._is_enable_fault_tolerant = fault_tolerant
    self._log_enable = log_enable
    self._logger = InternalLogger(
        component_name=self._name) if self._log_enable else DummyLogger()
    try:
        # NOTE(review): redis.Redis() connects lazily, so this rarely raises
        # here -- actual connection errors may surface on first command.
        self._redis_connection = redis.Redis(host=redis_address[0], port=redis_address[1])
    except Exception as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise RedisConnectionError(
            f"{self._name} failure to connect to redis server due to {e}") from e
    # Record the peer's redis information.
    self._peers_info_dict = {}
    for peer_type, number in expected_peers.items():
        self._peers_info_dict[peer_type] = _PEER_INFO(
            hash_table_name=f"{self._group_name}:{peer_type}",
            expected_number=number)
    # Record connected peers' name.
    self._onboard_peers_name_dict = {}
    # Temporary store the message.
    self._message_cache = defaultdict(list)
    self._join()
def __init__(self, name, agent_id_list, port_code_list, vessel_code_list, demo_env, state_shaper: GNNStateShaper, logger=None):
    """Initialize a training-mode GNN actor over ports and vessels.

    Args:
        name: actor name, forwarded to the base class.
        agent_id_list: ids of the agents this actor controls.
        port_code_list: port codes (defensively copied).
        vessel_code_list: vessel codes (defensively copied).
        demo_env: the environment instance used for roll-outs.
        state_shaper (GNNStateShaper): shapes raw state for the GNN.
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    # Action/experience shaping and exploration are handled elsewhere, hence
    # the explicit None arguments to the base initializer.
    super().__init__(name, AgentMode.TRAIN, agent_id_list, state_shaper=state_shaper, action_shaper=None, experience_shaper=None, explorer=None)
    # Copy to insulate internal state from later mutation by the caller.
    self.port_code_list = copy(port_code_list)
    self.vessel_code_list = copy(vessel_code_list)
    self.demo_env = demo_env
    # Per-call fallback avoids a shared definition-time DummyLogger.
    self._logger = logger if logger is not None else DummyLogger()
def __init__(
        self,
        group_name: str,
        component_type: str,
        expected_peers: dict,
        driver_type: DriverType = DriverType.ZMQ,
        driver_parameters: dict = None,
        redis_address: Tuple = (HOST, PORT),
        max_retries: int = MAX_RETRIES,
        retry_interval_base_value: float = BASE_RETRY_INTERVAL,
        log_enable: bool = True,
        enable_rejoin: bool = ENABLE_REJOIN,
        minimal_peers: Union[int, dict] = MINIMAL_PEERS,
        peers_catch_lifetime: int = PEERS_CATCH_LIFETIME,
        enable_message_cache_for_rejoin: bool = ENABLE_MESSAGE_CACHE_FOR_REJOIN,
        max_length_for_message_cache: int = MAX_LENGTH_FOR_MESSAGE_CACHE,
        timeout_for_minimal_peer_number: int = TIMEOUT_FOR_MINIMAL_PEER_NUMBER,
        is_remove_failed_container: bool = IS_REMOVE_FAILED_CONTAINER,
        max_rejoin_times: int = MAX_REJOIN_TIMES):
    """Initialize a rejoin-capable communication proxy.

    Creates the messaging driver, verifies the Redis connection, records
    expected-peer metadata, configures the optional dynamic-rejoin machinery,
    and finally joins the communication group via ``self._join()``.

    Args:
        group_name (str): name of the communication group this proxy joins.
        component_type (str): type of the owning component; combined with
            the group name to form this proxy's Redis hash name.
        expected_peers (dict): mapping of peer type -> expected peer count.
        driver_type (DriverType): driver implementation; only ZMQ is
            supported here -- any other value exits the process.
        driver_parameters (dict): optional keyword arguments for the driver.
        redis_address (Tuple): (host, port) of the Redis server.
        max_retries (int): maximum retry count (used by later peer lookups --
            confirm against the rest of the class).
        retry_interval_base_value (float): base interval between retries.
        log_enable (bool): if True use an InternalLogger, else a DummyLogger.
        enable_rejoin (bool): enable the dynamic-peer/rejoin machinery below.
        minimal_peers (int or dict): minimum live peers required, either one
            value for all peer types or a per-type dict; floored at 1.
        peers_catch_lifetime (int): cache lifetime for peer lookups
            (presumably seconds -- confirm).
        enable_message_cache_for_rejoin (bool): cache messages for peers that
            exited so they can be replayed on rejoin.
        max_length_for_message_cache (int): per-peer cap on cached messages.
        timeout_for_minimal_peer_number (int): wait budget for the minimal
            peer count to be reached.
        is_remove_failed_container (bool): whether failed containers are
            removed (consumed elsewhere in the class).
        max_rejoin_times (int): maximum number of rejoin attempts.
    """
    self._group_name = group_name
    self._component_type = component_type
    self._redis_hash_name = f"{self._group_name}:{self._component_type}"
    # Prefer an externally assigned name (e.g. injected by an orchestrator
    # through the environment); otherwise derive one from a dash-free UUID.
    if "COMPONENT_NAME" in os.environ:
        self._name = os.getenv("COMPONENT_NAME")
    else:
        unique_id = str(uuid.uuid1()).replace("-", "")
        self._name = f"{self._component_type}_proxy_{unique_id}"
    self._max_retries = max_retries
    self._retry_interval_base_value = retry_interval_base_value
    self._log_enable = log_enable
    self._logger = InternalLogger(
        component_name=self._name) if self._log_enable else DummyLogger()
    # TODO:In multiprocess with spawn start method, the driver must be initiated before the Redis.
    # Otherwise it will cause Error 9: Bad File Descriptor in proxy.__del__(). Root cause not found.
    # Initialize the driver.
    if driver_type == DriverType.ZMQ:
        # Forward driver_parameters as keyword arguments only when provided.
        self._driver = ZmqDriver(
            component_type=self._component_type, **driver_parameters,
            logger=self._logger) if driver_parameters else ZmqDriver(
            component_type=self._component_type, logger=self._logger)
    else:
        self._logger.error(
            f"Unsupported driver type {driver_type}, please use DriverType class."
        )
        # Non-restartable failure: a wrong driver type cannot be retried.
        sys.exit(NON_RESTART_EXIT_CODE)
    # Initialize the Redis.
    self._redis_connection = redis.Redis(host=redis_address[0], port=redis_address[1], socket_keepalive=True)
    # redis.Redis() connects lazily, so ping() is used to force and verify
    # the actual connection up front.
    try:
        self._redis_connection.ping()
    except Exception as e:
        self._logger.error(
            f"{self._name} failure to connect to redis server due to {e}")
        sys.exit(NON_RESTART_EXIT_CODE)
    # Record the peer's redis information.
    self._peers_info_dict = {}
    for peer_type, number in expected_peers.items():
        self._peers_info_dict[peer_type] = _PEER_INFO(
            hash_table_name=f"{self._group_name}:{peer_type}",
            expected_number=number)
    # peer type -> {peer name -> connection info} for currently known peers.
    self._onboard_peer_dict = defaultdict(dict)
    # Temporary store the message.
    self._message_cache = defaultdict(list)
    # Parameters for dynamic peers.
    self._enable_rejoin = enable_rejoin
    self._is_remove_failed_container = is_remove_failed_container
    self._max_rejoin_times = max_rejoin_times
    if self._enable_rejoin:
        self._peers_catch_lifetime = peers_catch_lifetime
        self._timeout_for_minimal_peer_number = timeout_for_minimal_peer_number
        self._enable_message_cache = enable_message_cache_for_rejoin
        if self._enable_message_cache:
            # Bounded per-peer deque: old messages are dropped once the cap
            # is reached.
            self._message_cache_for_exited_peers = defaultdict(
                lambda: deque([], maxlen=max_length_for_message_cache))
        # Normalize minimal_peers to a per-peer-type dict, flooring at 1.
        if isinstance(minimal_peers, int):
            self._minimal_peers = {
                peer_type: max(minimal_peers, 1)
                for peer_type, peer_info in self._peers_info_dict.items()
            }
        elif isinstance(minimal_peers, dict):
            self._minimal_peers = {
                peer_type: max(minimal_peers[peer_type], 1)
                for peer_type, peer_info in self._peers_info_dict.items()
            }
        else:
            self._logger.error(
                "Unsupported minimal peers type, please use integer or dict."
            )
            sys.exit(NON_RESTART_EXIT_CODE)
    self._join()
def __init__(self, name, algorithm, experience_pool, logger=None):
    """Initialize the agent with its algorithm and experience pool.

    Args:
        name: agent name, forwarded to the base class.
        algorithm: the learning algorithm, forwarded to the base class.
        experience_pool: experience storage, forwarded to the base class.
        logger: used for logging important events. Defaults to a fresh
            DummyLogger when omitted.
    """
    # Set the logger before delegating, preserving the original order in
    # case the base initializer logs (cannot confirm from here). The per-call
    # fallback avoids a single DummyLogger shared via the default argument.
    self._logger = logger if logger is not None else DummyLogger()
    super().__init__(name, algorithm, experience_pool)