Example #1: building the model and an Adam optimizer inside a policy's _setup_graph (older registry-based catalog API).
    def _setup_graph(self, ob_space, ac_space):
        # Size the model's output layer from the action distribution.
        _, self.logit_dim = ModelCatalog.get_action_dist(ac_space)
        # Build the torch model through the (older, registry-based) catalog.
        self._model = ModelCatalog.get_torch_model(self.registry, ob_space,
                                                   self.logit_dim,
                                                   self.config["model"])
        self.optimizer = torch.optim.Adam(self._model.parameters(),
                                          lr=self.config["lr"])
Example #2: an A3C TorchPolicyGraph constructor; note that this version passes obs_space.shape rather than the space itself.
    def __init__(self, obs_space, action_space, config):
        # Merge the user config over the A3C defaults.
        config = dict(ray.rllib.a3c.a3c.DEFAULT_CONFIG, **config)
        self.config = config
        _, self.logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
        # This variant passes the observation shape, not the space object.
        self.model = ModelCatalog.get_torch_model(
            obs_space.shape, self.logit_dim, self.config["model"])
        loss = A3CLoss(
            self.model, self.config["vf_loss_coeff"],
            self.config["entropy_coeff"])
        TorchPolicyGraph.__init__(
            self, obs_space, action_space, self.model, loss,
            loss_inputs=[
                "obs", "actions", "advantages", "value_targets"])
Example #3: a policy-gradient TorchPolicyGraph constructor wiring the model into PGLoss.
    def __init__(self, obs_space, action_space, config):
        config = dict(ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG, **config)
        self.config = config
        _, self.logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
        self.model = ModelCatalog.get_torch_model(obs_space, self.logit_dim,
                                                  self.config["model"])
        loss = PGLoss(self.model)

        TorchPolicyGraph.__init__(self,
                                  obs_space,
                                  action_space,
                                  self.model,
                                  loss,
                                  loss_inputs=["obs", "actions", "advantages"])
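PGLoss itself is not reproduced on this page. For orientation, here is a minimal sketch of what a policy-gradient loss module wired to the obs/actions/advantages inputs typically computes; this is an assumption about the shape of PGLoss, not this repo's actual code, and it presumes the model maps observation batches straight to action logits:

    import torch
    import torch.nn as nn

    class PGLoss(nn.Module):
        """Vanilla policy gradient: minimize -E[log pi(a|s) * advantage]."""

        def __init__(self, policy_model):
            nn.Module.__init__(self)
            self.policy_model = policy_model

        def forward(self, observations, actions, advantages):
            # Assumed interface: observation batch in, action logits out.
            logits = self.policy_model(observations)
            log_probs = torch.log_softmax(logits, dim=-1)
            chosen = log_probs.gather(
                -1, actions.long().unsqueeze(-1)).squeeze(-1)
            return -(chosen * advantages).mean()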
Example #4: an A3C TorchPolicyGraph constructor that passes the observation space directly.
    def __init__(self, obs_space, action_space, config):
        config = dict(ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG, **config)
        self.config = config
        # Size the output layer from the action distribution.
        _, self.logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
        self.model = ModelCatalog.get_torch_model(obs_space, self.logit_dim,
                                                  self.config["model"])
        loss = A3CLoss(self.model, self.config["vf_loss_coeff"],
                       self.config["entropy_coeff"])
        TorchPolicyGraph.__init__(
            self,
            obs_space,
            action_space,
            self.model,
            loss,
            loss_inputs=["obs", "actions", "advantages", "value_targets"])
Example #5: the policy-gradient constructor again, naming the loss inputs via the SampleBatch and Postprocessing constants instead of raw strings.
    def __init__(self, obs_space, action_space, config):
        config = dict(ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG, **config)
        self.config = config
        _, self.logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
        self.model = ModelCatalog.get_torch_model(obs_space, self.logit_dim,
                                                  self.config["model"])
        loss = PGLoss(self.model)

        TorchPolicyGraph.__init__(
            self,
            obs_space,
            action_space,
            self.model,
            loss,
            loss_inputs=[
                SampleBatch.CUR_OBS, SampleBatch.ACTIONS,
                Postprocessing.ADVANTAGES
            ])
Example #6: the __init__ generated by a torch policy template, with optional before_init/after_init hooks and a custom model/action-distribution factory.
    def __init__(self, obs_space, action_space, config):
        if get_default_config:
            config = dict(get_default_config(), **config)
        self.config = config

        if before_init:
            before_init(self, obs_space, action_space, config)

        if make_model_and_action_dist:
            self.model, self.dist_class = make_model_and_action_dist(
                self, obs_space, action_space, config)
        else:
            self.dist_class, logit_dim = ModelCatalog.get_action_dist(
                action_space, self.config["model"], torch=True)
            self.model = ModelCatalog.get_torch_model(
                obs_space, logit_dim, self.config["model"])

        TorchPolicy.__init__(self, obs_space, action_space, self.model,
                             loss_fn, self.dist_class)

        if after_init:
            after_init(self, obs_space, action_space, config)
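Example #6 is not a standalone class: get_default_config, before_init, make_model_and_action_dist, loss_fn, and after_init are free variables closed over by an enclosing builder function (RLlib's torch policy template). A schematic of that enclosing shape, with the body elided because it is exactly the __init__ above:

    def build_torch_policy(name, loss_fn, get_default_config=None,
                           before_init=None, after_init=None,
                           make_model_and_action_dist=None):
        # Returns a fresh TorchPolicy subclass whose __init__ is the one
        # shown in Example #6, closing over the arguments above.
        class policy_cls(TorchPolicy):
            def __init__(self, obs_space, action_space, config):
                ...  # body exactly as in Example #6

        policy_cls.__name__ = name
        return policy_cls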
Example #7: the QMIX torch policy constructor, which builds online and target models, an optional mixer network, and an RMSprop optimiser.
    def __init__(self, obs_space, action_space, config):
        _validate(obs_space, action_space)
        config = dict(ray.rllib.agents.qmix.qmix.DEFAULT_CONFIG, **config)
        self.config = config
        self.observation_space = obs_space
        self.action_space = action_space
        self.n_agents = len(obs_space.original_space.spaces)
        self.n_actions = action_space.spaces[0].n
        self.h_size = config["model"]["lstm_cell_size"]

        agent_obs_space = obs_space.original_space.spaces[0]
        if isinstance(agent_obs_space, Dict):
            space_keys = set(agent_obs_space.spaces.keys())
            if space_keys != {"obs", "action_mask"}:
                raise ValueError(
                    "Dict obs space for agent must have keyset "
                    "['obs', 'action_mask'], got {}".format(space_keys))
            mask_shape = tuple(agent_obs_space.spaces["action_mask"].shape)
            if mask_shape != (self.n_actions, ):
                raise ValueError("Action mask shape must be {}, got {}".format(
                    (self.n_actions, ), mask_shape))
            self.has_action_mask = True
            self.obs_size = _get_size(agent_obs_space.spaces["obs"])
            # The real agent obs space is nested inside the dict
            agent_obs_space = agent_obs_space.spaces["obs"]
        else:
            self.has_action_mask = False
            self.obs_size = _get_size(agent_obs_space)

        self.model = ModelCatalog.get_torch_model(agent_obs_space,
                                                  self.n_actions,
                                                  config["model"],
                                                  default_model_cls=RNNModel)
        self.target_model = ModelCatalog.get_torch_model(
            agent_obs_space,
            self.n_actions,
            config["model"],
            default_model_cls=RNNModel)

        # Setup the mixer network.
        # The global state is just the stacked agent observations for now.
        self.state_shape = [self.obs_size, self.n_agents]
        if config["mixer"] is None:
            self.mixer = None
            self.target_mixer = None
        elif config["mixer"] == "qmix":
            self.mixer = QMixer(self.n_agents, self.state_shape,
                                config["mixing_embed_dim"])
            self.target_mixer = QMixer(self.n_agents, self.state_shape,
                                       config["mixing_embed_dim"])
        elif config["mixer"] == "vdn":
            self.mixer = VDNMixer()
            self.target_mixer = VDNMixer()
        else:
            raise ValueError("Unknown mixer type {}".format(config["mixer"]))

        self.cur_epsilon = 1.0
        self.update_target()  # initial sync

        # Setup optimizer
        self.params = list(self.model.parameters())
        if self.mixer:
            self.params += list(self.mixer.parameters())
        self.loss = QMixLoss(self.model, self.target_model, self.mixer,
                             self.target_mixer, self.n_agents, self.n_actions,
                             self.config["double_q"], self.config["gamma"])
        self.optimiser = RMSprop(params=self.params,
                                 lr=config["lr"],
                                 alpha=config["optim_alpha"],
                                 eps=config["optim_eps"])
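Both QMIX examples call self.update_target() immediately after construction to synchronize the freshly built target networks. That helper is not shown on this page; a plausible implementation (an assumption, not code from this page) is a hard copy of the online weights:

    def update_target(self):
        # Hard sync: copy the online weights into the target networks.
        self.target_model.load_state_dict(self.model.state_dict())
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())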
Example #8: a minimal _setup_graph with no model options and a hard-coded learning rate.
    def _setup_graph(self, ob_space, ac_space):
        _, self.logit_dim = ModelCatalog.get_action_dist(ac_space)
        self._model = ModelCatalog.get_torch_model(ob_space, self.logit_dim)
        self.optimizer = torch.optim.Adam(self._model.parameters(), lr=0.0001)
Example #9: the registry-based _setup_graph from Example #1, formatted differently.
    def _setup_graph(self, ob_space, ac_space):
        _, self.logit_dim = ModelCatalog.get_action_dist(ac_space)
        self._model = ModelCatalog.get_torch_model(
            self.registry, ob_space, self.logit_dim, self.config["model"])
        self.optimizer = torch.optim.Adam(
            self._model.parameters(), lr=self.config["lr"])
Example #10: a QMIX torch policy constructor variant, nearly identical to Example #7.
    def __init__(self, obs_space, action_space, config):
        _validate(obs_space, action_space)
        config = dict(ray.rllib.agents.qmix.qmix.DEFAULT_CONFIG, **config)
        self.config = config
        self.observation_space = obs_space
        self.action_space = action_space
        self.n_agents = len(obs_space.original_space.spaces)
        self.n_actions = action_space.spaces[0].n
        self.h_size = config["model"]["lstm_cell_size"]

        agent_obs_space = obs_space.original_space.spaces[0]
        if isinstance(agent_obs_space, Dict):
            space_keys = set(agent_obs_space.spaces.keys())
            if space_keys != {"obs", "action_mask"}:
                raise ValueError(
                    "Dict obs space for agent must have keyset "
                    "['obs', 'action_mask'], got {}".format(space_keys))
            mask_shape = tuple(agent_obs_space.spaces["action_mask"].shape)
            if mask_shape != (self.n_actions, ):
                raise ValueError("Action mask shape must be {}, got {}".format(
                    (self.n_actions, ), mask_shape))
            self.has_action_mask = True
            self.obs_size = _get_size(agent_obs_space.spaces["obs"])
            # The real agent obs space is nested inside the dict
            agent_obs_space = agent_obs_space.spaces["obs"]
        else:
            self.has_action_mask = False
            self.obs_size = _get_size(agent_obs_space)

        self.model = ModelCatalog.get_torch_model(
            agent_obs_space,
            self.n_actions,
            config["model"],
            default_model_cls=RNNModel)
        self.target_model = ModelCatalog.get_torch_model(
            agent_obs_space,
            self.n_actions,
            config["model"],
            default_model_cls=RNNModel)

        # Setup the mixer network.
        # The global state is just the stacked agent observations for now.
        self.state_shape = [self.obs_size, self.n_agents]
        if config["mixer"] is None:
            self.mixer = None
            self.target_mixer = None
        elif config["mixer"] == "qmix":
            self.mixer = QMixer(self.n_agents, self.state_shape,
                                config["mixing_embed_dim"])
            self.target_mixer = QMixer(self.n_agents, self.state_shape,
                                       config["mixing_embed_dim"])
        elif config["mixer"] == "vdn":
            self.mixer = VDNMixer()
            self.target_mixer = VDNMixer()
        else:
            raise ValueError("Unknown mixer type {}".format(config["mixer"]))

        self.cur_epsilon = 1.0
        self.update_target()  # initial sync

        # Setup optimizer. As in Example #7, the mixer parameters must be
        # included here as well, or the mixer network never trains.
        self.params = list(self.model.parameters())
        if self.mixer:
            self.params += list(self.mixer.parameters())
        self.loss = QMixLoss(self.model, self.target_model, self.mixer,
                             self.target_mixer, self.n_agents, self.n_actions,
                             self.config["double_q"], self.config["gamma"])
        self.optimiser = RMSprop(
            params=self.params,
            lr=config["lr"],
            alpha=config["optim_alpha"],
            eps=config["optim_eps"])
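Across all ten examples the pattern is the same two catalog calls: get_action_dist chooses an action distribution and reports how many logits the model must emit, and get_torch_model builds a torch network with that output size. A self-contained sketch of the pattern, assuming an older RLlib release in which get_torch_model still exists; the spaces and the learning rate here are made up for illustration:

    import gym
    import torch
    from ray.rllib.models import ModelCatalog

    obs_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,))
    action_space = gym.spaces.Discrete(2)
    model_config = {}  # would normally come from config["model"]

    # Choose an action distribution and get the required logit count.
    _, logit_dim = ModelCatalog.get_action_dist(action_space, model_config)

    # Build the torch model and an optimizer over its parameters.
    model = ModelCatalog.get_torch_model(obs_space, logit_dim, model_config)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)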