Example 1
    def __init__(self, embedding_file, graph_path, params=None):
        """Set up the evaluation with an embedding file and a graph path.

        Args:
            embedding_file: path to the file holding the node embeddings.
            graph_path: path to the graph file to evaluate against.
            params: optional dict of settings; only the ``'directed'``
                flag (default ``False``) is read here.
        """
        Evaluation.__init__(self)

        # Use None as the default instead of a shared mutable dict.
        params = {} if params is None else params

        self._embedding_file = embedding_file
        self._graph_path = graph_path
        # dict.get replaces the manual membership check.
        self._directed = params.get('directed', False)

        # Populated later by the evaluation run.
        self.results = None
Example 2
    def __init__(self, config):
        """Build the agent: state-aggregation module, optimizers, model
        savers and the exploration strategy, all driven by ``config``.

        Args:
            config: agent configuration; must expose ``metrics_k``,
                ``hyperparameters`` and ``file_to_save_model``.
        """
        BaseAgent.__init__(self, config)
        Evaluation.__init__(self, self.config.metrics_k)
        self.metrics_k = config.metrics_k
        self.state_config = self.config.hyperparameters["State"]
        self.embedding = self.get_embedding().to(self.device)
        self.embedding_dim = self.embedding.embedding_dim

        # Initialize state-module
        self.state_agg = RNNStateAgg(self.embedding,
                                     state_config=self.state_config,
                                     reward_range=[0, 1],
                                     with_rewards=False).to(self.device)
        self.state_size = self.state_agg.state_size
        self.state_optimizer = self.create_state_optimizer()

        # Pretraining snapshots go to a dedicated "pretrain" sub-directory.
        if self.config.hyperparameters[
                "state-only-pretrain"] or self.config.hyperparameters[
                    "pretrain"]:
            save_dir = Path(config.file_to_save_model).parent / "pretrain"
            self.pretrain_model_saver = ModelSaver(save_dir)

        if self.config.hyperparameters["state-only-pretrain"]:
            # Linear head mapping the aggregated state to one logit per
            # action; used only in state-only pretraining.
            self.output_layer = torch.nn.Linear(
                self.state_size,
                self.environment.action_space.n).to(self.device)
            self.output_layer_optimizer = torch.optim.Adam(
                self.output_layer.parameters())

        self.user_history_mask_items = None
        # NOTE(review): this reads ``self.hyperparameters`` while the rest
        # of the method uses ``self.config.hyperparameters`` — presumably
        # an alias set by BaseAgent; confirm.
        self.masking_enabled = self.hyperparameters.get("history_masking")

        self.model_saver = ModelSaver(Path(config.file_to_save_model).parent)
        self.exploration_strategy = Epsilon_Greedy_Exploration(self.config)

        # ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``bool`` produces the identical dtype.
        self.last_done = np.zeros(self.environment.num_envs, dtype=bool)
Example 3
 def __init__(self, train_env, k):
     """Create the evaluation wrapper around a training environment.

     Args:
         train_env: environment used for training-time evaluation.
         k: cutoff value forwarded to the ``Evaluation`` base initializer.
     """
     Evaluation.__init__(self, k)
     # Kept for later evaluation runs against the training environment.
     self.train_env = train_env