# Example no. 1 (score: 0)
 def __init__(self, num_threads: int = 0):
     """Set up this object's thread pool.

     A positive ``num_threads`` builds a dedicated ``pydipcc.ThreadPool``;
     zero or a negative value falls back to the shared no-thread pool.
     """
     if num_threads > 0:
         self.thread_pool = pydipcc.ThreadPool(num_threads,
                                               ORDER_VOCABULARY_TO_IDX,
                                               MAX_VALID_LEN)
     else:
         self.thread_pool = self._get_nothread_pool()
 def __init__(self,
              model_path,
              temperature,
              top_p=1.0,
              device=_DEFAULT_DEVICE):
     """Load a diplomacy model in eval mode and record sampling settings.

     Args:
         model_path: checkpoint to load via ``load_diplomacy_model``.
         temperature: sampling temperature stored for later use.
         top_p: nucleus-sampling cutoff (default 1.0 = disabled).
         device: map location for the model weights.
     """
     # Sampling / placement configuration.
     self.temperature = temperature
     self.top_p = top_p
     self.device = device
     # Model is loaded directly onto the requested device in eval mode.
     self.model = load_diplomacy_model(model_path,
                                       map_location=device,
                                       eval=True)
     # Single-worker pool sized by the full order vocabulary.
     self.thread_pool = pydipcc.ThreadPool(
         1, ORDER_VOCABULARY_TO_IDX, get_order_vocabulary_idxs_len())
    def __init__(
        self,
        *,
        model_path,
        value_model_path=None,
        max_batch_size,
        max_rollout_length=3,
        rollout_temperature,
        rollout_top_p=1.0,
        n_rollout_procs=70,
        device=0,
        mix_square_ratio_scoring=0,
        clear_old_all_possible_orders=False,
        **kwargs,
    ):
        """Configure a rollout agent: policy/value models, device, and pool.

        Args:
            model_path: checkpoint for the policy model (required).
            value_model_path: optional separate value-model checkpoint; when
                ``None`` the policy model is reused for value estimates.
            max_batch_size: largest batch fed to the model at once.
            max_rollout_length: number of phases to roll forward (default 3).
            rollout_temperature: sampling temperature during rollouts.
            rollout_top_p: nucleus-sampling cutoff during rollouts.
            n_rollout_procs: worker count for the ``pydipcc`` thread pool.
            device: CUDA device spec; ignored (CPU used) when CUDA is absent.
            mix_square_ratio_scoring: scoring-mix coefficient.
            clear_old_all_possible_orders: whether to drop stale order caches.
            **kwargs: forwarded unchanged to the superclass initializer.
        """
        super().__init__(**kwargs)

        # Plain configuration attributes.
        self.n_rollout_procs = n_rollout_procs
        self.rollout_temperature = rollout_temperature
        self.rollout_top_p = rollout_top_p
        self.max_batch_size = max_batch_size
        self.max_rollout_length = max_rollout_length
        self.mix_square_ratio_scoring = mix_square_ratio_scoring
        self.clear_old_all_possible_orders = clear_old_all_possible_orders

        # Resolve the compute device; fall back to CPU when CUDA is missing.
        if torch.cuda.is_available():
            self.device = parse_device(device)
        else:
            self.device = "cpu"

        # Load on CPU first, then move: loading straight to GPU would also
        # pull in optimizer state we don't care about.
        self.model = load_diplomacy_model(model_path, eval=True)
        self.model.to(self.device)

        if value_model_path is None:
            # No dedicated value model: share the policy model.
            self.value_model = self.model
        else:
            # Same load-then-move dance as the policy model, for the same
            # optimizer-state reason.
            self.value_model = load_diplomacy_model(value_model_path,
                                                    eval=True)
            self.value_model.to(self.device)

        self.thread_pool = pydipcc.ThreadPool(n_rollout_procs,
                                              ORDER_VOCABULARY_TO_IDX,
                                              get_order_vocabulary_idxs_len())
# Example no. 4 (score: 0)
 def _get_nothread_pool(cls) -> pydipcc.ThreadPool:
     """Return the class-wide zero-thread pool, creating it lazily on first use."""
     pool = cls.nothread_pool_singleton
     if pool is None:
         pool = pydipcc.ThreadPool(0, ORDER_VOCABULARY_TO_IDX, MAX_VALID_LEN)
         cls.nothread_pool_singleton = pool
     return pool