def __init__(self, word_features_vocab_sizes: List[int], vec_features_size: int, hidden_size: int, num_layers: int) -> None:
    """Build the word-feature encoder and the downstream feature classifier.

    :param word_features_vocab_sizes: vocabulary size for each word-feature input
    :param vec_features_size: width of the dense (vector) feature input,
        concatenated onto the encoder output before classification
    :param hidden_size: hidden width shared by the encoder and the classifier
    :param num_layers: layer count forwarded to ``DNNScorer``
    """
    super().__init__()
    # Construct the submodules first, then move each onto the GPU if available.
    encoder = WordFeaturesEncoder(word_features_vocab_sizes, hidden_size, 1, hidden_size)
    classifier = DNNScorer(hidden_size + vec_features_size, hidden_size, num_layers)
    self._word_features_encoder = maybe_cuda(encoder)
    self._features_classifier = maybe_cuda(classifier)
def __init__(self, vec_features_size: int, word_feature_sizes: List[int], hidden_size: int, num_layers: int) -> None:
    """Set up the word-feature encoder / classifier pair for this model.

    :param vec_features_size: width of the dense (vector) feature input;
        added to ``hidden_size`` to size the classifier's input layer
    :param word_feature_sizes: vocabulary size for each word-feature input
    :param hidden_size: hidden width shared by the encoder and the classifier
    :param num_layers: layer count forwarded to ``DNNScorer``
    """
    super().__init__()
    # Consider making the word embedding the same for all
    # token-type inputs, also for tactic-type inputs
    self._word_features_encoder = maybe_cuda(WordFeaturesEncoder(word_feature_sizes, hidden_size, 1, hidden_size))
    self._features_classifier = maybe_cuda(DNNScorer(hidden_size + vec_features_size, hidden_size, num_layers))
def __init__(self, input_vocab_size: int, hidden_size: int, num_layers: int) -> None:
    """Build the token embedding, a single-layer LSTM, and a DNN scorer.

    :param input_vocab_size: number of distinct token ids the embedding accepts
    :param hidden_size: shared width of the embedding, LSTM state, and scorer
    :param num_layers: layer count forwarded to ``DNNScorer``
        (the LSTM itself is fixed at one layer)
    """
    super().__init__()
    self._hidden_size = hidden_size
    self._token_embedding = maybe_cuda(nn.Embedding(input_vocab_size, hidden_size))
    # batch_first=True: inputs/outputs are laid out (batch, seq, feature).
    self._lstm = maybe_cuda(nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,
                                    num_layers=1, batch_first=True))
    self._scorer = maybe_cuda(DNNScorer(hidden_size, hidden_size, num_layers))
def __init__(self, num_tactics: int, hidden_size: int, num_layers: int) -> None:
    """Build a DNN scorer over raw tactic indices.

    Tactics are fed to the DNN directly (input width ``num_tactics``);
    a previously commented-out embedding layer has been removed as dead code.

    :param num_tactics: number of distinct tactics; also the DNN's input width
    :param hidden_size: hidden width of the scorer
    :param num_layers: layer count forwarded to ``DNNScorer``
    """
    super().__init__()
    self.num_tactics = num_tactics
    self.dnn = maybe_cuda(DNNScorer(num_tactics, hidden_size, num_layers))