Example #1
0
 def __init__(self, word_features_vocab_sizes: List[int],
              vec_features_size: int, hidden_size: int,
              num_layers: int) -> None:
     super().__init__()
     # Encode the categorical word features into a single hidden vector.
     self._word_features_encoder = maybe_cuda(
         WordFeaturesEncoder(word_features_vocab_sizes, hidden_size, 1,
                             hidden_size))
     # Score that encoding concatenated with the precomputed vector features.
     self._features_classifier = maybe_cuda(
         DNNScorer(hidden_size + vec_features_size, hidden_size,
                   num_layers))
Example #2
0
 def __init__(self, vec_features_size: int, word_feature_sizes: List[int],
              hidden_size: int, num_layers: int) -> None:
     super().__init__()
     # Consider making the word embedding the same for all
     # token-type inputs, also for tactic-type inputs
     self._word_features_encoder = maybe_cuda(
         WordFeaturesEncoder(word_feature_sizes, hidden_size, 1,
                             hidden_size))
     self._features_classifier = maybe_cuda(
         DNNScorer(hidden_size + vec_features_size, hidden_size,
                   num_layers))
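
The two constructors above appear to describe the same features-only scorer: each categorical word feature is encoded by WordFeaturesEncoder, the result is concatenated with the precomputed vector features, and DNNScorer produces a score from that combined vector. The following is a minimal, self-contained sketch of that data flow; FeaturesScorerSketch and every size in it are invented for illustration and are not the project's WordFeaturesEncoder/DNNScorer implementation.

from typing import List

import torch
import torch.nn as nn


class FeaturesScorerSketch(nn.Module):
    def __init__(self, word_features_vocab_sizes: List[int],
                 vec_features_size: int, hidden_size: int,
                 num_layers: int) -> None:
        super().__init__()
        # One embedding table per categorical word feature.
        self._embeddings = nn.ModuleList(
            [nn.Embedding(vocab_size, hidden_size)
             for vocab_size in word_features_vocab_sizes])
        self._combine = nn.Linear(
            hidden_size * len(word_features_vocab_sizes), hidden_size)
        # A small MLP over the word-feature encoding plus the vector features.
        layers: List[nn.Module] = []
        in_size = hidden_size + vec_features_size
        for _ in range(num_layers):
            layers += [nn.Linear(in_size, hidden_size), nn.ReLU()]
            in_size = hidden_size
        layers.append(nn.Linear(in_size, 1))  # a single usefulness score
        self._scorer = nn.Sequential(*layers)

    def forward(self, word_features: torch.Tensor,
                vec_features: torch.Tensor) -> torch.Tensor:
        # word_features: (batch, num_word_features) of long indices
        # vec_features:  (batch, vec_features_size) of floats
        embedded = [emb(word_features[:, i])
                    for i, emb in enumerate(self._embeddings)]
        encoded = torch.relu(self._combine(torch.cat(embedded, dim=1)))
        return self._scorer(torch.cat((encoded, vec_features), dim=1))


# Made-up sizes, just to show the call shape.
sketch = FeaturesScorerSketch([100, 50, 20], vec_features_size=4,
                              hidden_size=64, num_layers=2)
scores = sketch(torch.zeros(8, 3, dtype=torch.long), torch.zeros(8, 4))
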
Example #3
0
 def __init__(self,
              wordf_sizes: List[int],
              vecf_size: int,
              hidden_size: int,
              num_layers: int,
              stem_vocab_size: int) -> None:
     super().__init__()
     self._word_features_encoder = maybe_cuda(
         WordFeaturesEncoder(wordf_sizes, hidden_size, 1, hidden_size))
     self._features_classifier = maybe_cuda(
         DNNClassifier(hidden_size + vecf_size, hidden_size,
                       stem_vocab_size, num_layers))
     self._softmax = maybe_cuda(nn.LogSoftmax(dim=1))
Example #4
0
 def __init__(self, vec_features_size: int,
              word_feature_vocab_sizes: List[int],
              term_token_vocab_size: int,
              hidden_size: int, num_layers: int,
              tactic_vocab_size: int) -> None:
     super().__init__()
     self._goal_encoder = EncoderRNN(term_token_vocab_size, hidden_size, hidden_size)
     self._hyp_encoder = EncoderRNN(term_token_vocab_size, hidden_size, hidden_size)
     self._word_features_encoder = WordFeaturesEncoder(word_feature_vocab_sizes,
                                                       hidden_size, num_layers-1,
                                                       hidden_size)
     # hidden_size * 3 is the concatenation of the goal, hypothesis, and
     # word-feature encodings; the raw vector features are appended to it.
     self._layer = nn.Linear(hidden_size * 3 + vec_features_size, hidden_size)
     self._out_layer = nn.Linear(hidden_size, tactic_vocab_size)
     self._softmax = maybe_cuda(nn.LogSoftmax(dim=1))
Example #5
0
 def _get_model(self, arg_values: Namespace,
                tactic_vocab_size: int,
                goal_vocab_size: int) -> CopyArgModel:
     assert self._word_feature_functions
     assert self._vec_feature_functions
     feature_vec_size = sum([feature.feature_size()
                             for feature in self._vec_feature_functions])
     word_feature_vocab_sizes = [feature.vocab_size()
                                 for feature in self._word_feature_functions]
     # Assemble the argument-copying model from its three parts: the
     # argument-finding core, a word-features encoder, and a DNN classifier
     # over the concatenated word and vector features.
     return CopyArgModel(FindArgModel(tactic_vocab_size,
                                      goal_vocab_size, arg_values.max_length,
                                      arg_values.hidden_size),
                         WordFeaturesEncoder(word_feature_vocab_sizes,
                                             arg_values.hidden_size, 1,
                                             arg_values.hidden_size),
                         DNNClassifier(arg_values.hidden_size + feature_vec_size,
                                       arg_values.hidden_size, tactic_vocab_size,
                                       3))
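
As a small, self-contained illustration of the feature-size bookkeeping in _get_model above: the vector features contribute a single summed width, while each word feature contributes its own vocabulary size. ConstVecFeature and ConstWordFeature below are invented stand-ins, not the project's feature implementations.

from typing import List


class ConstVecFeature:
    """Stand-in for a vector-valued context feature."""
    def __init__(self, width: int) -> None:
        self.width = width

    def feature_size(self) -> int:
        return self.width


class ConstWordFeature:
    """Stand-in for a categorical (word) context feature."""
    def __init__(self, vocab: int) -> None:
        self.vocab = vocab

    def vocab_size(self) -> int:
        return self.vocab


vec_feature_functions: List[ConstVecFeature] = [ConstVecFeature(1), ConstVecFeature(3)]
word_feature_functions: List[ConstWordFeature] = [ConstWordFeature(100), ConstWordFeature(50)]

feature_vec_size = sum(f.feature_size() for f in vec_feature_functions)      # 4
word_feature_vocab_sizes = [f.vocab_size() for f in word_feature_functions]  # [100, 50]

# feature_vec_size widens the DNNClassifier input, and word_feature_vocab_sizes
# parameterizes the WordFeaturesEncoder, mirroring the computation in _get_model.
print(feature_vec_size, word_feature_vocab_sizes)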