def __init__(
    self,
    vocab: Vocabulary,
    embedder: TextFieldEmbedder,
    encoder: Seq2VecEncoder,
    posclass_weight: float = 1.0,
    use_power: bool = False,
    dropout: float = 0.0,
) -> None:
    """Text classifier over the 'labels' namespace (tokens 'True'/'False').

    Pipeline implied by the modules built here: embed -> encode -> (optional
    power scalar concat) -> linear classifier, with a weighted cross-entropy
    loss and F1 metrics per class plus micro/macro averages.

    Args:
        vocab: Vocabulary holding the 'labels' namespace.
        embedder: Token embedder applied to the input text field.
        encoder: Seq2Vec encoder producing a fixed-size representation.
        posclass_weight: Loss weight assigned to the 'False' (lie) class.
            (Was annotated Optional, but None was never handled — it would
            break the weight list — so the annotation is now plain float.)
        use_power: If True, the classifier expects one extra scalar feature
            concatenated to the encoder output (hence in_features + 1).
        dropout: Dropout probability applied via torch.nn.Dropout.
    """
    super().__init__(vocab)
    self.embedder = embedder
    self.encoder = encoder
    self.use_power = use_power

    num_labels = vocab.get_vocab_size('labels')
    # One extra input feature carries the "power" scalar when enabled.
    classifier_in = encoder.get_output_dim() + (1 if use_power else 0)
    self.classifier = torch.nn.Linear(
        in_features=classifier_in,
        out_features=num_labels,
    )

    # Per-class F1 ('False' = lie, 'True' = truth) plus averaged variants.
    self.f1_lie = F1Measure(vocab.get_token_index('False', 'labels'))
    self.f1_truth = F1Measure(vocab.get_token_index('True', 'labels'))
    self.micro_f1 = FBetaMeasure(average='micro')
    self.macro_f1 = FBetaMeasure(average='macro')

    # Build the class-weight vector from the label vocabulary size instead
    # of a hard-coded [1, 1]: identical for the 2-label case, but correct
    # if the label inventory ever grows (the old list would either crash
    # CrossEntropyLoss or index out of range).
    weights = [1.0] * max(num_labels, 2)
    weights[vocab.get_token_index('False', 'labels')] = posclass_weight
    self.loss = torch.nn.CrossEntropyLoss(weight=torch.Tensor(weights))

    self.dropout = torch.nn.Dropout(dropout)
def __init__(self, pooler: Seq2VecEncoder):
    """Wrap *pooler* so it is applied per span rather than per time step.

    The output dimension is taken directly from the wrapped pooler.
    """
    super().__init__()
    self._input_dim = pooler.get_output_dim()
    # TimeDistributed here spreads the pooler across the span dimension,
    # not the actual time dimension.
    self._pooler = TimeDistributed(pooler)
def __init__(
    self,
    word_embeddings: TextFieldEmbedder,
    encoder: Seq2VecEncoder,
    vocab: Vocabulary,
) -> None:
    """Classifier built here: embed -> Seq2Vec encode -> linear projection.

    Tracked with categorical accuracy and trained with (unweighted)
    cross-entropy loss over the 'labels' vocabulary namespace.
    """
    super().__init__(vocab)
    self.word_embeddings = word_embeddings
    self.encoder = encoder
    # Project the fixed-size encoding onto the label space.
    encoding_dim = encoder.get_output_dim()
    label_count = vocab.get_vocab_size('labels')
    self.hidden2tag = torch.nn.Linear(
        in_features=encoding_dim,
        out_features=label_count,
    )
    self.accuracy = CategoricalAccuracy()
    self.loss_function = torch.nn.CrossEntropyLoss()