def __init__(
    self,
    is_binary: bool = True,
    label_weights: Optional[Dict[str, float]] = None,
    loss=None,
):
    """Initialize the layer, choosing a default loss by label arity.

    Args:
        is_binary: when True (default) the fallback loss is binary
            cross-entropy; otherwise multi-class cross-entropy.
        label_weights: per-label weights; NOTE(review): not read in this
            constructor — presumably consumed elsewhere, confirm.
        loss: pre-built loss instance; when falsy a default is built here.
    """
    super().__init__()
    # Keep an explicitly supplied loss; otherwise lazily construct the
    # default that matches the binary / multi-class setting.
    if loss:
        self.loss = loss
    elif is_binary:
        self.loss = BinaryCrossEntropyLoss(BinaryCrossEntropyLoss.Config())
    else:
        self.loss = CrossEntropyLoss(CrossEntropyLoss.Config())
class Config(OutputLayerBase.Config):
    # Training loss; any of the supported classification losses.
    # Defaults to standard multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        BinaryCrossEntropyLoss.Config,
        AUCPRHingeLoss.Config,
        KLDivergenceBCELoss.Config,
        KLDivergenceCELoss.Config,
        LabelSmoothedCrossEntropyLoss.Config,
    ] = CrossEntropyLoss.Config()
    # Per-label loss weights keyed by label name; empty means unweighted.
    label_weights: Dict[str, float] = {}
    # Whether the padding label is excluded from the loss computation.
    ignore_pad_in_loss: Optional[bool] = True
def test_doc_classification_output_layer(self):
    """ignore_index tracks the PAD token: its vocab index when PAD is
    present, otherwise -1 (the default "nothing ignored" sentinel)."""
    tensorizer = LabelTensorizer()
    cases = [
        ([SpecialTokens.PAD, "foo", "bar"], 0),  # PAD sits at index 0
        (["foo", "bar"], -1),  # no PAD -> use default pad
    ]
    for tokens, expected_ignore_index in cases:
        tensorizer.vocab = Vocabulary(tokens)
        layer = ClassificationOutputLayer.from_config(
            config=ClassificationOutputLayer.Config(
                loss=CrossEntropyLoss.Config()
            ),
            labels=tensorizer.vocab,
        )
        self.assertEqual(layer.loss_fn.ignore_index, expected_ignore_index)
class Config(OutputLayerBase.Config):
    # Training loss; any of the supported (multi-label capable)
    # classification losses. Defaults to multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        BinaryCrossEntropyLoss.Config,
        MultiLabelSoftMarginLoss.Config,
        AUCPRHingeLoss.Config,
        KLDivergenceBCELoss.Config,
        KLDivergenceCELoss.Config,
        LabelSmoothedCrossEntropyLoss.Config,
    ] = CrossEntropyLoss.Config()
    # Optional per-label loss weights keyed by label name.
    label_weights: Optional[Dict[str, float]] = None
class Config(OutputLayerBase.Config):
    # Training loss for the span-prediction output.
    loss: CrossEntropyLoss.Config = CrossEntropyLoss.Config()
    # Whether unanswerable ("impossible") examples are ignored in the loss.
    ignore_impossible: bool = True
    # Relative weight of the position loss term — presumably the answer
    # span start/end loss; confirm against the layer implementation.
    pos_loss_weight: float = 0.5
    # Relative weight of the has-answer classification loss term.
    has_answer_loss_weight: float = 0.5
    # Label string used for the negative ("no answer") class.
    false_label: str = "False"
    # Maximum allowed answer span length.
    max_answer_len: int = 30
class Config(OutputLayerBase.Config):
    # Training loss; any of the supported classification losses.
    # Defaults to standard multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        BinaryCrossEntropyLoss.Config,
        AUCPRHingeLoss.Config,
        KLDivergenceBCELoss.Config,
        KLDivergenceCELoss.Config,
        SoftHardBCELoss.Config,
    ] = CrossEntropyLoss.Config()
class Config(OutputLayerBase.Config):
    # Training loss; any of the supported classification losses.
    # Defaults to standard multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        BinaryCrossEntropyLoss.Config,
        BinaryCrossEntropyWithLogitsLoss.Config,
        MultiLabelSoftMarginLoss.Config,
        AUCPRHingeLoss.Config,
        HingeLoss.Config,
        KLDivergenceBCELoss.Config,
        KLDivergenceCELoss.Config,
        LabelSmoothedCrossEntropyLoss.Config,
    ] = CrossEntropyLoss.Config()
    # Explicit per-label loss weights keyed by label name.
    label_weights: Optional[Dict[str, float]] = None
    # Strategy for deriving label weights automatically; presumably
    # mutually exclusive with explicit label_weights — confirm.
    automatic_label_weighting_method: Optional[WeightingMethod] = None
class Config(OutputLayerBase.Config):
    # Training loss; any of the supported classification losses.
    # Defaults to standard multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        BinaryCrossEntropyLoss.Config,
        AUCPRHingeLoss.Config,
        KLDivergenceBCELoss.Config,
        KLDivergenceCELoss.Config,
        SoftHardBCELoss.Config,
    ] = CrossEntropyLoss.Config()
    # Optional per-label loss weights keyed by label name.
    label_weights: Optional[Dict[str, float]] = None
class Config(OutputLayerBase.Config):
    # Training loss; KLDivergenceCELoss supports distillation targets.
    loss: Union[
        CrossEntropyLoss.Config, KLDivergenceCELoss.Config
    ] = CrossEntropyLoss.Config()
    # Whether unanswerable ("impossible") examples are ignored in the loss.
    ignore_impossible: bool = True
    # Relative weight of the position loss term — presumably the answer
    # span start/end loss; confirm against the layer implementation.
    pos_loss_weight: float = 0.5
    # Relative weight of the has-answer classification loss term.
    has_answer_loss_weight: float = 0.5
    # Label string used for the negative ("no answer") class.
    false_label: str = "False"
    # Maximum allowed answer span length.
    max_answer_len: int = 30
    # For knowledge distillation we have soft and hard labels. This specifies
    # the weight on loss against hard labels.
    hard_weight: float = 0.0
def from_config(cls, config, tensorizers):
    """Assemble the model from config: embedding -> representation ->
    decoder sized to the slot vocabulary -> tagging output layer."""
    token_tensorizer = tensorizers["tokens"]
    slot_vocab = tensorizers["slots"].vocab

    embedding = create_module(config.embedding, tensorizer=token_tensorizer)
    representation = create_module(
        config.representation, embed_dim=embedding.embedding_dim
    )
    decoder = create_module(
        config.decoder,
        in_dim=representation.representation_dim,
        out_dim=len(slot_vocab),
    )
    output_layer = MyTaggingOutputLayer(slot_vocab, CrossEntropyLoss(None))
    return cls(embedding, representation, decoder, output_layer)
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
    """Assemble the classifier from config: embedding -> representation ->
    decoder sized to the label set -> classification output layer."""
    label_list = tensorizers["labels"].labels

    embedding = cls.create_embedding(config, tensorizers)
    representation = create_module(
        config.representation, embed_dim=embedding.embedding_dim
    )
    decoder = create_module(
        config.decoder,
        in_dim=representation.representation_dim,
        out_dim=len(label_list),
    )
    return cls(
        embedding,
        representation,
        decoder,
        ClassificationOutputLayer(label_list, CrossEntropyLoss(None)),
    )
def from_config(cls, config, tensorizers):
    """Assemble the word tagger from config: embedding -> representation ->
    decoder sized to the label vocabulary -> word-tagging output layer."""
    label_vocab = tensorizers["labels"].vocab

    embedding = cls.create_embedding(config, tensorizers)
    representation = create_module(
        config.representation, embed_dim=embedding.embedding_dim
    )
    decoder = create_module(
        config.decoder,
        in_dim=representation.representation_dim,
        out_dim=len(label_vocab),
    )
    # TODO after migration: create_module(config.output_layer, tensorizers=tensorizers)
    output_layer = WordTaggingOutputLayer(label_vocab, CrossEntropyLoss(None))
    return cls(embedding, representation, decoder, output_layer)
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
    """Assemble the classifier from config, building the word embedding
    directly from the token vocabulary (UNK-initialized, no pretrained
    weights)."""
    token_vocab = tensorizers["tokens"].vocab
    label_list = tensorizers["labels"].labels

    embedding = WordEmbedding(
        len(token_vocab),
        config.embedding.embed_dim,
        None,
        None,
        token_vocab.idx[UNK],
        [],
    )
    representation = create_module(
        config.representation, embed_dim=embedding.embedding_dim
    )
    decoder = create_module(
        config.decoder,
        in_dim=representation.representation_dim,
        out_dim=len(label_list),
    )
    output_layer = ClassificationOutputLayer(label_list, CrossEntropyLoss(None))
    return cls(embedding, representation, decoder, output_layer)
def __init__(self, loss: Loss = None):
    """Store the provided loss, defaulting to multi-class cross-entropy.

    Args:
        loss: pre-built loss instance; when falsy, a default
            CrossEntropyLoss is constructed lazily.
    """
    super().__init__()
    if loss:
        self.loss = loss
    else:
        self.loss = CrossEntropyLoss(CrossEntropyLoss.Config())
class Config(OutputLayerBase.Config):
    # Training loss; fixed to multi-class cross-entropy.
    loss: CrossEntropyLoss.Config = CrossEntropyLoss.Config()
class Config(OutputLayerBase.Config):
    # Training loss; any of the supported classification losses.
    # Defaults to standard multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        BinaryCrossEntropyLoss.Config,
        AUCPRHingeLoss.Config,
    ] = CrossEntropyLoss.Config()
class Config(OutputLayerBase.Config):
    # Training loss; fixed to multi-class cross-entropy.
    loss: CrossEntropyLoss.Config = CrossEntropyLoss.Config()
    # Per-label loss weights keyed by label name; empty means unweighted.
    label_weights: Dict[str, float] = {}
class Config(ConfigBase):
    # Training loss; any of the supported sequence losses.
    # Defaults to standard multi-class cross-entropy.
    loss: Union[
        CrossEntropyLoss.Config,
        LabelSmoothedCrossEntropyLoss.Config,
        NLLLoss.Config,
    ] = CrossEntropyLoss.Config()