def from_config(cls, config, tensorizers):
    """Build the tagging model from its config plus the task tensorizers.

    Wires embedding -> representation -> decoder, sizing the decoder's
    output to the slot vocabulary.
    """
    slot_vocab = tensorizers["slots"].vocab
    token_embedding = create_module(config.embedding, tensorizer=tensorizers["tokens"])
    repr_module = create_module(
        config.representation, embed_dim=token_embedding.embedding_dim
    )
    decoder_module = create_module(
        config.decoder,
        in_dim=repr_module.representation_dim,
        out_dim=len(slot_vocab),
    )
    out_layer = MyTaggingOutputLayer(slot_vocab, CrossEntropyLoss(None))
    return cls(token_embedding, repr_module, decoder_module, out_layer)
def __init__(
    self,
    is_binary: bool = True,
    label_weights: Optional[Dict[str, float]] = None,
    loss=None,
):
    """Initialize the layer, defaulting the loss by task type.

    When no loss is supplied, a binary task gets BinaryCrossEntropyLoss
    and a multiclass task gets CrossEntropyLoss, each built from its own
    default Config. NOTE(review): label_weights is accepted but not used
    here — presumably consumed elsewhere; confirm against callers.
    """
    super().__init__()
    default_loss_cls = BinaryCrossEntropyLoss if is_binary else CrossEntropyLoss
    # Preserve short-circuit semantics of `loss or default`: any falsy
    # `loss` triggers construction of the default.
    self.loss = loss if loss else default_loss_cls(default_loss_cls.Config())
def from_config(cls, config, tensorizers):
    """Assemble the word-tagging model from config and tensorizers.

    The decoder's output dimension is the size of the label vocabulary.
    """
    label_vocab = tensorizers["labels"].vocab
    emb = cls.create_embedding(config, tensorizers)
    rep = create_module(config.representation, embed_dim=emb.embedding_dim)
    dec = create_module(
        config.decoder,
        in_dim=rep.representation_dim,
        out_dim=len(label_vocab),
    )
    # TODO after migration: create_module(config.output_layer, tensorizers=tensorizers)
    out = WordTaggingOutputLayer(label_vocab, CrossEntropyLoss(None))
    return cls(emb, rep, dec, out)
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
    """Construct the classification model from config and tensorizers.

    Uses the label set from the "labels" tensorizer to size the decoder.
    """
    label_set = tensorizers["labels"].labels
    emb = cls.create_embedding(config, tensorizers)
    rep = create_module(config.representation, embed_dim=emb.embedding_dim)
    dec = create_module(
        config.decoder,
        in_dim=rep.representation_dim,
        out_dim=len(label_set),
    )
    out = ClassificationOutputLayer(label_set, CrossEntropyLoss(None))
    return cls(emb, rep, dec, out)
def from_config(cls, config: Config, tensorizers: Dict[str, Tensorizer]):
    """Build the model with an explicitly constructed WordEmbedding.

    The embedding is created directly from the token vocabulary rather
    than via create_module; the UNK index comes from the vocab itself.
    """
    token_vocab = tensorizers["tokens"].vocab
    label_set = tensorizers["labels"].labels
    emb = WordEmbedding(
        len(token_vocab),
        config.embedding.embed_dim,
        None,
        None,
        token_vocab.idx[UNK],
        [],
    )
    rep = create_module(config.representation, embed_dim=emb.embedding_dim)
    dec = create_module(
        config.decoder,
        in_dim=rep.representation_dim,
        out_dim=len(label_set),
    )
    out = ClassificationOutputLayer(label_set, CrossEntropyLoss(None))
    return cls(emb, rep, dec, out)
def __init__(self, loss: Loss = None):
    """Initialize with the given loss, or a default CrossEntropyLoss.

    Preserves `loss or default` semantics: any falsy `loss` causes the
    default CrossEntropyLoss (built from its default Config) to be used.
    """
    super().__init__()
    self.loss = loss if loss else CrossEntropyLoss(CrossEntropyLoss.Config())