def fit(self,
         trn_data,
         dev_data,
         save_dir,
         transformer,
         max_seq_length=256,
         transformer_dropout=.33,
         d_positional=None,
         n_mlp_arc=500,
         n_mlp_rel=100,
         mlp_dropout=.33,
         optimizer='adamw',
         learning_rate=5e-5,
         learning_rate_transformer=None,
         weight_decay_rate=0,
         epsilon=1e-8,
         clipnorm=None,
         fp16=False,
         warmup_steps_ratio=0,
         arc_loss='binary_crossentropy',
         rel_loss='sparse_categorical_crossentropy',
         metrics=('UF', 'LF'),
         batch_size=3000,
         samples_per_batch=150,
         max_samples_per_batch=None,
         epochs=100,
         tree=False,
         punct=False,
         token_mapping=None,
         enhanced_only=False,
         run_eagerly=False,
         logger=None,
         verbose=True,
         **kwargs):
     """Train the model, exposing the full hyperparameter surface as keyword arguments.

     This is a thin configuration wrapper: it does no work itself.  Every named
     parameter is captured via ``locals()``, merged with ``kwargs`` by
     ``merge_locals_kwargs``, and forwarded to ``super().fit`` as keyword
     arguments.  Because of that, the parameter NAMES here are part of the
     runtime contract — renaming one silently changes the config key the base
     class receives.

     Args:
         trn_data: Training data (path or dataset; exact type depends on the
             base class — not visible here).
         dev_data: Development/validation data in the same form as ``trn_data``.
         save_dir: Directory where the trained model and logs are saved.
         transformer: Identifier of the pretrained transformer encoder to use.
         max_seq_length: Maximum input sequence length (default 256).
         transformer_dropout: Dropout rate applied to the transformer output.
         d_positional: Optional dimensionality for positional features.
         n_mlp_arc: Hidden size of the arc MLP (default 500).
         n_mlp_rel: Hidden size of the relation MLP (default 100).
         mlp_dropout: Dropout rate for the MLP layers.
         optimizer: Optimizer name (default ``'adamw'``).
         learning_rate: Base learning rate.
         learning_rate_transformer: Separate learning rate for the transformer
             layers; ``None`` presumably falls back to ``learning_rate`` —
             TODO confirm against the base class.
         weight_decay_rate: Weight decay coefficient.
         epsilon: Optimizer numerical-stability constant.
         clipnorm: Optional gradient-norm clipping threshold.
         fp16: Whether to train in mixed precision.
         warmup_steps_ratio: Fraction of steps used for LR warmup.
         arc_loss: Loss name for arc prediction (note: binary crossentropy
             here, unlike the sparse-categorical variant used elsewhere in
             this file — likely a multi-head/graph parser).
         rel_loss: Loss name for relation prediction.
         metrics: Metric names reported during training (default UF/LF).
         batch_size: Batch size measured in tokens or samples — not visible
             here; verify against the base class.
         samples_per_batch: Number of samples per batch (default 150).
         max_samples_per_batch: Optional hard cap on samples per batch.
         epochs: Maximum number of training epochs.
         tree: Whether to enforce tree constraints during decoding.
         punct: Whether punctuation is included in evaluation.
         token_mapping: Optional token remapping applied to the input.
         enhanced_only: Restrict to enhanced dependencies only — semantics
             defined by the base class.
         run_eagerly: Run the TF graph eagerly (debugging aid).
         logger: Optional logger instance.
         verbose: Whether to print progress.
         **kwargs: Extra options forwarded verbatim to ``super().fit``.

     Returns:
         Whatever ``super().fit`` returns (typically the training history;
         not visible from this block).
     """
     # merge_locals_kwargs folds every named parameter above (plus kwargs)
     # into a single dict of keyword arguments for the base implementation.
     return super().fit(**merge_locals_kwargs(locals(), kwargs))
# Example no. 2 (scraped snippet separator; original marker: "Ejemplo n.º 2", score 0)
 def __init__(self,
              config: SerializableDict = None,
              map_x=True,
              map_y=True,
              use_char=False,
              **kwargs) -> None:
     """Initialize the transform and declare its (not-yet-built) vocabularies.

     All named parameters are captured via ``locals()`` and forwarded to the
     base constructor by ``merge_locals_kwargs`` — their names are therefore
     config keys, not just local identifiers.

     Args:
         config: Optional serialized configuration to restore from.
         map_x: Whether inputs are mapped through the vocabularies —
             exact semantics live in the base class; TODO confirm.
         map_y: Whether outputs/labels are mapped through the vocabularies.
         use_char: Whether a character-level vocabulary is used.
         **kwargs: Extra options forwarded verbatim to the base constructor.
     """
     super().__init__(**merge_locals_kwargs(locals(), kwargs))
     # Vocabularies are declared here but left as None; presumably they are
     # built later during fitting — not visible from this block.
     self.word_vocab: Optional[VocabTF] = None
     self.tag_vocab: Optional[VocabTF] = None
     self.char_vocab: Optional[VocabTF] = None
# Example no. 3 (scraped snippet separator; original marker: "Ejemplo n.º 3", score 0)
 def __init__(self,
              config: SerializableDict = None,
              map_x=True,
              map_y=True,
              lower=False,
              **kwargs) -> None:
     """Initialize the transform and eagerly create its vocabularies.

     All named parameters are captured via ``locals()`` and forwarded to the
     base constructor by ``merge_locals_kwargs`` — their names are therefore
     config keys, not just local identifiers.

     Args:
         config: Optional serialized configuration to restore from.
         map_x: Whether inputs are mapped through the vocabularies —
             exact semantics live in the base class; TODO confirm.
         map_y: Whether outputs/labels are mapped through the vocabularies.
         lower: Whether tokens are lowercased — presumably applied during
             vocabulary building; verify against the base class.
         **kwargs: Extra options forwarded verbatim to the base constructor.
     """
     super().__init__(**merge_locals_kwargs(locals(), kwargs))
     # Unlike the word/tag/char variant above in this file, vocabularies are
     # instantiated immediately.  Only token_vocab keeps the default pad/unk
     # tokens; label-like vocabs disable them since labels need no padding
     # or unknown handling.
     self.token_vocab = VocabTF()
     self.pos_vocab = VocabTF(pad_token=None, unk_token=None)
     self.ner_vocab = VocabTF(pad_token=None)
     self.deprel_vocab = VocabTF(pad_token=None, unk_token=None)
     self.rel_vocab = VocabTF(pad_token=None, unk_token=None)
 def fit(self,
         trn_data,
         dev_data,
         save_dir,
         n_embed=100,
         pretrained_embed=None,
         embed_dropout=.33,
         n_lstm_hidden=400,
         n_lstm_layers=3,
         lstm_dropout=.33,
         n_mlp_arc=500,
         n_mlp_rel=100,
         mlp_dropout=.33,
         optimizer='adam',
         lr=2e-3,
         mu=.9,
         nu=.9,
         epsilon=1e-12,
         clip=5.0,
         decay=.75,
         decay_steps=5000,
         patience=100,
         arc_loss='sparse_categorical_crossentropy',
         rel_loss='sparse_categorical_crossentropy',
         metrics=('UAS', 'LAS'),
         n_buckets=32,
         batch_size=5000,
         epochs=50000,
         early_stopping_patience=100,
         tree=False,
         punct=False,
         min_freq=2,
         run_eagerly=False,
         logger=None,
         verbose=True,
         **kwargs):
     """Train a BiLSTM-based biaffine dependency parser.

     Thin configuration wrapper: every named parameter is captured via
     ``locals()``, merged with ``kwargs`` by ``merge_locals_kwargs``, and
     forwarded to ``super().fit``.  Parameter names are therefore part of
     the runtime contract.  The defaults (Adam with lr 2e-3, mu/nu 0.9,
     epsilon 1e-12, decay 0.75 every 5000 steps) match the classic Dozat &
     Manning biaffine parser recipe — presumably intentional; confirm
     against the base class.

     Args:
         trn_data: Training data (path or dataset; type defined by the base
             class — not visible here).
         dev_data: Development/validation data in the same form.
         save_dir: Directory where the trained model and logs are saved.
         n_embed: Word embedding dimensionality (default 100).
         pretrained_embed: Optional pretrained embedding source.
         embed_dropout: Dropout rate on embeddings.
         n_lstm_hidden: Hidden size of each LSTM direction (default 400).
         n_lstm_layers: Number of stacked LSTM layers (default 3).
         lstm_dropout: Dropout rate inside the LSTM stack.
         n_mlp_arc: Hidden size of the arc MLP (default 500).
         n_mlp_rel: Hidden size of the relation MLP (default 100).
         mlp_dropout: Dropout rate for the MLP layers.
         optimizer: Optimizer name (default ``'adam'``).
         lr: Learning rate.
         mu: First-moment decay rate (Adam beta1).
         nu: Second-moment decay rate (Adam beta2) — presumably; verify
             against the base class.
         epsilon: Optimizer numerical-stability constant.
         clip: Gradient clipping threshold.
         decay: Learning-rate decay factor.
         decay_steps: Steps between learning-rate decay applications.
         patience: Patience parameter — how it differs from
             ``early_stopping_patience`` is not visible here; confirm.
         arc_loss: Loss name for arc prediction.
         rel_loss: Loss name for relation prediction.
         metrics: Metric names reported during training (default UAS/LAS).
         n_buckets: Number of length buckets for batching.
         batch_size: Batch size — likely measured in tokens given the
             magnitude (5000); verify against the base class.
         epochs: Maximum number of training epochs (high default relies on
             early stopping to terminate).
         early_stopping_patience: Epochs without improvement before stopping.
         tree: Whether to enforce tree constraints during decoding.
         punct: Whether punctuation is included in evaluation.
         min_freq: Minimum token frequency for vocabulary inclusion.
         run_eagerly: Run the TF graph eagerly (debugging aid).
         logger: Optional logger instance.
         verbose: Whether to print progress.
         **kwargs: Extra options forwarded verbatim to ``super().fit``.

     Returns:
         Whatever ``super().fit`` returns (typically the training history;
         not visible from this block).
     """
     # merge_locals_kwargs folds every named parameter above (plus kwargs)
     # into a single dict of keyword arguments for the base implementation.
     return super().fit(**merge_locals_kwargs(locals(), kwargs))