@classmethod
def create(cls,
           label_size,
           intent_size,
           # BertEmbedder params
           model_name='bert-base-multilingual-cased',
           mode="weighted",
           is_freeze=True,
           # BiLSTM params (accepted for API compatibility; no BiLSTM is built in this factory)
           hidden_dim=512,
           rnn_layers=1,
           lstm_dropout=0.3,
           # Attn params
           embedding_size=768,
           key_dim=64,
           val_dim=64,
           num_heads=3,
           attn_dropout=0.3,
           # CRFDecoder params
           crf_dropout=0.5,
           # Clf params
           clf_dropout=0.3,
           # Global params
           device="cuda"):
    # Token embeddings from (optionally frozen) multilingual BERT.
    embeddings = BERTEmbedder.create(
        model_name=model_name, device=device, mode=mode, is_freeze=is_freeze)
    # Multi-head attention sized to embedding_size: with no BiLSTM resizing the
    # features, the attention (and both heads below) must match the BERT output width.
    attn = MultiHeadAttention(key_dim, val_dim, embedding_size, num_heads, attn_dropout)
    # Sequence-labeling head: CRF over the attended token representations.
    crf = CRFDecoder.create(label_size, embedding_size, crf_dropout)
    # Sentence-level head: intent classifier.
    clf = ClassDecoder(intent_size, embedding_size, clf_dropout)
    return cls(embeddings, attn, crf, clf, device)
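A minimal usage sketch for the joint factory. The owner class name BERTAttnCRFJoint is an assumption (the snippet does not show which class defines this method), and the label and intent counts are purely illustrative:

# Hypothetical owner class and toy sizes, for illustration only.
model = BERTAttnCRFJoint.create(
    label_size=12,    # number of slot/tag labels for the CRF head
    intent_size=7,    # number of intent classes for the classifier head
    is_freeze=True,   # keep BERT frozen; train only attention + decoder heads
    device="cpu")     # default is "cuda"; "cpu" is handy for a smoke test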
@classmethod
def create(cls,
           label_size,
           # BertEmbedder params
           model_name='bert-base-multilingual-cased',
           mode="weighted",
           is_freeze=True,
           # Attn params
           embedding_size=768,
           key_dim=64,
           val_dim=64,
           num_heads=3,
           attn_dropout=0.3,
           # NCRFDecoder params
           crf_dropout=0.5,
           nbest=1,
           # Global params
           device="cuda"):
    # Token embeddings from (optionally frozen) multilingual BERT.
    embeddings = BERTEmbedder.create(
        model_name=model_name, device=device, mode=mode, is_freeze=is_freeze)
    # Multi-head attention sized to the BERT output width.
    attn = MultiHeadAttention(key_dim, val_dim, embedding_size, num_heads, attn_dropout)
    # NCRF decoder: like a CRF, but can return the nbest highest-scoring tag paths.
    crf = NCRFDecoder.create(label_size, embedding_size, crf_dropout,
                             nbest=nbest, device=device)
    return cls(embeddings, attn, crf, device)
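And a sketch for the NCRF variant, where nbest > 1 requests the top-n scoring tag sequences from the decoder; again, the owner class name BERTAttnNCRF is an assumption:

# Hypothetical owner class; nbest=4 keeps the four best tag paths per sentence.
model = BERTAttnNCRF.create(
    label_size=12,
    nbest=4,
    device="cpu")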