def get_model(hidden_size: int, num_layers: int, enc_dropout: float, enc_emb_dropout: float,
              dec_dropout: float, dec_emb_dropout: float, seq_enc_dropout: float,
              seq_dec_dropout: float) -> PtrNetModel:
    """Assemble a pointer-network model over morphological analyses.

    Builds form/lemma embeddings from pretrained fastText vectors, trainable
    tag/feature embeddings, an encoder/decoder pair with attention, and wires
    them into a ``PtrNetModel``.

    Fix: ``seq_enc_dropout`` was the only parameter missing its ``: float``
    annotation; it is now annotated consistently with its siblings.

    Relies on module-level globals: ``model_ft``, ``home_path``,
    ``ft_form_vec_file_path``, ``ft_lemma_vec_file_path``, ``tb_vocab``,
    ``device``.

    :param hidden_size: hidden size of the encoder/decoder RNNs.
    :param num_layers: number of RNN layers in encoder and decoder.
    :param enc_dropout: encoder RNN dropout probability.
    :param enc_emb_dropout: dropout on encoder input embeddings.
    :param dec_dropout: decoder RNN dropout probability.
    :param dec_emb_dropout: dropout on decoder input embeddings.
    :param seq_enc_dropout: sequence-level dropout passed to the model for the
        encoder side.
    :param seq_dec_dropout: sequence-level dropout passed to the model for the
        decoder side.
    :return: the assembled model, moved to ``device`` when CUDA is available.
    """
    # Pretrained fastText weight matrices for surface forms and lemmas.
    form_emb = model_ft.load_embedding_weight_matrix(home_path, ft_form_vec_file_path, tb_vocab.forms, device)
    lemma_emb = model_ft.load_embedding_weight_matrix(home_path, ft_lemma_vec_file_path, tb_vocab.lemmas, device)
    tags_num = len(tb_vocab.tags)
    feats_num = len(tb_vocab.feats)
    # Trainable 50-dim embeddings for tags and features; index 0 is padding.
    tag_emb = nn.Embedding(num_embeddings=tags_num, embedding_dim=50, padding_idx=0)
    feats_emb = nn.Embedding(num_embeddings=feats_num, embedding_dim=50, padding_idx=0)
    analysis_emb = AnalysisEmbedding(form_emb, lemma_emb, tag_emb, feats_emb)
    encoder = AnalysisEncoder(enc_dropout, analysis_emb.embedding_dim, hidden_size, num_layers, enc_emb_dropout)
    decoder = AnalysisDecoder(dec_dropout, analysis_emb.embedding_dim, hidden_size, num_layers, dec_emb_dropout)
    attention = Attention()
    model = PtrNetModel(tb_vocab, analysis_emb, encoder, seq_enc_dropout, decoder, seq_dec_dropout, attention)
    if torch.cuda.is_available():
        model.cuda(device)
    return model
def get_model(hidden_size: int, num_layers: int, emb_dropout: float, hidden_dropout: float,
              rnn_dropout: float, class_dropout: float) -> TagRNN4:
    """Construct a ``TagRNN4`` sequence tagger.

    Loads pretrained fastText token embeddings and passes them, together with
    the dropout configuration and vocabulary, to the ``TagRNN4`` constructor.

    NOTE(review): this redefines a ``get_model`` declared earlier in the file —
    presumably these came from separate modules; confirm the intended layout.

    Relies on module-level globals: ``model_ft``, ``home_path``,
    ``ft_token_vec_file_path``, ``tb_vocab``, ``device``.

    :param hidden_size: hidden size of the tagger RNN.
    :param num_layers: number of RNN layers.
    :param emb_dropout: dropout applied to the input embeddings.
    :param hidden_dropout: dropout applied to hidden states.
    :param rnn_dropout: dropout inside the RNN stack.
    :param class_dropout: dropout before the classification layer.
    :return: the tagger, moved to ``device`` when CUDA is available.
    """
    # Pretrained token-embedding weight matrix from fastText vectors.
    tok_vectors = model_ft.load_embedding_weight_matrix(
        home_path, ft_token_vec_file_path, tb_vocab.tokens, device
    )
    tagger = TagRNN4(
        tok_vectors,
        emb_dropout,
        hidden_size,
        num_layers,
        hidden_dropout,
        rnn_dropout,
        class_dropout,
        tb_vocab,
    )
    if torch.cuda.is_available():
        tagger.cuda(device)
    return tagger