def use_elmo_as_pytorch_module():
    # from allennlp.modules.elmo import Elmo, batch_to_ids
    from elmo.elmo import Elmo, batch_to_ids

    options_file = "elmo_small/elmo_2x1024_128_2048cnn_1xhighway_options.json"
    weight_file = "elmo_small/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"

    # Compute one representation for each token. The representation is a
    # linear weighted combination of the 3 layers in ELMo (i.e., the char
    # CNN and the outputs of the two biLSTM layers).
    elmo = Elmo(options_file, weight_file, 1, dropout=0)

    # Use batch_to_ids to convert sentences to character ids.
    sentences = [['First', 'sentence', '.'], ['Another', '.']]
    character_ids = batch_to_ids(sentences)

    embeddings = elmo(character_ids)
    # embeddings['elmo_representations'] is a length-one list of tensors
    # (one per requested output representation), each of shape (2, 3, 256):
    #   2   - the batch size
    #   3   - the sequence length of the batch
    #   256 - the length of each ELMo vector (twice the 128-dim projection
    #         dimension of this small model)
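# A hedged extension of the demo above: the original AllenNLP README example
# computes two different representations per token. That corresponds to
# passing num_output_representations=2; each entry of 'elmo_representations'
# is then an independently weighted combination of the same three layers.
def use_two_elmo_representations():
    from elmo.elmo import Elmo, batch_to_ids
    options_file = "elmo_small/elmo_2x1024_128_2048cnn_1xhighway_options.json"
    weight_file = "elmo_small/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
    elmo = Elmo(options_file, weight_file, 2, dropout=0)
    character_ids = batch_to_ids([['First', 'sentence', '.'], ['Another', '.']])
    embeddings = elmo(character_ids)
    assert len(embeddings['elmo_representations']) == 2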
def __init__(self, hidden_size, class_size):
    super(Classifier, self).__init__()
    self.hidden_size = hidden_size
    self.class_size = class_size
    self.elmo = Elmo(hidden_size=hidden_size)
    # 262 matches the ELMo character vocabulary (256 byte values plus
    # special begin/end/padding ids).
    self.embed = torch.nn.Embedding(262, 2 * self.hidden_size)
    self.lstm = torch.nn.LSTM(2 * self.hidden_size, self.hidden_size,
                              bidirectional=True, batch_first=True)
    self.out = torch.nn.Linear(2 * self.hidden_size, self.class_size)
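# The Classifier above defines layers only; a minimal forward sketch, assuming
# inputs are (batch, seq_len) indices into the 262-entry character vocabulary.
# The custom Elmo module's call signature is not shown in the snippet, so this
# sketch uses only the embedding/LSTM/linear path.
def forward(self, char_ids):
    embedded = self.embed(char_ids)               # (batch, seq_len, 2*hidden)
    outputs, (h_n, c_n) = self.lstm(embedded)     # h_n: (2, batch, hidden)
    # Concatenate the final forward and backward hidden states.
    final = torch.cat([h_n[0], h_n[1]], dim=-1)   # (batch, 2*hidden)
    return self.out(final)                        # (batch, class_size)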
def __init__(self, args, Y, dicts):
    super(WordRep, self).__init__()

    self.gpu = args.gpu

    if args.embed_file:
        print("loading pretrained embeddings from {}".format(args.embed_file))
        if args.use_ext_emb:
            pretrain_word_embedding, pretrain_emb_dim = build_pretrain_embedding(
                args.embed_file, dicts['w2ind'], True)
            W = torch.from_numpy(pretrain_word_embedding)
        else:
            W = torch.Tensor(load_embeddings(args.embed_file))
        self.embed = nn.Embedding(W.size()[0], W.size()[1], padding_idx=0)
        self.embed.weight.data = W.clone()
    else:
        # add 2 to include UNK and PAD
        self.embed = nn.Embedding(len(dicts['w2ind']) + 2, args.embed_size,
                                  padding_idx=0)
    self.feature_size = self.embed.embedding_dim

    self.use_elmo = args.use_elmo
    if self.use_elmo:
        self.elmo = Elmo(args.elmo_options_file, args.elmo_weight_file, 1,
                         requires_grad=args.elmo_tune, dropout=args.elmo_dropout,
                         gamma=args.elmo_gamma)
        with open(args.elmo_options_file, 'r') as fin:
            _options = json.load(fin)
        # The ELMo output width is twice the biLSTM projection dimension.
        self.feature_size += _options['lstm']['projection_dim'] * 2

    self.embed_drop = nn.Dropout(p=args.dropout)

    self.conv_dict = {
        1: [self.feature_size, args.num_filter_maps],
        2: [self.feature_size, 100, args.num_filter_maps],
        3: [self.feature_size, 150, 100, args.num_filter_maps],
        4: [self.feature_size, 200, 150, 100, args.num_filter_maps],
    }
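# A hedged sketch (method and argument names hypothetical) of how a WordRep
# like the one above typically combines its two feature sources: word
# embeddings and the first ELMo representation are concatenated along the
# feature axis, so the result width matches the self.feature_size computed
# in __init__.
def forward_sketch(self, word_ids, elmo_char_ids):
    features = [self.embed(word_ids)]                      # (batch, len, embed_dim)
    if self.use_elmo:
        elmo_out = self.elmo(elmo_char_ids)['elmo_representations'][0]
        features.append(elmo_out)                          # (batch, len, 2*proj_dim)
    x = torch.cat(features, dim=2)                         # (batch, len, feature_size)
    return self.embed_drop(x)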
# Dependencies (module paths follow the NCRF++-style layout this snippet
# comes from; adjust to your project):
import json
import numpy as np
import torch
import torch.nn as nn
from .charcnn import CharCNN
from .charbilstm import CharBiLSTM
from .charbigru import CharBiGRU
from elmo.elmo import Elmo


class WordRep(nn.Module):
    def __init__(self, data):
        super(WordRep, self).__init__()
        print("build word representation...")
        self.gpu = data.HP_gpu
        self.use_char = data.use_char
        self.batch_size = data.HP_batch_size
        self.char_hidden_dim = 0
        self.char_all_feature = False
        self.sentence_classification = data.sentence_classification
        if self.use_char:
            self.char_hidden_dim = data.HP_char_hidden_dim
            self.char_embedding_dim = data.char_emb_dim
            if data.char_feature_extractor == "CNN":
                self.char_feature = CharCNN(
                    data.char_alphabet.size(), data.pretrain_char_embedding,
                    self.char_embedding_dim, self.char_hidden_dim,
                    data.HP_dropout, self.gpu)
            elif data.char_feature_extractor == "LSTM":
                self.char_feature = CharBiLSTM(
                    data.char_alphabet.size(), data.pretrain_char_embedding,
                    self.char_embedding_dim, self.char_hidden_dim,
                    data.HP_dropout, self.gpu)
            elif data.char_feature_extractor == "GRU":
                self.char_feature = CharBiGRU(
                    data.char_alphabet.size(), data.pretrain_char_embedding,
                    self.char_embedding_dim, self.char_hidden_dim,
                    data.HP_dropout, self.gpu)
            elif data.char_feature_extractor == "ALL":
                # "ALL" runs a CharCNN and a CharBiLSTM side by side.
                self.char_all_feature = True
                self.char_feature = CharCNN(
                    data.char_alphabet.size(), data.pretrain_char_embedding,
                    self.char_embedding_dim, self.char_hidden_dim,
                    data.HP_dropout, self.gpu)
                self.char_feature_extra = CharBiLSTM(
                    data.char_alphabet.size(), data.pretrain_char_embedding,
                    self.char_embedding_dim, self.char_hidden_dim,
                    data.HP_dropout, self.gpu)
            else:
                print("Error char feature selection, please check parameter "
                      "data.char_feature_extractor (CNN/LSTM/GRU/ALL).")
                exit(0)
        self.embedding_dim = data.word_emb_dim
        self.drop = nn.Dropout(data.HP_dropout)
        self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.embedding_dim)
        if data.pretrain_word_embedding is not None:
            self.word_embedding.weight.data.copy_(
                torch.from_numpy(data.pretrain_word_embedding))
        else:
            self.word_embedding.weight.data.copy_(
                torch.from_numpy(self.random_embedding(data.word_alphabet.size(),
                                                       self.embedding_dim)))
        self.feature_num = data.feature_num
        self.feature_embedding_dims = data.feature_emb_dims
        self.feature_embeddings = nn.ModuleList()
        for idx in range(self.feature_num):
            self.feature_embeddings.append(
                nn.Embedding(data.feature_alphabets[idx].size(),
                             self.feature_embedding_dims[idx]))
        for idx in range(self.feature_num):
            if data.pretrain_feature_embeddings[idx] is not None:
                self.feature_embeddings[idx].weight.data.copy_(
                    torch.from_numpy(data.pretrain_feature_embeddings[idx]))
            else:
                self.feature_embeddings[idx].weight.data.copy_(
                    torch.from_numpy(self.random_embedding(
                        data.feature_alphabets[idx].size(),
                        self.feature_embedding_dims[idx])))
        self.use_elmo = data.use_elmo
        if self.use_elmo:
            self.elmo = Elmo(data.elmo_options_file, data.elmo_weight_file, 1,
                             requires_grad=data.elmo_tune,
                             dropout=data.elmo_dropout, gamma=data.elmo_gamma)
            with open(data.elmo_options_file, 'r') as fin:
                self._options = json.load(fin)
        if self.gpu >= 0 and torch.cuda.is_available():
            self.drop = self.drop.cuda(self.gpu)
            self.word_embedding = self.word_embedding.cuda(self.gpu)
            for idx in range(self.feature_num):
                self.feature_embeddings[idx] = self.feature_embeddings[idx].cuda(self.gpu)
            if self.use_elmo:
                self.elmo = self.elmo.cuda(self.gpu)

    def random_embedding(self, vocab_size, embedding_dim):
        # Uniform initialization in [-sqrt(3/dim), sqrt(3/dim)].
        pretrain_emb = np.empty([vocab_size, embedding_dim])
        scale = np.sqrt(3.0 / embedding_dim)
        for index in range(vocab_size):
            pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedding_dim])
        return pretrain_emb

    def forward(self, word_inputs, feature_inputs, word_seq_lengths,
                char_inputs, char_seq_lengths, char_seq_recover, elmo_char_inputs):
        """
        input:
            word_inputs: (batch_size, sent_len)
            feature_inputs: list of (batch_size, sent_len) tensors, one per feature
            word_seq_lengths: list of batch_size, (batch_size, 1)
            char_inputs: (batch_size * sent_len, word_length)
            char_seq_lengths: char lengths for the whole batch, (batch_size * sent_len, 1)
            char_seq_recover: variable which records the char order information,
                used to recover char order
        output:
            Variable(batch_size, sent_len, hidden_dim)
        """
        batch_size = word_inputs.size(0)
        sent_len = word_inputs.size(1)
        word_embs = self.word_embedding(word_inputs)
        word_list = [word_embs]
        if not self.sentence_classification:
            for idx in range(self.feature_num):
                word_list.append(self.feature_embeddings[idx](feature_inputs[idx]))
        if self.use_char:
            # Calculate the char encoder's last hidden state for each word,
            # restore the original word order, and reshape to sentence shape.
            char_features = self.char_feature.get_last_hiddens(
                char_inputs, char_seq_lengths.cpu().numpy())
            char_features = char_features[char_seq_recover]
            char_features = char_features.view(batch_size, sent_len, -1)
            # Concatenate char features with the word features below.
            word_list.append(char_features)
            if self.char_all_feature:
                char_features_extra = self.char_feature_extra.get_last_hiddens(
                    char_inputs, char_seq_lengths.cpu().numpy())
                char_features_extra = char_features_extra[char_seq_recover]
                char_features_extra = char_features_extra.view(batch_size, sent_len, -1)
                word_list.append(char_features_extra)
        if self.use_elmo:
            elmo_outputs = self.elmo(elmo_char_inputs)
            elmo_outputs = elmo_outputs['elmo_representations'][0]
            word_list.append(elmo_outputs)
        word_embs = torch.cat(word_list, 2)
        word_represent = self.drop(word_embs)
        return word_represent
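# Sanity check on the concatenation in forward: the output width hidden_dim is
# the sum of every appended feature. All dimensions below are hypothetical
# examples, not values from the snippet.
word_emb_dim = 100
feature_emb_dims = [20, 20]      # skipped when sentence_classification is set
char_hidden_dim = 50             # a second char term is added when extractor == "ALL"
elmo_projection_dim = 128        # _options['lstm']['projection_dim']

hidden_dim = (word_emb_dim
              + sum(feature_emb_dims)
              + char_hidden_dim
              + 2 * elmo_projection_dim)  # ELMo vector is 2 * projection_dim
print(hidden_dim)  # 446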