def load_glove_embedding(self, freeze=False):
    """Initialize the model's shared embedding tables from GloVe vectors.

    The current (randomly initialized) embedding matrix is used as the
    fallback for vocabulary words absent from GloVe. The same matrix is
    copied into the utterance encoder and both decoders.

    NOTE(review): ``freeze`` is accepted but never acted on in this
    variant — confirm whether freezing was intended here.
    """
    seed = self.m.u_encoder.embedding.weight.data.cpu().numpy()
    glove = torch.from_numpy(get_glove_matrix(self.reader.vocab, seed))
    for weight in (
        self.m.u_encoder.embedding.weight,
        self.m.z_decoder.emb.weight,
        self.m.m_decoder.emb.weight,
    ):
        weight.data.copy_(glove)
def load_glove_embedding(self, freeze=False):
    """Load GloVe vectors into every embedding table of the SEDST model.

    The ``mu`` output projections of the posterior/prior decoders receive
    the transposed embedding matrix (weight tying). When ``freeze`` is
    true, the three input embedding tables are frozen; the tied ``mu``
    projections remain trainable.
    """
    seed = self.sedst.u_encoder.embedding.weight.data.cpu().numpy()
    glove = torch.from_numpy(get_glove_matrix(self.reader.vocab, seed))

    input_tables = (
        self.sedst.u_encoder.embedding,
        self.sedst.m_encoder.embedding,
        self.sedst.m_decoder.emb,
    )
    for table in input_tables:
        table.weight.data.copy_(glove)

    # Output projections share the embedding matrix via its transpose.
    glove_t = glove.transpose(1, 0)
    self.sedst.qz_decoder.mu.weight.data.copy_(glove_t)
    self.sedst.pz_decoder.mu.weight.data.copy_(glove_t)

    if freeze:
        for table in input_tables:
            self.freeze_module(table)
def load_glove_embedding(self, freeze=False):
    """Load GloVe vectors into the model's embedding tables.

    The current embedding matrix seeds rows for words missing from GloVe.
    The same matrix is copied into both encoders and both decoders; the
    ``mu`` projections of the posterior/prior decoders receive its
    transpose (weight tying).

    :param freeze: if True, stop gradient updates on the input embedding
        tables (the tied ``mu`` projections stay trainable).
    """
    initial_arr = self.m.u_encoder.embedding.weight.data.cpu().numpy()
    mat = get_glove_matrix(self.reader.vocab, initial_arr)
    embedding_arr = torch.from_numpy(mat)

    self.m.u_encoder.embedding.weight.data.copy_(embedding_arr)
    self.m.p_encoder.embedding.weight.data.copy_(embedding_arr)
    self.m.m_decoder.emb.weight.data.copy_(embedding_arr)
    self.m.p_decoder.emb.weight.data.copy_(embedding_arr)
    self.m.qz_decoder.mu.weight.data.copy_(embedding_arr.transpose(1, 0))
    self.m.pz_decoder.mu.weight.data.copy_(embedding_arr.transpose(1, 0))

    if freeze:
        self.freeze_module(self.m.u_encoder.embedding)
        # BUG FIX: was ``self.m.m_e.embedding`` — no ``m_e`` attribute is
        # referenced anywhere else in this method; the second encoder
        # loaded above is ``p_encoder``, so freeze that one.
        self.freeze_module(self.m.p_encoder.embedding)
        self.freeze_module(self.m.m_decoder.emb)
def load_glove_embedding(self):
    """Copy GloVe vectors into every embedding table of the model.

    The current embedding matrix seeds rows for out-of-GloVe words. Each
    table's ``requires_grad`` is set from ``cfg.emb_trainable`` so the
    embeddings can be frozen or fine-tuned via configuration. The
    optional separate z-encoder (``cfg.separate_enc``) and all
    ``cfg.num_head`` z-decoder heads are included.
    """
    seed = self.m.u_encoder.embedding.weight.data.cpu().numpy()
    glove = torch.from_numpy(get_glove_matrix(self.reader.vocab, seed))

    # Collect every weight tensor that shares the GloVe initialization.
    weights = [self.m.u_encoder.embedding.weight]
    if cfg.separate_enc:
        weights.append(self.m.z_encoder.embedding.weight)
    weights.extend(
        self.m.z_decoders[head].emb.weight for head in range(cfg.num_head)
    )
    weights.append(self.m.req_classifiers.emb.weight)
    weights.append(self.m.res_classifiers.emb.weight)
    weights.append(self.m.m_decoder.emb.weight)

    for weight in weights:
        weight.data.copy_(glove)
        weight.requires_grad = cfg.emb_trainable
def init_embedding(embedding, r):
    """Overwrite *embedding*'s weights with GloVe vectors for ``r.vocab``.

    The existing (random) weights seed rows for words missing from GloVe.
    Returns the same embedding module for call-chaining.
    """
    seed = embedding.weight.data.cpu().numpy()
    glove = torch.from_numpy(reader.get_glove_matrix(r.vocab, seed))
    embedding.weight.data.copy_(glove)
    return embedding
def init_embedding_model(model, r):
    """Set GloVe embeddings for *model*; *r* is a reader instance.

    ``model.embedding`` is overwritten in place; its current weights seed
    rows for vocabulary words absent from GloVe.
    """
    seed = model.embedding.weight.data.cpu().numpy()
    glove = torch.from_numpy(reader.get_glove_matrix(r.vocab, seed))
    model.embedding.weight.data.copy_(glove)