def __init__(self, args=None):
    print("INFO: - Load the pre-built tokenizer...")
    if args.tokenize_type != "bpe":
        tokenizer = Tokenizer.load(os.path.join(args.model_dir, "tokenizer.vocab"))
    else:
        tokenizer = BPE.load(args.vocab_file)
        tokenizer.add_tokens(sys_tokens)
    labels_list = TXT.read(args.label_file, firstline=False)
    tokenizer.tw2i = Tokenizer.list2dict(sys_tokens + labels_list)
    tokenizer.i2tw = Tokenizer.reversed_dict(tokenizer.tw2i)

    self.args = args
    self.tokenizer = tokenizer
    self.device = torch.device("cuda:0" if self.args.use_cuda else "cpu")
    self.num_labels = len(self.tokenizer.tw2i)

    # Hyper-parameters at target language
    self.target2idx = Tokenizer.lst2idx(tokenizer=Tokenizer.process_target,
                                        vocab_words=self.tokenizer.tw2i, unk_words=True,
                                        sos=self.args.ssos, eos=self.args.seos)

    if self.args.tokenize_type != "bpe":
        # Hyper-parameters at source language
        self.source2idx = Tokenizer.lst2idx(tokenizer=Tokenizer.process_nl,
                                            vocab_words=self.tokenizer.sw2i, unk_words=True,
                                            sos=self.args.ssos, eos=self.args.seos)
        self.pad_id = self.tokenizer.sw2i.get(PAD, PAD_id)
        self.unk_id = self.tokenizer.sw2i.get(UNK, UNK_id)
        sw_size = len(self.tokenizer.sw2i)
        # tw_size = len(self.tokenizer.tw2i)
        self.collate_fn = Tokenizer.collate_fn(self.pad_id, True)
    else:
        self.source2idx = BPE.tokens2ids(self.tokenizer, sos=self.args.ssos, eos=self.args.seos)
        self.pad_id = self.tokenizer.token_to_id(BPAD) if self.tokenizer.token_to_id(BPAD) is not None \
            else self.tokenizer.token_to_id(PAD)
        self.unk_id = self.tokenizer.token_to_id(BUNK) if self.tokenizer.token_to_id(BUNK) is not None \
            else self.tokenizer.token_to_id(UNK)
        sw_size = self.tokenizer.get_vocab_size()
        # tw_size = self.tokenizer.get_vocab_size()
        self.collate_fn = BPE.collate_fn(self.pad_id, True)

    # Hyper-parameters at word-level source language
    # [size, dim, pre_embs, drop_rate, zero_padding, requires_grad] = HPs
    nlemb_HPs = [sw_size, self.args.swd_dim, self.args.swd_pretrained,
                 self.args.wd_dropout, self.args.wd_padding, self.args.snl_reqgrad]

    # Encoder
    # [nn_mode, nn_inp_dim, nn_out_dim, nn_layers, nn_bidirect, nn_dropout] = HPs
    if self.args.enc_cnn:
        enc_HPs = ["cnn", self.args.swd_dim, self.args.ed_outdim,
                   self.args.ed_layers, self.args.ed_bidirect, self.args.kernel_size]
    else:
        if self.args.ed_mode == "self_attention":
            # use the maximum length 5 times larger than input length
            nlemb_HPs += [self.tokenizer.swl * 5]
            # nn_mode, ninp, nhid, nlayers, nhead, dropout, activation, norm, his_mask
            enc_HPs = [self.args.ed_mode, self.args.swd_dim, self.args.ed_outdim,
                       self.args.ed_layers, self.args.ed_heads, self.args.ed_dropout,
                       self.args.ed_activation, None, self.args.ed_hismask]
        else:
            enc_HPs = [self.args.ed_mode, self.args.swd_dim, self.args.ed_outdim,
                       self.args.ed_layers, self.args.ed_bidirect, self.args.ed_dropout]

    crf_HPs = [self.args.use_crf, self.num_labels, self.args.se_transitions]

    print("INFO: - Build model...")
    self.labeler = Labeler(nlemb_HPs, enc_HPs, crf_HPs,
                           drop_rate=self.args.final_dropout, num_labels=self.num_labels)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        self.labeler = nn.DataParallel(self.labeler)
    self.labeler.to(self.device)

    self.labeler_optimizer = None
    if self.args.optimizer.lower() == "adamax":
        self.init_optimizers(optim.Adamax)
    elif self.args.optimizer.lower() == "adam":
        self.init_optimizers(optim.Adam)
    elif self.args.optimizer.lower() == "radam":
        self.init_optimizers(RAdam)
    elif self.args.optimizer.lower() == "adadelta":
        self.init_optimizers(optim.Adadelta)
    elif self.args.optimizer.lower() == "adagrad":
        self.init_optimizers(optim.Adagrad)
    else:
        self.init_optimizers(optim.SGD)
def __init__(self, args=None):
    print("INFO: - Load the pre-built tokenizer...")
    if args.tokenize_type != "bpe":
        tokenizer = Tokenizer.load(os.path.join(args.model_dir, "tokenizer.vocab"))
    else:
        tokenizer = BPE.load(args.vocab_file)
        tokenizer.add_tokens(sys_tokens)
        tokenizer.tw2i = tokenizer.get_vocab()
        tokenizer.i2tw = Tokenizer.reversed_dict(tokenizer.tw2i)

    self.args = args
    self.tokenizer = tokenizer
    self.device = torch.device("cuda:0" if self.args.use_cuda else "cpu")
    # Include SOt, EOt if set_words is set; otherwise ignore SOt, EOt
    # self.num_labels = len(self.tokenizer.tw2i)
    self.num_labels = self.tokenizer.get_vocab_size()
    if self.num_labels > 2:
        self.lossF = nn.CrossEntropyLoss().to(self.device)
    else:
        self.lossF = nn.BCEWithLogitsLoss().to(self.device)

    # Hyper-parameters at source language
    if self.args.tokenize_type != "bpe":
        self.source2idx = Tokenizer.lst2idx(tokenizer=self.tokenizer.process_nl,
                                            vocab_words=self.tokenizer.sw2i, unk_words=True,
                                            sos=self.args.ssos, eos=self.args.seos)
        # Hyper-parameters at target language
        self.target2idx = Tokenizer.lst2idx(tokenizer=self.tokenizer.process_target,
                                            vocab_words=self.tokenizer.tw2i, unk_words=True,
                                            sos=self.args.tsos, eos=self.args.teos)
        self.pad_id = self.tokenizer.sw2i.get(PAD, 0)
        self.unk_id = self.tokenizer.sw2i.get(UNK, UNK_id)
        sw_size = len(self.tokenizer.sw2i)
        # tw_size = len(self.tokenizer.tw2i)
        self.collate_fn = Tokenizer.collate_fn(self.pad_id, True)
    else:
        self.source2idx = BPE.tokens2ids(self.tokenizer, sos=self.args.ssos, eos=self.args.seos)
        self.target2idx = BPE.tokens2ids(self.tokenizer, sos=self.args.tsos, eos=self.args.teos)
        self.pad_id = self.tokenizer.token_to_id(BPAD) if self.tokenizer.token_to_id(BPAD) is not None \
            else self.tokenizer.token_to_id(PAD)
        self.unk_id = self.tokenizer.token_to_id(BUNK) if self.tokenizer.token_to_id(BUNK) is not None \
            else self.tokenizer.token_to_id(UNK)
        sw_size = self.tokenizer.get_vocab_size()
        # tw_size = self.tokenizer.get_vocab_size()
        self.collate_fn = BPE.collate_fn(self.pad_id, True)

    # Hyper-parameters at word-level source language
    # [size, dim, pre_embs, drop_rate, zero_padding, requires_grad] = HPs
    nlemb_HPs = [sw_size, self.args.swd_dim, self.args.swd_pretrained,
                 self.args.wd_dropout, self.args.wd_padding, self.args.snl_reqgrad]  # NL inputs

    # Encoder
    # [nn_mode, nn_inp_dim, nn_out_dim, nn_layers, nn_bidirect, nn_dropout] = HPs
    if self.args.enc_cnn:
        enc_HPs = ["cnn", self.args.swd_dim, self.args.ed_outdim,
                   self.args.ed_layers, self.args.ed_bidirect, self.args.kernel_size]
    else:
        enc_HPs = [self.args.ed_mode, self.args.swd_dim, self.args.ed_outdim,
                   self.args.ed_layers, self.args.ed_bidirect, self.args.ed_dropout]

    # Decoder
    # [size, dim, pre_embs, drop_rate, zero_padding, requires_grad] = HPs
    temb_HPs = [self.num_labels, self.args.twd_dim, self.args.twd_pretrained,
                self.args.wd_dropout, self.args.wd_padding, self.args.twd_reqgrad]

    # Hyper-parameters at word-level target language
    dec_HPs = [self.args.ed_mode, self.args.twd_dim, self.args.ed_outdim,
               self.args.ed_layers, self.args.ed_bidirect, self.args.ed_dropout]
    dec_HPs = [temb_HPs, dec_HPs]

    print("INFO: - Build model...")
    # self.seq2seq = Seq2seq(semb_HPs, sch_HPs, enc_HPs, dec_HPs, drop_rate=self.args.final_dropout,
    #                        num_labels=self.num_labels, enc_att=self.args.enc_att).to(self.device)
    self.seq2seq = Seq2seq(nlemb_HPs, enc_HPs, dec_HPs, drop_rate=self.args.final_dropout,
                           num_labels=self.num_labels, enc_att=self.args.enc_att)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0: a batch of [30, ...] is split into [10, ...], [10, ...], [10, ...] on 3 GPUs
        self.seq2seq = nn.DataParallel(self.seq2seq)
    self.seq2seq.to(self.device)

    self.seq2seq_optimizer = None
    if self.args.optimizer.lower() == "adamax":
        self.init_optimizers(optim.Adamax)
    elif self.args.optimizer.lower() == "adam":
        self.init_optimizers(optim.Adam)
    elif self.args.optimizer.lower() == "radam":
        self.init_optimizers(RAdam)
    elif self.args.optimizer.lower() == "adadelta":
        self.init_optimizers(optim.Adadelta)
    elif self.args.optimizer.lower() == "adagrad":
        self.init_optimizers(optim.Adagrad)
    else:
        self.init_optimizers(optim.SGD)
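
# --- Illustrative sketch (not part of the original source) -------------------
# Both constructors above map args.optimizer to a torch.optim class with an
# if/elif chain. The same selection can be expressed as a lookup table; this is
# only an alternative sketch (RAdam is omitted because its import path is not
# shown in the excerpt):

import torch.optim as optim

_OPTIMIZERS = {
    "adamax": optim.Adamax,
    "adam": optim.Adam,
    "adadelta": optim.Adadelta,
    "adagrad": optim.Adagrad,
    "sgd": optim.SGD,
}


def pick_optimizer(name):
    """Return the optimizer class for `name`, defaulting to SGD."""
    return _OPTIMIZERS.get(name.lower(), optim.SGD)
# ------------------------------------------------------------------------------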
# Load datasets to build vocabulary
# Assumed guard (the excerpt starts inside this branch): it mirrors the
# tokenize_type != "bpe" check used in the constructors above.
if tokenize_type != "bpe":
    data = Tokenizer.load_file([filename], task=1)
    s_paras = [-1, 1]
    t_paras = [-1, 1]
    tokenizer = Tokenizer(s_paras, t_paras)
    tokenizer.build(data)
    nl2ids = Tokenizer.lst2idx(tokenizer=Tokenizer.process_nl, vocab_words=tokenizer.sw2i,
                               unk_words=True, sos=False, eos=False)
    tokenizer.tw2i = lb2id_dict
    tokenizer.i2tw = id2lb_dict
    tg2ids = Tokenizer.lst2idx(tokenizer=Tokenizer.process_target, vocab_words=tokenizer.tw2i,
                               unk_words=False, sos=False, eos=False)
    pad_id = tokenizer.sw2i.get(PAD, 0)
    sw_size = len(tokenizer.sw2i)
    tw_size = len(tokenizer.tw2i)
    collate_fn = Tokenizer.collate_fn(pad_id, True)
else:
    vocab_file = "/media/data/review_response/tokens/bert_level-bpe-vocab.txt"
    tokenizer = BPE.load(vocab_file)
    tokenizer.add_tokens(sys_tokens)
    nl2ids = BPE.tokens2ids(tokenizer, sos=False, eos=False, add_special_tokens=False)
    tg2ids = BPE.tokens2ids(tokenizer, sos=False, eos=False, add_special_tokens=False)
    pad_id = tokenizer.token_to_id(BPAD) if tokenizer.token_to_id(BPAD) is not None \
        else tokenizer.token_to_id(PAD)
    sw_size = tokenizer.get_vocab_size()
    tw_size = tokenizer.get_vocab_size()
    collate_fn = BPE.collate_fn(pad_id, True)

train_data, num_lines = Tokenizer.prepare_iter(filename, firstline=False, task=1)
train_iterdataset = IterDataset(train_data, source2idx=nl2ids, target2idx=tg2ids, num_lines=num_lines)
train_dataloader = DataLoader(train_iterdataset, pin_memory=True, batch_size=8, collate_fn=collate_fn)
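
# --- Illustrative sketch (not part of the original source) -------------------
# Tokenizer.collate_fn(pad_id, True) and BPE.collate_fn(pad_id, True) are
# assumed to return a callable that pads every id sequence in a batch to the
# batch's maximum length with `pad_id` before stacking it into a tensor. A
# minimal stand-in with that behaviour (the real functions may return a
# different structure) could look like this:

import torch


def make_pad_collate(pad_id):
    def collate(batch):
        # batch: list of (source_ids, target_ids) pairs of varying lengths
        def pad(seqs):
            max_len = max(len(s) for s in seqs)
            return torch.tensor([list(s) + [pad_id] * (max_len - len(s)) for s in seqs],
                                dtype=torch.long)
        sources, targets = zip(*batch)
        return pad(sources), pad(targets)
    return collate


# Hypothetical usage with the loader built above:
# train_dataloader = DataLoader(train_iterdataset, pin_memory=True, batch_size=8,
#                               collate_fn=make_pad_collate(pad_id))
# ------------------------------------------------------------------------------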