Example #1
def train(opt, train_data, eval_data=None):
    logger.info("start training task")
    dim_input = 6
    dim_emb = 64
    num_class = train_data.num_class
    transformer_nhead = 2
    transformer_nlayers = 1
    model = TransformerModel(dim_input, dim_emb, transformer_nhead,
                             num_class, transformer_nlayers)
    # note: `model.cuda` is a bound method and is always truthy;
    # check for an available GPU instead
    if torch.cuda.is_available():
        model = move_to_gpu(model)
    summary(model, train_data[0]['x'].shape)
    try:
        dataloader = DataLoader(
            train_data,
            batch_size=opt.batch_size,
            shuffle=True,  # shuffle training samples each epoch
            num_workers=4
        )
        logger.info("create training dataloader")
    except Exception:
        logger.exception("failed to create dataloader")
        raise  # don't continue with an undefined dataloader

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=model.optimizer,
            milestones=[5, 10], gamma=0.1)

    model_path = os.path.join(opt.model_dir, opt.model_name + ".pth")
    global_steps = 0
    best = 0
    for epoch in tqdm(list(range(opt.epoch)), desc='epoch'):
        for step, batch in enumerate(dataloader):
            global_steps += 1
            metrics = model.train(batch)  # custom per-batch train step that returns metrics
            if global_steps % opt.log_steps == 0:
                logger.debug(f"global steps={global_steps}, {metrics}")
            if global_steps % opt.save_steps == 0:
                val_metrics, eval_result = eval(opt, model, eval_data)
                logger.info(f"global steps={global_steps}, current={val_metrics}, best={best}, result={eval_result}")
                if val_metrics > best:
                    best = val_metrics
                    torch.save(model.state_dict(), model_path)
                    logger.info(f"global steps={global_steps}, save model:{model_path}")
        lr_scheduler.step()
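
A minimal sketch of how this train() helper might be invoked; the attribute
names on `opt` are the ones read above, while the values and the Namespace
wrapper are illustrative assumptions, not part of the original example.

from argparse import Namespace

# hypothetical configuration; field names mirror the attributes accessed in train()
opt = Namespace(
    batch_size=32,
    epoch=20,
    log_steps=50,
    save_steps=500,
    model_dir="checkpoints",
    model_name="transformer_cls",
)
train(opt, train_data, eval_data=eval_data)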
Example #2
                                 t_total=num_train_optimization_steps)

[encoder, decoder], optimizer = amp.initialize([encoder, decoder],
                                               optimizer,
                                               opt_level='O1')

criterion = SequenceFocalLoss(gamma=a, beta=b)
eval_criterion = SequenceCrossEntropyLoss()

update_count = 0
start = time.time()

for ep in range(5):
    "Training"
    pb = tqdm.tqdm(train_dataloader)
    encoder.train()
    decoder.train()

    for batch in pb:
        record_loss, perplexity = train_one_iter(batch, fp16=True)
        update_count += 1

        if update_count % num_gradients_accumulation == num_gradients_accumulation - 1:
            optimizer.step()
            scheduler.step()  # step the LR schedule after the optimizer update
            optimizer.zero_grad()

            # speed measure
            end = time.time()
            speed = batch_size * num_gradients_accumulation / (end - start)
            start = end
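
The loop above calls train_one_iter() without defining it. A minimal sketch
under stated assumptions: the batch yields input/target tensors, `criterion`
is the SequenceFocalLoss created above, and apex's amp.scale_loss is used for
the fp16 backward pass; the forward pass itself is illustrative.

import math

def train_one_iter(batch, fp16=False):
    inputs, targets = batch                      # assumed batch layout
    logits = decoder(targets, encoder(inputs))   # illustrative forward pass
    loss = criterion(logits, targets)
    loss = loss / num_gradients_accumulation     # average over accumulated steps

    if fp16:
        # apex O1 mixed precision requires backward on the scaled loss
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    record_loss = loss.item() * num_gradients_accumulation
    return record_loss, math.exp(record_loss)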
Example #3
if args.restart:
    # Resume training from checkpoint
    with open(os.path.join(args.restart_dir, 'model.pt'), 'rb') as f:
        model = torch.load(f)
    if not args.fp16:
        model = model.float()
    model.apply(update_dropout)
    model.apply(update_dropatt)
else:
    # Train from the start
    model = TransformerModel(ntokens, args.d_model, args.n_head, args.d_inner,
                             args.n_layer, args.dropout)

    for p in model.parameters():
        p.requires_grad_(True)
    model.train()

    model.apply(weights_init)
args.n_all_param = sum(p.nelement() for p in model.parameters())

if args.fp16:
    model = model.half()

if args.multi_gpu:
    model = model.to(device)
    if args.gpu0_bsz >= 0:
        para_model = BalancedDataParallel(args.gpu0_bsz // args.batch_chunk,
                                          model,
                                          dim=1).to(device)
    else:
        para_model = nn.DataParallel(model, dim=1).to(device)
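
update_dropout and update_dropatt are applied on resume but not shown here.
In the Transformer-XL reference implementation they rewrite the dropout rates
stored in the checkpoint with the current command-line values; a sketch under
that assumption:

def update_dropout(m):
    # reset every Dropout module's rate to the value passed on the command line
    classname = m.__class__.__name__
    if classname.find('Dropout') != -1 and hasattr(m, 'p'):
        m.p = args.dropout

def update_dropatt(m):
    # reset the attention-dropout rate where a module defines one
    if hasattr(m, 'dropatt'):
        m.dropatt.p = args.dropatt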
Example #4
class TrainLoop_Transformer():
    def __init__(self, opt):
        self.opt = opt

        # `args` is undefined in this scope; the vocab path comes from opt
        self.dict = json.load(open(self.opt['bpe2index'], encoding='utf-8'))
        self.index2word = {self.dict[key]: key for key in self.dict}

        self.batch_size = self.opt['batch_size']
        self.epoch = self.opt['epoch']
        self.use_cuda = opt['use_cuda']
        logger.info('use_cuda: %s', self.use_cuda)

        self.device = 'cuda:{}'.format(
            self.opt['gpu']) if self.use_cuda else 'cpu'
        self.opt['device'] = self.device

        self.movie_ids = pkl.load(open("data/movie_ids.pkl", "rb"))

        self.build_data()
        self.build_model()

        # self.init_optim(
        #     [p for p in self.model.parameters() if p.requires_grad],
        #     optim_states=states.get('optimizer'),
        #     saved_optim_type=states.get('optimizer_type')
        # )
        self.init_optim(
            [p for p in self.model.parameters() if p.requires_grad])

    def build_data(self):
        if self.opt['process_data']:
            self.train_dataset = dataset(
                "../../data/data1030/output/train_cut.pkl", self.opt, 'train')
            self.valid_dataset = dataset(
                "../../data/data1030/output/valid_cut.pkl", self.opt, 'valid')
            self.test_dataset = dataset(
                "../../data/data1030/output/test_cut.pkl", self.opt, 'test')

            self.train_processed_set = self.train_dataset.data_process(True)
            self.valid_processed_set = self.valid_dataset.data_process(True)
            self.test_processed_set = self.test_dataset.data_process(True)

            pickle.dump(self.train_processed_set,
                        open('data/train_processed_set.pkl', 'wb'))
            pickle.dump(self.valid_processed_set,
                        open('data/valid_processed_set.pkl', 'wb'))
            pickle.dump(self.test_processed_set,
                        open('data/test_processed_set.pkl', 'wb'))
            logger.info("[Save processed data]")
        else:
            try:
                self.train_processed_set = pickle.load(
                    open('data/train_processed_set.pkl', 'rb'))
                self.valid_processed_set = pickle.load(
                    open('data/valid_processed_set.pkl', 'rb'))
                self.test_processed_set = pickle.load(
                    open('data/test_processed_set.pkl', 'rb'))
            except OSError as e:
                raise RuntimeError(
                    "No processed data; run with process_data=True first") from e
            logger.info("[Load processed data]")

    def build_model(self):
        self.model = TransformerModel(self.opt, self.dict)
        # TODO: load pretrained embeddings when embedding_type != 'random'
        if self.opt['embedding_type'] != 'random':
            pass

        if self.opt['load_dict'] is not None:
            logger.info('[ Loading existing model params from {} ]'
                        ''.format(self.opt['load_dict']))
            self.model.load_model(self.opt['load_dict'])

        if self.use_cuda:
            self.model.to(self.device)

    def train(self):
        losses = []
        best_val_gen = float('inf')  # lower validation perplexity is better
        gen_stop = False
        patience = 0
        max_patience = 5
        num = 0

        for i in range(self.epoch):
            train_set = CRSdataset(self.train_processed_set,
                                   self.opt['n_entity'], self.opt['n_concept'])
            train_dataset_loader = torch.utils.data.DataLoader(
                dataset=train_set, batch_size=self.batch_size,
                shuffle=True)

            for context,c_lengths,response,r_length,mask_response, \
                    mask_r_length,entity,entity_vector,movie,\
                    concept_mask,dbpedia_mask,concept_vec, \
                    db_vec,rec in tqdm(train_dataset_loader):

                seed_sets = []
                batch_size = context.shape[0]
                for b in range(batch_size):
                    seed_set = entity[b].nonzero().view(-1).tolist()
                    seed_sets.append(seed_set)

                self.model.train()
                self.zero_grad()

                scores, preds, rec_scores, rec_loss, gen_loss, mask_loss, info_db_loss, info_con_loss= \
                    self.model(context.to(self.device), response.to(self.device), mask_response.to(self.device), concept_mask, dbpedia_mask, seed_sets, movie, \
                        concept_vec, db_vec, entity_vector.to(self.device), rec, test=False)

                losses.append([gen_loss])
                self.backward(gen_loss)
                self.update_params()

                if num % 50 == 0:
                    # .item() so math.exp receives a Python float
                    loss = sum(l[0].item() for l in losses) / len(losses)
                    ppl = exp(loss)
                    logger.info('gen loss is %f, ppl is %f', loss, ppl)
                    losses = []

                num += 1

            output_metrics_gen = self.val(epoch=i)
            _ = self.val(is_test=True, epoch=i)

            if output_metrics_gen["ppl"] > best_val_gen:
                patience += 1
                logger.info('Patience = %d', patience)
                if patience >= max_patience:
                    gen_stop = True
            else:
                patience = 0
                best_val_gen = output_metrics_gen["ppl"]
                self.model.save_model(self.opt['model_save_path'])
                logger.info(
                    f"[generator model saved in {self.opt['model_save_path']}]")

            if gen_stop:
                break

    def val(self, is_test=False, epoch=-1):
        # "count" refers to the number of responses
        self.model.eval()
        if is_test:
            valid_processed_set = self.test_processed_set
        else:
            valid_processed_set = self.valid_processed_set

        val_set = CRSdataset(valid_processed_set, self.opt['n_entity'],
                             self.opt['n_concept'])
        val_dataset_loader = torch.utils.data.DataLoader(
            dataset=val_set, batch_size=self.batch_size, shuffle=False)

        inference_sum = []
        golden_sum = []
        losses = []
        recs = []

        for context, c_lengths, response, r_length, mask_response, mask_r_length, \
                entity, entity_vector, movie, concept_mask, dbpedia_mask, concept_vec, db_vec, rec \
                in tqdm(val_dataset_loader):
            with torch.no_grad():
                seed_sets = []
                batch_size = context.shape[0]
                for b in range(batch_size):
                    seed_set = entity[b].nonzero().view(-1).tolist()
                    seed_sets.append(seed_set)

                # response generation with teacher forcing
                _, tf_preds, _, _, gen_loss, mask_loss, info_db_loss, info_con_loss = \
                    self.model(context.to(self.device), response.to(self.device), mask_response.to(self.device), concept_mask, dbpedia_mask, \
                        seed_sets, movie, concept_vec, db_vec, entity_vector.to(self.device), rec, test=False)

                # response generation with greedy decoding (capped at maxlen=20)
                scores, preds, rec_scores, rec_loss, _, mask_loss, info_db_loss, info_con_loss = \
                    self.model(context.to(self.device), response.to(self.device), mask_response.to(self.device), concept_mask, dbpedia_mask, \
                        seed_sets, movie, concept_vec, db_vec, entity_vector.to(self.device), rec, test=True, maxlen=20, bsz=batch_size)

            golden_sum.extend(self.vector2sentence(response.cpu()))
            inference_sum.extend(self.vector2sentence(preds.cpu()))
            recs.extend(rec.cpu())
            losses.append(torch.mean(gen_loss))

        subset = 'valid' if not is_test else 'test'

        # original version: gen_loss comes from teacher forcing, inference_sum from greedy decoding
        ppl = exp(sum(loss.item() for loss in losses) / len(losses))
        output_dict_gen = {'ppl': ppl}
        logger.info(f"{subset} set metrics = {output_dict_gen}")

        # write out the generated responses
        with open(f"output/output_{subset}_gen_epoch_{epoch}.txt",
                  'w',
                  encoding='utf-8') as f:
            f.writelines([
                '[Generated] ' + re.sub(r'@\d+', '__UNK__', ' '.join(sen)) +
                '\n' for sen in inference_sum
            ])

        # write out the ground truth
        with open(f"output/output_{subset}_gt_epoch_{epoch}.txt",
                  'w',
                  encoding='utf-8') as f:
            for sen in golden_sum:
                mask_sen = re.sub(r'@\d+', '__UNK__', ' '.join(sen))
                mask_sen = re.sub(r' ([!,.?])', r'\1', mask_sen)  # drop the space before punctuation
                f.writelines(['[GT] ' + mask_sen + '\n'])

        # write generated responses alongside the ground truth
        with open(f"output/output_{subset}_both_epoch_{epoch}.txt",
                  'w',
                  encoding='utf-8') as f:
            f.writelines(['[GroundTruth] ' + re.sub(r'@\d+', '__UNK__', ' '.join(sen_gt)) + '\n' \
                + '[Generated] ' + re.sub(r'@\d+', '__UNK__', ' '.join(sen_gen)) + '\n\n' \
                for sen_gt, sen_gen in zip(golden_sum, inference_sum)])

        self.save_embedding()

        return output_dict_gen

    def save_embedding(self):
        # `loop` was undefined here; dump this instance's vocab mapping
        with open('output/tf_bpe2index.json', 'w') as f:
            json.dump(self.dict, f)

    def vector2sentence(self, batch_sen):
        # convert a batch of sentences from token ids to tokens
        sentences = []
        for sen in batch_sen.numpy().tolist():
            sentence = []
            for word in sen:
                if word > 3:  # ids 0-3 are reserved for special tokens
                    sentence.append(self.index2word[word])
                elif word == 3:
                    sentence.append('_UNK_')
            sentences.append(sentence)
        return sentences

    @classmethod
    def optim_opts(cls):
        """
        Fetch optimizer selection.

        By default, collects everything in torch.optim, as well as importing:
        - qhm / qhmadam if installed from github.com/facebookresearch/qhoptim

        Override this (and probably call super()) to add your own optimizers.
        """
        # first pull torch.optim in
        optims = {
            k.lower(): v
            for k, v in optim.__dict__.items()
            if not k.startswith('__') and k[0].isupper()
        }
        try:
            import apex.optimizers.fused_adam as fused_adam
            optims['fused_adam'] = fused_adam.FusedAdam
        except ImportError:
            pass

        try:
            # https://openreview.net/pdf?id=S1fUpoR5FQ
            from qhoptim.pyt import QHM, QHAdam
            optims['qhm'] = QHM
            optims['qhadam'] = QHAdam
        except ImportError:
            # no QHM installed
            pass
        logger.info(optims)
        return optims
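
    # Usage note: with torch.optim imported as `optim`, optim_opts() yields
    # keys such as 'adam', 'sgd', and 'adagrad', so opt['optimizer'] = 'adam'
    # resolves to torch.optim.Adam when init_optim() runs below.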

    def init_optim(self, params, optim_states=None, saved_optim_type=None):
        """
        Initialize optimizer with model parameters.

        :param params:
            parameters from the model

        :param optim_states:
            optional argument providing states of optimizer to load

        :param saved_optim_type:
            type of optimizer being loaded, if changed will skip loading
            optimizer states
        """

        opt = self.opt

        # set up optimizer args
        lr = opt['learningrate']
        kwargs = {'lr': lr}
        # kwargs['amsgrad'] = True
        # kwargs['betas'] = (0.9, 0.999)

        optim_class = self.optim_opts()[opt['optimizer']]
        logger.info(f'optim_class = {optim_class}')
        self.optimizer = optim_class(params, **kwargs)

    def backward(self, loss):
        """
        Perform a backward pass. It is recommended you use this instead of
        loss.backward(), for integration with distributed training and FP16
        training.
        """
        loss.backward()

    def update_params(self):
        """
        Perform step of optimization, clipping gradients and adjusting LR
        schedule if needed. Gradient accumulation is also performed if agent
        is called with --update-freq.

        It is recommended (but not forced) that you call this in train_step.
        """
        update_freq = 1
        if update_freq > 1:
            # we're doing gradient accumulation, so we only want to step
            # every N updates
            self._number_grad_accum = (self._number_grad_accum +
                                       1) % update_freq
            if self._number_grad_accum != 0:
                return
        # is a clip value of 0.1 too small? the original used this setting
        if self.opt['gradient_clip'] > 0:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                           self.opt['gradient_clip'])

        self.optimizer.step()

    def zero_grad(self):
        """
        Zero out optimizer.

        It is recommended you call this in train_step. It automatically handles
        gradient accumulation if agent is called with --update-freq.
        """
        self.optimizer.zero_grad()
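
A hedged sketch of how this training loop might be driven end to end; the
dict keys are exactly the ones read by the class above, while every value
(paths, sizes, hyperparameters) is purely illustrative.

if __name__ == '__main__':
    opt = {
        'batch_size': 32,
        'epoch': 30,
        'use_cuda': True,
        'gpu': 0,
        'bpe2index': 'data/bpe2index.json',   # illustrative path
        'process_data': False,
        'embedding_type': 'random',
        'load_dict': None,
        'n_entity': 64368,                    # illustrative sizes
        'n_concept': 29308,
        'learningrate': 1e-3,
        'optimizer': 'adam',
        'gradient_clip': 0.1,
        'model_save_path': 'saved_model/net_parameter1.pkl',  # illustrative
    }
    loop = TrainLoop_Transformer(opt)
    loop.train()
    loop.val(is_test=True)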