def _compute_scores(self, src_filename, trg_filename):
    valid_bitext_dataset = ZipDataset(
        TextLineDataset(data_path=src_filename,
                        vocabulary=self.vocab_src,
                        is_train_dataset=False),
        TextLineDataset(data_path=trg_filename,
                        vocabulary=self.vocab_tgt,
                        is_train_dataset=False))

    # NOTE: even with shuffle=False, bucketing may still reorder the sentences,
    # which is why numbering=True is used to recover the original order.
    valid_iterator = DataIterator(dataset=valid_bitext_dataset,
                                  batch_size=40,
                                  use_bucket=True,
                                  buffer_size=100000,
                                  numbering=True,
                                  shuffle=False)

    valid_iter = valid_iterator.build_generator()
    score_result = dict()
    self.model.eval()
    with torch.no_grad():
        for batch in valid_iter:
            seq_numbers, seqs_x, seqs_y = batch
            x, y = prepare_data(seqs_x, seqs_y, cuda=True)
            y_inp = y[:, :-1].contiguous()
            y_label = y[:, 1:].contiguous()

            log_probs = self.model(x, y_inp)  # [batch_size, seq_len, vocab_size]
            batch_size, seq_len = y_label.shape

            log_probs = log_probs.view(-1, self.vocab_tgt.max_n_words)
            y_label = y_label.view(-1)
            # per-token negative log-likelihood: the smaller, the better
            loss = F.nll_loss(log_probs,
                              y_label,
                              reduce=False,
                              ignore_index=self.vocab_tgt.pad)
            loss = loss.view(batch_size, seq_len)
            loss = loss.sum(-1)

            y_label = y_label.view(batch_size, seq_len)
            valid_token = (y_label != self.vocab_tgt.pad).sum(-1)
            norm_loss = loss.double().div(valid_token.double())

            for seq_num, l, nl in zip(seq_numbers, loss, norm_loss):
                score_result.update({seq_num: (l.item(), nl.item())})

            # for i1, y_l in enumerate(y_label):
            #     score = 0
            #     for i2, y_index in enumerate(y_l):
            #         if y_index.item() == 0:
            #             break
            #         score += log_probs[i1][i2][y_index.item()].item()
            #     score_result.update({seq_numbers[i1]: score})

    return score_result
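# A minimal usage sketch (not part of the original code): since `_compute_scores`
# keys its result dict by the `numbering` index, the per-sentence scores can be
# written back in the original line order of `src_filename`. The helper name and
# output format below are assumptions for illustration only.
def write_scores(score_result, out_path):
    """Write "sum_loss<TAB>normalized_loss" per line, in original sentence order."""
    with open(out_path, "w") as f_out:
        for seq_num in sorted(score_result.keys()):
            sum_loss, norm_loss = score_result[seq_num]
            f_out.write("{0:.4f}\t{1:.4f}\n".format(sum_loss, norm_loss))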
def test_attack(config_path, save_to, model_name="attacker", shuffle=True, use_gpu=True): """ attack :param config_path: attack attack configs :param save_to: (string) saving directories :param model_name: (string) for saving names :param shuffle: (boolean) for batch scheme, shuffle data set :param use_gpu: (boolean) on gpu or not :return: attacked sequences """ # initiate with open(config_path.strip()) as f: configs = yaml.load(f) attack_configs = configs["attack_configs"] attacker_model_configs = configs["attacker_model_configs"] attacker_optim_configs = configs["attacker_optimizer_configs"] training_configs = configs["training_configs"] victim_config_path = attack_configs["victim_configs"] victim_model_path = attack_configs["victim_model"] with open(victim_config_path.strip()) as v_f: print("open victim configs...%s" % victim_config_path) victim_configs = yaml.load(v_f) data_configs = victim_configs["data_configs"] # building inputs vocab_src = Vocabulary(**data_configs["vocabularies"][0]) vocab_trg = Vocabulary(**data_configs["vocabularies"][1]) # parallel data binding train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0]), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_trg, max_len=data_configs['max_len'][1]), shuffle=shuffle) valid_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs["valid_data"][0], vocabulary=vocab_src, max_len=data_configs["max_len"][0]), TextLineDataset(data_path=data_configs["valid_data"][1], vocabulary=vocab_trg, max_len=data_configs["max_len"][1]), shuffle=shuffle) train_batch_size = training_configs["batch_size"] train_buffer_size = training_configs["buffer_size"] training_iterator = DataIterator( dataset=train_bitext_dataset, batch_size=train_batch_size, use_bucket=training_configs['use_bucket'], buffer_size=train_buffer_size, batching_func=training_configs['batching_key']) # valid_iterator is bucketed by length to accelerate decoding (numbering to mark orders) valid_iterator = DataIterator( dataset=valid_bitext_dataset, batch_size=training_configs["valid_batch_size"], use_bucket=True, buffer_size=50000, numbering=True) # initiate saver model_collections = Collections() checkpoint_saver = Saver( save_prefix="{0}.ckpt".format(os.path.join(save_to, model_name)), num_max_keeping=training_configs['num_kept_checkpoints']) w2p, w2vocab = load_or_extract_near_vocab( config_path=victim_config_path, model_path=victim_model_path, init_perturb_rate=attack_configs["init_perturb_rate"], save_to=os.path.join(save_to, "near_vocab"), save_to_full=os.path.join(save_to, "full_near_vocab"), top_reserve=12, emit_as_id=True) # build attacker # attacker = Attacker(n_words=vocab_src.max_n_words, # **attacker_model_configs) # if use_gpu: # attacker = attacker.cuda() # CURRENT_DEVICE = "cuda" # else: # CURRENT_DEVICE = "cpu" # load embedding from trained NMT models # load_embedding(attacker, model_path=victim_model_path, device=CURRENT_DEVICE) # attacker.eval() # for i in range(6): train_iter = training_iterator.build_generator() batch = train_iter.__next__() print(batch[1][3])
def test_discriminator(config_path, save_to, model_name="Discriminator", shuffle=True, use_gpu=True): with open(config_path.strip()) as f: configs = yaml.load(f) attack_configs = configs["attack_configs"] discriminator_configs = configs["discriminator_configs"] discriminator_model_configs = discriminator_configs[ "discriminator_model_configs"] discriminator_optim_configs = discriminator_configs[ "discriminator_optimizer_configs"] victim_config_path = attack_configs["victim_configs"] victim_model_path = attack_configs["victim_model"] with open(victim_config_path.strip()) as v_f: print("open victim configs...%s" % victim_config_path) victim_configs = yaml.load(v_f) data_configs = victim_configs["data_configs"] # building inputs vocab_src = Vocabulary(**data_configs["vocabularies"][0]) vocab_trg = Vocabulary(**data_configs["vocabularies"][1]) # parallel data binding train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0]), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_trg, max_len=data_configs['max_len'][1]), shuffle=shuffle) valid_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs["valid_data"][0], vocabulary=vocab_src, max_len=data_configs["max_len"][0]), TextLineDataset(data_path=data_configs["valid_data"][1], vocabulary=vocab_trg, max_len=data_configs["max_len"][1]), shuffle=shuffle) train_batch_size = attack_configs["batch_size"] train_buffer_size = attack_configs["buffer_size"] training_iterator = DataIterator( dataset=train_bitext_dataset, batch_size=train_batch_size, use_bucket=attack_configs['use_bucket'], buffer_size=train_buffer_size, batching_func=attack_configs['batching_key']) # valid_iterator is bucketed by length to accelerate decoding (numbering to mark orders) valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=attack_configs["batch_size"], use_bucket=True, buffer_size=50000, numbering=True) # initiate saver model_collections = Collections() checkpoint_saver = Saver( save_prefix="{0}.ckpt".format(os.path.join(save_to, model_name)), num_max_keeping=attack_configs['num_kept_checkpoints']) # building model model_D = TransDiscriminator(n_src_words=vocab_src.max_n_words, n_trg_words=vocab_trg.max_n_words, **discriminator_model_configs) if use_gpu: model_D = model_D.cuda() CURRENT_DEVICE = "cuda" else: CURRENT_DEVICE = "cpu" # load embedding from trained NMT models load_embedding(model_D, model_path=victim_model_path, device=CURRENT_DEVICE) # TODO reloading parameters # classification need label smoothing to trigger Negative log likelihood loss criterion = nn.CrossEntropyLoss() # building optimizer optim = Optimizer( name=discriminator_optim_configs["optimizer"], model=model_D, lr=discriminator_optim_configs["learning_rate"], grad_clip=discriminator_optim_configs["grad_clip"], optim_args=discriminator_optim_configs["optimizer_params"]) # Build scheduler for optimizer if needed if discriminator_optim_configs['schedule_method'] is not None: if discriminator_optim_configs['schedule_method'] == "loss": scheduler = ReduceOnPlateauScheduler( optimizer=optim, **discriminator_optim_configs["scheduler_configs"]) elif discriminator_optim_configs['schedule_method'] == "noam": scheduler = NoamScheduler( optimizer=optim, **discriminator_optim_configs['scheduler_configs']) elif discriminator_optim_configs["schedule_method"] == "rsqrt": scheduler = RsqrtScheduler( optimizer=optim, **discriminator_optim_configs["scheduler_configs"]) else: 
WARN( "Unknown scheduler name {0}. Do not use lr_scheduling.".format( discriminator_optim_configs['schedule_method'])) scheduler = None else: scheduler = None # reload latest checkpoint checkpoint_saver.load_latest(model=model_D, optim=optim, lr_scheduler=scheduler, collections=model_collections) # prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [0])[-1] oom_count = model_collections.get_collection("oom_count", [0])[-1] summary_writer = SummaryWriter(log_dir=save_to + "log") w2p, w2vocab = load_or_extract_near_vocab( config_path=victim_config_path, model_path=victim_model_path, init_perturb_rate=attack_configs["init_perturb_rate"], save_to=os.path.join(save_to, "near_vocab"), save_to_full=os.path.join(save_to, "full_near_vocab"), top_reserve=12) while True: # infinite loop for training epoch training_iter = training_iterator.build_generator() for batch in training_iter: uidx += 1 if discriminator_optim_configs[ "schedule_method"] is not None and discriminator_optim_configs[ "schedule_method"] != "loss": scheduler.step(global_step=uidx) # training session seqs_x, seqs_y = batch # returned tensor type of the data optim.zero_grad() try: x, y, flags = prepare_D_data(w2p, w2vocab, victim_config_path, seqs_x, seqs_y, use_gpu=use_gpu) loss = compute_D_forward(model_D, criterion=criterion, seqs_x=x, seqs_y=y, gold_flags=flags) optim.step() print("loss:", loss) except RuntimeError as e: if "out of memory" in str(e): print("WARNING: out of memory, skipping batch") oom_count += 1 optim.zero_grad() else: raise e # check for validation and save the model if should_trigger_by_steps( uidx, eidx, every_n_step=discriminator_configs["acc_valid_freq"]): lrate = list(optim.get_lrate())[0] summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx) if should_trigger_by_steps( uidx, eidx, every_n_step=attack_configs["save_freq"]): model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) checkpoint_saver.save(global_step=uidx, model=model_D, optim=optim, lr_scheduler=scheduler, collections=model_collections) if should_trigger_by_steps( uidx, eidx, every_n_step=discriminator_configs["acc_valid_freq"]): # validate average loss over samples on validation set n_sents = 0. sum_loss = 0.0 valid_iter = valid_iterator.build_generator() for batch in valid_iter: _, seqs_x, seqs_y = batch n_sents += len(seqs_x) x, y, flags = prepare_D_data(w2p, w2vocab, victim_config_path, seqs_x, seqs_y, use_gpu=use_gpu) loss = compute_D_forward(model_D, criterion, x, y, gold_flags=flags, eval=True) if np.isnan(loss): WARN("NaN detected!") sum_loss += float(loss) eval_loss = float(sum_loss / n_sents) summary_writer.add_scalar("valid", scalar_value=eval_loss, global_step=uidx) if should_trigger_by_steps( uidx, eidx, every_n_step=discriminator_configs["acc_valid_freq"]): # validate accuracy of the discriminator acc = acc_validation(uidx=uidx, discriminator_model=model_D, valid_iterator=valid_iterator, victim_configs=victim_config_path, w2p=w2p, w2vocab=w2vocab, batch_size=attack_configs["batch_size"], use_gpu=use_gpu) summary_writer.add_scalar("accuracy", scalar_value=acc, global_step=uidx) eidx += 1 pass
def train(FLAGS):
    """
    FLAGS:
        saveto: str
        reload: store_true
        config_path: str
        pretrain_path: str, default=""
        model_name: str
        log_path: str
    """
    # write log of training to file.
    write_log_to_file(
        os.path.join(FLAGS.log_path,
                     "%s.log" % time.strftime("%Y%m%d-%H%M%S")))

    GlobalNames.USE_GPU = FLAGS.use_gpu

    if GlobalNames.USE_GPU:
        CURRENT_DEVICE = "cuda:0"
    else:
        CURRENT_DEVICE = "cpu"

    config_path = os.path.abspath(FLAGS.config_path)
    with open(config_path.strip()) as f:
        configs = yaml.load(f)

    INFO(pretty_configs(configs))

    # Add default configs
    configs = default_configs(configs)
    data_configs = configs['data_configs']
    model_configs = configs['model_configs']
    optimizer_configs = configs['optimizer_configs']
    training_configs = configs['training_configs']

    GlobalNames.SEED = training_configs['seed']
    set_seed(GlobalNames.SEED)

    best_model_prefix = os.path.join(
        FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)

    timer = Timer()

    # ================================================================================== #
    # Load Data
    INFO('Loading data...')
    timer.tic()

    # Generate target dictionary
    vocab_tgt = Vocabulary(**data_configs["vocabularies"][0])

    train_batch_size = training_configs["batch_size"] * max(
        1, training_configs["update_cycle"])
    train_buffer_size = training_configs["buffer_size"] * max(
        1, training_configs["update_cycle"])

    train_bitext_dataset = ZipDataset(
        TextLineDataset(
            data_path=data_configs['train_data'][0],
            vocabulary=vocab_tgt,
            max_len=data_configs['max_len'][0],
        ),
        shuffle=training_configs['shuffle'])

    valid_bitext_dataset = ZipDataset(
        TextLineDataset(
            data_path=data_configs['valid_data'][0],
            vocabulary=vocab_tgt,
        ))

    training_iterator = DataIterator(
        dataset=train_bitext_dataset,
        batch_size=train_batch_size,
        use_bucket=training_configs['use_bucket'],
        buffer_size=train_buffer_size,
        batching_func=training_configs['batching_key'])

    valid_iterator = DataIterator(
        dataset=valid_bitext_dataset,
        batch_size=training_configs['valid_batch_size'],
        use_bucket=True,
        buffer_size=100000,
        numbering=True)

    INFO('Done. Elapsed time {0}'.format(timer.toc()))

    lrate = optimizer_configs['learning_rate']
    is_early_stop = False

    # ================================ Begin ======================================== #
    # Build Model & Optimizer
    # We would do steps below one after another
    # 1. build models & criterion
    # 2. move models & criterion to gpu if needed
    # 3. load pre-trained model if needed
    # 4. build optimizer
    # 5. build learning rate scheduler if needed
    # 6. load checkpoints if needed

    # 0. Initial
    model_collections = Collections()
    checkpoint_saver = Saver(
        save_prefix="{0}.ckpt".format(
            os.path.join(FLAGS.saveto, FLAGS.model_name)),
        num_max_keeping=training_configs['num_kept_checkpoints'])
    best_model_saver = Saver(
        save_prefix=best_model_prefix,
        num_max_keeping=training_configs['num_kept_best_model'])

    # 1. Build Model & Criterion
    INFO('Building model...')
    timer.tic()
    lm_model = build_model(n_tgt_vocab=vocab_tgt.max_n_words, **model_configs)
    INFO(lm_model)

    params_total = sum([p.numel() for n, p in lm_model.named_parameters()])
    params_without_embedding = sum([
        p.numel() for n, p in lm_model.named_parameters()
        if n.find('embedding') == -1
    ])
    INFO('Total parameters: {}'.format(params_total))
    INFO('Total parameters (excluding word embeddings): {}'.format(
        params_without_embedding))

    critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
    INFO(critic)
    INFO('Done. Elapsed time {0}'.format(timer.toc()))

    # 2. Move to GPU
    if GlobalNames.USE_GPU:
        lm_model = lm_model.cuda()
        critic = critic.cuda()

    # 3.
Load pretrained model if needed lm_model.init_parameters(FLAGS.pretrain_path, device=CURRENT_DEVICE) # 4. Build optimizer INFO('Building Optimizer...') optim = Optimizer(name=optimizer_configs['optimizer'], model=lm_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params']) # 5. Build scheduler for optimizer if needed if optimizer_configs['schedule_method'] is not None: if optimizer_configs['schedule_method'] == "loss": scheduler = ReduceOnPlateauScheduler( optimizer=optim, **optimizer_configs["scheduler_configs"]) elif optimizer_configs['schedule_method'] == "noam": scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs']) else: WARN( "Unknown scheduler name {0}. Do not use lr_scheduling.".format( optimizer_configs['schedule_method'])) scheduler = None else: scheduler = None # 6. build moving average if training_configs['moving_average_method'] is not None: ma = MovingAverage( moving_average_method=training_configs['moving_average_method'], named_params=lm_model.named_parameters(), alpha=training_configs['moving_average_alpha']) else: ma = None INFO('Done. Elapsed time {0}'.format(timer.toc())) # Reload from latest checkpoint if FLAGS.reload: checkpoint_saver.load_latest(model=lm_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) # ================================================================================== # # Prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [0])[-1] bad_count = model_collections.get_collection("bad_count", [0])[-1] oom_count = model_collections.get_collection("oom_count", [0])[-1] summary_writer = SummaryWriter(log_dir=FLAGS.log_path) cum_samples = 0 cum_words = 0 valid_loss = best_valid_loss = float('inf') # Max Float saving_files = [] # Timer for computing speed timer_for_speed = Timer() timer_for_speed.tic() INFO('Begin training...') while True: summary_writer.add_scalar("Epoch", (eidx + 1), uidx) # Build iterator and progress bar training_iter = training_iterator.build_generator() training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format( eidx, uidx), total=len(training_iterator), unit="sents") for batch in training_iter: uidx += 1 if optimizer_configs[ "schedule_method"] is not None and optimizer_configs[ "schedule_method"] != "loss": scheduler.step(global_step=uidx) seqs_y = batch n_samples_t = len(seqs_y) n_words_t = sum(len(s) for s in seqs_y) cum_samples += n_samples_t cum_words += n_words_t train_loss = 0. 
optim.zero_grad() try: # Prepare data for (seqs_y_t, ) in split_shard( seqs_y, split_size=training_configs['update_cycle']): y = prepare_data(seqs_y_t, cuda=GlobalNames.USE_GPU) loss = compute_forward( model=lm_model, critic=critic, # seqs_x=x, seqs_y=y, eval=False, normalization=n_samples_t, norm_by_words=training_configs["norm_by_words"]) train_loss += loss / y.size( 1) if not training_configs["norm_by_words"] else loss optim.step() except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') oom_count += 1 optim.zero_grad() else: raise e if ma is not None and eidx >= training_configs[ 'moving_average_start_epoch']: ma.step() training_progress_bar.update(n_samples_t) training_progress_bar.set_description( ' - (Epc {}, Upd {}) '.format(eidx, uidx)) training_progress_bar.set_postfix_str( 'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format( train_loss, valid_loss, best_valid_loss)) summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx) # ================================================================================== # # Display some information if should_trigger_by_steps( uidx, eidx, every_n_step=training_configs['disp_freq']): # words per second and sents per second words_per_sec = cum_words / (timer.toc(return_seconds=True)) sents_per_sec = cum_samples / (timer.toc(return_seconds=True)) lrate = list(optim.get_lrate())[0] summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx) summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx) summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx) # Reset timer timer.tic() cum_words = 0 cum_samples = 0 # ================================================================================== # # Saving checkpoints if should_trigger_by_steps( uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug): model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) model_collections.add_to_collection("bad_count", bad_count) if not is_early_stop: checkpoint_saver.save(global_step=uidx, model=lm_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) # ================================================================================== # # Loss Validation & Learning rate annealing if should_trigger_by_steps( global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=FLAGS.debug): if ma is not None: origin_state_dict = deepcopy(lm_model.state_dict()) lm_model.load_state_dict(ma.export_ma_params(), strict=False) valid_loss = loss_validation( model=lm_model, critic=critic, valid_iterator=valid_iterator, norm_by_words=training_configs["norm_by_words"]) model_collections.add_to_collection("history_losses", valid_loss) min_history_loss = np.array( model_collections.get_collection("history_losses")).min() summary_writer.add_scalar("loss", valid_loss, global_step=uidx) summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx) if ma is not None: lm_model.load_state_dict(origin_state_dict) del origin_state_dict if optimizer_configs["schedule_method"] == "loss": scheduler.step(metric=best_valid_loss) # If model get new best valid loss if valid_loss < best_valid_loss: bad_count = 0 if is_early_stop is False: # 1. 
save the best model torch.save(lm_model.state_dict(), best_model_prefix + ".final") # 2. record all several best models best_model_saver.save(global_step=uidx, model=lm_model) else: bad_count += 1 # At least one epoch should be traversed if bad_count >= training_configs[ 'early_stop_patience'] and eidx > 0: is_early_stop = True WARN("Early Stop!") best_valid_loss = min_history_loss summary_writer.add_scalar("bad_count", bad_count, uidx) INFO("{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}".format( uidx, valid_loss, lrate, bad_count)) training_progress_bar.close() eidx += 1 if eidx > training_configs["max_epochs"]: break
def train(config_path, model_path, model_type, src_filename, trg_filename): """ flags: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # ================================================================================== # # Initialization for training on different devices # - CPU/GPU # - Single/Distributed Constants.USE_GPU = True print(config_path) print(model_path) print(model_type) world_size = 1 rank = 0 local_rank = 0 if Constants.USE_GPU: torch.cuda.set_device(local_rank) Constants.CURRENT_DEVICE = "cuda:{0}".format(local_rank) else: Constants.CURRENT_DEVICE = "cpu" # ================================================================================== # # Parsing configuration files # - Load default settings # - Load pre-defined settings # - Load user-defined settings configs = prepare_configs(config_path) data_configs = configs['data_configs'] model_configs = configs['model_configs'] training_configs = configs['training_configs'] INFO(pretty_configs(configs)) Constants.SEED = training_configs['seed'] set_seed(Constants.SEED) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary.build_from_file(**data_configs['vocabularies'][0]) vocab_tgt = Vocabulary.build_from_file(**data_configs['vocabularies'][1]) Constants.EOS = vocab_src.eos Constants.PAD = vocab_src.pad Constants.BOS = vocab_src.bos valid_bitext_dataset = ZipDataset( TextLineDataset( data_path=src_filename, vocabulary=vocab_src, max_len=100, is_train_dataset=False, ), TextLineDataset( data_path=trg_filename, vocabulary=vocab_tgt, is_train_dataset=False, max_len=100, )) valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=20, use_bucket=training_configs['use_bucket'], buffer_size=training_configs['buffer_size'], numbering=True, world_size=world_size, rank=rank) INFO('Done. Elapsed time {0}'.format(timer.toc())) # ================================ Begin ======================================== # # Build Model & Optimizer # We would do steps below on after another # 1. build models & criterion # 2. move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. build learning rate scheduler if needed # 6. load checkpoints if needed # 0. Initial # 1. Build Model & Criterion INFO('Building model...') timer.tic() nmt_model = build_model(n_src_vocab=vocab_src.max_n_words, n_tgt_vocab=vocab_tgt.max_n_words, padding_idx=vocab_src.pad, vocab_src=vocab_src, **model_configs) INFO(nmt_model) # 2. Move to GPU if Constants.USE_GPU: nmt_model = nmt_model.cuda() # 3. Load pretrained model if needed load_pretrained_model(nmt_model, model_path, device=Constants.CURRENT_DEVICE) INFO('Done. 
Elapsed time {0}'.format(timer.toc())) # ================================================================================== # # Prepare training sent_per_sec_meter = TimeMeter() tok_per_sec_meter = TimeMeter() grad_denom = 0 train_loss = 0.0 cum_n_words = 0 valid_loss = best_valid_loss = float('inf') sent_per_sec_meter.start() tok_per_sec_meter.start() INFO('Begin training...') eidx = 0 uidx = 0 score_result = dict() # Build iterator and progress bar training_iter = valid_iterator.build_generator() training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format( eidx, uidx), total=len(valid_iterator), unit="sents") for batch in training_iter: seqs_numbers, seqs_x, seqs_y = batch batch_size = len(seqs_x) cum_n_words += sum(len(s) for s in seqs_y) try: # Prepare data x, y = prepare_data(seqs_x, seqs_y, cuda=Constants.USE_GPU) y_inp = y[:, :-1].contiguous() y_label = y[:, 1:].contiguous() # [batch_size, seq_len] log_probs = nmt_model( x, y_inp, log_probs=True) # [batch_size, seq_len, vocab_size] _, seq_len = y_label.shape log_probs = log_probs.view(-1, vocab_tgt.max_n_words) y_label = y_label.view(-1) loss = F.nll_loss(log_probs, y_label, reduce=False, ignore_index=vocab_tgt.pad) loss = loss.view(batch_size, seq_len) loss = loss.sum(-1) y_label = y_label.view(batch_size, seq_len) valid_token = (y_label != vocab_tgt.pad).sum(-1) loss = loss.double().div(valid_token.double()) for seq_num, l in zip(seqs_numbers, loss): assert seq_num not in score_result score_result.update({seq_num: l.item()}) uidx += 1 grad_denom += batch_size except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') else: raise e if training_progress_bar is not None: training_progress_bar.update(batch_size) training_progress_bar.set_description( ' - (Epc {}, Upd {}) '.format(eidx, uidx)) postfix_str = 'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f}), '.format( train_loss, valid_loss, best_valid_loss) training_progress_bar.set_postfix_str(postfix_str) training_progress_bar.close() return score_result
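# A minimal follow-up sketch (assumption, not in the original code): the score_result
# returned above maps sentence index -> length-normalized NLL under the pretrained NMT
# model, so it can be used to keep the cleanest portion of a noisy parallel corpus.
# The keep_ratio value and the helper name are illustrative only.
def select_cleanest(score_result, keep_ratio=0.8):
    """Return the sentence indices with the lowest normalized loss."""
    ranked = sorted(score_result.items(), key=lambda kv: kv[1])
    n_keep = int(len(ranked) * keep_ratio)
    return sorted(idx for idx, _ in ranked[:n_keep])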
def train2(flags): """ flags: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # ================================================================================== # # Initialization for training on different devices # - CPU/GPU # - Single/Distributed Constants.USE_GPU = flags.use_gpu world_size = 1 rank = 0 local_rank = 0 if Constants.USE_GPU: torch.cuda.set_device(local_rank) Constants.CURRENT_DEVICE = "cuda:{0}".format(local_rank) else: Constants.CURRENT_DEVICE = "cpu" # ================================================================================== # # Parsing configuration files # - Load default settings # - Load pre-defined settings # - Load user-defined settings configs = prepare_configs(flags.config_path, flags.predefined_config) data_configs = configs['data_configs'] model_configs = configs['model_configs'] training_configs = configs['training_configs'] bt_configs = configs['bt_configs'] if 'bt_configs' in configs else None if bt_configs is not None: print("btconfigs ", bt_configs) if 'bt_attribute_data' not in bt_configs: Constants.USE_BT = False bt_configs = None else: Constants.USE_BT = True Constants.USE_BTTAG = bt_configs['use_bttag'] Constants.USE_CONFIDENCE = bt_configs['use_confidence'] INFO(pretty_configs(configs)) Constants.SEED = training_configs['seed'] set_seed(Constants.SEED) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary.build_from_file(**data_configs['vocabularies'][0]) vocab_tgt = Vocabulary.build_from_file(**data_configs['vocabularies'][1]) Constants.EOS = vocab_src.eos Constants.PAD = vocab_src.pad Constants.BOS = vocab_src.bos # bt tag dataset if Constants.USE_BT: if Constants.USE_BTTAG: Constants.BTTAG = vocab_src.bttag train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], is_train_dataset=True ), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_tgt, max_len=data_configs['max_len'][1], is_train_dataset=True ), AttributeDataset(data_path=bt_configs['bt_attribute_data'], is_train_dataset=True) ) else: train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], is_train_dataset=True ), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_tgt, max_len=data_configs['max_len'][1], is_train_dataset=True ) ) training_iterator = DataIterator(dataset=train_bitext_dataset, batch_size=training_configs["batch_size"], use_bucket=training_configs['use_bucket'], buffer_size=training_configs['buffer_size'], batching_func=training_configs['batching_key'], world_size=world_size, numbering=True, rank=rank) INFO('Done. Elapsed time {0}'.format(timer.toc())) # ================================ Begin ======================================== # # Build Model & Optimizer # We would do steps below on after another # 1. build models & criterion # 2. move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. build learning rate scheduler if needed # 6. load checkpoints if needed # 0. Initial # 1. 
    # Build Model & Criterion
    INFO('Building model...')
    timer.tic()
    nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
                            n_tgt_vocab=vocab_tgt.max_n_words,
                            padding_idx=vocab_src.pad,
                            vocab_src=vocab_src,
                            vocab_tgt=vocab_tgt,
                            **model_configs)
    INFO(nmt_model)

    # 2. Move to GPU
    if Constants.USE_GPU:
        nmt_model = nmt_model.cuda()

    # 3. Load pretrained model if needed
    load_pretrained_model(nmt_model,
                          flags.pretrain_path,
                          exclude_prefix=flags.pretrain_exclude_prefix,
                          device=Constants.CURRENT_DEVICE)

    nmt_model = nmt_model.encoder

    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    INFO('Begin training...')

    # compute a representation for every training sentence: mean pooling over the encoder states
    training_iter = training_iterator.build_generator()
    nmt_model.eval()

    all_seq_numbers = []
    encoder_filename = "/home/wangdq/encoder.mean.output"
    seq_numbers_filename = '/home/wangdq/seq_numbers.output'
    processed = 0
    with open(encoder_filename, 'w') as f_encoder, open(seq_numbers_filename, 'w') as f_seq_numbers:
        for batch in training_iter:
            bt_attrib = None
            # bt attrib data
            if Constants.USE_BT:
                seq_numbers, seqs_x, seqs_y, bt_attrib = batch  # seq_numbers are numbered from 0
            else:
                seq_numbers, seqs_x, seqs_y = batch

            x = prepare_data(seqs_x, seqs_y=None, cuda=Constants.USE_GPU, bt_attrib=bt_attrib)

            try:
                with torch.no_grad():
                    encoder_hidden, mask = nmt_model(x)
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory, skipping batch')
                    continue  # encoder_hidden/mask are not valid for this batch
                else:
                    raise e

            valid_hidden = (mask == False).float().cuda()
            sum_encoder_hidden = (encoder_hidden * valid_hidden.unsqueeze(-1)).sum(dim=1)
            valid_tokens = (mask == False).sum(-1)
            mean_encoder_hidden = sum_encoder_hidden.float().div(valid_tokens.unsqueeze(1))

            all_seq_numbers.extend(seq_numbers)
            # if all_mean_encoder_hidden is None:
            #     all_mean_encoder_hidden = mean_encoder_hidden.cpu()
            # else:
            #     all_mean_encoder_hidden = torch.cat((all_mean_encoder_hidden, mean_encoder_hidden.cpu()), dim=0)
            mean_encoder_list = mean_encoder_hidden.cpu().numpy().tolist()
            content = [[str(i) for i in mean] for mean in mean_encoder_list]
            content = [' '.join(mean) + '\n' for mean in content]
            f_encoder.writelines(content)
            processed += len(seq_numbers)
            print(processed)

        content = [str(i) for i in all_seq_numbers]
        content = ' '.join(content)
        f_seq_numbers.writelines(content)
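# A minimal read-back sketch (assumption, not part of the original code): the loop above
# writes one space-separated mean-pooled encoder vector per line plus a single line of
# sentence numbers, so the dump can be reloaded like this for later nearest-neighbour or
# clustering experiments. The default paths are the hard-coded ones used above.
import numpy as np

def load_mean_encoder_dump(encoder_filename="/home/wangdq/encoder.mean.output",
                           seq_numbers_filename="/home/wangdq/seq_numbers.output"):
    vectors = np.loadtxt(encoder_filename, dtype=np.float32)  # [n_sents, d_model]
    with open(seq_numbers_filename) as f:
        seq_numbers = [int(tok) for tok in f.read().split()]
    return vectors, seq_numbers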
def test_data(flags):
    Constants.USE_GPU = flags.use_gpu

    world_size = 1
    rank = 0
    local_rank = 0

    if Constants.USE_GPU:
        torch.cuda.set_device(local_rank)
        Constants.CURRENT_DEVICE = "cuda:{0}".format(local_rank)
    else:
        Constants.CURRENT_DEVICE = "cpu"

    # ================================================================================== #
    # Parsing configuration files
    # - Load default settings
    # - Load pre-defined settings
    # - Load user-defined settings
    configs = prepare_configs(flags.config_path, flags.predefined_config)

    data_configs = configs['data_configs']
    model_configs = configs['model_configs']
    training_configs = configs['training_configs']
    bt_configs = configs['bt_configs'] if 'bt_configs' in configs else None
    if bt_configs is not None:
        print("btconfigs ", bt_configs)
        if 'bt_attribute_data' not in bt_configs:
            Constants.USE_BT = False
            bt_configs = None
        else:
            Constants.USE_BT = True
            Constants.USE_BTTAG = bt_configs['use_bttag']
            Constants.USE_CONFIDENCE = bt_configs['use_confidence']

    INFO(pretty_configs(configs))

    Constants.SEED = training_configs['seed']
    set_seed(Constants.SEED)

    timer = Timer()

    # ================================================================================== #
    # Load Data
    INFO('Loading data...')
    timer.tic()

    # Generate source and target dictionaries
    vocab_src = Vocabulary.build_from_file(**data_configs['vocabularies'][0])
    vocab_tgt = Vocabulary.build_from_file(**data_configs['vocabularies'][1])

    Constants.EOS = vocab_src.eos
    Constants.PAD = vocab_src.pad
    Constants.BOS = vocab_src.bos

    valid_bitext_dataset = ZipDataset(
        TextLineDataset(data_path=data_configs['valid_data'][0],
                        vocabulary=vocab_src,
                        is_train_dataset=False),
        TextLineDataset(data_path=data_configs['valid_data'][1],
                        vocabulary=vocab_tgt,
                        is_train_dataset=False))

    valid_iterator = DataIterator(dataset=valid_bitext_dataset,
                                  batch_size=training_configs['valid_batch_size'],
                                  use_bucket=True,
                                  buffer_size=100000,
                                  numbering=True,
                                  world_size=world_size,
                                  rank=rank,
                                  shuffle=False)

    INFO('Done. Elapsed time {0}'.format(timer.toc()))

    # ================================ Begin ======================================== #
    # Build Model & Optimizer
    # We would do steps below one after another
    # 1. build models & criterion
    # 2. move models & criterion to gpu if needed
    # 3. load pre-trained model if needed
    # 4. build optimizer
    # 5. build learning rate scheduler if needed
    # 6. load checkpoints if needed

    # 0. Initial

    # 1. Build Model & Criterion
    INFO('Building model...')
    timer.tic()
    nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
                            n_tgt_vocab=vocab_tgt.max_n_words,
                            padding_idx=vocab_src.pad,
                            vocab_src=vocab_src,
                            vocab_tgt=vocab_tgt,
                            **model_configs)
    INFO(nmt_model)

    # 2. Move to GPU
    if Constants.USE_GPU:
        nmt_model = nmt_model.cuda()

    # 3. Load pretrained model if needed
    load_pretrained_model(nmt_model,
                          flags.pretrain_path,
                          exclude_prefix=flags.pretrain_exclude_prefix,
                          device=Constants.CURRENT_DEVICE)

    nmt_model = nmt_model.encoder

    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    INFO('Begin evaluation...')

    # compute a representation for every validation sentence: mean pooling over the encoder states
    nmt_model.eval()
    valid_iter = valid_iterator.build_generator()

    all_seq_numbers = []
    all_mean_encoder_hidden = None
    for batch in valid_iter:
        bt_attrib = None
        seq_numbers, seqs_x, seqs_y = batch

        x = prepare_data(seqs_x, seqs_y=None, cuda=Constants.USE_GPU, bt_attrib=bt_attrib)

        try:
            with torch.no_grad():
                encoder_hidden, mask = nmt_model(x)
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory, skipping batch')
                continue  # encoder_hidden/mask are not valid for this batch
            else:
                raise e

        # extend only for batches that were actually encoded, to keep ids aligned
        all_seq_numbers.extend(seq_numbers)

        valid_hidden = (mask == False).float().cuda()
        sum_encoder_hidden = (encoder_hidden * valid_hidden.unsqueeze(-1)).sum(dim=1)
        valid_tokens = (mask == False).sum(-1)
        mean_encoder_hidden = sum_encoder_hidden.float().div(valid_tokens.unsqueeze(1))

        if all_mean_encoder_hidden is None:
            all_mean_encoder_hidden = mean_encoder_hidden
        else:
            all_mean_encoder_hidden = torch.cat(
                (all_mean_encoder_hidden, mean_encoder_hidden), dim=0)

    return all_mean_encoder_hidden, all_seq_numbers
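# A minimal retrieval sketch (assumption, not in the original code): given the mean-pooled
# representations returned by test_data and a matrix of train-side representations (for
# example the dump produced by train2), find the nearest training sentence for each
# validation sentence by cosine similarity. Shapes are assumed to be [n_valid, d_model]
# and [n_train, d_model].
import torch
import torch.nn.functional as F

def nearest_train_sentences(valid_hidden, train_hidden):
    valid_norm = F.normalize(valid_hidden, dim=-1)
    train_norm = F.normalize(train_hidden, dim=-1)
    sim = valid_norm.matmul(train_norm.t())   # [n_valid, n_train]
    scores, indices = sim.max(dim=-1)         # best match per validation sentence
    return scores, indices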
def _compute_scores(self, src_filename, trg_filename):
    valid_bitext_dataset = ZipDataset(
        TextLineDataset(data_path=src_filename,
                        vocabulary=self.vocab_src,
                        is_train_dataset=False,
                        max_len=100),
        TextLineDataset(data_path=trg_filename,
                        vocabulary=self.vocab_tgt,
                        is_train_dataset=False,
                        max_len=100))

    valid_iterator = DataIterator(dataset=valid_bitext_dataset,
                                  batch_size=20,
                                  use_bucket=True,
                                  buffer_size=1000,
                                  numbering=True,
                                  shuffle=False)

    valid_iter = valid_iterator.build_generator()
    score_result = dict()
    self.model.eval()

    eidx = 0
    uidx = 0
    training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx),
                                 total=len(valid_iterator),
                                 unit="sents")
    with torch.no_grad():
        for batch in valid_iter:
            seq_numbers, seqs_x, seqs_y = batch
            x, y = prepare_data(seqs_x, seqs_y, cuda=True)
            y_inp = y[:, :-1].contiguous()
            y_label = y[:, 1:].contiguous()  # [batch_size, seq_len]

            log_probs = self.model(
                x, y_inp, log_probs=True)  # [batch_size, seq_len, vocab_size]
            batch_size, seq_len = y_label.shape

            log_probs = log_probs.view(-1, self.vocab_tgt.max_n_words)
            y_label = y_label.view(-1)
            loss = F.nll_loss(log_probs,
                              y_label,
                              reduce=False,
                              ignore_index=self.vocab_tgt.pad)
            loss = loss.view(batch_size, seq_len)
            loss = loss.sum(-1)

            y_label = y_label.view(batch_size, seq_len)
            valid_token = (y_label != self.vocab_tgt.pad).sum(-1)
            loss = loss.double().div(valid_token.double())

            for seq_num, l in zip(seq_numbers, loss):
                assert seq_num not in score_result
                score_result.update({seq_num: l.item()})

            training_progress_bar.update(batch_size)
            training_progress_bar.set_description(
                ' - (Epc {}, Upd {}) '.format(eidx, uidx))

            # for i1, y_l in enumerate(y_label):
            #     score = 0
            #     for i2, y_index in enumerate(y_l):
            #         if y_index.item() == 0:
            #             break
            #         score += log_probs[i1][i2][y_index.item()].item()
            #     valid_token = (y_label != self.vocab_tgt.pad).long().sum().item()
            #     score = -1 * score / valid_token
            #     score_result.update({seq_numbers[i1]: score})

    return score_result
def run(): # default actor threads as 1 os.environ["OMP_NUM_THREADS"] = "1" mp = _mp.get_context('spawn') args = parser.parse_args() if not os.path.exists(args.save_to): os.mkdir(args.save_to) with open(args.config_path, "r") as f, \ open(os.path.join(args.save_to, "current_attack_configs.yaml"), "w") as current_configs: configs = yaml.load(f) yaml.dump(configs, current_configs) attack_configs = configs["attack_configs"] attacker_configs = configs["attacker_configs"] attacker_model_configs = attacker_configs["attacker_model_configs"] attacker_optimizer_configs = attacker_configs["attacker_optimizer_configs"] discriminator_configs = configs["discriminator_configs"] # training_configs = configs["training_configs"] # initial best saver for global model global_saver = Saver( save_prefix="{0}.final".format(os.path.join(args.save_to, "ACmodel")), num_max_keeping=attack_configs["num_kept_checkpoints"]) # the Global variable of USE_GPU is mainly used for environments GlobalNames.SEED = attack_configs["seed"] GlobalNames.USE_GPU = args.use_gpu torch.manual_seed(GlobalNames.SEED) # build vocabulary and data iterator for env with open(attack_configs["victim_configs"], "r") as victim_f: victim_configs = yaml.load(victim_f) data_configs = victim_configs["data_configs"] src_vocab = Vocabulary(**data_configs["vocabularies"][0]) trg_vocab = Vocabulary(**data_configs["vocabularies"][1]) data_set = ZipDataset( TextLineDataset( data_path=data_configs["train_data"][0], vocabulary=src_vocab, ), TextLineDataset( data_path=data_configs["train_data"][1], vocabulary=trg_vocab, ), shuffle=attack_configs["shuffle"] ) # we build the parallel data sets and iterate inside a thread # global model variables (trg network to save the results) global_attacker = attacker.Attacker(src_vocab.max_n_words, **attacker_model_configs) global_attacker = global_attacker.cpu() global_attacker.share_memory() if args.share_optim: # initiate optimizer and set to share mode optimizer = Optimizer( name=attacker_optimizer_configs["optimizer"], model=global_attacker, lr=attacker_optimizer_configs["learning_rate"], grad_clip=attacker_optimizer_configs["grad_clip"], optim_args=attacker_optimizer_configs["optimizer_params"]) optimizer.optim.share_memory() # Build scheduler for optimizer if needed if attacker_optimizer_configs['schedule_method'] is not None: if attacker_optimizer_configs['schedule_method'] == "loss": scheduler = ReduceOnPlateauScheduler( optimizer=optimizer, **attacker_optimizer_configs["scheduler_configs"]) elif attacker_optimizer_configs['schedule_method'] == "noam": scheduler = NoamScheduler( optimizer=optimizer, **attacker_optimizer_configs['scheduler_configs']) elif attacker_optimizer_configs["schedule_method"] == "rsqrt": scheduler = RsqrtScheduler( optimizer=optimizer, **attacker_optimizer_configs["scheduler_configs"]) else: WARN("Unknown scheduler name {0}. Do not use lr_scheduling.". 
format(attacker_optimizer_configs['schedule_method'])) scheduler = None else: scheduler = None else: optimizer = None scheduler = None # load from checkpoint for global model global_saver.load_latest(model=global_attacker, optim=optimizer, lr_scheduler=scheduler) if args.use_gpu: # collect available devices and distribute env on the available gpu device = "cuda" devices = [] for i in range(torch.cuda.device_count()): devices += ["cuda:%d" % i] print("available gpus:", devices) else: device = "cpu" devices = [device] process = [] counter = mp.Value("i", 0) lock = mp.Lock() # for multiple attackers update INFO("extract near candidates") _, _ = load_or_extract_near_vocab( config_path=attack_configs["victim_configs"], model_path=attack_configs["victim_model"], init_perturb_rate=attack_configs["init_perturb_rate"], save_to=os.path.join(args.save_to, "near_vocab"), save_to_full=os.path.join(args.save_to, "full_near_vocab"), top_reserve=12, emit_as_id=True) # train(0, device, args, counter, lock, # attack_configs, discriminator_configs, # src_vocab, trg_vocab, data_set, # global_attacker, attacker_configs, # optimizer, scheduler, # global_saver) # valid(args.n, device, args, # attack_configs, discriminator_configs, # src_vocab, trg_vocab, data_set, # global_attacker, attacker_configs, counter) # run multiple training process of local attacker to update global one for rank in range(args.n): print("initialize training thread on cuda:%d" % (rank + 1)) p = mp.Process(target=train, args=(rank, "cuda:%d" % (rank + 1), args, counter, lock, attack_configs, discriminator_configs, src_vocab, trg_vocab, data_set, global_attacker, attacker_configs, optimizer, scheduler, global_saver)) p.start() process.append(p) # run the dev thread for initiation print("initialize dev thread on cuda:0") p = mp.Process(target=valid, args=(0, "cuda:0", args, attack_configs, discriminator_configs, src_vocab, trg_vocab, data_set, global_attacker, attacker_configs, counter)) p.start() process.append(p) for p in process: p.join()
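# A small hedged check (assumption, illustrative only, not part of the original code):
# before spawning the training processes it can be useful to confirm that the global
# attacker's parameters really live in shared memory, since worker processes would
# otherwise update private copies instead of the global model.
def assert_shared(model):
    for name, param in model.named_parameters():
        assert param.is_shared(), "parameter {0} is not in shared memory".format(name)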
def run(): # default actor threads as 1 os.environ["OMP_NUM_THREADS"] = "1" mp = _mp.get_context('spawn') args = parser.parse_args() if not os.path.exists(args.save_to): os.mkdir(args.save_to) # load reinforce configs with open(args.config_path, "r") as f, \ open(os.path.join(args.save_to, "current_reinforce.yaml"), "w") as current_configs: INFO("load reinforce configures") configs = yaml.load(f, Loader=yaml.FullLoader) yaml.dump(configs, current_configs) reinforce_configs = configs["reinforce_configs"] agent_configs = configs["agent_configs"] rephraser_model_configs = agent_configs["rephraser_model_configs"] rephraser_optimizer_configs = agent_configs["rephraser_optimizer_configs"] annunciator_configs = configs["annunciator_configs"] # the Global variable of USE_GPU is mainly used for environments GlobalNames.SEED = reinforce_configs["seed"] GlobalNames.USE_GPU = args.use_gpu torch.manual_seed(GlobalNames.SEED) # build vocabulary and data iterator for env with open(reinforce_configs["victim_configs"], "r") as victim_f: victim_configs = yaml.load(victim_f, Loader=yaml.FullLoader) data_configs = victim_configs["data_configs"] src_vocab = Vocabulary(**data_configs["vocabularies"][0]) trg_vocab = Vocabulary(**data_configs["vocabularies"][1]) data_set = ZipDataset( TextLineDataset(data_path=data_configs["train_data"][0], vocabulary=src_vocab, max_len=data_configs["max_len"][0]), TextLineDataset(data_path=data_configs["train_data"][1], vocabulary=trg_vocab, max_len=data_configs["max_len"][1]), shuffle=reinforce_configs["shuffle"] ) # we build the parallel data sets and iterate inside a thread # collect range of action space: _, _, limit_dist = load_or_extract_near_vocab( config_path=reinforce_configs["victim_configs"], model_path=reinforce_configs["victim_model"], init_perturb_rate=reinforce_configs["init_perturb_rate"], save_to=os.path.join(args.save_to, "near_vocab"), save_to_full=os.path.join(args.save_to, "full_near_vocab"), top_reserve=12, emit_as_id=True, use_max_dist=True) # build global SACAgent for the final policy (on cpu) global_agent = rephraser.SACAgent( device="cpu", d_word_vec=victim_configs["model_configs"]["d_word_vec"], d_model=rephraser_model_configs["d_model"], limit_dist=limit_dist, dropout=rephraser_model_configs["dropout"], learnable_temperature=rephraser_model_configs["learnable_temperature"], init_temperature=rephraser_model_configs["init_temperature"], rephraser_optimizer_configs=rephraser_optimizer_configs, save_to=args.save_to, num_kept_checkpoints=reinforce_configs["num_kept_checkpoints"] ) # load global ckp (only for the AC parameters) if needed global_step = global_agent.load_model() print("global_step:", global_step) if global_step != 0: INFO("restarting at step %d"%global_step) else: # save the initial model global_agent.save_model(global_step) global_summary_writer = SummaryWriter( log_dir=os.path.join(args.save_to, "global_summary")) global_replay_buffer = SharedReplayBuffer( max_sen_len=data_configs["max_len"][0], state_dim=victim_configs["model_configs"]["d_word_vec"], action_dim=victim_configs["model_configs"]["d_word_vec"], capacity=reinforce_configs["replay_buffer_capacity"]) # test_for_throughput(global_replay_buffer) # make global objects shared memory global_agent.share_memory() global_replay_buffer.share_memory() # collect available devices and distribute env on the available gpu if args.use_gpu: device = "cuda" devices = [] for i in range(torch.cuda.device_count()): devices += ["cuda:%d" % i] print("available gpus:", devices) else: device = "cpu" 
devices = [device] # initialize global parameters for the current training trial global_step_lock = mp.Lock() global_step_counter = mp.Value("i", global_step) # "i is the type code for c_int" # train_thread(0, device, args, # reinforce_configs, annunciator_configs, # src_vocab, trg_vocab, data_set, # global_agent, global_replay_buffer, # global_step_counter, global_step_lock, # agent_configs) # valid_thread(device, args, # reinforce_configs, annunciator_configs, # src_vocab, trg_vocab, data_set, # global_agent, global_replay_buffer, # global_step_counter, global_step_lock, # agent_configs) # build multi thread for learning and validation process = [] for rank in range(args.n): print("initialize training thread on cuda:%d" % (rank+1)) p=mp.Process( target=train_thread, args=(rank, "cuda:%d"%(rank+1), args, reinforce_configs, annunciator_configs, src_vocab, trg_vocab, data_set, global_agent, global_replay_buffer, global_step_counter, global_step_lock, agent_configs) ) p.start() process.append(p) # run the dev thread for initiation print("initialize dev thread on cuda:0") p = mp.Process( target=valid_thread, args=("cuda:0", args, reinforce_configs, annunciator_configs, src_vocab, trg_vocab, data_set, global_agent, global_replay_buffer, global_step_counter, global_step_lock, agent_configs) ) p.start() process.append(p) for p in process: p.join()
def tune(flags): """ flags: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # ================================================================================== # # Initialization for training on different devices # - CPU/GPU # - Single/Distributed Constants.USE_GPU = flags.use_gpu if flags.multi_gpu: dist.distributed_init(flags.shared_dir) world_size = dist.get_world_size() rank = dist.get_rank() local_rank = dist.get_local_rank() else: world_size = 1 rank = 0 local_rank = 0 if Constants.USE_GPU: torch.cuda.set_device(local_rank) Constants.CURRENT_DEVICE = "cuda:{0}".format(local_rank) else: Constants.CURRENT_DEVICE = "cpu" # If not root_rank, close logging # else write log of training to file. if rank == 0: write_log_to_file( os.path.join(flags.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S"))) else: close_logging() # ================================================================================== # # Parsing configuration files # - Load default settings # - Load pre-defined settings # - Load user-defined settings configs = prepare_configs(flags.config_path, flags.predefined_config) data_configs = configs['data_configs'] model_configs = configs['model_configs'] optimizer_configs = configs['optimizer_configs'] training_configs = configs['training_configs'] INFO(pretty_configs(configs)) Constants.SEED = training_configs['seed'] set_seed(Constants.SEED) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary.build_from_file(**data_configs['vocabularies'][0]) vocab_tgt = Vocabulary.build_from_file(**data_configs['vocabularies'][1]) Constants.EOS = vocab_src.eos Constants.PAD = vocab_src.pad Constants.BOS = vocab_src.bos # bt tag dataset train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], is_train_dataset=True), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_tgt, max_len=data_configs['max_len'][1], is_train_dataset=True)) training_iterator = DataIterator( dataset=train_bitext_dataset, batch_size=training_configs["batch_size"], use_bucket=training_configs['use_bucket'], buffer_size=training_configs['buffer_size'], batching_func=training_configs['batching_key'], world_size=world_size, rank=rank) INFO('Done. Elapsed time {0}'.format(timer.toc())) # ================================ Begin ======================================== # # Build Model & Optimizer # We would do steps below on after another # 1. build models & criterion # 2. move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. build learning rate scheduler if needed # 6. load checkpoints if needed # 0. Initial lrate = optimizer_configs['learning_rate'] model_collections = Collections() checkpoint_saver = Saver( save_prefix="{0}.ckpt".format( os.path.join(flags.saveto, flags.model_name)), num_max_keeping=training_configs['num_kept_checkpoints']) best_model_prefix = os.path.join( flags.saveto, flags.model_name + Constants.MY_BEST_MODEL_SUFFIX) best_model_saver = Saver( save_prefix=best_model_prefix, num_max_keeping=training_configs['num_kept_best_model']) # 1. 
Build Model & Criterion INFO('Building model...') timer.tic() nmt_model = build_model(n_src_vocab=vocab_src.max_n_words, n_tgt_vocab=vocab_tgt.max_n_words, padding_idx=vocab_src.pad, vocab_src=vocab_src, vocab_tgt=vocab_tgt, **model_configs) INFO(nmt_model) critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'], padding_idx=vocab_tgt.pad) INFO(critic) # 2. Move to GPU if Constants.USE_GPU: nmt_model = nmt_model.cuda() critic = critic.cuda() # 3. Load pretrained model if needed load_pretrained_model(nmt_model, flags.pretrain_path, exclude_prefix=flags.pretrain_exclude_prefix, device=Constants.CURRENT_DEVICE) # froze_parameters froze_params(nmt_model, flags.froze_config) INFO('Done. Elapsed time {0}'.format(timer.toc())) # 4. Build optimizer INFO('Building Optimizer...') if not flags.multi_gpu: optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'], update_cycle=training_configs['update_cycle']) else: optim = dist.DistributedOptimizer( name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'], device_id=local_rank) # 5. Build scheduler for optimizer if needed scheduler = build_scheduler( schedule_method=optimizer_configs['schedule_method'], optimizer=optim, scheduler_configs=optimizer_configs['scheduler_configs']) # 6. build moving average if training_configs['moving_average_method'] is not None: ma = MovingAverage( moving_average_method=training_configs['moving_average_method'], named_params=nmt_model.named_parameters(), alpha=training_configs['moving_average_alpha']) else: ma = None INFO('Done. Elapsed time {0}'.format(timer.toc())) # Reload from latest checkpoint if flags.reload: checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma, device=Constants.CURRENT_DEVICE) # broadcast parameters and optimizer states if world_size > 1: INFO("Broadcasting model parameters...") dist.broadcast_parameters(params=nmt_model.state_dict()) INFO("Broadcasting optimizer states...") dist.broadcast_optimizer_state(optimizer=optim.optim) INFO('Done.') # ================================================================================== # # Prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [1])[-1] bad_count = model_collections.get_collection("bad_count", [0])[-1] oom_count = model_collections.get_collection("oom_count", [0])[-1] is_early_stop = model_collections.get_collection("is_early_stop", [ False, ])[-1] train_loss_meter = AverageMeter() sent_per_sec_meter = TimeMeter() tok_per_sec_meter = TimeMeter() update_cycle = training_configs['update_cycle'] grad_denom = 0 train_loss = 0.0 cum_n_words = 0 valid_loss = best_valid_loss = float('inf') if rank == 0: summary_writer = SummaryWriter(log_dir=flags.log_path) else: summary_writer = None sent_per_sec_meter.start() tok_per_sec_meter.start() INFO('Begin training...') while True: if summary_writer is not None: summary_writer.add_scalar("Epoch", (eidx + 1), uidx) # Build iterator and progress bar training_iter = training_iterator.build_generator() if rank == 0: training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format( eidx, uidx), total=len(training_iterator), unit="sents") else: training_progress_bar = None # INFO(Constants.USE_BT) for batch in training_iter: # bt attrib data seqs_x, seqs_y = 
batch batch_size = len(seqs_x) cum_n_words += sum(len(s) for s in seqs_y) try: # Prepare data x, y = prepare_data(seqs_x, seqs_y, cuda=Constants.USE_GPU) loss = compute_forward( model=nmt_model, critic=critic, seqs_x=x, seqs_y=y, eval=False, normalization=1.0, norm_by_words=training_configs["norm_by_words"]) update_cycle -= 1 grad_denom += batch_size train_loss += loss except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') oom_count += 1 else: raise e # When update_cycle becomes 0, it means end of one batch. Several things will be done: # - update parameters # - reset update_cycle and grad_denom, update uidx # - learning rate scheduling # - update moving average if update_cycle == 0: # 0. reduce variables if world_size > 1: grad_denom = dist.all_reduce_py(grad_denom) train_loss = dist.all_reduce_py(train_loss) cum_n_words = dist.all_reduce_py(cum_n_words) # 1. update parameters optim.step(denom=grad_denom) optim.zero_grad() if training_progress_bar is not None: training_progress_bar.update(grad_denom) training_progress_bar.set_description( ' - (Epc {}, Upd {}) '.format(eidx, uidx)) postfix_str = 'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f}), '.format( train_loss, valid_loss, best_valid_loss) training_progress_bar.set_postfix_str(postfix_str) # 2. learning rate scheduling if scheduler is not None and optimizer_configs[ "schedule_method"] != "loss": scheduler.step(global_step=uidx) # 3. update moving average if ma is not None and eidx >= training_configs[ 'moving_average_start_epoch']: ma.step() # 4. update meters train_loss_meter.update(train_loss, grad_denom) sent_per_sec_meter.update(grad_denom) tok_per_sec_meter.update(cum_n_words) # 5. reset accumulated variables, update uidx update_cycle = training_configs['update_cycle'] grad_denom = 0 uidx += 1 cum_n_words = 0.0 train_loss = 0.0 else: continue # ================================================================================== # # Display some information if should_trigger_by_steps( uidx, eidx, every_n_step=training_configs['disp_freq']): lrate = list(optim.get_lrate())[0] if summary_writer is not None: summary_writer.add_scalar( "Speed(sents/sec)", scalar_value=sent_per_sec_meter.ave, global_step=uidx) summary_writer.add_scalar( "Speed(words/sec)", scalar_value=tok_per_sec_meter.ave, global_step=uidx) summary_writer.add_scalar( "train_loss", scalar_value=train_loss_meter.ave, global_step=uidx) summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx) # Reset Meters sent_per_sec_meter.reset() tok_per_sec_meter.reset() train_loss_meter.reset() # ================================================================================== # # Saving checkpoints # if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=flags.debug): # model_collections.add_to_collection("uidx", uidx) # model_collections.add_to_collection("eidx", eidx) # model_collections.add_to_collection("bad_count", bad_count) # # if not is_early_stop: # if rank == 0: # checkpoint_saver.save(global_step=uidx, # model=nmt_model, # optim=optim, # lr_scheduler=scheduler, # collections=model_collections, # ma=ma) torch.save(nmt_model.state_dict(), best_model_prefix + ".final") if training_progress_bar is not None: training_progress_bar.close() eidx += 1 if eidx > training_configs["max_epochs"]: break
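# A standalone sketch of the update_cycle logic above (assumption: plain PyTorch optimizer
# API rather than the project's Optimizer wrapper, for illustration only). Gradients are
# accumulated over `update_cycle` mini-batches and divided by the number of accumulated
# sentences before each optimizer step, mirroring optim.step(denom=grad_denom).
import torch

def accumulation_step(model, optimizer, batches, update_cycle):
    optimizer.zero_grad()
    grad_denom = 0
    for i, (x, y) in enumerate(batches, start=1):
        loss = torch.nn.functional.mse_loss(model(x), y, reduction="sum")
        loss.backward()
        grad_denom += x.size(0)
        if i % update_cycle == 0:
            for p in model.parameters():
                if p.grad is not None:
                    p.grad.div_(grad_denom)  # normalize accumulated gradients
            optimizer.step()
            optimizer.zero_grad()
            grad_denom = 0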
def train(FLAGS): """ FLAGS: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # ================================================================================== # # Initialization for training on different devices # - CPU/GPU # - Single/Distributed GlobalNames.USE_GPU = FLAGS.use_gpu if FLAGS.multi_gpu: if hvd is None or distributed is None: ERROR("Distributed training is disable. Please check the installation of Horovod.") hvd.init() world_size = hvd.size() rank = hvd.rank() local_rank = hvd.local_rank() else: world_size = 1 rank = 0 local_rank = 0 if GlobalNames.USE_GPU: torch.cuda.set_device(local_rank) CURRENT_DEVICE = "cuda:{0}".format(local_rank) else: CURRENT_DEVICE = "cpu" # If not root_rank, close logging if rank != 0: close_logging() # write log of training to file. if rank == 0: write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S"))) # ================================================================================== # # Parsing configuration files config_path = os.path.abspath(FLAGS.config_path) with open(config_path.strip()) as f: configs = yaml.load(f) INFO(pretty_configs(configs)) # Add default configs configs = default_baseline_configs(configs) data_configs = configs['data_configs'] model_configs = configs['model_configs'] optimizer_configs = configs['optimizer_configs'] training_configs = configs['training_configs'] GlobalNames.SEED = training_configs['seed'] set_seed(GlobalNames.SEED) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary(**data_configs["vocabularies"][0]) vocab_tgt = Vocabulary(**data_configs["vocabularies"][1]) actual_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"]) train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], ), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_tgt, max_len=data_configs['max_len'][1], ) ) valid_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['valid_data'][0], vocabulary=vocab_src, ), TextLineDataset(data_path=data_configs['valid_data'][1], vocabulary=vocab_tgt, ) ) training_iterator = DataIterator(dataset=train_bitext_dataset, batch_size=training_configs["batch_size"], use_bucket=training_configs['use_bucket'], buffer_size=actual_buffer_size, batching_func=training_configs['batching_key'], world_size=world_size, rank=rank) valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=training_configs['valid_batch_size'], use_bucket=True, buffer_size=100000, numbering=True, world_size=world_size, rank=rank) bleu_scorer = SacreBLEUScorer(reference_path=data_configs["bleu_valid_reference"], num_refs=data_configs["num_refs"], lang_pair=data_configs["lang_pair"], sacrebleu_args=training_configs["bleu_valid_configs"]['sacrebleu_args'], postprocess=training_configs["bleu_valid_configs"]['postprocess'] ) INFO('Done. Elapsed time {0}'.format(timer.toc())) lrate = optimizer_configs['learning_rate'] is_early_stop = False # ================================ Begin ======================================== # # Build Model & Optimizer # We would do steps below on after another # 1. build models & criterion # 2. move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. 
build learning rate scheduler if needed # 6. load checkpoints if needed # 0. Initial model_collections = Collections() best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX) checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_checkpoints'] ) best_model_saver = Saver(save_prefix=best_model_prefix, num_max_keeping=training_configs['num_kept_best_model']) INFO('Building model...') timer.tic() nmt_model = build_model(n_src_vocab=vocab_src.max_n_words, n_tgt_vocab=vocab_tgt.max_n_words, **model_configs) INFO(nmt_model) critic = NMTCriterion(label_smoothing=model_configs['label_smoothing']) INFO(critic) INFO('Done. Elapsed time {0}'.format(timer.toc())) # 2. Move to GPU if GlobalNames.USE_GPU: nmt_model = nmt_model.cuda() critic = critic.cuda() # 3. Load pretrained model if needed load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE) # 4. Build optimizer INFO('Building Optimizer...') optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'], distributed=True if world_size > 1 else False, update_cycle=training_configs['update_cycle'] ) # 5. Build scheduler for optimizer if needed if optimizer_configs['schedule_method'] is not None: if optimizer_configs['schedule_method'] == "loss": scheduler = ReduceOnPlateauScheduler(optimizer=optim, **optimizer_configs["scheduler_configs"] ) elif optimizer_configs['schedule_method'] == "noam": scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs']) else: WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method'])) scheduler = None else: scheduler = None # 6. build moving average if training_configs['moving_average_method'] is not None: ma = MovingAverage(moving_average_method=training_configs['moving_average_method'], named_params=nmt_model.named_parameters(), alpha=training_configs['moving_average_alpha']) else: ma = None INFO('Done. 
Elapsed time {0}'.format(timer.toc())) # Reload from latest checkpoint if FLAGS.reload: checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) # broadcast parameters and optimizer states if world_size > 1: hvd.broadcast_parameters(params=nmt_model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer=optim.optim, root_rank=0) # ================================================================================== # # Prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [1])[-1] bad_count = model_collections.get_collection("bad_count", [0])[-1] oom_count = model_collections.get_collection("oom_count", [0])[-1] cum_n_samples = 0 cum_n_words = 0 best_valid_loss = 1.0 * 1e10 # Max Float update_cycle = training_configs['update_cycle'] grad_denom = 0 if rank == 0: summary_writer = SummaryWriter(log_dir=FLAGS.log_path) else: summary_writer = None # Timer for computing speed timer_for_speed = Timer() timer_for_speed.tic() INFO('Begin training...') while True: if summary_writer is not None: summary_writer.add_scalar("Epoch", (eidx + 1), uidx) # Build iterator and progress bar training_iter = training_iterator.build_generator() if rank == 0: training_progress_bar = tqdm(desc=' - (Epoch %d) ' % eidx, total=len(training_iterator), unit="sents" ) else: training_progress_bar = None for batch in training_iter: seqs_x, seqs_y = batch batch_size = len(seqs_x) cum_n_samples += batch_size cum_n_words += sum(len(s) for s in seqs_y) try: # Prepare data x, y = prepare_data(seqs_x, seqs_y, cuda=GlobalNames.USE_GPU) loss = compute_forward(model=nmt_model, critic=critic, seqs_x=x, seqs_y=y, eval=False, normalization=1.0, norm_by_words=training_configs["norm_by_words"]) update_cycle -= 1 grad_denom += batch_size except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') oom_count += 1 else: raise e # When update_cycle becomes 0, it means end of one batch. 
Several things will be done: # - update parameters # - reset update_cycle and grad_denom # - update uidx # - update moving average if update_cycle == 0: if world_size > 1: grad_denom = distributed.all_reduce(grad_denom) optim.step(denom=grad_denom) optim.zero_grad() if training_progress_bar is not None: training_progress_bar.update(grad_denom) update_cycle = training_configs['update_cycle'] grad_denom = 0 uidx += 1 if scheduler is None: pass elif optimizer_configs["schedule_method"] == "loss": scheduler.step(metric=best_valid_loss) else: scheduler.step(global_step=uidx) if ma is not None and eidx >= training_configs['moving_average_start_epoch']: ma.step() else: continue # ================================================================================== # # Display some information if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']): if world_size > 1: cum_n_words = sum(distributed.all_gather(cum_n_words)) cum_n_samples = sum(distributed.all_gather(cum_n_samples)) # words per second and sents per second words_per_sec = cum_n_words / (timer.toc(return_seconds=True)) sents_per_sec = cum_n_samples / (timer.toc(return_seconds=True)) lrate = list(optim.get_lrate())[0] if summary_writer is not None: summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx) summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx) summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx) # Reset timer timer.tic() cum_n_words = 0 cum_n_samples = 0 # ================================================================================== # # Loss Validation & Learning rate annealing if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=FLAGS.debug): valid_loss = loss_validation(model=nmt_model, critic=critic, valid_iterator=valid_iterator, rank=rank, world_size=world_size ) model_collections.add_to_collection("history_losses", valid_loss) min_history_loss = np.array(model_collections.get_collection("history_losses")).min() best_valid_loss = min_history_loss if summary_writer is not None: summary_writer.add_scalar("loss", valid_loss, global_step=uidx) summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx) # ================================================================================== # # BLEU Validation & Early Stop if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['bleu_valid_freq'], min_step=training_configs['bleu_valid_warmup'], debug=FLAGS.debug): valid_bleu = bleu_validation(uidx=uidx, valid_iterator=valid_iterator, batch_size=training_configs["bleu_valid_batch_size"], model=nmt_model, bleu_scorer=bleu_scorer, vocab_tgt=vocab_tgt, valid_dir=FLAGS.valid_path, max_steps=training_configs["bleu_valid_configs"]["max_steps"], beam_size=training_configs["bleu_valid_configs"]["beam_size"], alpha=training_configs["bleu_valid_configs"]["alpha"], world_size=world_size, rank=rank, ) model_collections.add_to_collection(key="history_bleus", value=valid_bleu) best_valid_bleu = float(np.array(model_collections.get_collection("history_bleus")).max()) if summary_writer is not None: summary_writer.add_scalar("bleu", valid_bleu, uidx) summary_writer.add_scalar("best_bleu", best_valid_bleu, uidx) # If model get new best valid bleu score if valid_bleu >= best_valid_bleu: bad_count = 0 if is_early_stop is False: if rank == 
0: # 1. save the best model torch.save(nmt_model.state_dict(), best_model_prefix + ".final") # 2. keep the best models seen so far best_model_saver.save(global_step=uidx, model=nmt_model, ma=ma) else: bad_count += 1 # At least one epoch should be traversed if bad_count >= training_configs['early_stop_patience'] and eidx > 0: is_early_stop = True WARN("Early Stop!") if summary_writer is not None: summary_writer.add_scalar("bad_count", bad_count, uidx) INFO("{0} Loss: {1:.2f} BLEU: {2:.2f} lrate: {3:6f} patience: {4}".format( uidx, valid_loss, valid_bleu, lrate, bad_count )) # ================================================================================== # # Saving checkpoints if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug): model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) model_collections.add_to_collection("bad_count", bad_count) if not is_early_stop: if rank == 0: checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) if training_progress_bar is not None: training_progress_bar.close() eidx += 1 if eidx > training_configs["max_epochs"]: break
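# ---------------------------------------------------------------------------- #
# The "noam" schedule selected above is commonly the inverse-square-root warmup
# schedule from "Attention Is All You Need":
#     lrate = d_model^{-0.5} * min(step^{-0.5}, step * warmup_steps^{-1.5})
# The helper below is a standalone sketch of that formula (hypothetical
# signature; it is not the project's NoamScheduler class).
def _demo_noam_lrate(step, d_model=512, warmup_steps=4000, scale=1.0):
    """Return the learning rate for a given global step under a Noam schedule."""
    step = max(step, 1)  # avoid step ** -0.5 blowing up at step 0
    return scale * (d_model ** -0.5) * min(step ** -0.5, step * warmup_steps ** -1.5)

# The rate rises roughly linearly during warmup and decays as 1/sqrt(step) after:
# _demo_noam_lrate(100) < _demo_noam_lrate(4000) > _demo_noam_lrate(40000)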
def train(FLAGS): """ FLAGS: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # write log of training to file. write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S"))) GlobalNames.USE_GPU = FLAGS.use_gpu if GlobalNames.USE_GPU: CURRENT_DEVICE = "cuda:0" else: CURRENT_DEVICE = "cpu" config_path = os.path.abspath(FLAGS.config_path) with open(config_path.strip()) as f: configs = yaml.load(f) INFO(pretty_configs(configs)) # Add default configs configs = default_configs(configs) data_configs = configs['data_configs'] model_configs = configs['model_configs'] optimizer_configs = configs['optimizer_configs'] training_configs = configs['training_configs'] GlobalNames.SEED = training_configs['seed'] set_seed(GlobalNames.SEED) best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary(**data_configs["vocabularies"][0]) vocab_tgt = Vocabulary(**data_configs["vocabularies"][1]) train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"]) train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"]) train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], ), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_tgt, max_len=data_configs['max_len'][1], ), shuffle=training_configs['shuffle'] ) valid_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['valid_data'][0], vocabulary=vocab_src, ), TextLineDataset(data_path=data_configs['valid_data'][1], vocabulary=vocab_tgt, ) ) training_iterator = DataIterator(dataset=train_bitext_dataset, batch_size=train_batch_size, use_bucket=training_configs['use_bucket'], buffer_size=train_buffer_size, batching_func=training_configs['batching_key']) valid_iterator = DataIterator(dataset=valid_bitext_dataset, batch_size=training_configs['valid_batch_size'], use_bucket=True, buffer_size=100000, numbering=True) bleu_scorer = SacreBLEUScorer(reference_path=data_configs["bleu_valid_reference"], num_refs=data_configs["num_refs"], lang_pair=data_configs["lang_pair"], sacrebleu_args=training_configs["bleu_valid_configs"]['sacrebleu_args'], postprocess=training_configs["bleu_valid_configs"]['postprocess'] ) INFO('Done. Elapsed time {0}'.format(timer.toc())) lrate = optimizer_configs['learning_rate'] is_early_stop = False # ================================ Begin ======================================== # # Build Model & Optimizer # We would do the steps below one after another # 1. build models & criterion # 2. move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. build learning rate scheduler if needed # 6. load checkpoints if needed # 0. Initial model_collections = Collections() checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)), num_max_keeping=training_configs['num_kept_checkpoints'] ) best_model_saver = Saver(save_prefix=best_model_prefix, num_max_keeping=training_configs['num_kept_best_model']) # 1. 
Build Model & Criterion INFO('Building model...') timer.tic() nmt_model = build_model(n_src_vocab=vocab_src.max_n_words, n_tgt_vocab=vocab_tgt.max_n_words, **model_configs) INFO(nmt_model) critic = NMTCriterion(label_smoothing=model_configs['label_smoothing']) INFO(critic) INFO('Done. Elapsed time {0}'.format(timer.toc())) # 2. Move to GPU if GlobalNames.USE_GPU: nmt_model = nmt_model.cuda() critic = critic.cuda() # 3. Load pretrained model if needed load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE) # 4. Build optimizer INFO('Building Optimizer...') optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'] ) # 5. Build scheduler for optimizer if needed if optimizer_configs['schedule_method'] is not None: if optimizer_configs['schedule_method'] == "loss": scheduler = ReduceOnPlateauScheduler(optimizer=optim, **optimizer_configs["scheduler_configs"] ) elif optimizer_configs['schedule_method'] == "noam": scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs']) else: WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method'])) scheduler = None else: scheduler = None # 6. build EMA if training_configs['ema_decay'] > 0.0: ema = ExponentialMovingAverage(named_params=nmt_model.named_parameters(), decay=training_configs['ema_decay']) else: ema = None INFO('Done. Elapsed time {0}'.format(timer.toc())) # Reload from latest checkpoint if FLAGS.reload: checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections) # ================================================================================== # # Prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [0])[-1] bad_count = model_collections.get_collection("bad_count", [0])[-1] summary_writer = SummaryWriter(log_dir=FLAGS.log_path) cum_samples = 0 cum_words = 0 best_valid_loss = 1.0 * 1e10 # Max Float saving_files = [] # Timer for computing speed timer_for_speed = Timer() timer_for_speed.tic() INFO('Begin training...') while True: summary_writer.add_scalar("Epoch", (eidx + 1), uidx) # Build iterator and progress bar training_iter = training_iterator.build_generator() training_progress_bar = tqdm(desc=' - (Epoch %d) ' % eidx, total=len(training_iterator), unit="sents" ) for batch in training_iter: uidx += 1 if scheduler is None: pass elif optimizer_configs["schedule_method"] == "loss": scheduler.step(metric=best_valid_loss) else: scheduler.step(global_step=uidx) seqs_x, seqs_y = batch n_samples_t = len(seqs_x) n_words_t = sum(len(s) for s in seqs_y) cum_samples += n_samples_t cum_words += n_words_t training_progress_bar.update(n_samples_t) optim.zero_grad() # Prepare data for seqs_x_t, seqs_y_t in split_shard(seqs_x, seqs_y, split_size=training_configs['update_cycle']): x, y = prepare_data(seqs_x_t, seqs_y_t, cuda=GlobalNames.USE_GPU) loss = compute_forward(model=nmt_model, critic=critic, seqs_x=x, seqs_y=y, eval=False, normalization=n_samples_t, norm_by_words=training_configs["norm_by_words"]) optim.step() if ema is not None: ema.step() # ================================================================================== # # Display some information if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']): # words per second and sents per second words_per_sec = cum_words / 
(timer.toc(return_seconds=True)) sents_per_sec = cum_samples / (timer.toc(return_seconds=True)) lrate = list(optim.get_lrate())[0] summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx) summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx) summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) # Reset timer timer.tic() cum_words = 0 cum_samples = 0 # ================================================================================== # # Saving checkpoints if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug): model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) model_collections.add_to_collection("bad_count", bad_count) if not is_early_stop: checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ema=ema) # ================================================================================== # # Loss Validation & Learning rate annealing if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=FLAGS.debug): if ema is not None: origin_state_dict = deepcopy(nmt_model.state_dict()) nmt_model.load_state_dict(ema.state_dict(), strict=False) valid_loss = loss_validation(model=nmt_model, critic=critic, valid_iterator=valid_iterator, ) model_collections.add_to_collection("history_losses", valid_loss) min_history_loss = np.array(model_collections.get_collection("history_losses")).min() summary_writer.add_scalar("loss", valid_loss, global_step=uidx) summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx) best_valid_loss = min_history_loss if ema is not None: nmt_model.load_state_dict(origin_state_dict) del origin_state_dict # ================================================================================== # # BLEU Validation & Early Stop if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['bleu_valid_freq'], min_step=training_configs['bleu_valid_warmup'], debug=FLAGS.debug): if ema is not None: origin_state_dict = deepcopy(nmt_model.state_dict()) nmt_model.load_state_dict(ema.state_dict(), strict=False) valid_bleu = bleu_validation(uidx=uidx, valid_iterator=valid_iterator, batch_size=training_configs["bleu_valid_batch_size"], model=nmt_model, bleu_scorer=bleu_scorer, vocab_tgt=vocab_tgt, valid_dir=FLAGS.valid_path, max_steps=training_configs["bleu_valid_configs"]["max_steps"], beam_size=training_configs["bleu_valid_configs"]["beam_size"], alpha=training_configs["bleu_valid_configs"]["alpha"] ) model_collections.add_to_collection(key="history_bleus", value=valid_bleu) best_valid_bleu = float(np.array(model_collections.get_collection("history_bleus")).max()) summary_writer.add_scalar("bleu", valid_bleu, uidx) summary_writer.add_scalar("best_bleu", best_valid_bleu, uidx) # If model get new best valid bleu score if valid_bleu >= best_valid_bleu: bad_count = 0 if is_early_stop is False: # 1. save the best model torch.save(nmt_model.state_dict(), best_model_prefix + ".final") # 2. 
keep the best models seen so far best_model_saver.save(global_step=uidx, model=nmt_model) else: bad_count += 1 # At least one epoch should be traversed if bad_count >= training_configs['early_stop_patience'] and eidx > 0: is_early_stop = True WARN("Early Stop!") summary_writer.add_scalar("bad_count", bad_count, uidx) if ema is not None: nmt_model.load_state_dict(origin_state_dict) del origin_state_dict INFO("{0} Loss: {1:.2f} BLEU: {2:.2f} lrate: {3:6f} patience: {4}".format( uidx, valid_loss, valid_bleu, lrate, bad_count )) training_progress_bar.close() eidx += 1 if eidx > training_configs["max_epochs"]: break
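# ---------------------------------------------------------------------------- #
# The EMA logic above keeps a shadow copy of the parameters, updates it after
# every optimizer step, temporarily loads it for loss/BLEU validation, and then
# restores the original weights. Below is a minimal sketch of that pattern
# (hypothetical class and helper names; not the project's
# ExponentialMovingAverage implementation).
import copy

import torch


class _DemoEMA(object):
    def __init__(self, named_params, decay=0.999):
        self.decay = decay
        self.params = {name: p for name, p in named_params}
        # Shadow copies start as clones of the current parameter values.
        self.shadow = {name: p.detach().clone() for name, p in self.params.items()}

    def step(self):
        # shadow = decay * shadow + (1 - decay) * current parameter value
        with torch.no_grad():
            for name, p in self.params.items():
                self.shadow[name].mul_(self.decay).add_(p.detach(), alpha=1.0 - self.decay)

    def state_dict(self):
        return self.shadow


def _demo_validate_with_ema(model, ema, validate_fn):
    """Evaluate with EMA weights loaded, then restore the original parameters."""
    origin_state_dict = copy.deepcopy(model.state_dict())
    model.load_state_dict(ema.state_dict(), strict=False)
    try:
        return validate_fn(model)
    finally:
        model.load_state_dict(origin_state_dict)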
def train(flags): """ flags: saveto: str reload: store_true config_path: str pretrain_path: str, default="" model_name: str log_path: str """ # ================================================================================== # # Initialization for training on different devices # - CPU/GPU # - Single/Distributed Constants.USE_GPU = flags.use_gpu if flags.multi_gpu: dist.distributed_init(flags.shared_dir) world_size = dist.get_world_size() rank = dist.get_rank() local_rank = dist.get_local_rank() else: world_size = 1 rank = 0 local_rank = 0 if Constants.USE_GPU: torch.cuda.set_device(local_rank) Constants.CURRENT_DEVICE = "cuda:{0}".format(local_rank) else: Constants.CURRENT_DEVICE = "cpu" # If not root_rank, close logging # else write log of training to file. if rank == 0: write_log_to_file( os.path.join(flags.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S"))) else: close_logging() # ================================================================================== # # Parsing configuration files # - Load default settings # - Load pre-defined settings # - Load user-defined settings configs = prepare_configs(flags.config_path, flags.predefined_config) data_configs = configs['data_configs'] model_configs = configs['model_configs'] optimizer_configs = configs['optimizer_configs'] training_configs = configs['training_configs'] INFO(pretty_configs(configs)) # use odc if training_configs['use_odc'] is True: ave_best_k = check_odc_config(training_configs) else: ave_best_k = 0 Constants.SEED = training_configs['seed'] set_seed(Constants.SEED) timer = Timer() # ================================================================================== # # Load Data INFO('Loading data...') timer.tic() # Generate target dictionary vocab_src = Vocabulary.build_from_file(**data_configs['vocabularies'][0]) vocab_tgt = Vocabulary.build_from_file(**data_configs['vocabularies'][1]) Constants.EOS = vocab_src.eos Constants.PAD = vocab_src.pad Constants.BOS = vocab_src.bos train_bitext_dataset = ZipDataset( TextLineDataset(data_path=data_configs['train_data'][0], vocabulary=vocab_src, max_len=data_configs['max_len'][0], is_train_dataset=True), TextLineDataset(data_path=data_configs['train_data'][1], vocabulary=vocab_tgt, max_len=data_configs['max_len'][1], is_train_dataset=True)) valid_bitext_dataset = ZipDataset( TextLineDataset( data_path=data_configs['valid_data'][0], vocabulary=vocab_src, is_train_dataset=False, ), TextLineDataset(data_path=data_configs['valid_data'][1], vocabulary=vocab_tgt, is_train_dataset=False)) training_iterator = DataIterator( dataset=train_bitext_dataset, batch_size=training_configs["batch_size"], use_bucket=training_configs['use_bucket'], buffer_size=training_configs['buffer_size'], batching_func=training_configs['batching_key'], world_size=world_size, rank=rank) valid_iterator = DataIterator( dataset=valid_bitext_dataset, batch_size=training_configs['valid_batch_size'], use_bucket=True, buffer_size=100000, numbering=True, world_size=world_size, rank=rank) bleu_scorer = SacreBLEUScorer( reference_path=data_configs["bleu_valid_reference"], num_refs=data_configs["num_refs"], lang_pair=data_configs["lang_pair"], sacrebleu_args=training_configs["bleu_valid_configs"] ['sacrebleu_args'], postprocess=training_configs["bleu_valid_configs"]['postprocess']) INFO('Done. Elapsed time {0}'.format(timer.toc())) # ================================ Begin ======================================== # # Build Model & Optimizer # We would do steps below on after another # 1. build models & criterion # 2. 
move models & criterion to gpu if needed # 3. load pre-trained model if needed # 4. build optimizer # 5. build learning rate scheduler if needed # 6. load checkpoints if needed # 0. Initial lrate = optimizer_configs['learning_rate'] model_collections = Collections() checkpoint_saver = Saver( save_prefix="{0}.ckpt".format( os.path.join(flags.saveto, flags.model_name)), num_max_keeping=training_configs['num_kept_checkpoints']) best_model_prefix = os.path.join( flags.saveto, flags.model_name + Constants.MY_BEST_MODEL_SUFFIX) best_k_saver = BestKSaver( save_prefix="{0}.best_k_ckpt".format( os.path.join(flags.saveto, flags.model_name)), num_max_keeping=training_configs['num_kept_best_k_checkpoints']) # 1. Build Model & Criterion INFO('Building model...') timer.tic() nmt_model = build_model(n_src_vocab=vocab_src.max_n_words, n_tgt_vocab=vocab_tgt.max_n_words, padding_idx=vocab_src.pad, vocab_src=vocab_src, **model_configs) INFO(nmt_model) # build teacher model teacher_model, teacher_model_path = get_teacher_model( training_configs, model_configs, vocab_src, vocab_tgt, flags) # build critic critic = CombinationCriterion(model_configs['loss_configs'], padding_idx=vocab_tgt.pad, teacher=teacher_model) # INFO(critic) critic.INFO() # 2. Move to GPU if Constants.USE_GPU: nmt_model = nmt_model.cuda() critic = critic.cuda() # 3. Load pretrained model if needed load_pretrained_model(nmt_model, flags.pretrain_path, exclude_prefix=None, device=Constants.CURRENT_DEVICE) INFO('Done. Elapsed time {0}'.format(timer.toc())) # 4. Build optimizer INFO('Building Optimizer...') if not flags.multi_gpu: optim = Optimizer(name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'], update_cycle=training_configs['update_cycle']) else: optim = dist.DistributedOptimizer( name=optimizer_configs['optimizer'], model=nmt_model, lr=lrate, grad_clip=optimizer_configs['grad_clip'], optim_args=optimizer_configs['optimizer_params'], device_id=local_rank) # 5. Build scheduler for optimizer if needed scheduler = build_scheduler( schedule_method=optimizer_configs['schedule_method'], optimizer=optim, scheduler_configs=optimizer_configs['scheduler_configs']) # 6. build moving average ma = build_ma(training_configs, nmt_model.named_parameters()) INFO('Done. 
Elapsed time {0}'.format(timer.toc())) # Reload from latest checkpoint if flags.reload: checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma, device=Constants.CURRENT_DEVICE) # broadcast parameters and optimizer states if world_size > 1: INFO("Broadcasting model parameters...") dist.broadcast_parameters(params=nmt_model.state_dict()) INFO("Broadcasting optimizer states...") dist.broadcast_optimizer_state(optimizer=optim.optim) INFO('Done.') # ================================================================================== # # Prepare training eidx = model_collections.get_collection("eidx", [0])[-1] uidx = model_collections.get_collection("uidx", [1])[-1] bad_count = model_collections.get_collection("bad_count", [0])[-1] oom_count = model_collections.get_collection("oom_count", [0])[-1] is_early_stop = model_collections.get_collection("is_early_stop", [ False, ])[-1] teacher_patience = model_collections.get_collection( "teacher_patience", [training_configs['teacher_patience']])[-1] train_loss_meter = AverageMeter() train_loss_dict_meter = AverageMeterDict(critic.get_critic_name()) sent_per_sec_meter = TimeMeter() tok_per_sec_meter = TimeMeter() update_cycle = training_configs['update_cycle'] grad_denom = 0 train_loss = 0.0 cum_n_words = 0 train_loss_dict = dict() valid_loss = best_valid_loss = float('inf') if rank == 0: summary_writer = SummaryWriter(log_dir=flags.log_path) else: summary_writer = None sent_per_sec_meter.start() tok_per_sec_meter.start() INFO('Begin training...') while True: if summary_writer is not None: summary_writer.add_scalar("Epoch", (eidx + 1), uidx) # Build iterator and progress bar training_iter = training_iterator.build_generator() if rank == 0: training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format( eidx, uidx), total=len(training_iterator), unit="sents") else: training_progress_bar = None for batch in training_iter: seqs_x, seqs_y = batch batch_size = len(seqs_x) cum_n_words += sum(len(s) for s in seqs_y) try: # Prepare data x, y = prepare_data(seqs_x, seqs_y, cuda=Constants.USE_GPU) loss, loss_dict = compute_forward( model=nmt_model, critic=critic, seqs_x=x, seqs_y=y, eval=False, normalization=1.0, norm_by_words=training_configs["norm_by_words"]) update_cycle -= 1 grad_denom += batch_size train_loss += loss train_loss_dict = add_dict_value(train_loss_dict, loss_dict) except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') oom_count += 1 else: raise e # When update_cycle becomes 0, it means end of one batch. Several things will be done: # - update parameters # - reset update_cycle and grad_denom, update uidx # - learning rate scheduling # - update moving average if update_cycle == 0: # 0. reduce variables if world_size > 1: grad_denom = dist.all_reduce_py(grad_denom) train_loss = dist.all_reduce_py(train_loss) train_loss_dict = dist.all_reduce_py(train_loss_dict) cum_n_words = dist.all_reduce_py(cum_n_words) # 1. update parameters optim.step(denom=grad_denom) optim.zero_grad() if training_progress_bar is not None: training_progress_bar.update(grad_denom) training_progress_bar.set_description( ' - (Epc {}, Upd {}) '.format(eidx, uidx)) postfix_str = 'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f}), '.format( train_loss, valid_loss, best_valid_loss) for critic_name, loss_value in train_loss_dict.items(): postfix_str += (critic_name + ': {:.2f}, ').format(loss_value) training_progress_bar.set_postfix_str(postfix_str) # 2. 
learning rate scheduling if scheduler is not None and optimizer_configs[ "schedule_method"] != "loss": scheduler.step(global_step=uidx) # 3. update moving average if ma is not None and eidx >= training_configs[ 'moving_average_start_epoch']: ma.step() # 4. update meters train_loss_meter.update(train_loss, grad_denom) train_loss_dict_meter.update(train_loss_dict, grad_denom) sent_per_sec_meter.update(grad_denom) tok_per_sec_meter.update(cum_n_words) # 5. reset accumulated variables, update uidx update_cycle = training_configs['update_cycle'] grad_denom = 0 uidx += 1 cum_n_words = 0.0 train_loss = 0.0 train_loss_dict = dict() else: continue # ================================================================================== # # Display some information if should_trigger_by_steps( uidx, eidx, every_n_step=training_configs['disp_freq']): lrate = list(optim.get_lrate())[0] if summary_writer is not None: summary_writer.add_scalar( "Speed(sents/sec)", scalar_value=sent_per_sec_meter.ave, global_step=uidx) summary_writer.add_scalar( "Speed(words/sec)", scalar_value=tok_per_sec_meter.ave, global_step=uidx) summary_writer.add_scalar( "train_loss", scalar_value=train_loss_meter.ave, global_step=uidx) # add loss for every critic if flags.display_loss_detail: combination_loss = train_loss_dict_meter.value for key, value in combination_loss.items(): summary_writer.add_scalar(key, scalar_value=value, global_step=uidx) summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx) summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx) # Reset Meters sent_per_sec_meter.reset() tok_per_sec_meter.reset() train_loss_meter.reset() train_loss_dict_meter.reset() # ================================================================================== # # Loss Validation & Learning rate annealing if should_trigger_by_steps( global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'], debug=flags.debug): with cache_parameters(nmt_model): valid_loss, valid_loss_dict = loss_evaluation( model=nmt_model, critic=critic, valid_iterator=valid_iterator, rank=rank, world_size=world_size) if scheduler is not None and optimizer_configs[ "schedule_method"] == "loss": scheduler.step(metric=valid_loss) model_collections.add_to_collection("history_losses", valid_loss) min_history_loss = np.array( model_collections.get_collection("history_losses")).min() best_valid_loss = min_history_loss if summary_writer is not None: summary_writer.add_scalar("loss", valid_loss, global_step=uidx) summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx) # ================================================================================== # # BLEU Validation & Early Stop if should_trigger_by_steps( global_step=uidx, n_epoch=eidx, every_n_step=training_configs['bleu_valid_freq'], min_step=training_configs['bleu_valid_warmup'], debug=flags.debug): with cache_parameters(nmt_model): valid_bleu = bleu_evaluation( uidx=uidx, valid_iterator=valid_iterator, batch_size=training_configs["bleu_valid_batch_size"], model=nmt_model, bleu_scorer=bleu_scorer, vocab_src=vocab_src, vocab_tgt=vocab_tgt, valid_dir=flags.valid_path, max_steps=training_configs["bleu_valid_configs"] ["max_steps"], beam_size=training_configs["bleu_valid_configs"] ["beam_size"], alpha=training_configs["bleu_valid_configs"]["alpha"], world_size=world_size, rank=rank, ) model_collections.add_to_collection(key="history_bleus", value=valid_bleu) best_valid_bleu = float( np.array(model_collections.get_collection( 
"history_bleus")).max()) if summary_writer is not None: summary_writer.add_scalar("bleu", valid_bleu, uidx) summary_writer.add_scalar("best_bleu", best_valid_bleu, uidx) # If model get new best valid bleu score if valid_bleu >= best_valid_bleu: bad_count = 0 if is_early_stop is False: if rank == 0: # 1. save the best model torch.save(nmt_model.state_dict(), best_model_prefix + ".final") else: bad_count += 1 # At least one epoch should be traversed if bad_count >= training_configs[ 'early_stop_patience'] and eidx > 0: is_early_stop = True WARN("Early Stop!") exit(0) if rank == 0: best_k_saver.save(global_step=uidx, metric=valid_bleu, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) # ODC if training_configs['use_odc'] is True: if valid_bleu >= best_valid_bleu: pass # choose method to generate teachers from checkpoints # - best # - ave_k_best # - ma if training_configs['teacher_choice'] == 'ma': teacher_params = ma.export_ma_params() elif training_configs['teacher_choice'] == 'best': teacher_params = nmt_model.state_dict() elif "ave_best" in training_configs['teacher_choice']: if best_k_saver.num_saved >= ave_best_k: teacher_params = average_checkpoints( best_k_saver.get_all_ckpt_path() [-ave_best_k:]) else: teacher_params = nmt_model.state_dict() else: raise ValueError( "can not support teacher choice %s" % training_configs['teacher_choice']) torch.save(teacher_params, teacher_model_path) del teacher_params teacher_patience = 0 critic.set_use_KD(False) else: teacher_patience += 1 if teacher_patience >= training_configs[ 'teacher_refresh_warmup']: teacher_params = torch.load( teacher_model_path, map_location=Constants.CURRENT_DEVICE) teacher_model.load_state_dict(teacher_params, strict=False) del teacher_params critic.reset_teacher(teacher_model) critic.set_use_KD(True) if summary_writer is not None: summary_writer.add_scalar("bad_count", bad_count, uidx) info_str = "{0} Loss: {1:.2f} BLEU: {2:.2f} lrate: {3:6f} patience: {4} ".format( uidx, valid_loss, valid_bleu, lrate, bad_count) for key, value in valid_loss_dict.items(): info_str += (key + ': {0:.2f} '.format(value)) INFO(info_str) # ================================================================================== # # Saving checkpoints if should_trigger_by_steps( uidx, eidx, every_n_step=training_configs['save_freq'], debug=flags.debug): model_collections.add_to_collection("uidx", uidx) model_collections.add_to_collection("eidx", eidx) model_collections.add_to_collection("bad_count", bad_count) model_collections.add_to_collection("teacher_patience", teacher_patience) if not is_early_stop: if rank == 0: checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler, collections=model_collections, ma=ma) if training_progress_bar is not None: training_progress_bar.close() eidx += 1 if eidx > training_configs["max_epochs"]: break