def initialize_distributed_model(self):
    if self.local_rank != -1:
        if not self.fp16:
            self.model = DDP(self.model)
        else:
            flat_dist_call([param.data for param in self.model.parameters()],
                           torch.distributed.broadcast, (0,))
    elif self.n_gpu > 1:
        self.model = nn.DataParallel(self.model)
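# For context: flat_dist_call, used throughout this file, is apex's
# bucketed-collective helper (apex.parallel.distributed.flat_dist_call).
# The sketch below is a minimal reimplementation written from my reading of
# the apex code, for illustration only; the repo imports the real one.
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

def _flat_dist_call_sketch(tensors, call, extra_args=None):
    # Group tensors by type so each flat buffer is homogeneous.
    buckets = {}
    for tensor in tensors:
        buckets.setdefault(tensor.type(), []).append(tensor)
    for bucket in buckets.values():
        # One collective per bucket over a single contiguous buffer.
        coalesced = _flatten_dense_tensors(bucket)
        if extra_args is not None:
            call(coalesced, *extra_args)
        else:
            call(coalesced)
        # apex special-cases a bare dist.all_reduce by dividing through by
        # the global world size; wrapping all_reduce (as dummy_all_reduce
        # does below) deliberately skips this implicit averaging.
        if call is dist.all_reduce:
            coalesced /= dist.get_world_size()
        # Copy the reduced values back into the original tensors in place.
        for buf, synced in zip(bucket,
                               _unflatten_dense_tensors(coalesced, bucket)):
            buf.copy_(synced)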
def comm_model(self):
    # For each of the two teams: the designated source rank broadcasts its
    # current model; the other ranks in the group receive the peer team's
    # weights into `another_model`.
    for i in range(2):
        if self.rank == self.team_ranks[i][0]:
            flat_dist_call([param.data for param in self.model.parameters()],
                           torch.distributed.broadcast,
                           (i * self.team_size, self.model_comm_groups[i]))
        elif self.rank in self.team_ranks[i]:
            flat_dist_call([param.data for param in self.another_model.parameters()],
                           torch.distributed.broadcast,
                           (i * self.team_size, self.model_comm_groups[i]))
    self.another_model_updated = True
def allreduce(self):
    # Wrapping dist.all_reduce hides its identity from flat_dist_call, which
    # special-cases `call is dist.all_reduce` by dividing by the global world
    # size; here we want to average over the team instead.
    def dummy_all_reduce(*args, **kwargs):
        return dist.all_reduce(*args, **kwargs)

    params = [param.grad.data for param in self.model.parameters()]
    flat_dist_call(params, dummy_all_reduce,
                   (torch.distributed.ReduceOp.SUM, self.team_group))
    for param in params:
        param /= self.team_size
def allreduce_persistent(self):
    # Same wrapper trick as in allreduce(); see the illustration below.
    def dummy_all_reduce(*args, **kwargs):
        return dist.all_reduce(*args, **kwargs)

    # Average the full state_dict (sorted by key for a deterministic order
    # across ranks), which also covers non-parameter buffers.
    all_params = list(map(lambda x: x[1], sorted(self.model.state_dict().items())))
    flat_dist_call(all_params, dummy_all_reduce,
                   (torch.distributed.ReduceOp.SUM, self.team_group))
    for param in all_params:
        param /= self.team_size
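# allreduce() above averages only gradients, while allreduce_persistent()
# averages every entry of state_dict(), which also includes non-parameter
# buffers. A small stand-alone illustration of the difference (hypothetical
# module, not part of this repo):
import torch.nn as nn

_bn = nn.BatchNorm1d(4)
# Only the affine parameters show up in named_parameters()...
assert [n for n, _ in _bn.named_parameters()] == ['weight', 'bias']
# ...but state_dict() also carries the running statistics.
assert list(_bn.state_dict().keys()) == [
    'weight', 'bias', 'running_mean', 'running_var', 'num_batches_tracked']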
def comm_model(self):
    for i in range(2):
        root = self.comm_model_group_ranks[i][0]
        teams = set(range(root, root + self.team_size))
        if self.rank in teams:
            flat_dist_call(
                [param.data for param in self.model.parameters()],
                torch.distributed.broadcast, (i * self.team_size, ))
        else:
            flat_dist_call(
                [param.data for param in self.another_model.parameters()],
                torch.distributed.broadcast, (i * self.team_size, ))
def train(self):
    model = self.model
    self.time_start = time.time()
    self.criterion = torch.nn.CrossEntropyLoss().cuda()
    self.loss_stats = util.Stats()
    self.acc_stats = util.Stats()
    self.dloss_stats = util.Stats()
    self.it = 0

    # Start every rank in the team from the team leader's weights.
    flat_dist_call([param.data for param in model.parameters()],
                   dist.broadcast, (self.team_leader, self.team_group))
    self.optimizer.zero_grad()

    if self.options.distillation_overlap:
        assert self.options.distillation
        self.train_loop_distillation_overlap()
    else:
        for epoch in range(self.options.epoch):
            self.epoch = epoch
            if not self.options.distillation:
                self.train_loop_plain()
            else:
                if self.options.equalize_data:
                    self.train_loop_distillation_equalized()
                else:
                    self.train_loop_distillation_original()
            self.allreduce_persistent()
            val_acc = self.evaluate()
            if self.team_rank == 0:
                print('accuracy epoch #{}: {}'.format(epoch, val_acc))

    if self.team_rank == 0:
        elapsed = time.time() - self.time_start
        print('cost: {:.3f}'.format(elapsed))
    if self.team_rank == 0 and not self.options.no_output_model:
        if self.options.distillation:
            torch.save(
                model.state_dict(),
                os.path.join(self.options.out, 'weight.{}.pth'.format(self.team)))
        else:
            torch.save(model.state_dict(),
                       os.path.join(self.options.out, 'weight.pth'))
def prepare_model_and_optimizer(args, device):
    # Prepare model
    config = modeling.BertConfig.from_json_file(args.config_file)

    # Padding for divisibility by 8 (e.g. a vocab of 30522 becomes 30528)
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)

    modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
    model = modeling.BertForPreTraining(config)

    if args.disable_weight_tying:
        print("WARNING!!!!!!! Disabling weight tying for this run")
        print("BEFORE ", model.cls.predictions.decoder.weight
              is model.bert.embeddings.word_embeddings.weight)
        model.cls.predictions.decoder.weight = torch.nn.Parameter(
            model.cls.predictions.decoder.weight.clone().detach())
        print("AFTER ", model.cls.predictions.decoder.weight
              is model.bert.embeddings.word_embeddings.weight)
        assert model.cls.predictions.decoder.weight \
            is not model.bert.embeddings.word_embeddings.weight

    checkpoint = None
    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1 and not args.init_checkpoint:
            model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")]
            args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip())
                                    for x in model_names])
        global_step = args.resume_step if not args.init_checkpoint else 0

        if not args.init_checkpoint:
            checkpoint = torch.load(os.path.join(args.output_dir,
                                                 "ckpt_{}.pt".format(global_step)),
                                    map_location="cpu")
        else:
            checkpoint = torch.load(args.init_checkpoint, map_location="cpu")

        model.load_state_dict(checkpoint['model'], strict=False)
        if args.phase2 and not args.init_checkpoint:
            global_step -= args.phase1_end_step
        if is_main_process():
            print("resume step from ", args.resume_step)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}]

    optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate)
    lr_scheduler = PolyWarmUpScheduler(optimizer,
                                       warmup=args.warmup_proportion,
                                       total_steps=args.max_steps,
                                       degree=1)
    if args.fp16:
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale="dynamic",
                                              cast_model_outputs=torch.float16)
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale=args.loss_scale,
                                              cast_model_outputs=torch.float16)
        amp._amp_state.loss_scalers[0]._loss_scale = args.init_loss_scale

    model.checkpoint_activations(args.checkpoint_activations)

    if args.resume_from_checkpoint:
        if args.phase2 or args.init_checkpoint:
            keys = list(checkpoint['optimizer']['state'].keys())
            # Override hyperparameters from previous checkpoint
            for key in keys:
                checkpoint['optimizer']['state'][key]['step'] = global_step
            for iter, item in enumerate(checkpoint['optimizer']['param_groups']):
                checkpoint['optimizer']['param_groups'][iter]['step'] = global_step
                checkpoint['optimizer']['param_groups'][iter]['t_total'] = args.max_steps
                checkpoint['optimizer']['param_groups'][iter]['warmup'] = args.warmup_proportion
                checkpoint['optimizer']['param_groups'][iter]['lr'] = args.learning_rate
        optimizer.load_state_dict(checkpoint['optimizer'])  # , strict=False)

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(model, message_size=250000000,
                        gradient_predivide_factor=get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0,))
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    criterion = BertPretrainingCriterion(config.vocab_size)

    if args.disable_weight_tying:
        # Sanity check that the new (untied) parameter is in the optimizer
        print("SANITY CHECK OPTIMIZER: ",
              id(model.module.cls.predictions.decoder.weight)
              in [id(g) for g in optimizer.param_groups[0]['params']])
        assert id(model.module.cls.predictions.decoder.weight) \
            in [id(g) for g in optimizer.param_groups[0]['params']]

    return model, optimizer, lr_scheduler, checkpoint, global_step, criterion
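# The disable_weight_tying path above breaks BERT's embedding/decoder weight
# sharing by cloning the decoder weight into a fresh Parameter. A minimal
# stand-alone illustration of why the clone un-ties the tensors (toy sizes,
# not the repo's model):
import torch
import torch.nn as nn

_emb = nn.Embedding(10, 4)
_dec = nn.Linear(4, 10, bias=False)
_dec.weight = _emb.weight                                 # tied: one Parameter object
assert _dec.weight is _emb.weight
_dec.weight = nn.Parameter(_dec.weight.clone().detach())  # untied copy
assert _dec.weight is not _emb.weight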
def sync_params(self):
    assert self.distributed_enabled
    core = self._get_core()
    # A bare dist.all_reduce through flat_dist_call averages in place
    # (apex divides by the world size for that call).
    flat_dist_call([param.data for param in core.parameters()], dist.all_reduce)
    self.core.needs_refresh = True
def prepare_model_and_optimizer(args, device):
    # Prepare model
    config = BertConfig.from_json_file(args.config_file)

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
    model = BertForPreTraining(config)

    checkpoint = None
    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1 and not args.init_checkpoint:
            model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")]
            args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip())
                                    for x in model_names])
        global_step = args.resume_step if not args.init_checkpoint else 0

        if not args.init_checkpoint:
            checkpoint = torch.load(os.path.join(args.output_dir,
                                                 "ckpt_{}.pt".format(global_step)),
                                    map_location="cpu")
        else:
            checkpoint = torch.load(args.init_checkpoint, map_location="cpu")

        model.load_state_dict(checkpoint['model'], strict=False)
        if args.phase2:
            global_step -= args.phase1_end_step
        if is_main_process():
            print("resume step from ", args.resume_step)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}]

    optimizer = FusedLAMB(optimizer_grouped_parameters, lr=args.learning_rate)
    lr_scheduler = PolyWarmUpScheduler(optimizer,
                                       warmup=args.warmup_proportion,
                                       total_steps=args.max_steps)
    if args.fp16:
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale="dynamic")
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale=args.loss_scale)
        amp._amp_state.loss_scalers[0]._loss_scale = 2**20

    if args.resume_from_checkpoint:
        if args.phase2 or args.init_checkpoint:
            keys = list(checkpoint['optimizer']['state'].keys())
            # Override hyperparameters from previous checkpoint
            for key in keys:
                checkpoint['optimizer']['state'][key]['step'] = global_step
            for iter, item in enumerate(checkpoint['optimizer']['param_groups']):
                checkpoint['optimizer']['param_groups'][iter]['step'] = global_step
                checkpoint['optimizer']['param_groups'][iter]['t_total'] = args.max_steps
                checkpoint['optimizer']['param_groups'][iter]['warmup'] = args.warmup_proportion
                checkpoint['optimizer']['param_groups'][iter]['lr'] = args.learning_rate
        optimizer.load_state_dict(checkpoint['optimizer'])  # , strict=False)

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(model, message_size=250000000,
                        gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0, ))
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    return model, optimizer, lr_scheduler, checkpoint, global_step
def prepare_model_and_optimizer(args, device):
    # Prepare model
    config = modeling.BertConfig.from_json_file(args.config_file)

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)

    modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training
    model = modeling.BertForPreTraining(config)

    checkpoint = None
    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1 and not args.init_checkpoint:
            model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")]
            args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip())
                                    for x in model_names])
        global_step = args.resume_step if not args.init_checkpoint else 0

        if not args.init_checkpoint:
            checkpoint = torch.load(os.path.join(args.output_dir,
                                                 "ckpt_{}.pt".format(global_step)),
                                    map_location="cpu")
        else:
            checkpoint = torch.load(args.init_checkpoint, map_location="cpu")

        model.load_state_dict(checkpoint['model'], strict=False)
        if args.phase2 and not args.init_checkpoint:
            global_step -= args.phase1_end_step
        if is_main_process():
            print("resume step from ", args.resume_step)

    model.to(device)
    # BERT modeling uses weight sharing between word embedding and prediction decoder.
    # So make sure the storage is pointing properly even after model is moved to device.
    if args.use_habana:
        model.cls.predictions.decoder.weight = model.bert.embeddings.word_embeddings.weight

    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}]

    if args.use_habana:
        if args.use_fused_lamb:
            try:
                from hb_custom import FusedLamb
            except ImportError:
                raise ImportError("Please install hbopt.")
            optimizer = FusedLamb(optimizer_grouped_parameters, lr=args.learning_rate)
        else:
            optimizer = NVLAMB(optimizer_grouped_parameters, lr=args.learning_rate)
    else:
        if torch.cuda.is_available():
            optimizer = FusedLAMB(optimizer_grouped_parameters, lr=args.learning_rate)
        else:
            optimizer = NVLAMB(optimizer_grouped_parameters, lr=args.learning_rate)

    lr_scheduler = PolyWarmUpScheduler(optimizer,
                                       warmup=args.warmup_proportion,
                                       total_steps=args.max_steps)
    if args.fp16:
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale="dynamic",
                                              cast_model_outputs=torch.float16)
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale=args.loss_scale,
                                              cast_model_outputs=torch.float16)
        amp._amp_state.loss_scalers[0]._loss_scale = args.init_loss_scale

    model.checkpoint_activations(args.checkpoint_activations)

    if args.resume_from_checkpoint:
        if args.phase2 or args.init_checkpoint:
            keys = list(checkpoint['optimizer']['state'].keys())
            # Override hyperparameters from previous checkpoint
            for key in keys:
                checkpoint['optimizer']['state'][key]['step'] = global_step
            for iter, item in enumerate(checkpoint['optimizer']['param_groups']):
                checkpoint['optimizer']['param_groups'][iter]['step'] = global_step
                checkpoint['optimizer']['param_groups'][iter]['t_total'] = args.max_steps
                checkpoint['optimizer']['param_groups'][iter]['warmup'] = args.warmup_proportion
                checkpoint['optimizer']['param_groups'][iter]['lr'] = args.learning_rate
        optimizer.load_state_dict(checkpoint['optimizer'])  # , strict=False)

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            if not args.use_jit_trace:
                if args.use_habana:
                    model = DDP(model)
                else:
                    model = DDP(model, message_size=250000000,
                                gradient_predivide_factor=get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0, ))
    elif args.n_pu > 1:
        model = torch.nn.DataParallel(model)

    criterion = BertPretrainingCriterion(config.vocab_size)

    return model, optimizer, lr_scheduler, checkpoint, global_step, criterion
def prepare_model_and_optimizer(args, device):
    # Prepare model
    config = BertConfig.from_json_file(args.config_file)

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
    model = BertForPreTraining(config)

    checkpoint = None
    if not args.resume_from_checkpoint:
        global_step = 0
    else:
        if args.resume_step == -1:
            model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")]
            args.resume_step = max([int(x.split(".pt")[0].split("_")[1].strip())
                                    for x in model_names])
        global_step = args.resume_step

        checkpoint = torch.load(os.path.join(args.output_dir,
                                             "ckpt_{}.pt".format(global_step)),
                                map_location="cpu")
        model.load_state_dict(checkpoint["model"], strict=False)
        if args.phase2:
            global_step -= args.phase1_end_step
        if is_main_process():
            print("resume step from ", args.resume_step)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ["bias", "gamma", "beta", "LayerNorm"]

    # One parameter group per tensor so that each group can carry its name.
    optimizer_grouped_parameters = []
    names = []
    count = 1
    for n, p in param_optimizer:
        count += 1
        if not any(nd in n for nd in no_decay):
            optimizer_grouped_parameters.append({
                "params": [p],
                "weight_decay": 0.01,
                "name": n
            })
            names.append({"params": [n], "weight_decay": 0.01})
        if any(nd in n for nd in no_decay):
            optimizer_grouped_parameters.append({
                "params": [p],
                "weight_decay": 0.00,
                "name": n
            })
            names.append({"params": [n], "weight_decay": 0.00})

    optimizer = BertLAMB(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=args.max_steps)
    if args.fp16:
        if args.loss_scale == 0:
            # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            model, optimizer = amp.initialize(
                model, optimizer, opt_level="O2", loss_scale="dynamic",
                master_weights=not args.accumulate_into_fp16)
        else:
            # optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            model, optimizer = amp.initialize(
                model, optimizer, opt_level="O2", loss_scale=args.loss_scale,
                master_weights=not args.accumulate_into_fp16)
        amp._amp_state.loss_scalers[0]._loss_scale = 2**20

    if args.resume_from_checkpoint:
        if args.phase2:
            keys = list(checkpoint["optimizer"]["state"].keys())
            # Override hyperparameters from Phase 1
            for key in keys:
                checkpoint["optimizer"]["state"][key]["step"] = global_step
            for iter, item in enumerate(checkpoint["optimizer"]["param_groups"]):
                checkpoint["optimizer"]["param_groups"][iter]["t_total"] = args.max_steps
                checkpoint["optimizer"]["param_groups"][iter]["warmup"] = args.warmup_proportion
                checkpoint["optimizer"]["param_groups"][iter]["lr"] = args.learning_rate
        optimizer.load_state_dict(checkpoint["optimizer"])  # , strict=False)

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint["optimizer"])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint["master params"]):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(model, message_size=250000000,
                        gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0, ))
    elif args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    return model, optimizer, checkpoint, global_step
def prepare_model_and_optimizer(args, device):
    global_step = 0
    args.resume_step = 0
    checkpoint = None

    config = BertConfig.from_json_file(args.bert_config_path)
    config.fused_mha = args.fused_mha
    config.fused_gelu_bias = args.fused_gelu_bias
    config.dense_seq_output = args.dense_seq_output
    config.unpad = args.unpad
    config.pad = args.pad
    config.fuse_qkv = not args.disable_fuse_qkv
    config.fuse_scale = not args.disable_fuse_scale
    config.fuse_mask = not args.disable_fuse_mask
    config.fuse_dropout = args.enable_fuse_dropout
    config.apex_softmax = not args.disable_apex_softmax
    config.enable_stream = args.enable_stream
    if config.fuse_mask:
        config.apex_softmax = True
    if not config.pad:
        config.enable_stream = True
    if config.unpad:
        config.fused_mha = False

    # Padding for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)

    # Load from a PyTorch checkpoint - either given as init_checkpoint,
    # or picked up from output_dir if found
    if args.init_checkpoint is not None or found_resume_checkpoint(args):
        # Prepare model
        model = BertForPreTraining(config)
        if args.init_checkpoint is None:  # finding checkpoint in output_dir
            checkpoint_str = "phase2_ckpt_*.pt" if args.phase2 else "phase1_ckpt_*.pt"
            model_names = [f for f in glob.glob(os.path.join(args.output_dir, checkpoint_str))]
            global_step = max([int(x.split('.pt')[0].split('_')[-1].strip())
                               for x in model_names])
            args.resume_step = global_step  # used for throughput computation
            resume_init_checkpoint = os.path.join(args.output_dir,
                                                  checkpoint_str.replace("*", str(global_step)))
            print("Setting init checkpoint to %s - which is the latest in %s"
                  % (resume_init_checkpoint, args.output_dir))
            checkpoint = torch.load(resume_init_checkpoint, map_location="cpu")
        else:
            checkpoint = torch.load(args.init_checkpoint, map_location="cpu")["model"]

        # Fused MHA requires a remapping of checkpoint parameters
        if config.fused_mha:
            checkpoint_remapped = remap_attn_parameters(checkpoint)
            model.load_state_dict(checkpoint_remapped, strict=False)
        else:
            model.load_state_dict(checkpoint, strict=True)
    else:  # Load from TF checkpoint
        model = BertForPreTraining.from_pretrained(args.init_tf_checkpoint,
                                                   from_tf=True, config=config)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay_rate},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}]

    mlperf_logger.log_event(key=mlperf_logger.constants.OPT_BASE_LR,
                            value=args.learning_rate, sync=False)
    optimizer = FusedLAMB(optimizer_grouped_parameters,
                          lr=args.learning_rate,
                          betas=(args.opt_lamb_beta_1, args.opt_lamb_beta_2))
    mlperf_logger.log_event(key='opt_epsilon', value=optimizer.defaults['eps'],
                            sync=False)
    b1, b2 = optimizer.defaults['betas']
    mlperf_logger.log_event(key='opt_lamb_beta_1', value=b1, sync=False)
    mlperf_logger.log_event(key='opt_lamb_beta_2', value=b2, sync=False)
    mlperf_logger.log_event(key='opt_lamb_weight_decay_rate',
                            value=optimizer.defaults['weight_decay'], sync=False)

    if args.warmup_steps == 0:
        warmup_steps = int(args.max_steps * args.warmup_proportion)
        warmup_start = 0
    else:
        warmup_steps = args.warmup_steps
        warmup_start = args.start_warmup_step
    lr_scheduler = LinearWarmupPolyDecayScheduler(optimizer,
                                                  start_warmup_steps=warmup_start,
                                                  warmup_steps=warmup_steps,
                                                  total_steps=args.max_steps,
                                                  end_learning_rate=0.0,
                                                  degree=1.0)

    if args.fp16:
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale="dynamic")
        else:
            model, optimizer = amp.initialize(model, optimizer, opt_level="O2",
                                              loss_scale=args.loss_scale)
        amp._amp_state.loss_scalers[0]._loss_scale = float(os.getenv("INIT_LOSS_SCALE", 2**20))

    if found_resume_checkpoint(args):
        # Restores m,v states (only if resuming a checkpoint, not for
        # init_checkpoint and init_tf_checkpoint for now)
        optimizer.load_state_dict(checkpoint['optimizer'])

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(model, message_size=250000000,
                        gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0,))

    return model, optimizer, lr_scheduler, checkpoint, global_step
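# All of the prepare_model_and_optimizer variants above share roughly the
# same calling convention. A hypothetical driver sketch, kept as a comment
# because parse_args and the process-group setup shown here are assumptions
# about the surrounding scripts, not this repo's actual API:
#
#   args = parse_args()
#   if args.local_rank != -1:
#       torch.cuda.set_device(args.local_rank)
#       torch.distributed.init_process_group(backend="nccl")
#   device = torch.device("cuda", max(args.local_rank, 0))
#   model, optimizer, lr_scheduler, checkpoint, global_step = \
#       prepare_model_and_optimizer(args, device)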
def train_online_distillation_logit(self):
    global_step = self.snapshot.global_step or 0
    if self.is_main_process():
        print("SEED {}".format(self.args.seed))
        logger.info("***** Running training *****")
        # logger.info("  Num examples = %d", len(train_data))
        logger.info("  Batch size = %d", self.args.train_batch_size)
        print("  LR = ", self.args.learning_rate)
        print("  Online Distillation")
        print("Training. . .")

    self.model.train()
    average_loss = 0.0  # averaged loss every self.args.log_freq steps
    average_dloss_0 = 0.0
    average_dloss_1 = 0.0
    epoch = 0
    begin = None

    # Note: We loop infinitely over epochs; termination is handled via
    # iteration count
    rng = random.Random(self.args.data_seed)
    cnt = 0
    with ThreadPoolExecutor(1) as pool:
        while True:
            cnt += 1
            step = global_step
            if self.args.phase2:
                step += self.args.phase1_end_step
            if step < self.args.burnin_steps:
                dataset_future, f_start_id, files, data_file = \
                    self.init_dataloader(epoch, pool)
                use_same_data = False
            else:
                torch.manual_seed(self.args.data_seed + cnt)
                dataset_future, f_start_id, files, data_file = \
                    self.init_dataloader(epoch, pool, rng)
                use_same_data = True
            previous_file = data_file
            train_dataloader, _ = dataset_future.result(timeout=None)
            overflow_buf = torch.cuda.IntTensor([0])

            for f_id in range(f_start_id + 1, len(files)):
                logger.info("file no %s file %s" % (f_id, previous_file))
                dataset_future, data_file = \
                    self.update_dataloader(pool, f_id, files)
                previous_file = data_file

                for batch in train_dataloader:
                    if begin is None:
                        begin = time.time()
                    step = global_step
                    if self.args.phase2:
                        step += self.args.phase1_end_step
                    if step == self.args.burnin_steps and \
                            not use_same_data:
                        break
                    batch = [t.to(self.device) for t in batch]
                    _, _, _, masked_lm_labels, _ = batch
                    aout0 = None
                    aout1 = None
                    if step < self.args.burnin_steps:
                        loss = self.forward(self.model, batch)
                        dloss0 = torch.zeros(())
                        dloss1 = torch.zeros(())
                    else:
                        out0, out1 = self.forward(self.model, batch,
                                                  calc_loss=False)
                        mask = masked_lm_labels.view(-1)
                        c = out0.shape[-1]
                        # Send only the logits that are not masked
                        dout0 = out0.view(-1, c)
                        dout0 = dout0[mask != -1]
                        with torch.no_grad():
                            aout0 = dout0.detach().clone()
                            aout1 = out1.detach().clone()
                            flat_dist_call(
                                [aout0, aout1],
                                torch.distributed.all_reduce,
                                (torch.distributed.ReduceOp.SUM,
                                 self.equalize_data_group))
                            # Remove our own contribution so only the
                            # peers' logits remain as the teacher signal.
                            aout0 = aout0 * self.size - dout0
                            aout1 = aout1 * self.size - out1
                        loss = self.loss(out0, out1, batch)
                        dloss0 = \
                            self.compute_distillation_loss(dout0, aout0)
                        dloss1 = \
                            self.compute_distillation_loss(out1, aout1)
                        dloss = dloss0 + dloss1
                        loss = loss + \
                            self.args.distillation_weight * dloss
                    self.backward(loss)
                    self.all_reduce(overflow_buf)
                    global_step = self.take_optimizer_step(global_step)

                    average_loss += loss.item()
                    average_dloss_0 += dloss0.item()
                    average_dloss_1 += dloss1.item()
                    if global_step % self.args.log_freq == 0:
                        divisor = self.args.log_freq
                        if self.is_main_process():
                            print("Team: {} Step:{} Average Loss = {} "
                                  "Average dLoss = {} {}".format(
                                      self.team, global_step,
                                      average_loss / divisor,
                                      average_dloss_0 / divisor,
                                      average_dloss_1 / divisor))
                        average_loss = 0
                        average_dloss_0 = 0
                        average_dloss_1 = 0

                    if global_step >= self.args.max_steps or \
                            (global_step % self.args.num_steps_per_checkpoint) == 0:
                        if self.team_rank == 0:
                            # Save a trained model
                            logger.info("** ** Saving model ** **")
                            self.snapshot.save(global_step, f_id, files)
                    if global_step >= self.args.max_steps:
                        del train_dataloader
                        torch.distributed.barrier()
                        if torch.distributed.get_rank() == 0:
                            print("Total time taken {}".format(
                                time.time() - begin))
                        return self.args

                del train_dataloader
                # Make sure pool has finished and switch train_dataloader
                # NOTE: Will block until complete
                train_dataloader, data_file = dataset_future.result(
                    timeout=None)
                if step == self.args.burnin_steps and not use_same_data:
                    break
            epoch += 1
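# compute_distillation_loss is not shown in this excerpt. A minimal sketch
# of one common choice for logit distillation - temperature-softened KL
# divergence (Hinton et al., 2015) - purely as an assumption about its
# shape; the repo's actual loss may differ:
import torch.nn.functional as F

def _distillation_loss_sketch(student_logits, teacher_logits, temperature=1.0):
    # Teacher targets are treated as constants (no gradient flows to them).
    s = F.log_softmax(student_logits / temperature, dim=-1)
    t = F.softmax(teacher_logits.detach() / temperature, dim=-1)
    # "batchmean" matches the mathematical definition of KL divergence;
    # the T^2 factor keeps gradient magnitudes comparable across temperatures.
    return F.kl_div(s, t, reduction="batchmean") * temperature ** 2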
def prepare_snapshot(self):
    self.snapshot = Snapshot(self.args, self.model, self.another_model,
                             self.optimizer, self.team)
    # Start every rank in the local group from the team master's weights.
    flat_dist_call([param.data for param in self.model.parameters()],
                   torch.distributed.broadcast,
                   (self.team_master, self.local_group))
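# The group handles used above (team_group, model_comm_groups, local_group,
# equalize_data_group, ...) are created elsewhere in the repo. A hypothetical
# sketch of how two equal-size teams could be carved out of the world with
# torch.distributed.new_group; the names and rank layout are assumptions:
import torch.distributed as dist

def _build_team_groups_sketch(team_size):
    world_size = dist.get_world_size()
    assert world_size == 2 * team_size
    # Team 0 owns ranks [0, team_size), team 1 owns [team_size, 2*team_size).
    team_ranks = [list(range(i * team_size, (i + 1) * team_size))
                  for i in range(2)]
    # new_group is collective: every rank must call it for every group,
    # even for groups it does not belong to.
    team_groups = [dist.new_group(ranks) for ranks in team_ranks]
    my_team = dist.get_rank() // team_size
    return team_ranks, team_groups, my_team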