def prepare_model(args, device):
    # Prepare model
    config = BertConfig.from_json_file(args.bert_config_path)

    # Pad the vocabulary for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)
        print('padded vocab size to: {}'.format(config.vocab_size))

    # Set some options that the config file is expected to have (but don't need
    # to be set properly at this point)
    config.pad = False
    config.unpad = False
    config.dense_seq_output = False
    config.fused_mha = False
    config.fused_gelu_bias = False
    config.fuse_qkv = False
    config.fuse_scale = False
    config.fuse_mask = False
    config.fuse_dropout = False
    config.apex_softmax = False
    config.enable_stream = False
    if config.fuse_mask:
        config.apex_softmax = True
    if not config.pad:
        config.enable_stream = True
    if config.unpad:
        config.fused_mha = False

    # Load from a TF checkpoint
    model = BertForPreTraining.from_pretrained(args.tf_checkpoint,
                                               from_tf=True,
                                               config=config)

    return model
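# Hedged usage sketch (not part of the original script): prepare_model only
# needs an args namespace with `bert_config_path` and `tf_checkpoint` set; the
# values below are illustrative assumptions.
#
#   from argparse import Namespace
#   args = Namespace(bert_config_path='bert_config.json',
#                    tf_checkpoint='tf_ckpt/model.ckpt')
#   model = prepare_model(args, device=torch.device('cuda'))
#
# Note the vocab padding above: e.g. the standard BERT vocab size 30522 would
# be padded to 30528, the next multiple of 8, which keeps GEMM shapes
# Tensor Core friendly.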
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The input train corpus.")
    parser.add_argument(
        "--bert_model",
        default=None,
        type=str,
        required=True,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    # Note: the original defined --do_lower_case twice, which makes argparse
    # raise an error; the two definitions are merged here.
    parser.add_argument(
        "--do_lower_case",
        action='store_true',
        help="Whether to lower case the input text. Set this flag for uncased "
        "models, leave it unset for cased models.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help="Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument(
        "--on_memory",
        action='store_true',
        help="Whether to load train samples into memory or use disk")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on GPUs")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a "
        "backward/update pass.")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of
        # synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    # There is no --do_eval flag in this script, so training is the only mode;
    # the original checked `args.do_eval`, which does not exist on the namespace.
    if not args.do_train:
        raise ValueError("`do_train` must be set: training is the only implemented mode.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                              do_lower_case=args.do_lower_case)

    #train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        print("Loading Train Dataset", args.train_file)
        train_dataset = BERTDataset(args.train_file,
                                    tokenizer,
                                    seq_len=args.max_seq_length,
                                    corpus_lines=None,
                                    on_memory=args.on_memory)
        num_train_optimization_steps = int(
            len(train_dataset) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            #TODO: check if this works with the current data generator from disk
            # that relies on next(file) (it doesn't return items back by index)
            train_sampler = DistributedSampler(train_dataset)
        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids,
                             is_next)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with the special warmup BERT uses;
                        # if args.fp16 is False, BertAdam handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(
                            global_step / num_train_optimization_steps,
                            args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    # Save a trained model
    logger.info("** ** * Saving fine-tuned model ** ** * ")
    model_to_save = model.module if hasattr(
        model, 'module') else model  # Only save the model itself
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    if args.do_train:
        torch.save(model_to_save.state_dict(), output_model_file)
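# `warmup_linear` is referenced in the fp16 branch above but not defined in
# this file. In the original pytorch-pretrained-bert examples it is the
# linear warmup / linear decay schedule; a minimal sketch under that assumption:
def warmup_linear(x, warmup=0.002):
    # x is the fraction of training completed (global_step / total_steps).
    # Ramp the LR multiplier up linearly during warmup, then decay it
    # linearly toward zero over the rest of training.
    if x < warmup:
        return x / warmup
    return 1.0 - x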
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument(
        "--bert_model",
        default='bert-base-multilingual-cased',
        type=str,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    # parser.add_argument("--do_eval",
    #                     action='store_true',
    #                     help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size",
                        default=2,
                        type=int,
                        help="Total batch size for training.")
    # parser.add_argument("--eval_batch_size",
    #                     default=2,
    #                     type=int,
    #                     help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help="Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on GPUs")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a "
        "backward/update pass.")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--visdom',
                        action='store_true',
                        help='Use visdom for loss visualization')
    parser.add_argument('--check_saved_model',
                        action='store_true',
                        help='Resume from the latest per-epoch checkpoint found in output_dir')
    parser.add_argument(
        '--last_final_epoch',
        type=int,
        default=-1,
        help="Use this when a previous run already finished its final epoch and "
        "you want to continue training from it.\n"
        "If train_epoch was previously set to 3, pass 3 here, not 2.")

    args = parser.parse_args()
    print(args)

    if args.visdom:
        import visdom
        viz = visdom.Visdom()  # visualize the loss with visdom

    os.makedirs(args.output_dir, exist_ok=True)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of
        # synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train:
        raise ValueError(
            "Training is currently the only implemented execution option. Please set `do_train`."
        )

    # A non-empty output_dir is only an error when we are not resuming from a
    # checkpoint stored there (the directory itself was created above).
    resuming = args.check_saved_model or args.last_final_epoch != -1
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and not resuming:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))

    tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                              do_lower_case=False)

    processor = DataProcessor()
    label_list = processor.get_labels()

    num_train_optimization_steps = None
    if args.do_train:
        print("Loading Train Dataset", args.data_dir)
        train_examples = processor.get_train_examples(args.data_dir)
        train_dataset = LazyDataset(train_examples, args.max_seq_length,
                                    tokenizer)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            train_sampler = DistributedSampler(train_dataset)
        num_train_optimization_steps = int(
            len(train_dataset) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model, resuming from a saved checkpoint when one is requested and found
    loaded_epoch = -1
    saved_model_path = None
    if args.last_final_epoch != -1:
        last_model = os.path.join(args.output_dir, WEIGHTS_NAME)
        if os.path.exists(last_model):
            saved_model_path = last_model
            loaded_epoch = args.last_final_epoch - 1
    elif args.check_saved_model:
        for epoch in range(int(args.num_train_epochs)):
            tmp = os.path.join(args.output_dir,
                               (f"weight_on_ep{epoch}_" + WEIGHTS_NAME))
            if os.path.exists(tmp):
                saved_model_path = tmp
                loaded_epoch = epoch

    if saved_model_path is not None:
        logger.info(f"Loading saved model {saved_model_path}")
        config_file = os.path.join(args.output_dir, CONFIG_NAME)
        config = BertConfig(config_file)
        logger.info("Model config {}".format(config))
        model = BertForPreTraining(config)
        model.load_state_dict(torch.load(saved_model_path))
    else:
        loaded_epoch = -1
        model = BertForPreTraining.from_pretrained(args.bert_model)

    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    }]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    if args.visdom:
        # define the base visdom figures
        vis_title = f'Baseline on {len(train_dataset)} dataset'
        vis_legend = ['LM Loss', 'Click Loss', 'Total Loss']
        iter_plot = create_vis_plot(viz, 'Iteration', 'Loss', vis_title,
                                    vis_legend)
        epoch_plot = create_vis_plot(viz, 'Epoch', 'Loss', vis_title,
                                     vis_legend)

    # if args.do_eval:
    #     eval_examples = processor.get_dev_examples(args.data_dir)
    #
    #     logger.info("***** Running evaluation *****")
    #     logger.info("  Num examples = %d", len(eval_examples))
    #     logger.info("  Batch size = %d", args.eval_batch_size)
    #
    #     eval_data = LazyDatasetClassifier(eval_examples, label_list, args.max_seq_length, tokenizer)
    #     # Run prediction for full data
    #     """
    #     cur_tensors = (torch.tensor(f.input_ids),
    #                    torch.tensor(f.input_mask),
    #                    torch.tensor(f.segment_ids),
    #                    torch.tensor(f.lm_label_ids),
    #                    torch.tensor(f.label))
    #     """
    #     eval_sampler = SequentialSampler(eval_data)
    #     eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    #     save_eval_loss = []

    global_step = 0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)
        """
        cur_tensors = (torch.tensor(f.input_ids),
                       torch.tensor(f.input_mask),
                       torch.tensor(f.segment_ids),
                       torch.tensor(f.lm_label_ids),
                       torch.tensor(f.label))
        """
        save_loss = []
        save_epoch_loss = []
        save_step = int(len(train_dataloader) // 5)

        for epoch in trange((loaded_epoch + 1), int(args.num_train_epochs),
                            desc="Epoch"):
            # if args.do_eval and loaded_epoch != -1:
            #     model.eval()
            #     eval_loss, eval_accuracy = 0, 0
            #     nb_eval_steps, nb_eval_examples = 0, 0
            #
            #     for batch in tqdm(eval_dataloader, desc="Evaluating"):
            #         batch = tuple(t.to(device) for t in batch)
            #         input_ids, input_mask, segment_ids, label_ids = batch
            #
            #         with torch.no_grad():
            #             tmp_eval_loss = model(input_ids, segment_ids, input_mask, None, label_ids)
            #             prediction_scores, logits = model(input_ids, segment_ids, input_mask)
            #
            #         if n_gpu > 1:
            #             tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu.
            #
            #         logits = logits.detach().cpu().numpy()
            #         label_ids = label_ids.to('cpu').numpy()
            #         tmp_eval_accuracy = accuracy(logits, label_ids)
            #
            #         eval_loss += tmp_eval_loss.mean().item()
            #         eval_accuracy += tmp_eval_accuracy
            #
            #         nb_eval_examples += input_ids.size(0)
            #         nb_eval_steps += 1
            #
            #     eval_loss = eval_loss / nb_eval_steps
            #     eval_accuracy = eval_accuracy / nb_eval_examples
            #     result = {'eval_loss': eval_loss,
            #               'eval_accuracy': eval_accuracy,
            #               'global_step': global_step}
            #
            #     save_eval_loss.append(eval_loss)
            #
            #     output_eval_file = os.path.join(args.output_dir, f"Epoch_{epoch}_eval_results.txt")
            #     with open(output_eval_file, "w") as writer:
            #         logger.info(f"***** Eval results on Epoch {epoch} *****")
            #         for key in sorted(result.keys()):
            #             logger.info("  %s = %s", key, str(result[key]))
            #             writer.write("%s = %s\n" % (key, str(result[key])))

            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            tr_loss_ml = 0
            tr_loss_click = 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, label = batch
                # if global_step == 0:
                #     print(input_ids.shape, input_mask.shape, segment_ids.shape,
                #           lm_label_ids.shape, label.shape)
                loss, loss_ml, loss_click = model(input_ids, segment_ids,
                                                  input_mask, lm_label_ids,
                                                  label)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                    loss_ml = loss_ml.mean()
                    loss_click = loss_click.mean()
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                    loss_ml = loss_ml / args.gradient_accumulation_steps
                    loss_click = loss_click / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                tr_loss_ml += loss_ml.item()
                tr_loss_click += loss_click.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with the special warmup BERT uses;
                        # if args.fp16 is False, BertAdam handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(
                            global_step / num_train_optimization_steps,
                            args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

                    if global_step != 0 and global_step % save_step == 0:
                        # save 5 times per epoch
                        logger.info(f'Saving state, iter: {global_step}')
                        model_to_save = model.module if hasattr(
                            model, 'module') else model  # Only save the model itself
                        model_name = f"weight_on_{global_step}_" + WEIGHTS_NAME
                        output_model_file = os.path.join(args.output_dir,
                                                         model_name)
                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
                        output_config_file = os.path.join(args.output_dir,
                                                          CONFIG_NAME)
                        with open(output_config_file, 'w') as f:
                            f.write(model_to_save.config.to_json_string())
                        print("Loss at ", global_step, loss_ml.item(),
                              loss_click.item(), loss.item())
                        save_loss.append(
                            [loss_ml.item(), loss_click.item(), loss.item()])

                    if args.visdom:
                        update_vis_plot(viz, global_step, loss_ml.item(),
                                        loss_click.item(), iter_plot,
                                        epoch_plot, 'append')

            if epoch != (int(args.num_train_epochs) - 1):
                # save at the end of every epoch except the last
                # (the final weights are written below)
                logger.info(f'Saving state, epoch: {epoch}')
                model_to_save = model.module if hasattr(
                    model, 'module') else model  # Only save the model itself
                model_name = f"weight_on_ep{epoch}_" + WEIGHTS_NAME
                output_model_file = os.path.join(args.output_dir, model_name)
                torch.save(model_to_save.state_dict(), output_model_file)
                output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
                with open(output_config_file, 'w') as f:
                    f.write(model_to_save.config.to_json_string())

            print("Loss at epoch", epoch, tr_loss_ml, tr_loss_click, tr_loss)
            save_epoch_loss.append([tr_loss_ml, tr_loss_click, tr_loss])
            if args.visdom:
                update_vis_plot(viz, epoch, tr_loss_ml, tr_loss_click,
                                epoch_plot, None, 'append',
                                len(train_dataset) // args.train_batch_size)

            # if args.do_eval and loaded_epoch == -1:
            #     model.eval()
            #     eval_loss, eval_accuracy = 0, 0
            #     nb_eval_steps, nb_eval_examples = 0, 0
            #
            #     for batch in tqdm(eval_dataloader, desc="Evaluating"):
            #         batch = tuple(t.to(device) for t in batch)
            #         input_ids, input_mask, segment_ids, label_ids = batch
            #
            #         with torch.no_grad():
            #             tmp_eval_loss = model(input_ids, segment_ids, input_mask, None, label_ids)
            #             prediction_scores, logits = model(input_ids, segment_ids, input_mask)
            #
            #         if n_gpu > 1:
            #             tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu.
            #
            #         logits = logits.detach().cpu().numpy()
            #         label_ids = label_ids.to('cpu').numpy()
            #         tmp_eval_accuracy = accuracy(logits, label_ids)
            #
            #         eval_loss += tmp_eval_loss.mean().item()
            #         eval_accuracy += tmp_eval_accuracy
            #
            #         nb_eval_examples += input_ids.size(0)
            #         nb_eval_steps += 1
            #
            #     eval_loss = eval_loss / nb_eval_steps
            #     eval_accuracy = eval_accuracy / nb_eval_examples
            #     result = {'eval_loss': eval_loss,
            #               'eval_accuracy': eval_accuracy,
            #               'global_step': global_step}
            #
            #     save_eval_loss.append(eval_loss)
            #
            #     output_eval_file = os.path.join(args.output_dir, f"Epoch_{epoch}_eval_results.txt")
            #     with open(output_eval_file, "w") as writer:
            #         logger.info(f"***** Eval results on Epoch {epoch} *****")
            #         for key in sorted(result.keys()):
            #             logger.info("  %s = %s", key, str(result[key]))
            #             writer.write("%s = %s\n" % (key, str(result[key])))

    # Save a trained model
    logger.info("** ** * Saving fine-tuned model ** ** * ")
    # model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
    # output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    # if args.do_train:
    #     torch.save(model_to_save.state_dict(), output_model_file)

    save_loss = np.array(save_loss)
    save_epoch_loss = np.array(save_epoch_loss)
    np.save(os.path.join(args.output_dir, "save_loss.npy"), save_loss)
    np.save(os.path.join(args.output_dir, "save_epoch_loss.npy"),
            save_epoch_loss)
    # if args.do_eval:
    #     save_eval_loss = np.array(save_eval_loss)
    #     np.save(os.path.join(args.output_dir, "save_eval_loss.npy"), save_eval_loss)

    model_to_save = model.module if hasattr(
        model, 'module') else model  # Only save the model itself
    output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
    torch.save(model_to_save.state_dict(), output_model_file)
    output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
    with open(output_config_file, 'w') as f:
        f.write(model_to_save.config.to_json_string())
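# `create_vis_plot` / `update_vis_plot` are called above but defined elsewhere.
# A minimal sketch of the common visdom line-plot helper pattern, assuming three
# traces (LM / Click / Total loss); the signatures here are illustrative
# assumptions, not the verbatim originals:
import torch  # already imported at the top of the original script


def create_vis_plot(viz, xlabel, ylabel, title, legend):
    # Create one window seeded with a zero point per trace.
    return viz.line(X=torch.zeros((1,)).cpu(),
                    Y=torch.zeros((1, 3)).cpu(),
                    opts=dict(xlabel=xlabel, ylabel=ylabel,
                              title=title, legend=legend))


def update_vis_plot(viz, iteration, loss_a, loss_b, window1, window2,
                    update_type, epoch_size=1):
    # Append (loss_a, loss_b, loss_a + loss_b), averaged over epoch_size,
    # at x = iteration.
    viz.line(X=torch.ones((1, 3)).cpu() * iteration,
             Y=torch.tensor([loss_a, loss_b, loss_a + loss_b]).unsqueeze(0).cpu() / epoch_size,
             win=window1,
             update=update_type)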
corpus = load_lm_data(args.entity_dict, args.data, args.output_dir,
                      args.dataset, tokenizer)

## Training dataset
train_iter = corpus.get_iterator('train',
                                 args.batch_size,
                                 args.max_seq_length,
                                 args.max_doc_length,
                                 device=device)

## Total number of batches and optimizer update steps
total_train_steps = int(train_iter.batch_steps * args.num_train_epochs)

########################################################################################################################
# Building the model
########################################################################################################################
model = BertForPreTraining.from_pretrained(args.bert_model,
                                           entity_num=train_iter.entity_num)

args.n_all_param = sum([p.nelement() for p in model.bert.parameters()])
args.n_nonemb_param = sum(
    [p.nelement() for p in model.bert.encoder.parameters()])

logger.info('=' * 100)
for k, v in args.__dict__.items():
    logger.info('    - {} : {}'.format(k, v))
logger.info('=' * 100)
logger.info('#params = {}'.format(args.n_all_param))
logger.info('#non emb params = {}'.format(args.n_nonemb_param))

if args.fp16:
    model = model.half()
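# Hypothetical follow-up (not in the original): the embedding parameter count
# falls directly out of the two totals logged above.
#   n_emb_param = args.n_all_param - args.n_nonemb_param
#   logger.info('#emb params = {}'.format(n_emb_param))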
def prepare_model_and_optimizer(args, device):
    global_step = 0
    args.resume_step = 0
    checkpoint = None

    config = BertConfig.from_json_file(args.bert_config_path)
    config.fused_mha = args.fused_mha
    config.fused_gelu_bias = args.fused_gelu_bias
    config.dense_seq_output = args.dense_seq_output
    config.unpad = args.unpad
    config.pad = args.pad
    config.fuse_qkv = not args.disable_fuse_qkv
    config.fuse_scale = not args.disable_fuse_scale
    config.fuse_mask = not args.disable_fuse_mask
    config.fuse_dropout = args.enable_fuse_dropout
    config.apex_softmax = not args.disable_apex_softmax
    config.enable_stream = args.enable_stream
    if config.fuse_mask:
        config.apex_softmax = True
    if not config.pad:
        config.enable_stream = True
    if config.unpad:
        config.fused_mha = False

    # Pad the vocabulary for divisibility by 8
    if config.vocab_size % 8 != 0:
        config.vocab_size += 8 - (config.vocab_size % 8)

    # Load from a PyTorch checkpoint - either given as init_checkpoint, or
    # picked up from output_dir if found
    if args.init_checkpoint is not None or found_resume_checkpoint(args):
        # Prepare model
        model = BertForPreTraining(config)
        if args.init_checkpoint is None:  # find the checkpoint in output_dir
            checkpoint_str = "phase2_ckpt_*.pt" if args.phase2 else "phase1_ckpt_*.pt"
            model_names = glob.glob(os.path.join(args.output_dir, checkpoint_str))
            global_step = max(
                int(x.split('.pt')[0].split('_')[-1].strip())
                for x in model_names)
            args.resume_step = global_step  # used for throughput computation

            resume_init_checkpoint = os.path.join(
                args.output_dir, checkpoint_str.replace("*", str(global_step)))
            print("Setting init checkpoint to %s - which is the latest in %s" %
                  (resume_init_checkpoint, args.output_dir))
            checkpoint = torch.load(resume_init_checkpoint, map_location="cpu")
        else:
            checkpoint = torch.load(args.init_checkpoint, map_location="cpu")["model"]

        # Fused MHA requires a remapping of checkpoint parameters
        if config.fused_mha:
            checkpoint_remapped = remap_attn_parameters(checkpoint)
            model.load_state_dict(checkpoint_remapped, strict=False)
        else:
            model.load_state_dict(checkpoint, strict=True)
    else:  # Load from a TF checkpoint
        model = BertForPreTraining.from_pretrained(args.init_tf_checkpoint,
                                                   from_tf=True,
                                                   config=config)

    model.to(device)
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']

    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay_rate},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}]

    mlperf_logger.log_event(key=mlperf_logger.constants.OPT_BASE_LR,
                            value=args.learning_rate, sync=False)
    optimizer = FusedLAMB(optimizer_grouped_parameters,
                          lr=args.learning_rate,
                          betas=(args.opt_lamb_beta_1, args.opt_lamb_beta_2))
    mlperf_logger.log_event(key='opt_epsilon',
                            value=optimizer.defaults['eps'],
                            sync=False)
    b1, b2 = optimizer.defaults['betas']
    mlperf_logger.log_event(key='opt_lamb_beta_1', value=b1, sync=False)
    mlperf_logger.log_event(key='opt_lamb_beta_2', value=b2, sync=False)
    mlperf_logger.log_event(key='opt_lamb_weight_decay_rate',
                            value=optimizer.defaults['weight_decay'],
                            sync=False)

    if args.warmup_steps == 0:
        warmup_steps = int(args.max_steps * args.warmup_proportion)
        warmup_start = 0
    else:
        warmup_steps = args.warmup_steps
        warmup_start = args.start_warmup_step
    lr_scheduler = LinearWarmupPolyDecayScheduler(optimizer,
                                                  start_warmup_steps=warmup_start,
                                                  warmup_steps=warmup_steps,
                                                  total_steps=args.max_steps,
                                                  end_learning_rate=0.0,
                                                  degree=1.0)

    if args.fp16:
        if args.loss_scale == 0:
            model, optimizer = amp.initialize(model, optimizer,
                                              opt_level="O2",
                                              loss_scale="dynamic")
        else:
            model, optimizer = amp.initialize(model, optimizer,
                                              opt_level="O2",
                                              loss_scale=args.loss_scale)
        amp._amp_state.loss_scalers[0]._loss_scale = float(
            os.getenv("INIT_LOSS_SCALE", 2**20))

    if found_resume_checkpoint(args):
        # Restore m, v optimizer states (only when resuming from a checkpoint,
        # not for init_checkpoint and init_tf_checkpoint for now)
        optimizer.load_state_dict(checkpoint['optimizer'])

        # Restore AMP master parameters
        if args.fp16:
            optimizer._lazy_init_maybe_master_weights()
            optimizer._amp_stash.lazy_init_called = True
            optimizer.load_state_dict(checkpoint['optimizer'])
            for param, saved_param in zip(amp.master_params(optimizer),
                                          checkpoint['master params']):
                param.data.copy_(saved_param.data)

    if args.local_rank != -1:
        if not args.allreduce_post_accumulation:
            model = DDP(model,
                        message_size=250000000,
                        gradient_predivide_factor=torch.distributed.get_world_size())
        else:
            flat_dist_call([param.data for param in model.parameters()],
                           torch.distributed.broadcast, (0,))

    return model, optimizer, lr_scheduler, checkpoint, global_step
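# `found_resume_checkpoint` is used above but defined elsewhere in the repo.
# A minimal sketch consistent with how it is used here; `args.resume_from_checkpoint`
# is an assumed flag, not confirmed by this file:
def found_resume_checkpoint(args):
    # Resuming applies only when requested and a matching phase checkpoint
    # already exists in output_dir.
    checkpoint_str = "phase2_ckpt_*.pt" if args.phase2 else "phase1_ckpt_*.pt"
    return (args.resume_from_checkpoint and
            len(glob.glob(os.path.join(args.output_dir, checkpoint_str))) > 0)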
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--input_dir", type=str, required=True)
    parser.add_argument("--teacher_model", default=None, type=str, required=True)
    parser.add_argument("--student_model", default=None, type=str, required=True)
    parser.add_argument("--output_dir", default=None, type=str, required=True)
    parser.add_argument('--vocab_file',
                        type=str,
                        default=None,
                        required=True,
                        help="Vocabulary mapping/file BERT was pretrained on")

    # Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--reduce_memory",
                        action="store_true",
                        help="Store training data as on-disk memmaps to massively reduce memory usage")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=1e-4,
                        type=float,
                        metavar='W',
                        help='weight decay')
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on GPUs")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--steps_per_epoch',
                        type=int,
                        default=-1,
                        help="Number of update steps in one epoch.")
    parser.add_argument('--max_steps',
                        type=int,
                        default=-1,
                        help="Number of training steps.")
    parser.add_argument('--amp',
                        action='store_true',
                        default=False,
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--continue_train',
                        action='store_true',
                        default=False,
                        help='Whether to train from checkpoints')
    parser.add_argument('--disable_progress_bar',
                        default=False,
                        action='store_true',
                        help='Disable tqdm progress bar')
    parser.add_argument('--max_grad_norm',
                        type=float,
                        default=1.,
                        help="Gradient clipping threshold")

    # Additional arguments
    parser.add_argument('--eval_step', type=int, default=1000)

    # This is used for running on Huawei Cloud.
    parser.add_argument('--data_url', type=str, default="")

    # Distillation specific
    parser.add_argument('--value_state_loss', action='store_true', default=False)
    parser.add_argument('--hidden_state_loss', action='store_true', default=False)
    parser.add_argument('--use_last_layer', action='store_true', default=False)
    parser.add_argument('--use_kld', action='store_true', default=False)
    parser.add_argument('--use_cosine', action='store_true', default=False)
    parser.add_argument('--distill_config',
                        default="distillation_config.json",
                        type=str,
                        help="path to the distillation config")
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help='number of DataLoader worker processes per rank')

    args = parser.parse_args()
    logger.info('args:{}'.format(args))

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of
        # synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
                        stream=sys.stdout)
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.amp))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    # Reference params
    author_gbs = 256
    author_steps_per_epoch = 22872
    author_epochs = 3
    author_max_steps = author_steps_per_epoch * author_epochs

    # Compute present run params
    if args.max_steps == -1 or args.steps_per_epoch == -1:
        args.steps_per_epoch = author_steps_per_epoch * author_gbs // (
            args.train_batch_size * get_world_size() * args.gradient_accumulation_steps)
        args.max_steps = author_max_steps * author_gbs // (
            args.train_batch_size * get_world_size() * args.gradient_accumulation_steps)

    # Set seed
    set_seed(args.seed, n_gpu)

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir) and is_main_process():
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.teacher_model,
                                              do_lower_case=args.do_lower_case)

    teacher_model, teacher_config = BertModel.from_pretrained(args.teacher_model,
                                                              distill_config=args.distill_config)
    # Required to make sure the model's fwd doesn't return anything; required
    # for DDP, since fwd output not being used in loss computation crashes DDP
    teacher_model.make_teacher()

    if args.continue_train:
        student_model, student_config = BertForPreTraining.from_pretrained(
            args.student_model, distill_config=args.distill_config)
    else:
        student_model, student_config = BertForPreTraining.from_scratch(
            args.student_model, distill_config=args.distill_config)

    # We need a projection layer since teacher.hidden_size != student.hidden_size
    use_projection = student_config.hidden_size != teacher_config.hidden_size
    if use_projection:
        project = Project(student_config, teacher_config)
        if args.continue_train:
            project_model_file = os.path.join(args.student_model, "project.bin")
            project_ckpt = torch.load(project_model_file, map_location="cpu")
            project.load_state_dict(project_ckpt)

    distill_config = {"nn_module_names": []}  # Empty list since we don't want to use nn module hooks here
    distill_hooks_student, distill_hooks_teacher = DistillHooks(distill_config), DistillHooks(distill_config)

    student_model.register_forward_hook(distill_hooks_student.child_to_main_hook)
    teacher_model.register_forward_hook(distill_hooks_teacher.child_to_main_hook)

    ## Register hooks on nn.Modules
    # student_fwd_pre_hook = student_model.register_forward_pre_hook(distill_hooks_student.register_nn_module_hook)
    # teacher_fwd_pre_hook = teacher_model.register_forward_pre_hook(distill_hooks_teacher.register_nn_module_hook)

    student_model.to(device)
    teacher_model.to(device)
    if use_projection:
        project.to(device)

    if args.local_rank != -1:
        teacher_model = torch.nn.parallel.DistributedDataParallel(
            teacher_model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=False)
        student_model = torch.nn.parallel.DistributedDataParallel(
            student_model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=False)
        if use_projection:
            project = torch.nn.parallel.DistributedDataParallel(
                project,
                device_ids=[args.local_rank],
                output_device=args.local_rank,
                find_unused_parameters=False)

    size = 0
    for n, p in student_model.named_parameters():
        logger.info('n: {}'.format(n))
        logger.info('p: {}'.format(p.nelement()))
        size += p.nelement()
    logger.info('Total parameters: {}'.format(size))

    # Prepare optimizer
    param_optimizer = list(student_model.named_parameters())
    if use_projection:
        param_optimizer += list(project.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]

    optimizer = FusedAdam(optimizer_grouped_parameters,
                          lr=args.learning_rate,
                          bias_correction=False)
    scheduler = LinearWarmUpScheduler(optimizer,
                                      warmup=args.warmup_proportion,
                                      total_steps=args.max_steps)

    global_step = 0
    logging.info("***** Running training *****")
    logging.info("  Num examples = {}".format(args.train_batch_size * args.max_steps))
    logging.info("  Batch size = %d", args.train_batch_size)
    logging.info("  Num steps = %d", args.max_steps)

    # Prepare the data loader.
    if is_main_process():
        tic = time.perf_counter()
    train_dataloader = lddl.torch.get_bert_pretrain_data_loader(
        args.input_dir,
        local_rank=args.local_rank,
        vocab_file=args.vocab_file,
        data_loader_kwargs={
            'batch_size': args.train_batch_size * n_gpu,
            'num_workers': args.num_workers,
            'pin_memory': True,
        },
        base_seed=args.seed,
        log_dir=None if args.output_dir is None else os.path.join(args.output_dir, 'lddl_log'),
        log_level=logging.WARNING,
        start_epoch=0,
    )
    if is_main_process():
        print('get_bert_pretrain_data_loader took {} s!'.format(time.perf_counter() - tic))
    train_dataloader = tqdm(train_dataloader,
                            desc="Iteration",
                            disable=args.disable_progress_bar) if is_main_process() else train_dataloader

    tr_loss, tr_att_loss, tr_rep_loss, tr_value_loss = 0., 0., 0., 0.
    nb_tr_examples, local_step = 0, 0

    student_model.train()
    scaler = torch.cuda.amp.GradScaler()

    transformer_losses = TransformerLosses(student_config, teacher_config, device, args)
    iter_start = time.time()
    while global_step < args.max_steps:
        for batch in train_dataloader:
            if global_step >= args.max_steps:
                break

            # Remove forward_pre_hook after one forward pass; the purpose of
            # forward_pre_hook is to register forward_hooks on nn_module_names
            # provided in config
            # if idx == 1:
            #     student_fwd_pre_hook.remove()
            #     teacher_fwd_pre_hook.remove()
            #     # return

            # Initialize loss metrics
            if global_step % args.steps_per_epoch == 0:
                tr_loss, tr_att_loss, tr_rep_loss, tr_value_loss = 0., 0., 0., 0.
                mean_loss, mean_att_loss, mean_rep_loss, mean_value_loss = 0., 0., 0., 0.

            batch = {k: v.to(device) for k, v in batch.items()}
            input_ids, segment_ids, input_mask, lm_label_ids, is_next = (
                batch['input_ids'], batch['token_type_ids'], batch['attention_mask'],
                batch['labels'], batch['next_sentence_labels'])

            att_loss = 0.
            rep_loss = 0.
            value_loss = 0.

            with torch.cuda.amp.autocast(enabled=args.amp):
                student_model(input_ids, segment_ids, input_mask, None)

                # Gather student states extracted by hooks
                temp_model = unwrap_ddp(student_model)
                student_atts = flatten_states(temp_model.distill_states_dict, "attention_scores")
                student_reps = flatten_states(temp_model.distill_states_dict, "hidden_states")
                student_values = flatten_states(temp_model.distill_states_dict, "value_states")
                student_embeddings = flatten_states(temp_model.distill_states_dict, "embedding_states")
                bsz, attn_heads, seq_len, _ = student_atts[0].shape

                # No gradient for the teacher
                with torch.no_grad():
                    teacher_model(input_ids, segment_ids, input_mask)

                # Gather teacher states extracted by hooks
                temp_model = unwrap_ddp(teacher_model)
                teacher_atts = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "attention_scores")]
                teacher_reps = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "hidden_states")]
                teacher_values = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "value_states")]
                teacher_embeddings = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "embedding_states")]

                teacher_layer_num = len(teacher_atts)
                student_layer_num = len(student_atts)

                # MiniLM
                if student_config.distillation_config["student_teacher_layer_mapping"] == "last_layer":
                    if student_config.distillation_config["use_attention_scores"]:
                        student_atts = [student_atts[-1]]
                        new_teacher_atts = [teacher_atts[-1]]
                    if student_config.distillation_config["use_value_states"]:
                        student_values = [student_values[-1]]
                        new_teacher_values = [teacher_values[-1]]
                    if student_config.distillation_config["use_hidden_states"]:
                        new_teacher_reps = [teacher_reps[-1]]
                        new_student_reps = [student_reps[-1]]
                else:
                    assert teacher_layer_num % student_layer_num == 0
                    layers_per_block = int(teacher_layer_num / student_layer_num)
                    if student_config.distillation_config["use_attention_scores"]:
                        new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1]
                                            for i in range(student_layer_num)]
                    if student_config.distillation_config["use_value_states"]:
                        new_teacher_values = [teacher_values[i * layers_per_block + layers_per_block - 1]
                                              for i in range(student_layer_num)]
                    if student_config.distillation_config["use_hidden_states"]:
                        new_teacher_reps = [teacher_reps[i * layers_per_block + layers_per_block - 1]
                                            for i in range(student_layer_num)]
                        new_student_reps = student_reps

                if student_config.distillation_config["use_attention_scores"]:
                    att_loss = transformer_losses.compute_loss(student_atts, new_teacher_atts,
                                                               loss_name="attention_loss")
                if student_config.distillation_config["use_hidden_states"]:
                    if use_projection:
                        rep_loss = transformer_losses.compute_loss(project(new_student_reps), new_teacher_reps,
                                                                   loss_name="hidden_state_loss")
                    else:
                        rep_loss = transformer_losses.compute_loss(new_student_reps, new_teacher_reps,
                                                                   loss_name="hidden_state_loss")
                if student_config.distillation_config["use_embedding_states"]:
                    if use_projection:
                        rep_loss += transformer_losses.compute_loss(project(student_embeddings), teacher_embeddings,
                                                                    loss_name="embedding_state_loss")
                    else:
                        rep_loss += transformer_losses.compute_loss(student_embeddings, teacher_embeddings,
                                                                    loss_name="embedding_state_loss")
                if student_config.distillation_config["use_value_states"]:
                    value_loss = transformer_losses.compute_loss(student_values, new_teacher_values,
                                                                 loss_name="value_state_loss")

                loss = att_loss + rep_loss + value_loss
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

            # Guarded like the other two terms below: att_loss is only a tensor
            # when attention-score distillation is enabled.
            if student_config.distillation_config["use_attention_scores"]:
                tr_att_loss += att_loss.item() / args.gradient_accumulation_steps
            if student_config.distillation_config["use_hidden_states"]:
                tr_rep_loss += rep_loss.item() / args.gradient_accumulation_steps
            if student_config.distillation_config["use_value_states"]:
                tr_value_loss += value_loss.item() / args.gradient_accumulation_steps

            if args.amp:
                scaler.scale(loss).backward()
                # Unscale so that clip_grad_norm_ below sees the true gradients
                scaler.unscale_(optimizer)
            else:
                loss.backward()

            if use_projection:
                torch.nn.utils.clip_grad_norm_(chain(student_model.parameters(), project.parameters()),
                                               args.max_grad_norm,
                                               error_if_nonfinite=False)
            else:
                torch.nn.utils.clip_grad_norm_(student_model.parameters(),
                                               args.max_grad_norm,
                                               error_if_nonfinite=False)

            tr_loss += loss.item()
            nb_tr_examples += input_ids.size(0)
            local_step += 1

            if local_step % args.gradient_accumulation_steps == 0:
                scheduler.step()
                if args.amp:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()
                optimizer.zero_grad()
                global_step = optimizer.param_groups[0]["step"] if "step" in optimizer.param_groups[0] else 0

                if (global_step % args.steps_per_epoch) > 0:
                    mean_loss = tr_loss / (global_step % args.steps_per_epoch)
                    mean_att_loss = tr_att_loss / (global_step % args.steps_per_epoch)
                    mean_rep_loss = tr_rep_loss / (global_step % args.steps_per_epoch)
                    mean_value_loss = tr_value_loss / (global_step % args.steps_per_epoch)

                if (global_step + 1) % args.eval_step == 0 and is_main_process():
                    result = {}
                    result['global_step'] = global_step
                    result['lr'] = optimizer.param_groups[0]["lr"]
                    result['loss'] = mean_loss
                    result['att_loss'] = mean_att_loss
                    result['rep_loss'] = mean_rep_loss
                    result['value_loss'] = mean_value_loss
                    result['perf'] = ((global_step + 1) * get_world_size() * args.train_batch_size *
                                      args.gradient_accumulation_steps / (time.time() - iter_start))

                    output_eval_file = os.path.join(args.output_dir, "log.txt")
                    if is_main_process():
                        with open(output_eval_file, "a") as writer:
                            logger.info("***** Eval results *****")
                            for key in sorted(result.keys()):
                                logger.info("  %s = %s", key, str(result[key]))
                                writer.write("%s = %s\n" % (key, str(result[key])))

                    # Save a trained model
                    model_name = "{}".format(WEIGHTS_NAME)
                    logging.info("** ** * Saving fine-tuned model ** ** * ")
                    # Only save the model itself
                    model_to_save = student_model.module if hasattr(student_model, 'module') else student_model
                    if use_projection:
                        project_to_save = project.module if hasattr(project, 'module') else project

                    output_model_file = os.path.join(args.output_dir, model_name)
                    output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
                    output_project_file = os.path.join(args.output_dir, "project.bin")

                    torch.save(model_to_save.state_dict(), output_model_file)
                    if use_projection:
                        torch.save(project_to_save.state_dict(), output_project_file)
                    model_to_save.config.to_json_file(output_config_file)
                    tokenizer.save_vocabulary(args.output_dir)

                    if oncloud:
                        logging.info(mox.file.list_directory(args.output_dir, recursive=True))
                        logging.info(mox.file.list_directory('.', recursive=True))
                        mox.file.copy_parallel(args.output_dir, args.data_url)
                        mox.file.copy_parallel('.', args.data_url)

    # Final save after training completes
    model_name = "{}".format(WEIGHTS_NAME)
    logging.info("** ** * Saving fine-tuned model ** ** * ")
    model_to_save = student_model.module if hasattr(student_model, 'module') else student_model

    if use_projection:
        project_to_save = project.module if hasattr(project, 'module') else project
        output_project_file = os.path.join(args.output_dir, "project.bin")
        if is_main_process():
            torch.save(project_to_save.state_dict(), output_project_file)

    output_model_file = os.path.join(args.output_dir, model_name)
    output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
    if is_main_process():
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

    if oncloud:
        logging.info(mox.file.list_directory(args.output_dir, recursive=True))
        logging.info(mox.file.list_directory('.', recursive=True))
        mox.file.copy_parallel(args.output_dir, args.data_url)
        mox.file.copy_parallel('.', args.data_url)
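# `unwrap_ddp` is used in the training loop above but defined elsewhere in the
# repo; a minimal sketch of the assumed behavior:
def unwrap_ddp(model):
    # Strip the DistributedDataParallel/DataParallel wrapper, if any, so the
    # hooks' distill_states_dict is reachable as a plain attribute.
    return model.module if hasattr(model, 'module') else model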