import datetime
import logging
import os

import torch
import transformers
from transformers import (
    BertConfig,
    BertForSequenceClassification,
    BertTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


def load_classification_model():
    global trainer
    global tokenizer
    mod = 'mtn_models/pytorch_model.bin'
    tok = 'mtn_models/vocab.txt'
    conf = 'mtn_models/config.json'
    # `never_split_tokens` is expected to be defined at module level before this is called.
    tokenizer = BertTokenizer.from_pretrained(
        tok,
        do_lower_case=False,
        do_basic_tokenize=True,
        never_split=never_split_tokens,
        truncation=True,
    )
    # Use the model-specific BertConfig rather than the generic PretrainedConfig,
    # so the BERT architecture fields in config.json are parsed correctly.
    config = BertConfig.from_pretrained(conf, num_labels=6)
    model = BertForSequenceClassification.from_pretrained(mod, config=config)
    # Note: `evaluate_during_training` and the `per_gpu_*` batch-size fields
    # target an older transformers API; newer releases use `evaluation_strategy`
    # and `per_device_*` instead.
    training_args = TrainingArguments("./train")
    training_args.do_train = True
    training_args.evaluate_during_training = True
    training_args.adam_epsilon = 1e-8
    training_args.learning_rate = 2e-5
    training_args.warmup_steps = 0
    training_args.per_gpu_train_batch_size = 16
    training_args.per_gpu_eval_batch_size = 16
    training_args.num_train_epochs = 3
    # training_args.logging_steps = (len(train_features) - 1) // training_args.per_gpu_train_batch_size + 1
    training_args.save_steps = training_args.logging_steps
    training_args.seed = 42
    trainer = Trainer(model=model, args=training_args)
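
# Usage sketch (not from the original source): once load_classification_model()
# has populated the module-level `trainer` and `tokenizer`, a single sentence
# can be classified with a plain forward pass. The max_length of 128 and the
# helper name `classify_sentence` are assumptions for illustration.
def classify_sentence(sentence):
    inputs = tokenizer(sentence, return_tensors="pt", truncation=True, max_length=128)
    with torch.no_grad():
        logits = trainer.model(**inputs).logits
    # Return the index of the highest-scoring class among the 6 labels.
    return int(logits.argmax(dim=-1).item())
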
def generate_training_args(args, inoculation_step):
    training_args = TrainingArguments("tmp_trainer")
    training_args.no_cuda = args.no_cuda
    training_args.seed = args.seed
    training_args.do_train = args.do_train
    training_args.do_eval = args.do_eval
    training_args.output_dir = os.path.join(args.output_dir, str(inoculation_step) + "-sample")
    training_args.evaluation_strategy = args.evaluation_strategy  # evaluation is done after each epoch
    training_args.metric_for_best_model = args.metric_for_best_model
    training_args.greater_is_better = args.greater_is_better
    training_args.logging_dir = args.logging_dir
    training_args.task_name = args.task_name
    training_args.learning_rate = args.learning_rate
    training_args.per_device_train_batch_size = args.per_device_train_batch_size
    training_args.per_device_eval_batch_size = args.per_device_eval_batch_size
    training_args.num_train_epochs = args.num_train_epochs  # this is the maximum num_train_epochs; we set this to be 100
    training_args.eval_steps = args.eval_steps
    training_args.logging_steps = args.logging_steps
    training_args.load_best_model_at_end = args.load_best_model_at_end
    if args.save_total_limit != -1:  # only set if it is specified
        training_args.save_total_limit = args.save_total_limit
    date_time = "{}-{}".format(datetime.datetime.now().month, datetime.datetime.now().day)
    run_name = "{0}_{1}_{2}_{3}_mlen_{4}_lr_{5}_seed_{6}_metrics_{7}".format(
        args.run_name, args.task_name, args.model_type, date_time,
        args.max_seq_length, args.learning_rate, args.seed, args.metric_for_best_model,
    )
    training_args.run_name = run_name
    training_args_dict = training_args.to_dict()
    # for PR
    _n_gpu = training_args_dict["_n_gpu"]
    del training_args_dict["_n_gpu"]
    training_args_dict["n_gpu"] = _n_gpu
    HfParser = HfArgumentParser(TrainingArguments)
    training_args = HfParser.parse_dict(training_args_dict)[0]
    if args.model_path == "":
        args.model_path = args.model_type
        if args.model_type == "":
            assert False  # you have to provide one of them.
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
    )
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")
    return training_args
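
# Usage sketch (illustrative only): generate_training_args expects an
# argparse-style namespace. The field values below are hypothetical defaults
# covering every attribute the function reads; a real caller would populate
# them from argparse.ArgumentParser.
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(
        no_cuda=False, seed=42, do_train=True, do_eval=True,
        output_dir="./results", evaluation_strategy="epoch",
        metric_for_best_model="accuracy", greater_is_better=True,
        logging_dir="./logs", task_name="sst2", learning_rate=2e-5,
        per_device_train_batch_size=16, per_device_eval_batch_size=16,
        num_train_epochs=100, eval_steps=500, logging_steps=500,
        load_best_model_at_end=True, save_total_limit=-1,
        run_name="inoculation", model_type="bert-base-uncased",
        model_path="", max_seq_length=128,
    )
    training_args = generate_training_args(args, inoculation_step=1000)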