def main(start_model_path, start_type, save_dir, modeling_option, target_idx_, num_gpu=1):
    """Train LMS for a single target tag index, with embedding output enabled.

    start_model_path/start_type select the checkpoint and how it is restored;
    target_idx_ picks which of the 3 NLI tags the model is trained against.
    """
    num_gpu = int(num_gpu)
    tf_logging.info("train_from : nli_ex")
    hparams = HPCommon()
    setting = BertNLI()
    set_level_debug()
    reset_root_log_handler()

    config = NLIPairingTrainConfig()
    config.num_gpu = num_gpu

    tf_logging.info("loading batches")
    data = get_nli_data(hparams, setting)

    def init_fn(sess):
        return init_fn_generic(sess, start_type, start_model_path)

    class LMSConfig2(LMSConfigI):
        # Three-way NLI tagging; only the tag at target_idx_ is targeted.
        num_tags = 3
        target_idx = target_idx_
        use_embedding_out = True
        per_layer_component = 'linear'

    train_LMS(hparams, config, LMSConfig2(), save_dir, data, modeling_option, init_fn)
def get_params(start_model_path, start_type, info_fn_name, num_gpu):
    """Assemble the data/config bundle shared by the nli_ex training entry points.

    Returns (data, data_loader, hp, informative_fn, init_fn, train_config).
    """
    hp = hyperparams.HPSENLI3()
    nli_setting = BertNLI()
    set_level_debug()

    train_config = NLIExTrainConfig()
    train_config.num_gpu = num_gpu
    train_config.save_train_payload = True

    tokenizer = get_tokenizer()
    tf_logging.info("Intializing dataloader")
    data_loader = get_modified_data_loader(tokenizer, hp.seq_max,
                                           nli_setting.vocab_filename)
    tf_logging.info("loading batches")
    data = get_nli_data(hp, nli_setting)

    # Maps the user-facing start_type onto the generic initializer's vocabulary.
    start_type_map = {
        'nli': 'cls',
        'nli_ex': 'cls_ex',
        'bert': 'bert',
        'cold': 'cold'
    }

    def init_fn(sess):
        # Lookup happens at call time, so an unknown start_type raises when
        # the session is initialized (same as the original inline dict).
        return init_fn_generic(sess, start_type_map[start_type], start_model_path)

    informative_fn = get_informative_fn_by_name(info_fn_name)
    return data, data_loader, hp, informative_fn, init_fn, train_config
def run(explain_tag, method_name, model_path):
    """Evaluate explanation fidelity for one method on filtered NLI dev batches.

    Prints one "num_delete<TAB>accuracy" line per deletion count.
    """
    hp = hyperparams.HPSENLI3()
    nli_setting = BertNLI()
    target_class = enlidef.get_target_class(explain_tag)
    train_batches, dev_batches = get_nli_data(hp, nli_setting)
    # Keep only instances of the target class, capped at 2000.
    flat_dev_batches = flatten_filter_batches(dev_batches, target_class)[:2000]

    baseline_methods = ['deletion_seq', "random", 'idf', 'deletion', 'LIME']
    gradient_methods = ["elrp", "deeplift", "saliency", "grad*input", "intgrad"]

    if method_name in baseline_methods:
        predictor = BaselineExPredictor(hp, nli_setting, model_path, method_name)
        acc_list = eval_fidelity_with_ex_predictor(predictor, flat_dev_batches,
                                                   explain_tag)
    elif method_name.startswith("nli_ex"):
        # nli_ex variants always use the "co" modeling option here.
        predictor = NLIExPredictor(hp, nli_setting, model_path, "co")
        acc_list = eval_fidelity_with_ex_predictor(predictor, flat_dev_batches,
                                                   explain_tag)
    elif method_name in gradient_methods:
        acc_list = eval_fidelity_gradient(hp, nli_setting, flat_dev_batches,
                                          explain_tag, method_name, model_path)
    else:
        raise Exception(
            "method_name={} is not in the known method list.".format(
                method_name))

    print(method_name)
    for num_delete in sorted(acc_list.keys()):
        print("{}\t{}".format(num_delete, acc_list[num_delete]))
def train_nil_from_bert(model_path, save_dir):
    """Train an NLI classifier on SNLI, starting from a pretrained BERT checkpoint.

    model_path: checkpoint to restore (only variables in the "bert" scope).
    save_dir:   directory where training output is saved.
    Returns whatever train_nli_multi_gpu returns.
    """
    def load_fn(sess, model_path):
        # Restore only the "bert"-scoped variables from the checkpoint.
        return load_model_w_scope(sess, model_path, "bert")

    # NOTE(review): the original assigned max_steps = 61358 and immediately
    # overwrote it with 36250; the dead first assignment has been removed.
    max_steps = 36250
    hp = hyperparams.HPSENLI3()
    set_level_debug()
    nli_setting = BertNLI()
    data = get_snli_data(hp, nli_setting)
    n_gpu = 2
    return train_nli_multi_gpu(hp, nli_setting, save_dir, max_steps, data,
                               model_path, load_fn, n_gpu)
def main(start_model_path, modeling_option, num_gpu=1):
    """Run LMS prediction on the NLI data, restoring the model weights as-is."""
    num_gpu = int(num_gpu)
    hparams = HPCommon()
    setting = BertNLI()
    set_level_debug()
    reset_root_log_handler()

    config = NLIPairingTrainConfig()
    config.num_gpu = num_gpu

    def init_fn(sess):
        # "as_is": restore the checkpoint without remapping variable scopes.
        return init_fn_generic(sess, "as_is", start_model_path)

    data = get_nli_data(hparams, setting)
    do_predict(hparams, config, data, LMSConfig(), modeling_option, init_fn)
def run(args):
    """Predict nli_ex explanations using parsed command-line arguments."""
    hparams = hyperparams.HPSENLI3()
    setting = BertNLI()
    loader = get_modified_data_loader2(hparams, setting)
    predict_nli_ex(
        hparams,
        setting,
        loader,
        args.tag,
        args.data_id,
        args.model_path,
        args.run_name,
        args.modeling_option,
    )
def main(start_model_path, modeling_option, input_path, save_name, num_gpu=1):
    """Run LMS prediction on batches read from input_path and pickle the result.

    save_name is the pickle key the predictions are stored under.
    """
    num_gpu = int(num_gpu)
    hparams = HPCommon()
    setting = BertNLI()
    set_level_debug()
    reset_root_log_handler()

    config = NLIPairingTrainConfig()
    config.num_gpu = num_gpu

    def init_fn(sess):
        # "as_is": restore the checkpoint without remapping variable scopes.
        return init_fn_generic(sess, "as_is", start_model_path)

    batches = get_batches(input_path, setting, HPCommon.batch_size)
    output_d = do_predict(hparams, config, batches, LMSConfig(),
                          modeling_option, init_fn)
    save_to_pickle(output_d, save_name)
def main(start_model_path, start_type, save_dir, modeling_option, num_gpu=1):
    """Train LMS with the default LMSConfig, starting from start_model_path."""
    num_gpu = int(num_gpu)
    tf_logging.info("train_from : nli_ex")
    hparams = HPCommon()
    setting = BertNLI()
    set_level_debug()
    reset_root_log_handler()

    config = NLIPairingTrainConfig()
    config.num_gpu = num_gpu

    tf_logging.info("loading batches")
    data = get_nli_data(hparams, setting)

    def init_fn(sess):
        return init_fn_generic(sess, start_type, start_model_path)

    train_LMS(hparams, config, LMSConfig(), save_dir, data, modeling_option, init_fn)
def train_nil_from(model_path, save_dir, resume=False):
    """Train an NLI model for 67000 steps on two GPUs from a checkpoint.

    model_path: checkpoint to restore from.
    save_dir:   directory where training output is saved.
    resume:     if True, restore the full model; otherwise restore only the
                "bert"-scoped variables.
    Returns whatever train_nli_multi_gpu returns.
    """
    print("Load model path : ", model_path)
    print("Save dir : ", save_dir)

    def load_fn(sess, model_path):
        if not resume:
            return load_model_w_scope(sess, model_path, "bert")
        else:
            return load_model(sess, model_path)

    steps = 67000
    hp = hyperparams.HPSENLI3()
    nli_setting = BertNLI()
    data = get_nli_data(hp, nli_setting)
    set_level_debug()
    # NOTE(review): the original constructed hyperparams.HPSENLI3() a second
    # time here, discarding the first object; the redundant construction is
    # removed (assumes HPSENLI3() is a plain config constructor — confirm).
    n_gpu = 2
    return train_nli_multi_gpu(hp, nli_setting, save_dir, steps, data,
                               model_path, load_fn, n_gpu)
def main(start_model_path, start_type, save_dir, modeling_option, num_gpu=1):
    """Train LMS with an MLP per-layer component instead of the linear one."""
    num_gpu = int(num_gpu)
    tf_logging.info("Train with MLP")
    hparams = HPCommon()
    hparams.per_layer_component = 'mlp'
    setting = BertNLI()
    set_level_debug()
    reset_root_log_handler()

    config = NLIPairingTrainConfig()
    config.num_gpu = num_gpu
    lms_config = LMSConfig()

    tf_logging.info("loading batches")
    data = get_nli_data(hparams, setting)

    def init_fn(sess):
        return init_fn_generic(sess, start_type, start_model_path)

    train_LMS(hparams, config, lms_config, save_dir, data, modeling_option, init_fn)
def run(args):
    """Dispatch args.method_name to a baseline or gradient-attribution predictor."""
    hparams = hyperparams.HPSENLI3()
    setting = BertNLI()
    loader = get_modified_data_loader2(hparams, setting)

    baseline_names = ['deletion_seq', "random", 'idf', 'deletion', 'LIME']
    gradient_names = ["elrp", "deeplift", "saliency", "grad*input", "intgrad"]

    if args.method_name in baseline_names:
        predictor = nli_baseline_predict
    elif args.method_name in gradient_names:
        predictor = nli_attribution_predict
    else:
        raise Exception(
            "method_name={} is not in the known method list.".format(
                args.method_name))

    predictor(hparams, setting, loader, args.tag, args.method_name,
              args.data_id, args.sub_range, args.model_path)
def run(args):
    """Compute explanations for a dataset with the chosen method and pickle them.

    Output is saved under the key "<data_name>_<method_name>".
    """
    hparams = hyperparams.HPGenEx()
    setting = BertNLI()

    baseline_names = ['deletion_seq', "random", 'idf', 'deletion', 'LIME',
                      'term_deletion', 'replace_token', 'term_replace']
    gradient_names = ["elrp", "deeplift", "saliency", "grad*input", "intgrad"]

    if args.method_name in baseline_names:
        predictor = baseline_predict
    elif args.method_name in gradient_names:
        predictor = nli_attribution_predict
    else:
        raise Exception(
            "method_name={} is not in the known method list.".format(
                args.method_name))

    save_name = "{}_{}".format(args.data_name, args.method_name)
    data = load_as_simple_format(args.data_name)
    explains: List[np.array] = predictor(hparams, setting, data,
                                         args.method_name, args.model_path)
    save_to_pickle(explains, save_name)
def train_nil_from(save_dir, model_path, load_fn, max_steps):
    """Train an NLI model from a checkpoint using the given loader callback."""
    hparams = hyperparams.HPSENLI3()
    set_level_debug()
    setting = BertNLI()
    data = get_nli_data(hparams, setting)
    train_nli(hparams, setting, save_dir, max_steps, data, model_path, load_fn)