# tmpl4_predictions = np.array([4] * len(mapped_data))
# tmpl5_predictions = np.array([5] * len(mapped_data))
# tmpl6_predictions = np.array([6] * len(mapped_data))
# if (args.rule_pred is not None):
#     rule_predictions = utils.read_pkl(args.rule_pred)
#     logging.info("Loaded rule predictions from %s" % (args.rule_pred))
#     if (len(rule_predictions) != len(mapped_data)):
#         logging.error("Unequal length of rule predictions and data")
#         exit(-1)

entity_inverse_map = utils.get_inverse_dict(distmult_dump['entity_to_id'])
relation_inverse_map = utils.get_inverse_dict(
    distmult_dump['relation_to_id'])

template_objs = template_builder.template_obj_builder(
    data_root, args.model_weights, args.template_load_dir, None,
    "distmult", [1, 2, 3, 4, 5, 6], True)
explainer = Explainer(
    data_root, template_objs[0].kb, template_objs[0].base_model,
    entity_inverse_map, relation_inverse_map)

# if (args.template_pred is not None):
template_exps = [english_exp_template(mapped_data, var, template_objs, explainer)
                 for var in template_predictions]
# else:
#     template_exps = [
#         explainer.NO_EXPLANATION for _ in range(len(mapped_data))]

# if (args.rule_pred is not None):
#     rule_exps = english_exp_rules(mapped_data, rule_predictions, explainer)
# else:
#     rule_exps = [explainer.NO_EXPLANATION for _ in range(len(mapped_data))]
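# What follows: abort when the template predictions do not line up with the
# mapped data, optionally load pickled rule predictions, invert the DistMult
# entity/relation id dictionaries so model ids can be mapped back to names,
# rebuild the template objects and the Explainer, and produce template-based
# English explanations (falling back to explainer.NO_EXPLANATION when
# args.template_pred is not supplied).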
logging.error("Unequal length of template predictions and data") exit(-1) if (args.rule_pred is not None): rule_predictions = utils.read_pkl(args.rule_pred) logging.info("Loaded rule predictions from %s" % (args.rule_pred)) if (len(rule_predictions) != len(mapped_data)): logging.error("Unequal length of rule predictions and data") exit(-1) entity_inverse_map = utils.get_inverse_dict(distmult_dump['entity_to_id']) relation_inverse_map = utils.get_inverse_dict( distmult_dump['relation_to_id']) template_objs = template_builder.template_obj_builder( data_root, args.model_weights, args.template_load_dir, None, "distmult", args.t_ids, True) explainer = Explainer(data_root, template_objs[0].kb, template_objs[0].base_model, entity_inverse_map, relation_inverse_map, list_of_different_template_files=['template1.txt']) if (args.template_pred is not None): template_exps = english_exp_template(mapped_data, template_predictions, template_objs, explainer) else: template_exps = [ explainer.NO_EXPLANATION for _ in range(len(mapped_data))
                    utils._LOG_LEVEL_STRINGS))
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s :: %(asctime)s - %(message)s',
                    level=args.log_level, datefmt='%d/%m/%Y %I:%M:%S %p')

if (args.y_labels != '' and args.negative_count != 0):
    logging.error(
        'Cannot generate random samples with y labels. '
        'If using --y_labels, also pass --negative_count 0')
    exit(-1)

dataset_root = os.path.join(args.data_repo_root, args.dataset)
template_objs = template_builder.template_obj_builder(
    dataset_root, args.model_weights, args.template_load_dir, None,
    args.model_type, args.t_ids, args.oov_entity)
ktrain = template_objs[0].kb
k_preprocess = kb.KnowledgeBase(args.preprocess_file, ktrain.entity_map,
                                ktrain.relation_map,
                                add_unknowns=not args.oov_entity)

y_labels = [1 for _ in range(k_preprocess.facts.shape[0])]
if (args.y_labels != ''):
    # y_labels = np.loadtxt(args.y_labels)
    y_labels, y_multilabels = utils.read_multilabel(args.y_labels)
    if (y_labels.shape[0] != k_preprocess.facts.shape[0]):