def deserialize(self, j):
    # Rebuild one KnowledgeBase per course pathname from the serialized mapping.
    self.kbs = {
        k: kb.KnowledgeBase().deserialize(v, k)
        for k, v in j.items()
    }
    self.dirty = False
    return self
def load():
    # Decode the embedded base64 blob, unpickle the fact list, and assert each
    # fact into a fresh KnowledgeBase.
    facts = base64.decodebytes(FACTS.encode('ascii'))
    facts = pickle.loads(facts)
    skb = kb.KnowledgeBase()
    for fact in facts:
        skb.tell(fact)
    return skb
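# Hedged sketch (the fact tuples below are placeholders, not the project's real
# facts): a FACTS constant like the one consumed by load() above can be produced
# by pickling the fact list and base64-encoding the result, i.e. the exact
# inverse of the decode path.
import base64
import pickle

facts = [('socrates', 'is_a', 'human'), ('human', 'is_a', 'mortal')]  # placeholder facts
FACTS = base64.encodebytes(pickle.dumps(facts)).decode('ascii')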
def run(self):
    global input
    while True:
        # Course selection: list the available courses and keep prompting
        # until the user sends a valid course number.
        while True:
            i = 1
            output = u'Cursos disponíveis:\n\n'
            for k in kodule.all_kourses:
                output = output + str(i) + '. ' + k.title + '\n'
                i += 1
            yield output + u"\nO que quer estudar? Digite o número do curso."
            if not input.value.isdigit():
                continue
            selected_item = int(input.value) - 1
            if selected_item < 0 or selected_item >= len(kodule.all_kourses):
                continue
            break

        # Study the chosen course. A new course shows its introductory material
        # and waits for an "ok" before a KnowledgeBase is created for it.
        output = ''
        kourse = kodule.all_kourses[selected_item]
        if kourse.pathname not in self.kbs:
            yield kourse.title + ':\n' \
                + ' '.join(kourse.initial_material) \
                + '\n\n' \
                + u'Envie "ok" para começar o curso ou "não" para voltar para a escolha do curso.'
            if util.normalize_caseless(input.value) != 'ok':
                continue
            self.kbs[kourse.pathname] = kb.KnowledgeBase()  # TODO load from file/DB
        else:
            output += 'Vamos continuar!\n\n'

        for x in study.study(input, kourse, self.kbs[kourse.pathname]):
            yield output + x
            output = ''

        # Enough for now: tell the user when to come back for the next revision.
        now = datetime.datetime.now()
        be_back_datetime = self.kbs[kourse.pathname].get_next_revision_datetime() \
            + datetime.timedelta(seconds=59)
        if be_back_datetime >= now + datetime.timedelta(days=7):
            be_back_str = u'em ' + str(be_back_datetime.date())
        elif be_back_datetime.day == (now + datetime.timedelta(days=1)).day:
            be_back_str = str(datetime.datetime.strftime(be_back_datetime, "%A")) \
                + u' às ' + str(be_back_datetime.time())[:5]
        else:
            be_back_str = u'às ' + str(be_back_datetime.time())[:5]
        yield u"Já viu material o suficiente, chega de '" \
            + kourse.title \
            + u"' por enquanto.\n" \
            + u"Por favor volte " \
            + be_back_str \
            + u" para revisar o que aprendeu até agora e ver coisas novas!"
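# Minimal, self-contained sketch (not from the original project) of the coroutine
# pattern run() relies on: the generator yields a prompt, the driver stores the
# user's reply in a shared `input` object, then resumes the generator, which
# reads input.value after the yield.
class SharedInput:
    def __init__(self):
        self.value = ''

input = SharedInput()

def echo_bot():
    while True:
        yield 'prompt: say something'
        yield 'echo: ' + input.value

session = echo_bot()
print(next(session))       # -> "prompt: say something"
input.value = 'hello'      # driver records the user's reply before resuming
print(next(session))       # -> "echo: hello"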
def template_obj_builder(dataset_root, model_weight_file, template_load_dir,
                         template_save_dir, model_type, templates_idlist,
                         introduce_oov, use_hard_scoring=True, parts=1, offset=0):
    # Load the train/valid/test splits, sharing the entity and relation maps
    # built from the training split.
    ktrain = kb.KnowledgeBase(os.path.join(dataset_root, 'train.txt'))
    if introduce_oov:
        ktrain.entity_map["<OOV>"] = len(ktrain.entity_map)
    ktest = kb.KnowledgeBase(os.path.join(dataset_root, 'test.txt'),
                             ktrain.entity_map, ktrain.relation_map,
                             add_unknowns=not introduce_oov)
    kvalid = kb.KnowledgeBase(os.path.join(dataset_root, 'valid.txt'),
                              ktrain.entity_map, ktrain.relation_map,
                              add_unknowns=not introduce_oov)
    # Pick the base embedding model that the templates will use for scoring.
    if model_type == "distmult":
        base_model = models.TypedDM(model_weight_file)
    elif model_type == "complex":
        base_model = models.TypedComplex(model_weight_file)
    elif model_type == "trivec":
        base_model = models.TriVec(model_weight_file)
    else:
        message = 'Invalid Model type choice: {0} (choose from {1})'.format(
            model_type, ["distmult", "complex", "trivec"])
        logging.error(message)
        raise argparse.ArgumentTypeError(message)
    templates_obj = builder.build_templates(templates_idlist,
                                            [ktrain, kvalid, ktest], base_model,
                                            use_hard_scoring, template_load_dir,
                                            template_save_dir, parts, offset)
    return templates_obj
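# Hedged usage sketch: the paths, weight file, and template ids below are
# placeholders, not values from the original repository.
template_objs = template_obj_builder(dataset_root='data/fb15k',
                                     model_weight_file='dumps/distmult_weights.npz',
                                     template_load_dir=None,      # build templates from scratch
                                     template_save_dir='templates/',
                                     model_type='distmult',
                                     templates_idlist=[1, 2, 3],
                                     introduce_oov=True)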
def load_knowledge_base():
    """Load and return the knowledge base, caching it as a pickle on first use.

    :return: KnowledgeBase object
    """
    kb_object_path = util.relative_path('kb_data/kb_object.p')
    if os.path.isfile(kb_object_path):
        # Reuse the previously pickled KnowledgeBase.
        with open(kb_object_path, 'rb') as f:
            knowledge_base = pickle.load(f)
    else:
        # Build it from scratch and cache it for subsequent runs.
        knowledge_base = kb.KnowledgeBase()
        knowledge_base.load()
        with open(kb_object_path, 'wb') as f:
            pickle.dump(knowledge_base, f)
    return knowledge_base
def readKB(filename):
    # File layout: line 0 holds the number of query clauses, followed by the
    # query clauses in CNF, then the number of KB clauses and the KB clauses
    # themselves; literals within a clause are separated by 'OR'.
    with open(filename, 'r') as f:
        content = f.read().splitlines()
    alpha_size = int(content[0])
    query_string = content[1:alpha_size + 1]
    query = []
    for cnf in query_string:
        clause = cnf.split()
        clause = list(filter(lambda x: x != 'OR', clause))
        query.append(clause)
    KB = kb.KnowledgeBase()
    KB_size = int(content[alpha_size + 1])
    KB_string = content[alpha_size + 2:]
    for cnf in KB_string:
        clause = cnf.split()
        clause = list(filter(lambda x: x != 'OR', clause))
        KB.addClause(clause)
    return KB, query
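# Hedged example of the file layout readKB() expects: only the line structure is
# taken from the parsing code above; the literal syntax (e.g. "~A" for a negated
# atom) and the file name are assumptions.
example = """2
A OR B
~C
3
~A OR C
B OR C
~B
"""
with open('example_kb.txt', 'w') as f:
    f.write(example)

KB, query = readKB('example_kb.txt')   # query == [['A', 'B'], ['~C']]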
if args.y_labels != '' and args.negative_count != 0:
    logging.error('Cannot generate random samples with y labels. '
                  'If using --y_labels, also pass --negative_count 0')
    exit(-1)
dataset_root = os.path.join(args.data_repo_root, args.dataset)
template_objs = template_builder.template_obj_builder(
    dataset_root, args.model_weights, args.template_load_dir, None,
    args.model_type, args.t_ids, args.oov_entity)
ktrain = template_objs[0].kb
# Load the facts to preprocess, reusing the entity/relation maps of the training KB.
k_preprocess = kb.KnowledgeBase(args.preprocess_file,
                                ktrain.entity_map,
                                ktrain.relation_map,
                                add_unknowns=not args.oov_entity)
y_labels = [1 for _ in range(k_preprocess.facts.shape[0])]
if args.y_labels != '':
    # y_labels = np.loadtxt(args.y_labels)
    y_labels, y_multilabels = utils.read_multilabel(args.y_labels)
    if y_labels.shape[0] != k_preprocess.facts.shape[0]:
        logging.error('Number of facts and their y labels do not match')
        exit(-1)
new_facts = preprocess(k_preprocess, template_objs, args.negative_count,
                       not args.del_ids, y_labels)
write_to_file(new_facts, args.sm_data_write)