# NOTE(review): this chunk starts mid-statement; the fragment below is the
# tail of a truncated `np.where(...)` assignment whose left-hand side lies
# outside this view — do not edit in isolation.
types_of_train_data == type)[0]
# Index sets of positive/negative training triples for every selected
# predicate (triples_of_train_data[:, -1] holds the predicate id of each triple).
for predicate in selected_predicates:
    idxs_of_positive_examples_of_predicates[predicate] = np.where(
        predicates[triples_of_train_data[:, -1]] == predicate)[0]
    # NOTE(review): the same `idxs_of_negative_examples` (defined outside this
    # view) is reused unchanged for every predicate — confirm this is intended
    # rather than a per-predicate computation that was lost.
    idxs_of_negative_examples_of_predicates[
        predicate] = idxs_of_negative_examples
print "finished to upload and analyze data"
print "Start model definition"

# domain definition
# One positive-examples clause per selected predicate: the grounded object
# pairs known to stand in relation p should satisfy isInRelation[p].
clause_for_positive_examples_of_predicates = [
    ltn.Clause(
        [ltn.Literal(True, isInRelation[p], object_pairs_in_relation[p])],
        label="examples_of_object_pairs_in_" + p.replace(" ", "_") +
        "_relation",
        weight=1.0) for p in selected_predicates
]
# Matching negative-examples clauses (polarity False) for pairs known NOT to
# stand in relation p.
clause_for_negative_examples_of_predicates = [
    ltn.Clause(
        [ltn.Literal(False, isInRelation[p], object_pairs_not_in_relation[p])],
        label="examples_of_object_pairs_not_in_" + p.replace(" ", "_") +
        "_relation",
        weight=1.0) for p in selected_predicates
]
# axioms from the Visual Relationship Ontology
isa_subrelation_of, has_subrelations, inv_relations_of, not_relations_of, \
    reflexivity_relations, symmetry, domain_relation, range_relation = \
    get_vrd_ontology()
# NOTE(review): the two statements below use a loop variable `type`; in the
# full file they presumably sit inside a `for type in selected_types:` loop
# whose header lies outside this view — confirm the indentation when merging.
idxs_of_positive_examples_of_types[type] = np.where(types_of_train_data == type)[0]
idxs_of_negative_examples_of_types[type] = np.where(types_of_train_data != type)[0]
# Pair indices that are (resp. are not) in the part-of relation.
idxs_of_positive_examples_of_partOf = np.where(partOf_of_pairs_of_train_data)[0]
# NOTE(review): `== False` on what looks like a boolean array; `~array` is the
# idiomatic NumPy negation and behaves identically for bool dtype — confirm
# dtype before changing.
idxs_of_negative_examples_of_partOf = np.where(partOf_of_pairs_of_train_data == False)[0]
# Only types with at least one positive training example get clauses below.
existing_types = [t for t in selected_types
                  if idxs_of_positive_examples_of_types[t].size > 0]
print "non empty types in train data", existing_types
print "finished to upload and analyze data"
print "Start model definition"

# domain definition
# One clause per non-empty type: its positive examples should satisfy the
# type predicate, its negative examples should falsify it.
clauses_for_positive_examples_of_types = \
    [ltn.Clause([ltn.Literal(True, isOfType[t], objects_of_type[t])],
                label="examples_of_" + t, weight=1.0)
     for t in existing_types]
clauses_for_negative_examples_of_types = \
    [ltn.Clause([ltn.Literal(False, isOfType[t], objects_of_type_not[t])],
                label="examples_of_not_" + t, weight=1.0)
     for t in existing_types]
# Single clauses grounding the part-of relation on its positive and negative
# example pairs.
clause_for_positive_examples_of_partOf = [
    ltn.Clause([ltn.Literal(True, isPartOf, object_pairs_in_partOf)],
               label="examples_of_object_pairs_in_partof_relation",
               weight=1.0)]
clause_for_negative_examples_of_partOf = [
    ltn.Clause([ltn.Literal(False, isPartOf, object_pairs_not_in_partOf)],
               label="examples_of_object_pairs_not_in_part_of_relation",
               weight=1.0)]
# defining axioms from the partOf ontology
parts_of_whole, wholes_of_part = get_part_whole_ontology()
# Dictionaries populated outside this view — purpose not visible here.
w1 = {}
p1 = {}
w2 = {}
# NOTE(review): stray closing brace — the tail of a literal or block that
# starts outside this view.
}
# Registries for the model pieces built below.
predicates = {}
constants = {}
clauses = {}
embedding_dimension = 2
# Shared 2-dimensional domain in which every word embedding lives.
word = ltn.Domain(embedding_dimension, label="Word")
# For every word: a unary predicate, a constant grounded on the word's
# embedding, and a clause asserting the predicate holds on its own constant.
# (Python 2: dict.iteritems)
for w, embedding in data_embeddings.iteritems():
    p = ltn.Predicate(w, word)
    predicates[w] = p
    c = ltn.Constant(w, domain=word, value=embedding)
    constants[w] = c
    clause_label = "%s_%s" % (w, w)
    clauses[clause_label] = ltn.Clause([ltn.Literal(True, p, c)],
                                       label=clause_label)
#all_words = ltn.Domain_union( constants.values())
# Background knowledge, clauses read as disjunctions of literals:
# notZorH: (not zebra(x)) or horse(x), i.e. zebra implies horse.
clauses["notZorH"] = ltn.Clause([
    ltn.Literal(False, predicates["zebra"], word),
    ltn.Literal(True, predicates["horse"], word)
], label="notZorH")
# notZorBW: (not zebra(x)) or bw(x), i.e. zebra implies black-and-white.
clauses["notZorBW"] = ltn.Clause([
    ltn.Literal(False, predicates["zebra"], word),
    ltn.Literal(True, predicates["bw"], word)
], label="notZorBW")
# NOTE(review): the next clause is truncated at the end of this chunk; its
# remaining literals lie outside this view.
clauses["notHornotBWorZ"] = ltn.Clause([
    ltn.Literal(True, predicates["zebra"], word),
# Knowledge base construction: ground facts first, then ontology axioms.
print("finished uploading and analyzing data")
print("Start model definition")

# Define the clauses in the knowledge base.
# First we define the facts: one mutually-exclusive-types clause covering all
# selected types, weighted per type.
clause_for_types = [
    ltn.TypeClause(
        mutExclType,
        label='examples_types',
        weight=len(selected_types) * config.WEIGHT_TYPES_EXAMPLES)
]
# Tensor of correct labels exposed by the TypeClause; presumably fed at
# training time.
labels_placeholder = clause_for_types[0].correct_labels

# Grounding of the part-of relation: one clause over the pairs known to be in
# the relation, and one over the pairs known not to be.
positive_partof_literal = ltn.Literal(True, isPartOf, object_pairs_in_partOf)
clause_for_positive_examples_of_partOf = [
    ltn.Clause(
        [positive_partof_literal],
        label="examples_of_object_pairs_in_partof_relation",
        weight=config.WEIGHT_POS_PARTOF_EXAMPLES)
]
negative_partof_literal = ltn.Literal(False, isPartOf, object_pairs_not_in_partOf)
clause_for_negative_examples_of_partOf = [
    ltn.Clause(
        [negative_partof_literal],
        label="examples_of_object_pairs_not_in_part_of_relation",
        weight=config.WEIGHT_NEG_PARTOF_EXAMPLES)
]

# defining axioms from the partOf ontology
parts_of_whole, wholes_of_part = get_part_whole_ontology()
# Dictionaries populated by the axiom-building code that follows this chunk.
w1, p1, pw = {}, {}, {}
#defined = None #number of layer = 5 #W = 5 * 31 * 31 -random numbers #u = 5 * 1 - ones #tensor #--X = 14 * (30+1) #--XW = matmul(X(5 * 14 * 31)?, W(5*31*31)) #returns [0,1] Has_cancer = ltn.Predicate("has_cancer", person) #parameters- [W, u] #returns [0,1] smokes_implies_has_cancer = \ [ltn.Clause([ltn.Literal(False, Smokes, person), ltn.Literal(True, Has_cancer, person)], label="smoking_implies_cancer")] #ltn.Literal(True, Has_cancer, person) #label - none # Predicate - Has_cancer # polarity - True # domain - person # parameters - [W,u]+list of all object tensors # if polarity: # tensor - Has_cancer.tensor(person)->[0,1] # else: # tensor - 1-Smokes.tensor(person)->[0,1] #ltn.Clause([lit1,lit2],label="abc") #Clause object is created with props: # weight = 1
# Grounded constants: three labelled example persons in each of groups a and b.
a1 = ltn.Constant("a1", domain=person, value=a1_features)
a2 = ltn.Constant("a2", domain=person, value=a2_features)
# BUG FIX: the label here was "a2" (copy-paste error) — every constant needs
# its own unique label; the Python name and feature vector were already a3's.
a3 = ltn.Constant("a3", domain=person, value=a3_features)
b1 = ltn.Constant("b1", domain=person, value=b1_features)
b2 = ltn.Constant("b2", domain=person, value=b2_features)
b3 = ltn.Constant("b3", domain=person, value=b3_features)
# Ungrounded constants: no `value` given, so their feature vectors are learned.
c = ltn.Constant("c", domain=person)
d = ltn.Constant("d", domain=person)

# Soft box constraint keeping a learned constant's features in
# [-0.4, 1.4]^n — i.e. "roughly inside the unit box" — shared by c and d.
_box_lower = -0.4 + np.zeros(number_of_features)
_box_upper = 0.4 + np.ones(number_of_features)


def _features_in_01_clause(constant, label):
    """Return a clause asserting `constant` lies in the soft box above."""
    return ltn.Clause(
        [ltn.Literal(True,
                     ltn.In_range(person, _box_lower, _box_upper,
                                  sharpness=5.0),
                     constant)],
        label=label)


c_features_are_in_01 = _features_in_01_clause(c, "c_is_in_0_1")
d_features_are_in_01 = _features_in_01_clause(d, "d_is_in_0_1")
# Fact: predicate A holds for constant a1.
A_a1 = ltn.Clause([ltn.Literal(True, A, a1)], label="A_a1")
# NOTE(review): this chunk opens mid-function — the statements below are the
# body of a line-processing loop whose header (and enclosing def) lie outside
# this view; the indentation here is a best-effort reconstruction.
persons = line.split('\t')
persons = [per.rstrip() for per in persons]
# Row format appears to be subject \t predicate \t object — confirm upstream.
if persons[1] == predicate:
    if persons[0] in everybody_label and persons[2] in everybody_label:
        gender_key_label = [spouse.label for spouse in gender_of.keys()]
        if persons[2] not in gender_key_label:
            gender_of[everybody[everybody_label.index(persons[2])]] = \
                [everybody[everybody_label.index(persons[0])]]
        else:
            gender_of[everybody[everybody_label.index(persons[2])]].append(
                everybody[everybody_label.index(persons[0])])
# NOTE(review): `gender_key_label` is only assigned inside the conditional
# above — this return raises UnboundLocalError if no matching row was seen;
# confirm the data guarantees at least one match.
return gender_of, gender_key_label

gender_of, gender_key_label = create_dict('data/train.txt', 'gender', everybody_label)
# Positive clauses: every recorded (gender, person) pair satisfies Is_gender.
gender_positive_examples = [
    ltn.Clause([ltn.Literal(True, Is_gender, ltn.Domain_concat([x, y]))],
               label=x.label + "_is_gender_of_" + y.label)
    for x in gender_of for y in gender_of[x]]
# Negative clauses via a closed-world assumption: unrecorded pairs are false.
# NOTE(review): `gender_of[x]` raises KeyError for any x in gender_set missing
# from gender_of — confirm gender_set is a subset of its keys.
gender_negative_examples = [
    ltn.Clause([ltn.Literal(False, Is_gender, ltn.Domain_concat([x, y]))],
               label=x.label + "_is_not_gender_of_" + y.label)
    for x in gender_set for y in everybody if y not in gender_of[x]]
profession_of, profession_key_label = create_dict('data/train.txt', 'profession', everybody_label)
# Same positive/negative construction for the profession relation.
profession_positive_examples = [
    ltn.Clause([ltn.Literal(True, Is_profession, ltn.Domain_concat([x, y]))],
               label=x.label + "_is_profession_of_" + y.label)
    for x in profession_of for y in profession_of[x]]
# NOTE(review): the next statement is truncated at the end of this chunk.
profession_negative_examples = [
    ltn.Clause([ltn.Literal(False, Is_profession, ltn.Domain_concat([x, y]))],
# Pair domain: Cartesian product of two persons, with slices recovering each
# member and a concatenation giving the swapped pair (p2, p1).
p1p2 = ltn.Domain_product(person, person)
p1 = ltn.Domain_slice(p1p2, 0, number_of_features)
p2 = ltn.Domain_slice(p1p2, number_of_features, number_of_features*2)
p2p1 = ltn.Domain_concat([p2, p1])
# NOTE(review): `p1p1` is not defined anywhere in this view (only `p1p2` is);
# it may be a typo for `p1p2` — confirm against the full file before changing.
Is_parent = ltn.Predicate("Is_parent", p1p1)
Is_child = ltn.Predicate("Is_child", p1p1)
Is_spouse = ltn.Predicate("Is_spouse", p1p1)
# Axiom: spouse is symmetric — Is_spouse(p1, p2) implies Is_spouse(p2, p1),
# encoded as (not Is_spouse(p1, p2)) or Is_spouse(p2, p1).
spouse_is_symmetric = \
    [ltn.Clause([ltn.Literal(False, Is_spouse, p1p2),
                 ltn.Literal(True, Is_spouse, p2p1)],
                label="spouse_p1p2_implies_spouse_p2p1")]
# Axiom: Is_child(p1, p2) implies Is_parent(p2, p1).
child_p1p2_implies_parent_p2p1 = \
    [ltn.Clause([ltn.Literal(False, Is_child, p1p2),
                 ltn.Literal(True, Is_parent, p2p1)],
                label="child_p1p2_implies_parent_p2p1")]
# Labels of all person constants, used for membership tests below.
everybody_label = [body.label for body in everybody]


def create_dict(fname, predicate, everybody_label):
    """Build a mapping for `predicate` from tab-separated rows of `fname`.

    NOTE(review): the function body is truncated at the end of this chunk;
    only its opening statements are visible here.
    """
    predicate_of = {}
    predicate_key_label = []
    with open(fname) as f:
        content = f.readlines()
# For every concept named in the config: create a learnable predicate over the
# conceptual space and ground it with its positive/negative training examples.
for label in config["concepts"]:
    concepts[label] = ltn.Predicate(label, conceptual_space,
                                    data_points=pos_examples[label])
    pos_literal = ltn.Literal(True, concepts[label], conceptual_space)
    neg_literal = ltn.Literal(False, concepts[label], conceptual_space)
    # Cache both polarities of the concept literal for later rule construction.
    literals[label] = {True: pos_literal, False: neg_literal}

    # it can happen that we don't have any positive examples; then: don't try
    # to add a rule
    if len(pos_examples[label]) > 0:
        pos_domain = ltn.Domain(conceptual_space.columns,
                                label=label + "_pos_ex")
        # Clause weight scales with the number of examples it covers.
        rules.append(
            ltn.Clause([ltn.Literal(True, concepts[label], pos_domain)],
                       label="{0}Const".format(label),
                       weight=1.0 * len(pos_examples[label])))
        feed_dict[pos_domain.tensor] = pos_examples[label]
    # Same construction for negative examples, with polarity False.
    if len(neg_examples[label]) > 0:
        neg_domain = ltn.Domain(conceptual_space.columns,
                                label=label + "_neg_ex")
        rules.append(
            ltn.Clause([ltn.Literal(False, concepts[label], neg_domain)],
                       label="{0}ConstNot".format(label),
                       weight=1.0 * len(neg_examples[label])))
        feed_dict[neg_domain.tensor] = neg_examples[label]

# parse rules file
num_rules = 0
# BUG FIX: raw string for the regex — "\w" in a plain string literal is an
# invalid escape sequence (DeprecationWarning since Python 3.6, an error in
# future versions); the matched pattern itself is unchanged.
implication_rule = re.compile(r"(\w+) IMPLIES (\w+)")