Example #1
        [ltn.Literal(False, isInRelation[p], object_pairs_not_in_relation[p])],
        label="examples_of_object_pairs_not_in_" + p.replace(" ", "_") +
        "_relation",
        weight=1.0) for p in selected_predicates
]

# axioms from the Visual Relationship Ontology
(isa_subrelation_of, has_subrelations, inv_relations_of, not_relations_of,
 reflexivity_relations, symmetry, domain_relation,
 range_relation) = get_vrd_ontology()

so_domain = {}
os_domain = {}

for predicate in selected_predicates:
    so_domain[predicate] = ltn.Domain(number_of_features * 2 +
                                      number_of_extra_features,
                                      label="object_pairs_for_axioms")

for t in selected_types:
    so_domain[t] = ltn.Domain(number_of_features * 2 +
                              number_of_extra_features,
                              label="object_pairs_for_axioms")
    os_domain[t] = ltn.Domain(number_of_features * 2 +
                              number_of_extra_features,
                              label="inverse_object_pairs_for_axioms")

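# A minimal, hypothetical sketch (not part of the original snippet): the
# ontology structures loaded above can be turned into further clauses in the
# same style. Assuming not_relations_of maps each predicate name to a list of
# incompatible predicate names that also appear in selected_predicates, a
# mutual-exclusion axiom "a pair cannot be in both p and q" looks like:
clauses_for_incompatible_relations = [
    ltn.Clause([
        ltn.Literal(False, isInRelation[p], so_domain[p]),
        ltn.Literal(False, isInRelation[q], so_domain[p])
    ],
    label="not_both_" + p.replace(" ", "_") + "_and_" + q.replace(" ", "_"),
    weight=1.0) for p in selected_predicates for q in not_relations_of[p]
]
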
clauses_for_not_domain = [
    ltn.Clause([
        ltn.Literal(False, isInRelation[pred], so_domain[subj[4:]]),
        ltn.Literal(False, isOfType[subj[4:]], objects_of_type[subj[4:]])
    ],
Example #2
clause_for_positive_examples_of_partOf = [
    ltn.Clause([ltn.Literal(True, isPartOf, object_pairs_in_partOf)],
               label="examples_of_object_pairs_in_partof_relation",
               weight=1.0)
]

clause_for_negative_examples_of_partOf = [
    ltn.Clause([ltn.Literal(False, isPartOf, object_pairs_not_in_partOf)],
               label="examples_of_object_pairs_not_in_part_of_relation",
               weight=1.0)
]

# defining axioms from the partOf ontology

parts_of_whole, wholes_of_part = get_part_whole_ontology()

w1 = {}
p1 = {}
w2 = {}
p2 = {}
p1w1 = {}
p2w2 = {}
oo = ltn.Domain((number_of_features-1)*2+2, label="same_object_pairs")
o = ltn.Domain(number_of_features - 1, label="a_generic_object")

w0 = ltn.Domain(number_of_features - 1, label="whole_of_part_whole_pair")
p0 = ltn.Domain(number_of_features - 1, label="part_of_part_whole_pair")
p0w0 = ltn.Domain((number_of_features - 1) * 2 + 2, label="part_whole_pair")
w0p0 = ltn.Domain((number_of_features - 1) * 2 + 2, label="whole_part_pair")

for t in selected_types:
    w1[t] = ltn.Domain(number_of_features-1, label="whole_predicted_objects_for_"+t)
    p1[t] = ltn.Domain(number_of_features-1, label="part_predicted_objects_for_"+t)
    w2[t] = ltn.Domain(number_of_features - 1, label="whole_predicted_objects_for_" + t)
    p2[t] = ltn.Domain(number_of_features - 1, label="part_predicted_objects_for_" + t)
    p1w1[t] = ltn.Domain((number_of_features-1)*2+2, label="potential_part_whole_object_pairs_for_"+t)
    p2w2[t] = ltn.Domain((number_of_features-1)*2+2, label="potential_whole_part_object_pairs_for_"+t)
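
# A minimal, hypothetical sketch (not part of the original snippet): with the
# pair domains defined above, ontology axioms become additional clauses. For
# example, asymmetry of partOf (partOf(p, w) implies not partOf(w, p)) can be
# written over p0w0 and w0p0, assuming w0p0 is fed the same pairs as p0w0 with
# part and whole swapped:
clause_for_asymmetry_of_partOf = [
    ltn.Clause([
        ltn.Literal(False, isPartOf, p0w0),
        ltn.Literal(False, isPartOf, w0p0)
    ], label="asymmetry_of_partOf", weight=1.0)
]
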
Example #3
data_training_dir = "data/train"
data_testing_dir = "data/test"
ontology_dir = "data/ontology"

types = np.genfromtxt(os.path.join(ontology_dir, "classes.csv"),
                      dtype="S",
                      delimiter=",")
predicates = np.genfromtxt(os.path.join(ontology_dir, "predicates.csv"),
                           dtype="S",
                           delimiter=",")
selected_types = types[1:]
selected_predicates = predicates
number_of_features = len(types) + 4
number_of_extra_features = 7
objects = ltn.Domain(number_of_features, label="a_bounding_box")
pairs_of_objects = ltn.Domain(2 * number_of_features +
                              number_of_extra_features,
                              label="a_pair_of_bounding_boxes")

import inspect


def is_of_type(obj_type, features):
    # take the column of `features` at index `obj_type` (one value per row)
    return tf.slice(features, [0, obj_type], [tf.shape(features)[0], 1])


isOfType = {}
isInRelation = {}
for t_idx, t in enumerate(selected_types):
    t_p = np.where(selected_types == t)[0][0] + 1
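    # The snippet is cut off here; a plausible (assumed, not original)
    # continuation creates a type predicate for each class, following the
    # ltn.Predicate(label, domain) pattern used in the other examples (the
    # label prefix and the decoding of the byte strings read by np.genfromtxt
    # are assumptions):
    isOfType[t] = ltn.Predicate("is_of_type_" + t.decode("utf-8"), objects)

# relation predicates over pairs of bounding boxes (assumed, not original)
for p in selected_predicates:
    isInRelation[p] = ltn.Predicate("is_in_relation_" + p.decode("utf-8"),
                                    pairs_of_objects)
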
Example #4
    selected_types = np.array([
        'bottle', 'body', 'cap', 'pottedplant', 'plant', 'pot', 'tvmonitor',
        'screen', 'chair', 'sofa', 'diningtable'
    ])
if config.DATASET == 'animal':
    selected_types = np.array([
        'person', 'arm', 'ear', 'ebrow', 'foot', 'hair', 'hand', 'mouth',
        'nose', 'eye', 'head', 'leg', 'neck', 'torso', 'cat', 'tail', 'bird',
        'animal_wing', 'beak', 'sheep', 'horn', 'muzzle', 'cow', 'dog',
        'horse', 'hoof'
    ])
if config.DATASET == 'all':
    selected_types = types[1:]

# Domain containing the objects
objects = ltn.Domain(number_of_features, label="a_bounding_box")

# Domain containing pairs of objects. They get two additional overlap features.
pairs_of_objects = ltn.Domain(2 * number_of_features + 2,
                              label="a_pair_of_bounding_boxes")

# Create type predicates acting on the objects domain
isOfType = {}
labelOfType = {}
mutExclType = ltn.MutualExclusivePredicates('is_of_type_', len(selected_types),
                                            objects, config.MUT_EXCL_LAYERS)
for i in range(mutExclType.amt_predicates):
    isOfType[selected_types[i]] = mutExclType.predicates[i]
    labelOfType[selected_types[i]] = i

# Create partOf predicate acting on the pairs of objects domain
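# The fragment is cut off here. A minimal sketch of what the comment above
# describes (the predicate label is an assumption): a single binary predicate
# over the pairs-of-objects domain.
isPartOf = ltn.Predicate("is_part_of", pairs_of_objects)
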
data_embeddings = {
    "horse": [1., 0.],
    "cow": [-1., 1.],
    "donkey": [1., 0.],
    "pony": [.8, 0.],
    "bw": [0., 1.],
    "lion": [-1., -1.],
    "zebra": None
}
predicates = {}
constants = {}
clauses = {}

embedding_dimension = 2

word = ltn.Domain(embedding_dimension, label="Word")

for w, embedding in data_embeddings.items():
    p = ltn.Predicate(w, word)
    predicates[w] = p
    c = ltn.Constant(w, domain=word, value=embedding)
    constants[w] = c
    clause_label = "%s_%s" % (w, w)
    clauses[clause_label] = ltn.Clause([ltn.Literal(True, p, c)],
                                       label=clause_label)

#all_words = ltn.Domain_union( constants.values())

clauses["notZorH"] = ltn.Clause([
    ltn.Literal(False, predicates["zebra"], word),
    ltn.Literal(True, predicates["horse"], word)
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

ltn.default_layers = 4
ltn.default_smooth_factor = 1e-10
ltn.default_tnorm = "luk"
ltn.default_aggregator = "min"
ltn.default_optimizer = "rmsprop"
ltn.default_clauses_aggregator = "min"
ltn.default_positive_fact_penality = 0

number_of_features = 3

person = ltn.Domain(number_of_features, label="Person")

couple = ltn.Domain(number_of_features * 2, label="Couple")

A = ltn.Predicate("A", person)
B = ltn.Predicate("B", person)
R = ltn.Predicate("R", couple)

a1_features = [1.0, 0.0, 0.0]
a2_features = [0.7, 0.0, 0.2]
a3_features = [0.9, 0.2, 0.1]

b1_features = [0.0, 1.0, 1.0]
b2_features = [0.1, 0.8, 0.7]
b3_features = [0.3, 1.0, 0.8]
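
# A minimal, hypothetical sketch (not part of the original snippet) of how the
# feature vectors above are typically grounded, reusing the ltn.Constant /
# ltn.Literal / ltn.Clause pattern from the word-embedding fragment; the names
# a1, b1 and the clause labels are assumptions:
a1 = ltn.Constant("a1", domain=person, value=a1_features)
b1 = ltn.Constant("b1", domain=person, value=b1_features)
clause_A_a1 = ltn.Clause([ltn.Literal(True, A, a1)], label="A_of_a1")
clause_B_b1 = ltn.Clause([ltn.Literal(True, B, b1)], label="B_of_b1")
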
ltn.default_optimizer = set_ltn_variable(config, "optimizer",
                                         ltn.default_optimizer)
ltn.default_clauses_aggregator = set_ltn_variable(
    config, "clauses_aggregator", ltn.default_clauses_aggregator)
ltn.default_positive_fact_penality = set_ltn_variable(
    config, "positive_fact_penalty", ltn.default_positive_fact_penality)
ltn.default_norm_of_u = set_ltn_variable(config, "norm_of_u",
                                         ltn.default_norm_of_u)
ltn.default_epsilon = set_ltn_variable(config, "epsilon", ltn.default_epsilon)
if args.type is not None:
    ltn.default_type = args.type
else:
    ltn.default_type = set_ltn_variable(config, "type", ltn.default_type)

# create conceptual space
conceptual_space = ltn.Domain(config["num_dimensions"],
                              label="ConceptualSpace")

# prepare classification rules: labeled data points need to be classified correctly
pos_examples = {}
neg_examples = {}
for label in config["concepts"]:
    pos_examples[label] = []
    neg_examples[label] = []

for labels, vec in config["training_vectors"]:
    for label in labels:
        # classify under correct labels
        pos_examples[label].append(vec)

    # don't classify under incorrect label (pick a random one)
    possible_negative_labels = list(set(config["concepts"]) - set(labels))
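    # The snippet ends here. A plausible (assumed, not original) continuation,
    # matching the comment above: pick one of the remaining labels at random
    # and record the vector as a negative example for it (assumes
    # `import random` at the top of the file).
    if possible_negative_labels:
        neg_examples[random.choice(possible_negative_labels)].append(vec)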