# --- Example 1 ---
def constant(label, *args, **kwargs):
    """Return the LTN constant registered under `label`, creating it on demand.

    Behaviour:
      * `label` cached and called with no extra arguments -> return the cache hit.
      * `label` cached and called WITH arguments -> a redeclaration:
        raises if CONFIGURATION["error_on_redeclare"] is truthy, otherwise
        logs a warning and rebuilds the constant.
      * `label` unknown -> build via ltn.constant and cache it.

    Relies on the module-level CONSTANTS dict and CONFIGURATION mapping.
    """
    if label in CONSTANTS:
        if not args and not kwargs:
            # Plain lookup: hand back the cached constant.
            return CONSTANTS[label]
        if CONFIGURATION.get("error_on_redeclare"):
            # Lazy %-style args keep formatting out of the hot path.
            logging.getLogger(__name__).error(
                "Attempt at redeclaring existing constant %s", label)
            raise Exception("Attempt at redeclaring existing constant %s" % label)
        # logger.warn is a deprecated alias; warning() is the supported name.
        logging.getLogger(__name__).warning(
            "Redeclaring existing constant %s", label)
    CONSTANTS[label] = ltn.constant(label, *args, **kwargs)
    return CONSTANTS[label]
# --- Example 2 ---
        ]
        self.dense_class = tf.keras.layers.Dense(n_classes)

    def call(self, inputs):
        """Forward pass: run inputs through every hidden dense layer in
        order, then project with the final classification layer."""
        hidden = inputs
        for layer in self.denses:
            hidden = layer(hidden)
        return self.dense_class(hidden)


# Wrap the logits network in an LTN predicate (single_label=False here,
# so the predicate model is built in its multi-label configuration).
logits_model = MLP(4)
p = ltn.Predicate(
    ltn.utils.LogitsToPredicateModel(logits_model, single_label=False))

# Constants to index the classes (0..3, in this fixed order)
class_male, class_female, class_blue, class_orange = (
    ltn.constant(i) for i in range(4))

# ### Axioms
#
# ```
# forall x_blue: C(x_blue,blue)
# forall x_orange: C(x_orange,orange)
# forall x_male: C(x_male,male)
# forall x_female: C(x_female,female)
# forall x: ~(C(x,male) & C(x,female))
# forall x: ~(C(x,blue) & C(x,orange))
# ```
    parser.add_argument('--epochs', type=int, default=1000)
    args = parser.parse_args()
    dict_args = vars(args)
    return dict_args


# Run-time configuration (parse_args is defined earlier in this file).
args = parse_args()
EPOCHS = args['epochs']
csv_path = args['csv_path']

# Language

embedding_size = 10


def _random_constant():
    # One trainable constant, uniformly initialised in [0, 1)^embedding_size.
    return ltn.constant(
        np.random.uniform(low=0.0, high=1.0, size=embedding_size),
        trainable=True)


# Two groups of individuals, one grounded embedding constant each.
g1 = {name: _random_constant() for name in 'abcdefgh'}
g2 = {name: _random_constant() for name in 'ijklmn'}
g = {**g1, **g2}

# Trainable predicates over the embedding space (Friends is binary).
Smokes = ltn.Predicate.MLP([embedding_size], hidden_layer_sizes=(16, 16))
Friends = ltn.Predicate.MLP([embedding_size, embedding_size],
                            hidden_layer_sizes=(16, 16))
Cancer = ltn.Predicate.MLP([embedding_size], hidden_layer_sizes=(16, 16))

friends = [('a', 'b'), ('a', 'e'), ('a', 'f'), ('a', 'g'), ('b', 'c'),
# positive and negative example tensors, one per relation
pxy = [lxy, rxy, bxy, axy, cxy, ixy]
npxy = [nlxy, nrxy, nbxy, naxy, ncxy, nixy]

# show the dimensions of the positive-example tensors
for xy in pxy:
    print(xy.name, xy.shape)

# variables ranging over single rectangles (size-4 inputs)
x, y, z = (ltn.variable(name, 4) for name in ("x", "y", "z"))

# a fixed rectangle and a set of rectangles used to show the results
ct = ltn.constant("ct", [.5, .5, .3, .3])
t = ltn.variable("t", tf.cast(bbst, tf.float32))

# relational predicates over pairs of rectangles (4 + 4 = 8 inputs)
L, R, B, A, C, I = (
    ltn.predicate(name, 8)
    for name in ("left", "right", "below", "above", "contains", "in"))

P = [L, R, B, A, C, I]

# the inverse relation of each predicate in P, position for position
inv_P = [R, L, A, B, I, C]
# --- Example 5 ---
        self.dropout = tf.keras.layers.Dropout(0.2)

    def call(self, inputs, training=False):
        """Forward pass: every hidden dense layer is followed by dropout
        (active only when training=True), then the classification head."""
        hidden = inputs
        for layer in self.denses:
            hidden = self.dropout(layer(hidden), training=training)
        return self.dense_class(hidden)


# Wrap the logits network in an LTN predicate (single_label=True here).
logits_model = MLP(4)
p = ltn.Predicate(
    ltn.utils.LogitsToPredicateModel(logits_model, single_label=True))

# Constants to index/iterate on the classes (0, 1, 2)
class_A, class_B, class_C = (ltn.constant(i) for i in range(3))

# Fuzzy connectives and the universal quantifier used by the axioms
Not = ltn.Wrapper_Connective(ltn.fuzzy_ops.Not_Std())
And = ltn.Wrapper_Connective(ltn.fuzzy_ops.And_Prod())
Or = ltn.Wrapper_Connective(ltn.fuzzy_ops.Or_ProbSum())
Implies = ltn.Wrapper_Connective(ltn.fuzzy_ops.Implies_Reichenbach())
Forall = ltn.Wrapper_Quantifier(
    ltn.fuzzy_ops.Aggreg_pMeanError(p=2), semantics="forall")

# p-mean-error aggregator (presumably combines the axioms' truth values
# into one satisfaction level — confirm at the usage site)
formula_aggregator = ltn.fuzzy_ops.Aggreg_pMeanError(p=2)


@tf.function
nloc = ltn.variable("not_location_data", emb_size)

# positive and negative example tensors, one per relation
pxy = [act, mov, loc]
npxy = [nact, nmov, nloc]

# show the dimensions of the positive-example tensors
for example in pxy:
    print(example.name, example.shape)

# variable ranging over single embeddings

w = ltn.variable("w", emb_size)

# a constant and a variable used to show results after training

ct = ltn.constant("ct", actor_data[0].astype('float32'))
t = ltn.variable("t", tf.cast(actor_data[1:], tf.float32))

# relational predicates over pairs of embeddings (emb_size * 2 inputs)
A, M, L = (
    ltn.Predicate(name, emb_size * 2).ground
    for name in ("actor", "movement", "location"))

P = [A, M, L]

n_pred = len(P)
# inv_P = [R,L,A,B,I,C]

# constraints/axioms
# --- Example 7 ---
import matplotlib.pyplot as plt

# loading data

# 500 random 2-D points, uniform in the square [-1, 1]^2
data = np.random.uniform(
    [-1, -1],
    [1, 1],
    (500, 2),
).astype(np.float32)

# defining the language

x = ltn.variable("x", data)
y = ltn.variable("y", data)

a = ltn.constant("a", [0.5, 0.5])
# Fix: this constant was declared with the label "x", which collides with
# the variable x defined above; the label now matches its Python name.
b = ltn.constant("b", [-0.5, -0.5])

A = ltn.predicate("A", 2)
B = ltn.predicate("B", 2)
# satisfiability of the whole theory (facts plus the A -> B axiom)
T = And(A(a), B(b), Not(A(b)), Forall(x, Implies(A(x), B(x))))

# start a TF1 session and maximise T by gradient descent on -T

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(-T)

# optimize the satisfiability of T
# synthetic 1-D regression data: y = slope * x + Gaussian noise
slope = 1.
var = 0.001
epochs = 1000

train_X = np.random.uniform(start, end, (training_size)).astype("float32")
noise = np.random.normal(scale=var, size=len(train_X))
train_Y = slope * train_X + noise

# trainable parameters of the fitted line
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")

def apply_fun(X):
    """Linear model W * X + b, built from TF ops on the module-level
    variables W and b."""
    scaled = tf.multiply(X, W)
    return tf.add(scaled, b)


# one grounded LTN constant per training sample
c_x = [ltn.constant("x_%s" % idx, [val]) for idx, val in enumerate(train_X)]
c_y = [ltn.constant("y_%s" % idx, [val]) for idx, val in enumerate(train_Y)]

# f is the learnable function; eq compares its output to the target
f = ltn.function("f", 1, 1, fun_definition=apply_fun)
eq = ltn.predicate("equal", 2, lambda u, v: ltnl.equal_euclidian(u, v))

# one fact per sample: f(x_i) should equal y_i; maximise their mean truth
facts = [eq(f(cx), cy) for cx, cy in zip(c_x, c_y)]
cost = -tf.reduce_mean(tf.stack(facts))

sess = tf.Session()
opt = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(cost)

init = tf.global_variables_initializer()
sess.run(init)
for i in range(epochs):
# --- Example 9 ---
def plt_heatmap(df):
    """Draw `df` as a colour map, labelling ticks with its index/columns
    and adding a colorbar (relies on the module-level plt and np)."""
    plt.pcolor(df)
    rows, cols = df.index, df.columns
    plt.yticks(np.arange(0.5, len(rows), 1), rows)
    plt.xticks(np.arange(0.5, len(cols), 1), cols)
    plt.colorbar()



# Show floats with two decimal places. 'display.precision' is the full
# option name; the bare alias 'precision' was deprecated by pandas and
# later removed, so the short form breaks on current versions.
pd.set_option('display.precision', 2)

# LTN hyperparameters
ltn.LAYERS = 4
ltn.BIAS_factor = 1e-7
ltn.set_universal_aggreg("mean")

size = 20
# one randomly-initialised size-20 embedding constant per individual,
# split into two groups of people
g1 = {l: ltn.constant(l, min_value=[0.] * size, max_value=[1.] * size) for l in 'abcdefgh'}
g2 = {l: ltn.constant(l, min_value=[0.] * size, max_value=[1.] * size) for l in 'ijklmn'}
g = {**g1, **g2}


# ground-truth facts over the individuals a..n
friends = [tuple(pair) for pair in (
    'ab', 'ae', 'af', 'ag', 'bc', 'cd', 'ef', 'gh',
    'ij', 'jm', 'kl', 'mn')]
smokes = list('aefgjn')
cancer = list('ae')


# variables ranging over every individual (p, q) and over group 1 only (p1, q1)
p, q = (ltn.variable(name, tf.concat(list(g.values()), axis=0))
        for name in ("p", "q"))

p1, q1 = (ltn.variable(name, tf.concat(list(g1.values()), axis=0))
          for name in ("p1", "q1"))