Example #1
0
"""Set up an LTN (Logic Tensor Networks) experiment: configure the network
hyper-parameters and load the gold-standard relation data plus the
pre-computed train/test splits."""
import logging
import os

import pandas as pd  # was used below but never imported in the original fragment
import tensorflow as tf

from keras import layers, regularizers
from keras.layers import Input
from keras.models import Model

import ltns.logictensornetworks as ltn
import ltns.logictensornetworks_wrapper as ltnw

# Configure root-logger verbosity once at startup.  The original assigned
# logging.basicConfig(...)'s return value (None) onto the logger object,
# which configured the root logger only as a side effect and left a useless
# `basicConfig` attribute on it; calling basicConfig directly is the idiom.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()

ltn.LAYERS = 10          # number of layers in each LTN predicate network
embedding_size = 10      # dimensionality of the entity embedding space

ltn.BIAS_factor = -1e-5
# "hmean": harmonic mean as the universal-quantifier aggregation
ltn.set_universal_aggreg("hmean")

# Gold-standard closed sets of (first, second, type) relation triples,
# located one directory above the current working directory.
closed_pa = pd.read_csv(os.getcwd() + "/../gold_standard/closed_pa",
                        names=["first", "second", "type"])
closed_an = pd.read_csv(os.getcwd() + "/../gold_standard/closed_an",
                        names=["first", "second", "type"])

# Pre-computed train/test splits.  (An earlier revision sampled 100 training
# rows from the closed sets and used the complement as the test set; the
# fixed splits below replace that.)
training_pa = pd.read_csv("training_testing/training_pa")
training_an = pd.read_csv("training_testing/training_an")
test_pa = pd.read_csv("training_testing/test_pa")
Example #2
0
                        help="", type=str)

    args = parser.parse_args()
    biases = [-1e-8, -1e-5, -1e-1]

    lr = args.lr
    dc = args.dc
    ly = args.ly
    embedding_size = args.em

    bi = biases[args.bi]
    iter_epoch = args.iter

    ltn.LAYERS = ly
    ltn.BIAS_factor = bi
    ltn.set_universal_aggreg(args.univ)

    folder_name = "ancestors/reasoning_results/"

    entities = ["sue", "diana", "john", "edna", "paul", "francis", "john2",
                "john3", "john4", "joe", "jennifer", "juliet", "janice",
                "joey", "tom", "bonnie", "katie"]

    parents = [
        ("sue", "diana"),
        ("john", "diana"),
        ("sue", "bonnie"),
        ("john", "bonnie"),
        ("sue", "tom"),
        ("john", "tom"),
        ("diana", "katie"),
Example #3
0
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-const', '--const', help="", type=int)
    parser.add_argument('-pred', '--pred', help="", type=int)
    parser.add_argument('-card', '--card', help="", type=int)

    args = parser.parse_args()

    pred = args.pred
    const = args.const
    card = args.card

    ltn.LAYERS = 4
    ltn.BIAS_factor = -1e-8
    ltn.set_universal_aggreg("mean")  # The truth value of forall x p(x) is
    # interpretable as the percentage of
    # element in the range of x that satisties p

    embedding_size = 4  # embedding space dimensionality

    pred_name = "pred"
    const_name = "const"

    predicates = list(map(lambda x: pred_name + str(x), range(1, 100)))
    constants = list(map(lambda x: const_name + str(x), range(1, 100)))

    for l in constants[:const]:
        print(l)
        ltnw.constant(l,
                      min_value=[0.] * embedding_size,