Code example #1
File: ILPformatSiena.py   Project: rootcanal/August
        print(problem)

        # Load the cached parse for problem k and build candidate quantity sets
        # from its sentences.
        story = utils.read_parse(k)
        sets = makesets.makesets(story['sentences'])
        EF.main(sets, k, a[k], sys.argv[1])
        # Keep only sets whose quantity is a number or the unknown 'x'.
        sets = [
            x for x in sets if makesets.floatcheck(x[1].num) or x[1].num == 'x'
        ]
        print(sets)
        for z in sets:
            z[1].details()


if __name__ == "__main__":
    #q, a = sys.argv[1:3]
    inp = sys.argv[1]
    q, a, e = utils.parse_inp(inp)
    VERBOSE = False
    TRAIN = False
    '''
    if len(sys.argv)>3:
        if sys.argv[3]=='v':
            VERBOSE=True
        elif sys.argv[3]=='t':
            TRAIN = True
            OUT = sys.argv[4]
    '''
    # q = q[-10:]
    # a = a[-10:]
    make_eq(q, a, VERBOSE, TRAIN)
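
The filter in this snippet relies on makesets.floatcheck, whose implementation is not shown here. A minimal sketch of such a check, assuming it only tests whether a value parses as a float (that assumption is mine, not the project's actual code), together with a toy version of the filter on plain (description, value) tuples instead of the project's container objects:

def floatcheck(s):
    # Hypothetical stand-in for makesets.floatcheck: True if s parses as a float.
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False

sets = [("two apples", "2"), ("some pears", "x"), ("the rest", "?")]
kept = [x for x in sets if floatcheck(x[1]) or x[1] == "x"]
print(kept)  # [('two apples', '2'), ('some pears', 'x')]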
Code example #2
# This code splits a dataset of the form:
#       Question
#       Equation
#       Answer
# into 5 randomly split folds in the data directory
import sys
import random

import utils


if __name__ == '__main__':
    # Only the question list is needed to size the split; the answers and
    # equations are parsed but unused here.
    q, aas, ees = utils.parse_inp(sys.argv[1])
    idx = list(range(len(q)))
    random.shuffle(idx)
    fold = len(q) // 5
    # Write the first four folds; indices are stored 1-based, one per line.
    for i in range(4):
        fn = "data/indexes-1-fold-" + str(i) + ".txt"
        thisfold = idx[i * fold: (i + 1) * fold]
        with open(fn, 'w') as f:
            for x in thisfold:
                f.write(str(x + 1) + "\n")
    # The fifth fold takes whatever remains, including the leftover items when
    # len(q) is not divisible by 5.
    lastfold = idx[4 * fold:]
    fn = "data/indexes-1-fold-4.txt"
    with open(fn, 'w') as f:
        for x in lastfold:
            f.write(str(x + 1) + "\n")
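
The splitter only writes the 1-based index files; building a train/test split from them is left to other scripts. A sketch of how one fold could be held out, reusing utils.parse_inp from the project and the file naming pattern above (everything else is an assumption):

import sys

import utils


def load_fold(i):
    # Read the 1-based indices written by the splitter and convert to 0-based.
    with open("data/indexes-1-fold-" + str(i) + ".txt") as f:
        return [int(line) - 1 for line in f if line.strip()]


if __name__ == '__main__':
    q, aas, ees = utils.parse_inp(sys.argv[1])
    held_out = set(load_fold(int(sys.argv[2])))  # fold number 0..4
    test = [q[i] for i in sorted(held_out)]
    train = [q[i] for i in range(len(q)) if i not in held_out]
    print(len(train), "train /", len(test), "test questions")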
Code example #3
File: train_local.py   Project: rootcanal/August
                        compound = [substr]+compound[3:]
                    if True:
                        p, op, e = subeq
                        p = objs[p]
                        e = objs[e]
                        op = op.strip()
                        trips.append((op, p, e))
                        pute = (0, makesets.combine(p[1], e[1], op))
                        objs[substr] = pute
                    if pute == -1:
                        exit()
            # Gather per-operator training examples for this problem and
            # accumulate them across problems.
            t = training(trips, problem, story, target)
            for op in t:
                bigtexamples[op][0].extend(t[op][0])
                bigtexamples[op][1].extend(t[op][1])
    # Persist the accumulated per-operator training data for this fold.
    with open('data/' + sys.argv[1][-1] + ".local.training", 'wb') as f:
        pickle.dump(bigtexamples, f)


eqsdir = "ILP.out"


if __name__ == "__main__":
    #q, a = sys.argv[1:3]
    inp = sys.argv[1]
    #eqsdir = sys.argv[2]
    makesets.FOLD = sys.argv[1][-1]  # fold id is the last character of the input path
    q, a, e = utils.parse_inp(inp)

    make_eq(q, a, e)
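
The script ends by pickling bigtexamples, a dict that maps each operator to a pair of parallel lists. Reading that file back is straightforward; a minimal sketch that only loads it and reports what was collected (the fold suffix follows the same sys.argv[1][-1] convention as above; how the project actually consumes this data is not shown in this snippet):

import pickle
import sys

if __name__ == "__main__":
    fold = sys.argv[1][-1]  # same convention as train_local.py
    with open("data/" + fold + ".local.training", "rb") as f:
        bigtexamples = pickle.load(f)
    for op, (xs, ys) in bigtexamples.items():
        # Each operator maps to two parallel lists (presumably features and labels).
        print(op, len(xs), "/", len(ys), "entries")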
Code example #4
File: makesets.py   Project: rootcanal/August
    return sets


def bug():
    # Tiny interactive debug loop: exec whatever is typed until the user
    # enters 0 (input() returns a string, so compare against "0").
    print("bug")
    ip = 0
    while ip == 0:
        inp = input()
        if inp == "0":
            ip = 1
        else:
            exec(inp)


if __name__ == "__main__":
    q, a, e = utils.parse_inp(sys.argv[1])
    wps = q
    # Interactive driver: list the problems, pick one by index, and print the
    # candidate sets built from its Stanford parse.
    while True:
        for i in range(len(q)):
            print(i, q[i])
        k = input()
        k = int(k)
        problem = wps[k].lower()
        print(problem)
        story = utils.parse_stanford_nlp(problem)
        sets = makesets(story["sentences"])
        for s in sets:
            s[1].details()
        input()
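
bug() above is a hand-rolled exec loop for poking at program state. The standard library offers the same thing with less ceremony; an equivalent sketch (not how the project does it) drops into code.interact over a chosen namespace:

import code


def bug(local_vars=None):
    # Open an interactive console over the given namespace; exit with Ctrl-D.
    print("bug")
    code.interact(local=local_vars or {})

# e.g. call bug(locals()) inside the loop above instead of the exec-based version.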