Example #1
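A unit test that saves a DeepProbLog model's state to disk, zeroes every parameter, and verifies that load_state restores both the learnable probabilities and the network weights. The snippet assumes import torch and from torch.autograd import Variable, plus the repository's FC, Network and Model classes.
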
    def test_save_load(self):
        # A program with one neural predicate (fc) and one learnable probability.
        model_string = "nn(fc,[X,Y],Z,[0,1]) :: a(X,Y,Z).\nt(0.5)::b."
        fc = FC(10, 2)
        # The network function is stubbed out to return a fixed distribution.
        net = Network(fc, 'fc',
                      lambda a, b, c: Variable(torch.FloatTensor([0.2, 0.8])))
        model = Model(model_string, [net])
        model.save_state('test_save.mdl')
        # Zero out the learnable probabilistic parameters, remembering the originals.
        orig_params = dict()
        for p in model.parameters:
            orig_params[p] = model.parameters[p]
            model.parameters[p] = 0

        # Likewise zero out the network weights, keeping clones of the originals.
        original_net_params = list()
        for i, param in enumerate(fc.parameters()):
            original_net_params.append(param.data.clone())
            param.data.zero_()

        # Reloading must restore both the probabilistic parameters and the weights.
        model.load_state('test_save.mdl')
        for p in model.parameters:
            self.assertEqual(model.parameters[p], orig_params[p])

        for i, param in enumerate(fc.parameters()):
            self.assertTrue(torch.equal(param.data, original_net_params[i]))
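
FC itself never appears in these excerpts. Its call sites (FC(10, 2) here, FC(20, 2) and FC(30, 25, 10) below) suggest a small fully connected network built from a variadic list of layer sizes, ending in a softmax so its output can be read as a probability distribution. A minimal sketch consistent with that usage follows; the class body is an assumption, not the repository's actual definition.

import torch.nn as nn


class FC(nn.Module):
    # Hypothetical stand-in: a fully connected net over variadic layer sizes.
    def __init__(self, *sizes):
        super(FC, self).__init__()
        layers = []
        for i in range(len(sizes) - 2):
            layers.append(nn.Linear(sizes[i], sizes[i + 1]))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(sizes[-2], sizes[-1]))
        # Outputs are interpreted as probabilities by the neural predicate.
        layers.append(nn.Softmax(dim=1))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)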
Example #2
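Training script for the neural sorting example: the network decides whether two digits should be swapped, and the ProbLog program in quicksort.pl performs the actual sorting. load, FC, Network, Model, Optimizer and train_model are assumed to be imported from the surrounding DeepProbLog code; train and test are dataset-size parameters set earlier in the script.
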
train_queries = load('data/train{}_test{}_train.txt'.format(train, test))
test_queries = load('data/train{}_test{}_test.txt'.format(train, test))


def neural_pred(network, i1, i2):
    # One-hot encode the digit pair into a single 20-dimensional input:
    # positions 0-9 hold the first digit, positions 10-19 the second.
    d = torch.zeros(20)
    d[int(i1)] = 1.0
    d[int(i2) + 10] = 1.0
    d = torch.autograd.Variable(d.unsqueeze(0))
    output = network.net(d)
    return output.squeeze(0)


fc1 = FC(20, 2)
adam = torch.optim.Adam(fc1.parameters(), lr=1.0)
swap_net = Network(fc1, 'swap_net', neural_pred, optimizer=adam)

# with open('compare.pl') as f:
with open('quicksort.pl') as f:
    problog_string = f.read()

model = Model(problog_string, [swap_net])
optimizer = Optimizer(model, 32)

train_model(model,
            train_queries,
            20,
            optimizer,
            test_iter=len(train_queries),
            test=lambda x: Model.accuracy(x, test_queries, test=True))
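
The third positional argument of train_model (20 here) is presumably the number of epochs; with test_iter equal to the training-set size, accuracy on test_queries is reported roughly once per pass over the data.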
Example #3
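Excerpt from an example that encodes a word problem with a shared RNN (np1) and feeds the encoding to four classifier heads (np2). The snippet begins mid-function, so the header of np1 below is a reconstruction; tokenize, vocab, rnn and network1 through network4 are defined earlier in the original script.
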
def np1(net, sentence):
    # The header and this cache check are reconstructed: the source snippet
    # begins mid-function. Reuse the cached output when the same sentence is
    # queried again (see networks[0].last below).
    if net.last[0] == str(sentence):
        return net.last[1]
    tokenized, numbers, indices = tokenize(str(sentence).strip('"'))
    # One-hot encode the token sequence for the RNN: shape (seq_len, 1, |vocab|).
    data = torch.zeros(len(tokenized), 1, len(vocab))
    for i, t in enumerate(tokenized):
        data[i, 0, t] = 1.0
    outputs = net.net(Variable(data), *indices)
    # Cache the output so repeated queries on one sentence encode it only once.
    net.last = (str(sentence), outputs)
    return outputs


def np2(net, id):
    # Encode the problem with the shared RNN, then apply this head's classifier.
    representation = np1(networks[0], id)
    outputs = net.net(representation)
    return outputs.squeeze(0)


networks = [
    Network(rnn, 'nn_rnn', np1),
    Network(network1, 'nn_permute', np2),
    Network(network2, 'nn_op1', np2),
    Network(network3, 'nn_swap', np2),
    Network(network4, 'nn_op2', np2)
]
networks[0].last = ('', None)

networks[0].optimizer = optim.Adam(rnn.parameters(), lr=0.02)
networks[1].optimizer = optim.Adam(network1.parameters(), lr=0.02)
networks[2].optimizer = optim.Adam(network2.parameters(), lr=0.02)
networks[3].optimizer = optim.Adam(network3.parameters(), lr=0.02)
networks[4].optimizer = optim.Adam(network4.parameters(), lr=0.02)
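
Routing every head through np1(networks[0], id) and caching the result in networks[0].last means consecutive requests for the same sentence reuse one RNN encoding, even though nn_permute, nn_op1, nn_swap and nn_op2 all ask for the same representation.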
Example #4
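Two cooperating networks learn single-digit addition with carry. The opening of this snippet is truncated in the source: the with open(...) statement that binds f, and the loading of train_queries and test_queries, happen before the excerpt begins.
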
    problog_string = f.read()


def neural_pred(network, i1, i2, carry):
    # One-hot encode the two digits and the carry into one 30-dimensional input:
    # positions 0-9 first digit, 10-19 second digit, 20-29 carry bit.
    d = torch.zeros(30)
    d[int(i1)] = 1.0
    d[int(i2) + 10] = 1.0
    d[int(carry) + 20] = 1.0
    d = torch.autograd.Variable(d.unsqueeze(0))
    outputs = network.net(d)
    return outputs.squeeze(0)


# net1 has 10 outputs (presumably the sum digit); net2 has 2 (presumably the carry-out).
net1 = FC(30, 25, 10)
network1 = Network(net1, 'neural1', neural_pred)
network1.optimizer = torch.optim.Adam(net1.parameters(), lr=0.05)

net2 = FC(30, 5, 2)
network2 = Network(net2, 'neural2', neural_pred)
network2.optimizer = torch.optim.Adam(net2.parameters(), lr=0.05)

model = Model(problog_string, [network1, network2], caching=False)
optimizer = Optimizer(model, 32)
logger = train_model(model,
                     train_queries,
                     40,
                     optimizer,
                     test_iter=len(train_queries) * 4,
                     test=lambda x: x.accuracy(test_queries, test=True))
print("hello!")
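
Note the two accuracy idioms across these examples: Example #2 calls the static form Model.accuracy(x, test_queries, test=True), while this example calls the bound method x.accuracy(test_queries, test=True); both evaluate the current model on the held-out queries.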