Example #1
:~ nn_edge(X). [1, X]
'''
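# The weak constraint ":~ nn_edge(X). [1, X]" above adds a cost of 1 for each X
# with nn_edge(X) true, so stable models that select fewer edges are preferred.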


########
# Construct nnMapping, set optimizers, and initialize DeepLPMLN object
########

m = FC(40, 50, 50, 50, 50, 50, 24)  # fully connected net: 40 inputs, five 50-unit hidden layers, 24 outputs
nnMapping = {'m': m}
optimizers = {'m': torch.optim.Adam(m.parameters(), lr=0.001)}

# all four constraints: remove_con + path_con + reach_con + opt_con
# dlpmlnObj = DeepLPMLN(nnRule+aspRule+remove_con+path_con+reach_con+opt_con, nnMapping, optimizers)
# path_con + reach_con + opt_con
dlpmlnObj = DeepLPMLN(nnRule+aspRule+path_con+reach_con+opt_con, nnMapping, optimizers)
# path_con + reach_con
# dlpmlnObj = DeepLPMLN(nnRule+aspRule+path_con+reach_con, nnMapping, optimizers)
# path_con only
# dlpmlnObj = DeepLPMLN(nnRule+aspRule+path_con, nnMapping, optimizers)


########
# Start training and testing on a list of different MVPP programs
########
mvppList = [
    remove_con,
    path_con,
    reach_con,
    remove_con + path_con,
    remove_con + reach_con,
    path_con + reach_con,
    remove_con + path_con + reach_con,
    remove_con + path_con + reach_con + opt_con,
]
mvppList = [aspRule + i for i in mvppList]

print('-------------------')
for idx, constraint in enumerate(mvppList):
    print('Constraint {} is\n{}\n-------------------'.format(idx+1, constraint))
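########
# Sketch only (not in the original excerpt): build and train one DeepLPMLN
# object per MVPP variant. dataList and obsList are assumed to be prepared
# elsewhere, in the same format as the later examples.
########
for idx, program in enumerate(mvppList):
    obj = DeepLPMLN(nnRule + program, nnMapping, optimizers)
    obj.learn(dataList=dataList, obsList=obsList, epoch=1)
    print('Finished training on constraint {}'.format(idx + 1))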
Example #2
mistake :- neural_coin(i, 0, HT1), random_coin(i, HT2), HT1!=HT2.
mistake :- neural_color(C, 0, C1), random_color(C, C2), C1!=C2.
'''
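# Both rules derive mistake whenever a neural prediction (HT1/C1) disagrees
# with the corresponding randomly drawn outcome (HT2/C2).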

########
# Define nnMapping and optimizers, initialize DeepLPMLN object
########
model1 = Net()
model2 = FC(3, 3)
optimizer1 = torch.optim.Adam(model1.parameters(), lr=0.001)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=1.0)

nnMapping = {'m1': model1, 'm2': model2}
optimizers = {'m1': optimizer1, 'm2': optimizer2}

dlpmlnObj = DeepLPMLN(dprogram2, nnMapping, optimizers)

########
# Define dataList, obsList, and the datasets used to test m1 and m2
########

dataList, obsList, m1dataset, m2dataset = fileToLists(
    './data/coinUrn_train.txt', trainLoader)

# print(trainLoader[0][0].size())
# print(trainLoader[0][1])
# sys.exit()

########
# Start training and testing
########
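########
# Sketch only: the excerpt ends at the banner above. A minimal training and
# testing loop, assuming the same learn/testNN interface as the other
# examples and that m1dataset/m2dataset are loaders testNN accepts.
########
import time

for epoch in range(5):  # epoch count chosen arbitrarily for this sketch
    time1 = time.time()
    dlpmlnObj.learn(dataList=dataList, obsList=obsList, epoch=1)
    dlpmlnObj.testNN('m1', m1dataset)  # test network m1
    dlpmlnObj.testNN('m2', m2dataset)  # test network m2
    print('--- epoch {}: {:.1f} seconds ---'.format(epoch + 1, time.time() - time1))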
Example #3
# Truncated in this excerpt; completed as the standard torchvision MNIST test
# loader (the dataset path '../data' is an assumption).
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))])),
    batch_size=1000, shuffle=True, **kwargs)

m = Net().to(device)
functions = {'m': m}
optimizers = {'m': torch.optim.Adam(m.parameters(), lr=0.001)}

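# Each dataList entry maps the neural atoms' terms i1 and i2 to a pair of
# MNIST images; the matching obsList entry is a constraint forcing the
# predicted sum of the two digits to equal the sum of their labels.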
dataList = []
obsList = []
for batch in train_loader:
    dataList.append({
        "i1": batch[0][0].view(1, 1, 28, 28),
        "i2": batch[0][1].view(1, 1, 28, 28)
    })
    obsList.append(":- not addition(i1, i2, {}).".format(batch[1][0] +
                                                         batch[1][1]))

# dataList = [{"i1":batch[0][0].view(1, 1, 28, 28), "i2":batch[0][1].view(1, 1, 28, 28)} for batch in train_loader]
# obsList = [":- not addition(i1, i2, {}).".format( batch[1][0]+batch[1][1]) for batch in train_loader]

dlpmlnObj = DeepLPMLN(dprogram, functions, optimizers)

for i in range(1):
    time1 = time.time()
    dlpmlnObj.learn(dataList=dataList, obsList=obsList, epoch=1)
    time2 = time.time()
    dlpmlnObj.testNN("m", test_loader)
    print("--- train time: %s seconds ---" % (time2 - time1))
    print("--- test time: %s seconds ---" % (time.time() - time2))
Example #4
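# Note: unlike Example #3, each dataList entry here nests the inputs under
# the network name "m" rather than mapping i1/i2 at the top level.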
for dataIdx, data in enumerate(train_loader):
    # print("This is data 1:",data[0][0])
    # print("This is data 2:", data[0][1])
    # print(data[0].shape)
    # sys.exit()
    dataList.append({
        "m": {
            "i1": data[0][0].view(1, 1, 28, 28),
            "i2": data[0][1].view(1, 1, 28, 28)
        }
    })
    obsList.append(":- not addition(i1, i2, {}).".format(data[1][0] +
                                                         data[1][1]))
    if dataIdx % 1000 == 0:
        obstxt += "addition(i1, i2, {}, {}).\n".format(data[1][0], data[1][1])
        obstxt += "#evidence\n"
        # if dataIdx == 10:
        # 	break

with open("evidence.txt", "w") as f:
    f.write(obstxt)

dlpmlnObj = DeepLPMLN(dprogram, nnDic, optimizer)

for i in range(2):
    print(i)
    dlpmlnObj.learn(dataList, obsList, 1)

    dlpmlnObj.test_nn("m", test_loader)

# print(dlpmlnObj.mvpp)
Example #5
dprogram_test = '''
% define k 
#const k = 7.

topk(k).
% we make a mistake if more than k items are chosen
mistake :- #sum{1, I : in(k,I,t)} > k.
'''

m = FC(10, *[50, 50, 50, 50, 50], 10)  # 10 inputs, five 50-unit hidden layers, 10 outputs

nnMapping = {'m': m}

optimizer = {'m': torch.optim.Adam(m.parameters(), lr=0.001)}

dlpmlnObj = DeepLPMLN(dprogram, nnMapping, optimizer)

dataset = KsData("data/data.txt", 10)
# print(dataset.train_labels.shape)
# print(dataset.train_labels[0])
# sys.exit()
dataList = []
obsList = []

for i, d in enumerate(dataset.train_data):
    d_tensor = Variable(torch.from_numpy(d).float(), requires_grad=False)
    dataList.append({"k": d_tensor})

with open("data/evidence_train.txt", 'r') as f:
    obsList = f.read().strip().strip("#evidence").split("#evidence")
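########
# Sketch only: the excerpt stops after loading the evidence. One learning
# call per epoch, following the learn signature used in the other examples.
########
for epoch in range(10):
    dlpmlnObj.learn(dataList=dataList, obsList=obsList, epoch=1)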
Example #6
add_test_dataloader = DataLoader(add_test_dataset, batch_size=4, shuffle=True)

carry_test_dataset = carry_test(test_size)

carry_test_dataloader = DataLoader(carry_test_dataset,
                                   batch_size=4,
                                   shuffle=True)

for i in range(train_size):

    obs, str_list = create_data_sample()

    dataList.append(format_dataList(obs, str_list))
    obsList.append(format_observations(obs, str_list))

dlpmlnObj = DeepLPMLN(dprogram, functions, optimizers, dynamicMVPP=True)
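# dynamicMVPP=True presumably tells DeepLPMLN to re-ground the MVPP program
# for each training sample, since the generated samples can vary in structure.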
# dlpmlnObj.device='cpu'

# print('k', dlpmlnObj.k)
# print('nnOutputs', dlpmlnObj.nnOutputs)
# print('functions', dlpmlnObj.functions)
# sys.exit()

print('training...')

# print(dataList[0])
# sys.exit()

for i in range(20):
    time1 = time.time()
    dlpmlnObj.learn(dataList=dataList, obsList=obsList, epoch=1)
Example #7
carry(P+1,X) :- num1(P, A), num2(P, B), carry(P, Carry), carry(A,B,Carry,0,X).
'''
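# The rule above propagates addition carries: the carry into position P+1 is
# computed from the digits A and B at position P and the incoming carry,
# looked up through the 5-ary carry table.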

########
# Define nnMapping and optimizers, initialize DeepLPMLN object
########

m1 = FC(30, 25, 10)  # network for adding the numbers
m2 = FC(30, 5, 2)  # network for finding the carry out

nnMapping = {'m1': m1, 'm2': m2}
optimizers = {
    'm1': torch.optim.Adam(m1.parameters(), lr=0.01),
    'm2': torch.optim.Adam(m2.parameters(), lr=0.01)
}
dlpmlnObj = DeepLPMLN(dprogram, nnMapping, optimizers)

########
# Start training and testing
########

startTime = time.time()
for i in range(5):
    print('Epoch {}...'.format(i + 1))
    time1 = time.time()
    dlpmlnObj.learn(dataList=dataList, obsList=obsList, epoch=1)
    time2 = time.time()
    dlpmlnObj.testNN("m1", add_test_dataloader)  #test m1 network
    dlpmlnObj.testNN("m2", carry_test_dataloader)  #test m2 network
    print("--- train time: %s seconds ---" % (time2 - time1))
    print("--- test time: %s seconds ---" % (time.time() - time2))