import numpy as np
import causet as cs

# Testing that the causal matrix is correctly calculated by causDiam.
# This is deliberately NOT the most efficient way to compute the entries of c:
# the explicit double loop cross-checks the vectorized implementation used
# inside causDiam.
n = 1000  # number of sprinkled points (was hard-coded in three places)
d = 4     # spacetime dimension under test

c, t, x = cs.causDiam(n, d)

final_truth = True
for i in range(n):
    for j in range(n):
        # Minkowski interval between events i and j (signature -+++).
        ds2 = -(t[0, j] - t[0, i]) ** 2
        ds2 += (x[0, j] - x[0, i]) ** 2
        if d >= 3:
            ds2 += (x[1, j] - x[1, i]) ** 2
        if d == 4:
            ds2 += (x[2, j] - x[2, i]) ** 2
        # i causally precedes j iff the interval is timelike and i is earlier.
        temp_bool = (ds2 < 0) and (t[0, i] < t[0, j])
        final_truth = final_truth and (temp_bool == c[i, j])

# The original script computed finalTruth but never reported it, so a failure
# would go unnoticed; fail loudly if the vectorized causal matrix disagrees
# with the brute-force check.
assert final_truth, "causDiam causal matrix disagrees with brute-force check"
print("causDiam causal-matrix check passed:", final_truth)
# Generating the data used to train the autoencoder.
# For now we generate causets sprinkled into flat space in d dimensions;
# later we may incorporate other features such as curvature.
tot_train = 2000  # causal sets per dimension in the training set
tot_test = 500    # causal sets per dimension in the test set
n = 100           # points sprinkled into each causet
dims = np.array([2, 3, 4])

# One row per causet: the n x n causal matrix flattened to length n**2,
# with the sprinkling dimension as the label.
n_dims = dims.size
xtrain = np.zeros((tot_train * n_dims, n ** 2), dtype='int8')
ytrain = np.zeros((tot_train * n_dims,), dtype='int8')
xtest = np.zeros((tot_test * n_dims, n ** 2), dtype='int8')
ytest = np.zeros((tot_test * n_dims,), dtype='int8')

for idx, dim in enumerate(dims):
    # Fill the block of training rows belonging to this dimension.
    for k in range(tot_train):
        c, t, x = cs.causDiam(n, dim)
        row = idx * tot_train + k
        xtrain[row] = c.reshape(1, n ** 2)
        ytrain[row] = dim
    # Fill the block of test rows belonging to this dimension.
    for k in range(tot_test):
        c, t, x = cs.causDiam(n, dim)
        row = idx * tot_test + k
        xtest[row] = c.reshape(1, n ** 2)
        ytest[row] = dim

np.save('xtrain.npy', xtrain)
np.save('ytrain.npy', ytrain)
np.save('xtest.npy', xtest)
np.save('ytest.npy', ytest)