def train(self):
    """Fit an OLS-trained RBF network on the stored dataset and tally
    training-set misclassifications.

    Side effect: stores the trained network on ``self.net``.
    NOTE(review): ``errors``/``count`` are local and not returned within
    this view — presumably consumed by code beyond it; behavior kept.
    """
    self.net = pyrb.train_ols(self.data, self.target, 10e-8, 0.8, verbose=True)
    predicted = self.net.sim(self.data)
    errors = 0
    count = 0
    # Round each simulated output to the nearest integer and compare it
    # against the expected class label.
    for idx, expected in enumerate(self.target):
        count += 1
        if int(round(predicted[idx][0])) != expected:
            errors += 1
# NOTE(review): these two calls appear to close out a plotting routine whose
# definition lies above this chunk — confirm their indentation context.
plt.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
plt.show()


def plotCityMap(crimes):
    # Placeholder: city-map plotting is not implemented yet.
    return None


if __name__ == "__main__":
    start = time.time()

    # Load feature columns 0, 7, 8 and the label column from the CSV.
    [data, target] = importDataFromFile("train.csv", [0, 7, 8], 1)
    [data, target] = [
        np.array(data).reshape(len(data), 3),
        np.array(target).reshape(len(target), 1),
    ]

    # Train an inexact (OLS) RBF network and simulate it on the training set.
    net = pyrb.train_ols(data, target, 10e-8, 0.8, verbose=True)
    S = net.sim(data)

    # Misclassification count: round each output to the nearest class label.
    count = len(target)
    errors = sum(
        1
        for output, expected in zip(S, target)
        if int(round(output[0])) != expected
    )

    end = time.time()
    print('Learning error: ' + str(round((errors / count) * 100)) + '%')
    print('Time: ' + str(end - start) + ' seconds')
def _plot_split(xs, ys, values, threshold=0.2):
    # Red stars where the value falls below the threshold, circles elsewhere.
    plt.plot(xs[values < threshold], ys[values < threshold], '*r',
             xs[values >= threshold], ys[values >= threshold], 'o')
    plt.show()


# Evaluate the function over every point of the grid.
V = heart(P[0:1], P[1:])
_plot_split(P[0:1], P[1:], V)

# Define an exact RBFN (one center per training sample) and simulate it.
enet = pyrb.train_exact(P.T, V.T, 0.3)
S = enet.sim(P.T).T
# Small differences versus V are due to ill conditioning.
_plot_split(P[0:1], P[1:], S)

# What happens on points outside the training set?
O = np.random.uniform(size=(2, 5000), low=-2., high=2.)
S = enet.sim(O.T).T
_plot_split(O[0:1], O[1:], S)

# To achieve better generalization, train an inexact RBFN instead.
inet = pyrb.train_ols(P.T, V.T, 0.0007, 0.3, verbose=True)
S = inet.sim(P.T).T
_plot_split(P[0:1], P[1:], S)

# ...and check it outside the training set as well.
S = inet.sim(O.T).T
_plot_split(O[0:1], O[1:], S)
# Hyperparameters (target MSE and Gaussian width): taken from the command
# line when both are supplied, otherwise fall back to defaults.
arg1, arg2 = (float(sys.argv[1]), float(sys.argv[2])) if len(sys.argv) >= 3 else (0.1, 1)

TRAIN_SIZE = SIZE - ACCELERATION_FACTOR
EVAL_SIZE = SIZE - TRAIN_SIZE  # i.e. ACCELERATION_FACTOR samples held out

# pyrb expects the labels as column vectors.
train_label = np.reshape(train_label, [TRAIN_SIZE, 1])
eval_label = np.reshape(eval_label, [EVAL_SIZE, 1])

print("training ")
rbf = pyrb.train_ols(train_data, train_label, mse=arg1, gw=arg2, verbose=True)
# rbf = pyrb.train_exact(train_data, train_label, 0.4)
print("train done")

# Training accuracy: fraction of outputs that exactly match their labels.
train_result = np.reshape(rbf.sim(train_data), [-1, 1])
x = np.sum(train_result == train_label)  # number of exact matches
train_acc = x / float(TRAIN_SIZE)

# Held-out accuracy on the evaluation split.
eval_result = np.reshape(rbf.sim(eval_data), [-1, 1])
eval_acc = np.sum(eval_result == eval_label) / float(EVAL_SIZE)

out_train = train_result - train_label
for x in range(TRAIN_SIZE): fake_train_label[x, train_label[x]] = 1 for x in range(EVAL_SIZE): fake_eval_label[x, eval_label[x]] = 1 """ # train_label = fake_train_label # eval_label = fake_eval_label TRAIN_SIZE = SIZE - ACCELERATION_FACTOR EVAL_SIZE = SIZE - TRAIN_SIZE train_label = np.reshape(train_label, [TRAIN_SIZE, 1]) eval_label = np.reshape(eval_label, [EVAL_SIZE, 1]) rbf = pyrb.train_ols(train_data, train_label, mse=MSE, gw=GW, verbose=True) # rbf = pyrb.train_exact(train_data, train_label, GW) print ("train done") train_result = rbf.sim(train_data).astype(np.int) train_result = np.reshape(train_result, [-1, 1]) x = np.sum(train_result == train_label) train_acc = np.sum(train_result == train_label) / float(TRAIN_SIZE) eval_result = rbf.sim(eval_data).astype(np.int) eval_result = np.reshape(eval_result, [-1, 1]) eval_acc = np.sum(eval_result == eval_label) / float(EVAL_SIZE)