def __init__(self, executionTime, startPos, startVel, goalPos, cs, numWeights, overlap, use_scaling):
    self.T = executionTime          # execution time / temporal scaling
    self.cs = cs                    # canonical system
    self.alpha = 25.0               # spring-damper gains; beta = alpha / 4 gives critical damping
    self.beta = 6.25
    self.g = goalPos                # goal position
    self.y = startPos               # current position
    self.startPos = startPos
    self.z = self.T * startVel      # scaled velocity state, z = T * y_dot
    self.startZ = self.z
    self.rbf = Rbf(cs, executionTime, numWeights, overlap)  # radial-basis forcing term
    self.amplitude = 0
    self.use_scaling = use_scaling
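# For context, a minimal sketch (not part of this repo) of how state like the above is
# typically advanced: one explicit-Euler step of the spring-damper ("transformation")
# system that the fields alpha, beta, g, y, z and T suggest,
#     T * z_dot = alpha * (beta * (g - y) - z) + f,    T * y_dot = z.
# `dmp` is any object exposing those attributes; `forcing` stands in for the Rbf-based
# forcing term, whose exact interface in this code base is an assumption.
def euler_step_sketch(dmp, dt, forcing=0.0):
    # compute both derivatives from the current state, then update
    z_dot = (dmp.alpha * (dmp.beta * (dmp.g - dmp.y) - dmp.z) + forcing) / dmp.T
    y_dot = dmp.z / dmp.T
    dmp.z += z_dot * dt
    dmp.y += y_dot * dt
    return dmp.y, y_dot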
def main():
    # read the dataset and preprocess it
    dataset = PreProcessing("seeds_dataset.txt", separator=r'\s+')
    dataset.normalize()
    dataset.normalize_class()

    # divide the dataset into training and test sets
    train, test = training.holdout(0.7, dataset.normalized_dataframe)

    nn = Rbf(7, 3)
    nn.train(train, eta=0.5, max_iterations=500)
    print("RBF:", training.accuracy(nn, test, 3))

    mm = Mlp(7, 3, 3)
    mm.backpropagation(train.values.tolist(), max_iterations=500)
    print("MLP:", training.accuracy(mm, test, 3))
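# The split above comes from the project's `training` module; as a point of reference,
# here is a minimal sketch of what a holdout like training.holdout(0.7, df) presumably
# does (assumption: shuffle a pandas DataFrame and cut it at the given proportion).
def holdout_sketch(p, df, seed=None):
    shuffled = df.sample(frac=1.0, random_state=seed)   # shuffle all rows
    cut = int(len(shuffled) * p)                        # train/test boundary
    return shuffled.iloc[:cut], shuffled.iloc[cut:]     # (train, test)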
def __init__(self, executionTime, startPos, startVel, startAcc, goalPos, goalVel, cs, numWeights, overlap, use_vel_scaling):
    self.T = executionTime          # execution time / temporal scaling
    self.cs = cs                    # canonical system
    self.alpha = 25.0               # spring-damper gains; beta = alpha / 4 gives critical damping
    self.beta = 6.25
    self.g = goalPos                # goal position
    self.gd = goalVel               # goal velocity
    self.gdd = 0.0                  # goal acceleration, has to be 0
    self.y = startPos               # current position
    self.y0 = startPos              # start position
    self.yd0 = startVel             # start velocity
    self.ydd0 = startAcc            # start acceleration
    self.ydd = startAcc
    self.v = self.T * self.yd0      # scaled velocity state, v = T * y_dot
    self.startV = self.v
    # Fop: presumably a polynomial matching the start/goal boundary conditions at t = 0 and t = T
    self.fop = Fop(0.0, startPos, startVel, startAcc, executionTime, goalPos, goalVel, self.gdd)
    self.rbf = Rbf(cs, executionTime, numWeights, overlap)  # radial-basis forcing term
    self.amplitude = goalPos - startPos     # movement amplitude (goal - start)
    self.amplitude2 = goalVel - startVel    # velocity amplitude
    self.use_vel_scaling = use_vel_scaling
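# A hedged note on the two amplitude fields above: in DMP-style formulations the learned
# forcing term is commonly rescaled by the new movement amplitude so a demonstrated
# trajectory generalizes to a new start/goal pair. A minimal sketch of that idea follows;
# the exact scaling rule used by this class is an assumption, not taken from the repo.
def scaled_forcing_sketch(raw_forcing, learned_amplitude, new_amplitude, use_scaling=True):
    if not use_scaling or learned_amplitude == 0:
        return raw_forcing
    return raw_forcing * (new_amplitude / learned_amplitude)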
def seed_test():
    # Load and normalize the seeds dataset
    dataset = PreProcessing("seeds_dataset.txt", separator=r'\s+')
    dataset.normalize()
    dataset.normalize_class()

    # Hyperparameters varied across the tests
    n_layers = [1, 2]
    hidden_layer = [3, [6, 6]]
    momentums = [0.3, 0.5]
    max_iterations = [100, 250, 500]
    etas = [0.3, 0.5]
    ps = [0.7, 0.9]

    rbf_accuracy = 0
    mlp_accuracy = 0
    tests = 0

    # Test loop
    for layer in n_layers:
        for momentum in momentums:
            for eta in etas:
                for max_iteration in max_iterations:
                    for p in ps:
                        tests += 1
                        print("Test number", tests)
                        train, test = training.holdout(p, dataset.normalized_dataframe)
                        print("INPUT NEURONS = 7 HIDDEN NEURONS = " + str(hidden_layer[layer - 1]) +
                              " OUTPUT NEURONS = 3 HIDDEN LAYERS = " + str(layer) +
                              " ETA = " + str(eta) +
                              " MAX ITERATIONS = " + str(max_iteration) +
                              " MOMENTUM = " + str(momentum) +
                              " P = " + str(p))
                        print()

                        print("RBF")
                        nn = Rbf(7, 3)
                        # the RBF is trained with a fixed eta; the looped eta only affects the MLP
                        nn.train(train, eta=0.5, max_iterations=max_iteration)
                        ac = training.accuracy(nn, test, 3)
                        rbf_accuracy += ac
                        print("ACCURACY =", ac)
                        print()

                        print("MLP")
                        example = test.values.tolist()
                        mm = Mlp(7, hidden_layer[layer - 1], 3, n_hidden_layers=layer)
                        # NOTE: momentum is reported above but not passed to backpropagation here
                        mm.backpropagation(train.values.tolist(), eta=eta, max_iterations=max_iteration)
                        ac = training.accuracy(mm, test, n_classes=3)
                        mlp_accuracy += ac
                        print("ACCURACY =", ac)
                        print()

                        print("Rbf:")
                        nn.feed_forward(example[15][:-3])   # first 7 attributes (drop the 3 one-hot class columns)
                        print(example[15])
                        print("Result 1")
                        nn.show_class()
                        print()

                        print("Mlp")
                        print(example[15])
                        mm.feed_forward(example[15][:-3])
                        print("Result 2")
                        mm.show_class()
                        print()
                        print("******************************************************//******************************************************")
                        print()

    print(tests, "tests executed. Rbf accuracy:", rbf_accuracy / tests,
          "Mlp accuracy:", mlp_accuracy / tests)
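# For reference, a hedged sketch of an accuracy helper in the spirit of the
# training.accuracy(model, test, n_classes) calls above. Assumptions (not taken from the
# repo): the last n_classes columns of the test DataFrame hold a one-hot label, and the
# model's feed_forward(features) returns a plain list with one score per class.
def accuracy_sketch(model, test_df, n_classes):
    rows = test_df.values.tolist()
    hits = 0
    for row in rows:
        features, label = row[:-n_classes], row[-n_classes:]
        outputs = model.feed_forward(features)      # assumed to return n_classes scores
        predicted = outputs.index(max(outputs))     # winning output neuron
        expected = label.index(max(label))          # index of the 1 in the one-hot label
        hits += int(predicted == expected)
    return hits / len(rows)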