def entrenar():
    """Train a NEAT network to classify songs into one of 4 genres.

    Loads the feature vector of every row in the ``songs`` table, evolves a
    NEAT population for up to 500 generations, pickles the winning genome to
    ``<data_dir>/redNeuronal.p`` and writes SVG plots of the run.

    Returns:
        The winning genome found by the evolution.
    """
    cur = conn.cursor()
    cur.execute("SELECT * FROM songs")
    rv = cur.fetchall()

    inputsTrain = np.empty([len(rv), 36])   # 36 features per song
    outputsTrain = np.empty([len(rv), 4])   # one-hot genre target per song

    def eval_fitness(genomes):
        # Fitness is the negated mean squared error over the training set:
        # NEAT maximizes fitness, so lower error => higher fitness.
        for g in genomes:
            net = nn.create_feed_forward_phenotype(g)
            sum_square_error = 0.0
            for inputs, expected in zip(inputsTrain, outputsTrain):
                output = net.serial_activate(inputs)
                sum_square_error += np.mean((output - expected) ** 2)
            g.fitness = -sum_square_error

    for i, row in enumerate(rv):
        # NOTE: this local was previously named `dict`, shadowing the builtin.
        features = common.loadDict(os.path.join(common.load("data_dir"), row["data"]))
        inputsTrain[i, :] = common.featureDictToArray(features)
        # Genres are stored 1-based in the DB; build a one-hot target vector.
        temp = np.zeros(4)
        temp[int(row["genre"]) - 1] = 1
        outputsTrain[i, :] = temp

    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, "neuron.config")
    pop = population.Population(config_path)
    reporte = NEATReporter()
    pop.add_reporter(reporte)
    pop.run(eval_fitness, 500)

    stats = pop.statistics
    winner = pop.statistics.best_genome()
    # Pickle in binary mode and close the handle deterministically
    # (the original opened in text mode "w" and leaked the file object).
    with open(os.path.join(common.load("data_dir"), "redNeuronal.p"), "wb") as f:
        pickle.dump(winner, f)

    visualize.plot_stats(stats, filename="app/static/results/evolucion.svg")
    visualize.plot_species(stats)
    visualize.draw_net(winner, filename="app/static/results/redNeuronal.svg")
    return winner
"""Classify a single audio file with the previously trained NEAT network."""
from neat import nn
import pickle

from app.audio.audioClass import Audio
from app.audio import feature
from app import common

# Extract the same feature vector layout that was used during training.
a = Audio('/home/yuli/demo.wav', nro_texture_windows=2584, hopsize=256)
# NOTE: this variable was previously named `dict`, shadowing the builtin.
features = feature.getFeatureVector(a, 512, 256, 86)
arr = common.featureDictToArray(features)

# Load the winning genome in binary mode and close the handle deterministically
# (the original opened in text mode 'r' and leaked the file object).
# TODO: the absolute path is hard-coded; consider common.load("data_dir").
with open('/home/yuli/IA2/clasificador/app/data/redNeuronal.p', 'rb') as f:
    winner = pickle.load(f)

winner_net = nn.create_feed_forward_phenotype(winner)
output = winner_net.serial_activate(arr)
# Single-argument print(...) behaves the same under Python 2 and 3.
print(output)