Example #1
    def eval_fitness(genomes, config):

        for idx, g in genomes:
            cppn = neat.nn.FeedForwardNetwork.create(g, config)
            network = ESNetwork(substrate, cppn, params)
            net = network.create_phenotype_network()

            fitnesses = []

            for i in range(trials):
                ob = env.reset()
                net.reset()

                total_reward = 0

                for j in range(max_steps):
                    for k in range(network.activations):
                        o = net.activate(ob)
                    action = np.argmax(o)
                    ob, reward, done, info = env.step(action)
                    total_reward += reward
                    if done:
                        break
                fitnesses.append(total_reward)

            g.fitness = np.array(fitnesses).mean()
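Note: these excerpts assume module-level `substrate`/`sub` and `params` objects. A minimal sketch of that setup, assuming the PUREPLES package layout (the coordinates and parameter values here are illustrative, not the originals):

from pureples.shared.substrate import Substrate
from pureples.es_hyperneat.es_hyperneat import ESNetwork

# Input and output coordinates on the substrate; ES-HyperNEAT discovers the
# hidden topology, so only these two lists are declared up front.
input_coordinates = [(-1.0, -1.0), (0.0, -1.0), (1.0, -1.0)]
output_coordinates = [(-1.0, 1.0), (1.0, 1.0)]
substrate = Substrate(input_coordinates, output_coordinates)

# Typical ES-HyperNEAT search parameters (placeholder values).
params = {"initial_depth": 0,
          "max_depth": 1,
          "variance_threshold": 0.03,
          "band_threshold": 0.3,
          "iteration_level": 1,
          "division_threshold": 0.5,
          "max_weight": 5.0,
          "activation": "sigmoid"}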
Example #2
def run(config_file):
    # Load configuration.
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)

    # Create the population, which is the top-level object for a NEAT run.
    # p = neat.Population(config)
    p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-4')

    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(neat.Checkpointer(1000))
    #p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-299')

    # Run for up to 300 generations.
    pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), eval_genome)
    winner = p.run(pe.evaluate, 300)

    # Display the winning genome.
    print('\nBest genome:\n{!s}'.format(winner))

    # Show output of the most fit genome against training data.
    print('\nOutput:')
    cppn = neat.nn.RecurrentNetwork.create(winner, config)
    network = ESNetwork(sub, cppn, params)
    winner_net = network.create_phenotype_network(
        filename='es_hyperneat_winner.png')
    input("Winner is found")
    for i in range(10):
        train(winner_net, network, True)

    draw_net(cppn, filename="es_hyperneat")
Example #3
def run(config_file):
    # Load configuration.
    config = neat.config.Config(neat.genome.DefaultGenome,
                                neat.reproduction.DefaultReproduction,
                                neat.species.DefaultSpeciesSet,
                                neat.stagnation.DefaultStagnation, config_file)

    # Create the population, which is the top-level object for a NEAT run.
    #p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-15')

    # Add a stdout reporter to show progress in the terminal.
    pop = neat.population.Population(config)
    stats = neat.statistics.StatisticsReporter()
    pop.add_reporter(stats)
    pop.add_reporter(neat.reporting.StdOutReporter(True))
    pop.add_reporter(neat.Checkpointer(999))
    #p = neat.Checkpointer.restore_checkpoint('neat-checkpoint-299')

    # Run for up to 300 generations.
    winner = pop.run(eval_multipleGenomes, 300)

    # Display the winning genome.
    print('\nBest genome:\n{!s}'.format(winner))

    # Show output of the most fit genome against training data.
    print('\nOutput:')
    cppn = neat.nn.FeedForwardNetwork.create(winner, config)
    network = ESNetwork(sub, cppn, params)
    winner_net = network.create_phenotype_network(
        filename='es_hyperneat_winner.png')
    input("Winner is found")
    for i in range(10):
        train(winner_net, network, True)

    draw_net(cppn, filename="es_hyperneat")
Example #4
 def eval_fitness(self, genomes, config):
     self.epoch_len = randint(42, 42 * 4)
     r_start = randint(0 + self.epoch_len, self.hs.hist_full_size - self.hd)
     #r_start_2 = self.hs.hist_full_size - self.epoch_len-1
     best_g_fit = 0.0
     champ_counter = self.gen_count % 10
     #img_count = 0
     for idx, g in genomes:
         cppn = neat.nn.FeedForwardNetwork.create(g, config)
         network = ESNetwork(self.subStrate, cppn, self.params, self.hd)
         net = network.create_phenotype_network_nd()
         train_ft = self.evaluate(net, network, r_start, g)
         #validate_ft = self.evaluate(net, network, r_start_2, g)
         g.fitness = train_ft
         if (g.fitness > best_g_fit):
             best_g_fit = g.fitness
             with open(
                     "./champ_data/kraken/latest_greatest" +
                     str(champ_counter) + ".pkl", 'wb') as output:
                 pickle.dump(g, output)
         #img_count += 1
     if (champ_counter == 0):
         self.refresh()
         self.compare_champs()
     self.gen_count += 1
     return
Example #5
def eval_genomes(genomes, config):
    best_net = (None, None, -9999)
    runs = 10
    environments = [MultiAgentDeliveryEnv() for i in range(runs)]

    for genome_id, genome in genomes:

        cppn = neat.nn.FeedForwardNetwork.create(genome, config)
        network = ESNetwork(sub, cppn, params)
        net = network.create_phenotype_network()
        episode_reward = 0
        genome.fitness = 0

        sum_square_error = 0.0
        for inputs, expected in zip(xor_inputs, xor_outputs):
            new_input = inputs + (1.0, )
            net.reset()
            for i in range(network.activations):
                output = net.activate(new_input)

            sum_square_error += ((output[0] - expected[0])**2.0) / 4.0
        # episode_reward is never accumulated in this excerpt, so score on the
        # XOR error rather than episode_reward / runs (which was always 0).
        fitness = 1.0 - sum_square_error
        if fitness > best_net[2]:
            best_net = (net, network, fitness)
        # Append episode reward to a list and log stats (every given number of episodes)
        genome.fitness += fitness
    for i in range(4):
        train(best_net[0], best_net[1], True)
Example #6
def eval_genomes(genomes, config):
    # start torcs serever
    # start torcs client
    # wait until finished
    # fitness = results
    for genome_id, genome in genomes:
        genome.fitness = 0
        # cppn = neat.nn.FeedForwardNetwork.create(genome, config)
        # net = create_phenotype_network(cppn, sub, "sigmoid")

        cppn = neat.nn.FeedForwardNetwork.create(genome, config)
        network = ESNetwork(sub, cppn, params)
        net = network.create_phenotype_network()
        # net = neat.nn.FeedForwardNetwork.create(genome, config)

        # make a file from net in torcs folder
        # run torcs
        # read fitness

        predictions = []
        for input_row, output_real in zip(inputs, outputs):
            output_pred = net.activate(input_row)
            # print(output_pred,output_real)
            predictions.append(output_pred)

        genome.fitness = 0 - sklearn.metrics.mean_squared_error(
            outputs, predictions)
Example #7
    def eval_fitness(self, genomes, config):
        r_start = randint(0, self.hs.hist_full_size - self.epoch_len)    
        for idx, g in genomes:

            cppn = neat.nn.FeedForwardNetwork.create(g, config)
            network = ESNetwork(self.subStrate, cppn, self.params)
            net = network.create_phenotype_network()
            g.fitness = self.evaluate(net, network, r_start)
Example #8
 def trial_run(self):
     r_start = 0
     with open("es_trade_god_cppn_3d.pkl", 'rb') as cppn_file:
         [cppn] = pickle.load(cppn_file)
     network = ESNetwork(self.subStrate, cppn, self.params)
     net = network.create_phenotype_network_nd()
     fitness = self.evaluate(net, network, r_start)
     return fitness
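The single-element unpack `[cppn] = pickle.load(...)` only round-trips if the CPPN was pickled inside a list. A sketch of the matching save side, under that assumption:

import pickle

# Wrap the CPPN in a list so `[cppn] = pickle.load(f)` unpacks it on load.
with open("es_trade_god_cppn_3d.pkl", 'wb') as f:
    pickle.dump([cppn], f)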
Example #9
    def eval_fitness(genomes, config):

        for idx, g in genomes:
            cppn = neat.nn.FeedForwardNetwork.create(g, config)
            network = ESNetwork(substrate, cppn, params)
            net = network.create_phenotype_network()

            fitnesses = []

            for i in range(trials):
                x = game.start_game()
                scr = 0
                # Remember the previous board state to detect moves that change nothing.
                y = game.matrix_to_vector(x)
                # `death` tracks consecutive no-op moves: the first no-op sets it to -3,
                # and three more in a row drive it to 0, which ends the episode.
                death = 1
                death_flag = 0
                net.reset()

                for j in range(max_steps):
                    for k in range(network.activations):
                        o = net.activate(format_input(x))
                    x = action(np.argmax(o), x)
                    if game.matrix_to_vector(x) == y:
                        if death_flag == 0:
                            death = -3
                            death_flag = 1
                        else:
                            death += 1
                    else:
                        death = 1
                        death_flag = 0
                        y = game.matrix_to_vector(x)
                    if death == 0:
                        break
                    scr = game.score(x)
                    possb = [
                        game.up_possible(x),
                        game.down_possible(x),
                        game.left_possible(x),
                        game.right_possible(x)
                    ]
                    if possb == [0, 0, 0, 0]:
                        break

                fitnesses.append(scr)

            g.fitness = np.array(fitnesses).mean()
Example #10
def eval_genome(genome, config):
    cppn = neat.nn.RecurrentNetwork.create(genome, config)
    network = ESNetwork(sub, cppn, params)
    net = network.create_phenotype_network()
    episode_reward = 0
    runs = 10

    for i in range(runs):
        episode_reward += train(net, network, False)

    fitness = episode_reward / runs
    # Append episode reward to a list and log stats (every given number of episodes)
    return fitness
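This per-genome form is what `neat.ParallelEvaluator` consumes (Example #2). To reuse it serially, a small adapter matching neat-python's batch signature is enough; a sketch:

def eval_genomes(genomes, config):
    # Serial adapter: genomes is a list of (genome_id, genome) pairs.
    for genome_id, genome in genomes:
        genome.fitness = eval_genome(genome, config)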
Example #11
 def poloTrader(self):
     self.refresh_balances()
     end_prices = {}
     active = self.get_one_bar_input_2d()
     self.load_net()
     network = ESNetwork(self.subStrate, self.cppn, self.params, self.hd)
     net = network.create_phenotype_network_nd('paper_net.png')
     net.reset()
     sell_syms = []
     buy_syms = []
     buy_signals = []
     sell_signals = []
     self.closeOrders()
     for n in range(1, self.hd):
         net.activate(active[self.hd - n])
     out = net.activate(active[0])
     for x in range(len(out)):
         sym = self.hs.coin_dict[x]
         end_prices[sym] = self.get_price(self.base_sym + "_" + sym)
         if (out[x] > .5):
             buy_signals.append(out[x])
             buy_syms.append(sym)
         if (out[x] < -.5):
             sell_signals.append(out[x])
             sell_syms.append(sym)
     #rng = iter(shuffle(rng))
     sorted_buys = np.argsort(buy_signals)[::-1]
     sorted_sells = np.argsort(sell_signals)
     self.reset_tickers()
     for x in sorted_sells:
         sym = sell_syms[x]
         p = self.get_price(self.base_sym + "_" + sym)
         price = p - (p * .005)
         self.sell_coin(self.base_sym + "_" + sym, price)
     for x in sorted_buys:
         sym = buy_syms[x]
         self.target_percent = .1 + out[x] - .45
         p = self.get_price(self.base_sym + "_" + sym)
         price = p * 1.005
         self.buy_coin(self.base_sym + "_" + sym, price)
     if datetime.now() >= self.end_ts:
         return
     else:
         self.purge_polo_client()
         time.sleep(self.ticker_len)
         self.load_polo_client()
     self.refresh_data()
     self.make_shapes()
     #self.closeOrders()
     self.poloTrader()
Example #12
def eval_fitness(genomes, config):
    for idx, g in genomes:
        cppn = neat.nn.FeedForwardNetwork.create(g, config)
        network = ESNetwork(sub, cppn, params)
        net = network.create_phenotype_network()

        sum_square_error = 0.0
        for inputs, expected in zip(xor_inputs, xor_outputs):
            new_input = inputs + (1.0, )
            net.reset()
            for i in range(network.activations):
                output = net.activate(new_input)

            sum_square_error += ((output[0] - expected[0])**2.0) / 4.0

        g.fitness = 1 - sum_square_error
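The XOR snippets here and in Examples #5 and #18 assume the usual truth-table data; the trailing `(1.0,)` appended to each input is a bias term. A sketch of those definitions:

# XOR truth table; a constant 1.0 bias is appended to each input at evaluation time.
xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)]
xor_outputs = [(0.0,), (1.0,), (1.0,), (0.0,)]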
Example #13
 def validate_fitness(self):
     config = self.config
     genomes = neat.Checkpointer.restore_checkpoint("./pkl_pops/pop-checkpoint-27").population
     self.epoch_len = 233
     r_start = self.hs.hist_full_size - self.epoch_len-1
     best_g_fit = 1.0
     for g in genomes.values():
         cppn = neat.nn.FeedForwardNetwork.create(g, config)
         network = ESNetwork(self.subStrate, cppn, self.params, self.hd)
         net = network.create_phenotype_network_nd()
         g.fitness = self.evaluate(net, network, r_start, g)
         if(g.fitness > best_g_fit):
             best_g_fit = g.fitness
             with open('./champ_data/latest_greatest.pkl', 'wb') as output:
                 pickle.dump(g, output)
     return
Example #14
    def poloTrader(self):
        end_prices = {}
        active = self.get_one_bar_input_2d()
        sub = Substrate(self.in_shapes, self.out_shapes)
        network = ESNetwork(sub, self.cppn, self.params)
        net = network.create_phenotype_network()
        net.reset()
        for n in range(network.activations):
            out = net.activate(active)
        #print(len(out))
        rng = len(out)
        #rng = iter(shuffle(rng))
        for x in np.random.permutation(rng):
            sym = self.coin_dict[x]
            #print(out[x])
            try:
                if (out[x] < -.5):
                    print("selling: ", sym)
                    self.folio.sell_coin(
                        sym, self.currentHists[sym]['close'][self.end_idx])
                elif (out[x] > .5):
                    print("buying: ", sym)
                    self.folio.buy_coin(
                        sym, self.currentHists[sym]['close'][self.end_idx])
            except Exception:
                print('error', sym)
            # skip the hold case: we just don't buy or sell
            end_prices[sym] = self.hist_shaped[x][-1][2]

        if datetime.now() >= self.end_ts:
            port_info = self.folio.get_total_btc_value(end_prices)
            print("total val: ", port_info[0], "btc balance: ", port_info[1])
            return
        else:
            print(self.get_current_balance())
            for t in range(3):
                time.sleep(self.ticker_len / 4)
                p_vals = self.get_current_balance()
                print("current value: ", p_vals[0], "current btc holdings: ",
                      p_vals[1])
                #print(self.folio.ledger)
        time.sleep(self.ticker_len / 4)
        self.pull_polo()
        self.poloTrader()
Example #15
def trainMultiple(genomes, config, render, env):
    step = 0
    current_state = env.reset()
    done = False
    nets = []
    networks = []
    rewards = np.zeros(len(genomes))

    for genome_id, genome in genomes:
        cppn = neat.nn.FeedForwardNetwork.create(genome, config)
        network = ESNetwork(sub, cppn, params)
        net = network.create_phenotype_network()
        nets.append(net)
        networks.append(network)

    pq = []
    for i in range(len(nets)):
        heapq.heappush(pq, (0, i))

    while not done and step < 1000:
        # Pop the agent whose scheduled time is smallest; it acts next.
        t, index = heapq.heappop(pq)
        network = networks[index]
        net = nets[index]
        numAction = []
        for obs in current_state:
            obs = np.append(obs, [1])  # bias
            obs = convert(obs)
            for k in range(network.activations):
                o = net.activate(obs)
            numAction.append(o)
        action = np.argmax(numAction)
        new_state, reward, done, distance = env.stepM(action, index)
        if render:
            env.render(100)
            print(action)
        rewards[index] += reward

        heapq.heappush(pq, (t + distance, index))
        current_state = new_state
        step += 1

    if render:
        print(rewards)
    # Append episode reward to a list and log stats (every given number of episodes)
    return rewards
Example #16
 def compare_champs(self):
     r_start = 40
     self.epoch_len = 40
     print(self.end_idx)
     champ_fit = 0
     for f in os.listdir("./champ_data/stonks"):
         if(f != "lastest_greatest.pkl"):
             champ_file = open("./champ_data/stonks/"+f,'rb')
             g = pickle.load(champ_file)
             champ_file.close()
             cppn = neat.nn.FeedForwardNetwork.create(g, self.config)
             network = ESNetwork(self.subStrate, cppn, self.params, self.hd)
             net = network.create_phenotype_network_nd()
             g.fitness = self.evaluate_champ(net, network, r_start, g)
             if (g.fitness > champ_fit):
                 with open("./champ_data/stonks/latest_greatest.pkl", 'wb') as output:
                     pickle.dump(g, output)
     print(champ_fit)
     return
Example #17
def eval_genomes(genomes, config):
    best_net = (None, None, -9999)
    for genome_id, genome in genomes:
        cppn = neat.nn.RecurrentNetwork.create(genome, config)
        network = ESNetwork(sub, cppn, params)
        net = network.create_phenotype_network()
        episode_reward = 0
        runs = 10
        genome.fitness = 0

        for i in range(runs):
            episode_reward += train(net, network, False)

        fitness = episode_reward / runs
        if fitness > best_net[2]:
            best_net = (net, network, fitness)
        # Append episode reward to a list and log stats (every given number of episodes)
        genome.fitness += fitness
    for i in range(4):
        train(best_net[0], best_net[1], True)
Example #18
def eval_fitness(genomes, config):
    """
    Fitness function.
    For each genome evaluate its fitness, in this case, as the mean squared error.
    """
    for _, genome in genomes:
        cppn = neat.nn.FeedForwardNetwork.create(genome, config)
        network = ESNetwork(SUBSTRATE, cppn, DYNAMIC_PARAMS)
        net = network.create_phenotype_network()

        sum_square_error = 0.0

        for xor_inputs, xor_expected in zip(XOR_INPUTS, XOR_OUTPUTS):
            new_xor_input = xor_inputs + (1.0, )
            net.reset()

            for _ in range(network.activations):
                xor_output = net.activate(new_xor_input)

            sum_square_error += ((xor_output[0] - xor_expected[0])**2.0) / 4.0

        genome.fitness = 1 - sum_square_error
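The activation loop above appears in nearly every snippet: the ES-HyperNEAT phenotype is a recurrent network, so one `activate` call advances the signal a single step, and `network.activations` calls are needed for an input to propagate through every discovered hidden layer to the outputs. A minimal sketch of the pattern (`sample_input` is a placeholder):

net.reset()  # clear recurrent state between samples
for _ in range(network.activations):
    output = net.activate(sample_input)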
Example #19
    def poloTrader(self):
        end_prices = {}
        active = self.get_one_bar_input_2d()
        sub = Substrate(self.in_shapes, self.out_shapes)
        network = ESNetwork(sub, self.cppn, self.params)
        net = network.create_phenotype_network()
        net.reset()
        for n in range(network.activations):
            out = net.activate(active)
        #print(len(out))
        rng = len(out)
        #rng = iter(shuffle(rng))
        for x in np.random.permutation(rng):
            sym = self.coin_dict[x]
            #print(out[x])
            try:
                if (out[x] < -.5):
                    print("selling: ", sym)
                    self.sell_coin(
                        sym,
                        self.get_price(sym),
                    )
                elif (out[x] > .5):
                    print("buying: ", sym)
                    self.buy_coin(sym, self.get_price(sym))
            except Exception:
                print('error', sym)
            #skip the hold case because we just dont buy or sell hehe
            end_prices[sym] = self.get_price(sym)

        if datetime.now() >= self.end_ts:
            return
        else:
            time.sleep(self.ticker_len)
        self.reset_tickers()
        self.pull_polo()
        self.poloTrader()
Example #20
 def compare_champs(self):
     self.epoch_len = self.hs.hist_full_size - (self.hd + 1)
     r_start = self.epoch_len
     with open("./champ_data/kraken/latest_greatest.pkl", 'rb') as champ_current:
         g = pickle.load(champ_current)
     cppn = neat.nn.FeedForwardNetwork.create(g, self.config)
     network = ESNetwork(self.subStrate, cppn, self.params, self.hd)
     net = network.create_phenotype_network_nd()
     champ_fit = self.evaluate(net, network, r_start, g)
     for f in os.listdir("./champ_data/kraken"):
         if (f != "lastest_greatest.pkl"):
             champ_file = open("./champ_data/kraken/" + f, 'rb')
             g = pickle.load(champ_file)
             champ_file.close()
             cppn = neat.nn.FeedForwardNetwork.create(g, self.config)
             network = ESNetwork(self.subStrate, cppn, self.params, self.hd)
             net = network.create_phenotype_network_nd()
             g.fitness = self.evaluate_champ(net, network, r_start, g)
             if (g.fitness > champ_fit):
                 with open("./champ_data/kraken/latest_greatest.pkl",
                           'wb') as output:
                     pickle.dump(g, output)
     return
Example #21
    DYNAMIC_PARAMS = params(version)

    winner = pop.run(eval_fitness, gens)
    print(f"es_hyperneat_xor_{VERSION_TEXT} done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    WINNER = run(300, VERSION)[0]  # Only relevant to look at the winner.
    print('\nBest genome:\n{!s}'.format(WINNER))

    # Verify network output against training data.
    print('\nOutput:')
    CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG)
    NETWORK = ESNetwork(SUBSTRATE, CPPN, DYNAMIC_PARAMS)
    # This will also draw winner_net.
    WINNER_NET = NETWORK.create_phenotype_network(
        filename=
        f'pureples/experiments/xor/es_hyperneat_xor_{VERSION_TEXT}_winner.png')

    for inputs, expected in zip(XOR_INPUTS, XOR_OUTPUTS):
        new_input = inputs + (1.0, )
        WINNER_NET.reset()

        for i in range(NETWORK.activations):
            output = WINNER_NET.activate(new_input)

        print("  input {!r}, expected output {!r}, got {!r}".format(
            inputs, expected, output))
Example #22
    def make_nets(self, genome):

        cppn = neat.nn.FeedForwardNetwork.create(genome, self)
        esnet = ESNetwork(self.substrate, cppn, self.es_params)
        net = esnet.create_phenotype_network()
        return cppn, esnet, net
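A sketch of how a helper like `make_nets` might be called during evaluation, assuming the surrounding class also defines a task-specific `score` method (hypothetical name):

    def eval_genomes(self, genomes, config):
        # Build a phenotype for each genome and score it.
        for genome_id, genome in genomes:
            cppn, esnet, net = self.make_nets(genome)
            genome.fitness = self.score(net, esnet)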
Example #23
config = neat.config.Config(neat.genome.DefaultGenome,
                            neat.reproduction.DefaultReproduction,
                            neat.species.DefaultSpeciesSet,
                            neat.stagnation.DefaultStagnation,
                            'config_cppn_mountain_car')


def run(gens, env):
    winner, stats = run_es(gens, env, 200, config, params, sub, max_trials=0)
    print("es_hyperneat_mountain_car_small done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    # Setup logger and environment.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    env = gym.make("MountainCar-v0")

    # Run!
    winner = run(200, env)[0]

    # Save CPPN if wished reused and draw it + winner to file.
    cppn = neat.nn.FeedForwardNetwork.create(winner, config)
    network = ESNetwork(sub, cppn, params)
    net = network.create_phenotype_network(
        filename="es_hyperneat_mountain_car_small_winner")
    draw_net(cppn, filename="es_hyperneat_mountain_car_small_cppn")
    with open('es_hyperneat_mountain_car_small_cppn.pkl', 'wb') as output:
        pickle.dump(cppn, output, pickle.HIGHEST_PROTOCOL)
Example #24
    pop.add_reporter(stats)
    pop.add_reporter(neat.reporting.StdOutReporter(True))

    winner = pop.run(eval_fitness, gens)
    print("es_hyperneat_xor_medium done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    winner = run(300)[0]
    print('\nBest genome:\n{!s}'.format(winner))

    # Verify network output against training data.
    print('\nOutput:')
    cppn = neat.nn.FeedForwardNetwork.create(winner, config)
    network = ESNetwork(sub, cppn, params)
    winner_net = network.create_phenotype_network(filename='es_hyperneat_xor_medium_winner.png')  # This will also draw winner_net.
    for inputs, expected in zip(xor_inputs, xor_outputs):
        new_input = inputs + (1.0,)
        winner_net.reset()
        for i in range(network.activations):
            output = winner_net.activate(new_input)
        print("  input {!r}, expected output {!r}, got {!r}".format(inputs, expected, output))

    # Save CPPN if wished reused and draw it to file.
    draw_net(cppn, filename="es_hyperneat_xor_medium_cppn")
    with open('es_hyperneat_xor_medium_cppn.pkl', 'wb') as output:
        pickle.dump(cppn, output, pickle.HIGHEST_PROTOCOL)

Example #25
config = neat.config.Config(neat.genome.DefaultGenome,
                            neat.reproduction.DefaultReproduction,
                            neat.species.DefaultSpeciesSet,
                            neat.stagnation.DefaultStagnation,
                            'config_cppn_pole_balancing')


# Use the gym_runner to run this experiment using ES-HyperNEAT.
def run(gens, env):
    winner, stats = run_es(gens, env, 500, config, params, sub)
    print("es_hyperneat_polebalancing_medium done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    # Setup logger and environment.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    env = gym.make("CartPole-v1")

    # Run!
    winner = run(100, env)[0]

    # Save CPPN if wished reused and draw it + winner to file.
    cppn = neat.nn.FeedForwardNetwork.create(winner, config)
    network = ESNetwork(sub, cppn, params)
    net = network.create_phenotype_network(
        filename="es_hyperneat_pole_balancing_medium_winner")
    draw_net(cppn, filename="es_hyperneat_pole_balancing_medium_cppn")
    with open('es_hyperneat_pole_balancing_medium_cppn.pkl', 'wb') as output:
        pickle.dump(cppn, output)
Example #26
    def poloTrader(self):
        try:
            trade_df = pd.read_json("./live_hist/json_hist.json")
        except Exception as e:
            trade_df = pd.DataFrame()
        end_prices = {}
        active = self.get_one_bar_input_2d()
        self.load_net()
        sub = Substrate(self.in_shapes, self.out_shapes)
        network = ESNetwork(sub, self.cppn, self.params, self.hd)
        net = network.create_phenotype_network_nd('paper_net.png')
        sell_syms = []
        buy_syms = []
        buy_signals = []
        sell_signals = []
        for n in range(1, self.hd):
            net.activate(active[self.hd - n])
        out = net.activate(active[0])
        self.reset_tickers()
        for x in range(len(out)):
            sym = self.hs.coin_dict[x]
            end_prices[sym] = self.get_price(self.base_sym + "_" + sym)
            if (out[x] > .5):
                buy_signals.append(out[x])
                buy_syms.append(sym)
            if (out[x] < -.5):
                sell_signals.append(out[x])
                sell_syms.append(sym)
        #rng = iter(shuffle(rng))
        sorted_buys = np.argsort(buy_signals)[::-1]
        sorted_sells = np.argsort(sell_signals)
        for x in sorted_sells:
            try:
                sym = sell_syms[x]
                p = end_prices[sym]
                print("selling: ", sym)
                self.folio.sell_coin(sym, p)
            except Exception as e:
                print("error placing order")
        for x in sorted_buys:
            try:
                sym = buy_syms[x]
                p = end_prices[sym]
                print("buying: ", sym)
                self.folio.buy_coin(sym, p)
            except Exception as e:
                print("error placing order")
        '''
        self.trade_hist["date"] = datetime.now()
        self.trade_hist["portfoliovalue"] = self.folio.get_total_btc_value_no_sell(end_prices)[0] 
        self.trade_hist["portfolio"] = self.folio.ledger
        self.trade_hist["percentchange"] = ((self.trade_hist["portfoliovalue"] - self.folio.start)/self.folio.start)*100
        trade_df.append(self.trade_hist)
        trade_df.to_json("./live_hist/json_hist.json")
        
        if(self.trade_hist["portfoliovalue"] > self.folio.start *1.1):
            self.folio.start = self.folio.get_total_btc_value(end_prices)[0]
        '''
        if datetime.now() >= self.end_ts:
            port_info = self.folio.get_total_btc_value(end_prices)
            print("total val: ", port_info[0], "btc balance: ", port_info[1])
            return

        else:
            print(self.get_current_balance())
            for t in range(2):
                p_vals = self.get_current_balance()
                print("current value: ", p_vals[0], "current holdings: ",
                      p_vals[1])
                time.sleep(self.ticker_len / 2)
        self.refresh_data()
        self.poloTrader()
Example #27
    winner, stats = run_es(gens,
                           env,
                           200,
                           CONFIG,
                           params(version),
                           SUBSTRATE,
                           max_trials=0)
    print(f"es_hyperneat_mountain_car_{VERSION_TEXT} done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    # Setup logger and environment.
    LOGGER = logging.getLogger()
    LOGGER.setLevel(logging.INFO)
    ENVIRONMENT = gym.make("MountainCar-v0")

    # Run! Only relevant to look at the winner.
    WINNER = run(200, ENVIRONMENT, VERSION)[0]

    # Save CPPN if wished reused and draw it + winner to file.
    CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG)
    NETWORK = ESNetwork(SUBSTRATE, CPPN, params(VERSION))
    NET = NETWORK.create_phenotype_network(
        filename=f"es_hyperneat_mountain_car_{VERSION_TEXT}_winner")
    draw_net(CPPN, filename=f"es_hyperneat_mountain_car_{VERSION_TEXT}_cppn")
    with open(f'es_hyperneat_mountain_car_{VERSION_TEXT}_cppn.pkl',
              'wb') as output:
        pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL)
Example #28
    print(f"es_hyperneat_polebalancing_{VERSION_TEXT} done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    # Setup logger and environment.
    LOGGER = logging.getLogger()
    LOGGER.setLevel(logging.INFO)
    ENVIRONMENT = gym.make("CartPole-v1")

    # Run! Only relevant to look at the winner.
    WINNER = run(100, ENVIRONMENT, VERSION)[0]

    # Save CPPN if wished reused and draw it + winner to file.
    CPPN = neat.nn.FeedForwardNetwork.create(WINNER, CONFIG)
    NETWORK = ESNetwork(SUBSTRATE, CPPN, params(VERSION))
    NET = NETWORK.create_phenotype_network(
        filename=
        f"pureples/experiments/pole_balancing/es_hyperneat_pole_balancing_{VERSION_TEXT}_winner"
    )
    draw_net(
        CPPN,
        filename=
        f"pureples/experiments/pole_balancing/es_hyperneat_pole_balancing_{VERSION_TEXT}_cppn"
    )
    with open(
            f'pureples/experiments/pole_balancing/es_hyperneat_pole_balancing_{VERSION_TEXT}_cppn.pkl',
            'wb') as output:
        pickle.dump(CPPN, output, pickle.HIGHEST_PROTOCOL)
Example #29
    # Create population and train the network. Return winner of network running 100 episodes.
    stats_one = neat.statistics.StatisticsReporter()
    pop = ini_pop(None, stats_one, config)
    pop.run(eval_fitness, gens)

    stats_ten = neat.statistics.StatisticsReporter()
    pop = ini_pop((pop.population, pop.species, 0), stats_ten, config)
    trials = 10
    winner_ten = pop.run(eval_fitness, gens)

    if max_trials == 0:
        return winner_ten, (stats_one, stats_ten)

    stats_hundred = neat.statistics.StatisticsReporter()
    pop = ini_pop((pop.population, pop.species, 0), stats_hundred, config)
    trials = max_trials
    winner_hundred = pop.run(eval_fitness, gens)
    return winner_hundred, (stats_one, stats_ten, stats_hundred)


logger = logging.getLogger()
logger.setLevel(logging.INFO)

winner = run(100, 1000000, config, params, sub)[0]

cppn = neat.nn.FeedForwardNetwork.create(winner, config)
network = ESNetwork(sub, cppn, params)
net = network.create_phenotype_network(filename="es_hyperneat_2048_winner")
draw_net(cppn, filename="es_hyperNEAT_2048_cppn")
with open('es_hyperNEAT_2048_cppn.pkl', 'wb') as output:
    pickle.dump(cppn, output, pickle.HIGHEST_PROTOCOL)
Example #30
    pop.add_reporter(neat.reporting.StdOutReporter(True))

    winner = pop.run(eval_fitness, gens)
    print("es_hyperneat_xor_large done")
    return winner, stats


# If run as script.
if __name__ == '__main__':
    winner = run(300)[0]
    print('\nBest genome:\n{!s}'.format(winner))

    # Verify network output against training data.
    print('\nOutput:')
    cppn = neat.nn.FeedForwardNetwork.create(winner, config)
    network = ESNetwork(sub, cppn, params)
    # This will also draw winner_net.
    winner_net = network.create_phenotype_network(
        filename='es_hyperneat_xor_large_winner.png')
    for inputs, expected in zip(xor_inputs, xor_outputs):
        new_input = inputs + (1.0, )
        winner_net.reset()
        for i in range(network.activations):
            output = winner_net.activate(new_input)
        print("  input {!r}, expected output {!r}, got {!r}".format(
            inputs, expected, output))

    # Save CPPN if wished reused and draw it to file.
    draw_net(cppn, filename="es_hyperneat_xor_large_cppn")
    with open('es_hyperneat_xor_large_cppn.pkl', 'wb') as output:
        pickle.dump(cppn, output)