def wander(maze, start, n_steps, display):
    walk, dna = utility.spawn_random_walk(start, n_steps)
    steps = 0
    correct = []
    for step in walk:
        spot = maze[step[0], step[1]]
        if spot == 1:
            try:
                walk = utility.spawn_random_walk([step[0], step[1]],
                                                 len(walk) - steps)
            except ValueError:
                break
        r = utility.calculatle_rvec(step, start)
        options = {
            1: [step[0] - 1, step[1] - 1],
            2: [step[0] - 1, step[1]],
            3: [step[0] - 1, step[1] + 1],
            4: [step[0] - 1, step[1]],
            5: step,
            6: [step[0] + 1, step[1]],
            7: [step[0] + 1, step[1] - 1],
            8: [step[0] + 1, step[1]],
            9: [step[0] + 1, step[1] + 1]
        }
        for opt in options.values():
            if utility.calculatle_rvec(opt, start) > r:
                step = opt
        correct.append(step)
        steps += 1
    if display:
        print "Made it " + str(steps) + " Steps"
    return steps, correct, dna
# Exemplo n.º 2
# 0
def main():
    """Run the evolutionary crawler simulation; with '-brute' on the
    command line, also benchmark a brute-force random-walk search toward
    the same goal and plot its progress over time.
    """
    t0 = time.time()

    # Start point, goal point, and search hyper-parameters.
    A = [0, 0]
    B0 = [100, 100]
    depth = 150        # random-walk seed length (steps per walk)
    batch_size = 750   # walks per batch
    n_batches = 250    # number of batches
    # Straight-line (Euclidean) distance from A to the goal B0.
    r = np.sqrt((A[0] - B0[0])**2 + (A[1] - B0[1])**2)
    mutation_markers = {}
    progress = []
    '''
        Using the derivative of dist to goal (drg) and
        the derivative of displacement, Find the longest
        sequence of overall positive slope [noting start
        pos of positive slope], and hopefully scan through
        while increasing derivative threshold. 
        
        *  Find the longest sequence of steps which lead to the 
           most positive increases in displacement, 
        
        *  minimize distance to goal
        
        *  reward steps that do both at the same time! 

    '''
    EvolutionaryCrawler(r, A, B0, depth, batch_size, n_batches)
    print '\033[1mSimulation FINISHED \033[34m[' + str(
        time.time() - t0) + 's Elapsed]\033[0m'
    if '-brute' in sys.argv:
        print '\033[1m\033[31m\t:: BEGINNING BRUTE_FORCE SEARCH :: \033[0m'
        print '\033[1m\033[33mUsing Random Walk Seed Length:\033[0m\033[1m ' + str(
            depth) + '\033[0m'
        print '\033[1m\033[33mUsing Batch Size:\033[0m\033[1m ' + str(
            batch_size) + '\033[0m'
        print '\033[1m\033[33mUsing N Batches:\033[0m\033[1m ' + str(
            n_batches) + '\033[0m'
        brute_points = []   # best distance found after each batch
        t = []              # elapsed wall-clock time per batch
        for j in range(n_batches):
            for i in range(batch_size):
                random_walk, score = utility.spawn_random_walk(A, depth)
                best_dist, best_pt, mark = track_path(random_walk, A, B0,
                                                      False)
                # NOTE(review): `i` restarts every batch, so markers from
                # earlier batches are overwritten -- confirm intent.
                mutation_markers[i] = mark
                progress.append(best_dist)
            # `progress` is never cleared, so this is the running minimum
            # over ALL walks seen so far.
            brute_points.append(np.array(progress).min())
            t.append(time.time() - t0)
        print '\033[1mFINISHED \033[31m[' + str(time.time() -
                                                t0) + 's Elapsed]\033[0m'
        print str(A) + '--?--' + str(B0) + '\033[1m\tDistance:' + str(
            r) + '\033[0m'
        print "MINIMUM DISTANCE TO GOAL FOUND: " + str(
            np.array(progress).min())

        plt.title('Brute Force Search')
        plt.xlabel('time (s)')
        plt.ylabel('Minimum Distance To Goal Found')
        plt.plot(t, np.array(brute_points))
        plt.show()
# Exemplo n.º 3
# 0
def spawn_walk_pool_common_origin(pool_size, walk_length, start):
    """Spawn *pool_size* random walks of *walk_length* steps, all
    starting from *start*.

    Returns a list of (walk, sequence) tuples as produced by
    utility.spawn_random_walk.
    """
    # Idiom fix: build the list directly instead of abusing a list
    # comprehension for its append side effect.
    return [utility.spawn_random_walk(start, walk_length)
            for _ in range(pool_size)]
# Exemplo n.º 4
# 0
    def add_particles(self, particle_types, rules):
        """Create and place particles of each requested color.

        particle_types maps a color name to the number of particles to
        spawn; rules is forwarded to each particle's apply_rule.  Colors
        that raise KeyError (e.g. unknown to the spectra/handles tables)
        are skipped.  Returns a dict mapping a running integer id to each
        created Particle.
        """
        cloud = {}
        rgb = model.Spectra()
        rgb.initialize()
        ii = 0  # global particle id, shared across all colors
        for color in particle_types.keys():
            try:
                self.particle_count[color] = particle_types[color]
                # POPULATE self.state
                for pid in range(self.particle_count[color]):
                    # Random free position, then pre-compute the particle's
                    # walk for the whole simulation timescale.
                    pt = utility.spawn_random_point(self.state)
                    particle = Particle(color, pt)
                    particle.apply_rule(rules)
                    particle.steps, unused = utility.spawn_random_walk(
                        pt, self.timescale)
                    cloud[ii] = particle
                    # Paint the spawn cell with the color's RGB value.
                    self.state[pt[0], pt[1], :] = rgb.color_handles[color]
                    ii += 1
                if self.verbose:
                    print "Added " + str(particle_types[color]) + ' \033[1m' + self.color_handles[color] + \
                          ' Particles to Simulation with ' + str(self.timescale) + ' Steps\033[0m'
            except KeyError:
                continue

        return cloud
def radial_wander(start, depth, generated_maze, show):
    """Spawn a pool of random walks from *start* and keep the one that
    best combines wall avoidance (score) and outward displacement.

    NOTE(review): `batch_size` is read from module scope, not a
    parameter -- confirm it is defined at import time.
    Returns (walker_pool_displacement, score); the returned `score` is
    that of the LAST walk generated, not the best one -- confirm intent.
    """
    walker_pool_displacement = {}
    disp = 0   # best displacement seen so far
    best = 0   # best wall-avoidance score seen so far

    for pt in range(batch_size):
        walk, sequence = utility.spawn_random_walk(start, depth)
        rvec = []   # distance from start at each step
        score = 0   # number of steps not landing on a wall (maze == 1)
        sn = 0
        for step in walk:
            try:
                if generated_maze[step[0], step[1]] != 1:
                    score += 1
                else:
                    pass
            except IndexError:
                # Step left the maze bounds; neither rewarded nor punished.
                pass
            rvec.append(utility.calculatle_rvec(step, start))
            sn += 1
        # NOTE(review): mixes the discrete derivative of rvec with rvec
        # itself (offset slices) -- presumably a displacement-trend
        # heuristic; confirm the intended formula.
        dr = np.diff(np.array(rvec)[1:]) + np.array(rvec)[2:]
        dR = dr[len(dr) - 1] - dr[0]
        # Keep the walk only if it beats BOTH previous bests; keyed by
        # its score.
        if score > best and dR > disp:
            best = score
            disp = dR
            walker_pool_displacement[best] = walk
        if dR > 1:
            score += 1
    if show:
        print " Best Path: " + str(best)
        print "Total Path length: " + str(len(walk))
        print "Score: " + str(score)
        print "Displacement: " + str(disp)
        #genetic_maze_builder.draw_walk(walker_pool_displacement[best], start, generated_maze)
    return walker_pool_displacement, score
# Exemplo n.º 6
# 0
def spawn_random_walk_pool(world, pool_size, walk_length):
    """Spawn one random walk per random point drawn from *world*.

    Returns a dict mapping a running index to each walk (the gene
    sequence returned by spawn_random_walk is discarded).
    """
    origins = utility.fill_random_points(world, pool_size, False)
    pool = {}
    # Note the (y, x) -> (x, y) swap when seeding each walk.
    for idx, origin in enumerate(origins.values()):
        path, _sequence = utility.spawn_random_walk(
            [origin[1], origin[0]], walk_length)
        pool[idx] = path
    return pool
# Exemplo n.º 7
# 0
def one_way_chase(prey_start, pred_start):
    prey_seed, gene_sequence = utility.spawn_random_walk(prey_start, 270)
    pred_steps, prey_moves, caught = predatory_chase(prey_seed,
                                                     {'start': [200, 200]})
    if caught:
        print 'Captured!'
    draw_chase(prey_moves, pred_steps, pred_start, False, {
        'fps': 50,
        'name': 'basic_chase.mp4'
    })
# Exemplo n.º 8
# 0
 def initialize_prey(self):
     """Create a Prey object with a precomputed random walk for every
     configured start position.

     Returns the list of initialized Prey instances.
     """
     prey = []
     for p in self.Prey_Starts.values():
         # BUGFIX: spawn_random_walk returns a (walk, sequence) tuple
         # (see its other call sites); iterating the tuple directly made
         # `steps` the two-element list [walk, sequence] instead of the
         # list of walk steps.
         walk, _sequence = utility.spawn_random_walk(p, self.N_Steps)
         pobj = Prey(p)
         pobj.steps = list(walk)
         prey.append(pobj)
     return prey
# Exemplo n.º 9
# 0
def complex_chase(prey_start, pred_start, n_steps, activation):
    f = plt.figure()
    prey_seed_steps, sequence = utility.spawn_random_walk(prey_start, n_steps)
    state = np.zeros((250, 250))
    simulation = []
    tracker = []
    scared_prey = False
    for i in range(n_steps):
        if not scared_prey:
            step = prey_seed_steps[i]
            ''' Predator Eval '''
            dx = step[0] - pred_start[0]
            dy = step[1] - pred_start[1]
            r = np.sqrt(dx**2 + dy**2)
            tracker.append(r)
            if abs(dx) > abs(dy):
                if dx > 0:
                    pred_start = [pred_start[0] + 1, pred_start[1]]
                if dx <= 0:
                    pred_start = [pred_start[0] - 1, pred_start[1]]
                #TODO: dx>0 AND dy<0
                #TODO: dx>0 and dy<0
            if abs(dy) > abs(dx):
                if dy > 0:
                    pred_start = [pred_start[0], pred_start[1] + 1]
                if dy <= 0:
                    pred_start = [pred_start[0], pred_start[1] - 1]
                #TODO: dy>0 AND dx < 0
                #TODO: dy<0 AND dx <0
        else:
            dx = prey_seed_steps[i][0] - pred_start[0]
            dy = prey_seed_steps[i][1] - pred_start[1]
            if abs(dx) > abs(dx):
                if dx >= 0:
                    step = [step[0] - 1, step[1]]
                if dx <= 0:
                    step = [step[0] + 1, step[1]]
                # TODO: dx>0 AND dy<0
                # TODO: dx>0 and dy<0
            if abs(dy) > abs(dx):
                if dy >= 0:
                    step = [step[0], step[1] + 1]
                else:
                    step = [step[0], step[1] - 1]
                # TODO: dy>0 AND dx < 0
                # TODO: dy<0 AND dx <0
            dx = step[0] - pred_start[0]
            dy = step[1] - pred_start[1]
            r = np.sqrt(dx**2 + dy**2)
            tracker.append(r)
            if abs(dx) > abs(dy):
                if dx > 0:
                    pred_start = [pred_start[0] + 1, pred_start[1]]
                if dx <= 0:
                    pred_start = [pred_start[0] - 1, pred_start[1]]
            if abs(dy) > abs(dx):
                if dy > 0:
                    pred_start = [pred_start[0], pred_start[1] + 1]
                if dy <= 0:
                    pred_start = [pred_start[0], pred_start[1] - 1]
        ''' Prey Evade Sequence '''
        if int(r) == 0:
            print "CAPTURED"
            break
        if r <= activation:
            scared_prey = True
        else:
            scared_prey = False
        try:
            state[step[0] - 2:step[0] + 2, step[1] - 2:step[1] + 2] = -1
            state[pred_start[0] - 2:pred_start[0] + 2,
                  pred_start[1] - 2:pred_start[1] + 2] = 1
            simulation.append([plt.imshow(state, 'gray')])
            state[step[0] - 2:step[0] + 2, step[1] - 2:step[1] + 2] = 0
            state[pred_start[0] - 2:pred_start[0] + 2,
                  pred_start[1] - 2:pred_start[1] + 2] = 0
        except IndexError:
            pass
    a = animation.ArtistAnimation(f,
                                  simulation,
                                  interval=70,
                                  blit=True,
                                  repeat_delay=900)
    plt.show()

    plt.close()
    plt.plot(tracker)
    plt.plot(activation * np.ones((len(tracker), 1)))
    plt.show()
# Exemplo n.º 10
# 0
def spawn_walk_pool_common_origin(pool_size, walk_length, start):
    """Build a dict of *pool_size* random walks, each *walk_length*
    steps long and all starting from *start*.

    The gene sequence returned alongside each walk is discarded.
    """
    return {
        idx: utility.spawn_random_walk([start[0], start[1]],
                                       walk_length)[0]
        for idx in range(pool_size)
    }
 def generate_random_steps(self, nsteps):
     """Populate self.steps with a fresh random walk of *nsteps* steps
     starting from this object's (x, y) position; the accompanying gene
     sequence is discarded."""
     origin = [self.x, self.y]
     self.steps = utility.spawn_random_walk(origin, nsteps)[0]