Example #1
import numpy as np

import grid_object
import journey


def analyse(environment, knowledge, movement, num_goals,
            min_dist, alphas, betas, repeat, scenario):
    progress = 0
    # Start from a randomly chosen empty square.
    spaces = environment.get_coords(grid_object.labels['empty'])
    start = spaces[np.random.randint(0, len(spaces))]
    #################################
    # Debug override: fix the start square (clobbers the random draw above).
    start = [0, 5]
    goals = [start]
    too_close = []
    # Draw num_goals goal squares, each at least min_dist from the previous one.
    for g in range(num_goals):
        for square in spaces:
            if grid_object.p2_dist(goals[-1], square) <= min_dist:
                too_close.append(square)
        for square in too_close:
            if square in spaces:
                spaces.remove(square)
        goals.append(spaces[np.random.randint(0, len(spaces))])
    goals.remove(start)
    ####################################
    # Debug override: fix a single goal (clobbers the random draw above).
    goals = [[9, 5]]
    environment.set_goals(goals)
    size = (environment.get_width(), environment.get_height())
    results = []
    for k in knowledge:
        # Give the agent prior knowledge of a fraction k of the obstacles.
        obstacles = environment.get_coords(grid_object.labels['obstacle'])
        max_obs = environment.get_capacity()
        num_obs = int(np.floor(k * max_obs))
        initial_obs = []
        for n in range(num_obs):
            if len(obstacles) == 0:
                break
            index = np.random.randint(0, len(obstacles))
            initial_obs.append(obstacles.pop(index))
        for m in movement:
            # With probability m the agent deviates from its intended move,
            # split evenly between the two alternative headings used here.
            move_prob = [1 - m, m / 2, 0, 0, 0, 0, 0, m / 2]
            for p in range(len(alphas)):
                a = alphas[p]
                b = betas[p]
                costs = []
                colls = []
                # Baseline: full knowledge of the environment, no movement noise.
                perfect = journey.simulation(environment, environment, start,
                                             goals, [1, 0, 0, 0, 0, 0, 0, 0])
                for r in range(repeat):
                    # Fresh belief grid seeded with the initially known obstacles.
                    information = grid_object.grid(size, initial_obs, max_obs)
                    if scenario[0] == 1:
                        information.set_goals(goals)
                    elif scenario[0] == 2:
                        information.update_sense_range(scenario[1])
                    actual = journey.simulation(information, environment, start,
                                                goals, move_prob, a, b)
                    # Score only completed runs; sensing runs (scenario 2) always count.
                    if ((actual[journey.outputs['complete']] and
                         perfect[journey.outputs['complete']]) or scenario[0] == 2):
                        costs.append(perfect[journey.outputs['cost']] -
                                     actual[journey.outputs['cost']])
                        colls.append(actual[journey.outputs['collisions']])
                if len(costs) > 0:
                    av_cost = sum(costs) / len(costs)
                    av_coll = sum(colls) / len(colls)
                    results.append([k, m, a, b, av_cost, av_coll, start, goals,
                                    actual[journey.outputs['record']]])
                progress += 1
                print(progress)
    return results
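
Each row appended to results packs the sweep coordinates and the averaged
metrics together. A minimal sketch of unpacking one row, with the field order
taken directly from the append inside analyse() above:

# Field order as appended by analyse(); purely illustrative.
for row in results:
    k, m, a, b, av_cost, av_coll, start, goals, record = row
    print(f'knowledge={k} movement={m} alpha={a} beta={b} '
          f'cost_gap={av_cost:.2f} collisions={av_coll:.2f}')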
Example #2
# sizeB, obstacles and startB are assumed to be defined earlier in the script.
endD = [12, 1]
endE = [12, 12]
goals = [endD, endE]
# Ground-truth world vs. the agent's initially obstacle-free belief grid.
env = grid_object.grid(sizeB, obstacles, len(obstacles))
#env.update_heuristic(endB)

inf = grid_object.grid(sizeB, [], len(obstacles))
#inf.update_heuristic(endB)

# Movement-noise profiles; index 0 appears to be the probability of moving
# as intended (see the sampling sketch after this example).
move_prob1 = [1, 0, 0, 0, 0, 0, 0, 0]
move_prob2 = [0.9, 0.1, 0, 0, 0, 0, 0, 0]
move_prob3 = [0.8, 0.1, 0, 0, 0, 0, 0, 0.1]

env.update_sense_range(3)
inf.update_sense_range(3)

env.set_goals(goals)
inf.set_goals(goals)
#env.set_goals([endD])

inf.update_heuristic([])
env.update_heuristic([])

# Simulate with deterministic movement, then paint the travelled path.
results3 = journey.simulation(inf, env, startB, goals, move_prob1)
env.update_path_colour(results3[2], startB, [endD, endE])
env.show_me()
#animation.path_animation(env,results3[2],results3[2][0],goals)
"""
A simple example of an animated plot
"""
Example #3
##print(env.get_state())
##env.show_me()
#env.update_risk()
#env.update_heuristic(endB)
##
#
#move_prob1 = [1,0,0,0,0,0,0,0]
#move_prob2 = [0.9,0.1,0,0,0,0,0,0]
#
#results4 = journey.simulation(inf,env,[0,0],[9,9],move_prob2)
#env.update_path_colour(results4[2])
#env.show_me()
#print('BWANANA!!!!!x2')

move_prob1 = [1, 0, 0, 0, 0, 0, 0, 0]
move_prob2 = [0.9, 0.1, 0, 0, 0, 0, 0, 0]
move_prob3 = [0.8, 0.1, 0, 0, 0, 0, 0, 0.1]

results3 = journey.simulation(inf, env, [0, 0], [9, 9], move_prob1)
env.update_path_colour(results3[2])
env.show_me()
#env.update_path_colour (results3[1])
#env.show_me()
##print(results3)
##real_cost = results3[0]
#perfect_path = search.find_path(env,startB,endB)
#best_cost = perfect_path[0]
#print(best_cost)
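
The commented-out lines above sketch the benchmark that analyse() in
Example #1 automates: run the same journey on a fully known grid with
deterministic movement and compare costs. A minimal version using the
journey.outputs indices from Example #1; the env and inf grids, startB, and
move_prob2 are assumed to be set up as in the surrounding examples, with
goals passed as a list of coordinates as in Examples #2 and #4:

# Baseline: simulate on the fully known grid with no movement noise.
perfect = journey.simulation(env, env, startB, [[9, 9]],
                             [1, 0, 0, 0, 0, 0, 0, 0])
actual = journey.simulation(inf, env, startB, [[9, 9]], move_prob2)
# Same signed difference as recorded by analyse() in Example #1.
cost_gap = (perfect[journey.outputs['cost']] -
            actual[journey.outputs['cost']])
print('cost gap vs. perfect-knowledge run:', cost_gap)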
Example #4
# startB, sizeB, obstacles and endB..endE are assumed to be defined earlier.
move_prob1 = [1, 0, 0, 0, 0, 0, 0, 0]
move_prob2 = [0.9, 0.1, 0, 0, 0, 0, 0, 0]
move_prob3 = [0.8, 0.1, 0, 0, 0, 0, 0, 0.1]

env.update_sense_range(3)
inf.update_sense_range(3)

env.set_goals([endC, endD, endE])
#inf.set_goals([endB,endC,endD])
#env.set_goals([endD])

inf.update_heuristic([])
env.update_heuristic([])

# Note: the journey targets [endB, endC] while env was given [endC, endD, endE].
results3 = journey.simulation(inf, env, startB, [endB, endC], move_prob1)
env.update_path_colour(results3[2], startB, [endC, endD, endE])
env.show_me()
#animation.path_animation(env,results3[2])

env2 = grid_object.grid(sizeB, obstacles, len(obstacles))
# Quoted-out parameter grids for the performance.analyse sweep below:
'''
knowledge = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
movement = [0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5]
num_goals = 1
min_dist = 0
alphas = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5]
betas = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5]

'''
#results = performance.analyse(env,knowledge,movement,num_goals,
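
The call above is cut off in the source. A sketch of how it might continue,
using the signature from Example #1 and the quoted-out parameter grids above
(which would need to be un-quoted first); the repeat and scenario values here
are assumptions, not from the original:

# Hypothetical completion of the sweep call; repeat=10 and scenario=[1]
# (goal-aware mode, as in Example #1) are assumed values.
results = performance.analyse(env, knowledge, movement, num_goals,
                              min_dist, alphas, betas,
                              repeat=10, scenario=[1])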