def analyse(environment, knowledge, movement, num_goals, min_dist,
            alphas, betas, repeat, scenario):
    """Sweep the (knowledge, movement, alpha/beta) parameter grid and
    average journey cost/collision statistics over repeated simulations.

    Returns a list of result rows:
    [k, m, a, b, av_cost, av_coll, start, goals, record-of-last-run].
    """
    progress = 0

    # Pick a random start among the empty squares...
    spaces = environment.get_coords(grid_object.labels['empty'])
    start = spaces[np.random.randint(0, len(spaces))]
    # ...then immediately overridden with a fixed start (debug override —
    # the random pick above is discarded; num_goals/min_dist below are
    # likewise overridden by the fixed goal list).
    start = [0, 5]

    # Generate num_goals goals, each at least min_dist from the previous one.
    goals = [start]
    too_close = []
    for _ in range(num_goals):
        for square in spaces:
            if grid_object.p2_dist(goals[-1], square) <= min_dist:
                too_close.append(square)
        for square in too_close:
            if square in spaces:
                spaces.remove(square)
        goals.append(spaces[np.random.randint(0, len(spaces))])
    goals.remove(start)
    # Debug override: fixed single goal (generation above is discarded).
    goals = [[9, 5]]

    environment.set_goals(goals)
    size = (environment.get_width(), environment.get_height())
    results = []

    for k in knowledge:
        # Sample a k-fraction of the true obstacles as the agent's
        # initial (partial) knowledge of the map.
        obstacles = environment.get_coords(grid_object.labels['obstacle'])
        max_obs = environment.get_capacity()
        num_obs = int(np.floor(k * max_obs))
        initial_obs = []
        for _ in range(num_obs):
            if len(obstacles) == 0:
                break
            index = np.random.randint(0, len(obstacles))
            initial_obs.append(obstacles[index])
            obstacles.remove(obstacles[index])

        for m in movement:
            # Probability m of slipping, split between the two neighbours.
            move_prob = [1 - m, m / 2, 0, 0, 0, 0, 0, m / 2]

            for p in range(len(alphas)):
                a = alphas[p]
                b = betas[p]
                costs = []
                colls = []

                # Baseline: perfect information, deterministic movement.
                perfect = journey.simulation(environment, environment, start,
                                             goals, [1, 0, 0, 0, 0, 0, 0, 0])

                for _ in range(repeat):
                    information = grid_object.grid(size, initial_obs, max_obs)
                    if scenario[0] == 1:
                        information.set_goals(goals)
                    elif scenario[0] == 2:
                        information.update_sense_range(scenario[1])
                    actual = journey.simulation(information, environment,
                                                start, goals, move_prob, a, b)
                    # Only score runs where both journeys completed
                    # (scenario 2 is scored unconditionally).
                    completed = (actual[journey.outputs['complete']]
                                 and perfect[journey.outputs['complete']])
                    if completed or scenario[0] == 2:
                        costs.append(perfect[journey.outputs['cost']]
                                     - actual[journey.outputs['cost']])
                        colls.append(actual[journey.outputs['collisions']])

                if len(costs) > 0:
                    av_cost = sum(costs) / len(costs)
                    av_coll = sum(colls) / len(colls)
                    # Note: the record stored is from the *last* repeat only.
                    results.append([k, m, a, b, av_cost, av_coll, start,
                                    goals, actual[journey.outputs['record']]])

                progress += 1
                print(progress)

    return results
"""Smoke-test script for the grid/search/journey modules.

@author: matth
"""
# NOTE(review): the opening triple-quote of this module docstring was missing
# in the original source (only the closing quotes survived); restored so the
# header parses.
import grid_object
import search
import journey
import numpy as np

np.set_printoptions(threshold=np.inf)

sizeA = (5, 5)
startA = [0, 0]
endA = [2, 2]
#obs=[0,2]
##print(obs)
test1 = grid_object.grid(sizeA, [], startA, endA)
test1.random_obs(3, [startA, endA])
#test1.update_risk()
print(test1.get_state())
#test1.update_heuristic(endA)
##print(test1.get_heuristic())
#result1 = search.find_path(test1,startA,endA)
##print(result1)

# NOTE(review): an unresolved merge-conflict marker ("<<<<<<< HEAD") preceded
# the block below; it was a syntax error and has been removed so the file
# parses. This HEAD-side variant re-binds test1 with an explicit obstacle —
# confirm which side of the merge was intended and delete the other.
sizeA = (3, 3)
startA = [0, 0]
endA = [2, 2]
obs = [0, 2]
##print(obs)
test1 = grid_object.grid(sizeA, [obs], startA, endA)
##test2 = grid_object.grid(sizeA,startA,endA,[[1,1]])
##result2 = search.find_path(test2,startA,endA)

# ----- 15x15 test environment -----
sizeB = (15, 15)
ob1 = [[3, 4], [4, 4], [3, 3], [4, 5]]
ob2 = [[6, 8], [7, 8], [8, 8], [9, 8]]
ob3 = [[2, 5], [3, 4], [4, 3], [5, 2]]
ob4 = [[10, 12], [11, 12], [13, 12], [12, 11]]
# ob4 is defined but deliberately left out of the combined obstacle set.
obstacles = ob1 + ob2 + ob3

startB = [0, 0]
#endB = [9,9]
#endC = [0,14]
endD = [12, 1]
endE = [12, 12]
goals = [endD, endE]

# env holds ground truth; inf is the agent's (initially empty) belief grid.
env = grid_object.grid(sizeB, obstacles, len(obstacles))
#env.update_heuristic(endB)
inf = grid_object.grid(sizeB, [], len(obstacles))
#inf.update_heuristic(endB)

# Movement models: deterministic, mildly noisy, noisy on both neighbours.
move_prob1 = [1, 0, 0, 0, 0, 0, 0, 0]
move_prob2 = [0.9, 0.1, 0, 0, 0, 0, 0, 0]
move_prob3 = [0.8, 0.1, 0, 0, 0, 0, 0, 0.1]

env.update_sense_range(3)
inf.update_sense_range(3)
env.set_goals(goals)
inf.set_goals(goals)
#env.set_goals([endD])
def get_results(knowledge, movement, results_nb, goals_nb, repeat_nb):
    """Build a test environment, run performance.analyse results_nb times,
    draw the last recorded path on the environment, and return the results.

    knowledge / movement are passed through to performance.analyse;
    results_nb, goals_nb and repeat_nb index into the parameter vectors
    defined below.
    """
    # --- create an environment ---
    sizeB = (10, 10)
    ob1 = [[3, 4], [4, 4], [3, 3], [4, 5]]
    ob2 = [[6, 8], [7, 8], [8, 8], [9, 8]]
    ob3 = [[2, 5], [3, 4], [4, 3], [5, 2]]
    ob4 = [[5, 6], [5, 7]]
    obstacles = ob1 + ob2 + ob3  #+ob4
    # ob1=[[3,4],[3,6],[4,4],[4,6],[5,4],[5,6],[6,4],[6,6],[7,4],[7,6]]
    # obstacles=ob1
    # NOTE(review): the hand-built map above is immediately discarded in
    # favour of the stored 30x30 map; gridStore is not imported in this
    # view — confirm it is available at module level.
    obstacles = gridStore.obs
    sizeB = (30, 30)
    env = grid_object.grid(sizeB, obstacles, len(obstacles))

    # --- parameter vectors ---
    num_goals = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    min_dist = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15]
    '''
    alpha is heuristic
    beta is risk
    '''
    # test for cost
    alphas_c = [
        1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8,
        0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0
    ]
    betas__c = [
        1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8,
        0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0
    ]
    # test for heuristic
    alphas_h = [
        0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4,
        1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0
    ]
    betas__h = [
        1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8,
        0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0
    ]
    # test for risk
    alphas_r = [
        1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9, 0.8,
        0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0
    ]
    betas__r = [
        0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4,
        1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0
    ]
    repeat = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    results = []
    # NOTE(review): these two pairs of assignments shadow each other — only
    # the final fixed alphas=[1], betas=[1] take effect.
    alphas = [alphas_r[-2]]
    betas = [betas__r[-2]]
    alphas = [1]
    betas = [1]

    for _ in range(results_nb):
        results.append(
            performance.analyse(env, knowledge, movement,
                                num_goals[goals_nb], min_dist[5],
                                alphas, betas, repeat[repeat_nb], [2, 4]))

    # Paint the path from the first result row and display the grid.
    env.update_path_colour(results[0][0][-1], results[0][0][-3],
                           results[0][0][-2])
    env.show_me()
    return results
import grid_object
import search
import journey
import performance
import numpy as np
import animation

np.set_printoptions(threshold=np.inf)

# ----- small 5x5 smoke test -----
sizeA = (5, 5)
startA = [0, 0]
endA = [2, 2]
#obs=[0,2]
##print(obs)
test1 = grid_object.grid(sizeA, [], 0)
test1.random_obs(3, [startA, endA])
#test1.update_risk()
#print(test1.get_state())
#test1.update_heuristic(endA)
##print(test1.get_heuristic())
#result1 = search.find_path(test1,startA,endA)
##print(result1)
##test2 = grid_object.grid(sizeA,startA,endA,[[1,1]])
##result2 = search.find_path(test2,startA,endA)

# ----- 15x15 obstacle layout -----
sizeB = (15, 15)
ob1 = [[3, 4], [4, 4], [3, 3], [4, 5]]
ob2 = [[6, 8], [7, 8], [8, 8], [9, 8]]
ob3 = [[2, 5], [3, 4], [4, 3], [5, 2]]
ob4 = [[10, 12], [11, 12], [13, 12], [12, 11]]