def init():
    global time, agents, gates, envir, states, orders_list, data_stats, total_stats
    global gates_locs, wall_locs, office_locs, charging_locs, waiting_points

    # Initializing the environment
    envir, gates_locs, wall_locs, office_locs, charging_locs = envir_configuration(width, height)

    # Initializing the waiting points
    waiting_points.append(Waiting_Point(0, (6, 33)))
    waiting_points.append(Waiting_Point(1, (6, 42)))

    # Initializing the gate objects
    gates.append(Gate(0, (0, 31), (0, 32), (0, 33), (6, 29)))
    gates.append(Gate(1, (0, 37), (0, 38), (0, 39), (6, 38)))
    gates.append(Gate(2, (0, 43), (0, 44), (0, 45), (6, 47)))

    # Initializing the agents
    if behavior_type == 1:
        for i in range(N_AGV):
            agents.append(AGV((42, i + 1 + i * 2), "darkred", 100 + i, []))
    elif behavior_type == 2:
        for i in range(N_AGV):
            agents.append(AGV((42, i + 1 + i * 2), "red", 100 + i, ["client", "status"]))
        # Distribute the article columns of the orders list over the AGVs round-robin
        columns = orders_list.columns[2:]
        if len(columns) > N_AGV:
            for j, c in enumerate(columns):
                agents[j % N_AGV].articles_priority.append(c)
        else:
            print("Error - Too many AGVs for the number of article columns.")
    elif behavior_type == 3:
        for i in range(N_AGV):
            agents.append(AGV((42, i + 1 + i * 2), "red", 100 + i, []))
    else:
        print("Error - behavior_type does not exist.")

    # Initializing the dataframe used to collect data about the simulation
    total_stats, data_stats = init_dataStats(data_stats, total_stats, agents)
def init():
    global time, agents, gates, n_col_per_ag, envir, states, orders_list, data_stats
    global wall_x, wall_y, gate_x, gate_y, total_stats, office_x, office_y

    # Initializing the environment
    envir, wall_x, wall_y, gate_x, gate_y, office_x, office_y = envir_configuration(width, height)

    # Initializing the gate objects
    gates.append(Gate(0, (0, 31), (0, 32), (0, 33), (3, 29)))
    gates.append(Gate(1, (0, 37), (0, 38), (0, 39), (3, 38)))
    gates.append(Gate(2, (0, 43), (0, 44), (0, 45), (3, 47)))

    # Initializing the agents
    if behavior_type == 1:
        for i in range(N_AGV):
            agents.append(AGV((42, i + 1 + i * 2), "red", 100 + i))
    elif behavior_type == 2:
        for i in range(N_AGV):
            agents.append(AGV((42, i + 1 + i * 2), "red", ["client", "status"]))
        # Distribute the article columns of the orders list over the AGVs round-robin
        columns = orders_list.columns[2:]
        if len(columns) > N_AGV:
            for j, c in enumerate(columns):
                agents[j % N_AGV].id.append(c)
        else:
            print("Error - Too many AGVs for the number of article columns.")
    elif behavior_type == 3:
        for i in range(N_AGV):
            agents.append(AGV((42, i + 1 + i * 2), "red", 100 + i))
    else:
        print("Error - behavior_type does not exist.")

    # Initializing the dataframe used to collect data about the simulation
    total_stats, data_stats = init_dataStats(data_stats, total_stats, agents)
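# Minimal, self-contained sketch (an illustration, not part of the original module) of the
# round-robin rule used by the two init() variants above for behavior_type == 2: the article
# columns of orders_list beyond the first two are dealt out to the AGVs in turn. The column
# names and AGV count below are made up for the example.
def _round_robin_columns(columns, n_agv):
    # per_agv[k] collects the columns assigned to AGV k, mirroring
    # agents[j % N_AGV].articles_priority.append(c) in init().
    per_agv = [[] for _ in range(n_agv)]
    for j, c in enumerate(columns):
        per_agv[j % n_agv].append(c)
    return per_agv

# Example: 5 article columns spread over 2 AGVs
# _round_robin_columns(["art_A", "art_B", "art_C", "art_D", "art_E"], 2)
# -> [['art_A', 'art_C', 'art_E'], ['art_B', 'art_D']]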
def create(spec, scheme='none', count=20, *args):
    """
    @spec   - Specification (size, endpoints, barriers); either exactly specified
              in a file, or with numeric values in a list
    @scheme - Option-generation scheme: none | random-node | random-path |
              betweenness | small-world | betweenness+small-world | load
    @count  - Number of options to generate; a string such as "20%" is interpreted
              as a percentage of the number of states
    """
    env = AGV.create(spec)

    # A string count is a percentage of the number of states
    if isinstance(count, str):
        count = int(count[:-1])
        count = int(count * env.S / 100)

    # Generate the option set for the requested scheme
    O = []
    if scheme == "none":
        pass
    elif scheme == "random-node":
        O = OptionGenerator.optimal_options_from_random_nodes(env, count, *args)
    elif scheme == "random-path":
        O = OptionGenerator.optimal_options_from_random_paths(env, count, *args)
    elif scheme == "betweenness":
        O = OptionGenerator.optimal_options_from_betweenness(env, count, *args)
    elif scheme == "small-world":
        O = OptionGenerator.optimal_options_from_small_world(env, count, *args)
    elif scheme == "betweenness+small-world":
        O = OptionEnvironment.optimal_options_from_betweenness(env, count)
        count_ = count - len(O)
        O += OptionEnvironment.optimal_options_from_small_world(env, count_, *args)
    elif scheme == "load":
        O = OptionGenerator.options_from_file(count, *args)
    else:
        raise NotImplementedError(scheme)

    return OptionEnvironment(AGVOptions, env.S, env.A, env.P, env.R, env.R_bias,
                             env.start_set, env.end_set, O)
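# Illustrative calls (a sketch: the spec path "maps/office_floor.txt" is hypothetical,
# only the create() signature above is taken from the code). An integer count is used
# directly, while a string such as "20%" is converted to a fraction of the env.S states.
#
# env_plain = create("maps/office_floor.txt")                                # no options
# env_btw   = create("maps/office_floor.txt", scheme="betweenness", count=10)
# env_pct   = create("maps/office_floor.txt", scheme="random-node", count="20%")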
def reset_rewards(env, spec, *args):
    # Rebuild the option environment with refreshed rewards while keeping the
    # previously generated option set O.
    O = env.O
    env = AGV.reset_rewards(env, spec)
    return OptionEnvironment(AGVOptions, env.S, env.A, env.P, env.R, env.R_bias,
                             env.start_set, env.end_set, O)
import sys
from multiprocessing import Process, Queue

if __name__ == "__main__":
    rootDic = sys.argv[1]
    totalAGVno = 4
    regNo = 4
    mapNo = 1

    AGVObj = []
    ontObjList = []
    q = Queue()

    # Build the map, then create one AGV and one ontology/timetable object per vehicle
    mObj = createMaps(mapNo, rootDic)
    for c in range(1, totalAGVno + 1):
        newOntCreate(c, rootDic)
        AGVObj.append(AGV(rootDic, mapNo, c, regNo, mObj))
        ontObjList.append(newTT(c, rootDic))
    q.put(ontObjList)

    # Run each AGV's path planner in its own process
    processes = [Process(target=agv.pathPlanning, args=(ontObjList,)) for agv in AGVObj]
    for p in processes:
        p.start()
    # Wait for all planners to finish before exiting
    for p in processes:
        p.join()
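# Invocation sketch (assumption: the script name "multi_agv_main.py" is hypothetical;
# only the fact that sys.argv[1] supplies the root directory comes from the code above):
#
#   python multi_agv_main.py /path/to/project/root
#
# Each AGV's pathPlanning() then runs in its own multiprocessing.Process and receives
# the same ontObjList that was built in the loop above.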