def get_cost(traj_node, traj_mps, peri_boundary, total_traj_time, habitats, sharkGrid, weights): """ Return the mean cost of the produced path Parameter: trajectory: a list of Node objects """ total_cost = 0 num_steps = 0 max_time = traj_node[-1].time_stamp time = 0 cal_cost = Cost() while time < max_time: currPath_mps = get_curr_mps_path(time, traj_mps) currPath_node = get_curr_node_path(time, traj_node) cost = cal_cost.habitat_shark_cost_func(currPath_mps, currPath_node[-1].pathLen, peri_boundary, total_traj_time, habitats, sharkGrid, weights) # print ("habitat_shark_cost = ", cost[0], "currPath length = ", len(currPath_mps), "currPath_node[-1].pathLen = ", currPath_node[-1].pathLen) total_cost += cost[0] time += 2 # dt = 2s num_steps += 1 mean_cost = total_cost / num_steps print("mean cost = ", total_cost, " / ", num_steps, " = ", mean_cost) return mean_cost
def eliminate(patterns, sequence, cost_unit, transition_transversion_proportion): extended_patterns = extend_embiguous_patterns(patterns) cost = Cost(sequence, cost_unit, transition_transversion_proportion) result_seq = MultipleSoftElimination(sequence=sequence, patterns=extended_patterns, cost=cost, alphabet=dna_alphabet).eliminate() result_cost = cost.get_cost_of_seq(result_seq) return result_seq, result_cost
def get_market_data(): global username if request.method == "GET": band1 = Bandit() trader1 = Trader() police1 = Police() cost1 = Cost() market_inventory = regions.child(currRegion).child('inventory').get() ship_inventory = users.child(username).child('ship').child( 'inventory').get() difficulty = users.child(username).child('difficulty').get() ship_cargo = users.child(username).child('ship').child('cargo').get() credit_val = users.child(username).child('credit').get() ship_health = users.child(username).child('ship').child('health').get() pilot_skill = users.child(username).child('skills').child( 'pilot').get() engineer = users.child(username).child('skills').child( 'engineer').get() fighter_skill = users.child(username).child('skills').child( 'fighter').get() merchant_skill = users.child(username).child('skills').child( 'merchant').get() fuel = users.child(username).child('ship').child('fuel').get() fuelcost = cost1.calculate_fuel(difficulty, credit_val) demand = band1.calculate_demand(difficulty, credit_val) repair = cost1.calculate_repair(difficulty, engineer, credit_val) price = trader1.item_to_sell(difficulty, credit_val) qty = trader1.qty item = trader1.item stolen = police1.stolen_item(ship_inventory) to_return = { 'username': username, 'currRegion': currRegion, 'market_inventory': market_inventory, 'ship_inventory': ship_inventory, 'cargo': ship_cargo, 'credit': credit_val, 'health': ship_health, 'demand': demand, 'qty': qty, 'item': item, 'price': price, 'eng': engineer, 'stolen': stolen, 'difficulty': difficulty, 'pilot': pilot_skill, 'fighter': fighter_skill, 'fuel': fuel, 'fuelcost': fuelcost, 'merch': merchant_skill, 'repair': repair } return to_return return None
def __init__(self, no_init=False, **kwargs): Cost.__init__(self) self._weights = kwargs.pop('weights', []) if no_init: self._costs = kwargs.pop('costs', []) else: costs_params = kwargs.pop('costs', []) self._costs = [init_component(params) for params in costs_params] assert len(self._costs) > 0 assert isinstance(self._costs[0], Cost) assert len(self._costs) == len(self._weights)
def setup(self, sp=False, costModel=False): Srunway = Variable("S_{runway}", "ft", "runway length") PFEI = Variable("PFEI", "kJ/(kg*km)", "PFEI") "Parameters of interest" self.aircraft = Aircraft() takeoff = TakeOff(self.aircraft, sp=sp) cruise = Cruise(self.aircraft) mission = [cruise, takeoff] climb = Climb(self.aircraft) if costModel: cost = Cost(self.aircraft) mission.extend([cost]) constraints = [ self.aircraft["P_{shaft-max}"] >= cruise["P_{shaft}"], Srunway >= takeoff["S_{TO}"], PFEI == self.aircraft["h_{batt}"] * self.aircraft["W_{batt}"] / (cruise["R"] * self.aircraft["W_{pay}"]) ] if sp: landing = Landing(self.aircraft) constraints.extend([Srunway >= landing["S_{land}"]]) mission.extend([landing]) else: landing = GLanding(self.aircraft) constraints.extend([Srunway >= landing["S_{land}"]]) mission.extend([landing]) return constraints, self.aircraft, mission, climb
def __init__(self): self.tag = self.__class__.__name__ self.input_layer = [] # <Sample Object> self.hidden_layer = [] # <Net Object> self.output_layer = [] # <Net Object> self.learning_rate = 1.0 # 学习速率 self.max_iteration = 1 # 最大迭代数 self.convergence = 0.001 # 收敛误差 self.activation = Method.Sigmoid self.iteration = 0 # 迭代次数 self.cost = Cost() # Cost Function # Callbacks self.iteration_callback = None self.completion_callback = None
def __init__(self, boundary, obstacles, shark_dict, sharkGrid, exp_rate=1, dist_to_end=2, diff_max=0.5, freq=30): '''setting parameters: initial_location: initial Motion_plan_state of AUV, [x, y, z, theta, v, w, time_stamp] goal_location: Motion_plan_state of the shark, [x, y, z] obstacle_list: Motion_plan_state of obstacles [[x1, y1, z1, size1], [x2, y2, z2, size2] ...] boundary: max & min Motion_plan_state of the configuration space [[x_min, y_min, z_min],[x_max, y_max, z_max]]''' #initialize obstacle, boundary, habitats for path planning self.boundary = boundary #initialize corners for boundaries coords = [] for corner in self.boundary: coords.append((corner.x, corner.y)) self.boundary_poly = Polygon(coords) self.obstacle_list = obstacles self.mps_list = [] # a list of motion_plan_state self.time_bin = {} #if minimum path length is not achieved within maximum iteration, return the latest path self.last_path = [] #setting parameters for path planning self.exp_rate = exp_rate self.dist_to_end = dist_to_end self.diff_max = diff_max self.freq = freq # initialize shark occupancy grid self.cell_list = splitCell(self.boundary_poly, 10) self.sharkGrid = sharkGrid self.sharkDict = shark_dict # initialize cost function self.cal_cost = Cost() # keep track of the longest single path in the tree to normalize every path length self.peri_boundary = self.cal_boundary_peri()
def total_cost(self, inputs, outputs): cost = 0. for (x, y) in zip(inputs, outputs): predctn = self.n_net.prediction(x) cost += cc.fn(self.n_net.cost_type, predctn, y) \ / len(x) # scaling return cost
def load_cost(self): """ This function is to load cost object data from storage. :return: cost object list. """ data_cost = [] data_file = open(self.storage) deliveries_reader = csv.reader(data_file) for row in deliveries_reader: if row[0] == 'C': data_cost.append(Cost(row[1], row[2])) data_file.close() return data_cost
def solve(pitches, wishes, relations=None, n=200, patience=200, diversity=.9): """ Attempt to minimise the cost function with a naive evolutionnary solver :param pitches: <pitch, <role, load>> :param wishes: <student, [(pitch, role)]> :param relations: <student, <student, cost>> :param n: number of competing solutions :param patience: number of iteration without improvement before stopping :param diversity: proportion of kept solutions for the next iteration :return: [(student, wish index)] """ assert n > 1 and patience > 0 cost = Cost(pitches, wishes, relations) # precomputations to pick best solutions to clone and modify keep = int(n * diversity) discard = list(range(keep - 1, n)) # the starting solutions {student, wish index} solutions = random_solutions(wishes, n) p = patience # for printing in the console with padded 0 zfill_p = len(str(patience)) - 1 best_cost = float("+inf") print("Cost so far:") start = time() count = 0 while p > 0: count += 1 # compute the cost of the solutions costs = [cost(s) for s in solutions] # sort the solutions by cost costs, solutions = zip( *sorted(zip(costs, solutions), key=lambda cs: cs[0])) solutions = list(solutions) # update the patience if best_cost == costs[0]: p -= 1 else: p = patience best_cost = costs[0] print( f"{best_cost:.2f} (patience = {str(int(p/10)).zfill(zfill_p)}) ", end="\r") # replace the worse solutions by modified clones of the best solutions for i in discard: solutions[i] = copy(solutions[i % keep]) random_changes(wishes, solutions[i], 2) print() delta = time() - start print(f"in {delta:,.1f} sec - {1000*delta/count:.0f}ms/it") # [(student, wish index)] return solutions[0]
def check_area_ytp(pickle_name): data = load(pickle_name) n_years = 15 house_cost = data.loc[0].value appreciation = 3.0 n_months = n_years * 12 dates = pd.date_range(data.loc[0].date, periods=n_months, freq="M") rent = Revenue("rent", "monthly payment", 0.011 * house_cost) # costs utilities = Cost("utilities", "hvac", 150) insurance = Cost("insurance", "full coverage", 100) property_tax = Cost("insurance", "full coverage", (house_cost * 0.019) / 12) mortgage = Mortgage(house_cost, 25000, dates, 3.0) # revenue revenues = [rent] costs = [utilities, insurance, property_tax] house = House(revenues, costs, mortgage, dates, appreciation) # house.summary() house.graph_net_profit() portfolio = Portfolio([house]) ytp = portfolio.get_years_to_positive(house) print("years till positive", ytp) exit(0) return ytp
def main(): seed = 18765 np.random.seed(seed) # Generate graph with 100 nodes (choose one of them) G = nx.barabasi_albert_graph(100, m=1, seed=seed) # G = nx.powerlaw_cluster_graph(100, 1, 0, seed) # G = nx.random_powerlaw_tree(100, 3, seed) topo = Topology(G) # Analyze graph and get providers providers = topo.get_providers() # For each block, get those providers from which it can get the energy # (those providers that are at minimum distance) available_providers = topo.get_available_providers() # Isolate providers so that houses attached to each of them can be easily computed topo.isolate_providers() #consumers = Consumers(Constants.num_blocks, [25] * Constants.num_blocks) # Same number of blocks as providers Constants.num_producers = Constants.num_blocks = len(providers) consumers = Consumers( len(providers), [len(nx.node_connected_component(topo.G, i)) for i in providers], list(available_providers.items())) # Compute final price of the energy (will update the demand too) price = Cost.optimal_demand_response(consumers) Cost.plot_price(price) # Cost.plot_price(house_price) # Battery.plot_battery(Cost.price) consumers.compute_total_measures() np.savetxt("expenditures.txt", consumers.total_expenditures) np.savetxt("demand.txt", consumers.total_demand)
def decide_provider(self, iteration, global_price): # self.reset_adj_matrix() for t in range(Constants.day_hours.size): price = global_price[:, t] changing_customers = [] for i in range(self.blocks.size): # Get list of changing customers of block i changing_customers_list, new_producer = self.blocks[ i].decide_provider(price, t, iteration) # Generate list of same number of tuples as number of changing customers # with first element being the block, second being one customer and third the new provider changing_customers = changing_customers + \ list(zip([i] * len(changing_customers_list), changing_customers_list, [new_producer] * len(changing_customers_list))) # Shuffle them so to get a random order in which they will change np.random.shuffle(changing_customers) for change_tuple in changing_customers: # Account change try: self.blocks[change_tuple[0]].change_provider( change_tuple[1], change_tuple[2], t) except: print(iteration, t) exit(1) # Recompute price possible_providers = self.blocks[ change_tuple[0]].possible_providers total_demand = np.zeros(possible_providers.size) for prod in range(total_demand.size): total_demand[prod] = self.get_demand_by_prod( iteration, possible_providers[prod], [t]) new_price = Cost.energy_price(total_demand, self.num_total_households) # Take new producer and block and update adjacency matrix # self.update_adj_matrix(t, change_tuple[2], change_tuple[0]) # If after change, prices are still different or the cheapest producer is the same, we assume # that customers will continue changing provider. Else, stop process. new_cheap_prod = np.where(new_price == np.min(new_price)) if all(new_price == new_price[0]) or \ np.all(np.where(change_tuple[2] == possible_providers)[0] != new_cheap_prod): break self.update_adj_matrix(t) return
def test_cost(self): seq = "aaAr" cost = Cost(sequence=seq, cost_unit=1, transition_transversion_proportion=2) assert 0 == cost.get(1, "a"), "cost is 0 for not changing the letter" assert 0 == cost.get( 4, "a"), "cost is 0 for not changing the letter (r is a or g)" assert infinity == cost.get( 3, "g"), "index 2 is not allowed to be changed (uppercase)" assert 1 == cost.get(1, "g"), "transition cost" assert 2 == cost.get(1, "t"), "transversion cost" assert 2 == cost.get(1, "c"), "transversion cost" assert 2 == cost.get(4, "t"), "transversion cost with IUPAC"
def back_propagtn(self, x, y): learng_coeff = self.learng_eta / len(x) # scaling l_losses = cc.dv(self.cost_type, y, self.l_actvtns[self.l_number-1]) \ + rr.dv(self.reg_type, y, self.l_actvtns[self.l_number-1]) for l_ix in range(self.l_number - 1, 0, -1): l_slopes = aa.dv(self.actvtn_types[l_ix], self.l_actvtns[l_ix]) l_deltas = l_losses * l_slopes weights_nabla = self.l_actvtns[l_ix-1].T.dot(l_deltas) biases_nabla = np.sum(l_deltas, axis=0, keepdims=True)[0] self.l_weights[l_ix] = self.l_weights[l_ix] \ + weights_nabla * learng_coeff self.l_biases[l_ix] = self.l_biases[l_ix] \ + biases_nabla * learng_coeff l_losses = l_deltas.dot(self.l_weights[l_ix].T)
def __init__(self, env, trajectory): self.cfg = config.cfg self.trajectory = trajectory self.cost = Cost(env) self.optim = Optimizer(env, self.cost) if self.cfg.goal_set_proj: print("Not implemented") # if self.cfg.scene_file == "" or self.cfg.traj_init == "grasp": # self.load_grasp_set(env) # self.setup_goal_set(env) # else: # self.load_goal_from_scene() # self.grasp_init(env) # self.learner = Learner(env, trajectory, self.cost) else: self.trajectory.interpolate_waypoints() self.history_trajectories = [] self.info = [] self.ik_cache = []
def astar(self, habitat_list, obs_lst, boundary_list, start, goal, weights): """ Find the optimal path from start to goal avoiding given obstacles Parameter: obs_lst - a list of motion_plan_state objects that represent obstacles start - a tuple of two elements: x and y coordinates goal - a tuple of two elements: x and y coordinates """ habitats_time_spent = 0 cal_cost = Cost() path_length = 0 start_node = Node(None, start) start_node.g = start_node.h = start_node.f = 0 end_node = Node(None, goal) end_node.g = end_node.h = end_node.f = 0 dynamic_path = [] # append new node as a* search goes cost = [] open_list = [] # hold neighbors of the expanded nodes closed_list = [] # hold all the exapnded nodes habitat_open_list = habitat_list # hold haibitats that have not been explored habitat_closed_list = [] # hold habitats that have been explored open_list.append(start_node) while len(open_list) > 0: current_node = open_list[0] # initialize the current node current_index = 0 for index, item in enumerate( open_list ): # find the current node with the smallest f(f=g+h) if item.f < current_node.f: current_node = item current_index = index open_list.pop(current_index) closed_list.append(current_node) dynamic_path.append(current_node.position) print("dynamic path: ", dynamic_path) # print ('dynamic_path: ', dynamic_path) if self.near_goal(current_node.position, end_node.position): path = [] current = current_node while current is not None: # backtracking to find the path path.append(current.position) cost.append(current.cost) # if current.parent is not None: # current.cost = current.parent.cost + cal_cost.cost_of_edge(current, path, habitat_open_list, habitat_closed_list, weights=[1, 1, 1]) path_length += 10 habitats_time_spent += self.habitats_time_spent(current) # print ("\n", "habitats_time_spent: ", habitats_time_spent) current = current.parent path_mps = [] for point in path: mps = Motion_plan_state(point[0], point[1]) path_mps.append(mps) # print ("\n", 'actual path length: ', path_length) # print ("\n", 'time spent in habitats: ', habitats_time_spent) # print ("\n", "cost list: ", cost) # print ("\n", 'cost list length: ', len(cost)) # print ("\n", 'path list length: ', len(path_mps)) return ([path_mps[::-1], cost]) # find close neighbors of the current node current_neighbors = self.curr_neighbors(current_node, boundary_list) print("\n", "current neighbors: ", current_neighbors) # make current neighbors Nodes children = [] grid = [] # holds a list of tuples (neighbor, grid value) for neighbor in current_neighbors: # create new node if the neighbor is collision-free if self.check_collision(neighbor, obs_lst): """ SELECTION """ grid_val = self.create_grid_map(current_node, neighbor) grid.append((neighbor, grid_val)) print("\n", "grid value: ", grid_val) print("\n", 'grid list: ', grid) # remove two elements with the lowest grid values grid.remove(min(grid)) print("selected grid list: ", grid) # grid.remove(min(grid)) # append the selected neighbors to children for neighbor in grid: new_node = Node(current_node, neighbor[0]) children.append(new_node) # update habitat_open_list and habitat_closed_list result = self.check_cover_habitats(new_node, habitat_open_list, habitat_closed_list) habitat_open_list = result[0] # print ("habitat_open_list: ", habitat_open_list) habitat_closed_list = result[1] # print ("habitat_closed_list: ", habitat_closed_list) for child in children: if child in closed_list: continue # dist_child_current = self.euclidean_dist(child.position, current_node.position) if 
child.parent is not None: result = cal_cost.cost_of_edge(child, dynamic_path, habitat_open_list, habitat_closed_list, weights=weights) child.cost = child.parent.cost + result[0] # child.g = current_node.g + dist_child_current child.g = current_node.cost child.h = weights[0] * self.euclidean_dist( child.position, goal) - weights[1] * result[1] # result=(cost, d_2) child.f = child.g + child.h # check if child exists in the open list and have bigger g for open_node in open_list: if child == open_node and child.g > open_node.g: continue open_list.append(child)
print(f'x == {x}') raise e myctparam.xlog(x, myctparam.i, total_cost, var_bool) print(f'ITERATION = {myctparam.i}\t\t*** TOTAL COST = {total_cost}') return total_cost mypl = Plant() mypl.run(Const.PLANT_PATH) mymrg = Margin() mymrg.run(mypl.freqhz, mypl.freqhzunit) # IN THIS SCRIPT mycost = Cost(mymrg, mypl) myct = Controller(mypl.freq) myctparam = ControlParam() xparam = myctparam.xinitial(4) #iter_num = myctparam.i #mybound = Bounds(Const.XMIN, Const.XMAX, keep_feasible=False) #xu = minimize(fun = IO, x0= ControlParam.x_notch4 , method ='COBYLA', options={'rhobeg': 1.0, 'maxiter': 20, 'disp': True, 'catol': 0.0002}) Const.OPTMETHOD = 'Nelder-Mead' tstart = time.monotonic() xu = minimize(fun=IO, x0=xparam, method=Const.OPTMETHOD, options={ 'rhobeg': 1.0,
def get_initial_bids(self, iteration, t): demand = self.demand.total_house_demand[iteration, :, t] return Cost.energy_price(demand, self.num_households)
def astar(self, pathLenLimit, weights, shark_traj): """ Find the optimal path from start to goal avoiding given obstacles Parameter: pathLenLimit: in meters; the length limit of the A* trajectory weights: a list of three numbers [w1, w2, w3] shark_traj: a list of Motion_plan_state objects """ w1 = weights[0] w2 = weights[1] w3 = weights[2] w4 = weights[3] habitats_time_spent = 0 cal_cost = Cost() start_node = Node(None, self.start) start_node.g = start_node.h = start_node.f = 0 cost = [] open_list = [] # hold neighbors of the expanded nodes closed_list = [] # hold all the exapnded nodes habitats = self.habitat_list[:] habitat_open_list = self.habitat_list[:] # hold haibitats that have not been explored habitat_closed_list = [] # hold habitats that have been explored open_list.append(start_node) while len(open_list) > 0: current_node = open_list[0] # initialize the current node current_index = 0 for index, item in enumerate(open_list): # find the current node with the smallest f if item.f < current_node.f: current_node = item current_index = index open_list.pop(current_index) closed_list.append(current_node) if abs(current_node.pathLen - pathLenLimit) <= 10: # terminating condition path = [] current = current_node while current is not None: # backtracking to find the d path.append(current) cost.append(current.cost) habitats_time_spent += self.habitats_time_spent(current) current = current.parent path_mps = [] for node in path: mps = Motion_plan_state(node.position[0], node.position[1], traj_time_stamp=round(node.time_stamp, 2)) path_mps.append(mps) trajectory = path_mps[::-1] # print ("\n", "Original Trajectory: ", trajectory) print ("\n", "Original Trajectory length: ", len(trajectory)) smoothPath = self.smoothPath(trajectory, habitats) # return {"path length" : len(trajectory), "path" : trajectory, "cost" : cost[0], "cost list" : cost} return {"path length" : len(smoothPath), "path" : smoothPath, "cost" : cost[0], "cost list" : cost, "node" : path[::-1]} current_neighbors = self.curr_neighbors(current_node, self.boundary_list) children = [] for neighbor in current_neighbors: # create new node if the neighbor is collision-free if self.collision_free(neighbor, self.obstacle_list): new_node = Node(current_node, neighbor) children.append(new_node) for child in children: if child in closed_list: continue child.pathLen = child.parent.pathLen + euclidean_dist(child.parent.position, child.position) dist_left = abs(pathLenLimit - child.pathLen) child.time_stamp = int(child.pathLen/self.velocity) currGrid = self.findCurrSOG(self.sharkGrid, child.time_stamp) child.g = child.parent.cost - w4 * self.get_cell_prob(child, currGrid) child.cost = child.g # child.h = - w2 * dist_left - w3 * len(habitat_open_list) - w4 * dist_left * self.get_max_prob_from_grid(currGrid) child.h = - w2 * dist_left - w3 * len(habitat_open_list) - w4 * self.get_top_n_prob(int(dist_left), currGrid) child.f = child.g + child.h # check if child exists in the open list and have bigger g for open_node in open_list: if child == open_node and child.g > open_node.g: continue x_pos, y_pos = self.get_indices(child.position[0], child.position[1]) if self.visited_nodes[x_pos, y_pos] == 0: open_list.append(child) self.visited_nodes[x_pos, y_pos] = 1
def main(Vxf0, urdf, options): modelNames = ['w.mat', 'Sshape.mat'] # Two example models provided by Khansari modelNumber = 1 # could be zero or one depending on the experiment the user is running data, demoIdx = load_saved_mat_file(lyap + '/' + 'example_models/' + modelNames[modelNumber]) Vxf0['d'] = int(data.shape[0] / 2) Vxf0.update(Vxf0) Vxf0 = guess_init_lyap(data, Vxf0, options['int_lyap_random']) cost = Cost() while cost.success: print('Optimizing the lyapunov function') Vxf, J = cost.learnEnergy(Vxf0, data, options) old_l = Vxf0['L'] Vxf0['L'] += 1 print('Constraints violated. increasing the size of L from {} --> {}'. format(old_l, Vxf0['L'])) if cost.success: print('optimization succeeded without violating constraints') break # Plot the result of V h1 = plt.plot(data[0, :], data[1, :], 'r.', label='demonstrations') extra = 30 axes_limits = [ np.min(data[0, :]) - extra, np.max(data[0, :]) + extra, np.min(data[1, :]) - extra, np.max(data[1, :]) + extra ] h3 = cost.energyContour(Vxf, axes_limits, np.array(()), np.array(()), np.array(()), False) h2 = plt.plot(0, 0, 'g*', markersize=15, linewidth=3, label='target') plt.title('Energy Levels of the learned Lyapunov Functions', fontsize=12) plt.xlabel('x (mm)', fontsize=15) plt.ylabel('y (mm)', fontsize=15) h = [h1, h2, h3] # Run DS opt_sim = dict() opt_sim['dt'] = 0.01 opt_sim['i_max'] = 4000 opt_sim['tol'] = 1 d = data.shape[0] / 2 # dimension of data x0_all = data[:int(d), demoIdx[0, :-1] - 1] # finding initial points of all demonstrations # get gmm params gmm = GMM(num_clusters=options['num_clusters']) gmm.update(data.T, K=options['num_clusters'], max_iterations=100) mu, sigma, priors = gmm.mu.T, gmm.sigma.T, gmm.logmass.T # rho0 and kappa0 impose minimum acceptable rate of decrease in the energy # function during the motion. Refer to page 8 of the paper for more information rho0 = 1 kappa0 = 0.1 inp = list(range(Vxf['d'])) output = np.arange(Vxf['d'], 2 * Vxf['d']) xd, _ = dsStabilizer(x0_all, Vxf, rho0, kappa0, priors, mu, sigma, inp, output, cost) # Evalute DS xT = np.array([]) d = x0_all.shape[0] # dimension of the model if not xT: xT = np.zeros((d, 1)) # initialization nbSPoint = x0_all.shape[1] x = [] #x0_all[0, 1] = -180 # modify starting point a bit to see performance in further regions #x0_all[1, 1] = -130 x.append(x0_all) xd = [] if xT.shape == x0_all.shape: XT = xT else: XT = np.tile( xT, [1, nbSPoint ]) # a matrix of target location (just to simplify computation) t = 0 # starting time dt = 0.01 for i in range(4000): xd.append( dsStabilizer(x[i] - XT, Vxf, rho0, kappa0, priors, mu, sigma, inp, output, cost)[0]) x.append(x[i] + xd[i] * dt) t += dt for i in range(nbSPoint): # Choose one trajectory x = np.reshape(x, [len(x), d, nbSPoint]) x0 = x[:, :, i] if i == 0: plt.plot(x0[:, 0], x0[:, 1], linestyle='--', label='DS eval', color='blue') else: plt.plot(x0[:, 0], x0[:, 1], linestyle='--', color='blue') plt.legend() plt.show()
class RRT: """ Class for RRT planning """ def __init__(self, boundary, obstacles, shark_dict, sharkGrid, exp_rate=1, dist_to_end=2, diff_max=0.5, freq=30): '''setting parameters: initial_location: initial Motion_plan_state of AUV, [x, y, z, theta, v, w, time_stamp] goal_location: Motion_plan_state of the shark, [x, y, z] obstacle_list: Motion_plan_state of obstacles [[x1, y1, z1, size1], [x2, y2, z2, size2] ...] boundary: max & min Motion_plan_state of the configuration space [[x_min, y_min, z_min],[x_max, y_max, z_max]]''' #initialize obstacle, boundary, habitats for path planning self.boundary = boundary #initialize corners for boundaries coords = [] for corner in self.boundary: coords.append((corner.x, corner.y)) self.boundary_poly = Polygon(coords) self.obstacle_list = obstacles self.mps_list = [] # a list of motion_plan_state self.time_bin = {} #if minimum path length is not achieved within maximum iteration, return the latest path self.last_path = [] #setting parameters for path planning self.exp_rate = exp_rate self.dist_to_end = dist_to_end self.diff_max = diff_max self.freq = freq # initialize shark occupancy grid self.cell_list = splitCell(self.boundary_poly, 10) self.sharkGrid = sharkGrid self.sharkDict = shark_dict # initialize cost function self.cal_cost = Cost() # keep track of the longest single path in the tree to normalize every path length self.peri_boundary = self.cal_boundary_peri() def replanning(self, start, habitats, plan_time_budget, traj_time_length, replan_time_interval): ''' RRT path planning to continually generate optimal path given the current trajectory AUV is following by choosing certain point along the curretn trajectory as starting node for new RRT planner After replan time interval, RRT planner will be called to generate a new optimal trajectory given the start node. For each iteration of planning, RRT planner is given a plan time constructio budget to plan and generate optimal trajectory with trajectory time length as maxmimum paramters: start: initial node to start with, need to be updated plan_time_budget: Plan construction time budget, i.e. the amount of time dedicated to constructed trajectories for RRT planner traj_time_length: Trajectory time length, i.e. the difference between the the last and first time stamp of the trajectory constructed i.e. how long the AUV will drive around for replan_time_interval: Replan time interval i.e. 
the time between each query to the planner to construct a new traj ''' traj = [start] time_dict = {} final_traj_time = list(self.sharkGrid.keys())[-1][1] plan_time = (plan_time_budget + replan_time_interval) count = 1 oriHabitats = habitats.copy() while (traj[-1].traj_time_stamp + plan_time) < final_traj_time: if traj_time_length + traj[-1].traj_time_stamp > final_traj_time: traj_time_length = final_traj_time - traj[-1].traj_time_stamp temp = self.exploring(traj[-1], habitats, 0.5, 5, 2, plan_time, traj_time_stamp=True, max_plan_time=plan_time_budget, max_traj_time=(traj_time_length + traj[-1].traj_time_stamp), plan_time=True, weights=[1, -3, -1, -5]) temp_path = temp["path"][1][list(temp["path"][1].keys())[0]] temp_path.reverse() traj.extend(temp_path) time_dict[count] = [temp_path, habitats.copy()] habitats = self.removeHabitat(habitats, temp_path) count += 1 time.sleep(replan_time_interval) length = self.cal_length(traj[1:]) cost = self.cal_cost.habitat_shark_cost_func(traj[1:], length, self.peri_boundary, traj[-1].traj_time_stamp, oriHabitats, self.sharkGrid, weight=[1, -3, -1, -5]) return [traj[1:], time_dict, cost] def exploring(self, initial, habitats, plot_interval, bin_interval, v, shark_interval, traj_time_stamp=False, max_plan_time=5, max_traj_time=200.0, plan_time=True, weights=[1, -1, -1, -1]): """ rrt path planning without setting a specific goal, rather try to explore the configuration space as much as possible calculate cost while expand and keep track of the current optimal cost path max_iter: maximum iteration for the tree to expand plan_time: expand by randomly picking a time stamp and find the motion_plan_state along the path with smallest time difference """ # keep track of the motion_plan_state whose path is optimal opt_cost = [float("inf")] opt_path = None opt_cost_list = [] self.mps_list = [initial] self.t_start = time.time() n_expand = math.ceil(max_plan_time / plot_interval) if traj_time_stamp: time_expand = math.ceil(max_traj_time / bin_interval) for i in range(1, time_expand + 1): self.time_bin[bin_interval * i] = [] self.time_bin[bin_interval].append(initial) for i in range(1, n_expand + 1): t_end = self.t_start + i * plot_interval while time.time() < t_end: # find the closest motion_plan_state by generating a random time stamp and # find the motion_plan_state whose time stamp is closest to it if plan_time: if traj_time_stamp: ran_bin = int(random.uniform(1, time_expand + 1)) while self.time_bin[bin_interval * ran_bin] == []: ran_bin = int(random.uniform(1, time_expand + 1)) ran_index = int( random.uniform( 0, len(self.time_bin[bin_interval * ran_bin]))) closest_mps = self.time_bin[bin_interval * ran_bin][ran_index] else: ran_time = random.uniform(0, max_plan_time * self.freq) closest_mps = self.get_closest_mps_time( ran_time, self.mps_list) if closest_mps.traj_time_stamp > max_traj_time: continue # find the closest motion_plan_state by generating a random motion_plan_state # and find the motion_plan_state with smallest distance else: ran_mps = self.get_random_mps() closest_mps = self.get_closest_mps(ran_mps, self.mps_list) if closest_mps.traj_time_stamp > max_traj_time: continue new_mps = self.steer(closest_mps, self.dist_to_end, self.diff_max, self.freq, 0.5, v, traj_time_stamp) if self.check_collision(new_mps, self.obstacle_list): new_mps.parent = closest_mps self.mps_list.append(new_mps) #add to time stamp bin if traj_time_stamp: curr_bin = (new_mps.traj_time_stamp // bin_interval + 1) * bin_interval if curr_bin > max_traj_time: self.time_bin[curr_bin] = [] 
self.time_bin[curr_bin].append(new_mps) # if new_mps.traj_time_stamp > longest_traj_time: # longest_traj_time = new_mps.traj_time_stamp #else: # if new_mps.traj_time_stamp > max_traj_time: # continue #Question: how to normalize the path length? if new_mps.traj_time_stamp >= max_traj_time - 30: path = self.generate_final_course(new_mps) new_mps.length = self.cal_length(path) #find the corresponding shark occupancy grid sharkOccupancyDict = {} start = initial.traj_time_stamp end = new_mps.traj_time_stamp for time_bin in self.sharkGrid: if (start >= time_bin[0] and start <= time_bin[1] ) or (time_bin[0] >= start and time_bin[1] <= end) or (end >= time_bin[0] and end <= time_bin[1]): sharkOccupancyDict[time_bin] = self.sharkGrid[ time_bin] new_cost = self.cal_cost.habitat_shark_cost_func( path, new_mps.length, self.peri_boundary, new_mps.traj_time_stamp, habitats, sharkOccupancyDict, weights) if new_cost[0] < opt_cost[0]: opt_cost = new_cost opt_path = [new_mps.length, path] # opt_cost_list.append(opt_cost[0]) path = self.splitPath(opt_path[1], shark_interval, [initial.traj_time_stamp, max_traj_time]) return { "path length": opt_path[0], "path": [opt_path[1], path], "cost": opt_cost } def planning(self, bin_interval=5, v=1, traj_time_stamp=False, max_plan_time=5, max_traj_time=200.0, plan_time=True): """ rrt path planning animation: flag for animation on or off """ self.mps_list = [self.start] self.t_start = time.time() t_end = time.time() + max_plan_time if traj_time_stamp: time_expand = math.ceil(max_traj_time / bin_interval) for i in range(1, time_expand + 1): self.time_bin[bin_interval * i] = [] self.time_bin[bin_interval].append(self.start) while time.time() < t_end: #find the closest motion_plan_state by generating a random time stamp and #find the motion_plan_state whose time stamp is closest to it if plan_time: if traj_time_stamp: ran_bin = int(random.uniform(1, time_expand + 1)) while self.time_bin[bin_interval * ran_bin] == []: ran_bin = int(random.uniform(1, time_expand + 1)) ran_index = int( random.uniform( 0, len(self.time_bin[bin_interval * ran_bin]))) closest_mps = self.time_bin[bin_interval * ran_bin][ran_index] else: ran_time = random.uniform(0, max_plan_time * self.freq) closest_mps = self.get_closest_mps_time( ran_time, self.mps_list) if closest_mps.traj_time_stamp > max_traj_time: continue #find the closest motion_plan_state by generating a random motion_plan_state #and find the motion_plan_state with smallest distance else: ran_mps = self.get_random_mps() closest_mps = self.get_closest_mps(ran_mps, self.mps_list) if closest_mps.traj_time_stamp > max_traj_time: continue new_mps = self.steer(closest_mps, self.dist_to_end, self.diff_max, self.freq, v, traj_time_stamp) if self.check_collision(new_mps, self.obstacle_list): print(new_mps) new_mps.parent = closest_mps self.mps_list.append(new_mps) #add to time stamp bin if traj_time_stamp: curr_bin = (new_mps.traj_time_stamp // bin_interval + 1) * bin_interval if curr_bin > max_traj_time: #continue self.time_bin[curr_bin] = [] self.time_bin[curr_bin].append(new_mps) final_mps = self.connect_to_goal_curve_alt(self.mps_list[-1], self.exp_rate) if self.check_collision(final_mps, self.obstacle_list): final_mps.parent = self.mps_list[-1] path = self.generate_final_course(final_mps) return path return None # cannot find path def steer(self, mps, dist_to_end, diff_max, freq, min_dist, velocity=1, traj_time_stamp=False): #dubins library '''new_mps = Motion_plan_state(from_mps.x, from_mps.y, theta = from_mps.theta) new_mps.path = [] q0 = 
(from_mps.x, from_mps.y, from_mps.theta) q1 = (to_mps.x, to_mps.y, to_mps.theta) turning_radius = 1.0 path = dubins.shortest_path(q0, q1, turning_radius) configurations, _ = path.sample_many(exp_rate) for configuration in configurations: new_mps.path.append(Motion_plan_state(x = configuration[0], y = configuration[1], theta = configuration[2])) new_mps.path.append(to_mps) dubins_path = new_mps.path new_mps = dubins_path[-2] new_mps.path = dubins_path''' new_mps = Motion_plan_state(mps.x, mps.y, theta=mps.theta, plan_time_stamp=time.time() - self.t_start, traj_time_stamp=mps.traj_time_stamp) new_mps.path = [mps] '''if dist_to_end > d: dist_to_end = dist_to_end / 10 n_expand = math.floor(dist_to_end / exp_rate)''' n_expand = random.uniform(0, freq) n_expand = math.floor(n_expand / 1) for _ in range(n_expand): #setting random parameters dist = random.uniform(0, dist_to_end) #setting random range diff = random.uniform(-diff_max, diff_max) #setting random range if abs(dist) > abs(diff): s1 = dist + diff s2 = dist - diff radius = (s1 + s2) / (-s1 + s2) phi = (s1 + s2) / (2 * radius) ori_theta = new_mps.theta new_mps.theta += phi delta_x = radius * (math.sin(new_mps.theta) - math.sin(ori_theta)) delta_y = radius * (-math.cos(new_mps.theta) + math.cos(ori_theta)) new_mps.x += delta_x new_mps.y += delta_y velocity_temp = random.uniform(0, 2 * velocity) new_mps.plan_time_stamp = time.time() - self.t_start movement = math.sqrt(delta_x**2 + delta_y**2) new_mps.traj_time_stamp += movement / velocity_temp if movement >= min_dist: new_mps.path.append( Motion_plan_state( new_mps.x, new_mps.y, v=velocity_temp, theta=new_mps.theta, traj_time_stamp=new_mps.traj_time_stamp, plan_time_stamp=new_mps.plan_time_stamp)) #d, theta = self.get_distance_angle(new_mps, to_mps) '''d, _ = self.get_distance_angle(new_mps, to_mps) if d <= dist_to_end: new_mps.path.append(to_mps)''' #new_node.parent = from_node new_mps.path[0] = mps return new_mps def connect_to_goal(self, mps, exp_rate, dist_to_end=float("inf")): new_mps = Motion_plan_state(mps.x, mps.y) d, theta = self.get_distance_angle(new_mps, self.goal) new_mps.path = [new_mps] if dist_to_end > d: dist_to_end = d n_expand = math.floor(dist_to_end / exp_rate) for _ in range(n_expand): new_mps.x += exp_rate * math.cos(theta) new_mps.y += exp_rate * math.sin(theta) new_mps.path.append(Motion_plan_state(new_mps.x, new_mps.y)) d, _ = self.get_distance_angle(new_mps, self.goal) if d <= dist_to_end: new_mps.path.append(self.goal) new_mps.path[0] = mps return new_mps def generate_final_course(self, mps): path = [mps] mps = mps while mps.parent is not None: reversed_path = reversed(mps.path) for point in reversed_path: path.append(point) mps = mps.parent #path.append(mps) return path def get_random_mps(self, size_max=15): x_max = max([mps.x for mps in self.boundary]) x_min = min([mps.x for mps in self.boundary]) y_max = max([mps.y for mps in self.boundary]) y_min = min([mps.y for mps in self.boundary]) ran_x = random.uniform(x_min, x_max) ran_y = random.uniform(y_min, y_max) ran_theta = random.uniform(-math.pi, math.pi) ran_size = random.uniform(0, size_max) mps = Motion_plan_state(ran_x, ran_y, theta=ran_theta, size=ran_size) #ran_z = random.uniform(self.min_area.z, self.max_area.z) return mps def draw_graph_replan(self, traj_path, rnd=None): fig = plt.figure(1, figsize=(10, 30)) x, y = self.boundary_poly.exterior.xy row = math.ceil(len(list(traj_path[1].keys())) / 3) for index, arr in traj_path[1].items(): ax = fig.add_subplot(row, 3, index) ax.plot(x, y, color="black") # 
for mps in self.mps_list: # if mps.parent: # plt.plot([point.x for point in mps.path], [point.y for point in mps.path], '-g') # plot obstacels as circles for obs in self.obstacle_list: ax.add_patch( plt.Circle((obs.x, obs.y), obs.size, color='#000000', fill=False)) for habitat in arr[1]: ax.add_patch( plt.Circle((habitat.x, habitat.y), habitat.size, color='b', fill=False)) # patch = [] # occ = [] # for cell in self.cell_list: # polygon = patches.Polygon(list(cell.exterior.coords), True) # patch.append(polygon) # occ.append(self.sharkGrid[time_tuple][cell.bounds]) # p = collections.PatchCollection(patch) # p.set_cmap("Greys") # p.set_array(np.array(occ)) # ax.add_collection(p) # fig.colorbar(p, ax=ax) ax.set_xlim([ self.boundary_poly.bounds[0] - 10, self.boundary_poly.bounds[2] + 10 ]) ax.set_ylim([ self.boundary_poly.bounds[1] - 10, self.boundary_poly.bounds[3] + 10 ]) # ax.title.set_text(str(list(self.sharkGrid.keys())[i])) ax.plot([mps.x for mps in traj_path[0]], [mps.y for mps in traj_path[0]], "r") ax.plot([mps.x for mps in arr[0]], [mps.y for mps in arr[0]], 'b') plt.show() def draw_graph_explore(self, habitats, traj_path, rnd=None): fig = plt.figure(1, figsize=(10, 8)) x, y = self.boundary_poly.exterior.xy for i in range(len(list(self.sharkGrid.keys()))): ax = fig.add_subplot(5, 2, i + 1) ax.plot(x, y, color="black") # for mps in self.mps_list: # if mps.parent: # plt.plot([point.x for point in mps.path], [point.y for point in mps.path], '-g') # plot obstacels as circles for obs in self.obstacle_list: ax.add_patch( plt.Circle((obs.x, obs.y), obs.size, color='#000000', fill=False)) for habitat in habitats: ax.add_patch( plt.Circle((habitat.x, habitat.y), habitat.size, color='b', fill=False)) patch = [] occ = [] key = list(self.sharkGrid.keys())[i] for cell in self.cell_list: polygon = patches.Polygon(list(cell.exterior.coords), True) patch.append(polygon) occ.append(self.sharkGrid[key][cell.bounds]) p = collections.PatchCollection(patch) p.set_cmap("Greys") p.set_array(np.array(occ)) ax.add_collection(p) fig.colorbar(p, ax=ax) ax.set_xlim([ self.boundary_poly.bounds[0] - 10, self.boundary_poly.bounds[2] + 10 ]) ax.set_ylim([ self.boundary_poly.bounds[1] - 10, self.boundary_poly.bounds[3] + 10 ]) ax.title.set_text(str(list(self.sharkGrid.keys())[i])) ax.plot([mps.x for mps in traj_path[0]], [mps.y for mps in traj_path[0]], "r") ax.plot([mps.x for mps in traj_path[1][key]], [mps.y for mps in traj_path[1][key]], 'b') plt.show() @staticmethod def plot_circle(x, y, size, color="-b"): # pragma: no cover deg = list(range(0, 360, 5)) deg.append(0) xl = [x + size * math.cos(np.deg2rad(d)) for d in deg] yl = [y + size * math.sin(np.deg2rad(d)) for d in deg] plt.plot(xl, yl, color) def connect_to_goal_curve(self, mps1): a = (self.goal.y - mps1.y) / (np.cosh(self.goal.x) - np.cosh(mps1.x)) b = mps1.y - a * np.cosh(mps1.x) x = np.linspace(mps1.x, self.goal.x, 15) x = x.tolist() y = a * np.cosh(x) + b y = y.tolist() new_mps = Motion_plan_state(x[-2], y[-2]) new_mps.path.append(mps1) for i in range(1, len(x)): new_mps.path.append(Motion_plan_state(x[i], y[i])) new_mps.length += math.sqrt( (new_mps.path[i].x - new_mps.path[i - 1].x)**2 + (new_mps.path[i].y - new_mps.path[i - 1].y)**2) '''plt.plot(mps1.x, mps1.y, color) plt.plot(self.goal.x, self.goal.y, color) plt.plot([mps.x for mps in new_mps.path], [mps.y for mps in new_mps.path], color) plt.show()''' return new_mps def connect_to_goal_curve_alt(self, mps, exp_rate): new_mps = Motion_plan_state(mps.x, mps.y, theta=mps.theta, 
traj_time_stamp=mps.traj_time_stamp) theta_0 = new_mps.theta _, theta = self.get_distance_angle(mps, self.goal) diff = theta - theta_0 diff = self.angle_wrap(diff) if abs(diff) > math.pi / 2: return #polar coordinate r_G = math.hypot(self.goal.x - new_mps.x, self.goal.y - new_mps.y) phi_G = math.atan2(self.goal.y - new_mps.y, self.goal.x - new_mps.x) #arc phi = 2 * self.angle_wrap(phi_G - new_mps.theta) radius = r_G / (2 * math.sin(phi_G - new_mps.theta)) length = radius * phi if phi > math.pi: phi -= 2 * math.pi length = -radius * phi elif phi < -math.pi: phi += 2 * math.pi length = -radius * phi new_mps.length += length ang_vel = phi / (length / exp_rate) #center of rotation x_C = new_mps.x - radius * math.sin(new_mps.theta) y_C = new_mps.y + radius * math.cos(new_mps.theta) n_expand = math.floor(length / exp_rate) for i in range(n_expand + 1): new_mps.x = x_C + radius * math.sin(ang_vel * i + theta_0) new_mps.y = y_C - radius * math.cos(ang_vel * i + theta_0) new_mps.theta = ang_vel * i + theta_0 new_mps.path.append( Motion_plan_state(new_mps.x, new_mps.y, theta=new_mps.theta, plan_time_stamp=time.time() - self.t_start)) return new_mps def angle_wrap(self, ang): if -math.pi <= ang <= math.pi: return ang elif ang > math.pi: ang += (-2 * math.pi) return self.angle_wrap(ang) elif ang < -math.pi: ang += (2 * math.pi) return self.angle_wrap(ang) def get_closest_mps(self, ran_mps, mps_list): min_dist, _ = self.get_distance_angle(mps_list[0], ran_mps) closest_mps = mps_list[0] for mps in mps_list: dist, _ = self.get_distance_angle(mps, ran_mps) if dist < min_dist: min_dist = dist closest_mps = mps return closest_mps def get_closest_mps_time(self, ran_time, mps_list): while len(mps_list) > 3: left = mps_list[len(mps_list) // 2 - 1] left_diff = abs(left.plan_time_stamp - ran_time) right = mps_list[len(mps_list) // 2 + 1] right_diff = abs(right.plan_time_stamp - ran_time) index = len(mps_list) // 2 if left_diff >= right_diff: mps_list = mps_list[index:] else: mps_list = mps_list[:index] return mps_list[0] def check_collision(self, mps, obstacleList): if mps is None: return False dList = [] for obstacle in obstacleList: for point in mps.path: d, _ = self.get_distance_angle(obstacle, point) dList.append(d) if min(dList) <= obstacle.size: return False # collision for point in mps.path: point = Point(point.x, point.y) if not point.within(self.boundary_poly): return False return True # safe def check_collision_obstacle(self, mps, obstacleList): for obstacle in obstacleList: d, _ = self.get_distance_angle(obstacle, mps) if d <= obstacle.size: return False return True def get_distance_angle(self, start_mps, end_mps): dx = end_mps.x - start_mps.x dy = end_mps.y - start_mps.y #dz = end_mps.z-start_mps.z dist = math.sqrt(dx**2 + dy**2) theta = math.atan2(dy, dx) return dist, theta def cal_length(self, path): length = 0 for i in range(1, len(path)): length += math.sqrt((path[i].x - path[i - 1].x)**2 + (path[i].y - path[i - 1].y)**2) return length def cal_boundary_peri(self): peri = 0 for i in range(len(self.boundary) - 1): dist, _ = self.get_distance_angle(self.boundary[i], self.boundary[i + 1]) peri += dist return peri def plot_performance(self, time_list, perf_list): _, ax = plt.subplots() ax.plot(time_list, perf_list) # Add some text for labels, title and custom x-axis tick labels, etc. 
ax.set_ylabel('optimal sum cost') ax.set_title('RRT performance') ax.legend() plt.show() def splitPath(self, path, shark_interval, traj_time): n_expand = math.floor(traj_time[1] / shark_interval) res = {} start = traj_time[0] for i in range(n_expand): res[(start + i * shark_interval, start + (i + 1) * shark_interval)] = [] for point in path: for time, arr in res.items(): if point.traj_time_stamp >= time[ 0] and point.traj_time_stamp <= time[1]: arr.append(point) break return res def removeHabitat(self, habitats, path): for point in path: for habitat in habitats: if math.sqrt((point.x - habitat.x)**2 + (point.y - habitat.y)**2) <= habitat.size: habitats.remove(habitat) break return habitats
def astar(self, habitat_list, obs_lst, boundary_list, start, pathLenLimit, weights): """ Find the optimal path from start to goal avoiding given obstacles Parameter: obs_lst - a list of motion_plan_state objects that represent obstacles start - a tuple of two elements: x and y coordinates goal - a tuple of two elements: x and y coordinates """ w1 = weights[0] w2 = weights[1] w3 = weights[2] habitats_time_spent = 0 cal_cost = Cost() start_node = Node(None, start) start_node.g = start_node.h = start_node.f = 0 cost = [] open_list = [] # hold neighbors of the expanded nodes closed_list = [] # hold all the exapnded nodes habitat_open_list = habitat_list # hold haibitats that have not been explored habitat_closed_list = [] # hold habitats that have been explored open_list.append(start_node) while len(open_list) > 0: current_node = open_list[0] # initialize the current node current_index = 0 for index, item in enumerate( open_list): # find the current node with the smallest f if item.f < current_node.f: current_node = item current_index = index open_list.pop(current_index) closed_list.append(current_node) if abs(current_node.pathLen - pathLenLimit) <= 10: # terminating condition path = [] current = current_node while current is not None: # backtracking to find the d path.append(current.position) cost.append(current.cost) habitats_time_spent += self.habitats_time_spent(current) current = current.parent path_mps = [] for point in path: mps = Motion_plan_state(point[0], point[1]) path_mps.append(mps) return ([path_mps[::-1], cost]) current_neighbors = self.curr_neighbors(current_node, boundary_list) children = [] grid = [] '''Old Cost Function''' for neighbor in current_neighbors: # create new node if the neighbor is collision-free if self.collision_free(neighbor, obs_lst): new_node = Node(current_node, neighbor) result = self.update_habitat_coverage( new_node, habitat_open_list, habitat_closed_list ) # update habitat_open_list and habitat_closed_list habitat_open_list = result[0] habitat_closed_list = result[1] cost_of_edge = cal_cost.cost_of_edge( new_node, habitat_open_list, habitat_closed_list, weights) new_node.cost = new_node.parent.cost + cost_of_edge[0] children.append(new_node) '''New Cost Function''' # for neighbor in current_neighbors: # create new node if the neighbor is collision-free # if self.collision_free(neighbor, obs_lst): # """ # SELECTION # """ # grid_val = self.create_grid_map(current_node, neighbor) # grid.append((neighbor, grid_val)) # grid.remove(min(grid)) # append the selected neighbors to children for neighbor in grid: new_node = Node(current_node, neighbor[0]) children.append(new_node) result = self.update_habitat_coverage( new_node, habitat_open_list, habitat_closed_list ) # update habitat_open_list and habitat_closed_list habitat_open_list = result[0] habitat_closed_list = result[1] for child in children: if child in closed_list: continue result = cal_cost.cost_of_edge(child, habitat_open_list, habitat_closed_list, weights) d_2 = result[1] d_3 = result[2] child.g = child.parent.cost - w2 * d_2 - w3 * d_3 child.cost = child.g child.h = -w2 * abs(pathLenLimit - child.pathLen) - w3 * len( habitat_open_list) child.f = child.g + child.h child.pathLen = child.parent.pathLen + euclidean_dist( child.parent.position, child.position) # check if child exists in the open list and have bigger g for open_node in open_list: if child == open_node and child.g > open_node.g: continue x_pos, y_pos = self.get_indices(child.position[0], child.position[1]) print('\n', "x_in_meter, 
y_in_meter: ", child.position[0], child.position[1]) print('\n', "x_pos, y_pos", x_pos, y_pos) if self.visited_nodes[x_pos, y_pos] == 0: open_list.append(child) self.visited_nodes[x_pos, y_pos] = 1 else: print("False attempt : ", child.position)
def main(): parser = argparse.ArgumentParser() parser.add_argument("csv_file_path") parser.add_argument("--encoding", default="utf_8") options = parser.parse_args() _utObject = utils() testCsv, aircraftCode, aircraftRange = _utObject.input(options) # print("TESTCSV:::::::::",testCsv) # print(aircraftCode) # print(aircraftRange) outputList = [] _airdict = Airport() parsedAirportDict = _airdict.parseAirport('airport.csv') for tCi, tC in enumerate(testCsv): #Iterates over IATA code in testCsv storing their into a dictionary as tuples #Saving lat and long values for each IATA distances = {} for row in tC: data = parsedAirportDict.get(row) distances[row] = data # print(distances) #Calculating all possible distances between routes and storing them in a dict dict_air = {} for airs in distances: src = airs indivDist = [] for airs2 in distances: dest = airs2 indivDist.append( _airdict.distanceBetweenAirports(distances[src][0], distances[src][1], distances[dest][0], distances[dest][1])) dict_air[src] = indivDist #print(dict_air) #transform nodes(keys) of dict into a sequence graph_nodes = (list(dict_air.keys())) # _min_distance_obj = MinDist() # shortest_path = _min_distance_obj.bruteForce(graph_nodes,dict_air) airports_visited = [] min_dist = [] airIndices = [] new_source = graph_nodes[0] # airIndices.append(0) for s in graph_nodes: # print("Destination:", new_source) airports_visited.append(new_source) distances = dict_air.get(new_source) for i in airIndices: # print(s) distances[i] = 9999999 # print(distances) index = -1 for i, element in enumerate(distances): if element == 0: index = i distances[i] = 9999999 airIndices.append(index) mDist = min(distances) #print(mDist) airpIndex = distances.index(mDist) airIndices.append(airpIndex) #print("Index:",airpIndex) new_source = graph_nodes[airpIndex] min_dist.append(min(distances)) #Popping last element off stack to get 4 shortest distances min_dist.pop() #print(min_dist) finalSource = airports_visited[-1] finalDestination = airports_visited[0] finalSourceDist = parsedAirportDict.get(finalSource) finalDestinationDist = parsedAirportDict.get(finalDestination) finalDistance = _airdict.distanceBetweenAirports( finalSourceDist[0], finalSourceDist[1], finalDestinationDist[0], finalDestinationDist[1]) #print(finalDistance) #Append final dist to list of distances airports_visited.append(finalDestination) print("Itinerary:", airports_visited, '\n') min_dist.append(finalDistance) print("Itinerary Distances:", min_dist, '\n') print('________________________________________________', '\n') #Destination is the last index of airports_visited list _currencyobject = Currency() parsedCurrencyCost = _currencyobject.currencyParser( 'Currency_code_concat.csv') billingCountry = airports_visited[-1] _costObject = Cost() toEuroRate = _costObject.toEuroRate(billingCountry, 'airport.csv', 'countrycurrency.csv', 'currencyrates.csv') print(toEuroRate) print("Cost of round trip: ", "€", (sum(min_dist) // toEuroRate)) #Also calculate distance from last dest in list to home # print('________________________________________________') print("Checking if Itinerary is possible with provided aircraft") journeyPossible = _airdict.isItineraryPossible(aircraftRange[tCi], min_dist) if journeyPossible == False: print( "Sorry, with the provided aircraft this journey cannot be made. Try again!" 
) airports_visited.append("No Route") outputList.append(airports_visited) else: remainingFuel = aircraftRange[tCi] print("\t\t\tRemaining Fuel::: ", remainingFuel) totalCosttoRefuel = 0 toEuroRate = _costObject.toEuroRate(airports_visited[0], 'airport.csv', 'countrycurrency.csv', 'currencyrates.csv') print("\t\t\t\t\tItinerary Scheduled: ", airports_visited) print("\t\t\t\t\tItinerary Scheduled: ", min_dist) totalCosttoRefuel += aircraftRange[tCi] * toEuroRate for i, val in enumerate(min_dist): toEuroRate = _costObject.toEuroRate(airports_visited[i + 1], 'airport.csv', 'countrycurrency.csv', 'currencyrates.csv') print("\t\t\tAT Airport: ", airports_visited[i + 1]) print("\t\t\tAT Airport: ", airports_visited[i + 1], "the EURO Rate is: ", toEuroRate) if remainingFuel >= val: print("\t\t\t\tProceeding") remainingFuel -= val else: print("\t\t\tRefueling") costOfRefuel = (aircraftRange[tCi] - remainingFuel) * toEuroRate totalCosttoRefuel += costOfRefuel remainingFuel = aircraftRange[tCi] - val print("\t\t\t\t", costOfRefuel) print("\t\t\t\tRemaining", remainingFuel) airports_visited.append(totalCosttoRefuel) outputList.append(airports_visited) print("TOTAL COST FOR ITINERARY IS: ", totalCosttoRefuel) print("Generating output csv file") _utObject.tocsv(outputList)
def astar(self, habitat_list, obs_lst, boundary_list, start, pathLenLimit, weights): """ Find the optimal path from start to goal avoiding given obstacles Parameter: obs_lst - a list of motion_plan_state objects that represent obstacles start - a tuple of two elements: x and y coordinates goal - a tuple of two elements: x and y coordinates """ w1 = weights[0] w2 = weights[1] w3 = weights[2] habitats_time_spent = 0 cal_cost = Cost() start_node = Node(None, start) start_node.g = start_node.h = start_node.f = 0 cost = [] open_list = [] # hold neighbors of the expanded nodes closed_list = [] # hold all the exapnded nodes # habitats = habitat_list[:] open_list.append(start_node) while len(open_list) > 0: current_node = open_list[0] # initialize the current node current_index = 0 for index, item in enumerate(open_list): # find the current node with the smallest f if item.f < current_node.f: current_node = item current_index = index open_list.pop(current_index) closed_list.append(current_node) if abs(current_node.pathLen - pathLenLimit) <= 10: # terminating condition path = [] current = current_node while current is not None: # backtracking to find the d path.append(current.position) cost.append(current.cost) habitats_time_spent += self.habitats_time_spent(current) current = current.parent path_mps = [] for point in path: mps = Motion_plan_state(point[0], point[1]) path_mps.append(mps) trajectory = path_mps[::-1] # smoothPath = self.smoothPath(trajectory, habitats) return ({"path" : trajectory, "cost list" : cost, "cost" : cost[0]}) current_neighbors = self.curr_neighbors(current_node, boundary_list) children = [] for neighbor in current_neighbors: # create new node if the neighbor is collision-free if self.collision_free(neighbor, obs_lst): new_node = Node(current_node, neighbor) self.update_habitat_coverage(new_node, self.habitat_open_list, self.habitat_closed_list) cost_of_edge = cal_cost.cost_of_edge(new_node, self.habitat_open_list, self.habitat_closed_list, weights) new_node.cost = new_node.parent.cost + cost_of_edge[0] children.append(new_node) for child in children: if child in closed_list: continue habitatInfo = cal_cost.cost_of_edge(child, self.habitat_open_list, self.habitat_closed_list, weights) insideAnyHabitats = habitatInfo[1] insideOpenHabitats = habitatInfo[2] child.g = child.parent.cost - w2 * insideAnyHabitats - w3 * insideOpenHabitats child.cost = child.g child.h = - w2 * abs(pathLenLimit - child.pathLen) - w3 * len(self.habitat_open_list) child.f = child.g + child.h child.pathLen = child.parent.pathLen + euclidean_dist(child.parent.position, child.position) child.time_stamp = int(child.pathLen/self.velocity) # check if child exists in the open list and have bigger g for open_node in open_list: if child == open_node and child.g > open_node.g: continue x_pos, y_pos = self.get_indices(child.position[0], child.position[1]) if self.visited_nodes[x_pos, y_pos] == 0: open_list.append(child) self.visited_nodes[x_pos, y_pos] = 1
if __name__ == "__main__":
    train, valid, test = load('mnist.pkl.gz')

    trans_train, shift_train, ori_train = translation(train[0], 28)
    trans_train, shift_train, ori_train = shared((trans_train, shift_train, ori_train))
    trans_valid, shift_valid, ori_valid = translation(valid[0], 28)
    trans_valid, shift_valid, ori_valid = shared((trans_valid, shift_valid, ori_valid))
    trans_test, shift_test, ori_test = translation(test[0], 28)
    trans_test, shift_test, ori_test = shared((trans_test, shift_test, ori_test))

    num_capsules = 60
    in_dim = 784
    recog_dim = 10
    gener_dim = 20
    activation = 'sigmoid'

    input = T.matrix('input')
    extra_input = T.matrix('extra')
    output = T.matrix('output')

    transae = TransAE(num_capsules, in_dim, recog_dim, gener_dim, activation)
    cost = Cost(transae, input, extra_input)
    train = SGDTrain(input, extra_input, output,
                     (trans_train, shift_train, ori_train), transae, cost)
    train.main_loop((trans_valid, shift_valid, ori_valid),
                    (trans_test, shift_test, ori_test),
                    epochs=800, serialize=True)
def execute_command(self, command):
    """
    Execute the command entered by the user.
    Do any pre-processing needed before calling the matching execution function.
    :param command: the command entered
    :return: the value returned by the execution function
    """
    if command == 'exit':
        sys.exit()
    elif command.startswith('todo '):
        if self.current_user_level >= 2:
            """Team leader and above can add tasks"""
            description = command.split(' ', 1)[1]
            return self.project.add_task(ToDo(description, False))
        else:
            raise ValueError('Team leader or above only can add task')
    elif command.startswith('deadline '):
        if self.current_user_level >= 2:
            """Team leader and above can add tasks"""
            command_part = command.split(' ', 1)[1]
            description = self.remove_from_word(command_part, 'by')
            by = self.remove_to_word(command_part, 'by')
            return self.project.add_task(Deadline(description, False, by))
        else:
            raise ValueError('Team leader or above only can add task')
    elif command.startswith('timeline '):
        if self.current_user_level >= 2:
            """Team leader and above can add tasks"""
            command_part = command.split(' ', 1)[1]
            description = self.remove_from_word(command_part, 'from')
            date = self.remove_to_word(command_part, 'from')
            start_date = self.remove_from_word(date, 'to')
            end_date = self.remove_to_word(date, 'to')
            return self.project.add_task(TimeLine(description, False, start_date, end_date))
        else:
            raise ValueError('Team leader or above only can add task')
    elif command.startswith('done '):
        if self.current_user_level >= 2:
            """Team leader and above can update tasks"""
            user_index = command.split(' ', 1)[1]
            index = int(user_index) - 1
            if index < 0:
                raise Exception('Index must be greater than 0')
            else:
                try:
                    self.project.tasks[index].mark_as_done()
                    return 'Congrats on completing a task ! :-)'
                except Exception:
                    raise Exception('No item at index ' + str(index + 1))
        else:
            raise ValueError('Team leader or above only can update task')
    elif command.startswith('pending '):
        if self.current_user_level >= 2:
            """Team leader and above can update tasks"""
            user_index = command.split(' ', 1)[1]
            index = int(user_index) - 1
            if index < 0:
                raise Exception('Index must be greater than 0')
            else:
                try:
                    self.project.tasks[index].mark_as_pending()
                    return 'Task marked as pending'
                except Exception:
                    raise Exception('No item at index ' + str(index + 1))
        else:
            raise ValueError('Team leader or above only can update task')
    elif command.startswith('resource '):
        if self.current_user_level >= 2:
            """Team leader and above can add resources"""
            command_part = command.split(' ', 1)[1]
            description = self.remove_from_word(command_part, 'is')
            quantity = self.remove_to_word(command_part, 'is')
            return self.project.add_resources(Resource(description, quantity))
        else:
            raise ValueError('Team leader or above only can add resource')
    elif command.startswith('cost of '):
        if self.current_user_level >= 3:
            """Manager and above can add costs"""
            command_part = command.split(' ', 2)[2]
            description = self.remove_from_word(command_part, 'is')
            cost = self.remove_to_word(command_part, 'is')
            return self.project.add_cost(Cost(description, cost))
        else:
            raise ValueError('Manager and above only can add cost')
    elif command.startswith('remove '):
        try:
            command_part = command.split(' ', 2)[1]
            command_index = command.split(' ', 2)[2]
            index = int(command_index) - 1
            if command_part == 'task':
                if self.current_user_level >= 2:
                    """Team leader and above can remove tasks"""
                    return self.project.remove_task(index)
                else:
                    raise ValueError('Team leader and above only can remove task')
            elif command_part == 'resource':
                if self.current_user_level >= 2:
                    """Team leader and above can remove resources"""
                    return self.project.remove_resource(index)
                else:
                    raise ValueError('Team leader and above only can remove resource')
            elif command_part == 'cost':
                if self.current_user_level >= 3:
                    """Manager and above can remove costs"""
                    return self.project.remove_cost(index)
                else:
                    raise ValueError('Manager and above only can remove cost')
        except Exception:
            raise ValueError(
                'Command format not recognized.\n Command: >>> remove [task/resource/cost] [index]'
            )
    else:
        logging.error('Command not recognized.')
        raise Exception('Command not recognized')
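The parser above relies on remove_from_word and remove_to_word, whose implementations are not shown here. A plausible sketch of their behavior, inferred from the call sites (everything before the keyword vs. everything after it), is given below; treat it as an assumption, not the project's actual code.

# Assumed behavior, inferred from the call sites above:
#   remove_from_word('return report by Friday', 'by') -> 'return report'
#   remove_to_word('return report by Friday', 'by')   -> 'Friday'
def remove_from_word(text, keyword):
    """Return the part of text before the first standalone keyword."""
    return text.split(' ' + keyword + ' ', 1)[0].strip()

def remove_to_word(text, keyword):
    """Return the part of text after the first standalone keyword."""
    parts = text.split(' ' + keyword + ' ', 1)
    return parts[1].strip() if len(parts) > 1 else ''

print(remove_from_word('return report by Friday', 'by'))  # 'return report'
print(remove_to_word('return report by Friday', 'by'))    # 'Friday'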
def __init__(self, **kwargs):
    Cost.__init__(self)
    self._wu = kwargs.pop('wu')
    self._target = np.array(kwargs.pop('desired_state'), dtype=np.float64)
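This constructor only stores a control-effort weight (_wu) and a desired state (_target); the evaluation method is not shown. One common way such a cost is evaluated is a quadratic penalty, sketched below as an assumption rather than this project's actual formula.

import numpy as np

# Hypothetical evaluation of a state-tracking + control-effort cost.
# The quadratic form below is an assumption; the original class only shows the constructor.
def quadratic_cost(state, control, target, wu):
    state_error = np.asarray(state, dtype=np.float64) - target
    tracking_term = float(state_error @ state_error)    # ||x - x_target||^2
    effort_term = wu * float(np.dot(control, control))  # wu * ||u||^2
    return tracking_term + effort_term

# Example with a 2-dimensional state and control
print(quadratic_cost([1.0, 0.0], [0.1, -0.2], np.array([0.0, 0.0]), wu=0.5))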
class RNN:
    def __init__(self):
        self.tag = self.__class__.__name__
        self.input_layer = []    # <Sample Object>
        self.hidden_layer = []   # <Net Object>
        self.output_layer = []   # <Net Object>
        self.learning_rate = 1.0    # learning rate
        self.max_iteration = 1      # maximum number of iterations
        self.convergence = 0.001    # convergence error
        self.activation = Method.Sigmoid
        self.iteration = 0          # current iteration count
        self.cost = Cost()          # Cost Function
        # Callbacks
        self.iteration_callback = None
        self.completion_callback = None

    def add_hidden_net(self, net):
        if not net:
            return
        # A hidden net added here must have has_recurrent = True (it owns a recurrent layer)
        self.hidden_layer.append(net)

    def add_output_net(self, net):
        if not net:
            return
        self.output_layer.append(net)

    # Add one training sample
    # Input: the sample's feature values and target values
    def add_sample(self, features=[], targets=[]):
        if not features or not targets:
            return
        sample = Sample(features, targets)
        self.input_layer.append(sample)

    # Add multiple training samples
    # Input: the feature values and target values of several samples
    def add_samples(self, sample_features=[], sample_targets=[]):
        for index, features in enumerate(sample_features):
            self.add_sample(features, sample_targets[index])

    def randomize_weights(self, min=0.0, max=1.0):
        sample = self.input_layer[0]
        input_count = len(sample.features)
        hidden_count = len(self.hidden_layer)
        for hidden_net in self.hidden_layer:
            hidden_net.randomize_weights(input_count, min, max)
            hidden_net.randomize_recurrent_weights(hidden_count, min, max)
            # The recurrent layer has the same number of neurons as the hidden layer
        for output_net in self.output_layer:
            output_net.randomize_weights(hidden_count, min, max)

    def uniform_activation(self, activation=Method.Sigmoid):
        self.activation = activation
        for hidden_net in self.hidden_layer:
            hidden_net.activation_method = activation
        for output_net in self.output_layer:
            output_net.activation_method = activation

    def create_nets(self, net_count, to_layer, has_recurrent):
        for i in range(net_count):
            net = Net(has_recurrent)
            to_layer.append(net)

    def create_hidden_layer_nets(self, net_count=1):
        self.create_nets(net_count, self.hidden_layer, True)

    def create_output_layer_nets(self, net_count=1):
        self.create_nets(net_count, self.output_layer, False)

    # sample_features: <List<Double>>, the feature vectors of the samples to predict
    def predicate(self, sample_features=[], completion_callback=None):
        for features in sample_features:
            network_outputs = self._forward(features)
            if completion_callback:
                completion_callback(features, network_outputs)

    # Whether to keep training the network
    def go_ahead(self):
        under_max_iteration = self.iteration < self.max_iteration   # the maximum number of iterations has not been reached
        over_cost_goal = self.cost.mse > self.convergence           # the convergence error has not been reached
        if self.iteration == 0:  # at the very start
            over_cost_goal = True
        return under_max_iteration and over_cost_goal

    def training(self, iteration_callback, completion_callback):
        self.iteration_callback = iteration_callback
        self.completion_callback = completion_callback
        total = len(self.input_layer)
        # Keep going while the maximum number of iterations and the convergence error have not been reached
        while self.go_ahead():
            self.cost.remove()
            self.iteration += 1
            for index, sample in enumerate(self.input_layer):
                self._training_network(sample)
                current_size = index + 1
                # The last sample of the epoch (full BPTT)
                if current_size == total:
                    self._bptt_update()
            # One iteration (epoch) has finished
            if self.iteration_callback:
                self.iteration_callback(self.iteration, self)
        # Training finished (reached the maximum number of iterations or the convergence error)
        if self.completion_callback:
            self.completion_callback(self.iteration, self)

    '''
    @ Private
    '''
    def _bptt_update(self):
        for hidden_net in self.hidden_layer:
            hidden_net.renew()
        for output_net in self.output_layer:
            output_net.renew()

    def _forward(self, features=[]):
        # First collect the output values of the recurrent-layer neurons
        recurrent_outputs = []
        for hidden_net in self.hidden_layer:
            recurrent_outputs.append(hidden_net.output_value)
        # Forward Pass
        hidden_outputs = []
        for hidden_net in self.hidden_layer:
            net_output = hidden_net.net_output(features, recurrent_outputs)
            hidden_outputs.append(net_output)
        # Network Output
        network_outputs = []
        for output_net in self.output_layer:
            net_output = output_net.net_output(hidden_outputs)
            network_outputs.append(net_output)
        return network_outputs

    # sample: Sample Object
    def _training_network(self, sample):
        if not sample:
            return
        # Forward Pass
        network_outputs = self._forward(sample.features)
        self.cost.add(network_outputs, sample.targets)

        # Backward Pass
        # To calculate the deltas of output-layer.
        for output_index, output_net in enumerate(self.output_layer):
            # Calculating deltas of output nets.
            target_value = sample.targets[output_index]
            output_value = network_outputs[output_index]
            output_error = target_value - output_value
            output_net.delta_value = (output_error * output_net.output_partial)

        # To calculate the deltas of output-layer to hidden-layer, and record the outputs of nets to an array.
        hidden_outputs = []
        recurrent_outputs = []
        # Compute the weight corrections (deltas) of the hidden layer
        for hidden_index, hidden_net in enumerate(self.hidden_layer):
            # First compute, for each hidden net, the error propagated back from the output layer: SUM(delta[t][k] * w[t][hk])
            # Output Layer to Hidden Layer.
            sum_delta = 0.0
            for output_net in self.output_layer:
                sum_delta += output_net.delta_value * output_net.weight_for_index(hidden_index)
            # Recurrent-layer to hidden-layer.
            sum_recurrent_delta = 0.0
            for recurrent_net in self.hidden_layer:
                # recurrent_net and hidden_net are the same neuron; different names are used only to keep the two roles apart
                sum_recurrent_delta += recurrent_net.delta_value * recurrent_net.recurrent_weight_for_index(hidden_index)
            # Hidden nets delta: (SUM(output-net-delta[t][k] * w(jk)) + SUM(recurrent-net-delta[t+1][h] * w(h'h))) * f'(hidden-net-output)
            hidden_net.delta_value = (sum_delta + sum_recurrent_delta) * hidden_net.output_partial
            # To record the net output for backpropagation updating.
            hidden_outputs.append(hidden_net.output_value)
            recurrent_outputs.append(hidden_net.previous_output)

        # Compute the delta weights of every connection from the output layer down to the input layer,
        # and record them into each net's timesteps.
        # To update weights of net-by-net between output-layer, hidden-layer and input-layer.
        for output_net in self.output_layer:
            output_net.calculate_delta_weights(self.learning_rate, hidden_outputs)
        # Fetch the input features to compute the delta weights of the hidden layer.
        # To fetch the outputs of input-layer for updating weights of hidden-layer.
        inputs = sample.features
        for hidden_net in self.hidden_layer:
            hidden_net.calculate_delta_weights(self.learning_rate, inputs, recurrent_outputs)
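Putting the RNN class together end to end: the sketch below wires up a tiny network using only the methods defined above (create_hidden_layer_nets, create_output_layer_nets, uniform_activation, add_samples, randomize_weights, training, predicate). The XOR-style toy data and the callback bodies are illustrative assumptions, not part of the original project.

# Illustrative usage of the RNN class above; the toy data and callbacks are assumptions.
rnn = RNN()
rnn.create_hidden_layer_nets(4)    # 4 recurrent hidden nets
rnn.create_output_layer_nets(1)    # 1 output net
rnn.uniform_activation(Method.Sigmoid)

# A tiny toy set: each sample has 2 features and 1 target.
features = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
targets = [[0.0], [1.0], [1.0], [0.0]]
rnn.add_samples(features, targets)

rnn.randomize_weights(-0.5, 0.5)
rnn.max_iteration = 100
rnn.learning_rate = 0.5

def on_iteration(iteration, network):
    print('iteration', iteration, 'mse', network.cost.mse)

def on_completion(iteration, network):
    network.predicate(features, lambda feats, outputs: print(feats, '->', outputs))

rnn.training(on_iteration, on_completion)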
def main(Vxf0, urdf, options):
    gmm = GMM(num_clusters=options['num_clusters'])

    if options['args'].data_type == 'h5_data':
        filename = '{}.h5'.format('torobo_processed_data')
        with h5py.File(filename, 'r+') as f:
            data = f['data/data'].value
        logging.debug('data shape: {}'.format(data.shape))
    elif options['args'].data_type == 'pipe_et_trumpet':
        path = 'data'
        name = 'cart_pos.csv'
        data, data6d = format_data(path, name, learn_type='2d')
        if options['use_6d']:
            data = data6d

    Vxf0['d'] = int(data.shape[0] / 2)
    Vxf0.update(Vxf0)

    Vxf0 = guess_init_lyap(data, Vxf0, options['int_lyap_random'])
    cost = Cost()

    while cost.success:
        Vxf, J = cost.learnEnergy(Vxf0, data, options)
        if cost.success:
            rospy.logdebug('optimization succeeded without violating constraints')
            break
        # constraints were violated: increase L and restart the optimization process
        old_l = Vxf0['L']
        Vxf0['L'] += 1
        rospy.logwarn('Constraints violated. increasing the size of L from {} --> {}'.format(old_l, Vxf0['L']))

    # get gmm params
    gmm.update(data.T, K=options['num_clusters'], max_iterations=100)
    mu, sigma, priors = gmm.mu, gmm.sigma, gmm.logmass

    if options['disp']:
        logging.debug('mu {}, sigma {}, priors: {}'.format(mu.shape, sigma.shape, priors.shape))

    inp = slice(0, Vxf['d'])             # np.array(range(0, Vxf['d']))
    out = slice(Vxf['d'], 2 * Vxf['d'])  # np.array(range(Vxf['d'], 2 * Vxf['d']))
    rho0, kappa0 = 1.0, 1.0

    gmr_handle = lambda x: GMR(priors, mu, sigma, x, inp, out)
    stab_handle = lambda dat: dsStabilizer(dat, gmr_handle, Vxf, rho0, kappa0)

    x0_all = data[0, :]
    XT = data[-1, :]

    logger.debug('XT: {} x0: {} '.format(XT.shape, x0_all.shape))

    home_pos = [0.0] * 7
    executor = ToroboExecutor(home_pos, urdf)
    x, xd = executor.optimize_traj(data6d, stab_handle, opt_exec)
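The retry loop around learnEnergy implements a simple "grow the parameterization until the solver succeeds" pattern. A generic, self-contained sketch of that pattern, with a hypothetical solve callable standing in for cost.learnEnergy, is:

# Generic sketch of the retry pattern above: enlarge a size parameter L until
# the solver reports success. 'solve' is a hypothetical callable standing in
# for cost.learnEnergy; it must return (result, success).
def solve_with_growing_L(solve, params, max_L=10):
    while params['L'] <= max_L:
        result, success = solve(params)
        if success:
            return result, params['L']
        params['L'] += 1   # constraints violated: enlarge L and retry
    raise RuntimeError('no feasible solution up to L = {}'.format(max_L))

# Toy example: the solver "succeeds" once L reaches 3
result, L = solve_with_growing_L(lambda p: (p['L'] ** 2, p['L'] >= 3), {'L': 0})
print(result, L)   # 9 3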