def add_walking_costs(self):
    geod = Geod(ellps="WGS84")
    for stop_key in self.stops_label_dict:
        stop_dict = self.stops_label_dict[stop_key]
        stops_around = Timetable.objects.filter(
            stop__stopLat__range=(stop_dict["stop_lat"] - LATS_DIFF, stop_dict["stop_lat"] + LATS_DIFF),
            stop__stopLon__range=(stop_dict["stop_lon"] - LON_DIFF, stop_dict["stop_lon"] + LON_DIFF),
            arrivalTime__range=(stop_dict["time_arrival"] - MAX_CHANGE_TIME, stop_dict["time_arrival"] + MAX_CHANGE_TIME),
            date=USER_DATE)
        for stop_around in stops_around:
            # if the change is to the same routeId - continue to the next loop iteration
            if stop_around.route.routeId == stop_dict["route_id"]:
                continue
            # calculating the geodesic distance between the two stops
            lons = [stop_dict["stop_lon"], stop_around.stop.stopLon]
            lats = [stop_dict["stop_lat"], stop_around.stop.stopLat]
            walk_distance = geod.line_length(lons, lats)
            # calculating the average time to walk between the two stops
            walk_time = walk_distance / AVERAGE_HUMAN_SPEED
            # if we can't walk to stop_around in time - continue to the next loop iteration
            if stop_dict["time_arrival"] + timedelta(minutes=walk_time) > stop_around.arrivalTime:
                continue
            stop_around_label_key = str(stop_around.route.routeId) + "|" + str(stop_around.stop.stopId)
            wait_time_ = -1
            # calculate the time the user has to wait for the new transport,
            # unless stop_around is the user's endpoint
            if stop_around.stop.stopId != self.end_stop_id:
                after_walk_datetime = stop_dict["time_arrival"] + timedelta(minutes=walk_time)
                wait_time_timedelta = stop_around.arrivalTime - after_walk_datetime
                if wait_time_timedelta > MAX_WAIT_TIME:
                    continue
                wait_time_ = CostMatrix.timedelta_to_minutes(wait_time_timedelta)
                summed_time = walk_time + wait_time_
            else:
                summed_time = walk_time
            # adding a new node to walking_label_dict
            if stop_around_label_key not in self.stops_label_dict and stop_around_label_key not in self.walking_label_dict:
                self.add_stop_label(stop_around, self.walking_label_dict, stop_around_label_key, walked_in=True)
            label_dict = self.walking_label_dict if stop_around_label_key in self.walking_label_dict else self.stops_label_dict
            cost_dict_key = str(stop_dict["cost_matrix_label"]) + "+" + \
                str(label_dict[stop_around_label_key]["cost_matrix_label"])
            # adding the new cost to cost_dict
            self.cost_dict[cost_dict_key] = [walk_time, wait_time_]
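The core of the walking-cost step above is the geodesic walk-distance and walk-time computation. A minimal standalone sketch of just that piece; the coordinates and the metres-per-minute walking speed are invented values, not constants from the project:

from datetime import timedelta
from pyproj import Geod

AVERAGE_HUMAN_SPEED = 1.4 * 60  # assumed ~1.4 m/s, expressed in metres per minute

geod = Geod(ellps="WGS84")
# geodesic distance between two nearby stops (longitudes first, then latitudes)
walk_distance = geod.line_length([21.012, 21.017], [52.229, 52.231])
walk_time = walk_distance / AVERAGE_HUMAN_SPEED  # minutes, as used above
print(f"{walk_distance:.0f} m on foot, about {timedelta(minutes=walk_time)}")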
def overground_distance(self, point: (float, float)) -> float:
    """
    horizontal distance over the ellipsoid

    :param point: (x, y) point
    :return: distance in ellipsoidal units
    """
    if not isinstance(point, numpy.ndarray):
        point = numpy.array(point)
    coordinates = numpy.stack([self.coordinates[:2], point], axis=0)
    if self.crs.is_projected:
        return numpy.hypot(*numpy.sum(numpy.diff(coordinates, axis=0), axis=0))
    else:
        ellipsoid = self.crs.datum.to_json_dict()["ellipsoid"]
        geodetic = Geod(a=ellipsoid["semi_major_axis"], rf=ellipsoid["inverse_flattening"])
        return geodetic.line_length(coordinates[:, 0], coordinates[:, 1])
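For context, a sketch of the geographic branch above in isolation: building a Geod from a CRS's ellipsoid definition via its PROJ JSON dict. The pyproj calls are real; the EPSG code and sample points are arbitrary:

from pyproj import CRS, Geod

crs = CRS.from_epsg(4326)
ellipsoid = crs.datum.to_json_dict()["ellipsoid"]
geodetic = Geod(a=ellipsoid["semi_major_axis"], rf=ellipsoid["inverse_flattening"])
# distance in metres between two lon/lat points on that ellipsoid
print(geodetic.line_length([0.0, 1.0], [51.0, 51.0]))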
def get_linelength_between_points(C1: list, C2: list) -> float:
    """Return the WGS84 geodesic length of the line through the given
    longitudes (C1) and latitudes (C2), in metres."""
    geod = Geod(ellps="WGS84")
    total_length = geod.line_length(C1, C2)
    return total_length
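A hedged usage sketch; the coordinates are invented:

# Longitudes first, latitudes second, in degrees (pyproj's line_length order).
lons = [-74.0, -102.0, -131.0]
lats = [-72.9, -71.9, -74.3]
print(get_linelength_between_points(lons, lats))  # geodesic length in metres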
def test_line_length__radians():
    geod = Geod(ellps="WGS84")
    total_length = geod.line_length([1, 2], [0.5, 1], radians=True)
    assert_almost_equal(total_length, 5426061.32197463, decimal=3)
def test_line_length__single_point():
    geod = Geod(ellps="WGS84")
    assert geod.line_length(1, 1) == 0
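For reference, the radians test above can be cross-checked in degrees; a small sketch assuming pyproj's default radians=False:

from numpy.testing import assert_almost_equal
from pyproj import Geod

geod = Geod(ellps="WGS84")
# the same two points as in the radians test, converted to degrees
deg_length = geod.line_length([57.29577951308232, 114.59155902616465],
                              [28.64788975654116, 57.29577951308232])
rad_length = geod.line_length([1, 2], [0.5, 1], radians=True)
assert_almost_equal(deg_length, rad_length, decimal=3)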
def run_skill_analysis(drifter_file, drifter_id, skill_files, date_range, grid,
                       period=pd.Timedelta('3 Days'), data_freq=pd.Timedelta('60 minutes')):
    """Calculate the skill score for a given particle run.

    Here, the skill score is calculated as in Liu and Weisberg (2011).

    Keyword arguments:
    drifter_file -- a .csv file of the drifter data with the columns
        'datetime', 'lat', and 'lon'
    drifter_id -- the ID of the drifter to read from drifter_file
    skill_files -- a sorted list of the model output netCDF files that contain
        the tracks from the model runs for skill
    date_range -- a numpy.ndarray object of numpy.datetime64 objects; the
        drifter and output data frequencies should match
    grid -- (not used in this function)
    period -- a pandas Timedelta object representing the time period for which
        to calculate the skill
    data_freq -- a pandas Timedelta object representing the frequency of the
        drifter data

    Returns: a numpy array of particle times, trajectory lengths, separation
    distances, and skill scores
    """
    geod = Geod(ellps="WGS84")
    drifter_data = get_drifter_data(drifter_file, drifter_id)
    drifter_data = drifter_data[drifter_data['datetime'].isin(date_range)]
    # query one of the files to find the number of particles
    rootgrp = Dataset(skill_files[0])
    num_particles = rootgrp['release'].size
    skill_data = np.empty((len(skill_files), num_particles, 4), dtype='O')
    # for each skill output file
    for i in range(len(skill_files)):
        # open the model data and convert times to datetimes
        rootgrp = Dataset(skill_files[i])
        units, epoch = rootgrp['time'].units.split(' since ')
        times = rootgrp['time'][:]
        times = pd.Timestamp(epoch) + pd.TimedeltaIndex(times, unit=units)
        # get the drifter coordinates at the same times as the model output
        drift_indices = drifter_data['datetime'].isin(times)
        drifter_lats = drifter_data['lat'].values[drift_indices]
        drifter_lons = drifter_data['lon'].values[drift_indices]
        # get the model coordinates
        model_indices = times.isin(drifter_data['datetime'])
        model_lats = rootgrp['lat'][:, model_indices]
        model_lons = rootgrp['lon'][:, model_indices]
        # for each particle
        for j in range(num_particles):
            # total trajectory length: the sum of the individual path lengths
            traj_len = geod.line_length(model_lons[j], model_lats[j])
            # separation distance between the drifter and particle endpoints
            lons = np.array([drifter_lons[-1], model_lons[j, -1]])
            lats = np.array([drifter_lats[-1], model_lats[j, -1]])
            sep_distance = geod.line_length(lons, lats)
            c = sep_distance / traj_len
            s = np.maximum(0, 1 - c)
            # save the skill information to the array
            skill_data[i, j] = np.array([times[0], traj_len, sep_distance, s])
    return skill_data
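The skill score itself reduces to a few lines. A minimal sketch of that calculation with synthetic coordinates (invented values, not real drifter or model data), following the same separation-over-trajectory-length form as the function above:

import numpy as np
from pyproj import Geod

geod = Geod(ellps="WGS84")
# made-up model trajectory and drifter endpoint (lon, lat in degrees)
model_lons = np.array([174.0, 174.1, 174.25, 174.4])
model_lats = np.array([-41.0, -41.05, -41.12, -41.2])
drifter_end_lon, drifter_end_lat = 174.35, -41.18

traj_len = geod.line_length(model_lons, model_lats)
sep_distance = geod.line_length([drifter_end_lon, model_lons[-1]],
                                [drifter_end_lat, model_lats[-1]])
# Liu and Weisberg (2011): s = max(0, 1 - separation / trajectory length)
skill = max(0.0, 1 - sep_distance / traj_len)
print(f"trajectory {traj_len:.0f} m, separation {sep_distance:.0f} m, skill {skill:.2f}")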
def aco_algorithm(num_iteration, ants, nodes, visibility, cost_matrix_object, e, alpha, beta, display_num):
    geod = Geod(ellps="WGS84")
    cost_matrix = cost_matrix_object.cost_matrix
    stops_label_dict = cost_matrix_object.stops_label_dict
    walking_label_dict = cost_matrix_object.walking_label_dict
    start_nodes = cost_matrix_object.start_nodes
    end_nodes = cost_matrix_object.end_nodes
    route_dict = {}
    best_route = []
    dist_min_cost = 0
    dist_min_cost_arr = []
    if not start_nodes or not end_nodes:
        return route_dict, best_route, dist_min_cost, dist_min_cost_arr

    # FIXME consider multiple starting nodes
    # for now only the first possible start point is taken into consideration
    start = stops_label_dict[start_nodes[0]]["cost_matrix_label"]
    possible_ends = [stops_label_dict[possible_end]["cost_matrix_label"]
                     for possible_end in end_nodes if possible_end in stops_label_dict]
    possible_ends.extend([walking_label_dict[possible_end]["cost_matrix_label"]
                          for possible_end in end_nodes if possible_end in walking_label_dict])

    # initializing the pheromone present on the paths to stops
    pheromone = .1 * np.ones((nodes, nodes))
    branches_deleted = 0

    for ite in range(num_iteration):
        # plot pheromone levels
        display_pheromone(pheromone, num_iteration, ite + 1, display_num, cost_matrix_object.cost_matrix_size)
        for i in range(ants):
            # initial starting position of every ant
            route = [start]
            temp_visibility = np.array(visibility)  # creating a copy of visibility
            node = route[0]
            while node not in possible_ends:
                cur_loc = node
                temp_visibility[:, cur_loc] = 0  # zero the visibility of the current node
                p_feature = np.power(pheromone[cur_loc, :], beta)  # pheromone feature
                v_feature = np.power(visibility[cur_loc, :], alpha)  # visibility feature
                p_feature = p_feature[:, np.newaxis]  # add an axis to make a column vector
                v_feature = v_feature[:, np.newaxis]
                combine_feature = np.multiply(p_feature, v_feature)  # combined feature
                # checking if the ant can go any further - if not, the ant goes back to
                # the start node and tries to search for food again
                if not np.any(combine_feature):
                    # adding a walk path to the first end node
                    last_stop_key_label = cost_matrix_object.find_label_dict_key_with_cost_matrix_label(node)
                    endpoint_key_label = end_nodes[0]
                    last_stop_label_dict = walking_label_dict if last_stop_key_label in walking_label_dict else stops_label_dict
                    endpoint_label_dict = walking_label_dict if endpoint_key_label in walking_label_dict else stops_label_dict
                    # calculating the geodesic distance between the two stops
                    lons = [last_stop_label_dict[last_stop_key_label]["stop_lon"],
                            endpoint_label_dict[endpoint_key_label]["stop_lon"]]
                    lats = [last_stop_label_dict[last_stop_key_label]["stop_lat"],
                            endpoint_label_dict[endpoint_key_label]["stop_lat"]]
                    walk_distance = geod.line_length(lons, lats)
                    # calculating the average time to walk between the two stops
                    walk_time = walk_distance / AVERAGE_HUMAN_SPEED
                    if walk_time > 30:
                        # TODO - for every node - calculate walk time to check if we are not close enough to endpoint
                        counter = len(route) - 2
                        for route_node in reversed(route[1:]):
                            # delete the non-optimal graph branch
                            node_costs = cost_matrix_object.return_cost_matrix_cost_for_row(route_node)
                            # if the node does not have any other branches - delete it
                            non_zero_cols_indexes = np.nonzero(node_costs)[0]  # returns an array of indexes
                            if len(non_zero_cols_indexes) == 0:
                                cost_matrix_object.cost_matrix[route[counter]][route_node] = [0, -1]
                                cost_matrix[route[counter]][route_node] = [0, -1]
                                branches_deleted += 1
                                visibility[route[counter]][route_node] = 0
                                temp_visibility[route[counter]][route_node] = 0
                                print(f"node {route[counter]}->{route_node} deleted, branches deleted: {branches_deleted}")
                            else:
                                break
                            counter -= 1
                        route = [start]
                        node = route[0]
                        continue
                    else:
                        cost_matrix[node][possible_ends[0]] = [walk_time, 0]
                        route.append(possible_ends[0])
                        break
                total = np.sum(combine_feature)  # sum of all the features
                # probability of each element: probabilities(i) = combine_feature(i) / total
                probabilities = combine_feature / total
                cumulative_probabilities = np.cumsum(probabilities)
                r = np.random.random_sample()  # random number in [0, 1)
                # finding the next node: the first one whose cumulative probability
                # is higher than the random number (r)
                node = np.nonzero(cumulative_probabilities > r)[0][0]
                route.append(node)
            route_dict[i] = route

        dist_cost = {}
        for key in route_dict:
            route_cost = []
            for counter, node in enumerate(route_dict[key][:-1]):
                # calculating the total tour distance
                cost = cost_matrix_object.return_cost_matrix_cost(int(node), int(route_dict[key][counter + 1]))
                route_cost.append(cost)
            dist_cost[key] = route_cost  # storing the tour cost of the 'key'th ant
        dist_cost_sum = {key: sum(route_cost) for key, route_cost in dist_cost.items()}
        dist_min_loc = min(dist_cost_sum, key=dist_cost_sum.get)  # ant with the minimum tour cost
        dist_min_cost = dist_cost_sum[dist_min_loc]  # minimum of dist_cost
        dist_min_cost_arr.append(dist_min_cost)  # track the per-iteration minimum
        best_route = route_dict[dist_min_loc]  # current best route

        pheromone = (1 - e) * pheromone  # evaporation of pheromone with rate e
        for key in route_dict:
            for counter, node in enumerate(route_dict[key][:-1]):
                dt = 1 / dist_cost_sum[key]
                # updating the pheromone with delta (dt);
                # dt is greater when the tour cost is smaller
                pheromone[int(node), int(route_dict[key][counter + 1])] += dt

    return route_dict, best_route, dist_min_cost, dist_min_cost_arr
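The node-selection rule above is standard roulette-wheel ACO. A self-contained sketch of that core step with a toy four-node instance (pheromone and visibility values invented), keeping the same pheromone**beta * visibility**alpha weighting as the code above:

import numpy as np

rng = np.random.default_rng(0)
pheromone = 0.1 * np.ones((4, 4))
visibility = np.array([[0, 2, 1, 4],
                       [2, 0, 3, 1],
                       [1, 3, 0, 2],
                       [4, 1, 2, 0]], dtype=float)  # 1/cost, 0 = no edge
alpha, beta = 1.0, 1.0
cur_loc = 0

# combined attractiveness of each candidate next node
combine_feature = (pheromone[cur_loc] ** beta) * (visibility[cur_loc] ** alpha)
probabilities = combine_feature / combine_feature.sum()
# roulette wheel: the first node whose cumulative probability exceeds r
r = rng.random()
next_node = np.nonzero(np.cumsum(probabilities) > r)[0][0]
print(next_node, probabilities)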
from pyproj import CRS
from pyproj import Geod

print("doing pyproj test")

crs = CRS.from_epsg(4326)
assert crs.to_authority() == ('EPSG', '4326')
assert crs.prime_meridian.name == "Greenwich"

# and test based on geodesic line length example from
# https://pyproj4.github.io/pyproj/stable/examples.html
lats = [-72.9, -71.9, -74.9, -74.3, -77.5, -77.4, -71.7, -65.9, -65.7,
        -66.6, -66.9, -69.8, -70.0, -71.0, -77.3, -77.9, -74.7]
lons = [-74, -102, -102, -131, -163, 163, 172, 140, 113,
        88, 59, 25, -4, -14, -33, -46, -61]
geod = Geod(ellps="WGS84")
total_length = geod.line_length(lons, lats)
assert 14 < total_length / 1e6 < 15
sites = ['OPO', 'MAU', 'WEST', 'FLE', 'TAS', 'LWR', 'CAP', 'CAM', 'KAI', 'GOB', 'TIM', 'HSB', 'BGB', 'FIO']
site = sites[int(sys.argv[1])]
geod = Geod(ellps="WGS84")

for file in sorted(glob.glob(f'/nesi/nobackup/vuw03073/bigboy/all_settlement/{site}*')):
    traj = nc.Dataset(file)
    ym = file[-9:-3]
    print(f'{site}_{ym}', flush=True)
    lon = traj.variables['lon'][:]
    lat = traj.variables['lat'][:]
    dists = np.zeros((len(lon), 2))
    for part in range(len(lon)):
        # drop masked (invalid) positions from the particle track
        part_lons = lon[part][np.where(lon[part].mask == False)]
        part_lats = lat[part][np.where(lat[part].mask == False)]
        # along-track distance and straight-line start-to-finish distance
        along_track = geod.line_length(part_lons, part_lats)
        start_finish = geod.line_length([part_lons[0], part_lons[-1]],
                                        [part_lats[0], part_lats[-1]])
        dists[part, 0] = along_track
        dists[part, 1] = start_finish
    outFile = open(f'bigboy_distances/{site}_{ym}.txt', 'w')
    np.savetxt(outFile, dists)
    outFile.close()
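A common follow-up to the two distances saved above is a straightness index (start-to-finish distance over along-track distance). A small sketch with an invented particle track:

import numpy as np
from pyproj import Geod

geod = Geod(ellps="WGS84")
part_lons = np.array([173.0, 173.2, 173.5, 173.4])
part_lats = np.array([-41.0, -41.1, -41.05, -40.9])

along_track = geod.line_length(part_lons, part_lats)
start_finish = geod.line_length([part_lons[0], part_lons[-1]],
                                [part_lats[0], part_lats[-1]])
# 1.0 for a perfectly straight track, approaching 0 as it meanders
print(start_finish / along_track)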