def extract_user_locations_child(
    self,
    child,
    original_name,
    parent_name,
    parent_location,
):
    """Resolve *child* (a lower-cased toponym) to a Location that is a
    child of *parent_location*.

    Looks the toponym up in the 'locations' Elasticsearch index and, among
    the candidates (most-translated first), returns the first one that is a
    child of *parent_location* and whose abbreviation set is compatible
    with *original_name*. Falls back to returning *parent_location* when
    the toponym is unknown or no candidate matches.

    NOTE(review): both call sites pass the parent's name as the 2nd
    positional argument and the original-cased string as the 3rd, which is
    the reverse of these parameter names — confirm the intended order.
    """
    try:
        locations = [
            Location({**loc, **{"toponym": child}})
            for loc in self.es.get(index='locations',
                                   id=child)['_source']['locations']
        ]
    except (elasticsearch.exceptions.NotFoundError, ValueError):
        # Unknown toponym: the best available answer is the parent itself.
        return parent_location
    # Prefer locations with many translations (proxy for importance).
    locations = sorted(locations,
                       key=lambda loc: loc.translations,
                       reverse=True)
    for loc in locations:
        # BUGFIX: the original condition read
        #     A or B and C   ==   A or (B and C)
        # so any candidate without an 'abbr' language was returned even
        # when it was NOT a child of parent_location. The child check
        # must apply in both cases: (A or B) and C.
        if (('abbr' not in loc.languages
             or original_name in loc.abbreviations)
                and loc.is_child_of(parent_location)):
            return loc
    return parent_location
def addLocationInformation(data, newLocation):
    """Make *newLocation* the environment's current location.

    Reuses an already-registered Location whose name matches; otherwise
    creates a fresh Location, makes it current, and appends it to the
    environment's location list.
    """
    env = data.environment
    existing = next(
        (loc for loc in env.locations if loc.name == newLocation), None)
    if existing is not None:
        env.location = existing
        return
    created = Location(newLocation)
    env.location = created
    env.locations.append(created)
def doc_to_namedtuple(self, doc, clean_text=LastTweetsDeque().clean_text):
    """Convert a raw document dict into an AnalyzedDocShort.

    Parameters
    ----------
    doc : dict
        Must contain 'locations', 'date', 'text', 'source.lang' and
        'source.author.id'; 'scores' is optional.
    clean_text : callable
        Normalizer applied to doc['text'].
        NOTE(review): the default is bound at import time, so all calls
        share one LastTweetsDeque instance — confirm this is intended.

    Raises
    ------
    KeyError
        When a required field is missing; the offending doc is printed
        first to aid debugging, then the exception is re-raised.
    """
    try:
        return AnalyzedDocShort(
            resolved_locations=[Location(loc) for loc in doc['locations']],
            date=doc['date'],
            language=doc['source']['lang'],
            text=doc['text'],
            clean_text=clean_text(doc['text']),
            # .get() replaces the `x if k in d else None` conditional.
            scores=doc.get('scores'),
            author_id=doc['source']['author']['id'])
    except KeyError:
        print(doc)
        raise
def generate_routes(demand_locations, progress, search_size):
    '''
    Generate the routes to pass onto the linear optimisation problem.

    A heuristic algorithm is used to reorder the nearest nodes to each
    location and then the genetic algorithm TSP is used to reorder the
    nodes for a slightly better time.

    Inputs
    --------
    demand_locations : list
        list of all the locations where there is a demand
    progress : object
        progress object for progress bar
    search_size : integer
        search size for extra permutation search (min = 2)

    Returns
    --------
    routes : list
        every candidate route produced by generate_route
    '''
    warehouse_location = Location(data2["Lat"]["Warehouse"],
                                  data2["Long"]["Warehouse"], "Warehouse", 0)
    # Create array for routes to be stored
    routes = []
    for current_location in demand_locations:
        # All demand locations except the current one
        # (idiom fix: `!=` instead of `not in [single_item_list]`).
        remaining_locations = [
            location for location in demand_locations
            if location.name != current_location.name
        ]
        distances = current_location.nearest_neighbours(remaining_locations)
        nearest_default = distances[:8]
        # Length-2 permutations of the search_size nearest neighbours
        # (56 in total when search_size == 8)
        permutations = list(itertools.permutations(distances[:search_size], 2))
        # Vary the maximum capacity of the trucks to generate more diverse solutions
        for maximum_capacity in range(current_location.demand, 13):
            for permutation in permutations:
                # Replace the start of the distances array with the permutation
                distances[:2] = permutation
                # NOTE(review): this slice assignment may change
                # len(distances) when the permutation does not overlap
                # nearest_default the same way each iteration — confirm
                # the heuristic relies on this.
                distances[2:search_size] = [
                    index for index in nearest_default
                    if index not in permutation
                ]
                # Run the generate route algorithm with the specified inputs
                routes.append(
                    generate_route(maximum_capacity,
                                   [warehouse_location, current_location],
                                   distances, remaining_locations))
                progress.increment()
    return routes
def find_user_location_town(
    self,
    name,
    original_name,
):
    """Look up *name* (a lower-cased toponym) in the 'locations' index.

    Returns a single-element list with the best-matching Location (tagged
    with its toponym), or an empty list when the toponym is unknown or
    every candidate is an abbreviation that *original_name* does not use.
    """
    try:
        hit = self.es.get(index='locations', id=name)['_source']['locations']
    except (elasticsearch.exceptions.NotFoundError, ValueError):
        return []
    # Candidates with the most translations first (importance proxy).
    candidates = sorted(hit, key=itemgetter('translations'), reverse=True)
    for candidate in candidates:
        # Skip abbreviation-only entries the user did not actually use.
        if ('abbr' in candidate['languages']
                and original_name not in candidate['abbreviations']):
            continue
        candidate.update({"toponym": name})
        return [Location(candidate)]
    return []
# BUGFIX: `math.factorial` is used below but `math` was never imported,
# which raised NameError at runtime. Imports regrouped stdlib / third-party
# / local per convention; nothing removed.
import math

import numpy as np
import pandas as pd
import seaborn as sns
from pulp import LpVariable, LpProblem, LpBinary, LpMinimize, lpSum, LpStatus, value

from classes import Location, Route, Solver, Progress
from data import data, data2, data3, data4, data5, data6
from generation import generate_routes, generate_coefficents
from plotting import plot_routes_basic, plot_routes_advanced
from simulation import simulate_weekday, simulate_weekend

if __name__ == '__main__':
    print("Running for Weekdays...")
    # Get the warehouse and other locations stored as location objects
    warehouse_location = Location(data2["Lat"]["Warehouse"],
                                  data2["Long"]["Warehouse"], "Warehouse", 0)
    demand_locations = [
        Location(data2["Lat"][name], data2["Long"][name], name,
                 data3.demand[name])
        for name in data.columns if name not in ["Warehouse"]
    ]
    # Calculate the total demand of all of the locations and use this to
    # calculate the total number of routes that will be generated
    total_demand = sum([location.demand for location in demand_locations])
    total_checks = sum([13 - location.demand for location in demand_locations])
    search_size = 8
    # Initialise the progress bar with the correct number of iterations:
    # one per (capacity, permutation) pair, P(search_size, 2) permutations.
    progress = Progress(
        total_checks * int((math.factorial(search_size) /
                            math.factorial(search_size - 2))),
        "Generating Routes")
    # Generate the routes using the generate_routes function
    routes = generate_routes(demand_locations, progress, search_size)
    # Secondary check to get the number of routes
'JoJo', 10, "she", "her", "JoJo starts with alert level of 10. Sneaky. Power: She can carry 1 extra item." ) kurtis = Character( 'Kurtis', 30, "he", "his", "Kurtis starts with alert level of 30. Detail oriented. Power: After 80, his alert level penalty is decreased by half points." ) joshua = Character( 'Joshua', 5, "he", "his", "Joshua starts with alert level of 5. Obsessive compulsive. Power: Doesn't leave a mess" ) annalise = Character('Annalise Keating', 0, "she", "her", "No mistakes!") main_players = [crystal, jojo, kurtis, joshua] # Location Instantiation josh_room = Location("Sean's hide-out", "\nYou are in your first room, the 'MURDER ROOM'.") liz_office = Location( "Liz's Office", "\nTake a look around, but don't take too long, it will look suspicious if she catches you." ) elevator = Location("Elevator", "\nThis gives you access to anywhere in the building.") roof = Location( "Roof", "\nIt's a stormy evening and walking around dripping water might draw a few eyes. Better hurry and gather supplies." ) kitchen = Location( "Kitchen", "\nThis is the busiest room in the building. You should do what you need to do quickly." ) gym = Location(
def extract_candidates_per_toponym(self,
                                   locations_dict,
                                   doc,
                                   filter=None,
                                   toponym_languages=None,
                                   filter_most_common_words=False):
    """Turn one Elasticsearch toponym hit into candidate Locations.

    Parameters
    ----------
    locations_dict : dict
        ES hit: '_id' is the toponym string, '_source'['locations'] the
        candidate location dicts.
    doc : object
        Analyzed document; only ``doc.language`` is read here.
    filter : container or None
        Toponyms to skip (per the comment below, toponyms contained in a
        longer toponym). NOTE(review): shadows the builtin ``filter``;
        kept as-is since the name is part of the interface.
    toponym_languages : list or None
        Languages the toponym may appear in; defaults to [doc.language].
    filter_most_common_words : bool
        When True and the toponym is a very common word in doc.language,
        keep only candidates whose population (from Postgres) > 100,000.

    Returns
    -------
    tuple or None
        (toponym, [Location, ...]) when candidates survive, else None.
    """
    toponym = locations_dict['_id']
    if not toponym_languages:
        toponym_languages = [doc.language]
    assert isinstance(toponym_languages, list)
    # Do not consider if toponym is part of other toponym
    if filter and toponym in filter:
        return None
    # Keep only candidates whose language tags are compatible with the
    # toponym ('general'/'partial'/'abbr' always pass).
    locations = [
        Location({
            **loc,
            **{
                'toponym': toponym
            }
        }, scores=True) for loc in locations_dict['_source']['locations']
        if ('general' in loc['languages'] or 'partial' in loc['languages']
            or 'abbr' in loc['languages'] or any(
                language in loc['languages']
                for language in toponym_languages))
    ]
    # Common-word toponyms are noisy: require a sizeable population
    # (looked up per-candidate in the Postgres `locations` table).
    if filter_most_common_words and toponym in self.most_common_words[
            doc.language]:
        locations_ = []
        for location in locations:
            self.pg.cur.execute(
                """
                SELECT population
                FROM locations
                WHERE location_ID = %s
            """, (location.location_ID, ))
            population, = self.pg.cur.fetchone()
            if population is not None and population > 100_000:
                locations_.append(location)
        locations = locations_
    # If multiple locations bear the same name and are family, only keep
    # the one with the highest number of translations in the locations
    # database. This is a proxy for the importance of the locations
    if len(locations) >= 2:
        to_discard = set()
        for loc1, loc2 in combinations(locations, 2):
            if loc1.is_child_of(loc2) or loc2.is_child_of(loc1):
                # Double sort: type preference first, then (stable sort)
                # translation count descending; discard the runner-up.
                sorted_locs = sorted(sorted(
                    [loc1, loc2],
                    key=lambda loc: self.order_of_preference[loc.type]),
                                     key=lambda loc: loc.translations,
                                     reverse=True)
                to_discard.add(sorted_locs[1].location_ID)
        if to_discard:
            locations = [
                loc for loc in locations
                if loc.location_ID not in to_discard
            ]
    if locations:
        return toponym, locations
    else:
        return None
def find_user_location(self, u_location):
    """Parses the location field of the user.

    The user field is split at a comma if present. If a comma is present,
    it is assumed that the part before the comma is the city and the
    second part the country. If no comma is present we assume that the
    user field specifies the country. Returns a (possibly empty) list of
    Locations.
    """
    if not u_location:
        return []
    # Multi-location fields ("NYC / London") are parsed piecewise.
    for ch in ('/', ' and ', '&', '|', ' - ', ';'):
        if ch in u_location:
            return [
                loc for split in u_location.split(ch)
                for loc in self.find_user_location(split)
            ]
    u_location = u_location.strip().replace('.', '')
    u_location_lower = u_location.lower()
    if ' - ' in u_location_lower:
        u_location_splitted = u_location_lower.split(' - ')
    else:
        u_location_splitted = u_location_lower.split(',')
    if len(u_location_splitted) == 1:
        # Single token: try ever-longer suffixes as a known region name.
        u_location_lower_splitted_space = u_location_lower.split(' ')
        for i in range(1, len(u_location_lower_splitted_space) + 1):
            name = ' '.join(u_location_lower_splitted_space[-i:])
            if name in self.region_names:
                parent_location_IDs = self.region_names[name]
                parent_locations = self.es.get(
                    index='locations', id=name)['_source']['locations']
                parent_locations = [
                    Location(loc) for loc in parent_locations
                    if loc['location_ID'] in parent_location_IDs
                ]
                original_name = ' '.join(
                    u_location.split(' ')[-len(name.split(' ')):])
                # Abbreviation-only entries must match the original casing.
                parent_locations = [
                    parent_location for parent_location in parent_locations
                    if not parent_location.abbreviations
                    or original_name in parent_location.abbreviations
                ]
                if parent_locations:
                    break
                else:
                    continue
        else:
            # No suffix is a known region: fall back to town lookup.
            return self.find_user_location_town(u_location_lower, u_location)
        # Whatever precedes the region name is treated as a child toponym.
        child = u_location_lower[:-len(name)].strip()
        if child:
            original_name_i = u_location_lower.index(child)
            original_name = u_location[original_name_i:original_name_i +
                                       len(child)]
            locations = []
            for parent_location in parent_locations:
                locations.append(
                    self.extract_user_locations_child(
                        child, name, original_name, parent_location))
            return locations
        else:
            return parent_locations
    elif len(u_location_splitted) == 2:
        child, parent = u_location_splitted
        child, parent = child.strip(), parent.strip()
        if parent not in self.region_names:
            # Parent is not found, so might be neighborhood, town rather than town, country.
            return self.find_user_location_town(
                parent, u_location.split(',')[-1].strip())
        else:
            parent_location_IDs = self.region_names[parent]
            parent_locations = self.es.get(
                index='locations', id=parent.lower())['_source']['locations']
            parent_locations = [
                Location(loc) for loc in parent_locations
                if loc['location_ID'] in parent_location_IDs
            ]
            original_parent_name = u_location.split(',')[-1].strip()
            parent_locations = [
                parent_location for parent_location in parent_locations
                if not parent_location.abbreviations
                or original_parent_name in parent_location.abbreviations
            ]
            if not parent_locations:
                return self.find_user_location_town(
                    parent, original_parent_name)
            else:
                return [
                    self.extract_user_locations_child(
                        child, parent, original_parent_name, parent_location)
                    for parent_location in parent_locations
                ]
    elif len(u_location_splitted) == 3:
        if ' - ' in u_location_lower:
            u_location_original_splitted = u_location.split(' - ')
        else:
            u_location_original_splitted = u_location.split(',')
        # Drop the middle part and re-parse "first,second-to-last".
        # BUGFIX: the list elements were joined with `+` (string concat)
        # inside a single-element list, so ','.join produced
        # "firstsecond" with no separator and the recursion took the
        # single-token path. A comma-separated two-element list was
        # clearly intended.
        return self.find_user_location(','.join([
            u_location_original_splitted[0],
            u_location_original_splitted[-2]
        ]))
    else:
        return []
from classes import Location, LocationAltitude as Alt LOCATIONS = [ Location( "Arakebo", "Arakebo, a small island with radio telescope observatory and post-flight rehabilitation clinic for kerbonauts.", helipad=(8.391118, 179.643722, Alt(60, 1711.71)), staff_spawn=(8.391172, 179.64704, Alt(37, 1733.78)), vip_spawn=(8.360934, 179.770392, Alt(6, 1529.22)), kk_base_name="Arakebo Observatory", ), Location( "Ben Bay", "KashCorp headquarter in the Ben Bay, not far from the KSC.", helipad=(13.2242, -64.1848, Alt(1, 40.84)), staff_spawn=(13.226728, -64.184141, Alt(2, 41.1)), launch_refund=25, ), Location( "Black Krags", "Small air base beside the Black Krags mountain range, primarily used for basic flight training.", runway=(11.32069, -87.6877, Alt(4, 321.2)), staff_spawn=(11.319044, -87.681677, Alt(1, 322.91)), launch_refund=10, recovery_factor=75, ), Location( "Coaler Crater", "Small airfield located in beautiful region of lakes and shores called Coaler Crater.", runway=(35.4291, -98.9055, Alt(1, 67.83)), staff_spawn=(35.428764, -98.915376, Alt(1, 69.07)),
def simulate_weekend(routes, chosen_routes, total_routes, total_chosen,
                     samples, traffic_multiplier, progress):
    '''
    Running a Monte-Carlo simulation for our optimal weekend (Saturday) routes

    Inputs
    --------
    routes: list
        list of all the routes in our linear program
    chosen_routes: list
        list of routes which were chosen in optimal solution
    total_routes: int
        total number of routes in linear program
    total_chosen: int
        total number of routes chosen in optimal solution
    samples: int
        number of simulations we are running
    traffic_multiplier: list
        list containing the traffic multipliers we are applying to our simulation
    progress: object
        progress object for progress bar

    Returns
    --------
    costs: list of lists
        one list of simulated total costs per traffic multiplier,
        `samples` entries each
    '''
    warehouse_location = Location(data2["Lat"]["Warehouse"],
                                  data2["Long"]["Warehouse"], "Warehouse", 0)
    # Initialise empty list for costs to be stored
    costs = [[] for _ in range(len(traffic_multiplier))]
    # Run each traffic multiplier a given number of times, defined by samples
    for i in range(len(traffic_multiplier)):
        for _j in range(samples):
            # Intialise cost of simulation and any unvisited nodes
            total_cost = 0
            shortages = []
            for route_index in chosen_routes:
                # Deep-copy so sampled demands never mutate the input routes
                route = copy.deepcopy(routes[route_index])
                # Randomly sample a demand for each node in the route except Warehouse
                for location in [
                        location for location in route.route
                        if location.name not in ["Warehouse"]
                ]:
                    location_type = data2["Type"][location.name]
                    demands = data6['Demand'][location_type]
                    # Randomly sample using bootstrap sampling
                    location.demand = random.sample(demands, 1)[0]
                # Update with new demand values (drop zero-demand stops)
                route.route = [
                    location for location in route.route
                    if location.name in ["Warehouse"] or location.demand > 0
                ]
                # Calculate the total demand for the new route (given randomly sampled demand values)
                new_demand = route.calc_demand()
                # Remove lowest demand node if demand exceeds truck capacity
                # (truck capacity is 12 pallets)
                while new_demand > 12:
                    least_demand = min(
                        [(route.route.index(location), location.demand)
                         for location in route.route
                         if location.name not in ["Warehouse"]],
                        key=operator.itemgetter(1))
                    shortages.append(
                        copy.deepcopy(route.route[least_demand[0]])
                    )  # Append removed node to unvisited list
                    route.route.pop(least_demand[0])
                    new_demand = route.calc_demand(
                    )  # Recalculate total demand of route
                # Calculate the total time in hours
                # (300 s unload time per pallet, distances in seconds)
                route_time = route.calc_distance(traffic_multiplier[i])
                route_time += route.calc_demand() * 300
                route_time /= 3600.0
                # Check if the time exceeds four hours
                if route_time > 4.0:
                    if route_index >= total_routes:
                        # Cost per 4 hour segment of leased truck
                        total_cost += 1200 * ((route_time // 4) + 1)
                    else:
                        # Non-leased schedule should not be allowed if time exceeds 4 hours
                        # ($600 base + overtime at $200/h, billed in 6-min steps)
                        total_cost += 600 + math.ceil(
                            (route_time - 4.0) * 10) / 10 * 200.0
                else:
                    if route_index < total_routes:
                        # Add the time with ceiling to 6 minute intervals
                        total_cost += math.ceil(route_time * 10) / 10 * 150.0
                    else:
                        # The truck is a leased truck
                        total_cost += 1200.0
            # print(f'{total_cost}, {time}, {route.calc_distance()}, {route.calc_demand()}')
            shortage_times = []
            # Begin generating new routes for unvisited nodes
            while len(shortages) > 0:
                current_demand = 0
                k = 0
                shortage_indices = []
                # Begin creating new routes (greedy bin-pack up to capacity 12)
                while current_demand < 12 and k < len(shortages):
                    shortage_demand = shortages[k].demand
                    # Add node to new route if demand doesn't exceed capacity
                    if current_demand + shortage_demand <= 12:
                        current_demand += shortage_demand
                        shortage_indices.append(k)
                    k += 1
                # Generate new feasible routes by running through previously utilised algorithm
                shortage_route = Solver(
                    [warehouse_location] +
                    [shortages[l] for l in shortage_indices], 5, 2, 0.0,
                    5).run()
                # Determine time to traverse the route
                shortage_time = shortage_route.calc_distance()
                shortage_time += shortage_route.calc_demand() * 300
                shortage_time /= 3600.0
                # Store the route time
                shortage_times.append(shortage_time)
                # Remove the visited nodes
                # (reverse order so earlier indices stay valid while popping)
                for index in sorted(shortage_indices, reverse=True):
                    shortages.pop(index)
            # print(shortage_times)
            # Add associated costs to new routes
            for l in range(len(shortage_times)):
                # Check if time exceeds 4 hours
                if shortage_times[l] >= 4.0:
                    # Cost per 4 hour segment of leased truck
                    total_cost += 1200 * ((shortage_times[l] // 4) + 1)
                else:
                    # NOTE(review): 20 appears to be the owned-fleet size;
                    # confirm against the linear program's truck limit.
                    if total_chosen + l <= 20:
                        # Add the time with ceiling to 6 minute intervals
                        total_cost += math.ceil(
                            shortage_times[l] * 10) / 10 * 150.0
                    else:
                        # Truck is a leased truck
                        total_cost += 1200
            # print(f'{total_cost}')
            costs[i].append(total_cost)
            progress.increment()
    return costs
def add_location(self, data, **kwargs):
    """Construct a Location from the mapping *data*.

    Extra keyword arguments are accepted for interface compatibility and
    ignored.
    """
    constructor = Location.Location
    return constructor(**data)
def add_geodata(self, data, **kwargs):
    """Construct a Geodata from the mapping *data*.

    Extra keyword arguments are accepted for interface compatibility and
    ignored.
    """
    geodata_cls = Location.Geodata
    return geodata_cls(**data)