def _manage_realtime(self, request, schedules, groub_by_dest=False):
    """Enrich the given stop schedules in place with realtime passages.

    Every schedule whose route point has a realtime proxy configured is
    queried concurrently (one greenlet per schedule, bounded by the
    instance realtime pool size); results are applied as they complete.

    NOTE(review): 'groub_by_dest' looks like a typo for 'group_by_dest';
    kept as-is so keyword callers are not broken.
    """
    pool = gevent.pool.Pool(self.instance.realtime_pool_size)
    # Greenlets do not inherit the flask request context, so capture it
    # now and re-install it inside each worker.
    reqctx = utils.copy_flask_request_context()

    def fetch(proxy, rp, req, sched):
        # Runs in a greenlet with the copied flask request context.
        with utils.copy_context_in_greenlet_stack(reqctx):
            return proxy, sched, self._get_next_realtime_passages(proxy, rp, req)

    greenlets = []
    for sched in schedules:
        rp = _get_route_point_from_stop_schedule(sched)
        proxy = self._get_realtime_proxy(rp)
        if proxy:
            greenlets.append(pool.spawn(fetch, proxy, rp, request, sched))

    for greenlet in gevent.iwait(greenlets):
        proxy, sched, passages = greenlet.get()
        proxy._update_stop_schedule(sched, passages, groub_by_dest)
def call_kraken(self, request_type, request, instance, krakens_call, context=None): """ For all krakens_call, call the kraken and aggregate the responses return the list of all responses """ # TODO: handle min_alternative_journeys # TODO: call first bss|bss and do not call walking|walking if no bss in first results record_custom_parameter('scenario', 'new_default') resp = [] logger = logging.getLogger(__name__) futures = [] reqctx = copy_flask_request_context() def worker(dep_mode, arr_mode, instance, request, flask_request_id): with copy_context_in_greenlet_stack(reqctx): return ( dep_mode, arr_mode, instance.send_and_receive( request, flask_request_id=flask_request_id), ) pool = gevent.pool.Pool(app.config.get('GREENLET_POOL_SIZE', 3)) for dep_mode, arr_mode, direct_path_type in krakens_call: pb_request = create_pb_request(request_type, request, dep_mode, arr_mode, direct_path_type) # we spawn a new greenlet, it won't have access to our thread local request object so we pass the request_id futures.append( pool.spawn(worker, dep_mode, arr_mode, instance, pb_request, flask_request_id=flask.request.id)) for future in gevent.iwait(futures): dep_mode, arr_mode, local_resp = future.get() # for log purpose we put and id in each journeys self.nb_kraken_calls += 1 for idx, j in enumerate(local_resp.journeys): j.internal_id = "{resp}-{j}".format(resp=self.nb_kraken_calls, j=idx) if dep_mode == 'ridesharing': switch_back_to_ridesharing(local_resp, True) if arr_mode == 'ridesharing': switch_back_to_ridesharing(local_resp, False) fill_uris(local_resp) resp.append(local_resp) logger.debug("for mode %s|%s we have found %s journeys", dep_mode, arr_mode, len(local_resp.journeys)) return resp
def next_departures(self, request):
    """Return the next departures, merged with realtime data when requested.

    Falls back to the plain theoretical response unless the request asks
    for realtime data freshness. Otherwise each route point with a
    realtime proxy is queried concurrently and the passages are updated,
    sorted, truncated to 'count', and the pagination is adjusted.
    """
    resp = self.__stop_times(request, api=type_pb2.NEXT_DEPARTURES, departure_filter=request["filter"])
    if request['data_freshness'] != RT_PROXY_DATA_FRESHNESS:
        return resp
    # Map each route point to a template passage used to build realtime entries.
    route_points = {
        RoutePoint(stop_point=passage.stop_point, route=passage.route): _create_template_from_passage(passage)
        for passage in resp.next_departures
    }
    # Also cover route points with no theoretical passage in the response.
    route_points.update(
        (RoutePoint(rp.route, rp.stop_point), _create_template_from_pb_route_point(rp))
        for rp in resp.route_points
    )
    # rt_proxy intentionally survives the loops below: after them it is truthy
    # iff at least one route point had a realtime proxy (it then gates the
    # pagination rewrite at the end).
    rt_proxy = None
    futures = []
    pool = gevent.pool.Pool(self.instance.realtime_pool_size)
    # Copy the current request context to be used in greenlet
    reqctx = utils.copy_flask_request_context()

    def worker(rt_proxy, route_point, template, request, resp):
        # Parameters deliberately shadow the outer names; runs in a greenlet.
        # Use the copied request context in greenlet
        with utils.copy_context_in_greenlet_stack(reqctx):
            return (
                resp,
                rt_proxy,
                route_point,
                template,
                self._get_next_realtime_passages(rt_proxy, route_point, request),
            )

    for route_point, template in route_points.items():
        rt_proxy = self._get_realtime_proxy(route_point)
        if rt_proxy:
            futures.append(pool.spawn(worker, rt_proxy, route_point, template, request, resp))

    for future in gevent.iwait(futures):
        resp, rt_proxy, route_point, template, next_rt_passages = future.get()
        rt_proxy._update_passages(resp.next_departures, route_point, template, next_rt_passages)

    # sort by departure time, then truncate to the requested count
    def sorter(p):
        return p.stop_date_time.departure_date_time

    resp.next_departures.sort(key=sorter)
    count = request['count']
    if len(resp.next_departures) > count:
        del resp.next_departures[count:]

    # handle pagination :
    # If real time information exist, we have to change pagination score.
    if rt_proxy:
        resp.pagination.totalResult = len(resp.next_departures)
        resp.pagination.itemsOnPage = len(resp.next_departures)
    return resp
def _manage_occupancies(self, schedules):
    """Fill in vehicle occupancy for every date_time of the given schedules.

    Queries the external vehicle-occupancy service concurrently (one
    greenlet per date_time, bounded by the realtime pool size) and sets
    date_time.occupancy in place. No-op when no occupancy service is
    configured, and date_times without source codes are skipped with a
    warning.
    """
    vo_service = self.instance.external_service_provider_manager.get_vehicle_occupancy_service()
    if not vo_service:
        return
    # Hoisted out of the loops: getLogger was previously re-fetched per warning.
    logger = logging.getLogger(__name__)
    futures = []
    # TODO define new parameter forseti_pool_size ?
    pool = gevent.pool.Pool(self.instance.realtime_pool_size)
    # Copy the current request context to be used in greenlet
    reqctx = utils.copy_flask_request_context()

    def worker(vo_service, date_time, args):
        # Use the copied request context in greenlet
        with utils.copy_context_in_greenlet_stack(reqctx):
            return (date_time, vo_service.get_response(args))

    for schedule in schedules:
        stop_point_codes = vo_service.get_codes('stop_point', schedule.stop_point.codes)
        if not stop_point_codes:
            # lazy %s args: the message is only built if the record is emitted
            logger.warning("Stop point without source code %s", schedule.stop_point.uri)
            continue
        for date_time in schedule.date_times:
            vehicle_journey_codes = vo_service.get_codes(
                'vehicle_journey', date_time.properties.vehicle_journey_codes
            )
            if not vehicle_journey_codes:
                logger.warning(
                    "Vehicle journey without source code %s", date_time.properties.vehicle_journey_id
                )
                continue
            args = vehicle_journey_codes + stop_point_codes
            futures.append(pool.spawn(worker, vo_service, date_time, args))

    for future in gevent.iwait(futures):
        date_time, occupancy = future.get()
        # Only overwrite when the service actually returned a value.
        if occupancy is not None:
            date_time.occupancy = occupancy
def update_response(self, instance, vehicle_positions, **kwargs):
    """Attach service responses to every vehicle journey position, in place.

    One greenlet per vehicle journey position queries the external
    service via self.get_response; results are applied as they complete.
    """
    # TODO define new parameter forseti_pool_size ?
    pool = gevent.pool.Pool(instance.realtime_pool_size)
    # Greenlets do not see the flask request context: capture it here and
    # restore it inside each worker.
    reqctx = utils.copy_flask_request_context()

    def query(vjp, codes):
        # Runs in a greenlet with the copied flask request context.
        with utils.copy_context_in_greenlet_stack(reqctx):
            return vjp, self.get_response(codes)

    jobs = []
    for position in vehicle_positions:
        for vjp in position.vehicle_journey_positions:
            codes = self.get_codes('vehicle_journey', vjp.vehicle_journey.codes)
            jobs.append(pool.spawn(query, vjp, codes))

    for job in gevent.iwait(jobs):
        vjp, response = job.get()
        self._vehicle_journey_position(vjp, response)
def departure_boards(self, request):
    """Return departure boards, merged with realtime data when requested.

    Returns the plain theoretical response unless the request asks for
    realtime data freshness; otherwise each stop schedule whose route
    point has a realtime proxy is updated concurrently with realtime
    passages before the response is returned.
    """
    resp = self.__stop_times(request, api=type_pb2.DEPARTURE_BOARDS, departure_filter=request["filter"])
    if request['data_freshness'] != RT_PROXY_DATA_FRESHNESS:
        return resp

    pool = gevent.pool.Pool(self.instance.realtime_pool_size)
    # Greenlets do not inherit the flask request context, so capture it
    # now and re-install it inside each worker.
    reqctx = utils.copy_flask_request_context()

    def fetch(proxy, rp, req, sched):
        # Runs in a greenlet with the copied flask request context.
        with utils.copy_context_in_greenlet_stack(reqctx):
            return proxy, sched, self._get_next_realtime_passages(proxy, rp, req)

    greenlets = []
    for sched in resp.stop_schedules:
        rp = _get_route_point_from_stop_schedule(sched)
        proxy = self._get_realtime_proxy(rp)
        if proxy:
            greenlets.append(pool.spawn(fetch, proxy, rp, request, sched))

    for greenlet in gevent.iwait(greenlets):
        proxy, sched, passages = greenlet.get()
        proxy._update_stop_schedule(sched, passages)

    return resp