def test_similar_journeys_multi_trasfer_and_different_waiting_durations():
    """
    If 2 journeys take the same vj, with the same number of sections and several
    waitings of different durations, compute each journey's "min waiting duration"
    and keep the journey whose "min waiting duration" is the larger one.
    """

    def add_pt_section(journey, vj_uri, duration):
        # public-transport section identified by its vehicle journey uri
        section = journey.sections.add()
        section.uris.vehicle_journey = vj_uri
        section.duration = duration

    def add_typed_section(journey, section_type, duration):
        # TRANSFER / WAITING section, no vehicle journey attached
        section = journey.sections.add()
        section.type = section_type
        section.duration = duration

    responses = [response_pb2.Response()]
    journey1 = responses[0].journeys.add()
    journey1.duration = 1000
    add_pt_section(journey1, 'bob', 200)
    add_typed_section(journey1, response_pb2.TRANSFER, 50)
    add_typed_section(journey1, response_pb2.WAITING, 150)
    add_pt_section(journey1, 'bobette', 200)
    add_typed_section(journey1, response_pb2.TRANSFER, 10)
    add_typed_section(journey1, response_pb2.WAITING, 190)
    add_pt_section(journey1, 'boby', 200)

    responses.append(response_pb2.Response())
    journey2 = responses[-1].journeys.add()
    journey2.duration = 1000
    add_pt_section(journey2, 'bob', 200)
    add_typed_section(journey2, response_pb2.TRANSFER, 20)
    add_typed_section(journey2, response_pb2.WAITING, 180)
    add_pt_section(journey2, 'bobette', 200)
    add_typed_section(journey2, response_pb2.TRANSFER, 100)
    add_typed_section(journey2, response_pb2.WAITING, 100)
    add_pt_section(journey2, 'boby', 200)

    jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})

    # journey1's min waiting is 150s, journey2's is 100s: journey2 gets filtered
    assert 'to_delete' not in journey1.tags
    assert 'to_delete' in journey2.tags
def filter_journeys(responses, new_resp, instance, api_request): # we filter unwanted journeys in the new response # note that filter_journeys returns a generator which will be evaluated later filtered_new_resp = journey_filter.filter_journeys(new_resp, instance, api_request) # duplicate the generator tmp1, tmp2 = itertools.tee(filtered_new_resp) qualified_journeys = journey_filter.get_qualified_journeys(responses) # now we want to filter similar journeys in the new response which is done in 2 steps # In the first step, we compare journeys from the new response only , 2 by 2 # hopefully, it may lead to some early return for the second step to improve the perf a little # In the second step, we compare the journeys from the new response with those that have been qualified # already in the former iterations # note that the journey_pairs_pool is a list of 2-element tuple of journeys journey_pairs_pool = itertools.chain( # First step: compare journeys from the new response only itertools.combinations(tmp1, 2), # Second step: # we use the itertools.product to create combinations between qualified journeys and new journeys # Ex: # new_journeys = [n_1, n_2] # qualified_journeys = [q_1, q_2, q_3] # itertools.product(new_journeys, qualified_journeys) gives combinations as follows: # (n_1, q_1), (n_1, q_2),(n_1, q_3),(n_2, q_1),(n_2, q_2),(n_2, q_3) itertools.product(tmp2, qualified_journeys), ) journey_filter.filter_similar_vj_journeys(journey_pairs_pool, api_request)
def test_similar_journeys_different_transfer(): """ If 2 journeys take the same vjs but with a different number of sections, one should be filtered """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections.add() journey1.duration = 42 journey1.sections[-1].uris.vehicle_journey = 'bobette' responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections.add() journey2.duration = 43 journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bobette' jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags
def test_similar_journeys(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[0].uris.vehicle_journey = 'bob' journey2 = responses[0].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[0].uris.vehicle_journey = 'bob' jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert len(list(jf.get_qualified_journeys(responses))) == 1
def test_similar_journeys_test3(): responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.sections.add() journey1.duration = 42 journey1.sections[0].uris.vehicle_journey = 'bob' responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.sections.add() journey2.duration = 43 journey2.sections[-1].uris.vehicle_journey = 'bobette' jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags
def test_similar_journeys_different_waiting_durations(): """ If 2 journeys take the same vj, same number of sections but with different waiting durations, filter one with smaller waiting duration """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 600 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections[-1].duration = 200 journey1.sections.add() journey1.sections[-1].type = response_pb2.TRANSFER journey1.sections[-1].duration = 50 journey1.sections.add() journey1.sections[-1].type = response_pb2.WAITING journey1.sections[-1].duration = 150 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bobette' journey1.sections[-1].duration = 200 responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 600 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections[-1].duration = 25 journey2.sections.add() journey2.sections[-1].type = response_pb2.WAITING journey2.sections[-1].duration = 175 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bobette' journey2.sections[-1].duration = 200 jf.filter_similar_vj_journeys(journey_pairs_gen(responses), {}) assert 'to_delete' not in journey2.tags assert 'to_delete' in journey1.tags
def test_similar_journeys_with_and_without_waiting_section(): """ If 2 journeys take the same vj, one with a waiting section and another without, filtere one with transfer but without waiting section """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 600 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bob' journey1.sections[-1].duration = 200 journey1.sections.add() journey1.sections[-1].type = response_pb2.TRANSFER journey1.sections[-1].duration = 50 journey1.sections.add() journey1.sections[-1].type = response_pb2.WAITING journey1.sections[-1].duration = 150 journey1.sections.add() journey1.sections[-1].uris.vehicle_journey = 'bobette' journey1.sections[-1].duration = 200 responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 600 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bob' journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].type = response_pb2.TRANSFER journey2.sections[-1].duration = 200 journey2.sections.add() journey2.sections[-1].uris.vehicle_journey = 'bobette' journey2.sections[-1].duration = 200 jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' in journey2.tags
def test_similar_journeys_walking_bike(): """ If we have 2 direct path, one walking and one by bike, we should not filter any journey """ responses = [response_pb2.Response()] journey1 = responses[0].journeys.add() journey1.duration = 42 journey1.sections.add() journey1.sections[-1].type = response_pb2.STREET_NETWORK journey1.sections[-1].street_network.mode = response_pb2.Walking responses.append(response_pb2.Response()) journey2 = responses[-1].journeys.add() journey2.duration = 42 journey2.sections.add() journey2.sections[-1].type = response_pb2.STREET_NETWORK journey2.sections[-1].street_network.mode = response_pb2.Bike jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {}) assert 'to_delete' not in journey1.tags assert 'to_delete' not in journey2.tags
def fill_journeys(self, request_type, api_request, instance):
    """
    Build the final journeys response by calling kraken in a loop, filtering and
    post-processing the results.

    Kraken is re-called until enough qualified journeys are gathered
    (min_nb_journeys) or the call budget is exhausted; the collected responses
    are then merged, sorted, tagged, possibly decorated with ridesharing
    journeys, and returned as a single protobuf response.

    :param request_type: kind of request, passed through to call_kraken (opaque here)
    :param api_request: dict of the original API parameters; 'origin'/'destination'
                        are rewritten in place with kraken ids
    :param instance: coverage instance (provides journey_order, ridesharing_services, ...)
    :return: a response_pb2.Response holding the merged, filtered journeys
    """
    logger = logging.getLogger(__name__)
    # a non-positive max_nb_journeys cannot yield anything: answer empty right away
    if api_request['max_nb_journeys'] is not None and api_request['max_nb_journeys'] <= 0:
        return response_pb2.Response()

    # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
    origin_detail = self.get_entrypoint_detail(api_request.get('origin'), instance)
    destination_detail = self.get_entrypoint_detail(api_request.get('destination'), instance)
    # we store the origin/destination detail in g to be able to use them after the marshall
    g.origin_detail = origin_detail
    g.destination_detail = destination_detail

    # fall back to the original ids when no kraken id could be resolved
    api_request['origin'] = get_kraken_id(origin_detail) or api_request.get('origin')
    api_request['destination'] = get_kraken_id(destination_detail) or api_request.get('destination')

    # building ridesharing request from "original" request
    # (snapshot taken before the walking-mode defaults below are applied)
    ridesharing_req = deepcopy(api_request)

    if not api_request['origin_mode']:
        api_request['origin_mode'] = ['walking']
    if not api_request['destination_mode']:
        api_request['destination_mode'] = ['walking']

    # Return the possible couples combinations (origin_mode and destination_mode)
    krakens_call = get_kraken_calls(api_request)

    # We need the original request (api_request) for filtering, but request
    # is modified by create_next_kraken_request function.
    request = deepcopy(api_request)

    # min_nb_journeys option
    if request['min_nb_journeys']:
        min_nb_journeys = request['min_nb_journeys']
    else:
        min_nb_journeys = 1

    responses = []
    nb_try = 0
    nb_qualified_journeys = 0
    nb_previously_qualified_journeys = 0
    last_chance_retry = False

    min_journeys_calls = request.get('_min_journeys_calls', 1)
    max_journeys_calls = app.config.get('MAX_JOURNEYS_CALLS', 20)
    max_nb_calls = min(min_nb_journeys, max_journeys_calls)

    # Initialize a context for distributed
    distributed_context = self.get_context()

    # loop until enough journeys are qualified, the call budget is spent, or
    # create_next_kraken_request returns None (no next request to make)
    while request is not None and (
        (nb_qualified_journeys < min_nb_journeys and nb_try < max_nb_calls) or nb_try < min_journeys_calls
    ):
        nb_try = nb_try + 1

        # The parameter 'min_nb_journeys' isn't used in the following cases:
        # - If there's more than one single origin_mode and destination_mode couple.
        # - If there was no journey qualified in the previous response, the last chance request is limited
        if len(krakens_call) > 1 or last_chance_retry:
            request['min_nb_journeys'] = 0
        elif api_request['min_nb_journeys']:
            min_nb_journeys_left = min_nb_journeys - nb_qualified_journeys
            request['min_nb_journeys'] = max(0, min_nb_journeys_left)

        new_resp = self.call_kraken(request_type, request, instance, krakens_call, distributed_context)

        _tag_by_mode(new_resp)
        _tag_direct_path(new_resp)
        _tag_bike_in_pt(new_resp)

        if nb_journeys(new_resp) == 0:
            # no new journeys found, we stop
            # we still append the new_resp because there are journeys that a tagged as dead probably
            responses.extend(new_resp)
            break

        request = self.create_next_kraken_request(request, new_resp)

        # we filter unwanted journeys in the new response
        # note that filter_journeys returns a generator which will be evaluated later
        filtered_new_resp = journey_filter.filter_journeys(new_resp, instance, api_request)

        # duplicate the generator (it is consumed twice in the pair pool below)
        tmp1, tmp2 = itertools.tee(filtered_new_resp)
        qualified_journeys = journey_filter.get_qualified_journeys(responses)

        # now we want to filter similar journeys in the new response which is done in 2 steps
        # In the first step, we compare journeys from the new response only , 2 by 2
        # hopefully, it may lead to some early return for the second step to improve the perf a little
        # In the second step, we compare the journeys from the new response with those that have been qualified
        # already in the former iterations
        # note that the journey_pairs_pool is a list of 2-element tuple of journeys
        journey_pairs_pool = itertools.chain(
            # First step: compare journeys from the new response only
            itertools.combinations(tmp1, 2),
            # Second step:
            # we use the itertools.product to create combinations between qualified journeys and new journeys
            # Ex:
            # new_journeys = [n_1, n_2]
            # qualified_journeys = [q_1, q_2, q_3]
            # itertools.product(new_journeys, qualified_journeys) gives combinations as follows:
            # (n_1, q_1), (n_1, q_2),(n_1, q_3),(n_2, q_1),(n_2, q_2),(n_2, q_3)
            itertools.product(tmp2, qualified_journeys),
        )

        journey_filter.filter_similar_vj_journeys(journey_pairs_pool, api_request)

        responses.extend(new_resp)  # we keep the error for building the response

        nb_qualified_journeys = nb_journeys(responses)

        if api_request['timeframe_duration']:
            # If timeframe_duration is active, it is useless to recall Kraken,
            # it has already sent back what he could
            break

        if nb_previously_qualified_journeys == nb_qualified_journeys:
            # If there is no additional qualified journey in the kraken response,
            # another request is sent to try to find more journeys, just in case...
            if last_chance_retry:
                break
            last_chance_retry = True

        nb_previously_qualified_journeys = nb_qualified_journeys

        if nb_qualified_journeys == 0:
            # nothing found yet: force at least one extra call
            min_journeys_calls = max(min_journeys_calls, 2)

    logger.debug('nb of call kraken: %i', nb_try)

    journey_filter.apply_final_journey_filters(responses, instance, api_request)
    pb_resp = merge_responses(responses, api_request['debug'])

    sort_journeys(pb_resp, instance.journey_order, api_request['clockwise'])
    compute_car_co2_emission(pb_resp, api_request, instance)
    tag_journeys(pb_resp)

    # ridesharing decoration only when the instance offers it AND the original
    # request (before walking defaults) asked for the ridesharing mode
    if instance.ridesharing_services and (
        'ridesharing' in ridesharing_req['origin_mode']
        or 'ridesharing' in ridesharing_req['destination_mode']
    ):
        logger.debug('trying to add ridesharing journeys')
        try:
            decorate_journeys(pb_resp, instance, api_request)
        except Exception:
            # best effort: a ridesharing failure must not break the whole response
            logger.exception('Error while retrieving ridesharing ads')
    else:
        # ridesharing was not requested: kill any ridesharing journey left over
        for j in pb_resp.journeys:
            if 'ridesharing' in j.tags:
                journey_filter.mark_as_dead(j, api_request.get('debug'), 'no_matching_ridesharing_found')

    journey_filter.delete_journeys((pb_resp,), api_request)
    type_journeys(pb_resp, api_request)
    culling_journeys(pb_resp, api_request)

    self._compute_pagination_links(pb_resp, instance, api_request['clockwise'])
    return pb_resp