Example no. 1
0
def filter_journeys(responses, new_resp, instance, api_request):
    """Filter out unwanted and similar journeys from the new response.

    Applies journey_filter.filter_journeys lazily to new_resp, then marks
    journeys of new_resp that are too similar either to each other or to
    journeys already qualified in the previously collected responses.
    """
    # filter_journeys is lazy: it yields journeys only when consumed below
    kept_new_journeys = journey_filter.filter_journeys(new_resp, instance, api_request)

    # two independent iterators over the same lazy stream
    new_for_pairs, new_for_product = itertools.tee(kept_new_journeys)

    previously_qualified = journey_filter.get_qualified_journeys(responses)

    # Similarity filtering works on 2-element tuples of journeys, built in
    # two stages:
    #  1. every 2-combination of journeys inside the new response only, which
    #     may trigger early eliminations and speed up the second stage a bit;
    #  2. the cartesian product of the new journeys with the journeys already
    #     qualified during former iterations, e.g. for new [n_1, n_2] and
    #     qualified [q_1, q_2, q_3] the product yields:
    #     (n_1, q_1), (n_1, q_2), (n_1, q_3), (n_2, q_1), (n_2, q_2), (n_2, q_3)
    pairs_to_compare = itertools.chain(
        itertools.combinations(new_for_pairs, 2),
        itertools.product(new_for_product, previously_qualified),
    )

    journey_filter.filter_similar_vj_journeys(pairs_to_compare, api_request)
Example no. 2
0
    def create_next_kraken_request(self, request, responses):
        """
        Modify *request* in place so the next kraken call fetches the next
        (resp. previous, for a non-clockwise search) journeys.

        Returns the updated request, or None when no next query datetime
        could be determined.
        """
        # Prefer the datetime kraken explicitly sent back for the next call:
        # it lets us skip the journeys of the current response.
        if responses and responses[0].HasField('next_request_date_time'):
            request['datetime'] = responses[0].next_request_date_time
        else:
            qualified = journey_filter.get_qualified_journeys(responses)
            clockwise = request["clockwise"]
            compute_datetime = (self.next_journey_datetime
                                if clockwise else self.previous_journey_datetime)
            request['datetime'] = compute_datetime(qualified, clockwise)

        if request['datetime'] is None:
            logger = logging.getLogger(__name__)
            logger.error("In response next_request_date_time does not exist")
            return None

        # TODO: forbid ODTs
        return request
Example no. 3
0
def test_get_qualified_journeys():
    """Journeys tagged 'to_delete' must not be returned as qualified."""
    responses = [response_pb2.Response()]
    journey1 = responses[0].journeys.add()
    journey1.tags.append("a_tag")

    journey2 = responses[0].journeys.add()
    journey2.tags.append("to_delete")

    journey3 = responses[0].journeys.add()
    journey3.tags.append("another_tag")
    journey3.tags.append("to_delete")

    # Materialize the generator and check the count explicitly: the original
    # 'for ... assert' loop passed vacuously when nothing was yielded.
    qualified = list(jf.get_qualified_journeys(responses))
    assert len(qualified) == 1
    assert qualified[0].tags[0] == 'a_tag'
Example no. 4
0
def test_similar_journeys():
    """Two journeys riding the same vehicle_journey: only one stays qualified."""

    responses = [response_pb2.Response()]

    first = responses[0].journeys.add()
    first.sections.add()
    first.duration = 42
    first.sections[0].uris.vehicle_journey = 'bob'

    second = responses[0].journeys.add()
    second.sections.add()
    second.duration = 43
    second.sections[0].uris.vehicle_journey = 'bob'

    jf.filter_similar_vj_journeys(list(journey_pairs_gen(responses)), {})
    assert len(list(jf.get_qualified_journeys(responses))) == 1
Example no. 5
0
def journey_pairs_gen(list_responses):
    """Yield every unordered pair of qualified journeys from the responses."""
    qualified = jf.get_qualified_journeys(list_responses)
    return itertools.combinations(qualified, 2)
Example no. 6
0
    def fill_journeys(self, request_type, api_request, instance):
        """
        Fetch journeys from kraken and post-process them.

        Kraken may be called several times, until enough qualified journeys
        have been collected (min_nb_journeys) or the retry limits are hit.
        Returns the merged, sorted, tagged protobuf response with pagination
        links, or an empty Response when max_nb_journeys <= 0.
        """
        logger = logging.getLogger(__name__)

        # an explicit request for 0 (or fewer) journeys short-circuits everything
        if api_request['max_nb_journeys'] is not None and api_request[
                'max_nb_journeys'] <= 0:
            return response_pb2.Response()

        # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
        origin_detail = self.get_entrypoint_detail(api_request.get('origin'),
                                                   instance)
        destination_detail = self.get_entrypoint_detail(
            api_request.get('destination'), instance)
        # we store the origin/destination detail in g to be able to use them after the marshall
        g.origin_detail = origin_detail
        g.destination_detail = destination_detail

        # fall back to the caller-provided ids when no kraken id was resolved
        api_request['origin'] = get_kraken_id(
            origin_detail) or api_request.get('origin')
        api_request['destination'] = get_kraken_id(
            destination_detail) or api_request.get('destination')

        # building ridesharing request from "original" request
        ridesharing_req = deepcopy(api_request)

        # walking is the default fallback street-network mode on both ends
        if not api_request['origin_mode']:
            api_request['origin_mode'] = ['walking']
        if not api_request['destination_mode']:
            api_request['destination_mode'] = ['walking']

        # Return the possible couples combinations (origin_mode and destination_mode)
        krakens_call = get_kraken_calls(api_request)

        # We need the original request (api_request) for filtering, but request
        # is modified by create_next_kraken_request function.
        request = deepcopy(api_request)

        # min_nb_journeys option (defaults to 1 when absent or falsy)
        if request['min_nb_journeys']:
            min_nb_journeys = request['min_nb_journeys']
        else:
            min_nb_journeys = 1

        responses = []
        nb_try = 0  # number of kraken calls made so far
        nb_qualified_journeys = 0  # journeys still qualified after filtering
        nb_previously_qualified_journeys = 0  # same count at the previous iteration
        last_chance_retry = False  # one extra call when an iteration brought nothing new

        min_journeys_calls = request.get('_min_journeys_calls', 1)
        max_journeys_calls = app.config.get('MAX_JOURNEYS_CALLS', 20)
        # NOTE(review): the call cap follows min_nb_journeys (bounded by
        # MAX_JOURNEYS_CALLS) — confirm this coupling is intended.
        max_nb_calls = min(min_nb_journeys, max_journeys_calls)

        # Initialize a context for distributed
        distributed_context = self.get_context()

        # keep calling kraken while qualified journeys are lacking and the
        # call budget (max_nb_calls / min_journeys_calls) is not exhausted
        while request is not None and (
            (nb_qualified_journeys < min_nb_journeys and nb_try < max_nb_calls)
                or nb_try < min_journeys_calls):

            nb_try = nb_try + 1

            # The parameter 'min_nb_journeys' isn't used in the following cases:
            # - If there's more than one single origin_mode and destination_mode couple.
            # - If there was no journey qualified in the previous response, the last chance request is limited
            if len(krakens_call) > 1 or last_chance_retry:
                request['min_nb_journeys'] = 0
            elif api_request['min_nb_journeys']:
                # only ask kraken for the journeys still missing
                min_nb_journeys_left = min_nb_journeys - nb_qualified_journeys
                request['min_nb_journeys'] = max(0, min_nb_journeys_left)

            new_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call, distributed_context)
            _tag_by_mode(new_resp)
            _tag_direct_path(new_resp)
            _tag_bike_in_pt(new_resp)

            if nb_journeys(new_resp) == 0:
                # no new journeys found, we stop
                # we still append the new_resp because there are journeys that a tagged as dead probably
                responses.extend(new_resp)
                break

            # becomes None when no next query datetime can be determined,
            # which terminates the loop
            request = self.create_next_kraken_request(request, new_resp)

            # we filter unwanted journeys in the new response
            # note that filter_journeys returns a generator which will be evaluated later
            filtered_new_resp = journey_filter.filter_journeys(
                new_resp, instance, api_request)

            # duplicate the generator
            tmp1, tmp2 = itertools.tee(filtered_new_resp)
            qualified_journeys = journey_filter.get_qualified_journeys(
                responses)

            # now we want to filter similar journeys in the new response which is done in 2 steps
            # In the first step, we compare journeys from the new response only , 2 by 2
            # hopefully, it may lead to some early return for the second step to improve the perf a little
            # In the second step, we compare the journeys from the new response with those that have been qualified
            # already in the former iterations
            # note that the journey_pairs_pool is a list of 2-element tuple of journeys
            journey_pairs_pool = itertools.chain(
                # First step: compare journeys from the new response only
                itertools.combinations(tmp1, 2),
                # Second step:
                # we use the itertools.product to create combinations between qualified journeys and new journeys
                # Ex:
                # new_journeys = [n_1, n_2]
                # qualified_journeys = [q_1, q_2, q_3]
                # itertools.product(new_journeys, qualified_journeys) gives combinations as follows:
                # (n_1, q_1), (n_1, q_2),(n_1, q_3),(n_2, q_1),(n_2, q_2),(n_2, q_3)
                itertools.product(tmp2, qualified_journeys),
            )

            journey_filter.filter_similar_vj_journeys(journey_pairs_pool,
                                                      api_request)

            responses.extend(
                new_resp)  # we keep the error for building the response

            nb_qualified_journeys = nb_journeys(responses)

            if api_request['timeframe_duration']:
                # If timeframe_duration is active, it is useless to recall Kraken,
                # it has already sent back what he could
                break

            if nb_previously_qualified_journeys == nb_qualified_journeys:
                # If there is no additional qualified journey in the kraken response,
                # another request is sent to try to find more journeys, just in case...
                if last_chance_retry:
                    break
                last_chance_retry = True
            nb_previously_qualified_journeys = nb_qualified_journeys

            if nb_qualified_journeys == 0:
                # nothing qualified yet: guarantee at least one more call
                min_journeys_calls = max(min_journeys_calls, 2)

        logger.debug('nb of call kraken: %i', nb_try)

        # final filtering pass now that all responses are collected
        journey_filter.apply_final_journey_filters(responses, instance,
                                                   api_request)
        pb_resp = merge_responses(responses, api_request['debug'])

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)

        # enrich with ridesharing ads only when the instance supports them and
        # the original request actually asked for the ridesharing mode
        if instance.ridesharing_services and (
                'ridesharing' in ridesharing_req['origin_mode']
                or 'ridesharing' in ridesharing_req['destination_mode']):
            logger.debug('trying to add ridesharing journeys')
            try:
                decorate_journeys(pb_resp, instance, api_request)
            except Exception:
                logger.exception('Error while retrieving ridesharing ads')
        else:
            # ridesharing was not requested: mark any ridesharing-tagged journey dead
            for j in pb_resp.journeys:
                if 'ridesharing' in j.tags:
                    journey_filter.mark_as_dead(
                        j, api_request.get('debug'),
                        'no_matching_ridesharing_found')

        journey_filter.delete_journeys((pb_resp, ), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance,
                                       api_request['clockwise'])
        return pb_resp