Example #1
def decorate_journeys(response, instance, request):
    # TODO: disable same journey schedule link for ridesharing journey?
    for journey in response.journeys:
        if 'ridesharing' not in journey.tags or to_be_deleted(journey):
            continue
        for i, section in enumerate(journey.sections):
            if section.street_network.mode == response_pb2.Ridesharing:
                section.additional_informations.append(response_pb2.HAS_DATETIME_ESTIMATED)
                period_extremity = None
                if len(journey.sections) == 1:
                    # direct path: we use the user input
                    period_extremity = PeriodExtremity(request['datetime'], request['clockwise'])
                elif i == 0:
                    # ridesharing on the first section: we want to arrive before the start of the pt
                    period_extremity = PeriodExtremity(section.end_date_time, False)
                else:
                    # ridesharing at the end: we search for solutions starting after the end of the pt sections
                    period_extremity = PeriodExtremity(section.begin_date_time, True)

                pb_rsjs, pb_tickets, pb_fps = build_ridesharing_journeys(
                    section.origin, section.destination, period_extremity, instance
                )
                if not pb_rsjs:
                    journey_filter.mark_as_dead(journey, 'no_matching_ridesharing_found')
                else:
                    section.ridesharing_journeys.extend(pb_rsjs)
                    response.tickets.extend(pb_tickets)

                response.feed_publishers.extend(
                    fp for fp in pb_fps if fp not in response.feed_publishers
                )
Example #2
    def add_new_ridesharing_results(self, pb_rsjs, pb_tickets, pb_fps, response, journey_idx, section_idx):
        if not pb_rsjs:
            journey_filter.mark_as_dead(response.journeys[journey_idx], 'no_matching_ridesharing_found')
        else:
            response.journeys[journey_idx].sections[section_idx].ridesharing_journeys.extend(pb_rsjs)
            response.tickets.extend(pb_tickets)

        response.feed_publishers.extend(
            fp for fp in pb_fps if fp not in response.feed_publishers
        )
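
Both examples extend response.feed_publishers with only the publishers that are not already present. A stand-alone sketch of that dedup-extend pattern on plain lists (the names are illustrative); with a Python list, extend consumes the generator one item at a time, so duplicates inside new_items are filtered as well:

def extend_without_duplicates(target, new_items):
    """Append only the items that target does not already contain."""
    target.extend(item for item in new_items if item not in target)


publishers = ['blablacar']
extend_without_duplicates(publishers, ['klaxit', 'blablacar', 'klaxit'])
assert publishers == ['blablacar', 'klaxit']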
Example #3
    def fill_journeys(self, request_type, api_request, instance):
        logger = logging.getLogger(__name__)

        if api_request['max_nb_journeys'] is not None and api_request['max_nb_journeys'] <= 0:
            return response_pb2.Response()

        # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
        origin_detail = self.get_entrypoint_detail(api_request.get('origin'), instance)
        destination_detail = self.get_entrypoint_detail(api_request.get('destination'), instance)
        # we store the origin/destination detail in g to be able to use them after the marshall
        g.origin_detail = origin_detail
        g.destination_detail = destination_detail

        api_request['origin'] = get_kraken_id(origin_detail) or api_request.get('origin')
        api_request['destination'] = get_kraken_id(destination_detail) or api_request.get('destination')

        # building ridesharing request from "original" request
        ridesharing_req = deepcopy(api_request)

        if not api_request['origin_mode']:
            api_request['origin_mode'] = ['walking']
        if not api_request['destination_mode']:
            api_request['destination_mode'] = ['walking']

        # Return the possible combinations (origin_mode ,destination_mode, direct_path_type)
        krakens_call = get_kraken_calls(api_request)

        # We need the original request (api_request) for filtering, but request
        # is modified by create_next_kraken_request function.
        request = deepcopy(api_request)

        # min_nb_journeys option
        if request['min_nb_journeys']:
            min_nb_journeys = request['min_nb_journeys']
        else:
            min_nb_journeys = 1

        responses = []
        nb_try = 0
        nb_qualified_journeys = 0
        nb_previously_qualified_journeys = 0
        last_chance_retry = False

        min_journeys_calls = request.get('_min_journeys_calls', 1)
        max_journeys_calls = app.config.get('MAX_JOURNEYS_CALLS', 20)
        max_nb_calls = min(min_nb_journeys, max_journeys_calls)

        # Initialize a context for distributed
        distributed_context = self.get_context()

        while request is not None and (
            (nb_qualified_journeys < min_nb_journeys and nb_try < max_nb_calls) or nb_try < min_journeys_calls
        ):

            nb_try = nb_try + 1

            # The parameter 'min_nb_journeys' isn't used in the following cases:
            # - If there's more than one (origin_mode, destination_mode) couple.
            # - If no journey was qualified in the previous response: the last-chance request is sent without it
            if len(krakens_call) > 1 or last_chance_retry:
                request['min_nb_journeys'] = 0
            elif api_request['min_nb_journeys']:
                min_nb_journeys_left = min_nb_journeys - nb_qualified_journeys
                request['min_nb_journeys'] = max(0, min_nb_journeys_left)

            new_resp = self.call_kraken(request_type, request, instance, krakens_call, distributed_context)

            _tag_by_mode(new_resp)
            _tag_direct_path(new_resp)
            _tag_bike_in_pt(new_resp)

            if journey_filter.nb_qualifed_journeys(new_resp) == 0:
                # no new journeys found, we stop
                # we still append new_resp because it may contain journeys tagged as dead
                responses.extend(new_resp)
                break

            request = self.create_next_kraken_request(request, new_resp)

            filter_journeys(responses, new_resp, instance, api_request)

            responses.extend(new_resp)  # we keep the error for building the response

            if api_request['timeframe_duration']:
                # If timeframe_duration is active, it is useless to call Kraken again:
                # it has already sent back everything it could
                break

            nb_qualified_journeys = journey_filter.nb_qualifed_journeys(responses)
            if nb_previously_qualified_journeys == nb_qualified_journeys:
                # If there is no additional qualified journey in the kraken response,
                # another request is sent to try to find more journeys, just in case...
                if last_chance_retry:
                    break
                last_chance_retry = True
            nb_previously_qualified_journeys = nb_qualified_journeys

            if nb_qualified_journeys == 0:
                min_journeys_calls = max(min_journeys_calls, 2)

        logger.debug('nb of call kraken: %i', nb_try)

        journey_filter.apply_final_journey_filters(responses, instance, api_request)

        self.finalise_journeys(api_request, responses, distributed_context, instance, api_request['debug'])

        pb_resp = merge_responses(responses, api_request['debug'])

        sort_journeys(pb_resp, instance.journey_order, api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)

        if instance.ridesharing_services and (
            'ridesharing' in ridesharing_req['origin_mode']
            or 'ridesharing' in ridesharing_req['destination_mode']
        ):
            logger.debug('trying to add ridesharing journeys')
            try:
                decorate_journeys(pb_resp, instance, api_request)
            except Exception:
                logger.exception('Error while retrieving ridesharing ads')
        else:
            for j in pb_resp.journeys:
                if 'ridesharing' in j.tags:
                    journey_filter.mark_as_dead(j, api_request.get('debug'), 'no_matching_ridesharing_found')

        journey_filter.delete_journeys((pb_resp,), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance, api_request['clockwise'])
        return pb_resp
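
The loop above stops on three conditions: enough qualified journeys were accumulated, the call budget was spent (max_nb_calls, itself capped by the MAX_JOURNEYS_CALLS config), or a last-chance retry brought nothing new. A minimal simulation of that control flow with a stubbed kraken (all names below are illustrative; each stub call returns the number of newly qualified journeys, 0 meaning kraken answered but filtering kept nothing):

def simulate_calls(qualified_per_call, min_nb_journeys=3, min_journeys_calls=1, max_journeys_calls=20):
    """Count how many kraken calls the fill_journeys loop would make."""
    max_nb_calls = min(min_nb_journeys, max_journeys_calls)
    nb_try = nb_qualified = nb_previously = 0
    last_chance_retry = False
    calls = iter(qualified_per_call)
    while (nb_qualified < min_nb_journeys and nb_try < max_nb_calls) or nb_try < min_journeys_calls:
        nb_try += 1
        nb_qualified += next(calls, 0)
        if nb_previously == nb_qualified:  # nothing new was qualified
            if last_chance_retry:
                break  # the last chance failed too: give up
            last_chance_retry = True
        nb_previously = nb_qualified
        if nb_qualified == 0:
            min_journeys_calls = max(min_journeys_calls, 2)  # allow one more try
    return nb_try


assert simulate_calls([3]) == 1      # satisfied on the first call
assert simulate_calls([0, 0]) == 2   # one last-chance retry, then stop
assert simulate_calls([1, 1, 1]) == 3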
Example #4
def culling_journeys(resp, request):
    """
    Remove some journeys if there are too many of them to have max_nb_journeys journeys.

    resp.journeys should be sorted before this function is called

    The goal is to choose a bunch of journeys(max_nb_journeys) that covers as many as possible sections
    but have as few as possible sum(sections)

    Ex:

    From:

    Journey_1 : Line 1 -> Line 8 -> Bus 172
    Journey_2 : Line 14 -> Line 6 -> Bus 165
    Journey_3 : Line 14 -> Line 6 ->Line 8 -> Bus 165
    Journey_4 : Line 1 -> Line 8 -> Bus 172 (this may happen when timeframe_duration or same_journey_schedule is used)

    We'd like to choose two journeys. The algo will return Journey_1 and Journey2.
    Note that Journey_4 is similar to the Journey_1 and will be ignored when max_nb_journeys<=3

    Because
    With Journey_1 and Journey_3, they cover all lines but have 5 transfers in all
    With Journey_2 and Journey_3, they don't cover all lines(Line 1 is missing) and have 5 transfers in all

    With Journey_1 and Journey_2, they cover all lines and have only 4 transfers in all -> OK

    No removing done in debug
    """
    logger = logging.getLogger(__name__)

    max_nb_journeys = request["max_nb_journeys"]
    if max_nb_journeys is None or max_nb_journeys >= len(resp.journeys):
        logger.debug('No need to cull journeys')
        return

    """
    Why aggregating journeys before culling journeys?
    We have encountered severe slowness when combining max_nb_journeys(ex: 20) and a big timeframe_duration(ex: 86400s).
    It turned out that, with this configuration, kraken will return a lot of journeys(ex: 100 journeys) and the
    algorithm was trying to figure out the best solution over 5.35E+20 possible combinations
    ( 5.35E+20=Combination(100,20) )!!

    aggregated_journeys will group journeys that are similar('similar' is defined by 'Journeys that have the same sequence
    of sections are similar'), which reduces the number of possible combinations considerably
    """
    aggregated_journeys, remaining_journeys = aggregate_journeys(resp.journeys)
    logger.debug(
        'aggregated_journeys: {} remaining_journeys: {}'.format(
            len(aggregated_journeys), len(remaining_journeys)
        )
    )
    is_debug = request.get('debug')

    if max_nb_journeys >= len(aggregated_journeys):
        """
        In this case, we return all aggregated_journeys plus earliest/latest journeys in remaining journeys
        """
        for j in remaining_journeys[max(0, max_nb_journeys - len(aggregated_journeys)) :]:
            journey_filter.mark_as_dead(
                j, is_debug, 'max_nb_journeys >= len(aggregated_journeys), ' 'Filtered by max_nb_journeys'
            )
        journey_filter.delete_journeys((resp,), request)
        return

    """
    When max_nb_journeys < len(aggregated_journeys), we first remove all remaining journeys from final response because
    those journeys already have a similar journey in aggregated_journeys
    """
    for j in remaining_journeys:
        journey_filter.mark_as_dead(
            j, is_debug, 'Filtered by max_nb_journeys, ' 'max_nb_journeys < len(aggregated_journeys)'
        )

    logger.debug('Trying to cull the journeys')

    """
    To create a candidates pool, we choose only journeys that are NOT tagged as 'comfort' and 'best' and we create a
    section set from that pool

    Ex:
    Journey_1 (Best): Line 14 -> Line 8 -> Bus 172
    Journey_2 : Line 14 -> Line 6 -> Bus 165
    Journey_3 : Line 14 -> Line 8 -> Bus 165

    The candidate pool will be like [Journey_2, Journey_3]
    The sections set will be like set([Line 14, Line 6, Line 8, Bus 165])
    """
    candidates_pool, sections_set, idx_of_jrnys_must_keep = _build_candidate_pool_and_sections_set(
        aggregated_journeys
    )

    nb_journeys_must_have = len(idx_of_jrnys_must_keep)
    logger.debug("There are {0} journeys we must keep".format(nb_journeys_must_have))

    if max_nb_journeys <= nb_journeys_must_have:
        # At this point, max_nb_journeys is smaller than or equal to nb_journeys_must_have; we have to make choices

        def _inverse_selection(d, indexes):
            select = np.in1d(list(range(d.shape[0])), indexes)
            return d[~select]

        # Here we mark all journeys as dead that are not must-have
        for jrny in _inverse_selection(candidates_pool, idx_of_jrnys_must_keep):
            journey_filter.mark_as_dead(jrny, is_debug, 'Filtered by max_nb_journeys')

        if max_nb_journeys == nb_journeys_must_have:
            logger.debug('max_nb_journeys equals nb_journeys_must_have')
            journey_filter.delete_journeys((resp,), request)
            return

        logger.debug(
            'max_nb_journeys:{0} is smaller than nb_journeys_must_have:{1}'.format(
                request["max_nb_journeys"], nb_journeys_must_have
            )
        )

        # At this point, resp.journeys should contain only must-have journeys
        list_dict = collections.defaultdict(list)
        for jrny in resp.journeys:
            if not journey_filter.to_be_deleted(jrny):
                list_dict[jrny.type].append(jrny)

        sorted_by_type_journeys = []
        for t in JOURNEY_TYPES_TO_RETAIN:
            sorted_by_type_journeys.extend(list_dict.get(t, []))

        for jrny in sorted_by_type_journeys[max_nb_journeys:]:
            journey_filter.mark_as_dead(jrny, is_debug, 'Filtered by max_nb_journeys')

        journey_filter.delete_journeys((resp,), request)
        return

    logger.debug('Trying to find {0} journeys from {1}'.format(max_nb_journeys, candidates_pool.shape[0]))

    """
    Ex:
    Journey_2 : Line 14 -> Line 6 -> Bus 165
    Journey_3 : Line 14 -> Line 8 -> Bus 165

    The candidate pool will be like [Journey_2, Journey_3]
    The sections set will be like set([Line 14, Line 6, Line 8, Bus 165])

    selected_sections_matrix:
    [[1,1,0,1] -> journey_2
     [1,0,1,1] -> journey_3
    ]
    """
    selected_sections_matrix = _build_selected_sections_matrix(sections_set, candidates_pool)

    best_indexes, selection_matrix = _get_sorted_solutions_indexes(
        selected_sections_matrix, max_nb_journeys, idx_of_jrnys_must_keep
    )

    logger.debug("Nb best solutions: {0}".format(best_indexes.shape[0]))

    the_best_index = best_indexes[0]

    logger.debug("Trying to find the best of best")
    """
    Let's find the best of best :)
    """
    # If there are several solutions with the same coverage score and the same number of sections
    if best_indexes.shape[0] != 1:
        requested_dt = request['datetime']
        is_clockwise = request.get('clockwise', True)

        def combinations_sorter(v):
            # We sort the solutions by the sum of their journeys' pseudo durations.
            # Builtin sum is used on purpose: np.sum over a generator only works
            # through a deprecated numpy fallback.
            return sum(
                get_pseudo_duration(jrny, requested_dt, is_clockwise)
                for jrny in np.array(candidates_pool)[np.where(selection_matrix[v, :])]
            )

        the_best_index = min(best_indexes, key=combinations_sorter)

    logger.debug('Removing non selected journeys')
    for jrny in candidates_pool[np.where(selection_matrix[the_best_index, :] == 0)]:
        journey_filter.mark_as_dead(jrny, is_debug, 'Filtered by max_nb_journeys')

    journey_filter.delete_journeys((resp,), request)
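
The docstring example can be checked against a brute-force version of the same objective: among all subsets of max_nb_journeys journeys, prefer the one covering the most distinct sections, then the one with the fewest sections overall. A minimal sketch (journeys reduced to tuples of section labels; all names illustrative):

import itertools


def cull_brute_force(journeys, max_nb_journeys):
    """journeys: dict of name -> tuple of section labels; returns the kept names."""
    def score(combo):
        sections = [journeys[name] for name in combo]
        covered = len(set(itertools.chain.from_iterable(sections)))
        total = sum(len(s) for s in sections)
        # maximize coverage first, then minimize the total number of sections
        return (-covered, total)

    return min(itertools.combinations(journeys, max_nb_journeys), key=score)


journeys = {
    'Journey_1': ('Line 1', 'Line 8', 'Bus 172'),
    'Journey_2': ('Line 14', 'Line 6', 'Bus 165'),
    'Journey_3': ('Line 14', 'Line 6', 'Line 8', 'Bus 165'),
}
# Journey_1 + Journey_2 cover every line with the fewest sections, as in the docstring.
assert sorted(cull_brute_force(journeys, 2)) == ['Journey_1', 'Journey_2']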
Example #5
def culling_journeys(resp, request):
    """
    Remove some journeys if there are too many of them to have max_nb_journeys journeys.
    
    resp.journeys should be sorted before this function is called

    The goal is to choose a bunch of journeys(max_nv_journeys) that covers as many as possible sections
    but have as few as possible sum(sections)

    Ex:

    From:

    Journey_1 : Line 1 -> Line 8 -> Bus 172
    Journey_2 : Line 14 -> Line 6 -> Bus 165
    Journey_3 : Line 14 -> Line 6 ->Line 8 -> Bus 165

    W'd like to choose two journeys. The algo will return Journey_1 and Journey2.

    Because
    With Journey_1 and Journey_3, they cover all lines but have 5 transfers in all
    With Journey_2 and Journey_3, they don't cover all lines(Line 1 is missing) and have 5 transfers in all

    With Journey_1 and Journey_2, they cover all lines and have only 4 transfers in all -> OK

    No removing done in debug
    """
    logger = logging.getLogger(__name__)

    if not request["max_nb_journeys"] or request["max_nb_journeys"] >= len(
            resp.journeys):
        logger.debug('No need to cull journeys')
        return

    logger.debug('Trying to cull the journeys')
    """
    To create a candidates pool, we choose only journeys that are NOT tagged as 'comfort' and 'best' and we create a
    section set from that pool

    Ex:
    Journey_1 (Best): Line 14 -> Line 8 -> Bus 172
    Journey_2 : Line 14 -> Line 6 -> Bus 165
    Journey_3 : Line 14 -> Line 8 -> Bus 165

    The candidate pool will be like [Journey_2, Journey_3]
    The sections set will be like set([Line 14, Line 6, Line 8, Bus 165])
    """
    candidates_pool, sections_set, idx_of_jrnys_must_keep = _build_candidate_pool_and_sections_set(resp)

    nb_journeys_must_have = len(idx_of_jrnys_must_keep)
    logger.debug("There are {0} journeys we must keep".format(nb_journeys_must_have))
    if (request["max_nb_journeys"] - nb_journeys_must_have) <= 0:
        # At this point, max_nb_journeys is smaller than or equal to nb_journeys_must_have; we have to make choices

        def _inverse_selection(d, indexes):
            select = np.in1d(list(range(d.shape[0])), indexes)
            return d[~select]

        # Here we mark all journeys as dead that are not must-have
        for jrny in _inverse_selection(candidates_pool, idx_of_jrnys_must_keep):
            journey_filter.mark_as_dead(jrny, 'Filtered by max_nb_journeys')

        if request["max_nb_journeys"] == nb_journeys_must_have:
            logger.debug('max_nb_journeys equals to nb_journeys_must_have')
            journey_filter.delete_journeys((resp, ), request)
            return

        logger.debug(
            'max_nb_journeys:{0} is smaller than nb_journeys_must_have:{1}'.format(
                request["max_nb_journeys"], nb_journeys_must_have
            )
        )

        # At this point, resp.journeys should contain only must-have journeys
        list_dict = collections.defaultdict(list)
        for jrny in resp.journeys:
            if not journey_filter.to_be_deleted(jrny):
                list_dict[jrny.type].append(jrny)

        sorted_by_type_journeys = []
        for t in JOURNEY_TYPES_TO_RETAIN:
            sorted_by_type_journeys.extend(list_dict.get(t, []))

        for jrny in sorted_by_type_journeys[request["max_nb_journeys"]:]:
            journey_filter.mark_as_dead(jrny, 'Filtered by max_nb_journeys')

        journey_filter.delete_journeys((resp,), request)
        return

    nb_journeys_to_find = request["max_nb_journeys"]
    logger.debug('Trying to find {0} journeys from {1}'.format(nb_journeys_to_find, candidates_pool.shape[0]))
    """
    Ex:
    Journey_2 : Line 14 -> Line 6 -> Bus 165
    Journey_3 : Line 14 -> Line 8 -> Bus 165

    The candidate pool will be like [Journey_2, Journey_3]
    The sections set will be like set([Line 14, Line 6, Line 8, Bus 165])

    selected_sections_matrix:
    [[1,1,0,1] -> journey_2
     [1,0,1,1] -> journey_3
    ]
    """
    selected_sections_matrix = _build_selected_sections_matrix(sections_set, candidates_pool)

    best_indexes, selection_matrix = _get_sorted_solutions_indexes(
        selected_sections_matrix, nb_journeys_to_find, idx_of_jrnys_must_keep)

    logger.debug("Nb best solutions: {0}".format(best_indexes.shape[0]))

    the_best_index = best_indexes[0]

    logger.debug("Trying to find the best of best")
    """
    Let's find the best of best :)
    """
    # If there are several solutions with the same coverage score and the same number of sections
    if best_indexes.shape[0] != 1:
        requested_dt = request['datetime']
        is_clockwise = request.get('clockwise', True)

        def combinations_sorter(v):
            # We sort the solutions by the sum of their journeys' pseudo durations.
            # Builtin sum is used on purpose: np.sum over a generator only works
            # through a deprecated numpy fallback.
            return sum(
                get_pseudo_duration(jrny, requested_dt, is_clockwise)
                for jrny in np.array(candidates_pool)[np.where(selection_matrix[v, :])]
            )

        the_best_index = min(best_indexes, key=combinations_sorter)

    logger.debug('Removing non selected journeys')
    for jrny in candidates_pool[np.where(selection_matrix[the_best_index, :] == 0)]:
        journey_filter.mark_as_dead(jrny, 'Filtered by max_nb_journeys')

    journey_filter.delete_journeys((resp,), request)
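
A note on combinations_sorter: summing a generator with np.sum only works through a deprecated numpy fallback to the builtin sum, which is why the versions above call sum directly. A quick illustration of the two safe spellings:

import numpy as np

durations = [120, 300, 45]
gen_total = sum(d for d in durations)                            # builtin sum handles generators
arr_total = int(np.sum(np.fromiter(durations, dtype=np.int64)))  # numpy-native alternative
assert gen_total == arr_total == 465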
Example #6
    def fill_journeys(self, request_type, api_request, instance):
        logger = logging.getLogger(__name__)

        # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
        origin_detail = self.get_entrypoint_detail(api_request.get('origin'),
                                                   instance)
        destination_detail = self.get_entrypoint_detail(
            api_request.get('destination'), instance)
        # we store the origin/destination detail in g to be able to use them after the marshall
        g.origin_detail = origin_detail
        g.destination_detail = destination_detail

        api_request['origin'] = get_kraken_id(
            origin_detail) or api_request.get('origin')
        api_request['destination'] = get_kraken_id(
            destination_detail) or api_request.get('destination')

        # building ridesharing request from "original" request
        ridesharing_req = deepcopy(api_request)

        if not api_request['origin_mode']:
            api_request['origin_mode'] = ['walking']
        if not api_request['destination_mode']:
            api_request['destination_mode'] = ['walking']

        krakens_call = get_kraken_calls(api_request)

        request = deepcopy(api_request)
        min_asked_journeys = get_or_default(request, 'min_nb_journeys', 1)
        min_journeys_calls = get_or_default(request, '_min_journeys_calls', 1)

        responses = []
        nb_try = 0
        while request is not None and (
            (nb_journeys(responses) < min_asked_journeys and nb_try < min_asked_journeys)
            or nb_try < min_journeys_calls
        ):
            nb_try = nb_try + 1

            tmp_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call)
            _tag_by_mode(tmp_resp)
            _tag_direct_path(tmp_resp)
            _tag_bike_in_pt(tmp_resp)
            journey_filter._filter_too_long_journeys(tmp_resp, request)
            responses.extend(tmp_resp)  # we keep the error for building the response
            if nb_journeys(tmp_resp) == 0:
                # no new journeys found, we stop
                break

            request = self.create_next_kraken_request(request, tmp_resp)

            # we filter unwanted journeys by side effects
            journey_filter.filter_journeys(responses, instance, api_request)

            # We allow one more call to kraken if there is no valid journey.
            if nb_journeys(responses) == 0:
                min_journeys_calls = max(min_journeys_calls, 2)

        journey_filter.final_filter_journeys(responses, instance, api_request)
        pb_resp = merge_responses(responses)

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)

        if instance.ridesharing_services and \
                ('ridesharing' in ridesharing_req['origin_mode']
                 or 'ridesharing' in ridesharing_req['destination_mode']):
            logger.debug('trying to add ridesharing journeys')
            try:
                decorate_journeys(pb_resp, instance, api_request)
            except Exception:
                logger.exception('Error while retrieving ridesharing ads')
        else:
            for j in pb_resp.journeys:
                if 'ridesharing' in j.tags:
                    journey_filter.mark_as_dead(j, 'no_matching_ridesharing_found')

        journey_filter.delete_journeys((pb_resp,), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance,
                                       api_request['clockwise'])

        return pb_resp
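
get_or_default is a jormungandr helper; below is a minimal sketch of the fallback semantics this code appears to rely on (an assumption about the helper, not its actual implementation):

def get_or_default(obj, key, default):
    # Assumed behaviour: fall back to `default` when the key is missing or
    # holds a falsy value (None, 0, ...), mirroring the `request['x'] or 1` idiom.
    value = obj.get(key) if hasattr(obj, 'get') else getattr(obj, key, None)
    return value if value else default


assert get_or_default({'min_nb_journeys': None}, 'min_nb_journeys', 1) == 1
assert get_or_default({'min_nb_journeys': 4}, 'min_nb_journeys', 1) == 4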
Example #7
    def fill_journeys(self, request_type, api_request, instance):
        logger = logging.getLogger(__name__)

        if api_request['max_nb_journeys'] is not None and api_request['max_nb_journeys'] <= 0:
            return response_pb2.Response()

        # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
        origin_detail = self.get_entrypoint_detail(api_request.get('origin'),
                                                   instance)
        destination_detail = self.get_entrypoint_detail(
            api_request.get('destination'), instance)
        # we store the origin/destination detail in g to be able to use them after the marshall
        g.origin_detail = origin_detail
        g.destination_detail = destination_detail

        api_request['origin'] = get_kraken_id(
            origin_detail) or api_request.get('origin')
        api_request['destination'] = get_kraken_id(
            destination_detail) or api_request.get('destination')

        # building ridesharing request from "original" request
        ridesharing_req = deepcopy(api_request)

        if not api_request['origin_mode']:
            api_request['origin_mode'] = ['walking']
        if not api_request['destination_mode']:
            api_request['destination_mode'] = ['walking']

        # Return the possible (origin_mode, destination_mode) combinations
        krakens_call = get_kraken_calls(api_request)

        # We need the original request (api_request) for filtering, but request
        # is modified by create_next_kraken_request function.
        request = deepcopy(api_request)

        # min_nb_journeys option
        if request['min_nb_journeys']:
            min_nb_journeys = request['min_nb_journeys']
        else:
            min_nb_journeys = 1

        responses = []
        nb_try = 0
        nb_qualified_journeys = 0
        nb_previously_qualified_journeys = 0
        last_chance_retry = False

        min_journeys_calls = request.get('_min_journeys_calls', 1)
        max_journeys_calls = app.config.get('MAX_JOURNEYS_CALLS', 20)
        max_nb_calls = min(min_nb_journeys, max_journeys_calls)

        # Initialize a context for distributed
        distributed_context = self.get_context()

        while request is not None and (
            (nb_qualified_journeys < min_nb_journeys and nb_try < max_nb_calls)
                or nb_try < min_journeys_calls):

            nb_try = nb_try + 1

            # The parameter 'min_nb_journeys' isn't used in the following cases:
            # - If there's more than one (origin_mode, destination_mode) couple.
            # - If no journey was qualified in the previous response: the last-chance request is sent without it
            if len(krakens_call) > 1 or last_chance_retry:
                request['min_nb_journeys'] = 0
            elif api_request['min_nb_journeys']:
                min_nb_journeys_left = min_nb_journeys - nb_qualified_journeys
                request['min_nb_journeys'] = max(0, min_nb_journeys_left)

            new_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call, distributed_context)
            _tag_by_mode(new_resp)
            _tag_direct_path(new_resp)
            _tag_bike_in_pt(new_resp)

            if nb_journeys(new_resp) == 0:
                # no new journeys found, we stop
                # we still append new_resp because it may contain journeys tagged as dead
                responses.extend(new_resp)
                break

            request = self.create_next_kraken_request(request, new_resp)

            # we filter unwanted journeys in the new response
            # note that filter_journeys returns a generator which will be evaluated later
            filtered_new_resp = journey_filter.filter_journeys(
                new_resp, instance, api_request)

            # duplicate the generator
            tmp1, tmp2 = itertools.tee(filtered_new_resp)
            qualified_journeys = journey_filter.get_qualified_journeys(
                responses)

            # now we want to filter similar journeys in the new response, which is done in 2 steps
            # In the first step, we compare journeys from the new response only, 2 by 2;
            # hopefully, it may lead to some early returns in the second step and improve perf a little
            # In the second step, we compare the journeys from the new response with those that have
            # already been qualified in the former iterations
            # note that journey_pairs_pool is an iterable of 2-element tuples of journeys
            journey_pairs_pool = itertools.chain(
                # First step: compare journeys from the new response only
                itertools.combinations(tmp1, 2),
                # Second step:
                # we use the itertools.product to create combinations between qualified journeys and new journeys
                # Ex:
                # new_journeys = [n_1, n_2]
                # qualified_journeys = [q_1, q_2, q_3]
                # itertools.product(new_journeys, qualified_journeys) gives combinations as follows:
                # (n_1, q_1), (n_1, q_2),(n_1, q_3),(n_2, q_1),(n_2, q_2),(n_2, q_3)
                itertools.product(tmp2, qualified_journeys),
            )

            journey_filter.filter_similar_vj_journeys(journey_pairs_pool,
                                                      api_request)

            responses.extend(new_resp)  # we keep the error for building the response

            nb_qualified_journeys = nb_journeys(responses)

            if api_request['timeframe_duration']:
                # If timeframe_duration is active, it is useless to call Kraken again:
                # it has already sent back everything it could
                break

            if nb_previously_qualified_journeys == nb_qualified_journeys:
                # If there is no additional qualified journey in the kraken response,
                # another request is sent to try to find more journeys, just in case...
                if last_chance_retry:
                    break
                last_chance_retry = True
            nb_previously_qualified_journeys = nb_qualified_journeys

            if nb_qualified_journeys == 0:
                min_journeys_calls = max(min_journeys_calls, 2)

        logger.debug('nb of call kraken: %i', nb_try)

        journey_filter.apply_final_journey_filters(responses, instance,
                                                   api_request)
        pb_resp = merge_responses(responses, api_request['debug'])

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)

        if instance.ridesharing_services and (
                'ridesharing' in ridesharing_req['origin_mode']
                or 'ridesharing' in ridesharing_req['destination_mode']):
            logger.debug('trying to add ridesharing journeys')
            try:
                decorate_journeys(pb_resp, instance, api_request)
            except Exception:
                logger.exception('Error while retrieving ridesharing ads')
        else:
            for j in pb_resp.journeys:
                if 'ridesharing' in j.tags:
                    journey_filter.mark_as_dead(
                        j, api_request.get('debug'),
                        'no_matching_ridesharing_found')

        journey_filter.delete_journeys((pb_resp,), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance,
                                       api_request['clockwise'])
        return pb_resp
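
The pairing logic in the middle of the loop (tee the filtered generator, compare new journeys among themselves, then against the already-qualified ones) can be shown in isolation. A minimal sketch with strings standing in for journeys:

import itertools

new_journeys = ['n_1', 'n_2']
qualified_journeys = ['q_1', 'q_2', 'q_3']

# tee duplicates the single-use iterator so both steps can consume it.
tmp1, tmp2 = itertools.tee(iter(new_journeys))

journey_pairs_pool = itertools.chain(
    itertools.combinations(tmp1, 2),              # step 1: new vs new
    itertools.product(tmp2, qualified_journeys),  # step 2: new vs qualified
)
assert list(journey_pairs_pool) == [
    ('n_1', 'n_2'),
    ('n_1', 'q_1'), ('n_1', 'q_2'), ('n_1', 'q_3'),
    ('n_2', 'q_1'), ('n_2', 'q_2'), ('n_2', 'q_3'),
]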