示例#1
0
def apply_final_journey_filters(response_list, instance, request):
    """
    Final pass: Filter by side effect the list of pb responses's journeys

    Nota: All filters below are applied only once, after all calls to kraken are done
    """
    # in debug mode even journeys already marked dead are re-examined
    if request.get('debug', False):
        journey_generator = get_all_journeys
    else:
        journey_generator = get_qualified_journeys

    # remove similar journeys (same lines and same succession of stop_points)
    if get_or_default(request, '_final_line_filter', False):
        pairs = itertools.combinations(journey_generator(response_list), 2)
        _filter_similar_line_journeys(pairs, request)

    # filter journeys having "shared sections" (same succession of stop_points + custom rules)
    if get_or_default(request, 'no_shared_section', False):
        pairs = itertools.combinations(journey_generator(response_list), 2)
        filter_shared_sections_journeys(pairs, request)

    # filter journeys having too many connections compared to the minimum found
    _filter_too_much_connections(journey_generator(response_list), instance, request)
示例#2
0
    def fill_journeys(self, request_type, api_request, instance):
        """
        Call kraken in a loop until enough journeys are gathered, then
        filter, merge, sort, tag and paginate the protobuf responses.

        :param request_type: kind of API request, forwarded to call_kraken
        :param api_request: original user request; kept untouched, a deepcopy
                            is mutated between successive kraken calls
        :param instance: coverage instance holding the config (journey_order, ...)
        :return: a single merged protobuf response
        """

        krakens_call = get_kraken_calls(api_request)

        # work on a copy: `request` is rebuilt for each subsequent kraken call
        request = deepcopy(api_request)
        min_asked_journeys = get_or_default(request, 'min_nb_journeys', 1)
        min_journeys_calls = get_or_default(request, '_min_journeys_calls', 1)

        responses = []
        nb_try = 0
        # loop until we have enough journeys or the try budget is exhausted
        while request is not None and \
                ((nb_journeys(responses) < min_asked_journeys and nb_try < min_asked_journeys)
                 or nb_try < min_journeys_calls):
            nb_try = nb_try + 1

            tmp_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call)
            # tag journeys before filtering so the filters can rely on the tags
            _tag_by_mode(tmp_resp)
            _tag_direct_path(tmp_resp)
            journey_filter._filter_too_long_journeys(tmp_resp, request)
            responses.extend(
                tmp_resp)  # we keep the error for building the response
            if nb_journeys(tmp_resp) == 0:
                # no new journeys found, we stop
                break

            request = self.create_next_kraken_request(request, tmp_resp)

            # we filter unwanted journeys by side effects
            journey_filter.filter_journeys(responses, instance, api_request)

            #We allow one more call to kraken if there is no valid journey.
            if nb_journeys(responses) == 0:
                min_journeys_calls = max(min_journeys_calls, 2)

        journey_filter.final_filter_journeys(responses, instance, api_request)
        pb_resp = merge_responses(responses)

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)
        # physically remove the journeys marked as dead by the filters
        journey_filter.delete_journeys((pb_resp, ), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance,
                                       api_request['clockwise'])

        return pb_resp
示例#3
0
def _filter_max_successive_physical_mode(journeys, instance, request):
    """
    eliminates journeys with specified public_transport.physical_mode more than
    _max_successive_physical_mode (used for STIF buses)
    """
    logger = logging.getLogger(__name__)
    limit = get_or_default(
        request, '_max_successive_physical_mode', 0)
    if limit == 0:
        return
    for journey in journeys:
        if to_be_deleted(journey):
            continue

        streak = 0
        for section in journey.sections:
            if section.type != response_pb2.PUBLIC_TRANSPORT:
                continue
            if section.pt_display_informations.uris.physical_mode == instance.successive_physical_mode_to_limit_id:
                streak += 1
            elif streak <= limit:
                # reset the streak only while it is still acceptable: once the
                # limit is exceeded we keep the count so the journey is flagged
                streak = 0

        if streak > limit:
            logger.debug(
                "the journey {} has a too much successive {}, we delete it".
                format(journey.internal_id,
                       instance.successive_physical_mode_to_limit_id))
            mark_as_dead(journey, "too_much_successive_physical_mode")
示例#4
0
def _filter_max_successive_buses(journeys, request):
    """
    eliminates journeys with public_transport.bus more than _max_successive_buses
    """
    logger = logging.getLogger(__name__)
    limit = get_or_default(request, '_max_successive_buses', 0)
    if limit == 0:
        return
    for journey in journeys:
        if _to_be_deleted(journey):
            continue

        streak = 0
        for section in journey.sections:
            if section.type != response_pb2.PUBLIC_TRANSPORT:
                continue
            if section.pt_display_informations.uris.physical_mode == 'Bus':
                streak += 1
            elif streak <= limit:
                # reset only while under the limit: once exceeded, keep the
                # count so the journey gets flagged after the loop
                streak = 0

        if streak > limit:
            logger.debug(
                "the journey {} has a too much successive buses, we delete it".
                format(journey.internal_id))
            mark_as_dead(journey, "too_much_successive_buses")
示例#5
0
def filter_journeys(responses, instance, request):
    """
    Filter by side effect the list of pb responses's journeys

    Builds a chain of per-request filters and applies it to the qualified
    journeys; returns the result of the composed filter.
    """
    is_debug = request.get('debug')
    # DEBUG: log every qualified journey before filtering
    # (plain loop, not a comprehension: we only want the side effect)
    if is_debug:
        for j in get_qualified_journeys(responses):
            _debug_journey(j)

    min_nb_transfers = get_or_default(request, 'min_nb_transfers', 0)

    # Note that we use the functools.partial to capture the arguments
    filters = [
        partial(filter_too_short_heavy_journeys, request=request),
        partial(filter_too_long_waiting, is_debug=is_debug),
        partial(filter_min_transfers,
                is_debug=is_debug,
                min_nb_transfers=min_nb_transfers)
    ]

    # we add more filters in some special cases:
    max_successive = get_or_default(request, '_max_successive_physical_mode',
                                    0)
    if max_successive != 0:
        limit_id = instance.successive_physical_mode_to_limit_id
        filters.append(
            partial(filter_max_successive_physical_mode,
                    is_debug=is_debug,
                    successive_physical_mode_to_limit_id=limit_id,
                    max_successive_physical_mode=max_successive))

    dp = get_or_default(request, 'direct_path', 'indifferent')
    if dp != 'indifferent':
        filters.append(partial(filter_direct_path, is_debug=is_debug, dp=dp))

    composed_filter = ComposedFilter()
    for f in filters:
        composed_filter.add_filter(f)

    return composed_filter.compose_filters()(get_qualified_journeys(responses))
示例#6
0
    def fill_journeys(self, request_type, api_request, instance):
        """
        Call kraken in a loop until enough journeys are gathered, then
        filter, merge, sort, tag and paginate the protobuf responses.

        :param request_type: kind of API request, forwarded to call_kraken
        :param api_request: original user request; a deepcopy is mutated
                            between successive kraken calls
        :param instance: coverage instance holding the config
        :return: a single merged protobuf response
        """

        krakens_call = get_kraken_calls(api_request)

        # work on a copy: `request` is rebuilt for each subsequent kraken call
        request = deepcopy(api_request)
        min_asked_journeys = get_or_default(request, 'min_nb_journeys', 1)
        min_journeys_calls = get_or_default(request, '_min_journeys_calls', 1)

        responses = []
        nb_try = 0
        # loop until we have enough journeys or the try budget is exhausted
        while request is not None and \
                ((nb_journeys(responses) < min_asked_journeys and nb_try < min_asked_journeys)
                 or nb_try < min_journeys_calls):
            nb_try = nb_try + 1

            tmp_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call)
            responses.extend(
                tmp_resp)  # we keep the error for building the response
            if nb_journeys(tmp_resp) == 0:
                # no new journeys found, we stop
                break

            request = self.create_next_kraken_request(request, tmp_resp)

            # we filter unwanted journeys by side effects
            journey_filter.filter_journeys(responses, instance, api_request)

        journey_filter.final_filter_journeys(responses, instance, api_request)
        pb_resp = merge_responses(responses)

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        tag_journeys(pb_resp)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance)

        return pb_resp
示例#7
0
def _filter_min_transfers(journeys, instance, request):
    """
    eliminates journeys with number of connections less then min_nb_transfers among journeys
    """
    logger = logging.getLogger(__name__)
    threshold = get_or_default(request, 'min_nb_transfers', 0)

    for journey in journeys:
        if to_be_deleted(journey):
            continue
        if get_nb_connections(journey) >= threshold:
            continue
        logger.debug("the journey {} has not enough connections, we delete it".format(journey.internal_id))
        mark_as_dead(journey, "not_enough_connections")
示例#8
0
def apply_final_journey_filters_post_finalize(response_list, request):
    """
    Final pass: Filter by side effect the list of pb responses's journeys
    """
    # in debug mode even journeys already marked dead are re-examined
    if request.get('debug', False):
        journey_generator = get_all_journeys
    else:
        journey_generator = get_qualified_journeys

    # remove similar journeys (same lines and same succession of stop_points)
    if get_or_default(request, '_final_line_filter', False):
        pairs = itertools.combinations(journey_generator(response_list), 2)
        _filter_similar_line_and_crowfly_journeys(pairs, request)
示例#9
0
def final_filter_journeys(response_list, instance, request):
    """
    Filter by side effect the list of pb responses's journeys
    Final pass : we remove similar journeys (same lines and stop_points of change)
    """

    # flatten every response's journeys into one temporary list for clarity
    journeys = []
    for resp in response_list:
        journeys.extend(resp.journeys)

    if get_or_default(request, '_final_line_filter', False):
        _filter_similar_line_journeys(journeys, request)

    _filter_too_much_connections(journeys, instance, request)

    return response_list
示例#10
0
def final_filter_journeys(response_list, instance, request):
    """
    Filter by side effect the list of pb responses's journeys
    Final pass : we remove similar journeys (same lines and stop_points of change)
    """

    if get_or_default(request, '_final_line_filter', False):
        # compare every pair of still-qualified journeys
        pairs = itertools.combinations(get_qualified_journeys(response_list), 2)
        _filter_similar_line_journeys(pairs, request)

    _filter_too_much_connections(get_qualified_journeys(response_list),
                                 instance, request)
0
def _filter_too_much_connections(journeys, instance, request):
    """
    eliminates journeys with number of connections more then minimum connections among journeys
    in the result + _max_additional_connections

    `journeys` may be any iterable, including a one-shot generator: it must be
    consumed twice (once to find the minimum, once to filter), so it is tee'd
    first.  The previous version iterated the argument twice directly, which
    silently filtered nothing when given a generator.
    """
    logger = logging.getLogger(__name__)
    max_additional_connections = get_or_default(request, '_max_additional_connections',
                                            instance.max_additional_connections)
    import itertools

    # two independent passes over a possibly one-shot iterable
    it_for_min, it_for_filter = itertools.tee(journeys, 2)
    min_connections = get_min_connections(it_for_min)
    if min_connections is not None:
        max_connections_allowed = max_additional_connections + min_connections
        for j in it_for_filter:
            if to_be_deleted(j):
                continue

            if get_nb_connections(j) > max_connections_allowed:
                logger.debug("the journey {} has a too much connections, we delete it".format(j.internal_id))
                mark_as_dead(j, "too_much_connections")
示例#12
0
def _filter_too_much_connections(journeys, instance, request):
    """
    eliminates journeys with a number of connections strictly superior to
    the number of connections of the best pt_journey + _max_additional_connections
    """
    logger = logging.getLogger(__name__)
    extra_allowed = get_or_default(
        request, '_max_additional_connections', instance.max_additional_connections
    )
    import itertools

    # journeys may be a generator: one copy for the best, one for the scan
    best_pass, scan_pass = itertools.tee(journeys, 2)
    best_connections = get_best_pt_journey_connections(best_pass, request)
    is_debug = request.get('debug', False)
    if best_connections is None:
        return
    threshold = extra_allowed + best_connections
    for journey in scan_pass:
        if get_nb_connections(journey) > threshold:
            logger.debug("the journey {} has a too much connections, we delete it".format(journey.internal_id))
            mark_as_dead(journey, is_debug, "too_much_connections")
示例#13
0
def _filter_direct_path(journeys, instance, request):
    """
    eliminates journeys that are not matching direct path parameter (none, only or indifferent)
    """
    logger = logging.getLogger(__name__)
    dp = get_or_default(request, 'direct_path', 'indifferent')

    if dp == 'indifferent':
        return

    for journey in journeys:
        if to_be_deleted(journey):
            continue
        is_direct = 'non_pt' in journey.tags
        if dp == 'none' and is_direct:
            logger.debug("the journey {} is direct, we delete it as param direct_path=none"
                         .format(journey.internal_id))
            mark_as_dead(journey, "direct_path_none")
        if dp == 'only' and not is_direct:
            logger.debug("the journey {} uses pt, we delete it as param direct_path=only"
                         .format(journey.internal_id))
            mark_as_dead(journey, "direct_path_only")
示例#14
0
def _filter_too_much_connections(journeys, instance, request):
    """
    eliminates journeys with number of connections more then minimum connections among journeys
    in the result + _max_additional_connections
    """
    logger = logging.getLogger(__name__)
    extra_allowed = get_or_default(
        request, '_max_additional_connections',
        instance.max_additional_connections)
    import itertools
    # journeys may be a generator: one copy to find the min, one to filter
    min_pass, scan_pass = itertools.tee(journeys, 2)
    best = get_min_connections(min_pass)
    is_debug = request.get('debug')
    if best is None:
        return
    threshold = extra_allowed + best
    for journey in scan_pass:
        if get_nb_connections(journey) > threshold:
            logger.debug(
                "the journey {} has a too much connections, we delete it".
                format(journey.internal_id))
            mark_as_dead(journey, is_debug, "too_much_connections")
示例#15
0
    def fill_journeys(self, request_type, api_request, instance):
        """
        Call kraken until min_nb_journeys journeys are gathered or no
        progress is made, then merge, sort, tag and cull the responses.

        :param request_type: kind of API request, forwarded to call_kraken
        :param api_request: original user request; a deepcopy is mutated
                            between successive kraken calls
        :param instance: coverage instance holding the config
        :return: a single merged protobuf response
        """

        krakens_call = get_kraken_calls(api_request)

        request = deepcopy(api_request)
        min_asked_journeys = get_or_default(request, 'min_nb_journeys', 1)

        responses = []
        # used to detect when a new call brings no additional journey
        last_nb_journeys = 0
        while nb_journeys(responses) < min_asked_journeys:

            tmp_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call)

            responses.extend(tmp_resp)
            new_nb_journeys = nb_journeys(responses)
            if new_nb_journeys == 0:
                #no new journeys found, we stop
                break

            #we filter unwanted journeys by side effects
            journey_filter.filter_journeys(responses,
                                           instance,
                                           request=request,
                                           original_request=api_request)

            if last_nb_journeys == new_nb_journeys:
                #we are stuck with the same number of journeys, we stops
                break
            last_nb_journeys = new_nb_journeys

            request = create_next_kraken_request(request, responses)

        pb_resp = merge_responses(responses)
        sort_journeys(pb_resp, instance.journey_order, request['clockwise'])
        tag_journeys(pb_resp)
        culling_journeys(pb_resp, request)

        return pb_resp
示例#16
0
def create_pb_request(requested_type, request, dep_mode, arr_mode, direct_path_type):
    """Parse the request dict and create the protobuf version

    :param requested_type: pb API type (e.g. journeys, NMPLANNER)
    :param request: flat dict of user/request parameters
    :param dep_mode: fallback mode used at the origin
    :param arr_mode: fallback mode used at the destination
    :param direct_path_type: "only" restricts the query to direct paths
    :return: a filled request_pb2.Request
    """
    # TODO: bench if the creation of the request each time is expensive
    req = request_pb2.Request()
    req.requested_api = requested_type
    req._current_datetime = date_to_timestamp(request['_current_datetime'])

    if "origin" in request and request["origin"]:
        if requested_type != type_pb2.NMPLANNER:
            # single origin, no access duration
            origins, durations = ([request["origin"]], [0])
        else:
            # in the n-m query, we have several origin points, with their corresponding access duration
            origins, durations = (request["origin"], request["origin_access_duration"])
        for place, duration in zip(origins, durations):
            location = req.journeys.origin.add()
            location.place = place
            location.access_duration = duration
    if "destination" in request and request["destination"]:
        if requested_type != type_pb2.NMPLANNER:
            destinations, durations = ([request["destination"]], [0])
        else:
            destinations, durations = (request["destination"], request["destination_access_duration"])
        for place, duration in zip(destinations, durations):
            location = req.journeys.destination.add()
            location.place = place
            location.access_duration = duration

    req.journeys.datetimes.append(request["datetime"])  # TODO remove this datetime list completly in another PR

    req.journeys.clockwise = request["clockwise"]
    # street-network (fallback) parameters: durations and speeds per mode
    sn_params = req.journeys.streetnetwork_params
    sn_params.max_walking_duration_to_pt = request["max_walking_duration_to_pt"]
    sn_params.max_bike_duration_to_pt = request["max_bike_duration_to_pt"]
    sn_params.max_bss_duration_to_pt = request["max_bss_duration_to_pt"]
    sn_params.max_car_duration_to_pt = request["max_car_duration_to_pt"]
    sn_params.max_car_no_park_duration_to_pt = request["max_car_no_park_duration_to_pt"]
    sn_params.walking_speed = request["walking_speed"]
    sn_params.bike_speed = request["bike_speed"]
    sn_params.car_speed = request["car_speed"]
    sn_params.bss_speed = request["bss_speed"]
    sn_params.car_no_park_speed = request["car_no_park_speed"]
    sn_params.origin_filter = request.get("origin_filter", "")
    sn_params.destination_filter = request.get("destination_filter", "")
    # we always want direct path, even for car
    sn_params.enable_direct_path = True

    # settings fallback modes
    sn_params.origin_mode = dep_mode
    sn_params.destination_mode = arr_mode

    # If we only want direct_paths, max_duration(time to pass in pt) is zero
    if direct_path_type == "only":
        req.journeys.max_duration = 0
    else:
        req.journeys.max_duration = request["max_duration"]

    req.journeys.max_transfers = request["max_transfers"]
    if request["max_extra_second_pass"]:
        req.journeys.max_extra_second_pass = request["max_extra_second_pass"]
    req.journeys.wheelchair = request["wheelchair"] or False  # default value is no wheelchair
    req.journeys.realtime_level = get_pb_data_freshness(request)

    if "details" in request and request["details"]:
        req.journeys.details = request["details"]

    req.journeys.walking_transfer_penalty = request['_walking_transfer_penalty']

    # uri restrictions coming from the query string
    for forbidden_uri in get_or_default(request, "forbidden_uris[]", []):
        req.journeys.forbidden_uris.append(forbidden_uri)
    for allowed_id in get_or_default(request, "allowed_id[]", []):
        req.journeys.allowed_id.append(allowed_id)

    # bike stays in pt only when both fallbacks are 'bike'
    req.journeys.bike_in_pt = (dep_mode == 'bike') and (arr_mode == 'bike')

    if request["free_radius_from"]:
        req.journeys.free_radius_from = request["free_radius_from"]
    if request["free_radius_to"]:
        req.journeys.free_radius_to = request["free_radius_to"]

    if request["min_nb_journeys"]:
        req.journeys.min_nb_journeys = request["min_nb_journeys"]

    req.journeys.night_bus_filter_max_factor = request['_night_bus_filter_max_factor']
    req.journeys.night_bus_filter_base_factor = request['_night_bus_filter_base_factor']

    if request['timeframe_duration']:
        req.journeys.timeframe_duration = int(request['timeframe_duration'])

    req.journeys.depth = request['depth']

    return req
示例#17
0
    def parse_journey_request(self, requested_type, request):
        """Parse the request dict and create the protobuf version

        Also stores self.origin_modes / self.destination_modes as a side
        effect, and removes 'walking' from a fallback-mode list when 'bss'
        is also requested (bss already covers walking).
        """
        req = request_pb2.Request()
        req.requested_api = requested_type
        req._current_datetime = date_to_timestamp(request["_current_datetime"])
        if "origin" in request and request["origin"]:
            if requested_type != type_pb2.NMPLANNER:
                # single origin with a zero access duration
                origins = ([request["origin"]], [0])
            else:
                # n-m query: several origins, each with its access duration
                origins = (request["origin"],
                           request["origin_access_duration"])
            for i in range(0, len(origins[0])):
                location = req.journeys.origin.add()
                location.place = origins[0][i]
                location.access_duration = origins[1][i]
        if "destination" in request and request["destination"]:
            if requested_type != type_pb2.NMPLANNER:
                destinations = ([request["destination"]], [0])
            else:
                destinations = (request["destination"],
                                request["destination_access_duration"])
            for i in range(0, len(destinations[0])):
                location = req.journeys.destination.add()
                location.place = destinations[0][i]
                location.access_duration = destinations[1][i]
            self.destination_modes = request["destination_mode"]
        else:
            self.destination_modes = ["walking"]
        if "datetime" in request and request["datetime"]:
            # a single int datetime is normalized into a one-element list
            if isinstance(request["datetime"], int):
                request["datetime"] = [request["datetime"]]
            for dte in request["datetime"]:
                req.journeys.datetimes.append(dte)
        req.journeys.clockwise = request["clockwise"]
        # street-network (fallback) parameters: durations and speeds per mode
        sn_params = req.journeys.streetnetwork_params
        sn_params.max_walking_duration_to_pt = request[
            "max_walking_duration_to_pt"]
        sn_params.max_bike_duration_to_pt = request["max_bike_duration_to_pt"]
        sn_params.max_bss_duration_to_pt = request["max_bss_duration_to_pt"]
        sn_params.max_car_duration_to_pt = request["max_car_duration_to_pt"]
        sn_params.walking_speed = request["walking_speed"]
        sn_params.bike_speed = request["bike_speed"]
        sn_params.car_speed = request["car_speed"]
        sn_params.bss_speed = request["bss_speed"]
        if "origin_filter" in request:
            sn_params.origin_filter = request["origin_filter"]
        else:
            sn_params.origin_filter = ""
        if "destination_filter" in request:
            sn_params.destination_filter = request["destination_filter"]
        else:
            sn_params.destination_filter = ""
        req.journeys.max_duration = request["max_duration"]
        req.journeys.max_transfers = request["max_transfers"]
        if request["max_extra_second_pass"]:
            req.journeys.max_extra_second_pass = request[
                "max_extra_second_pass"]
        req.journeys.wheelchair = request[
            "wheelchair"] or False  # default value is no wheelchair

        if request['data_freshness'] == 'realtime':
            req.journeys.realtime_level = type_pb2.REALTIME
        elif request['data_freshness'] == 'adapted_schedule':
            req.journeys.realtime_level = type_pb2.ADAPTED_SCHEDULE
        else:
            req.journeys.realtime_level = type_pb2.BASE_SCHEDULE

        if "details" in request and request["details"]:
            req.journeys.details = request["details"]

        req.journeys.walking_transfer_penalty = request[
            '_walking_transfer_penalty']

        self.origin_modes = request["origin_mode"]

        # NOTE(review): sn_params.origin_mode / destination_mode are never
        # assigned earlier in this method, so these checks compare against the
        # protobuf default (empty string) and the branches look unreachable
        # here — confirm whether they were meant to inspect self.origin_modes.
        if req.journeys.streetnetwork_params.origin_mode == "bike_rental":
            req.journeys.streetnetwork_params.origin_mode = "bss"
        if req.journeys.streetnetwork_params.destination_mode == "bike_rental":
            req.journeys.streetnetwork_params.destination_mode = "bss"
        for forbidden_uri in get_or_default(request, "forbidden_uris[]", []):
            req.journeys.forbidden_uris.append(forbidden_uri)
        for allowed_id in get_or_default(request, "allowed_id[]", []):
            req.journeys.allowed_id.append(allowed_id)
        if not "type" in request:
            request["type"] = "all"  #why ?

        #for the default scenario, we filter the walking if we have walking + bss

        # Technically, bss mode enable walking (if it is better than bss)
        # so if the user ask for walking and bss, we only keep bss
        for fallback_modes in self.origin_modes, self.destination_modes:
            if 'walking' in fallback_modes and 'bss' in fallback_modes:
                fallback_modes.remove('walking')

        return req
示例#18
0
    def fill_journeys(self, request_type, api_request, instance):
        """
        Resolve origin/destination ids, call kraken in a loop until enough
        journeys are gathered, then filter, merge, sort, tag and paginate
        the protobuf responses.

        :param request_type: kind of API request, forwarded to call_kraken
        :param api_request: original user request; origin/destination may be
                            rewritten to kraken ids, a deepcopy is mutated
                            between successive kraken calls
        :param instance: coverage instance holding the config
        :return: a single merged protobuf response
        """

        krakens_call = get_kraken_calls(api_request)

        # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
        origin_detail = self.get_entrypoint_detail(api_request.get('origin'),
                                                   instance)
        destination_detail = self.get_entrypoint_detail(
            api_request.get('destination'), instance)
        # we store the origin/destination detail in g to be able to use them after the marshall
        g.origin_detail = origin_detail
        g.destination_detail = destination_detail

        api_request['origin'] = get_kraken_id(
            origin_detail) or api_request.get('origin')
        api_request['destination'] = get_kraken_id(
            destination_detail) or api_request.get('destination')

        # work on a copy: `request` is rebuilt for each subsequent kraken call
        request = deepcopy(api_request)
        min_asked_journeys = get_or_default(request, 'min_nb_journeys', 1)
        min_journeys_calls = get_or_default(request, '_min_journeys_calls', 1)

        responses = []
        nb_try = 0
        # loop until we have enough journeys or the try budget is exhausted
        while request is not None and \
                ((nb_journeys(responses) < min_asked_journeys and nb_try < min_asked_journeys)
                 or nb_try < min_journeys_calls):
            nb_try = nb_try + 1

            tmp_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call)
            # tag journeys before filtering so the filters can rely on the tags
            _tag_by_mode(tmp_resp)
            _tag_direct_path(tmp_resp)
            _tag_bike_in_pt(tmp_resp)
            journey_filter._filter_too_long_journeys(tmp_resp, request)
            responses.extend(
                tmp_resp)  # we keep the error for building the response
            if nb_journeys(tmp_resp) == 0:
                # no new journeys found, we stop
                break

            request = self.create_next_kraken_request(request, tmp_resp)

            # we filter unwanted journeys by side effects
            journey_filter.filter_journeys(responses, instance, api_request)

            #We allow one more call to kraken if there is no valid journey.
            if nb_journeys(responses) == 0:
                min_journeys_calls = max(min_journeys_calls, 2)

        journey_filter.final_filter_journeys(responses, instance, api_request)
        pb_resp = merge_responses(responses)

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)
        # physically remove the journeys marked as dead by the filters
        journey_filter.delete_journeys((pb_resp, ), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance,
                                       api_request['clockwise'])

        return pb_resp
示例#19
0
def create_pb_request(requested_type, request, dep_mode, arr_mode):
    """Parse the request dict and create the protobuf version.

    :param requested_type: requested pb API type (e.g. PLANNER / NMPLANNER)
    :param request: validated request parameters (dict)
    :param dep_mode: fallback street-network mode used at the departure
    :param arr_mode: fallback street-network mode used at the arrival
    :return: a filled request_pb2.Request, ready to be sent to kraken
    """
    #TODO: bench if the creation of the request each time is expensive
    req = request_pb2.Request()
    req.requested_api = requested_type
    req._current_datetime = date_to_timestamp(request['_current_datetime'])

    def _add_locations(pb_locations, key):
        """Append the places of request[key] (with their access durations) to pb_locations."""
        if key in request and request[key]:
            if requested_type != type_pb2.NMPLANNER:
                # a single point, with no access duration
                places, durations = ([request[key]], [0])
            else:
                # in the n-m query, we have several points, with their corresponding access duration
                places, durations = (request[key],
                                     request[key + "_access_duration"])
            for place, duration in zip(places, durations):
                location = pb_locations.add()
                location.place = place
                location.access_duration = duration

    _add_locations(req.journeys.origin, "origin")
    _add_locations(req.journeys.destination, "destination")

    req.journeys.datetimes.append(
        request["datetime"]
    )  #TODO remove this datetime list completly in another PR

    req.journeys.clockwise = request["clockwise"]

    # street-network (fallback) parameters
    sn_params = req.journeys.streetnetwork_params
    sn_params.max_walking_duration_to_pt = request[
        "max_walking_duration_to_pt"]
    sn_params.max_bike_duration_to_pt = request["max_bike_duration_to_pt"]
    sn_params.max_bss_duration_to_pt = request["max_bss_duration_to_pt"]
    sn_params.max_car_duration_to_pt = request["max_car_duration_to_pt"]
    sn_params.walking_speed = request["walking_speed"]
    sn_params.bike_speed = request["bike_speed"]
    sn_params.car_speed = request["car_speed"]
    sn_params.bss_speed = request["bss_speed"]
    sn_params.origin_filter = request.get("origin_filter", "")
    sn_params.destination_filter = request.get("destination_filter", "")
    #we always want direct path, even for car
    sn_params.enable_direct_path = True

    #settings fallback modes
    sn_params.origin_mode = dep_mode
    sn_params.destination_mode = arr_mode

    req.journeys.max_duration = request["max_duration"]
    req.journeys.max_transfers = request["max_transfers"]
    if request["max_extra_second_pass"]:
        req.journeys.max_extra_second_pass = request["max_extra_second_pass"]
    req.journeys.wheelchair = request[
        "wheelchair"] or False  # default value is no wheelchair
    if request['data_freshness'] == 'realtime':
        req.journeys.realtime_level = type_pb2.REALTIME
    elif request['data_freshness'] == 'adapted_schedule':
        req.journeys.realtime_level = type_pb2.ADAPTED_SCHEDULE
    else:
        req.journeys.realtime_level = type_pb2.BASE_SCHEDULE

    if "details" in request and request["details"]:
        req.journeys.details = request["details"]

    req.journeys.walking_transfer_penalty = request[
        '_walking_transfer_penalty']

    for forbidden_uri in get_or_default(request, "forbidden_uris[]", []):
        req.journeys.forbidden_uris.append(forbidden_uri)

    return req
示例#20
0
    def fill_journeys(self, request_type, api_request, instance):
        """Compute the journeys for the given request.

        Calls kraken repeatedly until enough journeys are gathered (or no
        new journeys are found), tags and filters them, optionally adds
        ridesharing journeys, then merges, sorts and paginates the final
        protobuf response.

        NOTE(review): the exact ordering of the tag/filter/cull calls below
        appears significant (they work by side effect on the responses) —
        do not reorder without checking the filter implementations.
        """
        logger = logging.getLogger(__name__)

        # sometimes we need to change the entrypoint id (eg if the id is from another autocomplete system)
        origin_detail = self.get_entrypoint_detail(api_request.get('origin'),
                                                   instance)
        destination_detail = self.get_entrypoint_detail(
            api_request.get('destination'), instance)
        # we store the origin/destination detail in g to be able to use them after the marshall
        g.origin_detail = origin_detail
        g.destination_detail = destination_detail

        # replace the entrypoints by their kraken ids when available
        api_request['origin'] = get_kraken_id(
            origin_detail) or api_request.get('origin')
        api_request['destination'] = get_kraken_id(
            destination_detail) or api_request.get('destination')

        # building ridesharing request from "original" request
        # (copied before the origin/destination modes are defaulted below,
        # so we keep the modes the caller actually asked for)
        ridesharing_req = deepcopy(api_request)

        if not api_request['origin_mode']:
            api_request['origin_mode'] = ['walking']
        if not api_request['destination_mode']:
            api_request['destination_mode'] = ['walking']

        krakens_call = get_kraken_calls(api_request)

        # work on a copy: `request` is replaced by create_next_kraken_request
        # at each iteration while api_request keeps the original parameters
        request = deepcopy(api_request)
        min_asked_journeys = get_or_default(request, 'min_nb_journeys', 1)
        min_journeys_calls = get_or_default(request, '_min_journeys_calls', 1)

        responses = []
        nb_try = 0
        # keep calling kraken while we lack journeys, bounded by the number
        # of asked journeys and the minimum number of calls
        while request is not None and \
                ((nb_journeys(responses) < min_asked_journeys and nb_try < min_asked_journeys)
                 or nb_try < min_journeys_calls):
            nb_try = nb_try + 1

            tmp_resp = self.call_kraken(request_type, request, instance,
                                        krakens_call)
            _tag_by_mode(tmp_resp)
            _tag_direct_path(tmp_resp)
            _tag_bike_in_pt(tmp_resp)
            journey_filter._filter_too_long_journeys(tmp_resp, request)
            responses.extend(
                tmp_resp)  # we keep the error for building the response
            if nb_journeys(tmp_resp) == 0:
                # no new journeys found, we stop
                break

            request = self.create_next_kraken_request(request, tmp_resp)

            # we filter unwanted journeys by side effects
            journey_filter.filter_journeys(responses, instance, api_request)

            #We allow one more call to kraken if there is no valid journey.
            if nb_journeys(responses) == 0:
                min_journeys_calls = max(min_journeys_calls, 2)

        journey_filter.final_filter_journeys(responses, instance, api_request)
        pb_resp = merge_responses(responses)

        sort_journeys(pb_resp, instance.journey_order,
                      api_request['clockwise'])
        compute_car_co2_emission(pb_resp, api_request, instance)
        tag_journeys(pb_resp)

        # add ridesharing journeys only when the instance has ridesharing
        # services AND the original request asked for the ridesharing mode
        if instance.ridesharing_services and \
                ('ridesharing' in ridesharing_req['origin_mode']
                 or 'ridesharing' in ridesharing_req['destination_mode']):
            logging.getLogger(__name__).debug(
                'trying to add ridesharing journeys')
            try:
                decorate_journeys(pb_resp, instance, api_request)
            except Exception:
                # best effort: a ridesharing failure must not break the response
                logger.exception('Error while retrieving ridesharing ads')
        else:
            # ridesharing was not requested: discard journeys tagged as such
            for j in pb_resp.journeys:
                if 'ridesharing' in j.tags:
                    journey_filter.mark_as_dead(
                        j, 'no_matching_ridesharing_found')

        journey_filter.delete_journeys((pb_resp, ), api_request)
        type_journeys(pb_resp, api_request)
        culling_journeys(pb_resp, api_request)

        self._compute_pagination_links(pb_resp, instance,
                                       api_request['clockwise'])

        return pb_resp
示例#21
0
def create_pb_request(requested_type, request, dep_mode, arr_mode):
    """Parse the request dict and create the protobuf version.

    :param requested_type: requested pb API type (e.g. PLANNER / NMPLANNER)
    :param request: validated request parameters (dict)
    :param dep_mode: fallback street-network mode used at the departure
    :param arr_mode: fallback street-network mode used at the arrival
    :return: a filled request_pb2.Request, ready to be sent to kraken
    """
    #TODO: bench if the creation of the request each time is expensive
    req = request_pb2.Request()
    req.requested_api = requested_type

    def _add_locations(pb_locations, key):
        """Append the places of request[key] (with their access durations) to pb_locations."""
        if key in request and request[key]:
            if requested_type != type_pb2.NMPLANNER:
                # a single point, with no access duration
                places, durations = ([request[key]], [0])
            else:
                # in the n-m query, we have several points, with their corresponding access duration
                places, durations = (request[key],
                                     request[key + "_access_duration"])
            for place, duration in zip(places, durations):
                location = pb_locations.add()
                location.place = place
                location.access_duration = duration

    _add_locations(req.journeys.origin, "origin")
    _add_locations(req.journeys.destination, "destination")

    req.journeys.datetimes.append(
        request["datetime"]
    )  #TODO remove this datetime list completly in another PR

    req.journeys.clockwise = request["clockwise"]

    # street-network (fallback) parameters
    sn_params = req.journeys.streetnetwork_params
    sn_params.max_walking_duration_to_pt = request[
        "max_walking_duration_to_pt"]
    sn_params.max_bike_duration_to_pt = request["max_bike_duration_to_pt"]
    sn_params.max_bss_duration_to_pt = request["max_bss_duration_to_pt"]
    sn_params.max_car_duration_to_pt = request["max_car_duration_to_pt"]
    sn_params.walking_speed = request["walking_speed"]
    sn_params.bike_speed = request["bike_speed"]
    sn_params.car_speed = request["car_speed"]
    sn_params.bss_speed = request["bss_speed"]
    sn_params.origin_filter = request.get("origin_filter", "")
    sn_params.destination_filter = request.get("destination_filter", "")

    #settings fallback modes
    sn_params.origin_mode = dep_mode
    sn_params.destination_mode = arr_mode

    req.journeys.max_duration = request["max_duration"]
    req.journeys.max_transfers = request["max_transfers"]
    req.journeys.wheelchair = request["wheelchair"]
    req.journeys.disruption_active = request["disruption_active"]
    req.journeys.show_codes = request["show_codes"]

    if "details" in request and request["details"]:
        req.journeys.details = request["details"]

    for forbidden_uri in get_or_default(request, "forbidden_uris[]", []):
        req.journeys.forbidden_uris.append(forbidden_uri)

    return req