Example #1
    def call_kraken(self,
                    request_type,
                    request,
                    instance,
                    krakens_call,
                    context=None):
        """
        For each entry in krakens_call, call the kraken and aggregate the responses.

        Return the list of all responses.
        """
        # TODO: handle min_alternative_journeys
        # TODO: call first bss|bss and do not call walking|walking if no bss in first results
        record_custom_parameter('scenario', 'new_default')
        resp = []
        logger = logging.getLogger(__name__)
        futures = []
        reqctx = copy_flask_request_context()

        def worker(dep_mode, arr_mode, instance, request, flask_request_id):
            with copy_context_in_greenlet_stack(reqctx):
                return (
                    dep_mode,
                    arr_mode,
                    instance.send_and_receive(
                        request, flask_request_id=flask_request_id),
                )

        pool = gevent.pool.Pool(app.config.get('GREENLET_POOL_SIZE', 3))
        for dep_mode, arr_mode, direct_path_type in krakens_call:
            pb_request = create_pb_request(request_type, request, dep_mode,
                                           arr_mode, direct_path_type)
            # we spawn a new greenlet; it won't have access to our thread-local
            # request object, so we pass the request id explicitly
            futures.append(
                pool.spawn(worker,
                           dep_mode,
                           arr_mode,
                           instance,
                           pb_request,
                           flask_request_id=flask.request.id))

        for future in gevent.iwait(futures):
            dep_mode, arr_mode, local_resp = future.get()
            # for logging purposes we put an id in each journey
            self.nb_kraken_calls += 1
            for idx, j in enumerate(local_resp.journeys):
                j.internal_id = "{resp}-{j}".format(resp=self.nb_kraken_calls,
                                                    j=idx)

            if dep_mode == 'ridesharing':
                switch_back_to_ridesharing(local_resp, True)
            if arr_mode == 'ridesharing':
                switch_back_to_ridesharing(local_resp, False)

            fill_uris(local_resp)
            resp.append(local_resp)
            logger.debug("for mode %s|%s we have found %s journeys", dep_mode,
                         arr_mode, len(local_resp.journeys))

        return resp
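
The method above fans one kraken call per (departure mode, arrival mode) pair out onto a gevent pool and aggregates the responses as they complete. A minimal, self-contained sketch of that fan-out/aggregate pattern, with a hypothetical send() standing in for instance.send_and_receive():

import gevent
import gevent.pool


def send(dep_mode, arr_mode):
    # hypothetical stand-in for instance.send_and_receive()
    gevent.sleep(0.1)
    return dep_mode, arr_mode, {'journeys': []}


def fan_out(mode_pairs, pool_size=3):
    pool = gevent.pool.Pool(pool_size)
    # one greenlet per mode pair
    futures = [pool.spawn(send, dep, arr) for dep, arr in mode_pairs]
    responses = []
    # iwait yields each future as soon as it finishes, not in spawn order
    for future in gevent.iwait(futures):
        dep_mode, arr_mode, resp = future.get()
        responses.append((dep_mode, arr_mode, resp))
    return responses


responses = fan_out([('walking', 'walking'), ('bss', 'bss')])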
Example #2
    def call_kraken(self, request_type, request, instance, krakens_call, request_id, context):
        record_custom_parameter('scenario', 'distributed')
        logger = logging.getLogger(__name__)
        logger.warning("using experimental scenario!!")
        """
        All spawned futures must be started (if they are not yet started) when leaving the scope.

        We do this to prevent the program from being blocked in cases where some unstarted futures may hold
        threading locks. If we leave the scope without cleaning up these futures, they may hold their locks forever.

        Note that the cleaning process depends on the implementation of the futures.
        """
        try:
            with FutureManager(self.greenlet_pool_size) as future_manager, timed_logger(
                logger, 'call_kraken', request_id
            ):
                if request_type == type_pb2.ISOCHRONE:
                    return self._scenario._compute_isochrone_common(
                        future_manager, request, instance, krakens_call, type_pb2.ISOCHRONE
                    )
                elif request_type == type_pb2.PLANNER:
                    return self._scenario._compute_journeys(
                        future_manager, request, instance, krakens_call, context, type_pb2.PLANNER
                    )
                else:
                    abort(400, message="This type of request is not supported with distributed")
        except PtException as e:
            logger.exception('')
            return [e.get()]
        except EntryPointException as e:
            logger.exception('')
            return [e.get()]
        except StreetNetworkException as e:
            return [e.get()]
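
The note in the string above says that every spawned future must be started or cleaned up when the `with FutureManager(...)` block is left, so unstarted futures never keep holding locks. The real FutureManager is navitia-specific; purely as an illustration of that cleanup guarantee, here is a minimal gevent-based sketch:

import gevent.pool


class CleanupPool(object):
    """Hypothetical illustration only; this is not navitia's FutureManager."""

    def __init__(self, size=8):
        self._pool = gevent.pool.Pool(size)

    def spawn(self, fn, *args, **kwargs):
        return self._pool.spawn(fn, *args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # on a clean exit, wait for every greenlet to finish;
        # on an exception, kill whatever is still pending so nothing keeps a lock
        if exc_type is None:
            self._pool.join()
        else:
            self._pool.kill()
        return False  # never swallow the exception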
Example #3
File: api.py Project: Xzya/navitia
def add_info_newrelic(response, *args, **kwargs):
    try:
        record_custom_parameter('navitia-request-id', request.id)
        token = get_token()
        user = get_user(token=token, abort_if_no_token=False)
        app_name = get_app_name(token)
        if user:
            record_custom_parameter('user_id', str(user.id))
        record_custom_parameter('token_name', app_name)
        record_custom_parameter('version', __version__)
        coverages = get_used_coverages()
        if coverages:
            record_custom_parameter('coverage', coverages[0])
    except Exception:
        logger = logging.getLogger(__name__)
        logger.exception('error while reporting to newrelic:')
    return response
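
add_info_newrelic receives the response, records request-scoped attributes, and returns the response unchanged, which matches the shape Flask expects from an after_request hook. A sketch of how such a hook could be wired up, assuming Flask registration (navitia's actual wiring may differ):

from flask import Flask

app = Flask(__name__)

# assumed registration; not necessarily how navitia hooks it in
app.after_request(add_info_newrelic)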
Example #4
    def call_kraken(self, request_type, request, instance, krakens_call,
                    context):
        record_custom_parameter('scenario', 'distributed')
        logger = logging.getLogger(__name__)
        logger.warning("using experimental scenario!!")
        """
        All spawned futures must be started (if they are not yet started) when leaving the scope.

        We do this to prevent the program from being blocked in cases where some unstarted futures may hold
        threading locks. If we leave the scope without cleaning up these futures, they may hold their locks forever.

        Note that the cleaning process depends on the implementation of the futures.
        """
        try:
            with FutureManager() as future_manager:
                res = self._compute_all(future_manager, request, instance,
                                        krakens_call, context)
                return res
        except PtException as e:
            return [e.get()]
        except EntryPointException as e:
            return [e.get()]
Example #5
    def __init__(self):
        super(Scenario, self).__init__()
        self._scenario = Distributed()
        record_custom_parameter('scenario', 'distributed')
Example #6
    def call_kraken(self, req, instance, tag=None):
        record_custom_parameter('scenario', 'default')
        resp = None
        """
            for all combinations of departure and arrival modes we call kraken
        """
        logger = logging.getLogger(__name__)
        futures = []

        def worker(o_mode, d_mode, instance, request, request_id):
            return (o_mode, d_mode,
                    instance.send_and_receive(request, request_id=request_id))

        pool = gevent.pool.Pool(current_app.config.get('GREENLET_POOL_SIZE',
                                                       3))
        for o_mode, d_mode in itertools.product(self.origin_modes,
                                                self.destination_modes):
            # since we use multiple green threads we have to copy the request
            local_req = copy.deepcopy(req)
            local_req.journeys.streetnetwork_params.origin_mode = o_mode
            local_req.journeys.streetnetwork_params.destination_mode = d_mode
            if o_mode == 'car' or (is_admin(req.journeys.origin[0].place)
                                   and is_admin(
                                       req.journeys.destination[0].place)):
                # we don't want a direct path for car or for admin-to-admin journeys
                local_req.journeys.streetnetwork_params.enable_direct_path = False
            else:
                local_req.journeys.streetnetwork_params.enable_direct_path = True
            futures.append(
                pool.spawn(worker,
                           o_mode,
                           d_mode,
                           instance,
                           local_req,
                           request_id=flask.request.id))

        for future in gevent.iwait(futures):
            o_mode, d_mode, local_resp = future.get()
            if local_resp.response_type == response_pb2.ITINERARY_FOUND:

                # if a specific tag was provided, we tag the journeys
                # and we don't call the qualifier; it will be done later
                # with the journeys from the previous query
                if tag:
                    for j in local_resp.journeys:
                        j.type = tag
                else:
                    # we qualify the journeys
                    request_type = "arrival" if req.journeys.clockwise else "departure"
                    qualifier_one(local_resp.journeys, request_type)

                fill_uris(local_resp)
                if not resp:
                    resp = local_resp
                else:
                    self.merge_response(resp, local_resp)
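            # if no itinerary has been kept yet, keep the current response anyway
            # so that its status/error information can still be returned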
            if not resp:
                resp = local_resp
            logger.debug(
                "for mode %s|%s we have found %s journeys: %s",
                o_mode,
                d_mode,
                len(local_resp.journeys),
                [j.type for j in local_resp.journeys],
            )

        return resp
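
This variant enumerates every origin/destination mode combination with itertools.product and deep-copies the request for each pair so that concurrent greenlets never mutate a shared object. A minimal sketch of that enumeration step, with hypothetical mode lists and a plain dict standing in for the protobuf request:

import copy
import itertools

origin_modes = ['walking', 'bss']        # hypothetical mode lists
destination_modes = ['walking', 'car']

base_request = {'origin_mode': None, 'destination_mode': None}

local_requests = []
for o_mode, d_mode in itertools.product(origin_modes, destination_modes):
    # each pair works on its own copy, so greenlets cannot step on each other
    local_request = copy.deepcopy(base_request)
    local_request['origin_mode'] = o_mode
    local_request['destination_mode'] = d_mode
    local_requests.append(local_request)

# yields 4 requests: walking|walking, walking|car, bss|walking, bss|car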