Example 1
    def test_datastore_transactions(self):
        data_store = self.ds_class(datastore_name='ion_test_ds', profile=DataStore.DS_PROFILE.RESOURCES, scope=get_sys_name())
        # Just in case previous run failed without cleaning up, delete data store
        try:
            data_store.delete_datastore()
        except NotFound:
            pass
        data_store.create_datastore()
        self.data_store = data_store

        init_db_stats()
        try:
            with data_store.in_transaction():
                obj_id1, _ = data_store.create_doc(dict(foo="bar1"))
                obj_id2, _ = data_store.create_doc(dict(foo="bar2"))
                obj_id3, _ = data_store.create_doc(dict(foo="bar3"))

                objs = data_store.read_mult([obj_id1, obj_id2, obj_id3])
                self.assertEqual(set([obj_id1, obj_id2, obj_id3]), {o["_id"] for o in objs})
                raise BadRequest("EXIT")
        except BadRequest as br:
            if br.message != "EXIT":
                raise
        objs = data_store.read_mult([obj_id1, obj_id2, obj_id3], strict=False)
        self.assertEqual(objs, [None]*3)

        with data_store.in_transaction():
            obj_id1, _ = data_store.create_doc(dict(foo="bar4"))
            obj_id2, _ = data_store.create_doc(dict(foo="bar5"))
        objs = data_store.read_mult([obj_id1, obj_id2], strict=False)
        self.assertEqual(set([obj_id1, obj_id2]), {o["_id"] for o in objs})

        data_store.delete_mult([obj_id1, obj_id2])

        db_stats = get_db_stats()
        print "STATS", db_stats
        self.assertEqual(db_stats["count.select"], 3)
        self.assertEqual(db_stats["count.nonsel"], 7)    # Note: delete_mult issues individual statements
        self.assertEqual(db_stats["count.all"], 10)

        clear_db_stats()

        # TEST: Check 3 concurrent greenlets

        def worker(num):
            with data_store.in_transaction() as conn:
                obj_id1, _ = data_store.create_doc(dict(foo="gevent1"+str(num)))
                gevent.sleep(0.1)
                obj_id2, _ = data_store.create_doc(dict(foo="gevent2"+str(num)))
                gevent.sleep(0.1)
                obj_id3, _ = data_store.create_doc(dict(foo="gevent3"+str(num)))
                objs = data_store.read_mult([obj_id1, obj_id2, obj_id3])
                self.assertEqual(set([obj_id1, obj_id2, obj_id3]), {o["_id"] for o in objs})
                data_store.delete_mult([obj_id1, obj_id2, obj_id3])

        gevent.joinall([gevent.spawn(worker, i) for i in xrange(3)])
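
The test relies on in_transaction() behaving like a standard transactional context manager: writes issued inside the with block become visible only on a clean exit and are rolled back if an exception escapes the block. Below is a minimal sketch of that pattern; the connection object and its begin/commit/rollback methods are assumptions for illustration, not the datastore's actual implementation.

    from contextlib import contextmanager

    class TransactionalStore(object):
        """Sketch only: commit on clean exit, roll back on any exception."""

        def __init__(self, connection):
            self._conn = connection          # assumed DB-API-style connection

        @contextmanager
        def in_transaction(self):
            self._conn.begin()               # hypothetical explicit BEGIN
            try:
                yield self._conn
                self._conn.commit()          # clean exit: make writes visible
            except Exception:
                self._conn.rollback()        # exception: discard writes
                raise                        # re-raise so the caller sees it

This is what the test asserts: the documents created before raise BadRequest("EXIT") are gone afterwards (read_mult with strict=False returns [None]*3), while the documents from the second, unexceptional transaction are readable.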
Example 2
    def _control_flow(self):
        """
        Entry point for process control thread of execution.

        This method is run by the control greenlet for each ION process. Listeners attached
        to the process, either RPC Servers or Subscribers, synchronize calls to the process
        by placing call requests into the queue by calling _routing_call.

        This method blocks until there are calls to be made in the synchronized queue, and
        then executes them from within this greenlet.  Any exception raised is caught and re-raised
        in the greenlet that originally scheduled the call.  If successful, the AsyncResult
        created at scheduling time is set with the result of the call.
        """
        if self.name:
            svc_name = "unnamed-service"
            if self.service is not None and hasattr(self.service, 'name'):
                svc_name = self.service.name
            threading.current_thread().name = "%s-%s" % (svc_name, self.name)
        thread_base_name = threading.current_thread().name

        self._ready_control.set()

        for calltuple in self._ctrl_queue:
            calling_gl, ar, call, callargs, callkwargs, context = calltuple
            request_id = (context or {}).get("request-id", None)
            if request_id:
                threading.current_thread().name = thread_base_name + "-" + str(request_id)
            #log.debug("control_flow making call: %s %s %s (has context: %s)", call, callargs, callkwargs, context is not None)

            res = None
            start_proc_time = get_ion_ts_millis()
            self._record_proc_time(start_proc_time)

            # check context for expiration
            if context is not None and 'reply-by' in context:
                if start_proc_time >= int(context['reply-by']):
                    log.info("control_flow: attempting to process message already exceeding reply-by, ignore")

                    # raise a timeout in the calling thread to allow endpoints to continue processing
                    e = IonTimeout("Reply-by time has already occurred (reply-by: %s, op start time: %s)" % (context['reply-by'], start_proc_time))
                    calling_gl.kill(exception=e, block=False)

                    continue

            # If the AsyncResult is already set, the call has been cancelled
            if ar.ready():
                log.info("control_flow: attempting to process message that has been cancelled, ignore")
                continue

            init_db_stats()
            try:
                # ******************************************************************
                # ****** THIS IS WHERE THE RPC OPERATION/SERVICE CALL IS MADE ******

                with self.service.push_context(context):
                    with self.service.container.context.push_context(context):
                        self._ctrl_current = ar
                        res = call(*callargs, **callkwargs)

                # ****** END CALL, EXCEPTION HANDLING FOLLOWS                 ******
                # ******************************************************************

            except OperationInterruptedException:
                # The endpoint layer takes care of the response, as it is the one that caused this
                log.debug("Operation interrupted")

            except Exception as e:
                if self._log_call_exception:
                    log.exception("PROCESS exception: %s" % e.message)

                # Raise the exception in the calling greenlet.
                # Try decorating the args of the exception with the true traceback -
                # this should be reported by ThreadManager._child_failed
                exc = PyonThreadTraceback("IonProcessThread _control_flow caught an exception (call: %s, *args %s, **kwargs %s, context %s)\nTrue traceback captured by IonProcessThread's _control_flow:\n\n%s" % (
                        call, callargs, callkwargs, context, traceback.format_exc()))
                e.args = e.args + (exc,)

                # HACK: we only handle TypeError and IonException derived exceptions,
                # so only forward those directly; anything else is recorded and wrapped below
                if isinstance(e, (TypeError, IonException)):
                    calling_gl.kill(exception=e, block=False)
                else:
                    # otherwise, swallow/record/report and hopefully we can continue on our way
                    self._errors.append((call, callargs, callkwargs, context, e, exc))

                    log.warn(exc)
                    log.warn("Attempting to continue...")

                    # Have to raise something friendlier on the client side.
                    # If the exception's string representation is too large, passing it on via
                    # calling_gl.kill(exception=ContainerError(str(exc)), block=False)
                    # will crash the container, so truncate it first.
                    exceptions_str = str(exc)
                    if len(exceptions_str) > 10000:
                        exceptions_str = (
                            "Exception string representation is to large to put it all here. "
                            "Begin and end of the exception:\n"
                            + exceptions_str[:2000] + "\n...\n" + exceptions_str[-2000:]
                        )
                    calling_gl.kill(exception=ContainerError(exceptions_str), block=False)
            finally:
                try:
                    self._compute_proc_stats(start_proc_time)

                    db_stats = get_db_stats()
                    if db_stats:
                        if self._warn_call_dbstmt_threshold > 0 and db_stats.get("count.all", 0) >= self._warn_call_dbstmt_threshold:
                            stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
                            log.warn("PROC_OP '%s.%s' EXCEEDED DB THRESHOLD. stats=%s", call.__module__, call.__name__, stats_str)
                        elif self._log_call_dbstats:
                            stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
                            log.info("PROC_OP '%s.%s' DB STATS: %s", call.__module__, call.__name__, stats_str)
                    clear_db_stats()
                except Exception:
                    log.exception("Error computing process call stats")

                self._ctrl_current = None
                threading.current_thread().name = thread_base_name

            ar.set(res)
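
The docstring describes the dispatch pattern in play here: listener greenlets enqueue call requests, a single control greenlet pops them off the queue, executes them, and either sets the caller's AsyncResult or re-raises the exception in the calling greenlet. A stripped-down, hypothetical sketch of that pattern follows (the class name and the routing_call signature are assumptions, not the actual pyon API):

    import gevent
    from gevent.event import AsyncResult
    from gevent.queue import Queue

    class ControlLoop(object):
        """Sketch only: serialize all calls into a single control greenlet."""

        def __init__(self):
            self._ctrl_queue = Queue()
            self._ctrl_gl = gevent.spawn(self._control_flow)

        def routing_call(self, call, *args, **kwargs):
            # Called from a listener greenlet (assumed to be a spawned gevent Greenlet);
            # returns an AsyncResult the caller can wait on.
            ar = AsyncResult()
            self._ctrl_queue.put((gevent.getcurrent(), ar, call, args, kwargs))
            return ar

        def _control_flow(self):
            # Single consumer: one call at a time, in arrival order.
            for calling_gl, ar, call, args, kwargs in self._ctrl_queue:
                try:
                    ar.set(call(*args, **kwargs))
                except Exception as e:
                    # Re-raise in the greenlet that scheduled the call.
                    calling_gl.kill(exception=e, block=False)

A caller would then do something like ar = loop.routing_call(op, arg) followed by ar.get(timeout=...), which blocks only the calling greenlet while the control greenlet keeps the process single-threaded with respect to its own state.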
Example 3
    def _control_flow(self):
        """
        Entry point for process control thread of execution.

        This method is run by the control greenlet for each ION process. Listeners attached
        to the process, either RPC Servers or Subscribers, synchronize calls to the process
        by placing call requests into the queue by calling _routing_call.

        This method blocks until there are calls to be made in the synchronized queue, and
        then executes them from within this greenlet.  Any exception raised is caught and re-raised
        in the greenlet that originally scheduled the call.  If successful, the AsyncResult
        created at scheduling time is set with the result of the call.
        """
        svc_name = getattr(
            self.service, "name",
            "unnamed-service") if self.service else "unnamed-service"
        proc_id = getattr(self.service, "id",
                          "unknown-pid") if self.service else "unknown-pid"
        if self.name:
            threading.current_thread().name = "%s-%s" % (svc_name, self.name)
        thread_base_name = threading.current_thread().name

        self._ready_control.set()

        for calltuple in self._ctrl_queue:
            calling_gl, ar, call, callargs, callkwargs, context = calltuple
            request_id = (context or {}).get("request-id", None)
            if request_id:
                threading.current_thread().name = thread_base_name + "-" + str(request_id)
            #log.debug("control_flow making call: %s %s %s (has context: %s)", call, callargs, callkwargs, context is not None)

            res = None
            start_proc_time = get_ion_ts_millis()
            self._record_proc_time(start_proc_time)

            # check context for expiration
            if context is not None and 'reply-by' in context:
                if start_proc_time >= int(context['reply-by']):
                    log.info(
                        "control_flow: attempting to process message already exceeding reply-by, ignore"
                    )

                    # raise a timeout in the calling thread to allow endpoints to continue processing
                    e = IonTimeout(
                        "Reply-by time has already occurred (reply-by: %s, op start time: %s)"
                        % (context['reply-by'], start_proc_time))
                    calling_gl.kill(exception=e, block=False)

                    continue

            # If the AsyncResult is already set, the call has been cancelled
            if ar.ready():
                log.info(
                    "control_flow: attempting to process message that has been cancelled, ignore"
                )
                continue

            init_db_stats()
            try:
                # ******************************************************************
                # ****** THIS IS WHERE THE RPC OPERATION/SERVICE CALL IS MADE ******

                with self.service.push_context(context), \
                     self.service.container.context.push_context(context):
                    self._ctrl_current = ar
                    res = call(*callargs, **callkwargs)

                # ****** END CALL, EXCEPTION HANDLING FOLLOWS                 ******
                # ******************************************************************

            except OperationInterruptedException:
                # The endpoint layer takes care of the response, as it is the one that caused this
                log.debug("Operation interrupted")

            except Exception as e:
                if self._log_call_exception:
                    log.exception("PROCESS exception: %s" % e.message)

                # Raise the exception in the calling greenlet.
                # Try decorating the args of the exception with the true traceback -
                # this should be reported by ThreadManager._child_failed
                exc = PyonThreadTraceback(
                    "IonProcessThread _control_flow caught an exception "
                    "(call: %s, *args %s, **kwargs %s, context %s)\n"
                    "True traceback captured by IonProcessThread' _control_flow:\n\n%s"
                    % (call, callargs, callkwargs, context,
                       traceback.format_exc()))
                e.args = e.args + (exc, )

                if isinstance(e, (TypeError, IonException)):
                    # Pass through known process exceptions, in particular IonException
                    calling_gl.kill(exception=e, block=False)
                else:
                    # Otherwise, wrap unknown, forward and hopefully we can continue on our way
                    self._errors.append(
                        (call, callargs, callkwargs, context, e, exc))

                    log.warn(exc)
                    log.warn("Attempting to continue...")

                    # Note: A too-large exception string will crash the container (when passed on as a msg header).
                    exception_str = str(exc)
                    if len(exception_str) > 10000:
                        exception_str = (
                            "Exception string representation too large. "
                            "Begin and end of the exception:\n" +
                            exception_str[:2000] + "\n...\n" +
                            exception_str[-2000:])
                    calling_gl.kill(exception=ContainerError(exception_str),
                                    block=False)
            finally:
                try:
                    # Compute statistics
                    self._compute_proc_stats(start_proc_time)

                    db_stats = get_db_stats()
                    if db_stats:
                        if self._warn_call_dbstmt_threshold > 0 and db_stats.get(
                                "count.all",
                                0) >= self._warn_call_dbstmt_threshold:
                            stats_str = ", ".join(
                                "{}={}".format(k, db_stats[k])
                                for k in sorted(db_stats.keys()))
                            log.warn(
                                "PROC_OP '%s.%s' EXCEEDED DB THRESHOLD. stats=%s",
                                svc_name, call.__name__, stats_str)
                        elif self._log_call_dbstats:
                            stats_str = ", ".join(
                                "{}={}".format(k, db_stats[k])
                                for k in sorted(db_stats.keys()))
                            log.info("PROC_OP '%s.%s' DB STATS: %s", svc_name,
                                     call.__name__, stats_str)
                    clear_db_stats()

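                    # Optional per-call stats hook; stats_callback is configured outside this method (e.g. at module level)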
                    if stats_callback:
                        stats_callback(proc_id=proc_id,
                                       proc_name=self.name,
                                       svc=svc_name,
                                       op=call.__name__,
                                       request_id=request_id,
                                       context=context,
                                       db_stats=db_stats,
                                       proc_stats=self.time_stats,
                                       result=res,
                                       exc=None)
                except Exception:
                    log.exception("Error computing process call stats")

                self._ctrl_current = None
                threading.current_thread().name = thread_base_name

            # Set response in AsyncResult of caller (endpoint greenlet)
            ar.set(res)
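
Compared to Example 2, this variant adds a per-call stats hook invoked from the finally block. The call site above fixes the keyword interface, so a consumer only needs to accept those keyword arguments. A minimal, hypothetical callback might look like this (the logger name and the summary fields it picks out are illustrative only):

    import logging

    stats_log = logging.getLogger("proc_call_stats")

    def log_call_stats(proc_id, proc_name, svc, op, request_id, context,
                       db_stats, proc_stats, result, exc):
        """Hypothetical stats_callback: log one summary line per completed call."""
        db_total = (db_stats or {}).get("count.all", 0)
        stats_log.info("PROC_OP %s.%s pid=%s proc=%s req=%s db_stmts=%s ok=%s",
                       svc, op, proc_id, proc_name, request_id, db_total, exc is None)

How such a function gets bound to the stats_callback name used in _control_flow is not shown in this excerpt, so the wiring is left out here.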