def moveCalendarTimezoneProperties(sqlStore):
    """
    Need to move all the CalDAV:calendar-timezone properties in the
    RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
    the new value from the XML property.
    """

    cb = schema.CALENDAR_BIND
    rp = schema.RESOURCE_PROPERTY

    try:
        calendars_for_id = {}
        while True:
            sqlTxn = sqlStore.newTransaction()
            rows = (yield rowsForProperty(sqlTxn, caldavxml.CalendarTimeZone, with_uid=True, batch=BATCH_SIZE))
            if len(rows) == 0:
                yield sqlTxn.commit()
                break
            delete_ids = []
            for calendar_rid, value, viewer in rows:
                delete_ids.append(calendar_rid)
                if calendar_rid not in calendars_for_id:
                    ids = yield Select(
                        [cb.CALENDAR_HOME_RESOURCE_ID, cb.BIND_MODE, ],
                        From=cb,
                        Where=cb.CALENDAR_RESOURCE_ID == calendar_rid,
                    ).on(sqlTxn)
                    calendars_for_id[calendar_rid] = ids

                if viewer:
                    calendarHome = (yield sqlTxn.calendarHomeWithUID(viewer))
                else:
                    calendarHome = None
                    for row in calendars_for_id[calendar_rid]:
                        home_id, bind_mode = row
                        if bind_mode == _BIND_MODE_OWN:
                            calendarHome = (yield sqlTxn.calendarHomeWithResourceID(home_id))
                            break

                if calendarHome is not None:
                    prop = WebDAVDocument.fromString(value).root_element
                    calendar = (yield calendarHome.childWithID(calendar_rid))
                    if calendar is not None:
                        yield calendar.setTimezone(prop.calendar())

            # Always delete the rows so that batch processing works correctly
            yield Delete(
                From=rp,
                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(delete_ids)))).And
                      (rp.NAME == PropertyName.fromElement(caldavxml.CalendarTimeZone).toString()),
            ).on(sqlTxn, ids=delete_ids)

            yield sqlTxn.commit()

        yield cleanPropertyStore()

    except RuntimeError:
        f = Failure()
        yield sqlTxn.abort()
        f.raiseException()
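The try/except block above shows a recurring idiom in these examples: capture the active exception in a Failure, run asynchronous cleanup, then re-raise. A minimal, self-contained sketch of that pattern (illustrative names, not the calendar store's code):

from twisted.python.failure import Failure

def cleanup_and_reraise(txn, work):
    # Hedged sketch: Failure() with no arguments snapshots the exception
    # currently being handled, so it survives the cleanup call below.
    try:
        work()
    except RuntimeError:
        f = Failure()
        txn.abort()         # cleanup runs while the Failure holds the error
        f.raiseException()  # re-raise the original exception and traceback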
Example 2
def doToEachHomeNotAtVersion(store, homeSchema, version, doIt, logStr, filterOwnerUID=None, processExternal=False):
    """
    Do something to each home whose version column indicates it is older
    than the specified version. Do this in batches as there may be a lot of work to do. Also,
    allow the GUID to be filtered to support a parallel mode of operation.
    """

    txn = store.newTransaction("updateDataVersion")
    where = homeSchema.DATAVERSION < version
    if filterOwnerUID:
        where = where.And(homeSchema.OWNER_UID.StartsWith(filterOwnerUID))
    total = (yield Select(
        [Count(homeSchema.RESOURCE_ID), ],
        From=homeSchema,
        Where=where,
    ).on(txn))[0][0]
    yield txn.commit()
    count = 0

    while True:

        logUpgradeStatus(logStr, count, total)

        # Get the next home with an old version
        txn = store.newTransaction("updateDataVersion")
        try:
            rows = yield Select(
                [homeSchema.RESOURCE_ID, homeSchema.OWNER_UID, homeSchema.STATUS, ],
                From=homeSchema,
                Where=where,
                OrderBy=homeSchema.OWNER_UID,
                Limit=1,
            ).on(txn)

            if len(rows) == 0:
                yield txn.commit()
                logUpgradeStatus("End {}".format(logStr), count, total)
                returnValue(None)

            # Apply to the home if not external
            homeResourceID, _ignore_owner_uid, homeStatus = rows[0]
            if homeStatus != _HOME_STATUS_EXTERNAL or processExternal:
                yield doIt(txn, homeResourceID)

            # Update the home to the current version
            yield Update(
                {homeSchema.DATAVERSION: version},
                Where=homeSchema.RESOURCE_ID == homeResourceID,
            ).on(txn)
            yield txn.commit()
        except RuntimeError, e:
            f = Failure()
            logUpgradeError(
                logStr,
                "Failed to upgrade {} to {}: {}".format(homeSchema, version, e)
            )
            yield txn.abort()
            f.raiseException()

        count += 1
Example 3
    def process_response(self, response):
        '''Passes response (Response or Failure object) received from
        Downloader through pipeline middlewares.

        Return value is either Request, Response or Failure object.
        '''
        # we can be sure that response.request is set from the downloader
        request = response.request

        for name, enabled_setting, pr, pf in self._process_response:
            # skip disabled mw through meta
            if not request.meta.get(enabled_setting, True):
                continue
            method = pr if isinstance(response, Response) else pf
            try:
                response = method(response)
            except:
                response = Failure()
            assert response is None or isinstance(response, (Request, Response, Failure)), \
                'Middleware %s.process_request must return None, Response, Request or Failure, got %s' % \
                (method.im_self.__class__.__name__, type(response))
            if response is None:
                method_name = 'process_response()' if method is pr else 'process_failure()'
                failure = Failure(DropRequest(
                    '`%s` pipeline middleware dropped the request in `%s` method' %
                    (name, method_name)))
                failure.request = request
                return failure
            if not isinstance(response, (Response, Failure)):
                return response

            # make sure the request attribute is always set
            response.request = request
        return response
Example 4
    def log(self, category, eventDict):
        """
        Log an event.

        @param category: A short string identifying the type of log event.
            The receiving log server may use this to collect all messages of the
            same category in their own log files.
        @type category: C{bytes}

        @param eventDict: The event dictionary. As this is serialized to JSON
            (see L{serialize}), for complex values, you may want to render them
            to a string before adding them to the event dictionary.
        @type eventDict: C{dict}
        """
        self.augment(eventDict)
        data = self.serialize(category, eventDict)

        try:
            self.socket.send(data)
        except:
            failure = Failure()
            why = "Failed to send udplog message"
            data = self.serializeFailure(category, eventDict, len(data),
                                         failure, why)
            try:
                self.socket.send(data)
            except Exception:
                import sys
                text = why + '\n' + failure.getBriefTraceback()
                print >> sys.stderr, text
Example 5
 def decorator(*args, **kwargs):
     try: return func(*args, **kwargs)
     except:
         failure = Failure()
         msg = getException(failure)
         msg += ': ' + failure.getErrorMessage()
         return NoResource(msg)
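Only the inner wrapper is shown above. A hedged sketch of how such a decorator is usually assembled; the outer function, imports, and the simplified getException below are assumptions, not the original project's code:

from functools import wraps
from twisted.python.failure import Failure
from twisted.web.resource import NoResource

def getException(failure):
    # simplified stand-in for the project's helper
    return failure.type.__name__

def errors_as_resources(func):
    @wraps(func)
    def decorator(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            failure = Failure()
            msg = getException(failure) + ': ' + failure.getErrorMessage()
            return NoResource(msg)
    return decorator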
Example 6
 def migrateOneHome(self, fileTxn, homeType, fileHome):
     """
     Migrate an individual calendar or addressbook home.
     """
     migrateFunc, destFunc = homeTypeLookup.get(homeType)
     uid = normalizeUUIDOrNot(fileHome.uid())
     self.log.warn("Starting migration transaction %s UID %r" %
                   (homeType, uid))
     sqlTxn = self.sqlStore.newTransaction()
     homeGetter = destFunc(sqlTxn)
     sqlHome = yield homeGetter(uid, create=False)
     if sqlHome is not None and not self.merge:
         self.log.warn(
             "%s home %r already existed not migrating" % (
                 homeType, uid))
         yield sqlTxn.abort()
         returnValue(None)
     try:
         if sqlHome is None:
             sqlHome = yield homeGetter(uid, create=True)
         yield migrateFunc(fileHome, sqlHome, merge=self.merge)
     except:
         f = Failure()
         yield sqlTxn.abort()
         f.raiseException()
     else:
         yield sqlTxn.commit()
         # Remove file home after migration. FIXME: instead, this should be a
         # public remove...HomeWithUID() API for de-provisioning.  (If we had
         # this, this would simply be a store-to-store migrator rather than a
         # filesystem-to-database upgrade.)
         fileHome._path.remove()
Example 7
 def newfunc(*args, **kwargs):
     result = None
     attempt = 0
     # setup the caller object
     iKwargs = {"debug": debug, "fd": fd, "verbose": verbose, "defaults": defaults, "trap": trap}
     caller = _InspectorGadget(func, **iKwargs)
     while attempt <= maximum:
         try:  # initial caller setup
             result = caller(*args, **kwargs)
             if isinstance(result, defer.Deferred):
                 return _deferred(result, caller)
             break  # reset data
         except SystemExit:
             result = Failure()
             break
         except KeyboardInterrupt:
             result = Failure()
             break
         except:
             attempt += 1
             if attempt > maximum:
                 result = Failure()
                 break
             time.sleep(delay)
             caller.write(">>> Retry attempt %d" % attempt)
     if isinstance(result, Failure):
         result.raiseException()
     return result
Example 8
 def _start_stop_common(self, label, action):
     result = {}
     try:
         function = getattr(self.model, action)
         d = defer.maybeDeferred(function, label)
         wfd = defer.waitForDeferred(d)
         yield wfd
         result = wfd.getResult()
         #take this time to update the instance
         if isinstance(result, dict):
             thisInst = self.model.getInstance(label)
             thisInst.updateInfo(result) 
     except:
         failure = Failure()
         if failure.check(DroneCommandFailed):
             result = failure.value.resultContext
         else:
             #log the error, allowing for debugging
             self.debugReport()
             #be nice and return something to the end user
             template = "%s: %s" % (getException(failure), failure.getErrorMessage())
             context = {'error':failure,'code':-2}
             result = self.resultContext(template, None, **context)
         #finally wrap the failure into a known type
         result = Failure(DroneCommandFailed(result))
     #AppInstances need a moment to be updated
     d = defer.Deferred()
     reactor.callLater(1.0, d.callback, None)
     wfd = defer.waitForDeferred(d)
     yield wfd
     wfd.getResult()
     yield result
Example 9
def err(message="", **context):
    failure = Failure()
    if message:
        message += '\n'
    message += failure.getTraceback()
    log(message, **context)
    return failure
Example 10
    def http_POST(self, request):
        """
        The server-to-server POST method.
        """

        # Need a transaction to work with
        txn = transactionFromRequest(request, self._newStore)

        # This is a server-to-server scheduling operation.
        scheduler = IScheduleScheduler(txn, None, podding=self._podding)

        originator = self.loadOriginatorFromRequestHeaders(request)
        recipients = self.loadRecipientsFromRequestHeaders(request)
        body = (yield allDataFromStream(request.stream))

        # Do the POST processing treating this as a non-local schedule
        try:
            result = (yield scheduler.doSchedulingViaPOST(request.remoteAddr, request.headers, body, originator, recipients))
        except Exception:
            ex = Failure()
            yield txn.abort()
            ex.raiseException()
        else:
            yield txn.commit()
        response = result.response()
        if not self._podding:
            response.headers.addRawHeader(ISCHEDULE_CAPABILITIES, str(config.Scheduling.iSchedule.SerialNumber))
        returnValue(response)
Example 11
    def test_writeFailure(self, logger):
        """
        L{writeFailure} writes a L{Failure} to the log.
        """
        if Failure is None:
            raise SkipTest("Twisted unavailable")

        try:
            raise RuntimeError("because")
        except:
            failure = Failure()
            expectedTraceback = failure.getBriefTraceback()
            writeFailure(failure, logger)
        message = logger.messages[0]
        assertContainsFields(
            self,
            message,
            {
                "message_type": "eliot:traceback",
                "exception": RuntimeError,
                "reason": failure.value,
                "traceback": expectedTraceback,
            },
        )
        logger.flushTracebacks(RuntimeError)
Example 12
def _do_get_connection(account, conductor, ready, retries_left, backoff):
    this_ready = defer.Deferred()
    factory = ImapClientFactory(account, conductor, this_ready)
    factory.connect()
    try:
        conn = yield this_ready
        # yay - report we are good and tell the real callback we have it.
        account.reportStatus(brat.EVERYTHING, brat.GOOD)
        ready.callback(conn)
    except Exception, exc:
        fail = Failure()
        logger.debug("first chance connection error handling: %s\n%s", fail.getErrorMessage(), fail.getBriefTraceback())
        retries_left -= 1
        if retries_left <= 0:
            ready.errback(fail)
        else:
            status = failure_to_status(fail)
            account.reportStatus(**status)
            acct_id = account.details.get("id", "")
            logger.warning(
                "Failed to connect to account %r, will retry after %s secs: %s",
                acct_id,
                backoff,
                fail.getErrorMessage(),
            )
            next_backoff = min(backoff * 2, MAX_BACKOFF)  # magic number
            conductor.reactor.callLater(
                backoff, _do_get_connection, account, conductor, ready, retries_left, next_backoff
            )
Example 13
 def __call__(self, argstr):
     args = argstr.split()
     resultContext = None
     if not args: #return command usage
         methods = {}
         for name,args,doc in self.exposedMethodInfo:
             methods[name] = {'args' : args, 'doc' : doc}
         resultContext = dict(description=self.__doc__, methods=methods)
         yield resultContext
     else:
         method = args.pop(0)
         try:
             wfd = defer.waitForDeferred(
                     self.invoke(method,args)
             )
             yield wfd
             resultContext = wfd.getResult()
         except:
             failure = Failure()
             if failure.check(DroneCommandFailed):
                 resultContext = failure.value.resultContext
             else:
                 #be nice and return something to the end user
                 template = "[%(application)s] "
                 template += "%s: %s" % (getException(failure), failure.getErrorMessage())
                 context = {'error': True, 'code': -2, 'stacktrace': failure.getTraceback()}
                 resultContext = self.resultContext(template, None, 
                     **context
                 )
                 
         yield resultContext
Example 14
 def dispatch(self, *a, **k):
     for handler in self._listeners:
         try:
             handler(*a, **k)
         except Exception, e:
             f = Failure(e)
             f.printTraceback()
             log.err(e)
Example 15
    def test_should_return_None_when_exception_raised(self, _):

        failure = Failure(RuntimeError())
        failure.lb_ip = '192.168.0.1'
        responses = [(True, {'name': 'devytc97', 'state': 'up', 'session': 'monitor-enabled'}),
                     (False, failure)]

        self.assertEquals(None, check_status_responses(responses))
Example 16
 def handle(self, message, address):
     try:
         (v,) = message.arguments
         self.dispatch(self._transform(float(v)))
     except Exception, e:
         f = Failure(e)
         f.printTraceback()
         log.msg('[FloatDispatcher.handle] error', e)
Example 17
    def test_process_done(self):
        error = Failure(exc_value=Exception())
        error.type = ProcessDone
        error.value = Mock(exitCode=1, message="foobar")

        jobtype = JobType(fake_assignment())
        self.assertEqual(
            jobtype.format_error(error),
            "Process has finished with no apparent errors."
        )
Example 18
 def parseError(self, err, query, instMap):
     err = Failure(err)
     err.value = 'Received %s from query: %s'%(err.value, query)
     log.error(err.getErrorMessage())
     results = {}
     for instances in instMap.values():
         for tables in instances.values():
             for table, props in tables:
                 results[table] = [err,]
     return results
Example 19
 def receiveDirectMessage(self, senderName, messageText, metadata=None):
     cmdline = string.split(messageText, ' ', 1)
     if len(cmdline) == 1:
         cmd, arg = cmdline[0], ''
     else:
         cmd, arg = cmdline
     try:
         getattr(self, "bot_%s" % cmd)(senderName, arg, metadata)
     except:
         f = Failure()
         self.voice.directMessage(senderName, f.getBriefTraceback())
Example 20
 def newfunc(*args,**kwargs):
     try:
         return func(*args,**kwargs)
     except:
         failure = Failure()
         caught_exc = failure.value
         err_msg = failure.getErrorMessage()
         if failure.check(exc): raise caught_exc #1
         exc_inst = exc(err_msg)
         exc_inst.inner_exception = caught_exc
         raise exc_inst
Example 21
 def _unexpected_error(self, failure=None, task=None):
     if not failure:
         failure = Failure()
     log.notifyFailure(self, failure,
                       "Unexpected error%s",
                       (task and " during %s" % task) or "",
                       cleanTraceback=True)
     m = messages.Error(_(failure.getErrorMessage()),
                        debug=log.getFailureMessage(failure))
     self.addMessage(m)
     return failure
Example 22
 def __transcodingError(self, failure=None, task=None):
     self._fireStatusChanged(TranscoderStatusEnum.error)
     if not failure:
         failure = Failure()
     self.onJobError(failure.getErrorMessage())
     log.notifyFailure(self, failure,
                       "Transocding error%s",
                       (task and " during %s" % task) or "",
                       cleanTraceback=True)
     self.setMood(moods.sad)
     return failure
Example 23
File: jobs.py Project: D3f0/txscada
 def oops(self, *arg):
     """
     Returns a C{False} status code for a remote call along with a string
     traceback of the exception raised. You can supply your own exception or
     L{Failure} instance. If you don't, the current exception will be used.
     """
     if arg and isinstance(arg[0], Failure):
         failureObject = arg[0]
     else:
         failureObject = Failure(*arg)
     return False, failureObject.getTraceback()
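For context, a hedged usage sketch of the (status, traceback) convention oops() implements; run_job and the lambda below are illustrative, not part of txscada:

from twisted.python.failure import Failure

def run_job(job):
    try:
        return True, job()
    except Exception:
        # like oops() with no argument: wrap the current exception
        return False, Failure().getTraceback()

ok, payload = run_job(lambda: 1 / 0)  # ok is False, payload is the traceback text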
Example 24
    def test_process_terminated(self):
        error = Failure(exc_value=Exception())
        error.type = ProcessTerminated
        error.value = Mock(exitCode=1, message="foobar")

        jobtype = JobType(fake_assignment())
        self.assertEqual(
            jobtype.format_error(error),
            "Process may have terminated abnormally, please check "
            "the logs.  Message from error "
            "was 'foobar'"
        )
Example 25
 def write(self, data):
     """write data to some file like object only writes happen if debug was
        specified during instantiation.
     """
     if hasattr(self.fd, 'write') and hasattr(self.fd.write, '__call__') \
             and self.debug: #only write on debug true
         try:
             self.fd.write(str(data)+'\n')
         except:
             failure = Failure()
             if self.verbose: failure.printDetailedTraceback(sys.stderr)
             else: failure.printTraceback(sys.stderr)
Example 26
def moveCalendarAvailabilityProperties(sqlStore):
    """
    Need to move all the CS:calendar-availability properties in the
    RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
    the new value from the XML property.
    """

    cb = schema.CALENDAR_BIND
    rp = schema.RESOURCE_PROPERTY

    try:
        while True:
            sqlTxn = sqlStore.newTransaction()
            rows = (yield rowsForProperty(sqlTxn, customxml.CalendarAvailability, batch=BATCH_SIZE))
            if len(rows) == 0:
                yield sqlTxn.commit()
                break

            # Map each calendar to a home id using a single query for efficiency
            calendar_ids = [row[0] for row in rows]

            home_map = yield Select(
                [cb.CALENDAR_RESOURCE_ID, cb.CALENDAR_HOME_RESOURCE_ID, ],
                From=cb,
                Where=(cb.CALENDAR_RESOURCE_ID.In(Parameter("ids", len(calendar_ids)))).And(cb.BIND_MODE == _BIND_MODE_OWN),
            ).on(sqlTxn, ids=calendar_ids)
            calendar_to_home = dict(home_map)

            # Move property to each home
            for calendar_rid, value in rows:
                if calendar_rid in calendar_to_home:
                    calendarHome = (yield sqlTxn.calendarHomeWithResourceID(calendar_to_home[calendar_rid]))

                    if calendarHome is not None:
                        prop = WebDAVDocument.fromString(value).root_element
                        yield calendarHome.setAvailability(prop.calendar())

            # Always delete the rows so that batch processing works correctly
            yield Delete(
                From=rp,
                Where=(rp.RESOURCE_ID.In(Parameter("ids", len(calendar_ids)))).And
                      (rp.NAME == PropertyName.fromElement(customxml.CalendarAvailability).toString()),
            ).on(sqlTxn, ids=calendar_ids)

            yield sqlTxn.commit()

        yield cleanPropertyStore()

    except RuntimeError:
        f = Failure()
        yield sqlTxn.abort()
        f.raiseException()
Example 27
 def test_message_why_iserror(self):
     """
     When message, why and isError are given, message takes precedence,
     and why and isError are ignored when constructing the message
     """
     failure = Failure(ValueError())
     self.wrapper({'message': ('mine', 'yours'), 'isError': True,
                   'why': 'reason', 'failure': failure})
     self.assertEqual(
         self._formatted_event(),
         {'message': ('mineyours',), 'level': LogLevel.ERROR,
          'traceback': failure.getTraceback(),
          'exception_type': 'ValueError'})
Example 28
 def __unexpectedError(self, failure=None, task=None):
     self._fireStatusChanged(TranscoderStatusEnum.unexpected_error)
     if not failure:
         failure = Failure()
     self.onJobError(failure.getErrorMessage())
     log.notifyFailure(self, failure,
                       "Unexpected error%s",
                       (task and " during %s" % task) or "",
                       cleanTraceback=True)
     m = messages.Error(T_(failure.getErrorMessage()),
                        debug=log.getFailureMessage(failure))
     self.addMessage(m)
     return failure
Example 29
File: error.py Project: f3at/feat
    def __init__(self, *args, **kwargs):
        self.data = kwargs.pop('data', None)
        self.cause = kwargs.pop('cause', None)
        default_code = self.default_error_code
        default_name = self.default_error_name or self.__class__.__name__
        self.error_code = kwargs.pop('code', default_code)
        self.error_name = kwargs.pop('name', default_name)

        if args and isinstance(args[0], unicode):
            # Exceptions don't like being passed unicode strings
            # as a message. Here we do our best to encode it
            try:
                encoded = args[0].encode('utf8')
            except:
                encoded = args[0].encode('ascii', 'replace')
            args = (encoded, ) + args[1:]

        Exception.__init__(self, *args, **kwargs)

        self.cause_details = None
        self.cause_traceback = None

        try:
            from twisted.python.failure import Failure
        except ImportError:
            Failure = None

        if self.cause:
            if isinstance(self.cause, Exception):
                self.cause_details = get_exception_message(self.cause)
            elif Failure and isinstance(self.cause, Failure):
                self.cause_details = get_failure_message(self.cause)
            else:
                self.cause_details = "Unknown"

            if Failure and isinstance(self.cause, Failure):
                f = self.cause
                self.cause = f.value
                try:
                    self.cause_traceback = f.getTraceback()
                except:
                    # Ignore failure.NoCurrentExceptionError
                    pass
            elif Failure:
                try:
                    f = Failure()
                    if f.value == self.cause:
                        self.cause_traceback = f.getTraceback()
                except:
                    # Ignore failure.NoCurrentExceptionError
                    pass
Example 30
 def handleDeferreds(labels):
     """Remember last yield is the return value, don't use return"""
     results = {}
     descriptions = []
     ret = {}
     code = 0
     for l in labels:
         try:
             d = defer.maybeDeferred(func, l, *args[1:], **kwargs)
             wfd = defer.waitForDeferred(d)
             yield wfd
             ret = wfd.getResult()
         except:
             failure = Failure()
             des = "%s: %s" % \
                     (getException(failure),failure.getErrorMessage())
             if failure.check(DroneCommandFailed):
                 ret = failure.value.resultContext
                 if 'description' not in ret:
                     ret['description'] = des
                 ret['stacktrace'] = failure.getTraceback()
                 ret['error'] = True
                 if 'code' not in ret:
                     ret['code'] = 1
             else:
                 ret = {
                     'description': des,
                     'code': 1,
                     'error': True,
                     'stacktrace': failure.getTraceback()
                 }
         if not ret: #NoneType detection
             ret = {'description' : str(ret), 'code' : 0}
         if 'code' in ret:
             code += abs(ret['code'])
         results[l] = ret
         try:
             descriptions.append(results[l]['description'])
         except:
            self.debugReport()
     results['code'] = code
     try:
         results['description'] = '\n'.join(descriptions)
     except:
         results['description'] = None
     if len(labels) == 1:
         Label = labels[0]
     else:
         Label = None
     ret = self.resultContext('%(description)s',label=Label,**results) 
     yield ret
Example 31
 def wrap_failure(failure):
     return Failure(failure.value)
Example 32
    def _find(self, target, force_nodes=False):
        nodes_closest = set(
            self.routing_table.closest_nodes(target, max_nodes=MAX_FIND_WALKS))
        if not nodes_closest:
            returnValue(
                Failure(RuntimeError('No nodes found in the routing table')))

        nodes_tried = set()
        values = []
        recent = None

        for _ in range(MAX_FIND_STEPS):
            if not nodes_closest:
                break

            # Send closest nodes a find-node-request
            deferreds = [
                self._send_find_request(node, target, force_nodes)
                for node in nodes_closest
            ]
            responses = yield gatherResponses(deferreds, consumeErrors=True)
            recent = next(
                (sender
                 for sender, response in responses if 'nodes' in response),
                recent)

            nodes_tried |= nodes_closest
            nodes_closest.clear()

            # Process responses and puncture nodes that we haven't tried yet
            new_values, new_nodes, to_puncture = self._process_find_responses(
                responses, nodes_tried)
            values += new_values
            nodes_closest |= new_nodes

            deferreds = [
                self._send_find_request(sender, node.id, force_nodes)
                for sender, node in to_puncture.items()
            ]

            # Wait for punctures (if any)...
            yield DeferredList(deferreds, consumeErrors=True)

            # Ensure we haven't tried these nodes yet
            nodes_closest -= nodes_tried
            # Only consider top-k closest to our target
            if len(nodes_closest) > MAX_FIND_WALKS:
                nodes_closest = set(
                    sorted(
                        nodes_closest,
                        key=lambda n: distance(n.id, target))[:MAX_FIND_WALKS])

        if force_nodes:
            returnValue(
                sorted(nodes_tried, key=lambda n: distance(n.id, target)))

        # Merge all values received into one tuple. First pick the first value from each tuple, then the second, etc.
        values = sum(six.moves.zip_longest(*values), ())

        # Filter out duplicates while preserving order
        seen = set()
        values = [
            v for v in values
            if v is not None and not (v in seen or seen.add(v))
        ]

        if recent and values:
            # Store the key-value pair on the most recently visited node that
            # did not have it (for caching purposes).
            self.store_on_nodes(target, values, [recent])

        returnValue(self.post_process_values(values))
Example 33
 def _cancelConnectWaiters(self):
     """
     Notify all pending requests for a connection that no more connections
     are expected.
     """
     self._unawait(Failure(CancelledError()))
Example 34
 def test_default_media_failed(self):
     request = Request('http://url')
     fail = Failure(Exception())
     assert self.pipe.media_failed(fail, request, self.info) is fail
Example 35
    def startBuild(self, build_status, expectations, slavebuilder):
        """This method sets up the build, then starts it by invoking the
        first Step. It returns a Deferred which will fire when the build
        finishes. This Deferred is guaranteed to never errback."""

        # we are taking responsibility for watching the connection to the
        # remote. This responsibility was held by the Builder until our
        # startBuild was called, and will not return to them until we fire
        # the Deferred returned by this method.

        log.msg("%s.startBuild" % self)
        self.build_status = build_status
        # now that we have a build_status, we can set properties
        self.setupProperties()
        self.setupSlaveBuilder(slavebuilder)
        slavebuilder.slave.updateSlaveStatus(buildStarted=build_status)

        # convert all locks into their real forms
        lock_list = []
        for access in self.locks:
            if not isinstance(access, locks.LockAccess):
                # Buildbot 0.7.7 compatibility: user did not specify access
                access = access.defaultAccess()
            lock = self.builder.botmaster.getLockByID(access.lockid)
            lock_list.append((lock, access))
        self.locks = lock_list
        # then narrow SlaveLocks down to the right slave
        self.locks = [(l.getLock(self.slavebuilder), la)
                       for l, la in self.locks]
        self.remote = slavebuilder.remote
        self.remote.notifyOnDisconnect(self.lostRemote)

        metrics.MetricCountEvent.log('active_builds', 1)

        d = self.deferred = defer.Deferred()
        def _uncount_build(res):
            metrics.MetricCountEvent.log('active_builds', -1)
            return res
        d.addBoth(_uncount_build)

        def _release_slave(res, slave, bs):
            self.slavebuilder.buildFinished()
            slave.updateSlaveStatus(buildFinished=bs)
            return res
        d.addCallback(_release_slave, self.slavebuilder.slave, build_status)

        try:
            self.setupBuild(expectations) # create .steps
        except:
            # the build hasn't started yet, so log the exception as a point
            # event instead of flunking the build. 
            # TODO: associate this failure with the build instead. 
            # this involves doing
            # self.build_status.buildStarted() from within the exception
            # handler
            log.msg("Build.setupBuild failed")
            log.err(Failure())
            self.builder.builder_status.addPointEvent(["setupBuild",
                                                       "exception"])
            self.finished = True
            self.results = FAILURE
            self.deferred = None
            d.callback(self)
            return d

        self.build_status.buildStarted(self)
        self.acquireLocks().addCallback(self._startBuild_2)
        return d
Example 36
 def doTry(f):
     try:
         f()
     except (LookupError, TypeError, ValueError):
         # a plausible set of exceptions, so we don't catch implausible ones
         err('erroneous', Failure())
Example 37
def cb_fail(value, arg1, arg2):
    return Failure(TypeError())
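A callback that returns a Failure, as cb_fail does, pushes the Deferred onto its errback chain. A small self-contained sketch of that behaviour (handler names are illustrative):

from twisted.internet import defer
from twisted.python.failure import Failure

def to_failure(value):
    # returning a Failure from a callback switches the chain to errbacks
    return Failure(TypeError("forced failure for %r" % (value,)))

def on_error(failure):
    failure.trap(TypeError)
    return "caught: " + failure.getErrorMessage()

d = defer.succeed(42)
d.addCallback(to_failure)
d.addCallbacks(lambda result: "ok", on_error)  # on_error fires, not the callback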
Example 38
 def test_httpsession_on_error(self):
     test_deferred = Deferred()
     session = HttpTrackerSession("localhost", ("localhost", 4782), "/announce", 5)
     session.result_deferred = Deferred().addErrback(lambda failure: test_deferred.callback(None))
     session.on_error(Failure(RuntimeError(u"test\xf8\xf9")))
     return test_deferred
Example 39
    def add_gui_request(self, infohash, timeout=20, scrape_now=False):
        """
        Public API for adding a GUI request.
        :param infohash: Torrent infohash.
        :param timeout: The timeout to use in the performed requests
        :param scrape_now: Flag whether we want to force scraping immediately
        """
        result = self._torrent_db.getTorrent(
            infohash, (u'torrent_id', u'last_tracker_check', u'num_seeders',
                       u'num_leechers'), False)
        if result is None:
            self._logger.warn(u"torrent info not found, skip. infohash: %s",
                              hexlify(infohash))
            return fail(Failure(RuntimeError("Torrent not found")))

        torrent_id = result[u'torrent_id']
        last_check = result[u'last_tracker_check']
        time_diff = time.time() - last_check
        if time_diff < self._torrent_check_interval and not scrape_now:
            self._logger.debug(
                u"time interval too short, skip GUI request. infohash: %s",
                hexlify(infohash))
            return succeed({
                "db": {
                    "seeders": result[u'num_seeders'],
                    "leechers": result[u'num_leechers'],
                    "infohash": infohash.encode('hex')
                }
            })

        # get torrent's tracker list from DB
        tracker_set = set()
        db_tracker_list = self._torrent_db.getTrackerListByTorrentID(
            torrent_id)
        for tracker in db_tracker_list:
            tracker_set.add(tracker)

        if not tracker_set:
            self._logger.warn(u"no trackers, skip GUI request. infohash: %s",
                              hexlify(infohash))
            # TODO: add code to handle torrents with no tracker
            return fail(
                Failure(
                    RuntimeError("No trackers available for this torrent")))

        deferred_list = []
        for tracker_url in tracker_set:
            if tracker_url == u'DHT':
                # Create a (fake) DHT session for the lookup
                session = FakeDHTSession(self.tribler_session, infohash,
                                         timeout)
                self._session_list['DHT'].append(session)
                deferred_list.append(session.connect_to_tracker().addCallbacks(
                    *self.get_callbacks_for_session(session)))
            elif tracker_url != u'no-DHT':
                session = self._create_session_for_request(tracker_url,
                                                           timeout=timeout)
                session.add_infohash(infohash)
                deferred_list.append(session.connect_to_tracker().addCallbacks(
                    *self.get_callbacks_for_session(session)))

        return DeferredList(deferred_list, consumeErrors=True).addCallback(
            lambda res: self.on_gui_request_completed(infohash, res))
Example 40
                yield properties_to_enumerate
                properties_to_enumerate = properties_to_enumerate.getResult()
            else:
                properties_to_enumerate = search_properties

            for property in properties_to_enumerate:
                has = waitForDeferred(resource.hasProperty(property, request))
                yield has
                has = has.getResult()
                if has:
                    try:
                        resource_property = waitForDeferred(resource.readProperty(property, request))
                        yield resource_property
                        resource_property = resource_property.getResult()
                    except:
                        f = Failure()
                        status = statusForFailure(f, "getting property: %s" % (property,))
                        if status not in properties_by_status:
                            properties_by_status[status] = []
                        if not returnMinimal or status != responsecode.NOT_FOUND:
                            properties_by_status[status].append(propertyName(property))
                    else:
                        if resource_property is not None:
                            properties_by_status[responsecode.OK].append(resource_property)
                        elif not returnMinimal:
                            properties_by_status[responsecode.NOT_FOUND].append(propertyName(property))
                elif not returnMinimal:
                    properties_by_status[responsecode.NOT_FOUND].append(propertyName(property))

        propstats = []
Example 41
 def toggle_lbry_file_running(self, lbry_file):
     """Toggle whether a stream reader is currently running"""
     for l in self.lbry_files:
         if l == lbry_file:
             return l.toggle_running()
     return defer.fail(Failure(ValueError("Could not find that LBRY file")))
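A hedged sketch of the defer.fail() pattern used above: wrapping the error in an already-failed Deferred lets a synchronous lookup report errors through the same errback path its asynchronous callers use (names below are illustrative):

from twisted.internet import defer
from twisted.python.failure import Failure

def find_item(items, wanted):
    for item in items:
        if item == wanted:
            return defer.succeed(item)
    return defer.fail(Failure(ValueError("Could not find %r" % (wanted,))))

def report(failure):
    failure.trap(ValueError)
    return "lookup failed: " + failure.getErrorMessage()

d = find_item(["a", "b"], "c")
d.addErrback(report)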
Example 42
 def _ignoreAndCancelConnectWaiters(self, f):
     """
     Notify all pending requests for a connection that no more connections
     are expected, after ignoring the Failure passed in.
     """
     self._unawait(Failure(CancelledError()))
Example 43
                                             responsecode.NO_CONTENT)
        got_an_error = False

        if makecalendar.children:
            # mkcalendar -> set -> prop -> property*
            for property in makecalendar.children[0].children[0].children:
                try:
                    if property.qname() == (
                            caldavxml.caldav_namespace,
                            "supported-calendar-component-set"):
                        yield self.setSupportedComponentSet(property)
                        set_supported_component_set = True
                    else:
                        yield self.writeProperty(property, request)
                except HTTPError:
                    errors.add(Failure(), property)
                    got_an_error = True
                else:
                    errors.add(responsecode.OK, property)

        if got_an_error:
            # Force a transaction error and proper clean-up
            errors.error()
            raise HTTPError(MultiStatusResponse([errors.response()]))

    # When calendar collections are single component only, default MKCALENDAR is VEVENT only
    if not set_supported_component_set and config.RestrictCalendarsToOneComponentType:
        yield self.setSupportedComponents(("VEVENT", ))

    returnValue(responsecode.CREATED)
Example 44
 def clear_pending_requests(self, spider):
     """Remove all pending requests for the given spider"""
     q = self.pending_requests[spider]
     while q:
         _, dfd = q.pop()[0]
         dfd.errback(Failure(IgnoreRequest()))
Example 45
    def test_should_remove_req_res_references_before_caching_the_results(self):
        """Regression test case to prevent a memory leak in the Media Pipeline.

        The memory leak is triggered when an exception is raised when a Response
        scheduled by the Media Pipeline is being returned. For example, when a
        FileException('download-error') is raised because the Response status
        code is not 200 OK.

        It happens because we are keeping a reference to the Response object
        inside the FileException context. This is caused by the way Twisted
        returns values from inline callbacks: it raises a custom exception
        encapsulating the original return value.

        The solution is to remove the exception context when this context is a
        _DefGen_Return instance, the BaseException used by Twisted to pass the
        returned value from those inline callbacks.

        Maybe there's a better and more reliable way to test the case described
        here, but it would be more complicated and involve running - or at least
        mocking - some async steps from the Media Pipeline. The current test
        case is simple and detects the problem very fast. On the other hand, it
        would not detect another kind of leak happening due to old object
        references being kept inside the Media Pipeline cache.

        This problem does not occur in Python 2.7 since we don't have Exception
        Chaining (https://www.python.org/dev/peps/pep-3134/).
        """
        # Create sample pair of Request and Response objects
        request = Request('http://url')
        response = Response('http://url', body=b'', request=request)

        # Simulate the Media Pipeline behavior to produce a Twisted Failure
        try:
            # Simulate a Twisted inline callback returning a Response
            # The returnValue method raises an exception encapsulating the value
            returnValue(response)
        except BaseException as exc:
            def_gen_return_exc = exc
            try:
                # Simulate the media_downloaded callback raising a FileException
                # This usually happens when the status code is not 200 OK
                raise FileException('download-error')
            except Exception as exc:
                file_exc = exc
                # Simulate Twisted capturing the FileException
                # It encapsulates the exception inside a Twisted Failure
                failure = Failure(file_exc)

        # The Failure should encapsulate a FileException ...
        self.assertEqual(failure.value, file_exc)
        # ... and it should have the returnValue exception set as its context
        self.assertEqual(failure.value.__context__, def_gen_return_exc)

        # Let's calculate the request fingerprint and fake some runtime data...
        fp = request_fingerprint(request)
        info = self.pipe.spiderinfo
        info.downloading.add(fp)
        info.waiting[fp] = []

        # When calling the method that caches the Request's result ...
        self.pipe._cache_result_and_execute_waiters(failure, fp, info)
        # ... it should store the Twisted Failure ...
        self.assertEqual(info.downloaded[fp], failure)
        # ... encapsulating the original FileException ...
        self.assertEqual(info.downloaded[fp].value, file_exc)
        # ... but it should not store the returnValue exception on its context
        context = getattr(info.downloaded[fp].value, '__context__', None)
        self.assertIsNone(context)
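The fix the docstring describes, dropping the exception context when it is the value-carrying exception Twisted raises from returnValue(), might look roughly like this hedged sketch; it is an assumption about the shape of the fix, not the pipeline's actual code, and _DefGen_Return is a private Twisted helper:

from twisted.internet.defer import _DefGen_Return

def _strip_returnvalue_context(failure):
    # If the wrapped exception chained onto a _DefGen_Return, the cached
    # Failure would keep the returned Response alive; drop that reference.
    context = getattr(failure.value, '__context__', None)
    if isinstance(context, _DefGen_Return):
        failure.value.__context__ = None
    return failure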
Example 46
    def _render(self, request):
        time_in = utils.now()
        if not self._check_headers(request):
            self._render_error(Failure(InvalidHeaderError()), request, None)
            return server.NOT_DONE_YET
        session = request.getSession()
        session_id = session.uid
        finished_deferred = request.notifyFinish()

        if self._use_authentication:
            # if this is a new session, send a new secret and set the expiration
            # otherwise, session.touch()
            if self._initialize_session(session_id):

                def expire_session():
                    self._unregister_user_session(session_id)

                session.startCheckingExpiration()
                session.notifyOnExpire(expire_session)
                message = "OK"
                request.setResponseCode(200)
                self._set_headers(request, message, True)
                self._render_message(request, message)
                return server.NOT_DONE_YET
            else:
                session.touch()

        request.content.seek(0, 0)
        content = request.content.read()
        try:
            parsed = jsonrpclib.loads(content)
        except ValueError:
            log.warning("Unable to decode request json")
            self._render_error(
                JSONRPCError(None, JSONRPCError.CODE_PARSE_ERROR), request,
                None)
            return server.NOT_DONE_YET

        request_id = None
        try:
            function_name = parsed.get('method')
            args = parsed.get('params', {})
            request_id = parsed.get('id', None)
            token = parsed.pop('hmac', None)
        except AttributeError as err:
            log.warning(err)
            self._render_error(
                JSONRPCError(None, code=JSONRPCError.CODE_INVALID_REQUEST),
                request, request_id)
            return server.NOT_DONE_YET

        reply_with_next_secret = False
        if self._use_authentication:
            try:
                self._verify_token(session_id, parsed, token)
            except InvalidAuthenticationToken as err:
                log.warning("API validation failed")
                self._render_error(
                    JSONRPCError.create_from_exception(
                        err,
                        code=JSONRPCError.CODE_AUTHENTICATION_ERROR,
                        traceback=format_exc()), request, request_id)
                return server.NOT_DONE_YET
            request.addCookie("TWISTED_SESSION", session_id)
            self._update_session_secret(session_id)
            reply_with_next_secret = True

        try:
            fn = self._get_jsonrpc_method(function_name)
        except UnknownAPIMethodError as err:
            log.warning('Failed to get function %s: %s', function_name, err)
            self._render_error(
                JSONRPCError(None, JSONRPCError.CODE_METHOD_NOT_FOUND),
                request, request_id)
            return server.NOT_DONE_YET
        except NotAllowedDuringStartupError:
            log.warning('Function not allowed during startup: %s',
                        function_name)
            self._render_error(
                JSONRPCError(
                    "This method is unavailable until the daemon is fully started",
                    code=JSONRPCError.CODE_INVALID_REQUEST), request,
                request_id)
            return server.NOT_DONE_YET

        if args == EMPTY_PARAMS or args == []:
            _args, _kwargs = (), {}
        elif isinstance(args, dict):
            _args, _kwargs = (), args
        elif len(args) == 1 and isinstance(args[0], dict):
            # TODO: this is for backwards compatibility. Remove this once API and UI are updated
            # TODO: also delete EMPTY_PARAMS then
            _args, _kwargs = (), args[0]
        elif len(args) == 2 and isinstance(args[0], list) and isinstance(
                args[1], dict):
            _args, _kwargs = args
        else:
            raise ValueError('invalid args format')

        params_error, erroneous_params = self._check_params(fn, _args, _kwargs)
        if params_error is not None:
            params_error_message = '{} for {} command: {}'.format(
                params_error, function_name, ', '.join(erroneous_params))
            log.warning(params_error_message)
            self._render_error(
                JSONRPCError(params_error_message,
                             code=JSONRPCError.CODE_INVALID_PARAMS), request,
                request_id)
            return server.NOT_DONE_YET

        d = defer.maybeDeferred(fn, self, *_args, **_kwargs)

        # finished_deferred will callback when the request is finished
        # and errback if something went wrong. If the errback is
        # called, cancel the deferred stack. This is to prevent
        # request.finish() from being called on a closed request.
        finished_deferred.addErrback(self._handle_dropped_request, d,
                                     function_name)

        d.addCallback(self._callback_render, request, request_id,
                      reply_with_next_secret)
        d.addErrback(trap, ConnectionDone, ConnectionLost,
                     defer.CancelledError)
        d.addErrback(self._render_error, request, request_id)
        d.addBoth(lambda _: log.debug("%s took %f", function_name,
                                      (utils.now() - time_in).total_seconds()))
        return server.NOT_DONE_YET
Example 47
    def start_component(self,
                        component_id,
                        config,
                        reload_modules=False,
                        details=None):
        """
        Starts a component in this container worker.

        :param component_id: The ID under which to start the component.
        :type component_id: str

        :param config: Component configuration.
        :type config: dict

        :param reload_modules: If `True`, enforce reloading of modules (user code)
           that were modified (see: TrackingModuleReloader).
        :type reload_modules: bool

        :param details: Caller details.
        :type details: instance of :class:`autobahn.wamp.types.CallDetails`

        :returns: Component startup information.
        :rtype: dict
        """
        self.log.debug('{klass}.start_component({component_id}, {config})',
                       klass=self.__class__.__name__,
                       component_id=component_id,
                       config=config)

        # prohibit starting a component twice
        #
        if component_id in self.components:
            emsg = 'duplicate component "{}" - a component with this ID is already running (or starting)'.format(
                component_id)
            self.log.debug(emsg)
            raise ApplicationError('crossbar.error.already_running', emsg)

        # check component configuration
        #
        try:
            self.personality.check_container_component(self.personality,
                                                       config)
        except Exception as e:
            emsg = 'invalid container component configuration: {}'.format(e)
            self.log.debug(emsg)
            raise ApplicationError('crossbar.error.invalid_configuration',
                                   emsg)
        else:
            self.log.debug('starting component "{component_id}" ..',
                           component_id=component_id)

        # WAMP application component factory
        #
        realm = config.get('realm', None)
        assert type(realm) == str

        extra = config.get('extra', {})
        assert type(extra) == dict

        # forward crossbar node base directory
        extra['cbdir'] = self.config.extra.cbdir

        # allow access to controller session
        controller = self if self.config.extra.expose_controller else None

        # expose an object shared between components
        shared = self.components_shared if self.config.extra.expose_shared else None

        # this is the component configuration provided to the components ApplicationSession
        component_config = ComponentConfig(realm=realm,
                                           extra=extra,
                                           keyring=None,
                                           controller=controller,
                                           shared=shared)

        # define component ctor function
        try:
            create_component = _appsession_loader(config)
        except ApplicationError as e:
            # for convenience, also log failed component loading
            self.log.error('component loading failed', log_failure=Failure())
            if 'No module named' in str(e):
                self.log.error('  Python module search paths:')
                for path in e.kwargs['pythonpath']:
                    self.log.error('    {path}', path=path)
            raise

        # check component extra configuration
        #
        if hasattr(create_component, 'check_config') and callable(
                create_component.check_config) and extra:
            try:
                create_component.check_config(self.personality, extra)
            except Exception as e:
                emsg = 'invalid container component extra configuration: {}'.format(
                    e)
                self.log.debug(emsg)
                raise ApplicationError('crossbar.error.invalid_configuration',
                                       emsg)
            else:
                self.log.debug(
                    'starting container component "{component_id}" ..',
                    component_id=component_id)

        # force reload of modules (user code)
        #
        if reload_modules:
            self._module_tracker.reload()

        # prepare some cleanup code in case this connection goes away
        def _component_closed(session, was_clean):
            """
            This is a moderate hack around the fact that we don't have any way
            to "listen" for a close event on websocket or rawsocket
            objects. Also, the rawsocket implementation doesn't have
            "a" function we can wrap anyway (they are asyncio vs
            Twisted specific), so for both WebSocket and rawsocket
            cases, we actually listen on the WAMP session for
            transport close notifications.

            Ideally we'd listen for "close" on the transport but this
            works fine for cleaning up the components.
            """
            if component_id not in self.components:
                self.log.warn(
                    "Component '{id}' closed, but not in set.",
                    id=component_id,
                )
                return

            if was_clean:
                self.log.info(
                    "Closed connection to '{id}'",
                    id=component_id,
                )
            else:
                self.log.error(
                    "Lost connection to component '{id}' uncleanly",
                    id=component_id,
                )

            component = self.components[component_id]
            del self.components[component_id]
            self._publish_component_stop(component)
            component._stopped.callback(component.marshal())
            del component

            # figure out if we need to shut down the container itself or not
            if not was_clean and self._exit_mode == self.SHUTDOWN_ON_ANY_COMPONENT_FAILED:
                self.log.info(
                    "A component has failed: stopping container in exit mode <{exit_mode}> ...",
                    exit_mode=self._exit_mode,
                )
                self.shutdown()
                return

            if self._exit_mode == self.SHUTDOWN_ON_ANY_COMPONENT_STOPPED:
                self.log.info(
                    "A component has stopped: stopping container in exit mode <{exit_mode}> ...",
                    exit_mode=self._exit_mode,
                )
                self.shutdown()
                return

            if not self.components:
                if self._exit_mode == self.SHUTDOWN_ON_LAST_COMPONENT_STOPPED:
                    self.log.info(
                        "Container is hosting no more components: stopping container in exit mode <{exit_mode}> ...",
                        exit_mode=self._exit_mode,
                    )
                    self.shutdown()
                    return
                else:
                    self.log.info(
                        "Container is hosting no more components: continue running in exit mode <{exit_mode}>",
                        exit_mode=self._exit_mode,
                    )
            else:
                self.log.info(
                    "Container is still hosting {component_count} components: continue running in exit mode <{exit_mode}>",
                    exit_mode=self._exit_mode,
                    component_count=len(self.components),
                )

            # determine if we should re-start the component. Note that we can
            # only arrive here if we *didn't* decide to shut down above, so a
            # shutdown mode of SHUTDOWN_ON_ANY_COMPONENT_STOPPED means we
            # never try to re-start anything.
            if self._restart_mode == self.RESTART_ALWAYS or (
                    self._restart_mode == self.RESTART_FAILED
                    and not was_clean):

                def restart_component():
                    # Think: if the start_component() call below fails,
                    # we'll still schedule *exactly one* new re-start
                    # attempt for it, right?
                    self.log.info(
                        '{func}: now restarting previously closed component {component_id} automatically .. [restart_mode={restart_mode}, was_clean={was_clean}]',
                        func=hltype(_component_closed),
                        component_id=hlid(component_id),
                        restart_mode=hlval(self._restart_mode),
                        was_clean=hlval(was_clean))
                    return self.start_component(
                        component_id,
                        config,
                        reload_modules=reload_modules,
                        details=details,
                    )

                # note we must yield to the reactor with
                # callLater(0, ..) to avoid infinite recursion if
                # we're stuck in a restart loop
                from twisted.internet import reactor
                reactor.callLater(0, restart_component)
            else:
                self.log.warn(
                    '{func}: component {component_id} will not be restarted automatically! [restart_mode={restart_mode}, was_clean={was_clean}]',
                    func=hltype(_component_closed),
                    component_id=hlid(component_id),
                    restart_mode=hlval(self._restart_mode),
                    was_clean=hlval(was_clean))

        joined_d = Deferred()
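        # joined_d fires once the component's onJoin (and any on('join', ..)
        # handlers) have completed; see _ready()/_left() below and
        # await_join() at the end of this method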

        # WAMP application session factory
        #
        def create_session():
            try:
                session = create_component(component_config)

                # any exception spilling out from user code in onXXX handlers is fatal!
                def panic(fail, msg):
                    self.log.error(
                        "Fatal error in component: {msg} - {log_failure.value}",
                        msg=msg,
                        log_failure=fail,
                    )
                    session.disconnect()

                session._swallow_error = panic

                # see note above, for _component_closed -- we should be
                # listening for "the transport was closed", but
                # "session disconnect" is close enough (since there
                # are no "proper events" from websocket/rawsocket
                # implementations).
                session.on('disconnect', _component_closed)

                # note, "ready" here means: onJoin and any on('join',
                # ..) handlers have all completed successfully. This
                # is necessary for container-components (as opposed to
                # router-components) to work as expected
                def _ready(s):
                    # this is different from "self.config.controller._realm" !!
                    self.log.info(
                        'Container component ready: component_id="{component_id}", realm="{realm}", authrole="{authrole}", authid="{authid}", session={session} {func}',
                        func=hltype(self.onJoin),
                        component_id=hlid(component_id),
                        realm=hlid(session._realm),
                        authid=hlid(session._authid),
                        authrole=hlid(session._authrole),
                        session=hlid(session._session_id))
                    if not joined_d.called:
                        joined_d.callback(None)

                session.on('ready', _ready)

                def _left(s, details):
                    if not joined_d.called:
                        joined_d.errback(
                            ApplicationError(
                                details.reason,
                                details.message,
                            ))

                session.on('leave', _left)

                return session

            except Exception:
                self.log.failure(
                    'component instantiation failed: {log_failure.value}')
                raise

        # WAMP transport factory
        #
        transport_config = config['transport']

        if transport_config['type'] == 'websocket':

            # create a WAMP-over-WebSocket transport client factory
            transport_factory = WampWebSocketClientFactory(
                create_session, transport_config['url'])
            transport_factory.noisy = False

            if 'options' in transport_config:
                set_websocket_options(transport_factory,
                                      transport_config['options'])

        elif transport_config['type'] == 'rawsocket':

            transport_factory = WampRawSocketClientFactory(
                create_session, transport_config)
            transport_factory.noisy = False

            if 'options' in transport_config:
                set_rawsocket_options(transport_factory,
                                      transport_config['options'])

        else:
            # should not arrive here, since we did check the config before
            raise Exception('logic error')

        # create and connect client endpoint
        #
        endpoint = create_connecting_endpoint_from_config(
            transport_config['endpoint'], self.config.extra.cbdir,
            self._reactor, self.log)

        # now, actually connect the client
        #
        d = endpoint.connect(transport_factory)

        def on_connect_success(proto):
            component = ContainerComponent(component_id, config, proto, None)
            self.components[component_id] = component

            # publish event "on_component_started" to all but the caller
            #
            uri = self._uri_prefix + '.on_component_started'

            component_started = {'id': component_id, 'config': config}

            self.publish(uri,
                         component_started,
                         options=PublishOptions(exclude=details.caller))

            return component_started

        def on_connect_error(err):
            # https://twistedmatrix.com/documents/current/api/twisted.internet.error.ConnectError.html
            if isinstance(err.value, internet.error.ConnectError):
                emsg = 'could not connect container component to router - transport establishment failed ({})'.format(
                    err.value)
                self.log.warn(emsg)
                raise ApplicationError('crossbar.error.cannot_connect', emsg)
            else:
                # should not arrive here (since all errors arriving here
                # should be subclasses of ConnectError)
                raise err

        def await_join(arg):
            """
            We don't want to consider this component working until its onJoin
            has completed (see create_session() above where this is hooked up).
            """
            return joined_d

        d.addCallbacks(on_connect_success, on_connect_error)
        d.addCallback(await_join)

        return d
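
For reference, a hedged sketch of the config['transport'] shape the code above reads for the WebSocket case; the endpoint keys are an assumption based on crossbar's usual connecting-endpoint configuration and are not taken from this file.

example_config = {
    'transport': {
        'type': 'websocket',              # or 'rawsocket'
        'url': 'ws://localhost:8080/ws',  # required for the websocket case
        'endpoint': {                     # assumed shape; passed to
            'type': 'tcp',                # create_connecting_endpoint_from_config()
            'host': '127.0.0.1',
            'port': 8080,
        },
        # 'options': {...},               # optional; applied via set_websocket_options()
    },
}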
Example No. 48
def _error(param):
    self.exitcode = -1
    self.output = param.value.output
    self.err = self.output
    LOGGER.warning('Remote Asterisk process failed: {0}'.format(self.err))
    return Failure(self)
Example No. 49
def _wrap_exception(f):
    return Failure(RemoteException(f))
Example No. 50
    def ultimatelyPerform(cls, txnFactory, jobDescriptor):
        """
        Eventually, after routing the job to the appropriate place, somebody
        actually has to I{do} it. This method basically calls L{JobItem.run}
        but it does a bunch of book-keeping to track the transaction and log failures
        and timing information.

        @param txnFactory: a 0- or 1-argument callable that creates an
            L{IAsyncTransaction}
        @type txnFactory: L{callable}
        @param jobDescriptor: the job descriptor
        @type jobDescriptor: L{JobDescriptor}
        @return: a L{Deferred} which fires with C{None} when the job has been
            performed, or fails if the job can't be performed.
        """

        t = time.time()

        def _tm():
            return "{:.3f}".format(1000 * (time.time() - t))

        def _overtm(nb):
            return "{:.0f}".format(1000 * (t - astimestamp(nb)))

        # Failed job clean-up
        def _failureCleanUp(delay=None):
            @inlineCallbacks
            def _cleanUp2(txn2):
                try:
                    job = yield cls.load(txn2, jobDescriptor.jobID)
                except NoSuchRecord:
                    log.debug(
                        "JobItem: {workType} {jobid} disappeared t={tm}",
                        workType=jobDescriptor.workType,
                        jobid=jobDescriptor.jobID,
                        tm=_tm(),
                    )
                else:
                    log.debug(
                        "JobItem: {workType} {jobid} marking as failed {count} t={tm}",
                        workType=jobDescriptor.workType,
                        jobid=jobDescriptor.jobID,
                        count=job.failed + 1,
                        tm=_tm(),
                    )
                    yield job.failedToRun(locked=isinstance(e, JobRunningError), delay=delay)
            return inTransaction(txnFactory, _cleanUp2, "ultimatelyPerform._failureCleanUp")

        log.debug("JobItem: {workType} {jobid} starting to run", workType=jobDescriptor.workType, jobid=jobDescriptor.jobID)
        txn = txnFactory(label="ultimatelyPerform: {workType} {jobid}".format(workType=jobDescriptor.workType, jobid=jobDescriptor.jobID))
        try:
            job = yield cls.load(txn, jobDescriptor.jobID)
            if hasattr(txn, "_label"):
                txn._label = "{} <{}>".format(txn._label, job.workType)
            log.debug(
                "JobItem: {workType} {jobid} loaded {work} t={tm}",
                workType=jobDescriptor.workType,
                jobid=jobDescriptor.jobID,
                work=job.workType,
                tm=_tm(),
            )
            yield job.run()

        except NoSuchRecord:
            # The record has already been removed
            yield txn.commit()
            log.debug(
                "JobItem: {workType} {jobid} already removed t={tm}",
                workType=jobDescriptor.workType,
                jobid=jobDescriptor.jobID,
                tm=_tm(),
            )

        except JobTemporaryError as e:

            # Temporary failure delay with back-off
            def _temporaryFailure():
                return _failureCleanUp(delay=e.delay * (job.failed + 1))
            log.debug(
                "JobItem: {workType} {jobid} {desc} t={tm}",
                workType=jobDescriptor.workType,
                jobid=jobDescriptor.jobID,
                desc="temporary failure #{}".format(job.failed + 1),
                tm=_tm(),
            )
            txn.postAbort(_temporaryFailure)
            yield txn.abort()

        except (JobFailedError, JobRunningError) as e:

            # Permanent failure
            log.debug(
                "JobItem: {workType} {jobid} {desc} t={tm}",
                workType=jobDescriptor.workType,
                jobid=jobDescriptor.jobID,
                desc="failed" if isinstance(e, JobFailedError) else "locked",
                tm=_tm(),
            )
            txn.postAbort(_failureCleanUp)
            yield txn.abort()

        except:
            f = Failure()
            log.error(
                "JobItem: {workType} {jobid} exception t={tm} {exc}",
                workType=jobDescriptor.workType,
                jobid=jobDescriptor.jobID,
                tm=_tm(),
                exc=f,
            )
            yield txn.abort()
            returnValue(f)

        else:
            yield txn.commit()
            log.debug(
                "JobItem: {workType} {jobid} completed t={tm} over={over}",
                workType=jobDescriptor.workType,
                jobid=jobDescriptor.jobID,
                tm=_tm(),
                over=_overtm(job.notBefore),
            )

        returnValue(None)
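
A hedged illustration of the temporary-failure back-off above (delay=e.delay * (job.failed + 1)): the retry delay grows linearly with the number of prior failures. The helper below is illustrative only and not part of the original code.

def retry_delays(base_delay, attempts):
    # attempt n (0-based count of prior failures) is retried after
    # base_delay * (n + 1) seconds, i.e. a linear back-off
    return [base_delay * (failed + 1) for failed in range(attempts)]

# e.g. a JobTemporaryError carrying delay=30, retried four times:
# retry_delays(30, 4) -> [30, 60, 90, 120]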
Example No. 51
def closed(self):
    self._protocol.connectionLost(
        Failure(ConnectionDone("ssh channel closed")))
Example No. 52
    def _handle_error(self, response):
        if response.code < 400:
            return response

        return _read_body(response).addCallback(lambda body: Failure(
            TwitterAPIError(response.code, response=body)))
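
_handle_error relies on a standard Twisted behaviour: returning a Failure from a callback moves the Deferred onto its errback chain. A minimal, self-contained sketch of that pattern (the names below are illustrative, not from the snippet):

from twisted.internet import defer
from twisted.python.failure import Failure

def to_failure(value):
    # returning a Failure (rather than raising) routes the result to errbacks
    return Failure(RuntimeError("bad value: {0}".format(value)))

def report(failure):
    print("errback got: {0}".format(failure.value))

d = defer.succeed(42)
d.addCallback(to_failure)
d.addErrback(report)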
Example No. 53
def errback(reason, *args, **kwargs):
    self.queue.put(Failure(reason))
Example No. 54
    def check_yield_points_inner(*args, **kwargs):
        gen = f(*args, **kwargs)

        last_yield_line_no = gen.gi_frame.f_lineno
        result = None  # type: Any
        while True:
            expected_context = current_context()

            try:
                isFailure = isinstance(result, Failure)
                if isFailure:
                    d = result.throwExceptionIntoGenerator(gen)
                else:
                    d = gen.send(result)
            except (StopIteration, defer._DefGen_Return) as e:
                if current_context() != expected_context:
                    # This happens when the context is lost sometime *after* the
                    # final yield and returning. E.g. we forgot to yield on a
                    # function that returns a deferred.
                    #
                    # We don't raise here as it's perfectly valid for contexts to
                    # change in a function, as long as it sets the correct context
                    # on resolving (which is checked separately).
                    err = (
                        "Function %r returned and changed context from %s to %s,"
                        " in %s between %d and end of func" % (
                            f.__qualname__,
                            expected_context,
                            current_context(),
                            f.__code__.co_filename,
                            last_yield_line_no,
                        ))
                    changes.append(err)
                return getattr(e, "value", None)

            frame = gen.gi_frame

            if isinstance(d, defer.Deferred) and not d.called:
                # This happens if we yield on a deferred that doesn't follow
                # the log context rules without wrapping in a `make_deferred_yieldable`.
                # We raise here as this should never happen.
                if current_context():
                    err = ("%s yielded with context %s rather than sentinel,"
                           " yielded on line %d in %s" % (
                               frame.f_code.co_name,
                               current_context(),
                               frame.f_lineno,
                               frame.f_code.co_filename,
                           ))
                    raise Exception(err)

            # the wrapped function yielded a Deferred: yield it back up to the parent
            # inlineCallbacks().
            try:
                result = yield d
            except Exception:
                # this will fish an earlier Failure out of the stack where possible, and
                # thus is preferable to passing in an exception to the Failure
                # constructor, since it results in less stack-mangling.
                result = Failure()

            if current_context() != expected_context:

                # This happens because the context is lost sometime *after* the
                # previous yield and *after* the current yield. E.g. the
                # deferred we waited on didn't follow the rules, or we forgot to
                # yield on a function between the two yield points.
                #
                # We don't raise here as it's perfectly valid for contexts to
                # change in a function, as long as it sets the correct context
                # on resolving (which is checked separately).
                err = (
                    "%s changed context from %s to %s, happened between lines %d and %d in %s"
                    % (
                        frame.f_code.co_name,
                        expected_context,
                        current_context(),
                        last_yield_line_no,
                        frame.f_lineno,
                        frame.f_code.co_filename,
                    ))
                changes.append(err)

            last_yield_line_no = frame.f_lineno
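
The throwExceptionIntoGenerator() branch above is how a Failure re-raises its wrapped exception at the generator's paused yield. A small standalone sketch of that mechanism (not part of the original code):

from twisted.python.failure import Failure

def worker():
    try:
        yield
    except ValueError as exc:
        print("generator caught: {0}".format(exc))
    yield "recovered"

g = worker()
next(g)  # advance to the first yield
# raises the wrapped ValueError inside the generator and returns whatever
# the generator yields next
value = Failure(ValueError("boom")).throwExceptionIntoGenerator(g)
print(value)  # -> "recovered"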
Example No. 55
def setUp(self):
    super(TestHandleSpiderError, self).setUp()
    self.exception_message = 'Foo'
    self.exception = Exception(self.exception_message)
    self.failure = Failure(self.exception)
Example No. 56
def test_block_on_already_failed(self):
    d = defer.fail(Failure(ZeroDivisionError()))
    self.assertRaises(ZeroDivisionError, block_on, d)
Example No. 57
def test_attacher_error_handler(self):
    # make sure error-handling "does something" that isn't blowing up
    with patch('sys.stdout') as fake_stdout:
        TorState(self.protocol)._attacher_error(
            Failure(RuntimeError("quote")))
Example No. 58
def event_handler():
    yield Failure(PyeeTestError())
Example No. 59
def on_failure(reason):
    return Failure(
        ValueError(
            ("Exception while uploading file to %s. \n" + "Reason:%s.")
            % (remotepath, reason.getTraceback())))
Example No. 60
        def on_torrent_failed(failure):
            self._logger.error("Could not add torrent to LibtorrentManager %s", self.tdef.get_name_as_unicode())

            self.cew_scheduled = False

            return Failure((self, pstate))