Example #1
    def _cbMX(self, answers, domain, cnamesLeft):
        """
        Try to find the mail exchange host for a domain from the given DNS
        records.

        This will attempt to resolve canonical name record results.  It can
        recognize loops and will give up on non-cyclic chains after a specified
        number of lookups.

        @type answers: L{dict} mapping L{bytes} to L{list} of L{IRecord
            <twisted.names.dns.IRecord>} provider
        @param answers: A mapping of record name to record payload.

        @type domain: L{bytes}
        @param domain: A domain name.

        @type cnamesLeft: L{int}
        @param cnamesLeft: The number of unique canonical name records
            left to follow while looking up the mail exchange host.

        @rtype: L{Record_MX <twisted.names.dns.Record_MX>} or L{Failure}
        @return: An MX record for the mail exchange host or a failure if one
            cannot be found.
        """
        # Do this import here so that relaymanager.py doesn't depend on
        # twisted.names, only MXCalculator will.
        from twisted.names import dns, error

        seenAliases = set()
        exchanges = []
        # Examine the answers for the domain we asked about
        pertinentRecords = answers.get(domain, [])
        while pertinentRecords:
            record = pertinentRecords.pop()

            # If it's a CNAME, we'll need to do some more processing
            if record.TYPE == dns.CNAME:

                # Remember that this name was an alias.
                seenAliases.add(domain)

                canonicalName = str(record.name)
                # See if we have some local records which might be relevant.
                if canonicalName in answers:

                    # Make sure it isn't a loop contained entirely within the
                    # results we have here.
                    if canonicalName in seenAliases:
                        return Failure(CanonicalNameLoop(record))

                    pertinentRecords = answers[canonicalName]
                    exchanges = []
                else:
                    if cnamesLeft:
                        # Request more information from the server.
                        return self.getMX(canonicalName, cnamesLeft - 1)
                    else:
                        # Give up.
                        return Failure(CanonicalNameChainTooLong(record))

            # If it's an MX, collect it.
            if record.TYPE == dns.MX:
                exchanges.append((record.preference, record))

        if exchanges:
            exchanges.sort()
            for (preference, record) in exchanges:
                host = str(record.name)
                if host not in self.badMXs:
                    return record
                t = self.clock.seconds() - self.badMXs[host]
                if t >= 0:
                    del self.badMXs[host]
                    return record
            return exchanges[0][1]
        else:
            # Treat no answers the same as an error - jump to the errback to
            # try to look up an A record.  This provides behavior described as
            # a special case in RFC 974 in the section headed I{Interpreting
            # the List of MX RRs}.
            return Failure(
                error.DNSNameError("No MX records for %r" % (domain, )))
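Returning a Failure from a callback, as _cbMX does for CNAME loops and over-long chains, switches a Deferred over to its errback chain just as raising would. A minimal sketch of that mechanism (pick_record is a hypothetical stand-in, not part of the example above):

from twisted.internet import defer
from twisted.python.failure import Failure

def pick_record(records):
    # Returning a Failure plays the same role as raising: the
    # Deferred switches to its errback chain.
    if not records:
        return Failure(KeyError("no MX records"))
    return records[0]

d = defer.succeed([])
d.addCallback(pick_record)
d.addErrback(lambda f: print("errback got:", f.value))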
Example #2
    def test_worker_accepts_builds_after_failure(self):
        """
        If a latent worker fails to substantiate, the worker is still able to accept jobs.
        """
        controller = LatentController(self, 'local')
        config_dict = {
            'builders': [
                BuilderConfig(
                    name="testy",
                    workernames=["local"],
                    factory=BuildFactory(),
                ),
            ],
            'workers': [controller.worker],
            'protocols': {
                'null': {}
            },
            # Disable checks about missing scheduler.
            'multiMaster':
            True,
        }
        master = self.getMaster(config_dict)
        builder_id = self.successResultOf(
            master.data.updates.findBuilderId('testy'))

        controller.auto_stop(True)
        # Trigger a buildrequest
        bsid, brids = self.createBuildrequest(master, [builder_id])

        unclaimed_build_requests = []
        self.successResultOf(
            master.mq.startConsuming(
                lambda key, request: unclaimed_build_requests.append(request),
                ('buildrequests', None, 'unclaimed')))
        # The worker fails to substantiate.
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # The retry logic should only trigger after an exponential backoff
        self.assertEqual(controller.starting, False)

        # advance the time to the point where we should retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)

        # Once the quarantine period expires, the retry logic kicks in and
        # starts a new build on this (the only) worker. We check that a new
        # instance was requested, which indicates that the worker accepted
        # the build.
        self.assertEqual(controller.starting, True)

        # The worker fails to substantiate (again).
        controller.start_instance(
            Failure(TestException("substantiation failed")))
        # Flush the errors logged by the failure.
        self.flushLoggedErrors(TestException)

        # advance the time to the point where we should not retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)
        self.assertEqual(controller.starting, False)
        # advance the time to the point where we should retry
        master.reactor.advance(controller.worker.quarantine_initial_timeout)
        self.assertEqual(controller.starting, True)
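The Failure handed to controller.start_instance above is a common test-double idiom: build the Failure up front and inject it, rather than raising inside the code under test. A hedged sketch of the underlying mechanism, using only a bare Deferred:

from twisted.internet.defer import Deferred
from twisted.python.failure import Failure

d = Deferred()
d.addErrback(lambda f: print("substantiation failed:", f.value))
# Injecting a prebuilt Failure simulates an asynchronous error
# without any exception being raised in the caller.
d.errback(Failure(RuntimeError("substantiation failed")))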
Example #3
    def _route(self, notification, router_data):
        """Blocking APNS call to route the notification

        :param notification: Notification data to send
        :type notification: dict
        :param router_data: Pre-initialized data for this connection
        :type router_data: dict

        """
        router_token = router_data["token"]
        rel_channel = router_data["rel_channel"]
        apns_client = self.apns[rel_channel]
        # chid MUST MATCH THE CHANNELID GENERATED BY THE REGISTRATION SERVICE
        # Currently this value is in hex form.
        payload = {
            "chid": notification.channel_id.hex,
            "ver": notification.version,
        }
        if notification.data:
            payload["body"] = notification.data
            payload["con"] = notification.headers["encoding"]
            payload["enc"] = notification.headers["encryption"]

            if "crypto_key" in notification.headers:
                payload["cryptokey"] = notification.headers["crypto_key"]
            elif "encryption_key" in notification.headers:
                payload["enckey"] = notification.headers["encryption_key"]
            payload['aps'] = router_data.get('aps', {
                "mutable-content": 1,
                "alert": {"title": " ", "body": " "}
            })
        apns_id = str(uuid.uuid4()).lower()
        try:
            apns_client.send(router_token=router_token, payload=payload,
                             apns_id=apns_id)
        except (ConnectionError, AttributeError) as ex:
            self.metrics.increment("updates.client.bridge.apns.connection_err",
                                   self._base_tags)
            self.log.error("Connection Error sending to APNS",
                           log_failure=Failure(ex))
            raise RouterException(
                "Server error",
                status_code=502,
                response_body="APNS returned an error processing request",
                log_exception=False,
            )
        except HTTP20Error as ex:
            self.log.error("HTTP2 Error sending to APNS",
                           log_failure=Failure(ex))
            raise RouterException(
                "Server error",
                status_code=502,
                response_body="APNS returned an error processing request",
            )

        location = "%s/m/%s" % (self.ap_settings.endpoint_url,
                                notification.version)
        self.metrics.increment(
            "updates.client.bridge.apns.%s.sent" %
            router_data["rel_channel"],
            self._base_tags
        )
        return RouterResponse(status_code=201, response_body="",
                              headers={"TTL": notification.ttl,
                                       "Location": location},
                              logged_status=200)
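_route wraps each caught exception in Failure(ex) so the structured logger can render it via log_failure. A minimal sketch of why that helps: a Failure built inside an except block carries the active traceback, ready for structured logging.

from twisted.python.failure import Failure

try:
    raise ConnectionError("APNS connection refused")
except ConnectionError as ex:
    f = Failure(ex)  # Failure() with no argument would also capture
                     # the exception being handled here
    print(f.getTraceback())  # full traceback, ready for structured logs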
Example #4
    def start_router_component(self, id, config, details=None):
        """
        Start an app component in this router worker.

        :param id: The ID of the component to start.
        :type id: str

        :param config: The component configuration.
        :type config: dict

        :param details: Call details.
        :type details: :class:`autobahn.wamp.types.CallDetails`
        """
        self.log.debug("{name}.start_router_component",
                       name=self.__class__.__name__)

        # prohibit starting a component twice
        #
        if id in self.components:
            emsg = "Could not start component: a component with ID '{}'' is already running (or starting)".format(
                id)
            self.log.error(emsg)
            raise ApplicationError('crossbar.error.already_running', emsg)

        started_d = Deferred()

        # check configuration
        #
        try:
            self.personality.check_router_component(self.personality, config)
        except Exception as e:
            emsg = "Invalid router component configuration: {}".format(e)
            self.log.error(emsg)
            raise ApplicationError("crossbar.error.invalid_configuration",
                                   emsg)
        else:
            self.log.debug("Starting {type}-component on router.",
                           type=config['type'])

        # resolve references to other entities
        #
        references = {}
        for ref in config.get('references', []):
            ref_type, ref_id = ref.split(':')
            if ref_type == 'connection':
                if ref_id in self._connections:
                    references[ref] = self._connections[ref_id]
                else:
                    emsg = "cannot resolve reference '{}' - no '{}' with ID '{}'".format(
                        ref, ref_type, ref_id)
                    self.log.error(emsg)
                    raise ApplicationError(
                        "crossbar.error.invalid_configuration", emsg)
            else:
                emsg = "cannot resolve reference '{}' - invalid reference type '{}'".format(
                    ref, ref_type)
                self.log.error(emsg)
                raise ApplicationError("crossbar.error.invalid_configuration",
                                       emsg)

        # create component config
        #
        realm = config.get('realm', None)
        assert isinstance(realm, str)

        extra = config.get('extra', {})
        assert isinstance(extra, dict)

        # forward crossbar node base directory
        extra['cbdir'] = self.config.extra.cbdir

        # allow access to controller session
        controller = self if self.config.extra.expose_controller else None

        # expose an object shared between components
        shared = self.components_shared if self.config.extra.expose_shared else None

        # this is the component configuration provided to the components ApplicationSession
        component_config = ComponentConfig(realm=realm,
                                           extra=extra,
                                           keyring=None,
                                           controller=controller,
                                           shared=shared)

        # define component ctor function
        try:
            create_component = _appsession_loader(config)
        except ApplicationError as e:
            # for convenience, also log failed component loading
            self.log.error('component loading failed', log_failure=Failure())
            if 'No module named' in str(e):
                self.log.error('  Python module search paths:')
                for path in e.kwargs['pythonpath']:
                    self.log.error('    {path}', path=path)
            raise

        # check component extra configuration
        #
        if hasattr(create_component, 'check_config') and callable(
                create_component.check_config) and extra:
            try:
                create_component.check_config(self.personality, extra)
            except Exception as e:
                emsg = 'invalid router component extra configuration: {}'.format(
                    e)
                self.log.debug(emsg)
                raise ApplicationError('crossbar.error.invalid_configuration',
                                       emsg)
            else:
                self.log.debug('starting router component "{component_id}" ..',
                               component_id=id)

        # .. and create and add a WAMP application session to
        # run the component next to the router
        try:
            session = create_component(component_config)

            # any exception spilling out from user code in onXXX handlers is fatal!
            def panic(fail, msg):
                self.log.error(
                    "Fatal error in component: {msg} - {log_failure.value}",
                    msg=msg,
                    log_failure=fail)
                session.disconnect()

            session._swallow_error = panic
        except Exception:
            self.log.error(
                "Component instantiation failed",
                log_failure=Failure(),
            )
            raise

        # Note that 'join' is fired to listeners *before* onJoin runs,
        # so if you do 'yield self.leave()' in onJoin we'll still
        # publish "started" before "stopped".

        def publish_stopped(session, stop_details):
            self.log.info(
                "stopped component: {session} id={session_id}",
                session=class_name(session),
                session_id=session._session_id,
            )
            topic = self._uri_prefix + '.on_component_stop'
            event = {'id': id}
            caller = details.caller if details else None
            self.publish(topic, event, options=PublishOptions(exclude=caller))
            if not started_d.called:
                started_d.errback(Exception("Session left before being ready"))
            return event

        def publish_ready(session):
            """
            when our component is ready, we publish .on_component_ready
            """
            self.log.info(
                "component ready: {session} id={session_id}",
                session=class_name(session),
                session_id=session._session_id,
            )
            topic = self._uri_prefix + '.on_component_ready'
            event = {'id': id}
            self.publish(topic, event)
            started_d.callback(event)
            return event

        def publish_started(session, start_details):
            """
            when our component starts, we publish .on_component_start
            """

            # hook up handlers for "session is ready"
            session.on('ready', publish_ready)

            # publish .on_component_start
            self.log.info(
                "started component: {session} id={session_id}",
                session=class_name(session),
                session_id=session._session_id,
            )
            topic = self._uri_prefix + '.on_component_start'
            event = {'id': id}
            caller = details.caller if details else None
            self.publish(topic, event, options=PublishOptions(exclude=caller))
            return event

        session.on('leave', publish_stopped)
        session.on('join', publish_started)

        self.components[id] = RouterComponent(id, config, session)
        router = self._router_factory.get(realm)
        self._router_session_factory.add(session,
                                         router,
                                         authrole=config.get(
                                             'role', 'anonymous'),
                                         authid=uuid4().__str__())
        self.log.debug(
            "Added component {id} (type '{name}')",
            id=id,
            name=class_name(session),
        )
        return started_d
Example #5
 def maybe_replace_reason(passthrough):
     if delayed_timeout.active():
         return passthrough
     return Failure(reason)
Example #6
 def _get_filenode(self, encoded_path_u):
     try:
         d = self._upload_dirnode.get(encoded_path_u)
     except KeyError:
         return Failure()
     return d
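The no-argument Failure() above only works while an exception is being handled; called outside an except block it raises twisted.python.failure.NoCurrentExceptionError. A small sketch of that contract:

from twisted.python.failure import Failure

def get(mapping, key):
    try:
        return mapping[key]
    except KeyError:
        # Failure() snapshots the in-flight KeyError and its traceback
        return Failure()

print(get({}, "missing"))  # a Failure wrapping KeyError('missing')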
Example #7
    def handle_rpc(self, request):
        initial_ctx = TwistedHttpMethodContext(self.http_transport, request,
                                 self.http_transport.app.out_protocol.mime_type)

        if _has_fd(request.content):
            f = request.content

            # it's best to avoid empty mappings.
            if fstat(f.fileno()).st_size == 0:
                initial_ctx.in_string = ['']
            else:
                initial_ctx.in_string = [mmap(f.fileno(), 0)]
        else:
            request.content.seek(0)
            initial_ctx.in_string = [request.content.read()]

        initial_ctx.transport.file_info = _get_file_info(initial_ctx)

        contexts = self.http_transport.generate_contexts(initial_ctx)
        p_ctx, others = contexts[0], contexts[1:]

        p_ctx.active = True
        p_ctx.out_stream = request
        # TODO: Rate limiting

        if p_ctx.in_error:
            return self.handle_rpc_error(p_ctx, others, p_ctx.in_error, request)

        else:
            self.http_transport.get_in_object(p_ctx)

            if p_ctx.in_error:
                return self.handle_rpc_error(p_ctx, others, p_ctx.in_error,
                                                                        request)

            self.http_transport.get_out_object(p_ctx)
            if p_ctx.out_error:
                return self.handle_rpc_error(p_ctx, others, p_ctx.out_error,
                                                                        request)

        ret = p_ctx.out_object[0]
        retval = NOT_DONE_YET
        if isinstance(ret, Deferred):
            ret.addCallback(_cb_deferred, request, p_ctx, others, resource=self)
            ret.addErrback(_eb_deferred, request, p_ctx, others, resource=self)
            ret.addErrback(log_and_let_go, logger)

        elif isinstance(ret, PushBase):
            self.http_transport.init_root_push(ret, p_ctx, others)

        else:
            try:
                retval = _cb_deferred(p_ctx.out_object, request, p_ctx, others,
                                                                 self, cb=False)
            except Exception as e:
                logger_server.exception(e)
                try:
                    _eb_deferred(Failure(), request, p_ctx, others,
                                                                  resource=self)
                except Exception as e:
                    logger_server.exception(e)

        return retval
Example #8
 def _loseConnection(self):
     self.protocol.connectionLost(Failure(error.ConnectionDone("Bye.")))
     self.foreignProtocol.transport.loseConnection()
Example #9
 def logException():
     self.failures.append(Failure())
Example #10
    def lineReceived(self, line, request_counter):
        if self.expect_tcp_proxy_protocol_header:
            # This flag may be set only for TCP transport AND when TCP_PROXY_PROTOCOL
            # is enabled in server config. Then we expect the first line of the stream
            # may contain proxy metadata.

            # We don't expect this header during this session anymore
            self.expect_tcp_proxy_protocol_header = False

            if line.startswith('PROXY'):
                self.proxied_ip = line.split()[2]

                # Let's process next line
                request_counter.decrease()
                return

        try:
            message = json.loads(line)
        except:
            #self.writeGeneralError("Cannot decode message '%s'" % line)
            request_counter.finish()
            raise custom_exceptions.ProtocolException(
                "Cannot decode message '%s'" % line.strip())

        if self.factory.debug:
            log.debug("> %s" % message)

        msg_id = message.get('id', 0)
        msg_method = message.get('method')
        msg_params = message.get('params')
        msg_result = message.get('result')
        msg_error = message.get('error')

        if msg_method:
            # It's an RPC call or notification
            try:
                result = self.event_handler._handle_event(msg_method,
                                                          msg_params,
                                                          connection_ref=self)
                if result is None and msg_id is not None:
                    # The event handler must return a Deferred or raise an
                    # exception for an RPC request
                    raise custom_exceptions.MethodNotFoundException(
                        "Event handler cannot process method '%s'" %
                        msg_method)
            except:
                failure = Failure()
                self.process_failure(failure, msg_id, msg_method, msg_params,
                                     request_counter)

            else:
                if msg_id is None:
                    # It's notification, don't expect the response
                    request_counter.decrease()
                else:
                    # It's an RPC call
                    result.addCallback(self.process_response, msg_id,
                                       msg_method, msg_params, request_counter)
                    result.addErrback(self.process_failure, msg_id, msg_method,
                                      msg_params, request_counter)

        elif msg_id:
            # It's an RPC response
            # Perform lookup to the table of waiting requests.
            request_counter.decrease()

            try:
                meta = self.lookup_table[msg_id]
                del self.lookup_table[msg_id]
            except KeyError:
                # When deferred object for given message ID isn't found, it's an error
                raise custom_exceptions.ProtocolException(
                    "Lookup for deferred object for message ID '%s' failed." %
                    msg_id)

            # If there's an error, handle it as errback
            # If both result and error are null, handle it as a success with blank result
            if msg_error is not None:
                meta['defer'].errback(
                    custom_exceptions.RemoteServiceException(
                        msg_error[0], msg_error[1], msg_error[2]))
            else:
                meta['defer'].callback(msg_result)

        else:
            request_counter.decrease()
            raise custom_exceptions.ProtocolException(
                "Cannot handle message '%s'" % line)
Example #11
 def loseConnection(self):
     if self.connected:
         self.connected = False
         self.protocol.connectionLost(Failure(error.ConnectionDone("Bye.")))
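Examples #8 and #11 both synthesize a clean shutdown by handing connectionLost a Failure wrapping ConnectionDone. A hedged sketch of the receiving side, with a stand-in protocol, showing how Failure.check() separates a clean close from a real error:

from twisted.internet import error, protocol
from twisted.python.failure import Failure

class Peer(protocol.Protocol):
    def connectionLost(self, reason):
        # reason is a Failure; check() matches against exception types
        if reason.check(error.ConnectionDone):
            print("clean close")
        else:
            print("lost:", reason.value)

Peer().connectionLost(Failure(error.ConnectionDone("Bye.")))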
Example #12
 def cb(reply, d=d):
     if reply == "EXCEPTION":
         d.errback(Failure(self._closedEx))
     else:
         d.callback(reply)
Example #13
def cb_fail(value, arg1, arg2):
    return Failure(TypeError())
Example #14
    def _render(self, request):
        time_in = utils.now()
        if not self._check_headers(request):
            self._render_error(Failure(InvalidHeaderError()), request, None)
            return server.NOT_DONE_YET
        session = request.getSession()
        session_id = session.uid
        finished_deferred = request.notifyFinish()

        if self._use_authentication:
            # if this is a new session, send a new secret and set the expiration
            # otherwise, session.touch()
            if self._initialize_session(session_id):

                def expire_session():
                    self._unregister_user_session(session_id)

                session.startCheckingExpiration()
                session.notifyOnExpire(expire_session)
                message = "OK"
                request.setResponseCode(200)
                self._set_headers(request, message, True)
                self._render_message(request, message)
                return server.NOT_DONE_YET
            else:
                session.touch()

        request.content.seek(0, 0)
        content = request.content.read()
        try:
            parsed = jsonrpclib.loads(content)
        except ValueError:
            log.warning("Unable to decode request json")
            self._render_error(
                JSONRPCError(None, JSONRPCError.CODE_PARSE_ERROR), request,
                None)
            return server.NOT_DONE_YET

        request_id = None
        try:
            function_name = parsed.get('method')
            args = parsed.get('params', {})
            request_id = parsed.get('id', None)
            token = parsed.pop('hmac', None)
        except AttributeError as err:
            log.warning(err)
            self._render_error(
                JSONRPCError(None, code=JSONRPCError.CODE_INVALID_REQUEST),
                request, request_id)
            return server.NOT_DONE_YET

        reply_with_next_secret = False
        if self._use_authentication:
            try:
                self._verify_token(session_id, parsed, token)
            except InvalidAuthenticationToken as err:
                log.warning("API validation failed")
                self._render_error(
                    JSONRPCError.create_from_exception(
                        err,
                        code=JSONRPCError.CODE_AUTHENTICATION_ERROR,
                        traceback=format_exc()), request, request_id)
                return server.NOT_DONE_YET
            request.addCookie("TWISTED_SESSION", session_id)
            self._update_session_secret(session_id)
            reply_with_next_secret = True

        try:
            fn = self._get_jsonrpc_method(function_name)
        except UnknownAPIMethodError as err:
            log.warning('Failed to get function %s: %s', function_name, err)
            self._render_error(
                JSONRPCError(None, JSONRPCError.CODE_METHOD_NOT_FOUND),
                request, request_id)
            return server.NOT_DONE_YET
        except NotAllowedDuringStartupError:
            log.warning('Function not allowed during startup: %s',
                        function_name)
            self._render_error(
                JSONRPCError(
                    "This method is unavailable until the daemon is fully started",
                    code=JSONRPCError.CODE_INVALID_REQUEST), request,
                request_id)
            return server.NOT_DONE_YET

        if args == EMPTY_PARAMS or args == []:
            _args, _kwargs = (), {}
        elif isinstance(args, dict):
            _args, _kwargs = (), args
        elif len(args) == 1 and isinstance(args[0], dict):
            # TODO: this is for backwards compatibility. Remove this once API and UI are updated
            # TODO: also delete EMPTY_PARAMS then
            _args, _kwargs = (), args[0]
        elif len(args) == 2 and isinstance(args[0], list) and isinstance(
                args[1], dict):
            _args, _kwargs = args
        else:
            raise ValueError('invalid args format')

        params_error, erroneous_params = self._check_params(fn, _args, _kwargs)
        if params_error is not None:
            params_error_message = '{} for {} command: {}'.format(
                params_error, function_name, ', '.join(erroneous_params))
            log.warning(params_error_message)
            self._render_error(
                JSONRPCError(params_error_message,
                             code=JSONRPCError.CODE_INVALID_PARAMS), request,
                request_id)
            return server.NOT_DONE_YET

        d = defer.maybeDeferred(fn, self, *_args, **_kwargs)

        # finished_deferred will callback when the request is finished
        # and errback if something went wrong. If the errback is
        # called, cancel the deferred stack. This is to prevent
        # request.finish() from being called on a closed request.
        finished_deferred.addErrback(self._handle_dropped_request, d,
                                     function_name)

        d.addCallback(self._callback_render, request, request_id,
                      reply_with_next_secret)
        d.addErrback(trap, ConnectionDone, ConnectionLost,
                     defer.CancelledError)
        d.addErrback(self._render_error, request, request_id)
        d.addBoth(lambda _: log.debug("%s took %f", function_name,
                                      (utils.now() - time_in).total_seconds()))
        return server.NOT_DONE_YET
Example #15
 def checkConnLost(self):
     self.connsLost += 1
     if self.connsLost >= 2:
         self.disconnecting = True
         self.disconnected = True
         self.proto.connectionLost(Failure(main.CONNECTION_DONE))
Example #16
    def search(
        self,
        filterText=None,
        filterObject=None,
        attributes=(),
        scope=None,
        derefAliases=None,
        sizeLimit=0,
        sizeLimitIsNonFatal=False,
        timeLimit=0,
        typesOnly=0,
        callback=None,
        controls=None,
        return_controls=False,
    ):
        self._checkState()
        d = defer.Deferred()
        if filterObject is None and filterText is None:
            filterObject = pureldap.LDAPFilterMatchAll
        elif filterObject is None and filterText is not None:
            filterObject = ldapfilter.parseFilter(filterText)
        elif filterObject is not None and filterText is None:
            pass
        elif filterObject is not None and filterText is not None:
            f = ldapfilter.parseFilter(filterText)
            filterObject = pureldap.LDAPFilter_and((f, filterObject))

        if scope is None:
            scope = pureldap.LDAP_SCOPE_wholeSubtree
        if derefAliases is None:
            derefAliases = pureldap.LDAP_DEREF_neverDerefAliases

        if attributes is None:
            attributes = ["1.1"]

        results = []
        if callback is None:
            cb = results.append
        else:
            cb = callback
        try:
            op = pureldap.LDAPSearchRequest(
                baseObject=self.dn.getText(),
                scope=scope,
                derefAliases=derefAliases,
                sizeLimit=sizeLimit,
                timeLimit=timeLimit,
                typesOnly=typesOnly,
                filter=filterObject,
                attributes=attributes,
            )
            dsend = self.client.send_multiResponse_ex(
                op,
                controls,
                self._cbSearchMsg,
                d,
                cb,
                complete=not attributes,
                sizeLimitIsNonFatal=sizeLimitIsNonFatal,
            )
        except ldapclient.LDAPClientConnectionLostException:
            d.errback(Failure())
        else:
            if callback is None:
                if return_controls:
                    d.addCallback(lambda ctls: (results, ctls))
                else:
                    d.addCallback(lambda dummy: results)

            def rerouteerr(e):
                d.errback(e)
                # returning None will stop the error
                # from being propagated and logged.

            dsend.addErrback(rerouteerr)
        return d
Example #17
 def _get_metadata(self, encoded_path_u):
     try:
         d = self._upload_dirnode.get_metadata_for(encoded_path_u)
     except KeyError:
         return Failure()
     return d
Example #18
class NotificationErrorsTestCase(TestWithDatabase):
    """Test Notification error handling.

    These tests will throw notifications at the server that will raise
    exceptions. Some events don't trigger exceptions in themselves, but an
    exception is created at broadcast.

    """

    induced_error = Failure(ValueError("Test error"))

    @defer.inlineCallbacks
    def setUp(self):
        yield super(NotificationErrorsTestCase, self).setUp()
        self.event_sent = defer.Deferred()
        self.notifier = FakeNotifier(event_sent_deferred=self.event_sent)
        self.patch(notifier, 'get_notifier', lambda: self.notifier)
        self.fake_reactor = DummyReactor()

        self.ssfactory = StorageServerFactory(reactor=self.fake_reactor)

        protocol = StorageServer()
        protocol.factory = self.ssfactory
        protocol.working_caps = ["volumes", "generations"]
        protocol.session_id = uuid.uuid4()
        self.patch(self.ssfactory.content, 'get_user_by_id',
                   lambda *a: self.induced_error)
        self.handler = self.add_memento_handler(logger)

    @defer.inlineCallbacks
    def check_event(self, event, **kwargs):
        """Test an error in node update."""
        self.notifier.send_event(event)
        yield self.event_sent

        actual = self.handler.records_by_level[logging.ERROR]
        self.assertEqual(len(actual), 1)
        expected = '%s in notification %r while calling deliver_%s(**%r)' % (
            self.induced_error.value, event, event.event_type, kwargs)
        self.assertEqual(actual[0].getMessage(), expected)

    def test_share_created(self):
        """Test the share events."""
        event_args = (uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW,
                      False)
        return self.check_event(ShareCreated(*event_args))

    def test_share_deleted(self):
        """Test the share events."""
        event_args = (uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW,
                      False)
        return self.check_event(ShareDeleted(*event_args))

    def test_share_declined(self):
        """Test the share events."""
        event_args = (uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW,
                      False)
        return self.check_event(ShareDeclined(*event_args))

    def test_share_accepted(self):
        """Test the share events."""
        event_args = (uuid.uuid4(), u"name", uuid.uuid4(), 1, 2, Share.VIEW,
                      True)
        return self.check_event(ShareAccepted(*event_args),
                                recipient_id=u'test')

    def test_udf_delete(self):
        """Test UDF Delete."""
        return self.check_event(UDFDelete(1, uuid.uuid4(), uuid.uuid4()))

    def test_udf_create(self):
        """Test UDF Create."""
        return self.check_event(
            UDFCreate(1, uuid.uuid4(), uuid.uuid4(), u"path", uuid.uuid4()))

    def test_new_volume_gen(self):
        """Test the new gen for volume events."""
        event = VolumeNewGeneration(1, uuid.uuid4(), 77, uuid.uuid4())
        return self.check_event(event)
Example #19
 def addLogWithException(self, why, logprefix=""):
     return self.addLogWithFailure(Failure(why), logprefix)
Example #20
    def _handle_send_response(self, result, payloadsByTopicPart,
                              deferredsByTopicPart):
        """Handle the response from our client to our send_produce_request

        This is a bit complex. Failures can happen in a few ways:
          1) The client sent an empty list, False, None or some similar thing
             as the result, but we were expecting real responses.
          2) The client had a failure before it even tried sending any requests
             to any brokers.
             a) Kafka error: See if we can retry the whole request
             b) Non-kafka: Figure it's a programming error, fail all deferreds
          3) The client sent all the requests (it's all or none) to the brokers
             but one or more requests failed (timed out before receiving a
             response, or the brokerclient threw some sort of exception on
             send). In this case, the client throws FailedPayloadsError, and
             attaches the responses (NOTE: some can have errors!) and the
             payloads where the send itself failed to the exception.
          4) The client sent all the requests, all responses were received, but
             the Kafka broker indicated an error with servicing the request on
             some of the responses.
        """
        def _deliver_result(d_list, result=None):
            """Possibly callback each deferred in a list with single result"""
            for d in d_list:
                if not isinstance(d, Deferred):
                    # nested list...
                    _deliver_result(d, result)
                else:
                    # We check d.called since the request could have been
                    # cancelled while we waited for the response
                    if not d.called:
                        d.callback(result)

        def _do_retry(payloads):
            # We use 'fail_on_error=False' because we want our client to
            # process every response that comes back from the brokers so
            # we can determine which requests were successful, and which
            # failed for retry
            d = self.client.send_produce_request(payloads,
                                                 acks=self.req_acks,
                                                 timeout=self.ack_timeout,
                                                 fail_on_error=False)
            self._req_attempts += 1
            # add our handlers
            d.addBoth(self._handle_send_response, payloadsByTopicPart,
                      deferredsByTopicPart)
            return d

        def _cancel_retry(failure, dc):
            # Cancel the retry callLater and pass-thru the failure
            dc.cancel()
            # cancel all the top-level deferreds associated with the request
            _deliver_result(deferredsByTopicPart.values(), failure)
            return failure

        def _check_retry_payloads(failed_payloads_with_errs):
            """Check our retry count and retry after a delay or errback

            If we have more retries to try, create a deferred that will fire
            with the result of delayed retry. If not, errback the remaining
            deferreds with failure

            Params:
            failed_payloads_with_errs - list of (payload, failure) tuples
            """
            # Do we have retries left?
            if self._req_attempts >= self._max_attempts:
                # No, no retries left, fail each failed_payload with its
                # associated failure
                for p, f in failed_payloads_with_errs:
                    t_and_p = TopicAndPartition(p.topic, p.partition)
                    _deliver_result(deferredsByTopicPart[t_and_p], f)
                return
            # Retries remain!  Schedule one...
            d = Deferred()
            dc = self._get_clock().callLater(self._retry_interval, d.callback,
                                             [p for p, f in failed_payloads_with_errs])
            self._retry_interval *= self.RETRY_INTERVAL_FACTOR
            # Cancel the callLater when request is cancelled before it fires
            d.addErrback(_cancel_retry, dc)
            # Reset the topic metadata for all topics which had failed_requests
            # where the failures were of the kind UnknownTopicOrPartitionError
            # or NotLeaderForPartitionError, since those indicate our client's
            # metadata is out of date.
            reset_topics = []

            def _check_for_meta_error(tup):
                payload, failure = tup
                if (isinstance(failure, NotLeaderForPartitionError)
                        or isinstance(failure, UnknownTopicOrPartitionError)):
                    reset_topics.append(payload.topic)

            for tup in failed_payloads_with_errs:
                _check_for_meta_error(tup)
            if reset_topics:
                self.client.reset_topic_metadata(*reset_topics)
            d.addCallback(_do_retry)
            return d

        # The payloads we need to retry, if we still can..
        failed_payloads = []
        # In the case we are sending requests without requiring acks, the
        # brokerclient will immediately callback() the deferred upon send with
        # None. In that case, we just iterate over all the deferreds in
        # deferredsByTopicPart and callback them with None
        # If we are expecting responses/acks, and we get an empty result, we
        # callback with a Failure of NoResponseError
        if not result:
            # Success, but no results, is that what we're expecting?
            if self.req_acks == PRODUCER_ACK_NOT_REQUIRED:
                result = None
            else:
                # We got no result, but we were expecting one? Fail everything!
                result = Failure(NoResponseError())
            _deliver_result(deferredsByTopicPart.values(), result)
            return
        elif isinstance(result, Failure):
            # Failure!  Was it total, or partial?
            if not result.check(FailedPayloadsError):
                # Total failure of some sort!
                # The client was unable to send the request at all. If it's
                # a KafkaError (probably Leader/Partition unavailable), retry
                if result.check(KafkaError):
                    # Yep, a Kafka error. Set failed_payloads, and we'll retry
                    # them all below. Set failure for errback to callers if we
                    # are all out of retries
                    failure, result = result, []  # no successful results, retry
                    failed_payloads = [(p, failure)
                                       for p in payloadsByTopicPart.values()]
                else:
                    # Was the request cancelled?
                    if not result.check(tid_CancelledError):
                        # Uh Oh, programming error? Log it!
                        log.error(
                            "Unexpected failure: %r in "
                            "_handle_send_response", result)
                    # Cancelled, or programming error, we fail the requests
                    _deliver_result(deferredsByTopicPart.values(), result)
                    return
            else:
                # FailedPayloadsError: This means that some/all of the
                # requests to a/some brokerclients failed to send.
                # Pull the successful responses and the failed_payloads off
                # the exception and handle them below. Preserve the
                # FailedPayloadsError as 'failure'
                failure = result
                result = failure.value.args[0]
                failed_payloads = failure.value.args[1]

        # Do we have results? Iterate over them and if the response indicates
        # success, then callback the associated deferred. If the response
        # indicates an error, then setup that request for retry.
        # NOTE: In this case, each failed_payload gets its own error...
        for res in result:
            t_and_p = TopicAndPartition(res.topic, res.partition)
            t_and_p_err = check_error(res, raiseException=False)
            if not t_and_p_err:
                # Success for this topic/partition
                d_list = deferredsByTopicPart[t_and_p]
                _deliver_result(d_list, res)
            else:
                p = payloadsByTopicPart[t_and_p]
                failed_payloads.append((p, t_and_p_err))

        # Were there any failed requests to possibly retry?
        if failed_payloads:
            return _check_retry_payloads(failed_payloads)
        return
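Much of the branching above rests on Failure.check(), which returns the first matching exception type, or None, without re-raising. A minimal sketch using only built-in exceptions:

from twisted.python.failure import Failure

f = Failure(TimeoutError("no ack from broker"))
if f.check(TimeoutError, ConnectionError):
    print("retryable:", f.value)
else:
    print("fatal:", f.value)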
Example #21
def _cb_deferred(ret, request, p_ctx, others, resource, cb=True):
    ### set response headers
    resp_code = p_ctx.transport.resp_code

    # If user code set its own response code, don't touch it.
    if resp_code is None:
        resp_code = HTTP_200
    request.setResponseCode(int(resp_code[:3]))

    _set_response_headers(request, p_ctx.transport.resp_headers)

    ### normalize response data
    om = p_ctx.descriptor.out_message
    single_class = None
    if cb:
        if p_ctx.descriptor.is_out_bare():
            p_ctx.out_object = [ret]

        elif (not issubclass(om, ComplexModelBase)) or len(om._type_info) <= 1:
            p_ctx.out_object = [ret]
            if len(om._type_info) == 1:
                single_class, = om._type_info.values()
        else:
            p_ctx.out_object = ret
    else:
        p_ctx.out_object = ret

    ### start response
    retval = NOT_DONE_YET

    if isinstance(ret, PushBase):
        resource.http_transport.init_root_push(ret, p_ctx, others)

    elif ((isclass(om) and issubclass(om, File)) or
          (isclass(single_class) and issubclass(single_class, File))) and \
         isinstance(p_ctx.out_protocol, HttpRpc) and \
                                      getattr(ret, 'abspath', None) is not None:

        file = static.File(ret.abspath,
                        defaultType=str(ret.type) or 'application/octet-stream')
        retval = _render_file(file, request)
        if retval != NOT_DONE_YET and cb:
            request.write(retval)
            request.finish()
            p_ctx.close()
        else:
            def _close_only_context(ret):
                p_ctx.close()

            request.notifyFinish() \
                .addCallback(_close_only_context) \
                .addErrback(_eb_request_finished, request, p_ctx) \
                .addErrback(log_and_let_go, logger)

    else:
        ret = resource.http_transport.get_out_string(p_ctx)

        if not isinstance(ret, Deferred):
            producer = Producer(p_ctx.out_string, request)
            producer.deferred \
                .addCallback(_cb_request_finished, request, p_ctx) \
                .addErrback(_eb_request_finished, request, p_ctx) \
                .addErrback(log_and_let_go, logger)

            try:
                request.registerProducer(producer, False)
            except Exception as e:
                logger_server.exception(e)
                try:
                    _eb_deferred(Failure(), request, p_ctx, others, resource)
                except Exception as e:
                    logger_server.exception(e)
                    raise

        else:
            def _cb(ret):
                if isinstance(ret, Deferred):
                    return ret \
                        .addCallback(_cb) \
                        .addErrback(_eb_request_finished, request, p_ctx) \
                        .addErrback(log_and_let_go, logger)
                else:
                    return _cb_request_finished(ret, request, p_ctx)

            ret \
                .addCallback(_cb) \
                .addErrback(_eb_request_finished, request, p_ctx) \
                .addErrback(log_and_let_go, logger)

    process_contexts(resource.http_transport, others, p_ctx)

    return retval
Example #22
 def errReceived(self, data):
     if not self.got_output_deferred.called:
         self.got_output_deferred.errback(Failure(RuntimeError(data)))
Example #23
 def test_on_disconnect(self):
     self.assertIn(self.conn.proto1,
                   self.manager1.connections.handshaking_peers)
     self.conn.disconnect(Failure(Exception('testing')))
     self.assertNotIn(self.conn.proto1,
                      self.manager1.connections.handshaking_peers)
Example #24
 def requestAvatarId(self, credentials):
     required_token = self.get_auth_token()
     precondition(isinstance(required_token, bytes))
     if credentials.equals(required_token):
         return succeed(ANONYMOUS)
     return fail(Failure(UnauthorizedLogin()))
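requestAvatarId returns fail(Failure(UnauthorizedLogin())) instead of raising: defer.fail() is the asynchronous counterpart of a raise, producing an already-errbacked Deferred. A hedged sketch with a stand-in exception in place of twisted.cred's UnauthorizedLogin:

from twisted.internet.defer import fail, succeed
from twisted.python.failure import Failure

def check_token(token, required=b"secret"):
    if token == required:
        return succeed("anonymous")
    return fail(Failure(ValueError("bad token")))

check_token(b"nope").addErrback(lambda f: print("rejected:", f.value))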
Example #25
    def startBuild(self, build_status, expectations, slavebuilder):
        """This method sets up the build, then starts it by invoking the
        first Step. It returns a Deferred which will fire when the build
        finishes. This Deferred is guaranteed to never errback."""

        # we are taking responsibility for watching the connection to the
        # remote. This responsibility was held by the Builder until our
        # startBuild was called, and will not return to them until we fire
        # the Deferred returned by this method.

        log.msg("%s.startBuild" % self)
        self.build_status = build_status
        # now that we have a build_status, we can set properties
        self.setupProperties()
        self.setupSlaveBuilder(slavebuilder)
        slavebuilder.slave.updateSlaveStatus(buildStarted=build_status)

        # then narrow SlaveLocks down to the right slave
        self.locks = [(l.getLock(self.slavebuilder.slave), a)
                      for l, a in self.locks]
        self.remote = slavebuilder.remote
        self.remote.notifyOnDisconnect(self.lostRemote)

        metrics.MetricCountEvent.log('active_builds', 1)

        d = self.deferred = defer.Deferred()

        def _uncount_build(res):
            metrics.MetricCountEvent.log('active_builds', -1)
            return res

        d.addBoth(_uncount_build)

        def _release_slave(res, slave, bs):
            self.slavebuilder.buildFinished()
            slave.updateSlaveStatus(buildFinished=bs)
            return res

        d.addCallback(_release_slave, self.slavebuilder.slave, build_status)

        try:
            self.setupBuild(expectations)  # create .steps
        except:
            # the build hasn't started yet, so log the exception as a point
            # event instead of flunking the build.
            # TODO: associate this failure with the build instead.
            # this involves doing
            # self.build_status.buildStarted() from within the exception
            # handler
            log.msg("Build.setupBuild failed")
            log.err(Failure())
            self.builder.builder_status.addPointEvent(
                ["setupBuild", "exception"])
            self.finished = True
            self.results = EXCEPTION
            self.deferred = None
            d.callback(self)
            return d

        self.build_status.buildStarted(self)
        self.acquireLocks().addCallback(self._startBuild_2)
        return d
Example #26
    @defer.inlineCallbacks
    def startBuild(self, build_status, workerforbuilder):
        """This method sets up the build, then starts it by invoking the
        first Step. It returns a Deferred which will fire when the build
        finishes. This Deferred is guaranteed to never errback."""
        self.workerforbuilder = workerforbuilder
        self.conn = None

        worker = workerforbuilder.worker

        log.msg("%s.startBuild" % self)

        self.build_status = build_status
        # TODO: this will go away when build collapsing is implemented; until
        # then we just assign the build to the first buildrequest
        brid = self.requests[0].id
        builderid = yield self.getBuilderId()
        self.buildid, self.number = \
            yield self.master.data.updates.addBuild(
                builderid=builderid,
                buildrequestid=brid,
                workerid=worker.workerid)

        self.stopBuildConsumer = yield self.master.mq.startConsuming(self.controlStopBuild,
                                                                     ("control", "builds",
                                                                      str(self.buildid),
                                                                      "stop"))
        self.setupOwnProperties()

        # then narrow WorkerLocks down to the right worker
        self.locks = [(l.getLock(workerforbuilder.worker), a)
                      for l, a in self.locks]
        metrics.MetricCountEvent.log('active_builds', 1)

        # make sure properties are available to people listening on 'new'
        # events
        yield self._flushProperties(None)
        self.build_status.buildStarted(self)
        yield self.master.data.updates.setBuildStateString(self.buildid, u'starting')
        yield self.master.data.updates.generateNewBuildEvent(self.buildid)

        try:
            self.setupBuild()  # create .steps
        except Exception:
            yield self.buildPreparationFailure(Failure(), "worker_prepare")
            self.buildFinished(['Build.setupBuild', 'failed'], EXCEPTION)
            return

        # flush properties in the beginning of the build
        yield self._flushProperties(None)

        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'preparing worker')
        try:
            ready_or_failure = yield workerforbuilder.prepare(self)
        except Exception:
            ready_or_failure = Failure()

        # If prepare returns True then it is ready and we start a build
        # If it returns failure then we don't start a new build.
        if ready_or_failure is not True:
            yield self.buildPreparationFailure(ready_or_failure, "worker_prepare")
            if self.stopped:
                self.buildFinished(["worker", "cancelled"], self.results)
            elif isinstance(ready_or_failure, Failure) and ready_or_failure.check(interfaces.LatentWorkerCannotSubstantiate):
                self.buildFinished(["worker", "cannot", "substantiate"], EXCEPTION)
            else:
                self.buildFinished(["worker", "not", "available"], RETRY)
            return

        # ping the worker to make sure they're still there. If they've
        # fallen off the map (due to a NAT timeout or something), this
        # will fail in a couple of minutes, depending upon the TCP
        # timeout.
        #
        # TODO: This can unnecessarily suspend the starting of a build, in
        # situations where the worker is live but is pushing lots of data to
        # us in a build.
        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'pinging worker')
        log.msg("starting build %s.. pinging the worker %s"
                % (self, workerforbuilder))
        try:
            ping_success_or_failure = yield workerforbuilder.ping()
        except Exception:
            ping_success_or_failure = Failure()

        if ping_success_or_failure is not True:
            yield self.buildPreparationFailure(ping_success_or_failure, "worker_ping")
            self.buildFinished(["worker", "not", "pinged"], RETRY)
            return

        self.conn = workerforbuilder.worker.conn

        # To retrieve the builddir property, the worker must be attached as we
        # depend on its path_module. Latent workers become attached only after
        # preparing them, so we can't setup the builddir property earlier like
        # the rest of properties
        self.setupWorkerBuildirProperty(workerforbuilder)
        self.setupWorkerForBuilder(workerforbuilder)
        self.subs = self.conn.notifyOnDisconnect(self.lostRemote)

        # tell the remote that it's starting a build, too
        try:
            yield self.conn.remoteStartBuild(self.builder.name)
        except Exception:
            yield self.buildPreparationFailure(Failure(), "start_build")
            self.buildFinished(["worker", "not", "building"], RETRY)
            return

        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'acquiring locks')
        yield self.acquireLocks()

        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           u'building')

        # start the sequence of steps
        self.startNextStep()
Example #27
        def on_torrent_failed(failure):
            self._logger.error("Could not add torrent to LibtorrentManager %s", self.tdef.get_name_as_unicode())

            self.cew_scheduled = False

            return Failure((self, pstate))
Example #28
 def doTry(f):
     try:
         f()
     except (LookupError, TypeError, ValueError):
         # a plausible set of exceptions, so we don't catch implausible ones
         err('erroneous', Failure())
Example #29
    @defer.inlineCallbacks
    def startStep(self, remote):
        self.remote = remote

        # create and start the step, noting that the name may be altered to
        # ensure uniqueness
        self.stepid, self.number, self.name = yield self.master.data.updates.newStep(
            buildid=self.build.buildid,
            name=util.ascii2unicode(self.name))
        yield self.master.data.updates.startStep(self.stepid)

        # convert all locks into their real form
        self.locks = [(self.build.builder.botmaster.getLockFromLockAccess(access), access)
                      for access in self.locks]
        # then narrow SlaveLocks down to the slave that this build is being
        # run on
        self.locks = [(l.getLock(self.build.slavebuilder.slave), la)
                      for l, la in self.locks]

        for l, la in self.locks:
            if l in self.build.locks:
                log.msg("Hey, lock %s is claimed by both a Step (%s) and the"
                        " parent Build (%s)" % (l, self, self.build))
                raise RuntimeError("lock claimed by both Step and Build")

        try:
            # set up locks
            yield self.acquireLocks()

            if self.stopped:
                raise BuildStepCancelled

            # check doStepIf
            if isinstance(self.doStepIf, bool):
                doStep = self.doStepIf
            else:
                doStep = yield self.doStepIf(self)

            # render renderables in parallel
            renderables = []
            accumulateClassList(self.__class__, 'renderables', renderables)

            def setRenderable(res, attr):
                setattr(self, attr, res)

            dl = []
            for renderable in renderables:
                d = self.build.render(getattr(self, renderable))
                d.addCallback(setRenderable, renderable)
                dl.append(d)
            yield defer.gatherResults(dl)
            self.rendered = True
            # we describe ourselves only when renderables are interpolated
            self.realUpdateSummary()

            # run -- or skip -- the step
            if doStep:
                try:
                    self._running = True
                    self.results = yield self.run()
                finally:
                    self._running = False
            else:
                self.results = SKIPPED

        # NOTE: all of these `except` blocks must set self.results immediately!
        except BuildStepCancelled:
            self.results = CANCELLED

        except BuildStepFailed:
            self.results = FAILURE

        except BuildSlaveTooOldError:
            self.results = EXCEPTION

        except error.ConnectionLost:
            self.results = RETRY

        except Exception:
            self.results = EXCEPTION
            why = Failure()
            log.err(why, "BuildStep.failed; traceback follows")
            yield self.addLogWithFailure(why)

        if self.stopped and self.results != RETRY:
            # We handle this specially because we don't care about
            # the return code of an interrupted command; we know
            # that this should just be an exception due to the interrupt.
            # At the same time we must respect the RETRY status because it's
            # used to retry a build interrupted for some other reason, for
            # example because the slave was lost
            if self.results != CANCELLED:
                self.results = EXCEPTION

        # update the summary one last time, make sure that completes,
        # and then don't update it any more.
        self.realUpdateSummary()
        yield self.realUpdateSummary.stop()

        yield self.master.data.updates.finishStep(self.stepid, self.results)

        hidden = self.hideStepIf
        if callable(hidden):
            try:
                hidden = hidden(self.results, self)
            except Exception:
                why = Failure()
                log.err(why, "hidden callback failed; traceback follows")
                yield self.addLogWithFailure(why)
                self.results = EXCEPTION
                hidden = False
        # TODO: hidden

        self.releaseLocks()

        defer.returnValue(self.results)
Example #30
 def answered(result):
     """user finally answered our question"""
     if result:
         return self.__adduser()
     else:
         return Failure(CancelledError())