Example #1
def _clearFailed(self, deferred, id):
    try:
        del self.liveMessages[id]
    except:
        pass
    deferred.errback(failure.Failure(DNSQueryTimeoutError(id)))

def push(self, **namespace):
    return defer.fail(failure.Failure(FailingEngineError("error text")))

def getResult(self, i=None):
    return defer.fail(failure.Failure(FailingEngineError("error text")))
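
Note (added for context, not part of the scraped example): an already-failed Deferred like the ones returned by push() and getResult() is normally consumed by attaching an errback. A minimal sketch, with FailingEngineError stood in as a local exception class:

# Minimal usage sketch (assumed local names): consuming a Deferred built with
# defer.fail(failure.Failure(...)).
from twisted.internet import defer
from twisted.python import failure

class FailingEngineError(Exception):
    """Stand-in for the error type used in the snippet above."""

def getResult():
    # Return an already-failed Deferred, mirroring Example #1.
    return defer.fail(failure.Failure(FailingEngineError("error text")))

def onError(f):
    # f is a failure.Failure; trap() re-raises unless it wraps this type.
    f.trap(FailingEngineError)
    print("caught:", f.getErrorMessage())

getResult().addErrback(onError)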
Example #4
    def handle_disconnected_connect(self):
        self.state = "connecting"
        if not self.factoryStarted:
            self.factory.doStart()
            self.factoryStarted = True

        if self.timeout is not None:
            self.timeoutID = self.reactor.callLater(self.timeout, self.connectionFailed, failure.Failure(error.TimeoutError()))

        self.sub = _SubConnector(self)
        self.sub.startConnecting()

        self.factory.startedConnecting(self)
Example #5
        pass

    def handle_QUERY(self, request):
        pass

    def handle_GETMORE(self, request):
        pass

    def handle_DELETE(self, request):
        pass

    def handle_KILL_CURSORS(self, request):
        pass


connectionDone = failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()


class MongoAuthenticationError(Exception):
    pass


class MongoProtocol(MongoServerProtocol, MongoClientProtocol):
    __connection_ready = None
    __deferreds = None

    min_wire_version = None
    max_wire_version = None

    def __init__(self):
Example #6
def fail(n):
    for i in R:
        try:
            eval("deepFailure%d_0" % n)()
        except:
            failure.Failure()
Example #7
def fail_easy(n):
    for i in R:
        try:
            failure.Failure(PythonException())
        except:
            pass
Example #8
class MasterConfig(util.ComparableMixin):
    def __init__(self):
        # local import to avoid circular imports
        from buildbot.process import properties
        # default values for all attributes

        # global
        self.title = 'Buildbot'
        self.titleURL = 'http://buildbot.net'
        self.buildbotURL = 'http://localhost:8080/'
        self.changeHorizon = None
        self.eventHorizon = 50
        self.logHorizon = None
        self.buildHorizon = None
        self.logCompressionLimit = 4 * 1024
        self.logCompressionMethod = 'bz2'
        self.logEncoding = 'utf-8'
        self.logMaxSize = None
        self.logMaxTailSize = None
        self.properties = properties.Properties()
        self.collapseRequests = None
        self.codebaseGenerator = None
        self.prioritizeBuilders = None
        self.multiMaster = False
        self.manhole = None
        self.protocols = {}

        self.validation = dict(
            branch=re.compile(r'^[\w.+/~-]*$'),
            revision=re.compile(r'^[ \w\.\-\/]*$'),
            property_name=re.compile(r'^[\w\.\-\/\~:]*$'),
            property_value=re.compile(r'^[\w\.\-\/\~:]*$'),
        )
        self.db = dict(db_url='sqlite:///state.sqlite', )
        self.mq = dict(type='simple', )
        self.metrics = None
        self.caches = dict(
            Builds=15,
            Changes=10,
        )
        self.schedulers = {}
        self.builders = []
        self.slaves = []
        self.change_sources = []
        self.status = []
        self.user_managers = []
        self.revlink = default_revlink_matcher
        self.www = dict(
            port=None,
            plugins=dict(),
            auth=auth.NoAuth(),
            avatar_methods=avatar.AvatarGravatar(),
            logfileName='http.log',
        )
        self.services = {}

    _known_config_keys = set([
        "buildbotURL", "buildCacheSize", "builders", "buildHorizon", "caches",
        "change_source", "codebaseGenerator", "changeCacheSize",
        "changeHorizon", 'db', "db_poll_interval", "db_url", "eventHorizon",
        "logCompressionLimit", "logCompressionMethod", "logEncoding",
        "logHorizon", "logMaxSize", "logMaxTailSize", "manhole",
        "collapseRequests", "metrics", "mq", "multiMaster",
        "prioritizeBuilders", "projectName", "projectURL", "properties",
        "protocols", "revlink", "schedulers", "services", "slavePortnum",
        "slaves", "status", "title", "titleURL", "user_managers", "validation",
        'www'
    ])
    compare_attrs = list(_known_config_keys)

    def preChangeGenerator(self, **kwargs):
        return {
            'author': kwargs.get('author', None),
            'files': kwargs.get('files', None),
            'comments': kwargs.get('comments', None),
            'revision': kwargs.get('revision', None),
            'when_timestamp': kwargs.get('when_timestamp', None),
            'branch': kwargs.get('branch', None),
            'category': kwargs.get('category', None),
            'revlink': kwargs.get('revlink', u''),
            'properties': kwargs.get('properties', {}),
            'repository': kwargs.get('repository', u''),
            'project': kwargs.get('project', u''),
            'codebase': kwargs.get('codebase', None)
        }

    @classmethod
    def loadConfig(cls, basedir, filename):
        if not os.path.isdir(basedir):
            raise ConfigErrors([
                "basedir '%s' does not exist" % (basedir, ),
            ])
        filename = os.path.join(basedir, filename)
        if not os.path.exists(filename):
            raise ConfigErrors([
                "configuration file '%s' does not exist" % (filename, ),
            ])

        try:
            f = open(filename, "r")
        except IOError, e:
            raise ConfigErrors([
                "unable to open configuration file %r: %s" % (filename, e),
            ])

        log.msg("Loading configuration from %r" % (filename, ))

        # execute the config file
        localDict = {
            'basedir': os.path.expanduser(basedir),
            '__file__': os.path.abspath(filename),
        }

        # from here on out we can batch errors together for the user's
        # convenience
        global _errors
        _errors = errors = ConfigErrors()

        old_sys_path = sys.path[:]
        sys.path.append(basedir)
        try:
            try:
                exec f in localDict
            except ConfigErrors, e:
                for err in e.errors:
                    error(err)
                raise errors
            except Exception:
                log.err(failure.Failure(), 'error while parsing config file:')
                error(
                    "error while parsing config file: %s (traceback in logfile)"
                    % (sys.exc_info()[1], ), )
                raise errors
Example #9
def errReceived(self, bytes):
    self.transport.loseConnection()
    if self.proto is not None:
        self.proto.connectionLost(failure.Failure(UnexpectedOutputError(bytes)))
        self.proto = None

def nextWorker(bldr, lst, br=None):
    return defer.fail(failure.Failure(RuntimeError()))

def nextBuild(bldr, lst):
    return defer.fail(failure.Failure(RuntimeError()))

def _maybeStartBuildsOnBuilder(n):
    # fail slowly, so that the activity loop doesn't exit too soon
    d = defer.Deferred()
    self.reactor.callLater(0, d.errback,
                           failure.Failure(RuntimeError("oh noes")))
    return d
Example #13
def test_ExplictPass(self):
    e = RuntimeError()
    f = failure.Failure(e)
    f.trap(RuntimeError)
    self.assertEqual(f.value, e)
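
For context (a minimal sketch, not taken from the original test suite): trap() returns the matching exception type, and re-raises when nothing matches. On current Twisted/Python 3 the original exception is re-raised; very old Python 2 releases raised the Failure itself.

# Minimal sketch of trap() behaviour on a non-matching type.
from twisted.python import failure

f = failure.Failure(RuntimeError("boom"))
assert f.trap(KeyError, RuntimeError) is RuntimeError  # match: returns the type

try:
    f.trap(KeyError)       # no match: the wrapped exception is re-raised
except RuntimeError:
    print("re-raised because RuntimeError was not trapped")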
Example #14
def loseConnection(self):
    if self.connected:
        self.connected = False
        self.protocol.connectionLost(
            failure.Failure(error.ConnectionDone("Bye.")))
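
A hedged companion sketch (Echo is a made-up protocol name): the Failure built above is what connectionLost() receives as its reason argument, and check() distinguishes a clean close from other errors.

# Minimal sketch: inspecting the Failure passed to connectionLost(), e.g. the
# failure.Failure(error.ConnectionDone("Bye.")) built in Example #14.
from twisted.internet import error, protocol

class Echo(protocol.Protocol):
    def connectionLost(self, reason):
        # reason is a failure.Failure wrapping the disconnect exception.
        if reason.check(error.ConnectionDone):
            print("clean close:", reason.getErrorMessage())
        else:
            print("connection lost unexpectedly:", reason)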
Example #15
    def _startBuildFor(self, slavebuilder, buildrequests):
        """Start a build on the given slave.
        @param build: the L{base.Build} to start
        @param sb: the L{SlaveBuilder} which will host this build

        @return: (via Deferred) boolean indicating that the build was
        successfully started.
        """

        # as of the Python versions supported now, try/finally can't be used
        # with a generator expression.  So instead, we push cleanup functions
        # into a list so that, at any point, we can abort this operation.
        cleanups = []

        def run_cleanups():
            try:
                while cleanups:
                    fn = cleanups.pop()
                    fn()
            except:
                log.err(failure.Failure(),
                        "while running %r" % (run_cleanups, ))

        # the last cleanup we want to perform is to update the big
        # status based on any other cleanup
        cleanups.append(lambda: self.updateBigStatus())

        build = self.config.factory.newBuild(buildrequests)
        build.setBuilder(self)
        log.msg("starting build %s using slave %s" % (build, slavebuilder))

        # set up locks
        build.setLocks(self.config.locks)
        cleanups.append(lambda: slavebuilder.slave.releaseLocks())

        if len(self.config.env) > 0:
            build.setSlaveEnvironment(self.config.env)

        # append the build to self.building
        self.building.append(build)
        cleanups.append(lambda: self.building.remove(build))

        # update the big status accordingly
        self.updateBigStatus()

        try:
            ready = yield slavebuilder.prepare(self.builder_status, build)
        except:
            log.err(failure.Failure(), 'while preparing slavebuilder:')
            ready = False

        # If prepare returns True then it is ready and we start a build
        # If it returns false then we don't start a new build.
        if not ready:
            log.msg("slave %s can't build %s after all; re-queueing the "
                    "request" % (build, slavebuilder))
            run_cleanups()
            defer.returnValue(False)
            return

        # ping the slave to make sure they're still there. If they've
        # fallen off the map (due to a NAT timeout or something), this
        # will fail in a couple of minutes, depending upon the TCP
        # timeout.
        #
        # TODO: This can unnecessarily suspend the starting of a build, in
        # situations where the slave is live but is pushing lots of data to
        # us in a build.
        log.msg("starting build %s.. pinging the slave %s" %
                (build, slavebuilder))
        try:
            ping_success = yield slavebuilder.ping()
        except:
            log.err(failure.Failure(), 'while pinging slave before build:')
            ping_success = False

        if not ping_success:
            log.msg("slave ping failed; re-queueing the request")
            run_cleanups()
            defer.returnValue(False)
            return

        # The buildslave is ready to go. slavebuilder.buildStarted() sets its
        # state to BUILDING (so we won't try to use it for any other builds).
        # This gets set back to IDLE by the Build itself when it finishes.
        slavebuilder.buildStarted()
        cleanups.append(lambda: slavebuilder.buildFinished())

        # tell the remote that it's starting a build, too
        try:
            yield slavebuilder.remote.callRemote("startBuild")
        except:
            log.err(failure.Failure(), 'while calling remote startBuild:')
            run_cleanups()
            defer.returnValue(False)
            return

        # create the BuildStatus object that goes with the Build
        bs = self.builder_status.newBuild()

        # record the build in the db - one row per buildrequest
        try:
            bids = []
            for req in build.requests:
                bid = yield self.master.db.builds.addBuild(req.id, bs.number)
                bids.append(bid)
        except:
            log.err(failure.Failure(), 'while adding rows to build table:')
            run_cleanups()
            defer.returnValue(False)
            return

        # let status know
        self.master.status.build_started(req.id, self.name, bs)

        # start the build. This will first set up the steps, then tell the
        # BuildStatus that it has started, which will announce it to the world
        # (through our BuilderStatus object, which is its parent).  Finally it
        # will start the actual build process.  This is done with a fresh
        # Deferred since _startBuildFor should not wait until the build is
        # finished.
        d = build.startBuild(bs, self.expectations, slavebuilder)
        d.addCallback(self.buildFinished, slavebuilder, bids)
        # this shouldn't happen. if it does, the slave will be wedged
        d.addErrback(log.err)

        # make sure the builder's status is represented correctly
        self.updateBigStatus()

        defer.returnValue(True)
Example #16
def convert_ce_to_te(x):
    if cancelled[0] and x.check(defer.CancelledError):
        return failure.Failure(TimeoutError(x))
    else:
        return x
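
A minimal sketch of the same errback-translation pattern (to_timeout is a hypothetical name; the original also consults a cancelled flag from its enclosing scope): returning a new Failure from an errback keeps the Deferred failed, but with the translated exception type.

# Minimal sketch (assumed names): translating CancelledError into TimeoutError
# inside an errback chain.
from twisted.internet import defer, error
from twisted.python import failure

def to_timeout(f):
    if f.check(defer.CancelledError):
        return failure.Failure(error.TimeoutError("cancelled by timeout"))
    return f

d = defer.Deferred()
d.addErrback(to_timeout)
d.addErrback(lambda f: print(f.value))   # prints the translated TimeoutError
d.cancel()                               # fires CancelledError, then to_timeout runs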
Example #17
    def startService(self):
        assert not self._already_started, "can only start the master once"
        self._already_started = True

        log.msg("Starting BuildMaster -- buildbot.version: %s" %
                buildbot.version)

        # Set umask
        if self.umask is not None:
            os.umask(self.umask)

        # first, apply all monkeypatches
        monkeypatches.patch_all()

        # we want to wait until the reactor is running, so we can call
        # reactor.stop() for fatal errors
        d = defer.Deferred()
        self.reactor.callWhenRunning(d.callback, None)
        yield d

        startup_succeed = False
        try:
            yield self.initLock.acquire()
            # load the configuration file, treating errors as fatal
            try:
                # run the master.cfg in thread, so that it can use blocking
                # code
                self.config = yield threads.deferToThreadPool(
                    self.reactor, self.reactor.getThreadPool(),
                    self.config_loader.loadConfig)

            except config.ConfigErrors as e:
                log.msg("Configuration Errors:")
                for msg in e.errors:
                    log.msg("  " + msg)
                log.msg("Halting master.")
                self.reactor.stop()
                return
            except Exception:
                log.err(failure.Failure(), 'while starting BuildMaster')
                self.reactor.stop()
                return

            # set up services that need access to the config before everything
            # else gets told to reconfig
            try:
                yield self.db.setup()
            except exceptions.DatabaseNotReadyError:
                # (message was already logged)
                self.reactor.stop()
                return

            self.mq.setup()

            if hasattr(signal, "SIGHUP"):

                def sighup(*args):
                    eventually(self.reconfig)

                signal.signal(signal.SIGHUP, sighup)

            if hasattr(signal, "SIGUSR1"):

                def sigusr1(*args):
                    eventually(self.botmaster.cleanShutdown)

                signal.signal(signal.SIGUSR1, sigusr1)

            # get the masterid so other services can use it in
            # startup/reconfig.  This goes directly to the DB since the data
            # API isn't initialized yet, and anyway, this method is aware of
            # the DB API since it just called its setup function
            self.masterid = yield self.db.masters.findMasterId(name=self.name)

            # mark this master as stopped, in case it crashed before
            yield self.data.updates.masterStopped(name=self.name,
                                                  masterid=self.masterid)

            # call the parent method
            yield super().startService()

            # We make sure the housekeeping is done before configuring in order to cleanup
            # any remaining claimed schedulers or change sources from zombie
            # masters
            yield self.data.updates.expireMasters(forceHouseKeeping=True)

            # give all services a chance to load the new configuration, rather
            # than the base configuration
            yield self.reconfigServiceWithBuildbotConfig(self.config)

            # Mark the master as active now that mq is running
            yield self.data.updates.masterActive(name=self.name,
                                                 masterid=self.masterid)

            # Start the heartbeat timer
            yield self.masterHeartbeatService.setServiceParent(self)

            # send the statistics to buildbot.net, without waiting
            self.sendBuildbotNetUsageData()
            startup_succeed = True
        except Exception:
            f = failure.Failure()
            log.err(f, 'while starting BuildMaster')
            self.reactor.stop()

        finally:
            if startup_succeed:
                log.msg("BuildMaster is running")
            else:
                log.msg("BuildMaster startup failed")

            yield self.initLock.release()
            self._master_initialized = True
Example #18
        try:
            d.getResult()
        except pop3client.ServerErrorResponse, e:
            self.setStatus(
                u'Login failed: ' + str(e).decode('ascii', 'replace'), False)
            self.transport.loseConnection()
            return
        except pop3.InsecureAuthenticationDisallowed:
            self.setStatus(u'Login aborted: server not secure.', False)
            self.transport.loseConnection()
            return
        except (error.ConnectionDone, error.ConnectionLost):
            self.setStatus(u"Connection lost", False)
            return
        except:
            f = failure.Failure()
            log.err(f, "Failure logging in")
            self.setStatus(u'Login failed: internal error.', False)
            self.transport.loseConnection()
            return

        N = 100

        # Up to N (index, uid) pairs which have been received but not
        # checked against shouldRetrieve
        uidWorkingSet = []

        # All the (index, uid) pairs which should be retrieved
        uidList = []

        # Consumer for listUID - adds to the working set and processes
Example #19
def fail_str(n):
    for i in R:
        try:
            eval("deepFailure%d_0" % n)()
        except:
            str(failure.Failure())
Example #20
def timeout(deferred):
    deferred.errback(failure.Failure(TimeoutError("Callback timed out")))

def work():
    d = defer.Deferred()
    reactor.callLater(0, d.errback,
                      failure.Failure(RuntimeError("Test failure")))
    return d
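
A hedged sketch of how a helper like timeout() above is typically wired up (with_timeout and the local TimeoutError class are made up for illustration): schedule the errback with reactor.callLater and cancel the timer if the Deferred fires first.

# Minimal sketch (hypothetical helper), assuming the reactor is running.
from twisted.internet import reactor
from twisted.python import failure

class TimeoutError(Exception):
    """Stand-in for the exception type used in Example #20."""

def with_timeout(d, seconds):
    # Fail the Deferred with a wrapped TimeoutError if it takes too long.
    call = reactor.callLater(
        seconds, d.errback, failure.Failure(TimeoutError("Callback timed out")))

    def cancel_timer(result):
        # If the Deferred fired first, stop the pending timeout.
        if call.active():
            call.cancel()
        return result

    return d.addBoth(cancel_timer)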
Example #22
def lookupAllRecords(self, name, timeout=None):
    return defer.fail(failure.Failure(dns.DomainError(name)))
Example #23
def handle_connecting_stopConnecting(self):
    self.connectionFailed(failure.Failure(error.UserError()))
Example #24
            expected = digest.calcResponse(
                digest.calcHA1(algo, "user,name", "test realm", "password",
                               nonce, cnonce), algo, nonce, None, None, None,
                "GET", "/write/1,2.txt", None)
        return expected

    @inlineCallbacks
    def assertRaisesDeferred(self, exception, f, *args, **kwargs):
        try:
            result = (yield f(*args, **kwargs))
        except exception, inst:
            returnValue(inst)
        except:
            raise self.failureException('%s raised instead of %s:\n %s' %
                                        (sys.exc_info()[0], exception.__name__,
                                         failure.Failure().getTraceback()))
        else:
            raise self.failureException('%s not raised (%r returned)' %
                                        (exception.__name__, result))

    @inlineCallbacks
    def test_getChallenge(self):
        """
        Test that all the required fields exist in the challenge,
        and that the information matches what we put into our
        DigestCredentialFactory
        """

        challenge = (yield
                     self.credentialFactories[0].getChallenge(clientAddress))
        self.assertEquals(challenge['qop'], 'auth')

def execute(self, lines):
    return defer.fail(failure.Failure(FailingEngineError("error text")))
Example #26
def toFailure(self, exc_tb=None):
    return failure.Failure(self, exc_tb=exc_tb)

def pull(self, *keys):
    return defer.fail(failure.Failure(FailingEngineError("error text")))
Example #28
def loseConnection(self):
    if self.connected and not self.disconnecting:
        self.disconnecting = 1
        self.stopReading()
        self.reactor.callLater(0, self.connectionLost,
                               failure.Failure(CONNECTION_DONE))

def keys(self):
    return defer.fail(failure.Failure(FailingEngineError("error text")))
Example #30
def run(self, result):
    try:
        raise RuntimeError("error that occurs outside of a test")
    except RuntimeError:
        log.err(failure.Failure())
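
Closing note (a sketch, not part of the scraped examples): calling failure.Failure() with no arguments, as run() does above, snapshots the exception currently being handled, so it only makes sense inside an except block.

# Minimal sketch: a no-argument Failure() captures sys.exc_info() for the active
# exception, which log.err() then records together with its traceback.
from twisted.python import failure, log

try:
    raise RuntimeError("error that occurs outside of a test")
except RuntimeError:
    f = failure.Failure()            # captures the active exception
    log.err(f, "unexpected error")   # logs the message and the traceback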