def __init__(self, workersrcs=None, masterdest=None,
             workdir=None, maxsize=None, blocksize=16 * 1024, glob=False,
             mode=None, compress=None, keepstamp=False, url=None,
             slavesrcs=None,  # deprecated, use `workersrcs` instead
             **buildstep_kwargs):
    """Configure a multi-file transfer step.

    ``slavesrcs`` is the deprecated spelling of ``workersrcs``; exactly
    one of the two must be given, along with ``masterdest``.
    """
    # Deprecated API support: map the old keyword onto the new one.
    if slavesrcs is not None:
        reportDeprecatedWorkerNameUsage(
            "'slavesrcs' keyword argument is deprecated, "
            "use 'workersrcs' instead")
        assert workersrcs is None
        workersrcs = slavesrcs

    # Emulate that first two arguments are positional.
    if workersrcs is None or masterdest is None:
        raise TypeError("__init__() takes at least 3 arguments")

    _TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)

    # Accept either a single source or a list of sources.
    if isinstance(workersrcs, list):
        self.workersrcs = workersrcs
    else:
        self.workersrcs = [workersrcs]
    self._registerOldWorkerAttr("workersrcs")
    self.masterdest = masterdest
    self.maxsize = maxsize
    self.blocksize = blocksize
    if not isinstance(mode, (int, type(None))):
        config.error('mode must be an integer or None')
    self.mode = mode
    if compress not in (None, 'gz', 'bz2'):
        config.error("'compress' must be one of None, 'gz', or 'bz2'")
    self.compress = compress
    self.glob = glob
    self.keepstamp = keepstamp
    self.url = url
def __init__(self, name, builderNames, minute=0, hour='*',
             dayOfMonth='*', month='*', dayOfWeek='*',
             branch=NoBranch, fileIsImportant=None, onlyIfChanged=False,
             properties=None, change_filter=None, onlyImportant=False):
    """Nightly scheduler: start builds on a cron-like timetable.

    @param branch: branch to build; required (NoBranch is the sentinel)
    @param fileIsImportant: optional callable deciding change importance
    @param properties: build properties dict (fresh per instance)
    """
    # Fix: `properties` previously defaulted to a shared mutable {}
    # literal; use a None sentinel so each instance gets a fresh dict.
    if properties is None:
        properties = {}
    Timed.__init__(self, name=name, builderNames=builderNames,
                   properties=properties)

    # If True, only important changes will be added to the buildset.
    self.onlyImportant = onlyImportant

    if fileIsImportant and not callable(fileIsImportant):
        config.error(
            "fileIsImportant must be a callable")
    if branch is Nightly.NoBranch:
        config.error(
            "Nightly parameter 'branch' is required")

    self.minute = minute
    self.hour = hour
    self.dayOfMonth = dayOfMonth
    self.month = month
    self.dayOfWeek = dayOfWeek
    self.branch = branch
    self.onlyIfChanged = onlyIfChanged
    self.fileIsImportant = fileIsImportant
    self.change_filter = filter.ChangeFilter.fromSchedulerConstructorArgs(
        change_filter=change_filter)
    self.reason = "The Nightly scheduler named '%s' triggered this build" % self.name
def __init__(self, s, workerdest=None, workdir=None,
             maxsize=None, blocksize=16 * 1024, mode=None,
             slavedest=None,  # deprecated, use `workerdest` instead
             **buildstep_kwargs):
    """StringDownload step: write the string ``s`` to ``workerdest``.

    ``slavedest`` is the deprecated spelling of ``workerdest``.
    """
    if slavedest is not None:
        # Deprecated API support.
        reportDeprecatedWorkerNameUsage(
            "'slavedest' keyword argument is deprecated, "
            "use 'workerdest' instead")
        assert workerdest is None
        workerdest = slavedest

    # Emulate that first two arguments are positional.
    if workerdest is None:
        raise TypeError("__init__() takes at least 3 arguments")

    _TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)

    self.s = s
    self.workerdest = workerdest
    self._registerOldWorkerAttr("workerdest")
    self.maxsize = maxsize
    self.blocksize = blocksize
    if not isinstance(mode, (int, type(None))):
        config.error(
            "StringDownload step's mode must be an integer or None,"
            " got '%s'" % mode)
    self.mode = mode
def __init__(self, name, password, flavor,
             os_username, os_password, os_tenant_name, os_auth_url,
             block_devices=None, image=None, meta=None,
             # Have a nova_args parameter to allow passing things directly
             # to novaclient v1.1.
             nova_args=None, **kwargs):
    """Latent worker backed by an OpenStack nova instance.

    Requires the `novaclient` module and either an image or a list of
    block devices to boot from.
    """
    if not client or not nce:
        config.error("The python module 'novaclient' is needed "
                     "to use a OpenStackLatentWorker")

    if not block_devices and not image:
        raise ValueError('One of block_devices or image must be given')

    AbstractLatentWorker.__init__(self, name, password, **kwargs)

    self.flavor = flavor
    self.os_username = os_username
    self.os_password = os_password
    self.os_tenant_name = os_tenant_name
    self.os_auth_url = os_auth_url

    # Normalize every block-device entry up front.
    if block_devices is None:
        self.block_devices = None
    else:
        self.block_devices = [self._parseBlockDevice(bd)
                              for bd in block_devices]
    self.image = image
    self.meta = meta
    self.nova_args = {} if nova_args is None else nova_args
def __init__(self, workersrc=None, masterdest=None,
             workdir=None, maxsize=None, blocksize=16 * 1024,
             mode=None, keepstamp=False, url=None,
             slavesrc=None,  # deprecated, use `workersrc` instead
             **buildstep_kwargs):
    """Configure a single-file transfer step.

    ``slavesrc`` is the deprecated spelling of ``workersrc``; exactly
    one of the two must be given, along with ``masterdest``.
    """
    if slavesrc is not None:
        # Deprecated API support.
        reportDeprecatedWorkerNameUsage(
            "'slavesrc' keyword argument is deprecated, "
            "use 'workersrc' instead")
        assert workersrc is None
        workersrc = slavesrc

    # Emulate that first two arguments are positional.
    if workersrc is None or masterdest is None:
        raise TypeError("__init__() takes at least 3 arguments")

    _TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)

    self.workersrc = workersrc
    self._registerOldWorkerAttr("workersrc")
    self.masterdest = masterdest
    self.maxsize = maxsize
    self.blocksize = blocksize
    if not isinstance(mode, (int, type(None))):
        config.error('mode must be an integer or None')
    self.mode = mode
    self.keepstamp = keepstamp
    self.url = url
def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,
             builderNames=None, branch=NotABranch, branches=NotABranch,
             fileIsImportant=None, categories=None,
             reason="The %(classname)s scheduler named '%(name)s' triggered this build",
             change_filter=None, onlyImportant=False, **kwargs):
    """Scheduler triggered by incoming changes, with optional stable timer.

    ``shouldntBeSet`` is a positional-argument trap: all real arguments
    must be passed by keyword.
    """
    if shouldntBeSet is not self.NotSet:
        config.error(
            "pass arguments to schedulers using keyword arguments")
    if fileIsImportant and not callable(fileIsImportant):
        config.error(
            "fileIsImportant must be a callable")

    # initialize parent classes
    base.BaseScheduler.__init__(self, name, builderNames, **kwargs)

    self.treeStableTimer = treeStableTimer
    # Only override the class-level default when explicitly provided.
    if fileIsImportant is not None:
        self.fileIsImportant = fileIsImportant
    self.onlyImportant = onlyImportant
    self.change_filter = self.getChangeFilter(branch=branch,
                                              branches=branches,
                                              change_filter=change_filter,
                                              categories=categories)

    # the IDelayedCall used to wake up when this scheduler's
    # treeStableTimer expires.
    self._stable_timers = defaultdict(lambda: None)
    self._stable_timers_lock = defer.DeferredLock()

    self.reason = util.ascii2unicode(reason % {
        'name': name, 'classname': self.__class__.__name__
    })
def __init__(self, workersrc=None, masterdest=None,
             workdir=None, maxsize=None, blocksize=16 * 1024,
             compress=None, url=None,
             slavesrc=None,  # deprecated, use `workersrc` instead
             **buildstep_kwargs):
    """Configure a transfer step with optional compression.

    ``slavesrc`` is the deprecated spelling of ``workersrc``; exactly
    one of the two must be given, along with ``masterdest``.
    """
    if slavesrc is not None:
        # Deprecated API support.
        reportDeprecatedWorkerNameUsage(
            "'slavesrc' keyword argument is deprecated, "
            "use 'workersrc' instead")
        assert workersrc is None
        workersrc = slavesrc

    # Emulate that first two arguments are positional.
    if workersrc is None or masterdest is None:
        raise TypeError("__init__() takes at least 3 arguments")

    _TransferBuildStep.__init__(self, workdir=workdir, **buildstep_kwargs)

    self.workersrc = workersrc
    self._registerOldWorkerAttr("workersrc")
    self.masterdest = masterdest
    self.maxsize = maxsize
    self.blocksize = blocksize
    if compress not in (None, 'gz', 'bz2'):
        config.error("'compress' must be one of None, 'gz', or 'bz2'")
    self.compress = compress
    self.url = url
def __init__(self, name, password, max_builds=None,
             notify_on_missing=None,
             missing_timeout=3600, properties=None, locks=None,
             keepalive_interval=3600):
    """
    @param name: botname this machine will supply when it connects
    @param password: password this machine will supply when it connects
    @param max_builds: maximum number of simultaneous builds that will
                       be run concurrently on this buildslave (the
                       default is None for no limit)
    @param properties: properties that will be applied to builds run on
                       this slave
    @type properties: dictionary
    @param locks: A list of locks that must be acquired before this slave
                  can be used
    @type locks: dictionary
    """
    # Fix: `notify_on_missing` and `properties` previously defaulted to
    # shared mutable [] / {} literals; use None sentinels instead.
    if notify_on_missing is None:
        notify_on_missing = []
    if properties is None:
        properties = {}

    name = ascii2unicode(name)

    service.AsyncMultiService.__init__(self)
    self.slavename = ascii2unicode(name)
    self.password = password

    # protocol registration
    self.registration = None

    # these are set when the service is started
    self.botmaster = None
    self.manager = None
    self.master = None
    self.buildslaveid = None

    self.slave_status = SlaveStatus(name)
    self.slave_commands = None
    self.slavebuilders = {}
    self.max_builds = max_builds
    self.access = []
    if locks:
        self.access = locks
    self.lock_subscriptions = []

    self.properties = Properties()
    self.properties.update(properties, "BuildSlave")
    self.properties.setProperty("slavename", name, "BuildSlave")

    self.lastMessageReceived = 0
    # Allow a single string as shorthand for a one-element list.
    if isinstance(notify_on_missing, str):
        notify_on_missing = [notify_on_missing]
    self.notify_on_missing = notify_on_missing
    for i in notify_on_missing:
        if not isinstance(i, str):
            config.error(
                'notify_on_missing arg %r is not a string' % (i,))
    self.missing_timeout = missing_timeout
    self.missing_timer = None

    # a protocol connection, if we're currently connected
    self.conn = None

    self._old_builder_list = None
def __init__(self, **kwargs):
    """Consume declared step parameters from kwargs; reject unknown ones."""
    self.worker = None
    self._registerOldWorkerAttr("worker", name="buildslave")

    # Pop every keyword this step class declares in `parms`.
    for parm in self.__class__.parms:
        if parm in kwargs:
            setattr(self, parm, kwargs.pop(parm))
    if kwargs:
        config.error("%s.__init__ got unexpected keyword argument(s) %s"
                     % (self.__class__, list(kwargs)))
    self._pendingLogObservers = []

    if not isinstance(self.name, str):
        config.error("BuildStep name must be a string: %r" % (self.name,))

    # Normalize each description attribute: a bare string becomes a
    # one-element list.
    for attr in ('description', 'descriptionDone', 'descriptionSuffix'):
        value = getattr(self, attr)
        if isinstance(value, str):
            setattr(self, attr, [value])

    self._acquiringLock = None
    self.stopped = False
    self.master = None
    self.statistics = {}
    self.logs = {}
    self._running = False
    self.stepid = None
    self._start_unhandled_deferreds = None
def __init__(self, repourl, branch='default',
             workdir=None, pollInterval=10 * 60, hgbin='hg',
             usetimestamps=True, category=None,
             project='', pollinterval=-2, encoding='utf-8',
             name=None, pollAtLaunch=False):
    """Poll a Mercurial repository for new changes; `workdir` is required."""
    # for backward compatibility; the parameter used to be spelled with 'i'
    if pollinterval != -2:
        pollInterval = pollinterval

    if name is None:
        name = "%s[%s]" % (repourl, branch)

    self.repourl = repourl
    self.branch = branch
    base.PollingChangeSource.__init__(
        self, name=name, pollInterval=pollInterval, pollAtLaunch=pollAtLaunch)
    self.encoding = encoding
    self.lastChange = time.time()
    self.lastPoll = time.time()
    self.hgbin = hgbin
    self.workdir = workdir
    self.usetimestamps = usetimestamps
    # A callable category is kept as-is (rendered later); anything else
    # is coerced to unicode.
    if callable(category):
        self.category = category
    else:
        self.category = ascii2unicode(category)
    self.project = project
    self.commitInfo = {}
    self.initLock = defer.DeferredLock()

    if self.workdir is None:
        config.error("workdir is mandatory for now in HgPoller")
def __init__(self, name, label=None, tablabel=None, regex=None, **kw):
    """
    @param name: the name of the field, used during posting values
                 back to the scheduler. This is not necessarily a UI
                 value; there may be restrictions on the characters
                 allowed (HTML would require avoiding spaces and most
                 punctuation; '-', '.', and '_' are allowed)
    @type name: unicode

    @param label: (optional) the name of the field, used for UI display.
    @type label: unicode or None (to use 'name')

    @param regex: (optional) regex to validate the value with. Not used
                  by all subclasses
    @type regex: unicode or regex
    """
    # These names are reserved for fields the schedulers add themselves.
    if name in ["owner", "builderNames", "builderid"]:
        config.error("%s cannot be used as a parameter name, because it is reserved"
                     % (name,))
    self.name = name
    self.label = name if label is None else label
    self.tablabel = self.label if tablabel is None else tablabel
    if regex:
        self.regex = re.compile(regex)
    if 'value' in kw:
        config.error("Use default='%s' instead of value=... to give a "
                     "default Parameter value" % kw['value'])
    # all other properties are generically passed via **kw
    self.__dict__.update(kw)
def checkAvailable(from_module):
    """Call me at checkConfig time to properly report config error
    if neither txrequests or treq is installed
    """
    neither_installed = txrequests is None and treq is None
    if neither_installed:
        config.error("neither txrequests nor treq is installed, but {} is requiring it\n\n{}".format(
            from_module, HTTPClientService.TREQ_PROS_AND_CONS))
def __init__(self, workdir=None, command=None, usePTY="slave-config", **kwargs):
    """Shell-command step: split **kwargs between the BuildStep parent
    and the RemoteShellCommand created at start time.
    """
    # most of our arguments get passed through to the RemoteShellCommand
    # that we create, but first strip out the ones that we pass to
    # BuildStep (like haltOnFailure and friends), and a couple that we
    # consume ourselves.

    if command:
        self.setCommand(command)

    # pull out the ones that LoggingBuildStep wants, then upcall
    buildstep_kwargs = {}
    # workdir is here first positional argument, but it belongs to BuildStep parent
    kwargs["workdir"] = workdir
    # Fix: the original iterated `kwargs.keys()[:]`, a Python-2-only idiom
    # (dict views are not sliceable on Python 3). Snapshot the keys with
    # list() since we delete from kwargs while iterating.
    for k in list(kwargs):
        if k in self.__class__.parms:
            buildstep_kwargs[k] = kwargs[k]
            del kwargs[k]
    buildstep.LoggingBuildStep.__init__(self, **buildstep_kwargs)

    # check validity of arguments being passed to RemoteShellCommand
    # NOTE(review): inspect.getargspec is deprecated (removed in 3.11);
    # kept for compatibility with the interpreters this file targets.
    valid_rsc_args = inspect.getargspec(remotecommand.RemoteShellCommand.__init__)[0]
    invalid_args = [arg for arg in kwargs if arg not in valid_rsc_args]

    # Raise Configuration error in case invalid arguments are present
    if invalid_args:
        config.error("Invalid argument(s) passed to RemoteShellCommand: "
                     + ", ".join(invalid_args))

    # everything left over goes to the RemoteShellCommand
    kwargs["usePTY"] = usePTY
    self.remote_kwargs = kwargs
    self.remote_kwargs["workdir"] = workdir
def checkConfig(self, gitBaseURL=None,
                handled_events=("patchset-created", "ref-updated"),
                debug=False):
    """Validate the configuration: `gitBaseURL` must be provided."""
    if gitBaseURL is None:
        config.error("gitBaseURL must be specified")
def checkConfig(self, *args, **kwargs):
    """Validate service config; the txrequests and requests packages
    must be installed and `builders` must be a list or None."""
    service.BuildbotService.checkConfig(self)
    if txrequests is None:
        # Fix: the install hint used to read "pip install txrequest",
        # which is the wrong package name (missing the trailing 's').
        config.error("Please install txrequests and requests to use %s "
                     "(pip install txrequests)" % (self.__class__,))
    if not isinstance(kwargs.get('builders'), (type(None), list)):
        config.error("builders must be a list or None")
def __init__(self, root=None, resultdir=None, **kwargs):
    """
    Creates the Mock object.

    @type root: str
    @param root: the name of the mock buildroot
    @type resultdir: str
    @param resultdir: the path of the result dir
    @type kwargs: dict
    @param kwargs: All further keyword arguments.
    """
    ShellCommand.__init__(self, **kwargs)
    if root:
        self.root = root
    if resultdir:
        self.resultdir = resultdir

    if not self.root:
        config.error("You must specify a mock root")

    # Base mock invocation; subclasses append their own options.
    self.command = ['mock', '--root', self.root]
    if self.resultdir:
        self.command.extend(['--resultdir', self.resultdir])
def checkSecretDirectoryIsAvailableAndReadable(self, dirname, suffixes):
    """Check that `dirname` exists, then verify that every file in it
    whose name ends with one of `suffixes` is read-only.

    @param dirname: directory holding the secret files
    @param suffixes: iterable of filename suffixes identifying secrets
    """
    if not os.access(dirname, os.F_OK):
        # Fix: the error message read "does not exists".
        config.error("directory %s does not exist" % dirname)
    for secretfile in os.listdir(dirname):
        for suffix in suffixes:
            if secretfile.endswith(suffix):
                self.checkFileIsReadOnly(dirname, secretfile)
def __init__(self, function, *args, **kwargs):
    """Defer applying `function` to `args`/`kwargs` until render time.

    `function` must be callable or itself renderable.
    """
    acceptable = callable(function) or IRenderable.providedBy(function)
    if not acceptable:
        config.error("function given to Transform neither callable nor renderable")
    self._function = function
    self._args = args
    self._kwargs = kwargs
def __init__(self, spec=None, sources=None, **kwargs):
    """
    Creates the MockBuildSRPM object.

    @type spec: str
    @param spec: the path of the specfiles.
    @type sources: str
    @param sources: the path of the sources dir.
    @type kwargs: dict
    @param kwargs: All further keyword arguments.
    """
    Mock.__init__(self, **kwargs)
    if spec:
        self.spec = spec
    if sources:
        self.sources = sources

    if not self.spec:
        config.error("You must specify a spec file")
    if not self.sources:
        config.error("You must specify a sources dir")

    self.command.extend(['--buildsrpm', '--spec', self.spec,
                         '--sources', self.sources])
def start(self):
    """Build the msbuild command line and delegate to VisualStudio.start."""
    if self.platform is None:
        config.error('platform is mandatory. Please specify a string such as "Win32"')

    # Note: self.config here is the build configuration name, unrelated
    # to the buildbot `config` module used above.
    command = ["%VCENV_BAT%",
               "x86",
               "&&",
               "msbuild",
               self.projectfile,
               "/p:Configuration=%s" % (self.config),
               "/p:Platform=%s" % (self.platform)]

    # An explicit project target wins; otherwise map the build mode.
    mode_targets = {"build": "/t:Build",
                    "clean": "/t:Clean",
                    "rebuild": "/t:Rebuild"}
    if self.project is not None:
        command.append("/t:%s" % (self.project))
    elif self.mode in mode_targets:
        command.append(mode_targets[self.mode])

    self.setCommand(command)

    return VisualStudio.start(self)
def __init__(self, name, builderNames, properties=None,
             reason='', createAbsoluteSourceStamps=False, onlyIfChanged=False,
             branch=NoBranch, change_filter=None, fileIsImportant=None,
             onlyImportant=False, **kwargs):
    """Base class for time-driven schedulers.

    @param reason: %-format template; only '%(name)s' is interpolated
    @param fileIsImportant: optional callable deciding change importance
    """
    # Fix: `properties` previously defaulted to a shared mutable {}
    # literal; use a None sentinel so each instance gets a fresh dict.
    if properties is None:
        properties = {}
    base.BaseScheduler.__init__(self, name, builderNames, properties,
                                **kwargs)

    # tracking for when to start the next build
    self.lastActuated = None

    # A lock to make sure that each actuation occurs without interruption.
    # This lock governs actuateAt, actuateAtTimer, and actuateOk
    self.actuationLock = defer.DeferredLock()
    self.actuateOk = False
    self.actuateAt = None
    self.actuateAtTimer = None

    self.reason = util.ascii2unicode(reason % {'name': name})
    self.branch = branch
    self.change_filter = ChangeFilter.fromSchedulerConstructorArgs(
        change_filter=change_filter)
    self.createAbsoluteSourceStamps = createAbsoluteSourceStamps
    self.onlyIfChanged = onlyIfChanged
    if fileIsImportant and not callable(fileIsImportant):
        config.error(
            "fileIsImportant must be a callable")
    self.fileIsImportant = fileIsImportant
    # If True, only important changes will be added to the buildset.
    self.onlyImportant = onlyImportant
    self._reactor = reactor  # patched by tests
def __init__(self, password,
             # On-Demand related stuff
             instance_booter,
             build_wait_timeout=60 * 10,
             keepalive_interval=None,
             # Generic stuff for the base class
             max_builds=None,
             notify_on_missing=None, missing_timeout=60 * 20,
             properties=None, locks=None):
    """On-demand latent slave whose node is booted by `instance_booter`.

    @param build_wait_timeout: seconds to keep the node alive between
                               builds; must be non-negative
    """
    # Fix: `notify_on_missing` and `properties` previously defaulted to
    # shared mutable [] / {} literals; use None sentinels instead.
    if notify_on_missing is None:
        notify_on_missing = []
    if properties is None:
        properties = {}

    # The buildslave name has already been supplied to the driver
    # responsible for booting the node, so we use that attribute here.
    name = instance_booter.driver.name

    BuildSlave.__init__(
        self, name, password, max_builds, notify_on_missing,
        missing_timeout, properties, locks)

    if build_wait_timeout < 0:
        config.error("%s: %s: Can't wait for negative time."
                     % (self.__class__, name))
    self.build_wait_timeout = build_wait_timeout

    # Uggh
    if keepalive_interval is not None:
        self.keepalive_interval = keepalive_interval

    self.building = set()
    self.instance_booter = instance_booter
    self.addService(TimerService(60, self.periodic))
def __init__(self, fileloc=None, suppressTags=None, **kwargs):
    """
    Create the DebLintian object.

    @type fileloc: str
    @param fileloc: Location of the .deb or .changes to test.
    @type suppressTags: list
    @param suppressTags: List of tags to suppress.
    @type kwargs: dict
    @param kwargs: all other keyword arguments.
    """
    ShellCommand.__init__(self, **kwargs)
    if fileloc:
        self.fileloc = fileloc
    if suppressTags:
        self.suppressTags = suppressTags

    if not self.fileloc:
        config.error("You must specify a fileloc")

    self.command = ["lintian", "-v", self.fileloc]

    if self.suppressTags:
        for tag in self.suppressTags:
            self.command.extend(['--suppress-tags', tag])
def __init__(self, port, username, password, ssh_hostkey_dir):
    """
    @type port: string or int
    @param port: what port should the Manhole listen on? This is a
    strports specification string, like 'tcp:12345' or
    'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
    simple tcp port.

    @param username:
    @param password: username= and password= form a pair of strings to
                     use when authenticating the remote user.

    @type ssh_hostkey_dir: str
    @param ssh_hostkey_dir: directory which contains ssh host keys for
                            this server
    """
    if not manhole_ssh:
        # Fix: the error message misspelled "manhole" as "mahole".
        config.error("cryptography required for ssh manhole.")

    self.username = username
    self.password = password
    self.ssh_hostkey_dir = ssh_hostkey_dir

    c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
    c.addUser(unicode2bytes(username), unicode2bytes(password))

    super().__init__(port, c, ssh_hostkey_dir)
def __init__(self, port, keyfile, ssh_hostkey_dir):
    """
    @type port: string or int
    @param port: what port should the Manhole listen on? This is a
    strports specification string, like 'tcp:12345' or
    'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
    simple tcp port.

    @param keyfile: the name of a file (relative to the buildmaster's
                    basedir) that contains SSH public keys of authorized
                    users, one per line. This is the exact same format
                    as used by sshd in ~/.ssh/authorized_keys .

    @type ssh_hostkey_dir: str
    @param ssh_hostkey_dir: directory which contains ssh host keys for
                            this server
    """
    if not manhole_ssh:
        # Fix: the error message misspelled "manhole" as "mahole".
        config.error("cryptography required for ssh manhole.")

    # TODO: expanduser this, and make it relative to the buildmaster's
    # basedir
    self.keyfile = keyfile
    c = AuthorizedKeysChecker(keyfile)

    super().__init__(port, c, ssh_hostkey_dir)
def __init__(self, name, builderNames, periodicBuildTimer,
             reason="The Periodic scheduler named '%(name)s' triggered this build",
             **kwargs):
    """Start a build every `periodicBuildTimer` seconds (must be > 0)."""
    Timed.__init__(self, name, builderNames, reason=reason, **kwargs)
    if periodicBuildTimer <= 0:
        config.error("periodicBuildTimer must be positive")
    self.periodicBuildTimer = periodicBuildTimer
def __init__(self, p4port=None, p4user=None, p4passwd=None,
             p4base='//', p4bin='p4',
             split_file=lambda branchfile: (None, branchfile),
             pollInterval=60 * 10, histmax=None, pollinterval=-2,
             encoding='utf8', project=None, name=None,
             use_tickets=False, ticket_login_interval=60 * 60 * 24,
             server_tz=None, pollAtLaunch=False):
    """Poll a Perforce depot for new changes."""
    # for backward compatibility; the parameter used to be spelled with 'i'
    if pollinterval != -2:
        pollInterval = pollinterval

    base.PollingChangeSource.__init__(self, name=name,
                                      pollInterval=pollInterval,
                                      pollAtLaunch=pollAtLaunch)

    if project is None:
        project = ''

    # Ticket authentication requires a password to obtain tickets with.
    if use_tickets and not p4passwd:
        config.error("You need to provide a P4 password to use ticket authentication")

    self.p4port = p4port
    self.p4user = p4user
    self.p4passwd = p4passwd
    self.p4base = p4base
    self.p4bin = p4bin
    self.split_file = split_file
    self.encoding = encoding
    self.project = project
    self.use_tickets = use_tickets
    self.ticket_login_interval = ticket_login_interval
    self.server_tz = server_tz

    self._ticket_passwd = None
    self._ticket_login_counter = 0
def __init__(self, name, password, flavor, image,
             os_username, os_password, os_tenant_name, os_auth_url,
             meta=None, max_builds=None, notify_on_missing=None,
             missing_timeout=60 * 20, build_wait_timeout=60 * 10,
             properties=None, locks=None):
    """Latent buildslave backed by an OpenStack nova instance.

    Requires the `novaclient` python module.
    """
    # Fix: `notify_on_missing` and `properties` previously defaulted to
    # shared mutable [] / {} literals; use None sentinels instead.
    if notify_on_missing is None:
        notify_on_missing = []
    if properties is None:
        properties = {}

    if not client or not nce:
        config.error("The python module 'novaclient' is needed "
                     "to use a OpenStackLatentBuildSlave")

    AbstractLatentBuildSlave.__init__(
        self, name, password, max_builds, notify_on_missing,
        missing_timeout, build_wait_timeout, properties, locks)
    self.flavor = flavor
    self.image = image
    self.os_username = os_username
    self.os_password = os_password
    self.os_tenant_name = os_tenant_name
    self.os_auth_url = os_auth_url
    self.meta = meta
def __init__(self, workersrcs=None, masterdest=None,
             workdir=None, maxsize=None, blocksize=16 * 1024, glob=False,
             mode=None, compress=None, keepstamp=False, url=None,
             **buildstep_kwargs):
    """Configure a multi-file transfer step.

    Both `workersrcs` and `masterdest` are required.
    """
    # Emulate that first two arguments are positional.
    if workersrcs is None or masterdest is None:
        raise TypeError("__init__() takes at least 3 arguments")

    super().__init__(workdir=workdir, **buildstep_kwargs)

    self.workersrcs = workersrcs
    self.masterdest = masterdest
    self.maxsize = maxsize
    self.blocksize = blocksize
    if not isinstance(mode, (int, type(None))):
        config.error('mode must be an integer or None')
    self.mode = mode
    if compress not in (None, 'gz', 'bz2'):
        config.error("'compress' must be one of None, 'gz', or 'bz2'")
    self.compress = compress
    self.glob = glob
    self.keepstamp = keepstamp
    self.url = url
def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,
             builderNames=None, branch=NotABranch, branches=NotABranch,
             fileIsImportant=None, properties=None, categories=None,
             change_filter=None, onlyImportant=False):
    """Change-driven scheduler with an optional tree-stable timer.

    ``shouldntBeSet`` is a positional-argument trap: all real arguments
    must be passed by keyword.
    """
    if shouldntBeSet is not self.NotSet:
        config.error(
            "pass arguments to schedulers using keyword arguments")
    if fileIsImportant and not callable(fileIsImportant):
        config.error(
            "fileIsImportant must be a callable")
    # Fix: `properties` previously defaulted to a shared mutable {}
    # literal; use a None sentinel so each instance gets a fresh dict.
    if properties is None:
        properties = {}

    # initialize parent classes
    base.BaseScheduler.__init__(self, name, builderNames, properties)

    self.treeStableTimer = treeStableTimer
    self.fileIsImportant = fileIsImportant
    self.onlyImportant = onlyImportant
    self.change_filter = self.getChangeFilter(branch=branch,
                                              branches=branches,
                                              change_filter=change_filter,
                                              categories=categories)

    # the IDelayedCall used to wake up when this scheduler's
    # treeStableTimer expires.
    self._stable_timers = defaultdict(lambda: None)
    self._stable_timers_lock = defer.DeferredLock()
def ensureHasSSL(module):
    """Report a config error when the TLS extras are not installed."""
    from buildbot.config import error
    if has_ssl:
        return
    error(
        "TLS dependencies required for {} are not installed : {}\n pip install 'buildbot[tls]'"
        .format(module, ssl_import_error))
def bad(arg):
    # Report an unrecognized constructor argument as a config error.
    config.error("invalid {} argument {}".format(
        self.__class__.__name__, arg))
def checkConfig(self, name, password, max_builds=None,
                notify_on_missing=None,
                missing_timeout=None, properties=None,
                defaultProperties=None, locks=None,
                keepalive_interval=DEFAULT_KEEPALIVE_INTERVAL):
    """
    @param name: botname this machine will supply when it connects
    @param password: password this machine will supply when it connects
    @param max_builds: maximum number of simultaneous builds that will
                       be run concurrently on this worker (the default
                       is None for no limit)
    @param properties: properties that will be applied to builds run on
                       this worker
    @type properties: dictionary
    @param defaultProperties: properties that will be applied to builds
                              run on this worker only if the property
                              has not been set by another source
    @type defaultProperties: dictionary
    @param locks: A list of locks that must be acquired before this worker
                  can be used
    @type locks: dictionary
    """
    self.name = name = bytes2unicode(name)
    self.password = password
    # protocol registration
    self.registration = None
    # graceful-shutdown / pause flags for this worker
    self._graceful = False
    self._paused = False

    # these are set when the service is started
    self.manager = None
    self.workerid = None

    self.worker_status = WorkerStatus(name)
    self.worker_commands = None
    self.workerforbuilders = {}
    self.max_builds = max_builds
    self.access = []
    if locks:
        self.access = locks
    self.lock_subscriptions = []

    # regular properties always win; defaultProperties apply only when
    # no other source set the property
    self.properties = Properties()
    self.properties.update(properties or {}, "Worker")
    self.properties.setProperty("workername", name, "Worker")
    self.defaultProperties = Properties()
    self.defaultProperties.update(defaultProperties or {}, "Worker")

    self.lastMessageReceived = 0

    # accept a bare string as shorthand for a one-element list
    if notify_on_missing is None:
        notify_on_missing = []
    if isinstance(notify_on_missing, str):
        notify_on_missing = [notify_on_missing]
    self.notify_on_missing = notify_on_missing
    for i in notify_on_missing:
        if not isinstance(i, str):
            config.error(
                'notify_on_missing arg %r is not a string' % (i, ))

    self.missing_timeout = missing_timeout
    self.missing_timer = None

    # a protocol connection, if we're currently connected
    self.conn = None

    self._old_builder_list = None
    self._configured_builderid_list = None
def test_error_no_raise(self):
    """With `_errors` patched in, config.error() collects instead of raising."""
    collector = config.ConfigErrors()
    self.patch(config, "_errors", collector)
    config.error("message")
    self.assertEqual(collector.errors, ["message"])
def checkConfig(self, name, password, max_builds=None,
                notify_on_missing=None,
                missing_timeout=10 * 60,  # Ten minutes
                properties=None, locks=None, keepalive_interval=3600):
    """
    @param name: botname this machine will supply when it connects
    @param password: password this machine will supply when it connects
    @param max_builds: maximum number of simultaneous builds that will
                       be run concurrently on this buildslave (the
                       default is None for no limit)
    @param properties: properties that will be applied to builds run on
                       this slave
    @type properties: dictionary
    @param locks: A list of locks that must be acquired before this slave
                  can be used
    @type locks: dictionary
    """
    self.name = name = ascii2unicode(name)

    if properties is None:
        properties = {}

    self.password = password

    # protocol registration
    self.registration = None

    # these are set when the service is started
    self.manager = None
    self.buildslaveid = None

    self.slave_status = SlaveStatus(name)
    self.slave_commands = None
    self.slavebuilders = {}
    self.max_builds = max_builds
    self.access = locks or []
    self.lock_subscriptions = []

    self.properties = Properties()
    self.properties.update(properties, "BuildSlave")
    self.properties.setProperty("slavename", name, "BuildSlave")

    self.lastMessageReceived = 0

    # Accept a bare string as shorthand for a one-element list.
    if notify_on_missing is None:
        notify_on_missing = []
    if isinstance(notify_on_missing, str):
        notify_on_missing = [notify_on_missing]
    self.notify_on_missing = notify_on_missing
    for item in notify_on_missing:
        if not isinstance(item, str):
            config.error(
                'notify_on_missing arg %r is not a string' % (item, ))

    self.missing_timeout = missing_timeout
    self.missing_timer = None

    # a protocol connection, if we're currently connected
    self.conn = None

    self._old_builder_list = None
def bad(arg):
    # Report an unrecognized constructor argument as a config error.
    config.error(
        "invalid %s argument %s" % (self.__class__.__name__, arg))
def __init__(self, name, password, docker_host, image=None, command=None,
             volumes=None, dockerfile=None, version=None, tls=None,
             followStartupLogs=False, masterFQDN=None, hostconfig=None,
             networking_config='bridge', **kwargs):
    """Latent worker that boots a Docker container per build.

    Requires docker-py >= 1.4 and at least one of `image` or
    `dockerfile`.
    """
    if not client:
        config.error("The python module 'docker-py>=1.4' is needed to use a"
                     " DockerLatentWorker")
    if not image and not dockerfile:
        config.error("DockerLatentWorker: You need to specify at least"
                     " an image name, or a dockerfile")

    self.volumes = volumes or []
    self.binds = {}
    self.networking_config = networking_config
    self.followStartupLogs = followStartupLogs

    # Following block is only for checking config errors,
    # actual parsing happens in self.parse_volumes()
    # Renderables can be direct volumes definition or list member
    if isinstance(volumes, list):
        for volume_string in (volumes or []):
            if not isinstance(volume_string, str):
                # renderable entry: validated later, at render time
                continue
            try:
                # split is validation-only here; results are discarded
                volume, bind = volume_string.split(":", 1)
            except ValueError:
                config.error("Invalid volume definition for docker "
                             "%s. Skipping..." % volume_string)
                continue

    # Set build_wait_timeout to 0 if not explicitly set: Starting a
    # container is almost immediate, we can afford doing so for each build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    AbstractLatentWorker.__init__(self, name, password, **kwargs)

    self.image = image
    self.command = command or []
    self.dockerfile = dockerfile
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    self.hostconfig = hostconfig or {}

    # Prepare the parameters for the Docker Client object.
    self.client_args = {'base_url': docker_host}
    if version is not None:
        self.client_args['version'] = version
    if tls is not None:
        self.client_args['tls'] = tls
def checkConfig(self, name=None, pollInterval=60 * 10, pollAtLaunch=False):
    """Validate the polling configuration; the interval must not be negative."""
    ChangeSource.checkConfig(self, name=name)
    if pollInterval < 0:
        config.error("interval must be >= 0: {}".format(pollInterval))
def loadConfig(self):
    # Test stub: unconditionally report a configuration error.
    config.error('oh noes')
def __init__(self, roles, usernames):
    """Grant `roles` to the listed `usernames`; None is not a valid name."""
    self.roles = roles
    if None in usernames:
        from buildbot import config
        config.error('Usernames cannot be None')
    self.usernames = usernames
def check(self):
    """Validate `self.workers`: 'all', None, or a list/tuple/set of names."""
    workers = self.workers
    if workers == 'all':
        return
    if workers is None:
        return
    if isinstance(workers, (list, tuple, set)):
        return
    config.error(
        "workers must be 'all', None, or list of worker names")
def checkConfig(self, foo, a=None):
    """Require keyword `a`; report a config error when it is missing."""
    if a is None:
        config.error("a must be specified")
    return defer.succeed(True)
def __init__(self, name, builderNames,
             username=UserNameParameter(),
             reason=StringParameter(name="reason",
                                    default="force build", size=20),
             reasonString="A build was forced by '%(owner)s': %(reason)s",
             buttonName=None,
             codebases=None,
             label=None,
             properties=None):
    """Create a ForceScheduler.

    The UI presents one field per BaseParameter; NestedParameter can be
    used to group fields logically.

    @param name: name of this scheduler (used as a key for state)
    @type name: unicode
    @param builderNames: list of builders this scheduler may start
    @type builderNames: list of unicode
    @param username: the "owner" for a build (may not be shown depending
                     on the Auth configuration for the master)
    @type username: BaseParameter
    @param reason: the "reason" for a build
    @type reason: BaseParameter
    @param codebases: the codebases for a build; None generates a default,
                      but CodebaseParameter(codebase='', hide=True) will
                      remove all codebases
    @type codebases: list of string's or CodebaseParameter's
    @param properties: extra properties to configure the build
    @type properties: list of BaseParameter's
    """
    # Validate the scheduler name: unicode, non-empty, identifier-shaped.
    if not self.checkIfType(name, str):
        config.error("ForceScheduler name must be a unicode string: %r" %
                     name)
    if not name:
        config.error("ForceScheduler name must not be empty: %r" % name)
    if not identifiers.ident_re.match(name):
        config.error("ForceScheduler name must be an identifier: %r" % name)

    if not self.checkIfListOfType(builderNames, (str, )):
        config.error(
            "ForceScheduler '{}': builderNames must be a list of strings: {}"
            .format(name, repr(builderNames)))

    if self.checkIfType(reason, BaseParameter):
        self.reason = reason
    else:
        config.error(
            "ForceScheduler '{}': reason must be a StringParameter: {}".
            format(name, repr(reason)))

    if properties is None:
        properties = []
    if not self.checkIfListOfType(properties, BaseParameter):
        config.error(("ForceScheduler '{}': properties must be "
                      "a list of BaseParameters: {}").format(
                          name, repr(properties)))

    if self.checkIfType(username, BaseParameter):
        self.username = username
    else:
        config.error(
            "ForceScheduler '{}': username must be a StringParameter: {}".
            format(name, repr(username)))

    self.forcedProperties = []
    self.label = name if label is None else label

    # Use the default single codebase form if none are provided
    if codebases is None:
        codebases = [CodebaseParameter(codebase='')]
    elif not codebases:
        config.error((
            "ForceScheduler '{}': 'codebases' cannot be empty;"
            " use [CodebaseParameter(codebase='', hide=True)] if needed: {} "
        ).format(name, repr(codebases)))
    elif not isinstance(codebases, list):
        config.error((
            "ForceScheduler '{}': 'codebases' should be a list of strings "
            "or CodebaseParameter, not {}").format(name, type(codebases)))

    # Normalize plain-string codebases into CodebaseParameters and collect
    # the codebase dict that the base scheduler expects.
    codebase_dict = {}
    for codebase in codebases:
        if isinstance(codebase, str):
            codebase = CodebaseParameter(codebase=codebase)
        elif not isinstance(codebase, CodebaseParameter):
            config.error((
                "ForceScheduler '{}': 'codebases' must be a list of strings "
                "or CodebaseParameter objects: {}").format(
                    name, repr(codebases)))
        self.forcedProperties.append(codebase)
        codebase_dict[codebase.codebase] = dict(branch='', repository='',
                                                revision='')

    super().__init__(name=name, builderNames=builderNames, properties={},
                     codebases=codebase_dict)

    if properties:
        self.forcedProperties.extend(properties)

    # this is used to simplify the template
    self.all_fields = [NestedParameter(name='', fields=[username, reason])]
    self.all_fields.extend(self.forcedProperties)

    self.reasonString = reasonString
    self.buttonName = buttonName or name
def checkConfig(self, fromaddr, mode=("failing", "passing", "warnings"),
                tags=None, builders=None, addLogs=False,
                relayhost="localhost", buildSetSummary=False,
                subject="Buildbot %(result)s in %(title)s on %(builder)s",
                lookup=None, extraRecipients=None,
                sendToInterestedUsers=True, messageFormatter=None,
                extraHeaders=None, addPatch=True, useTls=False,
                useSmtps=False, smtpUser=None, smtpPassword=None,
                smtpPort=25, schedulers=None, branches=None,
                watchedWorkers='all', messageFormatterMissingWorker=None,
                dumpMailsToLog=False, generators=None):
    """Validate the mail notifier configuration (recipients, lookup,
    headers, SSL availability)."""
    # Sending mail requires twisted-mail to be installed.
    if ESMTPSenderFactory is None:
        config.error("twisted-mail is not installed - cannot "
                     "send mail")

    super().checkConfig(
        mode=mode, tags=tags, builders=builders,
        buildSetSummary=buildSetSummary,
        messageFormatter=messageFormatter, subject=subject,
        addLogs=addLogs, addPatch=addPatch,
        schedulers=schedulers, branches=branches,
        watchedWorkers=watchedWorkers,
        messageFormatterMissingWorker=messageFormatterMissingWorker,
        generators=generators,
        _has_old_arg_names={
            'addPatch': addPatch is False,
            'watchedWorkers': watchedWorkers != 'all'
        })

    # Every extra recipient must be a plausible email address.
    if extraRecipients is None:
        extraRecipients = []
    if not isinstance(extraRecipients, (list, tuple)):
        config.error("extraRecipients must be a list or tuple")
    else:
        for r in extraRecipients:
            if not isinstance(r, str) or not VALID_EMAIL.search(r):
                config.error(
                    "extra recipient {} is not a valid email".format(r))

    # A non-string lookup must implement IEmailLookup.
    if lookup is not None:
        if not isinstance(lookup, str):
            assert interfaces.IEmailLookup.providedBy(lookup)

    if extraHeaders:
        if not isinstance(extraHeaders, dict):
            config.error("extraHeaders must be a dictionary")

    if useSmtps:
        ssl.ensureHasSSL(self.__class__.__name__)
def checkPassIsInPath(self):
    """Report a configuration error unless a 'pass' executable file exists
    somewhere on PATH."""
    found = any((Path(entry) / "pass").is_file()
                for entry in os.environ["PATH"].split(":"))
    if not found:
        config.error("pass does not exist in PATH")
def __init__(self, name, builderNames,
             username=UserNameParameter(),
             reason=StringParameter(name="reason", label="Reason",
                                    default="", length=20),
             codebases=None,
             properties=None,
             # deprecated; use 'codebase' instead
             branch=None,
             revision=None,
             repository=None,
             project=None):
    """Initialize a ForceScheduler.

    The UI will provide a set of fields to the user; these fields are
    driven by a corresponding child class of BaseParameter.  Use
    NestedParameter to provide logical groupings for parameters.

    The branch/revision/repository/project fields are deprecated and
    provided only for backwards compatibility.  Using a Codebase(name='')
    will give the equivalent behavior.

    @param name: name of this scheduler (used as a key for state)
    @type name: unicode
    @param builderNames: list of builders this scheduler may start
    @type builderNames: list of unicode
    @param username: the "owner" for a build (may not be shown depending
                     on the Auth configuration for the master)
    @type username: BaseParameter
    @param reason: the "reason" for a build
    @type reason: BaseParameter
    @param codebases: the codebases for a build; None will generate a
                      default, but [] will remove all codebases
    @type codebases: list of string's or CodebaseParameter's
    @param properties: extra properties to configure the build
    @type properties: list of BaseParameter's
    """
    # BUG FIX: 'properties' used to default to a mutable list literal,
    # shared by every instance of the scheduler.  Use a None sentinel and
    # build the default per-call instead.
    if properties is None:
        properties = [
            NestedParameter(name='', fields=[
                AnyPropertyParameter("property1"),
                AnyPropertyParameter("property2"),
                AnyPropertyParameter("property3"),
                AnyPropertyParameter("property4"),
            ])
        ]

    if not self.checkIfType(name, str):
        config.error("ForceScheduler name must be a unicode string: %r" %
                     name)
    if not name:
        config.error("ForceScheduler name must not be empty: %r " % name)
    if not self.checkIfListOfType(builderNames, str):
        config.error(
            "ForceScheduler builderNames must be a list of strings: %r" %
            builderNames)

    if self.checkIfType(reason, BaseParameter):
        self.reason = reason
    else:
        config.error(
            "ForceScheduler reason must be a StringParameter: %r" % reason)

    if not self.checkIfListOfType(properties, BaseParameter):
        config.error(
            "ForceScheduler properties must be a list of BaseParameters: %r" %
            properties)

    if self.checkIfType(username, BaseParameter):
        self.username = username
    else:
        config.error(
            "ForceScheduler username must be a StringParameter: %r" %
            username)

    self.forcedProperties = []

    # The deprecated branch/revision/repository/project arguments are
    # translated into a single default codebase; they are mutually
    # exclusive with an explicit 'codebases' list.
    if any((branch, revision, repository, project)):
        if codebases:
            config.error(
                "ForceScheduler: Must either specify 'codebases' or the 'branch/revision/repository/project' parameters: %r " % (codebases, ))
        codebases = [
            CodebaseParameter(
                codebase='',
                branch=branch or DefaultField,
                revision=revision or DefaultField,
                repository=repository or DefaultField,
                project=project or DefaultField,
            )
        ]

    # Use the default single codebase form if none are provided
    if codebases is None:
        codebases = [CodebaseParameter(codebase='')]
    elif not codebases:
        config.error(
            "ForceScheduler: 'codebases' cannot be empty; use CodebaseParameter(codebase='', hide=True) if needed: %r " % (codebases, ))

    codebase_dict = {}
    for codebase in codebases:
        if isinstance(codebase, basestring):
            codebase = CodebaseParameter(codebase=codebase)
        elif not isinstance(codebase, CodebaseParameter):
            config.error(
                "ForceScheduler: 'codebases' must be a list of strings or CodebaseParameter objects: %r" % (codebases, ))
        self.forcedProperties.append(codebase)
        codebase_dict[codebase.codebase] = dict(branch='', repository='',
                                                revision='')

    base.BaseScheduler.__init__(self, name=name, builderNames=builderNames,
                                properties={}, codebases=codebase_dict)

    if properties:
        self.forcedProperties.extend(properties)

    # this is used to simplify the template
    self.all_fields = [NestedParameter(name='', fields=[username, reason])]
    self.all_fields.extend(self.forcedProperties)
def __init__(self, mode='incremental',
             method=None,
             p4base=None,
             p4branch=None,
             p4port=None,
             p4user=None,
             p4passwd=None,
             p4extra_views=(),
             p4line_end='local',
             p4viewspec=None,
             p4viewspec_suffix='...',
             p4client=Interpolate(
                 'buildbot_%(prop:workername)s_%(prop:buildername)s'),
             p4client_spec_options='allwrite rmdir',
             p4extra_args=None,
             p4bin='p4',
             use_tickets=False,
             **kwargs):
    """Create a P4 source step.

    Either p4viewspec or p4base (optionally with p4branch/p4extra_views)
    must be given, but not both.
    """
    self.method = method
    self.mode = mode
    self.p4branch = p4branch
    self.p4bin = p4bin
    self.p4base = p4base
    self.p4port = p4port
    self.p4user = p4user
    self.p4passwd = p4passwd
    self.p4extra_views = p4extra_views
    self.p4viewspec = p4viewspec
    self.p4viewspec_suffix = p4viewspec_suffix
    self.p4line_end = p4line_end
    self.p4client = p4client
    self.p4client_spec_options = p4client_spec_options
    self.p4extra_args = p4extra_args
    self.use_tickets = use_tickets
    Source.__init__(self, **kwargs)

    if self.mode not in self.possible_modes and \
            not interfaces.IRenderable.providedBy(self.mode):
        config.error("mode %s is not an IRenderable, or one of %s" %
                     (self.mode, self.possible_modes))

    # p4viewspec and p4base/p4branch/p4extra_views are mutually exclusive
    # ways of describing the client view.
    if not p4viewspec and p4base is None:
        config.error("You must provide p4base or p4viewspec")
    if p4viewspec and (p4base or p4branch or p4extra_views):
        # BUG FIX: the message was missing its closing parenthesis.
        config.error("Either provide p4viewspec or p4base and p4branch (and optionally p4extra_views)")
    if p4viewspec and isinstance(p4viewspec, StringType):
        config.error("p4viewspec must not be a string, and should be a sequence of 2 element sequences")

    # Trailing slashes would produce malformed depot paths when joined.
    if not interfaces.IRenderable.providedBy(p4base) and \
            p4base and p4base.endswith('/'):
        config.error('p4base should not end with a trailing / [p4base = %s]' %
                     p4base)
    if not interfaces.IRenderable.providedBy(p4branch) and \
            p4branch and p4branch.endswith('/'):
        config.error('p4branch should not end with a trailing / [p4branch = %s]' % p4branch)
    if (p4branch or p4extra_views) and not p4base:
        config.error('If you specify either p4branch or p4extra_views you must also specify p4base')

    if self.p4client_spec_options is None:
        self.p4client_spec_options = ''
def checkPassDirectoryIsAvailableAndReadable(self, dirname):
    """Report a configuration error unless dirname exists.

    NOTE(review): despite the name, only existence (os.F_OK) is checked,
    not readability -- confirm whether os.R_OK should also be tested.
    """
    exists = os.access(dirname, os.F_OK)
    if not exists:
        config.error(f"directory {dirname} does not exist")
def loadConfig(cls, b, f):
    # Stub implementation: unconditionally report a configuration error.
    config.error('oh noes')
def __init__(self, repourl=None, branch='HEAD', mode='incremental',
             method=None, reference=None, submodules=False, shallow=False,
             progress=False, retryFetch=False, clobberOnFailure=False,
             getDescription=False, config=None, origin=None, **kwargs):
    """Create a Git source step.

    @type repourl: string
    @param repourl: the URL which points at the git repository
    @type branch: string
    @param branch: the branch or tag to check out by default; a build may
                   specify a different branch
    @type submodules: boolean
    @param submodules: whether to update (and initialize) git submodules
    @type mode: string
    @param mode: type of checkout (described in docs)
    @type method: string
    @param method: which of the possible full-build methods to use
    @type reference: string
    @param reference: if available use a reference repo
                      (git clone --reference)
    @type progress: boolean
    @param progress: pass --progress when fetching (requires Git 1.7.2+)
    @type shallow: boolean or integer
    @param shallow: use a shallow clone, if possible
    @type retryFetch: boolean
    @param retryFetch: retry fetching before failing source checkout
    @type getDescription: boolean or dict
    @param getDescription: use 'git describe' to describe the fetched
                           revision
    @type origin: string
    @param origin: the name to give the remote when cloning (default None)
    @type config: dict
    @param config: git configuration options to enable when running git
    """
    # Normalize falsy non-dict values (e.g. 0, '') to a plain False.
    if not getDescription and not isinstance(getDescription, dict):
        getDescription = False

    self.branch = branch
    self.method = method
    self.prog = progress
    self.repourl = repourl
    self.reference = reference
    self.retryFetch = retryFetch
    self.submodules = submodules
    self.shallow = shallow
    self.clobberOnFailure = clobberOnFailure
    self.mode = mode
    self.getDescription = getDescription
    self.config = config
    self.supportsBranch = True
    self.supportsSubmoduleCheckout = True
    self.srcdir = 'source'
    self.origin = origin
    Source.__init__(self, **kwargs)

    # Validate after Source.__init__ so errors carry the step's name.
    if not self.repourl:
        bbconfig.error("Git: must provide repourl.")
    if isinstance(self.mode, basestring):
        if not self._hasAttrGroupMember('mode', self.mode):
            bbconfig.error(
                "Git: mode must be %s" %
                (' or '.join(self._listAttrGroupMembers('mode'))))
    if isinstance(self.method, basestring):
        if (self.mode == 'full' and
                self.method not in ['clean', 'fresh', 'clobber', 'copy', None]):
            bbconfig.error("Git: invalid method for mode 'full'.")
        if self.shallow and (self.mode != 'full' or self.method != 'clobber'):
            bbconfig.error(
                "Git: shallow only possible with mode 'full' and method 'clobber'."
            )
    if not isinstance(self.getDescription, (bool, dict)):
        bbconfig.error("Git: getDescription must be a boolean or a dict.")
def __init__(self, architecture=None, distribution=None, basetgz=None,
             mirror=None, extrapackages=None, keyring=None, components=None,
             **kwargs):
    """Create the DebPbuilder step.

    @type architecture: str
    @param architecture: the name of the architecture to build
    @type distribution: str
    @param distribution: the name of the distribution to use
    @type basetgz: str
    @param basetgz: the path or path template of the basetgz
    @type mirror: str
    @param mirror: the mirror for building basetgz
    @type extrapackages: list
    @param extrapackages: adds packages specified to buildroot
    @type keyring: str
    @param keyring: keyring file to use for verification
    @type components: str
    @param components: components to use for chroot creation
    @type kwargs: dict
    @param kwargs: all further keyword arguments
    """
    WarningCountingShellCommand.__init__(self, **kwargs)

    # Only override the class-level defaults when a value was supplied.
    if architecture:
        self.architecture = architecture
    if distribution:
        self.distribution = distribution
    if mirror:
        self.mirror = mirror
    if extrapackages:
        self.extrapackages = extrapackages
    if keyring:
        self.keyring = keyring
    if components:
        self.components = components

    # The basetgz template is expanded with the resolved architecture and
    # distribution.
    if self.architecture:
        kwargs['architecture'] = self.architecture
    else:
        kwargs['architecture'] = 'local'
    kwargs['distribution'] = self.distribution
    if basetgz:
        self.basetgz = basetgz % kwargs
    else:
        self.basetgz = self.basetgz % kwargs

    if not self.distribution:
        config.error("You must specify a distribution.")

    # Build the pdebuild command line.
    self.command = ['pdebuild', '--buildresult', '.',
                    '--pbuilder', self.pbuilder]
    if self.architecture:
        self.command += ['--architecture', self.architecture]
    self.command += ['--', '--buildresult', '.',
                     self.baseOption, self.basetgz]
    if self.extrapackages:
        self.command += ['--extrapackages', " ".join(self.extrapackages)]

    # A missing .pbuilderrc is expected; do not count it as a warning.
    self.suppressions.append(
        (None, re.compile(r"\.pbuilderrc does not exist"), None, None))

    self.addLogObserver(
        'stdio', logobserver.LineConsumerLogObserver(self.logConsumer))
def load(self):
    """Execute the configuration file and return the configured variable.

    License note: a reimplementation based on the original
    buildbot.config.FileLoader and buildbot.config.loadConfigDict
    implementation.

    Raises ConfigErrors when the file is missing, unreadable, or fails to
    define self.variable.
    """
    # BUG FIX: the local used to be named 'config', shadowing the
    # buildbot config module name used throughout this file.
    config_path = self.path.absolute()
    basedir = config_path.parent

    if not config_path.exists():
        raise ConfigErrors(
            [f"configuration file '{config_path}' does not exist"])

    # Probe readability up front so the user gets a clear message instead
    # of a failure from deep inside execfile.
    try:
        with config_path.open('r'):
            pass
    except IOError as e:
        raise ConfigErrors(
            [f'unable to open configuration file {config_path}: {e}'])

    log.info(f'Loading configuration from {config_path}')

    # execute the config file
    local_dict = {
        # inject global variables, useful for including configurations
        **self.inject_globals,
        # TODO(kszucs): is it required?
        'basedir': basedir.expanduser(),
        '__file__': config_path
    }

    old_sys_path = sys.path[:]
    sys.path.append(str(basedir))
    try:
        try:
            execfile(config_path, local_dict)
        except ConfigErrors:
            raise
        except SyntaxError:
            exc = traceback.format_exc()
            error(
                f'encountered a SyntaxError while parsing config file:\n'
                f'{exc}',
                always_raise=True)
        except Exception:
            exc = traceback.format_exc()
            error(
                f'error while parsing config file: {exc} (traceback in '
                f'logfile)',
                always_raise=True)
    finally:
        sys.path[:] = old_sys_path

    if self.variable not in local_dict:
        # BUG FIX: the two f-string fragments concatenated without a
        # space, producing "...define variable'name'".
        error(
            f"Configuration file {config_path} does not define variable "
            f"'{self.variable}'",
            always_raise=True)

    return local_dict[self.variable]
def __init__(self, testdir=None, **kwargs):
    # testdir is effectively required even though it defaults to None.
    if not testdir:
        config.error("please pass testdir")
    kwargs['command'] = 'run_maxq.py %s' % (testdir, )
    ShellCommand.__init__(self, **kwargs)
def __init__(self, repourl, branches=None, branch=None, workdir=None,
             pollInterval=10 * 60, gitbin='git', usetimestamps=True,
             category=None, project=None, pollinterval=-2,
             fetch_refspec=None, encoding='utf-8', name=None,
             pollAtLaunch=False, buildPushesWithNoCommits=False,
             only_tags=False, sshPrivateKey=None, sshHostKey=None,
             sshKnownHosts=None):
    """Create a GitPoller watching one or more branches of a repository."""
    # for backward compatibility; the parameter used to be spelled with 'i'
    if pollinterval != -2:
        pollInterval = pollinterval

    if name is None:
        name = repourl

    super().__init__(name=name,
                     pollInterval=pollInterval,
                     pollAtLaunch=pollAtLaunch,
                     sshPrivateKey=sshPrivateKey,
                     sshHostKey=sshHostKey,
                     sshKnownHosts=sshKnownHosts)

    if project is None:
        project = ''

    # branch/branches/only_tags are mutually constraining ways of selecting
    # what to poll.
    if only_tags and (branch or branches):
        config.error("GitPoller: can't specify only_tags and branch/branches")
    if branch and branches:
        config.error("GitPoller: can't specify both branch and branches")
    elif branch:
        branches = [branch]
    elif not branches:
        if only_tags:
            branches = lambda ref: ref.startswith('refs/tags/')  # noqa: E731
        else:
            branches = ['master']

    self.repourl = repourl
    self.branches = branches
    self.encoding = encoding
    self.buildPushesWithNoCommits = buildPushesWithNoCommits
    self.gitbin = gitbin
    self.workdir = workdir
    self.usetimestamps = usetimestamps
    self.category = category if callable(category) else bytes2unicode(
        category, encoding=self.encoding)
    self.project = bytes2unicode(project, encoding=self.encoding)
    self.changeCount = 0
    self.lastRev = {}
    self.sshPrivateKey = sshPrivateKey
    self.sshHostKey = sshHostKey
    self.sshKnownHosts = sshKnownHosts
    self.setupGit(logname='GitPoller')

    if fetch_refspec is not None:
        config.error("GitPoller: fetch_refspec is no longer supported. "
                     "Instead, only the given branches are downloaded.")

    if self.workdir is None:
        self.workdir = 'gitpoller-work'
def __init__(self, name, password, max_builds=None,
             notify_on_missing=None, missing_timeout=3600,
             properties=None, locks=None, keepalive_interval=3600):
    """
    @param name: botname this machine will supply when it connects
    @param password: password this machine will supply when it connects
    @param max_builds: maximum number of simultaneous builds that will
                       be run concurrently on this buildslave (the
                       default is None for no limit)
    @param properties: properties that will be applied to builds run on
                       this slave
    @type properties: dictionary
    @param locks: A list of locks that must be acquired before this slave
                  can be used
    @type locks: dictionary
    """
    # BUG FIX: notify_on_missing and properties used to default to a
    # mutable [] / {} shared across all instances; use None sentinels.
    if notify_on_missing is None:
        notify_on_missing = []
    if properties is None:
        properties = {}

    service.MultiService.__init__(self)
    self.slavename = name
    self.password = password

    # PB registration
    self.registration = None
    self.registered_port = None

    # these are set when the service is started, and unset when it is
    # stopped
    self.botmaster = None
    self.master = None

    self.slave_status = SlaveStatus(name)
    self.slave = None  # a RemoteReference to the Bot, when connected
    self.slave_commands = None
    self.slavebuilders = {}
    self.max_builds = max_builds
    self.access = []
    if locks:
        self.access = locks
    self.lock_subscriptions = []

    self.properties = Properties()
    self.properties.update(properties, "BuildSlave")
    self.properties.setProperty("slavename", name, "BuildSlave")

    self.lastMessageReceived = 0
    if isinstance(notify_on_missing, str):
        notify_on_missing = [notify_on_missing]
    self.notify_on_missing = notify_on_missing
    for i in notify_on_missing:
        if not isinstance(i, str):
            config.error('notify_on_missing arg %r is not a string' % (i, ))
    self.missing_timeout = missing_timeout
    self.missing_timer = None
    self.keepalive_interval = keepalive_interval

    self.detached_subs = None

    self._old_builder_list = None
def __init__(self, host, nick, channels, pm_to_nicks=None, port=6667,
             allowForce=False, tags=None, password=None, notify_events=None,
             noticeOnChannel=False, showBlameList=True, useRevisions=False,
             useSSL=False, lostDelay=None, failedDelay=None, useColors=True,
             allowShutdown=False,
             categories=None  # categories is deprecated
             ):
    """Create the IRC status bot service.

    Raises a config error unless allowForce and allowShutdown are real
    booleans; requires PyOpenSSL when useSSL is set.
    """
    # BUG FIX: pm_to_nicks and notify_events used to default to a mutable
    # [] / {} shared across all instances; use None sentinels instead.
    if pm_to_nicks is None:
        pm_to_nicks = []
    if notify_events is None:
        notify_events = {}

    base.StatusReceiverMultiService.__init__(self)

    if allowForce not in (True, False):
        config.error("allowForce must be boolean, not %r" % (allowForce, ))
    if allowShutdown not in (True, False):
        config.error("allowShutdown must be boolean, not %r" %
                     (allowShutdown, ))

    # need to stash these so we can detect changes later
    self.host = host
    self.port = port
    self.nick = nick
    self.channels = channels
    self.pm_to_nicks = pm_to_nicks
    self.password = password
    self.allowForce = allowForce
    self.useRevisions = useRevisions
    self.tags = tags or categories
    self.notify_events = notify_events
    self.allowShutdown = allowShutdown

    self.f = IrcStatusFactory(self.nick, self.password, self.channels,
                              self.pm_to_nicks, self.tags,
                              self.notify_events,
                              noticeOnChannel=noticeOnChannel,
                              useRevisions=useRevisions,
                              showBlameList=showBlameList,
                              lostDelay=lostDelay,
                              failedDelay=failedDelay,
                              useColors=useColors,
                              allowShutdown=allowShutdown)

    if useSSL:
        # SSL client needs a ClientContextFactory for some SSL mumbo-jumbo
        if not have_ssl:
            raise RuntimeError("useSSL requires PyOpenSSL")
        cf = ssl.ClientContextFactory()
        c = internet.SSLClient(self.host, self.port, self.f, cf)
    else:
        c = internet.TCPClient(self.host, self.port, self.f)
    c.setServiceParent(self)
def __init__(self, http_port=None, distrib_port=None, allowForce=None,
             public_html="public_html", site=None, numbuilds=15,
             num_events=200, num_events_max=None, auth=None,
             order_console_by_time=False, changecommentlink=None,
             projects=None, repositories=None, authz=None,
             logRotateLength=None, maxRotatedFiles=None,
             change_hook_dialects=None, provide_feeds=None,
             jinja_loaders=None, change_hook_auth=None):
    """Run a web server that provides Buildbot status.

    @param http_port: strports specification (or int, for backwards
        compatibility) for the web server's listening port, e.g.
        'tcp:8000' or 'tcp:12345:interface=127.0.0.1'.
    @param distrib_port: strports specification (or int, or unix socket
        path) to publish the Waterfall via web.distrib instead; preferred
        form is e.g. 'unix:/home/buildbot/.twistd-web-pb'.
    @param allowForce: deprecated; use authz instead.
    @param auth: deprecated; use with authz.  An IAuth object restricting
        access to the allowForce features; ignored unless allowForce is
        True.  If None, anyone can force or stop builds.
    @param authz: a buildbot.status.web.authz.Authz instance giving the
        authorization parameters for this view.
    @param public_html: the path to the public_html directory, absolute
        or relative to the basedir (default 'public_html').
    @param site: optional custom L{twisted.web.server.Site} object.
    @param numbuilds: default number of entries in lists at the
        /one_line_per_build and /builders/FOO URLs; overridable both
        programmatically and via ?numbuilds=xy on the URL.
    @param num_events: default number of events shown in the waterfall.
    @param num_events_max: maximum number of events allowed in the
        waterfall; None disables the check.
    @param order_console_by_time: order changes in the console view by
        creation time (Git-like VCS) rather than integer revision (SVN).
    @param changecommentlink: callable, dict, tuple (2 or 3 strings) or
        None; adds links to ticket/bug ids in change comments (see
        buildbot.status.web.base.changecommentlink).
    @param projects: callable, dict or None mapping project identifiers
        to URLs (see buildbot.status.web.base.dictlink).
    @param repositories: callable, dict or None mapping repository
        identifiers to URLs (see buildbot.status.web.base.dictlink).
    @param logRotateLength: file size at which http.log is rotated; falls
        back to the buildbot.tac setting, then the master default (1 Mb).
    @param maxRotatedFiles: number of old http.log files kept; falls back
        to the buildbot.tac setting, then the master default (10 files).
    @param change_hook_dialects: if empty, disables change_hook support;
        otherwise a whitelist dict of dialects mapping to their options
        (key DEFAULT enables the default handler).
    @param provide_feeds: None for all of atom/json/rss, otherwise a list
        naming the feeds to provide.
    @param jinja_loaders: optional list of additional Jinja2 loader
        objects to search for templates.
    """
    # BUG FIX: change_hook_dialects used to default to a mutable {}
    # shared across instances; use a None sentinel instead.
    if change_hook_dialects is None:
        change_hook_dialects = {}

    service.MultiService.__init__(self)
    if type(http_port) is int:
        http_port = "tcp:%d" % http_port
    self.http_port = http_port

    if distrib_port is not None:
        if type(distrib_port) is int:
            distrib_port = "tcp:%d" % distrib_port
        if distrib_port[0] in "/~.":  # pathnames
            distrib_port = "unix:%s" % distrib_port
    self.distrib_port = distrib_port

    self.num_events = num_events
    if num_events_max:
        if num_events_max < num_events:
            config.error("num_events_max must be greater than num_events")
        self.num_events_max = num_events_max
    self.public_html = public_html

    # make up an authz if allowForce was given
    if authz:
        if allowForce is not None:
            config.error("cannot use both allowForce and authz parameters")
        if auth:
            config.error("cannot use both auth and authz parameters (pass " +
                         "auth as an Authz parameter)")
    else:
        # invent an authz
        if allowForce and auth:
            authz = Authz(auth=auth, default_action="auth")
        elif allowForce:
            authz = Authz(default_action=True)
        else:
            if auth:
                log.msg("Warning: Ignoring authentication. Search for 'authorization'"
                        " in the manual")
            authz = Authz()  # no authorization for anything
    self.authz = authz

    # check for correctness of HTTP auth parameters
    if change_hook_auth is not None:
        if not isinstance(change_hook_auth, tuple) or \
                len(change_hook_auth) != 2:
            config.error("Invalid credentials for change_hook auth")
    self.change_hook_auth = change_hook_auth

    self.orderConsoleByTime = order_console_by_time

    # If we were given a site object, go ahead and use it.
    # (if not, we add one later)
    self.site = site

    # keep track of our child services
    self.http_svc = None
    self.distrib_svc = None

    # store the log settings until we create the site object
    self.logRotateLength = logRotateLength
    self.maxRotatedFiles = maxRotatedFiles

    # create the web site page structure
    self.childrenToBeAdded = {}
    self.setupUsualPages(numbuilds=numbuilds, num_events=num_events,
                         num_events_max=num_events_max)
    self.changecommentlink = changecommentlink
    self.repositories = repositories
    self.projects = projects

    # keep track of cached connections so we can break them when we shut
    # down. See ticket #102 for more details.
    self.channels = weakref.WeakKeyDictionary()

    # do we want to allow change_hook
    self.change_hook_dialects = {}
    if change_hook_dialects:
        self.change_hook_dialects = change_hook_dialects
        resource_obj = ChangeHookResource(dialects=self.change_hook_dialects)
        if self.change_hook_auth is not None:
            resource_obj = self.setupProtectedResource(resource_obj)
        self.putChild("change_hook", resource_obj)

    # Set default feeds
    if provide_feeds is None:
        self.provide_feeds = ["atom", "json", "rss"]
    else:
        self.provide_feeds = provide_feeds

    self.jinja_loaders = jinja_loaders
def setupGitStep(self):
    # No ssh private key has been transferred to the worker yet.
    self.didDownloadSshPrivateKey = False
    self.setupGit(logname='Git')
    # repourl is mandatory for a Git step.
    if not self.repourl:
        config.error("Git: must provide repourl.")
def __init__(self, name, builderNames, properties, codebases=DefaultCodebases):
    """Set up a scheduler.

    @param name: name of this scheduler (used as a key for state)
    @type name: unicode

    @param builderNames: list of builders this scheduler may start
    @type builderNames: list of unicode

    @param properties: properties to add to builds triggered by this
        scheduler
    @type properties: dictionary

    @param codebases: codebases that are necessary to process the changes
    @type codebases: dict mapping codebase names to dicts with keys
        'repository', 'branch' and 'revision'
    """
    service.MultiService.__init__(self)

    # used to identify replacements on reconfig
    self.name = name

    # builderNames must be a list or tuple containing only strings
    names_valid = isinstance(builderNames, (list, tuple)) and \
        all(isinstance(b, basestring) for b in builderNames)
    if not names_valid:
        config.error(
            "The builderNames argument to a scheduler must be a list "
            "of Builder names.")
    # builder names to start in each buildset
    self.builderNames = builderNames

    # properties contributed to every buildset this scheduler creates
    self.properties = Properties()
    self.properties.update(properties, "Scheduler")
    self.properties.setProperty("scheduler", name, "Scheduler")

    self.objectid = None
    self.master = None

    # Validate the codebases needed to process changes; each codebase
    # always results in a sourcestamp, with or without changes.
    if codebases is None:
        config.error("Codebases cannot be None")
    else:
        if not isinstance(codebases, dict):
            config.error("Codebases must be a dict of dicts")
        # 'repository' is only optional for the built-in default codebases
        repository_required = codebases != BaseScheduler.DefaultCodebases
        for codebase, codebase_attrs in codebases.iteritems():
            if not isinstance(codebase_attrs, dict):
                config.error("Codebases must be a dict of dicts")
            if repository_required and 'repository' not in codebase_attrs:
                config.error(
                    "The key 'repository' is mandatory in codebases")
    self.codebases = codebases

    # internal variables
    self._change_subscription = None
    self._change_consumption_lock = defer.DeferredLock()
def __init__(self, name, password, instance_type, ami=None,
             valid_ami_owners=None, valid_ami_location_regex=None,
             elastic_ip=None, identifier=None, secret_identifier=None,
             aws_id_file_path=None, user_data=None, region=None,
             keypair_name=None, security_name=None,
             spot_instance=False, max_spot_price=1.6, volumes=None,
             placement=None, price_multiplier=1.2, tags=None,
             product_description='Linux/UNIX',
             subnet_id=None, security_group_ids=None,
             instance_profile_name=None, block_device_map=None,
             session=None,
             **kwargs):
    """Create an EC2 latent worker.

    Validates the AMI/credential configuration, opens a boto3 session
    (building one from the supplied credentials/region if ``session``
    is not given), and makes sure the named keypair and classic
    security group exist on EC2, creating them when missing.

    Either a specific ``ami`` or AMI-selection criteria
    (``valid_ami_owners`` / ``valid_ami_location_regex``) must be
    given, but not both.  ``identifier``/``secret_identifier`` must be
    supplied together or not at all; when absent, the deprecated
    ``~/.ec2/aws_id`` file or boto3's own credential chain is used.

    NOTE(review): this constructor performs network calls to EC2
    (keypair lookup/creation, security-group lookup/creation), so it
    requires working credentials at config-load time.
    """
    # boto3 is an optional dependency; report at configure time, not launch time
    if not boto3:
        config.error("The python module 'boto3' is needed to use a "
                     "EC2LatentWorker")

    # Historical "slave"-named defaults are deprecated; users should
    # pass explicit values.
    if keypair_name is None:
        reportDeprecatedWorkerNameUsage(
            "Use of default value of 'keypair_name' of EC2LatentWorker "
            "constructor is deprecated. Please explicitly specify value")
        keypair_name = 'latent_buildbot_slave'
    if security_name is None and not subnet_id:
        reportDeprecatedWorkerNameUsage(
            "Use of default value of 'security_name' of EC2LatentWorker "
            "constructor is deprecated. Please explicitly specify value")
        security_name = 'latent_buildbot_slave'

    # avoid sharing mutable default arguments across instances
    if volumes is None:
        volumes = []

    if tags is None:
        tags = {}

    AbstractLatentWorker.__init__(self, name, password, **kwargs)

    # classic (non-VPC) security groups cannot be used inside a VPC
    if security_name and subnet_id:
        raise ValueError(
            'security_name (EC2 classic security groups) is not supported '
            'in a VPC. Use security_group_ids instead.')
    # require exactly one of: a concrete AMI, or AMI-selection criteria
    if not ((ami is not None) ^
            (valid_ami_owners is not None or
             valid_ami_location_regex is not None)):
        raise ValueError(
            'You must provide either a specific ami, or one or both of '
            'valid_ami_location_regex and valid_ami_owners')
    self.ami = ami
    # normalize valid_ami_owners: a bare int becomes a one-element tuple
    if valid_ami_owners is not None:
        if isinstance(valid_ami_owners, integer_types):
            valid_ami_owners = (valid_ami_owners, )
        else:
            for element in valid_ami_owners:
                if not isinstance(element, integer_types):
                    raise ValueError(
                        'valid_ami_owners should be int or iterable '
                        'of ints', element)
    if valid_ami_location_regex is not None:
        if not isinstance(valid_ami_location_regex, string_types):
            raise ValueError('valid_ami_location_regex should be a string')
        else:
            # verify that regex will compile
            re.compile(valid_ami_location_regex)

    if spot_instance and price_multiplier is None and max_spot_price is None:
        raise ValueError('You must provide either one, or both, of '
                         'price_multiplier or max_spot_price')

    # owner ids are passed to the EC2 API as strings
    self.valid_ami_owners = None
    if valid_ami_owners:
        self.valid_ami_owners = [str(o) for o in valid_ami_owners]
    self.valid_ami_location_regex = valid_ami_location_regex
    self.instance_type = instance_type
    self.keypair_name = keypair_name
    self.security_name = security_name
    self.user_data = user_data
    self.spot_instance = spot_instance
    self.max_spot_price = max_spot_price
    self.volumes = volumes
    self.price_multiplier = price_multiplier
    self.product_description = product_description

    # availability zone is region + placement suffix, e.g. 'us-east-1' + 'a'
    if None not in [placement, region]:
        self.placement = '%s%s' % (region, placement)
    else:
        self.placement = None
    if identifier is None:
        assert secret_identifier is None, (
            'supply both or neither of identifier, secret_identifier')
        # fall back to the deprecated ~/.ec2/aws_id file (line 1: access
        # key id, line 2: secret key); if absent, boto3's own credential
        # resolution takes over later
        if aws_id_file_path is None:
            home = os.environ['HOME']
            default_path = os.path.join(home, '.ec2', 'aws_id')
            if os.path.exists(default_path):
                aws_id_file_path = default_path
        if aws_id_file_path:
            log.msg('WARNING: EC2LatentWorker is using deprecated '
                    'aws_id file')
            with open(aws_id_file_path, 'r') as aws_file:
                identifier = aws_file.readline().strip()
                secret_identifier = aws_file.readline().strip()
    else:
        assert aws_id_file_path is None, \
            'if you supply the identifier and secret_identifier, ' \
            'do not specify the aws_id_file_path'
        assert secret_identifier is not None, \
            'supply both or neither of identifier, secret_identifier'

    region_found = None

    # Make the EC2 connection.
    self.session = session
    if self.session is None:
        if region is not None:
            # validate the requested region against those boto3 knows about
            for r in boto3.Session(
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier
                    ).get_available_regions('ec2'):
                if r == region:
                    region_found = r
            if region_found is not None:
                self.session = boto3.Session(
                    region_name=region,
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier)
            else:
                raise ValueError(
                    'The specified region does not exist: ' + region)
        else:
            # boto2 defaulted to us-east-1 when region was unset, we
            # mimic this here in boto3
            region = botocore.session.get_session().get_config_variable(
                'region')
            if region is None:
                region = 'us-east-1'
            self.session = boto3.Session(
                aws_access_key_id=identifier,
                aws_secret_access_key=secret_identifier,
                region_name=region)

    self.ec2 = self.session.resource('ec2')
    self.ec2_client = self.session.client('ec2')

    # Make a keypair
    #
    # We currently discard the keypair data because we don't need it.
    # If we do need it in the future, we will always recreate the keypairs
    # because there is no way to
    # programmatically retrieve the private key component, unless we
    # generate it and store it on the filesystem, which is an unnecessary
    # usage requirement.
    try:
        # load() raises ClientError when the keypair does not exist
        self.ec2.KeyPair(self.keypair_name).load()
        # key_pair.delete()  # would be used to recreate
    except ClientError as e:
        if 'InvalidKeyPair.NotFound' not in str(e):
            if 'AuthFailure' in str(e):
                log.msg('POSSIBLE CAUSES OF ERROR:\n'
                        ' Did you supply your AWS credentials?\n'
                        ' Did you sign up for EC2?\n'
                        ' Did you put a credit card number in your AWS '
                        'account?\n'
                        'Please doublecheck before reporting a problem.\n')
            raise
        # make one; we would always do this, and stash the result, if we
        # needed the key (for instance, to SSH to the box). We'd then
        # use paramiko to use the key to connect.
        self.ec2.create_key_pair(KeyName=keypair_name)

    # create security group
    if security_name:
        try:
            # raises ClientError if the group does not exist yet
            self.ec2_client.describe_security_groups(
                GroupNames=[security_name])
        except ClientError as e:
            if 'InvalidGroup.NotFound' in str(e):
                self.security_group = self.ec2.create_security_group(
                    GroupName=security_name,
                    Description='Authorization to access the buildbot instance.')
                # Authorize the master as necessary
                # TODO this is where we'd open the hole to do the reverse pb
                # connect to the buildbot
                # ip = urllib.urlopen(
                #     'http://checkip.amazonaws.com').read().strip()
                # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
                # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
            else:
                raise

    # get the image
    if self.ami is not None:
        self.image = self.ec2.Image(self.ami)
    else:
        # verify we have access to at least one acceptable image
        discard = self.get_image()
        assert discard

    # get the specified elastic IP, if any
    if elastic_ip is not None:
        # Using ec2.vpc_addresses.filter(PublicIps=[elastic_ip]) throws a
        # NotImplementedError("Filtering not supported in describe_address.") in moto
        # https://github.com/spulec/moto/blob/100ec4e7c8aa3fde87ff6981e2139768816992e4/moto/ec2/responses/elastic_ip_addresses.py#L52
        addresses = self.ec2.meta.client.describe_addresses(
            PublicIps=[elastic_ip])['Addresses']
        if not addresses:
            raise ValueError('Could not find EIP for IP: ' + elastic_ip)
        allocation_id = addresses[0]['AllocationId']
        elastic_ip = self.ec2.VpcAddress(allocation_id)
    self.elastic_ip = elastic_ip
    self.subnet_id = subnet_id
    self.security_group_ids = security_group_ids
    # classic security groups are passed to the launch API as a list
    self.classic_security_groups = [
        self.security_name] if self.security_name else None
    self.instance_profile_name = instance_profile_name
    self.tags = tags
    self.block_device_map = self.create_block_device_mapping(
        block_device_map) if block_device_map else None