def test_master_lock_update_from_lockid(self):
    lock = RealMasterLock('lock1')
    lock.updateFromLockId(MasterLock('lock1', maxCount=3), 0)
    lock.updateFromLockId(MasterLock('lock1', maxCount=4), 0)

    self.assertEqual(lock.lockName, 'lock1')
    self.assertEqual(lock.maxCount, 4)
    self.assertEqual(lock.description, '<MasterLock(lock1, 4)>')

    with self.assertRaises(AssertionError):
        lock.updateFromLockId(MasterLock('lock2', maxCount=4), 0)
def test_master_lock_init_from_lockid(self):
    lockid = MasterLock('lock1', maxCount=3)
    lock = RealMasterLock(lockid)

    self.assertEqual(lock.name, 'lock1')
    self.assertEqual(lock.maxCount, 3)
    self.assertEqual(lock.description, '<MasterLock(lock1, 3)>')
def test_master_lock_init_from_lockid(self):
    lock = RealMasterLock('lock1')
    lock.updateFromLockId(MasterLock('lock1', maxCount=3), 0)

    self.assertEqual(lock.lockName, 'lock1')
    self.assertEqual(lock.maxCount, 3)
    self.assertEqual(lock.description, '<MasterLock(lock1, 3)>')
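# The tests above exercise the split between MasterLock, the identifier
# declared in master.cfg, and RealMasterLock, the object that actually
# enforces maxCount. A minimal sketch of the configuration side, assuming
# Buildbot's plugin namespace; the command and lock name are hypothetical:
from buildbot.plugins import steps, util

compile_lock = util.MasterLock('compile', maxCount=2)

compile_step = steps.ShellCommand(
    command=['make', 'all'],
    # At most two steps across the whole master may hold a "counting"
    # access to this lock at the same time.
    locks=[compile_lock.access('counting')],
)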
def locks(self):
    # We use a shared dictionary here, since locks are compared via
    # identity, not by name.
    lock_name = '/'.join(['functional', 'api', self.provider])
    # This should be in the config file (FLOC-2025).
    # Allow up to 2 AWS functional storage driver tests to run in
    # parallel; this is a temporary fix for tests spending a long time
    # queued up waiting to run.
    # OpenStack tests have not experienced long queued wait times,
    # so leave their max count at 1.
    if self.provider == 'aws':
        maxCount = 2
    elif self.provider in ('rackspace', 'redhat-openstack'):
        maxCount = 1
    else:
        raise NotImplementedError(
            "Unsupported provider %s" % (self.provider,))
    lock = self._locks.setdefault(
        self.provider, MasterLock(lock_name, maxCount=maxCount))
    return [lock.access("counting")]
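# The shared _locks dictionary matters because, as the comment above
# notes, this Buildbot version compares locks by identity rather than by
# name: two separate MasterLock('x') objects would be two unrelated
# locks. A hedged sketch of the same reuse pattern in isolation, with
# hypothetical names:
from buildbot.locks import MasterLock

_LOCKS = {}

def get_shared_lock(name, max_count=1):
    """Return the one shared MasterLock per name, creating it on first use."""
    if name not in _LOCKS:
        _LOCKS[name] = MasterLock(name, maxCount=max_count)
    return _LOCKS[name]

# Both call sites get the identical object, so access counts are shared.
assert get_shared_lock('functional/api/aws', 2) is get_shared_lock('functional/api/aws', 2)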
__all__ = [
    "rtdist_lock", "sse2_flag", "threads_flag", "buildtype_flag",
    "common_flags", "MakeTorrent"
]

from buildbot.steps.shell import SetPropertyFromCommand, ShellCommand
from buildbot.steps.master import MasterShellCommand
from buildbot.steps.transfer import DirectoryUpload
from buildbot.process.properties import Interpolate, Property, renderer
from buildbot.locks import MasterLock

import re

import config

# Define a lock so that only one builder can update the rtdist at a time.
rtdist_lock = MasterLock('rtdist')

# Which characters are invalid in a local tag, as per PEP 440.
local_tag_invalid = re.compile('[^a-zA-Z0-9.]')


@renderer
def sse2_flag(props):
    "Determines the SSE2 flag based on the requested architecture."
    if "macosx" in props["buildername"]:
        # All Intel Macs have SSE2, I think
        return ["--use-sse2"]
    if props["arch"] in ("amd64", "x86_64"):
        return ["--use-sse2"]
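# rtdist_lock is declared above but this excerpt does not show it being
# taken. Presumably the steps that write into the rtdist tree hold it
# exclusively; a hedged sketch, where the source and destination paths
# are hypothetical:
upload_step = DirectoryUpload(
    workersrc='built/stage',        # hypothetical staging directory
    masterdest='/var/www/rtdist',   # hypothetical rtdist root
    # Exclusive access: only one builder may update the rtdist at a time.
    locks=[rtdist_lock.access('exclusive')],
)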
                 haltOnFailure=True),

    # Run the test suite.
    Test(name="test py2", command=get_test_command(2), haltOnFailure=True,
         doStepIf=is_branch("release/1.10.x")),
    Test(name="test py3", command=get_test_command(3), haltOnFailure=True),

    # Build the installer.
    ShellCommand(name="package", command=package_cmd, haltOnFailure=True,
                 doStepIf=lambda step: not step.getProperty("optimize", False)),

    # And the test scripts for deploy-ng.
    #Test(name="build_samples", command=test_deployng_cmd,
    #     doStepIf=is_branch("deploy-ng"), haltOnFailure=True),
]

# Define a global lock, since reprepro won't allow simultaneous access
# to the repo.
repo_lock = MasterLock('reprepro')

# Steps to publish the runtime and SDK.
publish_deb_steps = [
    # Upload the deb package.
    FileUpload(workersrc=deb_filename, masterdest=deb_upload_filename,
               mode=0o664, haltOnFailure=True,
               doStepIf=lambda step: not step.getProperty("optimize", False)),

    # Create a torrent file and start seeding it.
    #MakeTorrent(deb_upload_filename),
    #SeedTorrent(deb_upload_filename),

    # Upload it to an apt repository.
    MasterShellCommand(name="reprepro", command=[
        "reprepro", "-b", deb_archive_dir, "includedeb", deb_archive_suite,
    @property
    def slave_class(self):
        return 'aws/centos-7'


ACCEPTANCE_CONFIGURATIONS = [
    AcceptanceConfiguration(provider='aws',
                            distribution='rhel-7.2',
                            dataset_backend='native')
]

# Too many simultaneous builds will hit AWS limits, but
# too few will make tests painfully slow. We need to find
# a compromise between these two variables. See FLOC-3263.
aws_lock = MasterLock('aws-lock', maxCount=3)

ACCEPTANCE_LOCKS = {
    'aws': [aws_lock.access("counting")],
}


def getBuilders(slavenames):
    builders = []
    for configuration in ACCEPTANCE_CONFIGURATIONS:
        builders.append(
            BuilderConfig(name=configuration.builder_name,
                          builddir=configuration.builder_directory,
                          slavenames=slavenames[configuration.slave_class],
                          category='flocker',
                          factory=run_acceptance_tests(configuration),
                          locks=ACCEPTANCE_LOCKS.get(configuration.provider,
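# With maxCount=3, at most three builders holding a "counting" access to
# aws_lock run at once; an "exclusive" access would instead wait for all
# counting holders to finish and then block new ones. A hedged sketch of
# such a drain-everything builder (name, slave, and factory are
# hypothetical):
maintenance_builder = BuilderConfig(
    name='aws-maintenance',
    slavenames=['maintenance-slave'],       # hypothetical slave
    factory=maintenance_factory,            # hypothetical factory
    locks=[aws_lock.access('exclusive')],   # runs alone w.r.t. aws_lock
)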
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

"""All docker build related steps."""

from buildbot.locks import MasterLock
from buildbot.process import logobserver
from buildbot.process.results import FAILURE, SUCCESS
from buildbot.steps.master import MasterShellCommand
from twisted.internet import defer

DOCKER_BUILD_LOCK = MasterLock('docker_build')


class DockerStep(MasterShellCommand):
    def __init__(self, label, image, command, **kwargs):
        self.label = label
        self.image = image
        super(DockerStep, self).__init__(command, logEnviron=False, **kwargs)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.image == other.image and
                self.label == other.label)


class DockerBuild(DockerStep):
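# DOCKER_BUILD_LOCK is presumably passed to DockerStep instances so that
# only one docker build runs on the master at a time; DockerStep forwards
# **kwargs (including locks) to MasterShellCommand. A hedged sketch with
# a hypothetical image name and command:
build_step = DockerBuild(
    label='app image',
    image='myproject/app:latest',
    command=['docker', 'build', '-t', 'myproject/app:latest', '.'],
    locks=[DOCKER_BUILD_LOCK.access('exclusive')],
)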
    # Make sure the base distribution is up-to-date.
    ShellCommand(command=['wget', '-N', cloudimg_url], workdir="context"),

    # Build the Docker image.
    ShellCommand(name="setup", command=setup_cmd, workdir="context",
                 haltOnFailure=True),

    # Invoke makepanda.
    Compile(command=build_cmd, haltOnFailure=True,
            env={'PYTHONPATH': python_path}),
]

# Define a global lock, since reprepro won't allow simultaneous access
# to the repo.
repo_lock = MasterLock('reprepro')

# Steps to publish the runtime and SDK.
publish_deb_steps = [
    # Upload the deb package.
    FileUpload(slavesrc=deb_filename, masterdest=deb_upload_filename,
               mode=0o664, haltOnFailure=True),

    # Create a torrent file and start seeding it.
    MakeTorrent(deb_upload_filename),
    SeedTorrent(deb_upload_filename),

    # Upload it to an apt repository.
    MasterShellCommand(name="reprepro",
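# Both reprepro excerpts above are cut off before repo_lock is attached.
# Presumably the reprepro step itself takes it exclusively so concurrent
# builders serialize access to the apt repository; a hedged sketch, where
# the command arguments merely echo the visible part of the truncated
# call:
reprepro_step = MasterShellCommand(
    name="reprepro",
    command=["reprepro", "-b", deb_archive_dir,
             "includedeb", deb_archive_suite, deb_upload_filename],
    # Exclusive: reprepro won't allow simultaneous access to the repo.
    locks=[repo_lock.access('exclusive')],
)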
class StandardBuild(Build):
    __slots__ = [
        'baseurl', 'giturl', 'branch', 'nightly', 'enable_force', 'lock_src'
    ]

    PATCHES = []

    def __init__(self, name, baseurl, branch, nightly=None, enable_force=True, giturl=None):
        super().__init__(name)
        if giturl is None:
            giturl = baseurl + ".git"
        self.baseurl = baseurl
        self.giturl = giturl
        self.branch = branch
        self.nightly = nightly
        self.enable_force = enable_force
        # Lock used to avoid writing source code when it is read by another task
        self.lock_src = MasterLock("src-{0}".format(self.name), maxCount=sys.maxsize)

    def getGlobalSchedulers(self, platforms):
        ret = list()
        change_filter = ChangeFilter(repository=self.baseurl, branch=self.branch)

        # Fetch scheduler (triggered by event source)
        ret.append(
            SingleBranchScheduler(name="fetch-{0}".format(self.name),
                                  change_filter=change_filter,
                                  treeStableTimer=5,
                                  builderNames=["fetch-{0}".format(self.name)]))

        # Nightly scheduler (started by time)
        # It's triggered after regular builds to take note of the last fetched source
        # Note that build is not started by trigger
        if self.nightly is not None:
            ret.append(
                NightlyTriggerable(
                    name="nightly-{0}".format(self.name),
                    branch=self.branch,
                    builderNames=["nightly-{0}".format(self.name)],
                    hour=self.nightly[0],
                    minute=self.nightly[1],
                    onlyIfChanged=True))

        # All compiling builders
        comp_builders = [
            "{0}-{1}".format(self.name, p.name) for p in platforms
            if p.canBuild(self)
        ]

        # Global build scheduler (triggered by fetch build)
        ret.append(Triggerable(name=self.name, builderNames=comp_builders))

        # Force schedulers
        if self.enable_force:
            ret.append(
                ForceScheduler(
                    name="force-scheduler-{0}-fetch".format(self.name),
                    reason=StringParameter(name="reason",
                                           label="Reason:",
                                           required=True,
                                           size=80),
                    builderNames=["fetch-{0}".format(self.name)],
                    codebases=[CodebaseParameter(codebase='', hide=True)],
                    properties=[
                        BooleanParameter(name="clean",
                                         label="Clean",
                                         default=False),
                        BooleanParameter(name="package",
                                         label="Package",
                                         default=False),
                    ]))
            ret.append(
                ForceScheduler(
                    name="force-scheduler-{0}-build".format(self.name),
                    reason=StringParameter(name="reason",
                                           label="Reason:",
                                           required=True,
                                           size=80),
                    builderNames=comp_builders,
                    codebases=[CodebaseParameter(codebase='', hide=True)],
                    properties=[
                        BooleanParameter(name="clean",
                                         label="Clean",
                                         default=False),
                        BooleanParameter(name="package",
                                         label="Package",
                                         default=False),
                    ]))

        return ret

    def getGlobalBuilders(self):
        ret = list()

        f = factory.BuildFactory()
        f.useProgress = False
        f.addStep(
            Git(
                mode="incremental",
                workdir=".",
                repourl=self.giturl,
                branch=self.branch,
                locks=[self.lock_src.access("exclusive")],
            ))
        if len(self.PATCHES):
            f.addStep(
                steps.Patch(
                    patches=self.PATCHES,
                    workdir=".",
                    locks=[self.lock_src.access("exclusive")],
                ))
        if self.nightly is not None:
            # Trigger nightly scheduler to let it know the source stamp
            f.addStep(
                Trigger(name="Updating source stamp",
                        hideStepIf=(lambda r, s: r == results.SUCCESS),
                        schedulerNames=["nightly-{0}".format(self.name)]))
        f.addStep(
            Trigger(name="Building all platforms",
                    schedulerNames=[self.name],
                    copy_properties=['got_revision', 'clean', 'package'],
                    updateSourceStamp=True,
                    waitForFinish=True))

        ret.append(
            BuilderConfig(
                name="fetch-{0}".format(self.name),
                # This is specific
                workername='fetcher',
                workerbuilddir="/data/src/{0}".format(self.name),
                factory=f,
                tags=["fetch"],
            ))

        if self.nightly is not None:
            f = factory.BuildFactory()
            f.addStep(
                Trigger(schedulerNames=[self.name],
                        copy_properties=['got_revision'],
                        updateSourceStamp=True,
                        waitForFinish=True,
                        set_properties={
                            'clean': True,
                            'package': True
                        }))
            ret.append(
                BuilderConfig(
                    name="nightly-{0}".format(self.name),
                    # TODO: Fix this
                    workername='fetcher',
                    workerbuilddir="/data/triggers/nightly-{0}".format(self.name),
                    factory=f,
                    tags=["nightly"],
                    locks=[self.lock_src.access("counting")]))

        return ret
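# lock_src above uses maxCount=sys.maxsize: any number of builds may hold
# a "counting" access at once (the readers), while the fetch builder's
# "exclusive" access still shuts all of them out whenever the shared
# source tree is being rewritten. A minimal reader/writer sketch under
# the same assumption; the project name and commands are hypothetical:
import sys

from buildbot.locks import MasterLock
from buildbot.steps.shell import ShellCommand
from buildbot.steps.source.git import Git

src_lock = MasterLock("src-myproject", maxCount=sys.maxsize)

# Sole writer: updates the shared checkout.
fetch_step = Git(repourl="https://example.org/myproject.git",
                 workdir=".",
                 locks=[src_lock.access("exclusive")])

# Readers: unlimited in number, but excluded while the writer holds the lock.
build_step = ShellCommand(command=["make"],
                          locks=[src_lock.access("counting")])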