def __init__(self, name, password, connection, hd_image, base_image=None, xml=None, **kwargs):
    """Latent worker backed by a libvirt domain.

    :param connection: handle used to talk to the libvirt daemon
    :param hd_image: path of the disk image the domain boots from
    :param base_image: optional image that hd_image is copied from per build
    :param xml: optional libvirt domain XML used to define the domain
    """
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    if not libvirt:
        config.error(
            "The python module 'libvirt' is needed to use a LibVirtWorker")
    self.connection = connection
    self.image = hd_image
    self.base_image = base_image
    self.xml = xml
    # presumably selects a copy-on-write style clone of base_image —
    # TODO confirm against the start_instance implementation
    self.cheap_copy = True
    self.graceful_shutdown = False
    self.domain = None
    # Builds are refused until the scan for pre-existing domains finishes.
    self.ready = False
    self._find_existing_deferred = self._find_existing_instance()
def test_reconfigService(self):
    """Reconfiguring a worker must pick up the new build_wait_timeout."""
    before = AbstractLatentWorker("name", "password", build_wait_timeout=10)
    after = AbstractLatentWorker("name", "password", build_wait_timeout=30)
    self.do_test_reconfigService(before, after)
    self.assertEqual(before.build_wait_timeout, 30)
def __init__(
    self,
    name,
    password,
    docker_host,
    image=None,
    command=None,
    volumes=None,
    dockerfile=None,
    version=None,
    tls=None,
    followStartupLogs=False,
    masterFQDN=None,
    hostconfig=None,
    networking_config="bridge",
    **kwargs
):
    """Latent worker that runs each build inside a Docker container.

    Either an image name or a dockerfile must be given; tls/version are
    forwarded to the Docker client.
    """
    if not client:
        config.error("The python module 'docker-py>=1.4' is needed to use a"
                     " DockerLatentWorker")
    if not image and not dockerfile:
        config.error("DockerLatentWorker: You need to specify at least"
                     " an image name, or a dockerfile")
    self.volumes = volumes or []
    self.binds = {}
    self.networking_config = networking_config
    self.followStartupLogs = followStartupLogs
    # Following block is only for checking config errors,
    # actual parsing happens in self.parse_volumes()
    # Renderables can be direct volumes definition or list member
    if isinstance(volumes, list):
        for volume_string in volumes or []:
            if not isinstance(volume_string, str):
                continue
            try:
                volume, bind = volume_string.split(":", 1)
            except ValueError:
                config.error("Invalid volume definition for docker "
                             "%s. Skipping..." % volume_string)
                continue
    # Set build_wait_timeout to 0 if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if "build_wait_timeout" not in kwargs:
        kwargs["build_wait_timeout"] = 0
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    self.image = image
    self.command = command or []
    self.dockerfile = dockerfile
    # Workers connect back to the master; default to this host's FQDN.
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    self.hostconfig = hostconfig or {}
    # Prepare the parameters for the Docker Client object.
    self.client_args = {"base_url": docker_host}
    if version is not None:
        self.client_args["version"] = version
    if tls is not None:
        self.client_args["tls"] = tls
def checkConfig(self, name, password, hyper_host, hyper_accesskey,
                hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                **kwargs):
    """Validate the HyperLatentWorker configuration.

    Defaults build_wait_timeout to 0 (container start-up is nearly
    immediate), verifies the client modules are importable, and rejects
    sizes outside self.ALLOWED_SIZES.
    """
    # Set build_wait_timeout to 0s if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
    if not Hyper:
        config.error(
            "The python modules 'docker-py>=1.4' and 'hypercompose' are needed to use a"
            " HyperLatentWorker")
    if hyper_size not in self.ALLOWED_SIZES:
        # BUG FIX: the message mixed %-style placeholders with
        # str.format(), so "%s"/"%r" were emitted literally.
        config.error("Size is not valid {} vs {!r}".format(
            hyper_size, self.ALLOWED_SIZES))
def reconfigService(self, name, password, hyper_host, hyper_accesskey,
                    hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                    **kwargs):
    """Apply a (possibly new) configuration to this Hyper worker."""
    AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
    self.size = hyper_size
    self.image = image
    # Prepare the parameters for the Docker Client object.
    cloud_credentials = {
        "accesskey": hyper_accesskey,
        "secretkey": hyper_secretkey,
    }
    self.client_args = {'clouds': {hyper_host: cloud_credentials}}
    # An unset or empty masterFQDN (the empty string comes from the UI)
    # falls back to this host's FQDN.
    self.masterFQDN = masterFQDN if masterFQDN else socket.getfqdn()
def __init__(self, name, password, docker_host, image=None, command=None,
             volumes=None, dockerfile=None, version=None, tls=None,
             followStartupLogs=False, masterFQDN=None, hostconfig=None,
             **kwargs):
    """Latent worker that runs each build inside a Docker container.

    Either an image name or a dockerfile must be given.
    """
    if not client:
        config.error(
            "The python module 'docker-py>=1.4' is needed to use a"
            " DockerLatentWorker")
    if not image and not dockerfile:
        config.error("DockerLatentWorker: You need to specify at least"
                     " an image name, or a dockerfile")
    self.volumes = volumes or []
    self.binds = {}
    self.followStartupLogs = followStartupLogs
    # Following block is only for checking config errors,
    # actual parsing happens in self.parse_volumes()
    # Renderables can be direct volumes definition or list member
    if isinstance(volumes, list):
        for volume_string in (volumes or []):
            if not isinstance(volume_string, str):
                continue
            try:
                # NOTE(review): the unpack order differs from sibling
                # implementations; the names are unused beyond checking
                # that the string contains a ':'.
                bind, volume = volume_string.split(":", 1)
            except ValueError:
                config.error("Invalid volume definition for docker "
                             "%s. Skipping..." % volume_string)
                continue
    # Set build_wait_timeout to 0 if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    self.image = image
    self.command = command or []
    self.dockerfile = dockerfile
    # Workers connect back to the master; default to this host's FQDN.
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    self.hostconfig = hostconfig or {}
    # Prepare the parameters for the Docker Client object.
    self.client_args = {'base_url': docker_host}
    if version is not None:
        self.client_args['version'] = version
    if tls is not None:
        self.client_args['tls'] = tls
def checkConfig(self, name, password=None, zone=None, api_username=None,
                api_password=None, image=None, hostconfig=None,
                base_url=DEFAULT_BASE_URL, masterFQDN=None, **kwargs):
    """Validate the UpcloudLatentWorker configuration.

    BUG FIX: the error message already demanded a zone, but the parameter
    was neither accepted nor checked, while the matching reconfigService
    does take ``zone``. Accepting ``zone=None`` keeps the two signatures
    in step and actually enforces the documented requirement.
    """
    if image is None or zone is None or api_username is None \
            or api_password is None:
        config.error("UpcloudLatentWorker: You need to specify at least"
                     " an image name, zone, api_username and api_password")
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
def __init__(self, name, password, docker_host, image=None, command=None,
             volumes=None, dockerfile=None, version=None, tls=None,
             followStartupLogs=False, masterFQDN=None, **kwargs):
    """Latent worker that runs each build inside a Docker container.

    Volume strings have the form "host_path:container_path[:ro|:rw]".
    Either an image name or a dockerfile must be given.
    """
    if not client:
        config.error("The python module 'docker-py' is needed to use a"
                     " DockerLatentWorker")
    if not image and not dockerfile:
        config.error("DockerLatentWorker: You need to specify at least"
                     " an image name, or a dockerfile")
    self.volumes = []
    self.binds = {}
    self.followStartupLogs = followStartupLogs
    for volume_string in (volumes or []):
        try:
            volume, bind = volume_string.split(":", 1)
        except ValueError:
            config.error("Invalid volume definition for docker "
                         "%s. Skipping..." % volume_string)
            # BUG FIX: without this 'continue' the code below ran with
            # 'volume'/'bind' unbound (NameError) or stale from the
            # previous iteration.
            continue
        self.volumes.append(volume)
        ro = False
        # A trailing ':ro'/':rw' suffix selects read-only vs read-write.
        if bind.endswith(':ro') or bind.endswith(':rw'):
            ro = bind[-2:] == 'ro'
            bind = bind[:-3]
        self.binds[volume] = {'bind': bind, 'ro': ro}
    # Set build_wait_timeout to 0 if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    self.image = image
    self.command = command or []
    self.dockerfile = dockerfile
    # Workers connect back to the master; default to this host's FQDN.
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    # Prepare the parameters for the Docker Client object.
    self.client_args = {'base_url': docker_host}
    if version is not None:
        self.client_args['version'] = version
    if tls is not None:
        self.client_args['tls'] = tls
def checkConfig(self, name, password, hyper_host, hyper_accesskey,
                hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                **kwargs):
    """Validate the HyperLatentWorker configuration.

    Verifies the client modules are importable and rejects sizes outside
    self.ALLOWED_SIZES.
    """
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
    if not Hyper:
        config.error("The python modules 'docker-py>=1.4' and 'hyper_sh' are needed to use a"
                     " HyperLatentWorker")
    if hyper_size not in self.ALLOWED_SIZES:
        # BUG FIX: the message mixed %-style placeholders with
        # str.format(), so "%s"/"%r" were emitted literally.
        config.error("Size is not valid {} vs {!r}".format(
            hyper_size, self.ALLOWED_SIZES))
def __init__(self, name, password, flavor,
             os_username, os_password, os_tenant_name, os_auth_url,
             os_user_domain=None, os_project_domain=None,
             block_devices=None, region=None, image=None, meta=None,
             # Have a nova_args parameter to allow passing things directly
             # to novaclient.
             nova_args=None, client_version='2', **kwargs):
    """OpenStack latent worker built on novaclient + keystoneauth1.

    The instance boots either from *image* or from *block_devices*; at
    least one of the two must be supplied.
    """
    if not client:
        config.error("The python module 'novaclient' is needed "
                     "to use a OpenStackLatentWorker. "
                     "Please install 'python-novaclient' package.")
    if not loading or not session:
        config.error("The python module 'keystoneauth1' is needed "
                     "to use a OpenStackLatentWorker. "
                     "Please install the 'keystoneauth1' package.")
    if not block_devices and not image:
        raise ValueError('One of block_devices or image must be given')
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    self.flavor = flavor
    self.client_version = client_version
    # Guarded so the attribute block is skipped when novaclient is absent
    # (config.error may not abort in all contexts).
    if client:
        self.novaclient = self._constructClient(
            client_version, os_username, os_user_domain, os_password,
            os_tenant_name, os_project_domain, os_auth_url)
        if region is not None:
            self.novaclient.client.region_name = region
    if block_devices is not None:
        self.block_devices = [
            self._parseBlockDevice(bd) for bd in block_devices
        ]
    else:
        self.block_devices = None
    self.image = image
    self.meta = meta
    self.nova_args = nova_args if nova_args is not None else {}
def checkConfig(self, name, password=None, image=None, masterFQDN=None, **kwargs):
    """Validate the worker configuration.

    build_wait_timeout defaults to 0 because starting a container is
    almost immediate.
    """
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    # image may be a plain string or a renderable (duck-typed on
    # getRenderingFor).
    image_ok = (image is None or isinstance(image, str)
                or hasattr(image, 'getRenderingFor'))
    if not image_ok:
        config.error("image must be a string")
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
def checkConfig(self, name, password=None, image=None, masterFQDN=None, **kwargs):
    """Validate config; container start is fast, so the build wait
    timeout defaults to 0."""
    kwargs.setdefault('build_wait_timeout', 0)
    if image is not None and not isinstance(image, str):
        # Non-string images must at least be renderables.
        if not hasattr(image, 'getRenderingFor'):
            config.error("image must be a string")
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
def __init__(self, image, datastore, name, hostname, username, **kwargs):
    """Latent worker backed by a bhyve VM, managed over SSH via fabric.

    :param image: VM image to clone for the worker
    :param datastore: vm-bhyve datastore holding the image
    :param name: VM name
    :param hostname: bhyve host to SSH into
    :param username: SSH user on the bhyve host
    """
    if not fabric:
        config.error("The python module 'fabric' is needed to use a "
                     "VMBhyveLatentWorker")
    self.datastore = datastore
    self.name = name
    self.image = image
    # SSH connection to the bhyve host; all vm commands run remotely.
    self.conn = fabric.Connection(host=hostname, user=username, port=22)
    self.vm_status = ""
    # presumably seconds to wait for VM state changes — TODO confirm
    self.timeout = 300
    # NOTE(review): worker name/password appear to be expected inside
    # **kwargs here (self.name above is later reassigned by the base
    # class) — confirm against callers.
    AbstractLatentWorker.__init__(self, **kwargs)
def __init__(self, name, password, flavor, os_username, os_password,
             os_tenant_name, os_auth_url, os_user_domain=None,
             os_project_domain=None, block_devices=None, region=None,
             image=None, meta=None,
             # Have a nova_args parameter to allow passing things directly
             # to novaclient.
             nova_args=None, client_version='2', **kwargs):
    """OpenStack latent worker built on novaclient + keystoneauth1.

    The instance boots either from *image* or from *block_devices*; at
    least one of the two must be supplied.
    """
    if not client:
        config.error("The python module 'novaclient' is needed "
                     "to use a OpenStackLatentWorker. "
                     "Please install 'python-novaclient' package.")
    if not loading or not session:
        config.error("The python module 'keystoneauth1' is needed "
                     "to use a OpenStackLatentWorker. "
                     "Please install the 'keystoneauth1' package.")
    if not block_devices and not image:
        raise ValueError('One of block_devices or image must be given')
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    self.flavor = flavor
    self.client_version = client_version
    # Guarded so the attribute block is skipped when novaclient is absent
    # (config.error may not abort in all contexts).
    if client:
        self.novaclient = self._constructClient(
            client_version, os_username, os_user_domain, os_password,
            os_tenant_name, os_project_domain, os_auth_url)
        if region is not None:
            self.novaclient.client.region_name = region
    if block_devices is not None:
        self.block_devices = [
            self._parseBlockDevice(bd) for bd in block_devices]
    else:
        self.block_devices = None
    self.image = image
    self.meta = meta
    self.nova_args = nova_args if nova_args is not None else {}
def __init__(self, name, password, docker_host, image=None, command=None,
             volumes=None, dockerfile=None, version=None, tls=None,
             followStartupLogs=False, masterFQDN=None, hostconfig=None,
             networking_config='bridge', **kwargs):
    """Latent worker that runs each build inside a Docker container.

    Volume strings have the form "host_path:container_path[:ro|:rw]".
    Either an image name or a dockerfile must be given.
    """
    if not client:
        config.error("The python module 'docker-py>=1.4' is needed to use a"
                     " DockerLatentWorker")
    if not image and not dockerfile:
        config.error("DockerLatentWorker: You need to specify at least"
                     " an image name, or a dockerfile")
    self.volumes = []
    self.binds = {}
    self.networking_config = networking_config
    self.followStartupLogs = followStartupLogs
    for volume_string in (volumes or []):
        try:
            volume, bind = volume_string.split(":", 1)
        except ValueError:
            config.error("Invalid volume definition for docker "
                         "%s. Skipping..." % volume_string)
            continue
        self.volumes.append(volume)
        ro = False
        # A trailing ':ro'/':rw' suffix selects read-only vs read-write.
        if bind.endswith(':ro') or bind.endswith(':rw'):
            ro = bind[-2:] == 'ro'
            bind = bind[:-3]
        self.binds[volume] = {'bind': bind, 'ro': ro}
    # Set build_wait_timeout to 0 if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    self.image = image
    self.command = command or []
    self.dockerfile = dockerfile
    # Workers connect back to the master; default to this host's FQDN.
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    self.hostconfig = hostconfig or {}
    # Prepare the parameters for the Docker Client object.
    self.client_args = {'base_url': docker_host}
    if version is not None:
        self.client_args['version'] = version
    if tls is not None:
        self.client_args['tls'] = tls
def reconfigService(self, name, password, hyper_host, hyper_accesskey,
                    hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                    **kwargs):
    """Reconfigure this Hyper worker (inlineCallbacks-style generator).

    BUG FIX: hashlib.sha1() requires bytes while master.name is text —
    encode it before hashing, matching the sibling implementations that
    run the name through unicode2bytes.
    """
    # Set build_wait_timeout to 0s if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    yield AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
    self.manager = yield HyperLatentManager.getService(
        self.master, hyper_host, hyper_accesskey, hyper_secretkey)
    # Short, stable tag identifying this master.
    self.masterhash = hashlib.sha1(
        self.master.name.encode('utf-8')).hexdigest()[:6]
    self.size = hyper_size
    self.image = image
    if not masterFQDN:  # also match empty string (for UI)
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
def reconfigService(self, name, password=None, zone=None, api_username=None,
                    api_password=None, image=None, hostconfig=None,
                    base_url=DEFAULT_BASE_URL, masterFQDN=None, **kwargs):
    """Apply configuration to this UpCloud worker.

    Generates a random password when none is given, builds the
    authenticated HTTP client, and derives a short hash of the master
    name. This is a generator (inlineCallbacks style) that yields
    deferreds.
    """
    if password is None:
        password = self.getRandomPass()
    # Workers connect back to the master; default to this host's FQDN.
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    self.image = image
    if hostconfig is None:
        hostconfig = {}
    self.hostconfig = hostconfig
    # HTTP client authenticated with the UpCloud API credentials.
    self.client = yield HTTPClientService.getService(
        self.master, base_url, auth=(api_username, api_password),
        debug=kwargs.get('debug', False))
    masterName = util.unicode2bytes(self.master.name)
    # Short, stable tag identifying this master.
    self.masterhash = hashlib.sha1(masterName).hexdigest()[:6]
    yield AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
def reconfigService(self, name, password, hyper_host, hyper_accesskey,
                    hyper_secretkey, image, hyper_size="xs", masterFQDN=None,
                    **kwargs):
    """Store the Hyper size, image and client credentials for this worker."""
    AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
    self.size = hyper_size
    self.image = image
    # Credentials handed to the Docker client wrapper.
    credentials = {"accesskey": hyper_accesskey,
                   "secretkey": hyper_secretkey}
    self.client_args = {'clouds': {hyper_host: credentials}}
    if masterFQDN:
        self.masterFQDN = masterFQDN
    else:
        # unset or empty string (empty comes from the UI)
        self.masterFQDN = socket.getfqdn()
def checkConfig(self, name, password, hyper_host, hyper_accesskey,
                hyper_secretkey, image, hyper_size="xs", masterFQDN=None,
                **kwargs):
    """Validate the HyperLatentWorker configuration.

    Defaults build_wait_timeout to 0 (container start-up is nearly
    immediate), verifies the client modules are importable, and rejects
    sizes outside self.ALLOWED_SIZES.
    """
    # Set build_wait_timeout to 0s if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
    if not Hyper:
        config.error("The python modules 'docker-py>=1.4' and 'hypercompose' are needed to use a"
                     " HyperLatentWorker")
    if hyper_size not in self.ALLOWED_SIZES:
        # BUG FIX: the message mixed %-style placeholders with
        # str.format(), so "%s"/"%r" were emitted literally.
        config.error("Size is not valid {} vs {!r}".format(
            hyper_size, self.ALLOWED_SIZES))
def __init__(self, name, password, aws_id_file_path, region, instance_name):
    """Latent worker wrapping one pre-existing EC2 instance (boto 2 API).

    :param aws_id_file_path: file with the AWS access key id on the first
        line and the secret key on the second
    :param region: EC2 region the instance lives in
    :param instance_name: value of the instance's "Name" tag
    """
    if not boto:
        config.error("The python module 'boto' is needed to use EC2 build slaves")
    # Fixed latent-worker settings: never linger (build_wait_timeout=0),
    # 20-minute missing timeout.
    AbstractLatentWorker.__init__(self, name, password, max_builds=None,
                                  notify_on_missing=[],
                                  missing_timeout=60 * 20,
                                  build_wait_timeout=0, properties={},
                                  locks=None)
    if not os.path.exists(aws_id_file_path):
        raise ValueError(
            "Please supply your AWS credentials in "
            "the {} file (on two lines).".format(aws_id_file_path))
    with open(aws_id_file_path, "r") as aws_credentials_file:
        access_key_id = aws_credentials_file.readline().strip()
        secret_key = aws_credentials_file.readline().strip()
    self.ec2_conn = boto.ec2.connect_to_region(
        region, aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_key)
    # Resolve the single instance carrying the given Name tag; raises
    # IndexError when no instance matches.
    self.instance = self.ec2_conn.get_only_instances(
        filters={"tag:Name": instance_name})[0]
def stopService(self):
    """Stop the worker service and release client resources.

    Generator (inlineCallbacks style). The base class stops any running
    instance first; then the client session and the thread pool are torn
    down, since a lingering thread/session makes reactor.stop hang.
    """
    # stopService will call stop_instance if the worker was up.
    yield AbstractLatentWorker.stopService(self)
    # we cleanup our thread and session (or reactor.stop will hang)
    if self.client is not None:
        self.client.close()
        self.client = None
    if self.threadPool is not None:
        yield self.threadPool.stop()
        self.threadPool = None
def reconfigService(self, name, password=None, image=None, masterFQDN=None, **kwargs):
    """Reconfigure the worker; generate a password when none was supplied."""
    # Starting a container is almost immediate — no point waiting
    # between builds unless the config says otherwise.
    kwargs.setdefault('build_wait_timeout', 0)
    effective_password = password if password is not None else self.getRandomPass()
    self.image = image
    return AbstractLatentWorker.reconfigService(
        self, name, effective_password, **kwargs)
def canStartBuild(self):
    """Refuse builds until existing domains were scanned, or while our
    domain exists but its worker has not connected yet."""
    accepting = True
    if not self.ready:
        log.msg("Not accepting builds as existing domains not iterated")
        accepting = False
    elif self.domain and not self.isConnected():
        log.msg(
            "Not accepting builds as existing domain but worker not connected")
        accepting = False
    if not accepting:
        return False
    return AbstractLatentWorker.canStartBuild(self)
def checkConfig(self, name, password, hyper_host, hyper_accesskey,
                hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                **kwargs):
    """Validate the HyperLatentWorker configuration.

    Verifies the client modules are importable and rejects sizes outside
    self.ALLOWED_SIZES.
    """
    AbstractLatentWorker.checkConfig(self, name, password, **kwargs)
    if not Hyper:
        config.error(
            "The python modules 'docker-py>=1.4' and 'hyper_sh' are needed to use a"
            " HyperLatentWorker")
    if hyper_size not in self.ALLOWED_SIZES:
        # BUG FIX: the message mixed %-style placeholders with
        # str.format(), so "%s"/"%r" were emitted literally.
        config.error("Size is not valid {} vs {!r}".format(
            hyper_size, self.ALLOWED_SIZES))
def canStartBuild(self):
    """Gate builds on domain-scan completion and worker connectivity."""
    if not self.ready:
        log.msg("Not accepting builds as existing domains not iterated")
        return False
    domain_blocks = bool(self.domain) and not self.isConnected()
    if domain_blocks:
        log.msg(
            "Not accepting builds as existing domain but worker not connected"
        )
        return False
    return AbstractLatentWorker.canStartBuild(self)
def reconfigService(self, name, password=None, image=None, masterFQDN=None, **kwargs):
    """Reconfigure the worker; defaults timeout, password and master FQDN.

    BUG FIX: hashlib.sha1() requires bytes while master.name is text —
    encode it before hashing (the sibling implementation runs the name
    through unicode2bytes for the same reason).
    """
    # Set build_wait_timeout to 0 if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    if password is None:
        password = self.getRandomPass()
    if masterFQDN is None:
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
    self.image = image
    # Short, stable tag identifying this master.
    self.masterhash = hashlib.sha1(
        self.master.name.encode('utf-8')).hexdigest()[:6]
    return AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
def reconfigService(self, name, password=None, image=None, masterFQDN=None, **kwargs):
    """Reconfigure the worker; defaults timeout, password and master FQDN."""
    # Container start-up is nearly immediate, so waiting between builds
    # buys nothing.
    kwargs.setdefault('build_wait_timeout', 0)
    if password is None:
        password = self.getRandomPass()
    self.masterFQDN = masterFQDN if masterFQDN is not None else socket.getfqdn()
    self.image = image
    # Short, stable tag derived from the master name.
    digest = hashlib.sha1(unicode2bytes(self.master.name)).hexdigest()
    self.masterhash = digest[:6]
    return AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
def reconfigService(self, name, password, hyper_host, hyper_accesskey,
                    hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                    **kwargs):
    """Reconfigure this Hyper worker (inlineCallbacks-style generator).

    BUG FIX: hashlib.sha1() requires bytes while master.name is text —
    encode it before hashing, matching the sibling implementations that
    run the name through unicode2bytes.
    """
    # Set build_wait_timeout to 0s if not explicitly set: starting a
    # container is almost immediate, so we can afford doing it per build.
    if 'build_wait_timeout' not in kwargs:
        kwargs['build_wait_timeout'] = 0
    yield AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
    self.manager = yield HyperLatentManager.getService(
        self.master, hyper_host, hyper_accesskey, hyper_secretkey)
    # Short, stable tag identifying this master.
    self.masterhash = hashlib.sha1(
        self.master.name.encode('utf-8')).hexdigest()[:6]
    self.size = hyper_size
    self.image = image
    if not masterFQDN:  # also match empty string (for UI)
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
def reconfigService(self, name, password, hyper_host, hyper_accesskey,
                    hyper_secretkey, image, hyper_size="s3", masterFQDN=None,
                    **kwargs):
    """Reconfigure this Hyper worker (inlineCallbacks-style generator).

    BUG FIX: hashlib.sha1() requires bytes while master.name is text —
    encode it before hashing, matching the sibling implementations that
    run the name through unicode2bytes.
    """
    yield AbstractLatentWorker.reconfigService(self, name, password, **kwargs)
    self.manager = yield HyperLatentManager.getService(
        self.master, hyper_host, hyper_accesskey, hyper_secretkey)
    # Short, stable tag identifying this master.
    self.masterhash = hashlib.sha1(
        self.master.name.encode('utf-8')).hexdigest()[:6]
    self.size = hyper_size
    self.image = image
    if not masterFQDN:  # also match empty string (for UI)
        masterFQDN = socket.getfqdn()
    self.masterFQDN = masterFQDN
def checkConfig(self, name, run_task_kwargs, container_name='buildbot',
                boto3_session=None, master_host=None, build_wait_timeout=0,
                **kwargs):
    """Validate an ECSWorker configuration.

    run_task_kwargs is handed to ecs.run_task() and must therefore name
    both the cluster and the task definition.
    """
    cluster_missing = 'cluster' not in run_task_kwargs
    if cluster_missing:
        config.error(
            "You must specify a cluster in the ECSWorker run_task_kwargs")
    if 'taskDefinition' not in run_task_kwargs:
        config.error(
            "You must specify a taskDefinition in the ECSWorker run_task_kwargs"
        )
    return AbstractLatentWorker.checkConfig(
        self, name, password=None,
        build_wait_timeout=build_wait_timeout, **kwargs)
def reconfigService(self, name, run_task_kwargs, container_name='buildbot',
                    boto3_session=None, master_host=None,
                    build_wait_timeout=0, **kwargs):
    """Apply configuration and build the boto3 ECS client."""
    self.task_arn = None
    self.container_name = container_name
    self.run_task_kwargs = run_task_kwargs
    # Workers connect back to the master; default to this host's FQDN.
    self.master_host = master_host or socket.getfqdn()
    session = boto3_session or boto3.Session()
    self.boto3_session = session
    self.ecs = session.client('ecs')
    # Workers authenticate with a throwaway random password unless one
    # was explicitly configured.
    password = kwargs.pop('password', self.getRandomPass())
    return AbstractLatentWorker.reconfigService(
        self, name, password=password,
        build_wait_timeout=build_wait_timeout, **kwargs)
def __init__(self, name, password, instance_type, ami=None,
             valid_ami_owners=None, valid_ami_location_regex=None,
             elastic_ip=None, identifier=None, secret_identifier=None,
             aws_id_file_path=None, user_data=None, region=None,
             keypair_name=None, security_name=None, spot_instance=False,
             max_spot_price=1.6, volumes=None, placement=None,
             price_multiplier=1.2, tags=None,
             product_description='Linux/UNIX', subnet_id=None,
             security_group_ids=None, instance_profile_name=None,
             block_device_map=None, session=None, **kwargs):
    """EC2-backed latent worker (boto3).

    Either a specific *ami*, or AMI search filters
    (*valid_ami_owners* / *valid_ami_location_regex*), must be given —
    exactly one of the two forms. Credentials come from
    *identifier*/*secret_identifier*, from the deprecated aws_id file, or
    from the boto3 default chain when neither is supplied.
    """
    if not boto3:
        config.error("The python module 'boto3' is needed to use a "
                     "EC2LatentWorker")
    if keypair_name is None:
        reportDeprecatedWorkerNameUsage(
            "Use of default value of 'keypair_name' of EC2LatentWorker "
            "constructor is deprecated. Please explicitly specify value")
        keypair_name = 'latent_buildbot_slave'
    if security_name is None and not subnet_id:
        reportDeprecatedWorkerNameUsage(
            "Use of default value of 'security_name' of EC2LatentWorker "
            "constructor is deprecated. Please explicitly specify value")
        security_name = 'latent_buildbot_slave'
    if volumes is None:
        volumes = []
    if tags is None:
        tags = {}
    AbstractLatentWorker.__init__(self, name, password, **kwargs)
    # Classic security groups and VPC subnets are mutually exclusive.
    if security_name and subnet_id:
        raise ValueError(
            'security_name (EC2 classic security groups) is not supported '
            'in a VPC. Use security_group_ids instead.')
    # Exactly one of: a specific AMI, or owner/location search filters.
    if not ((ami is not None) ^
            (valid_ami_owners is not None or
             valid_ami_location_regex is not None)):
        raise ValueError(
            'You must provide either a specific ami, or one or both of '
            'valid_ami_location_regex and valid_ami_owners')
    self.ami = ami
    if valid_ami_owners is not None:
        if isinstance(valid_ami_owners, integer_types):
            valid_ami_owners = (valid_ami_owners,)
        else:
            for element in valid_ami_owners:
                if not isinstance(element, integer_types):
                    raise ValueError(
                        'valid_ami_owners should be int or iterable '
                        'of ints', element)
    if valid_ami_location_regex is not None:
        if not isinstance(valid_ami_location_regex, string_types):
            raise ValueError(
                'valid_ami_location_regex should be a string')
        else:
            # verify that regex will compile
            re.compile(valid_ami_location_regex)
    if spot_instance and price_multiplier is None and max_spot_price is None:
        raise ValueError('You must provide either one, or both, of '
                         'price_multiplier or max_spot_price')
    self.valid_ami_owners = None
    if valid_ami_owners:
        self.valid_ami_owners = [str(o) for o in valid_ami_owners]
    self.valid_ami_location_regex = valid_ami_location_regex
    self.instance_type = instance_type
    self.keypair_name = keypair_name
    self.security_name = security_name
    self.user_data = user_data
    self.spot_instance = spot_instance
    self.max_spot_price = max_spot_price
    self.volumes = volumes
    self.price_multiplier = price_multiplier
    self.product_description = product_description
    # Availability zone is region + placement suffix (e.g. us-east-1 + a).
    if None not in [placement, region]:
        self.placement = '%s%s' % (region, placement)
    else:
        self.placement = None
    if identifier is None:
        assert secret_identifier is None, (
            'supply both or neither of identifier, secret_identifier')
        if aws_id_file_path is None:
            home = os.environ['HOME']
            default_path = os.path.join(home, '.ec2', 'aws_id')
            if os.path.exists(default_path):
                aws_id_file_path = default_path
        if aws_id_file_path:
            log.msg('WARNING: EC2LatentWorker is using deprecated '
                    'aws_id file')
            with open(aws_id_file_path, 'r') as aws_file:
                # access key id on the first line, secret on the second
                identifier = aws_file.readline().strip()
                secret_identifier = aws_file.readline().strip()
    else:
        assert aws_id_file_path is None, \
            'if you supply the identifier and secret_identifier, ' \
            'do not specify the aws_id_file_path'
        assert secret_identifier is not None, \
            'supply both or neither of identifier, secret_identifier'
    region_found = None
    # Make the EC2 connection.
    self.session = session
    if self.session is None:
        if region is not None:
            # Validate the region against the ones boto3 knows about.
            for r in boto3.Session(
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier
                    ).get_available_regions('ec2'):
                if r == region:
                    region_found = r
            if region_found is not None:
                self.session = boto3.Session(
                    region_name=region,
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier)
            else:
                raise ValueError(
                    'The specified region does not exist: ' + region)
        else:
            # boto2 defaulted to us-east-1 when region was unset, we
            # mimic this here in boto3
            region = botocore.session.get_session().get_config_variable('region')
            if region is None:
                region = 'us-east-1'
            self.session = boto3.Session(
                aws_access_key_id=identifier,
                aws_secret_access_key=secret_identifier,
                region_name=region
            )
    self.ec2 = self.session.resource('ec2')
    self.ec2_client = self.session.client('ec2')
    # Make a keypair
    #
    # We currently discard the keypair data because we don't need it.
    # If we do need it in the future, we will always recreate the keypairs
    # because there is no way to
    # programmatically retrieve the private key component, unless we
    # generate it and store it on the filesystem, which is an unnecessary
    # usage requirement.
    try:
        self.ec2.KeyPair(self.keypair_name).load()
        # key_pair.delete() # would be used to recreate
    except ClientError as e:
        if 'InvalidKeyPair.NotFound' not in str(e):
            if 'AuthFailure' in str(e):
                log.msg('POSSIBLE CAUSES OF ERROR:\n'
                        ' Did you supply your AWS credentials?\n'
                        ' Did you sign up for EC2?\n'
                        ' Did you put a credit card number in your AWS '
                        'account?\n'
                        'Please doublecheck before reporting a problem.\n')
            raise
        # make one; we would always do this, and stash the result, if we
        # needed the key (for instance, to SSH to the box). We'd then
        # use paramiko to use the key to connect.
        self.ec2.create_key_pair(KeyName=keypair_name)
    # create security group
    if security_name:
        try:
            self.ec2_client.describe_security_groups(
                GroupNames=[security_name])
        except ClientError as e:
            if 'InvalidGroup.NotFound' in str(e):
                self.security_group = self.ec2.create_security_group(
                    GroupName=security_name,
                    Description='Authorization to access the buildbot instance.')
                # Authorize the master as necessary
                # TODO this is where we'd open the hole to do the reverse pb
                # connect to the buildbot
                # ip = urllib.urlopen(
                #     'http://checkip.amazonaws.com').read().strip()
                # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
                # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
            else:
                raise
    # get the image
    if self.ami is not None:
        self.image = self.ec2.Image(self.ami)
    else:
        # verify we have access to at least one acceptable image
        discard = self.get_image()
        assert discard
    # get the specified elastic IP, if any
    if elastic_ip is not None:
        # Using ec2.vpc_addresses.filter(PublicIps=[elastic_ip]) throws a
        # NotImplementedError("Filtering not supported in describe_address.") in moto
        # https://github.com/spulec/moto/blob/100ec4e7c8aa3fde87ff6981e2139768816992e4/moto/ec2/responses/elastic_ip_addresses.py#L52
        addresses = self.ec2.meta.client.describe_addresses(
            PublicIps=[elastic_ip])['Addresses']
        if not addresses:
            raise ValueError(
                'Could not find EIP for IP: ' + elastic_ip)
        allocation_id = addresses[0]['AllocationId']
        elastic_ip = self.ec2.VpcAddress(allocation_id)
    self.elastic_ip = elastic_ip
    self.subnet_id = subnet_id
    self.security_group_ids = security_group_ids
    self.classic_security_groups = [
        self.security_name] if self.security_name else None
    self.instance_profile_name = instance_profile_name
    self.tags = tags
    self.block_device_map = self.create_block_device_mapping(
        block_device_map) if block_device_map else None
def stopService(self):
    """Stop the service, then remove any singleton cloud resources.

    Generator (inlineCallbacks style).
    """
    # stopService will call stop_instance if the worker was up.
    yield AbstractLatentWorker.stopService(self)
    yield self.maybeDeleteSingletons()
def checkConfig(self, name, _, **kwargs):
    """Validate config, forcing the controller's build_wait_timeout."""
    wait = self._controller.build_wait_timeout
    AbstractLatentWorker.checkConfig(self, name, None,
                                     build_wait_timeout=wait, **kwargs)
def reconfigService(self, name, _, **kwargs):
    """Reconfigure, forcing the controller's build_wait_timeout."""
    wait = self._controller.build_wait_timeout
    AbstractLatentWorker.reconfigService(self, name, None,
                                         build_wait_timeout=wait, **kwargs)
def __init__(self, name, password, instance_type, ami=None,
             valid_ami_owners=None, valid_ami_location_regex=None,
             elastic_ip=None, identifier=None, secret_identifier=None,
             aws_id_file_path=None, user_data=None, region=None,
             keypair_name=None,
             security_name=None,
             spot_instance=False, max_spot_price=1.6, volumes=None,
             placement=None, price_multiplier=1.2, tags=None,
             product_description='Linux/UNIX',
             subnet_id=None, security_group_ids=None,
             instance_profile_name=None,
             block_device_map=None, session=None,
             **kwargs):
    """Create an EC2-backed latent worker.

    Validates the AMI/credential/spot configuration, builds a boto3
    session and EC2 resource, and performs one-time AWS setup: ensures
    the key pair and (classic) security group exist, resolves the image,
    and resolves an elastic IP to a ``VpcAddress`` if one was given.

    Either ``ami`` or at least one of ``valid_ami_owners`` /
    ``valid_ami_location_regex`` must be supplied (exclusively).
    Credentials come from ``identifier``/``secret_identifier``, the
    deprecated ``aws_id_file_path`` file, or boto3's default chain.

    NOTE(review): this constructor performs network calls to AWS
    (key pair / security group creation, describe_addresses).
    """
    if not boto3:
        config.error("The python module 'boto3' is needed to use a "
                     "EC2LatentWorker")

    # Defaulted names are deprecated; warn but keep the legacy value
    # for backward compatibility.
    if keypair_name is None:
        reportDeprecatedWorkerNameUsage(
            "Use of default value of 'keypair_name' of EC2LatentWorker "
            "constructor is deprecated. Please explicitly specify value")
        keypair_name = 'latent_buildbot_slave'
    if security_name is None and not subnet_id:
        reportDeprecatedWorkerNameUsage(
            "Use of default value of 'security_name' of EC2LatentWorker "
            "constructor is deprecated. Please explicitly specify value")
        security_name = 'latent_buildbot_slave'

    if volumes is None:
        volumes = []

    if tags is None:
        tags = {}

    AbstractLatentWorker.__init__(self, name, password, **kwargs)

    # Classic security groups and VPC subnets are mutually exclusive.
    if security_name and subnet_id:
        raise ValueError(
            'security_name (EC2 classic security groups) is not supported '
            'in a VPC. Use security_group_ids instead.')
    # XOR: exactly one of (specific ami) / (owner- or location-filtered
    # lookup) must be configured.
    if not ((ami is not None) ^
            (valid_ami_owners is not None or
             valid_ami_location_regex is not None)):
        raise ValueError(
            'You must provide either a specific ami, or one or both of '
            'valid_ami_location_regex and valid_ami_owners')
    self.ami = ami
    if valid_ami_owners is not None:
        # Accept a single owner id or an iterable of them.
        if isinstance(valid_ami_owners, integer_types):
            valid_ami_owners = (valid_ami_owners,)
        else:
            for element in valid_ami_owners:
                if not isinstance(element, integer_types):
                    raise ValueError(
                        'valid_ami_owners should be int or iterable '
                        'of ints', element)
    if valid_ami_location_regex is not None:
        if not isinstance(valid_ami_location_regex, string_types):
            raise ValueError(
                'valid_ami_location_regex should be a string')
        else:
            # verify that regex will compile
            re.compile(valid_ami_location_regex)

    if spot_instance and price_multiplier is None and max_spot_price is None:
        raise ValueError('You must provide either one, or both, of '
                         'price_multiplier or max_spot_price')

    # Owner ids are stored as strings, as boto3 expects.
    self.valid_ami_owners = None
    if valid_ami_owners:
        self.valid_ami_owners = [str(o) for o in valid_ami_owners]
    self.valid_ami_location_regex = valid_ami_location_regex
    self.instance_type = instance_type
    self.keypair_name = keypair_name
    self.security_name = security_name
    self.user_data = user_data
    self.spot_instance = spot_instance
    self.max_spot_price = max_spot_price
    self.volumes = volumes
    self.price_multiplier = price_multiplier
    self.product_description = product_description

    # Availability zone is region + placement suffix (e.g. 'us-east-1'
    # + 'a'); only meaningful when both are given.
    if None not in [placement, region]:
        self.placement = '%s%s' % (region, placement)
    else:
        self.placement = None
    if identifier is None:
        # NOTE(review): assert is stripped under ``python -O``; these
        # credential sanity checks would silently vanish there.
        assert secret_identifier is None, (
            'supply both or neither of identifier, secret_identifier')
        if aws_id_file_path is None:
            # NOTE(review): raises KeyError when HOME is unset (e.g.
            # Windows); os.path.expanduser('~') would be more portable.
            home = os.environ['HOME']
            default_path = os.path.join(home, '.ec2', 'aws_id')
            if os.path.exists(default_path):
                aws_id_file_path = default_path
        if aws_id_file_path:
            log.msg('WARNING: EC2LatentWorker is using deprecated '
                    'aws_id file')
            # File format: access key id on line 1, secret on line 2.
            with open(aws_id_file_path, 'r') as aws_file:
                identifier = aws_file.readline().strip()
                secret_identifier = aws_file.readline().strip()
    else:
        assert aws_id_file_path is None, \
            'if you supply the identifier and secret_identifier, ' \
            'do not specify the aws_id_file_path'
        assert secret_identifier is not None, \
            'supply both or neither of identifier, secret_identifier'

    region_found = None

    # Make the EC2 connection.
    self.session = session
    if self.session is None:
        if region is not None:
            # Reject region names boto3 does not know about, rather than
            # failing later with an opaque endpoint error.
            for r in boto3.Session(
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier).get_available_regions('ec2'):
                if r == region:
                    region_found = r

            if region_found is not None:
                self.session = boto3.Session(
                    region_name=region,
                    aws_access_key_id=identifier,
                    aws_secret_access_key=secret_identifier)
            else:
                raise ValueError(
                    'The specified region does not exist: ' + region)
        else:
            # boto2 defaulted to us-east-1 when region was unset, we
            # mimic this here in boto3
            region = botocore.session.get_session().get_config_variable('region')
            if region is None:
                region = 'us-east-1'
            self.session = boto3.Session(
                aws_access_key_id=identifier,
                aws_secret_access_key=secret_identifier,
                region_name=region
            )

    self.ec2 = self.session.resource('ec2')

    # Make a keypair
    #
    # We currently discard the keypair data because we don't need it.
    # If we do need it in the future, we will always recreate the keypairs
    # because there is no way to
    # programmatically retrieve the private key component, unless we
    # generate it and store it on the filesystem, which is an unnecessary
    # usage requirement.
    try:
        self.ec2.KeyPair(self.keypair_name).load()
        # key_pair.delete() # would be used to recreate
    except ClientError as e:
        # Only a missing key pair is recoverable; anything else
        # (notably auth failures) is re-raised after a hint.
        if 'InvalidKeyPair.NotFound' not in str(e):
            if 'AuthFailure' in str(e):
                log.msg('POSSIBLE CAUSES OF ERROR:\n'
                        '  Did you supply your AWS credentials?\n'
                        '  Did you sign up for EC2?\n'
                        '  Did you put a credit card number in your AWS '
                        'account?\n'
                        'Please doublecheck before reporting a problem.\n')
            raise
        # make one; we would always do this, and stash the result, if we
        # needed the key (for instance, to SSH to the box). We'd then
        # use paramiko to use the key to connect.
        self.ec2.create_key_pair(KeyName=keypair_name)

    # create security group
    if security_name:
        try:
            self.ec2.SecurityGroup(security_name).load()
        except ClientError as e:
            if 'InvalidGroup.NotFound' in str(e):
                # NOTE(review): self.security_group is assigned only on
                # this branch — it is unset when the group already exists.
                self.security_group = self.ec2.create_security_group(
                    GroupName=security_name,
                    Description='Authorization to access the buildbot instance.')
                # Authorize the master as necessary
                # TODO this is where we'd open the hole to do the reverse pb
                # connect to the buildbot
                # ip = urllib.urlopen(
                #     'http://checkip.amazonaws.com').read().strip()
                # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
                # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
            else:
                raise

    # get the image
    if self.ami is not None:
        self.image = self.ec2.Image(self.ami)
    else:
        # verify we have access to at least one acceptable image
        discard = self.get_image()
        assert discard

    # get the specified elastic IP, if any
    if elastic_ip is not None:
        # Using ec2.vpc_addresses.filter(PublicIps=[elastic_ip]) throws a
        # NotImplementedError("Filtering not supported in describe_address.") in moto
        # https://github.com/spulec/moto/blob/100ec4e7c8aa3fde87ff6981e2139768816992e4/moto/ec2/responses/elastic_ip_addresses.py#L52
        addresses = self.ec2.meta.client.describe_addresses(
            PublicIps=[elastic_ip])['Addresses']
        if not addresses:
            raise ValueError(
                'Could not find EIP for IP: ' + elastic_ip)
        allocation_id = addresses[0]['AllocationId']
        # Rebind the caller-supplied IP string to its VpcAddress resource.
        elastic_ip = self.ec2.VpcAddress(allocation_id)
    self.elastic_ip = elastic_ip
    self.subnet_id = subnet_id
    self.security_group_ids = security_group_ids
    # Classic (non-VPC) groups are passed as a one-element list, or not
    # at all when security_name is unset.
    self.classic_security_groups = [
        self.security_name] if self.security_name else None
    self.instance_profile_name = instance_profile_name
    self.tags = tags
    self.block_device_map = self.create_block_device_mapping(
        block_device_map) if block_device_map else None
def __init__(self, name, controller, **kwargs):
    """Create a controllable latent worker bound to *controller*.

    The password is fixed to ``None``; the controller drives the
    worker's lifecycle in tests.

    NOTE(review): ``_controller`` is deliberately assigned before the
    base ``__init__`` — base-class initialization may invoke config
    hooks that read it (cf. the sibling checkConfig/reconfigService
    using ``self._controller.build_wait_timeout``); do not reorder.
    """
    self._controller = controller
    AbstractLatentWorker.__init__(self, name, None, **kwargs)
def reconfigService(self, name, _, **kwargs):
    """Reconfigure the worker via the base class, ignoring the password
    argument (``None`` is always passed).

    Fix: ``AbstractLatentWorker.reconfigService`` returns a Deferred as
    part of Twisted's asynchronous service reconfiguration; the original
    discarded it, making the reconfiguration fire-and-forget and hiding
    failures. Returning it is backward-compatible.
    """
    return AbstractLatentWorker.reconfigService(self, name, None, **kwargs)
def checkConfig(self, name, _, **kwargs):
    """Validate configuration through the base class; the password
    argument is discarded and ``None`` is substituted."""
    base_args = (self, name, None)
    AbstractLatentWorker.checkConfig(*base_args, **kwargs)
def __init__(self, name, controller, **kwargs):
    """Create a controllable latent worker bound to *controller*.

    The password is fixed to ``None``; the controller drives the
    worker's lifecycle in tests.

    NOTE(review): here ``_controller`` is assigned *after* the base
    ``__init__`` (the sibling variant assigns it before) — confirm the
    base class does not read ``_controller`` during initialization
    before reordering or unifying the two.
    """
    AbstractLatentWorker.__init__(self, name, None, **kwargs)
    self._controller = controller