def __init__(self, config_file, aws_access_key_id=None,
             aws_secret_access_key=None):
    super(ServiceDef, self).__init__(config_file)
    self.aws_access_key_id = aws_access_key_id
    self.aws_secret_access_key = aws_secret_access_key
    script = Config.get(self, 'Pyami', 'scripts')
    if script:
        self.name = script.split('.')[-1]
    else:
        self.name = None
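# Illustrative sketch (not part of the original module): it only shows how the
# 'scripts' option configured under [Pyami] is reduced to the short name stored
# on self.name above.  The dotted path used here is a placeholder.
def _example_service_name(script='fcu_boto.pyami.scriptbase.ScriptBase'):
    # Mirrors the split in ServiceDef.__init__: keep the last dotted component.
    return script.split('.')[-1]   # -> 'ScriptBase'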
def write_metadata(self):
    fp = open(os.path.expanduser(BotoConfigPath), 'w')
    fp.write('[Instance]\n')
    inst_data = get_instance_metadata()
    for key in inst_data:
        fp.write('%s = %s\n' % (key, inst_data[key]))
    user_data = get_instance_userdata()
    fp.write('\n%s\n' % user_data)
    fp.write('[Pyami]\n')
    fp.write('working_dir = %s\n' % self.working_dir)
    fp.close()
    # This file has the AWS credentials, should we lock it down?
    # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE)
    # now that we have written the file, read it into a pyami Config object
    fcu_boto.config = Config()
    fcu_boto.init_logging()
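# Illustrative sketch (assumption: the metadata keys and values shown are
# placeholders).  write_metadata() above produces an INI-style file at
# BotoConfigPath shaped roughly like this: an [Instance] section with one line
# per metadata entry, then the raw user data blob (which normally carries its
# own config sections, e.g. [Credentials]), and finally a [Pyami] section
# recording the working directory:
#
#     [Instance]
#     instance-id = i-0123456789abcdef0
#     local-hostname = ip-10-0-0-12.ec2.internal
#
#     [Credentials]
#     aws_access_key_id = ...
#     aws_secret_access_key = ...
#
#     [Pyami]
#     working_dir = /mnt/pyami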
class Item(IObject):

    def __init__(self):
        self.region = None
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = None
        self.config = None
        # Needed by set_userdata/get_userdata below; without it those
        # methods raise AttributeError.
        self.userdata = {}

    def set_userdata(self, key, value):
        self.userdata[key] = value

    def get_userdata(self, key):
        return self.userdata[key]

    def set_region(self, region=None):
        if region:
            self.region = region
        else:
            l = [(r, r.name, r.endpoint) for r in fcu_boto.ec2.regions()]
            self.region = self.choose_from_list(l, prompt='Choose Region')

    def set_name(self, name=None):
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        if instance_type:
            self.instance_type = instance_type
        else:
            self.instance_type = self.choose_from_list(
                InstanceTypes, 'Instance Type')

    def set_quantity(self, n=0):
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(
                l, prompt='Choose Availability Zone')

    def set_ami(self, ami=None):
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description)
                 for s in self.ec2.get_all_security_groups()]
            self.groups.append(
                self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id',
                            self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key',
                            self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        s = StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        self.region = params.get('region', self.region)
        if not self.region:
            self.set_region()
        self.ec2 = self.region.connect()
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
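# Illustrative usage sketch (not part of the original module; the name and
# quantity are placeholders).  Item.enter() takes any values supplied as
# keyword params and interactively prompts for the rest (region, AMI,
# security group, keypair, config file); update_config() then copies the EC2
# credentials and SDB settings into the config that is serialized as instance
# user data.
def _example_item_usage():
    item = Item()
    item.enter(name='demo-server', quantity=1)  # prompts for anything missing
    return item.get_userdata_string()           # config text used as user data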
class Provider(object):

    CredentialMap = {
        'aws': ('aws_access_key_id', 'aws_secret_access_key',
                'aws_security_token', 'aws_profile'),
        'google': ('gs_access_key_id', 'gs_secret_access_key',
                   None, None),
    }

    AclClassMap = {
        'aws': Policy,
        'google': ACL
    }

    CannedAclsMap = {
        'aws': CannedS3ACLStrings,
        'google': CannedGSACLStrings
    }

    HostKeyMap = {
        'aws': 's3',
        'google': 'gs'
    }

    ChunkedTransferSupport = {
        'aws': False,
        'google': True
    }

    MetadataServiceSupport = {
        'aws': True,
        'google': False
    }

    # If you update this map please make sure to put "None" for the
    # right-hand-side for any headers that don't apply to a provider, rather
    # than simply leaving that header out (which would cause KeyErrors).
    HeaderInfoMap = {
        'aws': {
            HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
            METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'AWS',
            COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX +
                'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX +
                'copy-source-range',
            DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX +
                'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: None,
            SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX +
                'server-side-encryption',
            VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
            MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
            RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore',
        },
        'google': {
            HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
            METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'GOOG1',
            COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX +
                'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: None,
            DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX +
                'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
            SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: None,
            # Note that this version header is not to be confused with
            # the Google Cloud Storage 'x-goog-api-version' header.
            VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: GOOG_HEADER_PREFIX + 'storage-class',
            MFA_HEADER_KEY: None,
            RESTORE_HEADER_KEY: None,
        }
    }

    ErrorMap = {
        'aws': {
            STORAGE_COPY_ERROR: fcu_boto.exception.S3CopyError,
            STORAGE_CREATE_ERROR: fcu_boto.exception.S3CreateError,
            STORAGE_DATA_ERROR: fcu_boto.exception.S3DataError,
            STORAGE_PERMISSIONS_ERROR: fcu_boto.exception.S3PermissionsError,
            STORAGE_RESPONSE_ERROR: fcu_boto.exception.S3ResponseError,
        },
        'google': {
            STORAGE_COPY_ERROR: fcu_boto.exception.GSCopyError,
            STORAGE_CREATE_ERROR: fcu_boto.exception.GSCreateError,
            STORAGE_DATA_ERROR: fcu_boto.exception.GSDataError,
            STORAGE_PERMISSIONS_ERROR: fcu_boto.exception.GSPermissionsError,
            STORAGE_RESPONSE_ERROR: fcu_boto.exception.GSResponseError,
        }
    }

    def __init__(self, name, access_key=None, secret_key=None,
                 security_token=None, profile_name=None):
        self.host = None
        self.port = None
        self.host_header = None
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.profile_name = profile_name
        self.name = name
        self.acl_class = self.AclClassMap[self.name]
        self.canned_acls = self.CannedAclsMap[self.name]
        self._credential_expiry_time = None

        # Load shared credentials file if it exists
        shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
        self.shared_credentials = Config(do_load=False)
        if os.path.isfile(shared_path):
            self.shared_credentials.load_from_path(shared_path)

        self.get_credentials(access_key, secret_key, security_token,
                             profile_name)
        self.configure_headers()
        self.configure_errors()

        # Allow config file to override default host and port.
        host_opt_name = '%s_host' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_opt_name):
            self.host = config.get('Credentials', host_opt_name)
        port_opt_name = '%s_port' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', port_opt_name):
            self.port = config.getint('Credentials', port_opt_name)
        host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_header_opt_name):
            self.host_header = config.get('Credentials', host_header_opt_name)

    def get_access_key(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._access_key

    def set_access_key(self, value):
        self._access_key = value

    access_key = property(get_access_key, set_access_key)

    def get_secret_key(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._secret_key

    def set_secret_key(self, value):
        self._secret_key = value

    secret_key = property(get_secret_key, set_secret_key)

    def get_security_token(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._security_token

    def set_security_token(self, value):
        self._security_token = value

    security_token = property(get_security_token, set_security_token)

    def _credentials_need_refresh(self):
        if self._credential_expiry_time is None:
            return False
        else:
            # The credentials should be refreshed if they're going to expire
            # in less than 5 minutes.
            delta = self._credential_expiry_time - datetime.utcnow()
            # python2.6 does not have timedelta.total_seconds() so we have
            # to calculate this ourselves.  This is straight from the
            # datetime docs.
            seconds_left = (
                (delta.microseconds +
                 (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
            if seconds_left < (5 * 60):
                fcu_boto.log.debug("Credentials need to be refreshed.")
                return True
            else:
                return False

    def get_credentials(self, access_key=None, secret_key=None,
                        security_token=None, profile_name=None):
        access_key_name, secret_key_name, security_token_name, \
            profile_name_name = self.CredentialMap[self.name]

        # Load profile from shared environment variable if it was not
        # already passed in and the environment variable exists
        if profile_name is None and profile_name_name is not None and \
                profile_name_name.upper() in os.environ:
            profile_name = os.environ[profile_name_name.upper()]

        shared = self.shared_credentials

        if access_key is not None:
            self.access_key = access_key
            fcu_boto.log.debug("Using access key provided by client.")
        elif access_key_name.upper() in os.environ:
            self.access_key = os.environ[access_key_name.upper()]
            fcu_boto.log.debug("Using access key found in environment "
                               "variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, access_key_name):
                self.access_key = shared.get(profile_name, access_key_name)
                fcu_boto.log.debug("Using access key found in shared "
                                   "credential file for profile %s." %
                                   profile_name)
            elif config.has_option("profile %s" % profile_name,
                                   access_key_name):
                self.access_key = config.get("profile %s" % profile_name,
                                             access_key_name)
                fcu_boto.log.debug("Using access key found in config file: "
                                   "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', access_key_name):
            self.access_key = shared.get('default', access_key_name)
            fcu_boto.log.debug("Using access key found in shared "
                               "credential file.")
        elif config.has_option('Credentials', access_key_name):
            self.access_key = config.get('Credentials', access_key_name)
            fcu_boto.log.debug("Using access key found in config file.")

        if secret_key is not None:
            self.secret_key = secret_key
            fcu_boto.log.debug("Using secret key provided by client.")
        elif secret_key_name.upper() in os.environ:
            self.secret_key = os.environ[secret_key_name.upper()]
            fcu_boto.log.debug("Using secret key found in environment "
                               "variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, secret_key_name):
                self.secret_key = shared.get(profile_name, secret_key_name)
                fcu_boto.log.debug("Using secret key found in shared "
                                   "credential file for profile %s." %
                                   profile_name)
            elif config.has_option("profile %s" % profile_name,
                                   secret_key_name):
                self.secret_key = config.get("profile %s" % profile_name,
                                             secret_key_name)
                fcu_boto.log.debug("Using secret key found in config file: "
                                   "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', secret_key_name):
            self.secret_key = shared.get('default', secret_key_name)
            fcu_boto.log.debug("Using secret key found in shared "
                               "credential file.")
        elif config.has_option('Credentials', secret_key_name):
            self.secret_key = config.get('Credentials', secret_key_name)
            fcu_boto.log.debug("Using secret key found in config file.")
        elif config.has_option('Credentials', 'keyring'):
            keyring_name = config.get('Credentials', 'keyring')
            try:
                import keyring
            except ImportError:
                fcu_boto.log.error("The keyring module could not be imported. "
                                   "For keyring support, install the keyring "
                                   "module.")
                raise
            self.secret_key = keyring.get_password(
                keyring_name, self.access_key)
            fcu_boto.log.debug("Using secret key found in keyring.")

        if security_token is not None:
            self.security_token = security_token
            fcu_boto.log.debug("Using security token provided by client.")
        elif ((security_token_name is not None) and
              (access_key is None) and (secret_key is None)):
            # Only provide a token from the environment/config if the
            # caller did not specify a key and secret.  Otherwise an
            # environment/config token could be paired with a different
            # set of credentials provided by the caller.
            if security_token_name.upper() in os.environ:
                self.security_token = os.environ[security_token_name.upper()]
                fcu_boto.log.debug("Using security token found in environment"
                                   " variable.")
            elif shared.has_option(profile_name or 'default',
                                   security_token_name):
                self.security_token = shared.get(profile_name or 'default',
                                                 security_token_name)
                fcu_boto.log.debug("Using security token found in shared "
                                   "credential file.")
            elif profile_name is not None:
                if config.has_option("profile %s" % profile_name,
                                     security_token_name):
                    fcu_boto.log.debug("config has option")
                    self.security_token = config.get("profile %s" %
                                                     profile_name,
                                                     security_token_name)
                    fcu_boto.log.debug("Using security token found in config "
                                       "file: profile %s." % profile_name)
            elif config.has_option('Credentials', security_token_name):
                self.security_token = config.get('Credentials',
                                                 security_token_name)
                fcu_boto.log.debug("Using security token found in config "
                                   "file.")

        if ((self._access_key is None or self._secret_key is None) and
                self.MetadataServiceSupport[self.name]):
            self._populate_keys_from_metadata_server()
        self._secret_key = self._convert_key_to_str(self._secret_key)

    def _populate_keys_from_metadata_server(self):
        # get_instance_metadata is imported here because of a circular
        # dependency.
        fcu_boto.log.debug("Retrieving credentials from metadata server.")
        from fcu_boto.utils import get_instance_metadata
        timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
        attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
        # The num_retries arg is actually the total number of attempts made,
        # so the config option is named *_num_attempts to make this more
        # clear to users.
        metadata = get_instance_metadata(
            timeout=timeout, num_retries=attempts,
            data='meta-data/iam/security-credentials/')
        if metadata:
            creds = self._get_credentials_from_metadata(metadata)
            self._access_key = creds[0]
            self._secret_key = creds[1]
            self._security_token = creds[2]
            expires_at = creds[3]
            # I'm assuming there's only one role on the instance profile.
            self._credential_expiry_time = datetime.strptime(
                expires_at, "%Y-%m-%dT%H:%M:%SZ")
            fcu_boto.log.debug("Retrieved credentials will expire in %s at: %s",
                               self._credential_expiry_time - datetime.now(),
                               expires_at)

    def _get_credentials_from_metadata(self, metadata):
        # Given metadata, return a tuple of (access, secret, token, expiration)
        # On errors, an InvalidInstanceMetadataError will be raised.
        # The "metadata" is a lazily loaded dictionary, which means that it's
        # possible to still encounter errors as we traverse through the
        # metadata dict.  We try to be careful and raise helpful error
        # messages when this happens.
        creds = list(metadata.values())[0]
        if not isinstance(creds, dict):
            # We want to special case a specific error condition which is
            # where get_instance_metadata() returns an empty string on
            # error conditions.
            if creds == '':
                msg = 'an empty string'
            else:
                msg = 'type: %s' % creds
            raise InvalidInstanceMetadataError("Expected a dict type of "
                                               "credentials instead received "
                                               "%s" % (msg))
        try:
            access_key = creds['AccessKeyId']
            secret_key = self._convert_key_to_str(creds['SecretAccessKey'])
            security_token = creds['Token']
            expires_at = creds['Expiration']
        except KeyError as e:
            raise InvalidInstanceMetadataError(
                "Credentials from instance metadata missing "
                "required key: %s" % e)
        return access_key, secret_key, security_token, expires_at

    def _convert_key_to_str(self, key):
        if isinstance(key, six.text_type):
            # the secret key must be bytes and not unicode to work
            # properly with hmac.new (see http://bugs.python.org/issue5285)
            return str(key)
        return key

    def configure_headers(self):
        header_info_map = self.HeaderInfoMap[self.name]
        self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
        self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
        self.acl_header = header_info_map[ACL_HEADER_KEY]
        self.auth_header = header_info_map[AUTH_HEADER_KEY]
        self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
        self.copy_source_version_id = header_info_map[
            COPY_SOURCE_VERSION_ID_HEADER_KEY]
        self.copy_source_range_header = header_info_map[
            COPY_SOURCE_RANGE_HEADER_KEY]
        self.date_header = header_info_map[DATE_HEADER_KEY]
        self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
        self.metadata_directive_header = (
            header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
        self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
        self.resumable_upload_header = (
            header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
        self.server_side_encryption_header = header_info_map[
            SERVER_SIDE_ENCRYPTION_KEY]
        self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
        self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
        self.mfa_header = header_info_map[MFA_HEADER_KEY]
        self.restore_header = header_info_map[RESTORE_HEADER_KEY]

    def configure_errors(self):
        error_map = self.ErrorMap[self.name]
        self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
        self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
        self.storage_data_error = error_map[STORAGE_DATA_ERROR]
        self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
        self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]

    def get_provider_name(self):
        return self.HostKeyMap[self.name]

    def supports_chunked_transfer(self):
        return self.ChunkedTransferSupport[self.name]
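# Illustrative usage sketch (not part of the original module).  Constructing a
# Provider resolves credentials in the order implemented by get_credentials()
# above: explicit arguments, then environment variables (AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY for 'aws'), then the shared ~/.aws/credentials file,
# then the boto config file and keyring, and finally, for providers with
# MetadataServiceSupport, the instance metadata server.
def _example_provider_usage():
    provider = Provider('aws')
    # Reading the properties re-fetches temporary credentials from the
    # metadata server when they are within five minutes of expiring.
    return provider.access_key, provider.secret_key, provider.security_token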
def create(cls, config_file=None, logical_volume=None, cfg=None, **params):
    """
    Create a new instance based on the specified configuration file or the
    specified configuration and the passed in parameters.

    If the config_file argument is not None, the configuration is read from
    there.  Otherwise, the cfg argument is used.

    The config file may include other config files with a #import reference.
    The included config files must reside in the same directory as the
    specified file.

    The logical_volume argument, if supplied, will be used to get the
    current physical volume ID and use that as an override of the value
    specified in the config file.  This may be useful for debugging purposes
    when you want to debug with a production config file but a test Volume.

    The dictionary argument may be used to override any EC2 configuration
    values in the config file.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in
        # params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    cls.add_credentials(cfg, ec2.aws_access_key_id,
                        ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume is not None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    cfg_fp = StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read
    # from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1,
                          max_count=params.get('quantity', 1),
                          key_name=kp,
                          security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement=zone,
                          user_data=cfg_fp.getvalue())
    l = []
    i = 0
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip is not None and len(instances) > 0:
        instance = instances[0]
        print('Waiting for instance to start '
              'so we can set its elastic IP address...')
        # Sometimes we get a message from ec2 that says that the instance
        # does not exist.  Hopefully the following delay will give ec2
        # enough time to get to a stable state:
        time.sleep(5)
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print('set the elastic IP of the first instance to %s' % elastic_ip)
    for instance in instances:
        s = cls()
        s.ec2 = ec2
        # The first server keeps the bare name; later servers get the index
        # appended.
        s.name = params.get('name') + ('' if i == 0 else str(i))
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        l.append(s)
        i += 1
    return l
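# Illustrative usage sketch (not part of the original module; it assumes
# create() is installed as a classmethod on the Server model defined below,
# and the config path is a placeholder).  create() reads EC2 settings from the
# [EC2] section of the config file, prompts via CommandLineGetter for anything
# still missing, launches the requested instances with the serialized config
# as user data, and returns one persisted Server per instance.
def _example_create_usage():
    servers = Server.create(config_file='/path/to/demo-server.cfg', quantity=1)
    return [s.instance_id for s in servers]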
class Server(Model):

    @property
    def ec2(self):
        if self._ec2 is None:
            self._ec2 = fcu_boto.connect_ec2()
        return self._ec2

    @classmethod
    def Inventory(cls):
        """
        Returns a list of Server instances, one for each Server object
        persisted in the db
        """
        l = ServerSet()
        rs = cls.find()
        for server in rs:
            l.append(server)
        return l

    @classmethod
    def Register(cls, name, instance_id, description=''):
        s = cls()
        s.name = name
        s.instance_id = instance_id
        s.description = description
        s.save()
        return s

    def __init__(self, id=None, **kw):
        super(Server, self).__init__(id, **kw)
        self._reservation = None
        self._instance = None
        self._ssh_client = None
        self._pkey = None
        self._config = None
        self._ec2 = None

    name = StringProperty(unique=True, verbose_name="Name")
    instance_id = StringProperty(verbose_name="Instance ID")
    config_uri = StringProperty()
    ami_id = StringProperty(verbose_name="AMI ID")
    zone = StringProperty(verbose_name="Availability Zone")
    security_group = StringProperty(verbose_name="Security Group",
                                    default="default")
    key_name = StringProperty(verbose_name="Key Name")
    elastic_ip = StringProperty(verbose_name="Elastic IP")
    instance_type = StringProperty(verbose_name="Instance Type")
    description = StringProperty(verbose_name="Description")
    log = StringProperty()

    def setReadOnly(self, value):
        raise AttributeError

    def getInstance(self):
        if not self._instance:
            if self.instance_id:
                try:
                    rs = self.ec2.get_all_reservations([self.instance_id])
                except:
                    return None
                if len(rs) > 0:
                    self._reservation = rs[0]
                    self._instance = self._reservation.instances[0]
        return self._instance

    instance = property(getInstance, setReadOnly, None,
                        'The Instance for the server')

    def getAMI(self):
        if self.instance:
            return self.instance.image_id

    ami = property(getAMI, setReadOnly, None, 'The AMI for the server')

    def getStatus(self):
        if self.instance:
            self.instance.update()
            return self.instance.state

    status = property(getStatus, setReadOnly, None,
                      'The status of the server')

    def getHostname(self):
        if self.instance:
            return self.instance.public_dns_name

    hostname = property(getHostname, setReadOnly, None,
                        'The public DNS name of the server')

    def getPrivateHostname(self):
        if self.instance:
            return self.instance.private_dns_name

    private_hostname = property(getPrivateHostname, setReadOnly, None,
                                'The private DNS name of the server')

    def getLaunchTime(self):
        if self.instance:
            return self.instance.launch_time

    launch_time = property(getLaunchTime, setReadOnly, None,
                           'The time the Server was started')

    def getConsoleOutput(self):
        if self.instance:
            return self.instance.get_console_output()

    console_output = property(getConsoleOutput, setReadOnly, None,
                              'Retrieve the console output for server')

    def getGroups(self):
        if self._reservation:
            return self._reservation.groups
        else:
            return None

    groups = property(getGroups, setReadOnly, None,
                      'The Security Groups controlling access to this server')

    def getConfig(self):
        if not self._config:
            remote_file = BotoConfigPath
            local_file = '%s.ini' % self.instance.id
            self.get_file(remote_file, local_file)
            self._config = Config(local_file)
        return self._config

    def setConfig(self, config):
        local_file = '%s.ini' % self.instance.id
        # open for writing; config.write() serializes the config into the file
        fp = open(local_file, 'w')
        config.write(fp)
        fp.close()
        self.put_file(local_file, BotoConfigPath)
        self._config = config

    config = property(getConfig, setConfig, None,
                      'The instance data for this server')

    def set_config(self, config):
        """
        Set SDB based config
        """
        self._config = config
        self._config.dump_to_sdb("botoConfigs", self.id)

    def load_config(self):
        self._config = Config(do_load=False)
        self._config.load_from_sdb("botoConfigs", self.id)

    def stop(self):
        if self.instance:
            self.instance.stop()

    def start(self):
        self.stop()
        ec2 = fcu_boto.connect_ec2()
        ami = ec2.get_all_images(image_ids=[str(self.ami_id)])[0]
        groups = ec2.get_all_security_groups(
            groupnames=[str(self.security_group)])
        if not self._config:
            self.load_config()
        if not self._config.has_section("Credentials"):
            self._config.add_section("Credentials")
            self._config.set("Credentials", "aws_access_key_id",
                             ec2.aws_access_key_id)
            self._config.set("Credentials", "aws_secret_access_key",
                             ec2.aws_secret_access_key)
        if not self._config.has_section("Pyami"):
            self._config.add_section("Pyami")
        if self._manager.domain:
            self._config.set('Pyami', 'server_sdb_domain',
                             self._manager.domain.name)
            self._config.set("Pyami", 'server_sdb_name', self.name)
        cfg = StringIO()
        self._config.write(cfg)
        cfg = cfg.getvalue()
        r = ami.run(min_count=1,
                    max_count=1,
                    key_name=self.key_name,
                    security_groups=groups,
                    instance_type=self.instance_type,
                    placement=self.zone,
                    user_data=cfg)
        i = r.instances[0]
        self.instance_id = i.id
        self.put()
        if self.elastic_ip:
            ec2.associate_address(self.instance_id, self.elastic_ip)

    def reboot(self):
        if self.instance:
            self.instance.reboot()

    def get_ssh_client(self, key_file=None,
                       host_key_file='~/.ssh/known_hosts', uname='root'):
        import paramiko
        if not self.instance:
            print('No instance yet!')
            return
        if not self._ssh_client:
            if not key_file:
                iobject = IObject()
                key_file = iobject.get_filename('Path to OpenSSH Key file')
            self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
            self._ssh_client = paramiko.SSHClient()
            self._ssh_client.load_system_host_keys()
            self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
            self._ssh_client.set_missing_host_key_policy(
                paramiko.AutoAddPolicy())
            self._ssh_client.connect(self.instance.public_dns_name,
                                     username=uname, pkey=self._pkey)
        return self._ssh_client

    def get_file(self, remotepath, localpath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        sftp_client.get(remotepath, localpath)

    def put_file(self, localpath, remotepath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        sftp_client.put(localpath, remotepath)

    def listdir(self, remotepath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        return sftp_client.listdir(remotepath)

    def shell(self, key_file=None):
        ssh_client = self.get_ssh_client(key_file)
        channel = ssh_client.invoke_shell()
        interactive_shell(channel)

    def bundle_image(self, prefix, key_file, cert_file, size):
        print('bundling image...')
        print('\tcopying cert and pk over to /mnt directory on server')
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        path, name = os.path.split(key_file)
        remote_key_file = '/mnt/%s' % name
        self.put_file(key_file, remote_key_file)
        path, name = os.path.split(cert_file)
        remote_cert_file = '/mnt/%s' % name
        self.put_file(cert_file, remote_cert_file)
        print('\tdeleting %s' % BotoConfigPath)
        # delete the metadata.ini file if it exists
        try:
            sftp_client.remove(BotoConfigPath)
        except:
            pass
        command = 'sudo ec2-bundle-vol '
        command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
        command += '-u %s ' % self._reservation.owner_id
        command += '-p %s ' % prefix
        command += '-s %d ' % size
        command += '-d /mnt '
        # 32-bit instance types get an i386 image, everything else x86_64
        if self.instance.instance_type in ('m1.small', 'c1.medium'):
            command += '-r i386'
        else:
            command += '-r x86_64'
        print('\t%s' % command)
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print('\t%s' % response)
        print('\t%s' % t[2].read())
        print('...complete!')

    def upload_bundle(self, bucket, prefix):
        print('uploading bundle...')
        command = 'ec2-upload-bundle '
        command += '-m /mnt/%s.manifest.xml ' % prefix
        command += '-b %s ' % bucket
        command += '-a %s ' % self.ec2.aws_access_key_id
        command += '-s %s ' % self.ec2.aws_secret_access_key
        print('\t%s' % command)
        ssh_client = self.get_ssh_client()
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print('\t%s' % response)
        print('\t%s' % t[2].read())
        print('...complete!')

    def create_image(self, bucket=None, prefix=None, key_file=None,
                     cert_file=None, size=None):
        iobject = IObject()
        if not bucket:
            bucket = iobject.get_string('Name of S3 bucket')
        if not prefix:
            prefix = iobject.get_string('Prefix for AMI file')
        if not key_file:
            key_file = iobject.get_filename('Path to RSA private key file')
        if not cert_file:
            cert_file = iobject.get_filename('Path to RSA public cert file')
        if not size:
            size = iobject.get_int('Size (in MB) of bundled image')
        self.bundle_image(prefix, key_file, cert_file, size)
        self.upload_bundle(bucket, prefix)
        print('registering image...')
        self.image_id = self.ec2.register_image('%s/%s.manifest.xml' %
                                                (bucket, prefix))
        return self.image_id

    def attach_volume(self, volume, device="/dev/sdp"):
        """
        Attach an EBS volume to this server

        :param volume: EBS Volume to attach
        :type volume: fcu_boto.ec2.volume.Volume

        :param device: Device to attach to (default to /dev/sdp)
        :type device: string
        """
        if hasattr(volume, "id"):
            volume_id = volume.id
        else:
            volume_id = volume
        return self.ec2.attach_volume(volume_id=volume_id,
                                      instance_id=self.instance_id,
                                      device=device)

    def detach_volume(self, volume):
        """
        Detach an EBS volume from this server

        :param volume: EBS Volume to detach
        :type volume: fcu_boto.ec2.volume.Volume
        """
        if hasattr(volume, "id"):
            volume_id = volume.id
        else:
            volume_id = volume
        return self.ec2.detach_volume(volume_id=volume_id,
                                      instance_id=self.instance_id)

    def install_package(self, package_name):
        print('installing %s...' % package_name)
        command = 'yum -y install %s' % package_name
        print('\t%s' % command)
        ssh_client = self.get_ssh_client()
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print('\t%s' % response)
        print('\t%s' % t[2].read())
        print('...complete!')
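# Illustrative usage sketch (not part of the original module; the instance id,
# key path, package, and volume id are placeholders).  A Server row can be
# registered against an already-running instance and then driven over SSH;
# attach_volume() accepts either a Volume object or a plain volume id.
def _example_server_usage():
    server = Server.Register('demo-server', 'i-0123456789abcdef0',
                             description='registered by hand')
    server.get_ssh_client(key_file='/path/to/openssh-key.pem')
    server.install_package('httpd')
    return server.attach_volume('vol-0123456789abcdef0', device='/dev/sdp')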
import logging
import logging.config

from fcu_boto.compat import urlparse
from fcu_boto.exception import InvalidUriError

__version__ = '2.46.1'
Version = __version__  # for backward compatibility

# http://bugs.python.org/issue7980
datetime.datetime.strptime('', '')

UserAgent = 'Boto/%s Python/%s %s/%s' % (__version__,
                                         platform.python_version(),
                                         platform.system(),
                                         platform.release())
config = Config()

# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
                           r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json')


def init_logging():
    for file in BotoConfigLocations:
        try:
            logging.config.fileConfig(os.path.expanduser(file))