def __init__(self, name, access_key=None, secret_key=None, security_token=None, profile_name=None):
    """Initialize a provider entry named *name*.

    Resolves credentials (explicit args, shared credentials file, then
    whatever ``get_credentials`` adds), and lets the boto config file
    override host/port settings.

    :param name: provider key used to index the class-level maps
        (``AclClassMap``, ``CannedAclsMap``, ``HostKeyMap``).
    :param access_key: optional explicit access key.
    :param secret_key: optional explicit secret key.
    :param security_token: optional session/security token.
    :param profile_name: optional named profile to read credentials from.
    """
    # Endpoint overrides default to "unset"; may be filled in from the
    # [Credentials] section of the boto config below.
    self.host = None
    self.port = None
    self.host_header = None
    self.access_key = access_key
    self.secret_key = secret_key
    self.security_token = security_token
    self.profile_name = profile_name
    self.name = name
    # Class-level maps translate the provider name into its ACL machinery;
    # a KeyError here means *name* is not a supported provider.
    self.acl_class = self.AclClassMap[self.name]
    self.canned_acls = self.CannedAclsMap[self.name]
    self._credential_expiry_time = None
    # Load shared credentials file if it exists
    # (e.g. ~/.aws/credentials when name == 'aws').
    shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
    self.shared_credentials = Config(do_load=False)
    if os.path.isfile(shared_path):
        self.shared_credentials.load_from_path(shared_path)
    # Must run after shared_credentials is populated — presumably it
    # consults that object as one credential source (TODO confirm).
    self.get_credentials(access_key, secret_key, security_token, profile_name)
    self.configure_headers()
    self.configure_errors()
    # Allow config file to override default host and port.
    host_opt_name = '%s_host' % self.HostKeyMap[self.name]
    if config.has_option('Credentials', host_opt_name):
        self.host = config.get('Credentials', host_opt_name)
    port_opt_name = '%s_port' % self.HostKeyMap[self.name]
    if config.has_option('Credentials', port_opt_name):
        self.port = config.getint('Credentials', port_opt_name)
    host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
    if config.has_option('Credentials', host_header_opt_name):
        self.host_header = config.get('Credentials', host_header_opt_name)
def getConfig(self):
    """Return the cached boto Config for this instance.

    On first use, downloads the remote boto config file from the instance
    to a local ``<instance-id>.ini`` copy and parses it; subsequent calls
    return the cached object.
    """
    if self._config:
        return self._config
    local_copy = '%s.ini' % self.instance.id
    self.get_file(BotoConfigPath, local_copy)
    self._config = Config(local_copy)
    return self._config
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
    """Initialize AWS credentials for boto and/or botocore.

    :param filepath: path to the credentials/config file; falls back to the
        ``CONFIG`` file next to this module when missing or nonexistent.
    :param enable_boto: load the file into ``boto.config`` and read the
        access/secret key pair from its [Credentials] section.
    :param enable_botocore: monkey-patch ``botocore.credentials`` so
        botocore uses the same key pair.
    :returns: ``(access_key, secret_key)`` when both were found, else None.
    """
    # Default credential file will be located at current folder
    if filepath is None or not os.path.exists(filepath):
        pwdpath = dirname(realpath(__file__))
        filepath = pathjoin(pwdpath, CONFIG)
    # BUGFIX: previously these names were only bound inside the enable_boto
    # branch, so enable_boto=False raised NameError further down.
    access_key = None
    secret_key = None
    if enable_boto:
        # Initialize credentials for boto
        from boto.pyami.config import Config
        boto.config = Config(filepath)
        access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
        secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)
        # FIXME: a trick when the value is empty — drop the section so boto
        # falls back to its other credential sources.
        if not access_key or not secret_key:
            boto.config.remove_section('Credentials')
    if enable_botocore:
        # Initialize credentials for botocore
        import botocore.credentials
        if access_key and secret_key:
            def get_credentials(session, metadata=None):
                return botocore.credentials.Credentials(access_key, secret_key)
            botocore.credentials.get_credentials = get_credentials
    if access_key and secret_key:
        return access_key, secret_key
    return None
def get_s3_client():
    """Build a boto3 S3 client from credentials in the boto config section."""
    cfg = Config()
    return boto3.client(
        "s3",
        aws_access_key_id=cfg.get_value(settings.BOTO_SECTION, "aws_access_key_id"),
        aws_secret_access_key=cfg.get_value(settings.BOTO_SECTION, "aws_secret_access_key"),
    )
def loadConfig(self, path=None):
    """Populate deployment settings from the configuration file at *path*.

    Reads credentials, instance parameters, cluster sizing, EBS options
    and service endpoints, applying defaults where the file omits values.
    """
    # Get all the Configuration
    cfg = Config(path=path)
    get = cfg.get
    # AWS credentials
    self.aws_access_key_id = get('Credentials', 'aws_access_key_id')
    self.aws_secret_access_key = get('Credentials', 'aws_secret_access_key')
    # EC2 instance parameters
    self.key_name = get('Key', 'key_name')
    self.instance_type = get('Instance', 'instance_type')
    self.zone = get('Instance', 'zone', default='us-east-1c')
    self.security_groups = get('Instance', 'security_groups')
    self.tags = get('Instance', 'tags')
    # Cluster type and size
    self.os = get('Type', 'os')
    self.num_nodes = int(get('Type', 'num_nodes'))
    self.ami = get('AMI', self.os)
    # EBS volume layout
    self.ebs_size = int(get('EBS', 'volume_size', default=0))
    self.num_ebs = int(get('EBS', 'volumes', default=0))
    # SSH / service access
    self.membase_port = get('global', 'port', default='8091')
    self.ssh_username = get('global', 'username', default='root')
    self.ssh_key_path = get('global', 'ssh_key', default='/root/.ssh/QAkey.pem')
    self.rest_username = get('membase', 'rest_username', default='Administrator')
    self.rest_password = get('membase', 'rest_password', default='password')
def create(cls, config_file=None, logical_volume=None, cfg=None, **params):
    """Launch EC2 instance(s) from a config file/Config object plus overrides.

    Python 2 variant (print statements, StringIO module).  Merges the [EC2]
    section of the config into *params*, prompts for any missing values via
    CommandLineGetter, runs the AMI with the config embedded as user data,
    optionally binds an elastic IP to the first instance, and persists one
    record per instance.

    :returns: list of created cls() records, one per launched instance.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    # Interactively fill in any parameters still missing.
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    # Record the connection's credentials into the config so the launched
    # instance can use them (cfg is sent as user data below).
    cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume != None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    cfg_fp = StringIO.StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1, max_count=params.get('quantity', 1),
                          key_name=kp, security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement=zone, user_data=cfg_fp.getvalue())
    l = []
    i = 0
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip != None and instances.__len__() > 0:
        # The IP can only be attached once the instance is running.
        instance = instances[0]
        print 'Waiting for instance to start so we can set its elastic IP address...'
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print 'set the elastic IP of the first instance to %s' % elastic_ip
    for instance in instances:
        s = cls()
        s.ec2 = ec2
        # NOTE(review): ternary binds the whole expression, so for i > 0 the
        # name is just str(i) and params['name'] is discarded — looks like a
        # precedence bug; confirm intent before changing.
        s.name = params.get('name') + '' if i == 0 else str(i)
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        l.append(s)
        i += 1
    return l
def parse_aws_credentials():
    """Look up the secret key matching the module-level ``access_key_id``.

    Reads the AWS shared credentials file (honoring
    ``AWS_SHARED_CREDENTIALS_FILE``).  If the default profile's key id
    matches, its pair is returned; otherwise the access key id is treated
    as a section name.

    :returns: ``(access_key_id, secret_access_key)`` tuple, or None when
        the credentials file does not exist.
    """
    path = os.getenv('AWS_SHARED_CREDENTIALS_FILE', "~/.aws/credentials")
    expanded = os.path.expanduser(path)
    # Guard against a missing file instead of letting Config fail on it
    # (matches the behavior of the sibling guarded variant of this helper).
    if not os.path.exists(expanded):
        return None
    conf = Config(expanded)
    # access_key_id is presumably a module-level global — TODO confirm.
    if access_key_id == conf.get('default', 'aws_access_key_id'):
        return (access_key_id, conf.get('default', 'aws_secret_access_key'))
    return (conf.get(access_key_id, 'aws_access_key_id'),
            conf.get(access_key_id, 'aws_secret_access_key'))
def start_moto(context):
    # Launch the moto mock-AWS S3 server in a child process and keep a
    # handle on the behave-style context so it can be stopped later.
    context.moto = Process(target=moto_main, kwargs={'argv': ['s3bucket_path']})
    context.moto.start()
    # config is presumably a module-level path to a boto config file that
    # points clients at the mock server — TODO confirm.
    if config is not None:
        push_env('BOTO_CONFIG', config)
        import boto
        import boto.connection
        import boto.provider
        from boto.pyami.config import Config
        boto.config = Config(config)  # reread configuration
        # boto.connection and boto.provider cache the config at import time,
        # so the fresh object has to be pushed into both modules as well.
        boto.connection.config = boto.provider.config = boto.config
def __init__(self, profile):
    """
    :param profile: instance of `Profile`
    """
    config_path = osp.join(profile.path, 'gstorage.boto')
    if not osp.isfile(config_path):
        return
    boto.config = Config(path=config_path)
    # Rewrite the p12 service-key entry so boto always sees an absolute
    # path (relative entries are resolved against the profile directory).
    key_path = osp.expanduser(
        boto.config.get('Credentials', 'gs_service_key_file'))
    if not osp.isabs(key_path):
        key_path = osp.join(profile.path, key_path)
    boto.config.set('Credentials', 'gs_service_key_file', key_path)
def __init__(self):
    """Open the S3 data bucket and record the local cache directory."""
    cfg = Config()
    # connect to S3 + get ref to our data bucket
    conn = S3Connection(
        cfg.get_value(settings.BOTO_SECTION, 'aws_access_key_id'),
        cfg.get_value(settings.BOTO_SECTION, 'aws_secret_access_key'),
    )
    self.bucket = conn.get_bucket(settings.S3_DATA_BUCKET)
    # this is where our local data will live
    self.base_path = os.path.abspath('./s3cache/')
def parse_aws_credentials():
    """Resolve the key pair for the module-level ``access_key_id``.

    Returns None when the shared credentials file is absent; otherwise
    returns the (access key, secret key) pair from the default profile when
    it matches, or from the section named after the access key id.
    """
    creds_file = os.path.expanduser(
        os.getenv("AWS_SHARED_CREDENTIALS_FILE", "~/.aws/credentials"))
    if not os.path.exists(creds_file):
        return None
    conf = Config(creds_file)
    # Prefer the default profile when its key id matches; otherwise the
    # access key id itself names the section to read.
    if access_key_id == conf.get("default", "aws_access_key_id"):
        section = "default"
    else:
        section = access_key_id
    return (
        conf.get(section, "aws_access_key_id"),
        conf.get(section, "aws_secret_access_key"),
    )
def write_metadata(self):
    """Write instance metadata/userdata to the boto config file and reload it.

    Produces an [Instance] section with one line per metadata key, appends
    the raw user data, then a [Pyami] section with the working directory.
    Finally re-reads the file into ``boto.config`` and re-inits logging.
    """
    # Use a context manager so the file is closed even if a metadata
    # fetch raises mid-write (the old code leaked the handle on error).
    with open(os.path.expanduser(BotoConfigPath), 'w') as fp:
        fp.write('[Instance]\n')
        inst_data = get_instance_metadata()
        for key in inst_data:
            fp.write('%s = %s\n' % (key, inst_data[key]))
        user_data = get_instance_userdata()
        fp.write('\n%s\n' % user_data)
        fp.write('[Pyami]\n')
        fp.write('working_dir = %s\n' % self.working_dir)
    # This file has the AWS credentials, should we lock it down?
    # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE)
    # now that we have written the file, read it into a pyami Config object
    boto.config = Config()
    boto.init_logging()
def get_instances_by_region(self, region):
    '''
    Makes an AWS EC2 API call to the list of instances in a particular region
    '''
    try:
        # Session token must be read manually from the credential files
        # because connect_to_region's profile support does not supply it.
        cfg = Config()
        cfg.load_credential_file(os.path.expanduser("~/.aws/credentials"))
        cfg.load_credential_file(os.path.expanduser("~/.aws/config"))
        session_token = cfg.get(self.boto_profile, "aws_session_token")
        conn = ec2.connect_to_region(region,
                                     security_token=session_token,
                                     profile_name=self.boto_profile)
        # connect_to_region will fail "silently" by returning None if the
        # region name is wrong or not supported
        if conn is None:
            print("region name: {} likely not supported, or AWS is down. "
                  "connection to region failed.".format(region))
            sys.exit(1)
        reservations = conn.get_all_instances(filters=self.filters)
        # Bastion host (if any) is used when registering each instance.
        bastion_ip = self.find_bastion_box(conn)
        # Flatten reservations into a single list of instances.
        instances = []
        for reservation in reservations:
            instances.extend(reservation.instances)
        # sort the instance based on name and index, in this order
        def sort_key(instance):
            name = instance.tags.get('Name', '')
            return "{}-{}".format(name, instance.id)
        for instance in sorted(instances, key=sort_key):
            self.add_instance(bastion_ip, instance, region)
    except boto.provider.ProfileNotFoundError as e:
        # e.message is Python 2 only — NOTE(review): would break on py3.
        raise Exception(
            "{}, configure it with 'aws configure --profile {}'".format(
                e.message, self.boto_profile))
    except boto.exception.BotoServerError as e:
        print(e)
        sys.exit(1)
def setUp(self):
    """Install a mocked boto config with cert validation and dummy keys."""
    cfg = Config()
    # Enable https_validate_certificates.
    cfg.add_section('Boto')
    cfg.setbool('Boto', 'https_validate_certificates', True)
    # Set up bogus credentials so that the auth module is willing to go
    # ahead and make a request; the request should fail with a service-level
    # error if it does get to the service (S3 or GS).
    cfg.add_section('Credentials')
    for option in ('gs_access_key_id', 'gs_secret_access_key',
                   'aws_access_key_id', 'aws_secret_access_key'):
        cfg.set('Credentials', option, 'xyz')
    self.config = cfg
    self._config_patch = mock.patch('boto.config', self.config)
    self._config_patch.start()
def setUp(self):
    """Prepare an isolated command runner with a credential-less config.

    Redirects the gsutil update-check timestamp file to a temp file,
    auto-answers the update prompt with 'y', and swaps ``boto.config``
    (and its copy in ``boto.connection``) for a minimal test config.
    """
    super(TestCommandRunnerIntegrationTests, self).setUp()
    # Mock out the timestamp file so we can manipulate it.
    self.previous_update_file = (
        command_runner.LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE)
    self.timestamp_file = self.CreateTempFile(contents='0')
    command_runner.LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE = (
        self.timestamp_file)
    # Mock out raw_input to trigger yes prompt.
    command_runner.raw_input = lambda p: 'y'
    # Create a credential-less boto config file.  (Dropped the unused
    # "path" alias the old code bound alongside config_file.)
    self.orig_config = boto.config
    config_file = self.CreateTempFile(
        contents='[GSUtil]\nsoftware_update_check_period=1')
    boto.config = Config(path=config_file)
    # Need to copy config into boto.connection.config because it gets loaded
    # before tests run.
    boto.connection.config = boto.config
    self.command_runner = command_runner.CommandRunner(config_file)
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
    """
    Create a new instance based on the specified configuration file or the specified
    configuration and the passed in parameters.

    If the config_file argument is not None, the configuration is read from there.
    Otherwise, the cfg argument is used.

    The config file may include other config files with a #import reference. The included
    config files must reside in the same directory as the specified file.

    The logical_volume argument, if supplied, will be used to get the current physical
    volume ID and use that as an override of the value specified in the config file. This
    may be useful for debugging purposes when you want to debug with a production config
    file but a test Volume.

    The dictionary argument may be used to override any EC2 configuration values in the
    config file.
    """
    if config_file:
        cfg = Config(path=config_file)
    if cfg.has_section('EC2'):
        # include any EC2 configuration values that aren't specified in params:
        for option in cfg.options('EC2'):
            if option not in params:
                params[option] = cfg.get('EC2', option)
    # Interactively prompt for any parameters still missing.
    getter = CommandLineGetter()
    getter.get(cls, params)
    region = params.get('region')
    ec2 = region.connect()
    # Embed the connection's credentials into the config, which is shipped
    # to the instance as user data below.
    cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
    ami = params.get('ami')
    kp = params.get('keypair')
    group = params.get('group')
    zone = params.get('zone')
    # deal with possibly passed in logical volume:
    if logical_volume != None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name)
    cfg_fp = StringIO()
    cfg.write(cfg_fp)
    # deal with the possibility that zone and/or keypair are strings read from the config file:
    if isinstance(zone, Zone):
        zone = zone.name
    if isinstance(kp, KeyPair):
        kp = kp.name
    reservation = ami.run(min_count=1,
                          max_count=params.get('quantity', 1),
                          key_name=kp,
                          security_groups=[group],
                          instance_type=params.get('instance_type'),
                          placement = zone,
                          user_data = cfg_fp.getvalue())
    l = []
    i = 0
    elastic_ip = params.get('elastic_ip')
    instances = reservation.instances
    if elastic_ip is not None and instances.__len__() > 0:
        instance = instances[0]
        print('Waiting for instance to start so we can set its elastic IP address...')
        # Sometimes we get a message from ec2 that says that the instance does not exist.
        # Hopefully the following delay will giv eec2 enough time to get to a stable state:
        time.sleep(5)
        while instance.update() != 'running':
            time.sleep(1)
        instance.use_ip(elastic_ip)
        print('set the elastic IP of the first instance to %s' % elastic_ip)
    for instance in instances:
        s = cls()
        s.ec2 = ec2
        # NOTE(review): the conditional expression spans the whole right-hand
        # side, so for i > 0 the name is just str(i) — confirm this is intended.
        s.name = params.get('name') + '' if i==0 else str(i)
        s.description = params.get('description')
        s.region_name = region.name
        s.instance_id = instance.id
        if elastic_ip and i == 0:
            s.elastic_ip = elastic_ip
        s.put()
        l.append(s)
        i += 1
    return l
def load_config(self):
    """Load this server's configuration from the "botoConfigs" SDB domain."""
    cfg = Config(do_load=False)
    cfg.load_from_sdb("botoConfigs", self.id)
    self._config = cfg
def assume_identity(config, profile):
    """Resolve credentials for *profile* and assume its role if one is set.

    :param config: mutable app-config mapping; caches shared credentials,
        assumed-role results and update timestamps between calls.
    :param profile: AWS profile name to resolve.
    :returns: None when no refresh is needed, False when a cached assumed
        role is still valid, True after a new role was assumed.
    :raises ProfileNotFoundError: profile (or its source_profile) missing
        or lacking an access/secret key pair.
    """
    # if AWS_PROFILE was the option last used, and it didn't require assuming a role
    if config.get('AWS_PROFILE_REFRESH_NOT_NEEDED'):
        return None
    # Serialize all credential refreshes on a shared lock.
    _config_lock = config.get('CONFIG_LOCK') or config_lock
    _config_lock.acquire()
    if 'assumed_roles' not in config:
        config['assumed_roles'] = {}
    if 'role_last_updated' not in config:
        config['role_last_updated'] = {}
    try:
        # A cached role that is not within 900s of expiry needs no refresh.
        assumed_roles = config.get('assumed_roles', {})
        assumed_role = assumed_roles.get(profile)
        if assumed_role and not assumed_role.credentials.is_expired(
                time_offset_seconds=900):
            return False
        # fetch the credentials from the aws configs
        shared_credentials = config.get('AWS_SHARED_CREDENTIALS')
        if not shared_credentials:
            config_path = config.get('AWS_CONFIG_FILE') or os.environ.get(
                'AWS_CONFIG_FILE') or os.path.join(expanduser('~'), '.aws',
                                                  'config')
            # Credentials path is derived by swapping /config for /credentials.
            credentials_path = (config.get('AWS_CONFIG_FILE') or
                                os.environ.get('AWS_CONFIG_FILE') or
                                os.path.join(expanduser('~'), '.aws',
                                             'credentials')).replace(
                                                 '/config', '/credentials')
            shared_credentials = Config(do_load=False)
            if os.path.isfile(credentials_path):
                shared_credentials.load_from_path(credentials_path)
            if os.path.isfile(config_path):
                shared_credentials.load_from_path(config_path)
            config['AWS_SHARED_CREDENTIALS'] = shared_credentials
        # The config file may name the section either "name" or "profile name".
        profile_key = profile
        if not shared_credentials.has_section(profile_key):
            profile_key = 'profile {}'.format(profile_key)
        if not shared_credentials.has_section(profile_key):
            raise ProfileNotFoundError('Profile {} not found'.format(
                config['AWS_PROFILE']))
        # no matter what, get the access and secret key pair
        if all([
                shared_credentials.has_option(profile_key, x)
                for x in ('aws_access_key_id', 'aws_secret_access_key')
        ]):
            aws_access_key_id = shared_credentials.get(profile_key,
                                                       'aws_access_key_id')
            aws_secret_access_key = shared_credentials.get(
                profile_key, 'aws_secret_access_key')
        elif shared_credentials.has_option(profile_key, 'source_profile'):
            # Fall back to the source profile's key pair.
            source_profile_key = shared_credentials.get(
                profile_key, 'source_profile')
            if not shared_credentials.has_section(source_profile_key):
                source_profile_key = 'profile {}'.format(source_profile_key)
            if not shared_credentials.has_section(source_profile_key):
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} not found'.format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))
            # source_section = shared_credentials['_sections'][source_profile_key]
            if all([
                    shared_credentials.has_option(source_profile_key, x)
                    for x in ('aws_access_key_id', 'aws_secret_access_key')
            ]):
                aws_access_key_id = shared_credentials.get(
                    source_profile_key, 'aws_access_key_id')
                aws_secret_access_key = shared_credentials.get(
                    source_profile_key, 'aws_secret_access_key')
            else:
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} has no access or secret key'
                    .format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))
        # if there's a role_arn, use it to assume a role
        if shared_credentials.has_option(profile_key, 'role_arn'):
            role_arn = shared_credentials.get(profile_key, 'role_arn')
            sts_connection = sts.STSConnection(
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key)
            config['assumed_roles'][profile] = sts_connection.assume_role(
                role_arn, ROLE_SESSION_NAME, policy=None,
                duration_seconds=960)
            # Timestamp trimmed to seconds, marked as UTC.
            config['role_last_updated'][profile] = datetime.datetime.utcnow(
            ).isoformat()[:19] + 'Z'
            return True
    finally:
        # Always release, whether we returned early or raised.
        _config_lock.release()
from boto.compat import urlparse
from boto.exception import InvalidUriError

__version__ = '2.34.0'
Version = __version__  # for backware compatibility

# http://bugs.python.org/issue7980
# Pre-import strptime's internals on the main thread to dodge the
# threading bug described in the issue above.
datetime.datetime.strptime('', '')

UserAgent = 'Boto/%s Python/%s %s/%s' % (
    __version__, platform.python_version(),
    platform.system(), platform.release()
)
# Module-global boto configuration object.
config = Config()

# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
                           r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json')


def init_logging():
    # Try each candidate boto config location as a logging config file.
    # NOTE: this definition continues beyond the visible excerpt (the try
    # block's handler is not shown here).
    for file in BotoConfigLocations:
        try:
            logging.config.fileConfig(os.path.expanduser(file))
def set_config(self, config_path=None):
    """Load the Config at *config_path*, prompting for a file if not given."""
    path = config_path or self.get_filename('Specify Config file')
    self.config = Config(path=path)