Example #1
    def __init__(self, name, access_key=None, secret_key=None,
                 security_token=None, profile_name=None):
        self.host = None
        self.port = None
        self.host_header = None
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.profile_name = profile_name
        self.name = name
        self.acl_class = self.AclClassMap[self.name]
        self.canned_acls = self.CannedAclsMap[self.name]
        self._credential_expiry_time = None

        # Load shared credentials file if it exists
        shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
        self.shared_credentials = Config(do_load=False)
        if os.path.isfile(shared_path):
            self.shared_credentials.load_from_path(shared_path)

        self.get_credentials(access_key, secret_key, security_token, profile_name)
        self.configure_headers()
        self.configure_errors()

        # Allow config file to override default host and port.
        host_opt_name = '%s_host' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_opt_name):
            self.host = config.get('Credentials', host_opt_name)
        port_opt_name = '%s_port' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', port_opt_name):
            self.port = config.getint('Credentials', port_opt_name)
        host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_header_opt_name):
            self.host_header = config.get('Credentials', host_header_opt_name)
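The constructor above lets the boto config file override the provider's default endpoint. Below is a minimal, hypothetical sketch of that lookup; the 's3' option prefix is an assumption standing in for whatever HostKeyMap[self.name] resolves to.

# Hypothetical sketch of the [Credentials] host/port overrides read above.
from boto.pyami.config import Config

cfg = Config(do_load=False)        # start from an empty config instead of ~/.boto
cfg.add_section('Credentials')
cfg.set('Credentials', 's3_host', 's3.internal.example.com')
cfg.set('Credentials', 's3_port', '8443')

if cfg.has_option('Credentials', 's3_host'):
    host = cfg.get('Credentials', 's3_host')
if cfg.has_option('Credentials', 's3_port'):
    port = cfg.getint('Credentials', 's3_port')
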
Example #2
def get_s3_client():
    config = Config()
    access_key = config.get_value(settings.BOTO_SECTION, "aws_access_key_id")
    secret_key = config.get_value(settings.BOTO_SECTION, "aws_secret_access_key")
    return boto3.client(
        "s3", aws_access_key_id=access_key, aws_secret_access_key=secret_key
    )
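A hedged usage sketch of the helper above, assuming the config section named by settings.BOTO_SECTION really contains both keys and that the credentials are valid:

# Hypothetical usage of get_s3_client() defined above.
s3 = get_s3_client()
for bucket in s3.list_buckets().get("Buckets", []):
    print(bucket["Name"])
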
Example #3
 def getConfig(self):
     if not self._config:
         remote_file = BotoConfigPath
         local_file = '%s.ini' % self.instance.id
         self.get_file(remote_file, local_file)
         self._config = Config(local_file)
     return self._config
Example #4
 def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None):
     Config.__init__(self, config_file)
     self.aws_access_key_id = aws_access_key_id
     self.aws_secret_access_key = aws_secret_access_key
     script = Config.get(self, 'Pyami', 'scripts')
     if script:
         self.name = script.split('.')[-1]
     else:
         self.name = None
Example #5
File: server.py Project: bopopescu/boto-1
 def create(cls, config_file=None, logical_volume=None, cfg=None, **params):
     if config_file:
         cfg = Config(path=config_file)
     if cfg.has_section('EC2'):
         # include any EC2 configuration values that aren't specified in params:
         for option in cfg.options('EC2'):
             if option not in params:
                 params[option] = cfg.get('EC2', option)
     getter = CommandLineGetter()
     getter.get(cls, params)
     region = params.get('region')
     ec2 = region.connect()
     cls.add_credentials(cfg, ec2.aws_access_key_id,
                         ec2.aws_secret_access_key)
     ami = params.get('ami')
     kp = params.get('keypair')
     group = params.get('group')
     zone = params.get('zone')
     # deal with possibly passed in logical volume:
     if logical_volume != None:
         cfg.set('EBS', 'logical_volume_name', logical_volume.name)
     cfg_fp = StringIO.StringIO()
     cfg.write(cfg_fp)
     # deal with the possibility that zone and/or keypair are strings read from the config file:
     if isinstance(zone, Zone):
         zone = zone.name
     if isinstance(kp, KeyPair):
         kp = kp.name
     reservation = ami.run(min_count=1,
                           max_count=params.get('quantity', 1),
                           key_name=kp,
                           security_groups=[group],
                           instance_type=params.get('instance_type'),
                           placement=zone,
                           user_data=cfg_fp.getvalue())
     l = []
     i = 0
     elastic_ip = params.get('elastic_ip')
     instances = reservation.instances
     if elastic_ip != None and instances.__len__() > 0:
         instance = instances[0]
         print 'Waiting for instance to start so we can set its elastic IP address...'
         while instance.update() != 'running':
             time.sleep(1)
         instance.use_ip(elastic_ip)
         print 'set the elastic IP of the first instance to %s' % elastic_ip
     for instance in instances:
         s = cls()
         s.ec2 = ec2
         s.name = params.get('name') + ('' if i == 0 else str(i))  # first instance keeps the base name; later ones get an index suffix
         s.description = params.get('description')
         s.region_name = region.name
         s.instance_id = instance.id
         if elastic_ip and i == 0:
             s.elastic_ip = elastic_ip
         s.put()
         l.append(s)
         i += 1
     return l
Example #6
    def parse_aws_credentials():
        path = os.getenv('AWS_SHARED_CREDENTIALS_FILE', "~/.aws/credentials")
        if not os.path.exists(os.path.expanduser(path)):
            return None

        conf = Config(os.path.expanduser(path))

        if access_key_id == conf.get('default', 'aws_access_key_id'):
            return (access_key_id, conf.get('default', 'aws_secret_access_key'))
        return (conf.get(access_key_id, 'aws_access_key_id'),
                conf.get(access_key_id, 'aws_secret_access_key'))
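parse_aws_credentials() above reads the shared credentials file as a plain ini: it answers from [default] when the key id matches, and otherwise expects a profile section named after the access key id. A hypothetical file layout matching that lookup:

# Hypothetical credentials layout for parse_aws_credentials() above; the profile
# section named after a key id is an assumption taken from the code.
sample = """\
[default]
aws_access_key_id = AKIADEFAULTKEY
aws_secret_access_key = default-secret

[AKIAOTHERKEY]
aws_access_key_id = AKIAOTHERKEY
aws_secret_access_key = other-secret
"""
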
Example #7
    def __init__(self):
        config = Config()
        access_key = config.get_value(settings.BOTO_SECTION,
                                      'aws_access_key_id')
        secret_key = config.get_value(settings.BOTO_SECTION,
                                      'aws_secret_access_key')

        # connect to S3 + get ref to our data bucket
        conn = S3Connection(access_key, secret_key)
        self.bucket = conn.get_bucket(settings.S3_DATA_BUCKET)

        # this is where our local data will live
        self.base_path = os.path.abspath('./s3cache/')
Example #8
def copy_aws_credentials(src_fname, dst_fname, region):
    """
    Opens a Boto file, changes the region and saves it to a new file, changing
    the ec2 region.
    """
    log("copy_aws_credentials from src[%s] to [%s] in region [%s]" % (
                                                                src_fname,
                                                                dst_fname,
                                                                region))

    ec2_region_endpoint = {"us-east-1": "ec2.us-east-1.amazonaws.com",
            "us-west-2": "ec2.us-west-2.amazonaws.com",
            "us-west-1": "ec2.us-west-1.amazonaws.com",
            "eu-west-1": "ec2.eu-west-1.amazonaws.com",
            "ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com",
            "ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com",
            "ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com",
            "sa-east-1": "ec2.sa-east-1.amazonaws.com"}[region]
    creds = BotoConfig(src_fname)

    # check for AZ override in the CloudSim section
    az = creds.get('CloudSim', region)
    if az in ['any', None]:
        az = region  # use region without a specific AZ
    print(src_fname, dst_fname, region)
    creds.set('Boto', 'ec2_region_name', az)
    log("copy_aws_credentials: using Availability Zone: %s" % az)
    creds.set('Boto', 'ec2_region_endpoint', ec2_region_endpoint)
    with open(dst_fname, 'w') as f:
        creds.write(f)
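copy_aws_credentials() above rewrites the [Boto] section and reads a per-region availability-zone override from a [CloudSim] section. A hypothetical minimal source file; the per-region option name under [CloudSim] is inferred from creds.get('CloudSim', region), and the values are placeholders.

# Hypothetical minimal src_fname contents for copy_aws_credentials() above.
sample = """\
[Credentials]
aws_access_key_id = AKIAEXAMPLE
aws_secret_access_key = example-secret

[Boto]
ec2_region_name = us-east-1
ec2_region_endpoint = ec2.us-east-1.amazonaws.com

[CloudSim]
us-west-2 = any
"""
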
Example #9
 def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
     if config_file:
         cfg = Config(path=config_file)
     if cfg.has_section('EC2'):
         # include any EC2 configuration values that aren't specified in params:
         for option in cfg.options('EC2'):
             if option not in params:
                 params[option] = cfg.get('EC2', option)
     getter = CommandLineGetter()
     getter.get(cls, params)
     region = params.get('region')
     ec2 = region.connect()
     cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
     ami = params.get('ami')
     kp = params.get('keypair')
     group = params.get('group')
     zone = params.get('zone')
     # deal with possibly passed in logical volume:
     if logical_volume != None:
        cfg.set('EBS', 'logical_volume_name', logical_volume.name) 
     cfg_fp = StringIO.StringIO()
     cfg.write(cfg_fp)
     # deal with the possibility that zone and/or keypair are strings read from the config file:
     if isinstance(zone, Zone):
         zone = zone.name
     if isinstance(kp, KeyPair):
         kp = kp.name
     reservation = ami.run(min_count=1,
                           max_count=params.get('quantity', 1),
                           key_name=kp,
                           security_groups=[group],
                           instance_type=params.get('instance_type'),
                           placement = zone,
                           user_data = cfg_fp.getvalue())
     l = []
     i = 0
     elastic_ip = params.get('elastic_ip')
     instances = reservation.instances
     if elastic_ip != None and instances.__len__() > 0:
         instance = instances[0]
         print 'Waiting for instance to start so we can set its elastic IP address...'
         while instance.update() != 'running':
             time.sleep(1)
         instance.use_ip(elastic_ip)
         print 'set the elastic IP of the first instance to %s' % elastic_ip
     for instance in instances:
         s = cls()
         s.ec2 = ec2
         s.name = params.get('name') + ('' if i == 0 else str(i))  # first instance keeps the base name; later ones get an index suffix
         s.description = params.get('description')
         s.region_name = region.name
         s.instance_id = instance.id
         if elastic_ip and i == 0:
             s.elastic_ip = elastic_ip
         s.put()
         l.append(s)
         i += 1
     return l
Example #10
File: servicedef.py Project: 0t3dWCE/boto
 def getint(self, option, default=0):
     try:
         val = Config.get(self, self.name, option)
         val = int(val)
     except:
         val = int(default)
     return val
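The getint() wrapper above pins the section to self.name and falls back to int(default) on any error. A small sketch of the underlying boto Config calls it delegates to; 'MyService' is a made-up stand-in for self.name.

from boto.pyami.config import Config

cfg = Config(do_load=False)
cfg.add_section('MyService')                   # stands in for self.name
cfg.set('MyService', 'port', '8080')

print(cfg.getint('MyService', 'port'))         # 8080
print(cfg.getint('MyService', 'missing', 99))  # missing option falls back to the default
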
Example #11
def init_config(filepath=None, enable_boto=True, enable_botocore=False):
    # Make sure these names are bound even if the boto branch below is skipped.
    access_key = secret_key = None
    # The default credential file is expected to sit next to this module.
    if filepath is None or not os.path.exists(filepath):
        pwdpath = dirname(realpath(__file__))
        filepath = pathjoin(pwdpath, CONFIG)

    if enable_boto:
        # Initialize credentials for boto
        from boto.pyami.config import Config
        boto.config = Config(filepath)

        access_key = boto.config.get('Credentials', AWS_ACCESS_KEY_NAME, None)
        secret_key = boto.config.get('Credentials', AWS_SECRET_KEY_NAME, None)

        # FIXME: a trick when the value is empty
        if not access_key or not secret_key:
            boto.config.remove_section('Credentials')

    if enable_botocore:
        # Initialize credentials for botocore
        import botocore.credentials

        if access_key and secret_key:

            def get_credentials(session, metadata=None):
                return botocore.credentials.Credentials(access_key, secret_key)

            botocore.credentials.get_credentials = get_credentials

    if access_key and secret_key:
        return access_key, secret_key
Example #12
    def __init__(self, name, access_key=None, secret_key=None,
                 security_token=None, profile_name=None):
        self.host = None
        self.port = None
        self.host_header = None
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.profile_name = profile_name
        self.name = name
        self.acl_class = self.AclClassMap[self.name]
        self.canned_acls = self.CannedAclsMap[self.name]
        self._credential_expiry_time = None

        # Load shared credentials file if it exists
        shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
        self.shared_credentials = Config(do_load=False)
        if os.path.isfile(shared_path):
            self.shared_credentials.load_from_path(shared_path)

        self.get_credentials(access_key, secret_key, security_token, profile_name)
        self.configure_headers()
        self.configure_errors()

        # Allow config file to override default host and port.
        host_opt_name = '%s_host' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_opt_name):
            self.host = config.get('Credentials', host_opt_name)
        port_opt_name = '%s_port' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', port_opt_name):
            self.port = config.getint('Credentials', port_opt_name)
        host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_header_opt_name):
            self.host_header = config.get('Credentials', host_header_opt_name)
Example #13
 def getConfig(self):
     if not self._config:
         remote_file = BotoConfigPath
         local_file = '%s.ini' % self.instance.id
         self.get_file(remote_file, local_file)
         self._config = Config(local_file)
     return self._config
Example #14
 def getint(self, option, default=0):
     try:
         val = Config.get(self, self.name, option)
         val = int(val)
     except:
         val = int(default)
     return val
Example #15
    def setUp(self):
        self.config = Config()

        # Enable https_validate_certificates.
        self.config.add_section('Boto')
        self.config.setbool('Boto', 'https_validate_certificates', True)

        # Set up bogus credentials so that the auth module is willing to go
        # ahead and make a request; the request should fail with a service-level
        # error if it does get to the service (S3 or GS).
        self.config.add_section('Credentials')
        self.config.set('Credentials', 'gs_access_key_id', 'xyz')
        self.config.set('Credentials', 'gs_secret_access_key', 'xyz')
        self.config.set('Credentials', 'aws_access_key_id', 'xyz')
        self.config.set('Credentials', 'aws_secret_access_key', 'xyz')

        self._config_patch = mock.patch('boto.config', self.config)
        self._config_patch.start()
Example #16
 def getbool(self, option, default=False):
     try:
         val = Config.get(self, self.name, option)
         if val.lower() == 'true':
             val = True
         else:
             val = False
     except:
         val = default
     return val
Example #17
File: servicedef.py Project: 0t3dWCE/boto
 def getbool(self, option, default=False):
     try:
         val = Config.get(self, self.name, option)
         if val.lower() == 'true':
             val = True
         else:
             val = False
     except:
         val = default
     return val
Example #18
def start_moto(context):
    context.moto = Process(target=moto_main,
                           kwargs={'argv': ['s3bucket_path']})
    context.moto.start()
    if config is not None:
        push_env('BOTO_CONFIG', config)
        import boto
        import boto.connection
        import boto.provider
        from boto.pyami.config import Config
        boto.config = Config(config)  # reread configuration
        boto.connection.config = boto.provider.config = boto.config
Example #19
def get_aws_credentials(config_file):
    # Try to read .boto configuration from several places (later ones take precedence).
    # Config() silently ignores a missing file, so check for existence first to avoid
    # clobbering an earlier, valid configuration with an empty one.
    boto_cfg = None
    home_cfg = os.path.join(os.path.expanduser('~'), '.boto')
    if os.path.exists(home_cfg):                      # user home directory (~/.boto)
        boto_cfg = Config(home_cfg)
    if os.path.exists('.boto'):                       # current directory (./.boto)
        boto_cfg = Config('.boto')
    if config_file and os.path.exists(config_file):   # command line option (--config <file>)
        boto_cfg = Config(config_file)

    # Load the AWS key credentials
    try:
        access_key = boto_cfg.get('Credentials', 'aws_access_key_id')
        secret_key = boto_cfg.get('Credentials', 'aws_secret_access_key')
    except:
        print >> sys.stderr, 'Could not find .boto config file'
        sys.exit(1)
        
    return (access_key, secret_key)
Example #20
 def __init__(self, profile):
     """
     :param profile: instance of `Profile`
     """
     boto_config = osp.join(profile.path, 'gstorage.boto')
     if osp.isfile(boto_config):
         boto.config = Config(path=boto_config)
         # patch config to have absolute path to p12 key
         p12_key_file = boto.config.get('Credentials', 'gs_service_key_file')
         p12_key_file = osp.expanduser(p12_key_file)
         if not osp.isabs(p12_key_file):
             p12_key_file = osp.join(profile.path, p12_key_file)
         boto.config.set('Credentials', 'gs_service_key_file', p12_key_file)
Example #21
    def parse_aws_credentials():
        path = os.getenv('AWS_SHARED_CREDENTIALS_FILE', "~/.aws/credentials")
        conf = Config(os.path.expanduser(path))

        if access_key_id == conf.get('default', 'aws_access_key_id'):
            return (access_key_id, conf.get('default',
                                            'aws_secret_access_key'))
        return (conf.get(access_key_id, 'aws_access_key_id'),
                conf.get(access_key_id, 'aws_secret_access_key'))
Example #22
 def write_metadata(self):
     fp = open(os.path.expanduser(BotoConfigPath), 'w')
     fp.write('[Instance]\n')
     inst_data = get_instance_metadata()
     for key in inst_data:
         fp.write('%s = %s\n' % (key, inst_data[key]))
     user_data = get_instance_userdata()
     fp.write('\n%s\n' % user_data)
     fp.write('[Pyami]\n')
     fp.write('working_dir = %s\n' % self.working_dir)
     fp.close()
     # This file has the AWS credentials, should we lock it down?
     # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE)
     # now that we have written the file, read it into a pyami Config object
     boto.config = Config()
     boto.init_logging()
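write_metadata() above serializes the instance metadata into an [Instance] section, appends the raw user data (presumably containing further config sections), adds a [Pyami] section, and then reloads boto's configuration. A rough, hypothetical shape of the resulting file; the actual keys come from the metadata service.

# Hypothetical shape of the file written by write_metadata() above; real keys come
# from get_instance_metadata(), and the user data block is appended verbatim.
expected = """\
[Instance]
instance-id = i-0123456789abcdef0
ami-id = ami-12345678

[Credentials]
aws_access_key_id = AKIAEXAMPLE
aws_secret_access_key = example-secret

[Pyami]
working_dir = /mnt/pyami
"""
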
Example #23
    def setUp(self):
        self.config = Config()

        # Enable https_validate_certificates.
        self.config.add_section('Boto')
        self.config.setbool('Boto', 'https_validate_certificates', True)

        # Set up bogus credentials so that the auth module is willing to go
        # ahead and make a request; the request should fail with a service-level
        # error if it does get to the service (S3 or GS).
        self.config.add_section('Credentials')
        self.config.set('Credentials', 'gs_access_key_id', 'xyz')
        self.config.set('Credentials', 'gs_secret_access_key', 'xyz')
        self.config.set('Credentials', 'aws_access_key_id', 'xyz')
        self.config.set('Credentials', 'aws_secret_access_key', 'xyz')

        self._config_patch = mock.patch('boto.config', self.config)
        self._config_patch.start()
Example #24
    def parse_aws_credentials():
        path = os.getenv("AWS_SHARED_CREDENTIALS_FILE", "~/.aws/credentials")
        if not os.path.exists(os.path.expanduser(path)):
            return None

        conf = Config(os.path.expanduser(path))

        if access_key_id == conf.get("default", "aws_access_key_id"):
            return (access_key_id, conf.get("default",
                                            "aws_secret_access_key"))
        return (
            conf.get(access_key_id, "aws_access_key_id"),
            conf.get(access_key_id, "aws_secret_access_key"),
        )
Example #25
    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to list the instances in a particular
        region '''

        try:
            cfg = Config()
            cfg.load_credential_file(os.path.expanduser("~/.aws/credentials"))
            cfg.load_credential_file(os.path.expanduser("~/.aws/config"))
            session_token = cfg.get(self.boto_profile, "aws_session_token")

            conn = ec2.connect_to_region(region,
                                         security_token=session_token,
                                         profile_name=self.boto_profile)

            # connect_to_region will fail "silently" by returning None if the
            # region name is wrong or not supported
            if conn is None:
                print("region name: {} likely not supported, or AWS is down. "
                      "connection to region failed.".format(region))
                sys.exit(1)

            reservations = conn.get_all_instances(filters=self.filters)

            bastion_ip = self.find_bastion_box(conn)

            instances = []
            for reservation in reservations:
                instances.extend(reservation.instances)

            # sort the instance based on name and index, in this order
            def sort_key(instance):
                name = instance.tags.get('Name', '')
                return "{}-{}".format(name, instance.id)

            for instance in sorted(instances, key=sort_key):
                self.add_instance(bastion_ip, instance, region)

        except boto.provider.ProfileNotFoundError as e:
            raise Exception(
                "{}, configure it with 'aws configure --profile {}'".format(
                    e.message, self.boto_profile))

        except boto.exception.BotoServerError as e:
            print(e)
            sys.exit(1)
Example #26
    def setUp(self):
        super(TestCommandRunnerIntegrationTests, self).setUp()

        # Mock out the timestamp file so we can manipulate it.
        self.previous_update_file = (
            command_runner.LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE)
        self.timestamp_file = self.CreateTempFile(contents='0')
        command_runner.LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE = (
            self.timestamp_file)

        # Mock out raw_input to trigger yes prompt.
        command_runner.raw_input = lambda p: 'y'

        # Create a credential-less boto config file.
        self.orig_config = boto.config
        config_file = self.CreateTempFile(
            contents='[GSUtil]\nsoftware_update_check_period=1')
        boto.config = Config(path=config_file)
        # Need to copy config into boto.connection.config because it gets loaded
        # before tests run.
        boto.connection.config = boto.config
        self.command_runner = command_runner.CommandRunner(config_file)
Example #27
File: order.py Project: carlgao/lenga
class Item(IObject):
    
    def __init__(self, ec2_conn):
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = ec2_conn
        self.config = None
        self.userdata = {}  # backing store for set_userdata()/get_userdata() below

    def set_userdata(self, key, value):
        self.userdata[key] = value

    def get_userdata(self, key):
        return self.userdata[key]

    def set_name(self, name=None):
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        if instance_type:
            self.instance_type = instance_type
        else:
            self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')

    def set_quantity(self, n=0):
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')
            
    def set_ami(self, ami=None):
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
            self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain.name)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        s = StringIO.StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
Example #28
import os
import sys
from boto.pyami.config import Config
from fabric.colors import red

# Load the configuration file
if os.path.exists('config.ini'):
    boto_config = Config()
    boto_config.load_credential_file('config.ini')
    if boto_config.items('Credentials'):
        AWS_ID = boto_config.get('Credentials', 'aws_access_key_id')
        AWS_KEY = boto_config.get('Credentials', 'aws_secret_access_key')
        REGION = boto_config.get('Credentials', 'region')
    else:
        print(red('Error: credentials section is missing, abort!'))
        sys.exit(1)
    if boto_config.items('Config'):
        DEFAULT_OS = boto_config.get('Config', 'default_os')
        DEFAULT_SSH_DIR = os.path.expanduser(boto_config.get('Config', 'default_ssh_dir'))
        DEFAULT_FILE_DIR = os.path.expanduser(boto_config.get('Config', 'default_file_dir'))
        DEFAULT_INTERNAL_DOMAIN = boto_config.get('Config', 'default_internal_domain')
    else:
        print(red('Error: config section is missing, abort!'))
        sys.exit(1)
else:
    print(red('Error: configuration file missing, abort!'))
    sys.exit(1)

AWS_REGIONS = {
    'ap-northeast-1': 'Asia Pacific (Tokyo)',
    'ap-southeast-1': 'Asia Pacific (Singapore)',
Example #29
class CertValidationTest(unittest.TestCase):
    def setUp(self):
        self.config = Config()

        # Enable https_validate_certificates.
        self.config.add_section('Boto')
        self.config.setbool('Boto', 'https_validate_certificates', True)

        # Set up bogus credentials so that the auth module is willing to go
        # ahead and make a request; the request should fail with a service-level
        # error if it does get to the service (S3 or GS).
        self.config.add_section('Credentials')
        self.config.set('Credentials', 'gs_access_key_id', 'xyz')
        self.config.set('Credentials', 'gs_secret_access_key', 'xyz')
        self.config.set('Credentials', 'aws_access_key_id', 'xyz')
        self.config.set('Credentials', 'aws_secret_access_key', 'xyz')

        self._config_patch = mock.patch('boto.config', self.config)
        self._config_patch.start()

    def tearDown(self):
        self._config_patch.stop()

    def enableProxy(self):
        self.config.set('Boto', 'proxy', PROXY_HOST)
        self.config.set('Boto', 'proxy_port', PROXY_PORT)

    def assertConnectionThrows(self, connection_class, error):
        conn = connection_class('fake_id', 'fake_secret')
        self.assertRaises(error, conn.get_all_buckets)

    def do_test_valid_cert(self):
        # When connecting to actual servers with bundled root certificates, no
        # cert errors should be thrown; instead we will get "invalid
        # credentials" errors since the config used does not contain any
        # credentials.
        self.assertConnectionThrows(S3Connection, exception.S3ResponseError)
        self.assertConnectionThrows(GSConnection, exception.GSResponseError)

    def test_valid_cert(self):
        self.do_test_valid_cert()

    def test_valid_cert_with_proxy(self):
        self.enableProxy()
        self.do_test_valid_cert()

    def do_test_invalid_signature(self):
        self.config.set('Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
        self.assertConnectionThrows(S3Connection, ssl.SSLError)
        self.assertConnectionThrows(GSConnection, ssl.SSLError)

    def test_invalid_signature(self):
        self.do_test_invalid_signature()

    def test_invalid_signature_with_proxy(self):
        self.enableProxy()
        self.do_test_invalid_signature()

    def do_test_invalid_host(self):
        self.config.set('Credentials', 'gs_host', INVALID_HOSTNAME_HOST)
        self.config.set('Credentials', 's3_host', INVALID_HOSTNAME_HOST)
        self.assertConnectionThrows(S3Connection, ssl.SSLError)
        self.assertConnectionThrows(GSConnection, ssl.SSLError)

    def do_test_invalid_host(self):
        self.config.set('Credentials', 'gs_host', INVALID_HOSTNAME_HOST)
        self.config.set('Credentials', 's3_host', INVALID_HOSTNAME_HOST)
        self.assertConnectionThrows(
                S3Connection, https_connection.InvalidCertificateException)
        self.assertConnectionThrows(
                GSConnection, https_connection.InvalidCertificateException)

    def test_invalid_host(self):
        self.do_test_invalid_host()

    def test_invalid_host_with_proxy(self):
        self.enableProxy()
        self.do_test_invalid_host()
Example #30
    def loadConfig(self, path=None):
        # Get all the Configuration
        config = Config(path=path)
        self.aws_access_key_id = config.get('Credentials', 'aws_access_key_id')
        self.aws_secret_access_key = config.get('Credentials',
                                                'aws_secret_access_key')
        self.key_name = config.get('Key', 'key_name')
        self.instance_type = config.get('Instance', 'instance_type')
        self.zone = config.get('Instance', 'zone', default='us-east-1c')
        self.security_groups = config.get('Instance', 'security_groups')
        self.tags = config.get('Instance', 'tags')

        self.os = config.get('Type', 'os')
        self.num_nodes = int(config.get('Type', 'num_nodes'))
        self.ami = config.get('AMI', self.os)
        self.ebs_size = int(config.get('EBS', 'volume_size', default=0))
        self.num_ebs = int(config.get('EBS', 'volumes', default=0))
        self.membase_port = config.get('global', 'port', default='8091')
        self.ssh_username = config.get('global', 'username', default='root')
        self.ssh_key_path = config.get('global',
                                       'ssh_key',
                                       default='/root/.ssh/QAkey.pem')
        self.rest_username = config.get('membase',
                                        'rest_username',
                                        default='Administrator')
        self.rest_password = config.get('membase',
                                        'rest_password',
                                        default='password')
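loadConfig() above pulls options from several sections, with defaults for most of the EBS, global and membase values. A hypothetical minimal ini that would satisfy it; section and option names are taken from the get() calls, the values are placeholders.

sample = """\
[Credentials]
aws_access_key_id = AKIAEXAMPLE
aws_secret_access_key = example-secret

[Key]
key_name = qa-key

[Instance]
instance_type = m1.large
zone = us-east-1c
security_groups = default
tags = qa

[Type]
os = centos
num_nodes = 4

[AMI]
centos = ami-12345678
"""
# EBS, global and membase options are omitted here and fall back to the
# defaults shown in loadConfig() above.
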
Example #31
    def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
        """
        Create a new instance based on the specified configuration file or the specified
        configuration and the passed in parameters.

        If the config_file argument is not None, the configuration is read from there.
        Otherwise, the cfg argument is used.

        The config file may include other config files with a #import reference. The included
        config files must reside in the same directory as the specified file.

        The logical_volume argument, if supplied, will be used to get the current physical
        volume ID and use that as an override of the value specified in the config file. This
        may be useful for debugging purposes when you want to debug with a production config
        file but a test Volume.

        The dictionary argument may be used to override any EC2 configuration values in the
        config file.
        """
        if config_file:
            cfg = Config(path=config_file)
        if cfg.has_section('EC2'):
            # include any EC2 configuration values that aren't specified in params:
            for option in cfg.options('EC2'):
                if option not in params:
                    params[option] = cfg.get('EC2', option)
        getter = CommandLineGetter()
        getter.get(cls, params)
        region = params.get('region')
        ec2 = region.connect()
        cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
        ami = params.get('ami')
        kp = params.get('keypair')
        group = params.get('group')
        zone = params.get('zone')
        # deal with possibly passed in logical volume:
        if logical_volume != None:
           cfg.set('EBS', 'logical_volume_name', logical_volume.name)
        cfg_fp = StringIO()
        cfg.write(cfg_fp)
        # deal with the possibility that zone and/or keypair are strings read from the config file:
        if isinstance(zone, Zone):
            zone = zone.name
        if isinstance(kp, KeyPair):
            kp = kp.name
        reservation = ami.run(min_count=1,
                              max_count=params.get('quantity', 1),
                              key_name=kp,
                              security_groups=[group],
                              instance_type=params.get('instance_type'),
                              placement = zone,
                              user_data = cfg_fp.getvalue())
        l = []
        i = 0
        elastic_ip = params.get('elastic_ip')
        instances = reservation.instances
        if elastic_ip is not None and instances.__len__() > 0:
            instance = instances[0]
            print('Waiting for instance to start so we can set its elastic IP address...')
            # Sometimes we get a message from ec2 that says that the instance does not exist.
            # Hopefully the following delay will give ec2 enough time to reach a stable state:
            time.sleep(5)
            while instance.update() != 'running':
                time.sleep(1)
            instance.use_ip(elastic_ip)
            print('set the elastic IP of the first instance to %s' % elastic_ip)
        for instance in instances:
            s = cls()
            s.ec2 = ec2
            s.name = params.get('name') + ('' if i == 0 else str(i))  # first instance keeps the base name; later ones get an index suffix
            s.description = params.get('description')
            s.region_name = region.name
            s.instance_id = instance.id
            if elastic_ip and i == 0:
                s.elastic_ip = elastic_ip
            s.put()
            l.append(s)
            i += 1
        return l
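Per the docstring of create() above, options in an [EC2] section are merged into params before the interactive getter runs, and the whole file is then re-serialized into the instance user data. A hypothetical minimal config; the option names shown are only illustrative.

# Hypothetical minimal config file for create() above; options under [EC2]
# are copied into params unless already supplied on the call.
sample = """\
[EC2]
instance_type = m1.small
quantity = 1
group = default
"""
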
Example #32
class Server(Model):

    ec2 = boto.connect_ec2()

    @classmethod
    def Inventory(cls):
        """
        Returns a list of Server instances, one for each Server object
        persisted in the db
        """
        l = ServerSet()
        rs = cls.find()
        for server in rs:
            l.append(server)
        return l

    @classmethod
    def Register(cls, name, instance_id, description=''):
        s = cls()
        s.name = name
        s.instance_id = instance_id
        s.description = description
        s.save()
        return s

    def __init__(self, id=None, **kw):
        Model.__init__(self, id, **kw)
        self._reservation = None
        self._instance = None
        self._ssh_client = None
        self._pkey = None
        self._config = None

    name = StringProperty(unique=True, verbose_name="Name")
    instance_id = StringProperty(verbose_name="Instance ID")
    config_uri = StringProperty()
    ami_id = StringProperty(verbose_name="AMI ID")
    zone = StringProperty(verbose_name="Availability Zone")
    security_group = StringProperty(verbose_name="Security Group", default="default")
    key_name = StringProperty(verbose_name="Key Name")
    elastic_ip = StringProperty(verbose_name="Elastic IP")
    instance_type = StringProperty(verbose_name="Instance Type")
    description = StringProperty(verbose_name="Description")
    log = StringProperty()

    def setReadOnly(self, value):
        raise AttributeError

    def getInstance(self):
        if not self._instance:
            if self.instance_id:
                try:
                    rs = self.ec2.get_all_instances([self.instance_id])
                except:
                    return None
                if len(rs) > 0:
                    self._reservation = rs[0]
                    self._instance = self._reservation.instances[0]
        return self._instance

    instance = property(getInstance, setReadOnly, None, 'The Instance for the server')
    
    def getAMI(self):
        if self.instance:
            return self.instance.image_id

    ami = property(getAMI, setReadOnly, None, 'The AMI for the server')
    
    def getStatus(self):
        if self.instance:
            self.instance.update()
            return self.instance.state

    status = property(getStatus, setReadOnly, None,
                      'The status of the server')
    
    def getHostname(self):
        if self.instance:
            return self.instance.public_dns_name

    hostname = property(getHostname, setReadOnly, None,
                        'The public DNS name of the server')

    def getPrivateHostname(self):
        if self.instance:
            return self.instance.private_dns_name

    private_hostname = property(getPrivateHostname, setReadOnly, None,
                                'The private DNS name of the server')

    def getLaunchTime(self):
        if self.instance:
            return self.instance.launch_time

    launch_time = property(getLaunchTime, setReadOnly, None,
                           'The time the Server was started')

    def getConsoleOutput(self):
        if self.instance:
            return self.instance.get_console_output()

    console_output = property(getConsoleOutput, setReadOnly, None,
                              'Retrieve the console output for server')

    def getGroups(self):
        if self._reservation:
            return self._reservation.groups
        else:
            return None

    groups = property(getGroups, setReadOnly, None,
                      'The Security Groups controlling access to this server')

    def getConfig(self):
        if not self._config:
            remote_file = BotoConfigPath
            local_file = '%s.ini' % self.instance.id
            self.get_file(remote_file, local_file)
            self._config = Config(local_file)
        return self._config

    def setConfig(self, config):
        local_file = '%s.ini' % self.instance.id
        fp = open(local_file, 'w')  # open for writing; config.write() serializes into it
        config.write(fp)
        fp.close()
        self.put_file(local_file, BotoConfigPath)
        self._config = config

    config = property(getConfig, setConfig, None,
                      'The instance data for this server')

    def set_config(self, config):
        """
        Set SDB based config
        """
        self._config = config
        self._config.dump_to_sdb("botoConfigs", self.id)

    def load_config(self):
        self._config = Config(do_load=False)
        self._config.load_from_sdb("botoConfigs", self.id)

    def stop(self):
        if self.instance:
            self.instance.stop()

    def start(self):
        self.stop()
        ec2 = boto.connect_ec2()
        ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0]
        groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)])
        if not self._config:
            self.load_config()
        if not self._config.has_section("Credentials"):
            self._config.add_section("Credentials")
            self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id)
            self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key)

        if not self._config.has_section("Pyami"):
            self._config.add_section("Pyami")

        if self._manager.domain:
            self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name)
            self._config.set("Pyami", 'server_sdb_name', self.name)

        cfg = StringIO.StringIO()
        self._config.write(cfg)
        cfg = cfg.getvalue()
        r = ami.run(min_count=1,
                    max_count=1,
                    key_name=self.key_name,
                    security_groups = groups,
                    instance_type = self.instance_type,
                    placement = self.zone,
                    user_data = cfg)
        i = r.instances[0]
        self.instance_id = i.id
        self.put()
        if self.elastic_ip:
            ec2.associate_address(self.instance_id, self.elastic_ip)

    def reboot(self):
        if self.instance:
            self.instance.reboot()

    def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts',
                       uname='root'):
        import paramiko
        if not self.instance:
            print 'No instance yet!'
            return
        if not self._ssh_client:
            if not key_file:
                iobject = IObject()
                key_file = iobject.get_filename('Path to OpenSSH Key file')
            self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
            self._ssh_client = paramiko.SSHClient()
            self._ssh_client.load_system_host_keys()
            self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
            self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self._ssh_client.connect(self.instance.public_dns_name,
                                     username=uname, pkey=self._pkey)
        return self._ssh_client

    def get_file(self, remotepath, localpath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        sftp_client.get(remotepath, localpath)

    def put_file(self, localpath, remotepath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        sftp_client.put(localpath, remotepath)

    def listdir(self, remotepath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        return sftp_client.listdir(remotepath)

    def shell(self, key_file=None):
        ssh_client = self.get_ssh_client(key_file)
        channel = ssh_client.invoke_shell()
        interactive_shell(channel)

    def bundle_image(self, prefix, key_file, cert_file, size):
        print 'bundling image...'
        print '\tcopying cert and pk over to /mnt directory on server'
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        path, name = os.path.split(key_file)
        remote_key_file = '/mnt/%s' % name
        self.put_file(key_file, remote_key_file)
        path, name = os.path.split(cert_file)
        remote_cert_file = '/mnt/%s' % name
        self.put_file(cert_file, remote_cert_file)
        print '\tdeleting %s' % BotoConfigPath
        # delete the metadata.ini file if it exists
        try:
            sftp_client.remove(BotoConfigPath)
        except:
            pass
        command = 'ec2-bundle-vol '
        command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
        command += '-u %s ' % self._reservation.owner_id
        command += '-p %s ' % prefix
        command += '-s %d ' % size
        command += '-d /mnt '
        if self.instance.instance_type in ('m1.small', 'c1.medium'):
            command += '-r i386'
        else:
            command += '-r x86_64'
        print '\t%s' % command
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print '\t%s' % response
        print '\t%s' % t[2].read()
        print '...complete!'

    def upload_bundle(self, bucket, prefix):
        print 'uploading bundle...'
        command = 'ec2-upload-bundle '
        command += '-m /mnt/%s.manifest.xml ' % prefix
        command += '-b %s ' % bucket
        command += '-a %s ' % self.ec2.aws_access_key_id
        command += '-s %s ' % self.ec2.aws_secret_access_key
        print '\t%s' % command
        ssh_client = self.get_ssh_client()
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print '\t%s' % response
        print '\t%s' % t[2].read()
        print '...complete!'

    def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None):
        iobject = IObject()
        if not bucket:
            bucket = iobject.get_string('Name of S3 bucket')
        if not prefix:
            prefix = iobject.get_string('Prefix for AMI file')
        if not key_file:
            key_file = iobject.get_filename('Path to RSA private key file')
        if not cert_file:
            cert_file = iobject.get_filename('Path to RSA public cert file')
        if not size:
            size = iobject.get_int('Size (in MB) of bundled image')
        self.bundle_image(prefix, key_file, cert_file, size)
        self.upload_bundle(bucket, prefix)
        print 'registering image...'
        self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix))
        return self.image_id

    def attach_volume(self, volume, device="/dev/sdp"):
        """
        Attach an EBS volume to this server

        :param volume: EBS Volume to attach
        :type volume: boto.ec2.volume.Volume

        :param device: Device to attach to (default to /dev/sdp)
        :type device: string
        """
        if hasattr(volume, "id"):
            volume_id = volume.id
        else:
            volume_id = volume
        return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device)

    def detach_volume(self, volume):
        """
        Detach an EBS volume from this server

        :param volume: EBS Volume to detach
        :type volume: boto.ec2.volume.Volume
        """
        if hasattr(volume, "id"):
            volume_id = volume.id
        else:
            volume_id = volume
        return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)

    def install_package(self, package_name):
        print 'installing %s...' % package_name
        command = 'yum -y install %s' % package_name
        print '\t%s' % command
        ssh_client = self.get_ssh_client()
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print '\t%s' % response
        print '\t%s' % t[2].read()
        print '...complete!'
Example #33
 def get(self, name, default=None):
     return Config.get(self, self.name, name, default)
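A short sketch of what the section-scoped get() wrapper above amounts to; 'MyService' is a made-up stand-in for self.name, and the default argument mirrors boto Config.get()'s behavior of returning it when the option is missing.

from boto.pyami.config import Config

cfg = Config(do_load=False)
cfg.add_section('MyService')                        # stands in for self.name
cfg.set('MyService', 'endpoint', 'svc.example.com')

print(cfg.get('MyService', 'endpoint'))             # svc.example.com
print(cfg.get('MyService', 'missing', 'fallback'))  # default returned when the option is absent
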
Example #34
 def set_config(self, config_path=None):
     if not config_path:
         config_path = self.get_filename('Specify Config file')
     self.config = Config(path=config_path)
Example #35
File: s3ingest.py Project: rmauge/S-3PO
def main(argv):
    parser = argparse.ArgumentParser(description='Upload assets to Amazon')
    parser.add_argument('--config',
                        dest='config_filename',
                        action='store',
                        default=CONFIG_FILE,
                        help='optional custom configuration filename')
    parser.add_argument('--node',
                        dest='node_name_override',
                        action='store',
                        default=False,
                        help='optional override for the pid-id specified in the config file')
    parameters = parser.parse_args()

    current_defaults_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), parameters.config_filename)
    config = Config(path=current_defaults_filename)
    global access_key_id
    global secret_access_key
    access_key_id = config.get('Amazon', 'aws_access_key_id')
    secret_access_key = config.get('Amazon', 'aws_secret_access_key')
    log_file_path = config.get('General', 'log_file_path', '/var/log/s3ingest.log')
    log_level = config.getint('General', 'log_level', 20)
    target_bucket_name = config.get('Amazon', 's3_bucket_name')
    monitored_dir_name = config.get('General', 'monitored_directory')
    worker_threads = config.getint('General', 'worker_threads', 5)
    pid_file_path = config.get('General', 'pid_file_path', './s3ingest.semaphore')
    if not parameters.node_name_override:
        pid_id = config.get('General', 'pid_id').rstrip()
    else:
        pid_id = parameters.node_name_override.rstrip()
    HEART_BEAT_TIME_SECS = config.getint('General', 'heart_beat_time_secs', 300)
    MIN_MODIFIED_INTERVAL_SECS = 3600 # 3600 secs = 1 hr. Keep high to allow time for large files to upload and reduce false positives

    if not os.path.exists(monitored_dir_name):
        print "The directory to be monitored '{0}' does not exist".format(monitored_dir_name)
        sys.exit(1)

    logging.basicConfig(filename=log_file_path, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=log_level)
    mailhost = config.get('Mail', 'mailhost')
    fromaddr = config.get('Mail', 'fromaddr')
    toaddrs = config.get('Mail', 'toaddrs')
    smtp_handler = handlers.SMTPHandler(mailhost, fromaddr, toaddrs, 'S3Util error occurred')
    smtp_handler.setLevel(logging.ERROR)
    logging.getLogger().addHandler(smtp_handler)
    
    s3_util = S3Util(access_key_id, secret_access_key)

    s3_util.set_target_bucket_name(target_bucket_name)
    signal.signal(signal.SIGINT, s3_util.signal_handler)
    signal.signal(signal.SIGTERM, s3_util.signal_handler)

    # Check for pid file and create if not found
    if not os.path.exists(pid_file_path):
        pid_file = open(pid_file_path, "w+")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_EX)
        pid_file.write(str(pid_id))
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        pid_file.close()

    s3_util.start_monitoring(monitored_dir_name)

    logging.debug("Starting worker threads")
    for i in range(worker_threads):
        t = S3Uploader(s3_util)
        t.setDaemon(True)
        t.start()

    logging.debug("Worker threads started")

    while True:
        pid_file = open(pid_file_path, "r+")
        logging.debug("Waiting for lock")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_SH)
        logging.debug("Acquired lock")
        current_pid = pid_file.readline().rstrip()
        st = os.stat(pid_file_path)
        now = time.time()
        pid_modified_time = st[stat.ST_MTIME]
        logging.debug("pid file: {0}, current_host: {1}".format(current_pid, pid_id))
        if pid_id == current_pid:
            logging.debug("State - Active")
            os.utime(pid_file_path, None)
            s3_util.set_active(True)
            # Find files have been unmodified for a defined threshold and assume that they need to be queued
            for dirpath, dirnames, filenames in os.walk(monitored_dir_name):
                for name in filenames:
                    file_path = os.path.normpath(os.path.join(dirpath, name))
                    last_modifed_time = os.path.getmtime(file_path)
                    if ((now - last_modifed_time) > MIN_MODIFIED_INTERVAL_SECS and not
                        (s3_util.is_queued(file_path) or s3_util.is_currently_processing(file_path))):
                        logging.info("Directory scan found file '{0}' older than {1} seconds and added to queue".format(file_path, (now - last_modifed_time)))
                        s3_util.add_to_queue(file_path)
        else:
            if now - pid_modified_time > HEART_BEAT_TIME_SECS:
                logging.debug("Stale pid file found, setting state - Active")
                pid_file.truncate(0)
                pid_file.seek(0)
                pid_file.write(str(pid_id))
                s3_util.set_active(True)
            else:
                logging.debug("State - Inactive")
                s3_util.set_active(False)
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        logging.debug("Released lock")
        pid_file.close()
        #Play nice
        sleep(5)

    s3_util.wait_for_completion()
    logging.debug("Exiting")
    sys.exit(0)
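main() above reads everything from a single ini via Config(path=...), with defaults for several [General] options. A hypothetical minimal config file; section and option names are taken from the get()/getint() calls, the values are placeholders.

sample = """\
[Amazon]
aws_access_key_id = AKIAEXAMPLE
aws_secret_access_key = example-secret
s3_bucket_name = my-ingest-bucket

[General]
monitored_directory = /data/incoming
pid_id = node-01

[Mail]
mailhost = smtp.example.com
fromaddr = s3ingest@example.com
toaddrs = ops@example.com
"""
# log_file_path, log_level, worker_threads, pid_file_path and heart_beat_time_secs
# fall back to the defaults shown in main() above.
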
Example #36
class Item(IObject):
    
    def __init__(self):
        self.region = None
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = None
        self.config = None
        self.userdata = {}  # backing store for set_userdata()/get_userdata() below

    def set_userdata(self, key, value):
        self.userdata[key] = value

    def get_userdata(self, key):
        return self.userdata[key]

    def set_region(self, region=None):
        if region:
            self.region = region
        else:
            l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
            self.region = self.choose_from_list(l, prompt='Choose Region')

    def set_name(self, name=None):
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        if instance_type:
            self.instance_type = instance_type
        else:
            self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')

    def set_quantity(self, n=0):
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')
            
    def set_ami(self, ami=None):
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
            self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        s = io.StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        self.region = params.get('region', self.region)
        if not self.region:
            self.set_region()
        self.ec2 = self.region.connect()
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
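
A minimal sketch of driving the Item class above: the region comes from boto.ec2.get_region(), the name and sizing values are placeholders, and anything not passed in params (zone, AMI, security group, keypair, config file) still falls back to the interactive prompts defined by the setters.

import boto.ec2

# Hedged sketch: pre-fill the fields we know; the rest stay interactive.
region = boto.ec2.get_region('us-east-1')
item = Item()
item.enter(region=region,
           name='web-server',        # placeholder name
           instance_type='m1.small',
           quantity=1)
# zone, ami, groups, key and config are still gathered by the prompts above;
# region.connect() inside enter() assumes credentials are configured.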
Example #37
File: servicedef.py Project: 0t3dWCE/boto
 def has_option(self, option):
     return Config.has_option(self, self.name, option)
Example #38
class CertValidationTest(unittest.TestCase):
    def setUp(self):
        self.config = Config()

        # Enable https_validate_certificates.
        self.config.add_section('Boto')
        self.config.setbool('Boto', 'https_validate_certificates', True)

        # Set up bogus credentials so that the auth module is willing to go
        # ahead and make a request; the request should fail with a service-level
        # error if it does get to the service (S3 or GS).
        self.config.add_section('Credentials')
        self.config.set('Credentials', 'gs_access_key_id', 'xyz')
        self.config.set('Credentials', 'gs_secret_access_key', 'xyz')
        self.config.set('Credentials', 'aws_access_key_id', 'xyz')
        self.config.set('Credentials', 'aws_secret_access_key', 'xyz')

        self._config_patch = mock.patch('boto.config', self.config)
        self._config_patch.start()

    def tearDown(self):
        self._config_patch.stop()

    def enableProxy(self):
        self.config.set('Boto', 'proxy', PROXY_HOST)
        self.config.set('Boto', 'proxy_port', PROXY_PORT)

    def assertConnectionThrows(self, connection_class, error):
        conn = connection_class('fake_id', 'fake_secret')
        self.assertRaises(error, conn.get_all_buckets)

    def do_test_valid_cert(self):
        # When connecting to actual servers with bundled root certificates, no
        # cert errors should be thrown; instead we will get "invalid
        # credentials" errors since the config used does not contain any
        # credentials.
        self.assertConnectionThrows(S3Connection, exception.S3ResponseError)
        self.assertConnectionThrows(GSConnection, exception.GSResponseError)

    def test_valid_cert(self):
        self.do_test_valid_cert()

    def test_valid_cert_with_proxy(self):
        self.enableProxy()
        self.do_test_valid_cert()

    def do_test_invalid_signature(self):
        self.config.set('Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
        self.assertConnectionThrows(S3Connection, ssl.SSLError)
        self.assertConnectionThrows(GSConnection, ssl.SSLError)

    def test_invalid_signature(self):
        self.do_test_invalid_signature()

    def test_invalid_signature_with_proxy(self):
        self.enableProxy()
        self.do_test_invalid_signature()

    def do_test_invalid_host(self):
        # The source defines this method twice; only the last definition is
        # bound, so the earlier variant expecting ssl.SSLError is dropped here.
        self.config.set('Credentials', 'gs_host', INVALID_HOSTNAME_HOST)
        self.config.set('Credentials', 's3_host', INVALID_HOSTNAME_HOST)
        self.assertConnectionThrows(
            S3Connection, https_connection.InvalidCertificateException)
        self.assertConnectionThrows(
            GSConnection, https_connection.InvalidCertificateException)

    def test_invalid_host(self):
        self.do_test_invalid_host()

    def test_invalid_host_with_proxy(self):
        self.enableProxy()
        self.do_test_invalid_host()
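
Outside a test, the certificate-validation switch this class toggles can be set on a boto Config object directly; a minimal sketch, assuming boto and mock are importable exactly as in the test above.

import mock
from boto.pyami.config import Config

cfg = Config()
if not cfg.has_section('Boto'):
    cfg.add_section('Boto')
cfg.setbool('Boto', 'https_validate_certificates', True)

# Patch it in the same way CertValidationTest does; connections created
# inside the block will validate server certificates.
with mock.patch('boto.config', cfg):
    pass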
Example #39
def assume_identity(config, profile):
    # if AWS_PROFILE was the option last used, and it didn't require assuming a role
    if config.get('AWS_PROFILE_REFRESH_NOT_NEEDED'):
        return None

    _config_lock = config.get('CONFIG_LOCK') or config_lock
    _config_lock.acquire()
    if 'assumed_roles' not in config:
        config['assumed_roles'] = {}
    if 'role_last_updated' not in config:
        config['role_last_updated'] = {}

    try:
        assumed_roles = config.get('assumed_roles', {})
        assumed_role = assumed_roles.get(profile)
        if assumed_role and not assumed_role.credentials.is_expired(
                time_offset_seconds=900):
            return False

        # fetch the credentials from the aws configs
        shared_credentials = config.get('AWS_SHARED_CREDENTIALS')

        if not shared_credentials:
            config_path = config.get('AWS_CONFIG_FILE') or os.environ.get(
                'AWS_CONFIG_FILE') or os.path.join(expanduser('~'), '.aws',
                                                   'config')
            credentials_path = (config.get('AWS_CONFIG_FILE')
                                or os.environ.get('AWS_CONFIG_FILE')
                                or os.path.join(expanduser('~'), '.aws',
                                                'credentials')).replace(
                                                    '/config', '/credentials')

            shared_credentials = Config(do_load=False)
            if os.path.isfile(credentials_path):
                shared_credentials.load_from_path(credentials_path)
            if os.path.isfile(config_path):
                shared_credentials.load_from_path(config_path)
            config['AWS_SHARED_CREDENTIALS'] = shared_credentials

        profile_key = profile
        if not shared_credentials.has_section(profile_key):
            profile_key = 'profile {}'.format(profile_key)
        if not shared_credentials.has_section(profile_key):
            raise ProfileNotFoundError('Profile {} not found'.format(
                config['AWS_PROFILE']))

        # no matter what, get the access and secret key pair
        if all([
                shared_credentials.has_option(profile_key, x)
                for x in ('aws_access_key_id', 'aws_secret_access_key')
        ]):
            aws_access_key_id = shared_credentials.get(profile_key,
                                                       'aws_access_key_id')
            aws_secret_access_key = shared_credentials.get(
                profile_key, 'aws_secret_access_key')
        elif shared_credentials.has_option(profile_key, 'source_profile'):
            source_profile_key = shared_credentials.get(
                profile_key, 'source_profile')
            if not shared_credentials.has_section(source_profile_key):
                source_profile_key = 'profile {}'.format(source_profile_key)
            if not shared_credentials.has_section(source_profile_key):
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} not found'.format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))

            # source_section = shared_credentials['_sections'][source_profile_key]
            if all([
                    shared_credentials.has_option(source_profile_key, x)
                    for x in ('aws_access_key_id', 'aws_secret_access_key')
            ]):
                aws_access_key_id = shared_credentials.get(
                    source_profile_key, 'aws_access_key_id')
                aws_secret_access_key = shared_credentials.get(
                    source_profile_key, 'aws_secret_access_key')
            else:
                raise ProfileNotFoundError(
                    'Source profile {} for profile {} has no access or secret key'
                    .format(
                        shared_credentials.get(profile_key, 'source_profile'),
                        profile))

        # if there's a role_arn, use it to assume a role
        if shared_credentials.has_option(profile_key, 'role_arn'):
            role_arn = shared_credentials.get(profile_key, 'role_arn')
            sts_connection = sts.STSConnection(
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key)
            config['assumed_roles'][profile] = sts_connection.assume_role(
                role_arn, ROLE_SESSION_NAME, policy=None, duration_seconds=960)
            config['role_last_updated'][profile] = datetime.datetime.utcnow(
            ).isoformat()[:19] + 'Z'

        return True

    finally:
        _config_lock.release()
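
A rough usage sketch for assume_identity(): the dict stands in for whatever config mapping the surrounding application uses, and the 'deploy' profile name is hypothetical. When the profile's role_arn triggers the STS call, the resulting boto AssumedRole lands in config['assumed_roles'].

# Hedged sketch: dict-backed config, hypothetical profile name.
app_config = {'AWS_PROFILE': 'deploy'}
changed = assume_identity(app_config, 'deploy')
if changed:
    role = app_config['assumed_roles']['deploy']
    # boto.sts credentials expose access_key / secret_key / session_token
    print(role.credentials.access_key)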
Example #40
 def load_config(self):
     self._config = Config(do_load=False)
     self._config.load_from_sdb("botoConfigs", self.id)
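
For context, load_from_sdb() above is the read half of a round trip; a small sketch of both halves follows, with a hypothetical item name ('instance-0001') in the same "botoConfigs" SimpleDB domain.

from boto.pyami.config import Config

cfg = Config(do_load=False)
cfg.add_section('Pyami')
cfg.set('Pyami', 'server_sdb_name', 'web-1')        # placeholder value
cfg.dump_to_sdb('botoConfigs', 'instance-0001')     # write to SimpleDB

restored = Config(do_load=False)
restored.load_from_sdb('botoConfigs', 'instance-0001')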
Example #41
File: order.py Project: carlgao/lenga
 def set_config(self, config_path=None):
     if not config_path:
         config_path = self.get_filename('Specify Config file')
     self.config = Config(path=config_path)
Example #42
    def loadConfig(self, path=None):
        # Get all the Configuration
        config = Config(path=path)
        self.aws_access_key_id = config.get('Credentials','aws_access_key_id')
        self.aws_secret_access_key = config.get('Credentials','aws_secret_access_key')
        self.key_name = config.get('Key','key_name')
        self.instance_type = config.get('Instance','instance_type')
        self.zone = config.get('Instance','zone', default='us-east-1c')
        self.security_groups = config.get('Instance','security_groups')
        self.tags = config.get('Instance', 'tags')

        self.os = config.get('Type','os')
        self.num_nodes = int(config.get('Type','num_nodes'))
        self.ami = config.get('AMI',self.os)
        self.ebs_size = int(config.get('EBS','volume_size', default=0))
        self.num_ebs = int(config.get('EBS','volumes', default=0))
        self.membase_port = config.get('global', 'port', default='8091')
        self.ssh_username = config.get('global', 'username', default='root')
        self.ssh_key_path = config.get('global', 'ssh_key', default='/root/.ssh/QAkey.pem')
        self.rest_username = config.get('membase','rest_username', default='Administrator')
        self.rest_password = config.get('membase','rest_password', default='password')
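
The option lookups above imply a config file shaped roughly like the sketch below; section and option names are taken from the calls in loadConfig(), every value is a placeholder, and the [AMI] option name must match the value of os in [Type].

ini_text = """
[Credentials]
aws_access_key_id = <access-key>
aws_secret_access_key = <secret-key>

[Key]
key_name = my-keypair

[Instance]
instance_type = m1.large
zone = us-east-1c
security_groups = default
tags = qa

[Type]
os = centos
num_nodes = 2

[AMI]
centos = ami-00000000

[EBS]
volume_size = 100
volumes = 2

[global]
port = 8091
username = root
ssh_key = /root/.ssh/QAkey.pem

[membase]
rest_username = Administrator
rest_password = password
"""
with open('cluster.ini', 'w') as fp:
    fp.write(ini_text)
# obj.loadConfig(path='cluster.ini') would then populate the attributes above.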
Example #43
class Server(Model):
    @property
    def ec2(self):
        if self._ec2 is None:
            self._ec2 = boto.connect_ec2()
        return self._ec2

    @classmethod
    def Inventory(cls):
        """
        Returns a list of Server instances, one for each Server object
        persisted in the db
        """
        l = ServerSet()
        rs = cls.find()
        for server in rs:
            l.append(server)
        return l

    @classmethod
    def Register(cls, name, instance_id, description=''):
        s = cls()
        s.name = name
        s.instance_id = instance_id
        s.description = description
        s.save()
        return s

    def __init__(self, id=None, **kw):
        super(Server, self).__init__(id, **kw)
        self._reservation = None
        self._instance = None
        self._ssh_client = None
        self._pkey = None
        self._config = None
        self._ec2 = None

    name = StringProperty(unique=True, verbose_name="Name")
    instance_id = StringProperty(verbose_name="Instance ID")
    config_uri = StringProperty()
    ami_id = StringProperty(verbose_name="AMI ID")
    zone = StringProperty(verbose_name="Availability Zone")
    security_group = StringProperty(verbose_name="Security Group",
                                    default="default")
    key_name = StringProperty(verbose_name="Key Name")
    elastic_ip = StringProperty(verbose_name="Elastic IP")
    instance_type = StringProperty(verbose_name="Instance Type")
    description = StringProperty(verbose_name="Description")
    log = StringProperty()

    def setReadOnly(self, value):
        raise AttributeError

    def getInstance(self):
        if not self._instance:
            if self.instance_id:
                try:
                    rs = self.ec2.get_all_reservations([self.instance_id])
                except:
                    return None
                if len(rs) > 0:
                    self._reservation = rs[0]
                    self._instance = self._reservation.instances[0]
        return self._instance

    instance = property(getInstance, setReadOnly, None,
                        'The Instance for the server')

    def getAMI(self):
        if self.instance:
            return self.instance.image_id

    ami = property(getAMI, setReadOnly, None, 'The AMI for the server')

    def getStatus(self):
        if self.instance:
            self.instance.update()
            return self.instance.state

    status = property(getStatus, setReadOnly, None, 'The status of the server')

    def getHostname(self):
        if self.instance:
            return self.instance.public_dns_name

    hostname = property(getHostname, setReadOnly, None,
                        'The public DNS name of the server')

    def getPrivateHostname(self):
        if self.instance:
            return self.instance.private_dns_name

    private_hostname = property(getPrivateHostname, setReadOnly, None,
                                'The private DNS name of the server')

    def getLaunchTime(self):
        if self.instance:
            return self.instance.launch_time

    launch_time = property(getLaunchTime, setReadOnly, None,
                           'The time the Server was started')

    def getConsoleOutput(self):
        if self.instance:
            return self.instance.get_console_output()

    console_output = property(getConsoleOutput, setReadOnly, None,
                              'Retrieve the console output for server')

    def getGroups(self):
        if self._reservation:
            return self._reservation.groups
        else:
            return None

    groups = property(getGroups, setReadOnly, None,
                      'The Security Groups controlling access to this server')

    def getConfig(self):
        if not self._config:
            remote_file = BotoConfigPath
            local_file = '%s.ini' % self.instance.id
            self.get_file(remote_file, local_file)
            self._config = Config(local_file)
        return self._config

    def setConfig(self, config):
        local_file = '%s.ini' % self.instance.id
        fp = open(local_file, 'w')
        config.write(fp)
        fp.close()
        self.put_file(local_file, BotoConfigPath)
        self._config = config

    config = property(getConfig, setConfig, None,
                      'The instance data for this server')

    def set_config(self, config):
        """
        Set SDB based config
        """
        self._config = config
        self._config.dump_to_sdb("botoConfigs", self.id)

    def load_config(self):
        self._config = Config(do_load=False)
        self._config.load_from_sdb("botoConfigs", self.id)

    def stop(self):
        if self.instance:
            self.instance.stop()

    def start(self):
        self.stop()
        ec2 = boto.connect_ec2()
        ami = ec2.get_all_images(image_ids=[str(self.ami_id)])[0]
        groups = ec2.get_all_security_groups(
            groupnames=[str(self.security_group)])
        if not self._config:
            self.load_config()
        if not self._config.has_section("Credentials"):
            self._config.add_section("Credentials")
            self._config.set("Credentials", "aws_access_key_id",
                             ec2.aws_access_key_id)
            self._config.set("Credentials", "aws_secret_access_key",
                             ec2.aws_secret_access_key)

        if not self._config.has_section("Pyami"):
            self._config.add_section("Pyami")

        if self._manager.domain:
            self._config.set('Pyami', 'server_sdb_domain',
                             self._manager.domain.name)
            self._config.set("Pyami", 'server_sdb_name', self.name)

        cfg = StringIO()
        self._config.write(cfg)
        cfg = cfg.getvalue()
        r = ami.run(min_count=1,
                    max_count=1,
                    key_name=self.key_name,
                    security_groups=groups,
                    instance_type=self.instance_type,
                    placement=self.zone,
                    user_data=cfg)
        i = r.instances[0]
        self.instance_id = i.id
        self.put()
        if self.elastic_ip:
            ec2.associate_address(self.instance_id, self.elastic_ip)

    def reboot(self):
        if self.instance:
            self.instance.reboot()

    def get_ssh_client(self,
                       key_file=None,
                       host_key_file='~/.ssh/known_hosts',
                       uname='root'):
        import paramiko
        if not self.instance:
            print('No instance yet!')
            return
        if not self._ssh_client:
            if not key_file:
                iobject = IObject()
                key_file = iobject.get_filename('Path to OpenSSH Key file')
            self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
            self._ssh_client = paramiko.SSHClient()
            self._ssh_client.load_system_host_keys()
            self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
            self._ssh_client.set_missing_host_key_policy(
                paramiko.AutoAddPolicy())
            self._ssh_client.connect(self.instance.public_dns_name,
                                     username=uname,
                                     pkey=self._pkey)
        return self._ssh_client

    def get_file(self, remotepath, localpath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        sftp_client.get(remotepath, localpath)

    def put_file(self, localpath, remotepath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        sftp_client.put(localpath, remotepath)

    def listdir(self, remotepath):
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        return sftp_client.listdir(remotepath)

    def shell(self, key_file=None):
        ssh_client = self.get_ssh_client(key_file)
        channel = ssh_client.invoke_shell()
        interactive_shell(channel)

    def bundle_image(self, prefix, key_file, cert_file, size):
        print('bundling image...')
        print('\tcopying cert and pk over to /mnt directory on server')
        ssh_client = self.get_ssh_client()
        sftp_client = ssh_client.open_sftp()
        path, name = os.path.split(key_file)
        remote_key_file = '/mnt/%s' % name
        self.put_file(key_file, remote_key_file)
        path, name = os.path.split(cert_file)
        remote_cert_file = '/mnt/%s' % name
        self.put_file(cert_file, remote_cert_file)
        print('\tdeleting %s' % BotoConfigPath)
        # delete the metadata.ini file if it exists
        try:
            sftp_client.remove(BotoConfigPath)
        except:
            pass
        command = 'sudo ec2-bundle-vol '
        command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
        command += '-u %s ' % self._reservation.owner_id
        command += '-p %s ' % prefix
        command += '-s %d ' % size
        command += '-d /mnt '
        if self.instance.instance_type in ('m1.small', 'c1.medium'):
            command += '-r i386'
        else:
            command += '-r x86_64'
        print('\t%s' % command)
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print('\t%s' % response)
        print('\t%s' % t[2].read())
        print('...complete!')

    def upload_bundle(self, bucket, prefix):
        print('uploading bundle...')
        command = 'ec2-upload-bundle '
        command += '-m /mnt/%s.manifest.xml ' % prefix
        command += '-b %s ' % bucket
        command += '-a %s ' % self.ec2.aws_access_key_id
        command += '-s %s ' % self.ec2.aws_secret_access_key
        print('\t%s' % command)
        ssh_client = self.get_ssh_client()
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print('\t%s' % response)
        print('\t%s' % t[2].read())
        print('...complete!')

    def create_image(self,
                     bucket=None,
                     prefix=None,
                     key_file=None,
                     cert_file=None,
                     size=None):
        iobject = IObject()
        if not bucket:
            bucket = iobject.get_string('Name of S3 bucket')
        if not prefix:
            prefix = iobject.get_string('Prefix for AMI file')
        if not key_file:
            key_file = iobject.get_filename('Path to RSA private key file')
        if not cert_file:
            cert_file = iobject.get_filename('Path to RSA public cert file')
        if not size:
            size = iobject.get_int('Size (in MB) of bundled image')
        self.bundle_image(prefix, key_file, cert_file, size)
        self.upload_bundle(bucket, prefix)
        print('registering image...')
        self.image_id = self.ec2.register_image('%s/%s.manifest.xml' %
                                                (bucket, prefix))
        return self.image_id

    def attach_volume(self, volume, device="/dev/sdp"):
        """
        Attach an EBS volume to this server

        :param volume: EBS Volume to attach
        :type volume: boto.ec2.volume.Volume

        :param device: Device to attach to (default to /dev/sdp)
        :type device: string
        """
        if hasattr(volume, "id"):
            volume_id = volume.id
        else:
            volume_id = volume
        return self.ec2.attach_volume(volume_id=volume_id,
                                      instance_id=self.instance_id,
                                      device=device)

    def detach_volume(self, volume):
        """
        Detach an EBS volume from this server

        :param volume: EBS Volume to detach
        :type volume: boto.ec2.volume.Volume
        """
        if hasattr(volume, "id"):
            volume_id = volume.id
        else:
            volume_id = volume
        return self.ec2.detach_volume(volume_id=volume_id,
                                      instance_id=self.instance_id)

    def install_package(self, package_name):
        print('installing %s...' % package_name)
        command = 'yum -y install %s' % package_name
        print('\t%s' % command)
        ssh_client = self.get_ssh_client()
        t = ssh_client.exec_command(command)
        response = t[1].read()
        print('\t%s' % response)
        print('\t%s' % t[2].read())
        print('...complete!')
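
A short, hedged usage sketch for the Server model above; the instance id is hypothetical, and the SSH-based helpers will prompt for a key file via IObject unless one is passed to get_ssh_client().

# Hedged sketch: register an already-running instance and poke at it.
server = Server.Register('web-1', 'i-0123456789abcdef0', description='frontend box')
print(server.status)              # e.g. 'running'
print(server.hostname)            # public DNS name of the instance
server.install_package('httpd')   # yum install over the SSH helper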
Example #44
    def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
        """
        Create a new instance based on the specified configuration file or the specified
        configuration and the passed in parameters.

        If the config_file argument is not None, the configuration is read from there.
        Otherwise, the cfg argument is used.

        The config file may include other config files with a #import reference. The included
        config files must reside in the same directory as the specified file.

        The logical_volume argument, if supplied, will be used to get the current physical
        volume ID and use that as an override of the value specified in the config file. This
        may be useful for debugging purposes when you want to debug with a production config
        file but a test Volume.

        The dictionary argument may be used to override any EC2 configuration values in the
        config file.
        """
        if config_file:
            cfg = Config(path=config_file)
        if cfg.has_section('EC2'):
            # include any EC2 configuration values that aren't specified in params:
            for option in cfg.options('EC2'):
                if option not in params:
                    params[option] = cfg.get('EC2', option)
        getter = CommandLineGetter()
        getter.get(cls, params)
        region = params.get('region')
        ec2 = region.connect()
        cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
        ami = params.get('ami')
        kp = params.get('keypair')
        group = params.get('group')
        zone = params.get('zone')
        # deal with possibly passed in logical volume:
        if logical_volume is not None:
            cfg.set('EBS', 'logical_volume_name', logical_volume.name)
        cfg_fp = StringIO.StringIO()
        cfg.write(cfg_fp)
        # deal with the possibility that zone and/or keypair are strings read from the config file:
        if isinstance(zone, Zone):
            zone = zone.name
        if isinstance(kp, KeyPair):
            kp = kp.name
        reservation = ami.run(min_count=1,
                              max_count=params.get('quantity', 1),
                              key_name=kp,
                              security_groups=[group],
                              instance_type=params.get('instance_type'),
                              placement = zone,
                              user_data = cfg_fp.getvalue())
        l = []
        i = 0
        elastic_ip = params.get('elastic_ip')
        instances = reservation.instances
        if elastic_ip is not None and len(instances) > 0:
            instance = instances[0]
            print 'Waiting for instance to start so we can set its elastic IP address...'
            # Sometimes we get a message from ec2 that says that the instance does not exist.
            # Hopefully the following delay will give ec2 enough time to get to a stable state:
            time.sleep(5)
            while instance.update() != 'running':
                time.sleep(1)
            instance.use_ip(elastic_ip)
            print 'set the elastic IP of the first instance to %s' % elastic_ip
        for instance in instances:
            s = cls()
            s.ec2 = ec2
            s.name = params.get('name') + ('' if i == 0 else str(i))
            s.description = params.get('description')
            s.region_name = region.name
            s.instance_id = instance.id
            if elastic_ip and i == 0:
                s.elastic_ip = elastic_ip
            s.put()
            l.append(s)
            i += 1
        return l
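
A hedged sketch of calling the create() classmethod above, assuming it is bound to the Server model shown earlier; 'server.cfg' is a hypothetical path whose [EC2] section supplies defaults, and CommandLineGetter prompts for anything still missing.

# Hedged sketch: config-driven launch; prompts fill in whatever is absent.
servers = Server.create(config_file='server.cfg',
                        quantity=2,
                        instance_type='m1.small')
for s in servers:
    print('%s -> %s' % (s.name, s.instance_id))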
Example #45
   exit()

# Get Command Line Arguments
PHRASE        = sys.argv[1]
JOB_ID        = sys.argv[2]
ITERATION     = sys.argv[3]
PARENT_HIT_ID = sys.argv[4]
BRANCHES      = sys.argv[5]


# Connect to the HIT database
database = sqlite3.connect('crowdstorming.db', isolation_level='DEFERRED')
db       = database.cursor()

# BOTO Configuration
config = Config()
AWS_ID = config.get('Credentials', 'aws_access_key_id', None)
SECRET_ID = config.get('Credentials', 'aws_secret_access_key', None)
HOST = 'mechanicalturk.amazonaws.com'

mt = MTurkConnection(
    aws_access_key_id=AWS_ID,
    aws_secret_access_key=SECRET_ID,
    host=HOST,
)

# HIT Configuration - global title, description, keywords, qualifications
TITLE    = 'Provide Related Terms'
DESC     = 'Given a word or phrase, provide another (different) word that relates to the given one.'
KEYWORDS = 'opinions, relations, idea, brainstorm, crowdstorm'
QUAL     = Qualifications()
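
One way the connection and HIT settings above might be used to publish a HIT, sketched with an ExternalQuestion pointing at a hypothetical survey URL; the reward and assignment count are placeholders.

from boto.mturk.question import ExternalQuestion

# Hedged sketch: external form rendered in a 600px frame; URL is hypothetical.
question = ExternalQuestion('https://example.com/relate?phrase=' + PHRASE, 600)
hit = mt.create_hit(question=question,
                    title=TITLE,
                    description=DESC,
                    keywords=KEYWORDS,
                    qualifications=QUAL,
                    max_assignments=3,
                    reward=0.05)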
Example #46
from boto.compat import urlparse
from boto.exception import InvalidUriError

__version__ = '2.34.0'
Version = __version__  # for backward compatibility

# http://bugs.python.org/issue7980
datetime.datetime.strptime('', '')

UserAgent = 'Boto/%s Python/%s %s/%s' % (
    __version__,
    platform.python_version(),
    platform.system(),
    platform.release()
)
config = Config()

# Regex to disallow buckets violating charset or not [3..255] chars total.
BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$')
# Regex to disallow buckets with individual DNS labels longer than 63.
TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
                           r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json')


def init_logging():
    for file in BotoConfigLocations:
        try:
            logging.config.fileConfig(os.path.expanduser(file))
        except:
            # Ignore unreadable or malformed config files and try the next one.
            pass
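
A small sketch exercising the module-level regexes above; the helper name is ours, but the rules it applies (3-255 characters, allowed charset, no DNS label of 64+ characters) come straight from the patterns.

def looks_like_valid_bucket(name):
    # Charset/length check plus the per-label length check defined above.
    return bool(BUCKET_NAME_RE.match(name)) and not TOO_LONG_DNS_NAME_COMP.search(name)

print(looks_like_valid_bucket('my-bucket-2014'))   # True
print(looks_like_valid_bucket('ab'))               # False: shorter than 3 chars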
Example #47
class Provider(object):

    CredentialMap = {
        'aws':    ('aws_access_key_id', 'aws_secret_access_key',
                   'aws_security_token', 'aws_profile'),
        'google': ('gs_access_key_id',  'gs_secret_access_key',
                   None, None),
    }

    AclClassMap = {
        'aws':    Policy,
        'google': ACL
    }

    CannedAclsMap = {
        'aws':    CannedS3ACLStrings,
        'google': CannedGSACLStrings
    }

    HostKeyMap = {
        'aws':    's3',
        'google': 'gs'
    }

    ChunkedTransferSupport = {
        'aws':    False,
        'google': True
    }

    MetadataServiceSupport = {
        'aws': True,
        'google': False
    }

    # If you update this map please make sure to put "None" for the
    # right-hand-side for any headers that don't apply to a provider, rather
    # than simply leaving that header out (which would cause KeyErrors).
    HeaderInfoMap = {
        'aws': {
            HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
            METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'AWS',
            COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX +
                                                'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX +
                                           'copy-source-range',
            DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX +
                                            'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: None,
            SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX +
                                         'server-side-encryption',
            VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
            MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
            RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore',
        },
        'google': {
            HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
            METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'GOOG1',
            COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX +
                                                'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: None,
            DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX  +
                                            'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
            SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: None,
            # Note that this version header is not to be confused with
            # the Google Cloud Storage 'x-goog-api-version' header.
            VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: None,
            MFA_HEADER_KEY: None,
            RESTORE_HEADER_KEY: None,
        }
    }

    ErrorMap = {
        'aws': {
            STORAGE_COPY_ERROR: boto.exception.S3CopyError,
            STORAGE_CREATE_ERROR: boto.exception.S3CreateError,
            STORAGE_DATA_ERROR: boto.exception.S3DataError,
            STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError,
            STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError,
        },
        'google': {
            STORAGE_COPY_ERROR: boto.exception.GSCopyError,
            STORAGE_CREATE_ERROR: boto.exception.GSCreateError,
            STORAGE_DATA_ERROR: boto.exception.GSDataError,
            STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError,
            STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError,
        }
    }

    def __init__(self, name, access_key=None, secret_key=None,
                 security_token=None, profile_name=None):
        self.host = None
        self.port = None
        self.host_header = None
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.profile_name = profile_name
        self.name = name
        self.acl_class = self.AclClassMap[self.name]
        self.canned_acls = self.CannedAclsMap[self.name]
        self._credential_expiry_time = None

        # Load shared credentials file if it exists
        shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
        self.shared_credentials = Config(do_load=False)
        if os.path.isfile(shared_path):
            self.shared_credentials.load_from_path(shared_path)

        self.get_credentials(access_key, secret_key, security_token, profile_name)
        self.configure_headers()
        self.configure_errors()

        # Allow config file to override default host and port.
        host_opt_name = '%s_host' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_opt_name):
            self.host = config.get('Credentials', host_opt_name)
        port_opt_name = '%s_port' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', port_opt_name):
            self.port = config.getint('Credentials', port_opt_name)
        host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_header_opt_name):
            self.host_header = config.get('Credentials', host_header_opt_name)

    def get_access_key(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._access_key

    def set_access_key(self, value):
        self._access_key = value

    access_key = property(get_access_key, set_access_key)

    def get_secret_key(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._secret_key

    def set_secret_key(self, value):
        self._secret_key = value

    secret_key = property(get_secret_key, set_secret_key)

    def get_security_token(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._security_token

    def set_security_token(self, value):
        self._security_token = value

    security_token = property(get_security_token, set_security_token)

    def _credentials_need_refresh(self):
        if self._credential_expiry_time is None:
            return False
        else:
            # The credentials should be refreshed if they're going to expire
            # in less than 5 minutes.
            delta = self._credential_expiry_time - datetime.utcnow()
            # python2.6 does not have timedelta.total_seconds() so we have
            # to calculate this ourselves.  This is straight from the
            # datetime docs.
            seconds_left = (
                (delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
                 * 10 ** 6) / 10 ** 6)
            if seconds_left < (5 * 60):
                boto.log.debug("Credentials need to be refreshed.")
                return True
            else:
                return False

    def get_credentials(self, access_key=None, secret_key=None,
                        security_token=None, profile_name=None):
        access_key_name, secret_key_name, security_token_name, \
            profile_name_name = self.CredentialMap[self.name]

        # Load profile from shared environment variable if it was not
        # already passed in and the environment variable exists
        if profile_name is None and profile_name_name is not None and \
           profile_name_name.upper() in os.environ:
            profile_name = os.environ[profile_name_name.upper()]

        shared = self.shared_credentials

        if access_key is not None:
            self.access_key = access_key
            boto.log.debug("Using access key provided by client.")
        elif access_key_name.upper() in os.environ:
            self.access_key = os.environ[access_key_name.upper()]
            boto.log.debug("Using access key found in environment variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, access_key_name):
                self.access_key = shared.get(profile_name, access_key_name)
                boto.log.debug("Using access key found in shared credential "
                               "file for profile %s." % profile_name)
            elif config.has_option("profile %s" % profile_name,
                                   access_key_name):
                self.access_key = config.get("profile %s" % profile_name,
                                             access_key_name)
                boto.log.debug("Using access key found in config file: "
                               "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', access_key_name):
            self.access_key = shared.get('default', access_key_name)
            boto.log.debug("Using access key found in shared credential file.")
        elif config.has_option('Credentials', access_key_name):
            self.access_key = config.get('Credentials', access_key_name)
            boto.log.debug("Using access key found in config file.")

        if secret_key is not None:
            self.secret_key = secret_key
            boto.log.debug("Using secret key provided by client.")
        elif secret_key_name.upper() in os.environ:
            self.secret_key = os.environ[secret_key_name.upper()]
            boto.log.debug("Using secret key found in environment variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, secret_key_name):
                self.secret_key = shared.get(profile_name, secret_key_name)
                boto.log.debug("Using secret key found in shared credential "
                               "file for profile %s." % profile_name)
            elif config.has_option("profile %s" % profile_name, secret_key_name):
                self.secret_key = config.get("profile %s" % profile_name,
                                             secret_key_name)
                boto.log.debug("Using secret key found in config file: "
                               "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', secret_key_name):
            self.secret_key = shared.get('default', secret_key_name)
            boto.log.debug("Using secret key found in shared credential file.")
        elif config.has_option('Credentials', secret_key_name):
            self.secret_key = config.get('Credentials', secret_key_name)
            boto.log.debug("Using secret key found in config file.")
        elif config.has_option('Credentials', 'keyring'):
            keyring_name = config.get('Credentials', 'keyring')
            try:
                import keyring
            except ImportError:
                boto.log.error("The keyring module could not be imported. "
                               "For keyring support, install the keyring "
                               "module.")
                raise
            self.secret_key = keyring.get_password(
                keyring_name, self.access_key)
            boto.log.debug("Using secret key found in keyring.")

        if security_token is not None:
            self.security_token = security_token
            boto.log.debug("Using security token provided by client.")
        elif ((security_token_name is not None) and
              (access_key is None) and (secret_key is None)):
            # Only provide a token from the environment/config if the
            # caller did not specify a key and secret.  Otherwise an
            # environment/config token could be paired with a
            # different set of credentials provided by the caller
            if security_token_name.upper() in os.environ:
                self.security_token = os.environ[security_token_name.upper()]
                boto.log.debug("Using security token found in environment"
                               " variable.")
            elif shared.has_option(profile_name or 'default',
                                   security_token_name):
                self.security_token = shared.get(profile_name or 'default',
                                                 security_token_name)
                boto.log.debug("Using security token found in shared "
                               "credential file.")
            elif profile_name is not None:
                if config.has_option("profile %s" % profile_name,
                                     security_token_name):
                    boto.log.debug("config has option")
                    self.security_token = config.get("profile %s" % profile_name,
                                                     security_token_name)
                    boto.log.debug("Using security token found in config file: "
                                   "profile %s." % profile_name)
            elif config.has_option('Credentials', security_token_name):
                self.security_token = config.get('Credentials',
                                                 security_token_name)
                boto.log.debug("Using security token found in config file.")

        if ((self._access_key is None or self._secret_key is None) and
                self.MetadataServiceSupport[self.name]):
            self._populate_keys_from_metadata_server()
        self._secret_key = self._convert_key_to_str(self._secret_key)

    def _populate_keys_from_metadata_server(self):
        # get_instance_metadata is imported here because of a circular
        # dependency.
        boto.log.debug("Retrieving credentials from metadata server.")
        from boto.utils import get_instance_metadata
        timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
        attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
        # The num_retries arg is actually the total number of attempts made,
        # so the config options is named *_num_attempts to make this more
        # clear to users.
        metadata = get_instance_metadata(
            timeout=timeout, num_retries=attempts,
            data='meta-data/iam/security-credentials/')
        if metadata:
            # I'm assuming there's only one role on the instance profile.
            security = list(metadata.values())[0]
            self._access_key = security['AccessKeyId']
            self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
            self._security_token = security['Token']
            expires_at = security['Expiration']
            self._credential_expiry_time = datetime.strptime(
                expires_at, "%Y-%m-%dT%H:%M:%SZ")
            boto.log.debug("Retrieved credentials will expire in %s at: %s",
                           self._credential_expiry_time - datetime.now(), expires_at)

    def _convert_key_to_str(self, key):
        if isinstance(key, six.text_type):
            # the secret key must be bytes and not unicode to work
            #  properly with hmac.new (see http://bugs.python.org/issue5285)
            return str(key)
        return key

    def configure_headers(self):
        header_info_map = self.HeaderInfoMap[self.name]
        self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
        self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
        self.acl_header = header_info_map[ACL_HEADER_KEY]
        self.auth_header = header_info_map[AUTH_HEADER_KEY]
        self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
        self.copy_source_version_id = header_info_map[
            COPY_SOURCE_VERSION_ID_HEADER_KEY]
        self.copy_source_range_header = header_info_map[
            COPY_SOURCE_RANGE_HEADER_KEY]
        self.date_header = header_info_map[DATE_HEADER_KEY]
        self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
        self.metadata_directive_header = (
            header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
        self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
        self.resumable_upload_header = (
            header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
        self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY]
        self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
        self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
        self.mfa_header = header_info_map[MFA_HEADER_KEY]
        self.restore_header = header_info_map[RESTORE_HEADER_KEY]

    def configure_errors(self):
        error_map = self.ErrorMap[self.name]
        self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
        self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
        self.storage_data_error = error_map[STORAGE_DATA_ERROR]
        self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
        self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]

    def get_provider_name(self):
        return self.HostKeyMap[self.name]

    def supports_chunked_transfer(self):
        return self.ChunkedTransferSupport[self.name]
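
A brief, hedged sketch of instantiating the Provider above for both backends and reading a few of the values it resolves; credentials follow the lookup chain implemented in get_credentials() (explicit arguments, environment, shared credentials file, boto config, then the metadata server for AWS).

aws = Provider('aws')
print(aws.get_provider_name())           # 's3'
print(aws.supports_chunked_transfer())   # False

gs = Provider('google')
print(gs.acl_header)                     # 'x-goog-acl'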
Example #48
 def has_option(self, option):
     return Config.has_option(self, self.name, option)
Example #49
 def load_config(self):
     self._config = Config(do_load=False)
     self._config.load_from_sdb("botoConfigs", self.id)
Example #50
File: servicedef.py Project: 0t3dWCE/boto
 def get(self, name, default=None):
     return Config.get(self, self.name, name, default)
Example #51
class Provider(object):

    CredentialMap = {
        'aws': ('aws_access_key_id', 'aws_secret_access_key',
                'aws_security_token', 'aws_profile'),
        'google': ('gs_access_key_id', 'gs_secret_access_key', None, None),
    }

    AclClassMap = {'aws': Policy, 'google': ACL}

    CannedAclsMap = {'aws': CannedS3ACLStrings, 'google': CannedGSACLStrings}

    HostKeyMap = {'aws': 's3', 'google': 'gs'}

    ChunkedTransferSupport = {'aws': False, 'google': True}

    MetadataServiceSupport = {'aws': True, 'google': False}

    # If you update this map please make sure to put "None" for the
    # right-hand-side for any headers that don't apply to a provider, rather
    # than simply leaving that header out (which would cause KeyErrors).
    HeaderInfoMap = {
        'aws': {
            HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
            METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'AWS',
            COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY:
            AWS_HEADER_PREFIX + 'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY:
            AWS_HEADER_PREFIX + 'copy-source-range',
            DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY:
            AWS_HEADER_PREFIX + 'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: None,
            SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY:
            AWS_HEADER_PREFIX + 'server-side-encryption',
            VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
            MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
            RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore',
        },
        'google': {
            HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
            METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'GOOG1',
            COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY:
            GOOG_HEADER_PREFIX + 'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: None,
            DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY:
            GOOG_HEADER_PREFIX + 'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
            SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: None,
            # Note that this version header is not to be confused with
            # the Google Cloud Storage 'x-goog-api-version' header.
            VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: None,
            MFA_HEADER_KEY: None,
            RESTORE_HEADER_KEY: None,
        }
    }

    ErrorMap = {
        'aws': {
            STORAGE_COPY_ERROR: boto.exception.S3CopyError,
            STORAGE_CREATE_ERROR: boto.exception.S3CreateError,
            STORAGE_DATA_ERROR: boto.exception.S3DataError,
            STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError,
            STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError,
        },
        'google': {
            STORAGE_COPY_ERROR: boto.exception.GSCopyError,
            STORAGE_CREATE_ERROR: boto.exception.GSCreateError,
            STORAGE_DATA_ERROR: boto.exception.GSDataError,
            STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError,
            STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError,
        }
    }

    def __init__(self,
                 name,
                 access_key=None,
                 secret_key=None,
                 security_token=None,
                 profile_name=None):
        self.host = None
        self.port = None
        self.host_header = None
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.profile_name = profile_name
        self.name = name
        self.acl_class = self.AclClassMap[self.name]
        self.canned_acls = self.CannedAclsMap[self.name]
        self._credential_expiry_time = None

        # Load shared credentials file if it exists
        shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
        self.shared_credentials = Config(do_load=False)
        if os.path.isfile(shared_path):
            self.shared_credentials.load_from_path(shared_path)

        self.get_credentials(access_key, secret_key, security_token,
                             profile_name)
        self.configure_headers()
        self.configure_errors()

        # Allow config file to override default host and port.
        host_opt_name = '%s_host' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_opt_name):
            self.host = config.get('Credentials', host_opt_name)
        port_opt_name = '%s_port' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', port_opt_name):
            self.port = config.getint('Credentials', port_opt_name)
        host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_header_opt_name):
            self.host_header = config.get('Credentials', host_header_opt_name)

    def get_access_key(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._access_key

    def set_access_key(self, value):
        self._access_key = value

    access_key = property(get_access_key, set_access_key)

    def get_secret_key(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._secret_key

    def set_secret_key(self, value):
        self._secret_key = value

    secret_key = property(get_secret_key, set_secret_key)

    def get_security_token(self):
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._security_token

    def set_security_token(self, value):
        self._security_token = value

    security_token = property(get_security_token, set_security_token)

    def _credentials_need_refresh(self):
        if self._credential_expiry_time is None:
            return False
        else:
            # The credentials should be refreshed if they're going to expire
            # in less than 5 minutes.
            delta = self._credential_expiry_time - datetime.utcnow()
            # python2.6 does not have timedelta.total_seconds() so we have
            # to calculate this ourselves.  This is straight from the
            # datetime docs.
            seconds_left = (
                (delta.microseconds +
                 (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6)
            if seconds_left < (5 * 60):
                boto.log.debug("Credentials need to be refreshed.")
                return True
            else:
                return False

    def get_credentials(self,
                        access_key=None,
                        secret_key=None,
                        security_token=None,
                        profile_name=None):
        access_key_name, secret_key_name, security_token_name, \
            profile_name_name = self.CredentialMap[self.name]

        # Load profile from shared environment variable if it was not
        # already passed in and the environment variable exists
        if profile_name is None and profile_name_name is not None and \
           profile_name_name.upper() in os.environ:
            profile_name = os.environ[profile_name_name.upper()]

        shared = self.shared_credentials

        if access_key is not None:
            self.access_key = access_key
            boto.log.debug("Using access key provided by client.")
        elif access_key_name.upper() in os.environ:
            self.access_key = os.environ[access_key_name.upper()]
            boto.log.debug("Using access key found in environment variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, access_key_name):
                self.access_key = shared.get(profile_name, access_key_name)
                boto.log.debug("Using access key found in shared credential "
                               "file for profile %s." % profile_name)
            elif config.has_option("profile %s" % profile_name,
                                   access_key_name):
                self.access_key = config.get("profile %s" % profile_name,
                                             access_key_name)
                boto.log.debug("Using access key found in config file: "
                               "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', access_key_name):
            self.access_key = shared.get('default', access_key_name)
            boto.log.debug("Using access key found in shared credential file.")
        elif config.has_option('Credentials', access_key_name):
            self.access_key = config.get('Credentials', access_key_name)
            boto.log.debug("Using access key found in config file.")

        if secret_key is not None:
            self.secret_key = secret_key
            boto.log.debug("Using secret key provided by client.")
        elif secret_key_name.upper() in os.environ:
            self.secret_key = os.environ[secret_key_name.upper()]
            boto.log.debug("Using secret key found in environment variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, secret_key_name):
                self.secret_key = shared.get(profile_name, secret_key_name)
                boto.log.debug("Using secret key found in shared credential "
                               "file for profile %s." % profile_name)
            elif config.has_option("profile %s" % profile_name,
                                   secret_key_name):
                self.secret_key = config.get("profile %s" % profile_name,
                                             secret_key_name)
                boto.log.debug("Using secret key found in config file: "
                               "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', secret_key_name):
            self.secret_key = shared.get('default', secret_key_name)
            boto.log.debug("Using secret key found in shared credential file.")
        elif config.has_option('Credentials', secret_key_name):
            self.secret_key = config.get('Credentials', secret_key_name)
            boto.log.debug("Using secret key found in config file.")
        elif config.has_option('Credentials', 'keyring'):
            keyring_name = config.get('Credentials', 'keyring')
            try:
                import keyring
            except ImportError:
                boto.log.error("The keyring module could not be imported. "
                               "For keyring support, install the keyring "
                               "module.")
                raise
            self.secret_key = keyring.get_password(keyring_name,
                                                   self.access_key)
            boto.log.debug("Using secret key found in keyring.")

        if security_token is not None:
            self.security_token = security_token
            boto.log.debug("Using security token provided by client.")
        elif ((security_token_name is not None) and (access_key is None)
              and (secret_key is None)):
            # Only provide a token from the environment/config if the
            # caller did not specify a key and secret.  Otherwise an
            # environment/config token could be paired with a different
            # set of credentials provided by the caller.
            if security_token_name.upper() in os.environ:
                self.security_token = os.environ[security_token_name.upper()]
                boto.log.debug("Using security token found in environment"
                               " variable.")
            elif shared.has_option(profile_name or 'default',
                                   security_token_name):
                self.security_token = shared.get(profile_name or 'default',
                                                 security_token_name)
                boto.log.debug("Using security token found in shared "
                               "credential file.")
            elif config.has_option('Credentials', security_token_name):
                self.security_token = config.get('Credentials',
                                                 security_token_name)
                boto.log.debug("Using security token found in config file.")

        if ((self._access_key is None or self._secret_key is None)
                and self.MetadataServiceSupport[self.name]):
            self._populate_keys_from_metadata_server()
        self._secret_key = self._convert_key_to_str(self._secret_key)
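
        # Summary (added note, not original code): the precedence implemented
        # above is, for each piece of credential material:
        #   1. values passed explicitly by the caller
        #   2. environment variables named in CredentialMap
        #   3. the requested profile, first in the shared credentials file and
        #      then in the "profile <name>" section of the boto config
        #   4. the [default] section of the shared credentials file
        #   5. the [Credentials] section of the boto config (for the secret
        #      key, optionally via the configured keyring)
        #   6. the instance metadata service, when the provider supports it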

    def _populate_keys_from_metadata_server(self):
        # get_instance_metadata is imported here because of a circular
        # dependency.
        boto.log.debug("Retrieving credentials from metadata server.")
        from boto.utils import get_instance_metadata
        timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
        attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
        # The num_retries arg is actually the total number of attempts made,
        # so the config option is named *_num_attempts to make this clearer
        # to users.
        metadata = get_instance_metadata(
            timeout=timeout,
            num_retries=attempts,
            data='meta-data/iam/security-credentials/')
        if metadata:
            # Assume there is only one role on the instance profile.
            security = list(metadata.values())[0]
            self._access_key = security['AccessKeyId']
            self._secret_key = self._convert_key_to_str(
                security['SecretAccessKey'])
            self._security_token = security['Token']
            expires_at = security['Expiration']
            self._credential_expiry_time = datetime.strptime(
                expires_at, "%Y-%m-%dT%H:%M:%SZ")
            boto.log.debug("Retrieved credentials will expire in %s at: %s",
                           self._credential_expiry_time - datetime.utcnow(),
                           expires_at)
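
        # The metadata response handled above is keyed by IAM role name; an
        # illustrative shape (role name and values are hypothetical):
        #   {'my-instance-role': {'AccessKeyId': 'AKIA...',
        #                         'SecretAccessKey': '...',
        #                         'Token': '...',
        #                         'Expiration': '2015-01-01T00:00:00Z'}}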

    def _convert_key_to_str(self, key):
        if isinstance(key, unicode):
            # the secret key must be bytes and not unicode to work
            #  properly with hmac.new (see http://bugs.python.org/issue5285)
            return str(key)
        return key

    def configure_headers(self):
        header_info_map = self.HeaderInfoMap[self.name]
        self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
        self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
        self.acl_header = header_info_map[ACL_HEADER_KEY]
        self.auth_header = header_info_map[AUTH_HEADER_KEY]
        self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
        self.copy_source_version_id = header_info_map[
            COPY_SOURCE_VERSION_ID_HEADER_KEY]
        self.copy_source_range_header = header_info_map[
            COPY_SOURCE_RANGE_HEADER_KEY]
        self.date_header = header_info_map[DATE_HEADER_KEY]
        self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
        self.metadata_directive_header = (
            header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
        self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
        self.resumable_upload_header = (
            header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
        self.server_side_encryption_header = header_info_map[
            SERVER_SIDE_ENCRYPTION_KEY]
        self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
        self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
        self.mfa_header = header_info_map[MFA_HEADER_KEY]
        self.restore_header = header_info_map[RESTORE_HEADER_KEY]

    def configure_errors(self):
        error_map = self.ErrorMap[self.name]
        self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
        self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
        self.storage_data_error = error_map[STORAGE_DATA_ERROR]
        self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
        self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]

    def get_provider_name(self):
        return self.HostKeyMap[self.name]

    def supports_chunked_transfer(self):
        return self.ChunkedTransferSupport[self.name]
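
    # A minimal usage sketch (added for illustration, not part of the original
    # module); it assumes this class is boto's Provider, importable from
    # boto.provider:
    #
    #   from boto.provider import Provider
    #   provider = Provider('aws')
    #   # credentials resolve as described in get_credentials() above
    #   print(provider.access_key, provider.get_provider_name())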