Example 1
 def __init__(self, conn):
     self.conn = conn
     self.availability_zone = get_instance_metadata()["placement"]["availability-zone"]
     self.instance_id = get_instance_metadata()["instance-id"]
     # 3 seconds sleep period for repeatable tasks
     self.TIME_SLEEP = 3
     logging.debug("Our instanceID is %s in %s availability zone" % (self.instance_id, self.availability_zone))
Example 2
def rewrite_query(username, query):
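    # If the query embeds literal 'aws_access_key_id=...;aws_secret_access_key=...' credentials,
    # replace them with temporary credentials taken from the instance's IAM role
    # (the first role listed under iam/security-credentials is used).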
    cred_pattern="'aws_access_key_id=\S*;aws_secret_access_key=\S*'"
    if re.search(cred_pattern, query) is not None:
        role=list(utils.get_instance_metadata()['iam']['security-credentials'])[0]
        creds=utils.get_instance_metadata()['iam']['security-credentials'][role]
        repl_pattern="'aws_access_key_id=%s;aws_secret_access_key=%s;token=%s'" % (creds['AccessKeyId'], creds['SecretAccessKey'], creds['Token'])
        new_query = re.sub(cred_pattern, repl_pattern, query).replace("\n"," ")
    else:
        new_query = query
    return new_query
Example 3
def ec2_roles():
    # Get meta-data from instance
    metadata = get_instance_metadata()

    # Chop off the AZ letter to get the region
    region = metadata['placement']['availability-zone'][:-1]

    # Connect to EC2 and get the instance information for this instance id
    conn = boto.ec2.connect_to_region(region,
    aws_access_key_id='XXXXXXXXXXXXXXXXXXXX',
    aws_secret_access_key='XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
    reservation = conn.get_all_reservations(filters={'instance-id': metadata['instance-id']})

    # Dump tags from instance. Feel free to add variables here to get other tags.
    # Use var = instance.tags['TAG NAME']
    instances = [i for r in reservation for i in r.instances]
    for instance in instances:
        roles = instance.tags['Roles']

    # Initialize grains dict
    grains = {}

    # Fill grains dict with tags
    # Don't forget to add any variables you added from above!
    grains['ec2_roles'] = roles.split(',')

    # Return our dict
    return grains
Example 4
 def configure(self, config, parser):
     super(EucaCloudPlugin, self).configure(config, parser)
     host = config.context.web_log.get('host', False)
     if not host:
         md = get_instance_metadata()
         pub, ipv4 = 'public-hostname', 'local-ipv4'
         config.context.web_log['host'] = md[pub] if pub in md else md[ipv4]
Example 5
def __get_connection_SNS():
    """ Ensure connection to SNS """
    try:
        if get_global_option("aws_access_key_id") and get_global_option("aws_secret_access_key"):
            logger.debug("Authenticating to SNS using " "credentials in configuration file")
            connection = sns.connect_to_region(
                get_global_option("region"),
                aws_access_key_id=get_global_option("aws_access_key_id"),
                aws_secret_access_key=get_global_option("aws_secret_access_key"),
            )
        else:
            try:
                logger.debug("Authenticating to SNS using EC2 instance profile")
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = sns.connect_to_region(
                    metadata["placement"]["availability-zone"][:-1],
                    profile_name=metadata["iam"]["info"][u"InstanceProfileArn"],
                )
            except KeyError:
                logger.debug("Authenticating to SNS using " "env vars / boto configuration")
                connection = sns.connect_to_region(get_global_option("region"))

    except Exception as err:
        logger.error("Failed connecting to SNS: {0}".format(err))
        logger.error("Please report an issue at: " "https://github.com/sebdah/dynamic-dynamodb/issues")
        raise

    logger.debug("Connected to SNS in {0}".format(get_global_option("region")))
    return connection
Example 6
    def _connect(self, **kwargs):
        cloud_config = self._config.plugins[self.full_name]
        context = self._config.context
        self._instance_metadata = get_instance_metadata()
        euca_path = "/services/Eucalyptus"
        euca_port = 8773
        ec2_region = RegionInfo()
        ec2_region.name = 'eucalyptus'
        ec2_region.endpoint = context.cloud.ec2_endpoint
        connection_args = { 'is_secure': False,
                            'debug': 0,
                            'port' : 8773,
                            'path' : euca_path,
                            'host' : context.cloud.ec2_endpoint,
                            'api_version': '2012-07-20',
                            'region': ec2_region }

        if float(boto.__version__[0:3]) >= 2.6:
            connection_args['validate_certs'] = False

        self._connection = boto.connect_ec2(**connection_args)

        log.info('Aminating in region {0}: http://{1}:{2}{3}'.format(ec2_region.name,
                                                                      context.cloud.ec2_endpoint,
                                                                      euca_port,
                                                                      euca_path))
Example 7
def _getCurrentAWSZone(spotBid=None, nodeType=None, ctx=None):
    zone = None
    try:
        import boto
        from boto.utils import get_instance_metadata
    except ImportError:
        pass
    else:
        zone = os.environ.get('TOIL_AWS_ZONE', None)
        if spotBid:
            # if spot bid is present, all the other parameters must be as well
            assert bool(spotBid) == bool(nodeType) == bool(ctx)
            # if the zone is unset and we are using the spot market, optimize our
            # choice based on the spot history
            return optimize_spot_bid(ctx=ctx, instance_type=nodeType, spot_bid=spotBid)
        if not zone:
            zone = boto.config.get('Boto', 'ec2_region_name')
            if zone is not None:
                zone += 'a'  # derive an availability zone in the region
        if not zone:
            try:
                zone = get_instance_metadata()['placement']['availability-zone']
            except KeyError:
                pass
    return zone
Example 8
    def _populate_keys_from_metadata_server(self):
        # get_instance_metadata is imported here because of a circular
        # dependency.
        boto.log.debug("Retrieving credentials from metadata server.")
        from boto.utils import get_instance_metadata

        timeout = config.getfloat("Boto", "metadata_service_timeout", 1.0)
        attempts = config.getint("Boto", "metadata_service_num_attempts", 1)
        # The num_retries arg is actually the total number of attempts made,
        # so the config options is named *_num_attempts to make this more
        # clear to users.
        metadata = get_instance_metadata(
            timeout=timeout, num_retries=attempts, data="meta-data/iam/security-credentials/"
        )
        if metadata:
            # I'm assuming there's only one role on the instance profile.
            security = metadata.values()[0]
            self._access_key = security["AccessKeyId"]
            self._secret_key = self._convert_key_to_str(security["SecretAccessKey"])
            self._security_token = security["Token"]
            expires_at = security["Expiration"]
            self._credential_expiry_time = datetime.strptime(expires_at, "%Y-%m-%dT%H:%M:%SZ")
            boto.log.debug(
                "Retrieved credentials will expire in %s at: %s",
                self._credential_expiry_time - datetime.now(),
                expires_at,
            )
Example 9
File: provider.py Project: 10sr/hue
 def _populate_keys_from_metadata_server(self):
     # get_instance_metadata is imported here because of a circular
     # dependency.
     boto.log.debug("Retrieving credentials from metadata server.")
     from boto.utils import get_instance_metadata
     timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
     attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
     # The num_retries arg is actually the total number of attempts made,
     # so the config options is named *_num_attempts to make this more
     # clear to users.
     metadata = get_instance_metadata(
         timeout=timeout, num_retries=attempts,
         data='meta-data/iam/security-credentials/')
     if metadata:
         creds = self._get_credentials_from_metadata(metadata)
         self._access_key = creds[0]
         self._secret_key = creds[1]
         self._security_token = creds[2]
         expires_at = creds[3]
         # I'm assuming there's only one role on the instance profile.
         self._credential_expiry_time = datetime.strptime(
             expires_at, "%Y-%m-%dT%H:%M:%SZ")
         boto.log.debug("Retrieved credentials will expire in %s at: %s",
                        self._credential_expiry_time - datetime.now(),
                        expires_at)
Example 10
    def _init_logging(self):
        self.log = logging.getLogger('my_logger')
 
        # Should set the level on the logger itself to DEBUG
        # and let the handlers below do the filtering 
        self.log.setLevel(logging.DEBUG)
 
        # Setting console output to DEBUG for easier debugging
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        self.log.addHandler(ch)
 
        """
        Assuming that this script is running on an EC2 instance
        we grab the instance ID so it can be included in the SNS
        message for reference.  If you're not running this on EC2,
        remove this rather than trap the exception because the timeout
        is several seconds long.
        """
        instance_id = get_instance_metadata()['instance-id']
        sns = SNSLogHandler(self.sns_topic, self.sns_subject, instance_id)
 
        # We only want critical messages bothering us via AWS SNS
        sns.setLevel(logging.CRITICAL)
        sns.setFormatter(formatter)
        self.log.addHandler(sns)
Example 11
def call_services(args):
    bucket = connect_s3().get_bucket("nlp-data")
    key = bucket.get_key(args.s3key)
    if key is None:
        return

    folder = args.s3key.split("/")[0]

    eventfile = "%s_processing/%s_%s_%s" % (
        folder,
        get_instance_metadata()["local-hostname"],
        str(time.time()),
        str(int(random.randint(0, 100))),
    )

    key.copy("nlp-data", eventfile)
    key.delete()

    k = Key(bucket)
    k.key = eventfile

    lines = k.get_contents_as_string().split("\n")
    map(lambda x: process_file(x, args.services.split(",")), lines)
    print args.s3key, len(lines), "ids completed"

    k.delete()
Example 12
 def compute_instance_id(self):
     """
     Look up the EC2 instance ID for this node.
     """
     instance_id = get_instance_metadata().get("instance-id", None)
     if instance_id is None:
         raise UnknownInstanceID(self)
     return instance_id.decode("ascii")
Example 13
 def account( self ):
     try:
         arn = self.iam.get_user( ).arn
     except:
         # Agent boxes run with IAM role credentials instead of user credentials.
         arn = get_instance_metadata( )[ 'iam' ][ 'info' ][ 'InstanceProfileArn' ]
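     # ARN layout is arn:partition:service:region:account:resource, so the account
     # number is the fifth colon-separated field.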
     _, partition, service, region, account, resource = arn.split( ':', 6 )
     return account
Example 14
def _get_sts_credentials():
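    # Read the temporary keys for the instance's (single) IAM role and build a quoted
    # credentials string (access key, secret key, session token), e.g. for a Redshift COPY.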
    metadata = get_instance_metadata()['iam']['security-credentials'].values().pop()
    access_key = metadata['AccessKeyId']
    secret_key = metadata['SecretAccessKey']
    token = metadata['Token']
    credentials = "credentials 'aws_access_key_id=%s;aws_secret_access_key=%s;token=%s'"
    credentials %= (access_key, secret_key, token)
    return credentials
Example 15
def get_volume(conn, device):
    '''Returns volume to make snapshot'''
    instance_id = get_instance_metadata()["instance-id"]
    logging.debug("Our instanceID is %s" % instance_id)
    volumes = conn.get_all_volumes(filters={
                                   'attachment.instance-id': instance_id,
                                   'attachment.device': device})
    logging.debug("Our volume is %s" % volumes[0])
    return volumes[0]
Example 16
 def setUpClass( cls ):
     super( CgcloudTestCase, cls ).setUpClass( )
     if running_on_ec2( ):
         os.environ.setdefault( 'CGCLOUD_ZONE',
                                get_instance_metadata( )[ 'placement' ][ 'availability-zone' ] )
     suffix = hex( int( time.time( ) ) )[ 2: ]
     assert len( suffix ) == test_namespace_suffix_length
     cls.namespace = '/test/%s/' % suffix
     os.environ.setdefault( 'CGCLOUD_NAMESPACE', cls.namespace )
Example 17
def get_self_instance_id():
    if not silent and verbose > 0:
        print "Enquiring self instance id"
    metadata = utils.get_instance_metadata()
    instance_id = metadata.get('instance-id')
    if not silent and verbose > 0:
        print "Instance Id: %s" % (instance_id)

    return instance_id
Example 18
    def set_aws_region():
        """
        Set an environment variable AWS_DEFAULT_REGION with the name of the AWS Region got from instance metadata.
        :return: None
        """
        aws_region = utils.get_instance_metadata(data='meta-data/placement/')['availability-zone'][:-1]
        LOG.debug("AWS Region : %s", aws_region)

        os.environ["AWS_DEFAULT_REGION"] = aws_region
Example 19
def get_metadata():
    """
    Returns the metadata information about the instance.
    """

    global _metadata
    try:
        return _metadata
    except NameError:
        _metadata = get_instance_metadata()
        return _metadata
Example 20
    def __init__(self, aws_region):
        if not aws_region:
            instance_metadata = get_instance_metadata(timeout=5)
            if not instance_metadata:
                raise ImproperlyConfigured('Failed to get instance metadata')
            aws_region = instance_metadata['placement']['availability-zone']
            # The AWS Region will return as `us-east-1a` but we want
            # `us-east-1` so we trim the trailing character.
            aws_region = aws_region[:-1]

        self.aws_region = aws_region
Example 21
    def get_allocation_id(self):
        """
        Get the allocation id
        :return:
        """
        metadata = get_instance_metadata()
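        # Match this instance's public IPv4 against every Elastic IP in the account
        # to recover the association and allocation ids.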
        for address in self.ec2_connection.get_all_addresses():
            if address.public_ip == metadata['public-ipv4']:
                return address.association_id, address.allocation_id, address.public_ip

        return None, None, None
Example 22
    def run(self):
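        # Discover peers: default to this instance's auto-scaling group (via its
        # aws:autoscaling:groupName tag) when no explicit filters are given, then
        # print id / private IP / public IP for every running peer as JSON.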

        ec2 = self.session(region=self.region).resource('ec2')

        if self.instance_id is None:
            self.instance_id = get_instance_metadata(data='meta-data/instance-id').keys()[0]

        if self.filters is None:
            instance = ec2.Instance(self.instance_id)

            autoscaling_group_tags = dict(map(
                lambda tag_pair: (tag_pair['Key'], tag_pair['Value']),
                filter(
                    lambda tag_pair: tag_pair['Key'] == 'aws:autoscaling:groupName',
                    instance.tags
                )
            ))

            self.filters = [{
                'Name': 'tag-key',
                'Values': ['aws:autoscaling:groupName'],
            }, {
                'Name': 'tag-value',
                'Values': [
                    autoscaling_group_tags['aws:autoscaling:groupName']
                ]
            }]

        instances = ec2.instances.filter(Filters=self.filters)

        excluded_instances = []
        if self.filter_self:
            excluded_instances.append(self.instance_id)

        peer_metadata = map(
            lambda instance: {
                'instance_id': instance.id,
                'private_ip': instance.private_ip_address,
                'public_ip': instance.public_ip_address,
            } if (instance.state['Name'] == 'running') and (instance.id not in excluded_instances) else None,
            instances
        )

        def exists(it):
            return (it is not None)

        peer_metadata = filter(exists, peer_metadata)

        print json.dumps(
            peer_metadata,
            sort_keys=True,
            indent=4,
            separators=(',', ': ')
        )
Example 23
def config():
    metadata = utils.get_instance_metadata()
    
    region = metadata['placement']['availability-zone']
    instance_id = metadata['instance-id']
    size = 64 # in GB
    drives = ['sdf', 'sdg', 'sdh', 'sdi' ]
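    # Four EBS volumes are created and attached, then assembled below into a RAID10
    # array managed by LVM and mounted at /data for MongoDB.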
    
    conn = EC2Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_KEY)
    
    for drive in drives:
        target = '/dev/%s' % (drive)
        vol = createEBS(conn, size, region)
        time.sleep(5)
        
        while True:
            try:
                vol.attach(instance_id, target)
                break
            except:
                printException()
                time.sleep(5)
        
        while True:
            if os.path.exists(target):
                break
            else:
                printException()
                time.sleep(5)

    # Build bash command to run
    bash = ''
    for drive in drives:
        bash += '\n echo ",,L" | sudo sfdisk /dev/' + drive
    bash += '\n'
    bash += 'sleep 10 \n' ## Not sure if this is necessary.
    bash += 'sudo /sbin/mdadm /dev/md0 --create --level=10 --raid-devices=' + str(len(drives))
    for drive in drives:
        bash += ' /dev/' + drive + '1'
    bash += '\n '
    bash += 'sleep 10 \n' ## Not sure if this is necessary, either.
    bash += 'sudo /sbin/pvcreate /dev/md0 \n'
    bash += 'sudo /sbin/vgcreate -s 64M mongodb_vg /dev/md0 \n'
    bash += 'sudo /sbin/lvcreate -l ' + str(size * 1000 / 64) + ' -nmongodb_lv mongodb_vg \n'
    bash += 'sudo /sbin/mkfs.ext4 /dev/mongodb_vg/mongodb_lv \n'
    
    bash += 'echo "/dev/mongodb_vg/mongodb_lv /data ext4 defaults,noatime 0 0" | sudo -E tee -a /etc/fstab \n'
    bash += 'sudo mkdir /data \n'
    bash += 'sudo mount /dev/mongodb_vg/mongodb_lv /data \n'
    
    print bash
    os.system(bash)
    
    """
Example 24
    def _get_instance_info(self):
        ''' Get instance metadata and identity data'''

        metadata = get_instance_metadata(timeout=1, num_retries=2)

        if metadata == {}:
            raise Exception("Should be on an EC2 instance for this inventory script to work.")

        identity = get_instance_identity(timeout=1, num_retries=2)

        return metadata, identity
Example 25
 def setInstanceId(self):
     try:
         # This looks a little weird, but the idea is that if you would like
         # to have the InstanceId automatically filled in, then simply
         # add the key in the yaml file, but not the value. If you'd like
         # to override it, then you can override it by providing a value.
         # So, this covers the case that the key is provided, but no value
         if not self.dims:
             self.dims['InstanceId'] = get_instance_metadata()['instance-id']
     except:
         logger.warn('Failed to get an instance ID for this node from Amazon')
Example 26
    def __init__(self, config=None):
        """
        Initialize the AWS Provisioner object. The object is created in two distinct
        ways:

        1.  The first is by the `toil launch-cluster` utility which does not pass a config
            and creates a provisioner. Fields are initialized to None and
            are set later when the leader is created via `self.launchCluster`. This
            round-about initialization is necessary because launch-cluster is a
            classmethod.

        2.  The second is used when doing regular autoscaling and the provisioner is
            initialized with a config file. This happens in `Toil._setProvisioner()`

        This is due to the fact that the provisioner is used both in Toil runs to manage
        autoscaling as well as outside of Toil runs to launch clusters and manage statically
        provisioned nodes. Static provisioned nodes are those that the user explicitly adds into
        the cluster via `launch-cluster --workers n`, which will launch a cluster with n statically
        provisioned nodes.

        :param config: Optional config object from common.py
        :param batchSystem:
        """
        super(AWSProvisioner, self).__init__(config)
        self.spotBid = None
        self.instanceType = {}
        if config:
            self.instanceMetaData = get_instance_metadata()
            self.clusterName = self._getClusterNameFromTags(self.instanceMetaData)
            self.ctx = self._buildContext(clusterName=self.clusterName)
            self.leaderIP = self.instanceMetaData['local-ipv4']  # this is PRIVATE IP
            self.keyName = self.instanceMetaData['public-keys'].keys()[0]
            self.tags = self._getLeader(self.clusterName).tags
            self.masterPublicKey = self._setSSH()
            self.nodeStorage = config.nodeStorage
            assert config.preemptableNodeType or config.nodeType
            if config.preemptableNodeType is not None:
                nodeBidTuple = config.preemptableNodeType.split(':', 1)
                preemptable = True
                self.spotBid = nodeBidTuple[1]
                self.instanceType[preemptable] = ec2_instance_types[nodeBidTuple[0]]
            else:
                preemptable = False
                self.instanceType[preemptable] = ec2_instance_types[config.nodeType]
        else:
            self.ctx = None
            self.clusterName = None
            self.instanceMetaData = None
            self.leaderIP = None
            self.keyName = None
            self.tags = None
            self.masterPublicKey = None
            self.nodeStorage = None
        self.subnetID = None
Example 27
def get_metadata_credentials():
    meta_data = get_instance_metadata(data='meta-data/iam/security-credentials',
                                      num_retries=1, timeout=2)
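    # A falsy result means the metadata service couldn't be reached (e.g. not running
    # on EC2); otherwise the first role's temporary keys are printed.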
    if not meta_data:
        logging.debug('Failed to contact instance meta-data server.')
    else:
        security = meta_data.values()[0]
        access_key = security['AccessKeyId']
        secret_key = security['SecretAccessKey']
        security_token = security['Token']
    
        print_credentials(access_key, secret_key, security_token)
Example 28
    def send(self):
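        # Publish the collected metrics to CloudWatch, dimensioned per instance and,
        # when the instance belongs to an auto-scaling group, per group as well.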
        metadata = get_instance_metadata()
        instance_id = metadata['instance-id']
        region = metadata['placement']['availability-zone'][0:-1]

        cw = cloudwatch.connect_to_region(region)

        group = self._get_auto_scaling_group_name(instance_id, region)

        for (unit, metrics) in self._metrics.items():
            cw.put_metric_data('EC2', metrics.keys(), metrics.values(), unit=unit, dimensions={'InstanceId': instance_id})
            if group:
                cw.put_metric_data('EC2', metrics.keys(), metrics.values(), unit=unit, dimensions={'AutoScalingGroupName': group})
Example 29
def handle_instance_metadata():
    '''
    {'instance-type': 't1.micro', 
     'instance-id': 'i-807e52d7', 
     'iam': {'info': {'InstanceProfileArn': 'arn:aws:iam::334918212912:instance-profile/django_frontend_nimbostratus',
                      'InstanceProfileId': 'AIPAIMOLOJUADL3JWAN56',
                      'Code': 'Success',
                      'LastUpdated':
                      '2013-08-22T20:07:44Z'},
            'security-credentials': {'django_frontend_nimbostratus':
                                        {'Code': 'Success',
                                         'LastUpdated': '2013-08-22T20:08:22Z',
                                         'AccessKeyId': 'ASIAJ432XXHBO2V3R5OA',
                                         'SecretAccessKey': 'x+M61ZRT/TgUZ3UnGtjY40wOK9UTeTilnNol98kX',
                                         'Token': 'glN...AU',
                                         'Expiration': '2013-08-23T02:15:14Z',
                                         'Type': 'AWS-HMAC'}}},
     'local-hostname': 'ip-10-130-77-91.ap-southeast-1.compute.internal',
     'network': {'interfaces': {'macs': {'12:31:41:00:4e:91': {'local-hostname': 'ip-10-130-77-91.ap-southeast-1.compute.internal',
                                                               'public-hostname': 'ec2-54-254-24-239.ap-southeast-1.compute.amazonaws.com',
                                                               'public-ipv4s': '54.254.24.239',
                                                               'mac': '12:31:41:00:4e:91',
                                                               'owner-id': '334918212912',
                                                               'local-ipv4s': '10.130.77.91',
                                                               'device-number': '0'}}}},
     'hostname': 'ip-10-130-77-91.ap-southeast-1.compute.internal',
     'ami-id': 'ami-a02f66f2',
     'kernel-id': 'aki-fe1354ac',
     'instance-action': 'none',
     'profile': 'default-paravirtual',
     'reservation-id': 'r-e9efbebe',
     'security-groups': 'django_frontend_nimbostratus_sg',
     'metrics': {'vhostmd': '<?xml version="1.0" encoding="UTF-8"?>'},
     'mac': '12:31:41:00:4E:91',
     'public-ipv4': '54.254.24.239',
     'ami-manifest-path': '(unknown)',
     'local-ipv4': '10.130.77.91',
     'placement': {'availability-zone': 'ap-southeast-1a'},
     'ami-launch-index': '0',
     'public-hostname': 'ec2-54-254-24-239.ap-southeast-1.compute.amazonaws.com',
     'public-keys': {'django_frontend_nimbostratus': ['ssh-rsa A...jxT django_frontend_nimbostratus', '']},
     'block-device-mapping': {'ami': '/dev/sda1', 'root': '/dev/sda1', 'ephemeral0': 'sdb'}
    }
    '''
    meta_data = get_instance_metadata()
    logging.debug(pprint.pformat(meta_data))
    
    logging.info('Instance type: %s' % meta_data['instance-type'])
    logging.info('AMI ID: %s' % meta_data['ami-id'])
    logging.info('Security groups: %s' % meta_data['security-groups'])
    logging.info('Availability zone: %s' % meta_data['placement']['availability-zone'])
Example 30
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug('Connecting to DynamoDB in {0}'.format(
            get_global_option('region')))

        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to DynamoDB using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to DynamoDB using '
                    'env vars / boto configuration')
                connection = dynamodb2.connect_to_region(
                    get_global_option('region'))

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(
                get_global_option('region')))

    return connection
Example 31
def run():
    parser = argparse.ArgumentParser(
        description=
        'Loops through all EBS volumes, and snapshots them, then loops through all snapshots, and removes the oldest ones.'
    )
    parser.add_argument(
        '--region',
        metavar='REGION',
        help=
        'the region to loop through and snapshot (default is current region of EC2 instance this is running on). E.g. us-east-1'
    )
    parser.add_argument(
        '--max-snapshots-per-volume',
        metavar='SNAPSHOTS',
        default=3,
        type=int,
        help=
        'the maximum number of snapshots to keep per EBS volume. The oldest snapshots will be deleted. Default: 3'
    )
    parser.add_argument(
        '--snapshot-only',
        action='store_true',
        default=False,
        help='Only snapshot EBS volumes, do not remove old snapshots')
    parser.add_argument(
        '--remove-only',
        action='store_true',
        default=False,
        help='Only remove old snapshots, do not create new snapshots')
    parser.add_argument('--verbose',
                        '-v',
                        action='count',
                        help='enable verbose output (-vvv for more)')
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__,
                        help='display version number and exit')
    parser.add_argument(
        '--tags',
        nargs="+",
        help=
        'Only snapshot instances that match the passed-in tags. E.g. --tag Name:foo will snapshot all instances with a tag `Name` whose value is `foo`'
    )
    parser.add_argument(
        '--reverse-tags',
        action='store_true',
        default=False,
        help=
        'Do a reverse match on the passed in tags. E.g. --tag Name:foo --reverse-tags will snapshot all instances that do not have a `Name` tag with the value `foo`'
    )
    parser.add_argument(
        '--label',
        action='store',
        help=
        'Only snapshots that match the passed-in label are created or deleted. Default: None (all snapshots are selected). This lets you keep different snapshot strategies, for example daily, weekly and monthly; the daily label won\'t delete the weekly one'
    )
    parser.add_argument(
        '--cross-account-number',
        action='store',
        help=
        'Do a cross-account snapshot (this is the account number to do snapshots on). NOTE: This requires that you pass in the --cross-account-role parameter. E.g. --cross-account-number 111111111111 --cross-account-role Snapshot'
    )
    parser.add_argument(
        '--cross-account-role',
        action='store',
        help=
        'The name of the role that backup-monkey will assume when doing a cross-account snapshot. E.g. --cross-account-role Snapshot'
    )

    args = parser.parse_args()

    if args.cross_account_number and not args.cross_account_role:
        parser.error(
            'The --cross-account-role parameter is required if you specify --cross-account-number (doing a cross-account snapshot)'
        )

    if args.cross_account_role and not args.cross_account_number:
        parser.error(
            'The --cross-account-number parameter is required if you specify --cross-account-role (doing a cross-account snapshot)'
        )

    if args.reverse_tags and not args.tags:
        parser.error(
            'The --tags parameter is required if you specify --reverse-tags (doing a blacklist filter)'
        )

    if args.label and len(args.label) > LIMIT_LABEL:
        parser.error('The --label parameter length should be less than 32')

    Logging().configure(args.verbose)

    log.debug("CLI parse args: %s", args)

    if args.region:
        region = args.region
    else:
        # If no region was specified, assume this is running on an EC2 instance
        # and work out what region it is in
        log.debug("Figure out which region I am running in...")
        instance_metadata = get_instance_metadata(timeout=5)
        log.debug('Instance meta-data: %s', instance_metadata)
        if not instance_metadata:
            _fail(
                'Could not determine region. This script is either not running on an EC2 instance (in which case you should use the --region option), or the meta-data service is down'
            )

        region = instance_metadata['placement']['availability-zone'][:-1]
        log.debug("Running in region: %s", region)

    try:
        monkey = BackupMonkey(region, args.max_snapshots_per_volume, args.tags,
                              args.reverse_tags, args.label,
                              args.cross_account_number,
                              args.cross_account_role)

        if not args.remove_only:
            monkey.snapshot_volumes()
        if not args.snapshot_only:
            monkey.remove_old_snapshots()

    except BackupMonkeyException as e:
        _fail(e.message)

    log.info('Backup Monkey completed successfully!')
    sys.exit(0)
Example 32
def get_credentials():
    obj = get_instance_metadata()
    return obj['iam']['security-credentials'].values()[0]
Example 33
def execute_task_from_sqs():
    instance_meta = get_instance_metadata()
    inst_ip = instance_meta.get('public-ipv4')
    inst_id = instance_meta.get('instance-id')
    logger.info("IMPORTANT: ip: %s, instance id: %s", inst_ip, inst_id)
    # increment the count of instances spun up during the day.
    redis_db = connect_to_redis_database(redis_host=REDIS_HOST,
                                         redis_port=REDIS_PORT)
    increment_metric_counter(INSTANCES_COUNTER_REDIS_KEY, redis_db)

    set_global_variables_from_data_file()
    while 1:  # try to read from the queue until a new message arrives
        TASK_QUEUE_NAME = random.choice([q for q in QUEUES_LIST.values()])
        logger.info("Try to get task message from queue %s.",
                    TASK_QUEUE_NAME)
        if TEST_MODE:
            msg = test_read_msg_from_fs(TASK_QUEUE_NAME)
        else:
            msg = read_msg_from_sqs(TASK_QUEUE_NAME)
        if msg is None:
            time.sleep(3)
            continue
        metadata, task_queue = msg  # store task_queue to re-use this instance
                                    #  later
        break
    # due to task performance may take more than 12 hrs remove task immediately
    task_queue.task_done()
    logger.info("Task message was successfully received and "
                "removed form queue.")
    logger.info("Whole tasks msg: %s", str(metadata))
    increment_metric_counter(TASKS_COUNTER_REDIS_KEY, redis_db)
    update_handled_tasks_set(HANDLED_TASKS_SORTED_SET, redis_db)

    switch_branch_if_required(metadata)
    task_id = metadata.get('task_id', metadata.get('task', None))
    searchterms_str = metadata.get('searchterms_str', None)
    url = metadata.get('url', None)
    urls = metadata.get('urls', None)
    site = metadata['site']
    server_name = metadata['server_name']
    cmd_line_args = metadata.get('cmd_args', {})  # dict of extra command-line
                                                  # args, such as ordering

    # make sure the job output dir exists
    if not os.path.exists(os.path.expanduser(JOB_OUTPUT_PATH)):
        logger.debug("Create job output dir %s",
                      os.path.expanduser(JOB_OUTPUT_PATH))
        os.makedirs(os.path.expanduser(JOB_OUTPUT_PATH))

    local_job_id = job_to_fname(metadata)
    output_path = '%s/%s' % (os.path.expanduser(JOB_OUTPUT_PATH), local_job_id)
    cmd = ('cd %s/product-ranking'
           ' && scrapy crawl %s -a %s="%s" %s'
           ' -s LOG_FILE=%s -o %s &')
    # prepare command-line arguments
    options = ' '

    for key, value in cmd_line_args.items():
        options += ' -a %s=%s' % (key, value)
    if searchterms_str:
        arg_name = 'searchterms_str'
        arg_value = searchterms_str
    if url:
        arg_name = 'product_url'
        arg_value = url
    if urls:
        arg_name = 'products_url'
        arg_value = urls
    cmd = cmd % (
        REPO_BASE_PATH, site+'_products', arg_name, arg_value,
        options, output_path+'.log', output_path+'.jl'
    )
    logger.info("Runing %s", cmd)

    data_bs_file = None
    if "with_best_seller_ranking" in metadata \
            and bool(metadata["with_best_seller_ranking"]):
        data_bs_file = output_path + '_bs.jl'
        cmdbs = ('cd %s/product-ranking'
                 ' && scrapy crawl %s -a %s="%s" %s'
                 ' -a search_sort=%s -s LOG_FILE=%s -o %s &') % (
            REPO_BASE_PATH, site + '_products', arg_name, arg_value,
            options, "best_sellers", output_path + '_bs.log', data_bs_file
        )

        pbs = Popen(cmdbs, shell=True, stdout=PIPE, stderr=PIPE)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)

    # report progress and wait until the task is done
    report_progress_and_wait(output_path+'.jl', output_path+'.log', data_bs_file, metadata)
    # upload the files to SQS and S3
    data_key = put_file_into_s3(AMAZON_BUCKET_NAME, output_path+'.jl')
    logs_key = put_file_into_s3(AMAZON_BUCKET_NAME, output_path+'.log')

    csv_data_key = None
    global CONVERT_TO_CSV
    if CONVERT_TO_CSV:
        try:
            csv_filepath = convert_json_to_csv(output_path)
            csv_data_key = put_file_into_s3(AMAZON_BUCKET_NAME, csv_filepath)
        except Exception as e:
            logger.warning("CSV converter failed with exception: %s", str(e))

    if data_key and logs_key:
        dump_result_data_into_sqs(data_key, logs_key, csv_data_key,
            server_name+OUTPUT_QUEUE_NAME, metadata)
    else:
        logger.error("Failed to load info to results sqs. Amazon keys "
                     "wasn't received")

    logger.info("Spider default output:\n%s%s",
                p.stderr.read(),
                p.stdout.read().strip())
Example 34
 def get_metadata(self):
     data = get_instance_metadata()
     data.update(json.loads(get_instance_userdata()))
     return data
Example 35
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""

import os

from boto.utils import get_instance_metadata
from django.core.exceptions import ImproperlyConfigured

from base import *  # NOQA


instance_metadata = get_instance_metadata(timeout=5)

if not instance_metadata:
    raise ImproperlyConfigured('Unable to access the instance metadata')


# HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production  # NOQA
ALLOWED_HOSTS = [
    'app.wikiwatershed.org',
    'staging.app.wikiwatershed.org',
    'portal.bigcz.org',
    'staging.portal.bigcz.org',
    '.elb.amazonaws.com',
    'localhost'
]
Example 36
def get_my_id():
    return get_instance_metadata()[u'instance-id']
Example 37
    args = parser.parse_args()

    report = []
    prefix = None
    notify = None

    try:
        if args.hipchat_api_key:
            hc = hipchat.HipChat(token=args.hipchat_api_key)
            notify = lambda message: hc.message_room(room_id=args.hipchat_room,
                message_from=HIPCHAT_USER, message=message)
    except Exception as e:
        print("Failed to initialize hipchat, {}".format(e))
        traceback.print_exc()

    instance_id = get_instance_metadata()['instance-id']
    prefix = instance_id


    ec2 = boto.connect_ec2()
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    instance = reservations[0].instances[0]
    if instance.instance_profile['arn'].endswith('/abbey'):
        print("Running an abbey build. Not starting any services.")
        # Needs to exit with 1 instead of 0 to prevent
        # services from starting.
        exit(1)
    time_left = MAX_BACKOFF
    backoff = INITIAL_BACKOFF

    environment = None
Example 38
def get_region():
    metadata = get_instance_metadata()
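    # The availability zone looks like 'us-east-1a'; dropping the final letter yields the region.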
    return metadata['placement']['availability-zone'][:-1]
Example 39
def get_instance_id():
    return get_instance_metadata().get('instance-id')
Example 40
def get_region():
    return get_instance_metadata().get('placement')['availability-zone'][:-1]
Example 41
 def __init__(self):
     self.md = get_instance_metadata()
Example 42
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = not secrets['production']

# String that must be passed to post user flags for destinations or events (liked, been, etc.)
USER_FLAG_API_KEY = secrets['user_flag_api_key']

ALLOWED_HOSTS = [
    '.gophillygo.org',
    '.elb.amazonaws.com',
    'localhost',
    '.ngrok.io',
]

if secrets['production']:
    instance_metadata = get_instance_metadata()

    if not instance_metadata:
        raise ImproperlyConfigured('Unable to access instance metadata')

    # ELBs use the instance IP in the Host header and ALLOWED_HOSTS
    # checks against the Host header.
    ALLOWED_HOSTS.append(instance_metadata['local-ipv4'])

INTERNAL_IPS = tuple(secrets['internal_ips'])

# Needed in order to call collectstatic without a DB (during AMI creation)
POSTGIS_VERSION = tuple(secrets['postgis_version'])

# Application definition
Example 43
 def __init__(self, config):
     self.metadata = get_instance_metadata()
     self.config = config
     log('Rerouter initialized')
Example 44
File: aws.py Project: 3lixy/galahad
 def get_instance_info():
     return get_instance_metadata(timeout=0.5, num_retries=2)
Example 45
    def __init__(self, config=None):
        """
        Initialize the AWS Provisioner object. The object is created in two distinct
        ways:

        1.  The first is by the `toil launch-cluster` utility which does not pass a config
            and creates a provisioner. Fields are initialized to None and
            are set later when the leader is created via `self.launchCluster`. This
            round-about initialization is necessary because launch-cluster is a
            classmethod.

        2.  The second is used when doing regular autoscaling and the provisioner is
            initialized with a config file. This happens in `Toil._setProvisioner()`

        This is due to the fact that the provisioner is used both in Toil runs to manage
        autoscaling as well as outside of Toil runs to launch clusters and manage statically
        provisioned nodes. Static provisioned nodes are those that the user explicitly adds into
        the cluster via `launch-cluster --workers n`, which will launch a cluster with n statically
        provisioned nodes.

        :param config: Optional config object from common.py
        :param batchSystem:
        """
        super(AWSProvisioner, self).__init__(config)
        if config:
            self.instanceMetaData = get_instance_metadata()
            self.clusterName = self._getClusterNameFromTags(
                self.instanceMetaData)
            self.ctx = self._buildContext(clusterName=self.clusterName)
            self.leaderIP = self.instanceMetaData[
                'local-ipv4']  # this is PRIVATE IP
            self.keyName = list(self.instanceMetaData['public-keys'].keys())[0]
            self.tags = self._getLeader(self.clusterName).tags
            self.masterPublicKey = self._setSSH()
            self.nodeStorage = config.nodeStorage
            spotBids = []
            self.nonPreemptableNodeTypes = []
            self.preemptableNodeTypes = []
            for nodeTypeStr in config.nodeTypes:
                nodeBidTuple = nodeTypeStr.split(":")
                if len(nodeBidTuple) == 2:
                    #This is a preemptable node type, with a spot bid
                    self.preemptableNodeTypes.append(nodeBidTuple[0])
                    spotBids.append(nodeBidTuple[1])
                else:
                    self.nonPreemptableNodeTypes.append(nodeTypeStr)
            self.preemptableNodeShapes = [
                self.getNodeShape(nodeType=nodeType, preemptable=True)
                for nodeType in self.preemptableNodeTypes
            ]
            self.nonPreemptableNodeShapes = [
                self.getNodeShape(nodeType=nodeType, preemptable=False)
                for nodeType in self.nonPreemptableNodeTypes
            ]

            self.nodeShapes = self.nonPreemptableNodeShapes + self.preemptableNodeShapes
            self.nodeTypes = self.nonPreemptableNodeTypes + self.preemptableNodeTypes
            self.spotBids = dict(zip(self.preemptableNodeTypes, spotBids))

        else:
            self.ctx = None
            self.clusterName = None
            self.instanceMetaData = None
            self.leaderIP = None
            self.keyName = None
            self.tags = None
            self.masterPublicKey = None
            self.nodeStorage = None
        self.subnetID = None
Example 46
#!/usr/bin/env python

# Example code to assume a role within a sitting account

import re
import boto
from boto.utils import get_instance_metadata

#local call to instance metadata and extract account number from iam info
account_number = str(
    re.split(
        ":",
        (get_instance_metadata()['iam']['info']['InstanceProfileArn']))[4])

#or to test remote aws account
#account_number = '<12-digit-aws-account-number>'

# make sts call
sts = boto.connect_sts()

desired_role = 'awsdit-role'
role_description = 'awsditRoleTest'

role = sts.assume_role(
    'arn:aws:iam::{0}:role/{1}'.format(account_number, desired_role),
    role_description)

## make a connection to an AWS service such as below, and continue with your normal code.
#EC2
ec2 = boto.connect_ec2(role.credentials.access_key,
                       role.credentials.secret_key,
Example 47
        "-p",
        "--port",
        help=
        "Debug console port. Access debug console by typing \"nc localhost _port_\"",
        type=str,
        default="9000")
    parser.add_argument(
        '-O',
        '--options',
        type=json.loads,
        help=
        "Option dictionary (as described above) given in json form '{\"key1\": \"value1\"}\'.",
        default='{}')
    Storage = PVM_Storage.Storage()
    try:
        meta = get_instance_metadata(timeout=2, num_retries=2)
    except:
        meta = {}
    args = parser.parse_args()
    have_display = args.display
    if not args.quiet:
        logging.getLogger().addHandler(logging.StreamHandler())
    try:
        from Tkinter import Tk
        Tk()
    except:
        have_display = False
        print "No display detected, turning off visualizations."

    using_cmdline_options = len(args.options) > 0
    using_json_options = len(args.spec) > 0
Example 48
from sqs import (make_SQS_connection, get_queue, get_message, get_attributes,
                 delete_message_from_handle)
import os
import socket
import boto
import zipfile
from boto import utils

os.getcwd()
PATH_DOWNLOAD = os.getcwd() + '/download'

AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
JOBS_QUEUE = 'snapsat_composite_queue'
REGION = 'us-west-2'

try:
    INSTANCE_METADATA = utils.get_instance_metadata(timeout=0.5, num_retries=1)
    INSTANCE_ID = INSTANCE_METADATA['instance-id']
except:
    INSTANCE_ID = socket.gethostname()


def cleanup_downloads(folder_path):
    """Clean up download folder if process fails.

    Return True if the download folder is empty.
    """
    for file_object in os.listdir(folder_path):
        file_object_path = os.path.join(folder_path, file_object)
        if os.path.isfile(file_object_path):
            os.remove(file_object_path)
        else:
Example 49
 def compute_instance_id(self):
     """
     Look up the EC2 instance ID for this node.
     """
     return get_instance_metadata()['instance-id'].decode("ascii")
Example 50
def get_my_ip():
    return get_instance_metadata()[u'local-ipv4']
Example 51
 def _get_local_instance_id(self):
     return utils.get_instance_metadata()['instance-id']
Example 52
def get_my_hostname():
    return get_instance_metadata()[u'local-hostname'].split('.')[1]
                  dest="IP_TYPE",
                  help="IP type (e.g. public or local)",
                  action="store")
(options, args) = parser.parse_args()

logger.info(
    "--hosted-zone %s --dns-name %s --dns-type %s --ip-type %s starting..." %
    (options.HOSTED_ZONE, options.RECORD_SET_VALUE, options.RECORD_SET_TYPE,
     options.IP_TYPE))

## Retrieve meta data
if options.RECORD_SET_TYPE == 'CNAME':
    metadata_data = 'meta-data/' + options.IP_TYPE + '-hostname/'
    metadata = get_instance_metadata(version='latest',
                                     url='http://169.254.169.254',
                                     data=metadata_data,
                                     timeout=None,
                                     num_retries=1)

elif options.RECORD_SET_TYPE == 'A':
    metadata_data = 'meta-data/' + options.IP_TYPE + '-ipv4/'
    metadata = get_instance_metadata(version='latest',
                                     url='http://169.254.169.254',
                                     data=metadata_data,
                                     timeout=None,
                                     num_retries=1)

new_recordset_value = list(metadata.values())[0]
logger.info("Set value = %s" % (new_recordset_value))

response = client.change_resource_record_sets(
Example 54
 def get_metadata(self):
     return get_instance_metadata(timeout=2, num_retries=1)
Example 55
        "Comma seperated list of service names that should be checked for migrations"
    )
    app_migration_args.add_argument(
        "--app-python",
        help="Path to python to use for executing migration check.")
    app_migration_args.add_argument(
        "--app-env", help="Location of the app environment file.")
    app_migration_args.add_argument("--app-code-dir",
                                    help="Location of the app code.")

    args = parser.parse_args()

    report = []
    prefix = None

    instance_id = get_instance_metadata()['instance-id']
    prefix = instance_id

    ec2 = boto.ec2.connect_to_region(REGION)
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    instance = reservations[0].instances[0]
    if instance.instance_profile['arn'].endswith('/abbey'):
        print("Running an abbey build. Not starting any services.")
        # Needs to exit with 1 instead of 0 to prevent
        # services from starting.
        exit(1)
    time_left = MAX_BACKOFF
    backoff = INITIAL_BACKOFF

    environment = None
    deployment = None
Example 56
def get_availability_zone():
    return get_instance_metadata().get('placement')['availability-zone']
Example 57
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for setting up an sqs queue subscribed to
an sns topic polling for messages pertaining to our
impending doom.

"""
import json

from boto.utils import get_instance_metadata
import boto.sns as sns
import boto.sqs as sqs

from shudder.config import CONFIG

INSTANCE_ID = get_instance_metadata()['instance-id']
QUEUE_NAME = "{prefix}-{id}".format(prefix=CONFIG['sqs_prefix'],
                                    id=INSTANCE_ID)


def create_queue():
    """Creates the SQS queue and returns the connection/queue"""
    conn = sqs.connect_to_region(CONFIG['region'])
    queue = conn.create_queue(QUEUE_NAME)
    queue.set_timeout(60 * 60)  # one hour
    return conn, queue


def subscribe_sns(queue):
    """Subscribes the SNS topic to the queue."""
    conn = sns.connect_to_region(CONFIG['region'])
Example 58
#!/usr/bin/env python
from boto import ec2
from boto import utils
from datetime import datetime, timedelta

region = utils.get_instance_metadata()['placement']['availability-zone'][:-1]
conn = ec2.connect_to_region(region)
instance_id = utils.get_instance_metadata()['instance-id']


def get_volumes():
    volumes = []
    volumes = [
        v for v in conn.get_all_volumes()
        if v.attach_data.instance_id == instance_id
    ]
    return volumes


def main():
    volumes = get_volumes()
    for volume in volumes:
        description = "%s - %s" % (volume.attach_data.device,
                                   datetime.utcnow())
        if not volume.tags.get('Name'):
            print "Volume sem tag, adicionando..."
            volume.add_tag('Name', instance_id)

        snap = conn.create_snapshot(volume.id, description=description)
        print "Snapshot - %s " % snap.id
Example 59
def get_instance_metadata(api_version, metadata_address):
    metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
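    # Normalise the result: fall back to {} when the lookup doesn't return a dict,
    # and pass it through _unlazy_dict so callers get a plain, fully resolved mapping.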
    if not isinstance(metadata, dict):
        metadata = {}
    return _unlazy_dict(metadata)