Example #1
0
    def __init__(self, required=False):
        """Build the ``--tag`` argument describing autoscaling tags.

        required -- when True, mark the argument as mandatory and say
                    so in the help text
        """
        # User-visible help text; the literal is kept verbatim.
        help_text = '''attributes of a tag to affect.  Tags follow the following
        format: "id=resource-name, t=resource-type, k=tag-key, v=tag-val,
        p=propagate-at-launch-flag", where k is the tag's name, v is the tag's
        value, id is a resource ID, t is a resource type, and p is whether to
        propagate tags to instances created by the group.  A value for 'k=' is
        required for each tag.  The rest are optional.  This argument may be
        used more than once.  Each time affects a different tag.'''
        if required:
            help_text += '  (at least 1 required)'

        Arg.__init__(self, '--tag', dest='Tags.member', required=required,
                     action='append', type=autoscaling_tag_def,
                     help=help_text,
                     metavar=('"k=VALUE, id=VALUE, t=VALUE, v=VALUE, '
                              'p={true,false}"'))
class DeleteDiskImage(EC2Request, S3AccessMixin):
    """Delete the parts and manifest of a disk image used for an import
    task, located either by conversion task ID or by manifest URL."""

    DESCRIPTION = 'Delete a disk image used for an import task'
    ARGS = [
        MutuallyExclusiveArgList(
            Arg('-t', '--task',
                help='ID of the task to delete the image from'),
            Arg('-u', '--manifest-url',
                help='location of the import manifest')).required(),
        Arg('--ignore-active-task',
            action='store_true',
            help='''delete the image even if the import task is active
                (only works with -t/--task)''')
    ]

    def configure(self):
        EC2Request.configure(self)
        self.configure_s3_access()
        # --ignore-active-task is meaningless without a task to inspect.
        if self.args.get('ignore_active_task') and not self.args.get('task'):
            raise ArgumentError('argument --ignore-active-task may only be '
                                'used with -t/--task')

    def main(self):
        # Exactly one of --manifest-url and --task is supplied; the
        # required MutuallyExclusiveArgList above guarantees it, so
        # manifest_url is always bound by one of these branches.
        if self.args.get('manifest_url'):
            manifest_url = self.args['manifest_url']
        if self.args.get('task'):
            # Derive the manifest URL from the conversion task's
            # volume description.
            desc_conv = DescribeConversionTasks.from_other(
                self, ConversionTaskId=[self.args['task']])
            task = desc_conv.main()['conversionTasks'][0]
            assert task['conversionTaskId'] == self.args['task']
            if task.get('importVolume'):
                vol_container = task['importVolume']
            else:
                vol_container = task['importInstance']['volumes'][0]
            manifest_url = vol_container['image']['importManifestUrl']
        _, bucket, key = self.args['s3_service'].resolve_url_to_location(
            manifest_url)
        manifest_s3path = '/'.join((bucket, key))
        manifest = self.__download_manifest(manifest_s3path)

        # Delete every image part listed in the manifest, then the
        # manifest object itself.
        for part in manifest.image_parts:
            delete_req = DeleteObject.from_other(
                self,
                service=self.args['s3_service'],
                auth=self.args['s3_auth'],
                path='/'.join((bucket, part.key)))
            delete_req.main()
        delete_req = DeleteObject.from_other(self,
                                             service=self.args['s3_service'],
                                             auth=self.args['s3_auth'],
                                             path=manifest_s3path)
        delete_req.main()

    def __download_manifest(self, s3path):
        """Fetch and parse the import manifest stored at *s3path*.

        Raises ArgumentError if no such manifest exists on the server.
        """
        # Spool small manifests in memory; spill to disk past ~1 MB.
        with tempfile.SpooledTemporaryFile(max_size=1024000) as \
                manifest_destfile:
            get_req = GetObject.from_other(self,
                                           service=self.args['s3_service'],
                                           auth=self.args['s3_auth'],
                                           source=s3path,
                                           dest=manifest_destfile,
                                           show_progress=False)
            try:
                get_req.main()
            except AWSError as err:
                if err.status_code == 404:
                    raise ArgumentError(
                        'import manifest "{0}" does not exist'.format(s3path))
                raise
            manifest_destfile.seek(0)
            return ImportManifest.read_from_fileobj(manifest_destfile)
Example #3
0
class PostObject(S3Request):
    """Upload one object with a pre-signed upload policy instead of
    regular request signing (AUTH_CLASS is None)."""

    DESCRIPTION = ('Upload an object to the server using an upload policy\n\n'
                   'Note that uploading a large file to a region other than '
                   'the one the bucket is may result in "Broken pipe" errors '
                   'or other connection problems that this program cannot '
                   'detect.')
    AUTH_CLASS = None
    ARGS = [
        Arg('source',
            metavar='FILE',
            route_to=None,
            help='file to upload (required)'),
        Arg('dest',
            metavar='BUCKET/KEY',
            route_to=None,
            help='bucket and key name to upload the object to (required)'),
        MutuallyExclusiveArgList(
            Arg('--policy',
                dest='Policy',
                metavar='POLICY',
                type=base64.b64encode,
                help='upload policy to use for authorization'),
            Arg('--policy-file',
                dest='Policy',
                metavar='FILE',
                type=b64encoded_file_contents,
                help='''file containing the
                    upload policy to use for authorization''')).required(),
        Arg('--policy-signature',
            dest='Signature',
            required=True,
            help='signature for the upload policy (required)'),
        Arg('-I',
            '--access-key-id',
            dest='AWSAccessKeyId',
            required=True,
            metavar='KEY_ID',
            help='''ID of the access key that signed the
                upload policy (required)'''),
        # --security-token is an extension meant for eucalyptus's back end
        # to use for BundleInstance operations started by the web console.
        # https://eucalyptus.atlassian.net/browse/EUCA-9911
        # https://eucalyptus.atlassian.net/browse/TOOLS-511
        Arg('--security-token',
            dest='x-amz-security-token',
            default=argparse.SUPPRESS,
            help=argparse.SUPPRESS),
        Arg('--acl',
            default=argparse.SUPPRESS,
            choices=('private', 'public-read', 'public-read-write',
                     'authenticated-read', 'bucket-owner-read',
                     'bucket-owner-full-control', 'aws-exec-read',
                     'ec2-bundle-read'),
            help='''the ACL the object should have
                once uploaded.  Take care to ensure this satisfies any
                restrictions the upload policy may contain.'''),
        Arg('--mime-type',
            dest='Content-Type',
            default=argparse.SUPPRESS,
            help='MIME type for the file being uploaded')
    ]
    METHOD = 'POST'

    # noinspection PyExceptionInherit
    def configure(self):
        S3Request.configure(self)

        # Validate the destination before opening the source so a bad
        # destination does not leak an open file handle.
        bucket, _, key = self.args['dest'].partition('/')
        if not bucket:
            raise ArgumentError('destination bucket name must be non-empty')
        if not key:
            raise ArgumentError('destination key name must be non-empty')

        if self.args['source'] == '-':
            self.files['file'] = sys.stdin
        elif isinstance(self.args['source'], basestring):
            # Open in binary mode so the uploaded bytes are not subject
            # to newline translation on platforms that perform it.
            self.files['file'] = open(self.args['source'], 'rb')
        else:
            # Already a file-like object supplied programmatically.
            self.files['file'] = self.args['source']

    # noinspection PyExceptionInherit
    def preprocess(self):
        # FIXME:  This should really stream the contents of the source rather
        # than reading it all into memory at once, but at the moment doing so
        # would require me to write a multipart MIME encoder that supports
        # both streaming and file rewinding.  Patches that do that are very
        # welcome.
        #
        # FIXME:  While you're in there, would you mind adding progress bar
        # support?  8^)
        # pylint: disable=access-member-before-definition
        self.path, _, self.params['key'] = self.args['dest'].partition('/')
        # POST form uploads carry the parameters in the request body,
        # not the query string.
        self.body = self.params
        # pylint: enable=access-member-before-definition
        self.params = None
class ListUserPolicies(IAMRequest):
    DESCRIPTION = 'List one or all policies attached to a user'
    ARGS = [
        arg_user(help='user owning the policies to list (required)'),
        Arg('-p',
            '--policy-name',
            metavar='POLICY',
            route_to=None,
            help='display a specific policy'),
        Arg('-v',
            '--verbose',
            action='store_true',
            route_to=None,
            help='''display the contents of the resulting policies (in
                        addition to their names)'''),
        Arg('--pretty-print',
            action='store_true',
            route_to=None,
            help='''when printing the contents of policies, reformat them
                        for easier reading'''), AS_ACCOUNT
    ]
    LIST_TAGS = ['PolicyNames']

    def main(self):
        """Walk every page of the user's policy name list."""
        return PaginatedResponse(self, (None,), ('PolicyNames',))

    def prepare_for_page(self, page):
        """Request a specific page; pages are identified by markers."""
        self.params['Marker'] = page

    # pylint: disable=no-self-use
    def get_next_page(self, response):
        """Return the marker for the following page, or None at the end."""
        if response.get('IsTruncated') == 'true':
            return response['Marker']
        return None

    # pylint: enable=no-self-use

    def print_result(self, result):
        """Show the requested policy name(s), optionally with contents."""
        names = result.get('PolicyNames', [])
        wanted = self.args.get('policy_name')
        if wanted:
            # Only show the single policy the user asked about.
            if wanted in names:
                if self.args['verbose']:
                    self.print_policy(wanted)
                else:
                    print(wanted)
        else:
            for name in names:
                print(name)
                if self.args['verbose']:
                    self.print_policy(name)
        # Pagination is handled internally, so report no truncation.
        print('IsTruncated: false')

    def print_policy(self, policy_name):
        """Fetch one policy and print its contents."""
        policy_req = GetUserPolicy.from_other(
            self,
            UserName=self.args['UserName'],
            PolicyName=policy_name,
            pretty_print=self.args['pretty_print'],
            DelegateAccount=self.params.get('DelegateAccount'))
        policy_req.print_result(policy_req.main())
Example #5
0
class AssumeRole(STSRequest):
    DESCRIPTION = '''\
        Assume an IAM role

        The %(prog)s utility obtains credentials for an IAM role and
        outputs them in the form of shellcode that sets environment
        variables that allow euca2ools commands to use them.  Use it
        inside an eval command to make this process seamless:

            $ eval `%(prog)s myrole`

        To stop using the role, use euare-releaserole(1).'''
    ARGS = [Arg('rolename', metavar='ROLE', route_to=None,
                help='the role to assume'),
            Arg('-d', '--duration', dest='DurationSeconds', metavar='SECONDS',
                type=int, default=900, help='''number of seconds the
                credentials should be valid for (900-3600) (default: 900)'''),
            Arg('--session-name', dest='RoleSessionName', metavar='PATH',
                help='''role session identifier to include in the
                assumed role user ID (default: automatic)'''),
            MutuallyExclusiveArgList(
                Arg('-c', dest='csh_output', route_to=None,
                    action='store_true', help='''generate C-shell commands on
                    stdout (default if SHELL looks like a csh-style shell'''),
                Arg('-s', dest='sh_output', route_to=None,
                    action='store_true', help='''generate Bourne shell
                    commands on stdout (default if SHELL does not look
                    like a csh-style shell''')),
            MutuallyExclusiveArgList(
                Arg('--policy-content', dest='Policy',
                    metavar='POLICY_CONTENT', help='''an IAM policy
                    further restricting what the credentials will be
                    allowed to do.  This cannot grant additional
                    permissions.'''),
                Arg('--policy-document', dest='Policy',
                    metavar='FILE', type=open, help='''file containing
                    an IAM policy further restricting what the
                    credentials will be allowed to do.  This cannot
                    grant additional permissions.''')),
            Arg('--external-id', dest='ExternalId', metavar='STR',
                help='external ID to use for comparison with policies'),
            Arg('--mfa-serial', dest='SerialNumber', metavar='MFA',
                help='MFA token serial number'),
            Arg('--mfa-code', dest='TokenCode', metavar='CODE',
                help='MFA token code')]

    def preprocess(self):
        self.params['RoleArn'] = self.__build_role_arn(
            self.args.get('rolename'))
        if not self.params.get('RoleSessionName'):
            session = datetime.datetime.utcnow().strftime(
                'euca2ools-%Y-%m-%dT%H:%M:%SZ')
            self.params['RoleSessionName'] = session

    def __build_role_arn(self, arn):
        """
        Build an ARN for a role from the fragment that was supplied at
        the command line.
        """
        if arn.count(':') == 1 and '/' not in arn:
            # Special case syntactic sugar
            arn = '{0}:role/{1}'.format(*arn.split(':'))
        if arn.count(':') == 0:
            # S3Access
            if not arn.startswith('role/'):
                arn = 'role/' + arn
            # role/A3Access
            arn = '{0}:{1}'.format(self.__get_account_id(), arn)
        if arn.count(':') == 1:
            # 123456789012:role/S3Access
            arn = ':' + arn
        if arn.count(':') == 2:
            # :123456789012:role/S3Access
            arn = 'iam:' + arn
        if arn.count(':') == 3:
            # iam::123456789012:role/S3Access
            arn = 'aws:' + arn
        if arn.count(':') == 4:
            # aws:iam::123456789012:role/S3Access
            arn = 'arn:' + arn
        # Shound be arn:aws:iam::123456789012:role/S3Access at this point
        return arn

    def __get_account_id(self):
        account_id = self.config.get_user_option('account-id')
        if not account_id:
            account_id = os.getenv('EC2_USER_ID')
        if not account_id:
            raise ArgumentError(
                'failed to determine account ID; set account-id for '
                'the user in configuration or EC2_USER_ID in the '
                'environment')
        return account_id

    def print_result(self, result):
        creds = result['Credentials']
        # If this list changes please go update ReleaseRole.
        self.__print_var('AWS_ACCESS_KEY_ID', creds['AccessKeyId'])
        self.__print_var('AWS_ACCESS_KEY', creds['AccessKeyId'])
        self.__print_var('EC2_ACCESS_KEY', creds['AccessKeyId'])
        self.__print_var('AWS_SECRET_ACCESS_KEY', creds['SecretAccessKey'])
        self.__print_var('AWS_SECRET_KEY', creds['SecretAccessKey'])
        self.__print_var('EC2_SECRET_KEY', creds['SecretAccessKey'])
        self.__print_var('AWS_SESSION_TOKEN', creds['SessionToken'])
        self.__print_var('AWS_SECURITY_TOKEN', creds['SessionToken'])
        self.__print_var('AWS_CREDENTIAL_EXPIRATION', creds['Expiration'])
        self.__print_var('EC2_USER_ID', self.params['RoleArn'].split(':')[4])
        # Unset AWS_CREDENTIAL_FILE to avoid accidentally using its creds
        self.__print_var('AWS_CREDENTIAL_FILE', None)
        print
        print '# If you can read this, rerun this program with eval:'
        print '#     eval `{0}`'.format(
            ' '.join(pipes.quote(arg) for arg in sys.argv))

    def __print_var(self, key, val):
        if (self.args.get('csh_output') or
                (not self.args.get('sh_output') and
                 os.getenv('SHELL', '').endswith('csh'))):
            if val:
                fmt = 'setenv {key} {val};'
            else:
                fmt = 'unsetenv {key};'
        else:
            if val:
                fmt = '{key}={val}; export {key};'
            else:
                fmt = 'unset {key};'
        print fmt.format(key=key, val=val)
Example #6
0
class BundleDownloadingMixin(object):
    # When fetching the manifest from the server there are two ways to get
    # its path:
    #  -m:  BUCKET[/PREFIX]/MANIFEST
    #  -p:  BUCKET[/PREFIX]/PREFIX.manifest.xml  (the PREFIXes are different)
    #
    # In all cases, after we obtain the manifest (whether it is local or not)
    # we choose key names for parts based on the file names in the manifest:
    #  BUCKET[/PREFIX]/PART

    ARGS = [
        Arg('-b',
            '--bucket',
            metavar='BUCKET[/PREFIX]',
            required=True,
            route_to=None,
            help='''the bucket that contains the bundle,
                with an optional path prefix (required)'''),
        MutuallyExclusiveArgList(
            Arg('-m',
                '--manifest',
                dest='manifest',
                route_to=None,
                help='''the manifest's complete file name, not including
                    any path that may be specified using -b'''),
            Arg('-p',
                '--prefix',
                dest='manifest',
                route_to=None,
                type=(lambda x: x + '.manifest.xml'),
                help='''the portion of the manifest's file name that
                    precedes ".manifest.xml"'''),
            Arg('--local-manifest',
                dest='local_manifest',
                metavar='FILE',
                route_to=None,
                help='''use a manifest on disk and ignore
                    any that appear on the server''')).required()
    ]

    def fetch_manifest(self, s3_service, privkey_filename=None):
        """Read the bundle manifest, either from disk or the server.

        Raises ValueError if a server-side manifest does not exist.
        """
        if self.args.get('local_manifest'):
            _assert_is_file(self.args['local_manifest'], 'manifest')
            return euca2ools.bundle.manifest.BundleManifest.read_from_file(
                self.args['local_manifest'], privkey_filename=privkey_filename)

        # It's on the server, so do things the hard way
        manifest_s3path = self.get_manifest_s3path()
        with tempfile.TemporaryFile() as manifest_tempfile:
            self.log.info('reading manifest from %s', manifest_s3path)
            req = GetObject.from_other(self,
                                       service=s3_service,
                                       source=manifest_s3path,
                                       dest=manifest_tempfile)
            try:
                req.main()
            except AWSError as err:
                if err.status_code == 404:
                    self.log.debug('failed to fetch manifest', exc_info=True)
                    raise ValueError("manifest '{0}' does not exist on the "
                                     "server".format(manifest_s3path))
                raise
            manifest_tempfile.flush()
            manifest_tempfile.seek(0)
            return euca2ools.bundle.manifest.BundleManifest.read_from_fileobj(
                manifest_tempfile, privkey_filename=privkey_filename)

    def get_manifest_s3path(self):
        """Return the manifest's key path, or None for a local manifest."""
        if self.args.get('manifest'):
            return '/'.join((self.args['bucket'], self.args['manifest']))
        else:
            # With a local manifest we can't divine the manifest's key name
            return None

    def download_bundle_to_dir(self, manifest, dest_dir, s3_service):
        """Download each bundle part (and the manifest, when it lives on
        the server) into *dest_dir*, verifying part checksums.

        Returns the local manifest path, or None for a local manifest.
        """
        parts = self.map_bundle_parts_to_s3paths(manifest)
        for part, part_s3path in parts:
            part.filename = os.path.join(dest_dir,
                                         os.path.basename(part_s3path))
            self.log.info('downloading part %s to %s', part_s3path,
                          part.filename)
            req = GetObject.from_other(self,
                                       service=s3_service,
                                       source=part_s3path,
                                       dest=part.filename,
                                       show_progress=self.args.get(
                                           'show_progress', False))
            response = req.main()
            self.__check_part_sha1(part, part_s3path, response)

        manifest_s3path = self.get_manifest_s3path()
        if manifest_s3path:
            # Can't download a manifest if we're using a local one
            manifest_dest = os.path.join(dest_dir,
                                         os.path.basename(manifest_s3path))
            self.log.info('downloading manifest %s to %s', manifest_s3path,
                          manifest_dest)
            req = GetObject.from_other(self,
                                       service=s3_service,
                                       source=manifest_s3path,
                                       dest=manifest_dest,
                                       show_progress=self.args.get(
                                           'show_progress', False))
            req.main()
            return manifest_dest
        return None

    def download_bundle_to_fileobj(self, manifest, fileobj, s3_service):
        # We can skip downloading the manifest since we're just writing all
        # parts to a file object.
        parts = self.map_bundle_parts_to_s3paths(manifest)
        for part, part_s3path in parts:
            self.log.info('downloading part %s', part_s3path)
            req = GetObject.from_other(self,
                                       service=s3_service,
                                       source=part_s3path,
                                       dest=fileobj,
                                       show_progress=self.args.get(
                                           'show_progress', False))
            response = req.main()
            self.__check_part_sha1(part, part_s3path, response)

    def map_bundle_parts_to_s3paths(self, manifest):
        """Return (part, "BUCKET[/PREFIX]/PART") pairs for the manifest."""
        parts = []
        for part in manifest.image_parts:
            parts.append((part, '/'.join(
                (self.args['bucket'], part.filename))))
        return parts

    def __check_part_sha1(self, part, part_s3path, response):
        """Raise RuntimeError when a downloaded part's SHA1 does not
        match the digest recorded in the manifest."""
        if response[part_s3path]['sha1'] != part.hexdigest:
            self.log.error(
                'rejecting download due to manifest SHA1 '
                'mismatch (expected: %s, actual: %s)', part.hexdigest,
                response[part_s3path]['sha1'])
            # Name the offending part in the message; previously the
            # digest was substituted twice and the paren never closed.
            raise RuntimeError('downloaded file {0} appears to be corrupt '
                               '(expected SHA1: {1}, actual: {2})'.format(
                                   part_s3path, part.hexdigest,
                                   response[part_s3path]['sha1']))
Example #7
0
class ImportVolume(EC2Request, S3AccessMixin, FileTransferProgressBarMixin):
    DESCRIPTION = 'Import a file to a volume in the cloud'
    ARGS = [
        Arg('source',
            metavar='FILE',
            route_to=None,
            help='file containing the disk image to import (required)'),
        Arg('-f',
            '--format',
            dest='Image.Format',
            metavar='FORMAT',
            required=True,
            help='''the image's format ("vmdk", "raw", or
                "vhd") (required)'''),
        Arg('-z',
            '--availability-zone',
            dest='AvailabilityZone',
            metavar='ZONE',
            required=True,
            help='the zone in which to create the volume (required)'),
        Arg('-s',
            '--volume-size',
            metavar='GiB',
            dest='Volume.Size',
            type=int,
            help='size of the volume to import to, in GiB'),
        Arg('--image-size',
            dest='Image.Bytes',
            metavar='BYTES',
            type=filesize,
            help='size of the image (required for non-raw files)'),
        MutuallyExclusiveArgList(
            Arg('-b',
                '--bucket',
                route_to=None,
                help='the bucket to upload the volume to'),
            Arg('--manifest-url',
                dest='Image.ImportManifestUrl',
                metavar='URL',
                help='''a pre-signed URL that points to
                    the import manifest to use''')).required(),
        Arg('--prefix',
            route_to=None,
            help='''a prefix to add to the
                names of the volume parts as they are uploaded'''),
        Arg('-x',
            '--expires',
            metavar='DAYS',
            type=int,
            default=30,
            route_to=None,
            help='''how long the import manifest should
                remain valid, in days (default: 30 days)'''),
        Arg('--no-upload',
            action='store_true',
            route_to=None,
            help='''start the import process, but do not actually upload
                the volume (see euca-resume-import)'''),
        Arg('-d',
            '--description',
            dest='Description',
            help='a description for the import task (not the volume)'),
        # This is not yet implemented
        Arg('--ignore-region-affinity',
            action='store_true',
            route_to=None,
            help=argparse.SUPPRESS),
        # This does no validation, but it does prevent taking action
        Arg('--dry-run',
            action='store_true',
            route_to=None,
            help=argparse.SUPPRESS),
        # This is not yet implemented
        Arg('--dont-verify-format',
            action='store_true',
            route_to=None,
            help=argparse.SUPPRESS)
    ]

    def configure(self):
        """Normalize the image format and fill in derived sizes."""
        EC2Request.configure(self)
        self.configure_s3_access()

        # The service expects upper-case format names.
        if self.params['Image.Format'].upper() in ('VMDK', 'VHD', 'RAW'):
            self.params['Image.Format'] = self.params['Image.Format'].upper()
        if not self.params.get('Image.Bytes'):
            if self.params['Image.Format'] == 'RAW':
                # Raw images' sizes can be read straight from the file.
                image_size = euca2ools.util.get_filesize(self.args['source'])
                self.params['Image.Bytes'] = image_size
            else:
                raise ArgumentError(
                    'argument --image-size is required for {0} files'.format(
                        self.params['Image.Format']))
        if not self.params.get('Volume.Size'):
            # Round the image size up to whole GiBs.  Divide as floats so
            # python 2's truncating integer division does not shrink the
            # quotient before math.ceil sees it.
            vol_size = math.ceil(self.params['Image.Bytes'] / float(2 ** 30))
            self.params['Volume.Size'] = int(vol_size)

        if not self.args.get('expires'):
            self.args['expires'] = 30
        if self.args['expires'] < 1:
            raise ArgumentError(
                'argument -x/--expires: value must be positive')

    def main(self):
        """Start the import task and, unless told otherwise, upload."""
        if self.args.get('dry_run'):
            return

        if not self.args.get('Image.ImportManifestUrl'):
            # Build a unique manifest key and pre-sign a GET URL for it
            # so the back end can read the manifest we will upload.
            manifest_key = '{0}/{1}.manifest.xml'.format(
                uuid.uuid4(), self.args['source'])
            if self.args.get('prefix'):
                manifest_key = '/'.join((self.args['prefix'], manifest_key))
            getobj = GetObject.from_other(self,
                                          service=self.args['s3_service'],
                                          auth=self.args['s3_auth'],
                                          source='/'.join((self.args['bucket'],
                                                           manifest_key)))
            days = self.args.get('expires') or 30
            expiration = datetime.datetime.utcnow() + datetime.timedelta(days)
            get_url = getobj.get_presigned_url(expiration)
            self.log.info('generated manifest GET URL: %s', get_url)
            self.params['Image.ImportManifestUrl'] = get_url

        result = self.send()

        # The manifest creation and uploading parts are done by ResumeImport.
        if not self.args.get('no_upload'):
            resume = ResumeImport.from_other(
                self,
                source=self.args['source'],
                task=result['conversionTask']['conversionTaskId'],
                s3_service=self.args['s3_service'],
                s3_auth=self.args['s3_auth'],
                expires=self.args['expires'],
                show_progress=self.args.get('show_progress', False))
            resume.main()

        return result

    def print_result(self, result):
        self.print_conversion_task(result['conversionTask'])
class CreateLoadBalancerPolicy(ELBRequest):
    DESCRIPTION = 'Add a new policy to a load balancer'
    ARGS = [
        Arg('LoadBalancerName',
            metavar='ELB',
            help='name of the load balancer to modify (required)'),
        Arg('-n',
            '--policy-name',
            dest='PolicyName',
            metavar='POLICY',
            required=True,
            help='name of the new policy (required)'),
        Arg('-t',
            '--policy-type',
            dest='PolicyTypeName',
            metavar='POLTYPE',
            required=True,
            help='''type of the new policy.  For a list of policy types,
                use eulb-describe-lb-policy-types.  (required)'''),
        Arg('-a',
            '--attribute',
            dest='PolicyAttributes.member',
            action='append',
            metavar='"name=NAME, value=VALUE"',
            type=attribute,
            help='''name and value for each attribute
                associated with the new policy.  Use this option multiple times
                to supply multiple attributes.'''),
        Arg('-A',
            '--attributes',
            dest='new_attr_lists',
            route_to=None,
            metavar='NAME=VALUE,...',
            action='append',
            type=delimited_list(',', item_type=key_value_attribute),
            help='''a comma-delimited list of attribute names and values
                to associate with the new policy, each pair of which is
                separated by "=".  This is a more concise alternative to the
                -a/--attribute option.'''),
        Arg('--attributes-from-file',
            dest='attr_filename',
            metavar='FILE',
            route_to=None,
            help='''a file containing
                attribute names and values to associate with the new
                policy, one per line, each pair of which is separated by
                "=".  Lines that are blank or begin with "#" are ignored.''')
    ]

    def preprocess(self):
        """Merge attributes from -a, -A, and --attributes-from-file into
        the PolicyAttributes.member request parameter."""
        attributes = self.params.get('PolicyAttributes.member') or []
        self.params['PolicyAttributes.member'] = attributes
        # Fold in any -A/--attributes lists.
        for attr_list in self.args.get('new_attr_lists') or []:
            attributes.extend(attr_list or [])
        if not self.args.get('attr_filename'):
            return
        # "-" means read attribute lines from standard input.
        if self.args['attr_filename'] == '-':
            attr_file = sys.stdin
        else:
            attr_file = open(self.args['attr_filename'])
        with attr_file:
            for line_no, line in enumerate(attr_file, 1):
                # Skip blank lines and comments.
                if not line.strip() or line.startswith('#'):
                    continue
                try:
                    attributes.append(key_value_attribute(line.strip()))
                except ArgumentError as err:
                    raise ValueError(
                        'error on {0} line {1}: {2}'.format(
                            self.args['attr_filename'], line_no,
                            err.args[0]))
Example #9
0
class Unbundle(BaseCommand, FileTransferProgressBarMixin):
    DESCRIPTION = ('Recreate an image from its bundled parts\n\nThe key used '
                   'to unbundle the image must match the certificate that was '
                   'used to bundle it.')
    SUITE = Euca2ools
    ARGS = [
        Arg('-m',
            '--manifest',
            metavar='FILE',
            required=True,
            help="the bundle's manifest file (required)"),
        Arg('-k',
            '--privatekey',
            metavar='FILE',
            help='''file containing the private key to decrypt the bundle
                with.  This must match the certificate used when bundling the
                image.'''),
        Arg('-d',
            '--destination',
            metavar='DIR',
            default='.',
            help='''where to place the unbundled image (default: current
                directory)'''),
        Arg('-s',
            '--source',
            metavar='DIR',
            default='.',
            help='''directory containing the bundled image parts (default:
                current directory)'''),
        Arg('--region',
            dest='userregion',
            metavar='USER@REGION',
            help='''use encryption keys specified for a user and/or region
                in configuration files''')
    ]

    # noinspection PyExceptionInherit
    def configure(self):
        BaseCommand.configure(self)
        set_userregion(self.config, self.args.get('userregion'))
        set_userregion(self.config, os.getenv('EUCA_REGION'))

        if not self.args.get('privatekey'):
            config_privatekey = self.config.get_user_option('private-key')
            if self.args.get('userregion'):
                self.args['privatekey'] = config_privatekey
            elif 'EC2_PRIVATE_KEY' in os.environ:
                self.args['privatekey'] = os.getenv('EC2_PRIVATE_KEY')
            elif config_privatekey:
                self.args['privatekey'] = config_privatekey
            else:
                raise ArgumentError(
                    'missing private key; please supply one with -k')
        self.args['privatekey'] = os.path.expanduser(
            os.path.expandvars(self.args['privatekey']))
        if not os.path.exists(self.args['privatekey']):
            raise ArgumentError("private key file '{0}' does not exist".format(
                self.args['privatekey']))
        if not os.path.isfile(self.args['privatekey']):
            raise ArgumentError("private key file '{0}' is not a file".format(
                self.args['privatekey']))

    def main(self):
        bundle = Bundle.create_from_manifest(
            self.args['manifest'],
            partdir=self.args['source'],
            privkey_filename=self.args['privatekey'])
        pbar = self.get_progressbar(maxval=bundle.bundled_size)
        return bundle.extract_image(self.args['destination'], progressbar=pbar)

    def print_result(self, result):
        print 'Wrote', result
Example #10
0
class DescribeAutoScalingGroups(AutoScalingRequest, TabifyingMixin):
    DESCRIPTION = 'Describe auto-scaling groups'
    ARGS = [
        Arg('AutoScalingGroupNames.member',
            metavar='ASGROUP',
            nargs='*',
            help='limit results to specific auto-scaling groups'),
        Arg('--show-long',
            action='store_true',
            route_to=None,
            help="show all of the groups' info")
    ]
    LIST_TAGS = [
        'AutoScalingGroups', 'AvailabilityZones', 'EnabledMetrics',
        'Instances', 'LoadBalancerNames', 'SuspendedProcesses', 'Tags',
        'TerminationPolicies'
    ]

    def main(self):
        return PaginatedResponse(self, (None, ), ('AutoScalingGroups', ))

    def prepare_for_page(self, page):
        # Pages are defined by NextToken
        self.params['NextToken'] = page

    def get_next_page(self, response):
        return response.get('NextToken') or None

    def print_result(self, result):
        lines = []
        for group in result.get('AutoScalingGroups', []):
            bits = [
                'AUTO-SCALING-GROUP',
                group.get('AutoScalingGroupName'),
                group.get('LaunchConfigurationName'),
                ','.join(group.get('AvailabilityZones'))
            ]
            if self.args['show_long']:
                bits.append(group.get('CreatedTime'))
            balancers = group.get('LoadBalancerNames')
            if balancers:
                bits.append(','.join(balancers))
            else:
                bits.append(None)
            if self.args['show_long']:
                bits.append(group.get('HealthCheckType'))
            bits.append(group.get('MinSize'))
            bits.append(group.get('MaxSize'))
            bits.append(group.get('DesiredCapacity'))
            if self.args['show_long']:
                bits.append(group.get('DefaultCooldown'))
                bits.append(group.get('HealthCheckGracePeriod'))
                bits.append(group.get('VPCZoneIdentifier'))
                bits.append(group.get('PlacementGroup'))
                bits.append(group.get('AutoScalingGroupARN'))
            policies = group.get('TerminationPolicies')
            if policies:
                bits.append(','.join(policies))
            else:
                bits.append(None)
            lines.append(self.tabify(bits))
            for instance in group.get('Instances', []):
                lines.append(self._get_tabified_instance(instance))
            scale_group = group.get('AutoScalingGroupName')
            for process in group.get('SuspendedProcesses', []):
                lines.append(
                    self._get_tabified_suspended_process(process, scale_group))
            for metric in group.get('EnabledMetrics', []):
                lines.append(self._get_tabified_metric(metric))
            for tag in group.get('Tags') or []:
                lines.append(
                    self.tabify([
                        'TAG',
                        tag.get('ResourceType'),
                        tag.get('ResourceId'),
                        tag.get('Key'),
                        tag.get('Value'),
                        tag.get('PropagateAtLaunch')
                    ]))
        for line in lines:
            print line

    def _get_tabified_instance(self, instance):
        return self.tabify([
            'INSTANCE',
            instance.get('InstanceId'),
            instance.get('AvailabilityZone'),
            instance.get('LifecycleState'),
            instance.get('HealthStatus'),
            instance.get('LaunchConfigurationName')
        ])

    def _get_tabified_suspended_process(self, process, scale_group):
        return self.tabify([
            'SUSPENDED-PROCESS',
            process.get('ProcessName'),
            process.get('SuspensionReason'), scale_group
        ])

    def _get_tabified_metric(self, metric):
        return self.tabify([
            'ENABLED-METRICS',
            metric.get('Metric'),
            metric.get('Granularity')
        ])
Example #11
0
class ModifySecurityGroupRequest(EucalyptusRequest):
    """
    The basis for security group-editing commands
    """

    ARGS = [
        Arg('group',
            metavar='GROUP',
            route_to=None,
            help='name or ID of the security group to modify (required)'),
        Arg('--egress',
            action='store_true',
            route_to=None,
            help='''[VPC only] manage an egress rule, which controls
                traffic leaving the group'''),
        Arg('-P',
            '--protocol',
            dest='IpPermissions.1.IpProtocol',
            choices=['tcp', 'udp', 'icmp', '6', '17', '1'],
            default='tcp',
            help='protocol to affect (default: tcp)'),
        Arg('-p',
            '--port-range',
            dest='port_range',
            metavar='RANGE',
            route_to=None,
            help='''range of ports (specified as "from-to")
                or a single port number (required for tcp and udp)'''),
        # ^ required for tcp and udp
        Arg('-t',
            '--icmp-type-code',
            dest='icmp_type_code',
            metavar='TYPE:CODE',
            route_to=None,
            help='''ICMP type and code (specified as "type:code") (required
                for icmp)'''),
        # ^ required for icmp
        MutuallyExclusiveArgList(
            Arg('-s',
                '--cidr',
                metavar='CIDR',
                dest='IpPermissions.1.IpRanges.1.CidrIp',
                help='''IP range (default: 0.0.0.0/0)'''),
            # ^ default is added by main()
            Arg('-o',
                dest='target_group',
                metavar='GROUP',
                route_to=None,
                help='''[Non-VPC only] name of a security group with which
                    to affect network communication''')),
        Arg('-u',
            metavar='ACCOUNT',
            dest='IpPermissions.1.Groups.1.UserId',
            help='''ID of the account that owns the security group
                specified with -o''')
    ]

    # noinspection PyExceptionInherit
    def configure(self):
        EucalyptusRequest.configure(self)

        if (self.args['group'].startswith('sg-')
                and len(self.args['group']) == 11):
            # The check could probably be a little better, but meh.  Fix if
            # needed.
            self.params['GroupId'] = self.args['group']
        else:
            if self.args['egress']:
                raise ArgumentError('egress rules must use group IDs, not '
                                    'names')
            self.params['GroupName'] = self.args['group']

        target_group = self.args.get('target_group')
        if (target_group is not None and target_group.startswith('sg-')
                and len(target_group) == 11):
            # Same note as above
            self.params['IpPermissions.1.Groups.1.GroupId'] = target_group
        else:
            if self.args['egress']:
                raise ArgumentError('argument -o: egress rules must use group '
                                    'IDs, not names')
            self.params['IpPermissions.1.Groups.1.GroupName'] = target_group

        protocol = self.args.get('IpPermissions.1.IpProtocol')
        if protocol in ['icmp', '1']:
            if self.args.get('port_range'):
                raise ArgumentError('argument -p/--port-range: not compatible '
                                    'with protocol ' + protocol)
            if not self.args.get('icmp_type_code'):
                self.args['icmp_type_code'] = '-1:-1'
            types = self.args['icmp_type_code'].split(':')
            if len(types) == 2:
                try:
                    from_port = int(types[0])
                    to_port = int(types[1])
                except ValueError:
                    raise ArgumentError('argument -t/--icmp-type-code: value '
                                        'must have format "1:2"')
            else:
                raise ArgumentError('argument -t/--icmp-type-code: value must '
                                    'have format "1:2"')
            if from_port < -1 or to_port < -1:
                raise ArgumentError('argument -t/--icmp-type-code: type, code '
                                    'must be at least -1')

        elif protocol in ['tcp', 'udp', '6', '17']:
            if self.args.get('icmp_type_code'):
                raise ArgumentError('argument -t/--icmp-type-code: not '
                                    'compatible with protocol ' + protocol)
            if not self.args.get('port_range'):
                raise ArgumentError('argument -p/--port-range is required for '
                                    'protocol ' + protocol)
            if ':' in self.args['port_range']:
                # Be extra helpful in the event of this common typo
                raise ArgumentError('argument -p/--port-range: multi-port '
                                    'range must be separated by "-", not ":"')
            from_port, to_port = _get_port_range(self.args['port_range'],
                                                 protocol)
            if from_port < -1 or to_port < -1:
                raise ArgumentError('argument -p/--port-range: port number(s) '
                                    'must be at least -1')
            if from_port == -1:
                from_port = 1
            if to_port == -1:
                to_port = 65535
        else:
            # Shouldn't get here since argparse should only allow the values we
            # handle
            raise ValueError('unrecognized protocol: "{0}"'.format(protocol))

        self.params['IpPermissions.1.FromPort'] = from_port
        self.params['IpPermissions.1.ToPort'] = to_port

        if (not self.args.get('IpPermissions.1.IpRanges.1.GroupName')
                and not self.args.get('IpPermissions.1.IpRanges.1.CidrIp')):
            # Default rule target is the entire Internet
            self.params['IpPermissions.1.IpRanges.1.CidrIp'] = '0.0.0.0/0'
        if (self.params.get('IpPermissions.1.Groups.1.GroupName')
                and not self.args.get('IpPermissions.1.Groups.1.UserId')):
            raise ArgumentError('argument -u is required when -o names a '
                                'security group by name')

    def print_result(self, result):
        print self.tabify(['GROUP', self.args.get('group')])
        perm_str = [
            'PERMISSION',
            self.args.get('group'), 'ALLOWS',
            self.params.get('IpPermissions.1.IpProtocol'),
            self.params.get('IpPermissions.1.FromPort'),
            self.params.get('IpPermissions.1.ToPort')
        ]
        if self.params.get('IpPermissions.1.Groups.1.UserId'):
            perm_str.append('USER')
            perm_str.append(self.params.get('IpPermissions.1.Groups.1.UserId'))
        if self.params.get('IpPermissions.1.Groups.1.GroupId'):
            perm_str.append('GRPID')
            perm_str.append(
                self.params.get('IpPermissions.1.Groups.1.GroupId'))
        elif self.params.get('IpPermissions.1.Groups.1.GroupName'):
            perm_str.append('GRPNAME')
            perm_str.append(
                self.params.get('IpPermissions.1.Groups.1.GroupName'))
        if self.params.get('IpPermissions.1.IpRanges.1.CidrIp'):
            perm_str.extend(['FROM', 'CIDR'])
            perm_str.append(
                self.params.get('IpPermissions.1.IpRanges.1.CidrIp'))
        print self.tabify(perm_str)

    def process_cli_args(self):
        # We need to parse out -t and -p *before* argparse can see it because
        # of Python bug 9334, which prevents argparse from recognizing '-1:-1'
        # as an option value and not a (nonexistent) option name.
        saved_sys_argv = list(sys.argv)

        def parse_neg_one_value(opt_name):
            if opt_name in sys.argv:
                index = sys.argv.index(opt_name)
                if (index < len(sys.argv) - 1
                        and sys.argv[index + 1].startswith('-1')):
                    opt_val = sys.argv[index + 1]
                    del sys.argv[index:index + 2]
                    return opt_val

        icmp_type_code = (parse_neg_one_value('-t')
                          or parse_neg_one_value('--icmp-type-code'))
        port_range = (parse_neg_one_value('-p')
                      or parse_neg_one_value('--port-range'))
        EucalyptusRequest.process_cli_args(self)
        if icmp_type_code:
            self.args['icmp_type_code'] = icmp_type_code
        if port_range:
            self.args['port_range'] = port_range
        sys.argv = saved_sys_argv
class ModifyInstanceAttribute(EC2Request):
    """Modify exactly one attribute of an instance.

    The attribute options below are mutually exclusive and exactly one
    of them is required; each routes its value directly into the
    corresponding request parameter.
    """

    DESCRIPTION = 'Modify an attribute of an instance'
    ARGS = [
        Arg('InstanceId',
            metavar='INSTANCE',
            help='ID of the instance to modify (required)'),
        # Exactly one of these attribute options must be given.
        MutuallyExclusiveArgList(
            Arg('-b',
                '--block-device-mapping',
                dest='BlockDeviceMapping',
                action='append',
                metavar='DEVICE=::(true|false)',
                type=_min_ec2_block_device_mapping,
                default=[],
                help='''change whether a volume attached to the instance
                    will be deleted upon the instance's termination'''),
            Arg('--disable-api-termination',
                choices=('true', 'false'),
                dest='DisableApiTermination.Value',
                help='''change whether
                    or not the instance may be terminated'''),
            Arg('--ebs-optimized',
                dest='EbsOptimized.Value',
                choices=('true', 'false'),
                help='''change whether or not
                    the instance should be optimized for EBS I/O'''),
            Arg('-g',
                '--group-id',
                dest='GroupId',
                metavar='GROUP',
                action='append',
                default=[],
                help='''[VPC only] Change the
                    security group(s) the instance is in'''),
            Arg('--instance-initiated-shutdown-behavior',
                dest='InstanceInitiatedShutdownBehavior.Value',
                choices=('stop', 'terminate'),
                help='''whether to stop or
                    terminate the EBS instance when it shuts down
                    (instance-store instances are always terminated)'''),
            Arg('-t',
                '--instance-type',
                dest='InstanceType.Value',
                metavar='INSTANCETYPE',
                help="change the instance's type"),
            Arg('--kernel',
                dest='Kernel.Value',
                metavar='IMAGE',
                help="change the instance's kernel image"),
            Arg('--ramdisk',
                dest='Ramdisk.Value',
                metavar='IMAGE',
                help="change the instance's ramdisk image"),
            Arg('--source-dest-check',
                dest='SourceDestCheck.Value',
                choices=('true', 'false'),
                help='''change whether
                    source/destination address checking is enabled'''),
            Arg('--sriov',
                dest='SriovNetSupport.Value',
                metavar='simple',
                choices=('simple', ),
                help='''enable enhanced networking for
                    the instance and its descendants'''),
            # --user-data and --user-data-file share a dest; the latter
            # base64-encodes a file's contents at parse time.
            Arg('--user-data',
                dest='UserData.Value',
                metavar='DATA',
                help='''change the instance's user data (must be
                    base64-encoded)'''),
            Arg('--user-data-file',
                dest='UserData.Value',
                metavar='FILE',
                type=b64encoded_file_contents,
                help='''change the
                    instance's user data to the contents of a file''')).
        required()
    ]
Example #13
0
class CreateVpnConnection(EC2Request):
    """Create a VPN connection and optionally render its configuration
    for a specific device via --format or an XSL --stylesheet."""

    DESCRIPTION = ('Create a VPN connection between a virtual private '
                   'gateway and a customer gateway\n\nYou can optionally '
                   'format the connection information for specific '
                   'devices using the --format or --stylesheet options.  '
                   'If the --stylesheet option is an HTTP or HTTPS URL it '
                   'will be downloaded as needed.')
    ARGS = [
        Arg('-t', '--type', dest='Type', metavar='ipsec.1', required=True,
            choices=('ipsec.1', ),
            help='the type of VPN connection to use (required)'),
        Arg('--customer-gateway', dest='CustomerGatewayId', required=True,
            metavar='CGATEWAY',
            help='ID of the customer gateway to connect (required)'),
        Arg('--vpn-gateway', dest='VpnGatewayId', required=True,
            metavar='VGATEWAY',
            help='''ID of the virtual private gateway
                to connect (required)'''),
        Arg('--static-routes-only', dest='Options.StaticRoutesOnly',
            action='store_true',
            help='use only static routes instead of BGP'),
        Arg('--format', route_to=None,
            help='''show connection
                information in a specific format (cisco-ios-isr,
                juniper-junos-j, juniper-screenos-6.1, juniper-screenos-6.2,
                generic, xml, none) (default: xml)'''),
        Arg('--stylesheet', route_to=None,
            help='''format the connection
                information using an XSL stylesheet.  If the value contains
                "{format}" it will be replaced with the format chosen by the
                --format option.  If the value is an HTTP or HTTPS URL it
                will be downloaded as needed.  (default: value of
                "vpn-stylesheet" region option)''')
    ]

    def print_result(self, result):
        """Print the new connection, choosing a stylesheet (if any) and
        whether to show connection info based on --format/--stylesheet."""
        fmt = self.args.get('format')
        if fmt == 'none':
            # Suppress connection info entirely
            stylesheet = None
            show_conn_info = False
        elif fmt == 'xml':
            # Raw XML; no stylesheet applied
            stylesheet = None
            show_conn_info = True
        elif fmt is None:
            # If --stylesheet is used it will be applied.  Otherwise,
            # None will make it print the raw XML, which is what we want.
            stylesheet = self.args.get('stylesheet')
            show_conn_info = True
        else:
            # A named device format: find a stylesheet and substitute
            # the format into it.
            stylesheet = (self.args.get('stylesheet') or
                          self.config.get_region_option('vpn-stylesheet'))
            if stylesheet:
                stylesheet = stylesheet.format(format=fmt)
            else:
                self.log.warn('current region has no stylesheet')
                msg = ('current region has no XSLT stylesheet to format '
                       'output; connection info will not be shown  (try '
                       'specifying one with "--stylesheet" or using '
                       '"--format xml")')
                six.print_(msg, file=sys.stderr)
            show_conn_info = bool(stylesheet)
        self.print_vpn_connection(result.get('vpnConnection') or {},
                                  show_conn_info=show_conn_info,
                                  stylesheet=stylesheet)
Example #14
0
class DeleteRole(IAMRequest):
    DESCRIPTION = 'Delete a role'
    ARGS = [arg_role(help='name of the role to delete (required)'),
            Arg('-c', '--recursive', action='store_true', route_to=None,
                help='''remove all IAM resources associated with the role
                first'''),
            Arg('-p', '--pretend', action='store_true', route_to=None,
                help='''list the resources that would be deleted instead of
                actually deleting them.  Implies -c.'''),
            AS_ACCOUNT]

    def main(self):
        if self.args.get('recursive') or self.args.get('pretend'):
            # Figure out what we have to delete
            req = ListInstanceProfilesForRole.from_other(
                self, RoleName=self.args['RoleName'],
                DelegateAccount=self.args.get('DelegateAccount'))
            response = req.main()
            instance_profiles = []
            for profile in response.get('InstanceProfiles') or []:
                instance_profiles.append(
                    {'arn': profile.get('Arn'),
                     'name': profile.get('InstanceProfileName')})

            req = ListRolePolicies.from_other(
                self, RoleName=self.args['RoleName'],
                DelegateAccount=self.args.get('DelegateAccount'))
            response = req.main()
            policies = []
            for policy in response.get('PolicyNames') or []:
                policies.append(policy)
        else:
            # Just in case
            instance_profiles = []
            policies = []
        if self.args.get('pretend'):
            return {'instance_profiles': instance_profiles,
                    'policies': policies}
        else:
            if self.args.get('recursive'):
                for profile in instance_profiles:
                    req = RemoveRoleFromInstanceProfile.from_other(
                        self, RoleName=self.args['RoleName'],
                        InstanceProfileName=profile['name'],
                        DelegateAccount=self.args.get('DelegateAccount'))
                    req.main()
                for policy in policies:
                    req = DeleteRolePolicy.from_other(
                        self, RoleName=self.args['RoleName'],
                        PolicyName=policy,
                        DelegateAccount=self.args.get('DelegateAccount'))
                    req.main()
        return self.send()

    def print_result(self, result):
        if self.args.get('pretend'):
            print 'instance profiles'
            for profile in result['instance_profiles']:
                print '\t' + profile['arn']
            print 'policies'
            for policy in result['policies']:
                print '\t' + policy
class CreateAccessKey(IAMRequest):
    DESCRIPTION = 'Create a new access key for a user'
    ARGS = [
        arg_user(help='''user the new key will belong to
                     (default: current user)'''),
        Arg('-w',
            '--write-config',
            action='store_true',
            route_to=None,
            help='''output access keys and region information in the
                form of a euca2ools.ini(5) configuration file instead of
                by themselves'''),
        Arg('-d',
            '--domain',
            route_to=None,
            help='''the DNS domain
                to use for region information in configuration file
                output (default: based on IAM URL)'''),
        Arg('-l',
            '--set-default-user',
            action='store_true',
            route_to=None,
            help='''set this user as the default user for the region
                in euca2ools.ini(5) configuration file output.  This
                option is only useful when used with -w.'''), AS_ACCOUNT
    ]

    def postprocess(self, result):
        if self.args.get('write_config'):
            parsed = six.moves.urllib.parse.urlparse(self.service.endpoint)
            if not self.args.get('domain'):
                dnsname = parsed.netloc.split(':')[0]
                if all(label.isdigit() for label in dnsname.split('.')):
                    msg = ('warning: IAM URL {0} refers to a specific IP; '
                           'for a complete configuration file supply '
                           'the region\'s DNS domain with -d/--domain'.format(
                               self.service.endpoint))
                    print >> sys.stderr, msg
                else:
                    self.args['domain'] = parsed.netloc.split('.', 1)[1]
            configfile = six.moves.configparser.SafeConfigParser()
            if self.args.get('domain'):
                if ':' not in self.args['domain'] and ':' in parsed.netloc:
                    # Add the port
                    self.args['domain'] += ':' + parsed.netloc.split(':')[1]
                # This uses self.config.region instead of
                # self.service.region_name because the latter is a global
                # service in AWS and thus frequently deferred with "use"
                # statements.  That may eventually happen in eucalyptus
                # cloud federations as well.
                #
                # At some point an option that lets one choose a region
                # name at the command line may be useful, but until
                # someone asks for it let's not clutter it up for now.
                region_name = self.config.region or self.args['domain']
                region_section = 'region {0}'.format(region_name.split(':')[0])
                configfile.add_section(region_section)
                for service in sorted(euca2ools.util.generate_service_names()):
                    url = '{scheme}://{service}.{domain}/'.format(
                        scheme=parsed.scheme,
                        domain=self.args['domain'],
                        service=service)
                    configfile.set(region_section, '{0}-url'.format(service),
                                   url)
                if self.config.get_region_option('verify-tls') is not None:
                    configfile.set(region_section, 'verify-tls',
                                   self.config.get_region_option('verify-tls'))
                if self.config.get_region_option('verify-ssl') is not None:
                    configfile.set(region_section, 'verify-ssl',
                                   self.config.get_region_option('verify-ssl'))

            user_name = result['AccessKey'].get('UserName') or 'root'
            account_id = self.get_user_account_id()
            if account_id:
                user_name = '{0}:{1}'.format(account_id, user_name)
            user_section = 'user {0}'.format(user_name)
            configfile.add_section(user_section)
            configfile.set(user_section, 'key-id',
                           result['AccessKey']['AccessKeyId'])
            configfile.set(user_section, 'secret-key',
                           result['AccessKey']['SecretAccessKey'])
            if account_id:
                configfile.set(user_section, 'account-id', account_id)
            if self.args.get('set_default_user'):
                configfile.set(region_section, 'user', user_name)
            result['configfile'] = configfile

    def print_result(self, result):
        if self.args.get('write_config'):
            result['configfile'].write(sys.stdout)
        else:
            print result['AccessKey']['AccessKeyId']
            print result['AccessKey']['SecretAccessKey']

    def get_user_account_id(self):
        req = GetUser.from_other(
            self,
            UserName=self.params['UserName'],
            DelegateAccount=self.params.get('DelegateAccount'))
        try:
            response = req.main()
        except euca2ools.exceptions.AWSError as err:
            if err.status_code == 403:
                msg = ('warning: unable to retrieve account ID ({0})'.format(
                    err.message))
                print >> sys.stderr, msg
                return None
            raise
        arn = response['User']['Arn']
        return arn.split(':')[4]
Example #16
0
class DescribeImages(EC2Request):
    """Request class for describing EC2 images (euca-describe-images)."""

    DESCRIPTION = ('Show information about images\n\nBy default, only images '
                   'your account owns and images for which your account has '
                   'explicit launch permissions are shown.')
    ARGS = [
        Arg('ImageId',
            metavar='IMAGE',
            nargs='*',
            help='limit results to specific images'),
        Arg('-a',
            '--all',
            action='store_true',
            route_to=None,
            help='describe all images'),
        Arg('-o',
            '--owner',
            dest='Owner',
            metavar='ACCOUNT',
            action='append',
            help='describe images owned by the specified owner'),
        Arg('-x',
            '--executable-by',
            dest='ExecutableBy',
            metavar='ACCOUNT',
            action='append',
            help='''describe images for which the specified account has
                explicit launch permissions''')
    ]
    FILTERS = [
        Filter('architecture', help='CPU architecture'),
        Filter('block-device-mapping.delete-on-termination',
               help='''whether a volume is deleted upon instance
                      termination'''),
        Filter('block-device-mapping.device-name',
               help='device name for a volume mapped to the image'),
        Filter('block-device-mapping.snapshot-id',
               help='snapshot ID for a volume mapped to the image'),
        Filter('block-device-mapping.volume-size',
               help='volume size for a volume mapped to the image'),
        Filter('block-device-mapping.volume-type',
               help='volume type for a volume mapped to the image'),
        Filter('description', help='image description'),
        Filter('hypervisor', help='image\'s hypervisor type'),
        Filter('image-id'),
        Filter('image-type',
               help='image type ("machine", "kernel", or "ramdisk")'),
        Filter('is-public', help='whether the image is public'),
        Filter('kernel-id'),
        Filter('manifest-location'),
        Filter('name'),
        Filter('owner-alias', help="image owner's account alias"),
        Filter('owner-id', help="image owner's account ID"),
        Filter('platform', help='"windows" for Windows images'),
        Filter('product-code', help='product code associated with the image'),
        Filter('product-code.type',
               help='''type of product code
                      associated with the image ("devpay", "marketplace")'''),
        Filter('ramdisk-id'),
        Filter('root-device-name'),
        Filter('root-device-type',
               help='root device type ("ebs" or "instance-store")'),
        Filter('state',
               help='''image state ("available", "pending", or
                      "failed")'''),
        Filter('state-reason-code',
               help='reason code for the most recent state change'),
        Filter('state-reason-message',
               help='message for the most recent state change'),
        Filter('tag-key', help='key of a tag assigned to the image'),
        Filter('tag-value', help='value of a tag assigned to the image'),
        GenericTagFilter('tag:KEY', help='specific tag key/value combination'),
        Filter('virtualization-type',
               help='virtualization type ("paravirtual" or "hvm")')
    ]
    LIST_TAGS = ['imagesSet', 'productCodes', 'blockDeviceMapping', 'tagSet']

    # noinspection PyExceptionInherit
    def configure(self):
        """Reject -a/--all combined with any other result-narrowing option."""
        EC2Request.configure(self)
        if self.args.get('all', False):
            if self.args.get('ImageId'):
                raise ArgumentError('argument -a/--all: not allowed with '
                                    'a list of images')
            if self.args.get('ExecutableBy'):
                raise ArgumentError('argument -a/--all: not allowed with '
                                    'argument -x/--executable-by')
            if self.args.get('Owner'):
                raise ArgumentError('argument -a/--all: not allowed with '
                                    'argument -o/--owner')

    def main(self):
        """Send the request, defaulting to owned + launchable images.

        When no image IDs, owners, executable-by accounts, or -a/--all
        were given, two requests are issued (Owner=self, then
        ExecutableBy=self) and their imagesSet lists are concatenated
        into a single response dict.
        """
        if not any(
                self.args.get(item)
                for item in ('all', 'ImageId', 'ExecutableBy', 'Owner')):
            # Default to owned images and images with explicit launch perms
            self.params['Owner'] = ['self']
            owned = self.send()
            # self.params is shared between the two send() calls, so each
            # selector must be removed before the next request goes out.
            del self.params['Owner']
            self.params['ExecutableBy'] = ['self']
            executable = self.send()
            del self.params['ExecutableBy']
            owned['imagesSet'] = (owned.get('imagesSet', []) +
                                  executable.get('imagesSet', []))
            return owned
        else:
            return self.send()

    def print_result(self, result):
        """De-duplicate images by ID and print them sorted by image ID."""
        images = {}
        for image in result.get('imagesSet', []):
            # setdefault keeps the first occurrence when the merged result
            # from main() contains the same image twice
            images.setdefault(image['imageId'], image)
        for _, image in sorted(images.iteritems()):
            self.print_image(image)

    def print_image(self, image):
        """Print one IMAGE line plus its block device mappings and tags."""
        if image.get('name'):
            # Prefer the owner-qualified name (OWNER/NAME) when available
            imagename = '/'.join((image.get('imageOwnerId',
                                            ''), image['name']))
        else:
            imagename = image.get('imageLocation')

        print self.tabify(
            ('IMAGE', image.get('imageId'), imagename,
             image.get('imageOwnerAlias')
             or image.get('imageOwnerId'), image.get('imageState'),
             ('public' if image.get('isPublic') == 'true' else 'private'),
             image.get('architecture'), image.get('imageType'),
             image.get('kernelId'), image.get('ramdiskId'),
             image.get('platform'), image.get('rootDeviceType'),
             image.get('virtualizationType'), image.get('hypervisor')))
        for mapping in image.get('blockDeviceMapping', []):
            self.print_blockdevice_mapping(mapping)
        for tag in image.get('tagSet', []):
            self.print_resource_tag(tag, image.get('imageId'))
Exemple #17
0
class PutObject(S3Request, FileTransferProgressBarMixin):
    """Upload a single object to an S3/Walrus bucket, with an optional
    progress bar, MD5 verification against the returned ETag, and
    retry-on-timeout support."""

    DESCRIPTION = ('Upload an object to the server\n\nNote that uploading a '
                   'large file to a region other than the one the bucket is '
                   'may result in "Broken pipe" errors or other connection '
                   'problems that this program cannot detect.')
    ARGS = [
        Arg('source',
            metavar='FILE',
            route_to=None,
            help='file to upload (required)'),
        Arg('dest',
            metavar='BUCKET/KEY',
            route_to=None,
            help='bucket and key name to upload the object to (required)'),
        Arg('--size',
            type=int,
            route_to=None,
            help='''the number of
                bytes to upload (required when reading from stdin)'''),
        Arg('--acl',
            route_to=None,
            choices=('private', 'public-read', 'public-read-write',
                     'authenticated-read', 'bucket-owner-read',
                     'bucket-owner-full-control', 'aws-exec-read')),
        Arg('--mime-type',
            route_to=None,
            help='MIME type for the file being uploaded'),
        Arg('--retry',
            dest='retries',
            action='store_const',
            const=5,
            default=0,
            route_to=None,
            help='retry interrupted uploads up to 5 times'),
        Arg('--progressbar-label', help=argparse.SUPPRESS)
    ]
    METHOD = 'PUT'

    def __init__(self, **kwargs):
        S3Request.__init__(self, **kwargs)
        # Error raised in the upload thread, if any; protected by _lock
        # because both the worker thread and main() touch it.
        self.last_upload_error = None
        self._lock = threading.Lock()

    # noinspection PyExceptionInherit
    def configure(self):
        """Wrap the upload source in a _FileObjectExtent and validate
        that the destination has both a bucket and a key component.

        Raises ArgumentError when --size is missing for stdin or
        file-object sources, or when the destination is malformed.
        """
        S3Request.configure(self)
        if self.args['source'] == '-':
            if self.args.get('size') is None:
                raise requestbuilder.exceptions.ArgumentError(
                    "argument --size is required when uploading stdin")
            source = _FileObjectExtent(sys.stdin, self.args['size'])
        elif isinstance(self.args['source'], basestring):
            source = _FileObjectExtent.from_filename(
                self.args['source'], size=self.args.get('size'))
        else:
            # An already-open file object was handed in programmatically
            if self.args.get('size') is None:
                raise requestbuilder.exceptions.ArgumentError(
                    "argument --size is required when uploading a file object")
            source = _FileObjectExtent(self.args['source'], self.args['size'])
        self.args['source'] = source
        bucket, _, key = self.args['dest'].partition('/')
        if not bucket:
            raise requestbuilder.exceptions.ArgumentError(
                'destination bucket name must be non-empty')
        if not key:
            raise requestbuilder.exceptions.ArgumentError(
                'destination key name must be non-empty')

    def preprocess(self):
        """Set the request path and optional ACL/Content-Type headers."""
        self.path = self.args['dest']
        if self.args.get('acl'):
            self.headers['x-amz-acl'] = self.args['acl']
        if self.args.get('mime_type'):
            self.headers['Content-Type'] = self.args['mime_type']

    # noinspection PyExceptionInherit
    def main(self):
        """Run the upload in a worker thread while the main thread drives
        a progress bar; re-raise any error the worker recorded."""
        self.preprocess()
        source = self.args['source']
        self.headers['Content-Length'] = source.size

        # We do the upload in another thread so the main thread can show a
        # progress bar.
        upload_thread = threading.Thread(
            target=self.try_send,
            args=(source, ),
            kwargs={'retries_left': self.args.get('retries') or 0})
        # The upload thread is daemonic so ^C will kill the program more
        # cleanly.
        upload_thread.daemon = True
        upload_thread.start()
        pbar_label = self.args.get('progressbar_label') or source.filename
        pbar = self.get_progressbar(label=pbar_label, maxval=source.size)
        pbar.start()
        while upload_thread.is_alive():
            pbar.update(source.tell())
            time.sleep(0.05)
        pbar.finish()
        upload_thread.join()
        source.close()
        with self._lock:
            if self.last_upload_error is not None:
                # pylint: disable=E0702
                raise self.last_upload_error
                # pylint: enable=E0702
    def try_send(self, source, retries_left=0):
        """Send the object, verify the ETag against our MD5, and retry on
        timeout up to retries_left times (only if the source can rewind).

        Any failure is stored in self.last_upload_error for main() to
        re-raise; this method itself never raises across the thread
        boundary.
        """
        self.body = source
        if retries_left > 0 and not source.can_rewind:
            self.log.warn('source cannot rewind, so requested retries will '
                          'not be attempted')
            retries_left = 0
        try:
            response = self.send()
            our_md5 = source.read_hexdigest
            their_md5 = response.headers['ETag'].lower().strip('"')
            if their_md5 != our_md5:
                # Bug fix: the format string previously lacked its closing
                # parenthesis ('... their MD5: %s'), truncating the log line.
                self.log.error('corrupt upload (our MD5: %s, their MD5: %s)',
                               our_md5, their_md5)
                raise requestbuilder.exceptions.ClientError(
                    'upload was corrupted during transit')
        except requestbuilder.exceptions.ClientError as err:
            if isinstance(err, requestbuilder.exceptions.TimeoutError):
                if retries_left > 0:
                    self.log.info(
                        'retrying upload (%i retry attempt(s) '
                        'remaining)', retries_left)
                    source.rewind()
                    return self.try_send(source, retries_left - 1)
            with self._lock:
                self.log.error('upload failed', exc_info=True)
                self.last_upload_error = err
            return
        except Exception as err:
            with self._lock:
                self.log.error('upload failed', exc_info=True)
                self.last_upload_error = err
            return
class CreateLaunchConfiguration(AutoScalingRequest):
    """Request class for creating an auto-scaling launch configuration."""

    DESCRIPTION = 'Create a new auto-scaling instance launch configuration'
    ARGS = [
        Arg('LaunchConfigurationName',
            metavar='LAUNCHCONFIG',
            help='name of the new launch configuration (required)'),
        Arg('-i',
            '--image-id',
            dest='ImageId',
            metavar='IMAGE',
            required=True,
            help='machine image to use for instances (required)'),
        Arg('-t',
            '--instance-type',
            dest='InstanceType',
            metavar='TYPE',
            required=True,
            help='instance type for use for instances (required)'),
        Arg('--block-device-mapping',
            dest='BlockDeviceMappings.member',
            metavar='DEVICE1=MAPPED1,DEVICE2=MAPPED2,...',
            type=delimited_list(',', item_type=ec2_block_device_mapping),
            help='''a comma-separated list of block device mappings for the
                image, in the form DEVICE=MAPPED, where "MAPPED" is "none",
                "ephemeral(0-3)", or "[SNAP-ID]:[GiB]:[true|false]'''),
        Arg('--ebs-optimized',
            dest='EbsOptimized',
            action='store_const',
            const='true',
            help='whether the instance is optimized for EBS I/O'),
        Arg('--group',
            dest='SecurityGroups.member',
            metavar='GROUP1,GROUP2,...',
            type=delimited_list(','),
            help='''a comma-separated list of security groups with which
                to associate instances.  Either all group names or all group
                IDs are allowed, but not both.'''),
        Arg('--iam-instance-profile',
            dest='IamInstanceProfile',
            metavar='PROFILE',
            help='''ARN of the instance profile
                associated with instances' IAM roles'''),
        Arg('--kernel',
            dest='KernelId',
            metavar='KERNEL',
            help='kernel image to use for instances'),
        Arg('--key',
            dest='KeyName',
            metavar='KEYPAIR',
            help='name of the key pair to use for instances'),
        Arg('--monitoring-enabled',
            dest='InstanceMonitoring.Enabled',
            action='store_const',
            const='true',
            help='enable detailed monitoring (enabled by default)'),
        Arg('--monitoring-disabled',
            dest='InstanceMonitoring.Enabled',
            action='store_const',
            const='false',
            help='disable detailed monitoring (enabled by default)'),
        Arg('--ramdisk',
            dest='RamdiskId',
            metavar='RAMDISK',
            help='ramdisk image to use for instances'),
        Arg('--spot-price',
            dest='SpotPrice',
            metavar='PRICE',
            help='maximum hourly price for any spot instances launched'),
        MutuallyExclusiveArgList(
            Arg('-d',
                '--user-data',
                metavar='DATA',
                route_to=None,
                help='user data to make available to instances'),
            Arg('--user-data-force',
                metavar='DATA',
                route_to=None,
                help='''same as -d/--user-data, but without checking if a
                    file by that name exists first'''),
            Arg('-f',
                '--user-data-file',
                metavar='FILE',
                route_to=None,
                help='''file containing user data to make available to
                    instances'''))
    ]

    # noinspection PyExceptionInherit
    def configure(self):
        """Validate the user-data options and base64-encode the chosen
        user-data source into the UserData request parameter.

        Raises ArgumentError when -d/--user-data names an existing file,
        since that almost always means -f/--user-data-file was intended.
        """
        AutoScalingRequest.configure(self)
        if self.args.get('user_data'):
            if os.path.isfile(self.args['user_data']):
                # Bug fix: .format() was never called before, so the error
                # message showed a literal '{0}' instead of the value.
                raise ArgumentError(
                    'argument -d/--user-data: to pass the contents of a file '
                    'as user data, use -f/--user-data-file.  To pass the '
                    "literal value '{0}' as user data even though it matches "
                    'the name of a file, use --user-data-force.'.format(
                        self.args['user_data']))
            else:
                self.params['UserData'] = base64.b64encode(
                    self.args['user_data'])
        elif self.args.get('user_data_force'):
            self.params['UserData'] = base64.b64encode(
                self.args['user_data_force'])
        elif self.args.get('user_data_file'):
            with open(self.args['user_data_file']) as user_data_file:
                self.params['UserData'] = base64.b64encode(
                    user_data_file.read())
Exemple #19
0
class DescribeServices(EucalyptusAdminRequest):
    """Admin request that lists Eucalyptus services and their status."""

    DESCRIPTION = 'Get services'
    LIST_TAGS = ['serviceStatuses']
    METHOD = 'GET'
    SERVICE_PATH = 'services/Empyrean/'

    ARGS = [
        Arg('-A',
            '--all',
            dest='ListAll',
            action="store_true",
            default=False,
            help=
            'Include all public service information.  Reported state information is determined by the view available to the target host, which should be treated as advisory (See documentation for guidance on interpreting this information).'
            ),
        Arg('--system-internal',
            dest='ListInternal',
            action="store_true",
            default=False,
            help=
            'Include internal services information (note: this information is only for the target host).'
            ),
        Arg('--user-services',
            dest='ListUserServices',
            action="store_true",
            default=False,
            help=
            'Include services which are user facing and are co-located with some other top-level service (note: this information is only for the target host).'
            ),
        Arg('-E',
            '--events',
            dest='ShowEvents',
            action="store_true",
            default=False,
            help='Show service event details.'),
        Arg('--events-verbose',
            dest='ShowEventStacks',
            action="store_true",
            default=False,
            help='Show verbose service event details.')
    ]

    def preprocess(self):
        """Serialize the boolean flags into string request parameters.

        repr() turns the Python booleans into the literal strings
        'True'/'False' that the service expects.
        NOTE(review): ListUserServices is parsed but never copied into
        self.params here -- confirm whether the framework routes it
        automatically or the option is currently a no-op.
        """
        self.params['ListAll'] = repr(self.args.get('ListAll', False))
        self.params['ListInternal'] = repr(self.args.get(
            'ListInternal', False))
        self.params['ShowEvents'] = repr(self.args.get('ShowEvents', False))
        self.params['ShowEventStacks'] = repr(
            self.args.get('ShowEventStacks', False))

    def print_result(self, data):
        """Print one SERVICE line per service, plus SERVICEEVENT detail
        lines (uuid, full name, severity, timestamp, message) when status
        details are present."""
        services = data.get('serviceStatuses')
        fmt = 'SERVICE\t%-15.15s\t%-15s\t%-15s\t%-10s\t%-4s\t%-40s\t%s'
        detail_fmt = 'SERVICEEVENT\t%-36.36s\t%s'
        for s in services:
            service_id = s['serviceId']
            print fmt % (service_id.get('type'), service_id.get('partition'),
                         service_id.get('name'), s.get('localState'),
                         s.get('localEpoch'), service_id.get('uri'),
                         service_id.get('fullName'))
            details = s.get('statusDetails')
            if details:
                # NOTE(review): only a single 'item' entry is printed;
                # presumably the response never contains more than one
                # per service -- confirm against the Empyrean API.
                detail_item = details.get('item')
                if detail_item:
                    print detail_fmt % (detail_item.get('uuid'),
                                        detail_item.get('serviceFullName'))
                    print detail_fmt % (detail_item.get('uuid'),
                                        detail_item.get('euca:severity'))
                    print detail_fmt % (detail_item.get('uuid'),
                                        detail_item.get('timestamp'))
                    print detail_fmt % (detail_item.get('uuid'),
                                        detail_item.get('message'))
                    if detail_item.get('stackTrace'):
                        print detail_item['stackTrace']
                    print
Exemple #20
0
class BundleUploadingMixin(object):
    ARGS = [
        Arg('-b',
            '--bucket',
            metavar='BUCKET[/PREFIX]',
            required=True,
            help='bucket to upload the bundle to (required)'),
        Arg('--acl',
            default='aws-exec-read',
            choices=('public-read', 'aws-exec-read', 'ec2-bundle-read'),
            help='''canned ACL policy to apply to the bundle (default:
                aws-exec-read)'''),
        MutuallyExclusiveArgList(
            Arg('--upload-policy',
                dest='upload_policy',
                metavar='POLICY',
                type=base64.b64encode,
                help='upload policy to use for authorization'),
            Arg('--upload-policy-file',
                dest='upload_policy',
                metavar='FILE',
                type=b64encoded_file_contents,
                help='''file containing an upload policy to use for
                    authorization''')),
        Arg('--upload-policy-signature',
            metavar='SIGNATURE',
            help='''signature for the upload policy (required when an
                'upload policy is used)'''),
        Arg('--location',
            help='''location constraint of the destination
                bucket (default: inferred from s3-location-constraint in
                configuration, or otherwise none)'''),
        Arg('--retry',
            dest='retries',
            action='store_const',
            const=5,
            default=0,
            help='retry failed uploads up to 5 times')
    ]

    def configure_bundle_upload_auth(self):
        if self.args.get('upload_policy'):
            if not self.args.get('key_id'):
                raise ArgumentError('-I/--access-key-id is required when '
                                    'using an upload policy')
            if not self.args.get('upload_policy_signature'):
                raise ArgumentError('--upload-policy-signature is required '
                                    'when using an upload policy')
            self.auth = None

    def get_bundle_key_prefix(self):
        (bucket, _, prefix) = self.args['bucket'].partition('/')
        if prefix and not prefix.endswith('/'):
            prefix += '/'
        return bucket + '/' + prefix

    def ensure_dest_bucket_exists(self):
        if self.args.get('upload_policy'):
            # We won't have creds to sign our own requests
            self.log.info('using an upload policy; not verifying bucket '
                          'existence')
            return

        bucket = self.args['bucket'].split('/', 1)[0]
        try:
            req = CheckBucket.from_other(self, bucket=bucket)
            req.main()
        except AWSError as err:
            if err.status_code == 404:
                # No such bucket
                self.log.info("creating bucket '%s'", bucket)
                req = CreateBucket.from_other(
                    self, bucket=bucket, location=self.args.get('location'))
                req.main()
            else:
                raise
        # At this point we know we can at least see the bucket, but it's still
        # possible that we can't write to it with the desired key names.  So
        # many policies are in play here that it isn't worth trying to be
        # proactive about it.

    def upload_bundle_file(self,
                           source,
                           dest,
                           show_progress=False,
                           **putobj_kwargs):
        if self.args.get('upload_policy'):
            if show_progress:
                # PostObject does not yet support show_progress
                print source, 'uploading...'
            if self.args.get('security_token'):
                postobj_kwargs = \
                    {'x-amz-security-token': self.args['security_token']}
            else:
                postobj_kwargs = {}
            postobj_kwargs.update(putobj_kwargs)
            req = PostObject.from_other(
                self,
                source=source,
                dest=dest,
                acl=self.args.get('acl') or 'aws-exec-read',
                Policy=self.args['upload_policy'],
                Signature=self.args['upload_policy_signature'],
                AWSAccessKeyId=self.args['key_id'],
                **postobj_kwargs)
        else:
            req = PutObject.from_other(self,
                                       source=source,
                                       dest=dest,
                                       acl=self.args.get('acl')
                                       or 'aws-exec-read',
                                       retries=self.args.get('retries') or 0,
                                       show_progress=show_progress,
                                       **putobj_kwargs)
        req.main()

    def upload_bundle_parts(self,
                            partinfo_in_mpconn,
                            key_prefix,
                            partinfo_out_mpconn=None,
                            part_write_sem=None,
                            **putobj_kwargs):
        try:
            while True:
                part = partinfo_in_mpconn.recv()
                dest = key_prefix + os.path.basename(part.filename)
                self.upload_bundle_file(part.filename, dest, **putobj_kwargs)
                if part_write_sem is not None:
                    # Allow something that's waiting for the upload to finish
                    # to continue
                    part_write_sem.release()
                if partinfo_out_mpconn is not None:
                    partinfo_out_mpconn.send(part)
        except EOFError:
            return
        finally:
            partinfo_in_mpconn.close()
            if partinfo_out_mpconn is not None:
                partinfo_out_mpconn.close()
Exemple #21
0
class UploadBundle(S3Request, BundleUploadingMixin,
                   FileTransferProgressBarMixin):
    DESCRIPTION = 'Upload a bundle prepared by euca-bundle-image to the cloud'
    ARGS = [Arg('-m', '--manifest', metavar='FILE', required=True,
                help='manifest for the bundle to upload (required)'),
            Arg('-d', '--directory', metavar='DIR',
                help='''directory that contains the bundle parts (default:
                directory that contains the manifest)'''),
            # TODO:  make this work
            Arg('--part', metavar='INT', type=int, default=0, help='''begin
                uploading with a specific part number (default: 0)'''),
            Arg('--skipmanifest', action='store_true',
                help='do not upload the manifest')]

    def configure(self):
        self.configure_bundle_upload_auth()
        S3Request.configure(self)

    def main(self):
        key_prefix = self.get_bundle_key_prefix()
        self.ensure_dest_bucket_exists()

        manifest = BundleManifest.read_from_file(self.args['manifest'])
        part_dir = (self.args.get('directory') or
                    os.path.dirname(self.args['manifest']))
        for part in manifest.image_parts:
            part.filename = os.path.join(part_dir, part.filename)
            if not os.path.isfile(part.filename):
                raise ValueError("no such part: '{0}'".format(part.filename))

        # manifest -> upload
        part_out_r, part_out_w = multiprocessing.Pipe(duplex=False)
        part_gen = multiprocessing.Process(target=_generate_bundle_parts,
                                           args=(manifest, part_out_w))
        part_gen.start()
        part_out_w.close()

        # Drive the upload process by feeding in part info
        self.upload_bundle_parts(part_out_r, key_prefix,
                                 show_progress=self.args.get('show_progress'))
        part_gen.join()

        # (conditionally) upload the manifest
        if not self.args.get('skip_manifest'):
            manifest_dest = (key_prefix +
                             os.path.basename(self.args['manifest']))
            req = PutObject.from_other(
                self, source=self.args['manifest'], dest=manifest_dest,
                acl=self.args.get('acl') or 'aws-exec-read',
                retries=self.args.get('retries') or 0)
            req.main()
        else:
            manifest_dest = None

        return {'parts': tuple({'filename': part.filename,
                                'key': (key_prefix +
                                        os.path.basename(part.filename))}
                               for part in manifest.image_parts),
                'manifests': ({'filename': self.args['manifest'],
                               'key': manifest_dest},)}

    def print_result(self, result):
        if self.debug:
            for part in result['parts']:
                print 'Uploaded', part['key']
        if result['manifests'][0]['key'] is not None:
            print 'Uploaded', result['manifests'][0]['key']
Exemple #22
0
class BundleCreatingMixin(object):
    """
    Mixin that knows how to turn a disk image into a bundle:  it
    gathers the needed credentials, validates image input/output
    options, generates encryption keys, and builds and signs the
    bundle's manifest.

    Users of this mixin are expected to provide ``self.args``,
    ``self.config``, and ``self.log`` (requestbuilder conventions).
    """

    ARGS = [
        Arg('-i',
            '--image',
            metavar='FILE',
            required=True,
            help='file containing the image to bundle (required)'),
        Arg('-p',
            '--prefix',
            help='''the file name prefix to give the
                bundle's files (required when bundling stdin; otherwise
                defaults to the image's file name)'''),
        Arg('-d',
            '--destination',
            metavar='DIR',
            help='''location to place
                the bundle's files (default:  dir named by TMPDIR, TEMP, or TMP
                environment variables, or otherwise /var/tmp)'''),
        Arg('-r',
            '--arch',
            required=True,
            choices=('i386', 'x86_64', 'armhf', 'ppc', 'ppc64'),
            help="the image's architecture (required)"),

        # User- and cloud-specific stuff
        Arg('-k',
            '--privatekey',
            metavar='FILE',
            help='''file containing
                your private key to sign the bundle's manifest with.  If one
                is not available the bundle will not be signed.'''),
        Arg('-c',
            '--cert',
            metavar='FILE',
            help='''file containing
                your X.509 certificate.  If one is not available it
                will not be possible to unbundle the bundle without
                cloud administrator assistance.'''),
        Arg('--ec2cert',
            metavar='FILE',
            help='''file containing the
                cloud's X.509 certificate.  If one is not available
                locally it must be available from the empyrean
                service.'''),
        Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'),
        Arg('--kernel',
            metavar='IMAGE',
            help='''ID of the kernel image to
                associate with this machine image'''),
        Arg('--ramdisk',
            metavar='IMAGE',
            help='''ID of the ramdisk image
                to associate with this machine image'''),
        Arg('--empyrean-url',
            route_to=None,
            help='''[Eucalyptus
                only] bootstrap service endpoint URL (used for obtaining
                --ec2cert automatically'''),
        Arg('--empyrean-service', route_to=None, help=argparse.SUPPRESS),
        Arg('--empyrean-auth', route_to=None, help=argparse.SUPPRESS),

        # Obscurities
        Arg('-B',
            '--block-device-mappings',
            metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...',
            type=manifest_block_device_mappings,
            help='''block device mapping scheme with which to launch
                instances of this machine image'''),
        Arg('--productcodes',
            metavar='CODE1,CODE2,...',
            type=delimited_list(','),
            default=[],
            help='comma-separated list of product codes for the image'),
        Arg('--image-type',
            choices=('machine', 'kernel', 'ramdisk'),
            default='machine',
            help=argparse.SUPPRESS),

        # Stuff needed to fill out TarInfo when input comes from stdin.
        #
        # We technically could ask for a lot more, but most of it is
        # unnecessary since owners/modes/etc will be ignored at unbundling
        # time anyway.
        #
        # When bundling stdin we interpret --prefix as the image's file
        # name.
        Arg('--image-size',
            type=filesize,
            help='''the image's size
                (required when bundling stdin)'''),

        # Overrides for debugging and other entertaining uses
        Arg(
            '--part-size',
            type=filesize,
            default=10485760,  # 10M
            help=argparse.SUPPRESS),
        Arg('--enc-key', type=(lambda s: int(s, 16)),
            help=argparse.SUPPRESS),  # a hex string
        Arg('--enc-iv', type=(lambda s: int(s, 16)),
            help=argparse.SUPPRESS),  # a hex string

        # Noop, for compatibility
        Arg('--batch', action='store_true', help=argparse.SUPPRESS)
    ]

    # CONFIG METHODS #

    def configure_bundle_creds(self):
        """
        Fill in the account ID, user certificate, private key, and cloud
        certificate, consulting (in precedence order) the command line,
        the environment, and the configuration.

        Raises ArgumentError when the account ID or cloud certificate
        cannot be determined.
        """
        # User's account ID (user-level)
        if not self.args.get('user'):
            config_val = self.config.get_user_option('account-id')
            if 'EC2_USER_ID' in os.environ:
                self.log.debug('using account ID from environment')
                self.args['user'] = os.getenv('EC2_USER_ID')
            elif config_val:
                self.log.debug('using account ID from configuration')
                self.args['user'] = config_val
        if self.args.get('user'):
            # Account IDs are sometimes written with dashes; normalize.
            self.args['user'] = self.args['user'].replace('-', '')
        if not self.args.get('user'):
            raise ArgumentError(
                'missing account ID; please supply one with --user')
        self.log.debug('account ID: %s', self.args['user'])

        # User's X.509 certificate (user-level in config)
        if not self.args.get('cert'):
            config_val = self.config.get_user_option('certificate')
            if 'EC2_CERT' in os.environ:
                self.log.debug('using certificate from environment')
                self.args['cert'] = os.getenv('EC2_CERT')
            elif 'EUCA_CERT' in os.environ:  # used by the NC
                self.log.debug('using certificate from environment')
                self.args['cert'] = os.getenv('EUCA_CERT')
            elif config_val:
                self.log.debug('using certificate from configuration')
                self.args['cert'] = config_val
        if self.args.get('cert'):
            self.args['cert'] = os.path.expanduser(
                os.path.expandvars(self.args['cert']))
            _assert_is_file(self.args['cert'], 'user certificate')
        self.log.debug('certificate: %s', self.args['cert'])

        # User's private key (user-level in config)
        if not self.args.get('privatekey'):
            config_val = self.config.get_user_option('private-key')
            if 'EC2_PRIVATE_KEY' in os.environ:
                self.log.debug('using private key from environment')
                self.args['privatekey'] = os.getenv('EC2_PRIVATE_KEY')
            elif 'EUCA_PRIVATE_KEY' in os.environ:  # used by the NC
                # This used to be a bare "if", which caused the
                # "elif config_val" branch below to silently override
                # EC2_PRIVATE_KEY with the configured key whenever
                # EUCA_PRIVATE_KEY was unset.  The chain now matches the
                # certificate logic above.
                self.log.debug('using private key from environment')
                self.args['privatekey'] = os.getenv('EUCA_PRIVATE_KEY')
            elif config_val:
                self.log.debug('using private key from configuration')
                self.args['privatekey'] = config_val
        if self.args.get('privatekey'):
            self.args['privatekey'] = os.path.expanduser(
                os.path.expandvars(self.args['privatekey']))
            _assert_is_file(self.args['privatekey'], 'private key')
        self.log.debug('private key: %s', self.args['privatekey'])

        # Cloud's X.509 cert (region-level in config)
        if not self.args.get('ec2cert'):
            config_val = self.config.get_region_option('certificate')
            if 'EUCALYPTUS_CERT' in os.environ:
                # This has no EC2 equivalent since they just bundle their cert.
                self.log.debug('using cloud certificate from environment')
                self.args['ec2cert'] = os.getenv('EUCALYPTUS_CERT')
            elif config_val:
                self.log.debug('using cloud certificate from configuration')
                self.args['ec2cert'] = config_val
            elif (self.args.get('empyrean_service')
                  and self.args.get('empyrean_auth')):
                # Sending requests during configure() can be precarious.
                # Pay close attention to ordering to ensure all
                # of this request's dependencies have been fulfilled.
                fetched_cert = self.__get_bundle_certificate(
                    self.args['empyrean_service'], self.args['empyrean_auth'])
                if fetched_cert:
                    self.log.debug('using cloud certificate from '
                                   'empyrean service')
                    self.args['ec2cert'] = fetched_cert
        if self.args.get('ec2cert'):
            self.args['ec2cert'] = os.path.expanduser(
                os.path.expandvars(self.args['ec2cert']))
            _assert_is_file(self.args['ec2cert'], 'cloud certificate')
        if not self.args.get('ec2cert'):
            raise ArgumentError(
                'missing cloud certificate; please supply one with '
                '--ec2cert or use --empyrean-url to fetch one automatically')
        self.log.debug('cloud certificate: %s', self.args['ec2cert'])

    def configure_bundle_output(self):
        """
        Validate -d/--destination and normalize self.args['image'] into
        an open file object, filling in --prefix and --image-size where
        they can be derived.  Raises ArgumentError when required info
        is missing (e.g. --prefix/--image-size when bundling stdin).
        """
        if (self.args.get('destination')
                and os.path.exists(self.args['destination'])
                and not os.path.isdir(self.args['destination'])):
            raise ArgumentError("argument -d/--destination: '{0}' is not a "
                                "directory".format(self.args['destination']))
        if self.args['image'] == '-':
            # Duplicate stdin so closing the image later does not close
            # the process's real stdin.
            self.args['image'] = os.fdopen(os.dup(sys.stdin.fileno()))
            if not self.args.get('prefix'):
                raise ArgumentError(
                    'argument --prefix is required when bundling stdin')
            if not self.args.get('image_size'):
                raise ArgumentError(
                    'argument --image-size is required when bundling stdin')
        elif isinstance(self.args['image'], basestring):
            if not self.args.get('prefix'):
                self.args['prefix'] = os.path.basename(self.args['image'])
            if not self.args.get('image_size'):
                self.args['image_size'] = euca2ools.util.get_filesize(
                    self.args['image'])
            # NOTE(review): opened in text mode; presumably fine on
            # POSIX, but binary mode ('rb') would be needed on Windows.
            self.args['image'] = open(self.args['image'])
        else:
            # Assume it is already a file object
            if not self.args.get('prefix'):
                raise ArgumentError('argument --prefix is required when '
                                    'bundling a file object')
            if not self.args.get('image_size'):
                raise ArgumentError('argument --image-size is required when '
                                    'bundling a file object')
        if self.args['image_size'] > EC2_BUNDLE_SIZE_LIMIT:
            self.log.warn(
                'image is incompatible with EC2 due to its size (%i > %i)',
                self.args['image_size'], EC2_BUNDLE_SIZE_LIMIT)

    def configure_bundle_properties(self):
        """
        Derive the image type from --kernel/--ramdisk "true" values and
        reject option combinations that conflict with kernel or ramdisk
        image types.
        """
        if self.args.get('kernel') == 'true':
            self.args['image_type'] = 'kernel'
        if self.args.get('ramdisk') == 'true':
            self.args['image_type'] = 'ramdisk'
        if self.args['image_type'] == 'kernel':
            if self.args.get('kernel') and self.args['kernel'] != 'true':
                raise ArgumentError("argument --kernel: not compatible with "
                                    "image type 'kernel'")
            if self.args.get('ramdisk'):
                raise ArgumentError("argument --ramdisk: not compatible with "
                                    "image type 'kernel'")
            if self.args.get('block_device_mappings'):
                raise ArgumentError("argument -B/--block-device-mappings: not "
                                    "compatible with image type 'kernel'")
        if self.args['image_type'] == 'ramdisk':
            if self.args.get('kernel'):
                raise ArgumentError("argument --kernel: not compatible with "
                                    "image type 'ramdisk'")
            if self.args.get('ramdisk') and self.args['ramdisk'] != 'true':
                raise ArgumentError("argument --ramdisk: not compatible with "
                                    "image type 'ramdisk'")
            if self.args.get('block_device_mappings'):
                raise ArgumentError("argument -B/--block-device-mappings: not "
                                    "compatible with image type 'ramdisk'")

    def generate_encryption_keys(self):
        """
        Ensure self.args['enc_key'] and self.args['enc_iv'] hold
        32-digit hex strings, generating random 128-bit values with a
        cryptographically secure source when none were supplied.
        """
        srand = random.SystemRandom()
        if self.args.get('enc_key'):
            self.log.info('using preexisting encryption key')
            enc_key_i = self.args['enc_key']
        else:
            enc_key_i = srand.getrandbits(128)
        if self.args.get('enc_iv'):
            self.log.info('using preexisting encryption IV')
            enc_iv_i = self.args['enc_iv']
        else:
            enc_iv_i = srand.getrandbits(128)
        # Zero-pad to 32 hex digits (128 bits).
        self.args['enc_key'] = '{0:0>32x}'.format(enc_key_i)
        self.args['enc_iv'] = '{0:0>32x}'.format(enc_iv_i)

    def __get_bundle_certificate(self, empyrean_service, empyrean_auth):
        """
        Fetch the cloud's image-bundling certificate from the empyrean
        (bootstrap) service, write it to a temp file that is removed at
        exit, and return the file's name, or None if no suitable
        certificate was found.
        """
        self.log.info('attempting to obtain cloud certificate from '
                      'empyrean service')
        req = DescribeServiceCertificates(config=self.config,
                                          loglevel=self.log.level,
                                          service=empyrean_service,
                                          auth=empyrean_auth,
                                          Format='pem',
                                          FingerprintDigest='SHA-256')
        response = req.main()
        for cert in response.get('serviceCertificates') or []:
            if (cert.get('certificateUsage') == 'image-bundling'
                    and cert.get('serviceType') == 'compute'):
                cert_file = tempfile.NamedTemporaryFile(delete=False)
                cert_file.write(cert['certificate'])
                cert_file.file.flush()
                self.args['ec2cert'] = cert_file.name
                atexit.register(os.remove, cert_file.name)
                return cert_file.name

    # MANIFEST GENERATION METHODS #

    def build_manifest(self, digest, partinfo):
        """
        Build and return a BundleManifest from this command's arguments,
        the image's *digest* (SHA1 hex string), and the *partinfo*
        part list produced during bundling.
        """
        manifest = euca2ools.bundle.manifest.BundleManifest(
            loglevel=self.log.level)
        manifest.image_arch = self.args['arch']
        manifest.kernel_id = self.args.get('kernel')
        manifest.ramdisk_id = self.args.get('ramdisk')
        if self.args.get('block_device_mappings'):
            manifest.block_device_mappings.update(
                self.args['block_device_mappings'])
        if self.args.get('productcodes'):
            manifest.product_codes.extend(self.args['productcodes'])
        manifest.image_name = self.args['prefix']
        manifest.account_id = self.args['user']
        manifest.image_type = self.args['image_type']
        manifest.image_digest = digest
        manifest.image_digest_algorithm = 'SHA1'  # shouldn't be hardcoded here
        manifest.image_size = self.args['image_size']
        manifest.bundled_image_size = sum(part.size for part in partinfo)
        manifest.enc_key = self.args['enc_key']
        manifest.enc_iv = self.args['enc_iv']
        manifest.enc_algorithm = 'AES-128-CBC'  # shouldn't be hardcoded here
        manifest.image_parts = partinfo
        return manifest

    def dump_manifest_to_file(self, manifest, filename, pretty_print=False):
        """Serialize *manifest* (signed) to the file named *filename*."""
        with open(filename, 'w') as manifest_file:
            manifest_file.write(
                self.dump_manifest_to_str(manifest, pretty_print=pretty_print))

    def dump_manifest_to_str(self, manifest, pretty_print=False):
        """Return *manifest* serialized and signed as a string."""
        return manifest.dump_to_str(self.args['privatekey'],
                                    self.args['cert'],
                                    self.args['ec2cert'],
                                    pretty_print=pretty_print)
Example #23
0
class ImportInstance(EC2Request, S3AccessMixin, FileTransferProgressBarMixin):
    DESCRIPTION = 'Import an instance into the cloud'
    ARGS = [
        Arg('source',
            metavar='FILE',
            route_to=None,
            help='file containing the disk image to import (required)'),
        Arg('-t',
            '--instance-type',
            metavar='INSTANCETYPE',
            required=True,
            dest='LaunchSpecification.InstanceType',
            help='the type of instance to import to (required)'),
        Arg('-f',
            '--format',
            dest='DiskImage.1.Image.Format',
            metavar='FORMAT',
            required=True,
            help='''the image's format
                ("vmdk", "raw", or "vhd") (required)'''),
        Arg('-a',
            '--architecture',
            metavar='ARCH',
            required=True,
            dest='LaunchSpecification.Architecture',
            help="the instance's processor architecture (required)"),
        Arg('-p',
            '--platform',
            dest='Platform',
            required=True,
            choices=('Windows', 'Linux'),
            help="the instance's operating system (required)"),
        MutuallyExclusiveArgList(
            Arg('-b',
                '--bucket',
                route_to=None,
                help='the bucket to upload the volume to'),
            Arg('--manifest-url',
                metavar='URL',
                dest='DiskImage.1.Image.ImportManifestUrl',
                help='''a pre-signed URL that points to the import
                    manifest to use''')).required(),
        Arg('--prefix',
            route_to=None,
            help='''a prefix to add to the
                names of the volume parts as they are uploaded'''),
        Arg('-x',
            '--expires',
            metavar='DAYS',
            type=int,
            default=30,
            route_to=None,
            help='''how long the import manifest should
                remain valid, in days (default: 30 days)'''),
        Arg('--no-upload',
            action='store_true',
            route_to=None,
            help='''start the import process, but do not actually upload
                the volume (see euca-resume-import)'''),
        Arg('-d',
            '--description',
            dest='Description',
            help='a description for the import task (not the volume)'),
        Arg('-g',
            '--group',
            metavar='GROUP',
            dest='LaunchSpecification.GroupName.1',
            help='name of the security group to create the instance in'),
        Arg('-z',
            '--availability-zone',
            metavar='ZONE',
            dest='LaunchSpecification.Placement.AvailabilityZone',
            help='the zone in which to create the instance'),
        Arg('-s',
            '--volume-size',
            metavar='GiB',
            type=int,
            dest='DiskImage.1.Volume.Size',
            help='size of the volume to import to, in GiB'),
        Arg('--image-size',
            dest='DiskImage.1.Image.Bytes',
            metavar='BYTES',
            type=filesize,
            help='size of the image (required for non-raw files)'),
        MutuallyExclusiveArgList(
            Arg('--user-data',
                metavar='DATA',
                type=base64.b64encode,
                dest='LaunchSpecification.UserData',
                help='user data to supply to the instance'),
            Arg('--user-data-file',
                metavar='FILE',
                type=b64encoded_file_contents,
                dest='LaunchSpecification.UserData',
                help='''file
                    containing user data to supply to the instance''')),
        Arg('--subnet',
            metavar='SUBNET',
            dest='LaunchSpecification.SubnetId',
            help='''[VPC only] subnet
                to create the instance's network interface in'''),
        Arg('--private-ip-address',
            metavar='ADDRESS',
            dest='LaunchSpecification.PrivateIpAddress',
            help='''[VPC only] assign a specific primary private IP address
                to the instance's interface'''),
        Arg('--monitor',
            action='store_true',
            dest='LaunchSpecification.Monitoring.Enabled',
            help='enable detailed monitoring for the instance'),
        Arg('--instance-initiated-shutdown-behavior',
            dest='LaunchSpecification.InstanceInitiatedShutdownBehavior',
            choices=('stop', 'terminate'),
            help='''whether to "stop"
                (default) or terminate the instance when it shuts down'''),
        Arg('--key',
            dest='LaunchSpecification.KeyName',
            metavar='KEYPAIR',
            help='''[Eucalyptus only] name of the key pair to use when
                running the instance'''),
        # This is not yet implemented
        Arg('--ignore-region-affinity',
            action='store_true',
            route_to=None,
            help=argparse.SUPPRESS),
        # This does no validation, but it does prevent taking action
        Arg('--dry-run',
            action='store_true',
            route_to=None,
            help=argparse.SUPPRESS),
        # This is not yet implemented
        Arg('--dont-verify-format',
            action='store_true',
            route_to=None,
            help=argparse.SUPPRESS)
    ]
    LIST_TAGS = ['volumes']

    def configure(self):
        """
        Normalize the image format, fill in the image and volume sizes
        when they can be derived from the source file, and validate
        -x/--expires.
        """
        EC2Request.configure(self)
        self.configure_s3_access()

        # The service expects upper-case format names.
        if (self.params['DiskImage.1.Image.Format'].upper()
                in ('VMDK', 'VHD', 'RAW')):
            self.params['DiskImage.1.Image.Format'] = \
                self.params['DiskImage.1.Image.Format'].upper()
        if not self.params.get('DiskImage.1.Image.Bytes'):
            if self.params['DiskImage.1.Image.Format'] == 'RAW':
                image_size = euca2ools.util.get_filesize(self.args['source'])
                self.params['DiskImage.1.Image.Bytes'] = image_size
            elif self.params['DiskImage.1.Image.Format'] == 'VMDK':
                image_size = euca2ools.util.get_vmdk_image_size(
                    self.args['source'])
                self.params['DiskImage.1.Image.Bytes'] = image_size
            else:
                raise ArgumentError(
                    'argument --image-size is required for {0} files'.format(
                        self.params['DiskImage.1.Image.Format']))
        if not self.params.get('DiskImage.1.Volume.Size'):
            # Round the image size *up* to the next whole GiB.  Note
            # that math.ceil(bytes / 2**30) is wrong here:  with
            # python 2 integer division the quotient is floored before
            # ceil ever sees it, which could yield a volume too small
            # to hold the image.
            image_bytes = self.params['DiskImage.1.Image.Bytes']
            vol_size = (image_bytes + 2 ** 30 - 1) // 2 ** 30
            self.params['DiskImage.1.Volume.Size'] = int(vol_size)

        if not self.args.get('expires'):
            self.args['expires'] = 30
        if self.args['expires'] < 1:
            raise ArgumentError(
                'argument -x/--expires: value must be positive')

    def main(self):
        """
        Start the import task and, unless --no-upload was given, hand
        the actual volume upload off to ResumeImport.
        """
        if self.args.get('dry_run'):
            return

        if self.args.get('bucket'):
            self.ensure_bucket_exists(self.args['bucket'])

        if not self.args.get('DiskImage.1.Image.ImportManifestUrl'):
            # A random UUID keeps concurrent imports of the same file
            # from colliding in the bucket.
            manifest_key = '{0}/{1}.manifest.xml'.format(
                uuid.uuid4(), self.args['source'])
            if self.args.get('prefix'):
                manifest_key = '/'.join((self.args['prefix'], manifest_key))
            getobj = GetObject.from_other(self,
                                          service=self.args['s3_service'],
                                          auth=self.args['s3_auth'],
                                          source='/'.join((self.args['bucket'],
                                                           manifest_key)))
            days = self.args.get('expires') or 30
            get_url = getobj.get_presigned_url2(days * 86400)  # in seconds
            self.log.info('generated manifest GET URL: %s', get_url)
            self.params['DiskImage.1.Image.ImportManifestUrl'] = get_url

        result = self.send()

        # The manifest creation and uploading parts are done by ResumeImport.
        if not self.args.get('no_upload'):
            resume = ResumeImport.from_other(
                self,
                source=self.args['source'],
                task=result['conversionTask']['conversionTaskId'],
                s3_service=self.args['s3_service'],
                s3_auth=self.args['s3_auth'],
                expires=self.args['expires'],
                show_progress=self.args.get('show_progress', False))
            resume.main()

        return result

    def print_result(self, result):
        """Display the conversion task the service returned."""
        self.print_conversion_task(result['conversionTask'])
Example #24
0
class S3(requestbuilder.service.BaseService):
    NAME = 's3'
    DESCRIPTION = 'Object storage service'
    REGION_ENVVAR = 'AWS_DEFAULT_REGION'
    URL_ENVVAR = 'S3_URL'

    ARGS = [
        Arg('-U',
            '--url',
            metavar='URL',
            help='object storage service endpoint URL')
    ]

    # pylint: disable=no-self-use
    def handle_http_error(self, response):
        """Translate an HTTP error response into an AWSError."""
        raise AWSError(response)

    # pylint: enable=no-self-use

    def build_presigned_url(self,
                            method='GET',
                            path=None,
                            params=None,
                            auth=None,
                            auth_args=None):
        """
        Deprecated (requestbuilder 0.2); use
        S3Request.get_presigned_url2 instead.
        """
        msg = ('S3.build_presigned_url is deprecated; use '
               'S3Request.get_presigned_url2 instead')
        self.log.warn(msg)
        warnings.warn(msg, DeprecationWarning)
        if path:
            # We can't simply use urljoin because a path might start with '/'
            # like it could for keys that start with that character.
            if self.endpoint.endswith('/'):
                url = self.endpoint + path
            else:
                url = self.endpoint + '/' + path
        else:
            url = self.endpoint
        request = requests.Request(method=method, url=url, params=params)
        if auth is not None:
            auth.apply_to_request_params(request, self, **(auth_args or {}))
        p_request = request.prepare()
        return p_request.url

    def resolve_url_to_location(self, url):
        """
        Given a URL, try to return its associated region, bucket, and
        key names based on this object's endpoint info as well as all
        S3 endpoints given in the configuration.
        """
        parsed_url = six.moves.urllib.parse.urlparse(url)
        if not parsed_url.scheme:
            parsed_url = six.moves.urllib.parse.urlparse('http://' + url)
        parsed_own_url = six.moves.urllib.parse.urlparse(self.endpoint)
        bucket, key = self.__match_path(parsed_url, parsed_own_url)
        if bucket:
            return self.region_name, bucket, key
        else:
            # Try to look it up in the config
            s3_urls = self.config.get_all_region_options('s3-url')
            for section, conf_url in s3_urls.items():
                parsed_conf_url = six.moves.urllib.parse.urlparse(conf_url)
                bucket, key = self.__match_path(parsed_url, parsed_conf_url)
                if bucket:
                    region = self.config.get_region_option('name',
                                                           region=section)
                    return region or section, bucket, key
        raise ValueError("URL '{0}' matches no known object storage "
                         "endpoints.  Supply one via the command line or "
                         "configuration.".format(url))

    def __match_path(self, given, service):
        """
        Try to extract (bucket, key) from the parsed URL *given*,
        treating the parsed URL *service* as the endpoint.  Handles
        both path-style and vhost-style URLs.  Returns (None, None)
        when *given* does not match *service* at all.
        """
        if given.netloc == service.netloc:
            # path-style
            service_path = service.path
            if not service_path.endswith('/'):
                service_path += '/'
            path_bits = given.path.split(service_path, 1)
            if len(path_bits) < 2:
                # Right host, but the path does not lie under the
                # service's path, so it cannot name a bucket or key.
                # (Indexing [1] unconditionally used to raise
                # IndexError here.)
                return None, None
            # partition tolerates bucket-only URLs (key becomes ''),
            # where a two-way split used to raise ValueError.
            bucket, _, key = path_bits[1].partition('/')
            self.log.debug('URL path match:  %s://%s%s + %s://%s%s -> %s/%s',
                           given.scheme, given.netloc, given.path,
                           service.scheme, service.netloc, service.path,
                           bucket, key)
        elif given.netloc.endswith(service.netloc):
            # vhost-style
            bucket = given.netloc.rsplit('.' + service.netloc, 1)[0]
            bucket = bucket.lstrip('/')
            if given.path.startswith('/'):
                key = given.path[1:]
            else:
                key = given.path
            self.log.debug('URL vhost match:  %s://%s%s + %s://%s%s -> %s/%s',
                           given.scheme, given.netloc, given.path,
                           service.scheme, service.netloc, service.path,
                           bucket, key)
        else:
            bucket = None
            key = None
        return bucket, key
Example #25
0
class _ModifySecurityGroupRule(EC2Request):
    """
    The basis for security group-editing commands
    """

    ARGS = [Arg('group', metavar='GROUP', route_to=None,
                help='name or ID of the security group to modify (required)'),
            Arg('--egress', action='store_true', route_to=None,
                help='''[VPC only] manage an egress rule, which controls
                traffic leaving the group'''),
            Arg('-P', '--protocol', dest='IpPermissions.1.IpProtocol',
                choices=['tcp', 'udp', 'icmp', '6', '17', '1'], default='tcp',
                help='protocol to affect (default: tcp)'),
            Arg('-p', '--port-range', dest='port_range', metavar='RANGE',
                route_to=None, help='''range of ports (specified as "from-to")
                or a single port number (required for tcp and udp)'''),
            Arg('-t', '--icmp-type-code', dest='icmp_type_code',
                metavar='TYPE:CODE', route_to=None, help='''ICMP type and
                code (specified as "type:code") (required for icmp)'''),
            MutuallyExclusiveArgList(
                Arg('-s', '--cidr', metavar='CIDR',
                    dest='IpPermissions.1.IpRanges.1.CidrIp',
                    help='''IP range (default: 0.0.0.0/0)'''),
                # ^ default is added by main()
                Arg('-o', dest='target_group', metavar='GROUP', route_to=None,
                    help='''[Non-VPC only] name of a security group with which
                    to affect network communication''')),
            Arg('-u', metavar='ACCOUNT',
                dest='IpPermissions.1.Groups.1.UserId',
                help='''ID of the account that owns the security group
                specified with -o''')]

    def process_cli_args(self):
        self.process_port_cli_args()

    # noinspection PyExceptionInherit
    def configure(self):
        EC2Request.configure(self)

        if (self.args['group'].startswith('sg-') and
                len(self.args['group']) == 11):
            # The check could probably be a little better, but meh.  Fix if
            # needed.
            self.params['GroupId'] = self.args['group']
        else:
            if self.args['egress']:
                raise ArgumentError('egress rules must use group IDs, not '
                                    'names')
            self.params['GroupName'] = self.args['group']

        target_group = self.args.get('target_group')
        if (target_group is not None and target_group.startswith('sg-') and
                len(target_group) == 11):
            # Same note as above
            self.params['IpPermissions.1.Groups.1.GroupId'] = target_group
        else:
            if self.args['egress']:
                raise ArgumentError('argument -o: egress rules must use group '
                                    'IDs, not names')
            self.params['IpPermissions.1.Groups.1.GroupName'] = target_group

        from_port, to_port = parse_ports(
            self.args.get('IpPermissions.1.IpProtocol'),
            self.args.get('port_range'), self.args.get('icmp_type_code'))
        self.params['IpPermissions.1.FromPort'] = from_port
        self.params['IpPermissions.1.ToPort'] = to_port

        if (not self.args.get('IpPermissions.1.IpRanges.1.GroupName') and
                not self.args.get('IpPermissions.1.IpRanges.1.CidrIp')):
            # Default rule target is the entire Internet
            self.params['IpPermissions.1.IpRanges.1.CidrIp'] = '0.0.0.0/0'
        if (self.params.get('IpPermissions.1.Groups.1.GroupName') and
                not self.args.get('IpPermissions.1.Groups.1.UserId')):
            raise ArgumentError('argument -u is required when -o names a '
                                'security group by name')

    def print_result(self, _):
        print self.tabify(['GROUP', self.args.get('group')])
        perm_str = ['PERMISSION', self.args.get('group'), 'ALLOWS',
                    self.params.get('IpPermissions.1.IpProtocol'),
                    self.params.get('IpPermissions.1.FromPort'),
                    self.params.get('IpPermissions.1.ToPort')]
        if self.params.get('IpPermissions.1.Groups.1.UserId'):
            perm_str.append('USER')
            perm_str.append(self.params.get('IpPermissions.1.Groups.1.UserId'))
        if self.params.get('IpPermissions.1.Groups.1.GroupId'):
            perm_str.append('GRPID')
            perm_str.append(self.params.get(
                'IpPermissions.1.Groups.1.GroupId'))
        elif self.params.get('IpPermissions.1.Groups.1.GroupName'):
            perm_str.append('GRPNAME')
            perm_str.append(self.params.get(
                'IpPermissions.1.Groups.1.GroupName'))
        if self.params.get('IpPermissions.1.IpRanges.1.CidrIp'):
            perm_str.extend(['FROM', 'CIDR'])
            perm_str.append(self.params.get(
                'IpPermissions.1.IpRanges.1.CidrIp'))
        print self.tabify(perm_str)
class CreateAccount(IAMRequest, TabifyingMixin):
    """Create a new account, optionally along with an access key for its
    administrator and/or a euca2ools.ini(5)-style configuration dump."""

    DESCRIPTION = '[Eucalyptus cloud admin only] Create a new account'
    ARGS = [
        arg_account_name(nargs='?',
                         help='''also add an alias (name) to the
                             new account (required on eucalyptus < 4.2)'''),
        Arg('-k',
            '--create-accesskey',
            action='store_true',
            route_to=None,
            help='''also create an access key for the new account's
                administrator and show it'''),
        Arg('-w',
            '--write-config',
            action='store_true',
            route_to=None,
            help='''output access keys and region information in the
                form of a euca2ools.ini(5) configuration file instead of
                by themselves (implies -k)'''),
        Arg('-d',
            '--domain',
            route_to=None,
            help='''the DNS domain to
                use for region information in configuration file output
                (default: based on IAM URL)'''),
        Arg('-l',
            '--set-default-user',
            action='store_true',
            route_to=None,
            help='''set this user as the default user for the region
                in euca2ools.ini(5) configuration file output.  This
                option is only useful when used with -w.''')
    ]

    def configure(self):
        # Translate an auth failure into a hint about assuming admin
        # credentials when this appears to be running on the cloud
        # controller itself (detected via CLC_CRED_CHECK_FILE).
        try:
            IAMRequest.configure(self)
        except requestbuilder.exceptions.AuthError as err:
            if (os.path.exists(CLC_CRED_CHECK_FILE) and len(err.args) > 0
                    and isinstance(err.args[0], six.string_types)):
                msg = ("{0}.  If a cloud controller is running, you "
                       "can assume administrator credentials with "
                       "eval `clcadmin-assume-system-credentials`")
                # Rewrite only the message portion of the exception args;
                # keep any remaining args intact.
                err.args = (msg.format(err.args[0]), ) + err.args[1:]
            raise

    def postprocess(self, result):
        # -w implies -k: both paths need an access key for the new
        # account's administrator, created via DelegateAccount.
        if self.args.get('create_accesskey') or self.args.get('write_config'):
            obj = CreateAccessKey.from_other(
                self,
                # NOTE(review): '******' looks like a redacted user name
                # (likely the account's admin user) -- confirm intended value.
                UserName='******',
                DelegateAccount=result['Account']['AccountId'],
                write_config=self.args.get('write_config'),
                domain=self.args.get('domain'),
                set_default_user=self.args.get('set_default_user'))
            key_result = obj.main()
            # Merge AccessKey/configfile data into the account result.
            result.update(key_result)

    def print_result(self, result):
        if self.args.get('write_config'):
            # Emit a ready-to-use euca2ools.ini(5) section.
            result['configfile'].write(sys.stdout)
        else:
            print self.tabify((result.get('Account', {}).get('AccountName'),
                               result.get('Account', {}).get('AccountId')))
            if 'AccessKey' in result:
                print result['AccessKey']['AccessKeyId']
                print result['AccessKey']['SecretAccessKey']
# Example #27
class DeleteUser(IAMRequest):
    DESCRIPTION = 'Delete a user'
    ARGS = [
        arg_user(help='name of the user to delete (required)'),
        Arg('-r',
            '--recursive',
            action='store_true',
            route_to=None,
            help='''remove all IAM resources associated with the user
                        first'''),
        Arg('-R',
            '--recursive-euca',
            dest='IsRecursive',
            action='store_const',
            const='true',
            help=argparse.SUPPRESS),
        Arg('-p',
            '--pretend',
            action='store_true',
            route_to=None,
            help='''list the resources that would be deleted instead of
                        actually deleting them. Implies -r.'''), AS_ACCOUNT
    ]

    def main(self):
        if self.args['recursive'] or self.args['pretend']:
            # Figure out what we'd have to delete
            req = ListAccessKeys.from_other(
                self,
                UserName=self.args['UserName'],
                DelegateAccount=self.params['DelegateAccount'])
            keys = req.main().get('AccessKeyMetadata', [])
            req = ListUserPolicies.from_other(
                self,
                UserName=self.args['UserName'],
                DelegateAccount=self.params['DelegateAccount'])
            policies = req.main().get('PolicyNames', [])
            req = ListSigningCertificates.from_other(
                self,
                UserName=self.args['UserName'],
                DelegateAccount=self.params['DelegateAccount'])
            certs = req.main().get('Certificates', [])
            req = ListGroupsForUser.from_other(
                self,
                UserName=self.args['UserName'],
                DelegateAccount=self.params['DelegateAccount'])
            groups = req.main().get('Groups', [])
            req = GetLoginProfile.from_other(
                self,
                UserName=self.args['UserName'],
                DelegateAccount=self.params['DelegateAccount'])
            try:
                # This will raise an exception if no login profile is found.
                req.main()
                has_login_profile = True
            except AWSError as err:
                if err.code == 'NoSuchEntity':
                    # It doesn't exist
                    has_login_profile = False
                else:
                    # Something else went wrong; not our problem
                    raise
        else:
            # Just in case
            keys = []
            policies = []
            certs = []
            groups = []
            has_login_profile = False
        if self.args['pretend']:
            return {
                'keys': keys,
                'policies': policies,
                'certificates': certs,
                'groups': groups,
                'has_login_profile': has_login_profile
            }
        else:
            if self.args['recursive']:
                for key in keys:
                    req = DeleteAccessKey.from_other(
                        self,
                        UserName=self.args['UserName'],
                        AccessKeyId=key['AccessKeyId'],
                        DelegateAccount=self.params['DelegateAccount'])
                    req.main()
                for policy in policies:
                    req = DeleteUserPolicy.from_other(
                        self,
                        UserName=self.args['UserName'],
                        PolicyName=policy,
                        DelegateAccount=self.params['DelegateAccount'])
                    req.main()
                for cert in certs:
                    req = DeleteSigningCertificate.from_other(
                        self,
                        UserName=self.args['UserName'],
                        CertificateId=cert['CertificateId'],
                        DelegateAccount=self.params['DelegateAccount'])
                    req.main()
                for group in groups:
                    req = RemoveUserFromGroup.from_other(
                        self,
                        user_names=[self.args['UserName']],
                        GroupName=group['GroupName'],
                        DelegateAccount=self.params['DelegateAccount'])
                    req.main()
                if has_login_profile:
                    req = DeleteLoginProfile.from_other(
                        self,
                        UserName=self.args['UserName'],
                        DelegateAccount=self.params['DelegateAccount'])
                    req.main()
            return self.send()

    def print_result(self, result):
        if self.args['pretend']:
            print 'accesskeys'
            for key in result['keys']:
                print '\t' + key['AccessKeyId']
            print 'policies'
            for policy in result['policies']:
                print '\t' + policy
            print 'certificates'
            for cert in result['certificates']:
                print '\t' + cert['CertificateId']
            print 'groups'
            for group in result['groups']:
                print '\t' + group['Arn']
class DescribeInstances(EC2Request):
    """Show information about instances."""

    DESCRIPTION = 'Show information about instances'
    ARGS = [
        Arg('InstanceId',
            metavar='INSTANCE',
            nargs='*',
            help='limit results to specific instances')
    ]
    FILTERS = [
        Filter('architecture', help='CPU architecture'),
        Filter('association.allocation-id',
               help='''[VPC only] allocation ID bound to a network
                      interface's elastic IP address'''),
        Filter('association.association-id',
               help='''[VPC only]
                      association ID returned when an elastic IP was associated
                      with a network interface'''),
        Filter('association.ip-owner-id',
               help='''[VPC only] ID of the owner of the elastic IP
                      address associated with a network interface'''),
        Filter('association.public-ip',
               help='''[VPC only] address of
                      the elastic IP address bound to a network interface'''),
        Filter('availability-zone'),
        Filter('block-device-mapping.attach-time',
               help='volume attachment time'),
        Filter('block-device-mapping.delete-on-termination',
               type=bool,
               help='''whether a volume is deleted upon instance
                      termination'''),
        Filter('block-device-mapping.device-name',
               help='volume device name (e.g. /dev/sdf)'),
        Filter('block-device-mapping.status', help='volume status'),
        Filter('block-device-mapping.volume-id', help='volume ID'),
        Filter('client-token',
               help='idempotency token provided at instance run time'),
        Filter('dns-name', help='public DNS name'),
        # EC2's documentation for "group-id" refers VPC users to
        # "instance.group-id", while their documentation for the latter
        # refers them to the former.  Consequently, I'm not going to
        # document a difference for either.  They both seem to work for
        # non-VPC instances.
        Filter('group-id', help='security group ID'),
        Filter('group-name', help='security group name'),
        Filter('hypervisor', help='hypervisor type'),
        Filter('image-id', help='machine image ID'),
        Filter('instance.group-id', help='security group ID'),
        Filter('instance.group-name', help='security group name'),
        Filter('instance-id'),
        Filter('instance-lifecycle', help='whether this is a spot instance'),
        Filter('instance-state-code',
               type=int,
               help='numeric code identifying instance state'),
        Filter('instance-state-name', help='instance state'),
        Filter('instance-type'),
        Filter('ip-address', help='public IP address'),
        Filter('kernel-id', help='kernel image ID'),
        Filter('key-name',
               help='key pair name provided at instance launch time'),
        Filter('launch-index', help='launch index within a reservation'),
        Filter('launch-time', help='instance launch time'),
        Filter('monitoring-state',
               help='monitoring state ("enabled" or "disabled")'),
        Filter('network-interface.addresses.association.ip-owner-id',
               help='''[VPC only] ID of the owner of the private IP
                      address associated with a network interface'''),
        Filter('network-interface.addresses.association.public-ip',
               help='''[VPC only] ID of the association of an elastic IP
                      address with a network interface'''),
        Filter('network-interface.addresses.primary',
               help='''[VPC only] whether the IP address of the VPC
                      network interface is the primary private IP address
                      ("true" or "false")'''),
        Filter('network-interface.addresses.private-ip-address',
               help='''[VPC only] network interface's private IP
                      address'''),
        Filter('network-interface.attachment.device-index',
               type=int,
               help='''[VPC only] device index to which a network
                      interface is attached'''),
        Filter('network-interface.attachment.attach-time',
               help='''[VPC only] time a network interface was attached
                      to an instance'''),
        Filter('network-interface.attachment.attachment-id',
               help='''[VPC only] ID of a network interface's
                      attachment'''),
        Filter('network-interface.attachment.delete-on-termination',
               help='''[VPC only] whether a network interface attachment
                      is deleted when an instance is terminated ("true" or
                      "false")'''),
        Filter('network-interface.attachment.instance-owner-id',
               help='''[VPC only] ID of the instance to which a network
                      interface is attached'''),
        Filter('network-interface.attachment.status',
               help="[VPC only] network interface's attachment status"),
        Filter('network-interface.availability-zone',
               help="[VPC only] network interface's availability zone"),
        Filter('network-interface.description',
               help='[VPC only] description of a network interface'),
        Filter('network-interface.group-id',
               help="[VPC only] network interface's security group ID"),
        Filter('network-interface.group-name',
               help='''[VPC only]
                      network interface's security group name'''),
        Filter('network-interface.mac-address',
               help="[VPC only] network interface's hardware address"),
        Filter('network-interface.network-interface.id',
               help='[VPC only] ID of a network interface'),
        Filter('network-interface.owner-id',
               help="[VPC only] ID of a network interface's owner"),
        Filter('network-interface.private-dns-name',
               help="[VPC only] network interface's private DNS name"),
        Filter('network-interface.requester-id',
               help="[VPC only] network interface's requester ID"),
        Filter('network-interface.requester-managed',
               help='''[VPC only] whether the network interface is
                      managed by the service'''),
        Filter('network-interface.source-destination-check',
               help='''[VPC only] whether source/destination checking is
                      enabled for a network interface ("true" or "false")'''),
        Filter('network-interface.status',
               help="[VPC only] network interface's status"),
        Filter('network-interface.subnet-id',
               help="[VPC only] ID of a network interface's subnet"),
        Filter('network-interface.vpc-id',
               help="[VPC only] ID of a network interface's VPC"),
        Filter('owner-id', help="instance owner's account ID"),
        Filter('placement-group-name'),
        Filter('platform', help='"windows" for Windows instances'),
        Filter('private-dns-name'),
        Filter('private-ip-address'),
        Filter('product-code'),
        Filter('product-code.type',
               help='type of product code ("devpay" or "marketplace")'),
        Filter('ramdisk-id', help='ramdisk image ID'),
        Filter('reason', help="reason for the instance's current state"),
        Filter('requestor-id',
               help='ID of the entity that launched an instance'),
        Filter('reservation-id'),
        Filter('root-device-name', help='root device name (e.g. /dev/sda1)'),
        Filter('root-device-type',
               help='root device type ("ebs" or "instance-store")'),
        Filter('spot-instance-request-id'),
        Filter('state-reason-code',
               help='reason code for the most recent state change'),
        Filter('state-reason-message',
               help='message describing the most recent state change'),
        Filter('subnet-id',
               help='[VPC only] ID of the subnet the instance is in'),
        Filter('tag-key', help='name of any tag assigned to the instance'),
        Filter('tag-value', help='value of any tag assigned to the instance'),
        GenericTagFilter('tag:KEY', help='specific tag key/value combination'),
        Filter('virtualization-type'),
        Filter('vpc-id', help='[VPC only] ID of the VPC the instance is in')
    ]
    # Response elements the XML parser should treat as lists even when
    # only a single item is present.
    LIST_TAGS = [
        'reservationSet', 'instancesSet', 'groupSet', 'tagSet',
        'blockDeviceMapping', 'productCodes', 'networkInterfaceSet',
        'privateIpAddressesSet'
    ]

    def print_result(self, result):
        """Print each reservation in the response.

        The reservationSet element is absent (None) when no instances
        match, so fall back to an empty list instead of iterating None.
        """
        for reservation in result.get('reservationSet') or []:
            self.print_reservation(reservation)
class CreateAutoScalingGroup(AutoScalingRequest):
    """Create a new auto-scaling group.

    Declarative request class: each Arg below maps a command line
    option to an AutoScaling API parameter via its ``dest``.
    """

    DESCRIPTION = 'Create a new auto-scaling group'
    ARGS = [
        Arg('AutoScalingGroupName',
            metavar='ASGROUP',
            help='name of the new auto-scaling group (required)'),
        Arg('-l',
            '--launch-configuration',
            dest='LaunchConfigurationName',
            metavar='LAUNCHCONFIG',
            required=True,
            help='''name of the
                launch configuration to use with the new group (required)'''),
        Arg('-M',
            '--max-size',
            dest='MaxSize',
            metavar='COUNT',
            type=int,
            required=True,
            help='maximum group size (required)'),
        Arg('-m',
            '--min-size',
            dest='MinSize',
            metavar='COUNT',
            type=int,
            required=True,
            help='minimum group size (required)'),
        Arg('--default-cooldown',
            dest='DefaultCooldown',
            metavar='SECONDS',
            type=int,
            help='''amount of time, in seconds, after a scaling activity
                        completes before any further trigger-related scaling
                        activities may start'''),
        Arg('--desired-capacity',
            dest='DesiredCapacity',
            metavar='COUNT',
            type=int,
            help='number of running instances the group should contain'),
        Arg('--grace-period',
            dest='HealthCheckGracePeriod',
            metavar='SECONDS',
            type=int,
            help='''number of seconds to wait
                before starting health checks on newly-created instances'''),
        Arg('--health-check-type',
            dest='HealthCheckType',
            choices=('EC2', 'ELB'),
            help='service to obtain health check status from'),
        Arg('--load-balancers',
            dest='LoadBalancerNames.member',
            metavar='ELB1,ELB2,...',
            type=delimited_list(','),
            help='comma-separated list of load balancers to use'),
        Arg('--placement-group',
            dest='PlacementGroup',
            help='placement group in which to launch new instances'),
        # Optional tags to apply to the group (see TagArg's help text).
        TagArg(required=False),
        Arg('--termination-policies',
            dest='TerminationPolicies.member',
            metavar='POLICY1,POLICY2,...',
            type=delimited_list(','),
            help='''ordered list of termination policies.  The first has
                the highest precedence.'''),
        Arg('--vpc-zone-identifier',
            dest='VPCZoneIdentifier',
            metavar='ZONE1,ZONE2,...',
            help='''comma-separated list of subnet identifiers.  If you
                specify availability zones as well, ensure the subnets'
                availability zones match the ones you specified'''),
        Arg('-z',
            '--availability-zones',
            dest='AvailabilityZones.member',
            metavar='ZONE1,ZONE2,...',
            type=delimited_list(','),
            help='''comma-separated list of availability zones for the new
                group (required unless subnets are supplied)''')
    ]
# Example #30
class RegisterImage(EC2Request):
    """Register a new image.

    Supports two modes: instance-store images (a MANIFEST location is
    given) and EBS images (a root device snapshot is supplied either
    via -s/--snapshot or a -b block device mapping).
    """

    DESCRIPTION = 'Register a new image'
    ARGS = [
        Arg('ImageLocation',
            metavar='MANIFEST',
            nargs='?',
            help='''location of the image manifest in S3 storage
                (required for instance-store images)'''),
        Arg('-n',
            '--name',
            dest='Name',
            required=True,
            help='name of the new image (required)'),
        Arg('-d',
            '--description',
            dest='Description',
            help='description of the new image'),
        Arg('-a',
            '--architecture',
            dest='Architecture',
            choices=('i386', 'x86_64', 'armhf'),
            help='CPU architecture of the new image'),
        Arg('--kernel',
            dest='KernelId',
            metavar='KERNEL',
            help='ID of the kernel to associate with the new image'),
        Arg('--ramdisk',
            dest='RamdiskId',
            metavar='RAMDISK',
            help='ID of the ramdisk to associate with the new image'),
        Arg('--root-device-name',
            dest='RootDeviceName',
            metavar='DEVICE',
            help='root device name (default: /dev/sda1)'),
        # ^ default is added by main()
        Arg('-s',
            '--snapshot',
            route_to=None,
            help='snapshot to use for the root device'),
        Arg('-b',
            '--block-device-mapping',
            metavar='DEVICE=MAPPED',
            dest='BlockDeviceMapping',
            action='append',
            type=ec2_block_device_mapping,
            default=[],
            help='''define a block device mapping for the image, in the
                form DEVICE=MAPPED, where "MAPPED" is "none", "ephemeral(0-3)",
                or
                "[SNAP-ID]:[GiB]:[true|false]:[standard|VOLTYPE[:IOPS]]"'''),
        Arg('--virtualization-type',
            dest='VirtualizationType',
            choices=('paravirtual', 'hvm'),
            help='virtualization type for the new image'),
        Arg('--platform',
            dest='Platform',
            metavar='windows',
            choices=('windows', ),
            help="[Privileged] the new image's platform (windows)")
    ]

    # noinspection PyExceptionInherit
    def preprocess(self):
        # Validate the argument combination and, for EBS images, ensure
        # a root device mapping exists before the request is sent.
        if self.args.get('ImageLocation'):
            # instance-store image
            if self.args.get('RootDeviceName'):
                raise ArgumentError('argument --root-device-name: not allowed '
                                    'with argument MANIFEST')
            if self.args.get('snapshot'):
                raise ArgumentError('argument --snapshot: not allowed with '
                                    'argument MANIFEST')
        else:
            # Try for an EBS image
            if not self.params.get('RootDeviceName'):
                self.params['RootDeviceName'] = '/dev/sda1'
            snapshot = self.args.get('snapshot')
            # Look for a mapping for the root device
            for mapping in self.args['BlockDeviceMapping']:
                if mapping.get('DeviceName') == self.params['RootDeviceName']:
                    if (snapshot != mapping.get('Ebs', {}).get('SnapshotId')
                            and snapshot):
                        # The mapping's snapshot differs or doesn't exist
                        raise ArgumentError(
                            'snapshot ID supplied with --snapshot conflicts '
                            'with block device mapping for root device {0}'.
                            format(mapping['DeviceName']))
                    else:
                        # No need to apply --snapshot since the mapping is
                        # already there
                        break
            else:
                # for/else: no mapping matched the root device, so build
                # one from --snapshot, or fail if neither was given.
                if snapshot:
                    self.params['BlockDeviceMapping'].append({
                        'DeviceName':
                        self.params['RootDeviceName'],
                        'Ebs': {
                            'SnapshotId': snapshot
                        }
                    })
                else:
                    raise ArgumentError(
                        'either a manifest location or a root device snapshot '
                        'mapping must be specified')

    def print_result(self, result):
        # Show the ID of the newly registered image.
        print self.tabify(('IMAGE', result.get('imageId')))
# Example #31
class BundleVolume(BaseCommand, FileTransferProgressBarMixin):
    """Bundle this machine's filesystem into a cloud machine image.

    Copies the running system's volume into a disk image, then hands
    the result off to BundleImage.  Must be run as the superuser.
    """

    SUITE = Euca2ools
    DESCRIPTION = ("Prepare this machine's filesystem for use in the cloud\n\n"
                   "This command must be run as the superuser.")
    REGION_ENVVAR = 'AWS_DEFAULT_REGION'
    ARGS = [
        Arg('-p',
            '--prefix',
            help='''the file name prefix to give the
                bundle's files (default: image)'''),
        Arg('-d',
            '--destination',
            metavar='DIR',
            help='''location to place
                the bundle's files (default:  dir named by TMPDIR, TEMP, or TMP
                environment variables, or otherwise /var/tmp)'''),
        # -r/--arch is required, but to keep the UID check we do at the
        # beginning of configure() first we enforce that there instead.
        Arg('-r',
            '--arch',
            help="the image's architecture (required)",
            choices=('i386', 'x86_64', 'armhf', 'ppc', 'ppc64')),
        Arg('-e',
            '--exclude',
            metavar='PATH,...',
            type=delimited_list(','),
            help='comma-separated list of paths to exclude'),
        Arg('-i',
            '--include',
            metavar='PATH,...',
            type=delimited_list(','),
            help='comma-separated list of paths to include'),
        Arg('-s',
            '--size',
            metavar='MiB',
            type=int,
            default=10240,
            help='size of the image to create (default: 10240 MiB)'),
        Arg('--no-filter',
            action='store_true',
            help='do not filter out sensitive/system files'),
        Arg('--all',
            action='store_true',
            help='''include all filesystems regardless of type
                (default: only include local filesystems)'''),
        MutuallyExclusiveArgList(
            Arg('--inherit',
                dest='inherit',
                action='store_true',
                help='''use the metadata service to provide metadata for
                    the bundle (this is the default)'''),
            Arg('--no-inherit',
                dest='inherit',
                action='store_false',
                help='''do not use the metadata service for bundle
                    metadata''')),
        Arg('-v',
            '--volume',
            metavar='DIR',
            default='/',
            help='''location
                of the volume from which to create the bundle (default: /)'''),
        Arg('-P',
            '--partition',
            choices=('mbr', 'gpt', 'none'),
            help='''the type of partition table to create (default: attempt
                to guess based on the existing disk)'''),
        Arg('-S',
            '--script',
            metavar='FILE',
            help='''location of a script
                to run immediately before bundling.  It will receive the
                volume's mount point as its only argument.'''),
        MutuallyExclusiveArgList(
            Arg('--fstab',
                metavar='FILE',
                help='''location of an
                    fstab(5) file to copy into the bundled image'''),
            Arg('--generate-fstab',
                action='store_true',
                help='''automatically generate an fstab(5) file for
                    the bundled image''')),
        Arg('--grub-config',
            metavar='FILE',
            help='''location of a GRUB 1
                configuration file to copy to /boot/grub/menu.lst on the
                bundled image'''),

        # Bundle-related stuff
        Arg('-k',
            '--privatekey',
            metavar='FILE',
            help='''file containing
                your private key to sign the bundle's manifest with.  This
                private key will also be required to unbundle the image in the
                future.'''),
        Arg('-c',
            '--cert',
            metavar='FILE',
            help='file containing your X.509 certificate'),
        Arg('--ec2cert',
            metavar='FILE',
            help='''file containing the
                cloud's X.509 certificate'''),
        Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'),
        Arg('--kernel',
            metavar='IMAGE',
            help='''ID of the kernel image to
                associate with this machine image'''),
        Arg('--ramdisk',
            metavar='IMAGE',
            help='''ID of the ramdisk image
                to associate with this machine image'''),
        Arg('-B',
            '--block-device-mappings',
            metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...',
            type=manifest_block_device_mappings,
            help='''block device mapping scheme with which to launch
                instances of this machine image'''),
        Arg('--productcodes',
            metavar='CODE1,CODE2,...',
            type=delimited_list(','),
            default=[],
            help='comma-separated list of product codes for the image'),
        Arg('--part-size',
            type=filesize,
            default=10485760,
            help=argparse.SUPPRESS),
        Arg('--enc-key', type=(lambda s: int(s, 16)), help=argparse.SUPPRESS),
        Arg('--enc-iv', type=(lambda s: int(s, 16)), help=argparse.SUPPRESS)
    ]

    def configure(self):
        """Validate arguments and fill in defaults.

        Raises RuntimeError when not running as root and ArgumentError
        for missing/undeterminable required settings.
        """
        # Check UID before anything else so the most likely failure
        # (not being root) is reported first.
        if os.geteuid() != 0:
            raise RuntimeError('must be superuser')

        # -r/--arch is enforced here rather than by argparse so the
        # superuser check above takes precedence (see ARGS comment).
        if not self.args.get('arch'):
            raise ArgumentError('argument -r/--arch is required')

        # Farm all the bundle arg validation out to BundleImage
        self.__build_bundle_command('/dev/null', image_size=1)

        root_device = _get_root_device()
        if self.args.get('inherit'):
            self.__populate_args_from_metadata()
        if not self.args.get('partition'):
            self.args['partition'] = _get_partition_table_type(root_device)
            if not self.args['partition']:
                self.log.warn(
                    'could not determine the partition table type '
                    'for root device %s', root_device)
                # Include the device in the error; the original format
                # string had no {0} placeholder, silently dropping it.
                raise ArgumentError(
                    'could not determine the type of partition table to use '
                    'for {0}; specify one with -P/--partition'
                    .format(root_device))
            self.log.info('discovered partition table type %s',
                          self.args['partition'])
        # Default to copying the running system's fstab when neither an
        # explicit file nor --generate-fstab was given.
        if not self.args.get('fstab') and not self.args.get('generate_fstab'):
            self.args['fstab'] = '/etc/fstab'

    def main(self):
        """Create a disk image of the volume, bundle it, and return the
        bundle result.  The intermediate disk image is removed after
        bundling; the mount point and loop device are cleaned up even
        on error via the nested finally blocks."""
        if self.args.get('destination'):
            destdir = self.args['destination']
        else:
            destdir = euca2ools.util.mkdtemp_for_large_files(prefix='bundle-')
        image = os.path.join(destdir, self.args.get('prefix') or 'image')
        mountpoint = tempfile.mkdtemp(prefix='target-', dir=destdir)

        # Prepare the disk image
        device = self.__create_disk_image(image, self.args['size'])
        try:
            self.__create_and_mount_filesystem(device, mountpoint)
            try:
                # Copy files
                # Exclude the image file and mount point themselves so
                # the copy doesn't recurse into its own output.
                exclude_opts = self.__get_exclude_and_include_args()
                exclude_opts.extend(
                    ['--exclude', image, '--exclude', mountpoint])
                self.__copy_to_target_dir(mountpoint, exclude_opts)
                self.__insert_fstab(mountpoint)
                self.__insert_grub_config(mountpoint)
                if self.args.get('script'):
                    cmd = [self.args['script'], mountpoint]
                    self.log.info("running user script ``%s''",
                                  _quote_cmd(cmd))
                    subprocess.check_call(cmd)

            except KeyboardInterrupt:
                self.log.info('received ^C; skipping to cleanup')
                msg = ('Cleaning up after ^C -- pressing ^C again will '
                       'result in the need for manual device cleanup')
                print >> sys.stderr, msg
                raise
            # Cleanup
            finally:
                # Brief pause to let any in-flight writes settle before
                # unmounting.
                time.sleep(0.2)
                self.__unmount_filesystem(device)
                os.rmdir(mountpoint)
        finally:
            self.__detach_disk_image(image, device)

        bundle_cmd = self.__build_bundle_command(image)
        result = bundle_cmd.main()
        os.remove(image)
        return result

    def print_result(self, result):
        for manifest_filename in result[1]:
            print 'Wrote manifest', manifest_filename

    def __build_bundle_command(self, image_filename, image_size=None):
        """Construct a BundleImage command that inherits this command's
        bundling-related arguments (keys, certs, mappings, and so on).

        All argument validation is thus delegated to BundleImage.
        """
        passthrough_keys = ('prefix', 'destination', 'arch', 'privatekey',
                            'cert', 'ec2cert', 'user', 'kernel', 'ramdisk',
                            'block_device_mappings', 'productcodes',
                            'part_size', 'enc_key', 'enc_iv', 'show_progress')
        passthrough = {}
        for key in passthrough_keys:
            passthrough[key] = self.args.get(key)
        return BundleImage.from_other(self,
                                      image=image_filename,
                                      image_size=image_size,
                                      image_type='machine',
                                      **passthrough)

    # INSTANCE METADATA #

    def __read_metadata_value(self, path):
        """Fetch one value from the instance metadata service, or None
        when the service responds with a non-200 status.
        """
        self.log.debug("reading metadata service value '%s'", path)
        url = 'http://169.254.169.254/2012-01-12/meta-data/' + path
        response = requests.get(url, timeout=1)
        if response.status_code != 200:
            return None
        return response.text

    def __read_metadata_list(self, path):
        """Fetch a metadata listing and return its non-empty entries,
        each stripped of any trailing slash.  Returns [] when the
        listing is missing or empty.
        """
        content = self.__read_metadata_value(path)
        if not content:
            return []
        return [entry.rstrip('/') for entry in content.splitlines() if entry]

    def __read_metadata_dict(self, path):
        """Fetch a metadata directory and return it as a dict mapping
        each listed key to its value.
        """
        prefix = path if path.endswith('/') else path + '/'
        return dict((key, self.__read_metadata_value(prefix + key))
                    for key in self.__read_metadata_list(prefix) if key)

    def __populate_args_from_metadata(self):
        """
        Populate missing/empty values in self.args using info obtained
        from the metadata service.

        Fills in kernel, ramdisk, product codes, and block device
        mappings.  Raises ClientError if the metadata service times out.
        """
        try:
            if not self.args.get('kernel'):
                self.args['kernel'] = self.__read_metadata_value('kernel-id')
                self.log.info('inherited kernel: %s', self.args['kernel'])
            if not self.args.get('ramdisk'):
                self.args['ramdisk'] = self.__read_metadata_value('ramdisk-id')
                self.log.info('inherited ramdisk: %s', self.args['ramdisk'])
            if not self.args.get('productcodes'):
                self.args['productcodes'] = self.__read_metadata_list(
                    'product-codes')
                if self.args['productcodes']:
                    self.log.info('inherited product codes: %s',
                                  ','.join(self.args['productcodes']))
            if not self.args.get('block_device_mappings'):
                self.args['block_device_mappings'] = {}
                # Skip EBS-backed mappings (keys beginning with "ebs")
                for key, val in (
                        self.__read_metadata_dict('block-device-mapping')
                        or {}).iteritems():
                    if not key.startswith('ebs'):
                        self.args['block_device_mappings'][key] = val
                for key, val in self.args['block_device_mappings'].iteritems():
                    self.log.info('inherited block device mapping: %s=%s', key,
                                  val)
        except requests.exceptions.Timeout:
            raise ClientError('metadata service is absent or unresponsive; '
                              'use --no-inherit to proceed without it')

    # DISK MANAGEMENT #

    def __create_disk_image(self, image, size_in_mb):
        """Create a size_in_mb-sized image file, write a partition table
        of the configured type (if any), and return the device node the
        image's filesystem should be created on.
        """
        # Writing one 1M block at an offset of (size - 1) M yields a
        # sparse file of the full requested size
        subprocess.check_call([
            'dd', 'if=/dev/zero', 'of={0}'.format(image), 'bs=1M', 'count=1',
            'seek={0}'.format(int(size_in_mb) - 1)
        ])
        if self.args['partition'] == 'mbr':
            # Why use sfdisk when we can use parted?  :-)
            # One bootable primary partition starting at sector 64 and
            # spanning the rest of the disk
            parted_script = (b'unit s', b'mklabel msdos',
                             b'mkpart primary 64 -1s', b'set 1 boot on',
                             b'print', b'quit')
            subprocess.check_call(
                ['parted', '-s', image, '--', ' '.join(parted_script)])
        elif self.args['partition'] == 'gpt':
            # type 0xef02 == BIOS boot (we'll put it at the end of the list)
            subprocess.check_call([
                'sgdisk', '--new', '128:1M:+1M', '--typecode', '128:ef02',
                '--change-name', '128:BIOS Boot', image
            ])
            # type 0x8300 == Linux filesystem data
            subprocess.check_call([
                'sgdisk', '--largest-new=1', '--typecode', '1:8300',
                '--change-name', '1:Image', image
            ])
            subprocess.check_call(['sgdisk', '--print', image])

        mapped = self.__map_disk_image(image)
        assert os.path.exists(mapped)
        return mapped

    def __map_disk_image(self, image):
        """Attach the image file to a block device and return the device
        path: the first mapped partition (/dev/mapper/...p1) for
        partitioned images, or a bare loop device otherwise.
        """
        if self.args['partition'] in ('mbr', 'gpt'):
            # Create /dev/mapper/loopXpY and return that.
            # We could do this with losetup -Pf as well, but that isn't
            # available on RHEL 6.
            self.log.debug('mapping partitioned image %s', image)
            kpartx = subprocess.Popen(['kpartx', '-s', '-v', '-a', image],
                                      stdout=subprocess.PIPE)
            try:
                # kpartx -v prints lines of the form "add map loop0p1 ..."
                for line in kpartx.stdout.readlines():
                    line_split = line.split()
                    if line_split[:2] == ['add', 'map']:
                        device = line_split[2]
                        if device.endswith('p1'):
                            return '/dev/mapper/{0}'.format(device)
                self.log.error('failed to get usable map output from kpartx')
                raise RuntimeError('device mapping failed')
            finally:
                # Make sure the process exits
                kpartx.communicate()
        else:
            # No partition table
            self.log.debug('mapping unpartitioned image %s', image)
            losetup = subprocess.Popen(['losetup', '-f', image, '--show'],
                                       stdout=subprocess.PIPE)
            # losetup --show prints the allocated loop device path
            loopdev, _ = losetup.communicate()
            return loopdev.strip()

    def __create_and_mount_filesystem(self, device, mountpoint):
        """Create a filesystem on *device* that mirrors the root
        device's type, label, and UUID, then mount it at *mountpoint*.
        """
        root_device = _get_root_device()
        fsinfo = _get_filesystem_info(root_device)
        self.log.info('creating filesystem on %s using metadata from %s: %s',
                      device, root_device, fsinfo)
        fs_type = fsinfo['type']
        fs_uuid = fsinfo.get('uuid')
        mkfs_cmd = ['mkfs', '-t', fs_type]
        commands = [mkfs_cmd]
        if fsinfo.get('label'):
            mkfs_cmd.extend(['-L', fsinfo['label']])
        elif fs_type in ('ext2', 'ext3', 'ext4'):
            if fs_uuid:
                mkfs_cmd.extend(['-U', fs_uuid])
            # Time-based checking doesn't make much sense for cloud images
            commands.append(['tune2fs', '-i', '0'])
        elif fs_type == 'jfs':
            if fs_uuid:
                commands.append(['jfs_tune', '-U', fs_uuid])
        elif fs_type == 'xfs':
            if fs_uuid:
                commands.append(['xfs_admin', '-U', fs_uuid])
        # Each command operates on the target device; run them in order
        for command in commands:
            command.append(device)
            self.log.info("formatting with ``%s''", _quote_cmd(command))
            subprocess.check_call(command)
        self.log.info('mounting %s filesystem %s at %s', fs_type,
                      device, mountpoint)
        subprocess.check_call(
            ['mount', '-t', fs_type, device, mountpoint])

    def __unmount_filesystem(self, device):
        """Flush outstanding writes to disk and unmount *device*."""
        self.log.info('unmounting %s', device)
        subprocess.check_call(['sync'])
        # Brief pause between sync and umount -- presumably to let
        # writes settle before unmounting
        time.sleep(0.2)
        subprocess.check_call(['umount', device])

    def __detach_disk_image(self, image, device):
        """Tear down the block device mapping created for the image."""
        partitioned = self.args['partition'] in ('mbr', 'gpt')
        if partitioned:
            self.log.debug('unmapping partitioned image %s', image)
            teardown_cmd = ['kpartx', '-s', '-d', image]
        else:
            self.log.debug('unmapping unpartitioned device %s', device)
            teardown_cmd = ['losetup', '-d', device]
        subprocess.check_call(teardown_cmd)

    # FILE MANAGEMENT #

    def __get_exclude_and_include_args(self):
        """Assemble the rsync --exclude/--include options implied by the
        user's arguments, remote-filesystem filtering, and the
        pre-defined exclusions file.
        """
        opts = []
        for pattern in self.args.get('exclude') or []:
            opts += ['--exclude', pattern]
        for pattern in self.args.get('include') or []:
            opts += ['--include', pattern]
        # Exclude remote filesystems
        if not self.args.get('all'):
            for device, mountpoint, fstype in _get_all_mounts():
                if fstype in ALLOWED_FILESYSTEM_TYPES:
                    continue
                self.log.debug('excluding %s filesystem %s at %s', fstype,
                               device, mountpoint)
                opts += ['--exclude', os.path.join(mountpoint, '**')]
        # Add pre-defined exclusions
        if not self.args.get('no_filter') and os.path.isfile(EXCLUDES_FILE):
            self.log.debug('adding path exclusions from %s', EXCLUDES_FILE)
            opts += ['--exclude-from', EXCLUDES_FILE]
        return opts

    def __copy_to_target_dir(self, dest, exclude_opts):
        """Copy the source volume (default /) into *dest* with rsync,
        first with extended attributes (-X) and then without if the
        first attempt exits with status 1.

        Raises subprocess.CalledProcessError if rsync's final exit
        status is neither 0 nor 23.
        """
        source = self.args.get('volume') or '/'
        if not source.endswith('/'):
            source += '/'
        if not dest.endswith('/'):
            dest += '/'

        rsync_opts = ['-rHlpogDtS']
        if self.args.get('show_progress'):
            rsync = subprocess.Popen(['rsync', '--version'],
                                     stdout=subprocess.PIPE)
            out, _ = rsync.communicate()
            version_str = (out.partition('version ')[2] or '\0').split()[0]
            # Compare numerically rather than lexicographically so a
            # hypothetical version 10.x is not considered older than
            # 3.1.0 (string comparison would say '10...' < '3.1.0')
            try:
                rsync_version = tuple(
                    int(bit) for bit in version_str.split('.'))
            except ValueError:
                # Unparseable version output; assume an old rsync
                rsync_version = (0,)
            if rsync_version >= (3, 1, 0):
                # Use the new summarizing version
                rsync_opts.append('--info=progress2')
            else:
                rsync_opts.append('--progress')
        else:
            rsync_opts.append('--quiet')
        cmd = ['rsync', '-X'] + rsync_opts + exclude_opts + [source, dest]
        self.log.info("copying files with ``%s''", _quote_cmd(cmd))
        print('Copying files...')
        rsync = subprocess.Popen(cmd)
        rsync.wait()
        if rsync.returncode == 1:
            # Exit status 1 presumably means -X was rejected (e.g. no
            # xattr support), so retry without it
            self.log.info('rsync exited with code %i; retrying without xattrs',
                          rsync.returncode)
            print('Retrying without extended attributes')
            cmd = ['rsync'] + rsync_opts + exclude_opts + [source, dest]
            rsync = subprocess.Popen(cmd)
            rsync.wait()
        # 23 (partial transfer) is tolerated; anything else nonzero fails
        if rsync.returncode not in (0, 23):
            self.log.error('rsync exited with code %i', rsync.returncode)
            raise subprocess.CalledProcessError(rsync.returncode, 'rsync')

    def __insert_fstab(self, mountpoint):
        """Install an fstab file into the image at etc/fstab, backing up
        any existing one as etc/fstab.bak.

        Uses the pre-defined template when --generate-fstab was given;
        otherwise copies the user-supplied fstab file.
        """
        fstab_filename = os.path.join(mountpoint, 'etc', 'fstab')
        if os.path.exists(fstab_filename):
            fstab_bak = fstab_filename + '.bak'
            self.log.debug('backing up original fstab file as %s', fstab_bak)
            _copy_with_xattrs(fstab_filename, fstab_bak)
        if self.args.get('generate_fstab'):
            # This isn't really a template, but if the need arises we
            # can add something of that sort later.
            # Bug fix: log the template file actually being copied;
            # self.args['fstab'] is typically unset in this branch.
            self.log.info('generating fstab file from %s',
                          FSTAB_TEMPLATE_FILE)
            _copy_with_xattrs(FSTAB_TEMPLATE_FILE, fstab_filename)
        elif self.args.get('fstab'):
            self.log.info('using fstab file %s', self.args['fstab'])
            _copy_with_xattrs(self.args['fstab'], fstab_filename)

    def __insert_grub_config(self, mountpoint):
        """Install the user-supplied grub1 config as boot/grub/menu.lst
        in the image, backing up any existing config as menu.lst.bak.
        Does nothing when no --grub-config was given.
        """
        grub_config = self.args.get('grub_config')
        if not grub_config:
            return
        target = os.path.join(mountpoint, 'boot', 'grub', 'menu.lst')
        if os.path.exists(target):
            backup = target + '.bak'
            self.log.debug('backing up original grub1 config file as %s',
                           backup)
            _copy_with_xattrs(target, backup)
        self.log.info('using grub1 config file %s', grub_config)
        _copy_with_xattrs(grub_config, target)