Beispiel #1
0
class ConfigureGetCommand(BasicCommand):
    """Implements ``aws configure get``: look up and print one config value."""
    NAME = 'get'
    DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
                                         '_description.rst')
    SYNOPSIS = 'aws configure get varname [--profile profile-name]'
    EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
    ARG_TABLE = [
        {
            'name': 'varname',
            'help_text': 'The name of the config value to retrieve.',
            'action': 'store',
            'cli_type_name': 'string',
            'positional_arg': True
        },
    ]

    def __init__(self, session, stream=sys.stdout):
        # ``stream`` is injectable for testing; defaults to stdout.
        super(ConfigureGetCommand, self).__init__(session)
        self._stream = stream

    def _run_main(self, args, parsed_globals):
        """Resolve ``args.varname`` and write its value to the stream.

        Returns 0 if a value was found, 1 otherwise.
        """
        varname = args.varname
        value = None
        if '.' not in varname:
            # get_scoped_config() returns the config variables in the config
            # file (not the logical_var names), which is what we want.
            config = self._session.get_scoped_config()
            value = config.get(varname)
        else:
            num_dots = varname.count('.')
            if num_dots == 1:
                # section.config-name: look in the top-level section first,
                # then fall back to treating the section as a profile name.
                full_config = self._session.full_config
                section, config_name = varname.split('.')
                value = full_config.get(section, {}).get(config_name)
                if value is None:
                    # Try to retrieve it from the profile config.
                    value = full_config['profiles'].get(section,
                                                        {}).get(config_name)
            elif num_dots == 2 and varname.startswith('profile'):
                # We're hard coding logic for profiles here.  Really
                # we could support any generic format of [section subsection],
                # but we'd need some botocore.session changes for that,
                # and nothing would immediately use that feature.
                dot_section, config_name = varname.rsplit('.', 1)
                # Only the profile name matters; the leading "profile"
                # token is discarded.
                _, profile_name = dot_section.split('.')
                self._session.profile = profile_name
                config = self._session.get_scoped_config()
                value = config.get(config_name)
        if value is not None:
            self._stream.write(value)
            self._stream.write('\n')
            return 0
        else:
            return 1
Beispiel #2
0
class ConfigureSetCommand(BasicCommand):
    """Implements ``aws configure set``: write a single config value."""
    NAME = 'set'
    DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',
                                         '_description.rst')
    SYNOPSIS = 'aws configure set varname value [--profile profile-name]'
    EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')
    ARG_TABLE = [
        {'name': 'varname',
         'help_text': 'The name of the config value to set.',
         'action': 'store',
         'cli_type_name': 'string',
         'positional_arg': True},
        {'name': 'value',
         'help_text': 'The value to set.',
         'action': 'store',
         'cli_type_name': 'string',
         'positional_arg': True},
    ]

    def __init__(self, session, config_writer=None):
        # ``config_writer`` is injectable for testing.
        super(ConfigureSetCommand, self).__init__(session)
        self._config_writer = (ConfigFileWriter() if config_writer is None
                               else config_writer)

    def _run_main(self, args, parsed_globals):
        """Work out the target section and key, then hand off to the writer."""
        varname = args.varname
        value = args.value
        section = 'default'
        if '.' not in varname:
            # Unqualified name: scope it to the current profile, or leave
            # it in the 'default' section when no profile is set.
            profile = self._session.profile
            if profile is not None:
                section = 'profile %s' % profile
        else:
            # Either section.config-name or
            # profile.profile-name.config-name
            # (arbitrary.thing.config-name is not supported).
            dots = varname.count('.')
            if dots == 1:
                section, varname = varname.split('.')
            elif dots == 2 and varname.startswith('profile'):
                dotted_prefix, varname = varname.rsplit('.', 1)
                section = 'profile %s' % dotted_prefix.split('.')[1]
        config_filename = os.path.expanduser(
            self._session.get_config_variable('config_file'))
        self._config_writer.update_config(
            {'__section__': section, varname: value}, config_filename)
class AddSteps(BasicCommand):
    """EMR subcommand that submits a list of steps to an existing cluster."""
    NAME = 'add-steps'
    DESCRIPTION = 'Add a list of steps to a cluster.'
    ARG_TABLE = [
        {'name': 'cluster-id',
         'required': True,
         'help_text': helptext.CLUSTER_ID},
        {'name': 'steps',
         'required': True,
         'nargs': '+',
         'schema': argumentschema.STEPS_SCHEMA,
         'help_text': helptext.STEPS}
    ]
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'add-steps.rst')

    def _run_main(self, parsed_args, parsed_globals):
        # Translate the parsed CLI step definitions into the request
        # shape expected by AddJobFlowSteps, then display the response.
        step_list = steputils.build_step_config_list(
            parsed_step_list=parsed_args.steps,
            region=parsed_globals.region)
        emrutils.call_and_display_response(
            self._session, 'AddJobFlowSteps',
            {'JobFlowId': parsed_args.cluster_id, 'Steps': step_list},
            parsed_globals)
        return 0
Beispiel #4
0
class S3(BasicCommand):
    """Top-level ``aws s3`` command; dispatches to its subcommands."""
    NAME = 's3'
    DESCRIPTION = BasicCommand.FROM_FILE('s3/_concepts.rst')
    SYNOPSIS = "aws s3 <Command> [<Arg> ...]"
    # One entry per subcommand: its name and the class implementing it.
    SUBCOMMANDS = [
        {'name': name, 'command_class': command_class}
        for name, command_class in [
            ('ls', ListCommand),
            ('website', WebsiteCommand),
            ('cp', CpCommand),
            ('mv', MvCommand),
            ('rm', RmCommand),
            ('sync', SyncCommand),
            ('mb', MbCommand),
            ('rb', RbCommand),
        ]
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # Bare ``aws s3`` with no subcommand is a usage error, mirroring
        # argparse's own too-few-arguments message.
        if parsed_args.subcommand is None:
            raise ValueError("usage: aws [options] <command> <subcommand> "
                             "[parameters]\naws: error: too few arguments")
Beispiel #5
0
class RbCommand(S3TransferCommand):
    """``aws s3 rb``: delete an S3 bucket."""
    NAME = 'rb'
    DESCRIPTION = "Deletes an S3 bucket."
    USAGE = "<S3Path>"
    # One positional bucket path; FORCE presumably enables removing a
    # non-empty bucket -- confirm against the FORCE arg definition.
    ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True,
                  'synopsis': USAGE}, FORCE]
    EXAMPLES = BasicCommand.FROM_FILE('s3/rb.rst')
Beispiel #6
0
class ECRLogin(BasicCommand):
    """Log in with docker login"""
    NAME = 'get-login'

    DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')

    ARG_TABLE = [
        {
            'name': 'registry-ids',
            'help_text': 'A list of AWS account IDs that correspond to the '
                         'Amazon ECR registries that you want to log in to.',
            'required': False,
            'nargs': '+'
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        """Print one ``docker login`` command line per registry; returns 0."""
        ecr_client = create_client_from_parsed_globals(
            self._session, 'ecr', parsed_globals)
        if not parsed_args.registry_ids:
            # No explicit registries: the service returns a token for the
            # caller's default registry.
            result = ecr_client.get_authorization_token()
        else:
            result = ecr_client.get_authorization_token(
                registryIds=parsed_args.registry_ids)
        for auth in result['authorizationData']:
            auth_token = b64decode(auth['authorizationToken']).decode()
            # The decoded token is "username:password".  Split on the first
            # ':' only so a password containing ':' does not break the
            # two-element unpacking.
            username, password = auth_token.split(':', 1)
            sys.stdout.write('docker login -u %s -p %s -e none %s\n'
                             % (username, password, auth['proxyEndpoint']))
        return 0
Beispiel #7
0
class S3(BasicCommand):
    """Top-level ``aws s3`` command; only declares its subcommand table."""
    NAME = 's3'
    DESCRIPTION = BasicCommand.FROM_FILE('s3/_concepts.rst')
    SYNOPSIS = "aws s3 <Command> [<Arg> ...]"
    # Each entry maps a subcommand name to the class implementing it.
    SUBCOMMANDS = [{
        'name': 'ls',
        'command_class': ListCommand
    }, {
        'name': 'website',
        'command_class': WebsiteCommand
    }, {
        'name': 'cp',
        'command_class': CpCommand
    }, {
        'name': 'mv',
        'command_class': MvCommand
    }, {
        'name': 'rm',
        'command_class': RmCommand
    }, {
        'name': 'sync',
        'command_class': SyncCommand
    }, {
        'name': 'mb',
        'command_class': MbCommand
    }, {
        'name': 'rb',
        'command_class': RbCommand
    }]
Beispiel #8
0
class RmCommand(S3TransferCommand):
    """``aws s3 rm``: delete an S3 object."""
    NAME = 'rm'
    DESCRIPTION = "Deletes an S3 object."
    USAGE = "<S3Path>"
    # One positional S3 path plus the shared flag constants
    # (dry-run, quiet, recursive, include/exclude filters, ...).
    ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True,
                  'synopsis': USAGE}, DRYRUN, QUIET, RECURSIVE, INCLUDE,
                 EXCLUDE, ONLY_SHOW_ERRORS, PAGE_SIZE]
    EXAMPLES = BasicCommand.FROM_FILE('s3/rm.rst')
Beispiel #9
0
class SyncCommand(S3TransferCommand):
    """``aws s3 sync``: sync directories and S3 prefixes."""
    NAME = 'sync'
    DESCRIPTION = "Syncs directories and S3 prefixes."
    USAGE = "<LocalPath> <S3Path> or <S3Path> " \
            "<LocalPath> or <S3Path> <S3Path>"
    # Exactly two positional paths (source, destination) plus the shared
    # transfer flags declared in TRANSFER_ARGS.
    ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True,
                  'synopsis': USAGE}] + TRANSFER_ARGS
    EXAMPLES = BasicCommand.FROM_FILE('s3/sync.rst')
Beispiel #10
0
class ECRLogin(BasicCommand):
    """Log in with 'docker login'"""
    NAME = 'get-login'

    DESCRIPTION = BasicCommand.FROM_FILE('ecr/get-login_description.rst')

    ARG_TABLE = [
        {
            'name': 'registry-ids',
            'help_text': 'A list of AWS account IDs that correspond to the '
                         'Amazon ECR registries that you want to log in to.',
            'required': False,
            'nargs': '+'
        },
        {
            # --include-email / --no-include-email form a mutually
            # exclusive group writing to the same ``include_email`` dest;
            # the default is True (emit the deprecated '-e none').
            'name': 'include-email',
            'action': 'store_true',
            'group_name': 'include-email',
            'dest': 'include_email',
            'default': True,
            'required': False,
            'help_text': (
                "Specify if the '-e' flag should be included in the "
                "'docker login' command.  The '-e' option has been deprecated "
                "and is removed in Docker version 17.06 and later.  You must "
                "specify --no-include-email if you're using Docker version "
                "17.06 or later.  The default behavior is to include the "
                "'-e' flag in the 'docker login' output."),
        },
        {
            'name': 'no-include-email',
            'help_text': 'Include email arg',
            'action': 'store_false',
            'default': True,
            'group_name': 'include-email',
            'dest': 'include_email',
            'required': False,
        },
    ]

    def _run_main(self, parsed_args, parsed_globals):
        """Print one ``docker login`` command line per registry; returns 0."""
        ecr_client = create_client_from_parsed_globals(
            self._session, 'ecr', parsed_globals)
        if not parsed_args.registry_ids:
            # No explicit registries: the service returns a token for the
            # caller's default registry.
            result = ecr_client.get_authorization_token()
        else:
            result = ecr_client.get_authorization_token(
                registryIds=parsed_args.registry_ids)
        for auth in result['authorizationData']:
            auth_token = b64decode(auth['authorizationToken']).decode()
            # The decoded token is "username:password".  Split on the first
            # ':' only so a password containing ':' does not break the
            # two-element unpacking.
            username, password = auth_token.split(':', 1)
            command = ['docker', 'login', '-u', username, '-p', password]
            if parsed_args.include_email:
                command.extend(['-e', 'none'])
            command.append(auth['proxyEndpoint'])
            sys.stdout.write(' '.join(command))
            sys.stdout.write('\n')
        return 0
Beispiel #11
0
class MvCommand(S3TransferCommand):
    """``aws s3 mv``: move a file/object between local disk and S3."""
    NAME = 'mv'
    DESCRIPTION = "Moves a local file or S3 object to " \
                  "another location locally or in S3."
    USAGE = "<LocalPath> <S3Path> or <S3Path> <LocalPath> " \
            "or <S3Path> <S3Path>"
    # Two positional paths plus the shared transfer flags, extended with
    # the metadata-directive and recursive options.
    ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True,
                  'synopsis': USAGE}] + TRANSFER_ARGS + [METADATA_DIRECTIVE,
                                                         RECURSIVE]
    EXAMPLES = BasicCommand.FROM_FILE('s3/mv.rst')
Beispiel #12
0
class CpCommand(S3TransferCommand):
    """``aws s3 cp``: copy a file/object between local disk and S3."""
    NAME = 'cp'
    DESCRIPTION = "Copies a local file or S3 object to another location " \
                  "locally or in S3."
    USAGE = "<LocalPath> <S3Path> or <S3Path> <LocalPath> " \
            "or <S3Path> <S3Path>"
    # Two positional paths plus the shared transfer flags, extended with
    # the expected-size and recursive options.
    ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True,
                  'synopsis': USAGE}] + TRANSFER_ARGS + \
                [EXPECTED_SIZE, RECURSIVE]
    EXAMPLES = BasicCommand.FROM_FILE('s3/cp.rst')
Beispiel #13
0
 def test_basic_help_with_contents(self):
     """A FROM_FILE description is read through the patched _open."""
     fake_module = mock.Mock()
     fake_module.__file__ = '/some/root'
     command = mock.Mock()
     command.DESCRIPTION = BasicCommand.FROM_FILE(
         'foo', 'bar', 'baz.txt', root_module=fake_module)
     help_command = BasicHelp(mock.Mock(), command, {}, {})
     with mock.patch('awscli.customizations.commands._open') as open_mock:
         file_handle = open_mock.return_value.__enter__.return_value
         file_handle.read.return_value = 'fake description'
         self.assertEqual(help_command.description, 'fake description')
Beispiel #14
0
class ECRPublicGetLoginPassword(BasicCommand):
    """Get a password to be used with container clients such as Docker"""
    NAME = 'get-login-password'

    DESCRIPTION = BasicCommand.FROM_FILE(
        'ecr-public/get-login-password_description.rst')

    def _run_main(self, parsed_args, parsed_globals):
        """Write the ECR Public registry password to stdout; returns 0."""
        ecr_public_client = create_client_from_parsed_globals(
            self._session, 'ecr-public', parsed_globals)
        result = ecr_public_client.get_authorization_token()
        auth = result['authorizationData']
        auth_token = b64decode(auth['authorizationToken']).decode()
        # The decoded token is "username:password".  Split on the first ':'
        # only so a password containing ':' is kept intact; the username
        # half is discarded.
        _, password = auth_token.split(':', 1)
        sys.stdout.write(password)
        sys.stdout.write('\n')
        return 0
Beispiel #15
0
class S3(BasicCommand):
    """Top-level ``aws s3`` command that dispatches to its subcommands."""
    NAME = 's3'
    DESCRIPTION = BasicCommand.FROM_FILE('s3/_concepts.rst')
    SYNOPSIS = "aws s3 <Command> [<Arg> ...]"
    # One entry per subcommand: its name and the class implementing it.
    SUBCOMMANDS = [
        {'name': name, 'command_class': command_class}
        for name, command_class in [
            ('ls', ListCommand),
            ('website', WebsiteCommand),
            ('cp', CpCommand),
            ('mv', MvCommand),
            ('rm', RmCommand),
            ('sync', SyncCommand),
            ('mb', MbCommand),
            ('rb', RbCommand),
            ('presign', PresignCommand),
        ]
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # Invoking bare ``aws s3`` with no subcommand is a usage error.
        if parsed_args.subcommand is None:
            self._raise_usage_error()
Beispiel #16
0
class ConfigureSetCommand(BasicCommand):
    """Implements ``aws configure set``: write a single config value,
    routing credential keys to the shared credentials file."""
    NAME = 'set'
    DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',
                                         '_description.rst')
    SYNOPSIS = 'aws configure set varname value [--profile profile-name]'
    EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')
    ARG_TABLE = [
        {'name': 'varname',
         'help_text': 'The name of the config value to set.',
         'action': 'store',
         'cli_type_name': 'string', 'positional_arg': True},
        {'name': 'value',
         'help_text': 'The value to set.',
         'action': 'store',
         'cli_type_name': 'string', 'positional_arg': True},
    ]
    # Any variables specified in this list will be written to
    # the ~/.aws/credentials file instead of ~/.aws/config.
    _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',
                            'aws_session_token']

    def __init__(self, session, config_writer=None):
        # ``config_writer`` is injectable for testing; defaults to a real
        # ConfigFileWriter.
        super(ConfigureSetCommand, self).__init__(session)
        if config_writer is None:
            config_writer = ConfigFileWriter()
        self._config_writer = config_writer

    def _run_main(self, args, parsed_globals):
        """Resolve section/key/value from ``args`` and write them out."""
        varname = args.varname
        value = args.value
        section = 'default'
        # Before handing things off to the config writer,
        # we need to find out three things:
        # 1. What section we're writing to (section).
        # 2. The name of the config key (varname)
        # 3. The actual value (value).
        if '.' not in varname:
            # unqualified name, scope it to the current
            # profile (or leave it as the 'default' section if
            # no profile is set).
            if self._session.profile is not None:
                section = 'profile %s' % self._session.profile
        else:
            # First figure out if it's been scoped to a profile.
            parts = varname.split('.')
            if parts[0] in ('default', 'profile'):
                # Then we know we're scoped to a profile.
                if parts[0] == 'default':
                    section = 'default'
                    remaining = parts[1:]
                else:
                    # [profile, profile_name, ...]
                    section = "profile %s" % parts[1]
                    remaining = parts[2:]
                varname = remaining[0]
                if len(remaining) == 2:
                    # Nested value, e.g. "profile.dev.s3.signing" becomes
                    # key "s3" with a one-entry sub-dict as the value.
                    value = {remaining[1]: value}
            elif len(parts) == 2:
                # Otherwise it's something like "set preview.service true"
                # or something in the [plugin] section.
                section, varname = parts
        config_filename = os.path.expanduser(
            self._session.get_config_variable('config_file'))
        updated_config = {'__section__': section, varname: value}
        if varname in self._WRITE_TO_CREDS_FILE:
            # Credential keys go to the shared credentials file, where
            # profile sections are named without the "profile " prefix.
            config_filename = os.path.expanduser(
                self._session.get_config_variable('credentials_file'))
            section_name = updated_config['__section__']
            if section_name.startswith('profile '):
                updated_config['__section__'] = section_name[8:]
        self._config_writer.update_config(updated_config, config_filename)
Beispiel #17
0
class ConfigureCommand(BasicCommand):
    """Interactive ``aws configure``: prompt for the standard values and
    write them to the config and shared credentials files."""
    NAME = 'configure'
    DESCRIPTION = BasicCommand.FROM_FILE()
    SYNOPSIS = ('aws configure [--profile profile-name]')
    EXAMPLES = ('To create a new configuration::\n'
                '\n'
                '    $ aws configure\n'
                '    AWS Access Key ID [None]: accesskey\n'
                '    AWS Secret Access Key [None]: secretkey\n'
                '    Default region name [None]: us-west-2\n'
                '    Default output format [None]:\n'
                '\n'
                'To update just the region name::\n'
                '\n'
                '    $ aws configure\n'
                '    AWS Access Key ID [****]:\n'
                '    AWS Secret Access Key [****]:\n'
                '    Default region name [us-west-1]: us-west-2\n'
                '    Default output format [None]:\n')
    SUBCOMMANDS = [{
        'name': 'list',
        'command_class': ConfigureListCommand
    }, {
        'name': 'get',
        'command_class': ConfigureGetCommand
    }, {
        'name': 'set',
        'command_class': ConfigureSetCommand
    }, {
        'name': 'add-model',
        'command_class': AddModelCommand
    }]

    # If you want to add new values to prompt, update this list here.
    VALUES_TO_PROMPT = [
        # (config_name, prompt_text)
        ('aws_access_key_id', "AWS Access Key ID"),
        ('aws_secret_access_key', "AWS Secret Access Key"),
        ('region', "Default region name"),
        ('output', "Default output format"),
    ]

    def __init__(self, session, prompter=None, config_writer=None):
        # ``prompter`` and ``config_writer`` are injectable for testing.
        super(ConfigureCommand, self).__init__(session)
        if prompter is None:
            prompter = InteractivePrompter()
        self._prompter = prompter
        if config_writer is None:
            config_writer = ConfigFileWriter()
        self._config_writer = config_writer

    def _run_main(self, parsed_args, parsed_globals):
        # Called when invoked with no args "aws configure"
        new_values = {}
        # This is the config from the config file scoped to a specific
        # profile.
        try:
            config = self._session.get_scoped_config()
        except ProfileNotFound:
            # Unknown profile: prompt with no current values.
            config = {}
        for config_name, prompt_text in self.VALUES_TO_PROMPT:
            current_value = config.get(config_name)
            new_value = self._prompter.get_value(current_value, config_name,
                                                 prompt_text)
            # Only record values the user actually changed.
            if new_value is not None and new_value != current_value:
                new_values[config_name] = new_value
        config_filename = os.path.expanduser(
            self._session.get_config_variable('config_file'))
        if new_values:
            # NOTE: this pops the credential keys out of new_values before
            # the remainder is written to the config file.
            self._write_out_creds_file_values(new_values,
                                              parsed_globals.profile)
            if parsed_globals.profile is not None:
                new_values['__section__'] = ('profile %s' %
                                             parsed_globals.profile)
            self._config_writer.update_config(new_values, config_filename)

    def _write_out_creds_file_values(self, new_values, profile_name):
        # The access_key/secret_key are now *always* written to the shared
        # credentials file (~/.aws/credentials), see aws/aws-cli#847.
        # post-conditions: ~/.aws/credentials will have the updated credential
        # file values and new_values will have the cred vars removed.
        credential_file_values = {}
        if 'aws_access_key_id' in new_values:
            credential_file_values['aws_access_key_id'] = new_values.pop(
                'aws_access_key_id')
        if 'aws_secret_access_key' in new_values:
            credential_file_values['aws_secret_access_key'] = new_values.pop(
                'aws_secret_access_key')
        if credential_file_values:
            # Credentials-file sections use the bare profile name (no
            # "profile " prefix).
            if profile_name is not None:
                credential_file_values['__section__'] = profile_name
            shared_credentials_filename = os.path.expanduser(
                self._session.get_config_variable('credentials_file'))
            self._config_writer.update_config(credential_file_values,
                                              shared_credentials_filename)
Beispiel #18
0
class ListCommand(S3Command):
    """``aws s3 ls``: list buckets, or objects/prefixes under a prefix."""
    NAME = 'ls'
    DESCRIPTION = ("List S3 objects and common prefixes under a prefix or "
                   "all S3 buckets. Note that the --output argument "
                   "is ignored for this command.")
    USAGE = "<S3Path> or NONE"
    # Optional positional path; the 's3://' default means "list all
    # buckets" when no path is given.
    ARG_TABLE = [{
        'name': 'paths',
        'nargs': '?',
        'default': 's3://',
        'positional_arg': True,
        'synopsis': USAGE
    }, RECURSIVE, PAGE_SIZE, HUMAN_READABLE, SUMMARIZE]
    EXAMPLES = BasicCommand.FROM_FILE('s3/ls.rst')

    def _run_main(self, parsed_args, parsed_globals):
        """Dispatch to the right listing and return 0, or 1 when a key was
        requested and nothing matched."""
        # NOTE(review): super()._run_main presumably wires up self.service
        # and self.endpoint used below -- confirm in S3Command.
        super(ListCommand, self)._run_main(parsed_args, parsed_globals)
        # Per-invocation state consumed by _display_page/_check_no_objects
        # and _print_summary.
        self._empty_result = False
        self._at_first_page = True
        self._size_accumulator = 0
        self._total_objects = 0
        self._human_readable = parsed_args.human_readable
        path = parsed_args.paths
        # Strip the scheme so find_bucket_key sees "bucket/key".
        if path.startswith('s3://'):
            path = path[5:]
        bucket, key = find_bucket_key(path)
        if not bucket:
            self._list_all_buckets()
        elif parsed_args.dir_op:
            # Then --recursive was specified.
            self._list_all_objects_recursive(bucket, key,
                                             parsed_args.page_size)
        else:
            self._list_all_objects(bucket, key, parsed_args.page_size)
        if parsed_args.summarize:
            self._print_summary()
        if key:
            # User specified a key to look for. We should return an rc of one
            # if there are no matching keys and/or prefixes or return an rc
            # of zero if there are matching keys or prefixes.
            return self._check_no_objects()
        else:
            # This covers the case when user is trying to list all of
            # the buckets or is trying to list the objects of a bucket
            # (without specifying a key). For both situations, a rc of 0
            # should be returned because applicable errors are supplied by
            # the server (i.e. bucket not existing). These errors will be
            # thrown before reaching the automatic return of rc of zero.
            return 0

    def _list_all_objects(self, bucket, key, page_size=None):
        """Page through ListObjects with a '/' delimiter (non-recursive)
        and display each page."""
        operation = self.service.get_operation('ListObjects')
        iterator = operation.paginate(self.endpoint,
                                      bucket=bucket,
                                      prefix=key,
                                      delimiter='/',
                                      page_size=page_size)
        for _, response_data in iterator:
            self._display_page(response_data)

    def _display_page(self, response_data, use_basename=True):
        """Print one page of CommonPrefixes and Contents, updating the
        size/object accumulators and the empty/first-page flags."""
        common_prefixes = response_data.get('CommonPrefixes', [])
        contents = response_data.get('Contents', [])
        if not contents and not common_prefixes:
            # Remember that this page was empty; _check_no_objects only
            # treats an empty *first* page as "no match".
            self._empty_result = True
            return
        for common_prefix in common_prefixes:
            # 'a/b/c/' splits to ['a', 'b', 'c', ''] -- [-2] is the last
            # real path component.
            prefix_components = common_prefix['Prefix'].split('/')
            prefix = prefix_components[-2]
            pre_string = "PRE".rjust(30, " ")
            print_str = pre_string + ' ' + prefix + '/\n'
            uni_print(print_str)
        for content in contents:
            last_mod_str = self._make_last_mod_str(content['LastModified'])
            self._size_accumulator += int(content['Size'])
            self._total_objects += 1
            size_str = self._make_size_str(content['Size'])
            if use_basename:
                # Non-recursive listing shows only the final path component.
                filename_components = content['Key'].split('/')
                filename = filename_components[-1]
            else:
                filename = content['Key']
            print_str = last_mod_str + ' ' + size_str + ' ' + \
                filename + '\n'
            uni_print(print_str)
        self._at_first_page = False

    def _list_all_buckets(self):
        """Print creation date and name for every bucket in the account."""
        operation = self.service.get_operation('ListBuckets')
        response_data = operation.call(self.endpoint)[1]
        buckets = response_data['Buckets']
        for bucket in buckets:
            last_mod_str = self._make_last_mod_str(bucket['CreationDate'])
            print_str = last_mod_str + ' ' + bucket['Name'] + '\n'
            uni_print(print_str)

    def _list_all_objects_recursive(self, bucket, key, page_size=None):
        """Page through ListObjects without a delimiter (recursive) and
        display each page with full key names."""
        operation = self.service.get_operation('ListObjects')
        iterator = operation.paginate(self.endpoint,
                                      bucket=bucket,
                                      prefix=key,
                                      page_size=page_size)
        for _, response_data in iterator:
            self._display_page(response_data, use_basename=False)

    def _check_no_objects(self):
        """Return 1 if the very first page had no keys or prefixes,
        0 otherwise."""
        if self._empty_result and self._at_first_page:
            # Nothing was returned in the first page of results when listing
            # the objects.
            return 1
        return 0

    def _make_last_mod_str(self, last_mod):
        """
        This function creates the last modified time string whenever objects
        or buckets are being listed
        """
        # ``parse``/``tzlocal`` come from the file's imports (presumably
        # dateutil): parse the timestamp, convert to local time.
        last_mod = parse(last_mod)
        last_mod = last_mod.astimezone(tzlocal())
        last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2),
                        str(last_mod.day).zfill(2),
                        str(last_mod.hour).zfill(2),
                        str(last_mod.minute).zfill(2),
                        str(last_mod.second).zfill(2))
        last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup
        return last_mod_str.ljust(19, ' ')

    def _make_size_str(self, size):
        """
        This function creates the size string when objects are being listed.
        """
        if self._human_readable:
            size_str = human_readable_size(size)
        else:
            size_str = str(size)
        return size_str.rjust(10, ' ')

    def _print_summary(self):
        """
        This function prints a summary of total objects and total bytes
        """
        print_str = str(self._total_objects)
        # The literal is already wider than 15 chars, so rjust is a no-op.
        uni_print("\nTotal Objects: ".rjust(15, ' ') + print_str + "\n")
        if self._human_readable:
            print_str = human_readable_size(self._size_accumulator)
        else:
            print_str = str(self._size_accumulator)
        uni_print("Total Size: ".rjust(15, ' ') + print_str + "\n")
Beispiel #19
0
class DescribeCluster(BasicCommand):
    """``aws emr describe-cluster``: merge cluster details, instance
    groups, bootstrap actions and the master DNS into one response."""
    NAME = 'describe-cluster'
    DESCRIPTION = ('Provides  cluster-level details including status, hardware'
                   ' and software configuration, VPC settings, bootstrap'
                   ' actions, instance groups and so on. For information about'
                   ' the cluster steps, see <code>list-steps</code>.')
    ARG_TABLE = [{'name': 'cluster-id',
                  'required': True,
                  'help_text': helptext.CLUSTER_ID}]
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'describe-cluster.rst')

    def _run_main(self, parsed_args, parsed_globals):
        """Issue the three service calls, combine them, display, return 0."""
        parameters = {'ClusterId': parsed_args.cluster_id}

        describe_result = self._call(
            self._session, 'describe_cluster', parameters, parsed_globals)
        instance_groups_result = self._call(
            self._session, 'list_instance_groups', parameters, parsed_globals)
        bootstrap_actions_result = self._call(
            self._session, 'list_bootstrap_actions', parameters,
            parsed_globals)
        master_public_dns = self._find_master_public_dns(
            cluster_id=parsed_args.cluster_id, parsed_globals=parsed_globals)

        combined_result = self._construct_result(
            describe_result, instance_groups_result,
            bootstrap_actions_result, master_public_dns)
        emrutils.display_response(self._session, 'describe_cluster',
                                  combined_result, parsed_globals)
        return 0

    def _find_master_public_dns(self, cluster_id, parsed_globals):
        # Thin wrapper over emrutils, kept as a method for testability.
        return emrutils.find_master_public_dns(
            session=self._session, cluster_id=cluster_id,
            parsed_globals=parsed_globals)

    def _call(self, session, operation_name, parameters, parsed_globals):
        # Route every service call through emrutils.call with the
        # region/endpoint/SSL settings taken from the global args.
        return emrutils.call(
            session, operation_name, parameters,
            region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl)

    def _get_key_of_result(self, keys):
        # Return the first key that is not "Marker"
        for candidate in keys:
            if candidate != "Marker":
                return candidate

    def _construct_result(self, describe_cluster_result,
                          list_instance_groups_result,
                          list_bootstrap_actions_result, master_public_dns):
        """Fold the auxiliary responses into the describe-cluster result."""
        result = describe_cluster_result
        cluster = result['Cluster']
        cluster['MasterPublicDnsName'] = master_public_dns
        # Default to empty lists, overwritten below when data is present.
        cluster['InstanceGroups'] = []
        cluster['BootstrapActions'] = []

        if list_instance_groups_result is not None:
            instance_groups = list_instance_groups_result.get('InstanceGroups')
            if instance_groups is not None:
                cluster['InstanceGroups'] = instance_groups
        if list_bootstrap_actions_result is not None:
            bootstrap_actions = list_bootstrap_actions_result.get(
                'BootstrapActions')
            if bootstrap_actions is not None:
                cluster['BootstrapActions'] = bootstrap_actions

        return result
Beispiel #20
0
class ListCommand(S3Command):
    """Implementation of the ``aws s3 ls`` subcommand.

    Lists all S3 buckets when no path is given, or the objects and
    common prefixes under an S3 path, optionally recursively.
    """
    NAME = 'ls'
    DESCRIPTION = ("List S3 objects and common prefixes under a prefix or "
                   "all S3 buckets. Note that the --output argument "
                   "is ignored for this command.")
    USAGE = "<S3Path> or NONE"
    ARG_TABLE = [{
        'name': 'paths',
        'nargs': '?',
        'default': 's3://',
        'positional_arg': True,
        'synopsis': USAGE
    }, RECURSIVE]
    EXAMPLES = BasicCommand.FROM_FILE('s3/ls.rst')

    def _run_main(self, parsed_args, parsed_globals):
        """Dispatch to the appropriate listing mode and return exit code 0."""
        super(ListCommand, self)._run_main(parsed_args, parsed_globals)
        path = parsed_args.paths
        if path.startswith('s3://'):
            # Strip the scheme so only "bucket/key" remains.
            path = path[5:]
        bucket, key = find_bucket_key(path)
        if not bucket:
            # No bucket in the path (e.g. bare 's3://'): list all buckets.
            self._list_all_buckets()
        elif parsed_args.dir_op:
            # Then --recursive was specified.
            self._list_all_objects_recursive(bucket, key,
                                             parsed_globals.page_size)
        else:
            self._list_all_objects(bucket, key, parsed_globals.page_size)
        return 0

    def _list_all_objects(self, bucket, key, page_size=None):
        # Non-recursive listing: the '/' delimiter makes S3 group keys
        # into CommonPrefixes, which are rendered as "PRE" entries.
        operation = self.service.get_operation('ListObjects')
        iterator = operation.paginate(self.endpoint,
                                      bucket=bucket,
                                      prefix=key,
                                      delimiter='/',
                                      page_size=page_size)
        for _, response_data in iterator:
            self._display_page(response_data)

    def _display_page(self, response_data, use_basename=True):
        """Print one page of a ListObjects response.

        Common prefixes are printed as "PRE <prefix>/" lines; objects are
        printed as "<last-modified> <size> <name>" lines. When
        ``use_basename`` is True only the final path component of each key
        is shown.
        """
        # NOTE(review): assumes 'CommonPrefixes' and 'Contents' are always
        # present in the response page — confirm against the paginator.
        common_prefixes = response_data['CommonPrefixes']
        contents = response_data['Contents']
        for common_prefix in common_prefixes:
            # The prefix ends with '/', so the directory name is the
            # second-to-last split component.
            prefix_components = common_prefix['Prefix'].split('/')
            prefix = prefix_components[-2]
            pre_string = "PRE".rjust(30, " ")
            print_str = pre_string + ' ' + prefix + '/\n'
            uni_print(print_str)
            sys.stdout.flush()
        for content in contents:
            last_mod_str = self._make_last_mod_str(content['LastModified'])
            size_str = self._make_size_str(content['Size'])
            if use_basename:
                filename_components = content['Key'].split('/')
                filename = filename_components[-1]
            else:
                filename = content['Key']
            print_str = last_mod_str + ' ' + size_str + ' ' + \
                filename + '\n'
            uni_print(print_str)
            sys.stdout.flush()

    def _list_all_buckets(self):
        # Buckets are not paginated; a single ListBuckets call returns all.
        operation = self.service.get_operation('ListBuckets')
        response_data = operation.call(self.endpoint)[1]
        buckets = response_data['Buckets']
        for bucket in buckets:
            last_mod_str = self._make_last_mod_str(bucket['CreationDate'])
            print_str = last_mod_str + ' ' + bucket['Name'] + '\n'
            uni_print(print_str)
            sys.stdout.flush()

    def _list_all_objects_recursive(self, bucket, key, page_size=None):
        # Recursive listing: no delimiter, so every key under the prefix
        # is returned; full keys are printed (use_basename=False).
        operation = self.service.get_operation('ListObjects')
        iterator = operation.paginate(self.endpoint,
                                      bucket=bucket,
                                      prefix=key,
                                      page_size=page_size)
        for _, response_data in iterator:
            self._display_page(response_data, use_basename=False)

    def _make_last_mod_str(self, last_mod):
        """
        This function creates the last modified time string whenever objects
        or buckets are being listed
        """
        # Parse the timestamp and convert to the local timezone before
        # formatting as a fixed-width "YYYY-MM-DD HH:MM:SS" column.
        last_mod = parse(last_mod)
        last_mod = last_mod.astimezone(tzlocal())
        last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2),
                        str(last_mod.day).zfill(2),
                        str(last_mod.hour).zfill(2),
                        str(last_mod.minute).zfill(2),
                        str(last_mod.second).zfill(2))
        last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup
        return last_mod_str.ljust(19, ' ')

    def _make_size_str(self, size):
        """
        This function creates the size string when objects are being listed.
        """
        # Right-align in a 10-character column so sizes line up.
        size_str = str(size)
        return size_str.rjust(10, ' ')
Beispiel #21
0
 def setUp(self):
     # Build a fresh mocked session and a command under test for each
     # test case so state never leaks between tests.
     session = mock.Mock()
     self.session = session
     self.command = BasicCommand(session)
Beispiel #22
0
class ConfigureGetCommand(BasicCommand):
    """``aws configure get``: print a single configuration value.

    Resolves ``varname`` either from the current scoped config (no dots)
    or via dotted notation (predefined section, profile-qualified, or
    nested sub-section value) and writes it to the output stream.
    """
    NAME = 'get'
    DESCRIPTION = BasicCommand.FROM_FILE('configure', 'get',
                                         '_description.rst')
    SYNOPSIS = 'aws configure get varname [--profile profile-name]'
    EXAMPLES = BasicCommand.FROM_FILE('configure', 'get', '_examples.rst')
    ARG_TABLE = [
        {
            'name': 'varname',
            'help_text': 'The name of the config value to retrieve.',
            'action': 'store',
            'cli_type_name': 'string',
            'positional_arg': True
        },
    ]

    def __init__(self, session, stream=None, error_stream=None):
        """Create the command.

        :param session: The botocore-style session used for config lookups.
        :param stream: Output stream for the value (defaults to sys.stdout).
        :param error_stream: Stream for error messages (defaults to
            sys.stderr). Defaults are resolved lazily so tests can inject
            their own streams.
        """
        super(ConfigureGetCommand, self).__init__(session)
        if stream is None:
            stream = sys.stdout
        if error_stream is None:
            error_stream = sys.stderr
        self._stream = stream
        self._error_stream = error_stream

    def _run_main(self, args, parsed_globals):
        """Resolve and print the requested config value.

        Returns 0 on success; 1 when the value is missing or ``varname``
        names a section/sub-section rather than a scalar value.
        """
        varname = args.varname

        if '.' not in varname:
            # get_scoped_config() returns the config variables in the config
            # file (not the logical_var names), which is what we want.
            config = self._session.get_scoped_config()
            value = config.get(varname)
        else:
            value = self._get_dotted_config_value(varname)

        # Pass the value as a lazy %-style argument so the message is only
        # formatted when DEBUG logging is actually enabled.
        LOG.debug('Config value retrieved: %s', value)

        if isinstance(value, six.string_types):
            self._stream.write(value)
            self._stream.write('\n')
            return 0
        elif isinstance(value, dict):
            # TODO: add support for this. We would need to print it off in
            # the same format as the config file.
            self._error_stream.write(
                'varname (%s) must reference a value, not a section or '
                'sub-section.' % varname)
            return 1
        else:
            return 1

    def _get_dotted_config_value(self, varname):
        """Resolve a dotted ``varname`` against the full configuration.

        Supports ``section.name`` for predefined sections,
        ``profile.<name>.<key>`` and ``<profile>.<key>`` forms, and one
        extra trailing component for values nested inside a sub-section.
        Returns None when the value cannot be found.
        """
        parts = varname.split('.')
        num_dots = varname.count('.')

        # Logic to deal with predefined sections like [preview], [plugin] and
        # etc.
        if num_dots == 1 and parts[0] in PREDEFINED_SECTION_NAMES:
            full_config = self._session.full_config
            section, config_name = varname.split('.')
            value = full_config.get(section, {}).get(config_name)
            if value is None:
                # Try to retrieve it from the profile config.
                value = full_config['profiles'].get(section,
                                                    {}).get(config_name)
            return value

        if parts[0] == 'profile':
            profile_name = parts[1]
            config_name = parts[2]
            remaining = parts[3:]
        # Check if varname starts with 'default' profile (e.g.
        # default.emr-dev.emr.instance_profile) If not, go further to check
        # if varname starts with a known profile name
        elif parts[0] == 'default' or (
                parts[0] in self._session.full_config['profiles']):
            profile_name = parts[0]
            config_name = parts[1]
            remaining = parts[2:]
        else:
            # No explicit profile: fall back to the session's configured
            # profile, or 'default' when none is set.
            profile_name = self._session.get_config_variable('profile')
            if profile_name is None:
                profile_name = 'default'
            config_name = parts[0]
            remaining = parts[1:]

        value = self._session.full_config['profiles'].get(profile_name,
                                                          {}).get(config_name)
        if len(remaining) == 1:
            # One trailing component means the value is nested one level
            # deeper; a non-dict value yields None instead of raising.
            try:
                value = value.get(remaining[-1])
            except AttributeError:
                value = None
        return value
Beispiel #23
0
class CreateCluster(Command):
    """``aws emr create-cluster``: validate arguments, build a RunJobFlow
    request from the many CLI options, invoke it and display the result."""
    NAME = 'create-cluster'
    DESCRIPTION = helptext.CREATE_CLUSTER_DESCRIPTION
    ARG_TABLE = [
        {'name': 'release-label',
         'help_text': helptext.RELEASE_LABEL},
        {'name': 'ami-version',
         'help_text': helptext.AMI_VERSION},
        {'name': 'instance-groups',
         'schema': argumentschema.INSTANCE_GROUPS_SCHEMA,
         'help_text': helptext.INSTANCE_GROUPS},
        {'name': 'instance-type',
         'help_text': helptext.INSTANCE_TYPE},
        {'name': 'instance-count',
         'help_text': helptext.INSTANCE_COUNT},
        {'name': 'auto-terminate', 'action': 'store_true',
         'group_name': 'auto_terminate',
         'help_text': helptext.AUTO_TERMINATE},
        {'name': 'no-auto-terminate', 'action': 'store_true',
         'group_name': 'auto_terminate'},
        {'name': 'instance-fleets',
         'schema': argumentschema.INSTANCE_FLEETS_SCHEMA,
         'help_text': helptext.INSTANCE_FLEETS},
        {'name': 'name',
         'default': 'Development Cluster',
         'help_text': helptext.CLUSTER_NAME},
        {'name': 'log-uri',
         'help_text': helptext.LOG_URI},
        {'name': 'log-encryption-kms-key-id',
         'help_text': helptext.LOG_ENCRYPTION_KMS_KEY_ID},
        {'name': 'service-role',
         'help_text': helptext.SERVICE_ROLE},
        {'name': 'auto-scaling-role',
         'help_text': helptext.AUTOSCALING_ROLE},
        {'name': 'use-default-roles', 'action': 'store_true',
         'help_text': helptext.USE_DEFAULT_ROLES},
        {'name': 'configurations',
         'help_text': helptext.CONFIGURATIONS},
        {'name': 'ec2-attributes',
         'help_text': helptext.EC2_ATTRIBUTES,
         'schema': argumentschema.EC2_ATTRIBUTES_SCHEMA},
        {'name': 'termination-protected', 'action': 'store_true',
         'group_name': 'termination_protected',
         'help_text': helptext.TERMINATION_PROTECTED},
        {'name': 'no-termination-protected', 'action': 'store_true',
         'group_name': 'termination_protected'},
        {'name': 'scale-down-behavior',
         'help_text': helptext.SCALE_DOWN_BEHAVIOR},
        {'name': 'visible-to-all-users', 'action': 'store_true',
         'group_name': 'visibility',
         'help_text': helptext.VISIBILITY},
        {'name': 'no-visible-to-all-users', 'action': 'store_true',
         'group_name': 'visibility'},
        {'name': 'enable-debugging', 'action': 'store_true',
         'group_name': 'debug',
         'help_text': helptext.DEBUGGING},
        {'name': 'no-enable-debugging', 'action': 'store_true',
         'group_name': 'debug'},
        {'name': 'tags', 'nargs': '+',
         'help_text': helptext.TAGS,
         'schema': argumentschema.TAGS_SCHEMA},
        {'name': 'bootstrap-actions',
         'help_text': helptext.BOOTSTRAP_ACTIONS,
         'schema': argumentschema.BOOTSTRAP_ACTIONS_SCHEMA},
        {'name': 'applications',
         'help_text': helptext.APPLICATIONS,
         'schema': argumentschema.APPLICATIONS_SCHEMA},
        {'name': 'emrfs',
         'help_text': helptext.EMR_FS,
         'schema': argumentschema.EMR_FS_SCHEMA},
        {'name': 'steps',
         'schema': argumentschema.STEPS_SCHEMA,
         'help_text': helptext.STEPS},
        {'name': 'additional-info',
         'help_text': helptext.ADDITIONAL_INFO},
        {'name': 'restore-from-hbase-backup',
         'schema': argumentschema.HBASE_RESTORE_FROM_BACKUP_SCHEMA,
         'help_text': helptext.RESTORE_FROM_HBASE},
        {'name': 'security-configuration',
         'help_text': helptext.SECURITY_CONFIG},
        {'name': 'custom-ami-id',
         'help_text': helptext.CUSTOM_AMI_ID},
        {'name': 'ebs-root-volume-size',
         'help_text': helptext.EBS_ROOT_VOLUME_SIZE},
        {'name': 'repo-upgrade-on-boot',
         'help_text': helptext.REPO_UPGRADE_ON_BOOT},
        {'name': 'kerberos-attributes',
         'schema': argumentschema.KERBEROS_ATTRIBUTES_SCHEMA,
         'help_text': helptext.KERBEROS_ATTRIBUTES},
        {'name': 'step-concurrency-level',
         'cli_type_name': 'integer',
         'help_text': helptext.STEP_CONCURRENCY_LEVEL},
        {'name': 'managed-scaling-policy',
         'schema': argumentschema.MANAGED_SCALING_POLICY_SCHEMA,
         'help_text': helptext.MANAGED_SCALING_POLICY},
        {'name': 'placement-group-configs',
         'schema': argumentschema.PLACEMENT_GROUP_CONFIGS_SCHEMA,
         'help_text': helptext.PLACEMENT_GROUP_CONFIGS}
    ]
    SYNOPSIS = BasicCommand.FROM_FILE('emr', 'create-cluster-synopsis.txt')
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'create-cluster-examples.rst')

    def _run_main_command(self, parsed_args, parsed_globals):
        """Validate the parsed arguments, assemble the RunJobFlow request
        parameters and call the EMR run_job_flow API.

        Returns 0 on success; raises the various exceptions.* errors on
        invalid argument combinations.
        """
        params = {}
        params['Name'] = parsed_args.name

        self._validate_release_label_ami_version(parsed_args)

        service_role_validation_message = (
            " Either choose --use-default-roles or use both --service-role "
            "<roleName> and --ec2-attributes InstanceProfile=<profileName>.")

        if parsed_args.use_default_roles is True and \
                parsed_args.service_role is not None:
            raise exceptions.MutualExclusiveOptionError(
                option1="--use-default-roles",
                option2="--service-role",
                message=service_role_validation_message)

        if parsed_args.use_default_roles is True and \
                parsed_args.ec2_attributes is not None and \
                'InstanceProfile' in parsed_args.ec2_attributes:
            raise exceptions.MutualExclusiveOptionError(
                option1="--use-default-roles",
                option2="--ec2-attributes InstanceProfile",
                message=service_role_validation_message)

        if parsed_args.instance_groups is not None and \
                parsed_args.instance_fleets is not None:
            raise exceptions.MutualExclusiveOptionError(
                option1="--instance-groups",
                option2="--instance-fleets")

        instances_config = {}
        if parsed_args.instance_fleets is not None:
            instances_config['InstanceFleets'] = \
                instancefleetsutils.validate_and_build_instance_fleets(
                    parsed_args.instance_fleets)
        else:
            instances_config['InstanceGroups'] = \
                instancegroupsutils.validate_and_build_instance_groups(
                    instance_groups=parsed_args.instance_groups,
                    instance_type=parsed_args.instance_type,
                    instance_count=parsed_args.instance_count)

        if parsed_args.release_label is not None:
            params["ReleaseLabel"] = parsed_args.release_label
            if parsed_args.configurations is not None:
                try:
                    params["Configurations"] = json.loads(
                        parsed_args.configurations)
                except ValueError:
                    raise ValueError('aws: error: invalid json argument for '
                                     'option --configurations')

        if (parsed_args.release_label is None and
                parsed_args.ami_version is not None):
            # Raw string so '\d' is a regex digit class, not an (invalid)
            # string escape sequence.
            is_valid_ami_version = re.match(r'\d?\..*',
                                            parsed_args.ami_version)
            if is_valid_ami_version is None:
                raise exceptions.InvalidAmiVersionError(
                    ami_version=parsed_args.ami_version)
            params['AmiVersion'] = parsed_args.ami_version
        emrutils.apply_dict(
            params, 'AdditionalInfo', parsed_args.additional_info)
        emrutils.apply_dict(params, 'LogUri', parsed_args.log_uri)

        if parsed_args.log_encryption_kms_key_id is not None:
            emrutils.apply_dict(params, 'LogEncryptionKmsKeyId',
                parsed_args.log_encryption_kms_key_id)

        if parsed_args.use_default_roles is True:
            parsed_args.service_role = EMR_ROLE_NAME
            if parsed_args.ec2_attributes is None:
                parsed_args.ec2_attributes = {}
            parsed_args.ec2_attributes['InstanceProfile'] = EC2_ROLE_NAME

        emrutils.apply_dict(params, 'ServiceRole', parsed_args.service_role)

        # An auto-scaling policy on any instance group requires an
        # explicit auto-scaling role.
        if parsed_args.instance_groups is not None:
            for instance_group in instances_config['InstanceGroups']:
                if 'AutoScalingPolicy' in instance_group.keys():
                    if parsed_args.auto_scaling_role is None:
                        raise exceptions.MissingAutoScalingRoleError()

        emrutils.apply_dict(params, 'AutoScalingRole', parsed_args.auto_scaling_role)

        if parsed_args.scale_down_behavior is not None:
            emrutils.apply_dict(params, 'ScaleDownBehavior', parsed_args.scale_down_behavior)

        # Default to --no-auto-terminate when neither flag was given.
        if (
                parsed_args.no_auto_terminate is False and
                parsed_args.auto_terminate is False):
            parsed_args.no_auto_terminate = True

        instances_config['KeepJobFlowAliveWhenNoSteps'] = \
            emrutils.apply_boolean_options(
                parsed_args.no_auto_terminate,
                '--no-auto-terminate',
                parsed_args.auto_terminate,
                '--auto-terminate')

        instances_config['TerminationProtected'] = \
            emrutils.apply_boolean_options(
                parsed_args.termination_protected,
                '--termination-protected',
                parsed_args.no_termination_protected,
                '--no-termination-protected')

        # Default to --visible-to-all-users when neither flag was given.
        if (parsed_args.visible_to_all_users is False and
                parsed_args.no_visible_to_all_users is False):
            parsed_args.visible_to_all_users = True

        params['VisibleToAllUsers'] = \
            emrutils.apply_boolean_options(
                parsed_args.visible_to_all_users,
                '--visible-to-all-users',
                parsed_args.no_visible_to_all_users,
                '--no-visible-to-all-users')

        params['Tags'] = emrutils.parse_tags(parsed_args.tags)
        params['Instances'] = instances_config

        if parsed_args.ec2_attributes is not None:
            self._build_ec2_attributes(
                cluster=params, parsed_attrs=parsed_args.ec2_attributes)

        debugging_enabled = emrutils.apply_boolean_options(
            parsed_args.enable_debugging,
            '--enable-debugging',
            parsed_args.no_enable_debugging,
            '--no-enable-debugging')

        if parsed_args.log_uri is None and debugging_enabled is True:
            raise exceptions.LogUriError

        if debugging_enabled is True:
            self._update_cluster_dict(
                cluster=params,
                key='Steps',
                value=[
                    self._build_enable_debugging(parsed_args, parsed_globals)])

        if parsed_args.applications is not None:
            if parsed_args.release_label is None:
                app_list, ba_list, step_list = \
                    applicationutils.build_applications(
                        region=self.region,
                        parsed_applications=parsed_args.applications,
                        ami_version=params['AmiVersion'])
                self._update_cluster_dict(
                    params, 'NewSupportedProducts', app_list)
                self._update_cluster_dict(
                    params, 'BootstrapActions', ba_list)
                self._update_cluster_dict(
                    params, 'Steps', step_list)
            else:
                params["Applications"] = []
                for application in parsed_args.applications:
                    params["Applications"].append(application)

        hbase_restore_config = parsed_args.restore_from_hbase_backup
        if hbase_restore_config is not None:
            args = hbaseutils.build_hbase_restore_from_backup_args(
                dir=hbase_restore_config.get('Dir'),
                backup_version=hbase_restore_config.get('BackupVersion'))
            step_config = emrutils.build_step(
                jar=constants.HBASE_JAR_PATH,
                name=constants.HBASE_RESTORE_STEP_NAME,
                action_on_failure=constants.CANCEL_AND_WAIT,
                args=args)
            self._update_cluster_dict(
                params, 'Steps', [step_config])

        if parsed_args.bootstrap_actions is not None:
            self._build_bootstrap_actions(
                cluster=params,
                parsed_boostrap_actions=parsed_args.bootstrap_actions)

        if parsed_args.emrfs is not None:
            self._handle_emrfs_parameters(
                cluster=params,
                emrfs_args=parsed_args.emrfs,
                release_label=parsed_args.release_label)

        if parsed_args.steps is not None:
            steps_list = steputils.build_step_config_list(
                parsed_step_list=parsed_args.steps,
                region=self.region,
                release_label=parsed_args.release_label)
            self._update_cluster_dict(
                cluster=params, key='Steps', value=steps_list)

        if parsed_args.security_configuration is not None:
            emrutils.apply_dict(
                params, 'SecurityConfiguration', parsed_args.security_configuration)

        if parsed_args.custom_ami_id is not None:
            emrutils.apply_dict(
                params, 'CustomAmiId', parsed_args.custom_ami_id
            )
        if parsed_args.ebs_root_volume_size is not None:
            emrutils.apply_dict(
                params, 'EbsRootVolumeSize', int(parsed_args.ebs_root_volume_size)
            )

        if parsed_args.repo_upgrade_on_boot is not None:
            emrutils.apply_dict(
                params, 'RepoUpgradeOnBoot', parsed_args.repo_upgrade_on_boot
            )

        if parsed_args.kerberos_attributes is not None:
            emrutils.apply_dict(
                params, 'KerberosAttributes', parsed_args.kerberos_attributes)

        if parsed_args.step_concurrency_level is not None:
            params['StepConcurrencyLevel'] = parsed_args.step_concurrency_level

        if parsed_args.managed_scaling_policy is not None:
            emrutils.apply_dict(
                params, 'ManagedScalingPolicy', parsed_args.managed_scaling_policy)

        if parsed_args.placement_group_configs is not None:
            emrutils.apply_dict(
                params, 'PlacementGroupConfigs',
                parsed_args.placement_group_configs)

        self._validate_required_applications(parsed_args)

        run_job_flow_response = emrutils.call(
            self._session, 'run_job_flow', params, self.region,
            parsed_globals.endpoint_url, parsed_globals.verify_ssl)

        constructed_result = self._construct_result(run_job_flow_response)
        emrutils.display_response(self._session, 'run_job_flow',
                                  constructed_result, parsed_globals)

        return 0

    def _construct_result(self, run_job_flow_result):
        """Extract ClusterId/ClusterArn from the run_job_flow response;
        returns an empty dict when no JobFlowId was returned."""
        jobFlowId = None
        clusterArn = None
        if run_job_flow_result is not None:
            jobFlowId = run_job_flow_result.get('JobFlowId')
            clusterArn = run_job_flow_result.get('ClusterArn')

        if jobFlowId is not None:
            return {'ClusterId': jobFlowId,
                    'ClusterArn': clusterArn}
        else:
            return {}

    def _build_ec2_attributes(self, cluster, parsed_attrs):
        """Validate the --ec2-attributes key/value pairs and copy them into
        the request's Instances config (and JobFlowRole on the cluster)."""
        keys = parsed_attrs.keys()
        instances = cluster['Instances']

        # Singular/plural forms of subnet and AZ options are mutually
        # exclusive, and subnets cannot be combined with AZs.
        if ('SubnetId' in keys and 'SubnetIds' in keys):
            raise exceptions.MutualExclusiveOptionError(
                option1="SubnetId",
                option2="SubnetIds")

        if ('AvailabilityZone' in keys and 'AvailabilityZones' in keys):
            raise exceptions.MutualExclusiveOptionError(
                option1="AvailabilityZone",
                option2="AvailabilityZones")

        if ('SubnetId' in keys or 'SubnetIds' in keys) \
                and ('AvailabilityZone' in keys or 'AvailabilityZones' in keys):
            raise exceptions.SubnetAndAzValidationError

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='KeyName',
            dest_params=instances, dest_key='Ec2KeyName')
        emrutils.apply_params(
            src_params=parsed_attrs, src_key='SubnetId',
            dest_params=instances, dest_key='Ec2SubnetId')
        emrutils.apply_params(
            src_params=parsed_attrs, src_key='SubnetIds',
            dest_params=instances, dest_key='Ec2SubnetIds')

        if 'AvailabilityZone' in keys:
            instances['Placement'] = dict()
            emrutils.apply_params(
                src_params=parsed_attrs, src_key='AvailabilityZone',
                dest_params=instances['Placement'],
                dest_key='AvailabilityZone')

        if 'AvailabilityZones' in keys:
            instances['Placement'] = dict()
            emrutils.apply_params(
                src_params=parsed_attrs, src_key='AvailabilityZones',
                dest_params=instances['Placement'],
                dest_key='AvailabilityZones')

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='InstanceProfile',
            dest_params=cluster, dest_key='JobFlowRole')

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='EmrManagedMasterSecurityGroup',
            dest_params=instances, dest_key='EmrManagedMasterSecurityGroup')

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='EmrManagedSlaveSecurityGroup',
            dest_params=instances, dest_key='EmrManagedSlaveSecurityGroup')

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='ServiceAccessSecurityGroup',
            dest_params=instances, dest_key='ServiceAccessSecurityGroup')

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='AdditionalMasterSecurityGroups',
            dest_params=instances, dest_key='AdditionalMasterSecurityGroups')

        emrutils.apply_params(
            src_params=parsed_attrs, src_key='AdditionalSlaveSecurityGroups',
            dest_params=instances, dest_key='AdditionalSlaveSecurityGroups')

        emrutils.apply(params=cluster, key='Instances', value=instances)

        return cluster

    def _build_bootstrap_actions(
            self, cluster, parsed_boostrap_actions):
        """Append the parsed --bootstrap-actions to the cluster config,
        enforcing the maximum bootstrap-action count."""
        cluster_ba_list = cluster.get('BootstrapActions')
        if cluster_ba_list is None:
            cluster_ba_list = []

        bootstrap_actions = []
        if len(cluster_ba_list) + len(parsed_boostrap_actions) \
                > constants.MAX_BOOTSTRAP_ACTION_NUMBER:
            raise ValueError('aws: error: maximum number of '
                             'bootstrap actions for a cluster exceeded.')

        for ba in parsed_boostrap_actions:
            ba_config = {}
            if ba.get('Name') is not None:
                ba_config['Name'] = ba.get('Name')
            else:
                # Fall back to the standard default bootstrap-action name.
                ba_config['Name'] = constants.BOOTSTRAP_ACTION_NAME
            script_arg_config = {}
            emrutils.apply_params(
                src_params=ba, src_key='Path',
                dest_params=script_arg_config, dest_key='Path')
            emrutils.apply_params(
                src_params=ba, src_key='Args',
                dest_params=script_arg_config, dest_key='Args')
            emrutils.apply(
                params=ba_config,
                key='ScriptBootstrapAction',
                value=script_arg_config)
            bootstrap_actions.append(ba_config)

        result = cluster_ba_list + bootstrap_actions
        if len(result) > 0:
            cluster['BootstrapActions'] = result

        return cluster

    def _build_enable_debugging(self, parsed_args, parsed_globals):
        """Build the step config that enables EMR debugging; release-label
        clusters use command-runner, AMI clusters use the script runner."""
        if parsed_args.release_label:
            jar = constants.COMMAND_RUNNER
            args = [constants.DEBUGGING_COMMAND]
        else:
            jar = emrutils.get_script_runner(self.region)
            args = [emrutils.build_s3_link(
                relative_path=constants.DEBUGGING_PATH,
                region=self.region)]

        return emrutils.build_step(
            name=constants.DEBUGGING_NAME,
            action_on_failure=constants.TERMINATE_CLUSTER,
            jar=jar,
            args=args)

    def _update_cluster_dict(self, cluster, key, value):
        """Extend cluster[key] with value, creating the key only when the
        value is non-empty; returns the cluster dict."""
        if key in cluster.keys():
            cluster[key] += value
        elif value is not None and len(value) > 0:
            cluster[key] = value
        return cluster

    def _validate_release_label_ami_version(self, parsed_args):
        """Require exactly one of --release-label / --ami-version."""
        if parsed_args.ami_version is not None and \
                parsed_args.release_label is not None:
            raise exceptions.MutualExclusiveOptionError(
                option1="--ami-version",
                option2="--release-label")

        if parsed_args.ami_version is None and \
                parsed_args.release_label is None:
            raise exceptions.RequiredOptionsError(
                option1="--ami-version",
                option2="--release-label")

    # Checks if the applications required by steps are specified
    # using the --applications option.
    def _validate_required_applications(self, parsed_args):
        """Raise MissingApplicationsError when steps (or an HBase restore)
        need an application that --applications did not include."""
        specified_apps = set([])
        if parsed_args.applications is not None:
            specified_apps = \
                set([app['Name'].lower() for app in parsed_args.applications])

        missing_apps = self._get_missing_applications_for_steps(specified_apps,
                                                                parsed_args)
        # Check for HBase.
        if parsed_args.restore_from_hbase_backup is not None:
            if constants.HBASE not in specified_apps:
                missing_apps.add(constants.HBASE.title())

        if len(missing_apps) != 0:
            raise exceptions.MissingApplicationsError(
                applications=missing_apps)

    def _get_missing_applications_for_steps(self, specified_apps, parsed_args):
        """Return the set of application names that step types require but
        were not specified via --applications."""
        allowed_app_steps = set([constants.HIVE, constants.PIG,
                                 constants.IMPALA])
        missing_apps = set([])
        if parsed_args.steps is not None:
            for step in parsed_args.steps:
                # Every candidate app is already missing; nothing more to
                # learn from the remaining steps.
                if len(missing_apps) == len(allowed_app_steps):
                    break
                step_type = step.get('Type')

                if step_type is not None:
                    step_type = step_type.lower()
                    if step_type in allowed_app_steps and \
                            step_type not in specified_apps:
                        missing_apps.add(step['Type'].title())
        return missing_apps

    def _filter_configurations_in_special_cases(self, configurations,
                                                parsed_args, parsed_configs):
        """Drop role/profile configurations when default roles are used."""
        if parsed_args.use_default_roles:
            configurations = [x for x in configurations
                              if x.name != 'service_role' and
                              x.name != 'instance_profile']
        return configurations

    def _handle_emrfs_parameters(self, cluster, emrfs_args, release_label):
        """Translate --emrfs args into either an emrfs-site configuration
        (release-label clusters) or bootstrap actions (AMI clusters)."""
        if release_label:
            self.validate_no_emrfs_configuration(cluster)
            # NOTE(review): 'confiuration' matches the helper's actual
            # (misspelled) name in emrfsutils — confirm before renaming.
            emrfs_configuration = emrfsutils.build_emrfs_confiuration(
                emrfs_args)

            self._update_cluster_dict(
                cluster=cluster, key='Configurations',
                value=[emrfs_configuration])
        else:
            emrfs_ba_config_list = emrfsutils.build_bootstrap_action_configs(
                self.region, emrfs_args)
            self._update_cluster_dict(
                cluster=cluster, key='BootstrapActions',
                value=emrfs_ba_config_list)

    def validate_no_emrfs_configuration(self, cluster):
        """Raise DuplicateEmrFsConfigurationError when an emrfs-site
        classification is already present in the cluster configurations."""
        if 'Configurations' in cluster:
            for config in cluster['Configurations']:
                if config is not None and \
                        config.get('Classification') == constants.EMRFS_SITE:
                    raise exceptions.DuplicateEmrFsConfigurationError
class CreateCluster(BasicCommand):
    """Implements ``aws emr create-cluster``.

    Translates the parsed command-line arguments into an EMR
    ``RunJobFlow`` request, submits it, and displays the id of the
    newly created cluster.
    """
    NAME = 'create-cluster'
    DESCRIPTION = (
        'Creates and starts running an EMR cluster.\n'
        '\nQuick start:\n'
        '\naws emr create-cluster --ami-version <ami-version> --instance-type'
        ' <instance-type> [--instance-count <instance-count>]\n')
    ARG_TABLE = [{
        'name': 'ami-version',
        'help_text': helptext.AMI_VERSION,
        'required': True
    }, {
        'name': 'instance-groups',
        'schema': argumentschema.INSTANCE_GROUPS_SCHEMA,
        'help_text': helptext.INSTANCE_GROUPS
    }, {
        'name': 'instance-type',
        'help_text': helptext.INSTANCE_TYPE
    }, {
        'name': 'instance-count',
        'help_text': helptext.INSTANCE_COUNT
    }, {
        'name': 'auto-terminate',
        'action': 'store_true',
        'group_name': 'auto_terminate',
        'help_text': helptext.AUTO_TERMINATE
    }, {
        'name': 'no-auto-terminate',
        'action': 'store_true',
        'group_name': 'auto_terminate'
    }, {
        'name': 'name',
        'default': 'Development Cluster',
        'help_text': helptext.CLUSTER_NAME
    }, {
        'name': 'log-uri',
        'help_text': helptext.LOG_URI
    }, {
        'name': 'service-role',
        'help_text': helptext.SERVICE_ROLE
    }, {
        'name': 'use-default-roles',
        'action': 'store_true',
        'help_text': helptext.USE_DEFAULT_ROLES
    }, {
        'name': 'ec2-attributes',
        'help_text': helptext.EC2_ATTRIBUTES,
        'schema': argumentschema.EC2_ATTRIBUTES_SCHEMA
    }, {
        'name': 'termination-protected',
        'action': 'store_true',
        'group_name': 'termination_protected',
        'help_text': helptext.TERMINATION_PROTECTED
    }, {
        'name': 'no-termination-protected',
        'action': 'store_true',
        'group_name': 'termination_protected'
    }, {
        'name': 'visible-to-all-users',
        'action': 'store_true',
        'group_name': 'visibility',
        'help_text': helptext.VISIBILITY
    }, {
        'name': 'no-visible-to-all-users',
        'action': 'store_true',
        'group_name': 'visibility'
    }, {
        'name': 'enable-debugging',
        'action': 'store_true',
        'group_name': 'debug',
        'help_text': helptext.DEBUGGING
    }, {
        'name': 'no-enable-debugging',
        'action': 'store_true',
        'group_name': 'debug'
    }, {
        'name': 'tags',
        'nargs': '+',
        'help_text': helptext.TAGS
    }, {
        'name': 'bootstrap-actions',
        'help_text': helptext.BOOTSTRAP_ACTIONS,
        'schema': argumentschema.BOOTSTRAP_ACTIONS_SCHEMA
    }, {
        'name': 'applications',
        'help_text': helptext.APPLICATIONS,
        'schema': argumentschema.APPLICATIONS_SCHEMA,
        'default': defaultconfig.APPLICATIONS
    }, {
        'name': 'emrfs',
        'help_text': helptext.EMR_FS,
        'schema': argumentschema.EMR_FS_SCHEMA
    }, {
        'name': 'steps',
        'schema': argumentschema.STEPS_SCHEMA,
        'help_text': helptext.STEPS
    }, {
        'name': 'additional-info',
        'help_text': helptext.ADDITIONAL_INFO
    }, {
        'name': 'restore-from-hbase-backup',
        'schema': argumentschema.HBASE_RESTORE_FROM_BACKUP_SCHEMA,
        'help_text': helptext.RESTORE_FROM_HBASE
    }]
    SYNOPSIS = BasicCommand.FROM_FILE('emr', 'create-cluster-synopsis.rst')
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'create-cluster-examples.rst')

    def _run_main(self, parsed_args, parsed_globals):
        """Build the RunJobFlow request from parsed args, call the service,
        and display the resulting cluster id.

        Returns 0 on success; raises on any validation failure.
        """
        emr = self._session.get_service('emr')
        params = {}
        params['Name'] = parsed_args.name

        instances_config = {}
        instances_config['InstanceGroups'] = \
            instancegroupsutils.validate_and_build_instance_groups(
                instance_groups=parsed_args.instance_groups,
                instance_type=parsed_args.instance_type,
                instance_count=parsed_args.instance_count)

        # Raw string: the original '\d?\..*' relied on a deprecated
        # non-raw escape sequence. The pattern itself is unchanged.
        is_valid_ami_version = re.match(r'\d?\..*', parsed_args.ami_version)
        if is_valid_ami_version is None:
            raise exceptions.InvalidAmiVersionError(
                ami_version=parsed_args.ami_version)
        params['AmiVersion'] = parsed_args.ami_version
        emrutils.apply_dict(params, 'AdditionalInfo',
                            parsed_args.additional_info)
        emrutils.apply_dict(params, 'LogUri', parsed_args.log_uri)
        if parsed_args.use_default_roles is True:
            parsed_args.service_role = EMR_ROLE_NAME
            if parsed_args.ec2_attributes is None:
                parsed_args.ec2_attributes = {}
            parsed_args.ec2_attributes['InstanceProfile'] = EC2_ROLE_NAME

        emrutils.apply_dict(params, 'ServiceRole', parsed_args.service_role)

        # When neither termination flag is supplied the cluster keeps
        # running, i.e. --no-auto-terminate is the default.
        if (parsed_args.no_auto_terminate is False
                and parsed_args.auto_terminate is False):
            parsed_args.no_auto_terminate = True

        instances_config['KeepJobFlowAliveWhenNoSteps'] = \
            emrutils.apply_boolean_options(
                parsed_args.no_auto_terminate,
                '--no-auto-terminate',
                parsed_args.auto_terminate,
                '--auto-terminate')

        instances_config['TerminationProtected'] = \
            emrutils.apply_boolean_options(
                parsed_args.termination_protected,
                '--termination-protected',
                parsed_args.no_termination_protected,
                '--no-termination-protected')

        # Visibility defaults to true when neither flag is supplied.
        if (parsed_args.visible_to_all_users is False
                and parsed_args.no_visible_to_all_users is False):
            parsed_args.visible_to_all_users = True

        params['VisibleToAllUsers'] = \
            emrutils.apply_boolean_options(
                parsed_args.visible_to_all_users,
                '--visible-to-all-users',
                parsed_args.no_visible_to_all_users,
                '--no-visible-to-all-users')

        params['Tags'] = emrutils.parse_tags(parsed_args.tags)
        params['Instances'] = instances_config

        if parsed_args.ec2_attributes is not None:
            self._build_ec2_attributes(cluster=params,
                                       parsed_attrs=parsed_args.ec2_attributes)

        debugging_enabled = emrutils.apply_boolean_options(
            parsed_args.enable_debugging, '--enable-debugging',
            parsed_args.no_enable_debugging, '--no-enable-debugging')

        # The debugging step stores its state under the log URI, so one
        # must be present when debugging is turned on.
        if parsed_args.log_uri is None and debugging_enabled is True:
            raise exceptions.LogUriError

        if debugging_enabled is True:
            self._update_cluster_dict(
                cluster=params,
                key='Steps',
                value=[self._build_enable_debugging(parsed_globals)])

        if parsed_args.applications is not None:
            app_list, ba_list, step_list = applicationutils.build_applications(
                session=self._session,
                parsed_applications=parsed_args.applications,
                parsed_globals=parsed_globals,
                ami_version=params['AmiVersion'])
            self._update_cluster_dict(params, 'NewSupportedProducts', app_list)
            self._update_cluster_dict(params, 'BootstrapActions', ba_list)
            self._update_cluster_dict(params, 'Steps', step_list)

        hbase_restore_config = parsed_args.restore_from_hbase_backup
        if hbase_restore_config is not None:
            args = hbaseutils.build_hbase_restore_from_backup_args(
                dir=hbase_restore_config.get('Dir'),
                backup_version=hbase_restore_config.get('BackupVersion'))
            step_config = emrutils.build_step(
                jar=constants.HBASE_JAR_PATH,
                name=constants.HBASE_RESTORE_STEP_NAME,
                action_on_failure=constants.CANCEL_AND_WAIT,
                args=args)
            self._update_cluster_dict(params, 'Steps', [step_config])

        if parsed_args.bootstrap_actions is not None:
            self._build_bootstrap_actions(
                cluster=params,
                parsed_boostrap_actions=parsed_args.bootstrap_actions)

        if parsed_args.emrfs is not None:
            emr_fs_ba_args = self._build_emr_fs_args(parsed_args.emrfs)
            emr_fs_ba_config = \
                emrutils.build_bootstrap_action(
                    path=emrutils.build_s3_link(
                        relative_path=constants.CONFIG_HADOOP_PATH,
                        region=parsed_globals.region),
                    name=constants.EMR_FS_BA_NAME,
                    args=emr_fs_ba_args)
            self._update_cluster_dict(cluster=params,
                                      key='BootstrapActions',
                                      value=[emr_fs_ba_config])

        if parsed_args.steps is not None:
            steps_list = steputils.build_step_config_list(
                parsed_step_list=parsed_args.steps,
                region=parsed_globals.region)
            self._update_cluster_dict(cluster=params,
                                      key='Steps',
                                      value=steps_list)

        self._validate_required_applications(parsed_args)

        run_job_flow = emr.get_operation('RunJobFlow')
        run_job_flow_response = emrutils.call(self._session, run_job_flow,
                                              params, parsed_globals.region,
                                              parsed_globals.endpoint_url,
                                              parsed_globals.verify_ssl)

        constructed_result = self._construct_result(run_job_flow_response[1])
        emrutils.display_response(self._session, run_job_flow,
                                  constructed_result, parsed_globals)

        return 0

    def _construct_result(self, run_job_flow_result):
        """Return ``{'ClusterId': ...}`` from a RunJobFlow result, or ``{}``
        when the result carries no JobFlowId.
        """
        job_flow_id = None
        if run_job_flow_result is not None:
            job_flow_id = run_job_flow_result.get('JobFlowId')

        if job_flow_id is not None:
            return {'ClusterId': job_flow_id}
        else:
            return {}

    def _build_ec2_attributes(self, cluster, parsed_attrs):
        """Copy the parsed --ec2-attributes values into the request dict.

        :raises exceptions.SubnetAndAzValidationError: when both
            AvailabilityZone and SubnetId are supplied; they are
            mutually exclusive.
        """
        keys = parsed_attrs.keys()
        instances = cluster['Instances']
        if 'AvailabilityZone' in keys and 'SubnetId' in keys:
            raise exceptions.SubnetAndAzValidationError

        emrutils.apply_params(src_params=parsed_attrs,
                              src_key='KeyName',
                              dest_params=instances,
                              dest_key='Ec2KeyName')
        emrutils.apply_params(src_params=parsed_attrs,
                              src_key='SubnetId',
                              dest_params=instances,
                              dest_key='Ec2SubnetId')

        if 'AvailabilityZone' in keys:
            instances['Placement'] = dict()
            emrutils.apply_params(src_params=parsed_attrs,
                                  src_key='AvailabilityZone',
                                  dest_params=instances['Placement'],
                                  dest_key='AvailabilityZone')

        emrutils.apply_params(src_params=parsed_attrs,
                              src_key='InstanceProfile',
                              dest_params=cluster,
                              dest_key='JobFlowRole')

        emrutils.apply(params=cluster, key='Instances', value=instances)

        return cluster

    def _build_bootstrap_actions(self, cluster, parsed_boostrap_actions):
        """Append the parsed --bootstrap-actions entries to the cluster.

        Raises ValueError when the combined count would exceed the
        service limit. (The parameter name retains its historical
        misspelling for compatibility with existing keyword callers.)
        """
        cluster_ba_list = cluster.get('BootstrapActions')
        if cluster_ba_list is None:
            cluster_ba_list = []

        bootstrap_actions = []
        if len(cluster_ba_list) + len(parsed_boostrap_actions) \
                > constants.MAX_BOOTSTRAP_ACTION_NUMBER:
            raise ValueError('aws: error: maximum number of '
                             'bootstrap actions for a cluster exceeded.')

        for ba in parsed_boostrap_actions:
            ba_config = {}
            if ba.get('Name') is not None:
                ba_config['Name'] = ba.get('Name')
            else:
                ba_config['Name'] = constants.BOOTSTRAP_ACTION_NAME
            script_arg_config = {}
            emrutils.apply_params(src_params=ba,
                                  src_key='Path',
                                  dest_params=script_arg_config,
                                  dest_key='Path')
            emrutils.apply_params(src_params=ba,
                                  src_key='Args',
                                  dest_params=script_arg_config,
                                  dest_key='Args')
            emrutils.apply(params=ba_config,
                           key='ScriptBootstrapAction',
                           value=script_arg_config)
            bootstrap_actions.append(ba_config)

        result = cluster_ba_list + bootstrap_actions
        if result:
            cluster['BootstrapActions'] = result

        return cluster

    def _build_enable_debugging(self, parsed_globals):
        """Return the step config that turns on EMR debugging."""
        return emrutils.build_step(
            name=constants.DEBUGGING_NAME,
            action_on_failure=constants.TERMINATE_CLUSTER,
            jar=emrutils.get_script_runner(),
            args=[
                emrutils.build_s3_link(relative_path=constants.DEBUGGING_PATH,
                                       region=parsed_globals.region)
            ])

    def _update_cluster_dict(self, cluster, key, value):
        """Append *value* to cluster[key], creating the key when *value*
        is a non-empty sequence. Returns the (mutated) cluster dict.
        """
        if key in cluster:
            cluster[key] += value
        elif value is not None and len(value) > 0:
            cluster[key] = value
        return cluster

    def _validate_required_applications(self, parsed_args):
        """Check that the applications required by steps (and by an HBase
        restore, if requested) are specified using the --applications
        option; raise MissingApplicationsError otherwise.
        """
        specified_apps = set()
        if parsed_args.applications is not None:
            specified_apps = \
                {app['Name'].lower() for app in parsed_args.applications}

        missing_apps = self._get_missing_applications_for_steps(
            specified_apps, parsed_args)
        # Check for HBase.
        if parsed_args.restore_from_hbase_backup is not None:
            if constants.HBASE not in specified_apps:
                missing_apps.add(constants.HBASE.title())

        if missing_apps:
            raise exceptions.MissingApplicationsError(
                applications=missing_apps)

    def _get_missing_applications_for_steps(self, specified_apps, parsed_args):
        """Return the set of application names (title-cased) that the
        parsed steps require but --applications did not include.
        """
        allowed_app_steps = {constants.HIVE, constants.PIG, constants.IMPALA}
        missing_apps = set()
        if parsed_args.steps is not None:
            for step in parsed_args.steps:
                # Every checkable application is already missing; stop early.
                if len(missing_apps) == len(allowed_app_steps):
                    break
                step_type = step.get('Type')

                if step_type is not None:
                    step_type = step_type.lower()
                    if step_type in allowed_app_steps and \
                            step_type not in specified_apps:
                        missing_apps.add(step['Type'].title())
        return missing_apps

    def _build_emr_fs_args(self, parsed_emr_fs):
        """Translate the parsed --emrfs map into the key=value argument
        list consumed by the EMRFS bootstrap action.
        """
        args = []
        if parsed_emr_fs.get('Consistent') is not None:
            args.append(constants.EMR_FS_BA_ARG_KEY)
            args.append(constants.EMR_FS_CONSISTENT_KEY + '=' +
                        str(parsed_emr_fs.get('Consistent')).lower())

        if parsed_emr_fs.get('SSE') is not None:
            args.append(constants.EMR_FS_BA_ARG_KEY)
            args.append(constants.EMR_FS_SSE_KEY + '=' +
                        str(parsed_emr_fs.get('SSE')).lower())

        if parsed_emr_fs.get('RetryCount') is not None:
            args.append(constants.EMR_FS_BA_ARG_KEY)
            args.append(constants.EMR_FS_RETRY_COUNT_KEY + '=' +
                        str(parsed_emr_fs.get('RetryCount')))

        if parsed_emr_fs.get('RetryPeriod') is not None:
            args.append(constants.EMR_FS_BA_ARG_KEY)
            args.append(constants.EMR_FS_RETRY_PERIOD_KEY + '=' +
                        str(parsed_emr_fs.get('RetryPeriod')))

        if parsed_emr_fs.get('Args') is not None:
            for arg in parsed_emr_fs.get('Args'):
                args.append(constants.EMR_FS_BA_ARG_KEY)
                args.append(arg)

        return args
Beispiel #25
0
class PackageCommand(BasicCommand):
    """Implements ``aws cloudformation package``.

    Uploads local artifacts referenced by the template to S3 and writes
    out a copy of the template whose references point at the uploaded
    locations.
    """

    MSG_PACKAGED_TEMPLATE_WRITTEN = (
        "Successfully packaged artifacts and wrote output template "
        "to file {output_file_name}."
        "\n"
        "Execute the following command to deploy the packaged template"
        "\n"
        "aws cloudformation deploy --template-file {output_file_path} "
        "--stack-name <YOUR STACK NAME>"
        "\n")

    NAME = "package"

    DESCRIPTION = BasicCommand.FROM_FILE("cloudformation",
                                         "_package_description.rst")

    ARG_TABLE = [{
        'name': 'template-file',
        'required': True,
        'help_text': ('The path where your AWS CloudFormation'
                      ' template is located.')
    }, {
        'name': 's3-bucket',
        'required': True,
        'help_text': ('The name of the S3 bucket where this command uploads'
                      ' the artifacts that are referenced in your template.')
    }, {
        'name': 's3-prefix',
        'help_text': ('A prefix name that the command adds to the'
                      ' artifacts\' name when it uploads them to the S3 bucket.'
                      ' The prefix name is a path name (folder name) for'
                      ' the S3 bucket.')
    }, {
        'name': 'kms-key-id',
        'help_text': ('The ID of an AWS KMS key that the command uses'
                      ' to encrypt artifacts that are at rest in the S3 bucket.')
    }, {
        "name": "output-template-file",
        "help_text": ("The path to the file where the command writes the"
                      " output AWS CloudFormation template. If you don't specify"
                      " a path, the command writes the template to the standard"
                      " output.")
    }, {
        "name": "use-json",
        "action": "store_true",
        "help_text": ("Indicates whether to use JSON as the format for the output AWS"
                      " CloudFormation template. YAML is used by default.")
    }, {
        "name": "force-upload",
        "action": "store_true",
        "help_text": ('Indicates whether to override existing files in the S3 bucket.'
                      ' Specify this flag to upload artifacts even if they '
                      ' match existing artifacts in the S3 bucket.')
    }, {
        "name": "metadata",
        "cli_type_name": "map",
        "schema": {
            "type": "map",
            "key": {"type": "string"},
            "value": {"type": "string"}
        },
        "help_text": ("A map of metadata to attach to *ALL* the artifacts that"
                      " are referenced in your template.")
    }]

    def _run_main(self, parsed_args, parsed_globals):
        """Package the template and emit the transformed copy.

        Validates the template path, wires up the S3 uploader, exports
        the template (uploading artifacts as a side effect), and writes
        the result to the requested destination. Returns 0.
        """
        client = self._session.create_client(
            "s3",
            config=Config(signature_version='s3v4'),
            region_name=parsed_globals.region,
            verify=parsed_globals.verify_ssl)

        template_path = parsed_args.template_file
        if not os.path.isfile(template_path):
            raise exceptions.InvalidTemplatePathError(
                template_path=template_path)

        self.s3_uploader = S3Uploader(client, parsed_args.s3_bucket,
                                      parsed_args.s3_prefix,
                                      parsed_args.kms_key_id,
                                      parsed_args.force_upload)
        # Attach the caller-supplied metadata to every uploaded artifact.
        self.s3_uploader.artifact_metadata = parsed_args.metadata

        output_file = parsed_args.output_template_file
        exported_str = self._export(template_path, parsed_args.use_json)

        sys.stdout.write("\n")
        self.write_output(output_file, exported_str)

        if output_file:
            sys.stdout.write(self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
                output_file_name=output_file,
                output_file_path=os.path.abspath(output_file)))

        sys.stdout.flush()
        return 0

    def _export(self, template_path, use_json):
        """Export the template (uploading referenced artifacts) and
        return it serialized as JSON or YAML text.
        """
        exported = Template(template_path, os.getcwd(),
                            self.s3_uploader).export()
        if use_json:
            return json.dumps(exported, indent=4, ensure_ascii=False)
        return yaml_dump(exported)

    def write_output(self, output_file_name, data):
        """Write *data* to the named file, or to stdout when no file
        name is given.
        """
        if output_file_name is None:
            sys.stdout.write(data)
        else:
            with open(output_file_name, "w") as out:
                out.write(data)
Beispiel #26
0
class DeployCommand(BasicCommand):
    """Implements ``aws cloudformation deploy``.

    Creates a change set for the stack from a local template and, unless
    ``--no-execute-changeset`` is given, executes it and waits for the
    stack operation to finish.
    """

    MSG_NO_EXECUTE_CHANGESET = \
        ("Changeset created successfully. Run the following command to "
         "review changes:"
         "\n"
         "aws cloudformation describe-change-set --change-set-name "
         "{changeset_id}"
         "\n")

    MSG_EXECUTE_SUCCESS = "Successfully created/updated stack - {stack_name}\n"

    PARAMETER_OVERRIDE_CMD = "parameter-overrides"

    NAME = 'deploy'
    DESCRIPTION = BasicCommand.FROM_FILE("cloudformation",
                                         "_deploy_description.rst")

    ARG_TABLE = [
        {
            'name': 'template-file',
            'required': True,
            'help_text': (
                'The path where your AWS CloudFormation'
                ' template is located.'
            )
        },
        {
            'name': 'stack-name',
            'action': 'store',
            'required': True,
            'help_text': (
                'The name of the AWS CloudFormation stack you\'re deploying to.'
                ' If you specify an existing stack, the command updates the'
                ' stack. If you specify a new stack, the command creates it.'
            )
        },
        {
            'name': PARAMETER_OVERRIDE_CMD,
            'action': 'store',
            'required': False,
            'schema': {
                'type': 'array',
                'items': {
                    'type': 'string'
                }
            },
            'default': [],
            'help_text': (
                'A list of parameter structures that specify input parameters'
                ' for your stack template. If you\'re updating a stack and you'
                ' don\'t specify a parameter, the command uses the stack\'s'
                ' existing value. For new stacks, you must specify'
                ' parameters that don\'t have a default value.'
                ' Syntax: ParameterKey1=ParameterValue1'
                ' ParameterKey2=ParameterValue2 ...'
            )
        },
        {
            'name': 'capabilities',
            'action': 'store',
            'required': False,
            'schema': {
                'type': 'array',
                'items': {
                    'type': 'string',
                    'enum': [
                        'CAPABILITY_IAM',
                        'CAPABILITY_NAMED_IAM'
                    ]
                }
            },
            'default': [],
            'help_text': (
                'A list of capabilities that you must specify before AWS'
                ' Cloudformation can create certain stacks. Some stack'
                ' templates might include resources that can affect'
                ' permissions in your AWS account, for example, by creating'
                ' new AWS Identity and Access Management (IAM) users. For'
                ' those stacks, you must explicitly acknowledge their'
                ' capabilities by specifying this parameter. '
                ' The only valid values are CAPABILITY_IAM and'
                ' CAPABILITY_NAMED_IAM. If you have IAM resources, you can'
                ' specify either capability. If you have IAM resources with'
                ' custom names, you must specify CAPABILITY_NAMED_IAM. If you'
                ' don\'t specify this parameter, this action returns an'
                ' InsufficientCapabilities error.'
            )

        },
        {
            'name': 'no-execute-changeset',
            'action': 'store_false',
            'dest': 'execute_changeset',
            'required': False,
            'help_text': (
                'Indicates whether to execute the change set. Specify this'
                ' flag if you want to view your stack changes before'
                ' executing the change set. The command creates an'
                ' AWS CloudFormation change set and then exits without'
                ' executing the change set. After you view the change set,'
                ' execute it to implement your changes.'
            )
        },
        {
            'name': 'role-arn',
            'required': False,
            'help_text': (
                'The Amazon Resource Name (ARN) of an AWS Identity and Access '
                'Management (IAM) role that AWS CloudFormation assumes when '
                'executing the change set.'
            )
        },
        {
            'name': 'notification-arns',
            'required': False,
            'schema': {
                'type': 'array',
                'items': {
                    'type': 'string'
                }
            },
            'help_text': (
                'Amazon Simple Notification Service topic Amazon Resource Names'
                ' (ARNs) that AWS CloudFormation associates with the stack.'
            )
        },
        {
            'name': 'fail-on-empty-changeset',
            'required': False,
            'action': 'store_true',
            'group_name': 'fail-on-empty-changeset',
            'dest': 'fail_on_empty_changeset',
            'default': True,
            'help_text': (
                'Specify if the CLI should return a non-zero exit code if '
                'there are no changes to be made to the stack. The default '
                'behavior is to return a non-zero exit code.'
            )
        },
        {
            'name': 'no-fail-on-empty-changeset',
            'required': False,
            'action': 'store_false',
            'group_name': 'fail-on-empty-changeset',
            'dest': 'fail_on_empty_changeset',
            'default': True,
            'help_text': (
                'Causes the CLI to return an exit code of 0 if there are no '
                'changes to be made to the stack.'
            )
        }
    ]

    def _run_main(self, parsed_args, parsed_globals):
        """Read the template, merge parameter overrides, and deploy.

        Returns the exit code produced by :meth:`deploy`.
        """
        cloudformation_client = \
            self._session.create_client(
                    'cloudformation', region_name=parsed_globals.region,
                    endpoint_url=parsed_globals.endpoint_url,
                    verify=parsed_globals.verify_ssl)

        template_path = parsed_args.template_file
        if not os.path.isfile(template_path):
            raise exceptions.InvalidTemplatePathError(
                    template_path=template_path)

        # Parse parameters
        with open(template_path, "r") as handle:
            template_str = handle.read()

        stack_name = parsed_args.stack_name
        parameter_overrides = self.parse_parameter_arg(
                parsed_args.parameter_overrides)

        template_dict = yaml_parse(template_str)

        parameters = self.merge_parameters(template_dict, parameter_overrides)

        deployer = Deployer(cloudformation_client)
        return self.deploy(deployer, stack_name, template_str,
                           parameters, parsed_args.capabilities,
                           parsed_args.execute_changeset, parsed_args.role_arn,
                           parsed_args.notification_arns,
                           parsed_args.fail_on_empty_changeset)

    def deploy(self, deployer, stack_name, template_str,
               parameters, capabilities, execute_changeset, role_arn,
               notification_arns, fail_on_empty_changeset=True):
        """Create a change set and execute it when requested.

        :param deployer: Deployer wrapper around the CloudFormation client.
        :param execute_changeset: when falsy, only create the change set
            and print the command needed to review it.
        :param fail_on_empty_changeset: when False, an empty change set is
            reported as a no-op and 0 is returned instead of re-raising.
        :return: 0 on success.
        """
        try:
            result = deployer.create_and_wait_for_changeset(
                    stack_name=stack_name,
                    cfn_template=template_str,
                    parameter_values=parameters,
                    capabilities=capabilities,
                    role_arn=role_arn,
                    notification_arns=notification_arns)
        except exceptions.ChangeEmptyError as ex:
            if fail_on_empty_changeset:
                raise
            # Best-effort mode: surface the message but exit cleanly.
            write_exception(ex, outfile=get_stdout_text_writer())
            return 0

        if execute_changeset:
            deployer.execute_changeset(result.changeset_id, stack_name)
            deployer.wait_for_execute(stack_name, result.changeset_type)
            sys.stdout.write(self.MSG_EXECUTE_SUCCESS.format(
                    stack_name=stack_name))
        else:
            sys.stdout.write(self.MSG_NO_EXECUTE_CHANGESET.format(
                    changeset_id=result.changeset_id))

        sys.stdout.flush()
        return 0

    def merge_parameters(self, template_dict, parameter_overrides):
        """
        CloudFormation CreateChangeset requires a value for every parameter
        from the template, either specifying a new value or use previous value.
        For convenience, this method will accept new parameter values and
        generates a dict of all parameters in a format that ChangeSet API
        will accept

        :param template_dict: the parsed template; its "Parameters" section
            (when present and a dict) drives the output.
        :param parameter_overrides: dict of new values keyed by parameter name.
        :return: list of ParameterKey/ParameterValue (or UsePreviousValue)
            structures for the ChangeSet API.
        """
        parameter_values = []

        if not isinstance(template_dict.get("Parameters", None), dict):
            return parameter_values

        for key, value in template_dict["Parameters"].items():

            obj = {
                "ParameterKey": key
            }

            if key in parameter_overrides:
                obj["ParameterValue"] = parameter_overrides[key]
            else:
                obj["UsePreviousValue"] = True

            parameter_values.append(obj)

        return parameter_values

    def parse_parameter_arg(self, parameter_arg):
        """Convert ``Key=Value`` strings into a dict.

        :raises exceptions.InvalidParameterOverrideArgumentError: when an
            entry contains no ``=`` separator.
        """
        result = {}
        for data in parameter_arg:

            # Split at first '=' from left
            key_value_pair = data.split("=", 1)

            if len(key_value_pair) != 2:
                raise exceptions.InvalidParameterOverrideArgumentError(
                        argname=self.PARAMETER_OVERRIDE_CMD,
                        value=key_value_pair)

            result[key_value_pair[0]] = key_value_pair[1]

        return result
Beispiel #27
0
class UpdateKubeconfigCommand(BasicCommand):
    """Create or update a kubeconfig entry for an EKS cluster."""

    NAME = 'update-kubeconfig'

    DESCRIPTION = BasicCommand.FROM_FILE('eks', 'update-kubeconfig',
                                         '_description.rst')

    ARG_TABLE = [
        {
            'name': 'name',
            'help_text': ("The name of the cluster for which "
                          "to create a kubeconfig entry. "
                          "This cluster must exist in your account and in the "
                          "specified or configured default Region "
                          "for your AWS CLI installation."),
            'required': True,
        },
        {
            'name': 'kubeconfig',
            'help_text': ("Optionally specify a kubeconfig file to append "
                          "with your configuration. "
                          "By default, the configuration is written to the "
                          "first file path in the KUBECONFIG "
                          "environment variable (if it is set) "
                          "or the default kubeconfig path (.kube/config) "
                          "in your home directory."),
            'required': False,
        },
        {
            'name': 'role-arn',
            'help_text': ("To assume a role for cluster authentication, "
                          "specify an IAM role ARN with this option. "
                          "For example, if you created a cluster "
                          "while assuming an IAM role, "
                          "then you must also assume that role to "
                          "connect to the cluster the first time."),
            'required': False,
        },
        {
            'name': 'dry-run',
            'action': 'store_true',
            'default': False,
            'help_text': ("Print the merged kubeconfig to stdout instead of "
                          "writing it to the specified file."),
            'required': False,
        },
        {
            'name': 'verbose',
            'action': 'store_true',
            'default': False,
            'help_text': ("Print more detailed output "
                          "when writing to the kubeconfig file, "
                          "including the appended entries."),
        },
        {
            'name': 'alias',
            'help_text': ("Alias for the cluster context name. "
                          "Defaults to match cluster ARN."),
            'required': False,
        },
    ]

    def _display_entries(self, entries):
        """ 
        Display entries in yaml format

        :param entries: a list of OrderedDicts to be printed
        :type entries: list
        """
        uni_print("Entries:\n\n")
        for item in entries:
            uni_print(ordered_yaml_dump(item))
            uni_print("\n")

    def _run_main(self, parsed_args, parsed_globals):
        # Build the cluster and user entries from the EKS API.
        eks_client = EKSClient(self._session, parsed_args.name,
                               parsed_args.role_arn, parsed_globals)
        cluster_entry = eks_client.get_cluster_entry()
        user_entry = eks_client.get_user_entry()

        # Pick the kubeconfig file to modify (explicit arg, KUBECONFIG env
        # var, or the default path) and merge the new entries into it.
        selector = KubeconfigSelector(os.environ.get("KUBECONFIG", ""),
                                      parsed_args.kubeconfig)
        kubeconfig = selector.choose_kubeconfig(cluster_entry["name"])
        is_update = kubeconfig.has_cluster(cluster_entry["name"])
        context_entry = KubeconfigAppender().insert_cluster_user_pair(
            kubeconfig, cluster_entry, user_entry, parsed_args.alias)

        if parsed_args.dry_run:
            # Show the merged result without touching the file.
            uni_print(kubeconfig.dump_content())
        else:
            KubeconfigWriter().write_kubeconfig(kubeconfig)

            if is_update:
                uni_print("Updated context {0} in {1}\n".format(
                    context_entry["name"], kubeconfig.path))
            else:
                uni_print("Added new context {0} to {1}\n".format(
                    context_entry["name"], kubeconfig.path))

            if parsed_args.verbose:
                self._display_entries(
                    [context_entry, user_entry, cluster_entry])
Beispiel #28
0
class OpsWorksRegister(BasicCommand):
    NAME = "register"
    DESCRIPTION = textwrap.dedent("""
        Registers an EC2 instance or machine with AWS OpsWorks.

        Registering a machine using this command will install the AWS OpsWorks
        agent on the target machine and register it with an existing OpsWorks
        stack.
    """).strip()
    EXAMPLES = BasicCommand.FROM_FILE('opsworks/register.rst')

    ARG_TABLE = [
        {
            'name': 'stack-id',
            'required': True,
            'help_text':
            """A stack ID. The instance will be registered with the
                         given stack."""
        },
        {
            'name': 'infrastructure-class',
            'required': True,
            'choices': ['ec2', 'on-premises'],
            'help_text':
            """Specifies whether to register an EC2 instance (`ec2`)
                         or an on-premises instance (`on-premises`)."""
        },
        {
            'name': 'override-hostname',
            'dest': 'hostname',
            'help_text':
            """The instance hostname. If not provided, the current
                         hostname of the machine will be used."""
        },
        {
            'name': 'override-private-ip',
            'dest': 'private_ip',
            'help_text':
            """An IP address. If you set this parameter, the given IP
                         address will be used as the private IP address within
                         OpsWorks.  Otherwise the private IP address will be
                         determined automatically. Not to be used with EC2
                         instances."""
        },
        {
            'name': 'override-public-ip',
            'dest': 'public_ip',
            'help_text':
            """An IP address. If you set this parameter, the given IP
                         address will be used as the public IP address within
                         OpsWorks.  Otherwise the public IP address will be
                         determined automatically. Not to be used with EC2
                         instances."""
        },
        {
            'name': 'override-ssh',
            'dest': 'ssh',
            'help_text':
            """If you set this parameter, the given command will be
                         used to connect to the machine."""
        },
        {
            'name': 'ssh-username',
            'dest': 'username',
            'help_text':
            """If provided, this username will be used to connect to
                         the host."""
        },
        {
            'name': 'ssh-private-key',
            'dest': 'private_key',
            'help_text':
            """If provided, the given private key file will be used
                         to connect to the machine."""
        },
        {
            'name': 'local',
            'action': 'store_true',
            'help_text':
            """If given, instead of a remote machine, the local
                         machine will be imported. Cannot be used together
                         with `target`."""
        },
        {
            'name': 'target',
            'positional_arg': True,
            'nargs': '?',
            'synopsis': '[<target>]',
            'help_text':
            """Either the EC2 instance ID or the hostname of the
                         instance or machine to be registered with OpsWorks.
                         Cannot be used together with `--local`."""
        },
    ]

    def __init__(self, session):
        super(OpsWorksRegister, self).__init__(session)
        # These are populated step by step: retrieve_stack() fills the
        # stack/instance/provisioning data, determine_details() the rest.
        self._stack = None
        self._ec2_instance = None
        self._prov_params = None
        self._use_address = None
        self._use_hostname = None
        self._name_for_iam = None

    def _create_clients(self, args, parsed_globals):
        """
        Creates the IAM and OpsWorks clients used by this command, honoring
        any region/endpoint overrides from the global arguments.
        """
        endpoint_args = {}
        if 'region' in parsed_globals:
            endpoint_args['region_name'] = parsed_globals.region
        if 'endpoint_url' in parsed_globals:
            endpoint_args['endpoint_url'] = parsed_globals.endpoint_url
        self.iam = self._session.create_client('iam')
        self.opsworks = self._session.create_client('opsworks',
                                                    **endpoint_args)

    def _run_main(self, args, parsed_globals):
        # The individual steps below each validate and provide the state
        # the following step depends on; keep the order intact.
        self._create_clients(args, parsed_globals)

        self.prevalidate_arguments(args)
        self.retrieve_stack(args)
        self.validate_arguments(args)
        self.determine_details(args)
        self.create_iam_entities()
        self.setup_target_machine(args)

    def prevalidate_arguments(self, args):
        """
        Validates command line arguments before doing anything else.
        """
        if not args.target and not args.local:
            raise ValueError("One of target or --local is required.")
        elif args.target and args.local:
            raise ValueError(
                "Arguments target and --local are mutually exclusive.")

        # BUGFIX: on Python 3, ``sys.platform`` is 'linux' (no version
        # suffix), so the previous comparison against the Python 2 value
        # 'linux2' rejected every Linux host.  Accept any 'linux*' value.
        if args.local and not sys.platform.startswith('linux'):
            raise ValueError(
                "Non-Linux instances are not supported by AWS OpsWorks.")

        if args.ssh and (args.username or args.private_key):
            raise ValueError(
                "Argument --override-ssh cannot be used together with "
                "--ssh-username or --ssh-private-key.")

        if args.infrastructure_class == 'ec2':
            if args.private_ip:
                raise ValueError(
                    "--override-private-ip is not supported for EC2.")
            if args.public_ip:
                raise ValueError(
                    "--override-public-ip is not supported for EC2.")

        if args.hostname:
            if not HOSTNAME_RE.match(args.hostname):
                raise ValueError(
                    "Invalid hostname: '%s'. Hostnames must consist of "
                    "letters, digits and dashes only and must not start or "
                    "end with a dash." % args.hostname)

    def retrieve_stack(self, args):
        """
        Retrieves the stack from the API, thereby ensures that it exists.

        Provides `self._stack`, `self._prov_params`, `self._use_address`, and
        `self._ec2_instance`.
        """

        LOG.debug("Retrieving stack and provisioning parameters")
        self._stack = self.opsworks.describe_stacks(
            StackIds=[args.stack_id])['Stacks'][0]
        self._prov_params = \
            self.opsworks.describe_stack_provisioning_parameters(
                StackId=self._stack['StackId']
            )

        if args.infrastructure_class == 'ec2' and not args.local:
            LOG.debug("Retrieving EC2 instance information")
            ec2 = self._session.create_client(
                'ec2', region_name=self._stack['Region'])

            # `desc_args` are arguments for the describe_instances call,
            # whereas `conditions` is a list of lambdas for further filtering
            # on the results of the call.
            desc_args = {'Filters': []}
            conditions = []

            # make sure that the platforms (EC2/VPC) and VPC IDs of the stack
            # and the instance match
            if 'VpcId' in self._stack:
                desc_args['Filters'].append({
                    'Name': 'vpc-id',
                    'Values': [self._stack['VpcId']]
                })
            else:
                # Cannot search for non-VPC instances directly, thus filter
                # afterwards
                conditions.append(lambda instance: 'VpcId' not in instance)

            # target may be an instance ID, an IP address, or a name
            if INSTANCE_ID_RE.match(args.target):
                desc_args['InstanceIds'] = [args.target]
            elif IP_ADDRESS_RE.match(args.target):
                # Cannot search for either private or public IP at the same
                # time, thus filter afterwards
                conditions.append(
                    lambda instance: instance.get('PrivateIpAddress') == args.
                    target or instance.get('PublicIpAddress') == args.target)
                # also use the given address to connect
                self._use_address = args.target
            else:
                # names are tags
                desc_args['Filters'].append({
                    'Name': 'tag:Name',
                    'Values': [args.target]
                })

            # find all matching instances
            instances = [
                i for r in ec2.describe_instances(**desc_args)['Reservations']
                for i in r['Instances'] if all(c(i) for c in conditions)
            ]

            if not instances:
                raise ValueError("Did not find any instance matching %s." %
                                 args.target)
            elif len(instances) > 1:
                raise ValueError("Found multiple instances matching %s: %s." %
                                 (args.target, ", ".join(i['InstanceId']
                                                         for i in instances)))

            self._ec2_instance = instances[0]

    def validate_arguments(self, args):
        """
        Validates command line arguments using the retrieved information.
        """

        if args.hostname:
            # Hostnames are case-insensitively unique within a stack.
            instances = self.opsworks.describe_instances(
                StackId=self._stack['StackId'])['Instances']
            if any(args.hostname.lower() == instance['Hostname']
                   for instance in instances):
                raise ValueError(
                    "Invalid hostname: '%s'. Hostnames must be unique within "
                    "a stack." % args.hostname)

        if args.infrastructure_class == 'ec2' and args.local:
            # make sure the regions match
            region = json.loads(urlopen(IDENTITY_URL).read())['region']
            if region != self._stack['Region']:
                raise ValueError(
                    "The stack's and the instance's region must match.")

    def determine_details(self, args):
        """
        Determine details (like the address to connect to and the hostname to
        use) from the given arguments and the retrieved data.

        Provides `self._use_address` (if not provided already),
        `self._use_hostname` and `self._name_for_iam`.
        """

        # determine the address to connect to
        if not self._use_address:
            if args.local:
                pass
            elif args.infrastructure_class == 'ec2':
                if 'PublicIpAddress' in self._ec2_instance:
                    self._use_address = self._ec2_instance['PublicIpAddress']
                elif 'PrivateIpAddress' in self._ec2_instance:
                    # LOG.warn is the deprecated alias of LOG.warning.
                    LOG.warning(
                        "Instance does not have a public IP address. Trying "
                        "to use the private address to connect.")
                    self._use_address = self._ec2_instance['PrivateIpAddress']
                else:
                    # Should never happen
                    raise ValueError(
                        "The instance does not seem to have an IP address.")
            elif args.infrastructure_class == 'on-premises':
                self._use_address = args.target

        # determine the names to use
        if args.hostname:
            self._use_hostname = args.hostname
            self._name_for_iam = args.hostname
        elif args.local:
            self._use_hostname = None
            self._name_for_iam = socket.gethostname()
        else:
            self._use_hostname = None
            self._name_for_iam = args.target

    def create_iam_entities(self):
        """
        Creates an IAM group, user and corresponding credentials.

        Provides `self.access_key`.
        """

        LOG.debug("Creating the IAM group if necessary")
        group_name = "OpsWorks-%s" % clean_for_iam(self._stack['StackId'])
        try:
            self.iam.create_group(GroupName=group_name, Path=IAM_PATH)
            LOG.debug("Created IAM group %s", group_name)
        except ClientError as e:
            # NOTE(review): assumes the imported ClientError exposes an
            # ``error_code`` attribute; with stock botocore this would be
            # ``e.response['Error']['Code']`` -- confirm against the import.
            if e.error_code == 'EntityAlreadyExists':
                LOG.debug("IAM group %s exists, continuing", group_name)
                # group already exists, good
                pass
            else:
                raise

        # create the IAM user, trying alternatives if it already exists
        LOG.debug("Creating an IAM user")
        # BUGFIX: the format string previously contained no ``%s``
        # placeholders, so applying ``%`` with a two-element tuple raised
        # TypeError.  Build the name from shortened stack and instance names.
        base_username = "%s-%s" % (shorten_name(
            clean_for_iam(self._stack['Name']),
            25), shorten_name(clean_for_iam(self._name_for_iam), 25))
        for try_ in range(20):
            username = base_username + ("+%s" % try_ if try_ else "")
            try:
                self.iam.create_user(UserName=username, Path=IAM_PATH)
            except ClientError as e:
                if e.error_code == 'EntityAlreadyExists':
                    LOG.debug(
                        "IAM user %s already exists, trying another name",
                        username)
                    # user already exists, try the next one
                    pass
                else:
                    raise
            else:
                LOG.debug("Created IAM user %s", username)
                break
        else:
            raise ValueError("Couldn't find an unused IAM user name.")

        LOG.debug("Adding the user to the group and attaching a policy")
        self.iam.add_user_to_group(GroupName=group_name, UserName=username)
        self.iam.put_user_policy(PolicyName=IAM_USER_POLICY_NAME,
                                 PolicyDocument=self._iam_policy_document(
                                     self._stack['Arn'],
                                     IAM_USER_POLICY_TIMEOUT),
                                 UserName=username)

        LOG.debug("Creating an access key")
        self.access_key = self.iam.create_access_key(
            UserName=username)['AccessKey']

    def setup_target_machine(self, args):
        """
        Setups the target machine by copying over the credentials and starting
        the installation process.
        """

        remote_script = REMOTE_SCRIPT % {
            'agent_installer_url':
            self._prov_params['AgentInstallerUrl'],
            'preconfig':
            self._to_ruby_yaml(self._pre_config_document(args)),
            'assets_download_bucket':
            self._prov_params['Parameters']['assets_download_bucket']
        }

        if args.local:
            LOG.debug("Running the installer locally")
            subprocess.check_call(["/bin/sh", "-c", remote_script])
        else:
            LOG.debug("Connecting to the target machine to run the installer.")
            self.ssh(args, remote_script)

    def ssh(self, args, remote_script):
        """
        Runs a (sh) script on a remote machine via SSH.
        """

        if sys.platform == 'win32':
            try:
                # plink on Windows takes the script from a file (-m); write
                # it to a temp file that is removed afterwards.
                script_file = tempfile.NamedTemporaryFile("wt", delete=False)
                script_file.write(remote_script)
                script_file.close()
                if args.ssh:
                    call = args.ssh
                else:
                    call = 'plink'
                    if args.username:
                        call += ' -l "%s"' % args.username
                    if args.private_key:
                        call += ' -i "%s"' % args.private_key
                    call += ' "%s"' % self._use_address
                    call += ' -m'
                call += ' "%s"' % script_file.name

                subprocess.check_call(call, shell=True)
            finally:
                os.remove(script_file.name)
        else:
            if args.ssh:
                call = shlex.split(str(args.ssh))
            else:
                call = ['ssh', '-tt']
                if args.username:
                    call.extend(['-l', args.username])
                if args.private_key:
                    call.extend(['-i', args.private_key])
                call.append(self._use_address)

            # Quote the script so it survives the remote shell.
            remote_call = ["/bin/sh", "-c", remote_script]
            call.append(" ".join(shlex_quote(word) for word in remote_call))
            subprocess.check_call(call)

    def _pre_config_document(self, args):
        """
        Builds the agent pre-configuration parameters (credentials, stack ID,
        optional hostname/IP overrides) passed to the installer script.
        """
        parameters = dict(access_key_id=self.access_key['AccessKeyId'],
                          secret_access_key=self.access_key['SecretAccessKey'],
                          stack_id=self._stack['StackId'],
                          **self._prov_params["Parameters"])
        if self._use_hostname:
            parameters['hostname'] = self._use_hostname
        if args.private_ip:
            parameters['private_ip'] = args.private_ip
        if args.public_ip:
            parameters['public_ip'] = args.public_ip
        parameters['import'] = args.infrastructure_class == 'ec2'
        LOG.debug("Using pre-config: %r", parameters)
        return parameters

    @staticmethod
    def _iam_policy_document(arn, timeout=None):
        """
        Returns a JSON IAM policy allowing opsworks:RegisterInstance on the
        given ARN, optionally time-limited via a DateLessThan condition.
        """
        statement = {
            "Action": "opsworks:RegisterInstance",
            "Effect": "Allow",
            "Resource": arn,
        }
        if timeout is not None:
            valid_until = datetime.datetime.utcnow() + timeout
            statement["Condition"] = {
                "DateLessThan": {
                    "aws:CurrentTime":
                    valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
                }
            }
        policy_document = {"Statement": [statement], "Version": "2012-10-17"}
        return json.dumps(policy_document)

    @staticmethod
    def _to_ruby_yaml(parameters):
        """Renders the parameters as Ruby-style YAML (symbol keys)."""
        return "\n".join(":%s: %s" % (k, json.dumps(v))
                         for k, v in sorted(parameters.items()))
class CreateDefaultRoles(Command):
    NAME = "create-default-roles"
    DESCRIPTION = ('Creates the default IAM role ' + EC2_ROLE_NAME + ' and ' +
                   EMR_ROLE_NAME + ' which can be used when'
                   ' creating the cluster using the create-cluster command.\n'
                   '\nIf you do not have a Service Role and Instance Profile '
                   'variable set for your create-cluster command in the AWS '
                   'CLI config file, create-default-roles will automatically '
                   'set the values for these variables with these default '
                   'roles. If you have already set a value for Service Role '
                   'or Instance Profile, create-default-roles will not '
                   'automatically set the defaults for these variables in the '
                   'AWS CLI config file. You can view settings for variables '
                   'in the config file using the "aws configure get" command.'
                   '\n')
    ARG_TABLE = [{
        'name':
        'iam-endpoint',
        'no_paramfile':
        True,
        'help_text':
        '<p>The IAM endpoint to call for creating the roles.'
        ' This is optional and should only be specified when a'
        ' custom endpoint should be called for IAM operations'
        '.</p>'
    }]
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'create-default-roles.rst')

    def _run_main_command(self, parsed_args, parsed_globals):
        """
        Creates (if missing) the default EC2 role, EC2 instance profile and
        EMR service role, updates the CLI config with them, and displays the
        created entities.
        """
        ec2_result = None
        emr_result = None
        self.iam_endpoint_url = parsed_args.iam_endpoint

        self._check_for_iam_endpoint(self.region, self.iam_endpoint_url)
        # The EMR endpoint is needed to resolve the service principal used
        # in the assume-role policy documents.
        self.emr_endpoint_url = \
            self._session.create_client(
                'emr',
                region_name=self.region,
                endpoint_url=parsed_globals.endpoint_url,
                verify=parsed_globals.verify_ssl).meta.endpoint_url

        LOG.debug('elasticmapreduce endpoint used for resolving'
                  ' service principal: ' + self.emr_endpoint_url)

        # Check if the default EC2 Role for EMR exists.
        role_name = EC2_ROLE_NAME
        if self._check_if_role_exists(role_name, parsed_globals):
            LOG.debug('Role ' + role_name + ' exists.')
        else:
            LOG.debug('Role ' + role_name + ' does not exist.'
                      ' Creating default role for EC2: ' + role_name)
            ec2_result = self._create_role_with_role_policy(
                role_name, role_name, EC2,
                emrutils.dict_to_string(EC2_ROLE_POLICY), parsed_globals)

        # Check if the default EC2 Instance Profile for EMR exists.
        instance_profile_name = EC2_ROLE_NAME
        if self._check_if_instance_profile_exists(instance_profile_name,
                                                  parsed_globals):
            LOG.debug('Instance Profile ' + instance_profile_name + ' exists.')
        else:
            # BUGFIX: the log message was missing the leading space, which
            # fused the profile name with "does not exist".
            LOG.debug('Instance Profile ' + instance_profile_name +
                      ' does not exist. Creating default Instance Profile ' +
                      instance_profile_name)
            self._create_instance_profile_with_role(instance_profile_name,
                                                    instance_profile_name,
                                                    parsed_globals)

        # Check if the default EMR Role exists.
        role_name = EMR_ROLE_NAME
        if self._check_if_role_exists(role_name, parsed_globals):
            LOG.debug('Role ' + role_name + ' exists.')
        else:
            LOG.debug('Role ' + role_name + ' does not exist.'
                      ' Creating default role for EMR: ' + role_name)
            emr_result = self._create_role_with_role_policy(
                role_name, role_name, EMR,
                emrutils.dict_to_string(EMR_ROLE_POLICY), parsed_globals)

        configutils.update_roles(self._session)

        emrutils.display_response(
            self._session, 'create_role',
            self._construct_result(ec2_result, emr_result), parsed_globals)

        return 0

    def _check_for_iam_endpoint(self, region, iam_endpoint):
        """
        Raises UnknownIamEndpointError when the region is unknown to the
        client and no explicit IAM endpoint was supplied.
        """
        try:
            self._session.create_client('emr', region)
        except botocore.exceptions.UnknownEndpointError:
            if iam_endpoint is None:
                raise exceptions.UnknownIamEndpointError(region=region)

    def _construct_result(self, ec2_response, emr_response):
        """Builds the combined display structure for both created roles."""
        result = []
        self._construct_role_and_role_policy_structure(result, ec2_response,
                                                       EC2_ROLE_POLICY)
        self._construct_role_and_role_policy_structure(result, emr_response,
                                                       EMR_ROLE_POLICY)
        return result

    def _construct_role_and_role_policy_structure(self, result, response,
                                                  role_policy):
        """
        Appends a {'Role', 'RolePolicy'} entry to `result` when the
        CreateRole response is present.  (Parameter renamed from `list`,
        which shadowed the builtin.)
        """
        if response is not None and response[1] is not None:
            result.append({
                'Role': response[1]['Role'],
                'RolePolicy': role_policy
            })
            return result

    def _check_if_role_exists(self, role_name, parsed_globals):
        """Returns True if the IAM role exists, False if GetRole reports it
        cannot be found; re-raises any other error."""
        parameters = {'RoleName': role_name}
        try:
            self._call_iam_operation('GetRole', parameters, parsed_globals)
        except Exception as e:
            # NOTE(review): matching on the error message text is brittle;
            # an error-code check would be more robust if available.
            role_not_found_msg = 'The role with name ' + role_name +\
                                 ' cannot be found'
            # BUGFIX: Python 3 exceptions have no `.message` attribute;
            # use str(e) instead.
            if role_not_found_msg in str(e):
                # No role error.
                return False
            else:
                # Some other error: bare raise preserves the traceback.
                raise

        return True

    def _check_if_instance_profile_exists(self, instance_profile_name,
                                          parsed_globals):
        """Returns True if the instance profile exists, False if
        GetInstanceProfile reports it cannot be found; re-raises otherwise."""
        parameters = {'InstanceProfileName': instance_profile_name}
        try:
            self._call_iam_operation('GetInstanceProfile', parameters,
                                     parsed_globals)
        except Exception as e:
            profile_not_found_msg = 'Instance Profile ' +\
                                    instance_profile_name +\
                                    ' cannot be found.'
            # BUGFIX: Python 3 exceptions have no `.message` attribute;
            # use str(e) instead.
            if profile_not_found_msg in str(e):
                # No instance profile error.
                return False
            else:
                # Some other error: bare raise preserves the traceback.
                raise

        return True

    def _create_role_with_role_policy(self, role_name, policy_name,
                                      service_name, policy_document,
                                      parsed_globals):
        """
        Creates an IAM role with an assume-role policy for the given service
        principal and attaches the supplied inline policy to it.

        :return: the CreateRole response
        """
        service_principal = get_service_principal(service_name,
                                                  self.emr_endpoint_url)
        LOG.debug(service_principal)

        parameters = {'RoleName': role_name}
        _assume_role_policy = \
            emrutils.dict_to_string(assume_role_policy(service_principal))
        parameters['AssumeRolePolicyDocument'] = _assume_role_policy
        create_role_response = self._call_iam_operation(
            'CreateRole', parameters, parsed_globals)

        parameters = {}
        parameters['PolicyDocument'] = policy_document
        parameters['PolicyName'] = policy_name
        parameters['RoleName'] = role_name
        self._call_iam_operation('PutRolePolicy', parameters, parsed_globals)

        return create_role_response

    def _create_instance_profile_with_role(self, instance_profile_name,
                                           role_name, parsed_globals):
        """Creates an instance profile and adds the given role to it."""
        # Creating an Instance Profile
        parameters = {'InstanceProfileName': instance_profile_name}
        self._call_iam_operation('CreateInstanceProfile', parameters,
                                 parsed_globals)
        # Adding the role to the Instance Profile
        parameters = {}
        parameters['InstanceProfileName'] = instance_profile_name
        parameters['RoleName'] = role_name
        self._call_iam_operation('AddRoleToInstanceProfile', parameters,
                                 parsed_globals)

    def _call_iam_operation(self, operation_name, parameters, parsed_globals):
        """Invokes the named IAM operation with a freshly-created client."""
        client = self._session.create_client('iam', self.region,
                                             self.iam_endpoint_url,
                                             parsed_globals.verify_ssl)
        return getattr(client, xform_name(operation_name))(**parameters)
class CreateDefaultRoles(BasicCommand):
    """Command that creates the default EMR IAM role and instance profile."""
    NAME = "create-default-roles"
    DESCRIPTION = ('Creates the default IAM role ' + EC2_ROLE_NAME +
                   ' which can be used when'
                   ' creating the cluster using the create-cluster command.')
    # Single optional argument: an override endpoint for the IAM calls.
    ARG_TABLE = [{
        'name': 'iam-endpoint',
        'no_paramfile': True,
        'help_text': ('<p>The IAM endpoint to call for creating the roles.'
                      ' This is optional and should only be specified when a'
                      ' custom endpoint should be called for IAM operations'
                      '.</p>')
    }]
    EXAMPLES = BasicCommand.FROM_FILE('emr', 'create-default-roles.rst')

    def _run_main(self, parsed_args, parsed_globals):
        """Create the default EMR EC2 role and instance profile if missing.

        Checks whether the default role and the default instance profile
        already exist, creates whichever is absent, displays the CreateRole
        result, and returns 0.
        """
        ec2_result = None
        # NOTE(review): get_service()/get_operation() is the legacy botocore
        # API; this code predates Session.create_client().
        self.iam = self._session.get_service('iam')
        self.iam_endpoint_url = parsed_args.iam_endpoint
        region = self._get_region(parsed_globals)

        # Fail fast when no IAM endpoint can be resolved for this region.
        self._check_for_iam_endpoint(region, self.iam_endpoint_url)
        # The EMR endpoint host determines the service principal placed in
        # the roles' trust policies.
        self.emr_endpoint_url = \
            self._session.get_service('emr').get_endpoint(
                region_name=parsed_globals.region,
                endpoint_url=parsed_globals.endpoint_url,
                verify=parsed_globals.verify_ssl).host

        LOG.debug('elasticmapreduce endpoint used for resolving'
                  ' service principal: ' + self.emr_endpoint_url)

        # Check if the default EC2 Role for EMR exists.
        role_name = EC2_ROLE_NAME
        if self._check_if_role_exists(role_name, parsed_globals):
            LOG.debug('Role ' + role_name + ' exists.')
        else:
            LOG.debug('Role ' + role_name + ' does not exist.'
                      ' Creating default role ' + role_name)
            ec2_result = self._create_role_with_role_policy(
                role_name, role_name, constants.EC2,
                emrutils.dict_to_string(EC2_ROLE_POLICY), parsed_globals)

        # Check if the default EC2 Instance Profile for EMR exists.
        # The instance profile deliberately shares the role's name.
        instance_profile_name = EC2_ROLE_NAME
        if self._check_if_instance_profile_exists(instance_profile_name,
                                                  parsed_globals):
            LOG.debug('Instance Profile ' + instance_profile_name + ' exists.')
        else:
            # Fixed log message: a leading space was missing before
            # 'does not exist', producing e.g. 'Profile Xdoes not exist'.
            LOG.debug('Instance Profile ' + instance_profile_name +
                      ' does not exist. Creating default Instance Profile ' +
                      instance_profile_name)
            self._create_instance_profile_with_role(instance_profile_name,
                                                    instance_profile_name,
                                                    parsed_globals)

        # Render the result (an empty list when nothing was created).
        emrutils.display_response(
            self._session,
            self._session.get_service('iam').get_operation('CreateRole'),
            self._construct_result(ec2_result), parsed_globals)

        return 0

    def _check_for_iam_endpoint(self, region, iam_endpoint):
        """Raise UnknownIamEndpointError when the region has no known EMR
        endpoint and no explicit IAM endpoint was supplied."""
        try:
            emr_service = self._session.get_service('emr')
            emr_service.get_endpoint(region)
        except botocore.exceptions.UnknownEndpointError:
            # An explicit --iam-endpoint makes the unknown region tolerable.
            if iam_endpoint is not None:
                return
            raise exceptions.UnknownIamEndpointError(region=region)

    def _construct_result(self, ec2_response):
        """Build the display payload describing any role that was created."""
        created = []
        self._construct_role_and_role_policy_structure(
            created, ec2_response, EC2_ROLE_POLICY)
        return created

    def _construct_role_and_role_policy_structure(self, result_list, response,
                                                  role_policy):
        """Append a ``{'Role', 'RolePolicy'}`` entry for a CreateRole result.

        ``response`` is presumably the (http_response, parsed) pair returned
        by the legacy operation call, so ``response[1]`` is the parsed body
        — TODO confirm against the caller.

        :param result_list: list mutated in place (renamed from ``list`` to
            avoid shadowing the builtin; only called positionally here).
        :param response: CreateRole call result, or None if no role was made.
        :param role_policy: policy document associated with the role.
        :return: ``result_list`` when an entry was appended, otherwise None.
        """
        if response is not None and response[1] is not None:
            result_list.append({
                'Role': response[1]['Role'],
                'RolePolicy': role_policy
            })
            return result_list

    def _get_region(self, parsed_globals):
        """Resolve the region: the --region global overrides session config."""
        config_region = self._session.get_config_variable('region')
        override = parsed_globals.region
        return config_region if override is None else override

    def _check_if_role_exists(self, role_name, parsed_globals):
        """Return True if the IAM role exists.

        Returns False when GetRole fails with a role-not-found message;
        any other error from GetRole is re-raised unchanged.
        """
        parameters = {'RoleName': role_name}
        try:
            self._call_iam_operation('GetRole', parameters, parsed_globals)
        except Exception as e:
            role_not_found_msg = 'The role with name ' + role_name +\
                                 ' cannot be found'
            # str(e) works on both Python 2 and 3; e.message was removed
            # in Python 3 (deprecated since PEP 352).
            if role_not_found_msg in str(e):
                # No role error.
                return False
            else:
                # Some other error: bare raise preserves the traceback
                # (``raise e`` would reset it).
                raise

        return True

    def _check_if_instance_profile_exists(self, instance_profile_name,
                                          parsed_globals):
        """Return True if the IAM instance profile exists.

        Returns False when GetInstanceProfile fails with a not-found
        message; any other error is re-raised unchanged.
        """
        parameters = {'InstanceProfileName': instance_profile_name}
        try:
            self._call_iam_operation('GetInstanceProfile', parameters,
                                     parsed_globals)
        except Exception as e:
            profile_not_found_msg = 'Instance Profile ' +\
                                    instance_profile_name +\
                                    ' cannot be found.'
            # str(e) works on both Python 2 and 3; e.message was removed
            # in Python 3 (deprecated since PEP 352).
            if profile_not_found_msg in str(e):
                # No instance profile error.
                return False
            else:
                # Some other error: bare raise preserves the traceback
                # (``raise e`` would reset it).
                raise

        return True

    def _create_role_with_role_policy(self, role_name, policy_name,
                                      service_name, policy_document,
                                      parsed_globals):
        """Create an IAM role assumable by ``service_name`` and attach an
        inline policy to it.

        :param role_name: name of the IAM role to create.
        :param policy_name: name for the inline role policy.
        :param service_name: service whose principal goes in the trust policy.
        :param policy_document: JSON policy document string to attach.
        :param parsed_globals: parsed global CLI arguments.
        :return: the CreateRole operation response.
        """
        principal = get_service_principal(service_name, self.emr_endpoint_url)
        LOG.debug(principal)

        # Create the role with a trust policy for the service principal.
        trust_policy = emrutils.dict_to_string(assume_role_policy(principal))
        create_role_response = self._call_iam_operation(
            'CreateRole',
            {'RoleName': role_name,
             'AssumeRolePolicyDocument': trust_policy},
            parsed_globals)

        # Attach the inline role policy to the freshly created role.
        self._call_iam_operation(
            'PutRolePolicy',
            {'PolicyDocument': policy_document,
             'PolicyName': policy_name,
             'RoleName': role_name},
            parsed_globals)

        return create_role_response

    def _create_instance_profile_with_role(self, instance_profile_name,
                                           role_name, parsed_globals):
        """Create an instance profile and add ``role_name`` to it."""
        # Create the instance profile itself.
        self._call_iam_operation(
            'CreateInstanceProfile',
            {'InstanceProfileName': instance_profile_name},
            parsed_globals)
        # Attach the role to the new instance profile.
        self._call_iam_operation(
            'AddRoleToInstanceProfile',
            {'InstanceProfileName': instance_profile_name,
             'RoleName': role_name},
            parsed_globals)

    def _call_iam_operation(self, operation_name, parameters, parsed_globals):
        """Invoke one IAM operation through the legacy operation interface."""
        operation = self.iam.get_operation(operation_name)
        return emrutils.call(
            self._session, operation, parameters,
            parsed_globals.region, self.iam_endpoint_url,
            parsed_globals.verify_ssl)