Code example #1
class EnvironmentValidator:
    """
    Houses generic environment validation logic that may branch based on the current
    configuration, such as Bastion / OKTA / GOOGLE / etc.
    """
    def __init__(self, defaults: CLIDefaults):
        self._defaults = defaults
        self._out = Output(self._defaults.colors_enabled)
        self._utils = Utils(self._defaults.colors_enabled)

    def validate_all(self):
        if self._defaults.provider == Provider.AWS_BASTION:
            self.validate_environment_variables()

        return self

    def validate_environment_variables(self):
        # If figgy is operating in a TEST environment, ignore this.
        if os.environ.get(FIGGY_TESTS_ENABLED):
            return self

        invalid_vars = []

        for env_var in RESTRICTED_ENV_VARS:
            if os.environ.get(env_var):
                invalid_vars.append(env_var)

        if invalid_vars:
            self._out.error_h2(
                f'AWS Environment overrides detected.\n\n {invalid_vars} is currently set in your '
                f'environment. AWS_* prefixed environment variables can interfere with figgy '
                f'operations and may cause unpredictable behavior. Please unset all AWS_ prefixed ENV '
                f'variables before continuing.')

            self._out.print(
                '\nTo unset the problematic variables, please run the following command(s) in your shell: '
                '\n')
            for var in invalid_vars:
                self._out.print(f'unset {var}')

            self._utils.error_exit("Invalid environment detected, exiting.")

        return self
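A minimal usage sketch for the validator above; the CLIDefaults instance is assumed to come from the CLI's existing bootstrap, and its construction is not shown in this example:

# Hypothetical sketch: invoking EnvironmentValidator during CLI startup.
# `defaults` is assumed to be a fully populated CLIDefaults; building it is out of scope here.
def check_environment(defaults: CLIDefaults) -> None:
    # validate_all() only inspects RESTRICTED_ENV_VARS when the AWS_BASTION provider is active.
    EnvironmentValidator(defaults).validate_all()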
Code example #2
class HelpFactory(Factory):
    def __init__(self, command: CliCommand, context: HelpContext, figgy_context: FiggyContext):
        self._command = command
        self._context = context
        self._figgy_context = figgy_context
        self._options = context.options
        self._utils = Utils(False)
        self._setup: FiggySetup = FiggySetup(self._figgy_context)

    def instance(self):
        return self.get(self._command)

    def get(self, command: CliCommand):
        if configure in self._options:
            return Configure(self._context, self._setup)
        elif command == login or command == sandbox:
            return Login(self._context, self._setup, self._figgy_context)
        else:
            self._utils.error_exit(f"{command.name} is not a valid command. You must select from: "
                                   f"[{CollectionUtils.printable_set(help_commands)}]. Try using --help for more info.")
Code example #3
File: ui_factory.py (project: figtools/figgy-cli)
class UIFactory(Factory):
    def __init__(self, command: CliCommand,
                 context: CommandContext,
                 session_manager: SessionManager,
                 figgy_context: FiggyContext):
        self._command = command
        self._context = context
        self._utils = Utils(False)
        self._setup: FiggySetup = FiggySetup(figgy_context)
        self._session_manager = session_manager

    def instance(self):
        return self.get(self._command)

    def get(self, command: CliCommand):
        if command == ui:
            return UI(self._context, self._session_manager)
        else:
            self._utils.error_exit(f"{command.name} is not a valid command. You must select from: "
                                   f"[{CollectionUtils.printable_set(ui_commands)}]. Try using --help for more info.")
Code example #4
class MaintenanceFactory(Factory):
    def __init__(self, command: CliCommand, context: MaintenanceContext,
                 figgy_context: FiggyContext, config: Optional[ConfigService]):
        self._command = command
        self._context = context
        self._figgy_context = figgy_context
        self._options = context.options
        self._utils = Utils(False)
        self._setup: FiggySetup = FiggySetup(self._figgy_context)
        self._config: Optional[ConfigService] = config

    def instance(self):
        return self.get(self._command)

    def get(self, command: CliCommand):
        if version in self._options:
            return Version(self._context, self._config)
        elif upgrade in self._options:
            return Upgrade(self._context, self._config)
        else:
            self._utils.error_exit(
                f"{command.name} is not a valid command. You must select from: "
                f"[{CollectionUtils.printable_set(help_commands)}]. Try using --help for more info."
            )
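The three factories above share one shape: capture context in the constructor, then dispatch in get(). A generic restatement of that shape follows; it is an illustrative stand-in, not the project's actual Factory base class, whose definition is not shown here:

from abc import ABC, abstractmethod

class CommandDispatcher(ABC):
    """Illustrative stand-in for the factory pattern above; the names here are hypothetical."""

    def __init__(self, command):
        self._command = command  # each concrete factory also captures its own context objects

    def instance(self):
        # instance() always delegates to get() with the command captured at construction time.
        return self.get(self._command)

    @abstractmethod
    def get(self, command):
        """Map a CliCommand to a concrete command object, or error-exit on an unknown command."""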
Code example #5
class SSOSessionProvider(SessionProvider, ABC):
    _MAX_ATTEMPTS = 5

    def __init__(self, defaults: CLIDefaults, context: FiggyContext):
        super().__init__(defaults, context)
        self._utils = Utils(defaults.colors_enabled)
        self._sts = boto3.client('sts')
        self._context = context
        keychain_enabled = defaults.extras.get(DISABLE_KEYRING) is not True
        vault = FiggyVault(keychain_enabled=keychain_enabled,
                           secrets_mgr=self._secrets_mgr)
        self._sts_cache: CacheManager = CacheManager(
            file_override=STS_SESSION_CACHE_PATH, vault=vault)
        self._saml_cache: CacheManager = CacheManager(
            file_override=SAML_SESSION_CACHE_PATH, vault=vault)

    @abstractmethod
    def get_assumable_roles(self) -> List[AssumableRole]:
        pass

    @abstractmethod
    def cleanup_session_cache(self):
        pass

    @abstractmethod
    def get_saml_assertion(self,
                           prompt: bool = False,
                           mfa: Optional[str] = None):
        pass

    def get_session(self,
                    env: GlobalEnvironment,
                    prompt: bool,
                    exit_on_fail=True,
                    mfa: Optional[str] = None) -> boto3.Session:
        """
        Creates a session in the specified ENV for the target role from a SAML assertion returned by SSO authentication.
        Args:
            env: GlobalEnvironment - The environment (role + account + region) to authenticate this session against.
            prompt: If set, do not use a cached session; generate new sessions for okta and mgmt instead.
            exit_on_fail: Exit the program if this session hydration fails.
            mfa: MFA token to use with the authentication attempt.

        Returns: Hydrated session for the role + account matching the provided GlobalEnvironment.
        """

        log.info(f"Getting session, was provided MFA: {mfa}")

        # Prevent multiple requests from differing threads all generating new sessions / authing at the same time.
        # Sessions are encrypted and cached in the lockbox, so we want to re-auth once, then read from the lockbox.
        # This cannot be an instance variable; it does not work properly even though there is only one instantiated
        # SSOSessionProvider.
        lock = FileLock(f'{SAML_SESSION_CACHE_PATH}-provider.lock')
        with lock:
            log.info(f"Got lock: {SAML_SESSION_CACHE_PATH}-provider.lock")
            role_arn = f"arn:aws:iam::{env.role.account_id}:role/{env.role.role.full_name}"
            principal_arn = f"arn:aws:iam::{env.role.account_id}:saml-provider/{env.role.provider_name}"
            forced = False
            log.info(
                f"Getting session for role: {role_arn} in env: {env.role.run_env.env} "
                f"with principal: {principal_arn}")
            attempts = 0
            while True:
                try:
                    if prompt and not forced:
                        forced = True
                        raise InvalidSessionError(
                            "Forcing new session due to prompt.")

                    # One role can create N sessions across N regions.
                    creds: FiggyAWSSession = self._sts_cache.get_val(
                        env.role.cache_key())
                    log.debug(
                        f"Got creds from cache: {creds} when searching for env: {env}"
                    )

                    if creds:
                        session = boto3.Session(
                            aws_access_key_id=creds.access_key,
                            aws_secret_access_key=creds.secret_key,
                            aws_session_token=creds.token,
                            region_name=env.region)

                        if creds.expires_soon() or not self._is_valid_session(session):
                            self._utils.validate(
                                attempts < self._MAX_ATTEMPTS,
                                f"Failed to authenticate with AWS after {attempts} attempts. Exiting."
                            )

                            attempts = attempts + 1
                            log.info(
                                "Invalid session detected in cache. Raising session error."
                            )
                            raise InvalidSessionError(
                                "Invalid Session Detected")

                        log.info("Valid SSO session returned from cache.")
                        return session
                    else:
                        raise InvalidSessionError(
                            "Forcing new session, cache is empty.")
                except (FileNotFoundError, JSONDecodeError, NoCredentialsError,
                        InvalidSessionError) as e:
                    log.info(f"SessionProvider -- got expected error: {e}")
                    try:
                        # Todo Remove requiring raw saml and instead work with b64 encoded saml?
                        try:
                            assertion: str = self._saml_cache.get_val_or_refresh(
                                SAML_ASSERTION_CACHE_KEY,
                                self.get_saml_assertion, (prompt, mfa),
                                max_age=SAML_ASSERTION_MAX_AGE)
                            encoded_assertion = base64.b64encode(
                                assertion.encode('utf-8')).decode('utf-8')
                            response = self._sts.assume_role_with_saml(
                                RoleArn=role_arn,
                                PrincipalArn=principal_arn,
                                SAMLAssertion=encoded_assertion,
                                DurationSeconds=3500)
                        except ClientError:
                            log.info(
                                "Refreshing SAML assertion, auth failed with cached or refreshed version."
                            )
                            assertion = self.get_saml_assertion(prompt,
                                                                mfa=mfa)
                            encoded_assertion = base64.b64encode(
                                assertion.encode('utf-8')).decode('utf-8')
                            response = self._sts.assume_role_with_saml(
                                RoleArn=role_arn,
                                PrincipalArn=principal_arn,
                                SAMLAssertion=encoded_assertion,
                                DurationSeconds=3500)

                        # response['Credentials']['Expiration'] = "cleared"
                        session = FiggyAWSSession(
                            **response.get('Credentials', {}))
                        self._saml_cache.write(SAML_ASSERTION_CACHE_KEY,
                                               assertion)
                        self._sts_cache.write(env.role.cache_key(), session)
                    except (ClientError, ParamValidationError) as e:
                        if isinstance(
                                e, ParamValidationError
                        ) or "AccessDenied" == e.response['Error']['Code']:
                            if exit_on_fail:
                                self._utils.error_exit(
                                    f"Error authenticating with AWS from SAML Assertion: {e}"
                                )
                        else:
                            if exit_on_fail:
                                print(e)
                                self._utils.error_exit(
                                    f"Error getting session for role: {role_arn} -- Are you sure you have permissions?"
                                )

                        raise e
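The retry logic in get_session above is spread across nested try/except blocks. The following is a simplified, hypothetical restatement of the cache-then-reauthenticate loop; the callables are illustrative stand-ins for the lockbox cache and the SAML/STS calls, not figgy APIs:

from typing import Callable, Optional

class InvalidSession(Exception):
    """Local stand-in for figgy's InvalidSessionError, used only in this sketch."""

def get_session_sketch(load_cached: Callable[[], Optional[object]],
                       is_valid: Callable[[object], bool],
                       reauthenticate: Callable[[], None],
                       prompt: bool = False,
                       max_attempts: int = 5):
    attempts = 0
    force_new = prompt
    while True:
        try:
            if force_new:
                force_new = False
                raise InvalidSession("Forcing new session due to prompt.")
            session = load_cached()          # read encrypted creds from the cached "lockbox"
            if session is None or not is_valid(session):
                attempts += 1
                if attempts >= max_attempts:
                    raise RuntimeError("Failed to authenticate with AWS after retries.")
                raise InvalidSession("Invalid or missing session in cache.")
            return session
        except InvalidSession:
            reauthenticate()                 # SAML assertion -> sts.assume_role_with_saml -> cache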
Code example #6
File: sync.py (project: figtools/figgy-cli)
class Sync(ConfigCommand):
    """
    Synchronizes local application configuration state, as defined in the figgy.json file, with the existing remote
    state in the targeted environment. Also configures replication for designated shared parameters in the
    figgy.json file.
    """
    def __init__(self, ssm_init: SsmDao, config_init: ConfigDao,
                 repl_dao: ReplicationDao, colors_enabled: bool,
                 context: ConfigContext, get: Get, put: Put):
        super().__init__(sync, colors_enabled, context)
        self._config = config_init
        self._ssm = ssm_init
        self._repl = repl_dao
        self._config_path = context.ci_config_path if context.ci_config_path else Utils.find_figgy_json()
        self._utils = Utils(colors_enabled)
        self._replication_only = context.replication_only
        self._errors_detected = False
        self.example = f"{self.c.fg_bl}{CLI_NAME} config {self.command_printable} " \
                       f"--env dev --config /path/to/config{self.c.rs}"
        self._get: Get = get
        self._put: Put = put
        self._FILE_PREFIX = "file://"
        self._out = Output(colors_enabled)

    def _input_config_values(self, config_keys: Set[str]) -> None:
        """
        Prompts the user for each of the passed-in config values that is missing from PS.
        :param config_keys: Set[str] - config values to prompt the user to add.
        """
        def validate_msg(ps_name: str):
            self._out.success(f"Name Validated: [[{ps_name}]]")
            return validate_msg

        count = 0
        for key in config_keys:
            try:
                if not self._get.get(key):
                    self._out.warn(
                        f"Fig: [[{key}]] missing from PS in environment: [[{self.run_env}]]."
                    )
                    self._put.put_param(key=key, display_hints=False)
                    count = count + 1
                else:
                    validate_msg(key)
            except ClientError:
                validate_msg(key)

        if count:
            self._out.success(
                f"[[{count}]] {'value' if count == 1 else 'values'} added successfully"
            )

    def _sync_keys(self, config_namespace: str, all_keys: Set):
        """
        Looks for stray parameters (keys) under the provided namespace and prints out information about
        parameters that exist in PS but are not defined in the figgy.json file.
        Args:
            config_namespace: Namespace to query PS under.
            all_keys: All keys that exist in figgy.json to compare against.
        """
        self._out.notify(f"Checking for stray config names.")

        # Find & Prune stray keys
        ps_keys = set(
            list(
                map(lambda x: x['Name'],
                    self._ssm.get_all_parameters([config_namespace]))))
        ps_only_keys = ps_keys.difference(all_keys)

        UNUSED_CONFIG_DETECTED = f"%%red%%The following Names were found in PS but are not referenced in your configurations. \n" \
                                 f"Use the %%rs%%%%blue%%`prune`%%rs%%%%red%% command to clean them up once all " \
                                 f"deployed application versions no longer use these configurations: %%rs%%"

        if len(ps_only_keys) > 0:
            self._out.warn(
                "The following Names were found in PS but are not referenced in your configurations. \n"
                "Use the [[prune]] command to clean them up once all deployed application versions "
                "no longer use these configurations.")

        for key in ps_only_keys:
            self._out.print(f"Unused Parameter: [[{key}]]")

        if not ps_only_keys:
            self._out.success(f"No stray configurations found.")

    def _sync_repl_configs(self,
                           config_repl: Dict,
                           namespace: str = None) -> None:
        """
        Syncs replication configs from a defined "replicate_figs" block parsed from either the figgy.json file
        or the data replication config json file.
        Args:
            config_repl: Dict of KV Pairs for a repl config. Source -> Dest
            namespace: Optional namespace. Parsed from destination if not supplied.
        """
        local_configs: List[ReplicationConfig] = ReplicationConfig.from_dict(
            conf=config_repl,
            type=ReplicationType(REPL_TYPE_APP),
            run_env=self.run_env,
            namespace=namespace)
        for l_cfg in local_configs:
            # Namespace will be missing for --replication-only syncs. Otherwise, with standard syncs, namespace is passed
            # as a parameter here.
            if not namespace:
                namespace = l_cfg.namespace

            if not l_cfg.destination.startswith(namespace):
                self._out.error(
                    f"Replication config [[{l_cfg.source} -> {l_cfg.destination}]] has a destination that "
                    f"is not in your service namespace: [[{namespace}]]. This is invalid."
                )
                self._errors_detected = True
                continue

            remote_cfg = self._repl.get_config_repl(l_cfg.destination)

            # Should never happen, except when someone manually deletes source / destination without going through CLI
            missing_from_ps = self.__get_param_encrypted(l_cfg.source) is None

            if not remote_cfg or remote_cfg != l_cfg or missing_from_ps:
                try:
                    if self._can_replicate_from(
                            l_cfg.source
                    ) and not remote_cfg or missing_from_ps:
                        self._repl.put_config_repl(l_cfg)
                        self._out.print(
                            f"[[Replication added:]] {l_cfg.source} -> {l_cfg.destination}"
                        )
                    elif self._can_replicate_from(l_cfg.source) and remote_cfg:
                        self._repl.put_config_repl(l_cfg)
                        self._out.notify(f"Replication updated.")
                        self._out.warn(
                            f"Removed: {remote_cfg.source} -> {remote_cfg.destination}"
                        )
                        self._out.success(
                            f"Added: {l_cfg.source} -> {l_cfg.destination}")
                    else:
                        self._errors_detected = True
                        # print(f"{self.c.fg_rd}You do not have permission to configure replication from source:"
                        #       f"{self.c.rs} {key}")
                except ClientError:
                    self._utils.validate(
                        False,
                        f"Error detected when attempting to store replication config "
                        f"for {l_cfg.destination}")
                    self._errors_detected = True
            else:
                self._out.success(
                    f"Replication Validated: [[{l_cfg.source} -> {l_cfg.destination}]]"
                )

    def _notify_of_data_repl_orphans(self, config_repl: Dict) -> None:
        """
        Notify user of detected stray replication configurations when using the --replication-only flag.
        :param config_repl: replication configuration block.
        """
        strays: Set[ReplicationConfig] = set()
        notify = False
        for repl in config_repl:
            namespace = self._utils.parse_namespace(config_repl[repl])
            remote_cfgs = self._repl.get_all_configs(namespace)

            if remote_cfgs:
                for cfg in remote_cfgs:
                    if cfg.source not in list(config_repl.keys()) \
                            and cfg.type == REPL_TYPE_APP \
                            and not cfg.source.startswith(shared_ns) \
                            and not cfg.source.startswith(self.context.defaults.service_ns):
                        strays.add(cfg)
                        notify = True

        for stray in strays:
            print(
                f"{self.c.fg_yl}stray replication mapping detected: {self.c.rs}"
                f" {self.c.fg_bl}{stray.source} -> {stray.destination}{self.c.rs}."
            )
        if notify:
            print(
                f"To prune stray replication configs, "
                f"delete the destination, THEN the source with the `figgy config delete` command"
            )

    def _sync_replication(self, config_repl: Dict, expected_destinations: Set,
                          namespace: str):
        """
        Calls sync_repl_configs which adds/removes repl configs. Then searches for stray configurations and notifies
        the user of detected stray configurations.
        Args:
            config_repl: Dict of KV Pairs for a repl config. Source -> Dest
            expected_destinations: expected replication destinations, as defined in merge key sources,
             or shared_figs
            namespace: Namespace to sync replication configs to. E.g. /app/demo-time/
        """

        self._out.notify(f"Validating replication for all parameters.")

        self._sync_repl_configs(config_repl, namespace=namespace)
        self._out.notify(f"\nChecking for stray replication configurations.")
        remote_cfgs = self._repl.get_all_configs(namespace)
        notify = True
        if remote_cfgs:
            for cfg in remote_cfgs:
                if cfg.source not in list(config_repl.keys()) \
                        and cfg.destination not in list(config_repl.values()) \
                        and cfg.destination not in expected_destinations \
                        and (isinstance(cfg.source, list)
                             or cfg.source.startswith(shared_ns) or cfg.source.startswith(
                            self.context.defaults.service_ns)):
                    print(
                        f"{self.c.fg_rd}Stray replication mapping detected: {self.c.rs}"
                        f" {self.c.fg_bl}{cfg.source} -> {cfg.destination}{self.c.rs}."
                    )
                    notify = False
        if notify:
            self._out.success(
                f"No stray replication configs found for: {namespace}")
        else:
            self._out.warn(f"{CLEANUP_REPLICA_ORPHANS}")

    def _validate_merge_keys(self, destination: str, sources: Union[List, str],
                             namespace: str) -> bool:
        """
        Validates merge key sources & destinations
        Args:
            destination: str -> Destination of merge key replication
            sources: List or Str -> Source(s) of this merge key
            namespace: application namespace
        """
        if not destination.startswith(namespace):
            print(
                f"{self.c.fg_rd}Merge config: {self.c.rs}{self.c.fg_bl}{destination}{self.c.rs}{self.c.fg_rd} has a "
                f"destination that is not in your service namespace: "
                f"{self.c.rs}{self.c.fg_bl}{namespace}{self.c.rs}{self.c.fg_rd}. This is invalid.{self.c.rs}"
            )
            self._errors_detected = True
            return False

        if isinstance(sources, list):
            for item in sources:
                if item.startswith(MERGE_KEY_PREFIX):
                    self._utils.validate(
                        item.replace(MERGE_KEY_PREFIX,
                                     "").startswith(namespace),
                        f"Source: {item} in merge config must begin with your namespace: {namespace}."
                    )
                    self._errors_detected = True
                    return False
        else:
            self._utils.validate(
                sources.startswith(namespace),
                f"Source {sources} in merge config must begin with your namespace: {namespace}"
            )
            self._errors_detected = True
            return False

        return True

    def _sync_merge_keys(self, config_merge: Dict, namespace: str) -> None:
        """
            Pushes merge key configs into replication config table.
        Args:
            config_merge: Dict of merge_parameters parsed from figcli.json file
            namespace: namespace for app
        """
        self._out.notify("Validating replication for all merge keys.")
        for key in config_merge:
            self._validate_merge_keys(key, config_merge[key], namespace)

            config = self._repl.get_config_repl(key)
            if not config or (config.source != config_merge[key]):
                try:
                    repl_config = ReplicationConfig(
                        destination=key,
                        run_env=self.run_env,
                        namespace=namespace,
                        source=config_merge[key],
                        type=ReplicationType(REPL_TYPE_MERGE))
                    self._repl.put_config_repl(repl_config)
                except ClientError:
                    self._utils.validate(
                        False,
                        f"Error detected when attempting to store replication config for {key}"
                    )
                    self._errors_detected = True
            else:
                self._out.success(
                    f"Merge key replication config validated: [[{key}]]")

    def _validate_expected_names(self, all_names: Set, repl_conf: Dict,
                                 merge_conf: Dict):
        self._out.notify(f"Validating shared keys exist.")
        print_resolution_message = False
        merged_confs = {**repl_conf, **merge_conf}
        for name in all_names:
            if self.__get_param_encrypted(name) is None:
                awaiting_repl = False
                for cnf in merged_confs:
                    if name == cnf or name in list(repl_conf.values()):
                        self._out.print(
                            f"\nConfig value [[{name}]] is a destination for replication, but doesn't exist"
                            f" yet. If you commit now your build could fail. This will auto-resolve itself if all of "
                            f"its dependencies exist. This will probably resolve itself in a few seconds. "
                            f"Try re-running sync.")
                        awaiting_repl = True
                        break

                if not awaiting_repl:
                    self._out.print(
                        f"Config value of [[{name}]] does not exist and is expected based on "
                        f"your defined configuration.")
                    print_resolution_message = True
                    self._errors_detected = True

        if print_resolution_message:
            self._out.error(f"{SHARED_NAME_RESOLUTION_MESSAGE}")
        else:
            self._out.success("Shared keys have been validated.")

    def _can_replicate_from(self, source: str):
        try:
            if self.__get_param_encrypted(source) is not None:
                return True
            else:
                self._out.warn(
                    f"Replication source: [[{source}]] is missing from ParameterStore. "
                    f"It must be added before config replication can be configured.\n"
                )
                self._input_config_values({source})
                return True
        except ClientError as e:
            denied = "AccessDeniedException" == e.response['Error']['Code']
            if denied and "AWSKMS; Status Code: 400;" in e.response['Error'][
                    'Message']:
                self._out.error(
                    f"You do not have access to decrypt the value of Name: [[{source}]]"
                )
            elif denied:
                self._out.error(
                    f"You do not have access to Parameter: [[{source}]]")
            else:
                raise
        return False

    def __get_param_encrypted(self, source: str) -> Optional[str]:
        try:
            return self._ssm.get_parameter_encrypted(source)
        except ClientError as e:
            denied = "AccessDeniedException" == e.response['Error']['Code']
            if denied and "AWSKMS; Status Code: 400;" in e.response['Error'][
                    'Message']:
                self._out.error(
                    f"You do not have access to decrypt the value of Name: [[{source}]]"
                )
                return None
            elif denied:
                self._utils.error_exit(
                    f"You do not have access to Parameter: {source}")
            else:
                raise

    def _validate_replication_config(self,
                                     config_repl: Dict,
                                     app_conf: bool = True):
        """
        Validates replication config blocks are valid / legal. Prevents people from setting up replication from
        disallowed namespaces, etc. Exits with error if invalid config is discovered.

        Args:
            config_repl: Dict of KV Pairs for a repl config. Source -> Dest
            app_conf: bool: T/F - True if this is an application config block in an application config (figgy.json).
                    False if other, which for now is only repl-configs for data teams.
        """
        for key in config_repl:
            if app_conf:
                self._utils.validate(
                    re.match(
                        f'^/shared/.*$|^{self.context.defaults.service_ns}/.*$',
                        key) is not None,
                    f"The SOURCE of your replication configs must begin with `/shared/` or "
                    f"`{self.context.defaults.service_ns}/`. "
                    f"{key} is non compliant.")

            self._utils.validate(
                re.match(f'^{self.context.defaults.service_ns}/.*$',
                         config_repl[key]) is not None,
                f"The DESTINATION of your replication configs must always begin with "
                f"`{self.context.defaults.service_ns}/`")

    def _find_missing_shared_figs(self, namespace: str, config_repl: Dict,
                                  shared_names: set, merge_conf: Dict):
        """
            Notifies the user if there is a parameter that has been shared into their namespace by an outside party
            but they have not added it to the `shared_figs` block of their figgy.json
        """
        all_repl_cfgs = self._repl.get_all_configs(namespace)
        for cfg in all_repl_cfgs:
            in_merge_conf = self._in_merge_value(cfg.destination, merge_conf)

            if cfg.destination not in shared_names and cfg.type == REPL_TYPE_APP \
                    and cfg.destination not in config_repl.values() and not in_merge_conf:
                print(
                    f"It appears that {self.c.fg_bl}{cfg.user}{self.c.rs} shared "
                    f"{self.c.fg_bl}{cfg.source}{self.c.rs} to {self.c.fg_bl}{cfg.destination}{self.c.rs} "
                    f"and you have not added {self.c.fg_bl}{cfg.destination}{self.c.rs} to the "
                    f"{self.c.fg_bl}{SHARED_KEY}{self.c.rs} section of your figgy.json. This is also not "
                    f"referenced in any defined merge parameter. Please add "
                    f"{self.c.fg_bl}{cfg.destination}{self.c.rs} to your figgy.json, or delete this parameter "
                    f"and the replication config with the prune command.")

    def _in_merge_value(self, dest: str, merge_conf: Dict):
        for key in merge_conf:
            value = merge_conf[key]
            # 'value' can be a list or a str, but the way 'in' operates, this works either way (duck typing).
            for suffix in merge_suffixes:
                if f"${'{'}{dest}{suffix}{'}'}" in value:
                    return True

        return False

    def _fill_repl_conf_variables(self, repl_conf: Dict) -> Dict:
        repl_copy = {}
        all_vars = []
        for key, val in repl_conf.items():
            all_vars = all_vars + re.findall(r'\${(\w+)}', key)
            all_vars = all_vars + re.findall(r'\${(\w+)}', val)

        all_vars = set(all_vars)
        if all_vars:
            print(
                f"{self.c.fg_bl}{len(all_vars)} variables detected in: {self.c.rs}{self.c.fg_yl}"
                f"{self._config_path}{self.c.rs}\n")

        template_vals = {}
        for var in all_vars:
            print(f"Template variable: {self.c.fg_bl}{var}{self.c.rs} found.")
            input_val = Input.input(
                f"Please input a value for {self.c.fg_bl}{var}{self.c.rs}: ",
                min_length=1)
            template_vals[var] = input_val

        for key, val in repl_conf.items():
            updated_key = key
            updated_val = val

            for template_key, template_val in template_vals.items():
                updated_key = updated_key.replace(f"${{{template_key}}}",
                                                  template_val)
                updated_val = updated_val.replace(f"${{{template_key}}}",
                                                  template_val)

            repl_copy[updated_key] = updated_val

        return repl_copy

    def run_ci_sync(self) -> None:
        """
            Orchestrates a standard `sync` command WITHOUT the `--replication-only` flag set.
        """
        # Validate & parse figgy.json
        config = self._utils.get_ci_config(self._config_path)
        shared_names = set(
            self._utils.get_config_key_safe(SHARED_KEY, config, default=[]))
        repl_conf = self._utils.get_config_key_safe(REPLICATION_KEY,
                                                    config,
                                                    default={})
        repl_from_conf = self._utils.get_config_key_safe(REPL_FROM_KEY,
                                                         config,
                                                         default={})
        merge_conf = self._utils.get_config_key_safe(MERGE_KEY,
                                                     config,
                                                     default={})
        config_keys = set(
            self._utils.get_config_key_safe(CONFIG_KEY, config, default=[]))
        namespace = self._utils.get_namespace(config)
        merge_keys = set(merge_conf.keys())
        all_keys = KeyUtils.find_all_expected_names(config_keys, shared_names,
                                                    merge_conf, repl_conf,
                                                    repl_from_conf, namespace)

        repl_conf = KeyUtils.merge_repl_and_repl_from_blocks(
            repl_conf, repl_from_conf, namespace)
        # Add missing config values
        self._out.notify(
            f"Validating all configuration keys exist in ParameterStore.")
        self._input_config_values(config_keys)

        # Sync keys between PS / Local config
        print()
        self._sync_keys(namespace, all_keys)

        print()

        self._find_missing_shared_figs(namespace, repl_conf, shared_names,
                                       merge_conf)

        # Disabling requirement (for now) of replication to be in /replicated path
        # print()
        self._validate_replication_config(repl_conf, app_conf=True)

        print()
        # sync replication config
        all_shared_keys = shared_names | set(merge_conf.keys())
        self._sync_replication(repl_conf, all_shared_keys, namespace)

        print()
        self._sync_merge_keys(merge_conf, namespace)

        print()
        # validate expected keys exist
        self._validate_expected_names(all_keys, repl_conf, merge_conf)

    def run_repl_sync(self) -> None:
        """
        Orchestrates sync when the user passes in the `--replication-only` flag.
        """
        self._utils.validate(
            os.path.exists(self._config_path),
            f"Path {self._config_path} is invalid. "
            f"That file does not exist.")
        repl_conf = self._utils.get_repl_config(self._config_path)

        repl_conf = self._fill_repl_conf_variables(repl_conf)
        self._validate_replication_config(repl_conf, app_conf=False)
        self._sync_repl_configs(repl_conf)
        self._notify_of_data_repl_orphans(repl_conf)

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        print()
        if self._replication_only:
            self.run_repl_sync()
        else:
            self.run_ci_sync()

        if self._errors_detected:
            self._out.error_h2(
                'Sync failed. Please address the outputted errors.')
        else:
            self._out.success_h2('Sync completed with no errors!')
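The ${var} templating performed by _fill_repl_conf_variables above can be exercised in isolation. A small sketch follows, with the interactive Input.input() prompt replaced by a fixed mapping; the helper name is hypothetical:

# Minimal sketch of the ${var} substitution used by _fill_repl_conf_variables above;
# the interactive prompt is replaced by a pre-built template_vals mapping.
def fill_template_vars(repl_conf: dict, template_vals: dict) -> dict:
    filled = {}
    for key, val in repl_conf.items():
        for name, value in template_vals.items():
            key = key.replace(f"${{{name}}}", value)
            val = val.replace(f"${{{name}}}", value)
        filled[key] = val
    return filled

# Example: fill_template_vars({'/shared/${env}/cert': '/app/${service}/cert'},
#                             {'env': 'dev', 'service': 'demo-time'})
# -> {'/shared/dev/cert': '/app/demo-time/cert'}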
Code example #7
File: restore.py (project: figtools/figgy-cli)
class Restore(ConfigCommand):
    def __init__(self, ssm_init: SsmDao, kms_init: KmsService,
                 config_init: ConfigDao, repl_dao: ReplicationDao,
                 audit_dao: AuditDao, cfg_view: RBACLimitedConfigView,
                 colors_enabled: bool, context: ConfigContext,
                 config_completer: WordCompleter, delete: Delete):
        super().__init__(restore, colors_enabled, context)
        self._config_context = context
        self._ssm = ssm_init
        self._kms = kms_init
        self._config = config_init
        self._repl = repl_dao
        self._audit = audit_dao
        self._cfg_view = cfg_view
        self._utils = Utils(colors_enabled)
        self._point_in_time = context.point_in_time
        self._config_completer = config_completer
        self._delete = delete
        self._out = Output(colors_enabled=colors_enabled)

    def _client_exception_msg(self, item: RestoreConfig, e: ClientError):
        if "AccessDeniedException" == e.response["Error"]["Code"]:
            self._out.error(
                f"\n\nYou do not have permissions to restore config at the path: [[{item.ps_name}]]"
            )
        else:
            self._out.error(
                f"Error message: [[{e.response['Error']['Message']}]]")

    def get_parameter_arn(self, parameter_name: str):
        account_id = self._ssm.get_parameter(ACCOUNT_ID_PATH)

        return f"arn:aws:ssm:us-east-1:{account_id}:parameter{parameter_name}"

    def _restore_param(self) -> None:
        """
        Allows the user to look up a parameter store entry's history from Dynamo so it can be reviewed and restored, if desired.
        """

        table_entries = []

        ps_name = prompt(f"Please input PS key to restore: ",
                         completer=self._config_completer)

        if self._is_replication_destination(ps_name):
            repl_conf = self._repl.get_config_repl(ps_name)
            self._print_cannot_restore_msg(repl_conf)
            exit(0)

        self._out.notify(
            f"\n\nAttempting to retrieve all restorable values of [[{ps_name}]]"
        )
        items: List[RestoreConfig] = self._audit.get_parameter_restore_details(
            ps_name)

        if len(items) == 0:
            self._out.warn(
                "No restorable values were found for this parameter.")
            return

        for i, item in enumerate(items):
            date = time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.localtime(item.ps_time / 1000))

            # we need to decrypt the value, if encrypted, in order to show it to the user
            if item.ps_key_id:
                item.ps_value = self._kms.decrypt_with_context(
                    item.ps_value,
                    {"PARAMETER_ARN": self.get_parameter_arn(item.ps_name)},
                )
            table_entries.append([i, date, item.ps_value, item.ps_user])

        self._out.print(
            tabulate(
                table_entries,
                headers=["Item #", "Time Created", "Value", "User"],
                tablefmt="grid",
                numalign="center",
                stralign="left",
            ))

        valid_options = [f'{x}' for x in range(0, len(items))]
        choice = int(
            Input.select("Select an item number to restore: ",
                         valid_options=valid_options))
        item = items[choice] if items[choice] else None

        restore = Input.y_n_input(
            f"Are you sure you want to restore item #{choice} and have it be the latest version? ",
            default_yes=False)

        if not restore:
            self._utils.warn_exit("Restore aborted.")

        key_id = None if item.ps_type == "String" else item.ps_key_id

        try:
            self._ssm.set_parameter(item.ps_name,
                                    item.ps_value,
                                    item.ps_description,
                                    item.ps_type,
                                    key_id=key_id)

            current_value = self._ssm.get_parameter(item.ps_name)
            if current_value == item.ps_value:
                self._out.success("Restore was successful")
            else:
                self._out.error(
                    "Latest version in parameter store doesn't match what we restored."
                )
                self._out.print(
                    f"Current value: [[{current_value}]].  Expected value: [[{item.ps_value}]]"
                )

        except ClientError as e:
            self._client_exception_msg(item, e)

    def _decrypt_if_applicable(self, entry: RestoreConfig) -> str:
        if entry.ps_type != "String":
            return self._kms.decrypt_with_context(
                entry.ps_value,
                {"PARAMETER_ARN": self.get_parameter_arn(entry.ps_name)})
        else:
            return entry.ps_value

    def _is_replication_destination(self, ps_name: str):
        return self._repl.get_config_repl(ps_name)

    def _restore_params_to_point_in_time(self):
        """
        Restores parameters as they were at a point in time provided by the user.
        Replays parameter history to that point-in-time so versioning remains intact.
        """

        repl_destinations = []
        ps_prefix = Input.input(
            f"Which parameter store prefix would you like to recursively restore? "
            f"(e.g., /app/demo-time): ",
            completer=self._config_completer)

        authed_nses = self._cfg_view.get_authorized_namespaces()
        valid_prefix = (
            [True for ns in authed_nses if ps_prefix.startswith(ns)]
            or [False])[0]
        self._utils.validate(
            valid_prefix,
            f"Selected namespace must begin with a 'Fig Tree' you have access to. "
            f"Such as: {authed_nses}")

        time_selected, time_converted = None, None
        try:
            time_selected = Input.input(
                "Seconds since epoch to restore latest values from: ")
            time_converted = datetime.fromtimestamp(float(time_selected))
        except ValueError as e:
            if "out of range" in e.args[0]:
                try:
                    time_converted = datetime.fromtimestamp(
                        float(time_selected) / 1000)
                except ValueError as e:
                    self._utils.error_exit(
                        "Make sure you're using a format of either seconds or milliseconds since epoch."
                    )
            elif "could not convert" in e.args[0]:
                self._utils.error_exit(
                    f"The format of this input should be seconds since epoch. (e.g., 1547647091)\n"
                    f"Try using: https://www.epochconverter.com/ to convert your date to this "
                    f"specific format.")
            else:
                self._utils.error_exit(
                    "An unexpected exception triggered: "
                    f"'{e}' while trying to convert {time_selected} to 'datetime' format."
                )

        self._utils.validate(
            time_converted is not None,
            f"`{CLI_NAME}` encountered an error parsing your input for "
            f"target rollback time.")
        keep_going = Input.y_n_input(
            f"Are you sure you want to restore all figs under {ps_prefix} values to their state at: "
            f"{time_converted}? ",
            default_yes=False)

        if not keep_going:
            self._utils.warn_exit("Aborting restore due to user selection")

        ps_history: PSHistory = self._audit.get_parameter_history_before_time(
            time_converted, ps_prefix)
        restore_count = len(ps_history.history.values())

        if len(ps_history.history.values()) == 0:
            self._utils.warn_exit(
                "No results found for time range.  Aborting.")

        last_item_name = 'Unknown'
        try:
            for item in ps_history.history.values():
                last_item_name = item.name

                if self._is_replication_destination(item.name):
                    repl_destinations.append(item.name)
                    continue

                if item.cfg_at(time_converted).ps_action == SSM_PUT:
                    cfgs_before: List[RestoreConfig] = item.cfgs_before(
                        time_converted)
                    cfg_at: RestoreConfig = item.cfg_at(time_converted)
                    ssm_value = self._ssm.get_parameter(item.name)
                    dynamo_value = self._decrypt_if_applicable(cfg_at)

                    if ssm_value != dynamo_value:
                        if ssm_value is not None:
                            self._ssm.delete_parameter(item.name)

                        for cfg in cfgs_before:
                            decrypted_value = self._decrypt_if_applicable(cfg)
                            self._out.print(
                                f"\nRestoring: [[{cfg.ps_name}]] \nValue: [[{decrypted_value}]]"
                                f"\nDescription: [[{cfg.ps_description}]]\nKMS Key: "
                                f"[[{cfg.ps_key_id if cfg.ps_key_id else '[[No KMS Key Specified]]'}]]"
                            )
                            self._out.notify(
                                f"Replaying version: [[{cfg.ps_version}]] of [[{cfg.ps_name}]]"
                            )
                            print()

                            self._ssm.set_parameter(cfg.ps_name,
                                                    decrypted_value,
                                                    cfg.ps_description,
                                                    cfg.ps_type,
                                                    key_id=cfg.ps_key_id)
                    else:
                        self._out.success(
                            f"Config: {item.name} is current. Skipping.")
                else:
                    # This item must have been a delete, which means this config didn't exist at that time.
                    self._out.print(
                        f"Checking if [[{item.name}]] exists. It was previously deleted."
                    )
                    self._prompt_delete(item.name)
        except ClientError as e:
            if "AccessDeniedException" == e.response["Error"]["Code"]:
                self._utils.error_exit(
                    f"\n\nYou do not have permissions to restore config at the path:"
                    f" [[{last_item_name}]]")
            else:
                self._utils.error_exit(
                    f"Caught error when attempting restore. {e}")

        for item in repl_destinations:
            cfg = self._repl.get_config_repl(item)
            self._print_cannot_restore_msg(cfg)

        print("\n\n")
        if not repl_destinations:
            self._out.success_h2(
                f"[[{restore_count}]] configurations restored successfully!")
        else:
            self._out.warn(
                f"\n\n[[{len(repl_destinations)}]] configurations were not restored because they are shared "
                f"from other destinations. To restore them, restore their sources."
            )
            self._out.success(
                f"{restore_count - len(repl_destinations)} configurations restored successfully."
            )

    def _print_cannot_restore_msg(self, repl_conf: ReplicationConfig):
        self._out.print(
            f"Parameter: [[{repl_conf.destination}]] is a shared parameter. ")
        self._out.print(f"Shared From: [[{repl_conf.source}]]")
        self._out.print(f"Shared by: [[{repl_conf.user}]]")
        self._out.warn(
            f"To restore this parameter you should restore the source: {repl_conf.source} instead!"
        )
        print()

    def _prompt_delete(self, name):
        param = self._ssm.get_parameter_encrypted(name)
        if param:
            selection = Input.y_n_input(
                f"PS Name: {name} did not exist at this restore time."
                f" Delete it? ",
                default_yes=False)

            if selection:
                self._delete.delete_param(name)

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        if self._point_in_time:
            self._restore_params_to_point_in_time()
        else:
            self._restore_param()
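The timestamp handling in _restore_params_to_point_in_time accepts either seconds or milliseconds since epoch. A standalone sketch of that fallback follows, raising ValueError instead of calling error_exit(); the function name is hypothetical:

from datetime import datetime

# Standalone sketch of the seconds-vs-milliseconds fallback used when parsing the
# restore timestamp above; invalid input raises ValueError rather than exiting.
def parse_epoch(user_input: str) -> datetime:
    try:
        return datetime.fromtimestamp(float(user_input))
    except ValueError as e:
        if "out of range" in e.args[0]:
            # Too large to be seconds; retry assuming milliseconds since epoch.
            return datetime.fromtimestamp(float(user_input) / 1000)
        raise ValueError(
            "Expected seconds (or milliseconds) since epoch, e.g. 1547647091.") from e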
Code example #8
File: command_factory.py (project: figtools/figgy-cli)
class CommandFactory(Factory):
    """
    Uses the provided FiggyContext (which contains details of the args passed in, etc.) to initialize a command
    factory of the appropriate type and generate the appropriate command.
    """
    def __init__(self, context: FiggyContext, cli_defaults: CLIDefaults):
        self._id = uuid.uuid4()
        self._context = context
        self._utils = Utils(context.colors_enabled)
        self._cli_defaults = cli_defaults
        self._session_mgr = None
        self._session_provider = None
        self._env_session = None
        self._ssm = None
        self._config = None
        self._kms = None
        self._s3_rsc = None
        self._service_registry = None
        self._all_sessions = None
        self._config_svc = None
        self._cache_mgr = None
        self._rbac_config_view = None
        self._audit = None
        self._repl = None
        self._ots_svc = None
        self._kms_dao: Optional[KmsDao] = None
        self.__env_lock = Lock()
        self.__mgr_lock = Lock()

    def __session_provider(self) -> SessionProvider:
        if not self._session_provider:
            self._session_provider = SessionProviderFactory(
                self._cli_defaults, self._context).instance()

        return self._session_provider

    def __session_manager(self):
        """
        Lazy load the session manager; only create a session if this command requires it.
        :return: The lazily initialized SessionManager.
        """
        with self.__mgr_lock:
            if not self._session_mgr:
                self._session_mgr = SessionManager(self._cli_defaults,
                                                   self.__session_provider())

        return self._session_mgr

    def __build_env(self) -> GlobalEnvironment:
        return GlobalEnvironment(role=self._context.selected_role,
                                 region=self._cli_defaults.region)

    def __env_session(self) -> boto3.session.Session:
        """
        Lazy load an ENV session object for the ENV selected in the FiggyContext
        :return: Hydrated session for the selected environment.
        """
        with self.__env_lock:
            if not self._env_session:
                self._env_session = self.__session_manager().get_session(
                    self.__build_env(), prompt=False)

        return self._env_session

    def __audit(self) -> AuditDao:
        if not self._audit:
            self._audit = AuditDao(self.__env_session().resource('dynamodb'))

        return self._audit

    def __repl(self) -> ReplicationDao:
        if not self._repl:
            self._repl = ReplicationDao(
                self.__env_session().resource('dynamodb'))

        return self._repl

    def __ssm(self) -> SsmDao:
        """
        Returns an SSMDao initialized with a session for the selected ENV based on FiggyContext
        """
        if not self._ssm:
            self._ssm = SsmDao(self.__env_session().client('ssm'))

        return self._ssm

    def __kms(self) -> KmsService:
        """
        Returns a hydrated KMS Service object based on the selected ENV
        """
        if not self._kms:
            self._kms: KmsService = KmsService(self.__kms_dao(), self.__ssm())

        return self._kms

    def __kms_dao(self) -> KmsDao:
        if not self._kms_dao:
            self._kms_dao = KmsDao(self.__env_session().client('kms'))

        return self._kms_dao

    def __config(self) -> ConfigDao:
        """
        Returns a hydrated ConfigDao for the selected environment.
        """
        return ConfigDao(self.__env_session().resource('dynamodb'))

    def __s3_resource(self):
        """
        Returns a hydrated boto3 S3 Resource for the mgmt account.
        """
        if not self._s3_rsc:
            self._s3_rsc = self.__env_session().resource('s3')

        return self._s3_rsc

    def __all_sessions(self) -> Dict[str, boto3.session.Session]:
        """
        Populates a DICT containing boto sessions for all 4 environments (dev -> prod).
        """
        assumable_roles = self._cli_defaults.assumable_roles
        matching_roles = list(
            set([x for x in assumable_roles if x.role == self._context.role]))

        if not self._all_sessions and self._context.all_profiles:
            self._all_sessions: Dict[str, boto3.session.Session] = {}

            with ThreadPoolExecutor(max_workers=10) as pool:
                session_futures: Dict[str, thread] = {
                    role.role.full_name:
                    pool.submit(self.__session_manager().get_session,
                                role,
                                prompt=False)
                    for role in matching_roles
                }

                for env, future in session_futures.items():
                    self._all_sessions[env] = future.result()
        else:
            self._all_sessions = None

        return self._all_sessions

    def __cache_mgr(self) -> CacheManager:
        """Builds a cache manager service for the specified resource."""
        if not self._cache_mgr:
            self._cache_mgr: CacheManager = CacheManager(
                self._context.resource)

        return self._cache_mgr

    def __config_service(self) -> ConfigService:
        """Returns a hydrated ConfigService."""
        if not self._config_svc:
            self._config_svc = ConfigService(self.__config(), self.__ssm(),
                                             self.__repl(), self.__cache_mgr(),
                                             self.__kms(),
                                             self._context.run_env)

        return self._config_svc

    def __rbac_config_view(self) -> RBACLimitedConfigView:
        if not self._rbac_config_view:
            self._rbac_config_view = RBACLimitedConfigView(
                self._context.role, self.__cache_mgr(), self.__ssm(),
                self.__config_service(), self._context.profile)
        return self._rbac_config_view

    def __service_registry(self) -> ServiceRegistry:
        """
        Returns an initialized service registry that may be used to hydrate and build services
        """
        if not self._service_registry:
            context = CommandContext(self._context.run_env,
                                     self._context.command,
                                     defaults=self._cli_defaults)
            self._service_registry = ServiceRegistry(self.__session_manager(),
                                                     context)

        return self._service_registry

    def __ots_svc(self) -> OTSService:
        """
        Returns a valid & hydrated One-time-secret service
        """
        if not self._ots_svc:
            self._ots_svc = self.__service_registry().ots_svc(
                env=self.__build_env())

        return self._ots_svc

    def __init_sessions(self):
        """
        Bootstraps sessions (blocking) before we do threaded lookups that require these sessions.
        """
        self.__session_manager().get_session(self.__build_env(), prompt=False)

    def instance(self):
        """
        Get an instance of a particular command based on the FiggyContext provided into this factory.
        """
        factory: Factory = None
        start = time.time()
        if self._context.command in config_commands and self._context.resource == config:
            if self._context.command != ui:
                self.__init_sessions()

            context = ConfigContext(self._context.run_env,
                                    self._context.role,
                                    self._context.args,
                                    config,
                                    defaults=self._cli_defaults)

            futures = set()
            # Multiple threads to init resources saves 500 - 1000 MS
            with ThreadPoolExecutor(max_workers=5) as pool:
                futures.add(pool.submit(self.__ssm))
                futures.add(pool.submit(self.__kms))
                futures.add(pool.submit(self.__s3_resource))

            for future in as_completed(futures):
                pass  # Force lazy init for all futures.

            factory = ConfigFactory(self._context.command, context,
                                    self.__ssm(), self.__config_service(),
                                    self.__config(), self.__kms(),
                                    self.__s3_resource(),
                                    self._context.colors_enabled,
                                    self.__rbac_config_view(), self.__audit(),
                                    self.__repl(), self.__session_manager())

        elif self._context.command in iam_commands and self._context.resource == iam:
            self.__init_sessions()
            context = IAMContext(self._context.run_env,
                                 self._context.role,
                                 self._context.colors_enabled,
                                 iam,
                                 defaults=self._cli_defaults)
            factory = IAMFactory(self._context.command,
                                 context,
                                 self.__env_session(),
                                 all_sessions=self.__all_sessions())

        elif self._context.find_matching_optional_arguments(
                help_commands) or self._context.resource in help_commands:
            optional_args = self._context.find_matching_optional_arguments(
                help_commands)
            context = HelpContext(self._context.resource,
                                  self._context.command,
                                  optional_args,
                                  self._context.run_env,
                                  defaults=self._cli_defaults,
                                  role=self._context.role)
            factory = HelpFactory(self._context.command, context,
                                  self._context)

        elif self._context.find_matching_optional_arguments(
                maintenance_commands
        ) or self._context.resource in maintenance_commands:
            optional_args = self._context.find_matching_optional_arguments(
                maintenance_commands)

            try:
                cfg = self.__config_service()
            except NotImplementedError:
                cfg = None

            context = MaintenanceContext(self._context.resource,
                                         self._context.command,
                                         optional_args,
                                         self._context.run_env,
                                         defaults=self._cli_defaults,
                                         role=self._context.role)
            factory = MaintenanceFactory(self._context.command, context,
                                         self._context, cfg)

        elif self._context.command in ui_commands or self._context.resource == ui:
            context = CommandContext(self._context.run_env,
                                     self._context.command,
                                     defaults=self._cli_defaults)
            factory = UIFactory(self._context.command, context,
                                self.__session_manager(), self._context)

        elif self._context.command in ots_commands or self._context.resource == ots:
            context = OTSContext(self._context.run_env,
                                 self._context.role,
                                 defaults=self._cli_defaults)
            factory = OTSFactory(self._context.command, context,
                                 self.__env_session(),
                                 self._context.colors_enabled,
                                 self.__ots_svc())

        else:
            if self._context.command is None or self._context.resource is None:
                self._utils.error_exit(
                    f"Proper {CLI_NAME} syntax is `{CLI_NAME} <resource> <command> --options`. "
                    f"For example `{CLI_NAME} config get`. Either resource or command was "
                    f"not supplied.")
            else:
                self._utils.error_exit(
                    f"Command: {self._context.command.name} was not found in this version of figgy."
                )

        logger.info(f"Init completed in {time.time() - start} seconds.")
        return factory.instance()
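
A minimal, self-contained sketch of the two ideas the factory above combines: lazily memoized builder methods and a thread-pool warm-up that forces the slow first initializations to run in parallel. None of the names below (DemoFactory, _build_ssm, _build_kms) come from figgy-cli; they are invented for illustration.

import time
from concurrent.futures import ThreadPoolExecutor, as_completed


class DemoFactory:
    def __init__(self):
        self._ssm = None
        self._kms = None

    def _build_ssm(self):
        # Memoized: only the first call pays the construction cost.
        if not self._ssm:
            time.sleep(0.5)  # stand-in for building a slow boto3 client
            self._ssm = object()
        return self._ssm

    def _build_kms(self):
        if not self._kms:
            time.sleep(0.5)
            self._kms = object()
        return self._kms

    def warm_up(self):
        # Run the slow first builds concurrently; later calls return the cached instance.
        with ThreadPoolExecutor(max_workers=2) as pool:
            futures = {pool.submit(self._build_ssm), pool.submit(self._build_kms)}
        for future in as_completed(futures):
            future.result()  # surface any exception raised during init


start = time.time()
DemoFactory().warm_up()  # roughly 0.5s instead of ~1s when built sequentially
print(f"warm-up took {time.time() - start:.2f} seconds")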
コード例 #9
0
ファイル: edit.py プロジェクト: figtools/figgy-cli
class Edit(ConfigCommand):
    def __init__(self, ssm_init: SsmDao, colors_enabled: bool,
                 config_context: ConfigContext,
                 config_view: RBACLimitedConfigView,
                 config_completer: WordCompleter):
        super().__init__(edit, colors_enabled, config_context)
        self._ssm = ssm_init
        self._config_view = config_view
        self._utils = Utils(colors_enabled)
        self._config_completer = config_completer

    def edit(self) -> None:
        """
        Allows a user to define a PS name and add or edit a parameter at that location. Uses NPYscreen editor.
        """
        key = Input.input('Please input a PS Name: ',
                          completer=self._config_completer)

        try:
            value, desc = self._ssm.get_parameter_with_description(key)
            edit_app = EditApp(key, value, desc)
            edit_app.run()

            value, desc = edit_app.value_box.value, edit_app.description_box.value
            log.info(f"Edited value: {value} - description: {desc}")

            is_secret = Input.is_secret()
            parameter_type, kms_id = SSM_SECURE_STRING if is_secret else SSM_STRING, None
            if is_secret:
                valid_keys = self._config_view.get_authorized_kms_keys()
                if len(valid_keys) > 1:
                    key_name = Input.select_kms_key(valid_keys)
                else:
                    key_name = valid_keys[0]

                kms_id = self._config_view.get_authorized_key_id(
                    key_name, self.run_env)

            if not self._utils.is_valid_input(key, f"Parameter name", True) \
                    or not self._utils.is_valid_input(value, key, True):
                self._utils.error_exit(
                    "Invalid input detected, please resolve the issue and retry."
                )

            self._ssm.set_parameter(key,
                                    value,
                                    desc,
                                    parameter_type,
                                    key_id=kms_id)
            print(f"{self.c.fg_gr}{key} saved successfully.{self.c.rs}")
        except ClientError as e:
            if "AccessDeniedException" == e.response['Error']['Code']:
                denied = "AccessDeniedException" == e.response['Error']['Code']
                if denied and "AWSKMS; Status Code: 400;" in e.response[
                        'Error']['Message']:
                    print(
                        f"\n{self.c.fg_rd}You do not have access to decrypt the value of: {key}{self.c.rs}"
                    )
                elif denied:
                    print(
                        f"\n{self.c.fg_rd}You do not have access to Parameter: {key}{self.c.rs}"
                    )
                else:
                    raise
            else:
                self._utils.error_exit(
                    f"{self.c.fg_rd}Exception caught attempting to add config: {e}{self.c.rs}"
                )

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        self.edit()
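
The heart of Edit is the final set_parameter call. A rough equivalent with plain boto3 (no figgy wrappers; the helper name and sample values are made up) writes the value back as a String, or as a SecureString under a chosen KMS key when it is marked secret.

import boto3

ssm = boto3.client('ssm')


def save_parameter(name: str, value: str, description: str,
                   is_secret: bool, kms_key_id: str = None) -> None:
    kwargs = dict(Name=name, Value=value, Description=description,
                  Overwrite=True,
                  Type='SecureString' if is_secret else 'String')
    if is_secret and kms_key_id:
        kwargs['KeyId'] = kms_key_id  # omit to use the account's default SSM key
    ssm.put_parameter(**kwargs)


# save_parameter('/app/dev/db-password', 'hunter2', 'demo secret',
#                is_secret=True, kms_key_id='alias/figgy-dev')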
コード例 #10
0
ファイル: upgrade.py プロジェクト: figtools/figgy-cli
class Upgrade(MaintenanceCommand):
    """
    Drives the --version command
    """
    def __init__(self, maintenance_context: MaintenanceContext,
                 config_service: Optional[ConfigService]):
        super().__init__(version, maintenance_context.defaults.colors_enabled,
                         maintenance_context)
        self.tracker = VersionTracker(self.context.defaults, config_service)
        self.upgrade_mgr = UpgradeManager(
            maintenance_context.defaults.colors_enabled)
        self._utils = Utils(
            colors_enabled=maintenance_context.defaults.colors_enabled)
        self._out = Output(
            colors_enabled=maintenance_context.defaults.colors_enabled)

    def upgrade(self):
        latest_version: FiggyVersionDetails = self.tracker.get_version()
        install_success, upgrade_it = False, True

        if self.upgrade_mgr.is_pip_install():
            self._out.error(
                f"Figgy appears to have been installed with pip. Please upgrade [[{CLI_NAME}]] with "
                f"`pip` instead.")
            self._out.print(
                f"\n\n[[Try this command]]: pip install figgy-cli --upgrade")

            self._out.print(
                f"\n\nPip based [[{CLI_NAME}]] installations do not support automatic upgrades and "
                f"instead require pip-managed upgrades; however,  Homebrew, one-line, and manual "
                f"installations support auto-upgrade. Please consider installing figgy through one "
                f"of these other methods to take advantage of this feature. "
                f"It will save you time, help keep you up-to-date, and enable important features like "
                f"release-rollbacks and canary releases! "
                f"[[https://www.figgy.dev/docs/getting-started/install/]]")
            sys.exit(0)

        install_path = self.upgrade_mgr.install_path

        if not install_path:
            self._utils.error_exit(
                f"Unable to detect local figgy installation. Please reinstall figgy and follow one "
                f"of the recommended installation procedures.")

        if latest_version.version == VERSION:
            self._out.success(
                f'You are currently using the latest version of [[{CLI_NAME}]]: [[{VERSION}]]'
            )
            upgrade_it = False
        elif self.tracker.upgrade_available():
            self._out.notify_h2(
                f"New version: [[{latest_version.version}]] is more recent than your version: [[{VERSION}]]"
            )
            upgrade_it = True
        elif not self.tracker.cloud_version_compatible_with_upgrade():
            self._out.notify_h2(
                f"Version [[{self.tracker.get_version().version}]] of the Figgy CLI is available but your "
                f"current version of Figgy Cloud ([[{self.tracker.current_cloud_version()}]]) is not compatible."
                f" Your administrator must first update FiggyCloud to at least version: "
                f"[[{self.tracker.required_cloud_version()}]] before you can upgrade Figgy."
            )
            upgrade_it = False
        else:
            self._out.notify_h2(
                f"Your version: [[{VERSION}]] is more recent then the current recommended version "
                f"of {CLI_NAME}: [[{latest_version.version}]]")
            upgrade_it = Input.y_n_input(
                f'Would you like to revert to the current recommended version '
                f'of {CLI_NAME}?')

        if upgrade_it:
            if self._utils.is_mac():
                self._out.print(
                    f"\nMacOS auto-upgrade is supported. Performing auto-upgrade."
                )
                install_success = self.install_mac(latest_version)
            elif self._utils.is_linux():
                self._out.print(
                    f"\nLinux auto-upgrade is supported. Performing auto-upgrade."
                )
                install_success = self.install_linux(latest_version)
            elif self._utils.is_windows():
                self._out.print(
                    f"\nWindows auto-upgrade is supported. Performing auto-upgrade."
                )
                install_success = self.install_windows(latest_version)

            if install_success:
                self._out.success(
                    f"Installation successful! Exiting. Rerun `[[{CLI_NAME}]]` "
                    f"to use the latest version!")
            else:
                self._out.warn(
                    f"\nUpgrade may not have been successful. Check by re-running "
                    f"[[`{CLI_NAME}` --version]] to see if it was. If it wasn't, please reinstall [[`{CLI_NAME}`]]. "
                    f"See {INSTALL_URL}.")

    def install_mac(self, latest_version: FiggyVersionDetails) -> bool:
        install_path = '/usr/local/bin/figgy'

        if self.upgrade_mgr.is_brew_install():
            self._out.notify_h2(f"Homebrew installation detected!")

            print(
                f"This upgrade process will not remove your brew installation but will instead unlink it. "
                f"Going forward you will no longer need homebrew to manage {CLI_NAME}. Continuing is recommended.\n"
            )

            selection = Input.y_n_input(f"Continue? ", default_yes=True)
        else:
            selection = True

        if selection:
            self.upgrade_mgr.install_onedir(install_path,
                                            latest_version.version, MAC)
            return True
        else:
            self._out.print(
                f'\n[[Auto-upgrade aborted. To upgrade through brew run:]] \n'
                f'-> brew upgrade figtools/figgy/figgy')
            self._out.warn(
                f"\n\nYou may continue to manage [[{CLI_NAME}]] through Homebrew, but doing so will "
                f"limit some upcoming functionality around canary releases, rollbacks, and dynamic "
                f"version-swapping.")
            return False

    def install_linux(self, latest_version: FiggyVersionDetails) -> bool:
        install_path = self.upgrade_mgr.install_path
        self.upgrade_mgr.install_onedir(install_path, latest_version.version,
                                        LINUX)
        return True

    def install_windows(self, latest_version: FiggyVersionDetails) -> bool:
        install_path = self.upgrade_mgr.install_path
        self.upgrade_mgr.install_onedir(install_path, latest_version.version,
                                        WINDOWS)
        return True

    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        self.upgrade()
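
A small sketch of the decision Upgrade makes before dispatching to the per-OS installers: compare the local version with the recommended one and pick an upgrade, a rollback offer, or a no-op. parse and upgrade_action are hypothetical helpers, not figgy's VersionTracker.

def parse(version: str) -> tuple:
    # '1.4.1' -> (1, 4, 1); assumes plain numeric versions without pre-release tags
    return tuple(int(part) for part in version.split('.'))


def upgrade_action(local: str, recommended: str) -> str:
    if parse(recommended) > parse(local):
        return 'upgrade'
    if parse(recommended) < parse(local):
        return 'offer-rollback'
    return 'up-to-date'


assert upgrade_action('1.3.0', '1.4.1') == 'upgrade'
assert upgrade_action('1.5.0', '1.4.1') == 'offer-rollback'
assert upgrade_action('1.4.1', '1.4.1') == 'up-to-date'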
コード例 #11
0
class BastionSessionProvider(SessionProvider):
    _MAX_ATTEMPTS = 5

    def __init__(self, defaults: CLIDefaults, context: FiggyContext):
        super().__init__(defaults, context)
        self.__id = uuid.uuid4()
        self._utils = Utils(defaults.colors_enabled)
        self.__bastion_session = boto3.session.Session(
            profile_name=self._defaults.provider_config.profile_name)
        self._ssm = None
        self._sts = None
        self._iam_client = None
        self._iam = None
        keychain_enabled = defaults.extras.get(DISABLE_KEYRING) is not True
        vault = FiggyVault(keychain_enabled=keychain_enabled,
                           secrets_mgr=self._secrets_mgr)
        self._sts_cache: CacheManager = CacheManager(
            file_override=STS_SESSION_CACHE_PATH, vault=vault)
        self._role_name_prefix = os.getenv(FIGGY_ROLE_PREFIX_OVERRIDE_ENV,
                                           FIGGY_ROLE_NAME_PREFIX)

    def __get_iam_user(self):
        self._defaults.user = self.__get_iam_resource().CurrentUser().user_name
        return self._defaults.user

    def __get_iam_resource(self):
        if not self._iam:
            self._iam = self.__bastion_session.resource('iam')

        return self._iam

    def __get_iam_client(self):
        if not self._iam_client:
            self._iam_client = self.__bastion_session.client('iam')

        return self._iam_client

    def __get_ssm(self):
        if not self._ssm:
            self._ssm = SsmDao(self.__bastion_session.client('ssm'))
        return self._ssm

    def __get_sts(self):
        if not self._sts:
            self._sts = self.__bastion_session.client('sts')
        return self._sts

    def get_mfa_serial(self) -> Optional[str]:
        response = self.__get_iam_client().list_mfa_devices(
            UserName=self._defaults.user)
        devices = response.get('MFADevices', [])
        log.info(f'Found MFA devices: {devices}.')
        return devices[0].get('SerialNumber') if devices else None

    def get_session(self,
                    env: GlobalEnvironment,
                    prompt: bool,
                    exit_on_fail=True,
                    mfa: Optional[str] = None) -> boto3.Session:
        forced = False
        log.info(
            f"Getting session for role: {env.role.role_arn} in env: {env.role.run_env.env}"
        )
        attempts = 0
        while True:
            try:
                if prompt and not forced:
                    forced = True
                    raise InvalidSessionError(
                        "Forcing new session due to prompt.")

                creds: FiggyAWSSession = self._sts_cache.get_val(
                    env.role.cache_key())

                if creds:
                    session = boto3.Session(
                        aws_access_key_id=creds.access_key,
                        aws_secret_access_key=creds.secret_key,
                        aws_session_token=creds.token,
                        region_name=env.region)

                    if creds.expires_soon() or not self._is_valid_session(session):
                        self._utils.validate(
                            attempts < self._MAX_ATTEMPTS,
                            f"Failed to authenticate with AWS after {attempts} attempts. Exiting. "
                        )

                        attempts = attempts + 1
                        log.info(
                            "Invalid session detected in cache. Raising session error."
                        )
                        raise InvalidSessionError("Invalid Session Detected")

                    log.info("Valid bastion SSO session returned from cache.")
                    return session
                else:
                    raise InvalidSessionError(
                        "Forcing new session, cache is empty.")
            except (FileNotFoundError, NoCredentialsError,
                    InvalidSessionError) as e:
                try:
                    if self._defaults.mfa_enabled:
                        self._defaults.mfa_serial = self.get_mfa_serial()
                        color = Utils.default_colors() if self._defaults.colors_enabled else None

                        if not mfa:
                            if self._context.command == commands.ui and not self._defaults.auto_mfa:
                                raise CannotRetrieveMFAException(
                                    "Cannot retrieve MFA, UI mode is activated."
                                )
                            else:
                                mfa = self._secrets_mgr.get_next_mfa(self._defaults.user) \
                                    if self._defaults.auto_mfa else \
                                    Input.get_mfa(display_hint=True, color=color)

                        response = self.__get_sts().assume_role(
                            RoleArn=env.role.role_arn,
                            RoleSessionName=Utils.sanitize_session_name(
                                self._defaults.user),
                            DurationSeconds=self._defaults.session_duration,
                            SerialNumber=self._defaults.mfa_serial,
                            TokenCode=mfa)
                    else:
                        response = self.__get_sts().assume_role(
                            RoleArn=env.role.role_arn,
                            RoleSessionName=Utils.sanitize_session_name(
                                self._defaults.user),
                            DurationSeconds=self._defaults.session_duration)

                    session = FiggyAWSSession(
                        **response.get('Credentials', {}))
                    log.info(f"Got session response: {response}")
                    self._sts_cache.write(env.role.cache_key(), session)
                except (ClientError, ParamValidationError) as e:
                    if isinstance(
                            e, ParamValidationError
                    ) or "AccessDenied" == e.response['Error']['Code']:
                        if exit_on_fail:
                            self._utils.error_exit(
                                f"Error authenticating with AWS from Bastion Profile:"
                                f" {self._defaults.provider_config.profile_name}: {e}"
                            )
                    else:
                        if exit_on_fail:
                            log.error(
                                f"Failed to authenticate due to error: {e}")
                            self._utils.error_exit(
                                f"Error getting session for role: {env.role.role_arn} "
                                f"-- Are you sure you have permissions?")

                    raise e

    def get_assumable_roles(self):
        if self.is_role_session():
            user_roles = [self._defaults.role.role]
        else:
            ROLE_PATH = f'/figgy/users/{self.__get_iam_user()}/roles'
            user_roles = self.__get_ssm().get_parameter(ROLE_PATH)
            self._utils.stc_validate(
                user_roles is not None and user_roles != "[]",
                "Something is wrong with your user's configuration with Figgy. "
                "Unable to find any eligible roles for your user. Please contact your"
                " administrator.")

            user_roles = json.loads(user_roles)

        environments = self.__get_ssm().get_all_parameters(
            [PS_FIGGY_ACCOUNTS_PREFIX], option='OneLevel')
        names: List[str] = [env.get('Name') for env in environments]
        parameters = self.__get_ssm().get_parameter_values(names)
        assumable_roles: List[AssumableRole] = []

        for param in parameters:
            env_name = param.get('Name').split('/')[-1]
            account_id = param.get('Value')

            for role in user_roles:
                assumable_roles.append(
                    AssumableRole(
                        run_env=RunEnv(env=env_name, account_id=account_id),
                        role=Role(
                            role=role,
                            full_name=f'{FIGGY_ROLE_NAME_PREFIX}{env_name}-{role}'),
                        account_id=account_id,
                        provider_name=Provider.AWS_BASTION.value,
                        profile=None))

        return assumable_roles

    def is_role_session(self):
        """
        For sandbox demos, where users aren't coming from user accounts, we want to skip looking up user -> role.
        :return: bool - Is this session originating from a role?
        """
        creds = self.__bastion_session.get_credentials().get_frozen_credentials()

        return hasattr(creds, 'token') and creds.token is not None

    def cleanup_session_cache(self):
        self._sts_cache.wipe_cache()
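
Stripped of the on-disk vault cache, expiry checks, and retry/MFA-prompt handling, the core of the bastion flow above is a cache-then-assume-role pattern. A condensed sketch with plain boto3 follows; the in-memory _cache dict and the bastion_session helper are assumptions for illustration, not figgy APIs.

import boto3

_cache: dict = {}  # role_arn -> STS credentials; the real provider persists these in a vault


def bastion_session(role_arn: str, profile: str, mfa_serial: str = None,
                    mfa_token: str = None, region: str = 'us-east-1') -> boto3.Session:
    creds = _cache.get(role_arn)
    if not creds:
        sts = boto3.Session(profile_name=profile).client('sts')
        kwargs = dict(RoleArn=role_arn, RoleSessionName='figgy-demo', DurationSeconds=3600)
        if mfa_serial and mfa_token:
            kwargs.update(SerialNumber=mfa_serial, TokenCode=mfa_token)
        creds = sts.assume_role(**kwargs)['Credentials']
        _cache[role_arn] = creds
    return boto3.Session(aws_access_key_id=creds['AccessKeyId'],
                         aws_secret_access_key=creds['SecretAccessKey'],
                         aws_session_token=creds['SessionToken'],
                         region_name=region)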
コード例 #12
0
class Validate(ConfigCommand):
    def __init__(self, ssm_init: SsmDao, colors_enabled: bool,
                 context: ConfigContext):
        super().__init__(validate, colors_enabled, context)
        self._ssm = ssm_init
        self._config_path = context.ci_config_path if context.ci_config_path \
            else Utils.find_figgy_json()
        self._utils = Utils(colors_enabled)
        self._replication_only = context.replication_only
        self._errors_detected = False
        self.example = f"{self.c.fg_bl}{CLI_NAME} config {self.command_printable} " \
                       f"--env dev --config /path/to/config{self.c.rs}"
        self._FILE_PREFIX = "file://"
        self._out = Output(colors_enabled)

    def _validate(self):
        missing_key = False
        config = self._utils.get_ci_config(self._config_path)
        shared_names = set(
            self._utils.get_config_key_safe(SHARED_KEY, config, default=[]))
        repl_conf = self._utils.get_config_key_safe(REPLICATION_KEY,
                                                    config,
                                                    default={})
        repl_from_conf = self._utils.get_config_key_safe(REPL_FROM_KEY,
                                                         config,
                                                         default={})
        merge_conf = self._utils.get_config_key_safe(MERGE_KEY,
                                                     config,
                                                     default={})
        config_keys = set(
            self._utils.get_config_key_safe(CONFIG_KEY, config, default=[]))
        namespace = self._utils.get_namespace(config)
        all_names = KeyUtils.find_all_expected_names(config_keys, shared_names,
                                                     merge_conf, repl_conf,
                                                     repl_from_conf, namespace)

        all_params = self._ssm.get_all_parameters([namespace])

        all_param_names = []
        for param in all_params:
            all_param_names.append(param['Name'])

        print()
        for name in all_names:
            if name not in all_param_names:
                self._out.warn(
                    f"Fig missing from [[{self.run_env}]] environment Parameter Store: [[{name}]]"
                )
                missing_key = True
            else:
                self._out.print(f"Fig found in ParameterStore: [[{name}]].")

        if missing_key:
            print("\n\n")
            self._utils.error_exit(f"{MISSING_PS_NAME_MESSAGE}")
        else:
            self._out.success(
                f"\nSuccess! All figs have been located in the [[{self.run_env}]] ParameterStore!"
            )

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        self._validate()
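
Validate boils down to a set difference between the names the figgy.json config expects and the names actually present under the namespace in Parameter Store. A stripped-down sketch with a plain boto3 paginator; the missing_parameters helper and the sample names are invented.

import boto3


def missing_parameters(expected_names: set, namespace: str) -> set:
    ssm = boto3.client('ssm')
    found = set()
    paginator = ssm.get_paginator('get_parameters_by_path')
    for page in paginator.paginate(Path=namespace, Recursive=True):
        found.update(param['Name'] for param in page['Parameters'])
    return expected_names - found


# missing = missing_parameters({'/app/demo/db-url', '/app/demo/db-password'}, '/app/demo')
# if missing:
#     raise SystemExit(f"Figs missing from Parameter Store: {missing}")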
コード例 #13
0
class Login(HelpCommand, ABC):
    """
    Log the user into every possible environment they have access to. Sessions are cached.
    This improves figgy performance throughout the day.
    """
    def __init__(self, help_context: HelpContext, figgy_setup: FiggySetup,
                 figgy_context: FiggyContext):
        super().__init__(login, Utils.not_windows(), help_context)
        self._setup = figgy_setup
        self._defaults: CLIDefaults = figgy_setup.get_defaults()
        self._figgy_context = figgy_context
        self._utils = Utils(self._defaults.colors_enabled)
        self._aws_cfg = AWSConfig(color=self.c)
        self._out = Output(self._defaults.colors_enabled)

        self.example = f"\n\n{self.c.fg_bl}{CLI_NAME} {login.name} \n" \
                       f"{self.c.rs}{self.c.fg_yl}  --or--{self.c.rs}\n" \
                       f"{self.c.fg_bl}{CLI_NAME} {login.name} {sandbox.name}{self.c.rs}"

    def login(self):
        self._utils.validate(
            self._defaults.provider.name in Provider.names(),
            f"You cannot login until you've configured Figgy. Please run `{CLI_NAME}` --configure"
        )
        provider = SessionProviderFactory(self._defaults,
                                          self._figgy_context).instance()
        assumable_roles: List[AssumableRole] = provider.get_assumable_roles()
        self._out.print(
            f"{self.c.fg_bl}Found {len(assumable_roles)} possible logins. Logging in...{self.c.rs}"
        )

        for role in assumable_roles:
            self._out.print(
                f"Login successful for {role.role} in environment: {role.run_env}"
            )
            provider.get_session_and_role(role, False)

        self._out.print(
            f"{self.c.fg_gr}Login successful. All sessions are cached.{self.c.rs}"
        )

    def login_sandbox(self):
        """
        If user provides --role flag, skip role & env selection for a smoother user experience.
        """
        EnvironmentValidator(self._defaults).validate_environment_variables()

        Utils.wipe_vaults() or Utils.wipe_defaults() or Utils.wipe_config_cache()

        self._out.print(
            f"{self.c.fg_bl}Logging you into the Figgy Sandbox environment.{self.c.rs}"
        )
        user = Input.input("Please input a user name: ", min_length=2)
        colors = Input.select_enable_colors()

        # Prompt user for role if --role not provided
        if commands.role not in self.context.options:
            role = Input.select("\n\nPlease select a role to impersonate: ",
                                valid_options=SANDBOX_ROLES)
        else:
            role = self.context.role.role
            self._utils.validate(
                role in SANDBOX_ROLES,
                f"Provided role: >>>`{role}`<<< is not a valid sandbox role."
                f" Please choose from {SANDBOX_ROLES}")

        params = {'role': role, 'user': user}
        result = requests.get(GET_SANDBOX_CREDS_URL, params=params)

        if result.status_code != 200:
            self._utils.error_exit(
                "Unable to get temporary credentials from the Figgy sandbox. If this problem "
                f"persists please notify us on our GITHUB: {FIGGY_GITHUB}")

        data = result.json()
        response = SandboxLoginResponse(**data)
        self._aws_cfg.write_credentials(
            access_key=response.AWS_ACCESS_KEY_ID,
            secret_key=response.AWS_SECRET_ACCESS_KEY,
            token=response.AWS_SESSION_TOKEN,
            region=FIGGY_SANDBOX_REGION,
            profile_name=FIGGY_SANDBOX_PROFILE)

        defaults = CLIDefaults.sandbox(user=user, role=role, colors=colors)
        self._setup.save_defaults(defaults)

        run_env = RunEnv(
            env='dev',
            account_id=SANDBOX_DEV_ACCOUNT_ID) if self.context.role else None

        config_mgr = ConfigManager.figgy()
        config_mgr.set(Config.Section.Bastion.PROFILE, FIGGY_SANDBOX_PROFILE)
        defaults = self._setup.configure_extras(defaults)
        defaults = self._setup.configure_roles(current_defaults=defaults,
                                               role=Role(role=role),
                                               run_env=run_env)
        defaults = self._setup.configure_figgy_defaults(defaults)
        self._setup.save_defaults(defaults)

        self._out.success(
            f"\nLogin successful. Your sandbox session will last for [[1 hour]]."
        )

        self._out.print(
            f"\nIf your session expires, you may rerun `{CLI_NAME} login sandbox` to get another sandbox session. "
            f"\nAll previous figgy sessions have been disabled, you'll need to run {CLI_NAME} "
            f"--configure to leave the sandbox.")

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        if self.context.command == login:
            self.login()
        elif self.context.command == sandbox:
            Utils.wipe_vaults() or Utils.wipe_defaults()
            self.login_sandbox()
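
The last step of login_sandbox persists the temporary credentials returned by the sandbox endpoint as a named AWS profile (via AWSConfig.write_credentials). A rough standard-library equivalent is sketched below; the profile name, file layout, and the resp dict are assumptions for illustration only.

import configparser
import os


def write_aws_profile(profile: str, access_key: str, secret_key: str,
                      token: str, region: str) -> None:
    path = os.path.expanduser('~/.aws/credentials')
    config = configparser.ConfigParser()
    config.read(path)  # no-op if the file does not exist yet
    config[profile] = {
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key,
        'aws_session_token': token,
        # AWS tools normally read region from ~/.aws/config; written here only to
        # mirror the write_credentials helper used above.
        'region': region,
    }
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as handle:
        config.write(handle)


# write_aws_profile('figgy-sandbox', resp['AWS_ACCESS_KEY_ID'],
#                   resp['AWS_SECRET_ACCESS_KEY'], resp['AWS_SESSION_TOKEN'], 'us-east-1')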