Example #1
class Put(OTSCommand):
    """
    Stores a one-time secret that can be shared and retrieved once before it expires.
    """
    def __init__(self, ots_svc: OTSService, ots_context: OTSContext,
                 colors_enabled: bool):
        super().__init__(ots_put, colors_enabled, ots_context)
        self._ots = ots_svc
        self._utils = Utils(colors_enabled)
        self._out = Output(colors_enabled)

    def _put(self):
        value = Input.input(f"Please input a value to share: ")

        # Safe convert to int or float, then validate
        expires_in_hours = Input.input(
            f"Select # of hours before value auto-expires: ", default="1")
        expires_in_hours = Utils.safe_cast(expires_in_hours, int,
                                           expires_in_hours)
        expires_in_hours = Utils.safe_cast(expires_in_hours, float,
                                           expires_in_hours)
        self._utils.validate(
            isinstance(expires_in_hours, int)
            or isinstance(expires_in_hours, float),
            "You must provide a number of hours for when this secret should expire. No strings accepted."
        )
        self._utils.validate(
            expires_in_hours <= 48,
            "You may not specify an expiration time more than 48 hours in the future."
        )

        secret_id = self._ots.put_ots(value, expires_in_hours)
        self._out.print(
            f"\n\nTo share this secret, recipients will need the following")
        self._out.print(f"\n[[Secret Id]] -> {secret_id}")
        self._out.success(
            f"\n\nValue successfully stored, it will expire in {expires_in_hours} hours, or when retrieved."
        )

    def execute(self):
        self._put()
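
Illustrative sketch (not part of the original example): Utils.safe_cast is assumed to attempt a cast and return a caller-supplied default on failure, which is why _put() can chain an int cast and a float cast before validating the result's type. A minimal stand-in (safe_cast_sketch is a hypothetical name):

def safe_cast_sketch(value, to_type, default):
    # Try the cast; if it fails, hand back the default unchanged.
    try:
        return to_type(value)
    except (ValueError, TypeError):
        return default

# safe_cast_sketch("2", int, "2")       -> 2
# safe_cast_sketch("1.5", int, "1.5")   -> "1.5"  (the follow-up float cast then yields 1.5)
# safe_cast_sketch("abc", float, "abc") -> "abc"  (validation then rejects the string)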
Example #2
class SSOSessionProvider(SessionProvider, ABC):
    _MAX_ATTEMPTS = 5

    def __init__(self, defaults: CLIDefaults, context: FiggyContext):
        super().__init__(defaults, context)
        self._utils = Utils(defaults.colors_enabled)
        self._sts = boto3.client('sts')
        self._context = context
        keychain_enabled = defaults.extras.get(DISABLE_KEYRING) is not True
        vault = FiggyVault(keychain_enabled=keychain_enabled,
                           secrets_mgr=self._secrets_mgr)
        self._sts_cache: CacheManager = CacheManager(
            file_override=STS_SESSION_CACHE_PATH, vault=vault)
        self._saml_cache: CacheManager = CacheManager(
            file_override=SAML_SESSION_CACHE_PATH, vault=vault)

    @abstractmethod
    def get_assumable_roles(self) -> List[AssumableRole]:
        pass

    @abstractmethod
    def cleanup_session_cache(self):
        pass

    @abstractmethod
    def get_saml_assertion(self,
                           prompt: bool = False,
                           mfa: Optional[str] = None):
        pass

    def get_session(self,
                    env: GlobalEnvironment,
                    prompt: bool,
                    exit_on_fail=True,
                    mfa: Optional[str] = None) -> boto3.Session:
        """
        Creates a session in the specified ENV for the target role from a SAML assertion returned by SSO authentication.
        Args:
            env: GlobalEnvironment - The target environment (role + account + region) to authenticate this session for
            prompt: If prompt is set, we will not use a cached session and will generate new sessions for okta and mgmt.
            exit_on_fail: Exit the program if this session hydration fails.
            mfa: MFA to use with authentication attempt.

        returns: Hydrated session for the role + account specified by the provided GlobalEnvironment
        """

        log.info(f"Getting session, was provided MFA: {mfa}")

        # Prevent multiple requests from differing threads all generating new sessions / authing at the same time.
        # Sessions are encrypted and cached in the lockbox, so we want to re-auth once, then read from the lockbox.
        # This cannot be an instance variable; it does not work properly even though there is only one instantiated
        # SSOSessionProvider.
        lock = FileLock(f'{SAML_SESSION_CACHE_PATH}-provider.lock')
        with lock:
            log.info(f"Got lock: {SAML_SESSION_CACHE_PATH}-provider.lock")
            role_arn = f"arn:aws:iam::{env.role.account_id}:role/{env.role.role.full_name}"
            principal_arn = f"arn:aws:iam::{env.role.account_id}:saml-provider/{env.role.provider_name}"
            forced = False
            log.info(
                f"Getting session for role: {role_arn} in env: {env.role.run_env.env} "
                f"with principal: {principal_arn}")
            attempts = 0
            while True:
                try:
                    if prompt and not forced:
                        forced = True
                        raise InvalidSessionError(
                            "Forcing new session due to prompt.")

                    # One role can create N sessions across N regions.
                    creds: FiggyAWSSession = self._sts_cache.get_val(
                        env.role.cache_key())
                    log.debug(
                        f"Got creds from cache: {creds} when searching for env: {env}"
                    )

                    if creds:
                        session = boto3.Session(
                            aws_access_key_id=creds.access_key,
                            aws_secret_access_key=creds.secret_key,
                            aws_session_token=creds.token,
                            region_name=env.region)

                        if creds.expires_soon(
                        ) or not self._is_valid_session(session):
                            self._utils.validate(
                                attempts < self._MAX_ATTEMPTS,
                                f"Failed to authenticate with AWS after {attempts} attempts. Exiting."
                            )

                            attempts = attempts + 1
                            log.info(
                                "Invalid session detected in cache. Raising session error."
                            )
                            raise InvalidSessionError(
                                "Invalid Session Detected")

                        log.info("Valid SSO session returned from cache.")
                        return session
                    else:
                        raise InvalidSessionError(
                            "Forcing new session, cache is empty.")
                except (FileNotFoundError, JSONDecodeError, NoCredentialsError,
                        InvalidSessionError) as e:
                    log.info(f"SessionProvider -- got expected error: {e}")
                    try:
                        # TODO: Remove the requirement for raw SAML and instead work with b64-encoded SAML?
                        try:
                            assertion: str = self._saml_cache.get_val_or_refresh(
                                SAML_ASSERTION_CACHE_KEY,
                                self.get_saml_assertion, (prompt, mfa),
                                max_age=SAML_ASSERTION_MAX_AGE)
                            encoded_assertion = base64.b64encode(
                                assertion.encode('utf-8')).decode('utf-8')
                            response = self._sts.assume_role_with_saml(
                                RoleArn=role_arn,
                                PrincipalArn=principal_arn,
                                SAMLAssertion=encoded_assertion,
                                DurationSeconds=3500)
                        except ClientError:
                            log.info(
                                "Refreshing SAML assertion, auth failed with cached or refreshed version."
                            )
                            assertion = self.get_saml_assertion(prompt,
                                                                mfa=mfa)
                            encoded_assertion = base64.b64encode(
                                assertion.encode('utf-8')).decode('utf-8')
                            response = self._sts.assume_role_with_saml(
                                RoleArn=role_arn,
                                PrincipalArn=principal_arn,
                                SAMLAssertion=encoded_assertion,
                                DurationSeconds=3500)

                        # response['Credentials']['Expiration'] = "cleared"
                        session = FiggyAWSSession(
                            **response.get('Credentials', {}))
                        self._saml_cache.write(SAML_ASSERTION_CACHE_KEY,
                                               assertion)
                        self._sts_cache.write(env.role.cache_key(), session)
                    except (ClientError, ParamValidationError) as e:
                        if isinstance(
                                e, ParamValidationError
                        ) or "AccessDenied" == e.response['Error']['Code']:
                            if exit_on_fail:
                                self._utils.error_exit(
                                    f"Error authenticating with AWS from SAML Assertion: {e}"
                                )
                        else:
                            if exit_on_fail:
                                print(e)
                                self._utils.error_exit(
                                    f"Error getting session for role: {role_arn} -- Are you sure you have permissions?"
                                )

                        raise e
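
Illustrative sketch (not part of the original example): the file lock above serializes concurrent session refreshes so that only one caller re-authenticates while the others read the freshly cached result. Assuming FileLock comes from the `filelock` package, the core pattern is:

from filelock import FileLock

def refresh_session_once(cache_path: str):
    # A file-based lock coordinates across threads *and* processes,
    # which a plain threading.Lock or instance attribute cannot do.
    lock = FileLock(f"{cache_path}-provider.lock")
    with lock:
        # Read the cache first; only re-authenticate if the cached session
        # is missing or expired, then write it back for the other callers.
        ...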
Example #3
class Sync(ConfigCommand):
    """
    Synchronizes local application configuration state, as defined in the figgy.json file, with the existing remote
    state in the targeted environment. Also configures replication for designated shared parameters in the
    figgy.json file.
    """
    def __init__(self, ssm_init: SsmDao, config_init: ConfigDao,
                 repl_dao: ReplicationDao, colors_enabled: bool,
                 context: ConfigContext, get: Get, put: Put):
        super().__init__(sync, colors_enabled, context)
        self._config = config_init
        self._ssm = ssm_init
        self._repl = repl_dao
        self._config_path = context.ci_config_path if context.ci_config_path else Utils.find_figgy_json(
        )
        self._utils = Utils(colors_enabled)
        self._replication_only = context.replication_only
        self._errors_detected = False
        self.example = f"{self.c.fg_bl}{CLI_NAME} config {self.command_printable} " \
                       f"--env dev --config /path/to/config{self.c.rs}"
        self._get: Get = get
        self._put: Put = put
        self._FILE_PREFIX = "file://"
        self._out = Output(colors_enabled)

    def _input_config_values(self, config_keys: Set[str]) -> None:
        """
        Prompts the user for each of the passed in set of config values if any are missing from PS.
        :param config_keys: Set[string] - config values to prompt the user to add.
        """
        def validate_msg(ps_name: str):
            self._out.success(f"Name Validated: [[{ps_name}]]")
            return validate_msg

        count = 0
        for key in config_keys:
            try:
                if not self._get.get(key):
                    self._out.warn(
                        f"Fig: [[{key}]] missing from PS in environment: [[{self.run_env}]]."
                    )
                    self._put.put_param(key=key, display_hints=False)
                    count = count + 1
                else:
                    validate_msg(key)
            except ClientError:
                validate_msg(key)

        if count:
            self._out.success(
                f"[[{count}]] {'value' if count == 1 else 'values'} added successfully"
            )

    def _sync_keys(self, config_namespace: str, all_keys: Set):
        """
        Looks for stray parameters (keys) under the namespace provided and prints out information about
        missing parameters that are not defined in the figgy.json file
        Args:
            config_namespace: Namespace to query PS under.
            all_keys: All keys that exist in figgy.json to compare against.
        """
        self._out.notify(f"Checking for stray config names.")

        # Find & Prune stray keys
        ps_keys = set(
            list(
                map(lambda x: x['Name'],
                    self._ssm.get_all_parameters([config_namespace]))))
        ps_only_keys = ps_keys.difference(all_keys)

        if ps_only_keys:
            self._out.warn(
                "The following Names were found in PS but are not referenced in your configurations.\n"
                "Use the [[prune]] command to clean them up once all deployed application versions "
                "no longer use these configurations.")

        for key in ps_only_keys:
            self._out.print(f"Unused Parameter: [[{key}]]")

        if not ps_only_keys:
            self._out.success(f"No stray configurations found.")

    def _sync_repl_configs(self,
                           config_repl: Dict,
                           namespace: str = None) -> None:
        """
        Syncs replication configs from a defined "replicate_figs" block parsed from either the figgy.json file
        or the data replication config json file.
        Args:
            config_repl: Dict of KV Pairs for a repl config. Source -> Dest
            namespace: Optional namespace. Parsed from destination if not supplied.
        """
        local_configs: List[ReplicationConfig] = ReplicationConfig.from_dict(
            conf=config_repl,
            type=ReplicationType(REPL_TYPE_APP),
            run_env=self.run_env,
            namespace=namespace)
        for l_cfg in local_configs:
            # Namespace will be missing for --replication-only syncs. Otherwise, with standard syncs, namespace is passed
            # as a parameter here.
            if not namespace:
                namespace = l_cfg.namespace

            if not l_cfg.destination.startswith(namespace):
                self._out.error(
                    f"Replication config [[{l_cfg.source} -> {l_cfg.destination}]] has a destination that "
                    f"is not in your service namespace: [[{namespace}]]. This is invalid."
                )
                self._errors_detected = True
                continue

            remote_cfg = self._repl.get_config_repl(l_cfg.destination)

            # Should never happen, except when someone manually deletes source / destination without going through CLI
            missing_from_ps = self.__get_param_encrypted(l_cfg.source) is None

            if not remote_cfg or remote_cfg != l_cfg or missing_from_ps:
                try:
                    if self._can_replicate_from(
                            l_cfg.source
                    ) and not remote_cfg or missing_from_ps:
                        self._repl.put_config_repl(l_cfg)
                        self._out.print(
                            f"[[Replication added:]] {l_cfg.source} -> {l_cfg.destination}"
                        )
                    elif self._can_replicate_from(l_cfg.source) and remote_cfg:
                        self._repl.put_config_repl(l_cfg)
                        self._out.notify(f"Replication updated.")
                        self._out.warn(
                            f"Removed: {remote_cfg.source} -> {remote_cfg.destination}"
                        )
                        self._out.success(
                            f"Added: {l_cfg.source} -> {l_cfg.destination}")
                    else:
                        self._errors_detected = True
                        # print(f"{self.c.fg_rd}You do not have permission to configure replication from source:"
                        #       f"{self.c.rs} {key}")
                except ClientError:
                    self._utils.validate(
                        False,
                        f"Error detected when attempting to store replication config "
                        f"for {l_cfg.destination}")
                    self._errors_detected = True
            else:
                self._out.success(
                    f"Replication Validated: [[{l_cfg.source} -> {l_cfg.destination}]]"
                )

    def _notify_of_data_repl_orphans(self, config_repl: Dict) -> None:
        """
        Notify user of detected stray replication configurations when using the --replication-only flag.
        :param config_repl: replication configuration block.
        """
        strays: Set[ReplicationConfig] = set()
        notify = False
        for repl in config_repl:
            namespace = self._utils.parse_namespace(config_repl[repl])
            remote_cfgs = self._repl.get_all_configs(namespace)

            if remote_cfgs:
                for cfg in remote_cfgs:
                    if cfg.source not in list(config_repl.keys()) \
                            and cfg.type == REPL_TYPE_APP \
                            and not cfg.source.startswith(shared_ns) \
                            and not cfg.source.startswith(self.context.defaults.service_ns):
                        strays.add(cfg)
                        notify = True

        for stray in strays:
            print(
                f"{self.c.fg_yl}stray replication mapping detected: {self.c.rs}"
                f" {self.c.fg_bl}{stray.source} -> {stray.destination}{self.c.rs}."
            )
        if notify:
            print(
                f"To prune stray replication configs, "
                f"delete the destination, THEN the source with the `figgy config delete` command"
            )

    def _sync_replication(self, config_repl: Dict, expected_destinations: Set,
                          namespace: str):
        """
        Calls sync_repl_configs which adds/removes repl configs. Then searches for stray configurations and notifies
        the user of detected stray configurations.
        Args:
            config_repl: Dict of KV Pairs for a repl config. Source -> Dest
            expected_destinations: expected replication destinations, as defined in merge key sources,
             or shared_figs
            namespace: Namespace to sync replication configs to. E.g. /app/demo-time/
        """

        self._out.notify(f"Validating replication for all parameters.")

        self._sync_repl_configs(config_repl, namespace=namespace)
        self._out.notify(f"\nChecking for stray replication configurations.")
        remote_cfgs = self._repl.get_all_configs(namespace)
        notify = True
        if remote_cfgs:
            for cfg in remote_cfgs:
                if cfg.source not in list(config_repl.keys()) \
                        and cfg.destination not in list(config_repl.values()) \
                        and cfg.destination not in expected_destinations \
                        and (isinstance(cfg.source, list)
                             or cfg.source.startswith(shared_ns) or cfg.source.startswith(
                            self.context.defaults.service_ns)):
                    print(
                        f"{self.c.fg_rd}Stray replication mapping detected: {self.c.rs}"
                        f" {self.c.fg_bl}{cfg.source} -> {cfg.destination}{self.c.rs}."
                    )
                    notify = False
        if notify:
            self._out.success(
                f"No stray replication configs found for: {namespace}")
        else:
            self._out.warn(f"{CLEANUP_REPLICA_ORPHANS}")

    def _validate_merge_keys(self, destination: str, sources: Union[List, str],
                             namespace: str) -> bool:
        """
        Validates merge key sources & destinations
        Args:
            destination: str -> Destination of merge key replication
            sources: List or Str -> Source(s) of this merge key
            namespace: application namespace
        """
        if not destination.startswith(namespace):
            print(
                f"{self.c.fg_rd}Merge config: {self.c.rs}{self.c.fg_bl}{destination}{self.c.rs}{self.c.fg_rd} has a "
                f"destination that is not in your service namespace: "
                f"{self.c.rs}{self.c.fg_bl}{namespace}{self.c.rs}{self.c.fg_rd}. This is invalid.{self.c.rs}"
            )
            self._errors_detected = True
            return False

        if isinstance(sources, list):
            for item in sources:
                if item.startswith(MERGE_KEY_PREFIX):
                    self._utils.validate(
                        item.replace(MERGE_KEY_PREFIX,
                                     "").startswith(namespace),
                        f"Source: {item} in merge config must begin with your namespace: {namespace}."
                    )
                    self._errors_detected = True
                    return False
        else:
            self._utils.validate(
                sources.startswith(namespace),
                f"Source {sources} in merge config must begin with your namespace: {namespace}"
            )
            self._errors_detected = True
            return False

        return True

    def _sync_merge_keys(self, config_merge: Dict, namespace: str) -> None:
        """
            Pushes merge key configs into replication config table.
        Args:
            config_merge: Dict of merge_parameters parsed from the figgy.json file
            namespace: namespace for app
        """
        self._out.notify("Validating replication for all merge keys.")
        for key in config_merge:
            self._validate_merge_keys(key, config_merge[key], namespace)

            config = self._repl.get_config_repl(key)
            if not config or (config.source != config_merge[key]):
                try:
                    repl_config = ReplicationConfig(
                        destination=key,
                        run_env=self.run_env,
                        namespace=namespace,
                        source=config_merge[key],
                        type=ReplicationType(REPL_TYPE_MERGE))
                    self._repl.put_config_repl(repl_config)
                except ClientError:
                    self._utils.validate(
                        False,
                        f"Error detected when attempting to store replication config for {key}"
                    )
                    self._errors_detected = True
            else:
                self._out.success(
                    f"Merge key replication config validated: [[{key}]]")

    def _validate_expected_names(self, all_names: Set, repl_conf: Dict,
                                 merge_conf: Dict):
        self._out.notify(f"Validating shared keys exist.")
        print_resolution_message = False
        merged_confs = {**repl_conf, **merge_conf}
        for name in all_names:
            if self.__get_param_encrypted(name) is None:
                awaiting_repl = False
                for cnf in merged_confs:
                    if name == cnf or name in list(repl_conf.values()):
                        self._out.print(
                            f"\nConfig value [[{name}]] is a destination for replication, but doesn't exist"
                            f" yet. If you commit now your build could fail. This will auto-resolve itself if all of "
                            f"its dependencies exist. This will probably resolve itself in a few seconds. "
                            f"Try re-running sync.")
                        awaiting_repl = True
                        break

                if not awaiting_repl:
                    self._out.print(
                        f"Config value of [[{name}]] does not exist and is expected based on "
                        f"your defined configuration.")
                    print_resolution_message = True
                    self._errors_detected = True

        if print_resolution_message:
            self._out.error(f"{SHARED_NAME_RESOLUTION_MESSAGE}")
        else:
            self._out.success("Shared keys have been validated.")

    def _can_replicate_from(self, source: str):
        try:
            if self.__get_param_encrypted(source) is not None:
                return True
            else:
                self._out.warn(
                    f"Replication source: [[{source}]] is missing from ParameterStore. "
                    f"It must be added before config replication can be configured.\n"
                )
                self._input_config_values({source})
                return True
        except ClientError as e:
            denied = "AccessDeniedException" == e.response['Error']['Code']
            if denied and "AWSKMS; Status Code: 400;" in e.response['Error'][
                    'Message']:
                self._out.error(
                    f"You do not have access to decrypt the value of Name: [[{source}]]"
                )
            elif denied:
                self._out.error(
                    f"You do not have access to Parameter: [[{source}]]")
            else:
                raise
        return False

    def __get_param_encrypted(self, source: str) -> Optional[str]:
        try:
            return self._ssm.get_parameter_encrypted(source)
        except ClientError as e:
            denied = "AccessDeniedException" == e.response['Error']['Code']
            if denied and "AWSKMS; Status Code: 400;" in e.response['Error'][
                    'Message']:
                self._out.error(
                    f"You do not have access to decrypt the value of Name: [[{source}]]"
                )
                return None
            elif denied:
                self._utils.error_exit(
                    f"You do not have access to Parameter: {source}")
            else:
                raise

    def _validate_replication_config(self,
                                     config_repl: Dict,
                                     app_conf: bool = True):
        """
        Validates replication config blocks are valid / legal. Prevents people from setting up replication from
        disallowed namespaces, etc. Exits with error if invalid config is discovered.

        Args:
            config_repl: Dict of KV Pairs for a repl config. Source -> Dest
            app_conf: bool: T/F - True if this is an application config block in an application config (figgy.json).
                    False if other, which for now is only repl-configs for data teams.
        """
        for key in config_repl:
            if app_conf:
                self._utils.validate(
                    re.match(
                        f'^/shared/.*$|^{self.context.defaults.service_ns}/.*$',
                        key) is not None,
                    f"The SOURCE of your replication configs must begin with `/shared/` or "
                    f"`{self.context.defaults.service_ns}/`. "
                    f"{key} is non compliant.")

            self._utils.validate(
                re.match(f'^{self.context.defaults.service_ns}/.*$',
                         config_repl[key]) is not None,
                f"The DESTINATION of your replication configs must always begin with "
                f"`{self.context.defaults.service_ns}/`")

    def _find_missing_shared_figs(self, namespace: str, config_repl: Dict,
                                  shared_names: set, merge_conf: Dict):
        """
            Notifies the user if there is a parameter that has been shared into their namespace by an outside party
            but they have not added it to the `shared_figs` block of their figgy.json
        """
        all_repl_cfgs = self._repl.get_all_configs(namespace)
        for cfg in all_repl_cfgs:
            in_merge_conf = self._in_merge_value(cfg.destination, merge_conf)

            if cfg.destination not in shared_names and cfg.type == REPL_TYPE_APP \
                    and cfg.destination not in config_repl.values() and not in_merge_conf:
                print(
                    f"It appears that {self.c.fg_bl}{cfg.user}{self.c.rs} shared "
                    f"{self.c.fg_bl}{cfg.source}{self.c.rs} to {self.c.fg_bl}{cfg.destination}{self.c.rs} "
                    f"and you have not added {self.c.fg_bl}{cfg.destination}{self.c.rs} to the "
                    f"{self.c.fg_bl}{SHARED_KEY}{self.c.rs} section of your figgy.json. This is also not "
                    f"referenced in any defined merge parameter. Please add "
                    f"{self.c.fg_bl}{cfg.destination}{self.c.rs} to your figgy.json, or delete this parameter "
                    f"and the replication config with the prune command.")

    def _in_merge_value(self, dest: str, merge_conf: Dict):
        for key in merge_conf:
            value = merge_conf[key]
            # 'value' can be a list or a str; `in` handles both cases (list membership or substring match).
            for suffix in merge_suffixes:
                if f"${'{'}{dest}{suffix}{'}'}" in value:
                    return True

        return False

    def _fill_repl_conf_variables(self, repl_conf: Dict) -> Dict:
        repl_copy = {}
        all_vars = []
        for key, val in repl_conf.items():
            all_vars = all_vars + re.findall(r'\${(\w+)}', key)
            all_vars = all_vars + re.findall(r'\${(\w+)}', val)

        all_vars = set(all_vars)
        if all_vars:
            print(
                f"{self.c.fg_bl}{len(all_vars)} variables detected in: {self.c.rs}{self.c.fg_yl}"
                f"{self._config_path}{self.c.rs}\n")

        template_vals = {}
        for var in all_vars:
            print(f"Template variable: {self.c.fg_bl}{var}{self.c.rs} found.")
            input_val = Input.input(
                f"Please input a value for {self.c.fg_bl}{var}{self.c.rs}: ",
                min_length=1)
            template_vals[var] = input_val

        for key, val in repl_conf.items():
            updated_key = key
            updated_val = val

            for template_key, template_val in template_vals.items():
                updated_key = updated_key.replace(f"${{{template_key}}}",
                                                  template_val)
                updated_val = updated_val.replace(f"${{{template_key}}}",
                                                  template_val)

            repl_copy[updated_key] = updated_val

        return repl_copy

    def run_ci_sync(self) -> None:
        """
            Orchestrates a standard `sync` command WITHOUT the `--replication-only` flag set.
        """
        # Validate & parse figgy.json
        config = self._utils.get_ci_config(self._config_path)
        shared_names = set(
            self._utils.get_config_key_safe(SHARED_KEY, config, default=[]))
        repl_conf = self._utils.get_config_key_safe(REPLICATION_KEY,
                                                    config,
                                                    default={})
        repl_from_conf = self._utils.get_config_key_safe(REPL_FROM_KEY,
                                                         config,
                                                         default={})
        merge_conf = self._utils.get_config_key_safe(MERGE_KEY,
                                                     config,
                                                     default={})
        config_keys = set(
            self._utils.get_config_key_safe(CONFIG_KEY, config, default=[]))
        namespace = self._utils.get_namespace(config)
        merge_keys = set(merge_conf.keys())
        all_keys = KeyUtils.find_all_expected_names(config_keys, shared_names,
                                                    merge_conf, repl_conf,
                                                    repl_from_conf, namespace)

        repl_conf = KeyUtils.merge_repl_and_repl_from_blocks(
            repl_conf, repl_from_conf, namespace)
        # Add missing config values
        self._out.notify(
            f"Validating all configuration keys exist in ParameterStore.")
        self._input_config_values(config_keys)

        # Sync keys between PS / Local config
        print()
        self._sync_keys(namespace, all_keys)

        print()

        self._find_missing_shared_figs(namespace, repl_conf, shared_names,
                                       merge_conf)

        # Disabling requirement (for now) of replication to be in /replicated path
        # print()
        self._validate_replication_config(repl_conf, app_conf=True)

        print()
        # sync replication config
        all_shared_keys = shared_names | set(merge_conf.keys())
        self._sync_replication(repl_conf, all_shared_keys, namespace)

        print()
        self._sync_merge_keys(merge_conf, namespace)

        print()
        # validate expected keys exist
        self._validate_expected_names(all_keys, repl_conf, merge_conf)

    def run_repl_sync(self) -> None:
        """
        Orchestrates sync when the user passes in the `--replication-only` flag.
        """
        self._utils.validate(
            os.path.exists(self._config_path),
            f"Path {self._config_path} is invalid. "
            f"That file does not exist.")
        repl_conf = self._utils.get_repl_config(self._config_path)

        repl_conf = self._fill_repl_conf_variables(repl_conf)
        self._validate_replication_config(repl_conf, app_conf=False)
        self._sync_repl_configs(repl_conf)
        self._notify_of_data_repl_orphans(repl_conf)

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        print()
        if self._replication_only:
            self.run_repl_sync()
        else:
            self.run_ci_sync()

        if self._errors_detected:
            self._out.error_h2(
                'Sync failed. Please address the outputted errors.')
        else:
            self._out.success_h2('Sync completed with no errors!')
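
Illustrative sketch (not part of the original example): _fill_repl_conf_variables above boils down to collecting ${var} placeholders with a regex and substituting user-supplied values into both keys and values. A standalone version of that substitution (fill_template_sketch is a hypothetical helper):

import re

def fill_template_sketch(text: str, values: dict) -> str:
    # Find each ${var} placeholder, then replace it with the supplied value.
    for var in set(re.findall(r'\${(\w+)}', text)):
        text = text.replace(f"${{{var}}}", values.get(var, ""))
    return text

# fill_template_sketch("/app/${service}/db-url", {"service": "demo-time"})
#   -> "/app/demo-time/db-url"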
Example #4
class FiggyCLI:
    @staticmethod
    def add_arg(com_parser, com_arg, cmd, rsc):
        com_parser.add_argument(f'--{com_arg.name}', help=HELP_TEXT_MAP[com_arg],
                                action=arg_options[rsc][cmd][com_arg][action],
                                required=arg_options[rsc][cmd][com_arg][required])

    @staticmethod
    def parse_args():
        """
        Parses Figgy command line arguments and returns generic "args" object.
        """
        parser = argparse.ArgumentParser(description=RESOURCE_PARSER_DESC)
        parser.add_argument(f'--{configure.name}', help=CONFIGURE_HELP_TEXT, action=store_true)
        parser.add_argument(f'--{prompt_com.name}', help=PROMPT_HELP_TEXT, action=store_true)
        parser.add_argument(f'--{version.name}', help=VERSION_HELP_TEXT, action=store_true)
        parser.add_argument(f'--{skip_upgrade.name}', help=SKIP_UPGRADE_HELP_TEXT, action=store_true)
        parser.add_argument(f'--{upgrade.name}', help=UPGRADE_HELP_TEXT, action=store_true)

        resource_subparsers = parser.add_subparsers(title='resources', dest='resource', metavar='')

        for rsc in resource_map:
            cmd_parser = resource_subparsers.add_parser(rsc.name, help=HELP_TEXT_MAP[rsc])
            subparser = cmd_parser.add_subparsers(title=f'{rsc.name} commands', dest='command', metavar='',
                                                  help=HELP_TEXT_MAP[rsc])

            for cmd in resource_map[rsc]:
                com_parser = subparser.add_parser(cmd.name, help=HELP_TEXT_MAP[cmd])
                for com_arg, val in arg_options[rsc][cmd].items():
                    FiggyCLI.add_arg(com_parser, com_arg, cmd, rsc)

        return parser.parse_args()

    def get_profile(self, prompt: bool) -> str:
        """Returns the user's profile.

        Checks the ENV variable; if not set, checks the config file (created via the --configure option); otherwise
        prompts the user.

        Args:
            prompt: True/False - if True, users will always be prompted to input their profile

        :return: str: aws profile name
        """

        if BASTION_PROFILE_ENV_NAME in os.environ and not prompt:
            return os.environ.get(BASTION_PROFILE_ENV_NAME)
        else:
            defaults: CLIDefaults = FiggySetup.stc_get_defaults(self._is_setup_command, profile=self._profile)
            if defaults is not None and not prompt:
                return defaults.provider_config.profile
            else:
                return Input.select_aws_cli_profile()

    def get_role(self, prompt: bool, role_override: str = None, is_setup: bool = False) -> Role:
        """
        Returns the user's selected role.

        Looks up the user's default role from the config file (created via the --configure option) or an ENV variable,
        or otherwise prompts the user for this session.

        :param prompt: True/False - if True, users will always be prompted to input their role
        :param role_override: String representation of the role to get, regardless of defaults.
        :param is_setup: True/False - if True, a role override is accepted without checking the user's assumable roles.
        :return: Role: the selected role.
        """

        defaults = FiggySetup.stc_get_defaults(self._is_setup_command, profile=self._profile)
        if defaults is not None and not prompt:

            if role_override:
                if role_override in [role.role.role for role in defaults.assumable_roles] or is_setup:
                    return Role(role=role_override)
                else:
                    self._utils.error_exit(f"Invalid role override provided of: {role_override}. "
                                           f"You do not have permissions to assume this role. Contact your system "
                                           f"administrator to receive permissions then rerun `{CLI_NAME} "
                                           f"--{configure.name}`.")

            return defaults.role
        else:
            roles = self.__setup().get_assumable_roles()
            role_names = list(set([x.role.role for x in roles]))
            return Input.select_role(role_names)

    def get_colors_enabled(self) -> bool:
        """
        Defaults to true, unless user ran --configure and disabled colored output
        Returns: True/False
        """

        defaults = FiggySetup.stc_get_defaults(skip=self._is_setup_command, profile=self._profile)
        if defaults is not None:
            return defaults.colors_enabled
        else:
            return Utils.not_windows()

    def get_command(self) -> Command:
        """
        Maps the user's passed-in text command to one of our defined 'command' objects used in the code.

        Returns: command object.
        """
        return self.__command_factory().instance()

    def find_assumable_role(self, env: RunEnv, role: Role, skip: bool = False, profile=None) -> AssumableRole:
        """
        Looks up the appropriate assumable role based on the user's selected defaults or command-line overrides for
        --env, --role, and --profile.
        """

        if profile:
            return AssumableRole.from_profile(profile)

        assumable_roles: List[AssumableRole] = FiggySetup.stc_get_defaults(skip=skip).assumable_roles
        matching_role = [ar for ar in assumable_roles if ar.role == role and ar.run_env == env]
        if matching_role:
            matching_role = matching_role.pop()
        else:
            if not skip:
                matching_role = None
            else:
                matching_role = AssumableRole.default_from_role_env(role, env)

        return matching_role

    def __setup(self) -> FiggySetup:
        if not self._setup:
            self._setup = FiggySetup(self._context)

        return self._setup

    def __command_factory(self) -> CommandFactory:
        if not self._command_factory:
            self._command_factory = CommandFactory(self._context,
                                                   FiggySetup.stc_get_defaults(skip=self._is_setup_command,
                                                                               profile=self._profile))

        return self._command_factory

    @staticmethod
    def is_setup_command(args):
        """
        Returns True for 'special' commands that configure figgy itself or follow non-normal execution paths.
        Needed to skip past steps that are not necessary because figgy isn't set up yet, or to support a special
        use case (like sandbox logins).
        """

        return Utils.is_set_true(configure, args) \
               or Utils.command_set(sandbox, args) \
               or Utils.is_set_true(version, args) \
               or Utils.attr_exists(profile, args) \
               or Utils.is_set_true(upgrade, args)

    @staticmethod
    def validate_environment(defaults: CLIDefaults):
        EnvironmentValidator(defaults).validate_all()

    def __init__(self, args):
        """
        Initializes global shared properties
        :param args: Arguments passed in from user, collected from ArgParse
        """
        self._profile = None
        self._command_factory = None
        self._setup = None
        self._is_setup_command: bool = FiggyCLI.is_setup_command(args)
        self._utils = Utils(self.get_colors_enabled())
        self._profile = Utils.attr_if_exists(profile, args)
        self._defaults: CLIDefaults = FiggySetup.stc_get_defaults(skip=self._is_setup_command, profile=self._profile)
        self._run_env = self._defaults.run_env
        role_override = Utils.attr_if_exists(role, args)
        self._role: Role = self.get_role(args.prompt, role_override=role_override, is_setup=self._is_setup_command)

        FiggyCLI.validate_environment(self._defaults)

        if not self._is_setup_command:
            if not hasattr(args, 'env') or args.env is None:
                print(f"{EMPTY_ENV_HELP_TEXT}{self._run_env.env}\n")
            else:
                Utils.stc_validate(args.env in self._defaults.valid_envs,
                                   f'{ENV_HELP_TEXT} {self._defaults.valid_envs}. Provided: {args.env}')
                self._run_env = RunEnv(env=args.env)

        self._utils.validate(Utils.attr_exists(configure, args) or Utils.attr_exists(command, args),
                             f"No command found. Proper format is `{CLI_NAME} <resource> <command> --option(s)`")

        self._assumable_role = self.find_assumable_role(self._run_env, self._role, skip=self._is_setup_command,
                                                        profile=self._profile)

        command_name = Utils.attr_if_exists(command, args)
        resource_name = Utils.attr_if_exists(resource, args)

        found_command: CliCommand = Utils.find_command(str(command_name))
        found_resource: CliCommand = Utils.find_resource(str(resource_name))

        self._context: FiggyContext = FiggyContext(self.get_colors_enabled(), found_resource, found_command,
                                                   self._run_env, self._assumable_role, args)
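
Illustrative sketch (not part of the original example): parse_args above builds a two-level argparse tree, one subparser per resource and, beneath each resource, one per command. Stripped of the figgy-specific maps, the structure looks roughly like this (the resource/command/option names are made up for illustration):

import argparse

parser = argparse.ArgumentParser(description="figgy")
resources = parser.add_subparsers(title="resources", dest="resource", metavar="")

config_parser = resources.add_parser("config", help="Manage configurations")
commands = config_parser.add_subparsers(title="config commands", dest="command", metavar="")

sync_parser = commands.add_parser("sync", help="Sync local figgy.json with remote state")
sync_parser.add_argument("--env", help="Target environment")

# parser.parse_args(["config", "sync", "--env", "dev"])
#   -> Namespace(resource='config', command='sync', env='dev')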
Example #5
class Restore(ConfigCommand):
    def __init__(self, ssm_init: SsmDao, kms_init: KmsService,
                 config_init: ConfigDao, repl_dao: ReplicationDao,
                 audit_dao: AuditDao, cfg_view: RBACLimitedConfigView,
                 colors_enabled: bool, context: ConfigContext,
                 config_completer: WordCompleter, delete: Delete):
        super().__init__(restore, colors_enabled, context)
        self._config_context = context
        self._ssm = ssm_init
        self._kms = kms_init
        self._config = config_init
        self._repl = repl_dao
        self._audit = audit_dao
        self._cfg_view = cfg_view
        self._utils = Utils(colors_enabled)
        self._point_in_time = context.point_in_time
        self._config_completer = config_completer
        self._delete = delete
        self._out = Output(colors_enabled=colors_enabled)

    def _client_exception_msg(self, item: RestoreConfig, e: ClientError):
        if "AccessDeniedException" == e.response["Error"]["Code"]:
            self._out.error(
                f"\n\nYou do not have permissions to restore config at the path: [[{item.ps_name}]]"
            )
        else:
            self._out.error(
                f"Error message: [[{e.response['Error']['Message']}]]")

    def get_parameter_arn(self, parameter_name: str):
        account_id = self._ssm.get_parameter(ACCOUNT_ID_PATH)

        return f"arn:aws:ssm:us-east-1:{account_id}:parameter{parameter_name}"

    def _restore_param(self) -> None:
        """
        Allows the user to look up a parameter store entry's history from Dynamo and restore a selected version, if desired.
        """

        table_entries = []

        ps_name = prompt(f"Please input PS key to restore: ",
                         completer=self._config_completer)

        if self._is_replication_destination(ps_name):
            repl_conf = self._repl.get_config_repl(ps_name)
            self._print_cannot_restore_msg(repl_conf)
            exit(0)

        self._out.notify(
            f"\n\nAttempting to retrieve all restorable values of [[{ps_name}]]"
        )
        items: List[RestoreConfig] = self._audit.get_parameter_restore_details(
            ps_name)

        if len(items) == 0:
            self._out.warn(
                "No restorable values were found for this parameter.")
            return

        for i, item in enumerate(items):
            date = time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.localtime(item.ps_time / 1000))

            # we need to decrypt the value, if encrypted, in order to show it to the user
            if item.ps_key_id:
                item.ps_value = self._kms.decrypt_with_context(
                    item.ps_value,
                    {"PARAMETER_ARN": self.get_parameter_arn(item.ps_name)},
                )
            table_entries.append([i, date, item.ps_value, item.ps_user])

        self._out.print(
            tabulate(
                table_entries,
                headers=["Item #", "Time Created", "Value", "User"],
                tablefmt="grid",
                numalign="center",
                stralign="left",
            ))

        valid_options = [f'{x}' for x in range(0, len(items))]
        choice = int(
            Input.select("Select an item number to restore: ",
                         valid_options=valid_options))
        item = items[choice] if items[choice] else None

        restore = Input.y_n_input(
            f"Are you sure you want to restore item #{choice} and have it be the latest version? ",
            default_yes=False)

        if not restore:
            self._utils.warn_exit("Restore aborted.")

        key_id = None if item.ps_type == "String" else item.ps_key_id

        try:
            self._ssm.set_parameter(item.ps_name,
                                    item.ps_value,
                                    item.ps_description,
                                    item.ps_type,
                                    key_id=key_id)

            current_value = self._ssm.get_parameter(item.ps_name)
            if current_value == item.ps_value:
                self._out.success("Restore was successful")
            else:
                self._out.error(
                    "Latest version in parameter store doesn't match what we restored."
                )
                self._out.print(
                    f"Current value: [[{current_value}]].  Expected value: [[{item.ps_value}]]"
                )

        except ClientError as e:
            self._client_exception_msg(item, e)

    def _decrypt_if_applicable(self, entry: RestoreConfig) -> str:
        if entry.ps_type != "String":
            return self._kms.decrypt_with_context(
                entry.ps_value,
                {"PARAMETER_ARN": self.get_parameter_arn(entry.ps_name)})
        else:
            return entry.ps_value

    def _is_replication_destination(self, ps_name: str):
        return self._repl.get_config_repl(ps_name)

    def _restore_params_to_point_in_time(self):
        """
        Restores parameters to their state at a point in time provided by the user.
        Replays parameter history to that point-in-time so versioning remains intact.
        """

        repl_destinations = []
        ps_prefix = Input.input(
            f"Which parameter store prefix would you like to recursively restore? "
            f"(e.g., /app/demo-time): ",
            completer=self._config_completer)

        authed_nses = self._cfg_view.get_authorized_namespaces()
        valid_prefix = (
            [True for ns in authed_nses if ps_prefix.startswith(ns)]
            or [False])[0]
        self._utils.validate(
            valid_prefix,
            f"Selected namespace must begin with a 'Fig Tree' you have access to. "
            f"Such as: {authed_nses}")

        time_selected, time_converted = None, None
        try:
            time_selected = Input.input(
                "Seconds since epoch to restore latest values from: ")
            time_converted = datetime.fromtimestamp(float(time_selected))
        except ValueError as e:
            if "out of range" in e.args[0]:
                try:
                    time_converted = datetime.fromtimestamp(
                        float(time_selected) / 1000)
                except ValueError as e:
                    self._utils.error_exit(
                        "Make sure you're using a format of either seconds or milliseconds since epoch."
                    )
            elif "could not convert" in e.args[0]:
                self._utils.error_exit(
                    f"The format of this input should be seconds since epoch. (e.g., 1547647091)\n"
                    f"Try using: https://www.epochconverter.com/ to convert your date to this "
                    f"specific format.")
            else:
                self._utils.error_exit(
                    "An unexpected exception triggered: "
                    f"'{e}' while trying to convert {time_selected} to 'datetime' format."
                )

        self._utils.validate(
            time_converted is not None,
            f"`{CLI_NAME}` encountered an error parsing your input for "
            f"target rollback time.")
        keep_going = Input.y_n_input(
            f"Are you sure you want to restore all figs under {ps_prefix} values to their state at: "
            f"{time_converted}? ",
            default_yes=False)

        if not keep_going:
            self._utils.warn_exit("Aborting restore due to user selection")

        ps_history: PSHistory = self._audit.get_parameter_history_before_time(
            time_converted, ps_prefix)
        restore_count = len(ps_history.history.values())

        if len(ps_history.history.values()) == 0:
            self._utils.warn_exit(
                "No results found for time range.  Aborting.")

        last_item_name = 'Unknown'
        try:
            for item in ps_history.history.values():
                last_item_name = item.name

                if self._is_replication_destination(item.name):
                    repl_destinations.append(item.name)
                    continue

                if item.cfg_at(time_converted).ps_action == SSM_PUT:
                    cfgs_before: List[RestoreConfig] = item.cfgs_before(
                        time_converted)
                    cfg_at: RestoreConfig = item.cfg_at(time_converted)
                    ssm_value = self._ssm.get_parameter(item.name)
                    dynamo_value = self._decrypt_if_applicable(cfg_at)

                    if ssm_value != dynamo_value:
                        if ssm_value is not None:
                            self._ssm.delete_parameter(item.name)

                        for cfg in cfgs_before:
                            decrypted_value = self._decrypt_if_applicable(cfg)
                            self._out.print(
                                f"\nRestoring: [[{cfg.ps_name}]] \nValue: [[{decrypted_value}]]"
                                f"\nDescription: [[{cfg.ps_description}]]\nKMS Key: "
                                f"[[{cfg.ps_key_id if cfg.ps_key_id else '[[No KMS Key Specified]]'}]]"
                            )
                            self._out.notify(
                                f"Replaying version: [[{cfg.ps_version}]] of [[{cfg.ps_name}]]"
                            )
                            print()

                            self._ssm.set_parameter(cfg.ps_name,
                                                    decrypted_value,
                                                    cfg.ps_description,
                                                    cfg.ps_type,
                                                    key_id=cfg.ps_key_id)
                    else:
                        self._out.success(
                            f"Config: {item.name} is current. Skipping.")
                else:
                    # This item must have been a delete, which means this config didn't exist at that time.
                    self._out.print(
                        f"Checking if [[{item.name}]] exists. It was previously deleted."
                    )
                    self._prompt_delete(item.name)
        except ClientError as e:
            if "AccessDeniedException" == e.response["Error"]["Code"]:
                self._utils.error_exit(
                    f"\n\nYou do not have permissions to restore config at the path:"
                    f" [[{last_item_name}]]")
            else:
                self._utils.error_exit(
                    f"Caught error when attempting restore. {e}")

        for item in repl_destinations:
            cfg = self._repl.get_config_repl(item)
            self._print_cannot_restore_msg(cfg)

        print("\n\n")
        if not repl_destinations:
            self._out.success_h2(
                f"[[{restore_count}]] configurations restored successfully!")
        else:
            self._out.warn(
                f"\n\n[[{len(repl_destinations)}]] configurations were not restored because they are shared "
                f"from other destinations. To restore them, restore their sources."
            )
            self._out.success(
                f"{restore_count - len(repl_destinations)} configurations restored successfully."
            )

    def _print_cannot_restore_msg(self, repl_conf: ReplicationConfig):
        self._out.print(
            f"Parameter: [[{repl_conf.destination}]] is a shared parameter. ")
        self._out.print(f"Shared From: [[{repl_conf.source}]]")
        self._out.print(f"Shared by: [[{repl_conf.user}]]")
        self._out.warn(
            f"To restore this parameter you should restore the source: {repl_conf.source} instead!"
        )
        print()

    def _prompt_delete(self, name):
        param = self._ssm.get_parameter_encrypted(name)
        if param:
            selection = Input.y_n_input(
                f"PS Name: {name} did not exist at this restore time."
                f" Delete it? ",
                default_yes=False)

            if selection:
                self._delete.delete_param(name)

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        if self._point_in_time:
            self._restore_params_to_point_in_time()
        else:
            self._restore_param()
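
Illustrative sketch (not part of the original example): the point-in-time prompt above accepts either seconds or milliseconds since epoch; a millisecond-sized value makes datetime.fromtimestamp fail with an out-of-range error on most platforms, so the code retries with the value divided by 1000. That fallback in isolation (to_datetime_sketch is a hypothetical helper):

from datetime import datetime

def to_datetime_sketch(epoch_str: str) -> datetime:
    value = float(epoch_str)  # raises ValueError for non-numeric input
    try:
        return datetime.fromtimestamp(value)            # treat as seconds since epoch
    except (ValueError, OverflowError, OSError):
        return datetime.fromtimestamp(value / 1000)     # fall back to milliseconds

# to_datetime_sketch("1547647091")    -> 2019-01-16 ...
# to_datetime_sketch("1547647091000") -> the same instant, given in milliseconds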
Example #6
class Share(ConfigCommand):
    def __init__(self, ssm_init, repl_init: ReplicationDao,
                 config_completer_init, colors_enabled: bool,
                 config_context: ConfigContext):
        super().__init__(share, colors_enabled, config_context)

        self._ssm = ssm_init
        self._repl = repl_init
        self._config_completer = config_completer_init
        self._utils = Utils(colors_enabled)
        self._out = Output(colors_enabled)

    def _share_param(self):
        """
        Enables sharing of parameters from one namespace to the /app/service-name/replicated namespace.
        """

        source_name_msg = [(f'class:{self.c.bl}',
                            'Input the PS Name you wish to share: ')]

        dest_name_msg = [(f'class:{self.c.bl}',
                          'Input the destination of the shared value: ')]

        share_another = True
        while share_another:
            print()
            key = prompt(source_name_msg,
                         completer=self._config_completer,
                         style=FIGGY_STYLE)
            if re.match(f"{self.context.defaults.service_ns}/.*", key):
                self._out.error(
                    f"The SOURCE of replication may not be from within the "
                    f"[[{self.context.defaults.service_ns}/]] namespace.\n")
                continue

            dest = prompt(dest_name_msg,
                          completer=self._config_completer,
                          style=FIGGY_STYLE)
            key_value = None
            try:
                key_value = self._ssm.get_parameter(key)
            except ClientError as e:
                denied = "AccessDeniedException" == e.response['Error']['Code']
                if denied and "AWSKMS; Status Code: 400;" in e.response[
                        'Error']['Message']:
                    self._out.error(
                        f"You do not have access to decrypt the value of Name: [[{key}]]"
                    )
                elif denied:
                    self._out.error(
                        f"You do not have access to Name: [[{key}]]")
                else:
                    raise

            self._utils.validate(
                key_value is not None,
                "Either the Name you provided to share does not exist or you do not have the "
                "proper permissions to share the provided Name.")

            namespace = self._utils.parse_namespace(dest)
            repl_config = ReplicationConfig(destination=dest,
                                            env_alias=self.run_env.env,
                                            namespace=namespace,
                                            source=key,
                                            type=ReplicationType.APP.value)
            self._repl.put_config_repl(repl_config)
            self._out.success(f"[[{key}]] successfully shared.")
            to_continue = input(f"Share another? (y/N): ")
            to_continue = to_continue if to_continue != '' else 'n'
            share_another = to_continue.lower() == "y"

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        self._share_param()
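
Share._share_param above loops until the user declines to share another value and rejects any source that already sits inside the replicated namespace. A stripped-down sketch of that control flow, where plain input() and a hard-coded namespace stand in for figgy's prompt helpers and self.context.defaults.service_ns:

# Minimal sketch of the share loop's control flow; '/replicated' is an assumed
# stand-in for the service namespace and no real replication config is written.
import re

SERVICE_NS = '/replicated'

def share_loop():
    share_another = True
    while share_another:
        key = input('Input the PS Name you wish to share: ')
        if re.match(f"{SERVICE_NS}/.*", key):
            print(f"The SOURCE of replication may not be from within {SERVICE_NS}/.")
            continue
        dest = input('Input the destination of the shared value: ')
        print(f"Would store replication config: {key} -> {dest}")
        to_continue = input('Share another? (y/N): ') or 'n'
        share_another = to_continue.lower() == 'y'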
class BastionSessionProvider(SessionProvider):
    _MAX_ATTEMPTS = 5

    def __init__(self, defaults: CLIDefaults, context: FiggyContext):
        super().__init__(defaults, context)
        self.__id = uuid.uuid4()
        self._utils = Utils(defaults.colors_enabled)
        self.__bastion_session = boto3.session.Session(
            profile_name=self._defaults.provider_config.profile_name)
        self._ssm = None
        self._sts = None
        self._iam_client = None
        self._iam = None
        keychain_enabled = defaults.extras.get(DISABLE_KEYRING) is not True
        vault = FiggyVault(keychain_enabled=keychain_enabled,
                           secrets_mgr=self._secrets_mgr)
        self._sts_cache: CacheManager = CacheManager(
            file_override=STS_SESSION_CACHE_PATH, vault=vault)
        self._role_name_prefix = os.getenv(FIGGY_ROLE_PREFIX_OVERRIDE_ENV,
                                           FIGGY_ROLE_NAME_PREFIX)

    def __get_iam_user(self):
        self._defaults.user = self.__get_iam_resource().CurrentUser().user_name
        return self._defaults.user

    def __get_iam_resource(self):
        if not self._iam:
            self._iam = self.__bastion_session.resource('iam')

        return self._iam

    def __get_iam_client(self):
        if not self._iam_client:
            self._iam_client = self.__bastion_session.client('iam')

        return self._iam_client

    def __get_ssm(self):
        if not self._ssm:
            self._ssm = SsmDao(self.__bastion_session.client('ssm'))
        return self._ssm

    def __get_sts(self):
        if not self._sts:
            self._sts = self.__bastion_session.client('sts')
        return self._sts

    def get_mfa_serial(self) -> Optional[str]:
        response = self.__get_iam_client().list_mfa_devices(
            UserName=self._defaults.user)
        devices = response.get('MFADevices', [])
        log.info(f'Found MFA devices: {devices}.')
        return devices[0].get('SerialNumber') if devices else None

    def get_session(self,
                    env: GlobalEnvironment,
                    prompt: bool,
                    exit_on_fail=True,
                    mfa: Optional[str] = None) -> boto3.Session:
        forced = False
        log.info(
            f"Getting session for role: {env.role.role_arn} in env: {env.role.run_env.env}"
        )
        attempts = 0
        while True:
            try:
                if prompt and not forced:
                    forced = True
                    raise InvalidSessionError(
                        "Forcing new session due to prompt.")

                creds: FiggyAWSSession = self._sts_cache.get_val(
                    env.role.cache_key())

                if creds:
                    session = boto3.Session(
                        aws_access_key_id=creds.access_key,
                        aws_secret_access_key=creds.secret_key,
                        aws_session_token=creds.token,
                        region_name=env.region)

                    if creds.expires_soon() or not self._is_valid_session(session):
                        self._utils.validate(
                            attempts < self._MAX_ATTEMPTS,
                            f"Failed to authenticate with AWS after {attempts} attempts. Exiting. "
                        )

                        attempts = attempts + 1
                        log.info(
                            "Invalid session detected in cache. Raising session error."
                        )
                        raise InvalidSessionError("Invalid Session Detected")

                    log.info("Valid bastion SSO session returned from cache.")
                    return session
                else:
                    raise InvalidSessionError(
                        "Forcing new session, cache is empty.")
            except (FileNotFoundError, NoCredentialsError,
                    InvalidSessionError) as e:
                try:
                    if self._defaults.mfa_enabled:
                        self._defaults.mfa_serial = self.get_mfa_serial()
                        color = Utils.default_colors() if self._defaults.colors_enabled else None

                        if not mfa:
                            if self._context.command == commands.ui and not self._defaults.auto_mfa:
                                raise CannotRetrieveMFAException(
                                    "Cannot retrieve MFA, UI mode is activated."
                                )
                            else:
                                mfa = self._secrets_mgr.get_next_mfa(self._defaults.user) \
                                    if self._defaults.auto_mfa else Input.get_mfa(display_hint=True, color=color)

                        response = self.__get_sts().assume_role(
                            RoleArn=env.role.role_arn,
                            RoleSessionName=Utils.sanitize_session_name(
                                self._defaults.user),
                            DurationSeconds=self._defaults.session_duration,
                            SerialNumber=self._defaults.mfa_serial,
                            TokenCode=mfa)
                    else:
                        response = self.__get_sts().assume_role(
                            RoleArn=env.role.role_arn,
                            RoleSessionName=Utils.sanitize_session_name(
                                self._defaults.user),
                            DurationSeconds=self._defaults.session_duration)

                    session = FiggyAWSSession(
                        **response.get('Credentials', {}))
                    log.info(f"Got session response: {response}")
                    self._sts_cache.write(env.role.cache_key(), session)
                except (ClientError, ParamValidationError) as e:
                    if isinstance(e, ParamValidationError) or \
                            e.response['Error']['Code'] == "AccessDenied":
                        if exit_on_fail:
                            self._utils.error_exit(
                                f"Error authenticating with AWS from Bastion Profile:"
                                f" {self._defaults.provider_config.profile_name}: {e}"
                            )
                    else:
                        if exit_on_fail:
                            log.error(
                                f"Failed to authenticate due to error: {e}")
                            self._utils.error_exit(
                                f"Error getting session for role: {env.role.role_arn} "
                                f"-- Are you sure you have permissions?")

                    raise e

    def get_assumable_roles(self):
        if self.is_role_session():
            user_roles = [self._defaults.role.role]
        else:
            ROLE_PATH = f'/figgy/users/{self.__get_iam_user()}/roles'
            user_roles = self.__get_ssm().get_parameter(ROLE_PATH)
            self._utils.stc_validate(
                user_roles is not None and user_roles != "[]",
                "Something is wrong with your user's configuration with Figgy. "
                "Unable to find any eligible roles for your user. Please contact your"
                " administrator.")

            user_roles = json.loads(user_roles)

        environments = self.__get_ssm().get_all_parameters(
            [PS_FIGGY_ACCOUNTS_PREFIX], option='OneLevel')
        names: List[str] = [env.get('Name') for env in environments]
        parameters = self.__get_ssm().get_parameter_values(names)
        assumable_roles: List[AssumableRole] = []

        for param in parameters:
            env_name = param.get('Name').split('/')[-1]
            account_id = param.get('Value')

            for role in user_roles:
                assumable_roles.append(
                    AssumableRole(
                        run_env=RunEnv(env=env_name, account_id=account_id),
                        role=Role(
                            role=role,
                            full_name=f'{FIGGY_ROLE_NAME_PREFIX}{env_name}-{role}'),
                        account_id=account_id,
                        provider_name=Provider.AWS_BASTION.value,
                        profile=None))

        return assumable_roles

    def is_role_session(self):
        """
        For sandbox demos, where users aren't coming from user accounts, we want to skip looking up user -> role.
        :return: bool - Is this session originating from a role?
        """
        creds = self.__bastion_session.get_credentials().get_frozen_credentials()

        return hasattr(creds, 'token') and creds.token is not None

    def cleanup_session_cache(self):
        self._sts_cache.wipe_cache()
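
On a cache miss or an expired session, BastionSessionProvider.get_session above calls STS assume_role (adding an MFA serial and token when MFA is enabled) and builds a boto3.Session from the returned credentials. A hedged sketch of just that step, with placeholder role ARN and session name supplied by the caller:

# Sketch of the assume-role step; the helper name and defaults are assumptions,
# but the STS and Session calls match the standard boto3 APIs used above.
import boto3
from typing import Optional

def assume_role_session(role_arn: str, session_name: str, duration: int = 3600,
                        mfa_serial: Optional[str] = None,
                        mfa_token: Optional[str] = None) -> boto3.Session:
    sts = boto3.client('sts')
    kwargs = dict(RoleArn=role_arn, RoleSessionName=session_name,
                  DurationSeconds=duration)
    if mfa_serial and mfa_token:
        # Mirrors the MFA branch above: pass the device serial and current code.
        kwargs.update(SerialNumber=mfa_serial, TokenCode=mfa_token)

    creds = sts.assume_role(**kwargs)['Credentials']
    return boto3.Session(aws_access_key_id=creds['AccessKeyId'],
                         aws_secret_access_key=creds['SecretAccessKey'],
                         aws_session_token=creds['SessionToken'])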
Example #8
0
class Login(HelpCommand, ABC):
    """
    Log the user into every possible environment they have access to. Sessions are cached.
    This improves figgy performance throughout the day.
    """
    def __init__(self, help_context: HelpContext, figgy_setup: FiggySetup,
                 figgy_context: FiggyContext):
        super().__init__(login, Utils.not_windows(), help_context)
        self._setup = figgy_setup
        self._defaults: CLIDefaults = figgy_setup.get_defaults()
        self._figgy_context = figgy_context
        self._utils = Utils(self._defaults.colors_enabled)
        self._aws_cfg = AWSConfig(color=self.c)
        self._out = Output(self._defaults.colors_enabled)

        self.example = f"\n\n{self.c.fg_bl}{CLI_NAME} {login.name} \n" \
                       f"{self.c.rs}{self.c.fg_yl}  --or--{self.c.rs}\n" \
                       f"{self.c.fg_bl}{CLI_NAME} {login.name} {sandbox.name}{self.c.rs}"

    def login(self):
        self._utils.validate(
            self._defaults.provider.name in Provider.names(),
            f"You cannot login until you've configured Figgy. Please run `{CLI_NAME}` --configure"
        )
        provider = SessionProviderFactory(self._defaults,
                                          self._figgy_context).instance()
        assumable_roles: List[AssumableRole] = provider.get_assumable_roles()
        self._out.print(
            f"{self.c.fg_bl}Found {len(assumable_roles)} possible logins. Logging in...{self.c.rs}"
        )

        for role in assumable_roles:
            provider.get_session_and_role(role, False)
            self._out.print(
                f"Login successful for {role.role} in environment: {role.run_env}"
            )

        self._out.print(
            f"{self.c.fg_gr}Login successful. All sessions are cached.{self.c.rs}"
        )

    def login_sandbox(self):
        """
        If user provides --role flag, skip role & env selection for a smoother user experience.
        """
        EnvironmentValidator(self._defaults).validate_environment_variables()

        Utils.wipe_vaults() or Utils.wipe_defaults() or Utils.wipe_config_cache()

        self._out.print(
            f"{self.c.fg_bl}Logging you into the Figgy Sandbox environment.{self.c.rs}"
        )
        user = Input.input("Please input a user name: ", min_length=2)
        colors = Input.select_enable_colors()

        # Prompt user for role if --role not provided
        if commands.role not in self.context.options:
            role = Input.select("\n\nPlease select a role to impersonate: ",
                                valid_options=SANDBOX_ROLES)
        else:
            role = self.context.role.role
            self._utils.validate(
                role in SANDBOX_ROLES,
                f"Provided role: >>>`{role}`<<< is not a valid sandbox role."
                f" Please choose from {SANDBOX_ROLES}")

        params = {'role': role, 'user': user}
        result = requests.get(GET_SANDBOX_CREDS_URL, params=params)

        if result.status_code != 200:
            self._utils.error_exit(
                "Unable to get temporary credentials from the Figgy sandbox. If this problem "
                f"persists please notify us on our GITHUB: {FIGGY_GITHUB}")

        data = result.json()
        response = SandboxLoginResponse(**data)
        self._aws_cfg.write_credentials(
            access_key=response.AWS_ACCESS_KEY_ID,
            secret_key=response.AWS_SECRET_ACCESS_KEY,
            token=response.AWS_SESSION_TOKEN,
            region=FIGGY_SANDBOX_REGION,
            profile_name=FIGGY_SANDBOX_PROFILE)

        defaults = CLIDefaults.sandbox(user=user, role=role, colors=colors)
        self._setup.save_defaults(defaults)

        run_env = RunEnv(env='dev', account_id=SANDBOX_DEV_ACCOUNT_ID) \
            if self.context.role else None

        config_mgr = ConfigManager.figgy()
        config_mgr.set(Config.Section.Bastion.PROFILE, FIGGY_SANDBOX_PROFILE)
        defaults = self._setup.configure_extras(defaults)
        defaults = self._setup.configure_roles(current_defaults=defaults,
                                               role=Role(role=role),
                                               run_env=run_env)
        defaults = self._setup.configure_figgy_defaults(defaults)
        self._setup.save_defaults(defaults)

        self._out.success(
            f"\nLogin successful. Your sandbox session will last for [[1 hour]]."
        )

        self._out.print(
            f"\nIf your session expires, you may rerun `{CLI_NAME} login sandbox` to get another sandbox session. "
            f"\nAll previous figgy sessions have been disabled, you'll need to run {CLI_NAME} "
            f"--configure to leave the sandbox.")

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        if self.context.command == login:
            self.login()
        elif self.context.command == sandbox:
            Utils.wipe_vaults() or Utils.wipe_defaults()
            self.login_sandbox()
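
For reference, login_sandbox above fetches temporary credentials over HTTP and writes them into a local AWS profile. A hedged sketch of just the fetch: the key names mirror the SandboxLoginResponse fields used above, while the URL and error handling are simplified placeholders:

# Sketch of the sandbox credential fetch; the endpoint URL is a placeholder for
# GET_SANDBOX_CREDS_URL and writing the AWS profile is left to the caller.
import requests

def fetch_sandbox_creds(url: str, user: str, role: str) -> dict:
    result = requests.get(url, params={'role': role, 'user': user})
    if result.status_code != 200:
        raise RuntimeError(f"Sandbox credential request failed: {result.status_code}")
    data = result.json()
    return {
        'aws_access_key_id': data['AWS_ACCESS_KEY_ID'],
        'aws_secret_access_key': data['AWS_SECRET_ACCESS_KEY'],
        'aws_session_token': data['AWS_SESSION_TOKEN'],
    }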