Example #1
0
async def retrieve_accounts_from_aws_organizations() -> CloudAccountModelArray:
    """
    Poll AWS Organizations for the Account ID -> Account Name mapping.

    Iterates every configured Organizations master account, assumes the
    configured role there, pages through ``list_accounts``, and converts each
    account into a CloudAccountModel.

    :return: CloudAccountModelArray of every discovered account.
    :raises MissingConfigurationValue: if the master account id or the role to
        assume cannot be determined from configuration.
    """
    discovered_accounts = []
    for org_config in config.get("cache_accounts_from_aws_organizations", []):
        master_account_id = org_config.get("organizations_master_account_id")
        assumable_role = org_config.get(
            "organizations_master_role_to_assume",
            config.get("policies.role_name"),
        )
        if not master_account_id:
            raise MissingConfigurationValue(
                "Your AWS Organizations Master Account ID is not specified in configuration. "
                "Unable to sync accounts from "
                "AWS Organizations"
            )
        if not assumable_role:
            raise MissingConfigurationValue(
                "ConsoleMe doesn't know what role to assume to retrieve account information "
                "from AWS Organizations. please set the appropriate configuration value."
            )

        # Build an Organizations client in the master account by assuming the
        # configured role.
        client = await sync_to_async(boto3_cached_conn)(
            "organizations",
            account_number=master_account_id,
            assume_role=assumable_role,
            session_name="ConsoleMeOrganizationsSync",
        )
        paginator = await sync_to_async(client.get_paginator)("list_accounts")
        page_iterator = await sync_to_async(paginator.paginate)()

        for page in page_iterator:
            for account in page["Accounts"]:
                discovered_accounts.append(
                    CloudAccountModel(
                        id=account["Id"],
                        name=account["Name"],
                        email=account["Email"],
                        status=account["Status"].lower(),
                        type="aws",
                        sync_enabled=True,  # TODO: Check for tag to disable sync?
                    )
                )

    return CloudAccountModelArray(accounts=discovered_accounts)
Example #2
0
async def populate_oidc_config():
    """
    Build the OIDC configuration used for user authentication.

    Prefers the provider's metadata endpoint when configured; otherwise falls
    back to explicitly configured endpoints. Also fetches the JWKS document
    and pre-builds per-``kid`` signing keys for JWT validation.

    :return: dict containing endpoints, client credentials, ``jwks_data``,
        and ``jwt_keys`` (key id -> key object).
    :raises MissingConfigurationValue: if neither the metadata URL nor the
        endpoint configuration is present, or the OIDC secrets are missing.
    """
    http_client = tornado.httpclient.AsyncHTTPClient()
    metadata_url = config.get("get_user_by_oidc_settings.metadata_url")

    if metadata_url:
        # Discover endpoints from the provider's metadata document.
        res = await http_client.fetch(
            metadata_url,
            method="GET",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
            },
        )
        oidc_config = json.loads(res.body)
    else:
        # Fall back to explicitly configured endpoints.
        authorization_endpoint = config.get(
            "get_user_by_oidc_settings.authorization_endpoint")
        token_endpoint = config.get("get_user_by_oidc_settings.token_endpoint")
        jwks_uri = config.get("get_user_by_oidc_settings.jwks_uri")
        # NOTE(review): this only raises when ALL THREE endpoints are missing;
        # confirm whether a partially-configured set should also be an error.
        if not (authorization_endpoint or token_endpoint or jwks_uri):
            raise MissingConfigurationValue("Missing OIDC Configuration.")
        oidc_config = {
            "authorization_endpoint": authorization_endpoint,
            "token_endpoint": token_endpoint,
            "jwks_uri": jwks_uri,
        }
    client_id = config.get("oidc_secrets.client_id")
    client_secret = config.get("oidc_secrets.secret")
    # NOTE(review): this only raises when BOTH client_id and client_secret are
    # missing — confirm whether one missing secret should also raise.
    if not (client_id or client_secret):
        raise MissingConfigurationValue("Missing OIDC Secrets")
    oidc_config["client_id"] = client_id
    oidc_config["client_secret"] = client_secret
    # Fetch jwks_uri for jwt validation
    res = await http_client.fetch(
        oidc_config["jwks_uri"],
        method="GET",
        headers={
            "Content-Type": "application/x-www-form-urlencoded",
            "Accept": "application/json",
        },
    )
    oidc_config["jwks_data"] = json.loads(res.body)
    oidc_config["jwt_keys"] = {}
    # Index signing keys by key id (`kid`) so a token's header can select
    # the matching key. Only RSA and EC key types are handled.
    for k in oidc_config["jwks_data"]["keys"]:
        key_type = k["kty"]
        key_id = k["kid"]
        if key_type == "RSA":
            oidc_config["jwt_keys"][key_id] = RSAAlgorithm.from_jwk(
                json.dumps(k))
        elif key_type == "EC":
            oidc_config["jwt_keys"][key_id] = ECAlgorithm.from_jwk(
                json.dumps(k))
    return oidc_config
Example #3
0
    async def get(self, requested_challenge_token):
        """
        Poll the status of a challenge token.

        Responds with one of: ``unknown``, ``expired``, ``unauthorized``, the
        pending status, or — on success — a freshly minted JWT plus cookie
        metadata. Successful and expired tokens are deleted from Redis.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        raw_challenge = red.hget(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
        )
        if not raw_challenge:
            self.write({"status": "unknown"})
            return
        challenge = json.loads(raw_challenge)

        # Expired tokens are removed eagerly on the first poll after expiry.
        now_epoch = int(
            datetime.utcnow().replace(tzinfo=pytz.UTC).timestamp())
        if challenge.get("ttl", 0) < now_epoch:
            red.hdel(
                config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
                requested_challenge_token,
            )
            self.write({"status": "expired"})
            return

        # The poller must originate from the IP the challenge was created with.
        if self.get_request_ip() != challenge.get("ip"):
            self.write({"status": "unauthorized"})
            return

        if challenge.get("status") != "success":
            self.write({"status": challenge.get("status")})
            return

        # Authentication succeeded: mint a JWT for the challenge's user/groups.
        jwt_expiration = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(
            minutes=config.get("jwt.expiration_minutes", 60)
        )
        encoded_jwt = await generate_jwt_token(
            challenge.get("user"), challenge.get("groups"), exp=jwt_expiration
        )
        self.write(
            {
                "status": challenge["status"],
                "cookie_name": config.get("auth_cookie_name", "consoleme_auth"),
                "expiration": int(jwt_expiration.timestamp()),
                "encoded_jwt": encoded_jwt,
                "user": challenge["user"],
            }
        )
        # Delete the token so that it cannot be re-used
        red.hdel(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
        )
        return
Example #4
0
async def is_object_older_than_seconds(key: str,
                                       older_than_seconds: int,
                                       bucket: Optional[str] = None,
                                       s3_client=None) -> bool:
    """
    Return True if the S3 object at ``key`` was last modified more than
    ``older_than_seconds`` ago. A missing object also returns True.

    :param key: S3 object key to inspect.
    :param older_than_seconds: age threshold in seconds.
    :param bucket: bucket name; defaults to the `consoleme_s3_bucket` config.
    :param s3_client: optional pre-built S3 client (one is created otherwise).
    :raises MissingConfigurationValue: if no bucket can be determined.
    """
    if not bucket:
        bucket = config.get("consoleme_s3_bucket")
    if not bucket:
        raise MissingConfigurationValue(
            "`bucket` not defined, and we can't find the default bucket in "
            "the configuration key `consoleme_s3_bucket`.")
    now = datetime.utcnow().replace(tzinfo=pytz.utc)
    if not s3_client:
        s3_client = boto3.client("s3", **config.get("boto3.client_kwargs", {}))
    try:
        head = await sync_to_async(s3_client.head_object)(Bucket=bucket,
                                                          Key=key)
    except ClientError as e:
        # If file is not found, we'll tell the user it's older than the specified time
        if e.response.get("Error", {}).get("Code") == "404":
            return True
        raise
    age_seconds = (now - head["LastModified"]).total_seconds()
    return age_seconds > older_than_seconds
async def _generate_inline_policy_statement_from_mapping(
    generator: ChangeGeneratorModel, ) -> Dict:
    """
    Generates an inline policy statement given a ChangeGeneratorModel from an
    action mapping stored in configuration.

    :param generator: ChangeGeneratorModel
    :return: policy_statement: A dictionary representing an inline policy statement.
    :raises MissingConfigurationValue: if no action map is configured for this
        generator type.
    """
    generator_type = generator.generator_type
    if not isinstance(generator_type, str):
        # generator_type may be an enum member; use its string value for the
        # configuration lookup below.
        generator_type = generator.generator_type.value
    permissions_map = config.get(
        f"self_service_iam.permissions_map.{generator_type}.action_map")
    if not permissions_map:
        raise MissingConfigurationValue(
            f"Unable to find applicable action map configuration for {generator_type}."
        )

    action_group_actions: List[str] = []
    resource_arns = [generator.resource_arn]
    effect = generator.effect

    for action in generator.action_groups:
        # TODO: Seems like a datamodel bug when we don't have a enum defined for an array type, but I need to access
        # this as a string sometimes
        if isinstance(action, str):
            action_group_actions.append(action)
        else:
            # Fix: previously this branch appended the raw (non-str) object,
            # identical to the str branch. Normalize enum members to their
            # string value, mirroring the generator_type handling above.
            action_group_actions.append(action.value)
    actions = await _get_actions_from_groups(action_group_actions,
                                             permissions_map)
    condition: Optional[Dict] = await _generate_condition_with_substitutions(
        generator)
    return await _generate_policy_statement(actions, resource_arns, effect,
                                            condition)
Example #6
0
async def _generate_inline_policy_statement_from_policy_sentry(
    generator: CrudChangeGeneratorModel, ) -> Dict:
    """
    Generate an inline policy statement for a CrudChangeGeneratorModel using
    the action mapping provided by policy sentry.

    :param generator: CrudChangeGeneratorModel
    :return: A dictionary representing an inline policy statement.
    :raises MissingConfigurationValue: if the crud_lookup action map is absent.
    """
    permissions_map = (
        self_service_iam_config.get("permissions_map", {})
        .get("crud_lookup", {})
        .get("action_map")
    )
    if not permissions_map:
        raise MissingConfigurationValue(
            "Unable to find applicable action map configuration.")

    # Expand each requested access group into its configured permission list.
    access_level_actions: List[str] = []
    for requested_access in generator.action_groups:
        for mapping in permissions_map:
            if mapping["name"] == requested_access:
                access_level_actions += mapping.get("permissions")

    actions = await _get_policy_sentry_access_level_actions(
        generator.service_name, access_level_actions)
    if generator.extra_actions:
        actions.extend(generator.extra_actions)

    # Normalize a single ARN into a list before building the statement.
    if isinstance(generator.resource_arn, str):
        generator.resource_arn = [generator.resource_arn]
    return await _generate_policy_statement(
        actions, generator.resource_arn, generator.effect, generator.condition
    )
Example #7
0
    async def post(self, requested_challenge_token):
        """
        Approve a challenge URL from an authenticated browser session.

        Validates the submitted nonce against the stored challenge, then marks
        the challenge as successful and records the authenticated user/groups
        in Redis so the CLI poller can retrieve credentials.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        data = tornado.escape.json_decode(self.request.body)

        # X-Forwarded-For may carry a comma-separated proxy chain; keep the
        # first hop.
        ip = self.request.headers.get(
            "X-Forwarded-For", self.request.remote_ip
        ).split(",")
        if isinstance(ip, list):
            ip = ip[0]

        log_data = {
            "user": self.user,
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": ip,
        }
        log.debug(log_data)

        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP")
        )
        if not all_challenges:
            self.write({
                "message": (
                    "Unable to find a matching challenge URL. This usually means that it has expired. "
                    "Please try requesting a new challenge URL."
                )
            })
            return

        await delete_expired_challenges(all_challenges)

        valid_user_challenge = await retrieve_user_challenge(
            self, requested_challenge_token, log_data
        )
        if not valid_user_challenge:
            self.write({
                "message": (
                    "Unable to find a matching challenge URL. This usually means that it has expired. "
                    "Please try requesting a new challenge URL."
                )
            })
            return

        if data.get("nonce") != valid_user_challenge["nonce"]:
            nonce_error = "Unable to validate challenge URL. The Nonce you've submitted is invalid."
            log.error({**log_data, "message": nonce_error})
            self.write({"message": nonce_error})
            return

        # Mark the challenge as approved and record who approved it.
        valid_user_challenge["status"] = "success"
        valid_user_challenge["user"] = self.user
        valid_user_challenge["groups"] = self.groups
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
            json.dumps(valid_user_challenge),
        )
        self.write({
            "message": "You've successfully authenticated to ConsoleMe and may now close this page."
        })
Example #8
0
    async def get(self, requested_challenge_token):
        """
        Display a one-time challenge URL to an authenticated browser session.

        Looks up the challenge in Redis, rejects already-viewed challenges,
        then marks the challenge as visited, attaches a fresh nonce, and shows
        an approval prompt. Expired challenges are purged on every request.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        log_data = {
            "user": self.user,
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": self.ip,
        }
        log.debug(log_data)

        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"))
        if not all_challenges:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL.")
            self.write({"message": message})
            return

        # Purge stale challenges before attempting the lookup.
        await delete_expired_challenges(all_challenges)

        # retrieve_user_challenge writes its own error response on failure.
        valid_user_challenge = await retrieve_user_challenge(
            self, requested_challenge_token, log_data)
        if not valid_user_challenge:
            return

        # Challenge URLs are single-view: a second visit is rejected.
        if valid_user_challenge.get("visited"):
            message = ("This unique challenge URL has already been viewed. "
                       "Please try requesting a new challenge URL.")
            self.write({"message": message})
            return

        # Mark as visited and attach a fresh nonce the browser must echo back
        # on approval (POST).
        valid_user_challenge["visited"] = True
        valid_user_challenge["nonce"] = str(uuid.uuid4())
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
            json.dumps(valid_user_challenge),
        )

        request_ip = valid_user_challenge["ip"]
        request_user = valid_user_challenge["user"]
        message = (
            f"A user at **{request_ip}** has requested ConsoleMe credentials for **{request_user}**.\n\n"
            f"You must approve this request for credentials to be provided. "
            f"You will not be able to refresh or revisit this page after closing it.\n\n"
            f"If you did not create this request, please report it to your security team."
        )

        self.write({
            "message": message,
            "nonce": valid_user_challenge["nonce"],
            "show_approve_button": True,
        })
Example #9
0
def query(query: str,
          use_aggregator: bool = True,
          account_id: Optional[str] = None) -> List:
    """
    Run an AWS Config advanced query and return all result rows.

    :param query: the AWS Config "select" expression to execute.
    :param use_aggregator: when True, query the configured Config aggregator;
        otherwise query every (non-excluded) region of ``account_id`` directly.
    :param account_id: account to query when not using the aggregator.
    :return: list of deserialized result dictionaries.
    :raises MissingConfigurationValue: if the aggregator name is not configured.
    """
    resources = []
    if use_aggregator:
        config_client = boto3.client("config", region_name=config.region)
        # Fix: validate the raw config value BEFORE calling .format() on it.
        # Previously a missing value raised AttributeError (None.format) and
        # the MissingConfigurationValue check below was unreachable.
        aggregator_name_template = config.get(
            "aws_config.configuration_aggregator.name")
        if not aggregator_name_template:
            raise MissingConfigurationValue(
                "Invalid configuration for aws_config")
        configuration_aggregator_name: str = aggregator_name_template.format(
            region=config.region)
        response = config_client.select_aggregate_resource_config(
            Expression=query,
            ConfigurationAggregatorName=configuration_aggregator_name,
            Limit=100,
        )
        for r in response.get("Results", []):
            resources.append(json.loads(r))
        # Page through results until NextToken is exhausted.
        while response.get("NextToken"):
            response = config_client.select_aggregate_resource_config(
                Expression=query,
                ConfigurationAggregatorName=configuration_aggregator_name,
                Limit=100,
                NextToken=response["NextToken"],
            )
            for r in response.get("Results", []):
                resources.append(json.loads(r))
        return resources
    else:  # Don't use Config aggregator and instead query all the regions on an account
        session = boto3.Session()
        available_regions = session.get_available_regions("config")
        # Some regions are excluded by default because they are not enabled in
        # every account.
        excluded_regions = config.get(
            "api_protect.exclude_regions",
            ["af-south-1", "ap-east-1", "eu-south-1", "me-south-1"],
        )
        regions = [x for x in available_regions if x not in excluded_regions]
        for region in regions:
            config_client = boto3_cached_conn(
                "config",
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=region,
            )
            response = config_client.select_resource_config(Expression=query,
                                                            Limit=100)
            for r in response.get("Results", []):
                resources.append(json.loads(r))
            # Query Config for a specific account in all regions we care about
            while response.get("NextToken"):
                response = config_client.select_resource_config(
                    Expression=query,
                    Limit=100,
                    NextToken=response["NextToken"])
                for r in response.get("Results", []):
                    resources.append(json.loads(r))
        return resources
Example #10
0
async def retrieve_accounts_from_swag() -> CloudAccountModelArray:
    """
    Fetch the account inventory from SWAG and convert it into a
    CloudAccountModelArray, skipping accounts whose owner is not expected.

    :raises MissingConfigurationValue: if the SWAG base URL is not configured.
    """
    function: str = f"{sys._getframe().f_code.co_name}"
    expected_owners: List = config.get(
        "retrieve_accounts_from_swag.expected_owners", [])

    swag_base_url = config.get("retrieve_accounts_from_swag.base_url")
    if not swag_base_url:
        raise MissingConfigurationValue(
            "Unable to find Swag URL in configuration")
    swag_url = swag_base_url + "api/1/accounts"

    try:
        http_client = AsyncHTTPClient(force_instance=True)
        resp = await http_client.fetch(
            swag_url,
            headers={
                "Content-Type": "application/json",
                "Accept": "application/json"
            },
        )
    except (ConnectionError, HTTPClientError) as e:
        log.error(
            {
                "message": "Unable to connect to SWAG",
                "error": str(e),
                "function": function,
            },
            exc_info=True,
        )
        stats.count(f"{function}.connectionerror")
        raise

    cloud_accounts = []
    for swag_account in json.loads(resp.body):
        # Ignore third party accounts
        if expected_owners and swag_account.get("owner") not in expected_owners:
            continue
        # SWAG reports usable accounts as "ready"; ConsoleMe records those as
        # "active" and enables syncing for them.
        is_ready = swag_account["account_status"] == "ready"
        cloud_accounts.append(
            CloudAccountModel(
                id=swag_account["id"],
                name=swag_account["name"],
                email=swag_account["email"],
                status="active" if is_ready else swag_account["account_status"],
                sync_enabled=is_ready,
                sensitive=swag_account["sensitive"],
                environment=swag_account["environment"],
                aliases=swag_account["aliases"],
                type="aws",
            ))
    return CloudAccountModelArray(accounts=cloud_accounts)
Example #11
0
async def get_conglomo_url_for_resource(
    account_id, resource_id, technology, region="global"
):
    """
    Build the Conglomo deep-link URL for a resource's AWS Config history.

    :raises MissingConfigurationValue: if the Conglomo base URL is unset.
    """
    base_url = config.get("get_aws_config_history_url_for_resource.conglomo_url")
    if not base_url:
        raise MissingConfigurationValue(
            "Unable to find conglomo URL in configuration: `get_aws_config_history_url_for_resource.conglomo_url`"
        )
    # Resource IDs may contain URL-unsafe characters, so embed them
    # URL-safe-base64 encoded.
    encoded_resource_id = base64.urlsafe_b64encode(
        resource_id.encode("utf-8")
    ).decode("utf-8")
    return f"{base_url}/resource/{account_id}/{region}/{technology}/{encoded_resource_id}"
Example #12
0
async def populate_oidc_config():
    """
    Build the OIDC configuration used to validate ALB access tokens.

    Uses the provider's metadata endpoint when configured, otherwise falls
    back to an explicitly configured ``jwks_uri``. Then downloads the JWKS
    document and pre-builds per-``kid`` signing keys for JWT validation.

    :raises MissingConfigurationValue: if no metadata URL and no jwks_uri is
        configured.
    """
    http_client = tornado.httpclient.AsyncHTTPClient()
    metadata_url = config.get(
        "get_user_by_aws_alb_auth_settings.access_token_validation.metadata_url"
    )

    if metadata_url:
        # Preferred path: let the provider describe itself.
        metadata_response = await http_client.fetch(
            metadata_url,
            method="GET",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
            },
        )
        oidc_config = json.loads(metadata_response.body)
    else:
        # Fallback: the jwks_uri must be configured directly.
        jwks_uri = config.get(
            "get_user_by_aws_alb_auth_settings.access_token_validation.jwks_uri"
        )
        if not jwks_uri:
            raise MissingConfigurationValue("Missing OIDC Configuration.")
        oidc_config = {"jwks_uri": jwks_uri}

    # Fetch jwks_uri for jwt validation
    jwks_response = await http_client.fetch(
        oidc_config["jwks_uri"],
        method="GET",
        headers={
            "Content-Type": "application/x-www-form-urlencoded",
            "Accept": "application/json",
        },
    )
    oidc_config["jwks_data"] = json.loads(jwks_response.body)

    # Index signing keys by key id (`kid`); only RSA and EC keys are handled.
    jwt_keys = {}
    for jwk in oidc_config["jwks_data"]["keys"]:
        serialized_jwk = json.dumps(jwk)
        if jwk["kty"] == "RSA":
            jwt_keys[jwk["kid"]] = RSAAlgorithm.from_jwk(serialized_jwk)
        elif jwk["kty"] == "EC":
            jwt_keys[jwk["kid"]] = ECAlgorithm.from_jwk(serialized_jwk)
    oidc_config["jwt_keys"] = jwt_keys

    oidc_config["aud"] = config.get(
        "get_user_by_aws_alb_auth_settings.access_token_validation.client_id")
    return oidc_config
Example #13
0
    async def get(self, user):
        """
        Create a short-lived (2 minute) challenge token for ``user`` and
        return the challenge and polling URLs.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        # X-Forwarded-For may carry a comma-separated proxy chain; keep the
        # first hop.
        ip = self.request.headers.get(
            "X-Forwarded-For", self.request.remote_ip
        ).split(",")
        if isinstance(ip, list):
            ip = ip[0]

        token = str(uuid.uuid4())
        expiry = datetime.utcnow().replace(tzinfo=pytz.UTC) + timedelta(minutes=2)
        entry = {
            "ttl": int(expiry.timestamp()),
            "ip": ip,
            "status": "pending",
            "user": user,
        }
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            token,
            json.dumps(entry),
        )

        challenge_url = "{url}/challenge_validator/{token}".format(
            url=config.get("url"), token=token)
        polling_url = "{url}/noauth/v1/challenge_poller/{token}".format(
            url=config.get("url"), token=token)
        self.write({"challenge_url": challenge_url, "polling_url": polling_url})

        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "challenge_url": challenge_url,
            "polling_url": polling_url,
            "message": "Incoming request",
            "ip": ip,
            "user": user,
        }
        log.debug(log_data)
Example #14
0
 async def validate_and_return_api_caller(self, headers: dict):
     """
     Validate mutual-TLS request headers and return the certificate's entity
     name if it is authorized to call the API.

     :param headers: incoming request headers.
     :return: the entity name extracted from the client certificate.
     :raises MissingConfigurationValue: if `cli_auth.required_headers` is unset.
     :raises Exception: if a required header mismatches, or the certificate's
         entity is not listed in `api_auth.valid_entities`.
     """
     cli_auth_required_headers = config.get("cli_auth.required_headers")
     if not cli_auth_required_headers:
         # Fix: corrected grammar in the error message ("must specified" ->
         # "must specify").
         raise MissingConfigurationValue(
             "You must specify the header key and expected value in order to validate a certificate for mutual "
             "TLS authentication. Refer to the `cli_auth.required_headers` configuration"
         )
     # Every configured header must be present with exactly its expected value.
     for header in cli_auth_required_headers:
         for k, v in header.items():
             if headers.get(k) != v:
                 raise Exception(
                     f"Header {k} is supposed to equal {v}, but it equals {headers.get(k)}."
                 )
     cert = await self.extract_user_from_certificate(headers)
     user = cert.get("name")
     if not user or user not in config.get("api_auth.valid_entities", []):
         raise Exception("Not authorized to call this API with that certificate.")
     return user
Example #15
0
async def _generate_s3_inline_policy_statement_from_mapping(
    generator: ChangeGeneratorModel, ) -> Dict:
    """
    Generates an S3 inline policy statement from a ChangeGeneratorModel. S3 is an edge case, thus it gets a
    unique function for this purpose. We need to consider the resource ARN and prefix.

    :param generator: ChangeGeneratorModel
    :return: policy_statement: A dictionary representing an inline policy statement.
    :raises MissingConfigurationValue: if the S3 action map is not configured.
    """
    permissions_map = (self_service_iam_config.get("permissions_map", {}).get(
        "s3", {}).get("action_map"))
    if not permissions_map:
        raise MissingConfigurationValue(
            "Unable to find applicable action map configuration.")
    action_group_actions: List[str] = []
    resource_arns = []
    effect = generator.effect
    condition = generator.condition

    if isinstance(generator.resource_arn, str):
        generator.resource_arn = [generator.resource_arn]

    # Make sure prefix starts with "/". This normalization is loop-invariant,
    # so do it once up front instead of re-checking on every ARN iteration.
    if not generator.bucket_prefix.startswith("/"):
        generator.bucket_prefix = f"/{generator.bucket_prefix}"

    # For each bucket, grant access to both the bucket itself and the
    # requested prefix within it.
    for arn in generator.resource_arn:
        if not arn.startswith("arn:aws:s3:::"):
            arn = f"arn:aws:s3:::{arn}"
        resource_arns.append(arn)
        resource_arns.append(f"{arn}{generator.bucket_prefix}")

    for action in generator.action_groups:
        action_group_actions.append(action)
    actions = await _get_actions_from_groups(action_group_actions,
                                             permissions_map)
    if generator.extra_actions:
        actions.extend(generator.extra_actions)
    return await _generate_policy_statement(actions, resource_arns, effect,
                                            condition)
Example #16
0
 async def validate_certificate(self, headers: dict):
     """
     Validate that all headers required for mutual TLS authentication match
     their expected values.

     :param headers: incoming request headers.
     :return: True if every required header matches.
     :raises MissingConfigurationValue: if `cli_auth.required_headers` is unset.
     :raises InvalidCertificateException: if any required header mismatches.
     """
     cli_auth_required_headers = config.get("cli_auth.required_headers")
     if not cli_auth_required_headers:
         # Fix: corrected grammar in the error message ("must specified" ->
         # "must specify").
         raise MissingConfigurationValue(
             "You must specify the header key and expected value in order to validate a certificate for mutual "
             "TLS authentication. Refer to the `cli_auth.required_headers` configuration"
         )
     # Every configured header must be present with exactly its expected value.
     for header in cli_auth_required_headers:
         for k, v in header.items():
             if headers.get(k) != v:
                 stats.count("auth.validate_certificate.error")
                 error = (
                     "Header {} is supposed to equal {}, but it equals {}.".
                     format(k, v, headers.get(k)))
                 log_data = {
                     "function": "auth.validate_certificate",
                     "message": error,
                 }
                 log.error(log_data)
                 raise InvalidCertificateException(error)
     return True
Example #17
0
    async def get(self, requested_challenge_token):
        """
        Display a one-time challenge URL to an authenticated browser session,
        optionally enforcing that the requester's IP matches the IP the
        challenge was created with.

        Rejects already-viewed challenges, then marks the challenge as visited,
        attaches a fresh nonce, and shows an approval prompt. Expired
        challenges are purged on every request.
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        log_data = {
            "user": self.user,
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": self.ip,
        }
        log.debug(log_data)

        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"))
        if not all_challenges:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL.")
            self.write({"message": message})
            return

        # Purge stale challenges before attempting the lookup.
        await delete_expired_challenges(all_challenges)

        # retrieve_user_challenge writes its own error response on failure.
        valid_user_challenge = await retrieve_user_challenge(
            self, requested_challenge_token, log_data)
        if not valid_user_challenge:
            return

        # Challenge URLs are single-view: a second visit is rejected.
        if valid_user_challenge.get("visited"):
            message = ("This unique challenge URL has already been viewed. "
                       "Please try requesting a new challenge URL.")
            self.write({"message": message})
            return

        request_ip = self.get_request_ip()

        # By default, the challenge URL requester IP must match the URL the challenge was created with. In some cases
        # (i.e. IPv4 vs IPv6), the challenge may have been created with an IPv4 address, and the authenticated browser
        # verification request may originate from an IPv6 one, or visa versa, in which case this configuration may
        # need to be explicitly set to False.
        if config.get(
                "challenge_url.request_ip_must_match_challenge_creation_ip",
                True):
            if request_ip != valid_user_challenge.get("ip"):
                log.error({
                    **log_data,
                    "request_ip": request_ip,
                    "challenge_ip": valid_user_challenge.get("ip"),
                    "message": "Request IP doesn't match challenge IP",
                })
                self.write({
                    "message":
                    "Your originating IP doesn't match the IP the challenge was created with."
                })
                return

        # Mark as visited and attach a fresh nonce the browser must echo back
        # on approval (POST).
        valid_user_challenge["visited"] = True
        valid_user_challenge["nonce"] = str(uuid.uuid4())
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
            json.dumps(valid_user_challenge),
        )

        request_ip = valid_user_challenge["ip"]
        request_user = valid_user_challenge["user"]
        message = (
            f"A user at **{request_ip}** has requested ConsoleMe credentials for **{request_user}**.\n\n"
            f"You must approve this request for credentials to be provided. "
            f"You will not be able to refresh or revisit this page after closing it.\n\n"
            f"If you did not create this request, please report it to your security team."
        )

        self.write({
            "message": message,
            "nonce": valid_user_challenge["nonce"],
            "show_approve_button": True,
        })
Example #18
0
async def get_service(service_name: str, service_path: str,
                      group: str) -> Resource:
    """Get a Google API service connection.

    :param service_name: Google API service name (e.g. ``admin``)
    :param service_path: service version/path (e.g. ``directory_v1``)
    :param group: group email address; its domain selects which credential
        subject the admin credentials are delegated to
    :return: googleapiclient discovery ``Resource``
    :raises MissingConfigurationValue: if neither ``google.service_key_file``
        nor ``google.service_key_dict`` is configured, or if
        ``google.credential_subject`` is missing
    :raises NoCredentialSubjectException: if no credential subject is
        configured for the group's domain
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    stats.count(function)
    log_data = {
        "function": function,
        "service_name": service_name,
        "service_path": service_path,
        "group": group,
        "message": f"Building service connection for {service_name} / {service_path}",
    }
    log.debug(log_data)
    if config.get("google.service_key_file"):
        admin_credentials = service_account.Credentials.from_service_account_file(
            config.get("google.service_key_file"),
            scopes=config.get(
                "google.admin_scopes",
                ["https://www.googleapis.com/auth/admin.directory.group"],
            ),
        )
    elif config.get("google.service_key_dict"):
        admin_credentials = service_account.Credentials.from_service_account_info(
            config.get("google.service_key_dict"),
            scopes=config.get(
                "google.admin_scopes",
                ["https://www.googleapis.com/auth/admin.directory.group"],
            ),
        )
    else:
        raise MissingConfigurationValue(
            "Missing configuration for Google. You must configure either `google.service_key_file` "
            "or `google.service_key_dict`.")

    # Change credential subject based on group domain.
    # Bug fix: a missing `google.credential_subject` configuration previously
    # raised an opaque AttributeError (`None.items()`); raise a clear
    # configuration error instead, consistent with the checks above.
    credential_subjects = config.get("google.credential_subject")
    if not credential_subjects:
        raise MissingConfigurationValue(
            "Missing configuration for `google.credential_subject`. Unable to "
            "determine a Google credential subject for any domain.")
    group_domain = group.split("@")[1]
    credential_subject = credential_subjects.get(group_domain)

    if not credential_subject:
        raise NoCredentialSubjectException(
            "Error: Unable to find Google credential subject for domain {}. "
            "{}".format(group_domain, config.get("ses.support_reference", "")))

    admin_delegated_credentials = admin_credentials.with_subject(
        credential_subject)
    service = await sync_to_async(googleapiclient.discovery.build)(
        service_name, service_path, credentials=admin_delegated_credentials)

    return service
Beispiel #19
0
def query(
    query: str, use_aggregator: bool = True, account_id: Optional[str] = None
) -> List:
    """
    Query AWS Config for resources matching an advanced (SQL-like) query.

    :param query: AWS Config advanced query expression (``SELECT ...``)
    :param use_aggregator: if True, query the configured Config aggregator;
        otherwise query every supported region of ``account_id`` directly
    :param account_id: account to query when ``use_aggregator`` is False
    :return: list of resources, each parsed from Config's JSON result string
    :raises MissingConfigurationValue: if the aggregator name is not configured
    """
    resources = []
    if use_aggregator:
        config_client = boto3.client("config", region_name=config.region)
        # Bug fix: validate the configuration value *before* calling
        # .format() on it. Previously a missing value raised AttributeError
        # on None, so the intended MissingConfigurationValue was unreachable.
        aggregator_name_template = config.get(
            "aws_config.configuration_aggregator.name"
        )
        if not aggregator_name_template:
            raise MissingConfigurationValue("Invalid configuration for aws_config")
        configuration_aggregator_name: str = aggregator_name_template.format(
            region=config.region
        )
        response = config_client.select_aggregate_resource_config(
            Expression=query,
            ConfigurationAggregatorName=configuration_aggregator_name,
            Limit=100,
        )
        for r in response.get("Results", []):
            resources.append(json.loads(r))
        # Page through the remaining results
        while response.get("NextToken"):
            response = config_client.select_aggregate_resource_config(
                Expression=query,
                ConfigurationAggregatorName=configuration_aggregator_name,
                Limit=100,
                NextToken=response["NextToken"],
            )
            for r in response.get("Results", []):
                resources.append(json.loads(r))
        return resources
    else:  # Don't use Config aggregator and instead query all the regions on an account
        session = boto3.Session()
        available_regions = session.get_available_regions("config")
        # Skip regions that are excluded by configuration (defaults are
        # opt-in regions that are typically disabled)
        excluded_regions = config.get(
            "api_protect.exclude_regions",
            ["af-south-1", "ap-east-1", "ap-northeast-3", "eu-south-1", "me-south-1"],
        )
        regions = [x for x in available_regions if x not in excluded_regions]
        for region in regions:
            config_client = boto3_cached_conn(
                "config",
                account_number=account_id,
                assume_role=config.get("policies.role_name"),
                region=region,
                sts_client_kwargs=dict(
                    region_name=config.region,
                    endpoint_url=f"https://sts.{config.region}.amazonaws.com",
                ),
            )
            try:
                response = config_client.select_resource_config(
                    Expression=query, Limit=100
                )
                for r in response.get("Results", []):
                    resources.append(json.loads(r))
                # Query Config for a specific account in all regions we care about
                while response.get("NextToken"):
                    response = config_client.select_resource_config(
                        Expression=query, Limit=100, NextToken=response["NextToken"]
                    )
                    for r in response.get("Results", []):
                        resources.append(json.loads(r))
            except ClientError as e:
                # A region may be disabled or unreachable for this account;
                # log, report, and continue with the remaining regions.
                log.error(
                    {
                        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
                        "message": "Failed to query AWS Config",
                        "query": query,
                        "use_aggregator": use_aggregator,
                        "account_id": account_id,
                        "region": region,
                        "error": str(e),
                    },
                    exc_info=True,
                )
                sentry_sdk.capture_exception()
        return resources
Beispiel #20
0
async def clone_iam_role(clone_model: CloneRoleRequestModel, username: str):
    """
    Clones IAM role within same account or across account, always creating and attaching instance profile if one exists
    on the source role.
    :param username: username of user requesting action
    :param clone_model: CloneRoleRequestModel, which has the following attributes:
        account_id: source role's account ID
        role_name: source role's name
        dest_account_id: destination role's account ID (may be same as account_id)
        dest_role_name: destination role's name
        clone_options: dict to indicate what to copy when cloning:
            assume_role_policy: bool
                default: False - uses default ConsoleMe AssumeRolePolicy
            tags: bool
                default: False - defaults to no tags
            copy_description: bool
                default: False - defaults to copying provided description or default description
            description: string
                default: "Role cloned via ConsoleMe by `username` from `arn:aws:iam::<account_id>:role/<role_name>`
                if copy_description is True, then description is ignored
            inline_policies: bool
                default: False - defaults to no inline policies
            managed_policies: bool
                default: False - defaults to no managed policies
    :return: results: - indicating the results of each action
    """

    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to clone role",
        "account_id": clone_model.account_id,
        "role_name": clone_model.role_name,
        "dest_account_id": clone_model.dest_account_id,
        "dest_role_name": clone_model.dest_role_name,
        "user": username,
    }
    log.info(log_data)
    role = await fetch_role_details(clone_model.account_id, clone_model.role_name)

    # Pick the source role's trust policy or ConsoleMe's configured default
    default_trust_policy = config.get("user_role_creator.default_trust_policy")
    trust_policy = (
        role.assume_role_policy_document
        if clone_model.options.assume_role_policy
        else default_trust_policy
    )
    if trust_policy is None:
        raise MissingConfigurationValue(
            "Missing Default Assume Role Policy Configuration"
        )

    # Description precedence: copied source description > caller-provided > default
    if (
        clone_model.options.copy_description
        and role.description is not None
        and role.description != ""
    ):
        description = role.description
    elif (
        clone_model.options.description is not None
        and clone_model.options.description != ""
    ):
        description = clone_model.options.description
    else:
        description = f"Role cloned via ConsoleMe by {username} from {role.arn}"

    tags = role.tags if clone_model.options.tags and role.tags else []

    iam_client = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="client",
        account_number=clone_model.dest_account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name="clone_role_" + username,
    )
    # results accumulates per-step outcomes; "errors" counts failed steps
    results = {"errors": 0, "role_created": "false", "action_results": []}
    try:
        await sync_to_async(iam_client.create_role)(
            RoleName=clone_model.dest_role_name,
            AssumeRolePolicyDocument=json.dumps(trust_policy),
            Description=description,
            Tags=tags,
        )
        results["action_results"].append(
            {
                "status": "success",
                "message": f"Role arn:aws:iam::{clone_model.dest_account_id}:role/{clone_model.dest_role_name} "
                f"successfully created",
            }
        )
        results["role_created"] = "true"
    except Exception as e:
        log_data["message"] = "Exception occurred creating cloned role"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        results["action_results"].append(
            {
                "status": "error",
                "message": f"Error creating role {clone_model.dest_role_name} in account {clone_model.dest_account_id}:"
                + str(e),
            }
        )
        results["errors"] += 1
        sentry_sdk.capture_exception()
        # Since we were unable to create the role, no point continuing, just return
        return results

    if clone_model.options.tags:
        results["action_results"].append(
            {"status": "success", "message": "Successfully copied tags"}
        )
    if clone_model.options.assume_role_policy:
        results["action_results"].append(
            {
                "status": "success",
                "message": "Successfully copied Assume Role Policy Document",
            }
        )
    else:
        results["action_results"].append(
            {
                "status": "success",
                "message": "Successfully added default Assume Role Policy Document",
            }
        )
    if (
        clone_model.options.copy_description
        and role.description is not None
        and role.description != ""
    ):
        results["action_results"].append(
            {"status": "success", "message": "Successfully copied description"}
        )
    elif clone_model.options.copy_description:
        # copy was requested but the source had no usable description
        results["action_results"].append(
            {
                "status": "error",
                "message": "Failed to copy description, so added default description: "
                + description,
            }
        )
    else:
        results["action_results"].append(
            {
                "status": "success",
                "message": "Successfully added description: " + description,
            }
        )
    # Create instance profile and attach if it exists in source role
    if len(list(await sync_to_async(role.instance_profiles.all)())) > 0:
        try:
            await sync_to_async(iam_client.create_instance_profile)(
                InstanceProfileName=clone_model.dest_role_name
            )
            await sync_to_async(iam_client.add_role_to_instance_profile)(
                InstanceProfileName=clone_model.dest_role_name,
                RoleName=clone_model.dest_role_name,
            )
            results["action_results"].append(
                {
                    "status": "success",
                    "message": f"Successfully added instance profile {clone_model.dest_role_name} to role "
                    f"{clone_model.dest_role_name}",
                }
            )
        except Exception as e:
            log_data[
                "message"
            ] = "Exception occurred creating/attaching instance profile"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            results["action_results"].append(
                {
                    "status": "error",
                    "message": f"Error creating/attaching instance profile {clone_model.dest_role_name} to role: "
                    + str(e),
                }
            )
            results["errors"] += 1

    # other optional attributes to copy over after role has been successfully created

    cloned_role = await fetch_role_details(
        clone_model.dest_account_id, clone_model.dest_role_name
    )

    # Copy inline policies
    if clone_model.options.inline_policies:
        for src_policy in await sync_to_async(role.policies.all)():
            await sync_to_async(src_policy.load)()
            try:
                dest_policy = await sync_to_async(cloned_role.Policy)(src_policy.name)
                await sync_to_async(dest_policy.put)(
                    PolicyDocument=json.dumps(src_policy.policy_document)
                )
                results["action_results"].append(
                    {
                        "status": "success",
                        "message": f"Successfully copied inline policy {src_policy.name}",
                    }
                )
            except Exception as e:
                log_data["message"] = "Exception occurred copying inline policy"
                log_data["error"] = str(e)
                log.error(log_data, exc_info=True)
                sentry_sdk.capture_exception()
                results["action_results"].append(
                    {
                        "status": "error",
                        "message": f"Error copying inline policy {src_policy.name}: "
                        + str(e),
                    }
                )
                results["errors"] += 1

    # Copy managed policies
    if clone_model.options.managed_policies:
        for src_policy in await sync_to_async(role.attached_policies.all)():
            await sync_to_async(src_policy.load)()
            # Rewrite customer-managed policy ARNs to point at the destination
            # account (AWS-managed ARNs contain no account ID, so they are
            # unchanged by this replace)
            dest_policy_arn = src_policy.arn.replace(
                clone_model.account_id, clone_model.dest_account_id
            )
            try:
                await sync_to_async(cloned_role.attach_policy)(
                    PolicyArn=dest_policy_arn
                )
                results["action_results"].append(
                    {
                        "status": "success",
                        "message": f"Successfully attached managed policy {src_policy.arn} as {dest_policy_arn}",
                    }
                )
            except Exception as e:
                log_data["message"] = "Exception occurred copying managed policy"
                log_data["error"] = str(e)
                log.error(log_data, exc_info=True)
                sentry_sdk.capture_exception()
                results["action_results"].append(
                    {
                        "status": "error",
                        "message": f"Error attaching managed policy {dest_policy_arn}: "
                        + str(e),
                    }
                )
                results["errors"] += 1

    stats.count(
        f"{log_data['function']}.success", tags={"role_name": clone_model.role_name}
    )
    log_data["message"] = "Successfully cloned role"
    log.info(log_data)
    return results
Beispiel #21
0
async def detect_cloudtrail_denies_and_update_cache():
    """
    Poll the configured SQS queue for CloudTrail access-deny events, enrich
    each event with the affected resource and a generated policy, and write
    them to the DynamoDB CloudTrail cache.

    :return: list of the CloudTrail events that were cached
    :raises MissingConfigurationValue: if the queue ARN is not configured
    :raises DataNotRetrievable: if the queue URL cannot be resolved
    """
    log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
    dynamo = UserDynamoHandler()
    queue_arn = config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.queue_arn",
        "").format(region=config.region)
    if not queue_arn:
        raise MissingConfigurationValue(
            "Unable to find required configuration value: "
            "`event_bridge.detect_cloudtrail_denies_and_update_cache.queue_arn`"
        )
    # ARN format: arn:aws:sqs:<region>:<account-id>:<queue-name>
    queue_name = queue_arn.split(":")[-1]
    queue_account_number = queue_arn.split(":")[4]
    queue_region = queue_arn.split(":")[3]
    # Optionally assume a role before receiving messages from the queue
    queue_assume_role = config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.assume_role")

    sqs_client = await sync_to_async(boto3_cached_conn)(
        "sqs",
        service_type="client",
        region=queue_region,
        retry_max_attempts=2,
        account_number=queue_account_number,
        assume_role=queue_assume_role,
    )

    queue_url_res = await sync_to_async(sqs_client.get_queue_url
                                        )(QueueName=queue_name)
    queue_url = queue_url_res.get("QueueUrl")
    if not queue_url:
        raise DataNotRetrievable(
            f"Unable to retrieve Queue URL for {queue_arn}")
    ct_events = []
    messages_awaitable = await sync_to_async(sqs_client.receive_message
                                             )(QueueUrl=queue_url,
                                               MaxNumberOfMessages=10)
    messages = messages_awaitable.get("Messages", [])
    # Drain the queue in batches of up to 10 messages until it is empty
    while messages:
        processed_messages = []
        for message in messages:
            try:
                message_body = json.loads(message["Body"])
                # SNS envelope: the CloudTrail detail is JSON nested under "Message"
                decoded_message = json.loads(message_body["Message"])["detail"]
                event_name = decoded_message.get("eventName")
                event_source = decoded_message.get("eventSource")
                # Strip configured suffixes (e.g. ".amazonaws.com") so the
                # event call reads as "<service>:<EventName>"
                for event_source_substitution in config.get(
                        "event_bridge.detect_cloudtrail_denies_and_update_cache.event_bridge_substitutions",
                    [".amazonaws.com"],
                ):
                    event_source = event_source.replace(
                        event_source_substitution, "")
                event_time = decoded_message.get("eventTime")
                utc_time = datetime.strptime(event_time, "%Y-%m-%dT%H:%M:%SZ")
                # Event time in epoch milliseconds
                epoch_event_time = int(
                    (utc_time - datetime(1970, 1, 1)).total_seconds() * 1000)
                try:
                    session_name = decoded_message["userIdentity"][
                        "arn"].split("/")[-1]
                except (
                        IndexError,
                        KeyError,
                ):  # If IAM user, there won't be a session name
                    session_name = ""
                try:
                    role_arn = decoded_message["userIdentity"][
                        "sessionContext"]["sessionIssuer"]["arn"]
                except KeyError:  # Skip events without a parsable ARN
                    continue

                ct_event = dict(
                    error_code=decoded_message.get("errorCode"),
                    error_message=decoded_message.get("errorMessage"),
                    arn=role_arn,
                    session_name=session_name,
                    request_id=decoded_message["requestID"],
                    event_call=f"{event_source}:{event_name}",
                    epoch_event_time=epoch_event_time,
                    # Expire the cache entry 24h after the event (epoch seconds)
                    ttl=(epoch_event_time + 86400000) / 1000,
                )
                ct_event["resource"] = await get_resource_from_cloudtrail_deny(
                    ct_event)
                generated_policy = await generate_policy_from_cloudtrail_deny(
                    ct_event)
                if generated_policy:
                    ct_event["generated_policy"] = generated_policy
                ct_events.append(ct_event)
            except Exception as e:
                log.error({**log_data, "error": str(e)}, exc_info=True)
                sentry_sdk.capture_exception()
            # Always delete the message, even on failure, to avoid
            # redelivering poison-pill messages forever
            processed_messages.append({
                "Id":
                message["MessageId"],
                "ReceiptHandle":
                message["ReceiptHandle"],
            })
        await sync_to_async(sqs_client.delete_message_batch
                            )(QueueUrl=queue_url, Entries=processed_messages)
        # NOTE(review): ct_events accumulates across batches, so every
        # iteration re-writes all events collected so far. Presumably the
        # batch write is idempotent, but this is O(n^2) writes -- confirm.
        await sync_to_async(dynamo.batch_write_cloudtrail_events)(ct_events)
        messages_awaitable = await sync_to_async(sqs_client.receive_message
                                                 )(QueueUrl=queue_url,
                                                   MaxNumberOfMessages=10)
        messages = messages_awaitable.get("Messages", [])
    log.debug({
        **log_data,
        "num_events": len(ct_events),
        "message": "Successfully cached Cloudtrail Access Denies",
    })

    return ct_events
Beispiel #22
0
async def detect_cloudtrail_denies_and_update_cache(
    celery_app,
    event_ttl=config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.event_ttl",
        86400),
    max_num_messages_to_process=config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.max_num_messages_to_process",
        100,
    ),
) -> Dict[str, Any]:
    """
    Poll the configured SQS queue for CloudTrail access-deny events, merge
    them with previously cached denies (incrementing a per-event count for
    duplicates), and persist the merged set to DynamoDB.

    NOTE(review): the `event_ttl` and `max_num_messages_to_process` defaults
    are evaluated once at module import time, so configuration changes made
    after import are not picked up -- confirm this is intentional.

    :param celery_app: Celery app used to spawn a follow-up task if the
        message limit is reached before the queue is drained
    :param event_ttl: seconds after the event time at which the cached entry
        should expire
    :param max_num_messages_to_process: stop after roughly this many events
        and hand the remainder off to a new task
    :return: log_data dict, including `num_events` and `new_events` counts
    :raises MissingConfigurationValue: if the queue ARN is not configured
    :raises DataNotRetrievable: if the queue URL cannot be resolved
    """
    log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
    dynamo = UserDynamoHandler()
    queue_arn = config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.queue_arn",
        "").format(region=config.region)
    if not queue_arn:
        raise MissingConfigurationValue(
            "Unable to find required configuration value: "
            "`event_bridge.detect_cloudtrail_denies_and_update_cache.queue_arn`"
        )
    # ARN format: arn:aws:sqs:<region>:<account-id>:<queue-name>
    queue_name = queue_arn.split(":")[-1]
    queue_account_number = queue_arn.split(":")[4]
    queue_region = queue_arn.split(":")[3]
    # Optionally assume a role before receiving messages from the queue
    queue_assume_role = config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.assume_role")

    # Modify existing cloudtrail deny samples
    all_cloudtrail_denies_l = await dynamo.parallel_scan_table_async(
        dynamo.cloudtrail_table)
    # Index existing denies by request_id so duplicates can bump the count
    all_cloudtrail_denies = {}
    for cloudtrail_deny in all_cloudtrail_denies_l:
        all_cloudtrail_denies[cloudtrail_deny["request_id"]] = cloudtrail_deny

    sqs_client = await sync_to_async(boto3_cached_conn)(
        "sqs",
        service_type="client",
        region=queue_region,
        retry_max_attempts=2,
        account_number=queue_account_number,
        assume_role=queue_assume_role,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )

    queue_url_res = await sync_to_async(sqs_client.get_queue_url
                                        )(QueueName=queue_name)
    queue_url = queue_url_res.get("QueueUrl")
    if not queue_url:
        raise DataNotRetrievable(
            f"Unable to retrieve Queue URL for {queue_arn}")
    messages_awaitable = await sync_to_async(sqs_client.receive_message
                                             )(QueueUrl=queue_url,
                                               MaxNumberOfMessages=10)
    new_events = 0
    messages = messages_awaitable.get("Messages", [])
    num_events = 0
    reached_limit_on_num_messages_to_process = False

    # Drain the queue in batches of up to 10 messages until it is empty or
    # the processing limit is hit
    while messages:
        if num_events >= max_num_messages_to_process:
            reached_limit_on_num_messages_to_process = True
            break
        processed_messages = []
        for message in messages:
            try:
                message_body = json.loads(message["Body"])
                try:
                    # Support both SNS-wrapped messages ("Message" envelope)
                    # and raw EventBridge payloads
                    if "Message" in message_body:
                        decoded_message = json.loads(
                            message_body["Message"])["detail"]
                    else:
                        decoded_message = message_body["detail"]
                except Exception as e:
                    log.error({
                        **log_data,
                        "message": "Unable to process Cloudtrail message",
                        "message_body": message_body,
                        "error": str(e),
                    })
                    sentry_sdk.capture_exception()
                    continue
                event_name = decoded_message.get("eventName")
                event_source = decoded_message.get("eventSource")
                # Strip configured suffixes (e.g. ".amazonaws.com") so the
                # event call reads as "<service>:<EventName>"
                for event_source_substitution in config.get(
                        "event_bridge.detect_cloudtrail_denies_and_update_cache.event_bridge_substitutions",
                    [".amazonaws.com"],
                ):
                    event_source = event_source.replace(
                        event_source_substitution, "")
                event_time = decoded_message.get("eventTime")
                utc_time = datetime.strptime(event_time, "%Y-%m-%dT%H:%M:%SZ")
                # Event time in epoch seconds (v1 of this task used ms)
                epoch_event_time = int(
                    (utc_time - datetime(1970, 1, 1)).total_seconds())
                # Skip entries older than a day
                if int(time.time()) - 86400 > epoch_event_time:
                    continue
                try:
                    session_name = decoded_message["userIdentity"][
                        "arn"].split("/")[-1]
                except (
                        IndexError,
                        KeyError,
                ):  # If IAM user, there won't be a session name
                    session_name = ""
                try:
                    principal_arn = decoded_message["userIdentity"][
                        "sessionContext"]["sessionIssuer"]["arn"]
                except KeyError:  # Skip events without a parsable ARN
                    continue

                event_call = f"{event_source}:{event_name}"

                ct_event = dict(
                    error_code=decoded_message.get("errorCode"),
                    error_message=decoded_message.get("errorMessage"),
                    arn=principal_arn,
                    # principal_owner=owner,
                    session_name=session_name,
                    source_ip=decoded_message["sourceIPAddress"],
                    event_call=event_call,
                    epoch_event_time=epoch_event_time,
                    ttl=epoch_event_time + event_ttl,
                    count=1,
                )
                resource = await get_resource_from_cloudtrail_deny(
                    ct_event, decoded_message)
                ct_event["resource"] = resource
                # Dedupe key: same principal/session/call/resource counts as
                # one event with an incremented count
                request_id = f"{principal_arn}-{session_name}-{event_call}-{resource}"
                ct_event["request_id"] = request_id
                generated_policy = await generate_policy_from_cloudtrail_deny(
                    ct_event)
                if generated_policy:
                    ct_event["generated_policy"] = generated_policy

                if all_cloudtrail_denies.get(request_id):
                    existing_count = all_cloudtrail_denies[request_id].get(
                        "count", 1)
                    ct_event["count"] += existing_count
                    all_cloudtrail_denies[request_id] = ct_event
                else:
                    all_cloudtrail_denies[request_id] = ct_event
                    new_events += 1
                num_events += 1
            except Exception as e:
                log.error({**log_data, "error": str(e)}, exc_info=True)
                sentry_sdk.capture_exception()
            # Always delete the message, even on failure, to avoid
            # redelivering poison-pill messages forever
            processed_messages.append({
                "Id":
                message["MessageId"],
                "ReceiptHandle":
                message["ReceiptHandle"],
            })
        # delete_message_batch requires a non-empty Entries list
        if processed_messages:
            await sync_to_async(sqs_client.delete_message_batch
                                )(QueueUrl=queue_url,
                                  Entries=processed_messages)

        await sync_to_async(dynamo.batch_write_cloudtrail_events
                            )(all_cloudtrail_denies.values())
        messages_awaitable = await sync_to_async(sqs_client.receive_message
                                                 )(QueueUrl=queue_url,
                                                   MaxNumberOfMessages=10)
        messages = messages_awaitable.get("Messages", [])
    if reached_limit_on_num_messages_to_process:
        # We hit our limit. Let's spawn another task immediately to process remaining messages
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies", )
    log_data["message"] = "Successfully cached Cloudtrail Access Denies"
    log_data["num_events"] = num_events
    log_data["new_events"] = new_events
    log.debug(log_data)

    return log_data
Beispiel #23
0
async def create_iam_role(create_model: RoleCreationRequestModel, username: str):
    """
    Creates IAM role.
    :param create_model: RoleCreationRequestModel, which has the following attributes:
        account_id: destination account's ID
        role_name: destination role name
        description: optional string - description of the role
                     default: Role created by {username} through ConsoleMe
        instance_profile: optional boolean - whether to create an instance profile and attach it to the role or not
                     default: True
    :param username: username of user requesting action
    :return: results: - indicating the results of each action
    """
    log_data = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "message": "Attempting to create role",
        "account_id": create_model.account_id,
        "role_name": create_model.role_name,
        "user": username,
    }
    log.info(log_data)

    # New roles always get ConsoleMe's configured default trust policy
    default_trust_policy = config.get("user_role_creator.default_trust_policy")
    if default_trust_policy is None:
        raise MissingConfigurationValue(
            "Missing Default Assume Role Policy Configuration"
        )
    if create_model.description:
        description = create_model.description
    else:
        description = f"Role created by {username} through ConsoleMe"

    iam_client = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="client",
        account_number=create_model.account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name="create_role_" + username,
    )
    # results accumulates per-step outcomes; "errors" counts failed steps
    results = {"errors": 0, "role_created": "false", "action_results": []}
    try:
        await sync_to_async(iam_client.create_role)(
            RoleName=create_model.role_name,
            AssumeRolePolicyDocument=json.dumps(default_trust_policy),
            Description=description,
            Tags=[],
        )
        results["action_results"].append(
            {
                "status": "success",
                "message": f"Role arn:aws:iam::{create_model.account_id}:role/{create_model.role_name} "
                f"successfully created",
            }
        )
        results["role_created"] = "true"
    except Exception as e:
        log_data["message"] = "Exception occurred creating role"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        results["action_results"].append(
            {
                "status": "error",
                "message": f"Error creating role {create_model.role_name} in account {create_model.account_id}:"
                + str(e),
            }
        )
        results["errors"] += 1
        sentry_sdk.capture_exception()
        # Since we were unable to create the role, no point continuing, just return
        return results

    # If here, role has been successfully created, add status updates for each action
    results["action_results"].append(
        {
            "status": "success",
            "message": "Successfully added default Assume Role Policy Document",
        }
    )
    results["action_results"].append(
        {
            "status": "success",
            "message": "Successfully added description: " + description,
        }
    )

    # Create instance profile and attach if specified
    if create_model.instance_profile:
        try:
            await sync_to_async(iam_client.create_instance_profile)(
                InstanceProfileName=create_model.role_name
            )
            await sync_to_async(iam_client.add_role_to_instance_profile)(
                InstanceProfileName=create_model.role_name,
                RoleName=create_model.role_name,
            )
            results["action_results"].append(
                {
                    "status": "success",
                    "message": f"Successfully added instance profile {create_model.role_name} to role "
                    f"{create_model.role_name}",
                }
            )
        except Exception as e:
            log_data[
                "message"
            ] = "Exception occurred creating/attaching instance profile"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            results["action_results"].append(
                {
                    "status": "error",
                    "message": f"Error creating/attaching instance profile {create_model.role_name} to role: "
                    + str(e),
                }
            )
            results["errors"] += 1

    stats.count(
        f"{log_data['function']}.success", tags={"role_name": create_model.role_name}
    )
    log_data["message"] = "Successfully created role"
    log.info(log_data)
    return results
Beispiel #24
0
async def get_service(service_name: str, service_path: str,
                      group: str) -> Resource:
    """
    Build an authenticated Google API service connection.

    Credentials come from either `google.service_key_file` (path to a GCP
    service-account JSON key on disk) or `google.service_key_dict` (the key
    JSON embedded directly in ConsoleMe's configuration); one of the two must
    be set or MissingConfigurationValue is raised. Setup instructions:
    https://hawkins.gitbook.io/consoleme/configuration/authentication-and-authorization/google-groups-support

    Because the key material is sensitive, storing it via AWS Secrets Manager
    is encouraged:
    https://hawkins.gitbook.io/consoleme/configuration/aws-secret-manager-integration

    The delegated credential subject is selected from the
    `google.credential_subject` mapping by matching the domain portion of
    ``group``; NoCredentialSubjectException is raised when no entry matches.
    """
    function = f"{__name__}.{sys._getframe().f_code.co_name}"
    stats.count(function)
    log.debug({
        "function": function,
        "service_name": service_name,
        "service_path": service_path,
        "group": group,
        "message": f"Building service connection for {service_name} / {service_path}",
    })

    admin_scopes = config.get(
        "google.admin_scopes",
        ["https://www.googleapis.com/auth/admin.directory.group"],
    )
    key_file = config.get("google.service_key_file")
    key_dict = config.get("google.service_key_dict")
    if key_file:
        admin_credentials = service_account.Credentials.from_service_account_file(
            key_file, scopes=admin_scopes)
    elif key_dict:
        admin_credentials = service_account.Credentials.from_service_account_info(
            key_dict, scopes=admin_scopes)
    else:
        raise MissingConfigurationValue(
            "Missing configuration for Google. You must configure either `google.service_key_file` "
            "or `google.service_key_dict`.")

    # Pick the delegated subject whose configured key matches the group's
    # e-mail domain.
    group_domain = group.split("@")[1]
    credential_subject = None
    for domain, subject in config.get("google.credential_subject").items():
        if domain == group_domain:
            credential_subject = subject
            break

    if not credential_subject:
        raise NoCredentialSubjectException(
            "Error: Unable to find Google credential subject for domain {}. "
            "{}".format(group_domain, config.get("ses.support_reference", "")))

    delegated_credentials = admin_credentials.with_subject(credential_subject)
    service = await sync_to_async(googleapiclient.discovery.build)(
        service_name, service_path, credentials=delegated_credentials)

    return service
Beispiel #25
0
def detect_role_changes_and_update_cache(celery_app):
    """
    Drain the EventBridge-fed SQS queue of IAM role change notifications and
    schedule a cache refresh (the `refresh_iam_role` Celery task) for every
    role that was created or modified. Every received message is deleted from
    the queue, even when it cannot be parsed.

    :param celery_app: the Celery application used to enqueue refresh tasks
    :return: set of role ARNs for which a refresh was triggered
    :raises MissingConfigurationValue: when no queue ARN is configured
    :raises DataNotRetrievable: when the queue URL cannot be resolved
    """
    log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
    queue_arn = config.get(
        "event_bridge.detect_role_changes_and_update_cache.queue_arn", ""
    ).format(region=config.region)
    if not queue_arn:
        raise MissingConfigurationValue(
            "Unable to find required configuration value: "
            "`event_bridge.detect_role_changes_and_update_cache.queue_arn`"
        )

    # ARN layout: arn:aws:sqs:<region>:<account>:<name>
    arn_parts = queue_arn.split(":")
    queue_name = arn_parts[-1]
    queue_region = arn_parts[3]
    queue_account_number = arn_parts[4]
    # Optionally assume a role before receiving messages from the queue
    queue_assume_role = config.get(
        "event_bridge.detect_role_changes_and_update_cache.assume_role"
    )

    sqs_client = boto3_cached_conn(
        "sqs",
        service_type="client",
        region=queue_region,
        retry_max_attempts=2,
        account_number=queue_account_number,
        assume_role=queue_assume_role,
    )

    queue_url = sqs_client.get_queue_url(QueueName=queue_name).get("QueueUrl")
    if not queue_url:
        raise DataNotRetrievable(f"Unable to retrieve Queue URL for {queue_arn}")

    roles_to_update = set()
    while True:
        messages = sqs_client.receive_message(
            QueueUrl=queue_url, MaxNumberOfMessages=10
        ).get("Messages", [])
        if not messages:
            break
        processed_messages = []
        for message in messages:
            try:
                outer_body = json.loads(message["Body"])
                event = json.loads(outer_body["Message"])
                role_name = event["detail"]["requestParameters"]["roleName"]
                account_id = event["account"]
                role_arn = f"arn:aws:iam::{account_id}:role/{role_name}"

                # Only enqueue one refresh per role per run.
                if role_arn not in roles_to_update:
                    celery_app.send_task(
                        "consoleme.celery_tasks.celery_tasks.refresh_iam_role",
                        args=[role_arn],
                    )
                roles_to_update.add(role_arn)
            except Exception as e:
                log.error(
                    {**log_data, "error": str(e), "raw_message": message}, exc_info=True
                )
                sentry_sdk.capture_exception()
            # Delete the message regardless of whether parsing succeeded so
            # bad payloads do not clog the queue.
            processed_messages.append(
                {
                    "Id": message["MessageId"],
                    "ReceiptHandle": message["ReceiptHandle"],
                }
            )
        sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=processed_messages)

    log.debug(
        {
            **log_data,
            "num_roles": len(roles_to_update),
            "message": "Triggered role cache update for roles that were created or changed",
        }
    )

    return roles_to_update
Beispiel #26
0
    async def prepare(self):
        """
        Authenticate the incoming request before the handler body runs.

        Authentication mode is chosen by configuration: mutual TLS when
        `auth.require_mtls` is set, otherwise a JWT auth cookie when
        `auth.require_jwt` is set (default True). On success this sets
        `self.requester` (and, for JWT auth, `self.user`, `self.groups`,
        `self.current_cert_age`, and `self.auth_cookie_expiration`). On an
        authentication failure it either writes an error response and
        finishes the request, or raises tornado.web.HTTPError(403).

        :raises MissingConfigurationValue: when no auth scheme is usable
        :raises tornado.web.HTTPError: when no requester could be determined
        """
        self.tracer = None
        self.span = None
        self.spans = {}
        self.responses = []
        self.request_uuid = str(uuid.uuid4())
        self.auth_cookie_expiration = 0
        stats.timer("base_handler.incoming_request")
        if config.get("auth.require_mtls", False):
            try:
                await auth.validate_certificate(self.request.headers)
            except InvalidCertificateException:
                stats.count(
                    "GetCredentialsHandler.post.invalid_certificate_header_value"
                )
                self.set_status(403)
                self.write({"code": "403", "message": "Invalid Certificate"})
                await self.finish()
                return

            # Extract user from valid certificate
            try:
                self.requester = await auth.extract_user_from_certificate(
                    self.request.headers)
                self.current_cert_age = await auth.get_cert_age_seconds(
                    self.request.headers)
            except (MissingCertificateException, Exception) as e:
                if isinstance(e, MissingCertificateException):
                    stats.count(
                        "BaseMtlsHandler.post.missing_certificate_header")
                    message = "Missing Certificate in Header."
                else:
                    stats.count("BaseMtlsHandler.post.exception")
                    message = f"Invalid Mtls Certificate: {e}"
                self.set_status(400)
                self.write({"code": "400", "message": message})
                await self.finish()
                return
        elif config.get("auth.require_jwt", True):
            # Check to see if user has a valid auth cookie
            if config.get("auth_cookie_name", "consoleme_auth"):
                auth_cookie = self.get_cookie(
                    config.get("auth_cookie_name", "consoleme_auth"))

                if auth_cookie:
                    res = await validate_and_return_jwt_token(auth_cookie)
                    if not res:
                        error = {
                            "code": "invalid_jwt",
                            "message": "JWT is invalid or has expired.",
                            "request_id": self.request_uuid,
                        }
                        self.set_status(403)
                        self.write(error)
                        await self.finish()
                        # Bug fix: `finish()` does not stop execution, so
                        # without this return the falsy `res` fell through to
                        # `res.get("user")` below and raised AttributeError
                        # after the 403 response was already finished.
                        return
                    self.user = res.get("user")
                    self.groups = res.get("groups")
                    self.requester = {"type": "user", "email": self.user}
                    self.current_cert_age = int(time.time()) - res.get("iat")
                    self.auth_cookie_expiration = res.get("exp")
            else:
                raise MissingConfigurationValue(
                    "Auth cookie name is not defined in configuration.")
        else:
            raise MissingConfigurationValue(
                "Unsupported authentication scheme.")
        if not hasattr(self, "requester"):
            raise tornado.web.HTTPError(403, "Unable to authenticate user.")
        self.ip = self.get_request_ip()
        await self.configure_tracing()
Beispiel #27
0
    async def get(self, requested_challenge_token):
        """
        Validate a challenge URL token for the authenticated user.

        Flow: prune all expired challenge tokens from Redis, look up the
        requested token, check it is unexpired and was issued to the current
        user, then mark it "success" and record the user's identity/groups so
        the original requester (presumably a CLI polling the token — TODO
        confirm against the token-issuing endpoint) can retrieve the result.
        Responses are plain text when the route type is "web" and JSON when
        it is "api".

        :param requested_challenge_token: token from the challenge URL path
        :raises MissingConfigurationValue: if challenge URL auth is disabled
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        endpoint = self.kwargs.get("type")
        # X-Forwarded-For may contain a comma-separated chain; use the first
        # (client-most) address.
        ip = self.request.headers.get("X-Forwarded-For",
                                      self.request.remote_ip).split(",")
        if isinstance(ip, list):
            ip = ip[0]
        log_data = {
            "user": self.user,
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": ip,
        }
        log.debug(log_data)

        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"))
        current_time = int(
            datetime.utcnow().replace(tzinfo=pytz.UTC).timestamp())
        expired_challenge_tokens = []
        # Delete expired tokens
        if all_challenges:
            for token, challenge_j in all_challenges.items():
                challenge = json.loads(challenge_j)
                if challenge.get("ttl", 0) < current_time:
                    expired_challenge_tokens.append(token)
            if expired_challenge_tokens:
                red.hdel(
                    config.get("challenge_url.redis_key",
                               "TOKEN_CHALLENGES_TEMP"),
                    *expired_challenge_tokens,
                )
        else:
            # No challenges exist at all: the requested one must have expired
            # and been pruned earlier.
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL.")
            if endpoint == "web":
                self.write(message)
            elif endpoint == "api":
                self.write({"message": message})
            return
        # Get fresh challenge for user's request
        user_challenge_j = red.hget(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
        )
        if user_challenge_j:
            # Do a double-take check on the ttl
            # Delete the token
            user_challenge = json.loads(user_challenge_j)
            if user_challenge.get("ttl", 0) < current_time:
                message = "This challenge URL has expired. Please try requesting a new challenge URL."
                if endpoint == "web":
                    self.write(message)
                elif endpoint == "api":
                    self.write({"message": message})
                return
            if ip != user_challenge.get("ip"):
                # Todo: Sometimes the request from the CLI will be IPv6, and the request from browser is ipv4. How
                # can we reconcile this so that we can perform validation?
                pass
                # raise Exception("IP address used to generate challenge URL is different than IP you are using now.")
            if self.user != user_challenge.get("user"):
                # The token was requested for a different identity; refuse to
                # mark it successful.
                log_data = {
                    **log_data,
                    "message":
                    "Authenticated user is different then user that requested token",
                    "authenticated_user": self.user,
                    "challenge_user": user_challenge.get("user"),
                }
                log.error(log_data)
                message = (
                    "This challenge URL is associated with a different user. Ensure that your client"
                    "configuration is specifying the correct user.")
                if endpoint == "web":
                    self.write(message)
                elif endpoint == "api":
                    self.write({"message": message})
                return
            # Mark the challenge completed and attach the authenticated
            # identity so the original requester can pick it up.
            user_challenge["status"] = "success"
            user_challenge["user"] = self.user
            user_challenge["groups"] = self.groups
            red.hset(
                config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
                requested_challenge_token,
                json.dumps(user_challenge),
            )
            message = "You've successfully authenticated to ConsoleMe and may now close this page."
            if endpoint == "web":
                self.write(message)
            elif endpoint == "api":
                self.write({"message": message})
        else:
            message = "The requested challenge URL was not found. Please try requesting a new challenge URL."
            if endpoint == "web":
                self.write(message)
            elif endpoint == "api":
                self.write({"message": message})
            return
Beispiel #28
0
    async def post(self, requested_challenge_token):
        """
        Complete a challenge URL authentication via a JSON POST body.

        Flow: prune expired challenges, retrieve the caller's challenge via
        `retrieve_user_challenge`, verify the submitted nonce matches, and
        (unless disabled by configuration) verify the request originates from
        the same IP the challenge was created with. On success the challenge
        is marked "success" in Redis with the authenticated user and groups.
        All responses are JSON.

        :param requested_challenge_token: token from the challenge URL path
        :raises MissingConfigurationValue: if challenge URL auth is disabled
        """
        if not config.get("challenge_url.enabled", False):
            raise MissingConfigurationValue(
                "Challenge URL Authentication is not enabled in ConsoleMe's configuration"
            )
        data = tornado.escape.json_decode(self.request.body)

        log_data = {
            "user": self.user,
            "function":
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "requested_challenge_token": requested_challenge_token,
            "message": "Incoming request",
            "ip": self.ip,
        }
        log.debug(log_data)

        all_challenges = red.hgetall(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"))
        if not all_challenges:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL.")
            self.write({"message": message})
            return

        await delete_expired_challenges(all_challenges)

        # Returns the challenge only if it exists and belongs to this user
        # (presumably — behavior defined by retrieve_user_challenge; confirm).
        valid_user_challenge = await retrieve_user_challenge(
            self, requested_challenge_token, log_data)
        if not valid_user_challenge:
            message = (
                "Unable to find a matching challenge URL. This usually means that it has expired. "
                "Please try requesting a new challenge URL.")
            self.write({"message": message})
            return

        # The nonce proves the poster actually received the challenge payload.
        if data.get("nonce") != valid_user_challenge["nonce"]:
            message = "Unable to validate challenge URL. The Nonce you've submitted is invalid."
            log.error({**log_data, "message": message})
            self.write({"message": message})
            return

        request_ip = self.get_request_ip()

        # By default, the challenge URL requester IP must match the URL the challenge was created with. In some cases
        # (i.e. IPv4 vs IPv6), the challenge may have been created with an IPv4 address, and the authenticated browser
        # verification request may originate from an IPv6 one, or visa versa, in which case this configuration may
        # need to be explicitly set to False.
        if config.get(
                "challenge_url.request_ip_must_match_challenge_creation_ip",
                True):
            if request_ip != valid_user_challenge.get("ip"):
                log.error({
                    **log_data,
                    "request_ip": request_ip,
                    "challenge_ip": valid_user_challenge.get("ip"),
                    "message": "Request IP doesn't match challenge IP",
                })
                self.write({
                    "message":
                    "Your originating IP doesn't match the IP the challenge was created with."
                })
                return

        # Mark the challenge completed and attach the authenticated identity
        # so the original requester can pick it up.
        valid_user_challenge["status"] = "success"
        valid_user_challenge["user"] = self.user
        valid_user_challenge["groups"] = self.groups
        red.hset(
            config.get("challenge_url.redis_key", "TOKEN_CHALLENGES_TEMP"),
            requested_challenge_token,
            json.dumps(valid_user_challenge),
        )
        message = "You've successfully authenticated to ConsoleMe and may now close this page."
        self.write({"message": message})