def gather(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """Read user definitions from the configuration stream and resolve each
    user's per-database permissions.

    Returns a Prodict of updates (okta users, aws database permission maps,
    optionally the job's next transition) plus a list of per-user issues for
    entries whose permissions could not be parsed.
    """
    self._next_transition = model.job.next_transition
    issues: List[Issue] = []
    with _open(self.cfg_stream) as stream:
        # An empty YAML document loads as None; treat it as "no users".
        cfg_users: List[dict] = yaml.safe_load(stream) or []
    # Only databases whose status is at least ENABLED are grantable.
    db_name_by_uid = {
        uid: db.db_name
        for uid, db in model.aws.databases.items()
        if DbStatus[db.status] >= DbStatus.ENABLED
    }
    enabled_databases = list(db_name_by_uid)
    users: dict = {}
    databases: dict = {}
    for cfg_user in cfg_users:
        login = cfg_user["login"]
        default_grant_type = cfg_user.get("default_grant_type", DEFAULT_GRANT_TYPE)
        try:
            permissions = self._parse_permissions(
                cfg_user.get("permissions", []),
                model.aws.single_region,
                default_grant_type,
                enabled_databases,
                db_name_by_uid)
        except ValueError as e:
            issues.append(Issue(level=IssueLevel.ERROR, type='USER', id=login,
                                message=str(e)))
            continue
        users[login] = {
            "db_username": login[:MAX_DB_USERNAME_LENGTH],
            "permissions": permissions,
        }
        # Mirror each grant onto the database side for reverse lookup.
        for db_uid, grant_type in permissions.items():
            db_entry = databases.setdefault(db_uid, {"permissions": {}})
            db_entry["permissions"][login] = grant_type
    updates = Prodict(okta={"users": users}, aws={"databases": databases})
    if self._next_transition:
        updates.job = dict(next_transition=self._next_transition)
    return updates, issues
def gather(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """Read Glue connection definitions from the config file and expand each
    DB reference into concrete per-database connection settings.

    Physical connection requirements fall back to the attributes of the
    target database when not supplied. Unresolvable DB references produce
    an ERROR issue and are skipped.
    """
    issues = []
    updates = {}
    with open(self.cfg_filename) as file:
        # FIX: an empty YAML file loads as None, which would crash the
        # .get() below — normalize to an empty mapping (same guard as the
        # users gatherer).
        services = yaml.safe_load(file) or {}
    enabled_databases = [db_uid for db_uid, db in model.aws.databases.items()
                         if DbStatus[db.status] >= DbStatus.ENABLED]
    for conn in services.get("glue_connections", []):
        db_ref = conn['db']
        # Wildcard-expand the reference against the enabled databases.
        db_id_list = wc_expand(db_ref, enabled_databases)
        if not db_id_list:
            issues.append(Issue(level=IssueLevel.ERROR, type='GLUE', id=db_ref,
                                message=f"Not existing and enabled DB instance reference '{db_ref}'"))
            continue
        pcr = conn.get("physical_connection_requirements", {})
        supplied_db_names = conn.get("db_names")
        # Allow a single name to be given as a bare string.
        if isinstance(supplied_db_names, str):
            supplied_db_names = [supplied_db_names]
        grant_type = conn.get("grant_type", DEFAULT_GRANT_TYPE)
        for db_uid in db_id_list:
            db = model.aws.databases[db_uid]
            updates[db_uid] = {
                "db_names": supplied_db_names or [db.db_name],
                "grant_type": grant_type,
                "physical_connection_requirements": {
                    "availability_zone": pcr.get("availability_zone",
                                                 db.availability_zone),
                    "security_group_id_list": pcr.get("security_group_id_list",
                                                      db.vpc_security_group_ids),
                    "subnet_id": pcr.get("subnet_id", db.primary_subnet),
                },
            }
    return Prodict(aws={"glue_connections": updates}), issues
def gather(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """Read the RDS database configuration file and build the database map.

    Enabled databases get their master password resolved (failures become
    ERROR issues and the DB is skipped); disabled ones are recorded with
    status DISABLED only.
    """
    with open(self.cfg_filename) as file:
        # FIX: an empty YAML file loads as None, which would crash the
        # iteration below — normalize to an empty list (same guard as the
        # users gatherer).
        rds_list: List[dict] = yaml.safe_load(file) or []
    issues = []
    databases = {}
    for cfg_db in rds_list:
        db_id = cfg_db["id"]
        db_uid = f"{self.region}/{db_id}"
        # setdefault also writes the default back into the config entry.
        enabled = _to_bool(cfg_db.setdefault("enabled", True))
        if enabled:
            try:
                master_password, password_age = self.pwd_resolver.resolve(
                    db_id, cfg_db.get("master_password"))
                db = {
                    "status": DbStatus.ENABLED.name,
                    "permissions": {},
                    "master_password": master_password,
                    "password_age": password_age,
                }
            except Exception as e:
                issues.append(Issue(level=IssueLevel.ERROR, type="DB",
                                    id=db_uid, message=str(e)))
                continue
        else:
            db = dict(status=DbStatus.DISABLED.name)
        databases[db_uid] = db
    return Prodict(aws={"databases": databases}), issues
def gather(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """ Check if the users exist and retrieve their corresponding user_id and ssh_pubkey. """
    okta = model.okta
    session = async_retryable_session(self.executor)
    searcher = jmespath.compile(
        "[*].[id, status, profile.sshPubKey] | [0]")
    # Fire all lookups concurrently; results are collected in order below.
    pending = [
        session.get(
            f"https://{okta.organization}.okta.com/api/v1/users?limit=1&search=profile.login+eq+"
            + urllib.parse.quote(f'"{login}"'),
            headers=(self._http_headers()))
        for login in okta.users
    ]
    issues = []
    users_ext = {}
    logger.info(f"Checking Okta {okta.organization.capitalize()}'s Users:")
    widest_login = max(map(len, okta.users), default=0)
    for login, request in zip(okta.users, pending):
        response = request.result()
        response.raise_for_status()
        payload = json.loads(response.content.decode())
        found = searcher.search(payload)
        user_data = {}
        if not found:
            status, err_msg = "ABSENT", "Not found in OKTA"
        else:
            user_id, status, ssh_pubkey = found
            if status != "ACTIVE":
                err_msg = f"status={status}"
            elif not ssh_pubkey:
                status, err_msg = "MISSING_SSH_PUBKEY", "Missing SSH PubKey"
            else:
                err_msg = None
                user_data = {
                    "user_id": user_id,
                    "ssh_pubkey": ssh_pubkey,
                }
        user_data["status"] = status
        if err_msg:
            issues.append(
                Issue(level=IssueLevel.ERROR, type="USER", id=login,
                      message=err_msg))
        # Green for usable users, red otherwise; pad logins for alignment.
        color = "red" if err_msg else "green"
        leader = "." * (2 + widest_login - len(login))
        logger.opt(colors=True).info(
            f"  {login} {leader} <{color}>{status}</{color}>")
        users_ext[login] = user_data
    return Prodict(okta={"users": users_ext}), issues
def gather(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """Read application definitions from the config file and map each
    application to the list of enabled databases its DB reference expands to.

    Unresolvable references produce an ERROR issue and the application is
    skipped.
    """
    issues = []
    updates = {}
    with open(self.cfg_filename) as file:
        # FIX: an empty YAML file loads as None, which would crash the
        # iteration below — normalize to an empty list (same guard as the
        # users gatherer).
        applications: List[dict] = yaml.safe_load(file) or []
    enabled_databases = [db_uid for db_uid, db in model.aws.databases.items()
                         if DbStatus[db.status] >= DbStatus.ENABLED]
    for app in applications:
        app_name = app['name']
        db_ref = app['db']
        # Wildcard-expand the reference against the enabled databases.
        db_id_list = wc_expand(db_ref, enabled_databases)
        if not db_id_list:
            issues.append(Issue(level=IssueLevel.ERROR, type='APP',
                                id=app_name,
                                message=f"Not existing and enabled DB instance reference '{db_ref}'"))
            continue
        updates[app_name] = db_id_list
    return Prodict(applications=updates), issues
def _gather_rds_status(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """ For all RDS instances: check if it's possible to connect, authenticate with credentials,
    and get authorized access to the primary DB. Reports each instance check on the console. """
    databases = model.aws.databases
    logger.info("Checking access to RDS instances:")
    issues = []
    futures = []
    # Only instances with a known endpoint can be probed; others get a
    # placeholder None so the zip below stays aligned.
    for db_uid, db in databases.items():
        if 'endpoint' in db:
            future = self.executor.submit(_check_mysql_instance, db)
        else:
            future = None
        futures.append(future)
    # FIX: default=0 prevents ValueError when there are no databases
    # (consistent with the Okta gatherer's max(..., default=0)).
    db_id_max_len = max(map(len, databases), default=0)
    updates = {}
    accessible = dict(status=DbStatus.ACCESSIBLE.name)
    for db_uid, future in zip(databases, futures):
        if future:
            success, message = future.result(MYSQL_LOGIN_TIMEOUT)
            color = ("red", "green")[success]
            if success:
                updates[db_uid] = accessible
            else:
                issues.append(
                    Issue(level=IssueLevel.ERROR, type="DB", id=db_uid,
                          message=message))
        else:
            # Not probed: just report the configured status in a distinct color.
            message = databases[db_uid].status
            color = "light-magenta"
        leader = "." * (2 + db_id_max_len - len(db_uid))
        logger.opt(colors=True).info(
            f"  {db_uid} {leader} <{color}>{message}</{color}>")
    return Prodict(aws={"databases": updates}), issues
def gather(self, model: Prodict) -> Tuple[Prodict, List[Issue]]:
    """Reconcile configured databases against the RDS instances present in AWS.

    Every configured DB in this region starts presumed ABSENT; enumerating the
    actual RDS instances then overwrites (or removes) those presumptions:
    instances not in the config are auto-enabled (password resolved), ENABLED
    ones get their AWS-side attributes refreshed, and any other status is left
    untouched. DBs still marked ABSENT at the end yield an ERROR issue.
    """
    issues = []
    configured_databases = model.aws.databases
    not_found = dict(status=DbStatus.ABSENT.name)
    # Presume absence for every configured DB of this region; note the final
    # loop compares against this exact dict value to detect leftovers.
    updates = {
        db_uid: not_found
        for db_uid in configured_databases
        if db_uid.startswith(f"{self.aws.region}/")
    }
    for db in self.aws.rds_enum_databases(ENGINE_TYPE):
        db_id = db["DBInstanceIdentifier"]
        db_uid = f"{self.aws.region}/{db_id}"
        if db_uid not in configured_databases:
            # Instance exists in AWS but not in the config: auto-enable it,
            # which requires resolving its master password.
            try:
                master_password, password_age = self.pwd_resolver.resolve(
                    db_id, None)
                db_upd = {
                    "status": DbStatus.AUTO_ENABLED.name,
                    "permissions": {},
                    "master_password": master_password,
                    "password_age": password_age,
                }
            except Exception as e:
                issues.append(
                    Issue(level=IssueLevel.WARNING, type="DB", id=db_uid,
                          message=f"Failed to auto-configure: {e}"))
                continue
        elif DbStatus[
                configured_databases[db_uid].status] == DbStatus.ENABLED:
            # Known and enabled: keep config, just refresh AWS attributes below.
            db_upd = {}
        else:
            # Configured but not ENABLED (e.g. DISABLED): leave it alone and
            # drop the ABSENT presumption recorded above.
            del updates[db_uid]
            continue
        subnets_by_az = _get_subnets_by_az(db)
        az = db.get(
            "AvailabilityZone",
            # Chose the AZ of the first subnet arbitrarily.
            # Required for MOTO since AZ is not defined.
            next(iter(subnets_by_az.keys())))
        # AWS-side attributes refreshed for both auto-enabled and enabled DBs.
        db_upd.update({
        "db_name": db["DBName"],
        "master_username": db["MasterUsername"],
        "endpoint": {
            "address": db["Endpoint"]["Address"],
            "port": db["Endpoint"]["Port"],
        },
        "dbi_resource_id": db["DbiResourceId"],
        "availability_zone": az,
        "vpc_security_group_ids": [
            sg["VpcSecurityGroupId"] for sg in db["VpcSecurityGroups"]
            if sg["Status"] == "active"
        ],
        "primary_subnet": subnets_by_az[az][0]
        })
        updates[db_uid] = db_upd
    # Anything still carrying the ABSENT presumption was never seen in AWS.
    for db_uid, db in updates.items():
        if db == not_found:
            issues.append(
                Issue(level=IssueLevel.ERROR, type="DB", id=db_uid,
                      message="Not found in AWS"))
    return Prodict(aws={"databases": updates}), issues