def client_boto3(self, service_name, profile_name=None, region_name=None):     # todo: refactor with resource_boto3
    try:
        profile_name = profile_name or AWS_Config().aws_session_profile_name()
        region_name  = region_name  or AWS_Config().aws_session_region_name()
        # profiles = get_session()._build_profile_map()
        # if profile_name in profiles:
        #     session = boto3.Session(profile_name=profile_name, region_name=region_name)
        if profile_name in self.profiles():                                     # seeing if this is a more efficient way to get the data
            session = boto3.Session(profile_name=profile_name, region_name=region_name)    # tried to pass these params but they had side effects: , botocore_session=self.boto_session()
            return {'status': 'ok', 'client': session.client(service_name=service_name), 'session': session}
        return {'status': 'ok', 'client': boto3.client(service_name=service_name, region_name=region_name)}
    except Exception as error:
        return {'status': 'error', 'data': '{0}'.format(error)}
def test_client_boto3(self):
    assert "Unknown service: 'aaaa'. Valid service names are:" in self.session.client_boto3('aaaa').get('data')
    AWS_Config().set_aws_session_profile_name('bad_profile')
    assert type(self.session.client_boto3('s3').get('client')).__name__ == 'S3'
    AWS_Config().set_aws_session_profile_name('default')
def _test_run_lambdas_in_multiple_accounts(self):
    from osbot_aws.apis.test_helpers.Temp_Lambda import Temp_Lambda
    AWS_Config().set_aws_session_profile_name('gs-detect-aws')
    AWS_Config().set_lambda_s3_bucket('gs-detect-lambda')
    with Temp_Lambda() as _:
        assert _.invoke_raw().get('status') == 'ok'             # assert was missing, so this check had no effect
    AWS_Config().set_aws_session_profile_name('default')
def __init__(self, image_name, path_images=None, image_tag='latest'):
    self.api_docker  = API_Docker()
    self.ecr         = ECR()
    self.aws_config  = AWS_Config()
    self.image_name  = image_name
    self.image_tag   = image_tag
    self.path_images = path_images or path_combine(__file__, '../../images')
class Create_Image_ECR:

    def __init__(self, image_name, image_tag='latest'):
        self.api_docker = API_Docker()
        self.ecr        = ECR()
        self.aws_config = AWS_Config()
        self.image_name = image_name
        self.image_tag  = image_tag

    def build_image(self):
        repository = self.image_repository()
        tag        = self.image_tag
        result     = self.api_docker.image_build(path=self.path_image(), repository=repository, tag=tag)
        return result.get('status') == 'ok'

    def create_repository(self):
        self.ecr.repository_create(self.image_name)
        return self.ecr.repository_exists(self.image_name)

    def image_repository(self):
        account_id = self.aws_config.aws_session_account_id()
        region     = self.aws_config.aws_session_region_name()
        return f'{account_id}.dkr.ecr.{region}.amazonaws.com/{self.image_name}'

    def ecr_login(self):
        auth_data = self.ecr.authorization_token()
        return self.api_docker.registry_login(registry=auth_data.get('registry'),
                                              username=auth_data.get('username'),
                                              password=auth_data.get('password'))

    def path_image(self):
        return path_combine(self.path_images(), self.image_name)

    def path_images(self):
        return path_combine(__file__, '../../images')

    def push_image(self):
        return self.api_docker.image_push(self.image_repository(), self.image_tag)

    def run(self):
        create_repository = self.create_repository()
        ecr_login         = self.ecr_login()
        build_image       = self.build_image()
        push_image        = self.push_image()
        return {'create_repository': create_repository,
                'ecr_login'        : ecr_login,
                'build_image'      : build_image,
                'push_image'       : push_image}
def session(self, profile_name=None, region_name=None) -> Session:         # todo: refactor with resource_boto3
    profile_name = profile_name or AWS_Config().aws_session_profile_name()
    region_name  = region_name  or AWS_Config().aws_session_region_name()
    profiles     = get_session()._build_profile_map()
    if profile_name in profiles:
        return boto3.Session(profile_name=profile_name, region_name=region_name)
    else:
        return boto3.Session()
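# Illustrative usage sketch (not from the original codebase): how the session() helper above could be
# used to obtain a profile-aware boto3 Session and build service clients from it. The Session_Helper
# name is an assumption; the tests elsewhere in this repo access the helper as `self.session`, so
# substitute the real class name when using this.
#
# helper    = Session_Helper()
# s3_client = helper.session().client('s3')                                                    # profile/region resolved from AWS_Config
# sts       = helper.session(profile_name='default', region_name='eu-west-1').client('sts')    # explicit overrides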
def setUp(self):
    self.aws_config          = AWS_Config()
    self.lambda_             = Lambda()
    self.s3                  = S3()
    self.sts                 = STS()
    self.expected_account_id = '785217600689'
    self.expected_region     = 'eu-west-1'
    self.expected_s3_prefix  = 'lambdas'
    self.expected_role_name  = None
    self.expected_s3_bucket  = f'{self.expected_account_id}-osbot-{self.expected_s3_prefix}'
    self.expected_module     = 'osbot_aws.lambdas.dev.hello_world'
    self.function_name       = 'osbot_aws_lambdas_dev_hello_world'
    self.lambda_handler      = run
def setUpClass(cls) -> None:
    #STS().check_current_session_credentials()
    cls.s3         = S3()
    cls.aws_config = AWS_Config()
    cls.account_id = cls.aws_config.aws_session_account_id()
    cls.s3_bucket  = cls.aws_config.lambda_s3_bucket()
    cls.region     = cls.aws_config.aws_session_region_name()
def test_account_id(self):
    account_id_1 = self.iam.account_id('gs-detect-aws')         # todo: rewrite since account_id doesn't take this parameter any more
    assert AWS_Config().aws_session_profile_name() == 'gs-detect-aws'
    self.iam._account_id = None
    self.iam._sts        = None
    account_id_2 = self.iam.account_id('default')
    assert AWS_Config().aws_session_profile_name() == 'default'
    assert account_id_1 != account_id_2
    self.iam._account_id = None
    self.iam._sts        = None
    account_id_3 = self.iam.account_id()
    assert AWS_Config().aws_session_profile_name() == 'default'
    assert account_id_2 == account_id_3
def __init__(self, layer_name='', runtimes=None, license_info=None, s3_bucket=None, s3_folder=None, description=None, version_number=None):
    self.layer_name     = layer_name.replace('.', '-')
    #self.folders_mapping = folders_mapping or {}
    self.runtimes       = runtimes     or ['python3.8', 'python3.7', 'python3.6']
    self.license_info   = license_info or 'https://github.com/owasp-sbot/OSBot-AWS/blob/master/LICENSE'
    self.description    = description  or ''
    self.s3_bucket      = s3_bucket    or AWS_Config().lambda_s3_bucket()
    self.s3_folder      = s3_folder    or AWS_Config().lambda_s3_folder_layers()
    self.s3_key         = f'{self.s3_folder}/{self.layer_name}.zip'
    self.version_number = version_number
def __init__(self, file_name=None, s3_bucket=None, s3_prefix=None):
    self.file_name   = file_name or f"temp_zip_file_{random_string_and_numbers()}"
    self.s3_bucket   = s3_bucket or AWS_Config().lambda_s3_bucket()                                    # honour the s3_bucket param, which was previously ignored
    self.s3_prefix   = s3_prefix or f'{AWS_Config().lambda_s3_folder_lambdas()}/unit_tests/temp_zips'  # same for s3_prefix
    self.s3_key      = f'{self.s3_prefix}/{self.file_name}.zip'
    self.folder      = None
    self.lambda_code = "def run(event, context): return 'hello {0}'.format(event.get('name'))"
    self.tmp_file    = None
    self.create_temp_file()
def upload_dependency(target):
    s3        = S3()
    s3_bucket = AWS_Config().lambda_s3_bucket()
    s3_file   = 'lambdas-dependencies/{0}.zip'.format(target)
    path_libs = Files.path_combine('../../../_lambda_dependencies/', target)
    if Files.not_exists(path_libs):
        raise Exception(f"In Lambda upload_dependency, could not find dependency for: {target} , which resolved to {path_libs}")
    s3.folder_upload(path_libs, s3_bucket, s3_file)
    return s3.file_exists(s3_bucket, s3_file)
def reset(self):
    if self.aws_lambda.s3_bucket is None:                                                                               # if these values are not set
        self.aws_lambda.set_s3_bucket(AWS_Config().lambda_s3_bucket())                                                  # use default values
        self.aws_lambda.set_s3_key(f'{AWS_Config().lambda_s3_folder_lambdas()}/{self.aws_lambda.original_name}.zip')    # which are needed
    return self.aws_lambda.update_lambda_code()                                                                         # to trigger the update (which will reset the lambda and force a cold start on next lambda invocation)
def __init__(self, lambda_name=None, delete_on_exit=True):
    self.lambda_name    = lambda_name or "temp_lambda_{0}".format(random_string_and_numbers())
    self.aws_lambda     = Lambda(self.lambda_name)
    self.tmp_folder     = Temp_Folder_With_Lambda_File(self.lambda_name).create_temp_file()
    self.role_arn       = Temp_Aws_Roles().for_lambda_invocation__role_arn()       # todo: refactor to have option to create the role programmatically (needs feature to wait for role to be available)
    self.create_log     = None
    self.delete_on_exit = delete_on_exit
    self.s3_bucket      = AWS_Config().lambda_s3_bucket()
    self.s3_key         = 'unit_tests/lambdas/{0}.zip'.format(self.lambda_name)
    self.s3             = self.aws_lambda.s3()
def setUpClass(cls) -> None:
    cls.aws_config      = AWS_Config()
    cls.account_id      = cls.aws_config.aws_session_account_id()
    cls.ecr             = ECR()
    cls.repository_name = 'an_test_repository'
    cls.repository_tags = {'an_tag': 'an_value'}
    assert cls.ecr.repository_exists(cls.repository_name) is False
    cls.result_create = cls.ecr.repository_create(name=cls.repository_name, tags=cls.repository_tags)
    assert cls.ecr.repository_exists(cls.repository_name) is True
def __init__(self, lambda_name):
    self.lambda_name = lambda_name
    self.aws_lambda  = Lambda(self.lambda_name)
    self.s3_bucket   = AWS_Config().lambda_s3_bucket()
    self.s3_key      = f'{AWS_Config().lambda_s3_folder_lambdas()}/{self.lambda_name}.zip'
    self.role_arn    = Temp_Aws_Roles().for_lambda_invocation__role_arn()
    self.tmp_folder  = Files.temp_folder('tmp_lambda_')
    (self.aws_lambda.set_s3_bucket  (self.s3_bucket )
                    .set_s3_key     (self.s3_key    )
                    .set_role       (self.role_arn  )
                    .set_folder_code(self.tmp_folder))
def resource_boto3(self, service_name, profile_name=None, region_name=None):       # todo: refactor with client_boto3
    try:
        profile_name = profile_name or AWS_Config().aws_session_profile_name()
        region_name  = region_name  or AWS_Config().aws_session_region_name()
        profiles     = get_session()._build_profile_map()
        if profile_name in profiles:
            session = boto3.Session(profile_name=profile_name, region_name=region_name)
            return {'status': 'ok', 'resource': session.resource(service_name=service_name)}
        return {'status': 'ok', 'resource': boto3.resource(service_name=service_name)}
    except Exception as error:
        return {'status': 'error', 'data': '{0}'.format(error)}
def test_session(self):
    assert self.session.session().profile_name == AWS_Config().aws_session_profile_name()
    assert self.session.session().region_name  == AWS_Config().aws_session_region_name()
    with Temp_User() as temp_user:
        iam                   = temp_user.iam
        user_info             = iam.user_info()
        access_key            = iam.user_access_key_create(wait_for_key_working=True)
        aws_access_key_id     = access_key.get('AccessKeyId')
        aws_secret_access_key = access_key.get('SecretAccessKey')
        session               = boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
        user_identity         = session.client('sts').get_caller_identity()
        assert user_identity.get('UserId') == user_info.get('UserId')
        assert user_identity.get('Arn')    == user_info.get('Arn')
        self.result = user_identity
def check_current_session_credentials(self, raise_exception=True):         # todo: see if there is a faster way to do this, at the moment it takes about 500ms which is quite a lot
    if AWS_Config().dev_skip_aws_key_check() == "True":
        return status_warning(message="check was skipped")
    result = self.check_aws_session()
    if result.get('status') == 'error':
        self.print_bad_credentials_exception(result.get('message'))
        #if exit_on_error:                                                  # todo: see better way to do this
        #    self.end_process_bad_credentials_exception()
        if raise_exception:
            self.raise_bad_credentials_exception(result.get('message'))
    return result
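# Illustrative usage sketch (assumption: this method lives on the STS class, which other tests in this
# repo call as STS().check_current_session_credentials()): passing raise_exception=False lets callers
# branch on the returned status dict instead of handling an exception.

result = STS().check_current_session_credentials(raise_exception=False)
if result.get('status') == 'error':
    print('AWS credentials are not usable:', result.get('message'))        # message comes from check_aws_session()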
def __init__(self, cluster_name, image_name, subnet_id=None, security_group_id=None):
    self.ecs               = ECS()
    self.ec2               = EC2()
    self.cloud_watch_logs  = Cloud_Watch_Logs()
    self.aws_config        = AWS_Config()                       # load config from env variables
    self.account_id        = self.cloud_watch_logs.account_id
    self.region_name       = self.cloud_watch_logs.region_name
    self.cluster_name      = cluster_name
    self.image_name        = image_name
    self.subnet_id         = subnet_id
    self.security_group_id = security_group_id
    self.task_family       = f"family__{self.image_name}"
    self.task_name         = f'task__{self.cluster_name}'
    #self.iam_execution_role = f'fargate-execution-role_{self.region_name}_{self.task_family}'
    #self.iam_task_role      = f'fargate-task-role_{self.region_name}_{self.task_family}'
    self.task_arn          = None
def load_dependency(target):
    if os.getenv('AWS_REGION') is None:
        return
    from osbot_aws.apis.S3 import S3
    import shutil
    import sys
    s3        = S3()
    s3_bucket = AWS_Config().lambda_s3_bucket()
    s3_key    = 'lambdas-dependencies/{0}.zip'.format(target)
    tmp_dir   = Files.path_combine('/tmp/lambdas-dependencies', target)
    #return s3.file_exists(s3_bucket, s3_key)
    if s3.file_exists(s3_bucket, s3_key) is False:
        raise Exception("In Lambda load_dependency, could not find dependency for: {0}".format(target))
    if file_not_exists(tmp_dir):                                            # download dependency
        zip_file = s3.file_download(s3_bucket, s3_key, False)               # download zip file with dependencies
        shutil.unpack_archive(zip_file, extract_dir=tmp_dir)                # unpack them
    if tmp_dir not in sys.path:                                             # if not currently in the path
        sys.path.append(tmp_dir)                                            # add tmp_dir to the path that python uses to check for dependencies
    return Files.exists(tmp_dir)
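# Illustrative usage sketch: a lambda handler calling load_dependency() above before importing a
# dependency that was packaged with upload_dependency(). The 'requests' target is only an example;
# outside AWS (AWS_REGION not set) the call returns early and does nothing.

def run(event, context):
    load_dependency('requests')                      # fetch and unpack the dependency zip from S3 into /tmp, then add it to sys.path
    import requests                                  # the import only resolves after the dependency is on sys.path
    return requests.get(event.get('url')).status_code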
def test_for_lambda_invocation__role_arn(self):
    account_id = AWS_Config().aws_session_account_id()
    role_arn   = self.temp_aws_roles.for_lambda_invocation__role_arn()
    assert role_arn == f"arn:aws:iam::{account_id}:role/temp_role_for_lambda_invocation"
import pytest
from osbot_aws.apis.STS            import STS
from osbot_utils.testing.Catch     import Catch
from osbot_utils.utils.Json        import json_to_str
from osbot_utils.utils.Misc        import wait
from osbot_aws.AWS_Config          import AWS_Config
from osbot_aws.helpers.Test_Helper import Test_Helper
from osbot_aws.apis.IAM            import IAM
from osbot_utils.utils.Assert      import Assert
from osbot_utils.utils.Dev         import pprint

account_id      = AWS_Config().aws_session_account_id()
delete_created  = True
test_user       = '******'
test_user_arn   = 'arn:aws:iam::{0}:user/test_user'.format(account_id)
test_role       = 'test_role'
test_role_arn   = 'arn:aws:iam::{0}:role/test_role'.format(account_id)
policy_document = {'Statement': [{'Action'   : 'sts:AssumeRole',
                                  'Effect'   : 'Allow',
                                  'Principal': {'Service': 'codebuild.amazonaws.com'}}]}


class Test_IAM(Test_Helper):

    @classmethod
    def setUpClass(cls):
def for_lambda_invocation__role_arn(self):
    account_id = AWS_Config().aws_session_account_id()
    role_name  = self.role_name__for_lambda_invocation
    return f"arn:aws:iam::{account_id}:role/{role_name}"
def test_image_repository(self):
    aws_config = AWS_Config()
    account_id = aws_config.aws_session_account_id()
    region     = aws_config.aws_session_region_name()
    assert self._.image_repository() == f'{account_id}.dkr.ecr.{region}.amazonaws.com/{self.image_name}'
def setUp(self) -> None:
    self.deploy     = Deploy_Lambda(self.lambda_function)
    self.aws_config = AWS_Config()
    # Test_Helper().check_aws_token()
class test_Deploy_Lambda(TestCase):

    lambda_name = None
    lambda_code = None
    code_folder = None

    @staticmethod
    def setup_test_environment__Deploy_Lambda(cls):                 # todo: refactor into separate class
        STS().check_current_session_credentials()
        cls.lambda_name  = "osbot_test_deploy_lambda"
        cls.lambda_code  = Temp_Folder_With_Lambda_File(cls.lambda_name)
        cls.code_folder  = cls.lambda_code.folder
        lambda_file      = cls.lambda_code.tmp_file
        module_folder    = path_combine(cls.code_folder, "osbot_test_deploy_lambda")
        lambda_in_module = path_combine(module_folder, file_name(lambda_file))
        folder_create(module_folder)
        file_copy(lambda_file, lambda_in_module)                    # todo: add a file_move to OSBot_Utils
        file_delete(lambda_file)
        file_create(path_combine(module_folder, '__init__.py'), "")
        sys.path.append(cls.code_folder)
        cls.lambda_module   = importlib.import_module("osbot_test_deploy_lambda.osbot_test_deploy_lambda")
        cls.lambda_function = cls.lambda_module.run

    @staticmethod
    def teardown_test_environment__Deploy_Lambda(cls):
        sys.path.remove(cls.code_folder)

    @classmethod
    def setUpClass(cls) -> None:
        cls.setup_test_environment__Deploy_Lambda(cls)

    @classmethod
    def tearDownClass(cls):
        cls.teardown_test_environment__Deploy_Lambda(cls)

    def setUp(self) -> None:
        self.deploy     = Deploy_Lambda(self.lambda_function)
        self.aws_config = AWS_Config()
        # Test_Helper().check_aws_token()

    def tearDown(self):
        self.deploy.delete()

    def test_check_aws_role(self):
        assert Temp_Aws_Roles().for_lambda_invocation_exists()

    def test_get_package(self):
        package = self.deploy.get_package()
        assert package.lambda_name == 'osbot_test_deploy_lambda.osbot_test_deploy_lambda'
        assert package.s3_bucket   == self.aws_config.lambda_s3_bucket()
        assert package.s3_key      == f'{self.aws_config.lambda_s3_folder_lambdas()}/{package.lambda_name}.zip'
        assert package.role_arn    == f"arn:aws:iam::{self.aws_config.aws_session_account_id()}:role/temp_role_for_lambda_invocation"
        assert folder_exists(package.tmp_folder)

    def test_update(self):
        deploy = Deploy_Lambda(self.lambda_function)
        result = deploy.update()
        assert result['status'] == 'ok'
        assert result['name']   == self.deploy.lambda_name().replace('.', "_")
        assert result['data']['FunctionArn' ] == f'arn:aws:lambda:{self.aws_config.aws_session_region_name()}:{self.aws_config.aws_session_account_id()}:function:osbot_test_deploy_lambda_osbot_test_deploy_lambda'
        assert result['data']['FunctionName'] == 'osbot_test_deploy_lambda_osbot_test_deploy_lambda'
        assert result['data']['Handler'     ] == 'osbot_test_deploy_lambda.osbot_test_deploy_lambda.run'
        assert result['data']['MemorySize'  ] == 10240
        assert result['data']['PackageType' ] == 'Zip'
        assert deploy.invoke()                  == 'hello None'
        assert deploy.invoke({"name": "world"}) == "hello world"
        print(deploy.delete())

    def test_invoke(self):
        self.deploy.update()
        assert self.deploy.invoke({"name": "world"}) == "hello world"
        # invoke directly
        aws_lambda = Lambda(name=self.deploy.lambda_name())
        assert aws_lambda.invoke()                  == 'hello None'
        assert aws_lambda.invoke({'name': 'world'}) == 'hello world'
class Create_Image_ECR:

    def __init__(self, image_name, path_images=None, image_tag='latest'):
        self.api_docker  = API_Docker()
        self.ecr         = ECR()
        self.aws_config  = AWS_Config()
        self.image_name  = image_name
        self.image_tag   = image_tag
        self.path_images = path_images or path_combine(__file__, '../../images')

    def build_image(self):
        repository = self.image_repository()
        tag        = self.image_tag
        result     = self.api_docker.image_build(path=self.path_image(), repository=repository, tag=tag)
        return result.get('status') == 'ok'

    def create_repository(self):
        self.ecr.repository_create(self.image_name)
        return self.ecr.repository_exists(self.image_name)

    def image_name_with_tag(self):                                  # can't be called image_name, since that would clash with the self.image_name attribute set in __init__
        return f'{self.image_repository()}:{self.image_tag}'

    def image_repository(self):
        account_id = self.aws_config.aws_session_account_id()
        region     = self.aws_config.aws_session_region_name()
        return f'{account_id}.dkr.ecr.{region}.amazonaws.com/{self.image_name}'

    def ecr_login(self):
        auth_data = self.ecr.authorization_token()
        return self.api_docker.registry_login(registry=auth_data.get('registry'),
                                              username=auth_data.get('username'),
                                              password=auth_data.get('password'))

    def path_image(self):
        return path_combine(self.path_images, self.image_name)

    def push_image(self):
        json_lines = self.api_docker.image_push(self.image_repository(), self.image_tag)
        return json_lines

    def run_locally(self):
        image_name = self.image_name_with_tag()
        return self.api_docker.docker_run(image_name)

    def create(self):
        create_repository = self.create_repository()
        ecr_login         = self.ecr_login()
        build_image       = self.build_image()
        push_image        = self.push_image()
        # status = create_repository and \
        #          build_image       and \
        #          ecr_login.get('Status') == 'Login Succeeded'  # \
        #          # push_image ????                             # todo: add success/error detector to push_image images logs (use json_lines_parse to parse string into json)
        return {'create_repository': create_repository,
                'ecr_login'        : ecr_login,
                'build_image'      : build_image,
                'push_image'       : push_image,
                #'status'          : status
                }
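# Illustrative usage sketch for the Create_Image_ECR class above. The 'hello-world' image name is only
# an example and assumes a folder with that name exists under path_images; create() returns the
# per-step results shown in its return dict.

create_image = Create_Image_ECR(image_name='hello-world')
results      = create_image.create()                            # create the ECR repo, log in, build and push the image
assert results.get('create_repository') is True
assert results.get('build_image')       is True
create_image.run_locally()                                      # run the freshly built image via the local docker API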
def __init__(self, bot_name=None, profile_name=None, account_id=None, region_name=None, lambda_s3_bucket=None, lambda_role_name=None):
    aws_config = AWS_Config()
    if bot_name         : aws_config.set_bot_name                (bot_name        )
    if profile_name     : aws_config.set_aws_session_profile_name(profile_name    )
    if account_id       : aws_config.set_aws_session_account_id  (account_id      )
    if region_name      : aws_config.set_aws_session_region_name (region_name     )
    if lambda_s3_bucket : aws_config.set_lambda_s3_bucket        (lambda_s3_bucket)
    if lambda_role_name : aws_config.set_lambda_role_name        (lambda_role_name)

    self.bot_name          = aws_config.bot_name()
    self.profile_name      = aws_config.aws_session_profile_name()
    self.region_name       = aws_config.aws_session_region_name()
    self.account_id        = aws_config.aws_session_account_id()
    self.s3_bucket_lambdas = aws_config.lambda_s3_bucket()
    self.lambda_role_name  = aws_config.lambda_role_name()
    self.lambda_role_arn   = f"arn:aws:iam::{self.account_id}:role/{self.lambda_role_name}"
    self.s3                = S3()
def setUp(self) -> None:
    self.deploy     = Deploy(run)
    #Test_Helper().check_aws_token()
    self.aws_config = AWS_Config()