@pytest.fixture()
def create_mockinstance(monkeypatch):
    """Creates a mock instance for the purpose of getting and modifying tags.
    BEWARE: as this spins up a localstack instance, this is a check of
    function only, not of permissions.
    """
    session = localstack_client.session.Session()
    ec2_resource = session.resource('ec2')
    ec2_client = session.client('ec2')
    monkeypatch.setattr(ec2_rogue_killer, 'ec2_client', ec2_client)
    output = ec2_resource.create_instances(
        ImageId='garbage',
        InstanceType='t2.micro',
        MinCount=1,
        MaxCount=1,
        TagSpecifications=[
            {'ResourceType': 'volume',
             'Tags': [{'Key': 'Timeout', 'Value': str(100)}]},
            {'ResourceType': 'instance',
             'Tags': [{'Key': 'Timeout', 'Value': str(100)}]},
        ])
    yield output
    ## Teardown: terminate the mock instance.
    instance_id = output[0].instance_id
    ec2_client.terminate_instances(InstanceIds=[instance_id])
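## Hypothetical usage sketch for the fixture above (test name and assertion
## are illustrative, not from the source): reload the mock instance and
## verify the Timeout tag round-trips through localstack.
def test_create_mockinstance_tags(create_mockinstance):
    instance = create_mockinstance[0]
    instance.reload()  ## refresh tag state from localstack
    tags = {t['Key']: t['Value'] for t in instance.tags}
    assert tags['Timeout'] == '100'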
def get_files_to_transform(s3_connection=None, bucket_name=None):
    """Get the list of files to be transformed from the customer's bucket."""
    if not s3_connection:
        s3_connection = session.resource('s3')
    try:
        bucket = s3_connection.Bucket(bucket_name)
        objects = bucket.objects.all()
        last_download_time = datetime.utcnow() - timedelta(
            hours=int(number_of_hours))
        files_to_download = []
        for obj in objects:
            last_modified = obj.last_modified.replace(tzinfo=pytz.UTC)
            if is_not_production:
                files_to_download.append(obj.key)
            # in production, collect only files modified within the last
            # number_of_hours hours
            elif last_modified > last_download_time.replace(tzinfo=pytz.UTC):
                files_to_download.append(obj.key)
        return files_to_download
    except Exception:
        logging.error(f'Error getting file keys from {bucket_name} bucket',
                      exc_info=True)
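## Hedged usage sketch: the bucket name is illustrative. Passing an explicit
## localstack resource shows why the function takes an injectable
## s3_connection instead of always using the module-level session.
import localstack_client.session

local_s3 = localstack_client.session.Session().resource('s3')
recent_keys = get_files_to_transform(s3_connection=local_s3,
                                     bucket_name='customer-bucket')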
@pytest.fixture()
def create_mockinstance_ssm(monkeypatch):
    """Creates a mock instance without tags and with an ssm command running,
    mimicking deployment instances.
    """
    session = localstack_client.session.Session()
    ec2_resource = session.resource('ec2')
    ec2_client = session.client('ec2')
    monkeypatch.setattr(ec2_rogue_killer, 'ec2_client', ec2_client)
    output = ec2_resource.create_instances(ImageId='garbage',
                                           InstanceType='t2.micro',
                                           MinCount=1,
                                           MaxCount=1)
    ssm_client_local.send_command(DocumentName='AWS-RunShellScript',
                                  InstanceIds=[output[0].instance_id],
                                  Parameters={
                                      'commands': ['sleep 100; sleep 10'],
                                      'executionTimeout': [str(3600)]
                                  })

    def mockfunc(instance_info):
        return output[0].instance_id != instance_info['InstanceId']

    yield (output, mockfunc)
    ## Teardown: terminate the mock instance.
    instance_id = output[0].instance_id
    ec2_client.terminate_instances(InstanceIds=[instance_id])
@pytest.fixture()
def mock_resources(monkeypatch):
    ## mock s3 resources:
    ## TODO I don't think these are scoped correctly w/o a context manager.
    monkeypatch.setattr(s3, "s3_client", session.client("s3"))
    monkeypatch.setattr(s3, "s3_resource", session.resource("s3"))
    ## mock ssm resources:
    monkeypatch.setattr(ssm, "ssm_client", session.client("ssm"))
    ## mock ec2 resources:
    monkeypatch.setattr(ec2, "ec2_resource", session.resource("ec2"))
    monkeypatch.setattr(ec2, "ec2_client", session.client("ec2"))
    ## mock events:
    monkeypatch.setattr(events, "events", session.client("events"))
    ## mock pricing:
    # monkeypatch.setattr(pricing, "client", session.client("pricing"))
    monkeypatch.setattr(pricing, "ec2client", session.client("ec2"))
@pytest.fixture()
def create_mockinstance_doomed(monkeypatch):
    """Creates a mock instance for the purpose of getting and modifying tags.
    Doomed because the relevant timeouts are set to -1 minutes, i.e. already
    expired.
    """
    session = localstack_client.session.Session()
    ec2_resource = session.resource('ec2')
    ec2_client = session.client('ec2')
    monkeypatch.setattr(ec2_rogue_killer, 'ec2_client', ec2_client)
    output = ec2_resource.create_instances(
        ImageId='garbage',
        InstanceType='t2.micro',
        MinCount=1,
        MaxCount=1,
        TagSpecifications=[
            {'ResourceType': 'volume',
             'Tags': [{'Key': 'Timeout', 'Value': str(-1)}]},
            {'ResourceType': 'instance',
             'Tags': [{'Key': 'Timeout', 'Value': str(-1)}]},
        ])
    yield output
    ## Teardown: terminate the mock instance.
    instance_id = output[0].instance_id
    ec2_client.terminate_instances(InstanceIds=[instance_id])
def setUp(self):
    self.selenium.file_detector = LocalFileDetector()
    session = localstack_client.session.Session(region_name="us-west-1")
    self.s3 = session.resource("s3")
    self.bucket = self.s3.Bucket(settings.AWS_STORAGE_BUCKET_NAME)
    # Delete all files currently in the bucket
    for obj in self.bucket.objects.all():
        obj.delete()
    super().setUp()
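## The (tags, responsecodes) arguments of the test below suggest pytest
## parametrization. A hypothetical decorator sketch (the tag payloads and
## expected error codes are illustrative assumptions, not from the source):
## with DryRun=True, an authorized request raises DryRunOperation, while a
## request denied by the user policy raises UnauthorizedOperation.
@pytest.mark.parametrize("tags,responsecodes", [
    ([{"ResourceType": "instance",
       "Tags": [{"Key": "Timeout", "Value": "10"}]}], "DryRunOperation"),
    ([{"ResourceType": "instance",
       "Tags": [{"Key": "Nope", "Value": "x"}]}], "UnauthorizedOperation"),
])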
def test_user_policy(tags, responsecodes):
    session = boto3.Session(profile_name="testdev")
    ec2_client = session.client("ec2")
    ec2_resource = session.resource("ec2")
    try:
        ec2_resource.create_instances(ImageId="ami-07ebfd5b3428b6f4d",
                                      InstanceType="t2.micro",
                                      MinCount=1,
                                      MaxCount=1,
                                      DryRun=True,
                                      TagSpecifications=tags)
    except ClientError as e:
        assert e.response["Error"]["Code"] == responsecodes
def fetch_file_content_from_bucket(s3_connection=None, bucket_name=None,
                                   file_key=None):
    """Fetch the content of the file without downloading it."""
    if not s3_connection:
        s3_connection = session.resource('s3')
    try:
        return s3_connection.Object(bucket_name, file_key).get()['Body'].read()
    except Exception:
        logging.error(
            f'Error while reading {file_key} from {bucket_name} bucket',
            exc_info=True)
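## Hypothetical call (bucket and key are illustrative): returns the object
## body as raw bytes, with nothing written to local disk.
content = fetch_file_content_from_bucket(bucket_name=CLIENT_S3_BUCKET,
                                         file_key='test-file.xml')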
def download_file_from_bucket(s3_connection=None, bucket_name=None,
                              file_key=None):
    """Download files from an s3 bucket."""
    try:
        if not s3_connection:
            s3_connection = session.resource('s3')
        ## renamed from `object` to avoid shadowing the builtin
        obj = s3_connection.Object(bucket_name, file_key)
        obj.download_file(f'src/downloads/{file_key}')
    except Exception:
        logging.error(
            f'Error while downloading {file_key} from {bucket_name} bucket',
            exc_info=True)
def init():
    """Initialize the environment for development and test."""
    s3_client = session.client('s3')
    s3_resource = session.resource('s3')
    make_bucket(s3_client, CLIENT_S3_BUCKET)
    make_bucket(s3_client, JSON_S3_BUCKET)
    file_key = 'test-file.xml'
    ## use a context manager so the file handle is closed after upload
    with open(f'src/uploads/{file_key}', "r") as file:
        upload_file_to_bucket(s3_connection=s3_resource,
                              bucket_name=CLIENT_S3_BUCKET,
                              file_key=file_key,
                              data=file.read())
@pytest.fixture()
def create_mockinstance_untagged(monkeypatch):
    """Creates a mock instance for the purpose of getting and modifying tags.
    BEWARE: as this spins up a localstack instance, this is a check of
    function only, not of permissions.
    """
    session = localstack_client.session.Session()
    ec2_resource = session.resource('ec2')
    ec2_client = session.client('ec2')
    monkeypatch.setattr(ec2_rogue_killer, 'ec2_client', ec2_client)
    output = ec2_resource.create_instances(ImageId='garbage',
                                           InstanceType='t2.micro',
                                           MinCount=1,
                                           MaxCount=1)
    yield output
    ## Teardown: terminate the mock instance.
    instance_id = output[0].instance_id
    ec2_client.terminate_instances(InstanceIds=[instance_id])
@pytest.fixture()
def create_mockinstance(monkeypatch):
    """Creates a mock instance for the purpose of getting and modifying tags.
    BEWARE: as this spins up a localstack instance, this is a check of
    function only, not of permissions.
    """
    session = localstack_client.session.Session()
    ec2_resource = session.resource("ec2")
    ec2_client = session.client("ec2")
    monkeypatch.setattr(develop_blueprint, "ec2_resource", ec2_resource)
    monkeypatch.setattr(develop_blueprint, "ec2_client", ec2_client)
    output = ec2_resource.create_instances(
        ImageId="garbage",
        InstanceType="t2.micro",
        MinCount=1,
        MaxCount=1,
        TagSpecifications=[
            {"ResourceType": "volume",
             "Tags": [{"Key": "Timeout", "Value": str(timeout_init)}]},
            {"ResourceType": "instance",
             "Tags": [{"Key": "Timeout", "Value": str(timeout_init)}]},
        ])
    yield output
    ## Teardown: terminate the mock instance.
    instance_id = output[0].instance_id
    ec2_client.terminate_instances(InstanceIds=[instance_id])
@pytest.fixture()
def create_lambda(monkeypatch):
    """Sets up the module to use localstack, and creates a lambda function in
    localstack called test-lambda. Source code taken from
    ./test_mats/testmainlambda.zip.
    """
    session = localstack_client.session.Session()
    lambda_client = session.client("lambda")
    ## boto3 exposes lambda only as a client, not a resource, so the original
    ## `session.resource("lambda")` call would raise ResourceNotExistsError.
    ## create_function also requires a Code parameter; the zip path below is
    ## the deployment package named in the docstring.
    with open("./test_mats/testmainlambda.zip", "rb") as f:
        zipped_code = f.read()
    lambda_client.create_function(
        FunctionName="test-lambda",
        Runtime='python3.6',
        Role='todo',
        Handler='submit_start.handler',
        Code={'ZipFile': zipped_code},
        Description="test Lambda Function for Serverless",
        MemorySize=128,
    )
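## Hypothetical smoke test for the fixture above (name and assertion are
## illustrative): invoke the freshly created function through the same
## localstack session and check the synchronous invoke status code.
def test_create_lambda(create_lambda):
    session = localstack_client.session.Session()
    lambda_client = session.client("lambda")
    response = lambda_client.invoke(FunctionName="test-lambda", Payload=b"{}")
    assert response["StatusCode"] == 200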
def upload_file_to_bucket(s3_connection=None, bucket_name=None, file_key=None,
                          data=None):
    """Upload a file to an s3 bucket and return its public URL."""
    try:
        if not s3_connection:
            s3_connection = session.resource('s3')
        bucket = s3_connection.Bucket(bucket_name)
        bucket.put_object(
            Key=file_key,
            Body=data,
        )
        s3_connection.ObjectAcl(bucket_name, file_key).put(ACL='public-read')
        s3_url = f'https://{bucket_name}.s3.amazonaws.com/{file_key}'
        if is_not_production:
            s3_url = f'http://localhost:4566/{bucket_name}/{file_key}'
        return s3_url
    except Exception:
        logging.error(
            f'Error while uploading {file_key} to {bucket_name} bucket',
            exc_info=True)
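## Hypothetical usage, mirroring the init() helper above; in non-production
## the returned URL points at localstack (http://localhost:4566/...).
with open('src/uploads/test-file.xml') as f:
    url = upload_file_to_bucket(bucket_name=CLIENT_S3_BUCKET,
                                file_key='test-file.xml',
                                data=f.read())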
env_vars = {
    'graceperiod': '10',
    'exemptlist': 'i-029339d0ff4fa4318,i-0c366d0e991bc6fde,i-04a5f4794bafda3b1,i-0a7b60fe2661444da',
    'dryrun': '1',
    'topicarn': 'arn:aws:sns:us-east-1:739988523141:EC2_Instance_Sweeper',
    'localstack': '1',
    'AWS_ACCESS_KEY_ID': 'foo',
    'AWS_SECRET_ACCESS_KEY': 'bar',
    'LOCALSTACK_HOSTNAME': 'localhost'
}
for key, value in env_vars.items():
    os.environ[key] = value

import ncap_iac.permissions.management_lambdas.ec2_rogue_killer as ec2_rogue_killer

## this is essentially the function ec2_rogue_killer
session = localstack_client.session.Session()
ec2_resource_local = session.resource('ec2')
ec2_client_local = session.client('ec2')
ssm_client_local = session.client('ssm')


@pytest.fixture()
def set_ssm_exempt(monkeypatch):
    monkeypatch.setattr(ec2_rogue_killer, 'ssm_client', ssm_client_local)
    ssm_client_local.put_parameter(Name='exempt_instances',
                                   Type='String',
                                   Overwrite=True,
                                   Value=env_vars['exemptlist'])
    yield 'values'
    ssm_client_local.delete_parameter(Name='exempt_instances')
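## Hypothetical round-trip check for the fixture above (test name is
## illustrative): the exempt list written to localstack SSM should read back
## unchanged.
def test_set_ssm_exempt_roundtrip(set_ssm_exempt):
    param = ssm_client_local.get_parameter(Name='exempt_instances')
    assert param['Parameter']['Value'] == env_vars['exemptlist']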
import pytest
from test_submit_start import setup_lambda_env, get_paths, user_name
import os
import localstack_client.session

here = os.path.abspath(os.path.dirname(__file__))
test_log_mats = os.path.join(here, "test_mats", "logfolder")

bucket_name = "cianalysispermastack"
key_name = "test_user/submissions/submit.json"
fakedatakey_name = "test_user/submissions/fakedatasubmit.json"
fakeconfigkey_name = "test_user/submissions/fakeconfigsubmit.json"
notimestampkey_name = "test_user/submissions/notimestampconfigsubmit.json"
## the two names below were copy-paste duplicates of notimestampkey_name,
## which collapsed three distinct submission fixtures into one s3 key
nodatakey_name = "test_user/submissions/nodatasubmit.json"
noconfigkey_name = "test_user/submissions/noconfigsubmit.json"

## set up mock client and resources.
session = localstack_client.session.Session()
s3_client = session.client("s3")
s3_resource = session.resource("s3")

## The lambda function logic we use is monitor_updater.


@pytest.fixture()
def mock_resources(monkeypatch):
    ## mock s3 resources:
    ## TODO I don't think these are scoped correctly w/o a context manager.
    monkeypatch.setattr(s3, "s3_client", session.client("s3"))
    monkeypatch.setattr(s3, "s3_resource", session.resource("s3"))
    ## mock ssm resources:
    monkeypatch.setattr(ssm, "ssm_client", session.client("ssm"))
    ## mock ec2 resources:
    monkeypatch.setattr(ec2, "ec2_resource", session.resource("ec2"))
    monkeypatch.setattr(ec2, "ec2_client", session.client("ec2"))
@pytest.fixture()
def setup_testing_bucket(monkeypatch):
    """Sets up a localstack bucket called cianalysispermastack with the
    following directory structure:
    /
    |-test_user
      |-inputs
        |-data.json
      |-configs
        |-config.json
      |-submissions
        |-submit.json
    |-logs
      |-active
        |-i-1234567890abcdef0.json
        |-i-superexpensive.json
      |-test_user
        |-joblog1
        |-joblog2
        ...
    """
    subkeys = {
        "inputs/data1.json": {"data": "value"},
        "inputs/data2.json": {"data": "value"},
        "configs/config.json": {"param": "p1"},
        "configs/fullconfig.json": {
            "param": "p1",
            "__duration__": 360,
            "__dataset_size__": 20,
            "ensemble_size": 5
        },
        "submissions/singlesubmit.json": {
            "dataname": os.path.join(user_name, "inputs", "data1.json"),
            "configname": os.path.join(user_name, "configs", "config.json"),
            "timestamp": "testtimestamp"
        },
        "submissions/submit.json": {
            "dataname": [os.path.join(user_name, "inputs", d)
                         for d in ["data1.json", "data2.json"]],
            "configname": os.path.join(user_name, "configs", "config.json"),
            "timestamp": "testtimestamp"
        },
        "submissions/{}".format(os.path.basename(fakedatakey_name)): {
            "dataname": [os.path.join(user_name, "inputs", d)
                         for d in ["data21.json", "data22.json"]],
            "configname": os.path.join(user_name, "configs", "config.json"),
            "timestamp": "testtimestamp"
        },
        "submissions/{}".format(os.path.basename(fakeconfigkey_name)): {
            "dataname": [os.path.join(user_name, "inputs", d)
                         for d in ["data1.json", "data2.json"]],
            "configname": os.path.join(user_name, "configs", "config22.json"),
            "timestamp": "testtimestamp"
        },
        "submissions/{}".format(os.path.basename(nodatakey_name)): {
            "configname": os.path.join(user_name, "configs", "config22.json"),
            "timestamp": "testtimestamp"
        },
        "submissions/{}".format(os.path.basename(noconfigkey_name)): {
            "dataname": [os.path.join(user_name, "inputs", d)
                         for d in ["data1.json", "data2.json"]],
            "timestamp": "testtimestamp"
        },
        "submissions/{}".format(os.path.basename(notimestampkey_name)): {
            "dataname": [os.path.join(user_name, "inputs", d)
                         for d in ["data1.json", "data2.json"]],
            "configname": os.path.join(user_name, "configs", "config22.json")
        }
    }

    s3_client = session.client("s3")
    s3_resource = session.resource("s3")
    monkeypatch.setattr(s3, "s3_client", session.client("s3"))
    ## TODO I don't think these are scoped correctly w/o a context manager.
    monkeypatch.setattr(s3, "s3_resource", session.resource("s3"))
    try:
        ## probe for existing data; a miss raises ClientError and triggers
        ## the setup branch below
        for sk in subkeys:
            s3_client.get_object(Bucket=bucket_name,
                                 Key=os.path.join(user_name, sk))
        s3_client.get_object(Bucket=bucket_name,
                             Key="logs/test_user/i-0ff308d5c9b5786f3.json")
    except ClientError:
        ## Write data files
        s3_client.create_bucket(Bucket=bucket_name)
        for sk in subkeys:
            key = os.path.join(user_name, sk)
            writeobj = s3_resource.Object(bucket_name, key)
            content = json.dumps(subkeys[sk]).encode("UTF-8")
            writeobj.put(Body=content)
        ## Write logs
        log_paths = get_paths(test_log_mats)
        try:
            for f in log_paths:
                s3_client.upload_file(os.path.join(test_log_mats, f),
                                      bucket_name,
                                      Key=f)
        except ClientError as e:
            logging.error(e)
            raise
    yield bucket_name, os.path.join(user_name, "submissions/submit.json")
def test_protected_resource_kwargs_not_passed():
    """Test protected kwargs are not overwritten in boto3.resource creation."""
    session = localstack_client.session.Session()
    kwargs = {'region_name': 'another_region'}
    sqs = session.resource('sqs', **kwargs)
    assert sqs.meta.client.meta.region_name != 'another_region'
def test_resource_kwargs_passed():
    """Test kwargs are passed through to boto3.resource creation."""
    session = localstack_client.session.Session()
    kwargs = {'config': Config(signature_version='s3v4')}
    sqs = session.resource('sqs', **kwargs)
    assert sqs.meta.client.meta.config.signature_version == 's3v4'
import pytest
import time
import localstack_client.session
import boto3
from botocore.exceptions import ClientError
import ncap_iac.ncap_blueprints.dev_utils.develop_blueprint as develop_blueprint
import os

timeout_init = 5
real_test_instance = "i-0ce3833f4cce8fcdf"

session = localstack_client.session.Session()
ec2_resource_local = session.resource("ec2")
ec2_client_local = session.client("ec2")


@pytest.fixture()
def use_devcred(monkeypatch):
    monkeypatch.setattr(develop_blueprint, "ec2_resource",
                        boto3.Session(profile_name="testdev").resource("ec2"))
    monkeypatch.setattr(develop_blueprint, "ec2_client",
                        boto3.Session(profile_name="testdev").client("ec2"))


loc = os.path.dirname(os.path.abspath(__file__))
fixturetemplate = os.path.join(loc, "fixtures")


@pytest.fixture()
def create_realinstance(monkeypatch):
    """Creates a real instance for the purpose of getting and modifying tags