def store():
    """Yield a DynamoDBStateStore wired to a moto-mocked DynamoDB table.

    The table/client are created inside the mock context, and the fixture
    yields (rather than returns) so the mock stays active for the test.
    """
    with mock_dynamodb2():
        resource = boto3.resource('dynamodb', region_name='us-west-2')
        state_store = DynamoDBStateStore(filename, 'us-west-2')
        state_store.table = resource.create_table(
            TableName=filename.replace('/', '-'),
            KeySchema=[
                {'AttributeName': 'key', 'KeyType': 'HASH'},     # partition key
                {'AttributeName': 'index', 'KeyType': 'RANGE'},  # sort key
            ],
            AttributeDefinitions=[
                {'AttributeName': 'key', 'AttributeType': 'S'},
                {'AttributeName': 'index', 'AttributeType': 'N'},
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10,
            },
        )
        state_store.client = boto3.client('dynamodb', region_name='us-west-2')
        # Has to be yield here for moto to work
        yield state_store
def setUp(self):
    """Start a moto DynamoDB mock and create the tile-index table."""
    self.table_name = 'test.index.boss'
    self.region_name = 'us-east-1'
    schema = self.get_tile_schema()
    # moto patches boto3 until self.mock_dynamo.stop() is called
    self.mock_dynamo = moto.mock_dynamodb2()
    self.mock_dynamo.start()
    self.dynamo = boto3.client('dynamodb', region_name=self.region_name)
    self.dynamo.create_table(TableName=self.table_name, **schema)
def dynamodb(request):
    """pytest fixture: a mocked DynamoDB resource for the test's lifetime."""
    mock = mock_dynamodb2()
    mock.start()
    # unpatch when the requesting test finishes
    request.addfinalizer(mock.stop)
    return boto3.resource(
        'dynamodb',
        region_name='us-west-2',
        aws_secret_access_key='123',
        aws_access_key_id='abc',
    )
def setUp(self):
    """Build a StatusHandler with mocked DynamoDB, request, and write()."""
    self.timeout = 0.5
    # surface mis-scheduled twisted delayed calls during the run
    twisted.internet.base.DelayedCall.debug = True
    self.mock_dynamodb2 = mock_dynamodb2()
    self.mock_dynamodb2.start()
    settings = AutopushSettings(
        hostname="localhost",
        statsd_host=None,
    )
    self.settings = StatusHandler.ap_settings = settings
    self.request_mock = Mock()
    self.status = StatusHandler(Application(), self.request_mock)
    self.write_mock = self.status.write = Mock()
def setup(self):
    """Start DynamoDB + Streams mocks and create a stream-enabled table."""
    self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()]
    for mock in self.mocks:
        mock.start()
    # create a table with a stream attached
    conn = boto3.client('dynamodb', region_name='us-east-1')
    response = conn.create_table(
        TableName='test-streams',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
        StreamSpecification={
            'StreamEnabled': True,
            'StreamViewType': 'NEW_AND_OLD_IMAGES',
        },
    )
    # individual tests read records from this stream
    self.stream_arn = response['TableDescription']['LatestStreamArn']
def setUp(self):
    """Wire a HealthHandler to mocked DynamoDB, logging, and I/O."""
    self.timeout = 0.5
    twisted.internet.base.DelayedCall.debug = True
    self.log_mock = patch("autopush.health.log").start()
    self.mock_dynamodb2 = mock_dynamodb2()
    self.mock_dynamodb2.start()
    settings = AutopushSettings(
        hostname="localhost",
        statsd_host=None,
    )
    HealthHandler.ap_settings = self.settings = settings
    self.router_table = self.settings.router.table
    self.storage_table = self.settings.storage.table
    self.request_mock = Mock()
    self.health = HealthHandler(Application(), self.request_mock)
    self.status_mock = self.health.set_status = Mock()
    self.write_mock = self.health.write = Mock()
    # fires when the handler calls finish(), so tests can wait on it
    d = self.finish_deferred = Deferred()
    self.health.finish = lambda: d.callback(True)
def test_lambda_handler_stopping_with_cname_and_zone_tags(
        self, instance_info, hosted_zones, change_resource_recordset,
        dhcp_configurations, get_subnet_cidr, hostnames_enabled,
        dnssupport_enabled, get_hostedzone_properties,
        dhcp_option_set_id_for_vpc):
    """A 'stopping' event for an instance tagged with a CNAME must delete the
    CNAME, A, and PTR records the handler previously created.

    All Route53/EC2 lookups come in as patched mocks (the parameters);
    DynamoDB is backed by moto.
    """
    dhcp_option_set_id_for_vpc.return_value = 'dopt-52a0ea29'
    # Fix: run the whole test under the moto context manager so the DynamoDB
    # patch is removed even when an assertion fails.  Previously the trailing
    # mock.stop() was skipped on failure, leaking the mock into later tests.
    with moto.mock_dynamodb2():
        client = boto3.client("dynamodb")
        client.create_table(
            TableName="DDNS",
            AttributeDefinitions=[{
                "AttributeName": "InstanceId",
                "AttributeType": "S"
            }],
            KeySchema=[{
                "AttributeName": "InstanceId",
                "KeyType": "HASH"
            }],
            ProvisionedThroughput={
                "ReadCapacityUnits": 1,
                "WriteCapacityUnits": 1
            })
        get_hostedzone_properties.return_value = {
            'HostedZone': {
                'Id': '/hostedzone/Z2705FFK9RBG8N',
                'Name': 'ddnslambda.com.',
                'CallerReference':
                    'RISWorkflow-RD:7c9d0012-6791-4dca-b438-0b820efca179',
                'Config': {
                    'Comment': 'test',
                    'PrivateZone': False
                },
                'ResourceRecordSetCount': 14
            },
            'DelegationSet': {
                'NameServers': [
                    'ns-1667.awsdns-16.co.uk',
                    'ns-1140.awsdns-14.org',
                    'ns-44.awsdns-05.com',
                    'ns-530.awsdns-02.net'
                ]
            },
            'VPCs': [
                {
                    'VPCRegion': 'us-east-1',
                    'VPCId': 'vpc-43248d39'
                },
            ],
            'ResponseMetadata': {
                'HTTPStatusCode': 200,
                'RequestId': 'omitted'
            }
        }
        dnssupport_enabled.return_value = True
        hostnames_enabled.return_value = True
        get_subnet_cidr.return_value = '172.31.80.0/20'
        dhcp_configurations.return_value = [
            'ddnslambda.com.', 'AmazonProvidedDNS.'
        ]
        change_resource_recordset.return_value = {
            'ChangeInfo': {
                'Id': 'string',
                'Status': 'INSYNC',
                'SubmittedAt': datetime(2015, 1, 1),
                'Comment': 'string'
            }
        }
        # The instance's *current* description: tagged with a CNAME.
        instance_info.return_value = {
            'ResponseMetadata': {
                'HTTPStatusCode': 200,
                'RequestId': 'omitted'
            },
            'Reservations': [{
                'Instances': [
                    {
                        'AmiLaunchIndex': 123,
                        'ImageId': 'string',
                        'InstanceId': 'i-00deb668716374ec7',
                        'InstanceType': 't1.micro',
                        'KernelId': '',
                        'KeyName': 'string',
                        'LaunchTime': datetime(2015, 1, 1),
                        'Monitoring': {
                            'State': 'enabled'
                        },
                        'PrivateDnsName': 'ip-172-31-90-228.ec2.internal',
                        'PrivateIpAddress': '172.31.90.228',
                        'StateTransitionReason': 'string',
                        'SubnetId': 'subnet-24fe650a',
                        'VpcId': 'vpc-43248d39',
                        'Architecture': 'x86_64',
                        'Tags': [{
                            'Key': 'CNAME',
                            'Value': 'internal.ddnslambda.com.'
                        }]
                    },
                ],
                'OwnerId': 'string',
                'RequesterId': 'string',
                'ReservationId': 'string'
            }]
        }
        # Forward zone plus matching reverse (PTR) zone.
        hosted_zones.return_value = {
            'ResponseMetadata': {
                'HTTPStatusCode': 200,
                'RequestId': 'omitted'
            },
            'HostedZones': [{
                'Id': '/hostedzone/Z2705FFK9RBG8N',
                'Name': 'ddnslambda.com.',
                'CallerReference':
                    'RISWorkflow-RD:7c9d0012-6791-4dca-b438-0b820efca179',
                'Config': {
                    'Comment': 'string',
                    'PrivateZone': True
                },
                'ResourceRecordSetCount': 123,
                'LinkedService': {
                    'ServicePrincipal': 'string',
                    'Description': 'string'
                }
            }, {
                'Id': '/hostedzone/Z2705FFK9RBG8O',
                'Name': '90.31.172.in-addr.arpa.',
                'CallerReference':
                    'RISWorkflow-RD:7c9d0012-6791-4dca-b438-0b820efca179',
                'Config': {
                    'Comment': 'string',
                    'PrivateZone': True
                },
                'ResourceRecordSetCount': 123,
                'LinkedService': {
                    'ServicePrincipal': 'string',
                    'Description': 'string'
                }
            }],
            'Marker': 'string',
            'IsTruncated': True,
            'NextMarker': 'string',
            'MaxItems': 'string'
        }
        event = {
            'region': 'us-east-1',
            'account': '123456789012',
            'detail': {
                'state': 'stopping',
                'instance-id': 'i-00deb668716374ec7'
            }
        }
        # Seed DDNS with the state recorded when the instance started; note
        # the stored CNAME (machine1...) differs from the live tag above.
        results = put_item_in_dynamodb_table(
            client, 'DDNS', 'i-00deb668716374ec7', {
                'Reservations': [{
                    'Instances': [{
                        'AmiLaunchIndex': 123,
                        'ImageId': 'string',
                        'InstanceId': 'i-00deb668716374ec7',
                        'InstanceType': 't1.micro',
                        'KeyName': 'string',
                        'LaunchTime': '2015-01-01T00:00:00',
                        'Monitoring': {
                            'State': 'enabled'
                        },
                        'PrivateDnsName': 'ip-172-31-90-228.ddnslambda.com',
                        'PrivateIpAddress': '172.31.90.228',
                        'StateTransitionReason': 'string',
                        'SubnetId': 'subnet-24fe650a',
                        'VpcId': 'vpc-43248d39',
                        'Architecture': 'x86_64',
                        'Tags': [{
                            'Key': 'CNAME',
                            'Value': 'machine1.ddnslambda.com.'
                        }]
                    }],
                    'OwnerId': 'string',
                    'RequesterId': 'string',
                    'ReservationId': 'string'
                }]
            })
        print('results: ' + str(results))
        response = lambda_handler(event, 'context', dynamodb_client=client)
        assert response[0] == 'Successfully removed recordsets'
        assert response[
            1] == 'Deleted CNAME record in zone id: Z2705FFK9RBG8N for hosted zone machine1.ddnslambda.com. with value: ip-172-31-90-228.ddnslambda.com'
        assert response[
            2] == 'Deleted A record in zone id: Z2705FFK9RBG8N for hosted zone machine1.ddnslambda.com. with value: 172.31.90.228'
        assert response[
            3] == 'Deleted PTR record in zone id: Z2705FFK9RBG8O for hosted zone 228.90.31.172.in-addr.arpa with value: machine1.ddnslambda.com.'
from cyclone.web import Application
from mock import Mock
from moto import mock_dynamodb2
from nose.tools import eq_, ok_
from twisted.internet.defer import Deferred
from twisted.logger import Logger
from twisted.trial import unittest
from autopush.db import Router, create_rotating_message_table
from autopush.router.interface import IRouter, RouterResponse
from autopush.settings import AutopushSettings

# Fixed identifiers shared across tests in this module.
dummy_uaid = str(uuid.UUID("abad1dea00000000aabbccdd00000000"))
dummy_chid = str(uuid.UUID("deadbeef00000000decafbad00000000"))
dummy_token = dummy_uaid + ":" + dummy_chid

# NOTE(review): this rebinding shadows the imported mock_dynamodb2 factory
# with a single module-scoped instance driven by setUp/tearDown below.
mock_dynamodb2 = mock_dynamodb2()


def setUp():
    # nose module-level setup: activate the DynamoDB mock once for the module
    mock_dynamodb2.start()
    create_rotating_message_table()


def tearDown():
    mock_dynamodb2.stop()


class TestWebpushHandler(unittest.TestCase):
    def setUp(self):
        # imported lazily so the moto mock is active before autopush touches AWS
        # NOTE(review): this snippet appears truncated — the rest of setUp
        # likely continues beyond the visible chunk.
        from autopush.web.webpush import WebPushHandler
def dynamodb(aws_credentials):
    """Yield a moto-mocked low-level DynamoDB client in us-east-1."""
    with mock_dynamodb2():
        client = boto3.client("dynamodb", region_name="us-east-1")
        yield client
def dynamodb(aws_credentials):
    """Yield a moto-mocked DynamoDB resource in eu-north-1."""
    with moto.mock_dynamodb2():
        resource = boto3.resource("dynamodb", region_name="eu-north-1")
        yield resource
def test_update_page():
    """End-to-end check of page_updater.update_page against mocked S3/DynamoDB:
    seeds the template, output page, and measurement counts, then verifies
    both the returned ranking dict and the rendered HTML written to S3."""
    # setup S3: bucket, page template, and current output page
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(os.environ["S3_BUCKET_NAME"])
    bucket.create(
        CreateBucketConfiguration={
            "LocationConstraint": os.environ["AWS_DEFAULT_REGION"]
        }
    )
    with open(
        os.path.join(
            os.path.dirname(__file__), "../../../static/horoscope_template.html"
        )
    ) as f:
        html_template = f.read()
    bucket.Object(os.environ["S3_TEMPLATE_KEY"]).put(Body=html_template)
    with open(
        os.path.join(os.path.dirname(__file__), "../../../static/horoscope.html")
    ) as f:
        html = f.read()
    bucket.Object(os.environ["S3_OUTPUT_KEY"]).put(Body=html)
    # setup DynamoDB with the latest measurement record
    with mock_dynamodb2():
        dynamodb = boto3.resource("dynamodb")
        table = dynamodb.create_table(
            TableName=os.environ["DYNAMODB_TABLE"],
            AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
            KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        )
        # counts per 4-bit outcome; the updater presumably ranks signs by
        # these values — TODO confirm against page_updater
        record = {
            "id": "latest",
            "job_id": "dummy_job_id",
            "backend_name": "dummy_backend_name",
            "creation_date": "2019-07-01T00:01:02.123456Z",
            "num_of_0000": 10,
            "num_of_0001": 9,
            "num_of_0010": 8,
            "num_of_0011": 7,
            "num_of_0100": 6,
            "num_of_0101": 5,
            "num_of_0110": 4,
            "num_of_0111": 3,
            "num_of_1000": 2,
            "num_of_1001": 1,
            "num_of_1010": 16,
            "num_of_1011": 15,
            "num_of_1100": 14,
            "num_of_1101": 13,
            "num_of_1110": 12,
            "num_of_1111": 11,
        }
        table.put_item(Item=record)
        # execution
        actual = page_updater.update_page(None, None)
        # validate return value
        expected = {
            "backend_name": "dummy_backend_name",
            "creation_date": "2019-07-01 00:01",
            "rank1": "<td>Aquarius</td><td>Jan 20 - Feb 18</td>",
            "rank2": "<td>Pisces</td><td>Feb 19 - Mar 20</td>",
            "rank3": "<td>Aries</td><td>Mar 21 - Apr 19</td>",
            "rank4": "<td>Taurus</td><td>Apr 20 - May 20</td>",
            "rank5": "<td>Gemini</td><td>May 21 - Jun 20</td>",
            "rank6": "<td>Cancer</td><td>Jun 21 - Jul 22</td>",
            "rank7": "<td>Leo</td><td>Jul 23 - Aug 22</td>",
            "rank8": "<td>Virgo</td><td>Aug 23 - Sep 22</td>",
            "rank9": "<td>Libra</td><td>Sep 23 - Oct 22</td>",
            "rank10": "<td>Scorpio</td><td>Oct 23 - Nov 21</td>",
            "rank11": "<td>Sagittarius</td><td>Nov 22 - Dec 21</td>",
            # NOTE(review): "Dec 22 -Jan 19" (missing space) matches the
            # template output expected by this test — do not "fix" casually
            "rank12": "<td>Capricorn</td><td>Dec 22 -Jan 19</td>",
        }
        assert actual == expected
        # validate S3: rendered page must match the golden file
        actual_html = (
            bucket.Object(os.environ["S3_OUTPUT_KEY"]).get()["Body"].read().decode("utf-8")
        )
        with open(os.path.join(os.path.dirname(__file__), "horoscope_expected.html")) as f:
            expected_html = f.read()
        assert actual_html == expected_html
def setUp(self):
    """Start DynamoDB/SQS mocks, create the model tables and the mock queue."""
    self.motos = [mock_dynamodb2(), mock_sqs()]
    # plain loop instead of a side-effect-only list comprehension
    for m in self.motos:
        m.start()
    Page.create_table(wait=True)
    Site.create_table(wait=True)
    boto3.client('sqs').create_queue(QueueName=mock_sqs_name)
def mock_db(request):
    """We never want to use the real dynamodb."""
    dynamo_mock = mock_dynamodb2()
    dynamo_mock.start()
    # undo the patch once the requesting test is done
    request.addfinalizer(dynamo_mock.stop)
def setUp(self):
    # Start moto's DynamoDB and S3 mocks for this test case.
    # NOTE(review): the started mock objects are discarded; tearDown builds
    # *new* instances and calls stop() on those.  Whether that unpatches
    # depends on the moto version in use — confirm, and consider keeping
    # references (e.g. self.mock_dynamodb2 = mock_dynamodb2()).
    mock_dynamodb2().start()
    mock_s3().start()
    # set lazily by individual tests; tearDown stops it when present
    self.senderIDs = None
def mock_dynamo_resource(mock_settings_env_vars):
    """Yield a moto-backed DynamoDB resource pinned to us-west-2."""
    with mock_dynamodb2():
        resource = boto3.resource("dynamodb", region_name="us-west-2")
        yield resource
def setup(self):
    """Activate DynamoDB and DynamoDB Streams mocks for this test class."""
    dynamo_mock = mock_dynamodb2()
    streams_mock = mock_dynamodbstreams()
    # keep references so teardown elsewhere can stop them
    self.mocks = [dynamo_mock, streams_mock]
    dynamo_mock.start()
    streams_mock.start()
def setup_test_environment():
    """Provision and populate mocked DynamoDB for the duration of a test."""
    # explicit start/stop is equivalent to `with mock_dynamodb2():`
    mock = mock_dynamodb2()
    mock.start()
    try:
        set_up_dynamodb()
        put_data_dynamodb()
        yield
    finally:
        mock.stop()
def dynamodb():
    """Yield moto's in-memory DynamoDB backend while the mock is active."""
    # explicit start/stop is equivalent to `with mock_dynamodb2():`
    mock = mock_dynamodb2()
    mock.start()
    try:
        yield dynamodb_backend2
    finally:
        mock.stop()
def autoscaler_patches(context):
    # behave fixture: patches every external dependency of the autoscaler
    # (resource-group loading, monitoring client, pool manager pruning,
    # cluster connector, signal process) and stands up the clusterman state
    # table in a moto-mocked DynamoDB for the duration of the scenario.
    behave.use_fixture(boto_patches, context)
    # two fake spot-fleet resource groups, fully fulfilled and not stale
    rg1 = mock.Mock(spec=SpotFleetResourceGroup, target_capacity=10, fulfilled_capacity=10, is_stale=False)
    rg2 = mock.Mock(spec=SpotFleetResourceGroup, target_capacity=10, fulfilled_capacity=10, is_stale=False)
    resource_totals = {'cpus': 80, 'mem': 1000, 'disk': 1000, 'gpus': 0}
    with staticconf.testing.PatchConfiguration(
        {'autoscaling': {
            'default_signal_role': 'bar'
        }},
    ), mock.patch(
        'clusterman.autoscaler.autoscaler.get_monitoring_client',
    ), mock.patch(
        'clusterman.aws.util.SpotFleetResourceGroup.load',
        return_value={
            rg1.id: rg1,
            rg2.id: rg2
        },
    ), mock.patch(
        'clusterman.autoscaler.pool_manager.PoolManager',
        wraps=PoolManager,
    ), mock.patch(
        'clusterman.autoscaler.autoscaler.PoolManager.prune_excess_fulfilled_capacity',
    ), mock.patch(
        'clusterman.autoscaler.pool_manager.ClusterConnector.load',
    ) as mock_cluster_connector, mock.patch(
        'clusterman.autoscaler.autoscaler.PoolManager._calculate_non_orphan_fulfilled_capacity',
        return_value=20,
    ), mock.patch(
        'clusterman.autoscaler.signals.Signal._connect_to_signal_process',
    ), mock.patch('clusterman.autoscaler.autoscaler.Signal._get_metrics',
                  ) as mock_metrics, mock_dynamodb2():
        # NOTE(review): DynamoDB's KeyType enum is HASH/RANGE; 'SORT' below is
        # non-standard — moto apparently tolerates it, but confirm before
        # pointing this at anything real.
        dynamodb.create_table(
            TableName=CLUSTERMAN_STATE_TABLE,
            KeySchema=[
                {
                    'AttributeName': 'state',
                    'KeyType': 'HASH'
                },
                {
                    'AttributeName': 'entity',
                    'KeyType': 'SORT'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'state',
                    'AttributeType': 'S'
                },
                {
                    'AttributeName': 'entity',
                    'AttributeType': 'S'
                },
            ],
        )
        mock_metrics.return_value = {
        }  # don't know why this is necessary but we get flaky tests if it's not set
        mock_cluster_connector.return_value.get_resource_total.side_effect = resource_totals.__getitem__
        yield
def dynamodb(region_name):
    """Yield a mocked DynamoDB client for the requested region."""
    # explicit start/stop is equivalent to `with moto.mock_dynamodb2():`
    mock = moto.mock_dynamodb2()
    mock.start()
    try:
        yield boto3.client('dynamodb', region_name=region_name)
    finally:
        mock.stop()
def dynamodb():
    """Yield a moto-mocked DynamoDB client (region from the environment)."""
    with mock_dynamodb2():
        yield boto3.client("dynamodb")
def mock_metastore():
    """Yield a metastore built inside a mocked DynamoDB table."""
    with mock_dynamodb2():
        store, name = build_metastore()
        yield store
        # teardown runs when the fixture resumes after the test
        delete_metastore(name)
import boto3 import responses from dotenv import load_dotenv from flask import Flask from flask import request from moto import mock_dynamodb2 from pyngrok import ngrok from twilio.rest import Client from utils.jsonutils import decimal_default load_dotenv(dotenv_path=".env.dev") # Mock DynamoDB calls with an in-memory datastore mock = mock_dynamodb2() mock.start() dynamodb = boto3.resource("dynamodb", os.environ.get("DYNAMODB_REGION")) dynamodb.create_table( TableName=os.environ.get("DYNAMODB_AQI_TABLE"), KeySchema=[ { "AttributeName": "PartitionKey", "KeyType": "HASH" }, ], AttributeDefinitions=[ { "AttributeName": "PartitionKey",
def setUpClass(cls):
    """One-time class setup: fake credentials, moto mock, mocked table."""
    cls.mock_aws_credentials()
    # keep a handle on the mock so tearDownClass can stop it
    cls.mock_dynamodb2 = mock_dynamodb2()
    cls.mock_dynamodb2.start()
    cls.create_mocked_table()
def ddb(aws_credentials):
    """Yield a mocked us-east-1 DynamoDB client."""
    # explicit start/stop is equivalent to `with mock_dynamodb2():`
    mock = mock_dynamodb2()
    mock.start()
    try:
        yield boto3.client('dynamodb', region_name='us-east-1')
    finally:
        mock.stop()
def dynamodb():
    """Yield a mocked DynamoDB client pinned to ap-southeast-1."""
    with mock_dynamodb2():
        client = boto3.client("dynamodb", region_name="ap-southeast-1")
        yield client
def do_test_setup():
    """Provision mocked DynamoDB state for each test using this fixture."""
    # explicit start/stop is equivalent to `with mock_dynamodb2():`
    mock = mock_dynamodb2()
    mock.start()
    try:
        set_up_dynamodb()
        yield
    finally:
        mock.stop()
def tearDown(self):
    # NOTE(review): these construct *new* moto mock instances and stop them
    # rather than stopping the instances started in setUp — whether this
    # unpatches depends on the moto version; confirm, and prefer stopping
    # the exact started objects.
    mock_dynamodb2().stop()
    mock_s3().stop()
    # senderIDs is only created by some tests; stop its polling if present
    if self.senderIDs:
        self.senderIDs.stop()
class TestJobCheck(unittest.TestCase):
    """Tests for job_check: BigQuery job polling via canned Google API
    responses, with SQS/DynamoDB/CloudWatch backed by moto."""

    # NOTE(review): these class attributes shadow the imported moto factories
    # with single mock instances shared by every test in this class.
    mock_sqs = mock_sqs()
    mock_dynamodb2 = mock_dynamodb2()
    mock_cloudwatch = mock_cloudwatch()

    def setUp(self):
        # Start the AWS mocks, create the queue and the job-state table, and
        # load the canned Google API fixture payloads.
        self.queue_name = "test-queue"
        self.mock_sqs.start()
        self.mock_dynamodb2.start()
        self.mock_cloudwatch.start()
        sqs = boto3.resource("sqs", "us-west-2")
        sqs.create_queue(QueueName=self.queue_name)
        # canned HTTP bodies consumed by HttpMockSequence in the tests below
        with open("test/google/bigquery-discovery.json") as f:
            self.bigquery_discovery = f.read()
        with open("test/google/bq_job_done.json") as f:
            self.bigquery_done = f.read()
        with open("test/google/bq_job_error.json") as f:
            self.bigquery_error = f.read()
        with open("test/google/bq_job_running.json") as f:
            self.bigquery_running = f.read()
        with open("test/google/storage_discovery.json") as f:
            self.storage_discovery = f.read()
        with open("test/job_check.json") as f:
            self.data = json.loads(f.read())
        dynamodb = boto3.resource("dynamodb", "us-west-2")
        # job-state table keyed by job_id; shares its name with the queue
        dynamodb.create_table(
            TableName=self.queue_name,
            AttributeDefinitions=[{
                'AttributeName': 'job_id',
                'AttributeType': 'S'
            }],
            KeySchema=[{
                'AttributeName': 'job_id',
                'KeyType': 'HASH'
            }],
            ProvisionedThroughput={
                'ReadCapacityUnits': 123,
                'WriteCapacityUnits': 123
            },
        )

    def tearDown(self):
        # stop the shared mocks started in setUp
        self.mock_dynamodb2.stop()
        self.mock_sqs.stop()
        self.mock_cloudwatch.stop()

    def test_process_message_done(self):
        """Tests whether process_message returns True for a completed job"""
        http = HttpMockSequence([
            ({
                'status': '200'
            }, self.bigquery_discovery),
            ({
                'status': '200'
            }, self.bigquery_done),
        ])
        gs = build("bigquery", "v2", http=http)
        ret = job_check.process_message(self.data, gs=gs)
        self.assertEqual(ret, True)

    def test_process_message_error(self):
        """Tests whether process_message returns False for an errored job,
        and checks dynamodb for errors"""
        http = HttpMockSequence([
            ({
                'status': '200'
            }, self.bigquery_discovery),
            ({
                'status': '200'
            }, self.bigquery_error),
        ])
        gs = build("bigquery", "v2", http=http)
        ret = job_check.process_message(self.data, self.queue_name, gs=gs)
        self.assertEqual(ret, False)
        # the error should have been recorded in the job-state table
        r = get_item({"job_id": self.data["job_id"]}, self.queue_name)
        print(r)
        self.assertIsNotNone(r)

    def test_process_message_running(self):
        """Tests whether process_message returns None for a pending/running job"""
        http = HttpMockSequence([
            ({
                'status': '200'
            }, self.bigquery_discovery),
            ({
                'status': '200'
            }, self.bigquery_running),
        ])
        gs = build("bigquery", "v2", http=http)
        ret = job_check.process_message(self.data, self.queue_name, gs=gs)
        self.assertIsNone(ret)

    def test_handler(self):
        # end-to-end: enqueue a job message, then run the handler with
        # mocked BigQuery and Cloud Storage services
        sqs = boto3.resource("sqs", "us-west-2")
        job_queue = sqs.get_queue_by_name(QueueName=self.queue_name)
        job_queue.send_message(MessageBody=json.dumps(self.data))
        bq_http = HttpMockSequence([
            ({
                'status': '200'
            }, self.bigquery_discovery),
            ({
                'status': '200'
            }, self.bigquery_done),
        ])
        cs_http = HttpMockSequence([
            ({
                'status': '200'
            }, self.storage_discovery),
            ({
                'status': '200'
            }, ''),
        ])

        def service(name, scope=None, v=None):
            # stand-in for clumpy.google.service returning mocked clients
            print("In service!", name, scope, v)
            if name == "bigquery":
                return build("bigquery", "v2", http=bq_http)
            elif name == "storage":
                return build("storage", "v1", http=cs_http)

        with patch.dict("os.environ", {"jobs": self.queue_name}):
            with patch("clumpy.google.service", service):
                job_check.handler({}, FakeContext(10))
from twisted.trial import unittest as trialtest
from autopush.main import (
    connection_main,
    endpoint_main,
    make_settings,
    skip_request_logging,
)
from autopush.senderids import SenderIDs
from autopush.utils import (
    resolve_ip,
)
from autopush.settings import AutopushSettings

# NOTE(review): this rebinding shadows the imported mock_dynamodb2 factory
# with a single module-scoped instance used by setUp/tearDown below.
mock_dynamodb2 = mock_dynamodb2()


def setUp():
    # nose module-level setup: activate the AWS mocks once for the module
    mock_dynamodb2.start()
    mock_s3().start()


def tearDown():
    mock_dynamodb2.stop()
    # NOTE(review): stop() runs on a *fresh* mock_s3 instance, not the one
    # started in setUp — verify this unpatches under the moto version in use.
    mock_s3().stop()


class SettingsTestCase(unittest.TestCase):
    def test_resolve_host(self):
        # NOTE(review): snippet appears truncated here — the assertion on
        # `ip` likely follows in the original file.
        ip = resolve_ip("example.com")
def dynamodb_mock():
    """Yield a mocked DynamoDB resource with the users table pre-created."""
    import moto
    with moto.mock_dynamodb2():
        resource = boto3.resource('dynamodb', region_name='us-east-1')
        create_users_table(resource)
        yield resource