def test_request_spot_instances():
    """Spot requests via boto2 honor DryRun and persist every request field."""
    conn = boto3.client('ec2', 'us-east-1')
    # AWS only accepts VPC CIDR blocks between /16 and /28, so /8 is invalid;
    # use /16 to match the boto3 variant of this test.
    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
    subnet = conn.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16',
        AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']

    conn = boto.connect_ec2()
    conn.create_security_group('group1', 'description')
    conn.create_security_group('group2', 'description')

    start = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 1))
    end = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 2))

    # With dry_run=True the request must fail with DryRunOperation.
    with assert_raises(JSONResponseError) as ex:
        request = conn.request_spot_instances(
            price=0.5, image_id='ami-abcd1234', count=1, type='one-time',
            valid_from=start, valid_until=end, launch_group="the-group",
            availability_zone_group='my-group',
            key_name="test", security_groups=['group1', 'group2'],
            user_data=b"some test data", instance_type='m1.small',
            placement='us-east-1c', kernel_id="test-kernel",
            ramdisk_id="test-ramdisk", monitoring_enabled=True,
            subnet_id=subnet_id, dry_run=True
        )
    ex.exception.reason.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set')

    request = conn.request_spot_instances(
        price=0.5, image_id='ami-abcd1234', count=1, type='one-time',
        valid_from=start, valid_until=end, launch_group="the-group",
        availability_zone_group='my-group',
        key_name="test", security_groups=['group1', 'group2'],
        user_data=b"some test data", instance_type='m1.small',
        placement='us-east-1c', kernel_id="test-kernel",
        ramdisk_id="test-ramdisk", monitoring_enabled=True,
        subnet_id=subnet_id,
    )

    requests = conn.get_all_spot_instance_requests()
    requests.should.have.length_of(1)
    request = requests[0]

    # Every submitted field should round-trip through the backend.
    request.state.should.equal("open")
    request.price.should.equal(0.5)
    request.launch_specification.image_id.should.equal('ami-abcd1234')
    request.type.should.equal('one-time')
    request.valid_from.should.equal(start)
    request.valid_until.should.equal(end)
    request.launch_group.should.equal("the-group")
    request.availability_zone_group.should.equal('my-group')
    request.launch_specification.key_name.should.equal("test")
    security_group_names = [
        group.name for group in request.launch_specification.groups]
    set(security_group_names).should.equal(set(['group1', 'group2']))
    request.launch_specification.instance_type.should.equal('m1.small')
    request.launch_specification.placement.should.equal('us-east-1c')
    request.launch_specification.kernel.should.equal("test-kernel")
    request.launch_specification.ramdisk.should.equal("test-ramdisk")
    request.launch_specification.subnet_id.should.equal(subnet_id)
def __init__(self, deployment_id, name, description=""):
    """Build a deployment record (dict subclass) stamped with its creation time."""
    super(Deployment, self).__init__()
    created = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
    self.update(
        {
            "id": deployment_id,
            "stageName": name,
            "description": description,
            "createdDate": created,
        }
    )
def __init__(
    self,
    name,
    security_groups,
    subnets,
    vpc_id,
    arn,
    dns_name,
    scheme="internet-facing",
):
    """Initialize a load-balancer record.

    Creation time is captured eagerly; listeners and tags start empty and are
    populated by later calls.  `attrs` holds the mutable ELBv2-style attribute
    map with its documented defaults.
    """
    self.name = name
    # Timestamp formatted once at construction (ISO-8601 with milliseconds).
    self.created_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
    self.scheme = scheme
    self.security_groups = security_groups
    # subnets may legitimately be passed as None; normalize to an empty list.
    self.subnets = subnets or []
    self.vpc_id = vpc_id
    # OrderedDict keeps listeners in creation order for deterministic output.
    self.listeners = OrderedDict()
    self.tags = {}
    self.arn = arn
    self.dns_name = dns_name
    self.stack = "ipv4"
    # Default attribute values; "false"/"60" are strings because the API
    # reports attributes as string key/value pairs.
    self.attrs = {
        "access_logs.s3.enabled": "false",
        "access_logs.s3.bucket": None,
        "access_logs.s3.prefix": None,
        "deletion_protection.enabled": "false",
        "idle_timeout.timeout_seconds": "60",
    }
def _set_phases(self, phases):
    """Finish any QUEUED phase and append one SUCCEEDED entry per build phase.

    Mutates and returns *phases*; every appended phase shares the same
    start/end timestamp with a random duration.
    """
    now = iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow())
    # No phaseStatus for QUEUED on first start
    for prior in phases:
        if prior["phaseType"] == "QUEUED":
            prior["phaseStatus"] = "SUCCEEDED"
    for phase_type in (
        "PROVISIONING",
        "DOWNLOAD_SOURCE",
        "INSTALL",
        "PRE_BUILD",
        "BUILD",
        "POST_BUILD",
        "UPLOAD_ARTIFACTS",
        "FINALIZING",
        "COMPLETED",
    ):
        phases.append(
            {
                "phaseType": phase_type,
                "phaseStatus": "SUCCEEDED",
                "startTime": now,
                "endTime": now,
                "durationInSeconds": randint(10, 100),
            }
        )
    return phases
def sendToSns(self, region, sns_topic_arns):
    """Publish this stack event to every given SNS topic in *region*.

    The message body follows the CloudFormation notification text format:
    one Key='value' pair per line.  NOTE(review): the template below is
    reconstructed with newline separators from the collapsed source —
    confirm the separator against the original file.
    """
    message = """StackId='{stack_id}'
Timestamp='{timestamp}'
EventId='{event_id}'
LogicalResourceId='{logical_resource_id}'
Namespace='{account_id}'
ResourceProperties='{resource_properties}'
ResourceStatus='{resource_status}'
ResourceStatusReason='{resource_status_reason}'
ResourceType='{resource_type}'
StackName='{stack_name}'
ClientRequestToken='{client_request_token}'""".format(
        stack_id=self.stack_id,
        timestamp=iso_8601_datetime_with_milliseconds(self.timestamp),
        event_id=self.event_id,
        logical_resource_id=self.logical_resource_id,
        account_id=ACCOUNT_ID,
        resource_properties=self.resource_properties,
        resource_status=self.resource_status,
        resource_status_reason=self.resource_status_reason,
        resource_type=self.resource_type,
        stack_name=self.stack_name,
        client_request_token=self.client_request_token,
    )
    # Fan out the same message to each topic via the regional SNS backend.
    for sns_topic_arn in sns_topic_arns:
        sns_backends[region].publish(
            message, subject="AWS CloudFormation Notification",
            arn=sns_topic_arn)
def to_dict(self):
    """Serialize this object into its API response shape."""
    created = iso_8601_datetime_with_milliseconds(self.create_date)
    result = {
        "id": self.id,
        "name": self.name,
        "description": self.description,
        "createdDate": created,
    }
    return result
def get_post_data(self, message, message_id, subject, message_attributes=None):
    """Build the notification payload POSTed to an HTTP/S subscriber.

    Signature fields are fixed example values; MessageAttributes is only
    included when attributes were supplied.
    """
    timestamp = iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow())
    post_data = {
        "Type": "Notification",
        "MessageId": message_id,
        "TopicArn": self.topic.arn,
        "Subject": subject or "my subject",
        "Message": message,
        "Timestamp": timestamp,
        "SignatureVersion": "1",
        "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",
        "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",
        "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55",
    }
    if message_attributes:
        post_data["MessageAttributes"] = message_attributes
    return post_data
def __init__(self, arn, name, definition, roleArn, tags=None):
    """Store the state-machine fields and stamp the creation date."""
    self.arn = arn
    self.name = name
    self.definition = definition
    self.roleArn = roleArn
    self.tags = tags
    # Creation timestamp formatted once, at construction.
    self.creation_date = iso_8601_datetime_with_milliseconds(datetime.now())
def __init__(
    self,
    region,
    project_name,
    project_source,
    artifacts,
    environment,
    serviceRole="some_role",
):
    """Build the CodeBuild project-metadata dict returned by the mock API.

    ARNs are synthesized from the region and the (mocked) account id; both
    created and lastModified are set to the construction time.
    """
    current_date = iso_8601_datetime_with_milliseconds(
        datetime.datetime.utcnow())
    self.project_metadata = dict()
    self.project_metadata["name"] = project_name
    self.project_metadata[
        "arn"] = "arn:aws:codebuild:{0}:{1}:project/{2}".format(
            region, get_account_id(), self.project_metadata["name"])
    # Default S3 KMS alias is used as the encryption key ARN.
    self.project_metadata[
        "encryptionKey"] = "arn:aws:kms:{0}:{1}:alias/aws/s3".format(
            region, get_account_id())
    self.project_metadata[
        "serviceRole"] = "arn:aws:iam::{0}:role/service-role/{1}".format(
            get_account_id(), serviceRole)
    self.project_metadata["lastModifiedDate"] = current_date
    self.project_metadata["created"] = current_date
    self.project_metadata["badge"] = dict()
    self.project_metadata["badge"][
        "badgeEnabled"] = False  # this false needs to be a json false not a python false
    self.project_metadata["environment"] = environment
    self.project_metadata["artifacts"] = artifacts
    self.project_metadata["source"] = project_source
    self.project_metadata["cache"] = dict()
    self.project_metadata["cache"]["type"] = "NO_CACHE"
    # Timeouts are left as empty strings rather than numeric defaults.
    self.project_metadata["timeoutInMinutes"] = ""
    self.project_metadata["queuedTimeoutInMinutes"] = ""
def to_dict(self):
    """Serialize to the API shape; create and update timestamps coincide here."""
    # Both timestamp fields are derived from created_at_datetime.
    timestamp = iso_8601_datetime_with_milliseconds(self.created_at_datetime)
    return {
        "Arn": self.arn,
        "CreationTimestamp": timestamp,
        "Id": self.id,
        "LastUpdatedTimestamp": timestamp,
        "LatestVersion": self.latest_version,
        "LatestVersionArn": self.latest_version_arn,
        "Name": self.name,
    }
def __init__(self, cluster, snapshot_identifier, tags=None):
    """Snapshot of *cluster*; stores a shallow copy so later cluster edits don't leak in."""
    self.create_time = iso_8601_datetime_with_milliseconds(
        datetime.datetime.now())
    self.cluster = copy.copy(cluster)
    self.snapshot_identifier = snapshot_identifier
    self.snapshot_type = 'manual'
    self.status = 'available'
    self.tags = tags or []
def __init__(self, cluster, snapshot_identifier, region_name, tags=None):
    """Regional snapshot of *cluster*; a shallow copy isolates it from later edits."""
    super(Snapshot, self).__init__(region_name, tags)
    self.create_time = iso_8601_datetime_with_milliseconds(
        datetime.datetime.now())
    self.cluster = copy.copy(cluster)
    self.snapshot_identifier = snapshot_identifier
    self.snapshot_type = 'manual'
    self.status = 'available'
def __init__(self, distribution, paths, caller_ref):
    """CloudFront-style invalidation; mock completes it immediately."""
    self.invalidation_id = Invalidation.random_id()
    self.create_time = iso_8601_datetime_with_milliseconds(datetime.now())
    # The mock never leaves invalidations in progress.
    self.status = "COMPLETED"
    self.distribution = distribution
    self.paths = paths
    self.caller_ref = caller_ref
def to_dict(self):
    """Serialize to the API shape; Name is only present when set."""
    result = {
        "Arn": self.arn,
        "CreationTimestamp": iso_8601_datetime_with_milliseconds(
            self.created_at_datetime),
        "Id": self.id,
        "LastUpdatedTimestamp": iso_8601_datetime_with_milliseconds(
            self.update_at_datetime),
        "LatestVersion": self.latest_version,
        "LatestVersionArn": self.latest_version_arn,
    }
    # Unnamed definitions omit the key entirely rather than sending null.
    if self.name is not None:
        result["Name"] = self.name
    return result
def __init__(
    self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=None
):
    """Regional snapshot of *cluster* with optional attached IAM role ARNs."""
    super(Snapshot, self).__init__(region_name, tags)
    self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
    self.cluster = copy.copy(cluster)
    self.snapshot_identifier = snapshot_identifier
    self.snapshot_type = "manual"
    self.status = "available"
    self.iam_roles_arn = iam_roles_arn or []
def __init__(self, redshift_backend, cluster_identifier, node_type, master_username,
             master_user_password, db_name, cluster_type, cluster_security_groups,
             vpc_security_group_ids, cluster_subnet_group_name, availability_zone,
             preferred_maintenance_window, cluster_parameter_group_name,
             automated_snapshot_retention_period, port, cluster_version,
             allow_version_upgrade, number_of_nodes, publicly_accessible,
             encrypted, region_name, tags=None, iam_roles_arn=None,
             restored_from_snapshot=False):
    """Initialize a mock Redshift cluster, applying AWS-style defaults for any
    parameter the caller left unset (port 5439, version "1.0", etc.)."""
    super(Cluster, self).__init__(region_name, tags)
    self.redshift_backend = redshift_backend
    self.cluster_identifier = cluster_identifier
    self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow())
    self.status = 'available'
    self.node_type = node_type
    self.master_username = master_username
    self.master_user_password = master_user_password
    self.db_name = db_name if db_name else "dev"
    self.vpc_security_group_ids = vpc_security_group_ids
    self.cluster_subnet_group_name = cluster_subnet_group_name
    self.publicly_accessible = publicly_accessible
    self.encrypted = encrypted
    # Explicit None check: a caller passing False must keep upgrades disabled.
    self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True
    self.cluster_version = cluster_version if cluster_version else "1.0"
    self.port = int(port) if port else 5439
    self.automated_snapshot_retention_period = int(
        automated_snapshot_retention_period) if automated_snapshot_retention_period else 1
    self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else "Mon:03:00-Mon:03:30"
    # Parameter group is stored as a single-element list (API reports a list).
    if cluster_parameter_group_name:
        self.cluster_parameter_group_name = [cluster_parameter_group_name]
    else:
        self.cluster_parameter_group_name = ['default.redshift-1.0']
    if cluster_security_groups:
        self.cluster_security_groups = cluster_security_groups
    else:
        self.cluster_security_groups = ["Default"]
    if availability_zone:
        self.availability_zone = availability_zone
    else:
        # This could probably be smarter, but there doesn't appear to be a
        # way to pull AZs for a region in boto
        self.availability_zone = region_name + "a"
    # single-node clusters always have exactly one node regardless of input.
    if cluster_type == 'single-node':
        self.number_of_nodes = 1
    elif number_of_nodes:
        self.number_of_nodes = int(number_of_nodes)
    else:
        self.number_of_nodes = 1
    self.iam_roles_arn = iam_roles_arn or []
    self.restored_from_snapshot = restored_from_snapshot
def __init__(self, redshift_backend, cluster_identifier, node_type, master_username,
             master_user_password, db_name, cluster_type, cluster_security_groups,
             vpc_security_group_ids, cluster_subnet_group_name, availability_zone,
             preferred_maintenance_window, cluster_parameter_group_name,
             automated_snapshot_retention_period, port, cluster_version,
             allow_version_upgrade, number_of_nodes, publicly_accessible,
             encrypted, region_name, tags=None, iam_roles_arn=None,
             restored_from_snapshot=False):
    """Initialize a mock Redshift cluster, applying AWS-style defaults for any
    parameter the caller left unset (port 5439, version "1.0", etc.).

    NOTE(review): this variant stamps create_time with datetime.now(); a
    sibling version of this constructor uses utcnow() — confirm intent.
    """
    super(Cluster, self).__init__(region_name, tags)
    self.redshift_backend = redshift_backend
    self.cluster_identifier = cluster_identifier
    self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
    self.status = 'available'
    self.node_type = node_type
    self.master_username = master_username
    self.master_user_password = master_user_password
    self.db_name = db_name if db_name else "dev"
    self.vpc_security_group_ids = vpc_security_group_ids
    self.cluster_subnet_group_name = cluster_subnet_group_name
    self.publicly_accessible = publicly_accessible
    self.encrypted = encrypted
    # Explicit None check: a caller passing False must keep upgrades disabled.
    self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True
    self.cluster_version = cluster_version if cluster_version else "1.0"
    self.port = int(port) if port else 5439
    self.automated_snapshot_retention_period = int(
        automated_snapshot_retention_period) if automated_snapshot_retention_period else 1
    self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else "Mon:03:00-Mon:03:30"
    # Parameter group is stored as a single-element list (API reports a list).
    if cluster_parameter_group_name:
        self.cluster_parameter_group_name = [cluster_parameter_group_name]
    else:
        self.cluster_parameter_group_name = ['default.redshift-1.0']
    if cluster_security_groups:
        self.cluster_security_groups = cluster_security_groups
    else:
        self.cluster_security_groups = ["Default"]
    if availability_zone:
        self.availability_zone = availability_zone
    else:
        # This could probably be smarter, but there doesn't appear to be a
        # way to pull AZs for a region in boto
        self.availability_zone = region_name + "a"
    # single-node clusters always have exactly one node regardless of input.
    if cluster_type == 'single-node':
        self.number_of_nodes = 1
    elif number_of_nodes:
        self.number_of_nodes = int(number_of_nodes)
    else:
        self.number_of_nodes = 1
    self.iam_roles_arn = iam_roles_arn or []
    self.restored_from_snapshot = restored_from_snapshot
def __init__(self, arn, name, definition, roleArn, tags=None):
    """State machine with tracked executions; tags go through add_tags."""
    self.arn = arn
    self.name = name
    self.definition = definition
    self.roleArn = roleArn
    self.executions = []
    # update_date starts equal to creation_date until a later update occurs.
    self.creation_date = iso_8601_datetime_with_milliseconds(datetime.now())
    self.update_date = self.creation_date
    self.tags = []
    if tags:
        self.add_tags(tags)
def get_post_data(self, message, message_id, subject):
    """Build the notification payload POSTed to an HTTP/S subscriber."""
    timestamp = iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow())
    payload = {
        "Type": "Notification",
        "MessageId": message_id,
        "TopicArn": self.topic.arn,
        "Subject": subject or "my subject",
        "Message": message,
        "Timestamp": timestamp,
        "SignatureVersion": "1",
        "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",
        "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",
        "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55",
    }
    return payload
def __init__(self, name, arn, wacl_id, visibility_config, default_action):
    """Mock WebACL; generates a test name when none is supplied."""
    if name:
        self.name = name
    else:
        self.name = utils.create_test_name("Mock-WebACL-name")
    self.created_time = iso_8601_datetime_with_milliseconds(
        datetime.datetime.now())
    self.id = wacl_id
    self.arn = arn
    self.description = "Mock WebACL named {0}".format(self.name)
    self.capacity = 3
    # Incoming configs use PascalCase keys; convert before building the models.
    self.visibility_config = VisibilityConfig(
        **pascal_to_underscores_dict(visibility_config))
    self.default_action = DefaultAction(
        **pascal_to_underscores_dict(default_action))
def create(self, value):
    """Store *value* and write sidecar metadata (last-modified timestamp, etag).

    Creates the backing directory on first use.
    """
    if not os.path.exists(self._path):
        os.makedirs(self._path)
    # Renamed handle from `file`, which shadowed the builtin.
    with open(self._info_path, 'w') as fh:
        json.dump(
            {
                'last_modified': iso_8601_datetime_with_milliseconds(
                    datetime.datetime.utcnow()),
                # etag is unset until content is hashed elsewhere.
                'etag': None
            },
            fh)
    self.value = value
def test_request_spot_instances():
    """Spot request via boto2 should round-trip every submitted field."""
    conn = boto.connect_ec2()
    conn.create_security_group('group1', 'description')
    conn.create_security_group('group2', 'description')

    start = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 1))
    end = iso_8601_datetime_with_milliseconds(datetime.datetime(2013, 1, 2))

    request = conn.request_spot_instances(
        price=0.5, image_id='ami-abcd1234', count=1, type='one-time',
        valid_from=start, valid_until=end, launch_group="the-group",
        availability_zone_group='my-group',
        key_name="test", security_groups=['group1', 'group2'],
        user_data=b"some test data", instance_type='m1.small',
        placement='us-east-1c', kernel_id="test-kernel",
        ramdisk_id="test-ramdisk", monitoring_enabled=True,
        subnet_id="subnet123",
    )

    requests = conn.get_all_spot_instance_requests()
    requests.should.have.length_of(1)
    request = requests[0]

    # Verify that every field submitted above is reported back unchanged.
    request.state.should.equal("open")
    request.price.should.equal(0.5)
    request.launch_specification.image_id.should.equal('ami-abcd1234')
    request.type.should.equal('one-time')
    request.valid_from.should.equal(start)
    request.valid_until.should.equal(end)
    request.launch_group.should.equal("the-group")
    request.availability_zone_group.should.equal('my-group')
    request.launch_specification.key_name.should.equal("test")
    security_group_names = [
        group.name for group in request.launch_specification.groups]
    set(security_group_names).should.equal(set(['group1', 'group2']))
    request.launch_specification.instance_type.should.equal('m1.small')
    request.launch_specification.placement.should.equal('us-east-1c')
    request.launch_specification.kernel.should.equal("test-kernel")
    request.launch_specification.ramdisk.should.equal("test-ramdisk")
    request.launch_specification.subnet_id.should.equal("subnet123")
def disassociate_role_from_group(self):
    """Detach the role from the group named in the URL path; return 200 + timestamp."""
    # The group id is the second-to-last path segment.
    group_id = self.path.split("/")[-2]
    self.greengrass_backend.disassociate_role_from_group(group_id=group_id)
    body = json.dumps(
        {"DisassociatedAt": iso_8601_datetime_with_milliseconds(datetime.utcnow())}
    )
    return 200, {"status": 200}, body
def stop_build(self, build_id):
    """Mark the matching build STOPPED with a synthetic end time; None if absent."""
    for builds in self.build_metadata_history.values():
        for build in builds:
            if build["id"] != build_id:
                continue
            # set completion properties with variable completion time
            build["phases"] = self._set_phases(build["phases"])
            finished_at = parser.parse(build["startTime"]) + datetime.timedelta(
                minutes=randint(1, 5))
            build["endTime"] = iso_8601_datetime_with_milliseconds(finished_at)
            build["currentPhase"] = "COMPLETED"
            build["buildStatus"] = "STOPPED"
            return build
def exit_standby(self):
    """Move the named instances out of standby and render the API response."""
    group_name = self._get_param("AutoScalingGroupName")
    instance_ids = self._get_multi_param("InstanceIds.member")
    result = self.autoscaling_backend.exit_standby_instances(
        group_name, instance_ids)
    standby_instances, original_size, desired_capacity = result
    template = self.response_template(EXIT_STANDBY_TEMPLATE)
    return template.render(
        standby_instances=standby_instances,
        original_size=original_size,
        desired_capacity=desired_capacity,
        timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
    )
def get_credentials_for_identity(self, identity_id):
    """Return a JSON credentials payload valid for 90 seconds from now."""
    duration = 90
    expiration = datetime.datetime.utcnow() + datetime.timedelta(seconds=duration)
    expiration_str = str(iso_8601_datetime_with_milliseconds(expiration))
    payload = {
        "Credentials": {
            "AccessKeyId": "TESTACCESSKEY12345",
            "Expiration": expiration_str,
            "SecretKey": "ABCSECRETKEY",
            "SessionToken": "ABC12345",
        },
        "IdentityId": identity_id,
    }
    return json.dumps(payload)
def batch_get_builds(self, ids):
    """Complete every build whose id is in *ids* and return them as a list."""
    matched = []
    for builds in self.build_metadata_history.values():
        for build in builds:
            if build["id"] not in ids:
                continue
            build["phases"] = self._set_phases(build["phases"])
            finished_at = parser.parse(build["startTime"]) + datetime.timedelta(
                minutes=randint(1, 5))
            build["endTime"] = iso_8601_datetime_with_milliseconds(finished_at)
            build["currentPhase"] = "COMPLETED"
            build["buildStatus"] = "SUCCEEDED"
            matched.append(build)
    return matched
def to_dict(self, include_detail=False):
    """Serialize the definition version; the full Definition only on request."""
    result = {
        "Arn": self.arn,
        "CreationTimestamp": iso_8601_datetime_with_milliseconds(
            self.created_at_datetime),
        "Id": self.core_definition_id,
        "Version": self.version,
    }
    if include_detail:
        result["Definition"] = self.definition
    return result
def get_credentials_for_identity(self, identity_id):
    """Return a JSON credentials payload valid for 90 seconds from now."""
    duration = 90
    now = datetime.datetime.utcnow()
    expiration_str = str(
        iso_8601_datetime_with_milliseconds(
            now + datetime.timedelta(seconds=duration)))
    credentials = {
        "AccessKeyId": "TESTACCESSKEY12345",
        "Expiration": expiration_str,
        "SecretKey": "ABCSECRETKEY",
        "SessionToken": "ABC12345"
    }
    return json.dumps({"Credentials": credentials, "IdentityId": identity_id})
def __init__(self, region, repository_description, repository_name):
    """Build the CodeCommit repository-metadata dict returned by the mock API.

    Clone URLs and the ARN are synthesized from the region and the (mocked)
    account id; creation and last-modified times are both set to now.
    """
    current_date = iso_8601_datetime_with_milliseconds(datetime.utcnow())
    self.repository_metadata = dict()
    self.repository_metadata["repositoryName"] = repository_name
    self.repository_metadata[
        "cloneUrlSsh"] = "ssh://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format(
            region, repository_name)
    self.repository_metadata[
        "cloneUrlHttp"] = "https://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format(
            region, repository_name)
    self.repository_metadata["creationDate"] = current_date
    self.repository_metadata["lastModifiedDate"] = current_date
    self.repository_metadata[
        "repositoryDescription"] = repository_description
    # Random UUID stands in for the server-assigned repository id.
    self.repository_metadata["repositoryId"] = str(uuid.uuid4())
    self.repository_metadata[
        "Arn"] = "arn:aws:codecommit:{0}:{1}:{2}".format(
            region, get_account_id(), repository_name)
    self.repository_metadata["accountId"] = get_account_id()
def terminate_instance_in_auto_scaling_group(self):
    """Terminate one instance, optionally shrinking desired capacity, and render the response."""
    instance_id = self._get_param("InstanceId")
    # Query params arrive as strings; compare directly instead of an if/else.
    should_decrement = (
        self._get_param("ShouldDecrementDesiredCapacity") == "true"
    )
    (
        instance,
        original_size,
        desired_capacity,
    ) = self.autoscaling_backend.terminate_instance(instance_id, should_decrement)
    template = self.response_template(TERMINATE_INSTANCES_TEMPLATE)
    return template.render(
        instance=instance,
        should_decrement=should_decrement,
        original_size=original_size,
        desired_capacity=desired_capacity,
        timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
    )
def __init__(
    self,
    region_name,
    account_id,
    state_machine_name,
    execution_name,
    state_machine_arn,
    execution_input,
):
    """New RUNNING execution; its ARN embeds region, account, machine and name."""
    self.execution_arn = "arn:aws:states:{}:{}:execution:{}:{}".format(
        region_name, account_id, state_machine_name, execution_name
    )
    self.name = execution_name
    self.start_date = iso_8601_datetime_with_milliseconds(datetime.now())
    self.state_machine_arn = state_machine_arn
    self.execution_input = execution_input
    self.status = "RUNNING"
    # stop_date stays None until the execution finishes.
    self.stop_date = None
def enter_standby(self):
    """Move the named instances into standby and render the API response."""
    group_name = self._get_param("AutoScalingGroupName")
    instance_ids = self._get_multi_param("InstanceIds.member")
    # Query params arrive as strings; compare directly instead of an if/else.
    should_decrement = (
        self._get_param("ShouldDecrementDesiredCapacity") == "true"
    )
    (
        standby_instances,
        original_size,
        desired_capacity,
    ) = self.autoscaling_backend.enter_standby_instances(
        group_name, instance_ids, should_decrement
    )
    template = self.response_template(ENTER_STANDBY_TEMPLATE)
    return template.render(
        standby_instances=standby_instances,
        should_decrement=should_decrement,
        original_size=original_size,
        desired_capacity=desired_capacity,
        timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
    )
def __init__(self, stats, dt):
    """Datapoint for *dt* carrying *stats*; values start empty."""
    self.stats = stats
    self.timestamp = iso_8601_datetime_with_milliseconds(dt)
    self.values = []
def test_request_spot_instances():
    """Spot requests via boto3 honor DryRun and persist every request field."""
    conn = boto3.client('ec2', 'us-east-1')
    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']
    subnet = conn.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16',
        AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']

    conn.create_security_group(GroupName='group1', Description='description')
    conn.create_security_group(GroupName='group2', Description='description')

    # boto3 returns timezone-aware datetimes, so compare against UTC-aware values.
    start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc)
    end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc)
    start = iso_8601_datetime_with_milliseconds(start_dt)
    end = iso_8601_datetime_with_milliseconds(end_dt)

    # With DryRun=True the request must fail with DryRunOperation.
    with assert_raises(ClientError) as ex:
        request = conn.request_spot_instances(
            SpotPrice="0.5", InstanceCount=1, Type='one-time',
            ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
            AvailabilityZoneGroup='my-group',
            LaunchSpecification={
                "ImageId": 'ami-abcd1234',
                "KeyName": "test",
                "SecurityGroups": ['group1', 'group2'],
                "UserData": "some test data",
                "InstanceType": 'm1.small',
                "Placement": {
                    "AvailabilityZone": 'us-east-1c',
                },
                "KernelId": "test-kernel",
                "RamdiskId": "test-ramdisk",
                "Monitoring": {
                    "Enabled": True,
                },
                "SubnetId": subnet_id,
            },
            DryRun=True,
        )
    ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
    ex.exception.response['ResponseMetadata'][
        'HTTPStatusCode'].should.equal(400)
    ex.exception.response['Error']['Message'].should.equal(
        'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set')

    request = conn.request_spot_instances(
        SpotPrice="0.5", InstanceCount=1, Type='one-time',
        ValidFrom=start, ValidUntil=end, LaunchGroup="the-group",
        AvailabilityZoneGroup='my-group',
        LaunchSpecification={
            "ImageId": 'ami-abcd1234',
            "KeyName": "test",
            "SecurityGroups": ['group1', 'group2'],
            "UserData": "some test data",
            "InstanceType": 'm1.small',
            "Placement": {
                "AvailabilityZone": 'us-east-1c',
            },
            "KernelId": "test-kernel",
            "RamdiskId": "test-ramdisk",
            "Monitoring": {
                "Enabled": True,
            },
            "SubnetId": subnet_id,
        },
    )

    requests = conn.describe_spot_instance_requests()['SpotInstanceRequests']
    requests.should.have.length_of(1)
    request = requests[0]

    # Every submitted field should round-trip through the backend.
    request['State'].should.equal("open")
    request['SpotPrice'].should.equal("0.5")
    request['Type'].should.equal('one-time')
    request['ValidFrom'].should.equal(start_dt)
    request['ValidUntil'].should.equal(end_dt)
    request['LaunchGroup'].should.equal("the-group")
    request['AvailabilityZoneGroup'].should.equal('my-group')

    launch_spec = request['LaunchSpecification']
    security_group_names = [
        group['GroupName'] for group in launch_spec['SecurityGroups']]
    set(security_group_names).should.equal(set(['group1', 'group2']))
    launch_spec['ImageId'].should.equal('ami-abcd1234')
    launch_spec['KeyName'].should.equal("test")
    launch_spec['InstanceType'].should.equal('m1.small')
    launch_spec['KernelId'].should.equal("test-kernel")
    launch_spec['RamdiskId'].should.equal("test-ramdisk")
    launch_spec['SubnetId'].should.equal(subnet_id)
def last_modified_ISO8601(self):
    """Return the last-modified time as an ISO-8601 string with milliseconds."""
    formatted = iso_8601_datetime_with_milliseconds(self.last_modified)
    return formatted
def expiration_ISO8601(self):
    """Return the expiration time as an ISO-8601 string with milliseconds."""
    formatted = iso_8601_datetime_with_milliseconds(self.expiration)
    return formatted