Example #1
def test_create_task_definition_through_cloudformation():
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "ECS Cluster Test CloudFormation",
        "Resources": {
            "testTaskDefinition": {
                "Type": "AWS::ECS::TaskDefinition",
                "Properties": {
                    "ContainerDefinitions": [
                        {
                            "Name": "ecs-sample",
                            "Image": "amazon/amazon-ecs-sample",
                            "Cpu": "200",
                            "Memory": "500",
                            "Essential": "true"
                        }
                    ],
                    "Volumes": [],
                }
            }
        }
    }
    template_json = json.dumps(template)
    cfn_conn = boto3.client('cloudformation', region_name='us-west-1')
    cfn_conn.create_stack(
        StackName="test_stack",
        TemplateBody=template_json,
    )

    ecs_conn = boto3.client('ecs', region_name='us-west-1')
    resp = ecs_conn.list_task_definitions()
    len(resp['taskDefinitionArns']).should.equal(1)
Example #2
def test_get_resources_s3():
    # Tests pagination
    s3_client = boto3.client('s3', region_name='eu-central-1')

    # Will end up having key1,key2,key3,key4
    response_keys = set()

    # Create 4 buckets
    for i in range(1, 5):
        i_str = str(i)
        s3_client.create_bucket(Bucket='test_bucket' + i_str)
        s3_client.put_bucket_tagging(
            Bucket='test_bucket' + i_str,
            Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]}
        )
        response_keys.add('key' + i_str)

    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
    resp = rtapi.get_resources(ResourcesPerPage=2)
    for resource in resp['ResourceTagMappingList']:
        response_keys.remove(resource['Tags'][0]['Key'])

    response_keys.should.have.length_of(2)

    resp = rtapi.get_resources(
        ResourcesPerPage=2,
        PaginationToken=resp['PaginationToken']
    )
    for resource in resp['ResourceTagMappingList']:
        response_keys.remove(resource['Tags'][0]['Key'])

    response_keys.should.have.length_of(0)
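A note on the pagination being tested: the manual PaginationToken handling above can also be written with boto3's built-in paginator, which keeps requesting pages until the token is exhausted. A minimal sketch, assuming the same rtapi client and response_keys set from the test:

    paginator = rtapi.get_paginator('get_resources')
    for page in paginator.paginate(ResourcesPerPage=2):
        for resource in page['ResourceTagMappingList']:
            # each page carries up to 2 resources; discard their first tag key
            response_keys.discard(resource['Tags'][0]['Key'])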
Example #3
    def __init__(self):
        self.extractor = Extractor()
        self.sqs = boto3.client('sqs')
        self.queue_url = 'https://sqs.ap-southeast-1.amazonaws.com/841662669278/crawler'
        self.s3 = boto3.client('s3')
        self.dynamodb = boto3.resource('dynamodb')
        self.bloom_filter = MyBloomFilter(self.dynamodb.Table('link'))
Example #4
def main():
    s3 = boto3.resource('s3')
    client = boto3.client('s3')
    client_map = {}
    for bucket in s3.buckets.all():
        # Hack to work around SigV4 breaking in eu-central, need to create client with region so boto3 signs request correctly
        region = client.get_bucket_location(Bucket=bucket.name)['LocationConstraint']
        if region is None:
            # US Standard has no location constraint set
            region = 'us-east-1'
        if region not in client_map:
            # Cache regional clients
            client_map[region] = boto3.client('s3', region_name=region)
        client = client_map[region]

        parts = client.list_multipart_uploads(Bucket=bucket.name, MaxUploads=10)
        if 'Uploads' in parts:
            print('[%s] has incomplete multipart uploads and will be affected by this change.' % (bucket.name))
        else:
            # Comment out this block if you want to apply to all buckets regardless of whether or not they contain failed multipart uploads
            print('[%s] does not have incomplete multipart uploads, skipping.' % (bucket.name))
            continue
        update_lifecycle(client, bucket.name)
        # Avoid throttling, take a break
        time.sleep(2)
    print('Done!')
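update_lifecycle is called above but not shown. A minimal sketch of one plausible implementation, assuming a rule that aborts incomplete multipart uploads after 7 days (the rule ID and window are assumptions, not the original's values):

def update_lifecycle(client, bucket_name):
    # Add a lifecycle rule that aborts incomplete multipart uploads after 7 days
    client.put_bucket_lifecycle_configuration(
        Bucket=bucket_name,
        LifecycleConfiguration={'Rules': [{
            'ID': 'abort-incomplete-mpu',  # assumed rule name
            'Status': 'Enabled',
            'Filter': {'Prefix': ''},
            'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 7},
        }]},
    )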
Example #5
def test_get_resources_ec2():
    client = boto3.client('ec2', region_name='eu-central-1')

    instances = client.run_instances(
        ImageId='ami-123',
        MinCount=1,
        MaxCount=1,
        InstanceType='t2.micro',
        TagSpecifications=[
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'MY_TAG1',
                        'Value': 'MY_VALUE1',
                    },
                    {
                        'Key': 'MY_TAG2',
                        'Value': 'MY_VALUE2',
                    },
                ],
            },
            {
                'ResourceType': 'instance',
                'Tags': [
                    {
                        'Key': 'MY_TAG3',
                        'Value': 'MY_VALUE3',
                    },
                ]
            },
        ],
    )
    instance_id = instances['Instances'][0]['InstanceId']
    image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId']

    client.create_tags(
        Resources=[image_id],
        Tags=[{'Key': 'ami', 'Value': 'test'}]
    )

    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
    resp = rtapi.get_resources()
    # Check we have 1 entry for Instance, 1 Entry for AMI
    resp['ResourceTagMappingList'].should.have.length_of(2)

    # 1 Entry for AMI
    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image'])
    resp['ResourceTagMappingList'].should.have.length_of(1)
    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/')

    # As were iterating the same data, this rules out that the test above was a fluke
    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance'])
    resp['ResourceTagMappingList'].should.have.length_of(1)
    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')

    # Basic test of tag filters
    resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}])
    resp['ResourceTagMappingList'].should.have.length_of(1)
    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
Example #6
def get_alive_master_ip():
    zk_conn_str = get_os_env('ZOOKEEPER_CONN_STR')
    master_stack_name = get_os_env('MASTER_STACK_NAME')
    master_ip = ""
    global region
    if zk_conn_str != "":
        from kazoo.client import KazooClient
        zk = KazooClient(hosts=zk_conn_str)
        zk.start()
        try:
            master_ip = zk.get("/spark/leader_election/current_master")[0].decode('utf-8')
        except Exception:
            master_ip = ""
        finally:
            zk.stop()
        return master_ip
    elif master_stack_name != "" and region is not None:
        try:
            elb = boto3.client('elb', region_name=region)
            ec2 = boto3.client('ec2', region_name=region)
            master_ips = get_instance_ips(elb, ec2, master_stack_name)
            if len(master_ips) == 1:
                return master_ips[0]
            return ""  # zero or multiple masters shouldn't happen without zookeeper
        except Exception:
            return ""
    else:
        return ""
Example #7
def ec2ssh(host):
  with open("./hosts/{}".format(sys.argv[1])) as f:
    host = f.read().splitlines()

  target = {'key': host[0], 'user': host[1], 'host': host[2]}
  target_ec2 = boto3.client('ec2')
  target_response = target_ec2.describe_instances(InstanceIds=[target['host']])
  
  
  if len(host) > 3:
    bastion = {'key': host[3], 'user': host[4], 'host': host[5]}
    bastion_ec2 = boto3.client('ec2') 
    bastion_response = bastion_ec2.describe_instances(InstanceIds=[bastion['host']]) 
    bastion_ip = bastion_response['Reservations'][0]['Instances'][0]['PublicIpAddress']
    
    target_ip = target_response['Reservations'][0]['Instances'][0]['NetworkInterfaces'][0]['PrivateIpAddress']

    subprocess.call("ssh-add {} {}".format(bastion['key'], target['key']), shell=True)
    subprocess.call("ssh -t -A {}@{} ssh {}@{}".format(bastion['user'], bastion_ip, target['user'], target_ip), shell=True)
  
  else:
    target_ip = target_response['Reservations'][0]['Instances'][0]['PublicIpAddress']
    
    subprocess.call("ssh-add {}".format(target['key']), shell=True)
    subprocess.call("ssh {}@{}".format(target['user'], target_ip), shell=True)
Example #8
def get_bucket_metrics(buckets, days=14):
    s3_client = boto3.client('s3')
    METRICS = {'NumberOfObjects': ['AllStorageTypes'],
               'BucketSizeBytes': ['StandardStorage', 'ReducedRedundancyStorage', 'StandardIAStorage']}
    results = {}
    now = datetime.datetime.utcnow()
    for bucket in buckets:
        region = get_bucket_region(bucket)
        cw = boto3.client('cloudwatch', region_name=region)
        datapoints = {}
        for metric, storage_types in METRICS.items():
            for storage_type in storage_types:
                response = cw.get_metric_statistics(
                    Namespace='AWS/S3',
                    MetricName=metric,
                    Statistics=['Average'],
                    Period=PERIOD,
                    EndTime=now,
                    StartTime=now - datetime.timedelta(days=days),
                    Dimensions=[
                        {'Name': 'BucketName', 'Value': bucket},
                        {'Name': 'StorageType', 'Value': storage_type},
                    ],
                )
                for stats in response['Datapoints']:
                    date = stats['Timestamp'].strftime("%Y-%m-%d")
                    if date not in datapoints:
                        datapoints[date] = {}
                    if metric not in datapoints[date]:
                        datapoints[date][metric] = dict.fromkeys(storage_types, 0)
                    datapoints[date][metric][storage_type] = stats['Average']
        results[bucket] = datapoints
    return results
Example #9
def genlistOfObjects(vService, vPrefix, vRegion, vAwsAccessInfo):
  if vAwsAccessInfo:
    if 'aws_session_token' in vAwsAccessInfo:
      cw = b.client(vService, region_name=vRegion,
                    aws_access_key_id=vAwsAccessInfo['aws_access_key_id'],
                    aws_secret_access_key=vAwsAccessInfo['aws_secret_access_key'],
                    aws_session_token=vAwsAccessInfo['aws_session_token'])
    else:
      cw = b.client(vService, region_name=vRegion,
                    aws_access_key_id=vAwsAccessInfo['aws_access_key_id'],
                    aws_secret_access_key=vAwsAccessInfo['aws_secret_access_key'])
  else:
    cw = b.client(vService, region_name=vRegion)
  listOfAWSObj = []
  if vService == 'kinesis':
    listOfAWSObj = cw.list_streams(Limit=1000)['StreamNames']
  elif vService == 'firehose':
    listOfAWSObj = cw.list_delivery_streams(Limit=1000)['DeliveryStreamNames']
  elif vService == 'dynamodb':
    listOfAWSObj = cw.list_tables()['TableNames']
  listOfAWSObj_PrefixMatched=[]
  for each in listOfAWSObj:
    if each.startswith(vPrefix):
      listOfAWSObj_PrefixMatched.append(each)
  return listOfAWSObj_PrefixMatched      
Example #10
def get_machines(num_instances_to_use, aws_group_name):
    machines = []
    #connect to AWS
    ec2 = boto3.client('ec2')
    autoscale = boto3.client('autoscaling')

    #how many machines are currently running?
    instances = get_instances_in_group(autoscale, aws_group_name)
    num_instances = len(instances)
    rd_print(None,'Number of instances online:', num_instances)

    #grab instance IDs
    instance_ids = [i['InstanceId'] for i in instances]
    rd_print(None,"These instances are online:",instance_ids)
    running_instance_ids = []
    for instance_id in instance_ids:
        try:
            state = state_name_of(instance_id, ec2)
            if state == 'running':
                rd_print(None,instance_id, 'is running!')
                running_instance_ids.append(instance_id)
        except IndexError:
            print(instance_id, 'not queryable yet')
    ok_instance_ids = []
    for instance_id in running_instance_ids:
        try:
            if status_of(instance_id, ec2) == 'ok':
                rd_print(None,instance_id,'reported OK!')
                ok_instance_ids.append(instance_id)
        except IndexError:
            rd_print(None,'Instance',instance_id,'disappeared!')
    for instance_id in ok_instance_ids:
        machines.append(Machine(ip_address_of(instance_id, ec2)))
    return machines
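The helpers state_name_of, status_of and ip_address_of are not shown. Minimal sketches matching how they are called above (assumptions, including the IndexError the caller handles):

def state_name_of(instance_id, ec2):
    # Raises IndexError while the instance is not yet queryable
    reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
    return reservations[0]['Instances'][0]['State']['Name']

def status_of(instance_id, ec2):
    # Instance status checks report 'ok' once the instance passes its checks
    statuses = ec2.describe_instance_status(InstanceIds=[instance_id])['InstanceStatuses']
    return statuses[0]['InstanceStatus']['Status']

def ip_address_of(instance_id, ec2):
    reservations = ec2.describe_instances(InstanceIds=[instance_id])['Reservations']
    return reservations[0]['Instances'][0]['PublicIpAddress']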
Example #11
    def test_client_passes_through_arguments(self):
        boto3.DEFAULT_SESSION = self.Session()

        boto3.client('sqs', region_name='us-west-2', verify=False)

        boto3.DEFAULT_SESSION.client.assert_called_with(
            'sqs', region_name='us-west-2', verify=False)
Example #12
def test_basic():
    iot_client = boto3.client('iot', region_name='ap-northeast-1')
    client = boto3.client('iot-data', region_name='ap-northeast-1')
    name = 'my-thing'
    raw_payload = b'{"state": {"desired": {"led": "on"}}}'
    iot_client.create_thing(thingName=name)

    with assert_raises(ClientError):
        client.get_thing_shadow(thingName=name)

    res = client.update_thing_shadow(thingName=name, payload=raw_payload)

    payload = json.loads(res['payload'].read())
    expected_state = '{"desired": {"led": "on"}}'
    payload.should.have.key('state').which.should.equal(json.loads(expected_state))
    payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
    payload.should.have.key('version').which.should.equal(1)
    payload.should.have.key('timestamp')

    res = client.get_thing_shadow(thingName=name)
    payload = json.loads(res['payload'].read())
    expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}'
    payload.should.have.key('state').which.should.equal(json.loads(expected_state))
    payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
    payload.should.have.key('version').which.should.equal(1)
    payload.should.have.key('timestamp')

    client.delete_thing_shadow(thingName=name)
    with assert_raises(ClientError):
        client.get_thing_shadow(thingName=name)
Example #13
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        self.test_resources_dir = self._get_resources_dir()
        self.cfn_conn = boto3.client("cloudformation", region_name="eu-west-1")
        self.kms_conn = boto3.client("kms", region_name="eu-west-1")
        self.config = Config(config_file=os.path.join(self.test_resources_dir, "stacks.yml"))
Example #14
    def __init__(
        self,
        log_group_name,
        region_name=None,
        start_time=None,
        end_time=None,
        boto_client_kwargs=None
    ):
        boto_client_kwargs = boto_client_kwargs or {}

        # If a specific region is requested, use it.
        # If not, try to use the environment's configuration (i.e. the
        # AWS_DEFAULT_REGION variable of ~/.aws/config file).
        # If that doesn't work, use a default region.
        if region_name is not None:
            boto_client_kwargs['region_name'] = region_name
            self.logs_client = boto3.client('logs', **boto_client_kwargs)
        else:
            try:
                self.logs_client = boto3.client('logs', **boto_client_kwargs)
            except NoRegionError:
                boto_client_kwargs['region_name'] = DEFAULT_REGION_NAME
                self.logs_client = boto3.client('logs', **boto_client_kwargs)

        self.log_group_name = log_group_name

        # If no time filters are given use the last hour
        now = datetime.utcnow()
        start_time = start_time or now - timedelta(hours=1)
        end_time = end_time or now

        self.start_ms = timegm(start_time.utctimetuple()) * 1000
        self.end_ms = timegm(end_time.utctimetuple()) * 1000
Example #15
def load_config(config):
    keys = load_yaml(abs_path("default"))

    keys["credentials"] = {}
    if os.path.exists(abs_path("credentials")):
        keys["credentials"] = load_yaml(abs_path("credentials"))

    if config != "default":
        keys.update(load_yaml(abs_path(config)))

    if "aws_access_key" in keys["credentials"]:
        keys["s3"] = boto3.resource(
            "s3",
            region_name=keys["region"],
            aws_access_key_id=keys["credentials"]["aws_access_key"],
            aws_secret_access_key=keys["credentials"]["aws_access_secret"],
        )
        keys["s3_client"] = boto3.client(
            "s3",
            region_name=keys["region"],
            aws_access_key_id=keys["credentials"]["aws_access_key"],
            aws_secret_access_key=keys["credentials"]["aws_access_secret"],
        )
    else:
        keys["s3"] = boto3.resource("s3", region_name=keys["region"])
        keys["s3_client"] = boto3.client("s3", region_name=keys["region"])

    return AttrDict(keys)
Example #16
def start_agent():
    print('*Starting agent... Version ' + __version__)

    load_configs()
    if OPTIONS_FROM_CONFIG_FILE.version:
        print "%s" % __version__
        terminate_script()

    global SQS_CLIENT, SNS_CLIENT

    sqs_sns_region = OPTIONS_FROM_CONFIG_FILE.topic_arn.split(':')[3]
    log("SQS/SNS region from topic ARN: %s" % sqs_sns_region)
    aws_access_id = OPTIONS_FROM_CONFIG_FILE.coreo_access_id
    aws_secret_access_key = OPTIONS_FROM_CONFIG_FILE.coreo_access_key
    SQS_CLIENT = boto3.client('sqs',
                              aws_access_key_id=aws_access_id,
                              aws_secret_access_key=aws_secret_access_key,
                              region_name=sqs_sns_region)
    SNS_CLIENT = boto3.client('sns',
                              aws_access_key_id=aws_access_id,
                              aws_secret_access_key=aws_secret_access_key,
                              region_name=sqs_sns_region)

    if not OPTIONS_FROM_CONFIG_FILE.agent_uuid:
        set_agent_uuid()

    publish_agent_online()

    global PROCESSED_SQS_MESSAGES
    PROCESSED_SQS_MESSAGES = read_processed_messages_from_file()
    print(PROCESSED_SQS_MESSAGES)

    main_loop()
Example #17
def resolve_referenced_resource(ref: dict, region: str):
    if 'Stack' in ref and 'LogicalId' in ref:
        cf = boto3.client('cloudformation', region)
        resource = cf.describe_stack_resource(
            StackName=ref['Stack'],
            LogicalResourceId=ref['LogicalId'])['StackResourceDetail']
        if not is_status_complete(resource['ResourceStatus']):
            raise ValueError('Resource "{}" is not ready ("{}")'.format(ref['LogicalId'], resource['ResourceStatus']))

        resource_id = resource['PhysicalResourceId']

        # sg is referenced by its name not its id
        if resource['ResourceType'] == 'AWS::EC2::SecurityGroup':
            sg = get_security_group(region, resource_id)
            return sg.id if sg is not None else None
        else:
            return resource_id
    elif 'Stack' in ref and 'Output' in ref:
        cf = boto3.client('cloudformation', region)
        stack = cf.describe_stacks(
            StackName=ref['Stack'])['Stacks'][0]
        if not is_status_complete(stack['StackStatus']):
            raise ValueError('Stack "{}" is not ready ("{}")'.format(ref['Stack'], stack['StackStatus']))

        for output in stack.get('Outputs', []):
            if output['OutputKey'] == ref['Output']:
                return output['OutputValue']

        return None
    else:
        return ref
Example #18
def get_own_snapshots_source(pattern, response, backup_interval=None):
    # Filters our own snapshots
    filtered = {}
    for snapshot in response['DBSnapshots']:
        # No need to get tags for snapshots outside of the backup interval
        if backup_interval and snapshot['SnapshotCreateTime'].replace(tzinfo=None) < datetime.utcnow().replace(tzinfo=None) - timedelta(hours=backup_interval):
            continue

        if snapshot['SnapshotType'] == 'manual' and re.search(pattern, snapshot['DBSnapshotIdentifier']) and snapshot['Engine'] in _SUPPORTED_ENGINES:
            client = boto3.client('rds', region_name=_REGION)
            response_tags = client.list_tags_for_resource(
                ResourceName=snapshot['DBSnapshotArn'])

            if search_tag_created(response_tags):
                filtered[snapshot['DBSnapshotIdentifier']] = {
                    'Arn': snapshot['DBSnapshotArn'], 'Status': snapshot['Status'], 'DBInstanceIdentifier': snapshot['DBInstanceIdentifier']}

        elif snapshot['SnapshotType'] == 'manual' and pattern in ('ALL_CLUSTERS', 'ALL_SNAPSHOTS', 'ALL_INSTANCES') and snapshot['Engine'] in _SUPPORTED_ENGINES:
            client = boto3.client('rds', region_name=_REGION)
            response_tags = client.list_tags_for_resource(
                ResourceName=snapshot['DBSnapshotArn'])

            if search_tag_created(response_tags):
                filtered[snapshot['DBSnapshotIdentifier']] = {
                    'Arn': snapshot['DBSnapshotArn'], 'Status': snapshot['Status'], 'DBInstanceIdentifier': snapshot['DBInstanceIdentifier']}

    return filtered
Example #19
def main():
    logging.basicConfig(level=LOG_LEVEL)

    if os.environ['DEPLOYMENT_GROUP_NAME'] == 'cron':
        logging.warning('Local instance is a cron server; exiting...')
        raise SystemExit(0)

    logging.debug('Making request for local instance metadata')
    instance_identity = requests.get(INSTANCE_IDENTITY_URL)
    logging.debug('Parsing metadata response JSON')
    instance_info = instance_identity.json()
    aws_region = instance_info['region']
    instance_id = instance_info['instanceId']
    logging.info('Local instance is \'{}\' in \'{}\''.format(instance_id,
                                                             aws_region))

    as_client = boto3.client('autoscaling', region_name=aws_region)
    ec2_client = boto3.client('ec2', region_name=aws_region)
    elb_client = boto3.client('elb', region_name=aws_region)

    logging.debug('Checking if this instance was just launched by scaling...')
    api_response = as_client.describe_auto_scaling_instances(InstanceIds=[instance_id])
    if api_response['AutoScalingInstances'][0]['LifecycleState'].startswith('Pending'):
        logging.warning('Instance just launched; exiting without action')
        raise SystemExit(0)

    logging.debug('Looking up autoscaling group name...')
    autoscaling_group = get_autoscaling_group(instance_id, ec2_client)

    logging.debug('Removing instance from associated load balancers...')
    if autoscaling_group:
        remove_instance_from_load_balancers(instance_id, autoscaling_group,
                                            as_client, elb_client)

    raise SystemExit(0)
Example #20
def test_delete_function():
    s3_conn = boto3.client('s3', 'us-west-2')
    s3_conn.create_bucket(Bucket='test-bucket')

    zip_content = get_test_zip_file()
    s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
    conn = boto3.client('lambda', 'us-west-2')

    conn.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.handler',
        Code={
            'S3Bucket': 'test-bucket',
            'S3Key': 'test.zip',
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )

    success_result = conn.delete_function(FunctionName='testFunction')
    success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}})

    conn.delete_function.when.called_with(FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError)
Example #21
    def verify_queue(self):
        client = boto3.client('sqs', region_name=get_region())
        https_status_code, new_message_num = self.check_queue_count(client)

        if new_message_num is None:
            self.log.error("sqs_watcherd get_queue_attributes failed. Response HTTPSStatusCode: " + str(
                https_status_code))
            return

        self.old_message_num = self.message_num
        self.message_num = new_message_num

        if self.message_num != 0 and self.message_num == self.old_message_num:
            client = boto3.client('lambda', region_name=get_region())
            lambdas_to_invoke = min(self.message_num, MAX_LAMBDAS_TO_INVOKE)
            self.log.info("kicking off {} lambdas".format(lambdas_to_invoke))
            for i in range(lambdas_to_invoke):
                response = client.invoke(
                    FunctionName=self.lambda_data["config"]["object_store_config"]["page_out_lambda_function"],
                    InvocationType='Event',
                    Payload=json.dumps(self.lambda_data).encode())
                if response['ResponseMetadata']['HTTPStatusCode'] != 202:
                    self.log.error("sqs_watcherd invoke_lambda failed. Response HTTPSStatusCode: " + str(
                        response['ResponseMetadata']['HTTPStatusCode']))
            return lambdas_to_invoke
Example #22
    def __init__(self):
        super(SecretProvider, self).__init__()
        self._value = None
        self.request_schema = request_schema
        self.ssm = boto3.client('ssm')
        self.region = boto3.session.Session().region_name
        self.account_id = boto3.client('sts').get_caller_identity()['Account']
Example #23
        def decorated_function(*args, **kwargs):
            sts = boto3.client("sts")

            account_number = kwargs["account_number"]
            role = CONFIG.get("DIFFY_AWS_ASSUME_ROLE", "Diffy")

            arn = f"arn:aws:iam::{account_number}:role/{role}"

            kwargs.pop("account_number")

            # TODO add incident specific information to RoleSessionName
            logger.debug(f"Assuming role. Arn: {arn}")
            role = sts.assume_role(RoleArn=arn, RoleSessionName="diffy")

            if service_type == "client":
                client = boto3.client(
                    service,
                    region_name=kwargs["region"],
                    aws_access_key_id=role["Credentials"]["AccessKeyId"],
                    aws_secret_access_key=role["Credentials"]["SecretAccessKey"],
                    aws_session_token=role["Credentials"]["SessionToken"],
                )
                kwargs["client"] = client
            elif service_type == "resource":
                resource = boto3.resource(
                    service,
                    region_name=kwargs["region"],
                    aws_access_key_id=role["Credentials"]["AccessKeyId"],
                    aws_secret_access_key=role["Credentials"]["SecretAccessKey"],
                    aws_session_token=role["Credentials"]["SessionToken"],
                )
                kwargs["resource"] = resource
            return f(*args, **kwargs)
Example #24
def test_describe_instance_health_boto3():
    elb = boto3.client('elb', region_name="us-east-1")
    ec2 = boto3.client('ec2', region_name="us-east-1")
    instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances']
    lb_name = "my_load_balancer"
    elb.create_load_balancer(
        Listeners=[{
            'InstancePort': 80,
            'LoadBalancerPort': 8080,
            'Protocol': 'HTTP'
        }],
        LoadBalancerName=lb_name,
    )
    elb.register_instances_with_load_balancer(
        LoadBalancerName=lb_name,
        Instances=[{'InstanceId': instances[0]['InstanceId']}]
    )
    instances_health = elb.describe_instance_health(
        LoadBalancerName=lb_name,
        Instances=[{'InstanceId': instance['InstanceId']} for instance in instances]
    )
    instances_health['InstanceStates'].should.have.length_of(2)
    instances_health['InstanceStates'][0]['InstanceId'].\
        should.equal(instances[0]['InstanceId'])
    instances_health['InstanceStates'][0]['State'].\
        should.equal('InService')
    instances_health['InstanceStates'][1]['InstanceId'].\
        should.equal(instances[1]['InstanceId'])
    instances_health['InstanceStates'][1]['State'].\
        should.equal('Unknown')
Example #25
def create_task(screen_name, consumer_key, consumer_secret, user_key, user_secret):
    ecs = boto3.client('ecs', region_name=conf.get_config('AWS_REGION_NAME'))
    ec2 = boto3.client('ec2', region_name=conf.get_config('AWS_REGION_NAME'))

    try:
        rw = RandomWords()
        word = rw.random_words(count=3)
        password = '******' % (word[0], word[1], word[2])

        tn_logger.debug('Calling run_task')
        task_arn = run_task(ecs, screen_name, consumer_key, consumer_secret, user_key, user_secret, password)
        tn_logger.debug('Done calling run_task')
        task_info = get_task_info(ecs, task_arn)
        ip_address = get_connection_ip(ec2, task_info['instanceId'])
        try_connecting_neo4j(ip_address, task_info['port'])
        tn_logger.info('Created instance for tw:%s at %s:%s' % (screen_name, ip_address, task_info['port']))
    except Exception as e:
        tn_logger.exception(e)
        tn_logger.error('Error creating docker image for: tu:%s' % screen_name)
        print(traceback.format_exc())
        print(e)
        raise e

    response_dict = {
        'url': 'http://%s:%s' % (ip_address, task_info['port']),
        'password': password}

    return response_dict
Example #26
    def __init__(self, queue_name='Battleship_Registration'):
        # Create reusable sns and sqs client resources
        self.sns_client = boto3.client('sns', 'us-east-2')
        self.sqs_client = boto3.client('sqs', 'us-east-2')
        self.sqs_resource = boto3.resource('sqs', 'us-east-2')
        #TODO Validation of queue_name

        # Subscribe to Registration topic
        self.queue = self.sqs_resource.get_queue_by_name(QueueName=queue_name)
      
        sqs_arn = self.sqs_client.get_queue_attributes(QueueUrl=self.queue.url, AttributeNames=['QueueArn'])['Attributes']['QueueArn']

        # Create topic for communications to this bot and store it in the Player object.
        # The topic name is a random string of 16 letters and digits.
        self.my_topic_name = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(16)])
        self.my_topic_arn = self.sns_client.create_topic(Name=self.my_topic_name)

        # Tie topic to queue
        policy = self.build_policy(self.my_topic_name, queue_name, self.queue.url)

        self.sqs_client.set_queue_attributes(QueueUrl=self.queue.url, Attributes={'Policy': policy})

        # Connect topic to queue
        # Subscribe to own topic
        self.sub_arn = self.sns_client.subscribe(TopicArn=self.my_topic_arn['TopicArn'], Protocol='sqs', Endpoint=sqs_arn)

        # Set subscription filter
        self.sns_client.set_subscription_attributes(SubscriptionArn=self.sub_arn['SubscriptionArn'], AttributeName="FilterPolicy", AttributeValue="{{\"topic_type\": [\"{}\"]}}".format(self.my_topic_arn['TopicArn']))
Example #27
def create_function(name, zfile, lsize=512, timeout=10, update=False):
    """ Create, or update if exists, lambda function """
    # create role for this function
    role = get_or_create_role(name + '_lambda',
                              policies=['arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole',
                                        'arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole'])
    l = boto3.client('lambda')
    ec2 = boto3.client('ec2')
    with open(zfile, 'rb') as zipfile:
        funcs = l.list_functions()['Functions']
        if name in [f['FunctionName'] for f in funcs]:
            if update:
                print('%s: Updating %s lambda function code' % (timestamp(), name))
                return l.update_function_code(FunctionName=name, ZipFile=zipfile.read())
            else:
                print('%s: Lambda function %s exists' % (timestamp(), name))
                for f in funcs:
                    if f['FunctionName'] == name:
                        return f
        else:
            print('%s: Creating %s lambda function' % (timestamp(), name))
            group = get_or_create_security_group('%s_lambda' % name)
            subnets = [s['SubnetId'] for s in ec2.describe_subnets()['Subnets']]
            return l.create_function(
                FunctionName=name,
                Runtime='nodejs',
                Role=role['Arn'],
                Handler='examples/kinesis-consumer/index.handler',
                Description='OSM Stats Worker',
                Timeout=timeout,
                MemorySize=lsize,
                Publish=True,
                Code={'ZipFile': zipfile.read()},
                VpcConfig={'SubnetIds': subnets, 'SecurityGroupIds': [group.group_id]}
            )
Example #28
def lambda_handler(event, context):
  source = boto3.client('rds', region_name=SOURCE_REGION)
  source_snaps = source.describe_db_snapshots(SnapshotType='automated', DBInstanceIdentifier=SOURCE_DB)['DBSnapshots']
  source_snap = sorted(source_snaps, key=bySnapshotId, reverse=True)[0]['DBSnapshotIdentifier']
  source_snap_arn = 'arn:aws:rds:%s:%s:snapshot:%s' % (SOURCE_REGION, event['account'], source_snap)
  target_snap_id = 'copy-of-%s' % (re.sub('rds:', '', source_snap))
  print('Will Copy %s to %s' % (source_snap_arn, target_snap_id))

  target = boto3.client('rds', region_name=event['region'])
  try:
    response = target.copy_db_snapshot(
      SourceDBSnapshotIdentifier=source_snap_arn,
      TargetDBSnapshotIdentifier=target_snap_id,
      CopyTags=True)
    print(response)
  except botocore.exceptions.ClientError as e:
    raise Exception("Could not issue copy command: %s" % e)

  copied_snaps = target.describe_db_snapshots(SnapshotType='manual', DBInstanceIdentifier=SOURCE_DB)['DBSnapshots']
  if len(copied_snaps) > 0:
    for snap in sorted(copied_snaps, key=byTimestamp, reverse=True)[KEEP:]:
      print('Will remove %s' % snap['DBSnapshotIdentifier'])
      try:
        target.delete_db_snapshot(DBSnapshotIdentifier=snap['DBSnapshotIdentifier'])
      except botocore.exceptions.ClientError as e:
        raise Exception("Could not delete snapshot " + snap['DBSnapshotIdentifier'] + ": %s" % e)
Example #29
    def load(self, cluster, region, name, env, job_path, healthcheckpath, handle_file):

        client = boto3.client('ecs', region_name=region)
        elb_client = boto3.client('elbv2', region_name=region)

        count = 0
        for subdir, dirs, files in os.walk(job_path):
            for fn in files:
                filename = os.path.join(subdir, fn)

                # Skip non-json files
                ext = filename.split('.')[-1]
                if ext != 'json':
                    continue

                if name is None or name in filename:
                    with open(filename, 'r') as f_h:
                        try:
                            ecs_json = json.loads(f_h.read(), object_hook=remove_nulls)
                            count += 1
                            handle_file(client, cluster, ecs_json, env, region, filename, count, elb_client, healthcheckpath)
                        except Exception:
                            logging.exception("Error reading file %s " % filename)
                            self.catfile(filename)
                            raise
Example #30
def test_delete_function():
    s3_conn = boto3.client('s3', 'us-west-2')
    s3_conn.create_bucket(Bucket='test-bucket')

    zip_content = get_test_zip_file2()
    s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
    conn = boto3.client('lambda', 'us-west-2')

    conn.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='test-iam-role',
        Handler='lambda_function.lambda_handler',
        Code={
            'S3Bucket': 'test-bucket',
            'S3Key': 'test.zip',
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )

    success_result = conn.delete_function(FunctionName='testFunction')
    # this is hard to match against, so remove it
    success_result['ResponseMetadata'].pop('HTTPHeaders', None)
    # Botocore inserts retry attempts not seen in Python27
    success_result['ResponseMetadata'].pop('RetryAttempts', None)

    success_result.should.equal({'ResponseMetadata': {'HTTPStatusCode': 204}})

    conn.delete_function.when.called_with(
        FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError)
Example #31
def test_put_key_policy_key_not_found():
    client = boto3.client("kms", region_name="us-east-1")

    with assert_raises(client.exceptions.NotFoundException):
        client.put_key_policy(KeyId="00000000-0000-0000-0000-000000000000", PolicyName="default", Policy="new policy")
Example #32
def test_enable_key_rotation_key_not_found():
    client = boto3.client("kms", region_name="us-east-1")

    with assert_raises(client.exceptions.NotFoundException):
        client.enable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02")
Example #33
def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type):
    client = boto3.client("kms", region_name="us-west-2")

    with assert_raises(error_type):
        client.generate_random(NumberOfBytes=number_of_bytes)
Example #34
def test_describe_key_via_alias_invalid_alias(key_id):
    client = boto3.client("kms", region_name="us-east-1")
    client.create_key(Description="key")

    with assert_raises(client.exceptions.NotFoundException):
        client.describe_key(KeyId=key_id)
Example #35
def test_generate_data_key_invalid_key(key_id):
    client = boto3.client("kms", region_name="us-east-1")

    with assert_raises(client.exceptions.NotFoundException):
        client.generate_data_key(KeyId=key_id, KeySpec="AES_256")
Example #36
def test_generate_data_key_invalid_size_params(kwargs):
    client = boto3.client("kms", region_name="us-east-1")
    key = client.create_key(Description="generate-data-key-size")

    with assert_raises((botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError)) as err:
        client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs)
Example #37
def test_invalid_key_ids(key_id):
    client = boto3.client("kms", region_name="us-east-1")

    with assert_raises(client.exceptions.NotFoundException):
        client.generate_data_key(KeyId=key_id, NumberOfBytes=5)
Example #38
    def _register_image(self, ami_metadata):
        """Register the AMI using boto3/botocore components which supports ENA
           This is the only use of boto3 in aminator currently"""

        # construct AMI registration payload boto3 style
        request = {}
        request['Name'] = ami_metadata.get('name', None)
        request['Description'] = ami_metadata.get('description', None)
        request['Architecture'] = ami_metadata.get('architecture', None)
        request['EnaSupport'] = ami_metadata.get('ena_networking', False)
        request['VirtualizationType'] = ami_metadata.get('virtualization_type', None)

        # when instance store, don't provide these; botocore expects string values when present
        if ami_metadata.get('block_device_map') is not None:
            request['BlockDeviceMappings'] = ami_metadata.get('block_device_map')
        if ami_metadata.get('root_device_name') is not None:
            request['RootDeviceName'] = ami_metadata.get('root_device_name')

        # only present for instance store
        if ami_metadata.get('image_location') is not None:
            request['ImageLocation'] = ami_metadata.get('image_location')

        # can only be set to 'simple' for hvm.  don't include otherwise
        if ami_metadata.get('sriov_net_support') is not None:
            request['SriovNetSupport'] = ami_metadata.get('sriov_net_support')

        if (ami_metadata.get('virtualization_type') == 'paravirtual'):
            # KernelId required
            request['KernelId'] = ami_metadata.get('kernel_id', None)
            if ami_metadata.get('ramdisk_id') is not None:
                request['RamdiskId'] = ami_metadata.get('ramdisk_id', None)

        # assert we have all the key params. Nothing up to here should be None
        for key, value in request.items():
            if request[key] is None:
                raise FinalizerException('{} cannot be None'.format(key))

        log.debug('Boto3 registration request data [{}]'.format(request))

        try:
            client = boto3.client('ec2', region_name=ami_metadata.get('region'))
            response = client.register_image(**request)
            log.debug('Registration response data [{}]'.format(response))

            ami_id = response['ImageId']
            if ami_id is None:
                return False

            log.info('Waiting for [{}] to become available'.format(ami_id))
            waiter = client.get_waiter('image_available')
            wait_request = {'ImageIds': [ami_id]}
            waiter.wait(**wait_request)
            # Now, using boto2, load the Image so downstream tagging operations work
            # using boto2 classes
            log.debug('Image available!  Loading boto2.Image for [{}]'.format(ami_id))
            self._ami = self._connection.get_image(ami_id)
        except ClientError as e:
            if e.response['Error']['Code'] == 'InvalidAMIID.NotFound':
                log.debug('{0} was not found while waiting for it to become available'.format(ami_id))
                log.error('Error during register_image: {}'.format(e))
                return False
            else:
                # defer to registration_retry decorator
                raise e

        log.info('AMI registered: {0} {1}'.format(self._ami.id, self._ami.name))
        self._config.context.ami.image = self._ami

        return True
Example #39
def test_operations_with_invalid_tags():
    client = boto3.client("acm", region_name="eu-central-1")

    # request certificate with invalid tags
    with assert_raises(ClientError) as ex:
        client.request_certificate(
            DomainName="example.com",
            Tags=[{
                "Key": "X" * 200,
                "Value": "Valid"
            }],
        )
    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
    ex.exception.response["Error"]["Message"].should.contain(
        "Member must have length less than or equal to 128")

    # import certificate with invalid tags
    with assert_raises(ClientError) as ex:
        client.import_certificate(
            Certificate=SERVER_CRT,
            PrivateKey=SERVER_KEY,
            CertificateChain=CA_CRT,
            Tags=[
                {
                    "Key": "Valid",
                    "Value": "X" * 300
                },
                {
                    "Key": "aws:xx",
                    "Value": "Valid"
                },
            ],
        )

    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
    ex.exception.response["Error"]["Message"].should.contain(
        "Member must have length less than or equal to 256")

    arn = _import_cert(client)

    # add invalid tags to existing certificate
    with assert_raises(ClientError) as ex:
        client.add_tags_to_certificate(
            CertificateArn=arn,
            Tags=[{
                "Key": "aws:xxx",
                "Value": "Valid"
            }, {
                "Key": "key2"
            }],
        )
    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
    ex.exception.response["Error"]["Message"].should.contain(
        "AWS internal tags cannot be changed with this API")

    # try removing invalid tags from existing certificate
    with assert_raises(ClientError) as ex:
        client.remove_tags_from_certificate(CertificateArn=arn,
                                            Tags=[{
                                                "Key": "aws:xxx",
                                                "Value": "Valid"
                                            }])
    ex.exception.response["Error"]["Code"].should.equal("ValidationException")
    ex.exception.response["Error"]["Message"].should.contain(
        "AWS internal tags cannot be changed with this API")
Example #40
def test_get_key_policy_key_not_found():
    client = boto3.client("kms", region_name="us-east-1")

    with assert_raises(client.exceptions.NotFoundException):
        client.get_key_policy(KeyId="12366f9b-1230-123d-123e-123e6ae60c02", PolicyName="default")
Example #41
#Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)

import boto3

if __name__ == "__main__":

    bucket = 'ReplaceS3BucketName'
    collectionId = 'MyCollection'
    fileName = 'input.jpg'
    threshold = 1
    maxFaces = 1

    client = boto3.client('rekognition')

    response = client.search_faces_by_image(
        CollectionId=collectionId,
        Image={'S3Object': {
            'Bucket': bucket,
            'Name': fileName
        }},
        FaceMatchThreshold=threshold,
        MaxFaces=maxFaces)

    faceMatches = response['FaceMatches']
    print('Matching faces')
    for match in faceMatches:
        print('FileKey:' + match['Face']['ExternalImageId'])
        print('FaceId:' + match['Face']['FaceId'])
        print('Similarity: ' + "{:.2f}".format(match['Similarity']) + "%")
Example #42
from datetime import datetime
import os

import boto3


S3_CLIENT = boto3.client('s3')
PROJECT_NAME = os.getenv('PROJECT_NAME')


def read_from(key):
    params = {
        'Bucket': PROJECT_NAME,
        'Key': key
    }
    try:
        response = S3_CLIENT.get_object(**params)
    except S3_CLIENT.exceptions.NoSuchKey:
        return None
    else:
        return response['Body'].read()


def write_to(key, content, content_type):
    params = {
        'Bucket': PROJECT_NAME,
        'ACL': 'public-read',
        'Key': key,
        'Body': content,
        'ContentType': content_type
    }
    # assumed completion; the original snippet is cut off after the params dict
    return S3_CLIENT.put_object(**params)
Example #43
def get_s3_url(key):
    client = boto3.client('s3', region_name=os.environ.get("AWS_S3_REGION"))
    url = os.path.join(client.meta.endpoint_url, settings.AWS_STORAGE_BUCKET_NAME, key)
    return url
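Note that joining endpoint_url, bucket and key produces an unsigned URL, which only works for public objects and assumes path-style addressing. For private objects, a presigned URL is an alternative; a sketch (the function name is an assumption):

def get_presigned_s3_url(key, expires=3600):
    client = boto3.client('s3', region_name=os.environ.get("AWS_S3_REGION"))
    # Signed URL valid for `expires` seconds
    return client.generate_presigned_url(
        'get_object',
        Params={'Bucket': settings.AWS_STORAGE_BUCKET_NAME, 'Key': key},
        ExpiresIn=expires,
    )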
Example #44
def test_request_certificate_with_tags():
    client = boto3.client("acm", region_name="eu-central-1")

    token = str(uuid.uuid4())

    resp = client.request_certificate(
        DomainName="google.com",
        IdempotencyToken=token,
        SubjectAlternativeNames=[
            "google.com", "www.google.com", "mail.google.com"
        ],
        Tags=[
            {
                "Key": "Environment",
                "Value": "QA"
            },
            {
                "Key": "WithEmptyStr",
                "Value": ""
            },
        ],
    )
    resp.should.contain("CertificateArn")
    arn_1 = resp["CertificateArn"]

    resp = client.list_tags_for_certificate(CertificateArn=arn_1)
    tags = {
        item["Key"]: item.get("Value", "__NONE__")
        for item in resp["Tags"]
    }
    tags.should.have.length_of(2)
    tags["Environment"].should.equal("QA")
    tags["WithEmptyStr"].should.equal("")

    # Request certificate for "google.com" with same IdempotencyToken but with different Tags
    resp = client.request_certificate(
        DomainName="google.com",
        IdempotencyToken=token,
        SubjectAlternativeNames=[
            "google.com", "www.google.com", "mail.google.com"
        ],
        Tags=[
            {
                "Key": "Environment",
                "Value": "Prod"
            },
            {
                "Key": "KeyOnly"
            },
        ],
    )
    arn_2 = resp["CertificateArn"]

    assert arn_1 != arn_2  # if tags are matched, ACM would have returned same arn

    resp = client.list_tags_for_certificate(CertificateArn=arn_2)
    tags = {
        item["Key"]: item.get("Value", "__NONE__")
        for item in resp["Tags"]
    }
    tags.should.have.length_of(2)
    tags["Environment"].should.equal("Prod")
    tags["KeyOnly"].should.equal("__NONE__")

    resp = client.request_certificate(
        DomainName="google.com",
        IdempotencyToken=token,
        SubjectAlternativeNames=[
            "google.com", "www.google.com", "mail.google.com"
        ],
        Tags=[
            {
                "Key": "Environment",
                "Value": "QA"
            },
            {
                "Key": "WithEmptyStr",
                "Value": ""
            },
        ],
    )
Example #45
def get_job_status(sid):
    desdata = DesignModel.objects.filter(SID=sid).last()
    client = boto3.client('batch')
    jobid = desdata.jobid
    descr = client.describe_jobs(jobs=[jobid])['jobs'][0]
    return descr
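A caller could poll the returned description's 'status' field until AWS Batch reports a terminal state; a sketch building on get_job_status (the function name, interval, and the time import are assumptions):

def wait_for_job(sid, interval=30):
    # AWS Batch terminal states are SUCCEEDED and FAILED
    while True:
        status = get_job_status(sid)['status']
        if status in ('SUCCEEDED', 'FAILED'):
            return status
        time.sleep(interval)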
Example #46
import boto3
import json
import urllib.request

comprehend = boto3.client('comprehend')


def lambda_handler(event, context):

    payload = event['Input']['Payload']
    transcriptFileUri = payload['TranscriptFileUri']
    transcriptionJobName = payload['TranscriptionJobName']

    transcriptFile = urllib.request.urlopen(transcriptFileUri)
    transcript = transcriptFile.read()
    transcript = json.loads(transcript)
    transcript_text = transcript['results']['transcripts'][0]['transcript']

    response = comprehend.detect_sentiment(Text=transcript_text,
                                           LanguageCode='en')

    sentiment = response['Sentiment']

    return {
        'Sentiment': sentiment,
        'TranscriptionJobName': transcriptionJobName
    }
Example #47
def post_hits(hit_info, n_sub_hits, save_name):
    """General format for boto3 mturk interface
    - https://github.com/aws-samples/mturk-code-samples/blob/master/Python/CreateHitSample.py
    """

    # set platform to submit to
    if hit_info['platform'] == 'sandbox':
        endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
        external_submit = 'https://workersandbox.mturk.com/mturk/externalSubmit'
        base_url = 'https://workersandbox.mturk.com/mturk/preview?groupId='
    elif hit_info['platform'] == 'live':
        endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
        external_submit = "https://www.mturk.com/mturk/externalSubmit"
        base_url = 'https://www.mturk.com/mturk/preview?groupId='
    else:
        raise ValueError("hit_info['platform'] must be 'sandbox' or 'live'")

    # open connection to mturk platform
    mturk = boto3.client(
        'mturk',
        aws_access_key_id=access_id,
        aws_secret_access_key=secret_key,
        region_name='us-east-1',
        endpoint_url=endpoint_url,
    )

    # generate this human intelligence task (HIT)
    HIT = mturk.create_hit(
        Question=experiment(hit_info['external_url'],
                            hit_info['frame_height']).get_as_xml(),
        LifetimeInSeconds=hit_info['lifetime_of_experiment'],
        MaxAssignments=n_sub_hits,
        Title=hit_info['title'],
        Description=hit_info['description'],
        Keywords=hit_info['keywords'],
        QualificationRequirements=hit_info['qualifications'],
        Reward=hit_info['payment_for_experiment'],
        AssignmentDurationInSeconds=hit_info['duration_of_experiment'],
        AutoApprovalDelayInSeconds=hit_info['approval_delay'],
        RequesterAnnotation=hit_info['experiment_name'])

    # save the HIT ID for our records + to display
    hit_info['hit_id'] = HIT['HIT']['HITId']
    # save full URL where the HIT can be found
    hit_info['hit_url'] = "{}{}".format(base_url, HIT['HIT']['HITTypeId'])

    # we'll save this HIT info for our own records
    record_name = '%s_%s.txt' % (hit_info['platform'], save_name)
    # if we've already used this name, load it
    if record_name in os.listdir(os.getcwd()):
        with open(record_name) as json_file:
            turk_info = json.load(json_file)
    else:
        # create a new file, if name hasn't been used
        turk_info = {}
    # name this submission with a unique identifier
    turk_info['submission_%d' % len(turk_info.keys())] = hit_info
    # save this HIT for our own records
    with open(record_name, 'w') as outfile:
        json.dump(turk_info, outfile)
    # print out the HIT URL on the command line
    print('HIT_ID:', HIT['HIT']['HITId'], "\nwhich you can see here:",
          hit_info['hit_url'])
Example #48
def stop_job(sid):
    desdata = DesignModel.objects.filter(SID=sid).last()
    client = boto3.client('batch')
    jobid = desdata.jobid
    client.terminate_job(jobId=jobid,reason="User cancelled job.")
Example #49
import boto3
import json
from botocore.exceptions import ClientError

codecommit_client = boto3.client('codecommit')
codepipeline_client = boto3.client('codepipeline')

repository_name = 'trigger-test'

#pipeline name : trigger-test-pr-pr_id


def lambda_handler(event, context):

    openpr_list = codecommit_client.list_pull_requests(
        repositoryName=repository_name, pullRequestStatus='OPEN')
    open_pull_requests = openpr_list['pullRequestIds']

    for pr_id in open_pull_requests:
        pipeline_name = str(repository_name + "-" + "pr" + "-" + pr_id)
        try:
            pipeline_response = codepipeline_client.get_pipeline(
                name=pipeline_name)
        except ClientError as e:
            if e.response['Error']['Code'] == 'PipelineNotFoundException':
                print("Pipeline %s does not exist, creating a new one." %
                      pipeline_name)

                pr_metadata = codecommit_client.get_pull_request(
                    pullRequestId=pr_id)['pullRequest']['pullRequestTargets'][
                        0]['sourceReference'].split("/")
Example #50
def push_to_s3(filename, key):
    client = boto3.client('s3')
    client.upload_file(filename, settings.AWS_STORAGE_BUCKET_NAME, key)
    response = client.put_object_acl(ACL='public-read', Bucket=settings.AWS_STORAGE_BUCKET_NAME, Key=key)
    os.remove(filename)
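The separate put_object_acl call can be folded into the upload itself, since upload_file accepts ExtraArgs; an equivalent sketch:

def push_to_s3(filename, key):
    client = boto3.client('s3')
    # Set the ACL at upload time instead of with a second request
    client.upload_file(filename, settings.AWS_STORAGE_BUCKET_NAME, key,
                       ExtraArgs={'ACL': 'public-read'})
    os.remove(filename)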
Example #51
        set(bio_ontology.predecessors(node))
    if neigh & always_include:
        return True
    if {bio_ontology.get_ns(n) for n in neigh} & always_include_ns:
        return True
    return False


if __name__ == '__main__':
    bio_ontology = BioOntology()
    bio_ontology.initialize()
    keep_nodes = set()
    for node in bio_ontology.nodes:
        ns = bio_ontology.get_ns(node)
        if keep_node(node):
            keep_nodes.add(node)
    for node in list(bio_ontology.nodes):
        if node not in keep_nodes:
            bio_ontology.remove_node(node)
    bio_ontology._build_name_lookup()
    bio_ontology._build_transitive_closure()
    fname = os.path.join(CACHE_DIR, 'mock_ontology.pkl')
    with open(fname, 'wb') as fh:
        pickle.dump(bio_ontology, fh, protocol=4)
    # Uploading to S3
    s3 = boto3.client('s3')
    s3.put_object(Body=pickle.dumps(bio_ontology), Bucket='bigmech',
                  Key=(f'travis/bio_ontology/{bio_ontology.version}/'
                       f'mock_ontology.pkl'),
                  ACL='public-read')
Example #52
import datetime
import requests
import ssl
import OpenSSL
import socket
import csv
import boto3


local_profiles = {'prod', 'dev'}
client = boto3.client('route53')
go_daddy_key = ""
go_daddy_secret = ""
auth_key = 'sso-key ' + go_daddy_key + ':' + go_daddy_secret

dump = [['Host', 'Issuer', 'Start', 'End', 'Expired', 'Subject']]

def rchop(string, ending):
  if string.endswith(ending):
    return string[:-len(ending)]
  return string

def cert_check(hostname):
    hostname = rchop(hostname, ".")
    print(hostname)
    port = 443
    row = []
    row.append(hostname)
    try:
        socket.setdefaulttimeout(1)
        cert = ssl.get_server_certificate(
Example #53
    parser = argparse.ArgumentParser(description="Delete several aws log "
                                     "groups at once. Always do a --dryrun "
                                     "before running it for real.")
    parser.add_argument(
            "log_group_prefix",
            type=str,
            help="all the log groups matching this prefix will be deleted "
    )
    parser.add_argument(
            "--dryrun",
            action="store_true",
            help="print the delete-log-group commands that would be executed "
    )
    args = parser.parse_args()

    client = boto3.client("logs")

    # Collect names of all the log groups matching the prefix
    log_groups = []
    response = client.describe_log_groups(
            logGroupNamePrefix=args.log_group_prefix,
    )
    log_groups += response["logGroups"]
    while "nextToken" in response:
        response = client.describe_log_groups(
                logGroupNamePrefix=args.log_group_prefix,
                nextToken=response["nextToken"]
        )
        log_groups += response["logGroups"]
    log_group_names = []
    for log_group in log_groups:
Example #54
# pylint: disable=I0011
# pylint: disable=C0111
# pylint: disable=C0103
# pylint: disable=C0301
# pylint: disable=C0325

import boto3
import botocore

client = boto3.client('sqs')

# Delete Launch Queue
try:
    response = client.get_queue_url(
        QueueName='sqs_launch',
    )
    launch_queue_url = response['QueueUrl']

    response = client.delete_queue(
        QueueUrl=launch_queue_url
    )
    print(launch_queue_url + ' deleted')
except botocore.exceptions.ClientError as e:
    if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
        print('Non existent queue')

# Delete Destroy Queue
try:
    response = client.get_queue_url(
        QueueName='sqs_destroy',
    )
Example #55
import boto3
from botocore.exceptions import ClientError
import json

iam_c = boto3.client('iam')
iam_r = boto3.resource('iam')


def generate_policy_document(s3buckets=None, snstopicarn=None):
    policy_template = None
    with open('installer/iam_policy_template.json', 'r') as policy_file:
        policy_template = json.loads(policy_file.read())

    bucketresources = []
    for bucket in s3buckets:
        bucketresources.append("arn:aws:s3:::{}".format(bucket))
        bucketresources.append("arn:aws:s3:::{}/*".format(bucket))
    policy_template['Statement'][3]['Resource'] = bucketresources

    if snstopicarn:
        policy_template['Statement'][4]['Resource'] = [snstopicarn]
    else:
        # don't need sns statement if there's no topic
        del policy_template['Statement'][4]
    return json.dumps(policy_template, indent=4)


def get_or_create_role(role_name):
    lambda_assume_role_policy_document = """{
      "Version": "2012-10-17",
      "Statement": [
Example #56
"""
Start job handler
"""

import boto3

ssm = boto3.client("ssm")

def handler(event, _):
    """
    Lambda handler
    """

    response = ssm.send_command(
        InstanceIds=[event['ec2start']['instance_id']],
        DocumentName=event['job_details']['ssm_document'],
        TimeoutSeconds=10800,
        Comment=event['job_details']['job_id'],
        Parameters={"commands":event['job_details']['commands']}
        )

    return response['Command']['CommandId']
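The returned CommandId can later be checked with ssm.get_command_invocation; a sketch of a follow-up handler (the handler name and event keys are assumptions):

def check_handler(event, _):
    # Look up the status of the command sent by the handler above
    result = ssm.get_command_invocation(
        CommandId=event['command_id'],
        InstanceId=event['ec2start']['instance_id'],
    )
    return result['Status']  # e.g. 'InProgress', 'Success', 'Failed'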
Example #57
import boto3
import json
import os
from time import sleep
import logging
import ast

logger = logging.getLogger()
logger.setLevel(logging.INFO)

s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
sqs = boto3.client('sqs')

jobFound = False
non_compliant = set(ast.literal_eval(os.environ['LabelsFilter']))
DestinationBucket = os.environ['DestinationBucket']
region = os.environ['Region']


def handler(event, context):
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        logger.info(
            "Starting labels detection triggered by S3 object creation: \n {0}"
            .format(event))
        detect_labels(bucket, key)


def detect_labels(bucket,
Example #58
import boto3

client = boto3.client('sns', region_name='ap-southeast-2')


def send_sns(sns_topic_arn="", sns_subject="", sns_message="", sns_source=""):

    full_subject = sns_subject + " : " + sns_source

    full_message = sns_message + " : " + sns_source

    print(
        client.publish(TopicArn=sns_topic_arn,
                       Subject=full_subject,
                       Message=full_message))
Example #59
import boto3

import os
from dotenv import load_dotenv
import time
from datetime import datetime, timedelta

from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago

load_dotenv()

s3_client = boto3.client(
    's3', 
    aws_access_key_id=os.getenv('S3_ACCESS_KEY_ID'),
    aws_secret_access_key=os.getenv('S3_SECRET_ACCESS_KEY')
)

args = {
    'owner': 'linzichao',
    'start_date': days_ago(2),
}

dag = DAG(
    dag_id='cleanup_version_1',
    default_args=args,
    schedule_interval="@daily",
    tags=['test']
)
Example #60
# Author: Dashrath Goswami
# This Lambda function publishes to an SNS topic to notify a user that an instance has stopped or started.
# It is triggered via CloudWatch Events on instance status changes.
# Give the Lambda function access to SNS and CloudWatch Logs.
# Rename the topic ARN and message as desired.
import boto3

client = boto3.client('sns')
def lambda_handler(event, context):
    topic_arn = 'arn:aws:sns:us-east-1:427413154210:ec2alert'
    message = 'prod server is stopped.'
    client.publish(TopicArn=topic_arn,Message=message)