def dumpRedshift(self):
    '''
    Dump Redshift clusters info to the log.

    Connects with the configured boto profile (or the default one) and, for
    every cluster returned by describe_clusters(), logs each attribute named
    in self.trsinfo via the generic logging helper.
    '''
    try:
        # Use the named boto profile only when it is not the default one.
        if self.botoprfl[0] != "default":
            conn = boto.connect_redshift(profile_name=self.botoprfl)
        else:
            conn = boto.connect_redshift()
        if conn:
            print("\n<Start of Redshift clusters>\n")
            for c in conn.describe_clusters()['DescribeClustersResponse'][
                    'DescribeClustersResult']['Clusters']:
                # Blank line separates consecutive clusters in the log.
                self.opygenericroutines.prntLogErrWarnInfo('', 'info', bresume=True)
                for i in self.trsinfo:
                    if i == 'ClusterCreateTime':
                        # Creation time arrives as an epoch timestamp; render it human-readable.
                        sinfo = " %s: %s" % (str(i), time.strftime(
                            "%a, %d %b %Y %H:%M:%S", time.gmtime(c[i])))
                    else:
                        sinfo = " %s: %s" % (str(i), str(c[i]))
                    self.opygenericroutines.prntLogErrWarnInfo(sinfo, 'info', bresume=True)
            self.opygenericroutines.prntLogErrWarnInfo('', 'info', bresume=True)
            print("\n<End of Redshift clusters>\n")
    except Exception as e:
        # Fixed: "except Exception, e" is Python-2-only syntax; "as e" works on 2.6+ and 3.x.
        serr = ('%s :: dumpRedshift(...) : connect_redshift,list_clusters(...).clusters, '
                '%s' % (self.sclsnme, str(e)))
        self.opygenericroutines.prntLogErrWarnInfo(serr, bresume=True)
def test_create_cluster_subnet_group():
    """A subnet group reports its name, description and member subnets."""
    vpc_api = boto.connect_vpc()
    net = vpc_api.create_vpc("10.0.0.0/16")
    first = vpc_api.create_subnet(net.id, "10.0.0.0/24")
    second = vpc_api.create_subnet(net.id, "10.0.1.0/24")

    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=[first.id, second.id],
    )

    response = redshift.describe_cluster_subnet_groups("my_subnet")
    result = response['DescribeClusterSubnetGroupsResponse'][
        'DescribeClusterSubnetGroupsResult']
    group = result['ClusterSubnetGroups'][0]

    group['ClusterSubnetGroupName'].should.equal("my_subnet")
    group['Description'].should.equal("This is my subnet group")
    members = [entry['SubnetIdentifier'] for entry in group['Subnets']]
    set(members).should.equal(set([first.id, second.id]))
def test_create_invalid_cluster_subnet_group():
    """Creating a subnet group with an unknown subnet id must fail."""
    redshift = boto.connect_redshift()
    attempt = redshift.create_cluster_subnet_group.when.called_with(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=["subnet-1234"],
    )
    attempt.should.throw(InvalidSubnet)
def test_delete_cluster_subnet_group():
    """Subnet groups disappear after deletion; unknown names raise."""
    vpc_api = boto.connect_vpc()
    net = vpc_api.create_vpc("10.0.0.0/16")
    member = vpc_api.create_subnet(net.id, "10.0.0.0/24")

    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=[member.id],
    )

    def _groups():
        # Fetch the current list of subnet groups.
        response = redshift.describe_cluster_subnet_groups()
        return response['DescribeClusterSubnetGroupsResponse'][
            'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups']

    _groups().should.have.length_of(1)
    redshift.delete_cluster_subnet_group("my_subnet")
    _groups().should.have.length_of(0)

    # Delete invalid id
    redshift.delete_cluster_subnet_group.when.called_with(
        "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound)
def main():
    """Load data, then (currently short-circuited) provision a Redshift cluster
    and prepare a COPY of session data from S3.

    NOTE(review): the sys.exit() immediately after load_data() makes everything
    below unreachable -- presumably left in while debugging; confirm before
    removing the early exit.
    """
    load_data()
    sys.exit()
    cluster_id = 'enr-cluster-2'
    rsCon = boto.connect_redshift(CREDS['akey'], CREDS['skey'])
    create_cluster(rsCon, cluster_id)
    try:
        r = wait_for_cluster(rsCon, cluster_id)
        endpoint = r['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]['Endpoint']
        log("Cluster ready: %s" % r)
        connect_to_db(endpoint)
        cur = dbCon.cursor()
        log("Creating table...")
        retval = cur.execute(tbl)
        if retval:
            log(retval)
        retval = cur.fetchall()
        if retval:
            log(retval)
        cp_cmd = """ COPY ado_ucsess FROM 's3://unitcore-stats3/year=2013/month=07/day=31/hour=14/type=sessions/' CREDENTIALS 'aws_access_key_id=%(akey)s;aws_secret_access_key=%(skey)s' delimiter '\t' gzip """ % d
        # create table testtable (testcol int);
    except Exception:
        # Fixed: "except Exception, e" is Python-2-only syntax and bound an
        # unused name; the traceback module prints the active exception itself.
        traceback.print_exc()
def main():
    """Entry point: load data, then (dead code below) build a Redshift cluster.

    NOTE(review): sys.exit() right after load_data() makes the rest of this
    function unreachable -- looks like a debugging leftover; verify intent
    before deleting the early exit.
    """
    load_data()
    sys.exit()
    cluster_id = 'enr-cluster-2'
    rsCon = boto.connect_redshift(CREDS['akey'], CREDS['skey'])
    create_cluster(rsCon, cluster_id)
    try:
        r = wait_for_cluster(rsCon, cluster_id)
        endpoint = r['DescribeClustersResponse']['DescribeClustersResult'][
            'Clusters'][0]['Endpoint']
        log("Cluster ready: %s" % r)
        connect_to_db(endpoint)
        cur = dbCon.cursor()
        log("Creating table...")
        retval = cur.execute(tbl)
        if retval:
            log(retval)
        retval = cur.fetchall()
        if retval:
            log(retval)
        cp_cmd = """ COPY ado_ucsess FROM 's3://unitcore-stats3/year=2013/month=07/day=31/hour=14/type=sessions/' CREDENTIALS 'aws_access_key_id=%(akey)s;aws_secret_access_key=%(skey)s' delimiter '\t' gzip """ % d
        # create table testtable (testcol int);
    except Exception:
        # Fixed: "except Exception, e" is Python-2-only syntax and the bound
        # name was unused; print_exc() reads the active exception directly.
        traceback.print_exc()
def test_modify_cluster():
    """modify_cluster updates every requested attribute in place."""
    redshift = boto.connect_redshift()
    name = 'my_cluster'
    redshift.create_cluster_security_group(
        "security_group",
        "This is my security group",
    )
    redshift.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )
    redshift.create_cluster(
        name,
        node_type='single-node',
        master_username="******",
        master_user_password="******",
    )

    def _cluster():
        # Re-read the cluster record after each mutation.
        response = redshift.describe_clusters(name)
        return response['DescribeClustersResponse'][
            'DescribeClustersResult']['Clusters'][0]

    _cluster()['EnhancedVpcRouting'].should.equal(False)

    redshift.modify_cluster(
        name,
        cluster_type="multi-node",
        node_type="dw.hs1.xlarge",
        cluster_security_groups="security_group",
        master_user_password="******",
        cluster_parameter_group_name="my_parameter_group",
        automated_snapshot_retention_period=7,
        preferred_maintenance_window="Tue:03:00-Tue:11:00",
        allow_version_upgrade=False,
        new_cluster_identifier=name,
    )

    cluster = _cluster()
    cluster['ClusterIdentifier'].should.equal(name)
    cluster['NodeType'].should.equal("dw.hs1.xlarge")
    cluster['ClusterSecurityGroups'][0][
        'ClusterSecurityGroupName'].should.equal("security_group")
    cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00")
    cluster['ClusterParameterGroups'][0][
        'ParameterGroupName'].should.equal("my_parameter_group")
    cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7)
    cluster['AllowVersionUpgrade'].should.equal(False)
    # This one should remain unmodified.
    cluster['NumberOfNodes'].should.equal(1)
def test_create_cluster_security_group():
    """A freshly created security group has the given metadata and no IP ranges."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_security_group(
        "my_security_group",
        "This is my security group",
    )
    response = redshift.describe_cluster_security_groups("my_security_group")
    result = response['DescribeClusterSecurityGroupsResponse'][
        'DescribeClusterSecurityGroupsResult']
    group = result['ClusterSecurityGroups'][0]
    group['ClusterSecurityGroupName'].should.equal("my_security_group")
    group['Description'].should.equal("This is my security group")
    list(group['IPRanges']).should.equal([])
def test_create_cluster_parameter_group():
    """Parameter groups echo back their name, family and description."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_parameter_group("my_parameter_group",
                                            "redshift-1.0",
                                            "This is my parameter group")
    response = redshift.describe_cluster_parameter_groups("my_parameter_group")
    result = response["DescribeClusterParameterGroupsResponse"][
        "DescribeClusterParameterGroupsResult"]
    group = result["ParameterGroups"][0]
    group["ParameterGroupName"].should.equal("my_parameter_group")
    group["ParameterGroupFamily"].should.equal("redshift-1.0")
    group["Description"].should.equal("This is my parameter group")
def test_create_cluster_security_group():
    """Creating a security group stores its name/description with empty IP ranges."""
    conn = boto.connect_redshift()
    conn.create_cluster_security_group("my_security_group",
                                       "This is my security group")
    described = conn.describe_cluster_security_groups("my_security_group")
    payload = described["DescribeClusterSecurityGroupsResponse"][
        "DescribeClusterSecurityGroupsResult"]
    created = payload["ClusterSecurityGroups"][0]
    created["ClusterSecurityGroupName"].should.equal("my_security_group")
    created["Description"].should.equal("This is my security group")
    list(created["IPRanges"]).should.equal([])
def test_create_cluster_parameter_group():
    """A new parameter group is retrievable with the values it was created with."""
    conn = boto.connect_redshift()
    conn.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )
    described = conn.describe_cluster_parameter_groups("my_parameter_group")
    payload = described['DescribeClusterParameterGroupsResponse'][
        'DescribeClusterParameterGroupsResult']
    created = payload['ParameterGroups'][0]
    created['ParameterGroupName'].should.equal("my_parameter_group")
    created['ParameterGroupFamily'].should.equal("redshift-1.0")
    created['Description'].should.equal("This is my parameter group")
def test_create_cluster_security_group():
    """Security group creation round-trips name and description, with no IP ranges."""
    api = boto.connect_redshift()
    api.create_cluster_security_group(
        "my_security_group",
        "This is my security group",
    )
    listing = api.describe_cluster_security_groups("my_security_group")
    body = listing['DescribeClusterSecurityGroupsResponse'][
        'DescribeClusterSecurityGroupsResult']
    sec_group = body['ClusterSecurityGroups'][0]
    sec_group['ClusterSecurityGroupName'].should.equal("my_security_group")
    sec_group['Description'].should.equal("This is my security group")
    list(sec_group['IPRanges']).should.equal([])
def dumpRedshift(self):
    '''
    Method to dump Redshift clusters info.

    Opens a Redshift connection (named profile or default) and, for each
    cluster in describe_clusters(), logs every attribute listed in
    self.trsinfo through the generic logging helper.
    '''
    try:
        # Honour a non-default boto profile when one is configured.
        if self.botoprfl[0] != "default":
            conn = boto.connect_redshift(profile_name=self.botoprfl)
        else:
            conn = boto.connect_redshift()
        if conn:
            print("\n<Start of Redshift clusters>\n")
            for c in conn.describe_clusters()['DescribeClustersResponse'][
                    'DescribeClustersResult']['Clusters']:
                # Blank line between clusters in the log output.
                self.opygenericroutines.prntLogErrWarnInfo('', 'info',
                                                           bresume=True)
                for i in self.trsinfo:
                    if i == 'ClusterCreateTime':
                        # Epoch timestamp -> human-readable GMT string.
                        sinfo = " %s: %s" % (str(i), time.strftime(
                            "%a, %d %b %Y %H:%M:%S", time.gmtime(c[i])))
                    else:
                        sinfo = " %s: %s" % (str(i), str(c[i]))
                    self.opygenericroutines.prntLogErrWarnInfo(
                        sinfo, 'info', bresume=True)
            self.opygenericroutines.prntLogErrWarnInfo('', 'info',
                                                       bresume=True)
            print("\n<End of Redshift clusters>\n")
    except Exception as e:
        # Fixed: "except Exception, e" is Python-2-only; "as e" is portable.
        serr = (
            '%s :: dumpRedshift(...) : connect_redshift,list_clusters(...).clusters, '
            '%s' % (self.sclsnme, str(e)))
        self.opygenericroutines.prntLogErrWarnInfo(serr, bresume=True)
def test_modify_cluster():
    """Renaming a cluster via modify_cluster also applies the other changes."""
    redshift = boto.connect_redshift()
    original_name = 'my_cluster'
    redshift.create_cluster_security_group(
        "security_group",
        "This is my security group",
    )
    redshift.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )
    redshift.create_cluster(
        original_name,
        node_type='single-node',
        master_username="******",
        master_user_password="******",
    )
    redshift.modify_cluster(
        original_name,
        cluster_type="multi-node",
        node_type="dw.hs1.xlarge",
        cluster_security_groups="security_group",
        master_user_password="******",
        cluster_parameter_group_name="my_parameter_group",
        automated_snapshot_retention_period=7,
        preferred_maintenance_window="Tue:03:00-Tue:11:00",
        allow_version_upgrade=False,
        new_cluster_identifier="new_identifier",
    )

    # The cluster must now be visible under its new identifier.
    response = redshift.describe_clusters("new_identifier")
    cluster = response['DescribeClustersResponse'][
        'DescribeClustersResult']['Clusters'][0]
    cluster['ClusterIdentifier'].should.equal("new_identifier")
    cluster['NodeType'].should.equal("dw.hs1.xlarge")
    cluster['ClusterSecurityGroups'][0][
        'ClusterSecurityGroupName'].should.equal("security_group")
    cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00")
    cluster['ClusterParameterGroups'][0][
        'ParameterGroupName'].should.equal("my_parameter_group")
    cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7)
    cluster['AllowVersionUpgrade'].should.equal(False)
    # This one should remain unmodified.
    cluster['NumberOfNodes'].should.equal(1)
def __init__(self, access=None, secret=None):
    """Hold AWS credentials and (best-effort) open Redshift/S3/EC2 connections.

    :param access: AWS access key id.
    :param secret: AWS secret access key.
    """
    self.ac = access
    self.se = secret
    self.placement = 'us-east-1a'
    self.key = 'dec15a'
    # self.myaddress='52.71.62.77'
    self.myaddress = None
    try:
        import boto
        self.red_conn = boto.connect_redshift(aws_access_key_id=self.ac,
                                              aws_secret_access_key=self.se)
        from boto.s3.connection import OrdinaryCallingFormat
        self.s3_conn = boto.connect_s3(aws_access_key_id=self.ac,
                                       aws_secret_access_key=self.se,
                                       calling_format=OrdinaryCallingFormat())
        self.buckets = self.s3_conn.get_all_buckets()
        self.ec2_conn = boto.connect_ec2(aws_access_key_id=self.ac,
                                         aws_secret_access_key=self.se)
    except Exception:
        # Fixed: a bare "except:" would also swallow SystemExit and
        # KeyboardInterrupt.  Connections stay unset when boto is missing
        # or credentials are invalid (deliberate best-effort behaviour).
        pass
def test_create_cluster_with_parameter_group():
    """A cluster records the parameter group it was created with."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_parameter_group("my_parameter_group",
                                            "redshift-1.0",
                                            "This is my parameter group")
    redshift.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="******",
        master_user_password="******",
        cluster_parameter_group_name="my_parameter_group",
    )
    response = redshift.describe_clusters("my_cluster")
    cluster = response["DescribeClustersResponse"][
        "DescribeClustersResult"]["Clusters"][0]
    attached = cluster["ClusterParameterGroups"][0]["ParameterGroupName"]
    attached.should.equal("my_parameter_group")
def test_delete_cluster_security_group():
    """Security groups can be removed; deleting an unknown name raises."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_security_group(
        "my_security_group",
        "This is my security group",
    )

    def _groups():
        # Current security group listing.
        response = redshift.describe_cluster_security_groups()
        return response['DescribeClusterSecurityGroupsResponse'][
            'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups']

    _groups().should.have.length_of(2)  # The default group already exists
    redshift.delete_cluster_security_group("my_security_group")
    _groups().should.have.length_of(1)

    # Delete invalid id
    redshift.delete_cluster_security_group.when.called_with(
        "not-a-security-group").should.throw(ClusterSecurityGroupNotFound)
def test_create_cluster_with_vpc_security_groups():
    """VPC security group ids attached at creation are reported back."""
    vpc_api = boto.connect_vpc()
    ec2_api = boto.connect_ec2()
    redshift = boto.connect_redshift()
    net = vpc_api.create_vpc("10.0.0.0/16")
    sec_group = ec2_api.create_security_group("vpc_security_group",
                                              "a group",
                                              vpc_id=net.id)
    redshift.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="******",
        master_user_password="******",
        vpc_security_group_ids=[sec_group.id],
    )
    response = redshift.describe_clusters("my_cluster")
    cluster = response['DescribeClustersResponse'][
        'DescribeClustersResult']['Clusters'][0]
    attached = [entry['VpcSecurityGroupId']
                for entry in cluster['VpcSecurityGroups']]
    list(attached).should.equal([sec_group.id])
def test_create_cluster_with_parameter_group():
    """Clusters created with a parameter group expose that group's name."""
    conn = boto.connect_redshift()
    conn.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )
    conn.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="******",
        master_user_password="******",
        cluster_parameter_group_name='my_parameter_group',
    )
    described = conn.describe_clusters("my_cluster")
    payload = described['DescribeClustersResponse']['DescribeClustersResult']
    created = payload['Clusters'][0]
    group_name = created['ClusterParameterGroups'][0]['ParameterGroupName']
    group_name.should.equal("my_parameter_group")
def test_delete_cluster_parameter_group():
    """Parameter groups can be removed; deleting an unknown name raises."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_parameter_group("my_parameter_group",
                                            "redshift-1.0",
                                            "This is my parameter group")

    def _groups():
        # Current parameter group listing.
        response = redshift.describe_cluster_parameter_groups()
        return response["DescribeClusterParameterGroupsResponse"][
            "DescribeClusterParameterGroupsResult"]["ParameterGroups"]

    _groups().should.have.length_of(2)  # The default group already exists
    redshift.delete_cluster_parameter_group("my_parameter_group")
    _groups().should.have.length_of(1)

    # Delete invalid id
    redshift.delete_cluster_parameter_group.when.called_with(
        "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound)
def test_create_cluster_subnet_group():
    """Subnet groups keep the name, description and subnet ids supplied."""
    vpc_api = boto.connect_vpc()
    net = vpc_api.create_vpc("10.0.0.0/16")
    sub_a = vpc_api.create_subnet(net.id, "10.0.0.0/24")
    sub_b = vpc_api.create_subnet(net.id, "10.0.1.0/24")

    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=[sub_a.id, sub_b.id],
    )

    described = redshift.describe_cluster_subnet_groups("my_subnet")
    payload = described['DescribeClusterSubnetGroupsResponse'][
        'DescribeClusterSubnetGroupsResult']
    created = payload['ClusterSubnetGroups'][0]
    created['ClusterSubnetGroupName'].should.equal("my_subnet")
    created['Description'].should.equal("This is my subnet group")
    members = [entry['SubnetIdentifier'] for entry in created['Subnets']]
    set(members).should.equal(set([sub_a.id, sub_b.id]))
def test_delete_cluster():
    """delete_cluster removes the cluster; deleting an unknown id raises."""
    redshift = boto.connect_redshift()
    name = 'my_cluster'
    redshift.create_cluster(
        name,
        node_type='single-node',
        master_username="******",
        master_user_password="******",
    )

    def _clusters():
        # Current cluster listing.
        return redshift.describe_clusters()['DescribeClustersResponse'][
            'DescribeClustersResult']['Clusters']

    list(_clusters()).should.have.length_of(1)
    redshift.delete_cluster(name)
    list(_clusters()).should.have.length_of(0)

    # Delete invalid id
    redshift.delete_cluster.when.called_with(
        "not-a-cluster").should.throw(ClusterNotFound)
def test_delete_cluster():
    """Deleting with a final snapshot removes the cluster but keeps one snapshot."""
    redshift = boto.connect_redshift()
    name = "my_cluster"
    snap_name = "my_snapshot"
    redshift.create_cluster(
        name,
        node_type="single-node",
        master_username="******",
        master_user_password="******",
    )

    # skip_final_cluster_snapshot=False without a snapshot id is rejected.
    redshift.delete_cluster.when.called_with(
        name, False).should.throw(AttributeError)

    def _clusters():
        return redshift.describe_clusters()["DescribeClustersResponse"][
            "DescribeClustersResult"]["Clusters"]

    list(_clusters()).should.have.length_of(1)
    redshift.delete_cluster(
        cluster_identifier=name,
        skip_final_cluster_snapshot=False,
        final_cluster_snapshot_identifier=snap_name,
    )
    list(_clusters()).should.have.length_of(0)

    snapshots = redshift.describe_cluster_snapshots()[
        "DescribeClusterSnapshotsResponse"][
        "DescribeClusterSnapshotsResult"]["Snapshots"]
    list(snapshots).should.have.length_of(1)
    assert snap_name in snapshots[0]["SnapshotIdentifier"]

    # Delete invalid id
    redshift.delete_cluster.when.called_with("not-a-cluster").should.throw(
        ClusterNotFound)
def test_delete_cluster_security_group():
    """Removing a security group shrinks the listing; unknown names raise."""
    conn = boto.connect_redshift()
    conn.create_cluster_security_group(
        "my_security_group",
        "This is my security group",
    )

    def _current_groups():
        listing = conn.describe_cluster_security_groups()
        return listing['DescribeClusterSecurityGroupsResponse'][
            'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups']

    _current_groups().should.have.length_of(2)  # The default group already exists
    conn.delete_cluster_security_group("my_security_group")
    _current_groups().should.have.length_of(1)

    # Delete invalid id
    conn.delete_cluster_security_group.when.called_with(
        "not-a-security-group").should.throw(ClusterSecurityGroupNotFound)
def test_create_cluster_in_subnet_group():
    """Clusters record the subnet group they were launched into."""
    vpc_api = boto.connect_vpc()
    net = vpc_api.create_vpc("10.0.0.0/16")
    member = vpc_api.create_subnet(net.id, "10.0.0.0/24")
    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet_group",
        "This is my subnet group",
        subnet_ids=[member.id],
    )
    redshift.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="******",
        master_user_password="******",
        cluster_subnet_group_name='my_subnet_group',
    )
    response = redshift.describe_clusters("my_cluster")
    cluster = response['DescribeClustersResponse'][
        'DescribeClustersResult']['Clusters'][0]
    cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group')
def test_create_cluster_in_subnet_group():
    """A cluster launched into a subnet group reports that group's name."""
    vpc_api = boto.connect_vpc()
    network = vpc_api.create_vpc("10.0.0.0/16")
    net_subnet = vpc_api.create_subnet(network.id, "10.0.0.0/24")
    conn = boto.connect_redshift()
    conn.create_cluster_subnet_group(
        "my_subnet_group",
        "This is my subnet group",
        subnet_ids=[net_subnet.id],
    )
    conn.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="******",
        master_user_password="******",
        cluster_subnet_group_name='my_subnet_group',
    )
    described = conn.describe_clusters("my_cluster")
    payload = described['DescribeClustersResponse']['DescribeClustersResult']
    created = payload['Clusters'][0]
    created['ClusterSubnetGroupName'].should.equal('my_subnet_group')
def test_delete_cluster():
    """A deleted cluster no longer shows up; unknown ids raise on delete."""
    conn = boto.connect_redshift()
    target = 'my_cluster'
    conn.create_cluster(
        target,
        node_type='single-node',
        master_username="******",
        master_user_password="******",
    )

    def _current_clusters():
        listing = conn.describe_clusters()
        return listing['DescribeClustersResponse'][
            'DescribeClustersResult']['Clusters']

    list(_current_clusters()).should.have.length_of(1)
    conn.delete_cluster(target)
    list(_current_clusters()).should.have.length_of(0)

    # Delete invalid id
    conn.delete_cluster.when.called_with("not-a-cluster").should.throw(
        ClusterNotFound)
def test_create_cluster_with_vpc_security_groups():
    """Clusters created with VPC security groups report those group ids."""
    vpc_api = boto.connect_vpc()
    ec2_api = boto.connect_ec2()
    conn = boto.connect_redshift()
    network = vpc_api.create_vpc("10.0.0.0/16")
    vpc_group = ec2_api.create_security_group("vpc_security_group",
                                              "a group",
                                              vpc_id=network.id)
    conn.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="******",
        master_user_password="******",
        vpc_security_group_ids=[vpc_group.id],
    )
    described = conn.describe_clusters("my_cluster")
    created = described['DescribeClustersResponse'][
        'DescribeClustersResult']['Clusters'][0]
    reported = [g['VpcSecurityGroupId'] for g in created['VpcSecurityGroups']]
    list(reported).should.equal([vpc_group.id])
# author vishwanath subramanian import boto import boto.ec2 import boto.rds import boto.emr import boto.redshift from collections import Counter import boto.ec2.cloudwatch import datetime ec2 = boto.connect_ec2() rds = boto.connect_rds2() s3 = boto.connect_s3() emr = boto.connect_emr() rs = boto.connect_redshift() cw = boto.ec2.cloudwatch.connect_to_region('us-west-1') all_running_clusters = [] all_ins_names = [] today = datetime.datetime.now() yesterday = today - datetime.timedelta(days=1) def getKey(item): return item[1] # Get all the EC2 instances running def all_instances(): reservations = ec2.get_all_instances(filters={'instance-state-code': '16'}) print '\nALL RUNNING EC2 INSTANCES'
def connect(account_name, connection_type, **args):
    """
    Examples of use:
        ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
        ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
        ec2 = sts_connect.connect(environment, 'ec2')
        where environment is ( test, prod, dev )
        s3 = sts_connect.connect(environment, 's3')
        ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    # Allow the per-account role name to override the default.
    role_name = 'SecurityMonkey'
    if account.role_name and account.role_name != '':
        role_name = account.role_name
    role = sts.assume_role(
        'arn:aws:iam::' + account.number + ':role/' + role_name, 'secmonkey')

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role.credentials.access_key,
            role.credentials.secret_key,
            token=role.credentials.session_token
        )
        return botocore_session

    if connection_type == 'ec2':
        return boto.connect_ec2(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'elb':
        if 'region' in args:
            region = args['region']
            del args['region']
        else:
            region = 'us-east-1'
        return boto.ec2.elb.connect_to_region(
            region,
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 's3':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.s3.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_s3(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'ses':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.ses.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_ses(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'iam_boto3':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token
        )
        return session.resource('iam')

    if connection_type == 'iam':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.iam.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_iam(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'route53':
        return boto.connect_route53(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sns':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sns.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_sns(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sqs':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sqs.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_sqs(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'vpc':
        # Fixed: the original had two 'vpc' branches; the first (region-less)
        # one returned early and made the region-aware branch below dead code.
        # Both behave identically when no region is supplied, so only the
        # region-aware branch is kept.
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.vpc.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_vpc(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'rds':
        if 'region' in args:
            reg = args['region']
            rds_region = None
            for boto_region in boto.rds.regions():
                if reg.name == boto_region.name:
                    rds_region = boto_region
            if rds_region is None:
                raise Exception(
                    'The supplied region {0} is not in boto.rds.regions. '
                    '{1}'.format(reg, boto.rds.regions()))
        return boto.connect_rds(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'redshift':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.redshift.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_redshift(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    err_msg = 'The connection_type supplied (%s) is not implemented.' % connection_type
    raise Exception(err_msg)
def connect(account_name, connection_type, **args):
    """
    Examples of use:
        ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
        ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
        ec2 = sts_connect.connect(environment, 'ec2')
        where environment is ( test, prod, dev )
        s3 = sts_connect.connect(environment, 's3')
        ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privileges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    role = sts.assume_role(
        'arn:aws:iam::' + account.number + ':role/SecurityMonkey', 'secmonkey')

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(
            role.credentials.access_key,
            role.credentials.secret_key,
            token=role.credentials.session_token
        )
        return botocore_session

    if connection_type == 'ec2':
        return boto.connect_ec2(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'elb':
        if 'region' in args:
            region = args['region']
            del args['region']
        else:
            region = 'us-east-1'
        return boto.ec2.elb.connect_to_region(
            region,
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 's3':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.s3.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_s3(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'ses':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.ses.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_ses(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'iam_boto3':
        session = boto3.Session(
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            aws_session_token=role.credentials.session_token
        )
        return session.resource('iam')

    if connection_type == 'iam':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.iam.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_iam(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'route53':
        return boto.connect_route53(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sns':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sns.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_sns(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sqs':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sqs.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_sqs(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'vpc':
        # Fixed: the original had two 'vpc' branches; the first (region-less)
        # one returned early and made the region-aware branch below dead code.
        # Both behave identically when no region is supplied, so only the
        # region-aware branch is kept.
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.vpc.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_vpc(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'rds':
        if 'region' in args:
            reg = args['region']
            rds_region = None
            for boto_region in boto.rds.regions():
                if reg.name == boto_region.name:
                    rds_region = boto_region
            if rds_region is None:
                raise Exception(
                    'The supplied region {0} is not in boto.rds.regions. '
                    '{1}'.format(reg, boto.rds.regions()))
        return boto.connect_rds(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'redshift':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.redshift.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)
        return boto.connect_redshift(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    err_msg = 'The connection_type supplied (%s) is not implemented.' % connection_type
    raise Exception(err_msg)
def get_entities_for_region(self, region):
    """Return the raw cluster dicts for every Redshift cluster in *region*."""
    connection = boto.connect_redshift(self.access_key_id,
                                       self.secret_access_key,
                                       region=region)
    response = connection.describe_clusters()
    result = response['DescribeClustersResponse']['DescribeClustersResult']
    return result['Clusters']