def delete(self, resource):
    """Delete the given RDS DB instance (skipping the final snapshot).

    No-op in dry-run mode.  An instance already in DELETION_STATUS is
    skipped with a warning instead of being deleted again.
    """
    if self.dry_run:
        return
    if resource.wrapped["DBInstanceStatus"] == DELETION_STATUS:
        warnings.warn(Warning(SKIPPING_DELETION_STATEMENT))
        # Bug fix: previously fell through and issued the delete call
        # even though the "skipping" warning was emitted.
        return
    self.logger.info(DELETION_STATEMENT % resource.wrapped["DBInstanceIdentifier"])
    connection = rds2.connect_to_region(resource.region)
    connection.delete_db_instance(resource.wrapped["DBInstanceIdentifier"],
                                  skip_final_snapshot=True)
def delete(self, resource):
    """Delete an RDS DB instance and return the API's DBInstance payload.

    Returns None in dry-run mode or when the instance is already being
    deleted (a skip message is logged in that case).
    """
    if self.dry_run:
        return
    instance = resource.wrapped
    if instance["DBInstanceStatus"] == DELETION_STATUS:
        self.logger.info(SKIPPING_DELETION_STATEMENT)
        return
    identifier = instance["DBInstanceIdentifier"]
    self.logger.info(DELETION_STATEMENT % identifier)
    conn = rds2.connect_to_region(resource.region)
    reply = conn.delete_db_instance(identifier, skip_final_snapshot=True)
    return reply["DeleteDBInstanceResponse"]["DeleteDBInstanceResult"]["DBInstance"]
def fetch_unwanted_resources(self):
    """Yield a Resource wrapper for every DB snapshot in all configured
    regions, skipping (and logging) identifiers in ignored_resources."""
    for region in self.regions:
        conn = rds2.connect_to_region(region.name)
        payload = conn.describe_db_snapshots() or []
        snapshots = payload["DescribeDBSnapshotsResponse"]["DescribeDBSnapshotsResult"]["DBSnapshots"]
        for snapshot in snapshots:
            wrapper = Resource(snapshot, region.name)
            if snapshot['DBSnapshotIdentifier'] in self.ignored_resources:
                self.logger.info('IGNORE ' + self.to_string(wrapper))
                continue
            yield wrapper
def delete(self, resource):
    """Delete the given RDS DB snapshot.

    No-op in dry-run mode.  Snapshots that are already being deleted,
    are still being created, or are automated snapshots are skipped
    with a warning instead of being deleted.
    """
    if self.dry_run:
        return
    if resource.wrapped["Status"] == DELETION_STATUS:
        warnings.warn(Warning(SKIPPING_DELETION_STATEMENT))
        # Bug fix: each guard previously warned but fell through to the
        # delete call anyway; they must abort the deletion.
        return
    if resource.wrapped["Status"] == CREATION_STATUS:
        warnings.warn(Warning(SKIPPING_CREATION_STATEMENT))
        return
    if resource.wrapped["SnapshotType"] == AUTOMATED_STATUS:
        warnings.warn(Warning(SKIPPING_AUTOGENERATED_STATEMENT))
        return
    self.logger.info(DELETION_STATEMENT % resource.wrapped["DBSnapshotIdentifier"])
    connection = rds2.connect_to_region(resource.region)
    connection.delete_db_snapshot(resource.wrapped["DBSnapshotIdentifier"])
def fetch_unwanted_resources(self):
    """Yield a Resource wrapper for every DB snapshot in the configured
    region names, skipping (and logging) ignored identifiers."""
    for name in self.region_names:
        conn = rds2.connect_to_region(name)
        payload = conn.describe_db_snapshots() or []
        for snap in payload["DescribeDBSnapshotsResponse"]["DescribeDBSnapshotsResult"]["DBSnapshots"]:
            ident = snap["DBSnapshotIdentifier"]
            wrapper = Resource(resource=snap,
                               resource_type=self.resource_type,
                               resource_id=ident,
                               creation_date=snap["SnapshotCreateTime"],
                               region=name)
            if ident in self.ignored_resources:
                self.logger.info('IGNORE ' + self.to_string(wrapper))
                continue
            yield wrapper
def delete(self, resource):
    """Delete an RDS DB snapshot and return the API's DBSnapshot payload.

    Returns None in dry-run mode, or when the snapshot is currently being
    deleted or created, or when it is an automated snapshot (a skip
    message is logged in each of those cases).
    """
    if self.dry_run:
        return
    snapshot = resource.wrapped
    # Guard table, checked in priority order; first hit logs and aborts.
    guards = (
        (snapshot["Status"] == DELETION_STATUS, SKIPPING_DELETION_STATEMENT),
        (snapshot["Status"] == CREATION_STATUS, SKIPPING_CREATION_STATEMENT),
        (snapshot["SnapshotType"] == AUTOMATED_STATUS, SKIPPING_AUTOGENERATED_STATEMENT),
    )
    for hit, message in guards:
        if hit:
            self.logger.info(message)
            return
    ident = snapshot["DBSnapshotIdentifier"]
    self.logger.info(DELETION_STATEMENT % ident)
    conn = rds2.connect_to_region(resource.region)
    reply = conn.delete_db_snapshot(ident)
    return reply["DeleteDBSnapshotResponse"]["DeleteDBSnapshotResult"]["DBSnapshot"]
def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' # Hack to get the AWS account id. # Amazon does not provide any easy way to get it. ec2_conn = ec2.connect_to_region(region) sg = ec2_conn.get_all_security_groups() account_id = sg[0].owner_id try: conn = rds.connect_to_region(region) conn2 = rds2.connect_to_region(region) # To get RDS tags. if conn: instances = conn.get_all_dbinstances() for instance in instances: # NOTE: Boto 2.27.0 (latest as of 3/24/2014) # is not able to get tags from RDS instances # in a way like it can get EC2 tags. # Until there is a better solution, the following works. # Construct the ARN for this RDS instance, # so we can get its tags. arn = ':'.join([ 'arn', 'aws', 'rds', region, account_id, 'db', instance.id ]) # Get its raw tagset and # standardize it to the way Boto presents # EC2 tags. tagset = conn2.list_tags_for_resource(arn)['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList'] instance.tags = {tag['Key']: tag['Value'] for tag in tagset} self.add_rds_instance(instance, region) except boto.exception.BotoServerError, e: if not e.reason == "Forbidden": print "Looks like AWS RDS is down: " print e sys.exit(1)
def __init__(self, name, platform, env, region, db_instance_class=None,
             engine_major_version="5.6", storage_size=10, iops=None,
             multi_az=False, destroy_confirmation=True):
    """Capture the settings for an RDS DB instance and open an RDS
    connection for its region.

    The instance name is derived as "<name>-<platform>-<env>"; prod
    environments are forced to Multi-AZ regardless of the multi_az flag.
    """
    self.name = name
    self.platform = platform
    self.env = env
    self.db_instance_name = "%s-%s-%s" % (self.name, self.platform, self.env)
    self.region = region
    self.db_instance_class = db_instance_class
    self.connection = rds.connect_to_region(self.region)
    self.engine_major_version = engine_major_version
    self.storage_size = storage_size
    self.iops = iops
    # Pinned engine point release.
    self.engine_version = "5.6.13"
    # Production always runs Multi-AZ.
    self.multi_az = True if self.env == "prod" else multi_az
    self.db_name = "tmp"
    self.tags = [
        ("Name", self.name),
        ("Platform", self.platform),
        ("Environment", self.env),
    ]
def __init__(self, region, **kwargs):
    """Remember the target region and options, then open an RDS (rds2)
    connection and an EC2 client for that region."""
    self._options = kwargs
    self._region = region
    self._connection = rds2.connect_to_region(region)
    self._ec2 = EC2Client(region)
def _main():
    """Download all log files of an RDS instance into an output directory.

    Driven by optparse: -i/--instance is required; -m filters log names by
    regexp; -l sets the initial chunk size in lines; -s skips files that
    already exist locally even when their size differs.  Each file is
    fetched chunk-by-chunk via download_db_log_file_portion, shrinking the
    chunk size when the API truncates a segment and sleeping/retrying on
    JSON error responses.
    """
    usage = "usage: %prog -i my-instance-id"
    parser = OptionParser(usage=usage, description="")
    parser.add_option("-d", "--debug", action="store_true", dest="debug", help="Turn on debug logging")
    parser.add_option("-q", "--quiet", action="store_true", dest="quiet", help="turn off all logging")
    parser.add_option("-i", "--instance", action="store", dest="instance", default=None, help="instance name")
    parser.add_option("-o", "--output", action="store", dest="output_dir", default="./", help="output directory")
    parser.add_option("-r", "--region", action="store", dest="region", default="us-east-1", choices=AWS_REGIONS, help="AWS region")
    parser.add_option("-m", "--match", action="store", dest="logfile_match", help="Only download logs matching regexp")
    parser.add_option(
        "-l", "--lines", action="store", type="int", dest="lines",
        help="Initial number of lines to request per chunk. Number of lines will be reduced if logs get truncated.",
        default=1000)
    parser.add_option("-s", "--skip-existing", action="store_true", dest="skip_existing", help="Skip existing files, even if size doesnt match")
    (options, args) = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if options.debug else (
        logging.ERROR if options.quiet else logging.INFO))
    if not options.instance:
        logging.error("Instance parameter is required")
        sys.exit(-1)
    if not os.path.exists(options.output_dir):
        os.mkdir(options.output_dir)
    connection = rds2.connect_to_region(options.region)
    # Enumerate the instance's log files, then fetch each one.
    response = connection.describe_db_log_files(options.instance)
    logfiles = response['DescribeDBLogFilesResponse'][
        'DescribeDBLogFilesResult']['DescribeDBLogFiles']
    for log in logfiles:
        logging.debug(log)
        logfilename = log['LogFileName']
        lines = options.lines
        if options.logfile_match is not None and not re.search(
                options.logfile_match, logfilename):
            logging.info("Skipping " + logfilename)
            continue
        destination = os.path.join(options.output_dir,
                                   os.path.basename(logfilename))
        if os.path.exists(destination):
            statinfo = os.stat(destination)
            # A size match (or --skip-existing) means the local copy is
            # assumed complete; otherwise remove it and re-download.
            if statinfo.st_size == log['Size'] or options.skip_existing:
                logging.info("File %s exists, skipping" % (logfilename))
                continue
            else:
                logging.info(
                    "Log file %s exists, but size does not match, redownloading."
                    % (logfilename))
                logging.info("Local files size %d expected size:%d" %
                             (statinfo.st_size, log['Size']))
                os.remove(destination)
        chunk = 0
        with open(destination, "w") as f:
            more_data = True
            marker = "0"
            while more_data:
                logging.info("requesting %s marker:%s chunk:%i" %
                             (logfilename, marker, chunk))
                try:
                    response = connection.download_db_log_file_portion(
                        options.instance, logfilename, marker=marker,
                        number_of_lines=lines)
                except JSONResponseError as e:
                    # Transient API error: wait and retry the same marker.
                    logging.error(
                        "Received error reponse, sleeping for 60 seconds")
                    logging.error(e)
                    sleep(60)
                    continue
                result = response['DownloadDBLogFilePortionResponse'][
                    'DownloadDBLogFilePortionResult']
                logging.info(
                    "AdditionalDataPending:%s Marker:%s" %
                    (str(result['AdditionalDataPending']), result['Marker']))
                if 'LogFileData' in result and result[
                        'LogFileData'] is not None:
                    if result['LogFileData'].endswith(
                            "[Your log message was truncated]\n"):
                        # Chunk too big for the API: shrink by 10% of the
                        # original request size and retry the same marker.
                        logging.info("Log segment was truncated")
                        if lines > options.lines * 0.1:
                            lines -= int(options.lines * 0.1)
                            logging.info("retrying with %i lines" % lines)
                            continue
                    f.write(result['LogFileData'])
                else:
                    logging.error("No LogFileData for file:%s" %
                                  (logfilename))
                more_data = 'AdditionalDataPending' in result and result[
                    'AdditionalDataPending']
                if 'Marker' in result:
                    marker = result['Marker']
                chunk += 1
                # Drop the (possibly large) payload before debug-logging
                # the remaining metadata.  NOTE(review): raises KeyError if
                # 'LogFileData' was absent from the response — confirm the
                # API always includes the key.
                del result['LogFileData']
                logging.debug(result)
# NOTE(review): fragment of a method whose signature is outside this chunk;
# assumes `acct_name`, `service`, and `region` are in scope — confirm.
# Reset AWS-related environment state before loading fresh credentials.
clean_env()
# Credential file convention: $AWS_CRED_DIR/<acct>/<acct>.ini
self.conf = '%s/%s/%s.ini' % (os.getenv('AWS_CRED_DIR'), acct_name, acct_name)
try:
    boto.config.load_credential_file(self.conf)
except IOError, msg:
    # Python 2 except/print syntax.
    print >> sys.stderr, 'ERROR: %s' % msg
    return False
# Open a connection for the requested boto service in the given region.
if service == 's3':
    self.conn = s3.connect_to_region(region)
if service == 'ec2':
    self.conn = ec2.connect_to_region(region)
if service == 'rds':
    self.conn = rds.connect_to_region(region)
if service == 'rds2':
    self.conn = rds2.connect_to_region(region)
if service == 'elb':
    self.conn = elb.connect_to_region(region)
if service == 'sqs':
    self.conn = sqs.connect_to_region(region)
if service == 'emr':
    self.conn = emr.connect_to_region(region)
if service == 'route53':
    self.conn = route53.connect_to_region(region)
if service == 'iam':
    # IAM is global; boto addresses it via the pseudo-region 'universal'.
    self.conn = iam.connect_to_region('universal')
if not self.conn:
    print >> sys.stderr, 'ERROR: Unknown service'
    return False
return self.conn
from boto.rds2 import connect_to_region

import settings

# NOTE(review): `sys` is used below but not imported in this chunk; the
# import presumably appears elsewhere in the file — confirm.
# Usage: python <script> <account> <group name 1> <group name 2>
if len(sys.argv) < 4:
    print 'usage: python %s <account> <group name 1> <group name 2>' % sys.argv[0]
    sys.exit(2)

account = sys.argv[1].lower()
first_group_name = sys.argv[2]
second_group_name = sys.argv[3]

account_info = settings.ACCOUNT_INFO[account]
region_name = account_info.get('region', 'us-east-1')
# Account credentials/options are passed straight through as kwargs.
conn = connect_to_region(region_name, **account_info)

def get_group_params(group_name):
    """Return every parameter of the DB parameter group as a dict keyed
    by ParameterName, following pagination via the response Marker."""
    params = {}
    marker = None
    while True:
        group = conn.describe_db_parameters(group_name, marker=marker)['DescribeDBParametersResponse']['DescribeDBParametersResult']
        marker, parameters = group['Marker'], group['Parameters']
        for p in parameters:
            params[p['ParameterName']] = p
        if not marker:
            # No Marker means this was the last page.
            break
    return params

first_params = get_group_params(first_group_name)
import settings

# NOTE(review): `connect_to_region`, `DBParameterGroupNotFound` and `sys`
# are used below but not imported in this chunk — presumably imported
# elsewhere in the file (e.g. from boto.rds2); confirm.
# Usage: python <script> <account> <old group> <new group> [<family> [<desc>]]
if len(sys.argv) < 4:
    print 'usage: python %s <account> <old group name> <new group name> [<new family> [<new description>]]' % sys.argv[0]
    sys.exit(2)

account = sys.argv[1].lower()
old_group_name = sys.argv[2]
new_group_name = sys.argv[3]
# Optional positional overrides; fall back to the old group's values below.
new_family = sys.argv[4] if len(sys.argv) >= 5 else None
description = sys.argv[5] if len(sys.argv) >= 6 else None

account_info = settings.ACCOUNT_INFO[account]
region_name = account_info.get('region', 'us-east-1')
conn = connect_to_region(region_name)

# Copy family/description from the old group when not given explicitly.
old_group = conn.describe_db_parameter_groups(old_group_name)['DescribeDBParameterGroupsResponse']['DescribeDBParameterGroupsResult']['DBParameterGroups'][0]
new_family = new_family or old_group['DBParameterGroupFamily']
description = description or old_group['Description']

# Recreate the target group from scratch: delete it first if it exists.
try:
    conn.delete_db_parameter_group(new_group_name)
except DBParameterGroupNotFound:
    pass
new_group = conn.create_db_parameter_group(new_group_name, new_family, description)

params_to_update = []
marker = None
while True:
    # Paginated fetch of the old group's parameters (the loop body
    # continues beyond this chunk of the file).
    group = conn.describe_db_parameters(old_group_name, marker=marker)['DescribeDBParametersResponse']['DescribeDBParametersResult']
def _main():
    """Download all log files of an RDS instance into an output directory.

    Driven by optparse: -i/--instance is required; -m filters log names by
    regexp; -l sets the initial chunk size in lines.  Each file is fetched
    chunk-by-chunk via download_db_log_file_portion, shrinking the chunk
    size when the API reports a truncated segment.
    """
    usage = "usage: %prog -i my-instance-id"
    parser = OptionParser(usage=usage, description="")
    parser.add_option("-d", "--debug", action="store_true", dest="debug", help="Turn on debug logging")
    parser.add_option("-q", "--quiet", action="store_true", dest="quiet", help="turn off all logging")
    parser.add_option("-i", "--instance", action="store", dest="instance", default=None, help="instance name")
    parser.add_option("-o", "--output", action="store", dest="output_dir", default="./", help="output directory")
    parser.add_option("-r", "--region", action="store", dest="region", default="us-east-1", choices=AWS_REGIONS, help="AWS region")
    parser.add_option("-m", "--match", action="store", dest="logfile_match", help="Only download logs matching regexp")
    parser.add_option("-l", "--lines", action="store", type="int", dest="lines", help="Initial number of lines to request per chunk. Number of lines will be reduced if logs get truncated.", default=1000)
    (options, args) = parser.parse_args()
    logging.basicConfig(level=logging.DEBUG if options.debug else (logging.ERROR if options.quiet else logging.INFO))
    if not options.instance:
        logging.error("Instance parameter is required")
        sys.exit(-1)
    if not os.path.exists(options.output_dir):
        os.mkdir(options.output_dir)
    connection = rds2.connect_to_region(options.region)
    # Enumerate the instance's log files, then fetch each one.
    response = connection.describe_db_log_files(options.instance)
    logfiles = response['DescribeDBLogFilesResponse']['DescribeDBLogFilesResult']['DescribeDBLogFiles']
    for log in logfiles:
        logging.debug(log)
        logfilename = log['LogFileName']
        lines = options.lines
        if options.logfile_match is not None and not re.search(options.logfile_match, logfilename):
            logging.info("Skipping " + logfilename)
            continue
        destination = os.path.join(options.output_dir, os.path.basename(logfilename))
        if os.path.exists(destination):
            statinfo = os.stat(destination)
            # A size match means the local copy is assumed complete;
            # otherwise remove it and re-download.
            if statinfo.st_size == log['Size']:
                logging.info("File %s exists, skipping" % (logfilename))
                continue
            else:
                logging.info("Log file %s exists, but size does not match, redownloading." % (logfilename))
                logging.info("Local files size %d expected size:%d" % (statinfo.st_size, log['Size']))
                os.remove(destination)
        chunk = 0
        with open(destination, "wb") as f:
            more_data = True
            marker = "0"
            while more_data:
                logging.info("requesting %s marker:%s chunk:%i" % (logfilename, marker, chunk))
                response = connection.download_db_log_file_portion(options.instance, logfilename, marker=marker, number_of_lines=lines)
                result = response['DownloadDBLogFilePortionResponse']['DownloadDBLogFilePortionResult']
                logging.info("AdditionalDataPending:%s Marker:%s" % (str(result['AdditionalDataPending']), result['Marker']))
                if 'LogFileData' in result and result['LogFileData'] is not None:
                    if result['LogFileData'].endswith("[Your log message was truncated]\n"):
                        # Chunk too big for the API: shrink by 10% of the
                        # original request size and retry the same marker.
                        logging.info("Log segment was truncated")
                        if lines > options.lines * 0.1:
                            lines -= int(options.lines * 0.1)
                            logging.info("retrying with %i lines" % lines)
                            continue
                    f.write(result['LogFileData'])
                else:
                    logging.error("No LogFileData for file:%s" % (logfilename))
                more_data = 'AdditionalDataPending' in result and result['AdditionalDataPending']
                if 'Marker' in result:
                    marker = result['Marker']
                chunk += 1
                # Drop the (possibly large) payload before debug-logging
                # the remaining metadata.  NOTE(review): raises KeyError if
                # 'LogFileData' was absent from the response — confirm.
                del result['LogFileData']
                logging.debug(result)
DEFAULT_REGION = 'us-east-1'
region = DEFAULT_REGION

# Positional arguments
# 1. AWS region
if sys.argv[1:]:
    region = sys.argv.pop(1)

# the ARN is the RDS identifier to use with rds2 objects
# but, we'll use the Name tag to filter
rds_names = list(sys.argv[1:])

regions = rds2.regions()
rds = rds2.connect_to_region(region)

# Bug fix: the original `if rds_arns: ... else ...` referenced the
# undefined name `rds_arns` (only `rds_names` exists) and the `else` was
# missing its colon (SyntaxError).  Both branches issued the identical
# call, so collapse them into one.
dbs = rds.describe_db_instances()

for instance in dbs['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']:
    # instance['DBInstanceIdentifier']
    # instance['DBInstanceStatus']
    # instance['Endpoint']
    force_tagging = False