Example #1
def deploy_resources():
    rds_client = RDSClient().get_client()
    rds = RDS(rds_client)

    rds.create_postgresql_instance()

    print("Creating RDS PostgreSQL Instance...")
Example #2
def deprovision(instance_id):
    """Destroys an RDS instance.
    """
    # The deprovision endpoint supports both sync and async requests.
    # Ideally this would be async only since the operation is actually
    # async, but in at least v208 (probably some later versions as
    # well) it seems that the cloud controller does not include the
    # accepts_incomplete param in the request.  The last_operation
    # endpoint supports async deprovisions so this should "just work"
    # with either sync or async operations.  The main difference is
    # that the dynamodb reference will be removed here instead of
    # opportunistically by the last_operation endpoint.
    incompletes = bottle.request.query.getone('accepts_incomplete')
    bottle.response.content_type = 'application/json'
    dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')
    table = dynamodb.Table(name=CONFIG['dynamodb_table'])
    record = table.get_item(Key={'instance_id': instance_id})
    if 'Item' not in record.keys():
        bottle.response.status = 410
        return json.dumps({})
    record = record.pop('Item')
    record['last_operation'] = 'destroy'
    rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])
    rds.destroy_instance()
    if incompletes and incompletes.lower() == 'true':
        bottle.response.status = 202
        table.put_item(Item=record)
    else:
        bottle.response.status = 200
        table.delete_item(Key={'instance_id': instance_id})
    return json.dumps({})
Example #3
def create_from_latest_snapshot(config):
    rds = RDS(aws_id=config['aws_access_key_id'],
              aws_secret_key=config['aws_secret_access_key'],
              aws_region=config['aws_region'])
    snapshot = rds.get_latest_snapshot(
        rds_instance_id=config['rds_instance_id'])
    instance = rds.create_instace_from_snapshot(snapshot,
                                                config['rds_subnet_group'])
    return instance.id
Example #4
def create_from_latest_snapshot(config):
    rds = RDS(aws_id=config['aws_access_key_id'],
              aws_secret_key=config['aws_secret_access_key'],
              aws_region=config['aws_region'])
    snapshot = rds.get_latest_snapshot(
        rds_instance_id=config['rds_instance_id'])
    instance = rds.create_instace_from_snapshot(
        snapshot, config['rds_subnet_group'])
    return instance.id
Example #5
def deploy_resources():
    rds_client = RDSClient().get_client()
    print(f'RDS client {rds_client}')

    rds = RDS(rds_client)

    rds.create_postgresql_instance()

    print("creating RDS PostGreSQL Instance")
    def ephemeral_zombie_clean(self, args):
        k8s = K8S()
        rds = RDS(self.args)
        route53 = ROUTE53(self.args)

        namespaces = k8s.get_k8s_namespaces(pattern=self.args.target_instance)
        rds_instances = rds.get_rds_instances(
            pattern=self.args.target_instance)

        # Check if any RDS instance does not have a matching K8s namespace
        rds_zombies = []
        for rds_instance in rds_instances:
            found = False
            for ns in namespaces:
                if re.search(ns, rds_instance['instance_name']):
                    found = True
            if not found:
                rds_zombies.append(rds_instance)

        if not rds_zombies:
            logging.info("No ephemeral zombies found")
            sys.exit(0)

        logging.info(f"Found ephemeral zombies:{rds_zombies}")

        attributes = {}
        attributes['dns_suffix'] = self.args.dns_suffix
        attributes['zone_id'] = self.args.zone_id

        # Delete zombies and their associated CNAME records
        zombie_names = []
        for zombie in rds_zombies:
            ephemeral_name = zombie['instance_name'][:10]
            attributes['cname_name'] = f"{ephemeral_name}-rds"
            # Get all records for the DNS zone
            route53_records = route53.get_records_set(attributes['zone_id'])
            route53_record_name = f'{attributes["cname_name"]}.{attributes["dns_suffix"]}.'
            # Check if the record exists
            zombie_cname_record = [
                record for record in route53_records
                if record['Name'] == route53_record_name
            ]
            if zombie_cname_record:
                logging.info(
                    f'DNS record for {route53_record_name} found, deleting ...'
                )
                route53.update_dns(attributes['cname_name'],
                                   attributes['dns_suffix'],
                                   attributes['zone_id'],
                                   zombie['instance_address'],
                                   action='DELETE')
            else:
                logging.info(
                    f'DNS record for {route53_record_name} not found, skipping ...'
                )
            zombie_names.append(zombie['instance_name'])

        rds.destroy_old_instances(zombie_names)
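The ROUTE53 helper's update_dns() method is not included in the listing. A minimal sketch, assuming the class keeps a boto3 route53 client on self.client (the record type, TTL, and attribute names are assumptions):

    def update_dns(self, cname_name, dns_suffix, zone_id, value, action='UPSERT'):
        # Hypothetical sketch: apply a single CNAME change in the hosted zone.
        # For action='DELETE', Route53 requires the TTL and value to match the
        # existing record exactly.
        return self.client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Changes': [{
                    'Action': action,
                    'ResourceRecordSet': {
                        'Name': f'{cname_name}.{dns_suffix}.',
                        'Type': 'CNAME',
                        'TTL': 300,
                        'ResourceRecords': [{'Value': value}],
                    },
                }],
            },
        )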
Example #7
def update_polling(record):
    """Last operation polling logic for update action.
    """
    rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])
    details = rds.db_instance_details()
    if details['DBInstanceStatus'] != 'available':
        response = {'state': 'in progress', 'description': 'Updating service.'}
        bottle.response.status = 200
        return json.dumps(response)
    else:
        response = {'state': 'succeeded', 'description': 'Service updated.'}
        bottle.response.status = 200
        return json.dumps(response)
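db_instance_details() is assumed to return the raw instance description from the RDS API. A hedged sketch of such a method, reusing the rds_conn attribute visible in Example #16, might be:

    def db_instance_details(self):
        # Hypothetical sketch: describe this instance and return its details.
        response = self.rds_conn.describe_db_instances(
            DBInstanceIdentifier=self.DBInstanceIdentifier)
        return response['DBInstances'][0]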
Example #8
def backup_data(config):
    """
    1. Create an instance from the latest snapshot.
    2. Backup the data to s3 given an instance.
    3. Delete the instance.
    """
    instance_id = db_instance_creater.create_from_latest_snapshot(config)
    print('Created instance', instance_id)

    data_backup_creater.dump_gzip_backkup(instance_id, config)

    print('Deleting instance', instance_id)
    rds = RDS(aws_id=config['aws_access_key_id'],
              aws_secret_key=config['aws_secret_access_key'],
              aws_region=config['aws_region'])
    rds.delete_instance(instance_id)
Example #9
def destroy_polling(record):
    """Last operation polling logic for destroy action.
    """
    rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])
    if record['hostname'] in rds.get_all_identifiers():
        response = {
            'state': 'in progress',
            'description': 'Destroying service.'
        }
        bottle.response.status = 200
        return json.dumps(response)
    else:
        dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')
        table = dynamodb.Table(name=CONFIG['dynamodb_table'])
        table.delete_item(Key={'instance_id': record['instance_id']})
        bottle.response.status = 410
        return json.dumps({})
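get_all_identifiers() is not shown in the listing. A hedged sketch using a boto3 paginator (method and attribute names are assumptions):

    def get_all_identifiers(self):
        # Hypothetical sketch: list every DBInstanceIdentifier in the account.
        paginator = self.rds_conn.get_paginator('describe_db_instances')
        identifiers = []
        for page in paginator.paginate():
            identifiers.extend(
                db['DBInstanceIdentifier'] for db in page['DBInstances'])
        return identifiers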
Example #10
def dump_gzip_backkup(instance_id, config):
    rds = RDS(aws_id=config['aws_access_key_id'],
              aws_secret_key=config['aws_secret_access_key'],
              aws_region=config['aws_region'])
    instance = rds.get_instance(instance_id)
    host = rds.get_host(instance)
    compress_command = config.get('compress_command', 'gzip')
    assert compress_command in command_to_extension

    for db in config['databases']:
        s3_location = 's3://{}/{}/{}-{}.sql.{}'.format(
            config['s3_bucket'], config['s3_prefix'], db['name'],
            datetime.now().strftime('%y-%b-%d-%H-%M-%S'),
            command_to_extension[compress_command])
        print('Backing Up', db['name'], 'to', s3_location)
        create_backup(host, s3_location, db_user=db['user'],
                      db_password=db['password'], db_name=db['name'],
                      compress_command=compress_command)
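Neither command_to_extension nor create_backup appears in the listing. A rough sketch of both, assuming a PostgreSQL source and the AWS CLI for the S3 upload (the original project's commands may differ):

import os
import subprocess

# Hypothetical mapping from compression command to file extension.
command_to_extension = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz'}

def create_backup(host, s3_location, db_user, db_password, db_name,
                  compress_command='gzip'):
    # Hypothetical sketch: stream pg_dump through the compressor and into
    # "aws s3 cp -" so the dump never touches local disk.
    command = (
        f'pg_dump --host={host} --username={db_user} --dbname={db_name} '
        f'| {compress_command} '
        f'| aws s3 cp - {s3_location}'
    )
    subprocess.run(command, shell=True, check=True,
                   env={**os.environ, 'PGPASSWORD': db_password})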
Example #11
def update(instance_id):
    updateable_params = ('AllocatedStorage',)
    incompletes = bottle.request.query.getone('accepts_incomplete')
    bottle.response.content_type = 'application/json'
    if incompletes is None:
        return _abort_async_required()
    data = json.loads(bottle.request.body.read())
    for param in data['parameters'].keys():
        if param not in updateable_params:
            bottle.response.status = 400
            msg = 'Updating of {0} is not supported'.format(param)
            return json.dumps({'description': msg})
    dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')
    table = dynamodb.Table(name=CONFIG['dynamodb_table'])
    record = table.get_item(Key={'instance_id': instance_id})
    if 'Item' not in record.keys():
        bottle.response.status = 410
        return json.dumps({})
    else:
        record = record.pop('Item')
    rds = RDS(DBInstanceIdentifier=record['hostname'], **CONFIG['aws'])
    details = rds.db_instance_details()
    if data['parameters']['AllocatedStorage'] <= details['AllocatedStorage']:
        bottle.response.status = 400
        return json.dumps({
            'description': 'Decreasing AllocatedStorage is not supported.'
        })
    rds.update_instance(
        DBInstanceIdentifier=record['hostname'],
        **data['parameters']
    )
    for _ in range(10):
        details = rds.db_instance_details()
        if details['DBInstanceStatus'] != 'available':
            break
        sleep(5)
    else:
        bottle.response.status = 408
        return json.dumps({})
    record['last_operation'] = 'update'
    record['parameters'] = data['parameters']
    table.put_item(Item=record)
    bottle.response.status = 202
    return json.dumps({})
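_abort_async_required() is referenced in Examples #11 and #15 but not shown. Under the usual service-broker convention for async-only operations it would presumably look roughly like this (status code and error body are assumptions):

def _abort_async_required():
    # Hypothetical sketch: reject requests that do not accept async operations.
    bottle.response.status = 422
    return json.dumps({
        'error': 'AsyncRequired',
        'description': 'This service plan requires client support for '
                       'asynchronous service operations.'
    })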
Example #12
def dump_gzip_backkup(instance_id, config):
    rds = RDS(aws_id=config['aws_access_key_id'],
              aws_secret_key=config['aws_secret_access_key'],
              aws_region=config['aws_region'])
    instance = rds.get_instance(instance_id)
    host = rds.get_host(instance)
    compress_command = config.get('compress_command', 'gzip')
    assert compress_command in command_to_extension

    for db in config['databases']:
        s3_location = 's3://{}/{}/{}-{}.sql.{}'.format(
            config['s3_bucket'], config['s3_prefix'], db['name'],
            datetime.now().strftime('%y-%b-%d-%H-%M-%S'),
            command_to_extension[compress_command])
        print('Backing Up', db['name'], 'to', s3_location)
        create_backup(host,
                      s3_location,
                      db_user=db['user'],
                      db_password=db['password'],
                      db_name=db['name'],
                      compress_command=compress_command)
Example #13
def get_values(event):
    region = event.get('region')

    modules_process = []
    
    if 'custom' not in event:
        beanstalk = Beanstalk()
        modules_process.append(beanstalk.run(region))

        ec2 = EC2()
        modules_process.append(ec2.run(region))

        rds = RDS()
        modules_process.append(rds.run(region))

        ssm = SSM()
        modules_process.append(ssm.run(region))

        ses = SES()
        modules_process.append(ses.run(region))

        elasticache = Elasticache()
        modules_process.append(elasticache.run(region))
    else:
        data = str(event['custom'])

        if 'BEANSTALK' in data:
            beanstalk = Beanstalk()
            modules_process.append(beanstalk.run(region))

        if 'INSTANCES' in data:
            ec2 = EC2()
            modules_process.append(ec2.run(region))

        if 'SSM' in data:
            ssm = SSM()
            modules_process.append(ssm.run(region))

        if 'ELASTICACHE' in data:
            elasticache = Elasticache()
            modules_process.append(elasticache.run(region))

        if 'RDS' in data:
            rds = RDS()
            modules_process.append(rds.run(region))

        if 'SES' in data:
            ses = SES()
            modules_process.append(ses.run(region))


    start_all(modules_process)
    wait_all(modules_process)
    ret_dict = read_all(modules_process)

    return ret_dict
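start_all(), wait_all(), and read_all() are not part of the listing. A minimal sketch, assuming each module's run(region) returns an object exposing start(), join(), and read() (all assumed names):

def start_all(processes):
    for proc in processes:
        proc.start()

def wait_all(processes):
    for proc in processes:
        proc.join()

def read_all(processes):
    # Each read() is assumed to return a dict of results for that module.
    merged = {}
    for proc in processes:
        merged.update(proc.read())
    return merged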
Example #14
from rds import RDS
from decouple import config

rds_client = RDS.create_connection()

# AWS POSTGRES DB
db_name = config("DBNAME")
user = config("USER")
endpoint = config("ENTPOIND")
port = "5432"

rds_token = RDS.get_rds_token(rds_client, endpoint, port, user)
RDS.create_instance(rds_client)
RDS.connect_with_postgres(rds_token, db_name, endpoint, port, user)
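The RDS helper in this example wraps IAM database authentication. A hedged sketch of get_rds_token() and connect_with_postgres(), written here as plain functions and assuming boto3 plus psycopg2:

import psycopg2

def get_rds_token(rds_client, endpoint, port, user):
    # boto3 can mint a short-lived IAM authentication token for the DB user.
    return rds_client.generate_db_auth_token(
        DBHostname=endpoint, Port=port, DBUsername=user)

def connect_with_postgres(token, db_name, endpoint, port, user):
    # IAM authentication requires SSL; the token is used as the password.
    return psycopg2.connect(host=endpoint, port=port, database=db_name,
                            user=user, password=token, sslmode='require')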
Example #15
def provision(instance_id):
    """Provisions an RDS instance.
    """
    # TODO: Break this up into more maintainable chunks.
    if bottle.request.content_type != 'application/json':
        bottle.abort(
            415,
            'Unsupported Content-Type: expecting application/json'
        )
    incompletes = bottle.request.query.getone('accepts_incomplete')
    bottle.response.content_type = 'application/json'
    if incompletes is None:
        return _abort_async_required()
    if incompletes.lower() == 'true':
        data = json.loads(bottle.request.body.read())
        for plan in CONFIG['plan_settings']:
            if plan['id'] == data['plan_id']:
                plan_params = dict(plan)
                # Remove the id value from the params so we can just
                # pass the whole dict along to the RDS class.
                del plan_params['id']
                break
        else:
            bottle.response.status = 400
            return json.dumps({'description': 'Plan ID does not exist'})
        rds = RDS(**CONFIG['aws'])
        # Update the rds class instance with the parameters for the
        # plan as defined in the configuration.
        rds.__dict__.update(plan_params)
        # Parse and use extra parameters that have been passed in by
        # the user.
        #
        # TODO: Move allowed_params to config so operator can determine
        #       what they want to allow.
        allowed_params = ['DBName', 'AllocatedStorage']
        if CONFIG['deploy_from_snapshots'] is True:
            allowed_params.append('DBSnapshotIdentifier')
        if 'parameters' in data.keys():
            user_params = dict([
                (k, v) for (k, v) in data['parameters'].items()
                if k in allowed_params
            ])
            rds.__dict__.update(user_params)
        else:
            user_params = {}
        params_to_update = {}
        rds.DBInstanceIdentifier = '-'.join([rds.Engine.lower(), instance_id])
        rds.MasterUserPassword = utils.random_string()
        if rds.DBSnapshotIdentifier is None:
            last_operation = 'create'
            source_snapshot = 'NONE'
            step = 'NONE'
            first_char = random.choice(string.ascii_letters)
            rds.MasterUsername = ''.join([
                first_char,
                utils.random_string(15)
            ])
            rds.create_instance()
        else:
            last_operation = 'create_from_snapshot'
            source_snapshot = rds.DBSnapshotIdentifier
            step = 'deploy'
            try:
                snapshot_metadata = rds.snapshot_metadata()
            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == 'DBSnapshotNotFound':
                    bottle.response.status = 400
                    return json.dumps(
                        {'description': 'Invalid snapshot identifier'}
                    )
                else:
                    raise
            if snapshot_metadata['Engine'] != rds.Engine.lower():
                bottle.response.status = 400
                return json.dumps(
                    {'description': 'Database engine in snapshot differs from '
                                    'database engine in plan settings.'}
                )
            rds.MasterUsername = snapshot_metadata['MasterUsername']
            rds.Port = snapshot_metadata['Port']
            # If the user is requesting a bigger disk than the snapshot
            # was generated from store this parameter so we can change
            # it during the modify operation after initial provisioning.
            if rds.AllocatedStorage > snapshot_metadata['AllocatedStorage']:
                params_to_update['AllocatedStorage'] = rds.AllocatedStorage
            if rds.StorageType != snapshot_metadata['StorageType']:
                params_to_update['StorageType'] = rds.StorageType
            # When deploying from a snapshot the security group is always
            # set to the default security group.  The only way to change
            # it is to modify the instance after provisioning is done.
            # If the security group IDs are provided then they take
            # precedence over the named security groups.  If named
            # groups are provided they will be validated now before the
            # instance is created and stored to be applied after the
            # instance is done with initial bootstrapping.
            if rds.VpcSecurityGroupIds:
                params_to_update['VpcSecurityGroupIds'] = rds.VpcSecurityGroupIds
            else:
                group_ids = rds.validate_security_groups()
                if group_ids[0]:
                    params_to_update['VpcSecurityGroupIds'] = group_ids[1]
                else:
                    bottle.response.status = 400
                    return json.dumps(
                        {'description': 'Invalid AWS security group id'}
                    )
            rds.create_from_snapshot()
        iv = utils.Crypt.generate_iv()
        credentials = {
            'username': rds.MasterUsername,
            'password': rds.MasterUserPassword,
            'hostname': '',
            'port': rds.Port,
            'db_name': rds.DBName,
            'uri': '',
        }
        with utils.Crypt(iv=iv, key=CONFIG['encryption_key']) as c:
            creds = c.encrypt(json.dumps(credentials))
        dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')
        table = dynamodb.Table(name=CONFIG['dynamodb_table'])
        record = {
            'instance_id': instance_id,
            'iv': iv,
            'hostname': rds.DBInstanceIdentifier,
            'credentials': creds,
            'engine': rds.Engine,
            'binding_ids': [],
            'parameters': user_params,
            'last_operation': last_operation,
            'source_snapshot': source_snapshot,
            'step': step,
            'params_to_update': params_to_update
        }
        record.update(data)
        table.put_item(Item=record)
    else:
        return _abort_async_required()
    bottle.response.status = 202
    return json.dumps({"dashboard_url": ""})
Example #16
def create_from_snapshot_polling(record):
    """Last operation polling logic for create_from_snaphost action.
    """
    bottle.response.content_type = 'application/json'
    try:
        dynamodb = utils.boto3_session(**CONFIG['aws']).resource('dynamodb')
        table = dynamodb.Table(name=CONFIG['dynamodb_table'])
        rds = RDS(name=record['hostname'], **CONFIG['aws'])
        filters = {'DBInstanceIdentifier': record['hostname']}
        details = rds.rds_conn.describe_db_instances(**filters)
        details = details['DBInstances'][0]
    except botocore.exceptions.ClientError as e:
        # This exception will be raised if nothing matches the filter.
        if e.response['Error']['Code'] == 'DBInstanceNotFound':
            bottle.response.status = 410
            return json.dumps({})
        else:
            raise
    if record['step'] == 'deploy':
        if details['DBInstanceStatus'] == 'available':
            params = {'DBInstanceIdentifier': record['hostname']}
            params.update(record['params_to_update'])
            # Get the new password to pass along for modify.
            with utils.Crypt(iv=record['iv'],
                             key=CONFIG['encryption_key']) as c:
                creds = json.loads(c.decrypt(record['credentials']))
            params['MasterUserPassword'] = creds['password']
            rds.update_instance(**params)
            record['step'] = 'modify'
            table.put_item(Item=record)
        msg = ('RDS Instance is currently in the {0} '
               'state.'.format(details['DBInstanceStatus']))
        response = {'state': 'in progress', 'description': msg}
        bottle.response.status = 200
        return json.dumps(response)
    elif record['step'] == 'modify':
        if details['DBInstanceStatus'] == 'available':
            with utils.Crypt(iv=record['iv'],
                             key=CONFIG['encryption_key']) as c:
                creds = json.loads(c.decrypt(record['credentials']))
            creds['hostname'] = details['Endpoint']['Address']
            uri = '{0}://{1}:{2}@{3}:{4}/{5}'.format(
                details['Engine'].lower(),
                creds['username'],
                creds['password'],
                creds['hostname'],
                creds['port'],
                creds['db_name']
            )
            creds['uri'] = uri
            with utils.Crypt(iv=record['iv'],
                             key=CONFIG['encryption_key']) as c:
                creds = c.encrypt(json.dumps(creds))
            record['credentials'] = creds
            record['step'] = 'complete'
            table.put_item(Item=record)
        msg = ('RDS Instance is currently in the {0} '
               'state.'.format(details['DBInstanceStatus']))
        response = {'state': 'in progress', 'description': msg}
        bottle.response.status = 200
        return json.dumps(response)
    elif record['step'] == 'complete':
        response = {'state': 'succeeded', 'description': 'Service Created.'}
        bottle.response.status = 200
        return json.dumps(response)
    else:
        msg = 'The instance failed to provision'
        response = {'state': 'failed', 'description': msg}
        bottle.response.status = 200
        return json.dumps(response)
Example #17
def get_rds():
    rds_client = RDSClient().get_client()
    rds = RDS(rds_client)
    return rds