def __get_connection_SNS():
    """ Ensure connection to SNS """
    try:
        if get_global_option("aws_access_key_id") and get_global_option("aws_secret_access_key"):
            logger.debug(
                "Authenticating to SNS using "
                "credentials in configuration file")
            connection = sns.connect_to_region(
                get_global_option("region"),
                aws_access_key_id=get_global_option("aws_access_key_id"),
                aws_secret_access_key=get_global_option("aws_secret_access_key"),
            )
        else:
            try:
                logger.debug("Authenticating to SNS using EC2 instance profile")
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = sns.connect_to_region(
                    metadata["placement"]["availability-zone"][:-1],
                    profile_name=metadata["iam"]["info"][u"InstanceProfileArn"],
                )
            except KeyError:
                logger.debug(
                    "Authenticating to SNS using "
                    "env vars / boto configuration")
                connection = sns.connect_to_region(get_global_option("region"))
    except Exception as err:
        logger.error("Failed connecting to SNS: {0}".format(err))
        logger.error(
            "Please report an issue at: "
            "https://github.com/sebdah/dynamic-dynamodb/issues")
        raise

    logger.debug("Connected to SNS in {0}".format(get_global_option("region")))
    return connection

def __is_table_maintenance_window(table_name, maintenance_windows):
    """ Checks that the current time is within the maintenance window

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type maintenance_windows: str
    :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within maintenance window
    """
    # Example string '00:00-01:00,10:00-11:00'
    maintenance_window_list = []
    for window in maintenance_windows.split(','):
        try:
            start, end = window.split('-', 1)
        except ValueError:
            logger.error(
                '{0} - Malformatted maintenance window'.format(table_name))
            return False

        maintenance_window_list.append((start, end))

    now = datetime.datetime.utcnow().strftime('%H%M')
    for maintenance_window in maintenance_window_list:
        start = ''.join(maintenance_window[0].split(':'))
        end = ''.join(maintenance_window[1].split(':'))
        if now >= start and now <= end:
            return True

    return False

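# Standalone, illustrative sketch (not part of the original module) of the
# window check above. It assumes the same '00:00-01:00,10:00-11:00' format
# and only the standard library; the helper name and sample values below
# are made up for demonstration. The plain string comparison works because
# both sides are zero-padded, fixed-width 'HHMM' strings.
import datetime


def _in_any_window(maintenance_windows, now=None):
    """Return True if 'now' (UTC, 'HHMM') falls inside any 'HH:MM-HH:MM' window."""
    now = now or datetime.datetime.utcnow().strftime('%H%M')
    for window in maintenance_windows.split(','):
        start, end = window.split('-', 1)
        if start.replace(':', '') <= now <= end.replace(':', ''):
            return True
    return False


assert _in_any_window('00:00-01:00,10:00-11:00', now='1030')
assert not _in_any_window('00:00-01:00,10:00-11:00', now='0500')
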
def get_tables_and_gsis():
    """ Get a set of tables and gsis and their configuration keys

    :returns: set -- A set of tuples (table_name, table_conf_key)
    """
    table_names = set()
    configured_tables = get_configured_tables()
    not_used_tables = set(configured_tables)

    # Add regexp table names
    for table_instance in list_tables():
        for key_name in configured_tables:
            try:
                if re.match(key_name, table_instance.table_name):
                    logger.debug("Table {0} match with config key {1}".format(
                        table_instance.table_name, key_name))
                    table_names.add((table_instance.table_name, key_name))
                    not_used_tables.discard(key_name)
                else:
                    logger.debug(
                        "Table {0} did not match with config key {1}".format(
                            table_instance.table_name, key_name))
            except re.error:
                logger.error(
                    'Invalid regular expression: "{0}"'.format(key_name))
                sys.exit(1)

    if not_used_tables:
        logger.warning(
            'No tables matching the following configured '
            'tables found: {0}'.format(', '.join(not_used_tables)))

    return sorted(table_names)

def __is_maintenance_window(table_name, maintenance_windows):
    """ Checks that the current time is within the maintenance window

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type maintenance_windows: str
    :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'
    :returns: bool -- True if within maintenance window
    """
    # Example string '00:00-01:00,10:00-11:00'
    maintenance_window_list = []
    for window in maintenance_windows.split(','):
        try:
            start, end = window.split('-', 1)
        except ValueError:
            logger.error(
                '{0} - Malformatted maintenance window'.format(table_name))
            return False

        maintenance_window_list.append((start, end))

    now = datetime.datetime.utcnow().strftime('%H%M')
    for maintenance_window in maintenance_window_list:
        start = ''.join(maintenance_window[0].split(':'))
        end = ''.join(maintenance_window[1].split(':'))
        if now >= start and now <= end:
            return True

    return False

def ensure_created(table_name, template_table_name):
    """ Ensure table has been created in DynamoDB based on given
    template table name

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type template_table_name: str
    :param template_table_name: Name of the template DynamoDB table
        (that has hashkey, attribute definitions)
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
    except JSONResponseError:
        try:
            template_table = get_table(template_table_name)
            template_table.describe()
            logger.info(
                '{0} - Create table with template table schema {1}, '
                'throughput {2}, indexes {3}, global_indexes {4}'.format(
                    table_name,
                    template_table.schema,
                    template_table.throughput,
                    template_table.indexes,
                    template_table.global_indexes))

            # Return if dry-run
            if get_global_option('dry_run'):
                return

            table = Table.create(
                table_name,
                schema=template_table.schema,
                throughput=template_table.throughput,
                indexes=template_table.indexes,
                global_indexes=template_table.global_indexes,
                connection=DYNAMODB_CONNECTION)
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'ResourceNotFoundException':
                logger.error(
                    '{0} - Table {1} not found'.format(table_name, table_name))
            raise

def get_tables_and_gsis():
    """ Get a set of tables and gsis and their configuration keys

    :returns: set -- A set of tuples (table_name, table_conf_key)
    """
    table_names = set()
    configured_tables = get_configured_tables()
    not_used_tables = set(configured_tables)

    # Add regexp table names
    for table_instance in list_tables():
        for key_name in configured_tables:
            try:
                if re.match(key_name, table_instance.table_name):
                    logger.debug("Table {0} match with config key {1}".format(
                        table_instance.table_name, key_name))
                    table_names.add((table_instance.table_name, key_name))
                    not_used_tables.discard(key_name)
                else:
                    logger.debug(
                        "Table {0} did not match with config key {1}".format(
                            table_instance.table_name, key_name)
                    )
            except re.error:
                logger.error('Invalid regular expression: "{0}"'.format(key_name))
                sys.exit(1)

    if not_used_tables:
        logger.warning(
            "No tables matching the following configured "
            "tables found: {0}".format(", ".join(not_used_tables))
        )

    return sorted(table_names)

def update_table_provisioning(table_name, reads, writes):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    """
    table = get_table(table_name)

    try:
        table.update(
            throughput={
                'read': reads,
                'write': writes
            })
        logger.info(
            '{0} - Provisioning updated to {1} reads and {2} writes'.format(
                table_name, reads, writes))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        know_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException']
        if exception in know_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            logger.error(
                (
                    '{0} - Unhandled exception: {1}: {2}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(table_name, exception, error.body['message']))

def __get_aws_metric(table_name, lookback_window_start, metric_name):
    """ Returns a metric list from the AWS CloudWatch service, may return
    None if no metric exists

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type lookback_window_start: int
    :param lookback_window_start: How many minutes to look at
    :type metric_name: str
    :param metric_name: Name of the metric to retrieve from CloudWatch
    :returns: list -- A list of time series data for the given metric, may
        be None if there was no data
    """
    try:
        now = datetime.utcnow()
        start_time = now - timedelta(minutes=lookback_window_start)
        end_time = now - timedelta(minutes=lookback_window_start - 5)

        return cloudwatch_connection.get_metric_statistics(
            period=300,  # Always look at 5 minutes windows
            start_time=start_time,
            end_time=end_time,
            metric_name=metric_name,
            namespace='AWS/DynamoDB',
            statistics=['Sum'],
            dimensions={'TableName': table_name},
            unit='Count')
    except BotoServerError as error:
        logger.error(
            'Unknown boto error. Status: "{0}". '
            'Reason: "{1}". Message: {2}'.format(
                error.status, error.reason, error.message))
        raise

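# Illustrative sketch (not part of the original module) of the window
# arithmetic used above: for a lookback_window_start of 15 minutes, the
# CloudWatch query covers the 5-minute window [now - 15 min, now - 10 min].
# The helper name and sample value are assumptions for demonstration only.
from datetime import datetime, timedelta


def _metric_window(lookback_window_start, now=None):
    """Return the (start_time, end_time) pair used for the CloudWatch query."""
    now = now or datetime.utcnow()
    start_time = now - timedelta(minutes=lookback_window_start)
    end_time = now - timedelta(minutes=lookback_window_start - 5)
    return start_time, end_time


start, end = _metric_window(15)
assert (end - start) == timedelta(minutes=5)
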
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection

def __get_aws_metric(table_name, time_frame, metric_name):
    """ Returns a metric list from the AWS CloudWatch service, may return
    None if no metric exists

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type time_frame: int
    :param time_frame: How many seconds to look at
    :type metric_name: str
    :param metric_name: Name of the metric to retrieve from CloudWatch
    :returns: list -- A list of time series data for the given metric, may
        be None if there was no data
    """
    try:
        start_time = datetime.utcnow() - timedelta(minutes=10, seconds=time_frame)
        end_time = datetime.utcnow() - timedelta(minutes=10)

        return cloudwatch_connection.get_metric_statistics(
            period=time_frame,
            start_time=start_time,
            end_time=end_time,
            metric_name=metric_name,
            namespace='AWS/DynamoDB',
            statistics=['Sum'],
            dimensions={'TableName': table_name},
            unit='Count')
    except BotoServerError as error:
        logger.error(
            'Unknown boto error. Status: "{0}". '
            'Reason: "{1}". Message: {2}'.format(
                error.status, error.reason, error.message))
        raise

def list_tables():
    """ Return list of DynamoDB tables available from AWS

    :returns: list -- List of DynamoDB tables
    """
    tables = []

    try:
        tables = DYNAMODB_CONNECTION.list_tables()
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('No tables found')
        elif dynamodb_error == 'AccessDeniedException':
            logger.debug(
                'Your AWS API keys lack access to listing tables. '
                'That is an issue if you are trying to use regular expressions '
                'in your table configuration.')
        else:
            logger.error(
                ('Unhandled exception: {0}: {1}. '
                 'Please file a bug report at '
                 'https://github.com/sebdah/dynamic-dynamodb/issues').format(
                    dynamodb_error, error.body['message']))

    return tables

def list_tables():
    """ Return list of DynamoDB tables available from AWS

    :returns: list -- List of DynamoDB tables
    """
    tables = []

    try:
        tables = DYNAMODB_CONNECTION.list_tables()
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('No tables found')
        elif dynamodb_error == 'AccessDeniedException':
            logger.debug(
                'Your AWS API keys lack access to listing tables. '
                'That is an issue if you are trying to use regular expressions '
                'in your table configuration.')
        else:
            logger.error(
                (
                    'Unhandled exception: {0}: {1}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    dynamodb_error, error.body['message']))

    return tables

def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug('Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection

def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('show_config'):
            print json.dumps(config.get_configuration(), indent=2)
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error(
                        'Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)
            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')
            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')
            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()
    except Exception as error:
        logger.exception(error)
    except KeyboardInterrupt:
        while threading.active_count() > 1:
            for thread in table_threads.values():
                thread.do_run = False
            logger.info('Waiting for all threads... {}s'.format(
                get_global_option('check_interval')))
            time.sleep(get_global_option('check_interval'))
        raise

def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug('Connecting to DynamoDB in {0}'.format(
            get_global_option('region')))

        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to DynamoDB using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to DynamoDB using '
                    'env vars / boto configuration')
                connection = dynamodb2.connect_to_region(
                    get_global_option('region'))

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(
                get_global_option('region')))

    return connection

def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('show_config'):
            print json.dumps(config.get_configuration(), indent=2)
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error(
                        'Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)
            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')
            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')
            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()
    except Exception as error:
        logger.exception(error)

def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug('Connecting to DynamoDB in {0}'.format(
            get_global_option('region')))

        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to DynamoDB using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to DynamoDB using '
                    'env vars / boto configuration')
                connection = dynamodb2.connect_to_region(
                    get_global_option('region'))

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(
                get_global_option('region')))

    return connection

def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('show_config'):
            print json.dumps(config.get_configuration(), indent=2)
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error('Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)
            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')
            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')
            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()
    except Exception as error:
        logger.exception(error)

def execute_in_thread(table_name, table_key):
    """ Ensure the checks for the given table run in a dedicated thread """
    if table_name in table_threads:
        thread = table_threads.get(table_name)
        if thread.isAlive():
            logger.debug("Thread {} still running".format(table_name))
        else:
            logger.error("Thread {} was stopped!".format(table_name))
            sys.exit(1)
    else:
        table_logger = log_handler.getTableLogger(table_name)
        logger.info("Start new thread: " + table_name)
        thread = threading.Thread(
            target=execute_table_in_loop,
            args=[table_name, table_key, table_logger])
        table_threads[table_name] = thread
        thread.start()

def __get_aws_metric(
        table_name, gsi_name, lookback_window_start, lookback_period,
        metric_name):
    """ Returns a metric list from the AWS CloudWatch service, may return
    None if no metric exists

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of a GSI on the given DynamoDB table
    :type lookback_window_start: int
    :param lookback_window_start: How many minutes to look at
    :type lookback_period: int
    :param lookback_period: Length of the lookback period in minutes
    :type metric_name: str
    :param metric_name: Name of the metric to retrieve from CloudWatch
    :returns: list -- A list of time series data for the given metric, may
        be None if there was no data
    """
    try:
        now = datetime.utcnow()
        start_time = now - timedelta(minutes=lookback_window_start)
        end_time = now - timedelta(
            minutes=lookback_window_start - lookback_period)

        return cloudwatch_connection.get_metric_statistics(
            period=lookback_period * 60,
            start_time=start_time,
            end_time=end_time,
            metric_name=metric_name,
            namespace='AWS/DynamoDB',
            statistics=['Sum'],
            dimensions={
                'TableName': table_name,
                'GlobalSecondaryIndexName': gsi_name
            },
            unit='Count')
    except BotoServerError as error:
        logger.error(
            'Unknown boto error. Status: "{0}". '
            'Reason: "{1}". Message: {2}'.format(
                error.status, error.reason, error.message))
        raise

def get_table(table_name):
    """ Return the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: boto.dynamodb.table.Table
    """
    try:
        table = Table(table_name, connection=DYNAMODB_CONNECTION)
    except DynamoDBResponseError as error:
        dynamodb_error = error.body["__type"].rsplit("#", 1)[1]
        if dynamodb_error == "ResourceNotFoundException":
            logger.error("{0} - Table {1} not found".format(table_name, table_name))

        raise

    return table

def get_tables_and_gsis():
    """ Get a set of tables and gsis and their configuration keys

    :returns: set -- A set of tuples (table_name, table_conf_key)
    """
    table_names = set()
    configured_tables = get_configured_tables()
    not_used_tables = set(configured_tables)

    # Add regexp table names
    for table_instance in list_tables():
        for key_name in configured_tables:
            try:
                if re.match(key_name, table_instance.table_name):
                    logger.debug("Table {0} match with config key {1}".format(
                        table_instance.table_name, key_name))

                    # Notify users about regexps that match multiple tables
                    if table_instance.table_name in [x[0] for x in table_names]:
                        logger.warning(
                            'Table {0} matches more than one regexp in config, '
                            'skipping this match: "{1}"'.format(
                                table_instance.table_name, key_name))
                    else:
                        table_names.add(
                            (table_instance.table_name, key_name))
                        not_used_tables.discard(key_name)
                else:
                    logger.debug(
                        "Table {0} did not match with config key {1}".format(
                            table_instance.table_name, key_name))
            except re.error:
                logger.error('Invalid regular expression: "{0}"'.format(
                    key_name))
                sys.exit(1)

    if not_used_tables:
        logger.warning(
            'No tables matching the following configured '
            'tables found: {0}'.format(', '.join(not_used_tables)))

    return sorted(table_names)

def get_table(table_name):
    """ Return the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: boto.dynamodb.table.Table
    """
    try:
        table = Table(table_name, connection=DYNAMODB_CONNECTION)
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('{0} - Table {1} not found'.format(
                table_name, table_name))

        raise

    return table

def get_table(table_name):
    """ Return the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: boto.dynamodb.table.Table
    """
    try:
        table = Table(table_name, connection=__get_connection_dynamodb())
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error(
                '{0} - Table {1} not found'.format(table_name, table_name))

        raise

    return table

def __publish(topic, message, subject=None):
    """ Publish a message to a SNS topic

    :type topic: str
    :param topic: SNS topic to publish the message to
    :type message: str
    :param message: Message to send via SNS
    :type subject: str
    :param subject: Subject to use for e-mail notifications
    :returns: None
    """
    try:
        SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
        logger.info("Sent SNS notification to {0}".format(topic))
    except BotoServerError as error:
        logger.error("Problem sending SNS notification: {0}".format(error.message))

    return

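# Hedged, standalone usage sketch (not part of the original module) showing
# the equivalent publish call with boto's SNS API directly. The region and
# topic ARN are made-up placeholders, and running this performs a real API
# call, so valid AWS credentials would be required.
from boto import sns as boto_sns


def publish_example():
    # Connect to SNS in a sample region and publish a plain-text message
    connection = boto_sns.connect_to_region('us-east-1')
    connection.publish(
        topic='arn:aws:sns:us-east-1:123456789012:dynamic-dynamodb',
        message='users - Provisioning updated to 100 reads and 50 writes',
        subject='Updated provisioning for table users')
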
def __publish(topic, message, subject=None):
    """ Publish a message to a SNS topic

    :type topic: str
    :param topic: SNS topic to publish the message to
    :type message: str
    :param message: Message to send via SNS
    :type subject: str
    :param subject: Subject to use for e-mail notifications
    :returns: None
    """
    try:
        SNS_CONNECTION.publish(topic=topic, message=message, subject=subject)
        logger.info('Sent SNS notification to {0}'.format(topic))
    except BotoServerError as error:
        logger.error('Problem sending SNS notification: {0}'.format(
            error.message))

    return

def get_table(table_name):
    """ Return the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: boto.dynamodb.table.Table
    """
    try:
        table = DYNAMODB_CONNECTION.get_table(table_name)
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error(
                '{0} - Table {1} not found'.format(table_name, table_name))
            sys.exit(1)
        else:
            raise

    return table

def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug("Connecting to DynamoDB in {0}".format(get_global_option("region")))

        if get_global_option("aws_access_key_id") and get_global_option("aws_secret_access_key"):
            logger.debug(
                "Authenticating to DynamoDB using "
                "credentials in configuration file")
            connection = dynamodb2.connect_to_region(
                get_global_option("region"),
                aws_access_key_id=get_global_option("aws_access_key_id"),
                aws_secret_access_key=get_global_option("aws_secret_access_key"),
            )
        else:
            try:
                logger.debug("Authenticating to DynamoDB using EC2 instance profile")
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata["placement"]["availability-zone"][:-1],
                    profile_name=metadata["iam"]["info"][u"InstanceProfileArn"],
                )
            except KeyError:
                logger.debug(
                    "Authenticating to DynamoDB using "
                    "env vars / boto configuration")
                connection = dynamodb2.connect_to_region(get_global_option("region"))

        if not connection:
            if retries == 0:
                logger.error("Failed to connect to DynamoDB. Giving up.")
                raise
            else:
                logger.error("Failed to connect to DynamoDB. Retrying in 5 seconds")
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug("Connected to DynamoDB in {0}".format(get_global_option("region")))

    return connection

def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        try:
            if (configuration['global']['aws_access_key_id'] and
                    configuration['global']['aws_secret_access_key']):
                connection = dynamodb.connect_to_region(
                    configuration['global']['region'],
                    aws_access_key_id=configuration['global']['aws_access_key_id'],
                    aws_secret_access_key=configuration['global']['aws_secret_access_key'])
            else:
                connection = dynamodb.connect_to_region(
                    configuration['global']['region'])

            connected = True
        except Exception as err:
            logger.error('Failed to connect to DynamoDB: {0}'.format(err))
            if retries == 0:
                logger.error(
                    'Please report an issue at: '
                    'https://github.com/sebdah/dynamic-dynamodb/issues')
                raise
            else:
                logger.error('Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)

    logger.debug('Connected to DynamoDB')
    return connection

def update_gsi_provisioning(table_name, gsi_name, reads, writes):
    """ Update provisioning on a global secondary index

    :type table_name: str
    :param table_name: Name of the table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    """
    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": reads,
                            "WriteCapacityUnits": writes
                        }
                    }
                }
            ])
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        know_exceptions = ['LimitExceededException']
        if exception in know_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            logger.error(
                (
                    '{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    table_name, gsi_name, exception, error.body['message']))

def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    region = get_global_option('region')
    while not connected:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = dynamodb2.connect_to_region(region)

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(region))

    return connection

def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    try:
        if (configuration['global']['aws_access_key_id'] and
                configuration['global']['aws_secret_access_key']):
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'],
                aws_access_key_id=configuration['global']['aws_access_key_id'],
                aws_secret_access_key=configuration['global']['aws_secret_access_key'])
        else:
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'])
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch')
    return connection

def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to CloudWatch using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = cloudwatch.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to CloudWatch using '
                    'env vars / boto configuration')
                connection = cloudwatch.connect_to_region(
                    get_global_option('region'))
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch in {0}'.format(
        get_global_option('region')))
    return connection

def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug('Connecting to DynamoDB in {0}'.format(
            configuration['global']['region']))

        if (configuration['global']['aws_access_key_id'] and
                configuration['global']['aws_secret_access_key']):
            connection = dynamodb2.connect_to_region(
                configuration['global']['region'],
                aws_access_key_id=configuration['global']['aws_access_key_id'],
                aws_secret_access_key=configuration['global']['aws_secret_access_key'])
        else:
            connection = dynamodb2.connect_to_region(
                configuration['global']['region'])

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(
                configuration['global']['region']))

    return connection

def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    try:
        if (configuration['global']['aws_access_key_id'] and
                configuration['global']['aws_secret_access_key']):
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'],
                aws_access_key_id=configuration['global']['aws_access_key_id'],
                aws_secret_access_key=configuration['global']['aws_secret_access_key'])
        else:
            connection = cloudwatch.connect_to_region(
                configuration['global']['region'])
    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch')
    return connection

def list_tables():
    """ Return list of DynamoDB tables available from AWS

    :returns: list -- List of DynamoDB tables
    """
    tables = []

    try:
        table_list = DYNAMODB_CONNECTION.list_tables()
        while True:
            for table_name in table_list[u'TableNames']:
                tables.append(get_table(table_name))

            if u'LastEvaluatedTableName' in table_list:
                table_list = DYNAMODB_CONNECTION.list_tables(
                    table_list[u'LastEvaluatedTableName'])
            else:
                break
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error('No tables found')
        elif dynamodb_error == 'AccessDeniedException':
            logger.debug(
                'Your AWS API keys lack access to listing tables. '
                'That is an issue if you are trying to use regular '
                'expressions in your table configuration.')
        elif dynamodb_error == 'UnrecognizedClientException':
            logger.error(
                'Invalid security token. Are your AWS API keys correct?')
        else:
            logger.error(
                (
                    'Unhandled exception: {0}: {1}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    dynamodb_error, error.body['message']))
    except JSONResponseError as error:
        logger.error('Communication error: {0}'.format(error))
        sys.exit(1)

    return tables

def __get_connection_SNS():
    """ Ensure connection to SNS """
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to SNS using '
                'credentials in configuration file')
            connection = sns.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to SNS using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = sns.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to SNS using '
                    'env vars / boto configuration')
                connection = sns.connect_to_region(get_global_option('region'))
    except Exception as err:
        logger.error('Failed connecting to SNS: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to SNS in {0}'.format(get_global_option('region')))
    return connection

def list_tables():
    """ Return list of DynamoDB tables available from AWS

    :returns: list -- List of DynamoDB tables
    """
    tables = []

    try:
        table_list = DYNAMODB_CONNECTION.list_tables()
        while True:
            for table_name in table_list[u"TableNames"]:
                tables.append(get_table(table_name))

            if u"LastEvaluatedTableName" in table_list:
                table_list = DYNAMODB_CONNECTION.list_tables(table_list[u"LastEvaluatedTableName"])
            else:
                break
    except DynamoDBResponseError as error:
        dynamodb_error = error.body["__type"].rsplit("#", 1)[1]
        if dynamodb_error == "ResourceNotFoundException":
            logger.error("No tables found")
        elif dynamodb_error == "AccessDeniedException":
            logger.debug(
                "Your AWS API keys lack access to listing tables. "
                "That is an issue if you are trying to use regular "
                "expressions in your table configuration."
            )
        elif dynamodb_error == "UnrecognizedClientException":
            logger.error("Invalid security token. Are your AWS API keys correct?")
        else:
            logger.error(
                (
                    "Unhandled exception: {0}: {1}. "
                    "Please file a bug report at "
                    "https://github.com/sebdah/dynamic-dynamodb/issues"
                ).format(dynamodb_error, error.body["message"])
            )
    except JSONResponseError as error:
        logger.error("Communication error: {0}".format(error))
        sys.exit(1)

    return tables

def update_gsi_provisioning(
        table_name, gsi_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    :type table_name: str
    :param table_name: Name of the table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    if retry_with_only_increase:
        current_reads = int(get_provisioned_table_read_units(table_name))
        current_writes = int(get_provisioned_table_write_units(table_name))

        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": reads,
                            "WriteCapacityUnits": writes
                        }
                    }
                }
            ])
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        know_exceptions = ['LimitExceededException']
        if exception in know_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            logger.error(
                (
                    '{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    table_name, gsi_name, exception, error.body['message']))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - GSI: {1} - Will retry to update provisioning '
                'with only increases'.format(table_name, gsi_name))
            update_gsi_provisioning(
                table_name,
                gsi_name,
                reads,
                writes,
                retry_with_only_increase=True)

def update_table_provisioning(
        table_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)

    if retry_with_only_increase:
        current_reads = int(get_provisioned_table_read_units(table_name))
        current_writes = int(get_provisioned_table_write_units(table_name))

        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    try:
        table.update(
            throughput={
                'read': reads,
                'write': writes
            })
        logger.info(
            '{0} - Provisioning updated to {1} reads and {2} writes'.format(
                table_name, reads, writes))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        know_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException']
        if exception in know_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            logger.error(
                (
                    '{0} - Unhandled exception: {1}: {2}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(table_name, exception, error.body['message']))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - Will retry to update provisioning '
                'with only increases'.format(table_name))
            update_table_provisioning(
                table_name,
                reads,
                writes,
                retry_with_only_increase=True)

def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The return var shows how many times the scale-down criteria
            # has been met. This is coupled with a var in config,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Add regexp table names
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()
                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))
                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(
                            gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['tables'][table_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['tables'][table_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }
        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]
            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name, table_name))
                continue
        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status, error.reason))
                logger.error('Please bug report if this error persists')
                boto_server_error_retries -= 1
                continue
            else:
                raise

    # Sleep between the checks
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next check'.format(
            get_global_option('check_interval')))
        time.sleep(get_global_option('check_interval'))

def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open

    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$')

    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')

    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')

    match = pattern.match(url)
    if not match:
        logger.error('Malformatted URL: {0}'.format(url))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))

    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name

    # Make the actual request
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=timeout / 1000.00,
            headers=headers)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))
    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True

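# Standalone sketch (illustrative, not part of the original module) of the
# URL handling above: credentials embedded as user:pass@host are split out
# of the circuit breaker URL and turned into a requests basic-auth tuple.
# The sample URL is a made-up placeholder; only the standard library is used.
import re

_cb_pattern = re.compile(
    r'^(?P<scheme>http(s)?://)'
    r'((?P<username>.+):(?P<password>.+)@){0,1}'
    r'(?P<url>.*)$')

_match = _cb_pattern.match('https://user:secret@example.com/health')
assert _match.group('scheme') == 'https://'
assert (_match.group('username'), _match.group('password')) == ('user', 'secret')
assert _match.group('url') == 'example.com/health'
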
def update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    table = dynamodb.get_table(table_name)

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if not __is_maintenance_window(
                table_name,
                get_table_option(key_name, 'maintenance_windows')):
            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    if table.status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is in {1} state'.format(table_name, table.status))

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if (read_units < table.read_units) or (
                table.read_units == get_table_option(
                    key_name, 'min_provisioned_reads')):
            if (write_units < table.write_units) or (
                    table.write_units == get_table_option(
                        key_name, 'min_provisioned_writes')):
                logger.info(
                    '{0} - Both reads and writes will be decreased'.format(
                        table_name))
        elif read_units < table.read_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            read_units = table.read_units
        elif write_units < table.write_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            write_units = table.write_units

    if read_units == table.read_units and write_units == table.write_units:
        logger.debug('{0} - No need to update provisioning'.format(table_name))
        return

    if not get_global_option('dry_run'):
        try:
            table.update_throughput(int(read_units), int(write_units))
            logger.info('Provisioning updated')
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'LimitExceededException':
                logger.warning(
                    '{0} - {1}'.format(table_name, error.body['message']))

                if int(read_units) > table.read_units:
                    logger.info('{0} - Scaling up reads to {1:d}'.format(
                        table_name, int(read_units)))
                    update_throughput(
                        table_name,
                        int(read_units),
                        int(table.write_units),
                        key_name)
                elif int(write_units) > table.write_units:
                    logger.info('{0} - Scaling up writes to {1:d}'.format(
                        table_name, int(write_units)))
                    update_throughput(
                        table_name,
                        int(table.read_units),
                        int(write_units),
                        key_name)
            elif dynamodb_error == 'ValidationException':
                logger.warning('{0} - ValidationException: {1}'.format(
                    table_name, error.body['message']))
            elif dynamodb_error == 'ResourceInUseException':
                logger.warning('{0} - ResourceInUseException: {1}'.format(
                    table_name, error.body['message']))
            elif dynamodb_error == 'AccessDeniedException':
                logger.warning('{0} - AccessDeniedException: {1}'.format(
                    table_name, error.body['message']))
            else:
                logger.error(
                    (
                        '{0} - Unhandled exception: {1}: {2}. '
                        'Please file a bug report at '
                        'https://github.com/sebdah/dynamic-dynamodb/issues'
                    ).format(
                        table_name, dynamodb_error, error.body['message']))

def __circuit_breaker_is_open():
    """ Checks whether the circuit breaker is open

    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )
    match = pattern.match(get_global_option('circuit_breaker_url'))
    if not match:
        logger.error('Malformatted URL: {0}'.format(
            get_global_option('circuit_breaker_url')))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))
    else:
        url = get_global_option('circuit_breaker_url')
        auth = ()

    # Make the actual request
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=get_global_option('circuit_breaker_timeout') / 1000.00)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))
    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True

def update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    try:
        table = dynamodb.get_table(table_name)
    except DynamoDBResponseError:
        # Return if the table does not exist
        return None

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):
            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    if table.status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is in {1} state'.format(table_name, table.status))

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if ((read_units < table.read_units) or
                (table.read_units == get_table_option(
                    key_name, 'min_provisioned_reads'))):
            if ((write_units < table.write_units) or
                    (table.write_units == get_table_option(
                        key_name, 'min_provisioned_writes'))):
                logger.info(
                    '{0} - Both reads and writes will be decreased'.format(
                        table_name))
        elif read_units < table.read_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            read_units = table.read_units
        elif write_units < table.write_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            write_units = table.write_units

    if read_units == table.read_units and write_units == table.write_units:
        logger.debug('{0} - No need to update provisioning'.format(table_name))
        return

    if not get_global_option('dry_run'):
        try:
            table.update_throughput(int(read_units), int(write_units))
            logger.info('Provisioning updated')
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'LimitExceededException':
                logger.warning(
                    '{0} - {1}'.format(table_name, error.body['message']))

                if int(read_units) > table.read_units:
                    logger.info('{0} - Scaling up reads to {1:d}'.format(
                        table_name, int(read_units)))
                    update_throughput(
                        table_name,
                        int(read_units),
                        int(table.write_units),
                        key_name)
                elif int(write_units) > table.write_units:
                    logger.info('{0} - Scaling up writes to {1:d}'.format(
                        table_name, int(write_units)))
                    update_throughput(
                        table_name,
                        int(table.read_units),
                        int(write_units),
                        key_name)
            elif dynamodb_error == 'ValidationException':
                logger.warning('{0} - ValidationException: {1}'.format(
                    table_name, error.body['message']))
            elif dynamodb_error == 'ResourceInUseException':
                logger.warning('{0} - ResourceInUseException: {1}'.format(
                    table_name, error.body['message']))
            elif dynamodb_error == 'AccessDeniedException':
                logger.warning('{0} - AccessDeniedException: {1}'.format(
                    table_name, error.body['message']))
            else:
                logger.error(
                    (
                        '{0} - Unhandled exception: {1}: {2}. '
                        'Please file a bug report at '
                        'https://github.com/sebdah/dynamic-dynamodb/issues'
                    ).format(
                        table_name, dynamodb_error, error.body['message']))

def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The return var shows how many times the scale-down criteria
            # has been met. This is coupled with a var in config,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Add regexp table names
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()
                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))
                    except re.error:
                        logger.error(
                            'Invalid regular expression: "{0}"'.format(
                                gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['tables'][table_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['tables'][table_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }
        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]
            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name, table_name))
                continue
        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status, error.reason))
                logger.error('Please bug report if this error persists')
                boto_server_error_retries -= 1
                continue
            else:
                raise

    # Sleep between the checks
    logger.debug('Sleeping {0} seconds until next check'.format(
        get_global_option('check_interval')))
    time.sleep(get_global_option('check_interval'))

def update_gsi_provisioning(
        table_name, table_key, gsi_name, gsi_key,
        reads, writes, retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name))
    current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - GSI: {1} - Retrying to update provisioning, excluding any '
            'decreases. Setting new reads to {2} and new writes to {3}'.format(
                table_name, gsi_name, reads, writes))

    # Check that we are in the right time frame
    m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows')
    if m_windows:
        if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows):
            logger.warning(
                '{0} - GSI: {1} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(
                    table_name, gsi_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - GSI: {1} - '
                    'No need to scale up reads nor writes'.format(
                        table_name, gsi_name))
                return
        else:
            logger.info(
                '{0} - GSI: {1} - '
                'Current time is within maintenance window'.format(
                    table_name, gsi_name))

    logger.info(
        '{0} - GSI: {1} - '
        'Updating provisioning to {2} reads and {3} writes'.format(
            table_name, gsi_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": reads,
                            "WriteCapacityUnits": writes
                        }
                    }
                }
            ])

        message = (
            '{0} - GSI: {1} - Provisioning updated to '
            '{2} reads and {3} writes').format(
                table_name, gsi_name, reads, writes)

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        sns.publish_gsi_notification(
            table_key,
            gsi_key,
            message,
            sns_message_types,
            subject='Updated provisioning for GSI {0}'.format(gsi_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = ['LimitExceededException']
        if exception in known_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            logger.error(
                ('{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                 'Please file a bug report at '
                 'https://github.com/sebdah/dynamic-dynamodb/issues').format(
                    table_name, gsi_name, exception, error.body['message']))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - GSI: {1} - Will retry to update provisioning '
                'with only increases'.format(table_name, gsi_name))
            update_gsi_provisioning(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                reads,
                writes,
                retry_with_only_increase=True)
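A hedged usage sketch for the GSI path (not from the original source); the table, GSI, and configuration key names below are made up for illustration.

# Hypothetical example: provision the GSI 'sessions-by-date' on the table
# 'user-sessions' to 50 reads and 25 writes. Outside a configured maintenance
# window only increases are applied; on LimitExceededException the function
# calls itself again with retry_with_only_increase=True.
update_gsi_provisioning(
    table_name='user-sessions',
    table_key='^user-sessions$',
    gsi_name='sessions-by-date',
    gsi_key='^sessions-by-date$',
    reads=50,
    writes=25)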
def update_table_provisioning(
        table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(table_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        table.update(throughput={'read': reads, 'write': writes})

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        message = (
            '{0} - Provisioning updated to {1} reads and {2} writes').format(
                table_name, reads, writes)

        sns.publish_table_notification(
            key_name,
            message,
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException'
        ]
        if exception in known_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            logger.error(
                ('{0} - Unhandled exception: {1}: {2}. '
                 'Please file a bug report at '
                 'https://github.com/sebdah/dynamic-dynamodb/issues').format(
                    table_name, exception, error.body['message']))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - Will retry to update provisioning '
                'with only increases'.format(table_name))
            update_table_provisioning(
                table_name,
                key_name,
                reads,
                writes,
                retry_with_only_increase=True)
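And the corresponding table-level sketch, again with hypothetical names.

# Hypothetical example: request 400 reads and 150 writes for 'user-sessions'.
# The same maintenance-window behaviour and LimitExceededException retry as in
# the GSI variant apply here.
update_table_provisioning('user-sessions', '^user-sessions$', 400, 150)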