Example #1
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id')
                and get_global_option('aws_secret_access_key')):
            logger.debug('Authenticating to CloudWatch using '
                         'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug('Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)

    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error('Please report an issue at: '
                     'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection
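Every example on this page reads its settings through get_global_option, which is not shown here. A minimal stand-in, assuming the parsed configuration ends up in a flat mapping (the real project builds it from command-line options and a configuration file), might look like:

# Hypothetical stand-in for the configuration helper used throughout
# these examples; the option values below are placeholders.
_GLOBAL_OPTIONS = {
    'region': 'us-east-1',
    'aws_access_key_id': None,       # None falls back to boto's own auth
    'aws_secret_access_key': None,
    'check_interval': 300,           # seconds between checks
    'dry_run': False,
}


def get_global_option(option):
    """ Return a global configuration option, or None if it is unset """
    return _GLOBAL_OPTIONS.get(option)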
def __get_connection_cloudwatch():
    """ Ensure connection to CloudWatch """
    region = get_global_option('region')
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = cloudwatch.connect_to_region(region)

    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch in {0}'.format(region))
    return connection
def execute_table_in_loop(table_name, table_key, table_logger):
    if not get_global_option('daemon') and get_global_option('run_once'):
        execute_table(table_name, table_key, table_logger)
    else:
        t = threading.currentThread()
        while getattr(t, "do_run", True):
            execute_table(table_name, table_key, table_logger)
            logger.debug('Sleeping {0} seconds until next check'.format(
                get_global_option('check_interval')))
            time.sleep(get_global_option('check_interval'))
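execute_table_in_loop polls a do_run attribute on its own thread object, so a caller can stop it by clearing that flag. A sketch of that pattern (the placeholder table values and the shutdown hook are assumptions, not part of this example):

import logging
import threading

table_name = 'my-table'            # placeholder values for the sketch
table_key = '^my-table$'
table_logger = logging.getLogger('dynamic-dynamodb.my-table')

worker = threading.Thread(
    target=execute_table_in_loop,
    args=(table_name, table_key, table_logger))
worker.do_run = True               # the loop checks this attribute
worker.start()

# ... later, e.g. on shutdown:
worker.do_run = False              # let the loop exit after the current check
worker.join()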
Example #4
def ensure_provisioning(table_name, key_name):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    """
    if get_global_option('circuit_breaker_url'):
        if __circuit_breaker_is_open():
            logger.warning('Circuit breaker is OPEN!')
            return None

    read_update_needed, updated_read_units = __ensure_provisioning_reads(
        table_name, key_name)
    write_update_needed, updated_write_units = __ensure_provisioning_writes(
        table_name, key_name)

    # Handle throughput updates
    if read_update_needed or write_update_needed:
        logger.info(
            '{0} - Changing provisioning to {1:d} '
            'read units and {2:d} write units'.format(
                table_name,
                int(updated_read_units),
                int(updated_write_units)))
        update_throughput(table_name, updated_read_units, updated_write_units, key_name)
    else:
        logger.info('{0} - No need to change provisioning'.format(table_name))
def ensure_created(table_name, template_table_name):
    """ Ensure table has been created in DynamoDB based on given template table name
    
    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type template_table_name: str
    :param template_table_name: Name of the template DynamoDB table (that has hashkey, attribute definitions)
    """
    try:
        DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
    except JSONResponseError:
        try:
            template_table = get_table(template_table_name)
            template_table.describe()
            logger.info(
                '{0} - Create table with template table schema {1}, '
                'throughput {2}, indexes {3}, global_indexes {4}'.format(
                    table_name,
                    template_table.schema,
                    template_table.throughput,
                    template_table.indexes,
                    template_table.global_indexes))

            # Return if dry-run
            if get_global_option('dry_run'):
                return

            Table.create(
                table_name,
                schema=template_table.schema,
                throughput=template_table.throughput,
                indexes=template_table.indexes,
                global_indexes=template_table.global_indexes,
                connection=DYNAMODB_CONNECTION)

        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'ResourceNotFoundException':
                logger.error(
                    '{0} - Template table {1} not found'.format(
                        table_name, template_table_name))
            raise
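A short usage sketch of the helper above (both table names are placeholders):

# Sketch: create a table from an existing template table, reusing its
# schema, throughput and index definitions. No-op if it already exists.
ensure_created('events_2014_06', 'events_template')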
Example #6
def ensure_provisioning(table_name, key_name):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    """
    if get_global_option('circuit_breaker_url'):
        if circuit_breaker.is_open():
            logger.warning('Circuit breaker is OPEN!')
            return None

    read_update_needed, updated_read_units = __ensure_provisioning_reads(
        table_name, key_name)
    write_update_needed, updated_write_units = __ensure_provisioning_writes(
        table_name, key_name)

    # Handle throughput updates
    if read_update_needed or write_update_needed:
        logger.info(
            '{0} - Changing provisioning to {1:d} '
            'read units and {2:d} write units'.format(
                table_name,
                int(updated_read_units),
                int(updated_write_units)))
        __update_throughput(
            table_name, updated_read_units, updated_write_units, key_name)
    else:
        logger.info('{0} - No need to change provisioning'.format(table_name))
Example #7
def ensure_provisioning(table_name, key_name, num_consec_read_checks,
                        num_consec_write_checks):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """

    if get_global_option('circuit_breaker_url') or get_table_option(
            key_name, 'circuit_breaker_url'):
        if circuit_breaker.is_open(table_name, key_name):
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)

    # Handle throughput alarm checks
    __ensure_provisioning_alarm(table_name, key_name)

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name,
                key_name,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name,
                key_name,
                num_consec_write_checks)

        if read_update_needed:
            num_consec_read_checks = 0

        if write_update_needed:
            num_consec_write_checks = 0

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info('{0} - Changing provisioning to {1:d} '
                        'read units and {2:d} write units'.format(
                            table_name, int(updated_read_units),
                            int(updated_write_units)))
            __update_throughput(table_name, key_name, updated_read_units,
                                updated_write_units)
        else:
            logger.info(
                '{0} - No need to change provisioning'.format(table_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
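The two counters returned here are meant to be carried over into the next invocation. A sketch of a per-table check loop that does so (the table name, key and the loop itself are assumptions, not part of this example):

import time

# Sketch: feed the consecutive-check counters back into the next call so
# that options requiring N consecutive checks before scaling take effect.
table_name = 'my-table'
key_name = '^my-table$'
num_consec_read_checks = 0
num_consec_write_checks = 0

while True:
    num_consec_read_checks, num_consec_write_checks = ensure_provisioning(
        table_name,
        key_name,
        num_consec_read_checks,
        num_consec_write_checks)
    time.sleep(get_global_option('check_interval'))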
Example #8
def ensure_provisioning(table_name, table_key, gsi_name, gsi_key, num_consec_read_checks, num_consec_write_checks):
    """ Ensure that provisioning is correct for Global Secondary Indexes

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """
    if get_global_option("circuit_breaker_url"):
        if circuit_breaker.is_open():
            logger.warning("Circuit breaker is OPEN!")
            return (0, 0)

    logger.info("{0} - Will ensure provisioning for global secondary index {1}".format(table_name, gsi_name))

    # Handle throughput alarm checks
    __ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key)

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = __ensure_provisioning_reads(
            table_name, table_key, gsi_name, gsi_key, num_consec_read_checks
        )
        write_update_needed, updated_write_units, num_consec_write_checks = __ensure_provisioning_writes(
            table_name, table_key, gsi_name, gsi_key, num_consec_write_checks
        )

        if read_update_needed:
            num_consec_read_checks = 0

        if write_update_needed:
            num_consec_write_checks = 0

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info(
                "{0} - GSI: {1} - Changing provisioning to {2:d} "
                "read units and {3:d} write units".format(
                    table_name, gsi_name, int(updated_read_units), int(updated_write_units)
                )
            )
            __update_throughput(table_name, table_key, gsi_name, gsi_key, updated_read_units, updated_write_units)
        else:
            logger.info("{0} - GSI: {1} - No need to change provisioning".format(table_name, gsi_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
Example #9
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('show_config'):
            print(json.dumps(config.get_configuration(), indent=2))
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error(
                        'Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)

            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')

            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')

            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()

    except Exception as error:
        logger.exception(error)
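DynamicDynamoDBDaemon itself is not shown in these examples. Assuming it subclasses a generic Unix daemon helper that provides start/stop/restart around a run() hook (the base class name here is an assumption), its core could look roughly like this sketch:

class DynamicDynamoDBDaemon(Daemon):
    """ Sketch only: run() loops the same execute() used by the
    non-daemonised code path """

    def run(self):
        # Reached via Daemon.start()/restart() when daemonised, or called
        # directly by main() in foreground mode
        while True:
            execute()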
Example #10
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                daemon.start()

            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)

            elif get_global_option('daemon') == 'restart':
                daemon.restart()

            elif get_global_option('daemon') in ['foreground', 'fg']:
                daemon.run()

            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            while True:
                execute()

    except Exception as error:
        logger.exception(error)
Example #11
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                daemon.start()

            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)

            elif get_global_option('daemon') == 'restart':
                daemon.restart()

            elif get_global_option('daemon') in ['foreground', 'fg']:
                daemon.run()

            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            while True:
                execute()

    except Exception as error:
        logger.exception(error)
Example #12
def __get_connection_SNS():
    """ Ensure connection to SNS """
    try:
        if get_global_option("aws_access_key_id") and get_global_option("aws_secret_access_key"):
            logger.debug("Authenticating to SNS using " "credentials in configuration file")
            connection = sns.connect_to_region(
                get_global_option("region"),
                aws_access_key_id=get_global_option("aws_access_key_id"),
                aws_secret_access_key=get_global_option("aws_secret_access_key"),
            )
        else:
            try:
                logger.debug("Authenticating to SNS using EC2 instance profile")
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = sns.connect_to_region(
                    metadata["placement"]["availability-zone"][:-1],
                    profile_name=metadata["iam"]["info"][u"InstanceProfileArn"],
                )
            except KeyError:
                logger.debug("Authenticating to SNS using " "env vars / boto configuration")
                connection = sns.connect_to_region(get_global_option("region"))

    except Exception as err:
        logger.error("Failed connecting to SNS: {0}".format(err))
        logger.error("Please report an issue at: " "https://github.com/sebdah/dynamic-dynamodb/issues")
        raise

    logger.debug("Connected to SNS in {0}".format(get_global_option("region")))
    return connection
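A hedged usage sketch of the connection returned above, using boto's SNSConnection.publish (the module-level binding, topic ARN and message text are placeholders):

# Sketch: bind the connection once at module level and publish a scaling
# notification through it. The topic ARN below is a placeholder.
SNS_CONNECTION = __get_connection_SNS()

SNS_CONNECTION.publish(
    topic="arn:aws:sns:us-east-1:123456789012:dynamic-dynamodb",
    message="my-table - Provisioning updated to 100 reads and 50 writes",
    subject="Updated provisioning for table my-table")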
Example #13
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('show_config'):
            print(json.dumps(config.get_configuration(), indent=2))
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error('Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)

            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')

            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')

            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()

    except Exception as error:
        logger.exception(error)
Example #14
def ensure_provisioning(table_name, table_key, gsi_name, gsi_key):
    """ Ensure that provisioning is correct for Global Secondary Indexes

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    """
    if get_global_option('circuit_breaker_url'):
        if circuit_breaker.is_open():
            logger.warning('Circuit breaker is OPEN!')
            return None

    logger.info(
        '{0} - Will ensure provisioning for global secondary index {1}'.format(
            table_name, gsi_name))

    try:
        read_update_needed, updated_read_units = __ensure_provisioning_reads(
            table_name,
            table_key,
            gsi_name,
            gsi_key)
        write_update_needed, updated_write_units = __ensure_provisioning_writes(
            table_name,
            table_key,
            gsi_name,
            gsi_key)

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info(
                '{0} - GSI: {1} - Changing provisioning to {2:d} '
                'read units and {3:d} write units'.format(
                    table_name,
                    gsi_name,
                    int(updated_read_units),
                    int(updated_write_units)))
            __update_throughput(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                updated_read_units,
                updated_write_units)
        else:
            logger.info(
                '{0} - GSI: {1} - No need to change provisioning'.format(
                    table_name,
                    gsi_name))
    except JSONResponseError:
        raise
Example #15
def ensure_provisioning(
        table_name, key_name,
        num_consec_read_checks,
        num_consec_write_checks):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """
    if get_global_option('circuit_breaker_url'):
        if circuit_breaker.is_open():
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name,
                key_name,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name,
                key_name,
                num_consec_write_checks)

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info(
                '{0} - Changing provisioning to {1:d} '
                'read units and {2:d} write units'.format(
                    table_name,
                    int(updated_read_units),
                    int(updated_write_units)))
            __update_throughput(
                table_name,
                key_name,
                updated_read_units,
                updated_write_units)
        else:
            logger.info('{0} - No need to change provisioning'.format(
                table_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug('Connecting to DynamoDB in {0}'.format(
            get_global_option('region')))

        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to DynamoDB using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to DynamoDB using '
                    'env vars / boto configuration')
                connection = dynamodb2.connect_to_region(
                    get_global_option('region'))

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise Exception('Failed to connect to DynamoDB')
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(
                get_global_option('region')))

    return connection
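Helpers such as get_provisioned_table_read_units, used by the provisioning examples above, presumably read the current capacity out of a DescribeTable call on this connection. A minimal sketch, assuming the standard response shape:

DYNAMODB_CONNECTION = __get_connection_dynamodb()


def get_provisioned_table_read_units(table_name):
    """ Sketch: current provisioned read capacity for a table """
    desc = DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
    return int(desc[u'ProvisionedThroughput'][u'ReadCapacityUnits'])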
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        logger.debug("Table: " + table_name)
        execute_in_thread(table_name, table_key)
    # Sleep between the checks
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next thread check'.format(60))
        time.sleep(60)
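execute() hands each table off to execute_in_thread, which is not among these examples. Given execute_table_in_loop shown earlier, a plausible sketch (the helper body is an assumption):

import logging
import threading


def execute_in_thread(table_name, table_key):
    """ Sketch: run the per-table check loop in a daemonised thread """
    table_logger = logging.getLogger(
        'dynamic-dynamodb.{0}'.format(table_name))
    thread = threading.Thread(
        target=execute_table_in_loop,
        args=(table_name, table_key, table_logger))
    thread.daemon = True
    thread.start()
    return thread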
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option("daemon"):
            daemon = DynamicDynamoDBDaemon(
                "{0}/dynamic-dynamodb.{1}.pid".format(get_global_option("pid_file_dir"), get_global_option("instance"))
            )

            if get_global_option("daemon") == "start":
                daemon.start()

            elif get_global_option("daemon") == "stop":
                logger.debug("Stopping daemon")
                daemon.stop()
                logger.info("Daemon stopped")
                sys.exit(0)

            elif get_global_option("daemon") == "restart":
                daemon.restart()

            elif get_global_option("daemon") in ["foreground", "fg"]:
                daemon.run()

            else:
                print("Valid options for --daemon are start, stop and restart")
                sys.exit(1)
        else:
            while True:
                execute()

    except Exception as error:
        logger.exception(error)
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug('Connecting to DynamoDB in {0}'.format(
            get_global_option('region')))

        if (get_global_option('aws_access_key_id')
                and get_global_option('aws_secret_access_key')):
            logger.debug('Authenticating to DynamoDB using '
                         'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to DynamoDB using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info']
                    [u'InstanceProfileArn'])
            except KeyError:
                logger.debug('Authenticating to DynamoDB using '
                             'env vars / boto configuration')
                connection = dynamodb2.connect_to_region(
                    get_global_option('region'))

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise Exception('Failed to connect to DynamoDB')
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(
                get_global_option('region')))

    return connection
Example #20
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    region = get_global_option('region')

    while not connected:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = dynamodb2.connect_to_region(region)

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise Exception('Failed to connect to DynamoDB')
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(region))

    return connection
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    region = get_global_option('region')

    while not connected:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to DynamoDB using '
                'credentials in configuration file')
            connection = dynamodb2.connect_to_region(
                region,
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            logger.debug(
                'Authenticating using boto\'s authentication handler')
            connection = dynamodb2.connect_to_region(region)

        if not connection:
            if retries == 0:
                logger.error('Failed to connect to DynamoDB. Giving up.')
                raise Exception('Failed to connect to DynamoDB')
            else:
                logger.error(
                    'Failed to connect to DynamoDB. Retrying in 5 seconds')
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug('Connected to DynamoDB in {0}'.format(region))

    return connection
Example #22
def __get_connection_dynamodb(retries=3):
    """ Ensure connection to DynamoDB

    :type retries: int
    :param retries: Number of times to retry to connect to DynamoDB
    """
    connected = False
    while not connected:
        logger.debug("Connecting to DynamoDB in {0}".format(get_global_option("region")))

        if get_global_option("aws_access_key_id") and get_global_option("aws_secret_access_key"):
            logger.debug("Authenticating to DynamoDB using " "credentials in configuration file")
            connection = dynamodb2.connect_to_region(
                get_global_option("region"),
                aws_access_key_id=get_global_option("aws_access_key_id"),
                aws_secret_access_key=get_global_option("aws_secret_access_key"),
            )
        else:
            try:
                logger.debug("Authenticating to DynamoDB using EC2 instance profile")
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = dynamodb2.connect_to_region(
                    metadata["placement"]["availability-zone"][:-1],
                    profile_name=metadata["iam"]["info"][u"InstanceProfileArn"],
                )
            except KeyError:
                logger.debug("Authenticating to DynamoDB using " "env vars / boto configuration")
                connection = dynamodb2.connect_to_region(get_global_option("region"))

        if not connection:
            if retries == 0:
                logger.error("Failed to connect to DynamoDB. Giving up.")
                raise Exception("Failed to connect to DynamoDB")
            else:
                logger.error("Failed to connect to DynamoDB. Retrying in 5 seconds")
                retries -= 1
                time.sleep(5)
        else:
            connected = True
            logger.debug("Connected to DynamoDB in {0}".format(get_global_option("region")))

    return connection
def ensure_deleted(table_name):
    """ Ensure table has been deleted from DynamoDB
    
    :type table_name: str
    :param table_name: Name of the DynamoDB table
    """
    try:
        DYNAMODB_CONNECTION.describe_table(table_name)[u'Table']
        logger.info('{0} - Deleting table'.format(table_name))

        # Return if dry-run
        if get_global_option('dry_run'):
            return

        DYNAMODB_CONNECTION.delete_table(table_name)
    except JSONResponseError:
        return
Example #24
def __get_connection_cloudwatch():
    """ Ensure connection to SNS """
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to CloudWatch using '
                'credentials in configuration file')
            connection = cloudwatch.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option('aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to CloudWatch using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = cloudwatch.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to CloudWatch using '
                    'env vars / boto configuration')
                connection = cloudwatch.connect_to_region(
                    get_global_option('region'))

    except Exception as err:
        logger.error('Failed connecting to CloudWatch: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to CloudWatch in {0}'.format(
        get_global_option('region')))
    return connection
Example #25
def __get_connection_SNS():
    """ Ensure connection to SNS """
    try:
        if (get_global_option('aws_access_key_id') and
                get_global_option('aws_secret_access_key')):
            logger.debug(
                'Authenticating to SNS using '
                'credentials in configuration file')
            connection = sns.connect_to_region(
                get_global_option('region'),
                aws_access_key_id=get_global_option(
                    'aws_access_key_id'),
                aws_secret_access_key=get_global_option(
                    'aws_secret_access_key'))
        else:
            try:
                logger.debug(
                    'Authenticating to SNS using EC2 instance profile')
                metadata = get_instance_metadata(timeout=1, num_retries=1)
                connection = sns.connect_to_region(
                    metadata['placement']['availability-zone'][:-1],
                    profile_name=metadata['iam']['info'][u'InstanceProfileArn'])
            except KeyError:
                logger.debug(
                    'Authenticating to SNS using '
                    'env vars / boto configuration')
                connection = sns.connect_to_region(get_global_option('region'))

    except Exception as err:
        logger.error('Failed connecting to SNS: {0}'.format(err))
        logger.error(
            'Please report an issue at: '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
        raise

    logger.debug('Connected to SNS in {0}'.format(get_global_option('region')))
    return connection
def update_gsi_provisioning(table_name,
                            table_key,
                            gsi_name,
                            gsi_key,
                            reads,
                            writes,
                            retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame
    m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows')
    if m_windows:
        if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows):
            logger.warning(
                '{0} - GSI: {1} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(
                    table_name, gsi_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info('{0} - GSI: {1} - '
                            'No need to scale up reads nor writes'.format(
                                table_name, gsi_name))
                return

        else:
            logger.info('{0} - GSI: {1} - '
                        'Current time is within maintenance window'.format(
                            table_name, gsi_name))

    logger.info('{0} - GSI: {1} - '
                'Updating provisioning to {2} reads and {3} writes'.format(
                    table_name, gsi_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        DYNAMODB_CONNECTION.update_table(table_name=table_name,
                                         global_secondary_index_updates=[{
                                             "Update": {
                                                 "IndexName": gsi_name,
                                                 "ProvisionedThroughput": {
                                                     "ReadCapacityUnits":
                                                     reads,
                                                     "WriteCapacityUnits":
                                                     writes
                                                 }
                                             }
                                         }])

        message = ('{0} - GSI: {1} - Provisioning updated to '
                   '{2} reads and {3} writes').format(table_name, gsi_name,
                                                      reads, writes)

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        sns.publish_gsi_notification(
            table_key,
            gsi_key,
            message,
            sns_message_types,
            subject='Updated provisioning for GSI {0}'.format(gsi_name))

    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = ['LimitExceededException']
        if exception in known_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            logger.error(
                ('{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                 'Please file a bug report at '
                 'https://github.com/sebdah/dynamic-dynamodb/issues').format(
                     table_name, gsi_name, exception, error.body['message']))

        if (not retry_with_only_increase
                and exception == 'LimitExceededException'):
            logger.info('{0} - GSI: {1} - Will retry to update provisioning '
                        'with only increases'.format(table_name, gsi_name))
            update_gsi_provisioning(table_name,
                                    table_key,
                                    gsi_name,
                                    gsi_key,
                                    reads,
                                    writes,
                                    retry_with_only_increase=True)
def update_table_provisioning(table_name,
                              key_name,
                              reads,
                              writes,
                              retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(table_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return

        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        table.update(throughput={'read': reads, 'write': writes})

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        message = (
            '{0} - Provisioning updated to {1} reads and {2} writes').format(
                table_name, reads, writes)

        sns.publish_table_notification(
            key_name,
            message,
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = [
            'LimitExceededException', 'ValidationException',
            'ResourceInUseException'
        ]

        if exception in known_exceptions:
            logger.warning('{0} - {1}: {2}'.format(table_name, exception,
                                                   error.body['message']))
        else:
            logger.error(
                ('{0} - Unhandled exception: {1}: {2}. '
                 'Please file a bug report at '
                 'https://github.com/sebdah/dynamic-dynamodb/issues').format(
                     table_name, exception, error.body['message']))

        if (not retry_with_only_increase
                and exception == 'LimitExceededException'):
            logger.info('{0} - Will retry to update provisioning '
                        'with only increases'.format(table_name))
            update_table_provisioning(table_name,
                                      key_name,
                                      reads,
                                      writes,
                                      retry_with_only_increase=True)
Example #28
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open

    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(r'^(?P<scheme>http(s)?://)'
                         r'((?P<username>.+):(?P<password>.+)@){0,1}'
                         r'(?P<url>.*)$')

    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')

    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')

    match = pattern.match(url)
    if not match:
        logger.error('Malformatted URL: {0}'.format(url))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(scheme=match.group('scheme'),
                                     url=match.group('url'))
        auth = (match.group('username'), match.group('password'))

    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name

    # Make the actual request
    try:
        response = requests.get(url,
                                auth=auth,
                                timeout=timeout / 1000.00,
                                headers=headers)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))

    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error('Please file a bug at '
                     'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True
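A usage sketch of the function above: the URL may embed HTTP basic auth credentials in the usual user:password@host form, the timeout option is divided by 1000 and is therefore in milliseconds, and both options can be set globally, per table or per GSI (the names below are placeholders):

# Sketch: circuit breaker checked at table level and at GSI level.
if is_open(table_name='my-table', table_key='^my-table$'):
    logger.warning('Circuit breaker is OPEN - skipping table checks')

if is_open(table_name='my-table', table_key='^my-table$',
           gsi_name='my-gsi', gsi_key='^my-gsi$'):
    logger.warning('Circuit breaker is OPEN - skipping GSI checks')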
def update_gsi_provisioning(
        table_name, table_key, gsi_name, gsi_key,
        reads, writes, retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name))
    current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name))

    # Make sure we aren't scaling down if we turned off downscaling
    if (not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling') or
            not get_gsi_option(
                table_key, gsi_key, 'enable_writes_down_scaling')):
        if (not get_gsi_option(
                table_key, gsi_key, 'enable_reads_down_scaling') and
                current_reads > reads):
            reads = current_reads
        if (not get_gsi_option(
                table_key, gsi_key, 'enable_writes_down_scaling') and
                current_writes > writes):
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - GSI: {1} - No need to scale up reads nor writes'.format(
                    table_name, gsi_name))
            return

        logger.info(
            '{0} - GSI: {1} - Retrying to update provisioning, '
            'excluding any decreases. '
            'Setting new reads to {2} and new writes to {3}'.format(
                table_name, gsi_name, reads, writes))

    # Check that we are in the right time frame
    m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows')
    if m_windows:
        if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows):
            logger.warning(
                '{0} - GSI: {1} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(
                    table_name,
                    gsi_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - GSI: {1} - '
                    'No need to scale up reads nor writes'.format(
                        table_name,
                        gsi_name))
                return

        else:
            logger.info(
                '{0} - GSI: {1} - '
                'Current time is within maintenance window'.format(
                    table_name,
                    gsi_name))

    logger.info(
        '{0} - GSI: {1} - '
        'Updating provisioning to {2} reads and {3} writes'.format(
            table_name, gsi_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {
                            "ReadCapacityUnits": reads,
                            "WriteCapacityUnits": writes
                        }
                    }
                }
            ])

        message = []
        if current_reads > reads:
            message.append(
                '{0} - GSI: {1} - Reads: DOWN from {2} to {3}\n'.format(
                    table_name, gsi_name, current_reads, reads))
        elif current_reads < reads:
            message.append(
                '{0} - GSI: {1} - Reads: UP from {2} to {3}\n'.format(
                    table_name, gsi_name, current_reads, reads))
        if current_writes > writes:
            message.append(
                '{0} - GSI: {1} - Writes: DOWN from {2} to {3}\n'.format(
                    table_name, gsi_name, current_writes, writes))
        elif current_writes < writes:
            message.append(
                '{0} - GSI: {1} - Writes: UP from {2} to {3}\n'.format(
                    table_name, gsi_name, current_writes, writes))

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        sns.publish_gsi_notification(
            table_key,
            gsi_key,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for GSI {0}'.format(gsi_name))

    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = ['LimitExceededException']
        if exception in known_exceptions:
            logger.warning('{0} - GSI: {1} - {2}: {3}'.format(
                table_name, gsi_name, exception, error.body['message']))
        else:
            logger.error(
                (
                    '{0} - GSI: {1} - Unhandled exception: {2}: {3}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(
                    table_name, gsi_name, exception, error.body['message']))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - GSI: {1} - Will retry to update provisioning '
                'with only increases'.format(table_name, gsi_name))
            update_gsi_provisioning(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                reads,
                writes,
                retry_with_only_increase=True)
def update_table_provisioning(
        table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    # Make sure we aren't scaling down if we turned off downscaling
    if (not get_table_option(key_name, 'enable_reads_down_scaling') or
            not get_table_option(key_name, 'enable_writes_down_scaling')):
        if (not get_table_option(key_name, 'enable_reads_down_scaling') and
                current_reads > reads):
            reads = current_reads
        if (not get_table_option(key_name, 'enable_writes_down_scaling') and
                current_writes > writes):
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(table_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return

        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        table.update(
            throughput={
                'read': reads,
                'write': writes
            })

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        message = []
        if current_reads > reads:
            message.append('{0} - Reads: DOWN from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        elif current_reads < reads:
            message.append('{0} - Reads: UP from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        if current_writes > writes:
            message.append('{0} - Writes: DOWN from {1} to {2}\n'.format(
                table_name, current_writes, writes))
        elif current_writes < writes:
            message.append('{0} - Writes: UP from {1} to {2}\n'.format(
                table_name, current_writes, writes))

        sns.publish_table_notification(
            key_name,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException']

        if exception in known_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            if 'message' in error.body:
                msg = error.body['message']
            else:
                msg = error

            logger.error(
                (
                    '{0} - Unhandled exception: {1}: {2}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(table_name, exception, msg))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - Will retry to update provisioning '
                'with only increases'.format(table_name))
            update_table_provisioning(
                table_name,
                key_name,
                reads,
                writes,
                retry_with_only_increase=True)
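
The LimitExceededException branch above retries the whole call once with retry_with_only_increase=True, which clamps any requested decrease back to the currently provisioned value. A minimal, self-contained sketch of that clamping step (illustrative names, not the project's helpers):

def clamp_to_increases_only(current_reads, current_writes, reads, writes):
    """ Never request fewer units than are currently provisioned """
    # Any requested decrease is replaced by the current value
    reads = max(reads, current_reads)
    writes = max(writes, current_writes)
    return reads, writes

# A write decrease is dropped while the read increase is kept
print(clamp_to_increases_only(100, 200, 150, 50))  # -> (150, 200)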
Exemple #31
0
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The return var shows how many times the scale-down criteria
            #  has been met. This is coupled with a var in config,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Add regexp table names
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()

                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug('Table {0} GSI {1} matches '
                                         'GSI config key {2}'.format(
                                             table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))

                    except re.error:
                        logger.error(
                            'Invalid regular expression: "{0}"'.format(
                                gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['tables'][table_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['tables'][table_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name, table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status, error.reason))
                logger.error('Please file a bug report if this error persists')
                boto_server_error_retries -= 1
                continue

            else:
                raise

    # Sleep between the checks
    logger.debug('Sleeping {0} seconds until next check'.format(
        get_global_option('check_interval')))
    time.sleep(get_global_option('check_interval'))
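
The counters cached in CHECK_STATUS implement the delay described in the comment above: a scale-down is only acted on once the criteria has been met for num_intervals_scale_down consecutive checks. A rough sketch of that idea with a hypothetical helper (not the project's own function):

def should_scale_down(wants_scale_down, num_consec_checks, num_intervals_scale_down):
    """ Return (apply_now, updated_counter) for one check interval """
    if not wants_scale_down:
        # Criteria not met this interval, so the streak resets
        return False, 0

    num_consec_checks += 1
    # Only scale down after enough consecutive low-usage checks
    return num_consec_checks >= num_intervals_scale_down, num_consec_checks

# With num_intervals_scale_down = 3 the third consecutive check triggers it
counter = 0
for _ in range(3):
    apply_now, counter = should_scale_down(True, counter, 3)
print(apply_now, counter)  # -> True 3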
Exemple #32
0
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        boto_server_error_retries = 3

        while True:
            if get_global_option('daemon'):
                pid_file = '/tmp/dynamic-dynamodb.{0}.pid'.format(
                    get_global_option('instance'))
                daemon = DynamicDynamoDBDaemon(pid_file)

                if get_global_option('daemon') == 'start':
                    daemon.start(
                        check_interval=get_global_option('check_interval'))

                elif get_global_option('daemon') == 'stop':
                    daemon.stop()
                    sys.exit(0)

                elif get_global_option('daemon') == 'restart':
                    daemon.restart(
                        check_interval=get_global_option('check_interval'))

                elif get_global_option('daemon') in ['foreground', 'fg']:
                    daemon.run(
                        check_interval=get_global_option('check_interval'))

                else:
                    print(
                        'Valid options for --daemon are '
                        'start, stop and restart')
                    sys.exit(1)
            else:
                # Ensure provisioning
                for table_name, table_key in dynamodb.get_tables_and_gsis():
                    try:
                        table.ensure_provisioning(table_name, table_key)

                        gsi_names = set()
                        # Add regexp table names
                        if get_table_option(table_key, 'gsis'):
                            for gst_instance in dynamodb.table_gsis(table_name):
                                gsi_name = gst_instance[u'IndexName']

                                try:
                                    gsi_keys = get_table_option(
                                        table_key, 'gsis').keys()
                                except AttributeError:
                                    continue

                                for gsi_key in gsi_keys:
                                    try:
                                        if re.match(gsi_key, gsi_name):
                                            logger.debug(
                                                'Table {0} GSI {1} match with '
                                                'GSI config key {2}'.format(
                                                    table_name,
                                                    gsi_name,
                                                    gsi_key))
                                            gsi_names.add(
                                                (
                                                    gsi_name,
                                                    gsi_key
                                                ))
                                    except re.error:
                                        logger.error(
                                            'Invalid regular expression: '
                                            '"{0}"'.format(gsi_key))
                                        sys.exit(1)

                        gsi_names = sorted(gsi_names)

                        for gsi_name, gsi_key in gsi_names:
                            gsi.ensure_provisioning(
                                table_name,
                                table_key,
                                gsi_name,
                                gsi_key)

                    except JSONResponseError as error:
                        exception = error.body['__type'].split('#')[1]
                        if exception == 'ResourceNotFoundException':
                            logger.error(
                                '{0} - Table {1} does not exist anymore'.format(
                                    table_name, table_name))
                            continue

                    except BotoServerError as error:
                        if boto_server_error_retries > 0:
                            logger.error(
                                'Unknown boto error. Status: "{0}". '
                                'Reason: "{1}"'.format(
                                    error.status,
                                    error.reason))
                            logger.error(
                                'Please file a bug report if this '
                                'error persists')
                            boto_server_error_retries -= 1
                            continue
                        else:
                            raise

            # Sleep between the checks
            logger.debug('Sleeping {0} seconds until next check'.format(
                get_global_option('check_interval')))
            time.sleep(get_global_option('check_interval'))
    except Exception as error:
        logger.exception(error)
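
The GSI configuration keys are treated as regular expressions and matched against the index names that DynamoDB reports (the re.match calls above), so one config key can cover several indexes. A standalone illustration with made-up names:

import re

gsi_config_keys = ['^demo_gsi_.*', 'exact_index_name']
index_names = ['demo_gsi_by_date', 'demo_gsi_by_user', 'exact_index_name', 'other']

matched = set()
for gsi_name in index_names:
    for gsi_key in gsi_config_keys:
        # re.match anchors at the start of the index name, as in the code above
        if re.match(gsi_key, gsi_name):
            matched.add((gsi_name, gsi_key))

print(sorted(matched))
# [('demo_gsi_by_date', '^demo_gsi_.*'), ('demo_gsi_by_user', '^demo_gsi_.*'),
#  ('exact_index_name', 'exact_index_name')]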
Exemple #33
0
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The return var shows how many times the scale-down criteria
            #  has been met. This is coupled with a var in config,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Add regexp table names
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()

                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))

                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(
                            gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['tables'][table_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['tables'][table_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name,
                    table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status,
                        error.reason))
                logger.error(
                    'Please file a bug report if this error persists')
                boto_server_error_retries -= 1
                continue

            else:
                raise

    # Sleep between the checks
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next check'.format(
            get_global_option('check_interval')))
        time.sleep(get_global_option('check_interval'))
Exemple #34
0
def update_gsi_provisioning(table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=False):
    """ Update provisioning on a global secondary index

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type reads: int
    :param reads: Number of reads to provision
    :type writes: int
    :param writes: Number of writes to provision
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name))
    current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info("{0} - GSI: {1} - No need to scale up reads nor writes".format(table_name, gsi_name))
            return

        logger.info(
            "{0} - GSI: {1} - Retrying to update provisioning, excluding any decreases. "
            "Setting new reads to {2} and new writes to {3}".format(table_name, gsi_name, reads, writes)
        )

    # Check that we are in the right time frame
    m_windows = get_gsi_option(table_key, gsi_key, "maintenance_windows")
    if m_windows:
        if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows):
            logger.warning(
                "{0} - GSI: {1} - We are outside a maintenace window. "
                "Will only perform up scaling activites".format(table_name, gsi_name)
            )

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info("{0} - GSI: {1} - " "No need to scale up reads nor writes".format(table_name, gsi_name))
                return

        else:
            logger.info("{0} - GSI: {1} - " "Current time is within maintenance window".format(table_name, gsi_name))

    logger.info(
        "{0} - GSI: {1} - "
        "Updating provisioning to {2} reads and {3} writes".format(table_name, gsi_name, reads, writes)
    )

    # Return if dry-run
    if get_global_option("dry_run"):
        return

    try:
        DYNAMODB_CONNECTION.update_table(
            table_name=table_name,
            global_secondary_index_updates=[
                {
                    "Update": {
                        "IndexName": gsi_name,
                        "ProvisionedThroughput": {"ReadCapacityUnits": reads, "WriteCapacityUnits": writes},
                    }
                }
            ],
        )

        message = ("{0} - GSI: {1} - Provisioning updated to " "{2} reads and {3} writes").format(
            table_name, gsi_name, reads, writes
        )

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append("scale-down")
        if current_reads < reads or current_writes < writes:
            sns_message_types.append("scale-up")

        sns.publish_gsi_notification(
            table_key, gsi_key, message, sns_message_types, subject="Updated provisioning for GSI {0}".format(gsi_name)
        )

    except JSONResponseError as error:
        exception = error.body["__type"].split("#")[1]
        know_exceptions = ["LimitExceededException"]
        if exception in know_exceptions:
            logger.warning("{0} - GSI: {1} - {2}: {3}".format(table_name, gsi_name, exception, error.body["message"]))
        else:
            logger.error(
                (
                    "{0} - GSI: {1} - Unhandled exception: {2}: {3}. "
                    "Please file a bug report at "
                    "https://github.com/sebdah/dynamic-dynamodb/issues"
                ).format(table_name, gsi_name, exception, error.body["message"])
            )

        if not retry_with_only_increase and exception == "LimitExceededException":
            logger.info(
                "{0} - GSI: {1} - Will retry to update provisioning " "with only increases".format(table_name, gsi_name)
            )
            update_gsi_provisioning(
                table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=True
            )
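
The update issued through DYNAMODB_CONNECTION.update_table maps to the DynamoDB UpdateTable API's GlobalSecondaryIndexUpdates parameter. For reference only, a roughly equivalent call with boto3 (an assumption outside this project, which uses boto; requires boto3 and configured credentials, and all values below are illustrative):

import boto3

client = boto3.client('dynamodb', region_name='us-east-1')

# Raise the provisioned throughput of a single GSI
client.update_table(
    TableName='my-table',
    GlobalSecondaryIndexUpdates=[
        {
            'Update': {
                'IndexName': 'my-gsi',
                'ProvisionedThroughput': {
                    'ReadCapacityUnits': 100,
                    'WriteCapacityUnits': 50
                }
            }
        }
    ])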
Exemple #35
0
def update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    try:
        table = dynamodb.get_table(table_name)
    except DynamoDBResponseError:
        # Return if the table does not exist
        return None

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    if table.status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is in {1} state'.format(table_name, table.status))

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if ((read_units < table.read_units) or
                (table.read_units == get_table_option(
                    key_name, 'min_provisioned_reads'))):
            if ((write_units < table.write_units) or
                    (table.write_units == get_table_option(
                        key_name, 'min_provisioned_writes'))):
                logger.info(
                    '{0} - Both reads and writes will be decreased'.format(
                        table_name))

        elif read_units < table.read_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            read_units = table.read_units
        elif write_units < table.write_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            write_units = table.write_units

    if read_units == table.read_units and write_units == table.write_units:
        logger.debug('{0} - No need to update provisioning'.format(table_name))
        return

    if not get_global_option('dry_run'):
        try:
            table.update_throughput(int(read_units), int(write_units))
            logger.info('Provisioning updated')
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'LimitExceededException':
                logger.warning(
                    '{0} - {1}'.format(table_name, error.body['message']))

                if int(read_units) > table.read_units:
                    logger.info('{0} - Scaling up reads to {1:d}'.format(
                        table_name,
                        int(read_units)))
                    update_throughput(
                        table_name,
                        int(read_units),
                        int(table.write_units),
                        key_name)

                elif int(write_units) > table.write_units:
                    logger.info('{0} - Scaling up writes to {1:d}'.format(
                        table_name,
                        int(write_units)))
                    update_throughput(
                        table_name,
                        int(table.read_units),
                        int(write_units),
                        key_name)

            elif dynamodb_error == 'ValidationException':
                logger.warning('{0} - ValidationException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'ResourceInUseException':
                logger.warning('{0} - ResourceInUseException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'AccessDeniedException':
                logger.warning('{0} - AccessDeniedException: {1}'.format(
                    table_name,
                    error.body['message']))

            else:
                logger.error(
                    (
                        '{0} - Unhandled exception: {1}: {2}. '
                        'Please file a bug report at '
                        'https://github.com/sebdah/dynamic-dynamodb/issues'
                    ).format(
                        table_name,
                        dynamodb_error,
                        error.body['message']))
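
The always_decrease_rw_together branch above holds back a one-sided decrease: reads and writes are only lowered when both are below their current provisioning (the minimum-provisioned special case is left out of this sketch). A compact standalone version of that gate, with illustrative names:

def gate_decreases(read_units, write_units, current_reads, current_writes):
    """ Drop a one-sided decrease; keep it only when both sides go down """
    reads_down = read_units < current_reads
    writes_down = write_units < current_writes

    if reads_down and not writes_down:
        # Wait until writes are low as well before decreasing anything
        read_units = current_reads
    elif writes_down and not reads_down:
        write_units = current_writes

    return read_units, write_units

print(gate_decreases(50, 300, 100, 200))  # -> (100, 300): read decrease held back
print(gate_decreases(50, 100, 100, 200))  # -> (50, 100): both low, decrease allowed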
Exemple #36
0
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table.ensure_provisioning(table_name, table_key)

            gsi_names = set()
            # Add regexp table names
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()

                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))

                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(
                            gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                gsi.ensure_provisioning(
                    table_name,
                    table_key,
                    gsi_name,
                    gsi_key)

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name,
                    table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status,
                        error.reason))
                logger.error(
                    'Please file a bug report if this error persists')
                boto_server_error_retries -= 1
                continue

            else:
                raise

    # Sleep between the checks
    logger.debug('Sleeping {0} seconds until next check'.format(
        get_global_option('check_interval')))
    time.sleep(get_global_option('check_interval'))
Exemple #37
0
def ensure_provisioning(table_name, table_key, gsi_name, gsi_key,
                        num_consec_read_checks, num_consec_write_checks):
    """ Ensure that provisioning is correct for Global Secondary Indexes

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: GSI configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """
    if get_global_option('circuit_breaker_url'):
        if circuit_breaker.is_open():
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)

    logger.info(
        '{0} - Will ensure provisioning for global secondary index {1}'.format(
            table_name, gsi_name))

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name,
                table_key,
                gsi_name,
                gsi_key,
                num_consec_write_checks)

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info('{0} - GSI: {1} - Changing provisioning to {2:d} '
                        'read units and {3:d} write units'.format(
                            table_name, gsi_name, int(updated_read_units),
                            int(updated_write_units)))
            __update_throughput(table_name, table_key, gsi_name, gsi_key,
                                updated_read_units, updated_write_units)
        else:
            logger.info(
                '{0} - GSI: {1} - No need to change provisioning'.format(
                    table_name, gsi_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
Exemple #38
0
def update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    table = dynamodb.get_table(table_name)

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    if table.status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is in {1} state'.format(table_name, table.status))

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if ((read_units < table.read_units) or
                (table.read_units == get_table_option(
                    key_name, 'min_provisioned_reads'))):
            if ((write_units < table.write_units) or
                    (table.write_units == get_table_option(
                        key_name, 'min_provisioned_writes'))):
                logger.info(
                    '{0} - Both reads and writes will be decreased'.format(
                        table_name))

        elif read_units < table.read_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            read_units = table.read_units
        elif write_units < table.write_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            write_units = table.write_units

    if read_units == table.read_units and write_units == table.write_units:
        logger.debug('{0} - No need to update provisioning'.format(table_name))
        return

    if not get_global_option('dry_run'):
        try:
            table.update_throughput(int(read_units), int(write_units))
            logger.info('Provisioning updated')
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'LimitExceededException':
                logger.warning(
                    '{0} - {1}'.format(table_name, error.body['message']))

                if int(read_units) > table.read_units:
                    logger.info('{0} - Scaling up reads to {1:d}'.format(
                        table_name,
                        int(read_units)))
                    update_throughput(
                        table_name,
                        int(read_units),
                        int(table.write_units),
                        key_name)

                elif int(write_units) > table.write_units:
                    logger.info('{0} - Scaling up writes to {1:d}'.format(
                        table_name,
                        int(write_units)))
                    update_throughput(
                        table_name,
                        int(table.read_units),
                        int(write_units),
                        key_name)

            elif dynamodb_error == 'ValidationException':
                logger.warning('{0} - ValidationException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'ResourceInUseException':
                logger.warning('{0} - ResourceInUseException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'AccessDeniedException':
                logger.warning('{0} - AccessDeniedException: {1}'.format(
                    table_name,
                    error.body['message']))

            else:
                logger.error(
                    (
                        '{0} - Unhandled exception: {1}: {2}. '
                        'Please file a bug report at '
                        'https://github.com/sebdah/dynamic-dynamodb/issues'
                    ).format(
                        table_name,
                        dynamodb_error,
                        error.body['message']))
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open

    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )

    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')

    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')

    match = pattern.match(url)
    if not match:
        logger.error('Malformed URL: {0}'.format(url))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))

    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name

    # Make the actual request
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=timeout / 1000.00,
            headers=headers)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))

    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True
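
The regular expression above splits optional basic-auth credentials out of the configured circuit breaker URL; the credentials are then handed to requests separately while the URL is rebuilt without them. A standalone illustration with a made-up URL:

import re

pattern = re.compile(
    r'^(?P<scheme>http(s)?://)'
    r'((?P<username>.+):(?P<password>.+)@){0,1}'
    r'(?P<url>.*)$')

match = pattern.match('https://alice:secret@breaker.example.com/status')

url = '{scheme}{url}'.format(
    scheme=match.group('scheme'),
    url=match.group('url'))
auth = (match.group('username'), match.group('password'))

print(url)   # https://breaker.example.com/status
print(auth)  # ('alice', 'secret')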
Exemple #40
0
def ensure_provisioning(
        table_name, key_name,
        num_consec_read_checks,
        num_consec_write_checks):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """

    if get_global_option('circuit_breaker_url') or get_table_option(
            key_name, 'circuit_breaker_url'):
        if circuit_breaker.is_open(table_name, key_name):
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)

    # Handle throughput alarm checks
    __ensure_provisioning_alarm(table_name, key_name)

    ts = TimeSeriesTable(
        get_table_option('time_series_tables', 'time_series_tables'),
        get_table_option(
            'time_series_tables_no_scale_period_in_seconds',
            'time_series_tables_no_scale_period_in_seconds'))

    if ts.is_in_future(table_name):
        logger.info(
            '{0} - Time series table is in the future, '
            'skipping provisioning'.format(table_name))
        return (0, 0)

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name,
                key_name,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name,
                key_name,
                num_consec_write_checks)

        if read_update_needed:
            num_consec_read_checks = 0

        if write_update_needed:
            num_consec_write_checks = 0

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info(
                '{0} - Changing provisioning to {1:d} '
                'read units and {2:d} write units'.format(
                    table_name,
                    int(updated_read_units),
                    int(updated_write_units)))
            __update_throughput(
                table_name,
                key_name,
                updated_read_units,
                updated_write_units)
        else:
            logger.info('{0} - No need to change provisioning'.format(
                table_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
Exemple #41
0
def __circuit_breaker_is_open():
    """ Checks wether the circuit breaker is open

    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )
    match = pattern.match(get_global_option('circuit_breaker_url'))

    if not match:
        logger.error('Malformed URL: {0}'.format(
            get_global_option('circuit_breaker_url')))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))
    else:
        url = get_global_option('circuit_breaker_url')
        auth = ()

    # Make the actual request
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=get_global_option('circuit_breaker_timeout') / 1000.00)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))

    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        if get_global_option('show_config'):
            print(json.dumps(config.get_configuration(), indent=2))
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error(
                        'Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)

            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')

            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')

            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, and foreground')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()

    except Exception as error:
        logger.exception(error)
    except KeyboardInterrupt:
        while threading.active_count() > 1:
            for thread in table_threads.values():
                thread.do_run = False
            logger.info('Waiting for all threads... {}s'.format(
                get_global_option('check_interval')))
            time.sleep(get_global_option('check_interval'))
        raise
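
On KeyboardInterrupt the loop above asks every worker thread to stop by setting thread.do_run = False and then waits for them to exit. A minimal, self-contained sketch of that cooperative shutdown pattern (the worker body is illustrative):

import threading
import time

def worker():
    current = threading.current_thread()
    # Keep working until someone sets current.do_run = False
    while getattr(current, 'do_run', True):
        time.sleep(0.1)

thread = threading.Thread(target=worker)
thread.start()

time.sleep(0.3)
thread.do_run = False  # ask the worker to stop at its next check
thread.join()
print('worker stopped cleanly')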
Exemple #43
0
def __update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    provisioned_reads = table_stats.get_provisioned_read_units(table_name)
    provisioned_writes = table_stats.get_provisioned_write_units(table_name)

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    table_status = dynamodb.get_table_status(table_name)
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if read_units < provisioned_reads and write_units < provisioned_writes:
            logger.debug(
                '{0} - Both reads and writes will be decreased'.format(
                    table_name))
        elif read_units < provisioned_reads:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            return
        elif write_units < provisioned_writes:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            return

    if not get_global_option('dry_run'):
        dynamodb.update_table_provisioning(
            table_name,
            int(read_units),
            int(write_units))
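
__is_maintenance_window itself is not shown here; assuming the maintenance_windows option is a comma-separated list of HH:MM-HH:MM ranges (an assumption about the config format, and ignoring windows that wrap past midnight), a rough version of such a check could look like:

from datetime import datetime

def is_in_maintenance_window(maintenance_windows, now=None):
    """ Return True if 'now' falls inside any HH:MM-HH:MM window """
    now = now or datetime.utcnow()
    current = now.strftime('%H:%M')

    for window in maintenance_windows.split(','):
        start, end = window.strip().split('-')
        # Plain string comparison works for zero-padded HH:MM values
        if start <= current <= end:
            return True
    return False

print(is_in_maintenance_window(
    '22:00-23:59,00:00-06:00',
    now=datetime(2024, 1, 1, 23, 30)))  # -> True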
Exemple #44
0
def __update_throughput(
        table_name, table_key, gsi_name, gsi_key, read_units, write_units):
    """ Update throughput on the GSI

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type table_key: str
    :param table_key: Table configuration option key name
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type gsi_key: str
    :param gsi_key: Configuration option key name
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    """
    current_ru = gsi_stats.get_provisioned_read_units(
        table_name, gsi_name)
    current_wu = gsi_stats.get_provisioned_write_units(
        table_name, gsi_name)

    # Check that we are in the right time frame
    if get_gsi_option(table_key, gsi_key, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, gsi_name, get_gsi_option(
                table_key, gsi_key, 'maintenance_windows'))):

            logger.warning(
                '{0} - GSI: {1} - '
                'Current time is outside maintenance window'.format(
                    table_name,
                    gsi_name))
            return
        else:
            logger.info(
                '{0} - GSI: {1} - '
                'Current time is within maintenance window'.format(
                    table_name,
                    gsi_name))

    # Check table status
    gsi_status = dynamodb.get_gsi_status(table_name, gsi_name)
    logger.debug('{0} - GSI: {1} - GSI status is {2}'.format(
        table_name, gsi_name, gsi_status))
    if gsi_status != 'ACTIVE':
        logger.warning(
            '{0} - GSI: {1} - Not performing throughput changes when GSI '
            'status is {2}'.format(table_name, gsi_name, gsi_status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_gsi_option(table_key, gsi_key, 'always_decrease_rw_together'):
        if read_units < current_ru and write_units < current_wu:
            logger.debug(
                '{0} - GSI: {1} - '
                'Both reads and writes will be decreased'.format(
                    table_name,
                    gsi_name))
        elif read_units < current_ru:
            logger.info(
                '{0} - GSI: {1} - '
                'Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(
                    table_name, gsi_name))
            return
        elif write_units < current_wu:
            logger.info(
                '{0} - GSI: {1} - '
                'Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(
                    table_name, gsi_name))
            return

    if not get_global_option('dry_run'):
        dynamodb.update_gsi_provisioning(
            table_name,
            gsi_name,
            int(read_units),
            int(write_units))
        logger.info(
            '{0} - GSI: {1} - '
            'Provisioning updated to {2} reads and {3} writes'.format(
                table_name,
                gsi_name,
                read_units,
                write_units))
Exemple #45
0
def update_table_provisioning(table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info("{0} - No need to scale up reads nor writes".format(table_name))
            return

        logger.info(
            "{0} - Retrying to update provisioning, excluding any decreases. "
            "Setting new reads to {1} and new writes to {2}".format(table_name, reads, writes)
        )

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, "maintenance_windows")
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                "{0} - We are outside a maintenace window. " "Will only perform up scaling activites".format(table_name)
            )

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info("{0} - No need to scale up reads nor writes".format(table_name))
                return

        else:
            logger.info("{0} - Current time is within maintenance window".format(table_name))

    logger.info("{0} - Updating provisioning to {1} reads and {2} writes".format(table_name, reads, writes))

    # Return if dry-run
    if get_global_option("dry_run"):
        return

    try:
        table.update(throughput={"read": reads, "write": writes})

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append("scale-down")
        if current_reads < reads or current_writes < writes:
            sns_message_types.append("scale-up")

        message = ("{0} - Provisioning updated to {1} reads and {2} writes").format(table_name, reads, writes)

        sns.publish_table_notification(
            key_name, message, sns_message_types, subject="Updated provisioning for table {0}".format(table_name)
        )
    except JSONResponseError as error:
        exception = error.body["__type"].split("#")[1]
        known_exceptions = ["LimitExceededException", "ValidationException", "ResourceInUseException"]

        if exception in known_exceptions:
            logger.warning("{0} - {1}: {2}".format(table_name, exception, error.body["message"]))
        else:
            logger.error(
                (
                    "{0} - Unhandled exception: {1}: {2}. "
                    "Please file a bug report at "
                    "https://github.com/sebdah/dynamic-dynamodb/issues"
                ).format(table_name, exception, error.body["message"])
            )

        if not retry_with_only_increase and exception == "LimitExceededException":
            logger.info("{0} - Will retry to update provisioning " "with only increases".format(table_name))
            update_table_provisioning(table_name, key_name, reads, writes, retry_with_only_increase=True)
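Two small helpers make the logic above easier to see in isolation: the retry_with_only_increase path effectively clamps the requested values so neither goes below the current provisioning, and the SNS branch classifies the change as scale-down, scale-up, or both. The helper names below are hypothetical, not part of the project:

def clamp_to_increase_only(current_reads, current_writes, reads, writes):
    """Never propose values below the currently provisioned throughput."""
    return max(reads, current_reads), max(writes, current_writes)

def classify_change(current_reads, current_writes, reads, writes):
    """Return the SNS message types a provisioning change would trigger."""
    message_types = []
    if current_reads > reads or current_writes > writes:
        message_types.append('scale-down')
    if current_reads < reads or current_writes < writes:
        message_types.append('scale-up')
    return message_types

# Reads go up while writes go down -> both notification types apply.
assert classify_change(100, 100, 200, 50) == ['scale-down', 'scale-up']
# The retry path would raise the write request back to the current level.
assert clamp_to_increase_only(100, 100, 200, 50) == (200, 100)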
Example #46
0
def __update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    try:
        current_ru = dynamodb.get_provisioned_table_read_units(table_name)
        current_wu = dynamodb.get_provisioned_table_write_units(table_name)
    except JSONResponseError:
        raise

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    try:
        table_status = dynamodb.get_table_status(table_name)
    except JSONResponseError:
        raise
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        read_units, write_units = __calculate_always_decrease_rw_values(
            table_name,
            read_units,
            current_ru,
            write_units,
            current_wu)

        if read_units == current_ru and write_units == current_wu:
            logger.info('{0} - No changes to perform'.format(table_name))
            return

    if not get_global_option('dry_run'):
        dynamodb.update_table_provisioning(
            table_name,
            key_name,
            int(read_units),
            int(write_units))
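__calculate_always_decrease_rw_values is defined elsewhere in the project; from the way it is used here, it appears to return a (reads, writes) pair in which a lone decrease is replaced by the current value, so only joint decreases survive. A hedged approximation of that behaviour, not the project's actual implementation:

def calculate_always_decrease_rw_values(
        read_units, current_ru, write_units, current_wu):
    """Only allow decreases when both reads and writes would decrease."""
    if read_units <= current_ru and write_units <= current_wu:
        # Both are decreases (or unchanged) -> pass them through.
        return read_units, write_units
    if read_units < current_ru:
        # Only reads would drop -> keep reads at the current level.
        return current_ru, write_units
    if write_units < current_wu:
        # Only writes would drop -> keep writes at the current level.
        return read_units, current_wu
    return read_units, write_units

# Reads want to drop but writes want to rise -> the read decrease is deferred.
assert calculate_always_decrease_rw_values(50, 100, 300, 200) == (100, 300)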
Example #47
0
    # NOTE: the beginning of this example is truncated in the source. The
    # fragment below is the tail of a rotating file handler definition that
    # is stored in LOG_CONFIG['handlers']['file'] when a log file is
    # configured.
            'filename': log_file,
            'when': 'midnight',
            'backupCount': 5
        }
        LOG_CONFIG['loggers']['']['handlers'].append('file')
        LOG_CONFIG['loggers']['dynamic-dynamodb']['handlers'].append('file')

    # Configure a custom log level
    if config_handler.get_logging_option('log_level'):
        LOG_CONFIG['handlers']['console']['level'] = \
            config_handler.get_logging_option('log_level').upper()
        if 'file' in LOG_CONFIG['handlers']:
            LOG_CONFIG['handlers']['file']['level'] = \
                config_handler.get_logging_option('log_level').upper()

    # Add dry-run to the formatter if in dry-run mode
    if config_handler.get_global_option('dry_run'):
        LOG_CONFIG['handlers']['console']['formatter'] = 'dry-run'
        if 'file' in LOG_CONFIG['handlers']:
            LOG_CONFIG['handlers']['file']['formatter'] = 'dry-run'

    try:
        dictconfig.dictConfig(LOG_CONFIG)
    except ValueError as error:
        print('Error configuring logger: {0}'.format(error))
        sys.exit(1)
    except:
        raise

LOGGER = logging.getLogger('dynamic-dynamodb')
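The keys being adjusted above (LOG_CONFIG['handlers'][...]['level'], ['formatter'], and the logger handler lists) index into a standard logging dictConfig structure. A minimal, self-contained sketch of that shape; the format strings and levels here are illustrative, not the project's actual defaults:

import logging
import logging.config

LOG_CONFIG = {
    'version': 1,
    'formatters': {
        'default': {
            'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'},
        'dry-run': {
            'format': '%(asctime)s - dryrun - %(name)s - %(levelname)s - %(message)s'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'default',
        },
    },
    'loggers': {
        '': {'handlers': ['console'], 'level': 'DEBUG'},
        'dynamic-dynamodb': {
            'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
    },
}

# Apply the configuration and emit a test record through the named logger.
logging.config.dictConfig(LOG_CONFIG)
logging.getLogger('dynamic-dynamodb').info('logging configured')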
Example #48
0
def __circuit_breaker_is_open():
    """ Checks wether the circuit breaker is open

    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )
    match = pattern.match(get_global_option('circuit_breaker_url'))

    if not match:
        logger.error('Malformed URL: {0}'.format(
            get_global_option('circuit_breaker_url')))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))
    else:
        url = get_global_option('circuit_breaker_url')
        auth = ()

    # Make the actual request
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=get_global_option('circuit_breaker_timeout') / 1000.00)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))

    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True
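For reference, the pattern above peels an optional user:password pair off the circuit breaker URL before the request is made. A quick, self-contained check of what the named groups capture; the example.com hosts are placeholders:

import re

pattern = re.compile(
    r'^(?P<scheme>http(s)?://)'
    r'((?P<username>.+):(?P<password>.+)@){0,1}'
    r'(?P<url>.*)$')

# URL with basic-auth credentials embedded in it.
with_auth = pattern.match('https://user:secret@cb.example.com/status')
assert with_auth.group('scheme') == 'https://'
assert with_auth.group('username') == 'user'
assert with_auth.group('password') == 'secret'
assert with_auth.group('url') == 'cb.example.com/status'

# URL without credentials: the optional auth group simply does not match.
without_auth = pattern.match('http://cb.example.com/status')
assert without_auth.group('username') is None
assert without_auth.group('url') == 'cb.example.com/status'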