Example #1
def increase_writes_in_units(table_name, current_provisioning, units, key_name):
    """ Increase the current_provisioning with units units

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :returns: int -- New provisioning value
    :type key_name: str
    :param key_name: Name of the key
    """
    updated_provisioning = int(current_provisioning) + int(units)
    logger.debug(
        'Write provisioning will be increased to {0:d} units'.format(
            updated_provisioning))

    if get_table_option(key_name, 'max_provisioned_writes') > 0:
        if (updated_provisioning >
            get_table_option(key_name, 'max_provisioned_writes')):

            logger.info('Reached provisioned writes max limit: {0:d}'.format(
                int(get_table_option(key_name, 'max_provisioned_writes'))))

            return get_table_option(key_name, 'max_provisioned_writes')

    return updated_provisioning
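This snippet depends on the surrounding module's logger and get_table_option helpers. A minimal, hypothetical harness for trying the capping behaviour in isolation might look like the following; the stubbed get_table_option and the option values are assumptions for illustration, not part of the original code.

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def get_table_option(key_name, option):
    # Hypothetical stand-in for the project's configuration lookup
    return {'my_table': {'max_provisioned_writes': 100}}[key_name][option]

# With increase_writes_in_units defined as above:
#   increase_writes_in_units('my_table', 80, 10, 'my_table')  -> 90
#   increase_writes_in_units('my_table', 80, 50, 'my_table')  -> 100 (capped at the max)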
Example #2
def get_min_provisioned_reads(current_provisioning, table_name, key_name):
    """ Returns the minimum provisioned reads

    If the min_provisioned_reads value is greater than current_provisioning * 2,
    then we return current_provisioning * 2, as DynamoDB cannot be scaled up
    by more than 100% at a time.

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Name of the key
    :returns: int -- Minimum provisioned reads
    """
    min_provisioned_reads = 1

    if get_table_option(key_name, 'min_provisioned_reads'):
        min_provisioned_reads = int(
            get_table_option(key_name, 'min_provisioned_reads'))

        if min_provisioned_reads > int(current_provisioning * 2):
            min_provisioned_reads = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min_provisioned_reads as max scale up '
                'is 100% of current provisioning'.format(
                    table_name))

    logger.debug(
        '{0} - '
        'Setting min provisioned reads to {1}'.format(
            table_name, min_provisioned_reads))

    return min_provisioned_reads
Example #3
def publish_table_notification(table_key, message, message_types, subject=None):
    """ Publish a notification for a specific table

    :type table_key: str
    :param table_key: Table configuration option key name
    :type message: str
    :param message: Message to send via SNS
    :type message_types: list
    :param message_types:
        List with types:
        - scale-up
        - scale-down
        - high-throughput-alarm
        - low-throughput-alarm
    :type subject: str
    :param subject: Subject to use for e-mail notifications
    :returns: None
    """
    topic = get_table_option(table_key, 'sns_topic_arn')
    if not topic:
        return

    for message_type in message_types:
        if message_type in get_table_option(table_key, 'sns_message_types'):
            __publish(topic, message, subject)
            return
Example #4
def increase_reads_in_percent(table_name, current_provisioning, percent, key_name):
    """ Increase the current_provisioning with percent %

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent should we increase with
    :returns: int -- New provisioning value
    :type key_name: str
    :param key_name: Name of the key
    """
    increase = int(float(current_provisioning)*(float(percent)/100))
    updated_provisioning = current_provisioning + increase
    logger.debug(
        'Read provisioning will be increased to {0:d} units'.format(
            updated_provisioning))

    if get_table_option(key_name, 'max_provisioned_reads') > 0:
        if (updated_provisioning >
            get_table_option(key_name, 'max_provisioned_reads')):

            logger.info('Reached provisioned reads max limit: {0:d}'.format(
                int(get_table_option(key_name, 'max_provisioned_reads'))))

            return get_table_option(key_name, 'max_provisioned_reads')

    return updated_provisioning
Example #5
def increase_reads_in_percent(table_name, current_provisioning, percent,
                              key_name):
    """ Increase the current_provisioning with percent %

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type percent: int
    :param percent: How many percent should we increase with
    :returns: int -- New provisioning value
    :type key_name: str
    :param key_name: Name of the key
    """
    increase = int(float(current_provisioning) * (float(percent) / 100))
    updated_provisioning = current_provisioning + increase
    logger.debug('Read provisioning will be increased to {0:d} units'.format(
        updated_provisioning))

    if get_table_option(key_name, 'max_provisioned_reads') > 0:
        if (updated_provisioning > get_table_option(key_name,
                                                    'max_provisioned_reads')):

            logger.info('Reached provisioned reads max limit: {0:d}'.format(
                int(get_table_option(key_name, 'max_provisioned_reads'))))

            return get_table_option(key_name, 'max_provisioned_reads')

    return updated_provisioning
Example #6
def increase_writes_in_units(table_name, current_provisioning, units,
                             key_name):
    """ Increase the current_provisioning with units units

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :returns: int -- New provisioning value
    :type key_name: str
    :param key_name: Name of the key
    """
    updated_provisioning = int(current_provisioning) + int(units)
    logger.debug('Write provisioning will be increased to {0:d} units'.format(
        updated_provisioning))

    if get_table_option(key_name, 'max_provisioned_writes') > 0:
        if (updated_provisioning > get_table_option(key_name,
                                                    'max_provisioned_writes')):

            logger.info('Reached provisioned writes max limit: {0:d}'.format(
                int(get_table_option(key_name, 'max_provisioned_writes'))))

            return get_table_option(key_name, 'max_provisioned_writes')

    return updated_provisioning
Example #7
def publish_table_notification(table_key,
                               message,
                               message_types,
                               subject=None):
    """ Publish a notification for a specific table

    :type table_key: str
    :param table_key: Table configuration option key name
    :type message: str
    :param message: Message to send via SNS
    :type message_types: list
    :param message_types:
        List with types:
        - scale-up
        - scale-down
        - high-throughput-alarm
        - low-throughput-alarm
    :type subject: str
    :param subject: Subject to use for e-mail notifications
    :returns: None
    """
    topic = get_table_option(table_key, 'sns_topic_arn')
    if not topic:
        return

    for message_type in message_types:
        if message_type in get_table_option(table_key, 'sns_message_types'):
            __publish(topic, message, subject)
            return
Example #8
def __ensure_provisioning_reads(table_name, key_name):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :returns: (bool, int) -- update_needed, updated_read_units
    """
    update_needed = False
    updated_read_units = statistics.get_provisioned_read_units(table_name)

    consumed_read_units_percent = \
        statistics.get_consumed_read_units_percent(table_name)

    if (consumed_read_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_reads_on_0_percent')):

        logger.info(
            '{0} - Scaling down reads is not done when usage is at 0%'.format(
                table_name))

    elif (consumed_read_units_percent >=
            get_table_option(key_name, 'reads_upper_threshold')):

        if get_table_option(key_name, 'increase_reads_unit') == 'percent':
            updated_provisioning = calculators.increase_reads_in_percent(
                updated_read_units,
                get_table_option(key_name, 'increase_reads_with'),
                key_name)
        else:
            updated_provisioning = calculators.increase_reads_in_units(
                updated_read_units,
                get_table_option(key_name, 'increase_reads_with'),
                key_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    elif (consumed_read_units_percent <=
            get_table_option(key_name, 'reads_lower_threshold')):

        if get_table_option(key_name, 'decrease_reads_unit') == 'percent':
            updated_provisioning = calculators.decrease_reads_in_percent(
                updated_read_units,
                get_table_option(key_name, 'decrease_reads_with'),
                key_name)
        else:
            updated_provisioning = calculators.decrease_reads_in_units(
                updated_read_units,
                get_table_option(key_name, 'decrease_reads_with'),
                key_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    return update_needed, int(updated_read_units)
Example #9
def __ensure_provisioning_writes(table_name):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: (bool, int) -- update_needed, updated_write_units
    """
    update_needed = False
    updated_write_units = statistics.get_provisioned_write_units(table_name)

    consumed_write_units_percent = \
        statistics.get_consumed_write_units_percent(table_name)

    # Check if we should update write provisioning
    if (consumed_write_units_percent == 0 and not
        get_table_option(table_name, 'allow_scaling_down_writes_on_0_percent')):

        logger.info(
            '{0} - Scaling down writes is not done when usage is at 0%'.format(
                table_name))

    elif (consumed_write_units_percent >=
        get_table_option(table_name, 'writes_upper_threshold')):

        if get_table_option(table_name, 'increase_writes_unit') == 'percent':
            updated_provisioning = calculators.increase_writes_in_percent(
                table_name,
                updated_write_units,
                get_table_option(table_name, 'increase_writes_with'))
        else:
            updated_provisioning = calculators.increase_writes_in_units(
                table_name,
                updated_write_units,
                get_table_option(table_name, 'increase_writes_with'))

        update_needed = True
        updated_write_units = updated_provisioning

    elif (consumed_write_units_percent <=
        get_table_option(table_name, 'writes_lower_threshold')):

        if get_table_option(table_name, 'decrease_writes_unit') == 'percent':
            updated_provisioning = calculators.decrease_writes_in_percent(
                table_name,
                updated_write_units,
                get_table_option(table_name, 'decrease_writes_with'))
        else:
            updated_provisioning = calculators.decrease_writes_in_units(
                table_name,
                updated_write_units,
                get_table_option(table_name, 'decrease_writes_with'))

        update_needed = True
        updated_write_units = updated_provisioning

    return update_needed, int(updated_write_units)
Example #10
def get_min_provisioned_reads(table_name, current_provisioning):
    """ Returns the minimum provisioned reads

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :returns: int -- Minimum provisioned reads
    """
    if get_table_option(table_name, 'min_provisioned_reads'):
        return int(min(
            get_table_option(table_name, 'min_provisioned_reads'),
            (current_provisioning * 2)))

    return int(current_provisioning * 2)
Example #11
def get_min_provisioned_reads(current_provisioning, key_name):
    """ Returns the minimum provisioned reads

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type key_name: str
    :param key_name: Name of the key
    :returns: int -- Minimum provisioned reads
    """
    if get_table_option(key_name, 'min_provisioned_reads'):
        return int(min(
            get_table_option(key_name, 'min_provisioned_reads'),
            (current_provisioning * 2)))

    return int(current_provisioning * 2)
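A quick way to see the doubling cap in action is to stub get_table_option and check two cases; the 500-unit minimum and the values below are made up for illustration.

def get_table_option(key_name, option):
    # Hypothetical configuration: a minimum of 500 provisioned reads
    return {'min_provisioned_reads': 500}[option]

# With get_min_provisioned_reads defined as above:
assert get_min_provisioned_reads(100, 'my_table') == 200  # capped at 2 * current provisioning
assert get_min_provisioned_reads(400, 'my_table') == 500  # the configured minimum applies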
Example #12
def ensure_provisioning(table_name, key_name, num_consec_read_checks,
                        num_consec_write_checks):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """

    if get_global_option('circuit_breaker_url') or get_table_option(
            key_name, 'circuit_breaker_url'):
        if circuit_breaker.is_open(table_name, key_name):
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)

    # Handle throughput alarm checks
    __ensure_provisioning_alarm(table_name, key_name)

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name,
                key_name,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name,
                key_name,
                num_consec_write_checks)

        if read_update_needed:
            num_consec_read_checks = 0

        if write_update_needed:
            num_consec_write_checks = 0

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info('{0} - Changing provisioning to {1:d} '
                        'read units and {2:d} write units'.format(
                            table_name, int(updated_read_units),
                            int(updated_write_units)))
            __update_throughput(table_name, key_name, updated_read_units,
                                updated_write_units)
        else:
            logger.info(
                '{0} - No need to change provisioning'.format(table_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
Example #13
def get_min_provisioned_reads(table_name, current_provisioning, key_name):
    """ Returns the minimum provisioned reads

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type key_name: str
    :param key_name: Name of the key
    :returns: int -- Minimum provisioned reads
    """
    if get_table_option(key_name, 'min_provisioned_reads'):
        return int(
            min(get_table_option(key_name, 'min_provisioned_reads'),
                (current_provisioning * 2)))

    return int(current_provisioning * 2)
Example #14
def get_min_provisioned_writes(table_name, current_provisioning, key_name):
    """ Returns the minimum provisioned writes

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type key_name: str
    :param key_name: Name of the key
    :returns: int -- Minimum provisioned writes
    """
    if get_table_option(key_name, 'min_provisioned_writes'):
        return int(min(
            get_table_option(key_name, 'min_provisioned_writes'),
            (current_provisioning * 2)))

    return int(current_provisioning * 2)
Example #15
def publish_table_notification(table_key, message, message_types, subject=None):
    """ Publish a notification for a specific table

    :type table_key: str
    :param table_key: Table configuration option key name
    :type message: str
    :param message: Message to send via SNS
    :type message_types: list
    :param message_types: List with types: scale-up, scale-down, error-message
    :type subject: str
    :param subject: Subject to use for e-mail notifications
    :returns: None
    """
    topic = get_table_option(table_key, "sns_topic_arn")
    if not topic:
        return

    for message_type in message_types:
        if message_type in get_table_option(table_key, "sns_message_types"):
            __publish(topic, message, subject)
            return
Example #16
def increase_reads_in_units(current_provisioning, units, key_name, table_name):
    """ Increase the current_provisioning with units units

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :returns: int -- New provisioning value
    :type key_name: str
    :param key_name: Name of the key
    :type table_name: str
    :param table_name: Name of the DynamoDB table
    """
    updated_provisioning = 0

    if int(units) > int(current_provisioning):
        updated_provisioning = 2 * int(current_provisioning)
    else:
        updated_provisioning = int(current_provisioning) + int(units)

    if get_table_option(key_name, 'max_provisioned_reads') > 0:
        if (updated_provisioning >
                get_table_option(key_name, 'max_provisioned_reads')):

            logger.info(
                '{0} - Reached provisioned reads max limit: {1:d}'.format(
                    table_name,
                    int(get_table_option(key_name, 'max_provisioned_reads'))))

            return get_table_option(key_name, 'max_provisioned_reads')

    logger.debug(
        '{0} - Read provisioning will be increased to {1:d} units'.format(
            table_name,
            updated_provisioning))

    return updated_provisioning
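Note the doubling rule: when the requested step exceeds the current provisioning, the increase is limited to 100% of the current value before the optional max cap is applied. A rough sketch with a stubbed configuration (the values are invented):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def get_table_option(key_name, option):
    # Hypothetical configuration: no upper limit on reads (0 disables the cap)
    return {'max_provisioned_reads': 0}[option]

# With increase_reads_in_units defined as above:
#   increase_reads_in_units(10, 50, 'my_table', 'my_table')   -> 20  (doubled, since 50 > 10)
#   increase_reads_in_units(100, 50, 'my_table', 'my_table')  -> 150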
Example #17
def __update_throughput(table_name, key_name, read_units, write_units):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    """
    try:
        current_ru = dynamodb.get_provisioned_table_read_units(table_name)
        current_wu = dynamodb.get_provisioned_table_write_units(table_name)
    except JSONResponseError:
        raise

    # Check table status
    try:
        table_status = dynamodb.get_table_status(table_name)
    except JSONResponseError:
        raise
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        read_units, write_units = __calculate_always_decrease_rw_values(
            table_name,
            read_units,
            current_ru,
            write_units,
            current_wu)

        if read_units == current_ru and write_units == current_wu:
            logger.info('{0} - No changes to perform'.format(table_name))
            return

    dynamodb.update_table_provisioning(
        table_name,
        key_name,
        int(read_units),
        int(write_units))
Example #18
def __update_throughput(table_name, key_name, read_units, write_units):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    """
    try:
        current_ru = dynamodb.get_provisioned_table_read_units(table_name)
        current_wu = dynamodb.get_provisioned_table_write_units(table_name)
    except JSONResponseError:
        raise

    # Check table status
    try:
        table_status = dynamodb.get_table_status(table_name)
    except JSONResponseError:
        raise
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        read_units, write_units = __calculate_always_decrease_rw_values(
            table_name,
            read_units,
            current_ru,
            write_units,
            current_wu)

        if read_units == current_ru and write_units == current_wu:
            logger.info('{0} - No changes to perform'.format(table_name))
            return

    dynamodb.update_table_provisioning(
        table_name,
        key_name,
        int(read_units),
        int(write_units))
Example #19
def __ensure_provisioning_alarm(table_name, key_name):
    """ Ensure that provisioning alarm threshold is not exceeded

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    """
    lookback_window_start = get_table_option(
        key_name, 'lookback_window_start')
    lookback_period = get_table_option(key_name, 'lookback_period')

    consumed_read_units_percent = table_stats.get_consumed_read_units_percent(
        table_name, lookback_window_start, lookback_period)
    consumed_write_units_percent = table_stats.get_consumed_write_units_percent(
        table_name, lookback_window_start, lookback_period)

    reads_upper_alarm_threshold = \
        get_table_option(key_name, 'reads-upper-alarm-threshold')
    reads_lower_alarm_threshold = \
        get_table_option(key_name, 'reads-lower-alarm-threshold')
    writes_upper_alarm_threshold = \
        get_table_option(key_name, 'writes-upper-alarm-threshold')
    writes_lower_alarm_threshold = \
        get_table_option(key_name, 'writes-lower-alarm-threshold')

    # Check upper alarm thresholds
    upper_alert_triggered = False
    upper_alert_message = []
    if 0 < reads_upper_alarm_threshold <= consumed_read_units_percent:
        upper_alert_triggered = True
        upper_alert_message.append(
            '{0} - Consumed Read Capacity {1:f}% '
            'was greater than or equal to the upper '
            'alarm threshold {2:f}%\n'.format(
                table_name,
                consumed_read_units_percent,
                reads_upper_alarm_threshold))

    if 0 < writes_upper_alarm_threshold <= consumed_write_units_percent:
        upper_alert_triggered = True
        upper_alert_message.append(
            '{0} - Consumed Write Capacity {1:f}% '
            'was greater than or equal to the upper alarm '
            'threshold {2:f}%\n'.format(
                table_name,
                consumed_write_units_percent,
                writes_upper_alarm_threshold))

    # Check lower alarm thresholds
    lower_alert_triggered = False
    lower_alert_message = []
    if (reads_lower_alarm_threshold > 0 and
            consumed_read_units_percent < reads_lower_alarm_threshold):
        lower_alert_triggered = True
        lower_alert_message.append(
            '{0} - Consumed Read Capacity {1:f}% '
            'was below the lower alarm threshold {2:f}%\n'.format(
                table_name,
                consumed_read_units_percent,
                reads_lower_alarm_threshold))

    if (writes_lower_alarm_threshold > 0 and
            consumed_write_units_percent < writes_lower_alarm_threshold):
        lower_alert_triggered = True
        lower_alert_message.append(
            '{0} - Consumed Write Capacity {1:f}% '
            'was below the lower alarm threshold {2:f}%\n'.format(
                table_name,
                consumed_write_units_percent,
                writes_lower_alarm_threshold))

    # Send alert if needed
    if upper_alert_triggered:
        logger.info(
            '{0} - Will send high provisioning alert'.format(table_name))
        sns.publish_table_notification(
            key_name,
            ''.join(upper_alert_message),
            ['high-throughput-alarm'],
            subject='ALARM: High Throughput for Table {0}'.format(table_name))
    elif lower_alert_triggered:
        logger.info(
            '{0} - Will send low provisioning alert'.format(table_name))
        sns.publish_table_notification(
            key_name,
            ''.join(lower_alert_message),
            ['low-throughput-alarm'],
            subject='ALARM: Low Throughput for Table {0}'.format(table_name))
    else:
        logger.debug('{0} - Throughput alarm thresholds not crossed'.format(
            table_name))
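The chained comparisons above only trigger an alarm when a threshold is actually configured (greater than zero) and consumption has reached it. A tiny illustration with made-up percentages:

consumed_read_units_percent = 95.0

print(0 < 90.0 <= consumed_read_units_percent)  # True: threshold set and reached
print(0 < 0 <= consumed_read_units_percent)     # False: a threshold of 0 disables the alarm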
Example #20
def __ensure_provisioning_writes(table_name, key_name):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :returns: (bool, int) -- update_needed, updated_write_units
    """
    update_needed = False
    updated_write_units = statistics.get_provisioned_write_units(table_name)

    consumed_write_units_percent = \
        statistics.get_consumed_write_units_percent(table_name)

    # Check if we should update write provisioning
    if (consumed_write_units_percent == 0 and not
        get_table_option(key_name, 'allow_scaling_down_writes_on_0_percent')):

        logger.info(
            '{0} - Scaling down writes is not done when usage is at 0%'.format(
                table_name))

    elif (consumed_write_units_percent >=
        get_table_option(key_name, 'writes_upper_threshold')):

        if get_table_option(key_name, 'increase_writes_unit') == 'percent':
            updated_provisioning = calculators.increase_writes_in_percent(
                table_name,
                updated_write_units,
                get_table_option(key_name, 'increase_writes_with'),
                key_name)
        else:
            updated_provisioning = calculators.increase_writes_in_units(
                table_name,
                updated_write_units,
                get_table_option(key_name, 'increase_writes_with'),
                key_name)

        update_needed = True
        updated_write_units = updated_provisioning

    elif (consumed_write_units_percent <=
        get_table_option(key_name, 'writes_lower_threshold')):

        if get_table_option(key_name, 'decrease_writes_unit') == 'percent':
            updated_provisioning = calculators.decrease_writes_in_percent(
                table_name,
                updated_write_units,
                get_table_option(key_name, 'decrease_writes_with'),
                key_name)
        else:
            updated_provisioning = calculators.decrease_writes_in_units(
                table_name,
                updated_write_units,
                get_table_option(key_name, 'decrease_writes_with'),
                key_name)

        update_needed = True
        updated_write_units = updated_provisioning

    return update_needed, int(updated_write_units)
Example #21
def __ensure_provisioning_alarm(table_name, key_name):
    """ Ensure that provisioning alarm threshold is not exceeded

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    """
    lookback_window_start = get_table_option(key_name, 'lookback_window_start')
    lookback_period = get_table_option(key_name, 'lookback_period')

    consumed_read_units_percent = table_stats.get_consumed_read_units_percent(
        table_name, lookback_window_start, lookback_period)
    consumed_write_units_percent = table_stats.get_consumed_write_units_percent(
        table_name, lookback_window_start, lookback_period)

    reads_upper_alarm_threshold = \
        get_table_option(key_name, 'reads-upper-alarm-threshold')
    reads_lower_alarm_threshold = \
        get_table_option(key_name, 'reads-lower-alarm-threshold')
    writes_upper_alarm_threshold = \
        get_table_option(key_name, 'writes-upper-alarm-threshold')
    writes_lower_alarm_threshold = \
        get_table_option(key_name, 'writes-lower-alarm-threshold')

    # Check upper alarm thresholds
    upper_alert_triggered = False
    upper_alert_message = []
    if 0 < reads_upper_alarm_threshold <= consumed_read_units_percent:
        upper_alert_triggered = True
        upper_alert_message.append('{0} - Consumed Read Capacity {1:f}% '
                                   'was greater than or equal to the upper '
                                   'alarm threshold {2:f}%\n'.format(
                                       table_name, consumed_read_units_percent,
                                       reads_upper_alarm_threshold))

    if 0 < writes_upper_alarm_threshold <= consumed_write_units_percent:
        upper_alert_triggered = True
        upper_alert_message.append(
            '{0} - Consumed Write Capacity {1:f}% '
            'was greater than or equal to the upper alarm '
            'threshold {2:f}%\n'.format(table_name,
                                        consumed_write_units_percent,
                                        writes_upper_alarm_threshold))

    # Check lower alarm thresholds
    lower_alert_triggered = False
    lower_alert_message = []
    if (reads_lower_alarm_threshold > 0
            and consumed_read_units_percent < reads_lower_alarm_threshold):
        lower_alert_triggered = True
        lower_alert_message.append(
            '{0} - Consumed Read Capacity {1:f}% '
            'was below the lower alarm threshold {2:f}%\n'.format(
                table_name, consumed_read_units_percent,
                reads_lower_alarm_threshold))

    if (writes_lower_alarm_threshold > 0
            and consumed_write_units_percent < writes_lower_alarm_threshold):
        lower_alert_triggered = True
        lower_alert_message.append(
            '{0} - Consumed Write Capacity {1:f}% '
            'was below the lower alarm threshold {2:f}%\n'.format(
                table_name, consumed_write_units_percent,
                writes_lower_alarm_threshold))

    # Send alert if needed
    if upper_alert_triggered:
        logger.info(
            '{0} - Will send high provisioning alert'.format(table_name))
        sns.publish_table_notification(
            key_name,
            ''.join(upper_alert_message), ['high-throughput-alarm'],
            subject='ALARM: High Throughput for Table {0}'.format(table_name))
    elif lower_alert_triggered:
        logger.info(
            '{0} - Will send low provisioning alert'.format(table_name))
        sns.publish_table_notification(
            key_name,
            ''.join(lower_alert_message), ['low-throughput-alarm'],
            subject='ALARM: Low Throughput for Table {0}'.format(table_name))
    else:
        logger.debug(
            '{0} - Throughput alarm thresholds not crossed'.format(table_name))
Example #22
    def run(self, check_interval=1):
        """ Run the daemon

        :type check_interval: int
        :param check_interval: Delay in seconds between checks
        """
        try:
            while True:
                # Ensure provisioning
                for table_name, table_key in \
                        sorted(dynamodb.get_tables_and_gsis()):
                    try:
                        table.ensure_provisioning(table_name, table_key)

                        gsi_names = set()
                        # Add regexp table names
                        for gst_instance in dynamodb.table_gsis(table_name):
                            gsi_name = gst_instance[u'IndexName']

                            try:
                                gsi_keys = get_table_option(
                                    table_key, 'gsis').keys()
                            except AttributeError:
                                # Continue if there are no GSIs configured
                                continue

                            for gsi_key in gsi_keys:
                                try:
                                    if re.match(gsi_key, gsi_name):
                                        logger.debug(
                                            'Table {0} GSI {1} match with '
                                            'GSI config key {2}'.format(
                                                table_name, gsi_name, gsi_key))
                                        gsi_names.add(
                                            (
                                                gsi_name,
                                                gsi_key
                                            ))
                                except re.error:
                                    logger.error(
                                        'Invalid regular expression: '
                                        '"{0}"'.format(gsi_key))
                                    sys.exit(1)

                        gsi_names = sorted(gsi_names)

                        for gsi_name, gsi_key in gsi_names:
                            gsi.ensure_provisioning(
                                table_name,
                                table_key,
                                gsi_name,
                                gsi_key)
                    except JSONResponseError as error:
                        exception = error.body['__type'].split('#')[1]
                        if exception == 'ResourceNotFoundException':
                            logger.error(
                                '{0} - Table {1} does not exist anymore'.format(
                                    table_name, table_name))
                            continue

                # Sleep between the checks
                logger.debug('Sleeping {0} seconds until next check'.format(
                    check_interval))
                time.sleep(check_interval)
        except Exception as error:
            logger.exception(error)
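Each configured GSI key is treated as a regular expression that is matched against the start of the index name via re.match. A toy illustration (the index and key names are invented):

import re

gsi_keys = ['^user_index.*', 'orders_by_date']
gsi_name = 'user_index_v2'

matched = [gsi_key for gsi_key in gsi_keys if re.match(gsi_key, gsi_name)]
print(matched)  # ['^user_index.*']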
Example #23
def __ensure_provisioning_writes(table_name, key_name):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :returns: (bool, int) -- update_needed, updated_write_units
    """
    update_needed = False
    updated_write_units = table_stats.get_provisioned_write_units(table_name)

    consumed_write_units_percent = \
        table_stats.get_consumed_write_units_percent(table_name)

    # Check if we should update write provisioning
    if (consumed_write_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_writes_on_0_percent')):

        logger.info(
            '{0} - Scaling down writes is not done when usage is at 0%'.format(
                table_name))

    elif (consumed_write_units_percent >=
            get_table_option(key_name, 'writes_upper_threshold')):

        if get_table_option(key_name, 'increase_writes_unit') == 'percent':
            updated_provisioning = calculators.increase_writes_in_percent(
                updated_write_units,
                get_table_option(key_name, 'increase_writes_with'),
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.increase_writes_in_units(
                updated_write_units,
                get_table_option(key_name, 'increase_writes_with'),
                key_name,
                table_name)

        if updated_write_units != updated_provisioning:
            update_needed = True
            updated_write_units = updated_provisioning

    elif (consumed_write_units_percent <=
            get_table_option(key_name, 'writes_lower_threshold')):

        if get_table_option(key_name, 'decrease_writes_unit') == 'percent':
            updated_provisioning = calculators.decrease_writes_in_percent(
                updated_write_units,
                get_table_option(key_name, 'decrease_writes_with'),
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.decrease_writes_in_units(
                updated_write_units,
                get_table_option(key_name, 'decrease_writes_with'),
                key_name,
                table_name)

        if updated_write_units != updated_provisioning:
            update_needed = True
            updated_write_units = updated_provisioning

    if get_table_option(key_name, 'max_provisioned_writes'):
        if (int(updated_write_units) >
                int(get_table_option(key_name, 'max_provisioned_writes'))):
            update_needed = True
            updated_write_units = int(
                get_table_option(key_name, 'max_provisioned_writes'))
            logger.info(
                'Will not increase writes over max-provisioned-writes '
                'limit ({0} writes)'.format(updated_write_units))

    return update_needed, int(updated_write_units)
Example #24
def update_table_provisioning(table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info("{0} - No need to scale up reads nor writes".format(table_name))
            return

        logger.info(
            "{0} - Retrying to update provisioning, excluding any decreases. "
            "Setting new reads to {1} and new writes to {2}".format(table_name, reads, writes)
        )

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, "maintenance_windows")
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                "{0} - We are outside a maintenace window. " "Will only perform up scaling activites".format(table_name)
            )

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info("{0} - No need to scale up reads nor writes".format(table_name))
                return

        else:
            logger.info("{0} - Current time is within maintenance window".format(table_name))

    logger.info("{0} - Updating provisioning to {1} reads and {2} writes".format(table_name, reads, writes))

    # Return if dry-run
    if get_global_option("dry_run"):
        return

    try:
        table.update(throughput={"read": reads, "write": writes})

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append("scale-down")
        if current_reads < reads or current_writes < writes:
            sns_message_types.append("scale-up")

        message = ("{0} - Provisioning updated to {1} reads and {2} writes").format(table_name, reads, writes)

        sns.publish_table_notification(
            key_name, message, sns_message_types, subject="Updated provisioning for table {0}".format(table_name)
        )
    except JSONResponseError as error:
        exception = error.body["__type"].split("#")[1]
        know_exceptions = ["LimitExceededException", "ValidationException", "ResourceInUseException"]

        if exception in know_exceptions:
            logger.warning("{0} - {1}: {2}".format(table_name, exception, error.body["message"]))
        else:
            logger.error(
                (
                    "{0} - Unhandled exception: {1}: {2}. "
                    "Please file a bug report at "
                    "https://github.com/sebdah/dynamic-dynamodb/issues"
                ).format(table_name, exception, error.body["message"])
            )

        if not retry_with_only_increase and exception == "LimitExceededException":
            logger.info("{0} - Will retry to update provisioning " "with only increases".format(table_name))
            update_table_provisioning(table_name, key_name, reads, writes, retry_with_only_increase=True)
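The scale-up and scale-down notification types follow directly from comparing the old and new throughput values. For instance, with made-up numbers:

current_reads, current_writes = 100, 50
reads, writes = 80, 80

sns_message_types = []
if current_reads > reads or current_writes > writes:
    sns_message_types.append("scale-down")
if current_reads < reads or current_writes < writes:
    sns_message_types.append("scale-up")

print(sns_message_types)  # ['scale-down', 'scale-up']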
Example #25
def update_table_provisioning(table_name,
                              key_name,
                              reads,
                              writes,
                              retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenance window. '
                'Will only perform up-scaling activities'.format(table_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return

        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        table.update(throughput={'read': reads, 'write': writes})

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        message = (
            '{0} - Provisioning updated to {1} reads and {2} writes').format(
                table_name, reads, writes)

        sns.publish_table_notification(
            key_name,
            message,
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = [
            'LimitExceededException', 'ValidationException',
            'ResourceInUseException'
        ]

        if exception in known_exceptions:
            logger.warning('{0} - {1}: {2}'.format(table_name, exception,
                                                   error.body['message']))
        else:
            logger.error(
                ('{0} - Unhandled exception: {1}: {2}. '
                 'Please file a bug report at '
                 'https://github.com/sebdah/dynamic-dynamodb/issues').format(
                     table_name, exception, error.body['message']))

        if (not retry_with_only_increase
                and exception == 'LimitExceededException'):
            logger.info('{0} - Will retry updating provisioning '
                        'with only increases'.format(table_name))
            update_table_provisioning(table_name,
                                      key_name,
                                      reads,
                                      writes,
                                      retry_with_only_increase=True)
Example #26
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open

    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(r'^(?P<scheme>http(s)?://)'
                         r'((?P<username>.+):(?P<password>.+)@){0,1}'
                         r'(?P<url>.*)$')

    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')

    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')

    match = pattern.match(url)
    if not match:
        logger.error('Malformed URL: {0}'.format(url))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(scheme=match.group('scheme'),
                                     url=match.group('url'))
        auth = (match.group('username'), match.group('password'))

    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name

    # Make the actual request
    try:
        response = requests.get(url,
                                auth=auth,
                                timeout=timeout / 1000.00,
                                headers=headers)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))

    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error('Please file a bug at '
                     'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True
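The circuit breaker URL may embed basic-auth credentials, which the regular expression above splits out before the request is made. A standalone check with an invented URL:

import re

pattern = re.compile(r'^(?P<scheme>http(s)?://)'
                     r'((?P<username>.+):(?P<password>.+)@){0,1}'
                     r'(?P<url>.*)$')

match = pattern.match('https://user:secret@breaker.example.com/status')
print(match.group('scheme'))    # https://
print(match.group('username'))  # user
print(match.group('url'))       # breaker.example.com/status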
Example #27
def __ensure_provisioning_writes(
        table_name, key_name, num_consec_write_checks):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (bool, int, int)
        update_needed, updated_write_units, num_consec_write_checks
    """
    if not get_table_option(key_name, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - Autoscaling of writes has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_write_units(table_name), 0

    update_needed = False
    try:
        current_write_units = dynamodb.get_provisioned_table_write_units(
            table_name)
        consumed_write_units_percent = \
            table_stats.get_consumed_write_units_percent(table_name)
        throttled_write_count = \
            table_stats.get_throttled_write_event_count(table_name)
        writes_upper_threshold = \
            get_table_option(key_name, 'writes_upper_threshold')
        writes_lower_threshold = \
            get_table_option(key_name, 'writes_lower_threshold')
        throttled_writes_upper_threshold = \
            get_table_option(key_name, 'throttled_writes_upper_threshold')
        increase_writes_unit = \
            get_table_option(key_name, 'increase_writes_unit')
        increase_writes_with = \
            get_table_option(key_name, 'increase_writes_with')
        decrease_writes_unit = \
            get_table_option(key_name, 'decrease_writes_unit')
        decrease_writes_with = \
            get_table_option(key_name, 'decrease_writes_with')
        max_provisioned_writes = \
            get_table_option(key_name, 'max_provisioned_writes')
        num_write_checks_before_scale_down = \
            get_table_option(key_name, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = \
            get_table_option(key_name, 'num_write_checks_reset_percent')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current write unit value
    updated_write_units = current_write_units

    # Check if we should update write provisioning
    if (consumed_write_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_writes_on_0_percent')):

        logger.info(
            '{0} - Scaling down writes is not done when usage is at 0%'.format(
                table_name))

    elif consumed_write_units_percent >= writes_upper_threshold:

        if increase_writes_unit == 'percent':
            calulated_provisioning = calculators.increase_writes_in_percent(
                current_write_units,
                increase_writes_with,
                get_table_option(key_name, 'max_provisioned_writes'),
                table_name)
        else:
            calulated_provisioning = calculators.increase_writes_in_units(
                current_write_units,
                increase_writes_with,
                get_table_option(key_name, 'max_provisioned_writes'),
                table_name)

        if current_write_units != calulated_provisioning:
            logger.info(
                '{0} - Resetting the number of consecutive '
                'write checks. Reason: scale up event detected'.format(
                    table_name))
            num_consec_write_checks = 0
            update_needed = True
            updated_write_units = calulated_provisioning

    elif throttled_write_count > throttled_writes_upper_threshold:

        if throttled_writes_upper_threshold > 0:
            if increase_writes_unit == 'percent':
                calulated_provisioning = calculators.increase_writes_in_percent(
                    current_write_units,
                    increase_writes_with,
                    get_table_option(key_name, 'max_provisioned_writes'),
                    table_name)
            else:
                calulated_provisioning = calculators.increase_writes_in_units(
                    current_write_units,
                    increase_writes_with,
                    get_table_option(key_name, 'max_provisioned_writes'),
                    table_name)

            if current_write_units != calulated_provisioning:
                logger.info(
                    '{0} - Resetting the number of consecutive '
                    'write checks. Reason: scale up event detected'.format(
                        table_name))
                num_consec_write_checks = 0
                update_needed = True
                updated_write_units = calulated_provisioning

    elif consumed_write_units_percent >= num_write_checks_reset_percent:
        logger.info(
            '{0} - Resetting the number of consecutive '
            'write checks. Reason: consumed percent {1} is greater '
            'than the reset percent {2}'.format(
                table_name,
                consumed_write_units_percent,
                num_write_checks_reset_percent))
        num_consec_write_checks = 0

    elif consumed_write_units_percent <= writes_lower_threshold:

        if decrease_writes_unit == 'percent':
            calulated_provisioning = calculators.decrease_writes_in_percent(
                current_write_units,
                decrease_writes_with,
                get_table_option(key_name, 'min_provisioned_writes'),
                table_name)
        else:
            calulated_provisioning = calculators.decrease_writes_in_units(
                current_write_units,
                decrease_writes_with,
                get_table_option(key_name, 'min_provisioned_writes'),
                table_name)

        if current_write_units != calulated_provisioning:
            num_consec_write_checks = num_consec_write_checks + 1

            if num_consec_write_checks >= num_write_checks_before_scale_down:
                update_needed = True
                updated_write_units = calulated_provisioning

    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info(
                'Will not increase writes over max-provisioned-writes '
                'limit ({0} writes)'.format(updated_write_units))

    logger.info('{0} - Consecutive write checks {1}/{2}'.format(
        table_name,
        num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
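
The scale-down branch above only takes effect once the low-usage condition has been seen for several consecutive runs. A minimal, self-contained sketch of that gate (hypothetical helper names, not part of the module):

def should_apply_scale_down(num_consec_checks, checks_before_scale_down):
    """ Return (apply_now, next_consec_count) for a detected decrease """
    next_count = num_consec_checks + 1
    return next_count >= checks_before_scale_down, next_count

# With a threshold of 3 checks, only the third consecutive low reading
# actually triggers the scale-down.
assert should_apply_scale_down(1, 3) == (False, 2)
assert should_apply_scale_down(2, 3) == (True, 3)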
예제 #28
0
def __ensure_provisioning_writes(
        table_name, key_name, num_consec_write_checks):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (bool, int, int)
        update_needed, updated_write_units, num_consec_write_checks
    """
    if not get_table_option(key_name, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - Autoscaling of writes has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_write_units(table_name), 0

    update_needed = False
    try:
        lookback_window_start = get_table_option(
            key_name, 'lookback_window_start')
        current_write_units = dynamodb.get_provisioned_table_write_units(
            table_name)
        consumed_write_units_percent = \
            table_stats.get_consumed_write_units_percent(
                table_name, lookback_window_start)
        throttled_write_count = \
            table_stats.get_throttled_write_event_count(
                table_name, lookback_window_start)
        writes_upper_threshold = \
            get_table_option(key_name, 'writes_upper_threshold')
        writes_lower_threshold = \
            get_table_option(key_name, 'writes_lower_threshold')
        throttled_writes_upper_threshold = \
            get_table_option(key_name, 'throttled_writes_upper_threshold')
        increase_writes_unit = \
            get_table_option(key_name, 'increase_writes_unit')
        increase_writes_with = \
            get_table_option(key_name, 'increase_writes_with')
        decrease_writes_unit = \
            get_table_option(key_name, 'decrease_writes_unit')
        decrease_writes_with = \
            get_table_option(key_name, 'decrease_writes_with')
        min_provisioned_writes = \
            get_table_option(key_name, 'min_provisioned_writes')
        max_provisioned_writes = \
            get_table_option(key_name, 'max_provisioned_writes')
        num_write_checks_before_scale_down = \
            get_table_option(key_name, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = \
            get_table_option(key_name, 'num_write_checks_reset_percent')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current write unit value
    updated_write_units = current_write_units

    # Reset consecutive write count if num_write_checks_reset_percent is reached
    if num_write_checks_reset_percent:

        if consumed_write_units_percent >= num_write_checks_reset_percent:

            logger.info(
                '{0} - Resetting the number of consecutive '
                'write checks. Reason: Consumed percent {1} is '
                'greater than reset percent: {2}'.format(
                    table_name,
                    consumed_write_units_percent,
                    num_write_checks_reset_percent))

            num_consec_write_checks = 0

    # Check if we should update write provisioning
    if (consumed_write_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_writes_on_0_percent')):

        logger.info(
            '{0} - Scaling down writes is not done when usage is at 0%'.format(
                table_name))

    # Increase needed due to high CU consumption
    elif consumed_write_units_percent >= writes_upper_threshold:

        # Exit if up scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_up_scaling'):
            logger.debug(
                '{0} - Up scaling event detected. No action taken as scaling '
                'up writes has been disabled in the configuration'.format(
                    table_name))
        else:
            if increase_writes_unit == 'percent':
                calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                calculated_provisioning = calculators.increase_writes_in_units(
                    current_write_units,
                    increase_writes_with,
                    get_table_option(key_name, 'max_provisioned_writes'),
                    consumed_write_units_percent,
                    table_name)

            if current_write_units != calculated_provisioning:
                logger.info(
                    '{0} - Resetting the number of consecutive '
                    'write checks. Reason: scale up event detected'.format(
                        table_name))
                num_consec_write_checks = 0
                update_needed = True
                updated_write_units = calculated_provisioning

    # Increase needed due to high throttling
    elif throttled_write_count > throttled_writes_upper_threshold:

        if throttled_writes_upper_threshold > 0:
            if increase_writes_unit == 'percent':
                calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                calculated_provisioning = calculators.increase_writes_in_units(
                    current_write_units,
                    increase_writes_with,
                    get_table_option(key_name, 'max_provisioned_writes'),
                    consumed_write_units_percent,
                    table_name)

            if current_write_units != calculated_provisioning:
                logger.info(
                    '{0} - Resetting the number of consecutive '
                    'write checks. Reason: scale up event detected'.format(
                        table_name))
                num_consec_write_checks = 0
                update_needed = True
                updated_write_units = calculated_provisioning

    # Decrease needed due to low CU consumption
    elif consumed_write_units_percent <= writes_lower_threshold:

        # Exit if down scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_down_scaling'):
            logger.debug(
                '{0} - Down scaling event detected. No action taken as scaling '
                'down writes has been disabled in the configuration'.format(
                    table_name))
        else:
            if decrease_writes_unit == 'percent':
                calculated_provisioning = \
                    calculators.decrease_writes_in_percent(
                        current_write_units,
                        decrease_writes_with,
                        get_table_option(key_name, 'min_provisioned_writes'),
                        table_name)
            else:
                calculated_provisioning = calculators.decrease_writes_in_units(
                    current_write_units,
                    decrease_writes_with,
                    get_table_option(key_name, 'min_provisioned_writes'),
                    table_name)

            if current_write_units != calculated_provisioning:
                num_consec_write_checks = num_consec_write_checks + 1

                if (num_consec_write_checks >=
                        num_write_checks_before_scale_down):
                    update_needed = True
                    updated_write_units = calculated_provisioning

    # Never go over the configured max provisioning
    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info(
                'Will not increase writes over max-provisioned-writes '
                'limit ({0} writes)'.format(updated_write_units))

    # Ensure that we have met the min-provisioning
    if min_provisioned_writes:
        if int(min_provisioned_writes) > int(updated_write_units):
            update_needed = True
            updated_write_units = int(min_provisioned_writes)
            logger.info(
                '{0} - Increasing writes to meet min-provisioned-writes '
                'limit ({1} writes)'.format(table_name, updated_write_units))

    logger.info('{0} - Consecutive write checks {1}/{2}'.format(
        table_name,
        num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
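
The two blocks at the end enforce the configured limits no matter which branch ran. A self-contained sketch of that clamping, assuming both limits are plain integers and a falsy value disables a limit:

def clamp_provisioning(units, min_units=None, max_units=None):
    """ Apply max-provisioned and min-provisioned limits, in that order """
    if max_units and int(units) > int(max_units):
        units = int(max_units)
    if min_units and int(min_units) > int(units):
        units = int(min_units)
    return int(units)

assert clamp_provisioning(500, min_units=10, max_units=200) == 200
assert clamp_provisioning(5, min_units=10, max_units=200) == 10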
예제 #29
0
def __ensure_provisioning_reads(table_name, key_name):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :returns: (bool, int) -- update_needed, updated_read_units
    """
    update_needed = False
    try:
        updated_read_units = dynamodb.get_provisioned_table_read_units(
            table_name)
        consumed_read_units_percent = table_stats.\
            get_consumed_read_units_percent(table_name)
    except JSONResponseError:
        raise

    throttled_read_count = table_stats.get_throttled_read_event_count(
        table_name)

    if (consumed_read_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_reads_on_0_percent')):

        logger.info(
            '{0} - Scaling down reads is not done when usage is at 0%'.format(
                table_name))

    elif (consumed_read_units_percent >=
            get_table_option(key_name, 'reads_upper_threshold')):

        if get_table_option(key_name, 'increase_reads_unit') == 'percent':
            updated_provisioning = calculators.increase_reads_in_percent(
                updated_read_units,
                get_table_option(key_name, 'increase_reads_with'),
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.increase_reads_in_units(
                updated_read_units,
                get_table_option(key_name, 'increase_reads_with'),
                key_name,
                table_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    elif (throttled_read_count >=
            get_table_option(key_name, 'throttled_reads_upper_threshold')):

        if get_table_option(key_name, 'increase_reads_unit') == 'percent':
            updated_provisioning = calculators.increase_reads_in_percent(
                updated_read_units,
                get_table_option(key_name, 'increase_reads_with'),
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.increase_reads_in_units(
                updated_read_units,
                get_table_option(key_name, 'increase_reads_with'),
                key_name,
                table_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    elif (consumed_read_units_percent <=
            get_table_option(key_name, 'reads_lower_threshold')):

        if get_table_option(key_name, 'decrease_reads_unit') == 'percent':
            updated_provisioning = calculators.decrease_reads_in_percent(
                updated_read_units,
                get_table_option(key_name, 'decrease_reads_with'),
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.decrease_reads_in_units(
                updated_read_units,
                get_table_option(key_name, 'decrease_reads_with'),
                key_name,
                table_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    if get_table_option(key_name, 'max_provisioned_reads'):
        if (int(updated_read_units) >
                int(get_table_option(key_name, 'max_provisioned_reads'))):
            update_needed = True
            updated_read_units = int(
                get_table_option(key_name, 'max_provisioned_reads'))
            logger.info(
                'Will not increase reads over max-provisioned-reads '
                'limit ({0} reads)'.format(updated_read_units))

    return update_needed, int(updated_read_units)
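
The elif chain above evaluates in a fixed order: the 0% guard first, then the upper threshold, then throttling, then the lower threshold. A rough, self-contained sketch of that ordering (illustrative names, not the module's API):

def pick_action(consumed_pct, throttled_count, upper, lower,
                throttle_limit, allow_down_on_zero=False):
    if consumed_pct == 0 and not allow_down_on_zero:
        return 'noop'
    elif consumed_pct >= upper:
        return 'scale-up'
    elif throttled_count >= throttle_limit:
        return 'scale-up'
    elif consumed_pct <= lower:
        return 'scale-down'
    return 'noop'

assert pick_action(0, 0, upper=90, lower=30, throttle_limit=100) == 'noop'
assert pick_action(95, 0, upper=90, lower=30, throttle_limit=100) == 'scale-up'
assert pick_action(10, 0, upper=90, lower=30, throttle_limit=100) == 'scale-down'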
예제 #30
0
def __ensure_provisioning_reads(table_name, key_name, num_consec_read_checks):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :returns: (bool, int, int)
        update_needed, updated_read_units, num_consec_read_checks
    """
    if not get_table_option(key_name, 'enable_reads_autoscaling'):
        logger.info(
            '{0} - Autoscaling of reads has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_read_units(table_name), 0

    update_needed = False
    try:
        current_read_units = dynamodb.get_provisioned_table_read_units(
            table_name)
        consumed_read_units_percent = \
            table_stats.get_consumed_read_units_percent(table_name)
        throttled_read_count = \
            table_stats.get_throttled_read_event_count(table_name)
        reads_upper_threshold = \
            get_table_option(key_name, 'reads_upper_threshold')
        reads_lower_threshold = \
            get_table_option(key_name, 'reads_lower_threshold')
        throttled_reads_upper_threshold = \
            get_table_option(key_name, 'throttled_reads_upper_threshold')
        increase_reads_with = \
            get_table_option(key_name, 'increase_reads_with')
        increase_reads_unit = \
            get_table_option(key_name, 'increase_reads_unit')
        decrease_reads_with = \
            get_table_option(key_name, 'decrease_reads_with')
        decrease_reads_unit = \
            get_table_option(key_name, 'decrease_reads_unit')
        max_provisioned_reads = \
            get_table_option(key_name, 'max_provisioned_reads')
        num_read_checks_before_scale_down = \
            get_table_option(key_name, 'num_read_checks_before_scale_down')
        num_read_checks_reset_percent = \
            get_table_option(key_name, 'num_read_checks_reset_percent')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current read unit value
    updated_read_units = current_read_units

    # Reset consecutive reads if num_read_checks_reset_percent is reached
    if num_read_checks_reset_percent:

        if consumed_read_units_percent >= num_read_checks_reset_percent:

            logger.info(
                '{0} - Resetting the number of consecutive '
                'read checks. Reason: Consumed percent {1} is '
                'greater than reset percent: {2}'.format(
                    table_name,
                    consumed_read_units_percent,
                    num_read_checks_reset_percent))

            num_consec_read_checks = 0

    if (consumed_read_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_reads_on_0_percent')):
        logger.info(
            '{0} - Scaling down reads is not done when usage is at 0%'.format(
                table_name))

    elif consumed_read_units_percent >= reads_upper_threshold:

        if increase_reads_unit == 'percent':
            calculated_provisioning = calculators.increase_reads_in_percent(
                current_read_units,
                increase_reads_with,
                get_table_option(key_name, 'max_provisioned_reads'),
                table_name)
        else:
            calculated_provisioning = calculators.increase_reads_in_units(
                current_read_units,
                increase_reads_with,
                get_table_option(key_name, 'max_provisioned_reads'),
                table_name)

        if current_read_units != calculated_provisioning:
            logger.info(
                '{0} - Resetting the number of consecutive '
                'read checks. Reason: scale up event detected'.format(
                    table_name))
            num_consec_read_checks = 0
            update_needed = True
            updated_read_units = calculated_provisioning

    elif throttled_read_count > throttled_reads_upper_threshold:

        if throttled_reads_upper_threshold > 0:
            if increase_reads_unit == 'percent':
                calculated_provisioning = calculators.increase_reads_in_percent(
                    updated_read_units,
                    increase_reads_with,
                    get_table_option(key_name, 'max_provisioned_reads'),
                    table_name)
            else:
                calculated_provisioning = calculators.increase_reads_in_units(
                    updated_read_units,
                    increase_reads_with,
                    get_table_option(key_name, 'max_provisioned_reads'),
                    table_name)

            if current_read_units != calculated_provisioning:
                logger.info(
                    '{0} - Resetting the number of consecutive '
                    'read checks. Reason: scale up event detected'.format(
                        table_name))
                num_consec_read_checks = 0
                update_needed = True
                updated_read_units = calculated_provisioning

    elif consumed_read_units_percent <= reads_lower_threshold:

        if decrease_reads_unit == 'percent':
            calculated_provisioning = calculators.decrease_reads_in_percent(
                updated_read_units,
                decrease_reads_with,
                get_table_option(key_name, 'min_provisioned_reads'),
                table_name)
        else:
            calculated_provisioning = calculators.decrease_reads_in_units(
                updated_read_units,
                decrease_reads_with,
                get_table_option(key_name, 'min_provisioned_reads'),
                table_name)

        if current_read_units != calculated_provisioning:
            num_consec_read_checks = num_consec_read_checks + 1

            if num_consec_read_checks >= num_read_checks_before_scale_down:
                update_needed = True
                updated_read_units = calculated_provisioning

    if max_provisioned_reads:
        if int(updated_read_units) > int(max_provisioned_reads):
            update_needed = True
            updated_read_units = int(max_provisioned_reads)
            logger.info(
                'Will not increase reads over max-provisioned-reads '
                'limit ({0} reads)'.format(updated_read_units))

    logger.info('{0} - Consecutive read checks {1}/{2}'.format(
        table_name,
        num_consec_read_checks,
        num_read_checks_before_scale_down))

    return update_needed, updated_read_units, num_consec_read_checks
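
The reset-percent guard near the top discards any accumulated scale-down votes once consumption climbs back above the configured percentage. A small illustrative sketch (names assumed):

def maybe_reset_consec_checks(consumed_pct, reset_pct, num_consec_checks):
    """ Return the counter, reset to 0 if the reset percent was reached """
    if reset_pct and consumed_pct >= reset_pct:
        return 0
    return num_consec_checks

assert maybe_reset_consec_checks(55, 50, 4) == 0
assert maybe_reset_consec_checks(20, 50, 4) == 4
assert maybe_reset_consec_checks(55, 0, 4) == 4   # option unset: no reset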
예제 #31
0
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table.ensure_provisioning(table_name, table_key)

            gsi_names = set()
            # Match GSI names against the configured GSI regexp keys
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()

                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))

                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(
                            gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                gsi.ensure_provisioning(
                    table_name,
                    table_key,
                    gsi_name,
                    gsi_key)

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name,
                    table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status,
                        error.reason))
                logger.error(
                    'Please bug report if this error persists')
                boto_server_error_retries -= 1
                continue

            else:
                raise

    # Sleep between the checks
    logger.debug('Sleeping {0} seconds until next check'.format(
        get_global_option('check_interval')))
    time.sleep(get_global_option('check_interval'))
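
Each configured GSI key is treated as a regular expression and matched against the table's actual index names. A self-contained sketch with made-up index names:

import re

gsi_config_keys = ['^user-index.*', 'orders-by-date']
actual_gsi_names = ['user-index-v2', 'orders-by-date', 'unrelated-index']

matched = set()
for gsi_name in actual_gsi_names:
    for gsi_key in gsi_config_keys:
        if re.match(gsi_key, gsi_name):
            matched.add((gsi_name, gsi_key))

# [('orders-by-date', 'orders-by-date'), ('user-index-v2', '^user-index.*')]
print(sorted(matched))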
예제 #32
0
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The return var shows how many times the scale-down criteria
            #  has been met. This is coupled with a var in config,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Match GSI names against the configured GSI regexp keys
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()

                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug('Table {0} GSI {1} matches '
                                         'GSI config key {2}'.format(
                                             table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))

                    except re.error:
                        logger.error(
                            'Invalid regular expression: "{0}"'.format(
                                gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['gsis'][gsi_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['gsis'][gsi_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error('{0} - Table {1} does not exist anymore'.format(
                    table_name, table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status, error.reason))
                logger.error('Please bug report if this error persists')
                boto_server_error_retries -= 1
                continue

            else:
                raise

    # Sleep between the checks
    logger.debug('Sleeping {0} seconds until next check'.format(
        get_global_option('check_interval')))
    time.sleep(get_global_option('check_interval'))
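
The consecutive-check counters survive between scheduler runs by living in the module-level CHECK_STATUS dict. A minimal sketch of that bookkeeping, assuming the {'tables': {...}, 'gsis': {...}} structure used above:

CHECK_STATUS = {'tables': {}, 'gsis': {}}

def load_table_counters(table_name):
    entry = CHECK_STATUS['tables'].get(table_name, {})
    return entry.get('reads', 0), entry.get('writes', 0)

def store_table_counters(table_name, reads, writes):
    CHECK_STATUS['tables'][table_name] = {'reads': reads, 'writes': writes}

store_table_counters('my-table', 2, 0)
assert load_table_counters('my-table') == (2, 0)
assert load_table_counters('unknown-table') == (0, 0)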
예제 #33
0
def update_table_provisioning(
        table_name, key_name, reads, writes, retry_with_only_increase=False):
    """ Update provisioning for a given table

    :type table_name: str
    :param table_name: Name of the table
    :type key_name: str
    :param key_name: Configuration option key name
    :type reads: int
    :param reads: New number of provisioned read units
    :type writes: int
    :param writes: New number of provisioned write units
    :type retry_with_only_increase: bool
    :param retry_with_only_increase: Set to True to ensure only increases
    """
    table = get_table(table_name)
    current_reads = int(get_provisioned_table_read_units(table_name))
    current_writes = int(get_provisioned_table_write_units(table_name))

    # Make sure we aren't scaling down if we turned off downscaling
    if (not get_table_option(key_name, 'enable_reads_down_scaling') or
            not get_table_option(key_name, 'enable_writes_down_scaling')):
        if (not get_table_option(key_name, 'enable_reads_down_scaling') and
                current_reads > reads):
            reads = current_reads
        if (not get_table_option(key_name, 'enable_writes_down_scaling') and
                current_writes > writes):
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

    if retry_with_only_increase:
        # Ensure that we are only doing increases
        if current_reads > reads:
            reads = current_reads
        if current_writes > writes:
            writes = current_writes

        # Return if we do not need to scale at all
        if reads == current_reads and writes == current_writes:
            logger.info(
                '{0} - No need to scale up reads nor writes'.format(
                    table_name))
            return

        logger.info(
            '{0} - Retrying to update provisioning, excluding any decreases. '
            'Setting new reads to {1} and new writes to {2}'.format(
                table_name, reads, writes))

    # Check that we are in the right time frame
    maintenance_windows = get_table_option(key_name, 'maintenance_windows')
    if maintenance_windows:
        if not __is_table_maintenance_window(table_name, maintenance_windows):
            logger.warning(
                '{0} - We are outside a maintenance window. '
                'Will only perform up scaling activities'.format(table_name))

            # Ensure that we are only doing increases
            if current_reads > reads:
                reads = current_reads
            if current_writes > writes:
                writes = current_writes

            # Return if we do not need to scale up
            if reads == current_reads and writes == current_writes:
                logger.info(
                    '{0} - No need to scale up reads nor writes'.format(
                        table_name))
                return

        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    logger.info(
        '{0} - Updating provisioning to {1} reads and {2} writes'.format(
            table_name, reads, writes))

    # Return if dry-run
    if get_global_option('dry_run'):
        return

    try:
        table.update(
            throughput={
                'read': reads,
                'write': writes
            })

        # See if we should send notifications for scale-down, scale-up or both
        sns_message_types = []
        if current_reads > reads or current_writes > writes:
            sns_message_types.append('scale-down')
        if current_reads < reads or current_writes < writes:
            sns_message_types.append('scale-up')

        message = []
        if current_reads > reads:
            message.append('{0} - Reads: DOWN from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        elif current_reads < reads:
            message.append('{0} - Reads: UP from {1} to {2}\n'.format(
                table_name, current_reads, reads))
        if current_writes > writes:
            message.append('{0} - Writes: DOWN from {1} to {2}\n'.format(
                table_name, current_writes, writes))
        elif current_writes < writes:
            message.append('{0} - Writes: UP from {1} to {2}\n'.format(
                table_name, current_writes, writes))

        sns.publish_table_notification(
            key_name,
            ''.join(message),
            sns_message_types,
            subject='Updated provisioning for table {0}'.format(table_name))
    except JSONResponseError as error:
        exception = error.body['__type'].split('#')[1]
        known_exceptions = [
            'LimitExceededException',
            'ValidationException',
            'ResourceInUseException']

        if exception in known_exceptions:
            logger.warning('{0} - {1}: {2}'.format(
                table_name, exception, error.body['message']))
        else:
            if 'message' in error.body:
                msg = error.body['message']
            else:
                msg = error

            logger.error(
                (
                    '{0} - Unhandled exception: {1}: {2}. '
                    'Please file a bug report at '
                    'https://github.com/sebdah/dynamic-dynamodb/issues'
                ).format(table_name, exception, msg))

        if (not retry_with_only_increase and
                exception == 'LimitExceededException'):
            logger.info(
                '{0} - Will retry to update provisioning '
                'with only increases'.format(table_name))
            update_table_provisioning(
                table_name,
                key_name,
                reads,
                writes,
                retry_with_only_increase=True)
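
The notification block derives which SNS message types apply by comparing the old and new values in each dimension. A self-contained sketch of that derivation (illustrative helper):

def notification_types(current_reads, current_writes, reads, writes):
    types = []
    if current_reads > reads or current_writes > writes:
        types.append('scale-down')
    if current_reads < reads or current_writes < writes:
        types.append('scale-up')
    return types

assert notification_types(100, 100, 50, 200) == ['scale-down', 'scale-up']
assert notification_types(100, 100, 100, 100) == []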
예제 #34
0
def __ensure_provisioning_writes(table_name, key_name,
                                 num_consec_write_checks):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (bool, int, int)
        update_needed, updated_write_units, num_consec_write_checks
    """
    if not get_table_option(key_name, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - Autoscaling of writes has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_write_units(table_name), 0

    update_needed = False
    try:
        lookback_window_start = get_table_option(key_name,
                                                 'lookback_window_start')
        current_write_units = dynamodb.get_provisioned_table_write_units(
            table_name)
        consumed_write_units_percent = \
            table_stats.get_consumed_write_units_percent(
                table_name, lookback_window_start)
        throttled_write_count = \
            table_stats.get_throttled_write_event_count(
                table_name, lookback_window_start)
        writes_upper_threshold = \
            get_table_option(key_name, 'writes_upper_threshold')
        writes_lower_threshold = \
            get_table_option(key_name, 'writes_lower_threshold')
        throttled_writes_upper_threshold = \
            get_table_option(key_name, 'throttled_writes_upper_threshold')
        increase_writes_unit = \
            get_table_option(key_name, 'increase_writes_unit')
        increase_writes_with = \
            get_table_option(key_name, 'increase_writes_with')
        decrease_writes_unit = \
            get_table_option(key_name, 'decrease_writes_unit')
        decrease_writes_with = \
            get_table_option(key_name, 'decrease_writes_with')
        min_provisioned_writes = \
            get_table_option(key_name, 'min_provisioned_writes')
        max_provisioned_writes = \
            get_table_option(key_name, 'max_provisioned_writes')
        num_write_checks_before_scale_down = \
            get_table_option(key_name, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = \
            get_table_option(key_name, 'num_write_checks_reset_percent')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current write unit value
    updated_write_units = current_write_units

    # Reset consecutive write count if num_write_checks_reset_percent is reached
    if num_write_checks_reset_percent:

        if consumed_write_units_percent >= num_write_checks_reset_percent:

            logger.info('{0} - Resetting the number of consecutive '
                        'write checks. Reason: Consumed percent {1} is '
                        'greater than reset percent: {2}'.format(
                            table_name, consumed_write_units_percent,
                            num_write_checks_reset_percent))

            num_consec_write_checks = 0

    # Check if we should update write provisioning
    if (consumed_write_units_percent == 0 and not get_table_option(
            key_name, 'allow_scaling_down_writes_on_0_percent')):

        logger.info(
            '{0} - Scaling down writes is not done when usage is at 0%'.format(
                table_name))

    # Increase needed due to high CU consumption
    elif consumed_write_units_percent >= writes_upper_threshold:

        # Exit if up scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_up_scaling'):
            logger.debug(
                '{0} - Up scaling event detected. No action taken as scaling '
                'up writes has been disabled in the configuration'.format(
                    table_name))
        else:
            if increase_writes_unit == 'percent':
                calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                calculated_provisioning = calculators.increase_writes_in_units(
                    current_write_units, increase_writes_with,
                    get_table_option(key_name, 'max_provisioned_writes'),
                    consumed_write_units_percent, table_name)

            if current_write_units != calculated_provisioning:
                logger.info(
                    '{0} - Resetting the number of consecutive '
                    'write checks. Reason: scale up event detected'.format(
                        table_name))
                num_consec_write_checks = 0
                update_needed = True
                updated_write_units = calculated_provisioning

    # Increase needed due to high throttling
    elif throttled_write_count > throttled_writes_upper_threshold:

        if throttled_writes_upper_threshold > 0:
            if increase_writes_unit == 'percent':
                calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                calculated_provisioning = calculators.increase_writes_in_units(
                    current_write_units, increase_writes_with,
                    get_table_option(key_name, 'max_provisioned_writes'),
                    consumed_write_units_percent, table_name)

            if current_write_units != calculated_provisioning:
                logger.info(
                    '{0} - Resetting the number of consecutive '
                    'write checks. Reason: scale up event detected'.format(
                        table_name))
                num_consec_write_checks = 0
                update_needed = True
                updated_write_units = calculated_provisioning

    # Decrease needed due to low CU consumption
    elif consumed_write_units_percent <= writes_lower_threshold:

        # Exit if down scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_down_scaling'):
            logger.debug(
                '{0} - Down scaling event detected. No action taken as scaling '
                'down writes has been disabled in the configuration'.format(
                    table_name))
        else:
            if decrease_writes_unit == 'percent':
                calculated_provisioning = \
                    calculators.decrease_writes_in_percent(
                        current_write_units,
                        decrease_writes_with,
                        get_table_option(key_name, 'min_provisioned_writes'),
                        table_name)
            else:
                calculated_provisioning = calculators.decrease_writes_in_units(
                    current_write_units, decrease_writes_with,
                    get_table_option(key_name, 'min_provisioned_writes'),
                    table_name)

            if current_write_units != calculated_provisioning:
                num_consec_write_checks = num_consec_write_checks + 1

                if (num_consec_write_checks >=
                        num_write_checks_before_scale_down):
                    update_needed = True
                    updated_write_units = calculated_provisioning

    # Never go over the configured max provisioning
    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info('Will not increase writes over max-provisioned-writes '
                        'limit ({0} writes)'.format(updated_write_units))

    # Ensure that we have met the min-provisioning
    if min_provisioned_writes:
        if int(min_provisioned_writes) > int(updated_write_units):
            update_needed = True
            updated_write_units = int(min_provisioned_writes)
            logger.info(
                '{0} - Increasing writes to meet min-provisioned-writes '
                'limit ({1} writes)'.format(table_name, updated_write_units))

    logger.info('{0} - Consecutive write checks {1}/{2}'.format(
        table_name, num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
예제 #35
0
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open

    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')

    # Parse the URL to make sure it is OK
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )

    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')

    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')

    match = pattern.match(url)
    if not match:
        logger.error('Malformatted URL: {0}'.format(url))
        sys.exit(1)

    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True

    # Make the actual URL to call
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))

    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name

    # Make the actual request
    try:
        response = requests.get(
            url,
            auth=auth,
            timeout=timeout / 1000.00,
            headers=headers)
        if int(response.status_code) == 200:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))

    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')

    return True
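
Credentials embedded in the circuit breaker URL are split out into a requests-style basic auth tuple before the call is made. A self-contained sketch using the same pattern and a made-up URL:

import re

pattern = re.compile(
    r'^(?P<scheme>http(s)?://)'
    r'((?P<username>.+):(?P<password>.+)@){0,1}'
    r'(?P<url>.*)$')

raw_url = 'https://user:secret@breaker.example.com/status'
match = pattern.match(raw_url)

url, auth = raw_url, ()
if match.group('username') and match.group('password'):
    url = '{scheme}{url}'.format(
        scheme=match.group('scheme'), url=match.group('url'))
    auth = (match.group('username'), match.group('password'))

print(url)   # https://breaker.example.com/status
print(auth)  # ('user', 'secret')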
예제 #36
0
def main():
    """ Main function called from dynamic-dynamodb """
    try:
        boto_server_error_retries = 3

        while True:
            if get_global_option('daemon'):
                pid_file = '/tmp/dynamic-dynamodb.{0}.pid'.format(
                    get_global_option('instance'))
                daemon = DynamicDynamoDBDaemon(pid_file)

                if get_global_option('daemon') == 'start':
                    daemon.start(
                        check_interval=get_global_option('check_interval'))

                elif get_global_option('daemon') == 'stop':
                    daemon.stop()
                    sys.exit(0)

                elif get_global_option('daemon') == 'restart':
                    daemon.restart(
                        check_interval=get_global_option('check_interval'))

                elif get_global_option('daemon') in ['foreground', 'fg']:
                    daemon.run(
                        check_interval=get_global_option('check_interval'))

                else:
                    print(
                        'Valid options for --daemon are '
                        'start, stop, restart and foreground')
                    sys.exit(1)
            else:
                # Ensure provisioning
                for table_name, table_key in dynamodb.get_tables_and_gsis():
                    try:
                        table.ensure_provisioning(table_name, table_key)

                        gsi_names = set()
                        # Match GSI names against the configured GSI regexp keys
                        if get_table_option(table_key, 'gsis'):
                            for gst_instance in dynamodb.table_gsis(table_name):
                                gsi_name = gst_instance[u'IndexName']

                                try:
                                    gsi_keys = get_table_option(
                                        table_key, 'gsis').keys()
                                except AttributeError:
                                    continue

                                for gsi_key in gsi_keys:
                                    try:
                                        if re.match(gsi_key, gsi_name):
                                            logger.debug(
                                                'Table {0} GSI {1} matches '
                                                'GSI config key {2}'.format(
                                                    table_name,
                                                    gsi_name,
                                                    gsi_key))
                                            gsi_names.add(
                                                (
                                                    gsi_name,
                                                    gsi_key
                                                ))
                                    except re.error:
                                        logger.error(
                                            'Invalid regular expression: '
                                            '"{0}"'.format(gsi_key))
                                        sys.exit(1)

                        gsi_names = sorted(gsi_names)

                        for gsi_name, gsi_key in gsi_names:
                            gsi.ensure_provisioning(
                                table_name,
                                table_key,
                                gsi_name,
                                gsi_key)

                    except JSONResponseError as error:
                        exception = error.body['__type'].split('#')[1]
                        if exception == 'ResourceNotFoundException':
                            logger.error(
                                '{0} - Table {1} does not exist anymore'.format(
                                    table_name, table_name))
                            continue

                    except BotoServerError as error:
                        if boto_server_error_retries > 0:
                            logger.error(
                                'Unknown boto error. Status: "{0}". '
                                'Reason: "{1}"'.format(
                                    error.status,
                                    error.reason))
                            logger.error(
                                'Please bug report if this error persists')
                            boto_server_error_retries -= 1
                            continue
                        else:
                            raise

            # Sleep between the checks
            logger.debug('Sleeping {0} seconds until next check'.format(
                get_global_option('check_interval')))
            time.sleep(get_global_option('check_interval'))
    except Exception as error:
        logger.exception(error)
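
Unknown server errors in the provisioning loop are logged and skipped a bounded number of times before being re-raised. A self-contained sketch of that retry budget, with a stand-in exception type:

def run_checks(table_names, check_one, error_budget=3):
    """ Call check_one per table; tolerate up to error_budget failures """
    for name in table_names:
        try:
            check_one(name)
        except RuntimeError as error:   # stand-in for BotoServerError
            if error_budget > 0:
                print('Unknown error for {0}: {1}'.format(name, error))
                error_budget -= 1
                continue
            raise

run_checks(['table-a', 'table-b'], lambda name: None)  # no failures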
예제 #37
0
def update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    try:
        table = dynamodb.get_table(table_name)
    except DynamoDBResponseError:
        # Return if the table does not exist
        return None

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    if table.status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is in {1} state'.format(table_name, table.status))

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if ((read_units < table.read_units) or
                (table.read_units == get_table_option(
                    key_name, 'min_provisioned_reads'))):
            if ((write_units < table.write_units) or
                    (table.write_units == get_table_option(
                        key_name, 'min_provisioned_writes'))):
                logger.info(
                    '{0} - Both reads and writes will be decreased'.format(
                        table_name))

        elif read_units < table.read_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            read_units = table.read_units
        elif write_units < table.write_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            write_units = table.write_units

    if read_units == table.read_units and write_units == table.write_units:
        logger.debug(
            '{0} - No need to update provisioning'.format(table_name))
        return

    if not get_global_option('dry_run'):
        try:
            table.update_throughput(int(read_units), int(write_units))
            logger.info('Provisioning updated')
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'LimitExceededException':
                logger.warning(
                    '{0} - {1}'.format(table_name, error.body['message']))

                if int(read_units) > table.read_units:
                    logger.info('{0} - Scaling up reads to {1:d}'.format(
                        table_name,
                        int(read_units)))
                    update_throughput(
                        table_name,
                        int(read_units),
                        int(table.write_units),
                        key_name)

                elif int(write_units) > table.write_units:
                    logger.info('{0} - Scaling up writes to {1:d}'.format(
                        table_name,
                        int(write_units)))
                    update_throughput(
                        table_name,
                        int(table.read_units),
                        int(write_units),
                        key_name)

            elif dynamodb_error == 'ValidationException':
                logger.warning('{0} - ValidationException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'ResourceInUseException':
                logger.warning('{0} - ResourceInUseException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'AccessDeniedException':
                logger.warning('{0} - AccessDeniedException: {1}'.format(
                    table_name,
                    error.body['message']))

            else:
                logger.error(
                    (
                        '{0} - Unhandled exception: {1}: {2}. '
                        'Please file a bug report at '
                        'https://github.com/sebdah/dynamic-dynamodb/issues'
                    ).format(
                        table_name,
                        dynamodb_error,
                        error.body['message']))
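
When the combined throughput update is rejected with LimitExceededException, the increase is retried one dimension at a time, reads first and then writes, keeping the other dimension at its current value. A simplified sketch of that fallback (names are illustrative):

def split_retry(current_reads, current_writes, new_reads, new_writes, apply):
    """ Retry an increase one dimension at a time after a limit error """
    if new_reads > current_reads:
        apply(new_reads, current_writes)
    elif new_writes > current_writes:
        apply(current_reads, new_writes)

split_retry(100, 100, 200, 300,
            apply=lambda r, w: print('retrying with', r, w))
# prints: retrying with 200 100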
예제 #38
0
def __ensure_provisioning_reads(table_name, key_name):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :returns: (bool, int) -- update_needed, updated_read_units
    """
    if not get_table_option(key_name, 'enable_reads_autoscaling'):
        logger.info(
            '{0} - Autoscaling of reads has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_read_units(table_name)

    update_needed = False
    try:
        updated_read_units = dynamodb.get_provisioned_table_read_units(
            table_name)
        consumed_read_units_percent = \
            table_stats.get_consumed_read_units_percent(table_name)
        throttled_read_count = \
            table_stats.get_throttled_read_event_count(table_name)
        reads_upper_threshold = \
            get_table_option(key_name, 'reads_upper_threshold')
        reads_lower_threshold = \
            get_table_option(key_name, 'reads_lower_threshold')
        throttled_reads_upper_threshold = \
            get_table_option(key_name, 'throttled_reads_upper_threshold')
        increase_reads_with = \
            get_table_option(key_name, 'increase_reads_with')
        increase_reads_unit = \
            get_table_option(key_name, 'increase_reads_unit')
        decrease_reads_with = \
            get_table_option(key_name, 'decrease_reads_with')
        decrease_reads_unit = \
            get_table_option(key_name, 'decrease_reads_unit')
        max_provisioned_reads = \
            get_table_option(key_name, 'max_provisioned_reads')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    if (consumed_read_units_percent == 0 and not
            get_table_option(
                key_name, 'allow_scaling_down_reads_on_0_percent')):
        logger.info(
            '{0} - Scaling down reads is not done when usage is at 0%'.format(
                table_name))

    elif consumed_read_units_percent >= reads_upper_threshold:

        if increase_reads_unit == 'percent':
            updated_provisioning = calculators.increase_reads_in_percent(
                updated_read_units,
                increase_reads_with,
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.increase_reads_in_units(
                updated_read_units,
                increase_reads_with,
                key_name,
                table_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    elif throttled_read_count > throttled_reads_upper_threshold:

        if throttled_reads_upper_threshold > 0:
            if increase_reads_unit == 'percent':
                updated_provisioning = calculators.increase_reads_in_percent(
                    updated_read_units,
                    increase_reads_with,
                    key_name,
                    table_name)
            else:
                updated_provisioning = calculators.increase_reads_in_units(
                    updated_read_units,
                    increase_reads_with,
                    key_name,
                    table_name)

            if updated_read_units != updated_provisioning:
                update_needed = True
                updated_read_units = updated_provisioning

    elif consumed_read_units_percent <= reads_lower_threshold:

        if decrease_reads_unit == 'percent':
            updated_provisioning = calculators.decrease_reads_in_percent(
                updated_read_units,
                decrease_reads_with,
                key_name,
                table_name)
        else:
            updated_provisioning = calculators.decrease_reads_in_units(
                updated_read_units,
                decrease_reads_with,
                key_name,
                table_name)

        if updated_read_units != updated_provisioning:
            update_needed = True
            updated_read_units = updated_provisioning

    if max_provisioned_reads:
        if (int(updated_read_units) > int(max_provisioned_reads)):
            update_needed = True
            updated_read_units = int(max_provisioned_reads)
            logger.info(
                'Will not increase reads over max-provisioned-reads '
                'limit ({0} reads)'.format(updated_read_units))

    return update_needed, int(updated_read_units)
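The order of the branches above is what decides whether reads are scaled up, scaled down, or left alone. A self-contained sketch of that decision order follows; the threshold numbers are assumptions for illustration, not dynamic-dynamodb defaults, and the throttling and 0%-usage guards are omitted for brevity.

# Standalone sketch of the read-scaling decision order used above.
def decide_read_scaling(consumed_percent, upper_threshold=90,
                        lower_threshold=30):
    """ Return 'up', 'down' or 'none' following the same branch order """
    if consumed_percent >= upper_threshold:
        return 'up'
    elif consumed_percent <= lower_threshold:
        return 'down'
    return 'none'

assert decide_read_scaling(95) == 'up'
assert decide_read_scaling(10) == 'down'
assert decide_read_scaling(50) == 'none'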
Example #39
0
def __update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    provisioned_reads = table_stats.get_provisioned_read_units(table_name)
    provisioned_writes = table_stats.get_provisioned_write_units(table_name)

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    table_status = dynamodb.get_table_status(table_name)
    logger.debug('{0} - Table status is {1}'.format(table_name, table_status))
    if table_status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is {1}'.format(table_name, table_status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if read_units < provisioned_reads and write_units < provisioned_writes:
            logger.debug(
                '{0} - Both reads and writes will be decreased'.format(
                    table_name))
        elif read_units < provisioned_reads:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            return
        elif write_units < provisioned_writes:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            return

    if not get_global_option('dry_run'):
        dynamodb.update_table_provisioning(
            table_name,
            int(read_units),
            int(write_units))
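`__is_maintenance_window` itself is not shown in these examples. A sketch of what such a check could look like, assuming the `maintenance_windows` option is a comma-separated list of `HH:MM-HH:MM` ranges; that format is an assumption made for this illustration only.

# Hypothetical maintenance-window check; assumes windows are given as a
# comma-separated list of HH:MM-HH:MM ranges, e.g. '22:00-23:59,00:00-06:00'.
import datetime


def is_maintenance_window(maintenance_windows):
    """ Return True if the current UTC time is inside any window """
    now = datetime.datetime.utcnow().strftime('%H:%M')
    for window in maintenance_windows.split(','):
        start, end = window.strip().split('-')
        if start <= now <= end:
            return True
    return False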
Example #40
0
def ensure_provisioning(
        table_name, key_name,
        num_consec_read_checks,
        num_consec_write_checks):
    """ Ensure that provisioning is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_read_checks: int
    :param num_consec_read_checks: How many consecutive checks have we had
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
    """

    if get_global_option('circuit_breaker_url') or get_table_option(
            key_name, 'circuit_breaker_url'):
        if circuit_breaker.is_open(table_name, key_name):
            logger.warning('Circuit breaker is OPEN!')
            return (0, 0)

    # Handle throughput alarm checks
    __ensure_provisioning_alarm(table_name, key_name)

    try:
        read_update_needed, updated_read_units, num_consec_read_checks = \
            __ensure_provisioning_reads(
                table_name,
                key_name,
                num_consec_read_checks)
        write_update_needed, updated_write_units, num_consec_write_checks = \
            __ensure_provisioning_writes(
                table_name,
                key_name,
                num_consec_write_checks)

        if read_update_needed:
            num_consec_read_checks = 0

        if write_update_needed:
            num_consec_write_checks = 0

        # Handle throughput updates
        if read_update_needed or write_update_needed:
            logger.info(
                '{0} - Changing provisioning to {1:d} '
                'read units and {2:d} write units'.format(
                    table_name,
                    int(updated_read_units),
                    int(updated_write_units)))
            __update_throughput(
                table_name,
                key_name,
                updated_read_units,
                updated_write_units)
        else:
            logger.info('{0} - No need to change provisioning'.format(
                table_name))
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    return num_consec_read_checks, num_consec_write_checks
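`circuit_breaker.is_open` is consulted before any scaling work is done. A hedged sketch of such a probe, assuming the breaker is an HTTP health check that must answer 200 for scaling to proceed; the URL handling and timeout below are assumptions for illustration, not the project's actual implementation.

# Hypothetical circuit-breaker probe: treat the breaker as open unless the
# configured URL answers HTTP 200 within the timeout.
import requests


def circuit_breaker_is_open(url, timeout=10):
    """ Return True (open) if the health check fails or cannot be reached """
    try:
        return requests.get(url, timeout=timeout).status_code != 200
    except requests.exceptions.RequestException:
        return True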
Example #41
0
def __ensure_provisioning_writes(table_name, key_name,
                                 num_consec_write_checks):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (bool, int, int)
        update_needed, updated_write_units, num_consec_write_checks
    """
    if not get_table_option(key_name, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - Autoscaling of writes has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_write_units(table_name), 0

    update_needed = False
    try:
        lookback_window_start = get_table_option(key_name,
                                                 'lookback_window_start')
        lookback_period = get_table_option(key_name, 'lookback_period')
        current_write_units = dynamodb.get_provisioned_table_write_units(
            table_name)
        consumed_write_units_percent = \
            table_stats.get_consumed_write_units_percent(
                table_name, lookback_window_start, lookback_period)
        throttled_write_count = \
            table_stats.get_throttled_write_event_count(
                table_name, lookback_window_start, lookback_period)
        throttled_by_provisioned_write_percent = \
            table_stats.get_throttled_by_provisioned_write_event_percent(
                table_name, lookback_window_start, lookback_period)
        throttled_by_consumed_write_percent = \
            table_stats.get_throttled_by_consumed_write_percent(
                table_name, lookback_window_start, lookback_period)
        writes_upper_threshold = \
            get_table_option(key_name, 'writes_upper_threshold')
        writes_lower_threshold = \
            get_table_option(key_name, 'writes_lower_threshold')
        throttled_writes_upper_threshold = \
            get_table_option(key_name, 'throttled_writes_upper_threshold')
        increase_writes_unit = \
            get_table_option(key_name, 'increase_writes_unit')
        increase_writes_with = \
            get_table_option(key_name, 'increase_writes_with')
        decrease_writes_unit = \
            get_table_option(key_name, 'decrease_writes_unit')
        decrease_writes_with = \
            get_table_option(key_name, 'decrease_writes_with')
        min_provisioned_writes = \
            get_table_option(key_name, 'min_provisioned_writes')
        max_provisioned_writes = \
            get_table_option(key_name, 'max_provisioned_writes')
        num_write_checks_before_scale_down = \
            get_table_option(key_name, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = \
            get_table_option(key_name, 'num_write_checks_reset_percent')
        increase_throttled_by_provisioned_writes_unit = \
            get_table_option(
                key_name, 'increase_throttled_by_provisioned_writes_unit')
        increase_throttled_by_provisioned_writes_scale = \
            get_table_option(
                key_name, 'increase_throttled_by_provisioned_writes_scale')
        increase_throttled_by_consumed_writes_unit = \
            get_table_option(
                key_name, 'increase_throttled_by_consumed_writes_unit')
        increase_throttled_by_consumed_writes_scale = \
            get_table_option(
                key_name, 'increase_throttled_by_consumed_writes_scale')
        increase_consumed_writes_unit = \
            get_table_option(key_name, 'increase_consumed_writes_unit')
        increase_consumed_writes_with = \
            get_table_option(key_name, 'increase_consumed_writes_with')
        increase_consumed_writes_scale = \
            get_table_option(key_name, 'increase_consumed_writes_scale')
        decrease_consumed_writes_unit = \
            get_table_option(key_name, 'decrease_consumed_writes_unit')
        decrease_consumed_writes_with = \
            get_table_option(key_name, 'decrease_consumed_writes_with')
        decrease_consumed_writes_scale = \
            get_table_option(key_name, 'decrease_consumed_writes_scale')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current write unit value
    updated_write_units = current_write_units

    # Reset consecutive write count if num_write_checks_reset_percent
    # is reached
    if num_write_checks_reset_percent:

        if consumed_write_units_percent >= num_write_checks_reset_percent:

            logger.info('{0} - Resetting the number of consecutive '
                        'write checks. Reason: Consumed percent {1} is '
                        'greater than reset percent: {2}'.format(
                            table_name, consumed_write_units_percent,
                            num_write_checks_reset_percent))

            num_consec_write_checks = 0

    # Exit if up scaling has been disabled
    if not get_table_option(key_name, 'enable_writes_up_scaling'):
        logger.debug(
            '{0} - Up scaling event detected. No action taken as scaling '
            'up writes has been disabled in the configuration'.format(
                table_name))

    else:

        # If local/granular values not specified use global values
        increase_consumed_writes_unit = \
            increase_consumed_writes_unit or increase_writes_unit
        increase_throttled_by_provisioned_writes_unit = (
            increase_throttled_by_provisioned_writes_unit
            or increase_writes_unit)
        increase_throttled_by_consumed_writes_unit = \
            increase_throttled_by_consumed_writes_unit or increase_writes_unit

        increase_consumed_writes_with = \
            increase_consumed_writes_with or increase_writes_with

        # Initialise variables to store calculated provisioning
        throttled_by_provisioned_calculated_provisioning = scale_reader(
            increase_throttled_by_provisioned_writes_scale,
            throttled_by_provisioned_write_percent)
        throttled_by_consumed_calculated_provisioning = scale_reader(
            increase_throttled_by_consumed_writes_scale,
            throttled_by_consumed_write_percent)
        consumed_calculated_provisioning = scale_reader(
            increase_consumed_writes_scale, consumed_write_units_percent)
        throttled_count_calculated_provisioning = 0
        calculated_provisioning = 0

        # Increase needed due to high throttled to provisioned ratio
        if throttled_by_provisioned_calculated_provisioning:

            if increase_throttled_by_provisioned_writes_unit == 'percent':
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high throttled to consumed ratio
        if throttled_by_consumed_calculated_provisioning:

            if increase_throttled_by_consumed_writes_unit == 'percent':
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high CU consumption
        if consumed_calculated_provisioning:

            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        elif (writes_upper_threshold
              and consumed_write_units_percent > writes_upper_threshold
              and not increase_consumed_writes_scale):

            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high throttling
        if (throttled_writes_upper_threshold
                and throttled_write_count > throttled_writes_upper_threshold):

            if increase_writes_unit == 'percent':
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        updated_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        updated_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Determine which metric requires the most scaling
        if (throttled_by_provisioned_calculated_provisioning >
                calculated_provisioning):
            calculated_provisioning = \
                throttled_by_provisioned_calculated_provisioning
            scale_reason = ("due to throttled events by provisioned "
                            "units threshold being exceeded")
        if (throttled_by_consumed_calculated_provisioning >
                calculated_provisioning):
            calculated_provisioning = \
                throttled_by_consumed_calculated_provisioning
            scale_reason = ("due to throttled events by consumed "
                            "units threshold being exceeded")
        if consumed_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = consumed_calculated_provisioning
            scale_reason = "due to consumed threshold being exceeded"
        if throttled_count_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = throttled_count_calculated_provisioning
            scale_reason = "due to throttled events threshold being exceeded"

        if calculated_provisioning > current_write_units:
            logger.info('{0} - Resetting the number of consecutive '
                        'write checks. Reason: scale up {1}'.format(
                            table_name, scale_reason))
            num_consec_write_checks = 0
            update_needed = True
            updated_write_units = calculated_provisioning

    # Decrease needed due to low CU consumption
    if not update_needed:
        # If local/granular values not specified use global values
        decrease_consumed_writes_unit = \
            decrease_consumed_writes_unit or decrease_writes_unit

        decrease_consumed_writes_with = \
            decrease_consumed_writes_with or decrease_writes_with

        # Initialise variables to store calculated provisioning
        consumed_calculated_provisioning = scale_reader_decrease(
            decrease_consumed_writes_scale, consumed_write_units_percent)
        calculated_provisioning = None

        # Exit if down scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_down_scaling'):
            logger.debug(
                '{0} - Down scaling event detected. No action taken as scaling'
                ' down writes has been disabled in the configuration'.format(
                    table_name))
        # Exit if writes == 0% and downscaling has been disabled at 0%
        elif (consumed_write_units_percent == 0 and not get_table_option(
                key_name, 'allow_scaling_down_writes_on_0_percent')):
            logger.info(
                '{0} - Down scaling event detected. No action taken as scaling'
                ' down writes is not done when usage is at 0%'.format(
                    table_name))
        # Exit if writes are still throttled
        elif (throttled_writes_upper_threshold
              and throttled_write_count > throttled_writes_upper_threshold):
            logger.info(
                '{0} - Down scaling event detected. No action taken as there'
                ' are still throttled writes'.format(table_name))
        else:
            if consumed_calculated_provisioning:
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
            elif (writes_lower_threshold
                  and consumed_write_units_percent < writes_lower_threshold
                  and not decrease_consumed_writes_scale):
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)

            if (calculated_provisioning
                    and current_write_units != calculated_provisioning):
                num_consec_write_checks += 1

                if num_consec_write_checks >= \
                        num_write_checks_before_scale_down:
                    update_needed = True
                    updated_write_units = calculated_provisioning

    # Never go over the configured max provisioning
    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info('Will not increase writes over max-provisioned-writes '
                        'limit ({0} writes)'.format(updated_write_units))

    # Ensure that we have met the min-provisioning
    if min_provisioned_writes:
        if int(min_provisioned_writes) > int(updated_write_units):
            update_needed = True
            updated_write_units = int(min_provisioned_writes)
            logger.info(
                '{0} - Increasing writes to meet min-provisioned-writes '
                'limit ({1} writes)'.format(table_name, updated_write_units))

    if calculators.is_consumed_over_proposed(current_write_units,
                                             updated_write_units,
                                             consumed_write_units_percent):
        update_needed = False
        updated_write_units = current_write_units
        logger.info(
            '{0} - Consumed is over proposed write units. Will leave table at '
            'current setting.'.format(table_name))

    logger.info('{0} - Consecutive write checks {1}/{2}'.format(
        table_name, num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
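The `scale_reader` helper used above maps an observed metric (for example the consumed write percent) onto a scaling amount via the configured `*_scale` option. A sketch of that lookup, assuming the scale option is a dict of `threshold: scaling amount` pairs; the shape of the option is an assumption made for this example.

# Hypothetical scale_reader: return the scaling amount configured for the
# highest threshold that the observed value has reached, or 0 if no scale
# configuration is given.
def scale_reader(scale_config, current_value):
    scale_amount = 0
    if not scale_config:
        return scale_amount
    for threshold in sorted(scale_config):
        if current_value >= threshold:
            scale_amount = scale_config[threshold]
    return scale_amount


assert scale_reader({0: 0, 50: 25, 80: 100}, 85) == 100
assert scale_reader(None, 85) == 0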
Example #42
0
def __ensure_provisioning_writes(
        table_name, key_name, num_consec_write_checks):
    """ Ensure that provisioning of writes is correct

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type key_name: str
    :param key_name: Configuration option key name
    :type num_consec_write_checks: int
    :param num_consec_write_checks: How many consecutive checks have we had
    :returns: (bool, int, int)
        update_needed, updated_write_units, num_consec_write_checks
    """
    if not get_table_option(key_name, 'enable_writes_autoscaling'):
        logger.info(
            '{0} - Autoscaling of writes has been disabled'.format(table_name))
        return False, dynamodb.get_provisioned_table_write_units(table_name), 0

    update_needed = False
    try:
        lookback_window_start = get_table_option(
            key_name, 'lookback_window_start')
        lookback_period = get_table_option(key_name, 'lookback_period')
        current_write_units = dynamodb.get_provisioned_table_write_units(
            table_name)
        consumed_write_units_percent = \
            table_stats.get_consumed_write_units_percent(
                table_name, lookback_window_start, lookback_period)
        throttled_write_count = \
            table_stats.get_throttled_write_event_count(
                table_name, lookback_window_start, lookback_period)
        throttled_by_provisioned_write_percent = \
            table_stats.get_throttled_by_provisioned_write_event_percent(
                table_name, lookback_window_start, lookback_period)
        throttled_by_consumed_write_percent = \
            table_stats.get_throttled_by_consumed_write_percent(
                table_name, lookback_window_start, lookback_period)
        writes_upper_threshold = \
            get_table_option(key_name, 'writes_upper_threshold')
        writes_lower_threshold = \
            get_table_option(key_name, 'writes_lower_threshold')
        throttled_writes_upper_threshold = \
            get_table_option(key_name, 'throttled_writes_upper_threshold')
        increase_writes_unit = \
            get_table_option(key_name, 'increase_writes_unit')
        increase_writes_with = \
            get_table_option(key_name, 'increase_writes_with')
        decrease_writes_unit = \
            get_table_option(key_name, 'decrease_writes_unit')
        decrease_writes_with = \
            get_table_option(key_name, 'decrease_writes_with')
        min_provisioned_writes = \
            get_table_option(key_name, 'min_provisioned_writes')
        max_provisioned_writes = \
            get_table_option(key_name, 'max_provisioned_writes')
        num_write_checks_before_scale_down = \
            get_table_option(key_name, 'num_write_checks_before_scale_down')
        num_write_checks_reset_percent = \
            get_table_option(key_name, 'num_write_checks_reset_percent')
        increase_throttled_by_provisioned_writes_unit = \
            get_table_option(
                key_name, 'increase_throttled_by_provisioned_writes_unit')
        increase_throttled_by_provisioned_writes_scale = \
            get_table_option(
                key_name, 'increase_throttled_by_provisioned_writes_scale')
        increase_throttled_by_consumed_writes_unit = \
            get_table_option(
                key_name, 'increase_throttled_by_consumed_writes_unit')
        increase_throttled_by_consumed_writes_scale = \
            get_table_option(
                key_name, 'increase_throttled_by_consumed_writes_scale')
        increase_consumed_writes_unit = \
            get_table_option(key_name, 'increase_consumed_writes_unit')
        increase_consumed_writes_with = \
            get_table_option(key_name, 'increase_consumed_writes_with')
        increase_consumed_writes_scale = \
            get_table_option(key_name, 'increase_consumed_writes_scale')
        decrease_consumed_writes_unit = \
            get_table_option(key_name, 'decrease_consumed_writes_unit')
        decrease_consumed_writes_with = \
            get_table_option(key_name, 'decrease_consumed_writes_with')
        decrease_consumed_writes_scale = \
            get_table_option(key_name, 'decrease_consumed_writes_scale')
    except JSONResponseError:
        raise
    except BotoServerError:
        raise

    # Set the updated units to the current write unit value
    updated_write_units = current_write_units

    # Reset consecutive write count if num_write_checks_reset_percent
    # is reached
    if num_write_checks_reset_percent:

        if consumed_write_units_percent >= num_write_checks_reset_percent:

            logger.info(
                '{0} - Resetting the number of consecutive '
                'write checks. Reason: Consumed percent {1} is '
                'greater than reset percent: {2}'.format(
                    table_name,
                    consumed_write_units_percent,
                    num_write_checks_reset_percent))

            num_consec_write_checks = 0

    # Exit if up scaling has been disabled
    if not get_table_option(key_name, 'enable_writes_up_scaling'):
        logger.debug(
            '{0} - Up scaling event detected. No action taken as scaling '
            'up writes has been disabled in the configuration'.format(
                table_name))

    else:

        # If local/granular values not specified use global values
        increase_consumed_writes_unit = \
            increase_consumed_writes_unit or increase_writes_unit
        increase_throttled_by_provisioned_writes_unit = (
            increase_throttled_by_provisioned_writes_unit
            or increase_writes_unit)
        increase_throttled_by_consumed_writes_unit = \
            increase_throttled_by_consumed_writes_unit or increase_writes_unit

        increase_consumed_writes_with = \
            increase_consumed_writes_with or increase_writes_with

        # Initialise variables to store calculated provisioning
        throttled_by_provisioned_calculated_provisioning = scale_reader(
            increase_throttled_by_provisioned_writes_scale,
            throttled_by_provisioned_write_percent)
        throttled_by_consumed_calculated_provisioning = scale_reader(
            increase_throttled_by_consumed_writes_scale,
            throttled_by_consumed_write_percent)
        consumed_calculated_provisioning = scale_reader(
            increase_consumed_writes_scale, consumed_write_units_percent)
        throttled_count_calculated_provisioning = 0
        calculated_provisioning = 0

        # Increase needed due to high throttled to provisioned ratio
        if throttled_by_provisioned_calculated_provisioning:

            if increase_throttled_by_provisioned_writes_unit == 'percent':
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_by_provisioned_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_provisioned_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high throttled to consumed ratio
        if throttled_by_consumed_calculated_provisioning:

            if increase_throttled_by_consumed_writes_unit == 'percent':
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_by_consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        throttled_by_consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high CU consumption
        if consumed_calculated_provisioning:

            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        consumed_calculated_provisioning,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        elif (writes_upper_threshold
              and consumed_write_units_percent > writes_upper_threshold
              and not increase_consumed_writes_scale):

            if increase_consumed_writes_unit == 'percent':
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                consumed_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        current_write_units,
                        increase_consumed_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Increase needed due to high throttling
        if (throttled_writes_upper_threshold and throttled_write_count >
                throttled_writes_upper_threshold):

            if increase_writes_unit == 'percent':
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_percent(
                        updated_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)
            else:
                throttled_count_calculated_provisioning = \
                    calculators.increase_writes_in_units(
                        updated_write_units,
                        increase_writes_with,
                        get_table_option(key_name, 'max_provisioned_writes'),
                        consumed_write_units_percent,
                        table_name)

        # Determine which metric requires the most scaling
        if (throttled_by_provisioned_calculated_provisioning >
                calculated_provisioning):
            calculated_provisioning = \
                throttled_by_provisioned_calculated_provisioning
            scale_reason = (
                "due to throttled events by provisioned "
                "units threshold being exceeded")
        if (throttled_by_consumed_calculated_provisioning
                > calculated_provisioning):
            calculated_provisioning = \
                throttled_by_consumed_calculated_provisioning
            scale_reason = (
                "due to throttled events by consumed "
                "units threshold being exceeded")
        if consumed_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = consumed_calculated_provisioning
            scale_reason = "due to consumed threshold being exceeded"
        if throttled_count_calculated_provisioning > calculated_provisioning:
            calculated_provisioning = throttled_count_calculated_provisioning
            scale_reason = "due to throttled events threshold being exceeded"

        if calculated_provisioning > current_write_units:
            logger.info(
                '{0} - Resetting the number of consecutive '
                'write checks. Reason: scale up {1}'.format(
                    table_name, scale_reason))
            num_consec_write_checks = 0
            update_needed = True
            updated_write_units = calculated_provisioning

    # Decrease needed due to low CU consumption
    if not update_needed:
        # If local/granular values not specified use global values
        decrease_consumed_writes_unit = \
            decrease_consumed_writes_unit or decrease_writes_unit

        decrease_consumed_writes_with = \
            decrease_consumed_writes_with or decrease_writes_with

        # Initialise variables to store calculated provisioning
        consumed_calculated_provisioning = scale_reader_decrease(
            decrease_consumed_writes_scale,
            consumed_write_units_percent)
        calculated_provisioning = None

        # Exit if down scaling has been disabled
        if not get_table_option(key_name, 'enable_writes_down_scaling'):
            logger.debug(
                '{0} - Down scaling event detected. No action taken as scaling'
                ' down writes has been disabled in the configuration'.format(
                    table_name))
        # Exit if writes == 0% and downscaling has been disabled at 0%
        elif (consumed_write_units_percent == 0 and not
                get_table_option(
                    key_name, 'allow_scaling_down_writes_on_0_percent')):
            logger.info(
                '{0} - Down scaling event detected. No action taken as scaling'
                ' down writes is not done when usage is at 0%'.format(
                    table_name))
        # Exit if writes are still throttled
        elif (throttled_writes_upper_threshold
              and throttled_write_count > throttled_writes_upper_threshold):
            logger.info(
                '{0} - Down scaling event detected. No action taken as there'
                ' are still throttled writes'.format(table_name))
        else:
            if consumed_calculated_provisioning:
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            consumed_calculated_provisioning,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
            elif (writes_lower_threshold
                  and consumed_write_units_percent < writes_lower_threshold
                  and not decrease_consumed_writes_scale):
                if decrease_consumed_writes_unit == 'percent':
                    calculated_provisioning = \
                        calculators.decrease_writes_in_percent(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)
                else:
                    calculated_provisioning = \
                        calculators.decrease_writes_in_units(
                            updated_write_units,
                            decrease_consumed_writes_with,
                            get_table_option(
                                key_name, 'min_provisioned_writes'),
                            table_name)

            if (calculated_provisioning and
                    current_write_units != calculated_provisioning):
                num_consec_write_checks += 1

                if num_consec_write_checks >= \
                        num_write_checks_before_scale_down:
                    update_needed = True
                    updated_write_units = calculated_provisioning

    # Never go over the configured max provisioning
    if max_provisioned_writes:
        if int(updated_write_units) > int(max_provisioned_writes):
            update_needed = True
            updated_write_units = int(max_provisioned_writes)
            logger.info(
                'Will not increase writes over max-provisioned-writes '
                'limit ({0} writes)'.format(updated_write_units))

    # Ensure that we have met the min-provisioning
    if min_provisioned_writes:
        if int(min_provisioned_writes) > int(updated_write_units):
            update_needed = True
            updated_write_units = int(min_provisioned_writes)
            logger.info(
                '{0} - Increasing writes to meet min-provisioned-writes '
                'limit ({1} writes)'.format(table_name, updated_write_units))

    if calculators.is_consumed_over_proposed(
            current_write_units,
            updated_write_units,
            consumed_write_units_percent):
        update_needed = False
        updated_write_units = current_write_units
        logger.info(
            '{0} - Consumed is over proposed write units. Will leave table at '
            'current setting.'.format(table_name))

    logger.info('{0} - Consecutive write checks {1}/{2}'.format(
        table_name,
        num_consec_write_checks,
        num_write_checks_before_scale_down))

    return update_needed, updated_write_units, num_consec_write_checks
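`calculators.is_consumed_over_proposed` guards against proposing a value the table is already consuming more than. The exact formula is not shown in these examples; a minimal sketch under that assumption:

# Hypothetical version of the guard: the proposal is rejected if the units
# currently being consumed (current units * consumed percent) already exceed
# the proposed provisioning.
def is_consumed_over_proposed(
        current_provisioning, proposed_provisioning,
        consumed_units_percent):
    consumed_units = current_provisioning * (consumed_units_percent / 100.0)
    return proposed_provisioning < consumed_units


assert is_consumed_over_proposed(100, 40, 60) is True
assert is_consumed_over_proposed(100, 80, 60) is False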
Example #43
0
def execute():
    """ Ensure provisioning """
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The returned values show how many times the scale-down criteria
            # have been met. This is coupled with a config option,
            # "num_intervals_scale_down", to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Add GSI names matching the configured GSI regexps
            for gst_instance in dynamodb.table_gsis(table_name):
                gsi_name = gst_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()

                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))

                    except re.error:
                        logger.error('Invalid regular expression: "{0}"'.format(
                            gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['tables'][table_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['tables'][table_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error(
                    '{0} - Table does not exist anymore'.format(table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". Reason: "{1}"'.format(
                        error.status,
                        error.reason))
                logger.error(
                    'Please file a bug report if this error persists')
                boto_server_error_retries -= 1
                continue

            else:
                raise

    # Sleep between the checks
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next check'.format(
            get_global_option('check_interval')))
        time.sleep(get_global_option('check_interval'))
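A minimal sketch of how `execute()` is presumably driven as a long-running check loop. Since `execute()` already sleeps `check_interval` seconds at the end of each pass, the outer loop only has to decide whether to keep going; the loop itself is an illustration, not the project's actual daemon code.

# Illustrative outer loop around execute(): exits after one pass when the
# run_once option is set, otherwise keeps checking forever.
while True:
    execute()
    if get_global_option('run_once'):
        break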
Example #44
0
def update_throughput(table_name, read_units, write_units, key_name):
    """ Update throughput on the DynamoDB table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type read_units: int
    :param read_units: New read unit provisioning
    :type write_units: int
    :param write_units: New write unit provisioning
    :type key_name: str
    :param key_name: Configuration option key name
    """
    table = dynamodb.get_table(table_name)

    # Check that we are in the right time frame
    if get_table_option(key_name, 'maintenance_windows'):
        if (not __is_maintenance_window(table_name, get_table_option(
                key_name, 'maintenance_windows'))):

            logger.warning(
                '{0} - Current time is outside maintenance window'.format(
                    table_name))
            return
        else:
            logger.info(
                '{0} - Current time is within maintenance window'.format(
                    table_name))

    # Check table status
    if table.status != 'ACTIVE':
        logger.warning(
            '{0} - Not performing throughput changes when table '
            'is in {1} state'.format(table_name, table.status))
        return

    # If this setting is True, we will only scale down when
    # BOTH reads AND writes are low
    if get_table_option(key_name, 'always_decrease_rw_together'):
        if (read_units < table.read_units
                or table.read_units == get_table_option(
                    key_name, 'min_provisioned_reads')):
            if (write_units < table.write_units
                    or table.write_units == get_table_option(
                        key_name, 'min_provisioned_writes')):
                logger.info(
                    '{0} - Both reads and writes will be decreased'.format(
                        table_name))

        elif read_units < table.read_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            read_units = table.read_units
        elif write_units < table.write_units:
            logger.info(
                '{0} - Will not decrease reads nor writes, waiting for '
                'both to become low before decrease'.format(table_name))
            write_units = table.write_units

    if read_units == table.read_units and write_units == table.write_units:
        logger.debug(
            '{0} - No need to update provisioning'.format(table_name))
        return

    if not get_global_option('dry_run'):
        try:
            table.update_throughput(int(read_units), int(write_units))
            logger.info('Provisioning updated')
        except DynamoDBResponseError as error:
            dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
            if dynamodb_error == 'LimitExceededException':
                logger.warning(
                    '{0} - {1}'.format(table_name, error.body['message']))

                if int(read_units) > table.read_units:
                    logger.info('{0} - Scaling up reads to {1:d}'.format(
                        table_name,
                        int(read_units)))
                    update_throughput(
                        table_name,
                        int(read_units),
                        int(table.write_units),
                        key_name)

                elif int(write_units) > table.write_units:
                    logger.info('{0} - Scaling up writes to {1:d}'.format(
                        table_name,
                        int(write_units)))
                    update_throughput(
                        table_name,
                        int(table.read_units),
                        int(write_units),
                        key_name)

            elif dynamodb_error == 'ValidationException':
                logger.warning('{0} - ValidationException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'ResourceInUseException':
                logger.warning('{0} - ResourceInUseException: {1}'.format(
                    table_name,
                    error.body['message']))

            elif dynamodb_error == 'AccessDeniedException':
                logger.warning('{0} - AccessDeniedException: {1}'.format(
                    table_name,
                    error.body['message']))

            else:
                logger.error(
                    (
                        '{0} - Unhandled exception: {1}: {2}. '
                        'Please file a bug report at '
                        'https://github.com/sebdah/dynamic-dynamodb/issues'
                    ).format(
                        table_name,
                        dynamodb_error,
                        error.body['message']))