Example #1
    def __init__(self, name, rt, wt, hash_key, range_key, status='CREATING'):
        """
        Create a new ``Table``. When manually creating a table, make sure you
        register it in :py:class:`ddbmock.database.db.DynamoDB` with something
        like ``dynamodb.data[name] = Table(name, "...")``.

        Even though there are :py:const:`DELAY_CREATING` seconds before the status
        is updated to ``ACTIVE``, the table is immediately available. This is a
        slight difference from real DynamoDB to ease unit and functional tests.

        :param name: Valid table name. No further checks are performed.
        :param rt: Provisioned read throughput.
        :param wt: Provisioned write throughput.
        :param hash_key: :py:class:`ddbmock.database.key.Key` instance describing the ``hash_key``
        :param range_key: :py:class:`ddbmock.database.key.Key` instance describing the ``range_key``, or ``None`` if the table has no ``range_key``
        :param status: (optional) Valid initial table status. If the table needs to be available immediately, use ``ACTIVE``; otherwise, leave the default value.

        .. note:: ``rt`` and ``wt`` are only used by ``DescribeTable`` and ``UpdateTable``. No throttling is done, nor will it ever be.
        """
        self.name = name
        self.rt = rt
        self.wt = wt
        self.hash_key = hash_key
        self.range_key = range_key
        self.status = status

        self.store = Store(name)
        self.write_lock = Lock()

        self.creation_time = time.time()
        self.last_increase_time = 0
        self.last_decrease_time = 0
        self.count = 0

        schedule_action(config.DELAY_CREATING, self.activate)
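
The docstring above suggests registering manually-created tables in the ``DynamoDB`` registry. Here is a minimal sketch of that workflow; the ``Key`` constructor arguments (attribute name plus type code) and ``DynamoDB()`` behaving as a shared registry are assumptions for illustration, not confirmed signatures:

from ddbmock.database.db import DynamoDB
from ddbmock.database.key import Key
from ddbmock.database.table import Table

dynamodb = DynamoDB()                      # assumed: shared registry exposing a .data dict
hash_key = Key("id", "S")                  # assumed constructor: attribute name + type code
table = Table("users", rt=10, wt=10,
              hash_key=hash_key, range_key=None,
              status="ACTIVE")             # ACTIVE so the table is usable right away
dynamodb.data["users"] = table             # manual registration, as the docstring recommends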
Example #2
    def __init__(self, name, rt, wt, hash_key, range_key, status='CREATING'):
        """
        Create a new ``Table``. When manually creating a table, make sure you
        register it in :py:class:`ddbmock.database.db.DynamoDB` with something
        like ``dynamodb.data[name] = Table(name, "...")``.

        Even though there are :py:const:`DELAY_CREATING` seconds before the status
        is updated to ``ACTIVE``, the table is immediately available. This is a
        slight difference from real DynamoDB to ease unit and functional tests.

        :param name: Valid table name. No further checks are performed.
        :param rt: Provisioned read throughput.
        :param wt: Provisioned write throughput.
        :param hash_key: :py:class:`ddbmock.database.key.Key` instance describing the ``hash_key``
        :param range_key: :py:class:`ddbmock.database.key.Key` instance describing the ``range_key``, or ``None`` if the table has no ``range_key``
        :param status: (optional) Valid initial table status. If the table needs to be available immediately, use ``ACTIVE``; otherwise, leave the default value.

        .. note:: ``rt`` and ``wt`` are only used by ``DescribeTable`` and ``UpdateTable``. No throttling is done, nor will it ever be.
        """
        self.name = name
        self.rt = rt
        self.wt = wt
        self.hash_key = hash_key
        self.range_key = range_key
        self.status = status

        self.store = Store(name)
        self.write_lock = Lock()

        self.creation_time = time.time()
        self.last_increase_time = 0
        self.last_decrease_time = 0
        self.count = 0

        schedule_action(config.DELAY_CREATING, self.activate)
Example #3
    def test_schedule_action_disable(self, m_timer):
        from ddbmock.utils import schedule_action
        from ddbmock import config

        old_delay_param = config.ENABLE_DELAYS
        config.ENABLE_DELAYS = False

        m_cb = mock.Mock()

        schedule_action(DELAY, m_cb, ["args"], {'kwargs': 'kwargs'})
        m_cb.assert_called_with("args", kwargs='kwargs')

        config.ENABLE_DELAYS = old_delay_param
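
This test pins down the disabled-delays behaviour: ``schedule_action`` invokes the callback synchronously, forwarding the positional and keyword arguments as-is. A small usage sketch built on that observation (``on_ready`` is a hypothetical callback, for illustration only):

from ddbmock.utils import schedule_action
from ddbmock import config

config.ENABLE_DELAYS = False                # delays off: callbacks run synchronously

def on_ready(name, status=None):            # hypothetical callback
    print("table", name, "is now", status)

# With delays disabled, on_ready("users", status="ACTIVE") runs before
# schedule_action returns.
schedule_action(5, on_ready, ["users"], {"status": "ACTIVE"})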
Example #4
    def test_schedule_action_disable(self, m_timer):
        from ddbmock.utils import schedule_action
        from ddbmock import config

        old_delay_param = config.ENABLE_DELAYS
        config.ENABLE_DELAYS = False

        m_cb = mock.Mock()

        schedule_action(DELAY, m_cb, ["args"], {'kwargs': 'kwargs'})
        m_cb.assert_called_with("args", kwargs='kwargs')

        config.ENABLE_DELAYS = old_delay_param
Example #5
    def test_schedule_action_enable(self, m_timer):
        from ddbmock.utils import schedule_action
        from ddbmock import config

        old_delay_param = config.ENABLE_DELAYS
        config.ENABLE_DELAYS = True

        m_cb = mock.Mock()

        schedule_action(DELAY, m_cb, ["args"], {'kwargs': 'kwargs'})
        m_timer.assert_called_with(DELAY, m_cb, ["args"], {'kwargs': 'kwargs'})
        m_timer.return_value.start.assert_called_with()

        config.ENABLE_DELAYS = old_delay_param
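
Taken together with the disabled-delays test, this pins down ``schedule_action`` completely: with delays enabled the callback is handed to a started ``threading.Timer``, otherwise it is called immediately. A sketch consistent with both tests (not necessarily the actual ddbmock source):

from threading import Timer

from ddbmock import config

def schedule_action(delay, callback, args=(), kwargs=None):
    """Run callback(*args, **kwargs) after `delay` seconds, or right away if delays are off."""
    kwargs = kwargs or {}
    if config.ENABLE_DELAYS:
        Timer(delay, callback, args, kwargs).start()   # matches the mocked Timer call above
    else:
        callback(*args, **kwargs)                      # matches the disabled-delays test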
Example #6
    def test_schedule_action_enable(self, m_timer):
        from ddbmock.utils import schedule_action
        from ddbmock import config

        old_delay_param = config.ENABLE_DELAYS
        config.ENABLE_DELAYS = True

        m_cb = mock.Mock()

        schedule_action(DELAY, m_cb, ["args"], {'kwargs': 'kwargs'})
        m_timer.assert_called_with(DELAY, m_cb, ["args"], {'kwargs': 'kwargs'})
        m_timer.return_value.start.assert_called_with()

        config.ENABLE_DELAYS = old_delay_param
Example #7
    def update_throughput(self, rt, wt):
        """
        Update table throughput. The same conditions and limitations as for real
        DynamoDB apply:

        - No more than 1 decrease operation per UTC day.
        - No more than doubling throughput at once.
        - Table must be in ``ACTIVE`` state.

        The table status is then set to ``UPDATING`` until the :py:const:`DELAY_UPDATING`
        delay is over. As with real DynamoDB, the table can still be used during
        this period.

        :param rt: New read throughput
        :param wt: New write throughput

        :raises: :py:exc:`ddbmock.errors.ResourceInUseException` if the table is not in ``ACTIVE`` state
        :raises: :py:exc:`ddbmock.errors.LimitExceededException` if any of the other conditions above is not met.

        """
        if self.status != "ACTIVE":
            raise ResourceInUseException("Table {} is in {} state. Can not UPDATE.".format(self.name, self.status))

        # is decrease ?
        if self.rt > rt or self.wt > wt:
            current_time = time.time()
            current_date = datetime.date.fromtimestamp(current_time)
            last_decrease = datetime.date.fromtimestamp(self.last_decrease_time)
            if (current_date - last_decrease).days < config.MIN_TP_DEC_INTERVAL:
                last = datetime.datetime.fromtimestamp(self.last_decrease_time)
                current = datetime.datetime.fromtimestamp(current_time)
                raise LimitExceededException("Subscriber limit exceeded: Provisioned throughput can be decreased only once within the {} day. Last decrease time: Tuesday, {}. Request time: {}".format(config.MIN_TP_DEC_INTERVAL, last, current))
            self.last_decrease_time = current_time

        # is increase ?
        if self.rt < rt or self.wt < wt:
            if (rt - self.rt)/float(self.rt)*100 > config.MAX_TP_INC_CHANGE:
                raise LimitExceededException('Requested provisioned throughput change is not allowed. The ReadCapacityUnits change must be at most {} percent of current value. Current ReadCapacityUnits provisioned for the table: {}. Requested ReadCapacityUnits: {}.'.format(config.MAX_TP_INC_CHANGE, self.rt, rt))
            if (wt - self.wt)/float(self.wt)*100 > config.MAX_TP_INC_CHANGE:
                raise LimitExceededException('Requested provisioned throughput change is not allowed. The WriteCapacityUnits change must be at most {} percent of current value. Current WriteCapacityUnits provisioned for the table: {}. Requested WriteCapacityUnits: {}.'.format(config.MAX_TP_INC_CHANGE, self.wt, wt))
            self.last_increase_time = time.time()

        # real work
        self.status = "UPDATING"

        self.rt = rt
        self.wt = wt

        schedule_action(config.DELAY_UPDATING, self.activate)
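
A short illustration of the increase limit enforced above, assuming ``config.MAX_TP_INC_CHANGE`` is 100 (i.e. "no more than doubling") and that ``table`` was built as in Example #1 with ``rt=10``, ``wt=10`` and is currently ``ACTIVE`` (all assumptions for illustration):

table.update_throughput(20, 20)   # (20 - 10) / 10.0 * 100 == 100: within the limit, accepted
# (25 - 10) / 10.0 * 100 == 150 exceeds MAX_TP_INC_CHANGE, so this call
# would raise LimitExceededException instead:
# table.update_throughput(25, 25)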
Example #8
    def delete_table(self, name):
        """
        Triggers an internal "realistic" table deletion. This implies changing
        the status to ``DELETING``. Once :py:const:`ddbmock.config.DELAY_DELETING`
        has expired, :py:meth:`_internal_delete_table` is called and the table
        is de-referenced from :py:attr:`data`.

        Since :py:attr:`data` only holds a reference, the table object might still
        exist at that moment and possibly still handle pending requests. This also
        makes it safe to return a handle to the table object.

        :param name: Valid table name.

        :return: A reference to the :py:class:`ddbmock.database.table.Table` named ``name``.
        """
        if name not in self.data:
            raise ResourceNotFoundException("Table {} does not exist".format(name))

        table = self.data[name]
        table.delete()
        schedule_action(config.DELAY_DELETING, self._internal_delete_table, [table])

        return table
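
A minimal usage sketch, assuming ``dynamodb`` is the registry instance holding a table named "users" (as in the registration sketch after Example #1):

table = dynamodb.delete_table("users")     # returns a handle to the (now deleting) table
print(table.status)                        # "DELETING" until DELAY_DELETING has elapsed
# Once _internal_delete_table runs, the entry disappears from dynamodb.data,
# but this handle stays valid for any requests still in flight.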
Example #9
    def update_throughput(self, rt, wt):
        """
        Update table throughput. The same conditions and limitations as for real
        DynamoDB apply:

        - No more than 1 decrease operation per UTC day.
        - No more than doubling throughput at once.
        - Table must be in ``ACTIVE`` state.

        The table status is then set to ``UPDATING`` until the :py:const:`DELAY_UPDATING`
        delay is over. As with real DynamoDB, the table can still be used during
        this period.

        :param rt: New read throughput
        :param wt: New write throughput

        :raises: :py:exc:`ddbmock.errors.ResourceInUseException` if the table is not in ``ACTIVE`` state
        :raises: :py:exc:`ddbmock.errors.LimitExceededException` if any of the other conditions above is not met.

        """
        if self.status != "ACTIVE":
            raise ResourceInUseException(
                "Table {} is in {} state. Can not UPDATE.".format(
                    self.name, self.status))

        # is decrease ?
        if self.rt > rt or self.wt > wt:
            current_time = time.time()
            current_date = datetime.date.fromtimestamp(current_time)
            last_decrease = datetime.date.fromtimestamp(
                self.last_decrease_time)
            if (current_date -
                    last_decrease).days < config.MIN_TP_DEC_INTERVAL:
                last = datetime.datetime.fromtimestamp(self.last_decrease_time)
                current = datetime.datetime.fromtimestamp(current_time)
                raise LimitExceededException(
                    "Subscriber limit exceeded: Provisioned throughput can be decreased only once within the {} day. Last decrease time: Tuesday, {}. Request time: {}"
                    .format(config.MIN_TP_DEC_INTERVAL, last, current))
            self.last_decrease_time = current_time

        # is increase ?
        if self.rt < rt or self.wt < wt:
            if (rt - self.rt) / float(
                    self.rt) * 100 > config.MAX_TP_INC_CHANGE:
                raise LimitExceededException(
                    'Requested provisioned throughput change is not allowed. The ReadCapacityUnits change must be at most {} percent of current value. Current ReadCapacityUnits provisioned for the table: {}. Requested ReadCapacityUnits: {}.'
                    .format(config.MAX_TP_INC_CHANGE, self.rt, rt))
            if (wt - self.wt) / float(
                    self.wt) * 100 > config.MAX_TP_INC_CHANGE:
                raise LimitExceededException(
                    'Requested provisioned throughput change is not allowed. The WriteCapacityUnits change must be at most {} percent of current value. Current WriteCapacityUnits provisioned for the table: {}. Requested WriteCapacityUnits: {}.'
                    .format(config.MAX_TP_INC_CHANGE, self.wt, wt))
            self.last_increase_time = time.time()

        # real work
        self.status = "UPDATING"

        self.rt = rt
        self.wt = wt

        schedule_action(config.DELAY_UPDATING, self.activate)
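
As a companion to the increase sketch after Example #7, this illustrates the once-per-UTC-day decrease rule. Assumptions for illustration: ``config.ENABLE_DELAYS`` is False so ``activate()`` runs synchronously, ``config.MIN_TP_DEC_INTERVAL`` is 1 day, and ``table`` is ``ACTIVE`` with ``rt=20``, ``wt=20``:

table.update_throughput(10, 10)   # first decrease today: accepted, last_decrease_time recorded
# A second decrease within the same UTC day would now raise LimitExceededException:
# table.update_throughput(5, 5)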