Example no. 1
0
    def _capture_calls(batch_size):
        """Select and lock delayed calls that are due for processing.

        Picks delayed calls whose execution time has come (with a one
        second look-ahead) and flips their 'processing' flag inside a
        single transaction so that no concurrent scheduler can pick up
        the same calls.

        :param batch_size: Maximum number of calls to fetch from DB.
        :return: A list of delayed calls captured for further processing.
        """
        captured = []

        # Look one second ahead so calls about to become due are included.
        deadline = utils.utc_now_sec() + datetime.timedelta(seconds=1)

        with db_api.transaction():
            for candidate in db_api.get_delayed_calls_to_start(
                    deadline, batch_size):
                # Atomically flag the call as being processed; the query
                # filter guarantees only one scheduler wins the race.
                db_call, updated_cnt = db_api.update_delayed_call(
                    id=candidate.id,
                    values={'processing': True},
                    query_filter={'processing': False})

                if updated_cnt != 1:
                    # Another scheduler has already claimed this call.
                    continue

                captured.append(db_call)

        LOG.debug("Scheduler captured %s delayed calls.", len(captured))

        return captured
Example no. 2
0
    def test_schedule_with_unique_key(self):
        """Scheduling with a duplicate unique key must not add new calls."""
        method_args = {'name': 'task', 'id': '321'}

        key = 'my_unique_key'

        def schedule_once():
            # All three scheduling attempts in this test are identical.
            scheduler.schedule_call(
                None,
                TARGET_METHOD_PATH,
                DELAY,
                unique_key=key,
                **method_args
            )

        schedule_once()

        self.assertEqual(1, len(db_api.get_delayed_calls()))

        # Scheduling a second time with the same unique key must be a
        # no-op: the call count stays unchanged.
        schedule_once()

        calls = db_api.get_delayed_calls()

        self.assertEqual(1, len(calls))

        # Once the existing call is flagged as being processed the DB
        # uniqueness constraint no longer blocks a new call with the
        # same key.
        db_api.update_delayed_call(calls[0].id, {'processing': True})

        schedule_once()

        self.assertEqual(2, len(db_api.get_delayed_calls()))
Example no. 3
0
    def _capture_calls(batch_size):
        """Capture delayed calls that are ready to be processed.

        Selects delayed calls based on time criteria and marks them in
        the DB as being processed, all within one transaction, so that
        other scheduler threads cannot process them in parallel.

        :param batch_size: Maximum number of calls to fetch from DB.
        :return: A list of delayed calls captured for further processing.
        """
        # Include calls becoming due within the next second.
        cutoff = utils.utc_now_sec() + datetime.timedelta(seconds=1)

        captured = []

        with db_api.transaction():
            candidates = db_api.get_delayed_calls_to_start(
                cutoff,
                batch_size
            )

            for c in candidates:
                # Flip the 'processing' flag with a conditional update so
                # that exactly one scheduler instance claims each call.
                updated_call, rows = db_api.update_delayed_call(
                    id=c.id,
                    values={'processing': True},
                    query_filter={'processing': False}
                )

                # rows == 1 means we won the race; anything else means a
                # concurrent scheduler already updated this record.
                if rows == 1:
                    captured.append(updated_call)

        LOG.debug("Scheduler captured %s delayed calls.", len(captured))

        return captured
Example no. 4
0
    def run_delayed_calls(self, ctx=None):
        """Execute all delayed calls whose time has come, then delete them.

        Three phases: (1) capture due calls in a transaction by flipping
        their 'processing' flag, (2) resolve and invoke each target
        method under the call's stored auth context, (3) delete the
        processed calls in a final transaction.

        :param ctx: Unused here; kept for the periodic-task call
                    signature. NOTE(review): confirm with the scheduling
                    framework that invokes this method.
        """
        # Include calls that become due within the next second.
        time_filter = datetime.datetime.now() + datetime.timedelta(
            seconds=1)

        # Wrap delayed-call capturing in a transaction to guarantee
        # that each call is processed just once: the conditional
        # UPDATE below lets only one scheduler claim a given call,
        # forcing parallel transactions to skip it.
        # It should work on isolation level 'READ-COMMITTED',
        # 'REPEATABLE-READ' and above.
        #
        # 'REPEATABLE-READ' is the default in MySQL and
        # 'READ-COMMITTED' is the default in PostgreSQL.
        delayed_calls = []

        with db_api.transaction():
            candidate_calls = db_api.get_delayed_calls_to_start(
                time_filter
            )
            # NOTE: assigned inside the 'with' block but intentionally
            # used after it; Python scoping makes this valid.
            calls_to_make = []

            for call in candidate_calls:
                # Mark this delayed call as being processed in order to
                # prevent processing by a parallel transaction.
                result, number_of_updated = db_api.update_delayed_call(
                    id=call.id,
                    values={'processing': True},
                    query_filter={"processing": False}
                )

                # If number_of_updated != 1 then another scheduler has
                # already claimed this call; skip it.
                if number_of_updated == 1:
                    calls_to_make.append(result)

        # Resolve each captured call into a (context, method, args)
        # triple before invoking anything.
        for call in calls_to_make:
            LOG.debug('Processing next delayed call: %s', call)

            # Deep-copy so the invocation phase cannot mutate DB objects.
            target_auth_context = copy.deepcopy(call.auth_context)

            if call.factory_method_path:
                # The target is a method on an object built by a factory.
                factory = importutils.import_class(
                    call.factory_method_path
                )

                target_method = getattr(factory(), call.target_method_name)
            else:
                # The target is directly importable by its dotted path.
                target_method = importutils.import_class(
                    call.target_method_name
                )

            method_args = copy.deepcopy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments using each argument's registered
                # serializer class (stored as a dotted import path).
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name]
                    )

                    method_args[arg_name] = deserialized

            delayed_calls.append(
                (target_auth_context, target_method, method_args)
            )

        # Invocation phase: run every call outside the capture
        # transaction so long-running targets don't hold DB locks.
        for (target_auth_context, target_method, method_args) in delayed_calls:
            try:
                # Set the correct context for the method.
                context.set_ctx(
                    context.MistralContext(target_auth_context)
                )

                # Call the method; failures are logged but must not
                # prevent the remaining calls from running.
                target_method(**method_args)
            except Exception as e:
                LOG.exception(
                    "Delayed call failed, method: %s, exception: %s",
                    target_method,
                    e
                )
            finally:
                # Remove context.
                context.set_ctx(None)

        # Cleanup phase: remove processed calls; each delete failure is
        # logged individually so one bad record doesn't block the rest.
        with db_api.transaction():
            for call in calls_to_make:
                try:
                    # Delete calls that were processed.
                    db_api.delete_delayed_call(call.id)
                except Exception as e:
                    LOG.error(
                        "failed to delete call [call=%s, "
                        "exception=%s]", call, e
                    )
Example no. 5
0
    def run_delayed_calls(self, ctx=None):
        """Execute all delayed calls whose time has come, then delete them.

        Captures due calls in a transaction (via a conditional update of
        the 'processing' flag), invokes each target method under its
        stored auth context, then deletes the processed calls.

        :param ctx: Unused here; kept for the periodic-task call
                    signature. NOTE(review): confirm with the scheduling
                    framework that invokes this method.
        """
        # Include calls that become due within the next second.
        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

        # Wrap delayed-call capturing in a transaction to guarantee that
        # calls are processed just once: the conditional UPDATE below
        # lets only one scheduler claim a given call, so parallel
        # transactions skip it.
        # It should work with transactions which run at least 'READ-COMMITTED'
        # mode.
        delayed_calls = []

        with db_api.transaction():
            candidate_calls = db_api.get_delayed_calls_to_start(time_filter)
            # NOTE: assigned inside the 'with' block but intentionally
            # used after it; Python scoping makes this valid.
            calls_to_make = []

            for call in candidate_calls:
                # Mark this delayed call as being processed in order to
                # prevent processing by a parallel transaction.
                result, number_of_updated = db_api.update_delayed_call(
                    id=call.id,
                    values={'processing': True},
                    query_filter={'processing': False})

                # If number_of_updated != 1 then another scheduler has
                # already claimed this call; skip it.
                if number_of_updated == 1:
                    calls_to_make.append(result)

        # Resolve each captured call into a (context, method, args)
        # triple before invoking anything.
        for call in calls_to_make:
            LOG.debug('Processing next delayed call: %s', call)

            # Deep-copy so the invocation phase cannot mutate DB objects.
            target_auth_context = copy.deepcopy(call.auth_context)

            if call.factory_method_path:
                # The target is a method on an object built by a factory.
                factory = importutils.import_class(call.factory_method_path)

                target_method = getattr(factory(), call.target_method_name)
            else:
                # The target is directly importable by its dotted path.
                target_method = importutils.import_class(
                    call.target_method_name)

            method_args = copy.deepcopy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments using each argument's registered
                # serializer class (stored as a dotted import path).
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name])

                    method_args[arg_name] = deserialized

            delayed_calls.append(
                (target_auth_context, target_method, method_args))

        # One serializer instance is enough for restoring every call's
        # auth context below.
        ctx_serializer = context.RpcContextSerializer(
            context.JsonPayloadSerializer())

        # Invocation phase: run every call outside the capture
        # transaction so long-running targets don't hold DB locks.
        for (target_auth_context, target_method, method_args) in delayed_calls:
            try:
                # Set the correct context for the method.
                ctx_serializer.deserialize_context(target_auth_context)

                # Call the method; failures are logged but must not
                # prevent the remaining calls from running.
                target_method(**method_args)
            except Exception as e:
                LOG.exception("Delayed call failed, method: %s, exception: %s",
                              target_method, e)
            finally:
                # Remove context.
                context.set_ctx(None)

        # Cleanup phase: remove processed calls; each delete failure is
        # logged individually so one bad record doesn't block the rest.
        with db_api.transaction():
            for call in calls_to_make:
                try:
                    # Delete calls that were processed.
                    db_api.delete_delayed_call(call.id)
                except Exception as e:
                    LOG.error(
                        "failed to delete call [call=%s, "
                        "exception=%s]", call, e)
Example no. 6
0
    def run_delayed_calls(self, ctx=None):
        """Execute all delayed calls whose time has come, then delete them.

        Captures due calls in a transaction (via a conditional update of
        the 'processing' flag), invokes each target method under its
        stored auth context — each inside its own transaction — then
        deletes the processed calls.

        :param ctx: Unused here; kept for the periodic-task call
                    signature. NOTE(review): confirm with the scheduling
                    framework that invokes this method.
        """
        # Include calls that become due within the next second.
        time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

        # Wrap delayed-call capturing in a transaction to guarantee
        # that calls are processed just once: the conditional UPDATE
        # below lets only one scheduler claim a given call, so
        # parallel transactions skip it.
        # It should work on isolation level 'READ-COMMITTED',
        # 'REPEATABLE-READ' and above.
        #
        # 'REPEATABLE-READ' is the default in MySQL and
        # 'READ-COMMITTED' is the default in PostgreSQL.
        delayed_calls = []

        with db_api.transaction():
            candidate_calls = db_api.get_delayed_calls_to_start(time_filter)
            # NOTE: assigned inside the 'with' block but intentionally
            # used after it; Python scoping makes this valid.
            calls_to_make = []

            for call in candidate_calls:
                # Mark this delayed call as being processed in order to
                # prevent processing by a parallel transaction.
                result, number_of_updated = db_api.update_delayed_call(
                    id=call.id,
                    values={'processing': True},
                    query_filter={"processing": False})

                # If number_of_updated != 1 then another scheduler has
                # already claimed this call; skip it.
                if number_of_updated == 1:
                    calls_to_make.append(result)

        # Resolve each captured call into a (context, method, args)
        # triple before invoking anything.
        for call in calls_to_make:
            LOG.debug('Processing next delayed call: %s', call)

            # Deep-copy so the invocation phase cannot mutate DB objects.
            target_auth_context = copy.deepcopy(call.auth_context)

            if call.factory_method_path:
                # The target is a method on an object built by a factory.
                factory = importutils.import_class(call.factory_method_path)

                target_method = getattr(factory(), call.target_method_name)
            else:
                # The target is directly importable by its dotted path.
                target_method = importutils.import_class(
                    call.target_method_name)

            method_args = copy.deepcopy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments using each argument's registered
                # serializer class (stored as a dotted import path).
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name])

                    method_args[arg_name] = deserialized

            delayed_calls.append(
                (target_auth_context, target_method, method_args))

        for (target_auth_context, target_method, method_args) in delayed_calls:

            # A transaction is needed here because some of the
            # target methods can use the DB.
            with db_api.transaction():
                try:
                    # Set the correct context for the method.
                    context.set_ctx(
                        context.MistralContext(target_auth_context))

                    # Call the method; failures are logged but must not
                    # prevent the remaining calls from running.
                    target_method(**method_args)
                except Exception as e:
                    LOG.error("Delayed call failed [exception=%s]", e)
                finally:
                    # Remove context.
                    context.set_ctx(None)

        # Cleanup phase: remove processed calls; each delete failure is
        # logged individually so one bad record doesn't block the rest.
        with db_api.transaction():
            for call in calls_to_make:
                try:
                    # Delete calls that were processed.
                    db_api.delete_delayed_call(call.id)
                except Exception as e:
                    LOG.error(
                        "failed to delete call [call=%s, "
                        "exception=%s]", call, e)