def test_scheduler_doesnt_handle_calls_the_failed_on_update(
        self, update_delayed_call):
    # Simulate an update that affects zero rows, i.e. another
    # scheduler instance won the race for this delayed call.
    def update_call_failed(id, values, query_filter):
        self.queue.put("item")

        return None, 0

    update_delayed_call.side_effect = update_call_failed

    method_args = {'name': 'task', 'id': '321'}

    scheduler.schedule_call(None, TARGET_METHOD_PATH, DELAY, **method_args)

    calls = db_api.get_delayed_calls_to_start(get_time_delay())

    self.queue.get()

    eventlet.sleep(1)

    update_delayed_call.assert_called_with(
        id=calls[0].id,
        values=mock.ANY,
        query_filter=mock.ANY
    )

    # If the scheduler had handled the call that failed on update,
    # DBEntityNotFoundException would be raised here.
    db_api.get_delayed_call(calls[0].id)

    db_api.delete_delayed_call(calls[0].id)
def test_scheduler_doesnt_handle_calls_the_failed_on_update(
        self, update_delayed_call):
    # The mocked update reports zero affected rows, which is how
    # a concurrent scheduler stealing the call manifests.
    def update_call_failed(id, values, query_filter):
        self.queue.put("item")

        return None, 0

    update_delayed_call.side_effect = update_call_failed

    scheduler.schedule_call(None, TARGET_METHOD_PATH, DELAY,
                            name='task', id='321')

    calls = db_api.get_delayed_calls_to_start(get_time_delay())

    self.queue.get()
    eventlet.sleep(1)

    update_delayed_call.assert_called_with(id=calls[0].id,
                                           values=mock.ANY,
                                           query_filter=mock.ANY)

    # The call must still exist in DB: a scheduler must not consume
    # a call whose update failed. get_delayed_call() would raise
    # DBEntityNotFoundException otherwise.
    db_api.get_delayed_call(calls[0].id)
    db_api.delete_delayed_call(calls[0].id)
def test_scheduler_doesnt_handel_calls_the_failed_on_update(self):
    # Two schedulers compete for the same delayed call.
    def stop_thread_groups():
        for tg in self.tgs:
            tg.stop()

    self.tgs = [scheduler.setup(), scheduler.setup()]
    self.addCleanup(stop_thread_groups)

    method_args = {'name': 'task', 'id': '321'}

    scheduler.schedule_call(None, TARGET_METHOD_NAME, DELAY, **method_args)

    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=2)
    calls = db_api.get_delayed_calls_to_start(time_filter)

    eventlet.sleep(WAIT)

    # If a scheduler had handled the call that failed on update,
    # NotFoundException would be raised here.
    db_api.get_delayed_call(calls[0].id)

    db_api.delete_delayed_call(calls[0].id)
def run_delayed_calls(self, ctx=None):
    """Find due delayed calls and execute their target methods.

    :param ctx: Periodic task context (unused); each call runs under
        a context rebuilt from its stored 'auth_context'.
    """
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    # Wrap delayed calls processing in transaction to
    # guarantee that calls will be processed just once.
    # Do delete query to DB first to force hanging up all
    # parallel transactions.
    # It should work on isolation level 'READ-COMMITTED',
    # 'REPEATABLE-READ' and above.
    #
    # 'REPEATABLE-READ' is by default in MySQL and
    # 'READ-COMMITTED is by default in PostgreSQL.
    delayed_calls = []

    with db_api.transaction():
        for call in db_api.get_delayed_calls_to_start(time_filter):
            # Delete this delayed call from DB before the making call in
            # order to prevent calling from parallel transaction.
            db_api.delete_delayed_call(call.id)

            LOG.debug('Processing next delayed call: %s', call)

            if call.factory_method_path:
                factory = importutils.import_class(
                    call.factory_method_path)

                target_method = getattr(factory(), call.target_method_name)
            else:
                target_method = importutils.import_class(
                    call.target_method_name)

            method_args = copy.copy(call.method_arguments)

            if call.serializers:
                # Deserialize arguments.
                for arg_name, ser_path in call.serializers.items():
                    serializer = importutils.import_class(ser_path)()

                    deserialized = serializer.deserialize(
                        method_args[arg_name])

                    method_args[arg_name] = deserialized

            # Keep the call object itself so that the execution loop
            # below can set the right auth context and attribute a
            # failure to the right call. (Previously the loop variable
            # 'call' leaked out of this loop, so only the last call's
            # context was active and the last call was always the one
            # logged on failure.)
            delayed_calls.append((call, target_method, method_args))

    # TODO(m4dcoder): Troubleshoot deadlocks with PostgreSQL and MySQL.
    # The queries in the target method such as
    # mistral.engine.task_handler.run_action can deadlock
    # with delete_delayed_call. Please keep the scope of the
    # transaction short.
    for (call, target_method, method_args) in delayed_calls:
        with db_api.transaction():
            try:
                # Set the auth context belonging to this particular
                # call right before invoking it.
                context.set_ctx(context.MistralContext(call.auth_context))

                # Call the method.
                target_method(**method_args)
            except Exception as e:
                LOG.debug("Delayed call failed [call=%s, exception=%s]",
                          call, e)
def run_delayed_calls(self, ctx=None):
    """Pick up due delayed calls and invoke their target methods.

    The whole pass runs inside one DB transaction so every call is
    processed exactly once: deleting a call's row first blocks any
    parallel transaction from picking up the same call. This works on
    isolation level 'READ-COMMITTED' (PostgreSQL default),
    'REPEATABLE-READ' (MySQL default) and above.
    """
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    with db_api.transaction():
        for call in db_api.get_delayed_calls_to_start(time_filter):
            # Removing the row up front prevents a concurrent
            # transaction from making the same call.
            db_api.delete_delayed_call(call.id)

            LOG.debug('Processing next delayed call: %s', call)

            context.set_ctx(context.MistralContext(call.auth_context))

            if call.factory_method_path:
                factory_cls = importutils.import_class(
                    call.factory_method_path
                )
                target_method = getattr(
                    factory_cls(), call.target_method_name
                )
            else:
                target_method = importutils.import_class(
                    call.target_method_name
                )

            method_args = copy.copy(call.method_arguments)

            if call.serializers:
                # Restore arguments that were stored in serialized form.
                for arg_name, serializer_path in call.serializers.items():
                    serializer = importutils.import_class(serializer_path)()

                    method_args[arg_name] = serializer.deserialize(
                        method_args[arg_name]
                    )

            try:
                # Invoke the target method.
                target_method(**method_args)
            except Exception as e:
                LOG.debug(
                    "Delayed call failed [call=%s, exception=%s]", call, e
                )
def test_scheduler_doesnt_handle_calls_the_failed_on_update(self):
    # Schedule a call and capture its DB row before the scheduler runs.
    scheduler.schedule_call(
        None, FACTORY_METHOD_PATH, DELAY, name='task', id='321'
    )

    time_filter = (datetime.datetime.now() +
                   datetime.timedelta(seconds=2))
    calls = db_api.get_delayed_calls_to_start(time_filter)

    eventlet.sleep(WAIT)

    # Would raise DBEntityNotFoundException if the scheduler had
    # handled the call that failed on update.
    db_api.get_delayed_call(calls[0].id)
    db_api.delete_delayed_call(calls[0].id)
def test_processing_true_does_not_return_in_get_delayed_calls_to_start(
        self, method):
    method.side_effect = self.target_method

    # A call already flagged as being processed must be invisible
    # to get_delayed_calls_to_start().
    call = db_api.create_delayed_call({
        'factory_method_path': None,
        'target_method_name': TARGET_METHOD_PATH,
        'execution_time': get_time_delay(),
        'auth_context': None,
        'serializers': None,
        'method_arguments': None,
        'processing': True
    })

    calls = db_api.get_delayed_calls_to_start(get_time_delay(10))

    self.assertEqual(0, len(calls))

    db_api.delete_delayed_call(call.id)
def test_scheduler_doesnt_handle_calls_the_failed_on_update(self):
    target_kwargs = {'name': 'task', 'id': '321'}

    scheduler.schedule_call(None, FACTORY_METHOD_PATH, DELAY,
                            **target_kwargs)

    time_filter = (datetime.datetime.now() +
                   datetime.timedelta(seconds=2))
    calls = db_api.get_delayed_calls_to_start(time_filter)

    eventlet.sleep(WAIT)

    # The failed-on-update call must still be present in DB;
    # get_delayed_call() would raise DBEntityNotFoundException
    # if the scheduler had handled it.
    db_api.get_delayed_call(calls[0].id)
    db_api.delete_delayed_call(calls[0].id)
def delete_calls(db_calls):
    """Deletes delayed calls.

    :param db_calls: Delayed calls to delete from DB.
    :raises Exception: re-raises whatever delete_delayed_call raised;
        the enclosing transaction is already invalid at that point.
    """
    with db_api.transaction():
        for call in db_calls:
            try:
                db_api.delete_delayed_call(call.id)
            except Exception as e:
                LOG.error(
                    "Failed to delete delayed call [call=%s, "
                    "exception=%s]", call, e)

                # We have to re-raise any exception because the transaction
                # would be already invalid anyway. If it's a deadlock then
                # it will be handled. Use a bare 'raise' so the original
                # traceback is preserved ('raise e' truncates it).
                raise

    LOG.debug("Scheduler deleted %s delayed calls.", len(db_calls))
def test_processing_true_does_not_return_in_get_delayed_calls_to_start(
        self, method):
    # A call flagged processing=True must never be returned as ready.
    execution_time = (datetime.datetime.now() +
                      datetime.timedelta(seconds=DELAY))

    call = db_api.create_delayed_call({
        'factory_method_path': None,
        'target_method_name': FACTORY_METHOD_PATH,
        'execution_time': execution_time,
        'auth_context': None,
        'serializers': None,
        'method_arguments': None,
        'processing': True
    })

    time_filter = (datetime.datetime.now() +
                   datetime.timedelta(seconds=10))

    self.assertEqual(
        0, len(db_api.get_delayed_calls_to_start(time_filter))
    )

    db_api.delete_delayed_call(call.id)
def delete_calls(db_calls):
    """Deletes delayed calls.

    :param db_calls: Delayed calls to delete from DB.
    :raises Exception: re-raises any failure from delete_delayed_call;
        the enclosing transaction is already invalid at that point.
    """
    with db_api.transaction():
        for call in db_calls:
            try:
                db_api.delete_delayed_call(call.id)
            except Exception as e:
                LOG.error(
                    "Failed to delete delayed call [call=%s, "
                    "exception=%s]", call, e
                )

                # We have to re-raise any exception because the transaction
                # would be already invalid anyway. If it's a deadlock then
                # it will be handled. A bare 'raise' keeps the original
                # traceback ('raise e' would truncate it).
                raise

    LOG.debug("Scheduler deleted %s delayed calls.", len(db_calls))
def run_delayed_calls(self, ctx=None):
    """Run all due delayed calls, each at most once across schedulers.

    Claims calls by flipping their 'processing' flag inside a
    transaction, executes them outside any transaction, then deletes
    the processed rows in a final transaction.

    :param ctx: Periodic task context (unused); each call runs under a
        MistralContext rebuilt from its stored 'auth_context'.
    """
    time_filter = datetime.datetime.now() + datetime.timedelta(
        seconds=1)

    # Wrap delayed calls processing in transaction to
    # guarantee that calls will be processed just once.
    # Do delete query to DB first to force hanging up all
    # parallel transactions.
    # It should work on isolation level 'READ-COMMITTED',
    # 'REPEATABLE-READ' and above.
    #
    # 'REPEATABLE-READ' is by default in MySQL and
    # 'READ-COMMITTED is by default in PostgreSQL.
    delayed_calls = []

    with db_api.transaction():
        candidate_calls = db_api.get_delayed_calls_to_start(
            time_filter
        )
        calls_to_make = []

        for call in candidate_calls:
            # Mark this delayed call has been processed in order to
            # prevent calling from parallel transaction.
            result, number_of_updated = db_api.update_delayed_call(
                id=call.id,
                values={'processing': True},
                query_filter={"processing": False}
            )

            # If number_of_updated != 1 other scheduler already
            # updated.
            if number_of_updated == 1:
                calls_to_make.append(result)

    for call in calls_to_make:
        LOG.debug('Processing next delayed call: %s', call)

        # NOTE(review): deep copies presumably keep the data usable
        # after the transaction above is closed — confirm.
        target_auth_context = copy.deepcopy(call.auth_context)

        if call.factory_method_path:
            factory = importutils.import_class(
                call.factory_method_path
            )

            target_method = getattr(factory(), call.target_method_name)
        else:
            target_method = importutils.import_class(
                call.target_method_name
            )

        method_args = copy.deepcopy(call.method_arguments)

        if call.serializers:
            # Deserialize arguments.
            for arg_name, ser_path in call.serializers.items():
                serializer = importutils.import_class(ser_path)()

                deserialized = serializer.deserialize(
                    method_args[arg_name]
                )

                method_args[arg_name] = deserialized

        delayed_calls.append(
            (target_auth_context, target_method, method_args)
        )

    # Invoke the claimed calls outside of any DB transaction.
    for (target_auth_context, target_method, method_args) in delayed_calls:
        try:
            # Set the correct context for the method.
            context.set_ctx(
                context.MistralContext(target_auth_context)
            )

            # Call the method.
            target_method(**method_args)
        except Exception as e:
            LOG.exception(
                "Delayed call failed, method: %s, exception: %s",
                target_method, e
            )
        finally:
            # Remove context.
            context.set_ctx(None)

    # Finally, remove the rows that were claimed and processed above.
    with db_api.transaction():
        for call in calls_to_make:
            try:
                # Delete calls that were processed.
                db_api.delete_delayed_call(call.id)
            except Exception as e:
                LOG.error(
                    "failed to delete call [call=%s, "
                    "exception=%s]", call, e
                )
def run_delayed_calls(self, ctx=None):
    """Run all due delayed calls, each at most once across schedulers.

    Claims calls by flipping their 'processing' flag inside a
    transaction, executes them outside any transaction with a context
    deserialized per call, then deletes the processed rows in a final
    transaction.

    :param ctx: Periodic task context (unused); each call's context is
        deserialized from its stored 'auth_context'.
    """
    time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1)

    # Wrap delayed calls processing in transaction to guarantee that calls
    # will be processed just once. Do delete query to DB first to force
    # hanging up all parallel transactions.
    # It should work with transactions which run at least 'READ-COMMITTED'
    # mode.
    delayed_calls = []

    with db_api.transaction():
        candidate_calls = db_api.get_delayed_calls_to_start(time_filter)
        calls_to_make = []

        for call in candidate_calls:
            # Mark this delayed call has been processed in order to
            # prevent calling from parallel transaction.
            result, number_of_updated = db_api.update_delayed_call(
                id=call.id,
                values={'processing': True},
                query_filter={'processing': False})

            # If number_of_updated != 1 other scheduler already
            # updated.
            if number_of_updated == 1:
                calls_to_make.append(result)

    for call in calls_to_make:
        LOG.debug('Processing next delayed call: %s', call)

        # NOTE(review): deep copies presumably keep the data usable
        # after the transaction above is closed — confirm.
        target_auth_context = copy.deepcopy(call.auth_context)

        if call.factory_method_path:
            factory = importutils.import_class(call.factory_method_path)

            target_method = getattr(factory(), call.target_method_name)
        else:
            target_method = importutils.import_class(
                call.target_method_name)

        method_args = copy.deepcopy(call.method_arguments)

        if call.serializers:
            # Deserialize arguments.
            for arg_name, ser_path in call.serializers.items():
                serializer = importutils.import_class(ser_path)()

                deserialized = serializer.deserialize(
                    method_args[arg_name])

                method_args[arg_name] = deserialized

        delayed_calls.append(
            (target_auth_context, target_method, method_args))

    ctx_serializer = context.RpcContextSerializer(
        context.JsonPayloadSerializer())

    # Invoke the claimed calls outside of any DB transaction.
    for (target_auth_context, target_method, method_args) in delayed_calls:
        try:
            # Set the correct context for the method.
            ctx_serializer.deserialize_context(target_auth_context)

            # Call the method.
            target_method(**method_args)
        except Exception as e:
            LOG.exception("Delayed call failed, method: %s, exception: %s",
                          target_method, e)
        finally:
            # Remove context.
            context.set_ctx(None)

    # Finally, remove the rows that were claimed and processed above.
    with db_api.transaction():
        for call in calls_to_make:
            try:
                # Delete calls that were processed.
                db_api.delete_delayed_call(call.id)
            except Exception as e:
                LOG.error(
                    "failed to delete call [call=%s, "
                    "exception=%s]", call, e)
def run_delayed_calls(self, ctx=None): time_filter = datetime.datetime.now() + datetime.timedelta(seconds=1) # Wrap delayed calls processing in transaction to # guarantee that calls will be processed just once. # Do delete query to DB first to force hanging up all # parallel transactions. # It should work on isolation level 'READ-COMMITTED', # 'REPEATABLE-READ' and above. # # 'REPEATABLE-READ' is by default in MySQL and # 'READ-COMMITTED is by default in PostgreSQL. delayed_calls = [] with db_api.transaction(): candidate_calls = db_api.get_delayed_calls_to_start(time_filter) calls_to_make = [] for call in candidate_calls: # Mark this delayed call has been processed in order to # prevent calling from parallel transaction. result, number_of_updated = db_api.update_delayed_call( id=call.id, values={'processing': True}, query_filter={"processing": False}) # If number_of_updated != 1 other scheduler already # updated. if number_of_updated == 1: calls_to_make.append(result) for call in calls_to_make: LOG.debug('Processing next delayed call: %s', call) target_auth_context = copy.deepcopy(call.auth_context) if call.factory_method_path: factory = importutils.import_class(call.factory_method_path) target_method = getattr(factory(), call.target_method_name) else: target_method = importutils.import_class( call.target_method_name) method_args = copy.deepcopy(call.method_arguments) if call.serializers: # Deserialize arguments. for arg_name, ser_path in call.serializers.items(): serializer = importutils.import_class(ser_path)() deserialized = serializer.deserialize( method_args[arg_name]) method_args[arg_name] = deserialized delayed_calls.append( (target_auth_context, target_method, method_args)) for (target_auth_context, target_method, method_args) in delayed_calls: # Transaction is needed here because some of the # target_method can use the DB with db_api.transaction(): try: # Set the correct context for the method. 
context.set_ctx( context.MistralContext(target_auth_context)) # Call the method. target_method(**method_args) except Exception as e: LOG.error("Delayed call failed [exception=%s]", e) finally: # Remove context. context.set_ctx(None) with db_api.transaction(): for call in calls_to_make: try: # Delete calls that were processed. db_api.delete_delayed_call(call.id) except Exception as e: LOG.error( "failed to delete call [call=%s, " "exception=%s]", call, e)