def lock_and_update_flag(cls, __ctx, dictData):
    '''
    Atomically merge callback notification data into a locked context and
    flag that a callback event occurred.

    :param __ctx: a Context carrying contextKey (first hash level) and
                  contextValue (second hash level); the authoritative cached
                  instance is re-fetched under the mutex
    :param dictData: a dictionary of data (resulting from a memorize() evaluation)
    :return: True when the cached context was still locked and has been
             updated and flagged; False when the lock is already released
             (the caller must then take over the processing).
    :raises Exception: re-raised after logging when the cache lookup or the
                       update fails (e.g. the entry was removed).
    '''
    try:
        with cls.mutex:
            # Re-fetch the instance actually stored in the cache: the
            # argument may be a stale reference to an older clone.
            __ctx = cls.hash[__ctx.contextKey][__ctx.contextValue]
            if not __ctx.isLocked():
                return False
            asynclog.log(pos='%s.lock_and_update_flag' % (cls.__name__),
                         key=__ctx.contextKey, value=__ctx.contextValue,
                         ctx=__ctx, msg='LOCK IS UP')
            # Remember that a callback event occurred for this context
            __ctx.setFlag()
            # update dictRuntime data
            __ctx.getCacheKeys().update_keys(dictData)
            asynclog.log(pos='%s.lock_and_update_flag' % (cls.__name__),
                         key=__ctx.contextKey, value=__ctx.contextValue,
                         msg='updated with notification data', ctx=__ctx)
            return True
    except Exception as e:
        asynclog.logError(pos='%s.lock_and_update_flag' % (cls.__name__),
                          key=__ctx.contextKey, value=__ctx.contextValue,
                          msg='failed to update with notification data',
                          err=str(e))
        # bare raise preserves the original traceback (unlike "raise e")
        raise
def unlock(cls, key, value):
    '''
    Release the functional lock of the cached context for (key, value).

    :param key: first hash level
    :param value: second hash level
    :return: the result of Context.unlock(), or None when the entry was
             already removed from the cache.
    '''
    try:
        with cls.mutex:
            asynclog.log(pos='%s.unlock' % (cls.__name__),
                         key=key, value=value, msg='unlock flag')
            return cls.hash[key][value].unlock()
    except KeyError as e:
        # This could happen if entry was removed from cache
        asynclog.logWarning(pos='%s.unlock' % (cls.__name__),
                            key=key, value=value, msg='unlock flag',
                            err=str(e))
        return None
def exists(cls, key, value):
    '''
    Return the object stored for the 2 levels key, value OR None.

    :param key: first level
    :param value: second level
    :return: the cached Context, or None when not found.
    '''
    try:
        if logger.isDebugEnabled():
            asynclog.log(pos='%s.exists' % (cls.__name__),
                         key=key, value=value,
                         msg='check existence', ctx=cls.hash[key][value])
        return cls.hash[key][value]
    except Exception as e:
        # Broad catch is deliberate: a missing key (KeyError) or a bad key
        # type both mean "not cached" for the caller.
        asynclog.logWarning(pos='%s.exists' % (cls.__name__),
                            key=key, value=value, msg='not found !!',
                            err=str(e))
        return None
def isFlagged(cls, key, value):
    '''
    Return whether the cached context for (key, value) has been flagged
    (i.e. a callback event already occurred for it).

    :param key: first level
    :param value: second level
    :return: the boolean flag, or None when the entry is not in the cache.
    '''
    try:
        __ctx = cls.hash[key][value]
        asynclog.log(pos='%s.isFlagged' % (cls.__name__),
                     key=key, value=value,
                     msg='Flagged: %s' % (str(__ctx.isFlagged())))
        return __ctx.isFlagged()
    except Exception as e:
        asynclog.logWarning(pos='%s.isFlagged' % (cls.__name__),
                            key=key, value=value, msg='not found !!',
                            err=str(e))
        return None
def update(cls, key, value, dictData):
    '''
    Permanently merge notification data into an existing cached context.

    :param key: first hash level
    :param value: second hash level
    :param dictData: a dictionary of data (resulting from a memorize() evaluation)
    :raises Exception: re-raised after logging when the entry is missing or
                       the update fails.
    '''
    try:
        with cls.mutex:
            cls.hash[key][value].getCacheKeys().update_keys_permanently(dictData)
            asynclog.log(pos='%s.update' % (cls.__name__),
                         key=key, value=value,
                         msg='update notification data permanently')
    except Exception as e:
        asynclog.logError(pos='%s.update' % (cls.__name__),
                          key=key, value=value,
                          msg='update notification data permanently',
                          err=str(e))
        # bare raise preserves the original traceback (unlike "raise e")
        raise
def manage_async_break(cls, __ctx):
    '''
    Decide whether the current thread must be released for a blocking
    asynchronous step.

    :param __ctx: the running Context for the current step
    :return: True when the normal asynchronous flow is confirmed (the lock
             is released and the thread is freed until a callback arrives);
             False when processing continues in the same thread (either the
             step is not blocking-async, or the callback already arrived and
             the context was flagged).
    '''
    # Synchronous or non-blocking asynchronous steps never break the flow.
    if not (__ctx.line.isAsynchronous() and __ctx.line.asyncBlocking()):
        return False

    with cls.mutex:
        # the standard async flow is cancelled ?
        if not ContextIdentifier.isFlagged(__ctx.contextKey,
                                           __ctx.contextValue):
            # release lock
            ContextIdentifier.unlock(__ctx.contextKey, __ctx.contextValue)
            asynclog.log(pos='%s.manage_async_break' % (cls.__name__),
                         key=__ctx.contextKey,
                         value=__ctx.contextValue,
                         msg='UNLOCK DONE - releasing the thread ...')
            # Normal async process is confirmed
            return True

        asynclog.log(
            pos='%s.manage_async_break' % (cls.__name__),
            key=__ctx.contextKey,
            value=__ctx.contextValue,
            msg=' We keep the thread, the notification callback is passed ...',
            ctx=__ctx)
        # update context cache with callback cache
        ContextIdentifier.updateCache(__ctx)
        # cleanup in this case, as we move to next step
        ContextIdentifier.pop_ctx(__ctx)
        # Decrease all asynchronous counters & print value if trace level
        ContextLock.decreaseAllCounters(
            '<<ASYNC>> %s.manage_async_break()' % (cls.__name__))
        # the processing will continue in the same thread
        return False
def pop_ctx_value(cls, key, value):
    '''
    Find a Context() identified by its 2 levels; if found, remove the
    Context from the hash and return it.

    :param key: first level
    :param value: second level
    :return: the removed Context(), or None when not found or on error.
    '''
    try:
        with cls.mutex:
            asynclog.log(pos='%s.pop_ctx_value' % (cls.__name__),
                         key=key, value=value, msg='')
            return cls.hash[key].pop(value)
    except KeyError:
        # Distinguish "key level missing" from "value level missing" to help
        # diagnose asynchronous.call_after() issues.
        if key not in cls.hash:
            logger.error('Key "%s" not defined, check what happens for the value "%s" in the asynchronous.call_after()' % (key, value))
        else:
            logger.error('Key "%s" exists but value "%s" not found - check asynchronous.call_after() at the add() context call' % (key, value))
        return None
    except Exception as e:
        asynclog.logError(pos='%s.pop_ctx_value' % (cls.__name__),
                          key=key, value=value, msg='', err=str(e))
        return None
def add_ctx(cls, key, value, clonedContext):
    '''
    Add a Context() instance to the hash of hash.

    :param key: first hash level
    :param value: second hash level
    :param clonedContext: the value for the second level, of type Context();
                          it is locked (setLock) before being stored.
    :raises Exception: re-raised after logging when storing fails.
    '''
    try:
        with cls.mutex:
            if key not in cls.hash:
                cls.hash[key] = {}
            # Set a functional lock from callback_before()
            clonedContext.setLock()
            cls.hash[key][value] = clonedContext
            asynclog.log(pos='%s.add_ctx' % (cls.__name__),
                         key=key, value=value,
                         msg='storing context', ctx=clonedContext)
    # No ERROR logger trace here, as it is reported one level up
    except Exception as e:
        asynclog.logError(pos='%s.add_ctx' % (cls.__name__),
                          key=key, value=value,
                          msg='storing context failed', err=str(e),
                          ctx=clonedContext)
        # bare raise preserves the original traceback (unlike "raise e")
        raise
class CallbackHandler(HttpHandler):
    '''
    HTTP handler receiving asynchronous callback notifications.

    The JSON payload must provide "contextKey", "value" and a dictionary
    "data". The matching cached Context is either updated in place (when it
    is still locked) or resumed by pushing an AsyncContextToken onto the
    main processing queue.
    '''

    def __init__(self):
        pass

    def __checkAttribute(self, json_data, attribute):
        '''
        Return the required attribute from json_data.

        :raises SyntaxError: when the attribute is absent.
        '''
        try:
            value = json_data.get(attribute)
        except Exception:
            value = None
        # BUGFIX: dict.get() returns None for a missing key instead of
        # raising, so the original except-based check could never fire.
        # Test the result explicitly to enforce the "required" contract.
        if value is None:
            logger.error(
                '[contextCallback] required attribute "%s" was not found in JSON "%s"'
                % (attribute, json_data))
            raise SyntaxError(
                '[contextCallback] required attribute "%s" was not found in JSON "%s"'
                % (attribute, json_data))
        return value

    def __manage(self, data):
        '''
        Parse an asynchronous callback token (JSON) and update or resume
        the corresponding cached context.

        :param data: the raw request payload (JSON text).
        :raises SyntaxError: on malformed JSON or missing/invalid attributes.
        '''
        # This is a async token - with a Json format
        try:
            logger.debug(
                '[%s] Transforming a "%s" data to a Json AsyncContextToken()'
                % (self.__class__.__name__, data))
            json_data = JSONSerializer.toJSON(data)
        except JSONException as x:
            raise SyntaxError("%s\nfailed to read json: '''%s'''" % (x, data))
        # checking required attributes
        contextKey = self.__checkAttribute(json_data, 'contextKey')
        contextValue = self.__checkAttribute(json_data, 'value')
        # Checking data attribute
        try:
            data_dict = dict(json_data.get('data'))
            asynclog.log(pos='%s.__manage' % (self.__class__.__name__),
                         key=contextKey, value=contextValue,
                         msg='Got data: %s' % (data_dict))
        except Exception:
            logger.error('[%s] the "data" attribute must be a dictionary'
                         % (self.__class__.__name__))
            raise SyntaxError(
                '[%s] the "data" attribute must be a dictionary'
                % (self.__class__.__name__))
        __ctx = ContextIdentifier.exists(contextKey, contextValue)
        if __ctx:
            try:
                # The context has not been released:
                # we update the context with notification data
                # and flag the context that a callback event occurred
                if ContextIdentifier.lock_and_update_flag(__ctx, data_dict):
                    return
                # The processing for the current context is terminated
                # Creating the AsyncContextToken
                asynclog.log(pos='%s.__manage' % (self.__class__.__name__),
                             key=contextKey, value=contextValue,
                             msg='LOCK IS DOWN', ctx=__ctx)
                token = AsyncContextToken(contextKey, contextValue, data_dict)
                # Adding the token to the main processing queue
                GrinderQueue.put(token)
                return
            except Exception as e:
                logger.error(
                    '<<ASYNC>>[%s=%s][%s.__manage] context management ,reason: %s'
                    % (contextKey, contextValue, self.__class__.__name__, e))
                # bare raise preserves the original traceback
                raise
def call_before(cls, __ctx):
    '''
    Prepare an asynchronous step before the actual call is performed:
    increase the waiting-context counter, resolve the context key/value
    identifiers, optionally register the callback with the context router,
    and store a locked clone of the context in the ContextIdentifier cache
    so the callback can resume it later.

    :param __ctx: the running Context for the current step
    :raises SyntaxError: when no value can be resolved for the contextKey
    :raises Exception: when posting to the context router or storing the
                       cloned context fails
    '''
    # ZERO, increase the number of context waiting (should be clean up in case of expiration)
    # *** This Object is useful for Validation mode ***
    # TODO: check this is not useless - as the number of waiting context is in the ContextIdentifier class
    if __ctx.line.asyncBlocking():
        ContextLock.contextIncrease()
        if logger.isTraceEnabled():
            logger.trace(
                '<<ASYNC>> asynchronous.call_before() - contextIncrease() & lock() - [ContextLock=%d]'
                % (ContextLock.count()))
    if not __ctx.line.multipleCaller:
        # This is the expiration criteria in ContextIdentifier reaper thread
        # __ctx.line.timeout = int(__ctx.line.timeout)
        # expiration date in milli seconds (-1 means NO expiration)
        # NOTE(review): expirationTime is only bound in this branch, yet it
        # is referenced below when building jsonMessage — if multipleCaller
        # is true AND use_contextManager is set, this raises NameError.
        # TODO confirm whether that combination is reachable.
        expirationTime = ((__ctx.line.timeout + time.time()) *
                          1000 if __ctx.line.timeout != -1 else -1)
        __ctx.setExpirationTime(expirationTime)
    #
    # FIRST, get the context identifier value
    # 3 cases:
    #   value: ${VAR}
    #   value: xxxx => a fixed literal value
    #   "value" keyword is not defined => get the current value from the contextKey in the running context
    #
    # Remember: line is only a definition (Immutable)
    contextKeyValue = __ctx.line.contextValue if __ctx.line.contextValue else __ctx.line.contextKey
    __ctx.contextKey = __ctx.line.contextKey
    # Resolve the identifier value from the runtime cache (may expand ${VAR})
    __ctx.contextValue = __ctx.getCacheKeys().getValue(contextKeyValue)
    asynclog.logTrace(pos='%s.call_before' % (cls.__name__),
                      msg='Initializing key/value',
                      key=__ctx.contextKey,
                      value=__ctx.contextValue)
    if not __ctx.contextValue:
        asynclog.logError(pos='%s.call_before' % (cls.__name__),
                          key=__ctx.contextKey,
                          value=__ctx.contextValue,
                          msg='\ncache=%s' % (__ctx.getCacheKeys().dictRuntime),
                          err='No value found in cache')
        raise SyntaxError(
            '[Asynchronous step] contextKey "%s" must have a value in the context or have a "value","contextValue" defined in the scenation'
            % (__ctx.contextKey))
    asynclog.log(pos='%s.callBefore' % (cls.__name__),
                 key=__ctx.contextKey,
                 value=contextKeyValue,
                 msg='initial async identifiers')
    # SECOND, send the JSON message to the ContextManager process
    #---------------------------------------------------------------
    # TODO: manage the case there is no context router !
    # TODO: manage the case we have only SMPP callback
    jsonMessage = None
    if __ctx.line.use_contextManager:
        try:
            # client call to the router
            # Time expressed in milliseconds
            jsonMessage = '{"contextKey" : "%s", "value" : "%s", "host" : "%s", "port" : "%s", "expirationtime": "%s", "count": "%d"}' % (
                __ctx.contextKey, __ctx.contextValue,
                HTTPServerCallback.getHost(), HTTPServerCallback.getPort(),
                expirationTime, __ctx.line.callbackCount)
            Configuration.getrouterContextClient().postJsonMessage(
                jsonMessage,
                Configuration.getrouterContextClient().getCreateUri())
            logger.debug(
                'CREATE - Posting the callback router [message: %s]'
                % (jsonMessage))
        except Exception, e:
            logger.error(
                'CREATE - Error posting message to the contextRouter, stacktrace=%s'
                % (str(e)))
            raise Exception(
                'CREATE - Error posting message to the contextRouter, stacktrace=%s'
                % (str(e)))
    # bet that the call will succeed - so we store the contextKey, value doubleton in ContextIdentifier cache
    # We bet the oneWay will succeed, so we store the context for the callback
    asynclog.logInfo(pos='%s.callBefore' % (cls.__name__),
                     key=__ctx.contextKey,
                     value=__ctx.contextValue,
                     msg='Storing a cloned context')
    with cls.mutex:
        # A deep clone is stored so the running context can keep evolving
        # while the callback copy stays frozen at this step.
        cloneContext = Context(__ctx.macrosAllScenario)
        cloneContext.copy(__ctx)
        asynclog.logTrace(
            pos='%s.callBefore' % cls.__name__,
            key=__ctx.contextKey,
            value=__ctx.contextValue,
            msg=
            'Cloned object address: %s, original address=%s, context=%s'
            % (hex(id(cloneContext)), hex(id(__ctx)), cloneContext))
        try:
            ContextIdentifier.add_ctx(__ctx.contextKey,
                                      __ctx.contextValue, cloneContext)
        except Exception, e:
            raise (e)
def process_callback(cls, __ctx, __data):
    '''
    Resume an awakened asynchronous step with the callback notification data.

    The raw data is handed to the step implementation (or to an alternative
    one declared with the callback_implementation tag) via process_data();
    the resulting dictionary is merged into the context cache and the step
    response assertions are applied to it.

    :param __ctx: the cached Context restored for this callback
    :param __data: the raw notification data carried by the token
    :return: True to abort the scenario (a timeout was expected but a
             callback arrived), False/falsy to continue with the next steps.
    :raises SyntaxError: when process_data() fails or does not return a dict.
    '''
    __key, __value = __ctx.contextKey, __ctx.contextValue
    # Decrease all asynchronous counters & print value if trace level
    ContextLock.decreaseAllCounters('<<ASYNC>> %s.process_callback()'
                                    % (cls.__name__))
    # For debugging purpose, uid maintains an unique identifier in the log files
    MDC.put('uid', '[uid=%s]' % (__ctx.uid))
    # Ask to the implementation to process asynchronous data returned
    asynclog.log(pos='%s.process_callback' % cls.__name__,
                 key=__key, value=__value,
                 msg='resuming with notification data : %s' % (__data))
    #
    # if the step has a tag async with a property timeout_success: True,
    # coming back to the callback method means there was no timeout,
    # so this is an assertion failure
    #
    if __ctx.line.isTimeoutSuccess():
        reporting.report_step_status(
            success=False,
            context=__ctx,
            cause='Assertion Failure: a timeout was expected',
            state='AsyncStepAssertTimeoutSuccessKO',
            synchronous=False)
        __ctx.endAsyncScenario()
        return True
    try:
        # Load an alternative implementation defined in the callback_implementation tag
        if __ctx.line.callback_implementation:
            asynclog.log(
                pos='%s.process_callback' % cls.__name__,
                key=__key,
                value=__value,
                msg=
                'post-processing notification data with implementation "%s"'
                % (str(__ctx.line.callback_implementation)))
            class_implementation = __ctx.scenario.loaded_implementations[
                str(__ctx.line.callback_implementation)]
            processedData = class_implementation.process_data(__data)
        else:
            # Calling back the implementation to interpret the Mock data
            processedData = __ctx.getCurrentLine().getImplementation(
            ).process_data(__data)
    except (SyntaxError, KeyError) as x:
        asynclog.logError(
            pos='%s.process_callback' % cls.__name__,
            key=__key,
            value=__value,
            msg=
            'SyntaxError/KeyError raised when processing process_data()',
            err=str(x))
        logger.error(
            '[state=StopAsyncStepFatalError][cause=%s][test=%s][scenario=%s]'
            % (repr(x), __ctx.line.getTestName(), __ctx.scenario))
        reporting.report_step_status(
            success=False,
            synchronous=False,
            context=__ctx,
            cause="%s\n while processing test '%s' in scenario '%s'"
            % (x, __ctx.line.getTestName(), __ctx.scenario),
            # BUGFIX: was the typo 'StopSAsynctepFatalError', inconsistent
            # with the [state=...] log trace just above
            state='StopAsyncStepFatalError')
        raise SyntaxError(
            "%s\n while processing test '%s' in scenario '%s'"
            % (x, __ctx.line.getTestName(), __ctx.scenario))
    if not isinstance(processedData, dict):
        # BUGFIX: the original passed err=str(x) here, but x is only bound
        # inside the except clause above (which re-raises), so reaching this
        # line with err=str(x) raised a NameError masking the real problem.
        asynclog.logError(
            pos='%s.process_callback' % cls.__name__,
            key=__key,
            value=__value,
            msg=
            'The asynchronous processed data must be a dictionary, [data:%s] [type:%s]'
            % (processedData, type(processedData)),
            err='invalid process_data() return type')
        raise SyntaxError(
            'The asynchronous processed data must be a dictionary, [data:%s] [type:%s]'
            % (processedData, type(processedData)))
    # Adding the processed data - Before assertion !!
    asynclog.log(pos='%s.process_callback' % cls.__name__,
                 key=__key, value=__value,
                 msg='Adding the context added data : %s' % processedData)
    __ctx.cacheKeys.update_cache_temporary(processedData)
    # ok, if we have a response in the data structure, we can apply assertion on it
    assertionFailed, cause = __ctx.getCurrentLine().checkResponse(
        processedData, __ctx, True)
    if 'deviceId' in __ctx:
        cause = '[deviceId=%s]%s' % (__ctx['deviceId'], cause)
    if assertionFailed:
        reporting.report_step_status(success=False,
                                     synchronous=False,
                                     context=__ctx,
                                     cause=cause,
                                     state='AsyncStepAssertKO')
    # Explicit falsy return: the caller continues the scenario
    return False
def processScenario(self):
    '''
    Launch a new run for one scenario or awake a sleeping one.

    Three entry situations are handled:
      * self.token is a Context: a restored context (timeout or callback
        processing failure) — resume it;
      * self.token is a ContextToken: an asynchronous callback arrived —
        pop the waiting context and resume it with the notification data,
        or (in waiting mode) start a new scenario seeded with the token;
      * no token / no context found: start a brand new scenario run.
    '''
    startNewScenario = True
    ctx = None
    # Keys injected into a brand-new scenario when started from an event
    dictCallingToken = {}
    if self.token:
        # a restored context following a timeout or a callback processing failure
        if isinstance(self.token, Context):
            ctx = self.token
            # Special case : Timeout encountered is considered as a success
            if ctx.scenarioContinue:
                startNewScenario = False
                ctx.scenarioContinue = False
            # uid maintains a unique identifier in the log files
            MDC.put('uid', '[uid=%s]' % (ctx.uid))
            logger.info('[state=ResumeScenarioKO] %s'
                        % (reporting.localization(ctx)))
        # Async callback context
        elif isinstance(self.token, ContextToken):
            asynclog.log(pos='%s.processScenario' % (self.__class__.__name__),
                         key=self.token.getContextKey(),
                         value=self.token.getValue(),
                         msg='Getting a waiting token in a ContextToken')
            # Remove the waiting context from the cache (atomic pop)
            ctx = ContextIdentifier.pop_ctx_value(
                key=self.token.getContextKey(),
                value=self.token.getValue())
            if ctx:
                asynclog.log(pos='%s.processScenario' % (self.__class__.__name__),
                             key=self.token.getContextKey(),
                             value=self.token.getValue(),
                             msg='Found a waiting token',
                             ctx=ctx)
                startNewScenario = False
                # the awaken and updated context can be processed on following steps
                abort = asynchronous.process_callback(
                    ctx, self.token.getContextData())
                if abort:
                    # e.g. a timeout was expected: stop this run here
                    logger.info(
                        '[state=ResumeAsyncScenarioKO][key="%s"][value="%s"] %s'
                        % (self.token.getContextKey(),
                           self.token.getValue(),
                           reporting.localization(ctx)))
                    return
                logger.info(
                    '[state=ResumeAsyncScenario][key="%s"][value="%s"] %s'
                    % (self.token.getContextKey(),
                       self.token.getValue(),
                       reporting.localization(ctx)))
            else:
                # Waiting mode - we arrive without any context
                if GrinderQueue.waiting_mode:
                    # add what we get from the environment
                    dictCallingToken = {
                        self.token.getContextKey(): self.token.getValue()
                    }
                    dictCallingToken.update(self.token.getContextData())
                    logger.info(
                        '[state=StartScenarioFromEvent] waiting_mode - Event token dictCallingToken=%s '
                        % (dictCallingToken))
                else:
                    # We had a token and the token was not found ... not normal ...
                    # we must stop this run here
                    # TODO: create an error Test to store the error at the Test object level
                    logger.warn(
                        '[state=ResumeScenarioFailed] [timeout?] Context of type [%s], content: "%s" was not found'
                        % (self.token.getContextKey(),
                           self.token.getValue()))
                    grinder.sleep(10)
                    return
    # Throughput, thread all mode
    if not ctx:
        # No restored context: start a fresh run with a new Context
        loggerProxy.start_run(Configuration.shortFileName)
        ctx = Context(self.macrosCached)
        # waiting scenario started from external events
        if dictCallingToken:
            ctx.cacheKeys.add_dynamic_keys(dictCallingToken)
        logger.info('[state=StartNewScenarioList] [scenarioList=%s]'
                    % (self.scenarioList))
    # Process the scenario list
    try:
        self.process(ctx, startNewScenario)
    except:
        # Log the failing scenario, then let the exception propagate
        logger.info(
            '[state=StopScenarioKO] [scenario=%s] see stacktrace for information'
            % (ctx.scenario.getName() if ctx else 'NO CONTEXT'))
        raise
    finally:
        # Always clear the per-run log identifier
        MDC.remove('uid')