Example #1
def bye():
    print ''
    print '.' * 50
    print 'bye() - Stopping everything properly ...'
    logger.info('bye() - Stopping everything properly ...')
    if Configuration.async:
        logger.info("Stopping the Async Http server ...")
        print 'Stopping the Async Http server ... (Graceful period of %d milliseconds)' % (
            Configuration.http_graceful_period)
        Thread.sleep(Configuration.http_graceful_period)
        HTTPServerCallback.stop()

    if Configuration.smpp_started:
        if Configuration.getSmscDriver():
            logger.info('Stopping the SMSCDriver (wait %d seconds) ...' %
                        (Configuration.smpp_graceful_period))
            print('Stopping the SMSCDriver (wait %d seconds) ...' %
                  (Configuration.smpp_graceful_period))
            time.sleep(Configuration.smpp_graceful_period)
            Configuration.getSmscDriver().stop()

    # If the ContextIdentifier has been initialized, stop the reaper thread
    logger.info("Stopping the ContextIdentifier Thread")
    ContextIdentifier.stop()

    # Reset TPS at the end
    if Configuration.use_reporter:
        Configuration.getClientReporter().setTPS(0)

    logger.info('bye() - END - All is terminated')
    print('bye() - END - All is terminated')
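The bye() function above is registered as an exit hook (Example #13 shows it carrying the @atexit.register decorator), so it runs once when the Jython interpreter terminates. A minimal sketch of that registration pattern, with a hypothetical driver and graceful period standing in for the real components:

# Minimal sketch of the atexit shutdown pattern; FakeDriver and the graceful
# period below are placeholders, not the project's real components.
import atexit
import time


class FakeDriver:
    def stop(self):
        print 'driver stopped'


driver = FakeDriver()
GRACEFUL_PERIOD = 2  # seconds (placeholder value)


@atexit.register
def shutdown():
    print 'shutdown() - Stopping everything properly ...'
    time.sleep(GRACEFUL_PERIOD)
    driver.stop()
    print 'shutdown() - END - All is terminated'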
Example #2
    def __synchronize(cls):
        '''
          Meeting point for all threads for ramping up mode and validation mode
        '''
        if grinder.threadNumber == 0:
            logger.info('Thread meeting point begins before ramp up ...')

        cls.cv.acquire()
        cls.thread_count += 1
        cls.cv.release()

        if cls.thread_count == Configuration.numberOfThreads:
            logger.info('All threads are initialized - rampup can start')
            cls.event.set()

        # While waiting for the other threads, initialize the SMSC
        if grinder.threadNumber == 0:
            Configuration.waitAfterSMSC()

        # wait here until we get an event.set()
        cls.event.wait()

        # Thread 0 is responsible for starting the Throughput threads
        if grinder.threadNumber == 0:

            if Configuration.getMetronom():
                logger.info('>>> STARTING metronom !')
                Configuration.getMetronom().start()

        if grinder.threadNumber == 0:
            logger.info('Thread meeting terminated() ...')
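__synchronize() implements a thread meeting point: every thread increments a shared counter under a lock, the last thread to arrive sets an Event, and all threads block on that Event before continuing. A standalone sketch of the same barrier pattern (names and the thread count are illustrative only):

# Standalone sketch of the meeting-point (barrier) pattern used by __synchronize.
import threading

NUM_THREADS = 4
lock = threading.Lock()
ready = threading.Event()
count = [0]


def worker(thread_number):
    with lock:
        count[0] += 1
        if count[0] == NUM_THREADS:
            # The last thread to arrive releases everybody
            ready.set()
    # Every thread blocks here until the last one has arrived
    ready.wait()
    print 'thread %d released' % thread_number


threads = [threading.Thread(target=worker, args=(i, )) for i in range(NUM_THREADS)]
for t in threads:
    t.start()
for t in threads:
    t.join()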
Example #3
    def __init__(self):

        # A token is an event and a content
        self.token = None

        # Initialize macro & create a local copy
        self.macrosCached = CachedMacros().copyMacros()

        # immutable scenario list
        self.scenarioList = Configuration.getScenarioList()

        # Memorization of previous calls
        # memorization is a transient state - all results are stored in session Context()
        self.memoryManager = MemoryMgr(Configuration.dataFilePath)

        # Test executed initializations
        self.testImpl = {}
        self.__createTest()

        # Delay the reporting to console
        grinder.statistics.delayReports = 1

        # monkey patching main acting thread
        self.__call__ = self.__rampup_call__

        # Thread-controlled ramp-up mode
        if Configuration.thread_control_enable:
            self.__call__ = self.__controlled_rampup_call__

        # monkey patching
        if Configuration.pureThreadMode and not Configuration.waiting_mode:
            if Configuration.async:
                # PERFORMANCE THREAD MODE with asynchronous flows
                # This combination cannot work ...
                logger.error(
                    'Not supported use case: thread mode with asynchronous flows. Use Throughput mode instead.'
                )
                raise NotImplementedError(
                    'Not supported use case: thread mode with asynchronous flows. Use Throughput mode instead.'
                )

            self.__call__ = self.__direct_call__

        # Synchronize all threads for the throughput mode to avoid peaks effect
        if Configuration.use_throughput:
            self.__class__.__synchronize()

        if Configuration.use_reporter:
            # Force token_lag to 0 as this is a new test
            Configuration.getClientReporter().setTime1(
                System.currentTimeMillis(), 'token_lag')
            # Reset TPS to zero
            Configuration.getClientReporter().setTPS(0)

            # Again optimization with monkey patching -:(
            self.__lanch_scenario__ = self.__lanch_scenario_with_reporter__
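The constructor selects the run mode by monkey patching self.__call__ with a bound method. In Python 2 this per-instance override is honoured when the runner is an old-style class (or when the caller invokes obj.__call__() explicitly); a minimal sketch of that dispatch mechanism, with hypothetical names:

# Sketch of per-instance __call__ monkey patching (Python 2, old-style class).
# Runner and its methods are illustrative, not the project's TestRunner.
class Runner:

    def __init__(self, controlled=False):
        # default behaviour
        self.__call__ = self.__direct_call__
        if controlled:
            # swap the behaviour for this instance only
            self.__call__ = self.__controlled_call__

    def __direct_call__(self):
        print 'direct call'

    def __controlled_call__(self):
        print 'controlled call'


Runner()()                  # prints 'direct call'
Runner(controlled=True)()   # prints 'controlled call'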
Example #4
 def sendData(self, **args):
     if Configuration.use_reporter:
         Configuration.getClientReporter().setTime1(
             str(time.time()).split('.')[0], 'reporting_is_the_key')
         try:
             Configuration.getClientReporter().customMethod(
                 'sending custom probe!')
         except AttributeError, e:
             logger.warn(
                 '%s Reporter %s Error[%s]' %
                 (self.__class__.__name__,
                  Configuration.getClientReporter().__class__.__name__, e))
Example #5
 def _busyManagement(self, response):
     """
       flow regulation based on 503 errorCode
     :param response:
     """
     # Busy management
     if response['errorCode'] == 503:
         if Configuration.use_regulator:
             Configuration.getMonitor().setBusyCount()
         else:
             logger.info(
                 'Got an HTTP-503 server busy response, printing context:\n\'\'\'%s\'\'\''
                 % response)
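_busyManagement() feeds HTTP 503 responses into a regulator through Configuration.getMonitor().setBusyCount(). A hypothetical minimal regulator, illustrating the idea of counting busy responses and turning them into a backoff delay (the class and the backoff formula are assumptions, not the project's actual Monitor):

# Hypothetical flow-regulator sketch: count 503 "busy" responses and convert
# them into a backoff delay. Not the project's real Monitor implementation.
import threading
import time


class BusyRegulator:
    def __init__(self, base_delay=0.1, max_delay=5.0):
        self._lock = threading.Lock()
        self._busy_count = 0
        self._base_delay = base_delay
        self._max_delay = max_delay

    def setBusyCount(self):
        with self._lock:
            self._busy_count += 1

    def throttle(self):
        # Sleep proportionally to the number of busy responses seen so far
        with self._lock:
            delay = min(self._base_delay * self._busy_count, self._max_delay)
        if delay:
            time.sleep(delay)


regulator = BusyRegulator()
response = {'errorCode': 503}   # stands in for the real response dict
if response['errorCode'] == 503:
    regulator.setBusyCount()
    regulator.throttle()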
Example #6
    def report_step_status(cls, **kargs):

        success = kargs['success']
        __ctx = kargs['context']
        cause = kargs['cause']
        state = kargs['state']
        synchronous = kargs.get('synchronous', True)

        # Validation reporting
        response = kargs.get('response', None)
        if response and Configuration.outTraceActivated:
            reporting.outputTraceForValidation(__ctx, response)

        status = 'success'

        if Configuration.use_reporter:
            Configuration.getClientReporter().addNbCallCount(
                __ctx.line.getTestName())

        if success:
            logger.info('[state=%s][status=%s][cause=%s]%s' %
                        (state, status, cause, cls.localization(__ctx)))
            if synchronous:
                logger.trace('grinder.statistics.forLastTest.success = 1')
                grinder.statistics.forLastTest.success = 1
                grinder.getStatistics().report()

        else:
            status = 'failed'
            logger.error('[state=%s][status=%s][cause=%s]%s' %
                         (state, status, cause, cls.localization(__ctx)))
            if Configuration.use_reporter:
                Configuration.getClientReporter().addNbCallErrorCount(
                    __ctx.line.getTestName())

            if synchronous:
                try:
                    logger.trace('grinder.statistics.forLastTest.success = 0')
                    grinder.statistics.forLastTest.success = 0
                    grinder.getStatistics().report()
                except InvalidContextException, x:
                    logger.info(
                        'Caught an InvalidContextException, meaning we are not in the post-processing of a grinder Test, reason: %s'
                        % (x))
Example #7
    def call_after(cls, __ctx, response, jsonMsg):

        if 'errorCode' not in response:
            logger.error(
                '%s.call_after - "errorCode" was not found in your response; it is required by your implementation!'
                % (cls.__name__))
            raise SyntaxError(
                '%s.call_after - "errorCode" was not found in your response; it is required by your implementation!'
                % (cls.__name__))

        errorCode = int(response['errorCode'])

        # Optimistic-lock bet failed: we correct this
        if errorCode not in (200, 0):
            ContextIdentifier.pop_ctx(__ctx)

        # context manager is for protocols like http (not for smpp)
        if __ctx.line.use_contextManager:
            # Case where we were too optimistic and got an error during the synchronous HTTP call
            if errorCode not in (
                    200, 0):  # TODO: fully decorrelate errorCode & http.status
                try:
                    Configuration.getrouterContextClient().postJsonMessage(
                        jsonMsg,
                        Configuration.getrouterContextClient().getDeleteUri())
                    logger.info(
                        'DELETE - Posting the callback router [message: %s]' %
                        (jsonMsg))
                except Exception, e:
                    logger.error(
                        'DELETE - Error posting message to the contextRouter, stacktrace=%s'
                        % (str(e)))
                    raise Exception(
                        'DELETE - Error posting message to the contextRouter, stacktrace=%s'
                        % (str(e)))
                finally:
Example #8
    def __lanch_scenario_with_reporter__(self):

        # When a token is added to the Grinder token queue, it is marked with its arrival time
        # So the statistic here gives the lag before it is consumed by a thread
        if self.token:
            Configuration.getClientReporter().setTime1(
                self.token.getTimestamp(), 'token_lag')

        Configuration.sessionIncrement()

        # Here the Yaml scenario execution really begins
        try:
            self.processScenario()
        finally:
            Configuration.sessionDecrement()
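As the comments above explain, a token is stamped with its arrival time when it enters the Grinder token queue, so the token_lag statistic measures how long it waited before a thread consumed it. A sketch of that measurement, assuming millisecond timestamps and a stand-in Token class (not the project's token module):

# Sketch: measuring the queue lag of a token stamped at enqueue time.
import time


class Token:
    def __init__(self):
        # enqueue timestamp, in milliseconds
        self._timestamp = int(time.time() * 1000)

    def getTimestamp(self):
        return self._timestamp


token = Token()
time.sleep(0.05)  # simulate time spent waiting in the queue
lag_ms = int(time.time() * 1000) - token.getTimestamp()
print 'token waited %d ms before being consumed' % lag_ms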
Example #9
    def __init__(self, macrosAllScenario):

        # =========== IMMUTABLE attributes =================
        # we reverse because we will use a pop() to traverse the scenario
        self.scenarioList = Configuration.getScenarioList()
        self.scenarioListSize = len(self.scenarioList)
        self.macrosAllScenario = macrosAllScenario
        #
        self.templateManager = Configuration.cmdMgr

        # =========== MUTABLE attributes =================
        self.uid = None
        self.macros = None
        # expiration time = infinite by default
        self.expirationTime = -1
        self.indexScenario = -1
        self.scenario = None
        self.line = None
        self.indexLine = -1
        self.scenarioSize = 0

        # For async hardening
        self.locked = False
        self.flagged = False
        self.contextKey = None
        self.contextValue = None

        # A flag to indicate that the scenario must be continued on resume
        # Bug in Rev373 - the default value was True
        self.scenarioContinue = False

        # The initial context is the "meta" definition of the context
        self.__initial_context = None

        # This is the living context with substitution
        self.stored = None

        # A thread context cache
        self.cacheKeys = CacheKey()
Example #10
"""
   dummy implementation allows executing dummy payload and validate input/templates processing
"""

import random
import time
from net.grinder.script.Grinder import grinder

from dummy import dummy
from corelibs.coreGrinder import CoreGrinder
from corelibs.configuration import Configuration

properties = CoreGrinder.getProperty()
logger = CoreGrinder.getLogger()

reporter = Configuration.getClientReporter()


class report(dummy):
    def __init__(self, _dataFilePath, _templateFilePath):
        dummy.__init__(self, _dataFilePath, _templateFilePath)

    def version(self):
        '''Parse the header string inserted by MKS: take the second field (file name)
        and the third field (version) and concatenate them into a release string,
        e.g. 'dummy <1.3>' for the header below.'''
        setVersion = '$Header: dummy.grindertool 1.3 2011/06/15 15:52:15CEST omerlin Exp  $'.split(
        )[1:3]
        return setVersion[0].split('.')[0] + ' <' + setVersion[1] + '>'

    def sendData(self, **args):
Example #11
    def call_before(cls, __ctx):

        # ZERO, increase the number of waiting contexts (should be cleaned up in case of expiration)
        # *** This Object is useful for Validation mode ***
        # TODO: check this is not useless - as the number of waiting context is in the ContextIdentifier class
        if __ctx.line.asyncBlocking():
            ContextLock.contextIncrease()

            if logger.isTraceEnabled():
                logger.trace(
                    '<<ASYNC>> asynchronous.call_before() - contextIncrease() & lock() - [ContextLock=%d]'
                    % (ContextLock.count()))

        if not __ctx.line.multipleCaller:

            # This is the expiration criteria in ContextIdentifier reaper thread
            #
            __ctx.line.timeout = int(__ctx.line.timeout)
            # expiration date in milliseconds (-1 means NO expiration)
            expirationTime = ((__ctx.line.timeout + time.time()) *
                              1000 if __ctx.line.timeout != -1 else -1)
            __ctx.setExpirationTime(expirationTime)

            #
            # FIRST, get the context identifier value
            #  3 cases:
            #    value: ${VAR}
            #    value: xxxx                     => a fixed literal value
            #    "value" keyword is not defined  => get the current value from the contextKey in the running context
            #

            # Remember: line is only a definition (Immutable)
            contextKeyValue = __ctx.line.contextValue if __ctx.line.contextValue else __ctx.line.contextKey
            __ctx.contextKey = __ctx.line.contextKey
            __ctx.contextValue = __ctx.getCacheKeys().getValue(contextKeyValue)

            asynclog.logTrace(pos='%s.call_before' % (cls.__name__),
                              msg='Initializing key/value',
                              key=__ctx.contextKey,
                              value=__ctx.contextValue)

            if not __ctx.contextValue:
                asynclog.logError(pos='%s.call_before' % (cls.__name__),
                                  key=__ctx.contextKey,
                                  value=__ctx.contextValue,
                                  msg='\ncache=%s' %
                                  (__ctx.getCacheKeys().dictRuntime),
                                  err='No value found in cache')
                raise SyntaxError(
                    '[Asynchronous step] contextKey "%s" must have a value in the context or have a "value"/"contextValue" defined in the scenario'
                    % (__ctx.contextKey))

            asynclog.log(pos='%s.callBefore' % (cls.__name__),
                         key=__ctx.contextKey,
                         value=contextKeyValue,
                         msg='initial async identifiers')

            # SECOND, send the JSON message to the ContextManager process
            #---------------------------------------------------------------
            # TODO: manage the case there is no context router !
            # TODO: manage the case we have only SMPP callback
            jsonMessage = None
            if __ctx.line.use_contextManager:
                try:
                    # client call to the router
                    # Time expressed in milliseconds
                    jsonMessage = '{"contextKey" : "%s", "value" : "%s", "host" : "%s", "port" : "%s", "expirationtime": "%s", "count": "%d"}' % (
                        __ctx.contextKey, __ctx.contextValue,
                        HTTPServerCallback.getHost(),
                        HTTPServerCallback.getPort(), expirationTime,
                        __ctx.line.callbackCount)
                    Configuration.getrouterContextClient().postJsonMessage(
                        jsonMessage,
                        Configuration.getrouterContextClient().getCreateUri())
                    logger.debug(
                        'CREATE - Posting the callback router [message: %s]' %
                        (jsonMessage))
                except Exception, e:
                    logger.error(
                        'CREATE - Error posting message to the contextRouter, stacktrace=%s'
                        % (str(e)))
                    raise Exception(
                        'CREATE - Error posting message to the contextRouter, stacktrace=%s'
                        % (str(e)))

            # We bet the call will succeed, so we store the (contextKey, value) pair
            # and the cloned context for the callback in the ContextIdentifier cache
            asynclog.logInfo(pos='%s.callBefore' % (cls.__name__),
                             key=__ctx.contextKey,
                             value=__ctx.contextValue,
                             msg='Storing a cloned context')

            with cls.mutex:
                cloneContext = Context(__ctx.macrosAllScenario)
                cloneContext.copy(__ctx)
                asynclog.logTrace(
                    pos='%s.callBefore' % cls.__name__,
                    key=__ctx.contextKey,
                    value=__ctx.contextValue,
                    msg=
                    'Cloned object address: %s, original address=%s, context=%s'
                    % (hex(id(cloneContext)), hex(id(__ctx)), cloneContext))

                try:
                    ContextIdentifier.add_ctx(__ctx.contextKey,
                                              __ctx.contextValue, cloneContext)
                except Exception:
                    raise
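The context-registration message above is assembled by hand with % string formatting. The same JSON payload can be built more safely with json.dumps, which handles quoting and escaping; a sketch with placeholder values (not a drop-in change to the project code):

# Sketch: building the context-registration JSON with json.dumps instead of
# manual string formatting. Field names mirror the message above; values are
# placeholders.
import json

jsonMessage = json.dumps({
    'contextKey': 'MSISDN',
    'value': '33612345678',
    'host': 'localhost',
    'port': '8080',
    'expirationtime': str(-1),
    'count': '1',
})
print jsonMessage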
Example #12
            for k, v in __ctx.contextKey.iteritems():
                v = __ctx.getCacheKeys().getValue(v if v else k)

                # expiration date in milliseconds
                expirationTime = (__ctx.line.timeout[k] + time.time()) * 1000

                msg.append(
                    '{"contextKey" : "%s", "value" : "%s", "host" : "%s", "port" : "%s", "expirationtime": "%s", "count": "%d"}'
                    % (k, v, HTTPServerCallback.getHost(),
                       HTTPServerCallback.getPort(), expirationTime,
                       __ctx.line.callbackCount))
            jsonMessage = '[' + ','.join(msg) + ']'

            try:
                Configuration.getrouterContextClient().postJsonMessage(
                    jsonMessage,
                    Configuration.getrouterContextClient().getCreateBatchUri())
                logger.debug(
                    'CREATE BATCH - Posting the callback router [message: %s]'
                    % (jsonMessage))
            except Exception, e:
                logger.error(
                    'CREATE BATCH - Error posting message to the contextRouter, stacktrace=%s'
                    % (str(e)))
                raise Exception(
                    'CREATE BATCH - Error posting message to the contextRouter, stacktrace=%s'
                    % (str(e)))

        #
        # Store all the waiting context
        #
Example #13
import atexit

from corelibs.coreGrinder import CoreGrinder
from corelibs.configuration import Configuration
from corelibs.memory import MemoryMgr
from corelibs.grinderQueue import GrinderQueue
from corelibs.reporting import reporting
from corelibs.token import ContextToken, AbortRunToken, Token, ThroughputToken

properties = CoreGrinder.getProperty()
grinder = CoreGrinder.getGrinder()
logger = CoreGrinder.getLogger()

#--------------------------------------------------------------

# For custom logging
loggerProxy = proxy.getLoggerProxy()

# process static configuration
Configuration.initialize()

logger.info('===> configuration terminated <====')


class AsyncException(Exception):
    pass


@atexit.register
def bye():
    print ''
    print '.' * 50
    print 'bye() - Stopping everything properly ...'
    logger.info('bye() - Stopping everything properly ...')
    if Configuration.async: