Example n. 1
0
def loadProps():
    """Return a dict of runtime properties enriched with grinder context.

    Starts from a copy of the global ``properties`` and overlays either
    fixed all-zero identifiers (when running under ``MockGrinder``) or the
    live host/agent/process/thread/run identifiers from the grinder.
    """
    props = dict(properties)

    # Unit-test path: a MockGrinder gets deterministic, all-zero identifiers.
    if isinstance(grinder, MockGrinder):
        mocked = {
            'grinder.threads': 1,
            'grinder.runNumber': 0,
            'grinder.threadNumber': 0,
            'grinder.processNumber': 0,
            'AGENT': 0,
            'RUN': 0,
            'THREAD': 0,
            'PROCESS': 0
        }
        props.update(mocked)
        return props

    if grinder:
        # Live path: both the 'grinder.*' keys and the short aliases
        # (HOST/AGENT/RUN/...) are exposed for template substitution.
        props['grinder.hostID'] = get_hostID()
        props['grinder.runNumber'] = grinder.getRunNumber()
        props['grinder.threadNumber'] = grinder.getThreadNumber()
        props['grinder.processNumber'] = CoreGrinder.getRealProcessNumber()
        props['grinder.agentNumber'] = '%02d' % (grinder.getAgentNumber())
        props['HOST'] = socket.gethostname().split('.')[0]
        props['HOSTID'] = get_hostID()
        props['AGENTID'] = get_agentId(get_hostID())
        props['AGENT'] = '%02d' % (grinder.getAgentNumber())
        props['RUN'] = grinder.getRunNumber()
        props['THREAD'] = grinder.getThreadNumber()
        props['PROCESS'] = CoreGrinder.getRealProcessNumber()
    return props
Example n. 2
0
    def initClass(cls, strArgs):
        """Parse the class arguments and prime the chunked file reader.

        Optionally records how many leading lines of the data file must be
        skipped for this process (property ``throughput_start_from<proc>``).
        """
        cls.manageArgs(strArgs)

        # NOTE(review): cls.checkArgs() is temporarily disabled here.

        # Number of leading lines to seek past for this specific process.
        skipKey = 'throughput_start_from%d' % (CoreGrinder.getRealProcessNumber())
        cls.startFrom = properties.getInt(skipKey, 0)
        if cls.startFrom:
            logger.info('We will ignore the first %d lines' % (cls.startFrom))

        # Rows are produced chunk by chunk through a generator, which keeps
        # memory usage low on large input files.
        cls.chunkGenerator = cls.getNextChunkFromFile()
        cls.rows = cls.chunkGenerator.next()
Example n. 3
0
    def __manageClientReporter(cls):
        """Create the metrics reporter client when reporting is enabled.

        Chooses the reporter implementation from the 'reporter_tool'
        property ('centralreporter' by default), resolves the reporting
        location (per agent or per process) and connects to the configured
        reporter host/port. On success, sets ``cls.clientReporter``.
        """
        logger.info('Reporting activation: %s' % (Configuration.use_reporter))
        if Configuration.use_reporter:

            #
            # agentName: grinder.hostID is the standard way to set a naming for a specific agent
            #            by default, if we have 2 agents, the first one will get hostname-0, the second hostname-1 ...
            # Warning: sometimes we get the full domain name separated with dot. (so we split and keep the first)
            #
            agentName = properties.get(
                'grinder.hostID') or socket.gethostname().split('.')[0]

            #
            # report_show_process=True  : you want to have all the per process metrics in the graphing tool
            # report_show_process=False : you have metrics per agent (or hostname)
            #
            location = '%s.%d' % (agentName, CoreGrinder.getRealProcessNumber(
            )) if properties.getBoolean('reporter_show_process',
                                        False) else agentName

            # Normalize the tool name; empty/unset falls back to the
            # central reporter.
            reporter_tool_name = properties.get('reporter_tool')
            reporter_tool = (reporter_tool_name
                             or '').lower() or 'centralreporter'
            reporter_target = {
                'centralreporter': CentralReporterClient,
                'carbonreporter': CarbonCacheClient,
                'statsd': StatsdClient
            }
            # Unknown tool names are resolved by a dynamic import under
            # corelibs.stats.<name> so custom reporters can be plugged in.
            # NOTE(review): reporterModule is never used after the import —
            # presumably the import only validates the property; confirm.
            reporterModule = None
            if reporter_tool not in reporter_target:
                try:
                    reporterModule = __import__(
                        'corelibs.stats.%s' % reporter_tool_name, globals(),
                        locals(), ['%s' % reporter_tool_name], -1)
                except (Exception, JavaException), e:
                    logger.error(
                        'FAILED invalid property reporter_tool [corelibs.stats.%s], failed with reason: [%s]'
                        % (reporter_tool_name, e))
                    # Java exceptions are re-wrapped so callers only have to
                    # deal with Python exceptions.
                    if isinstance(e, JavaException):
                        raise Exception(e)
                    raise

            reporterHost = properties.get('reporter_host') or 'localhost'
            reporterPort = properties.getInt('reporter_port', 1901)

            # all the ordered testnames of all the scenarios
            testnames = [
                line.testName for scenario in cls.scenarioList.getList()
                for line in scenario.lines
            ]

            if reporter_tool == 'centralreporter':
                # remove duplicates from the testname list
                testnames = list(set(testnames))
                try:
                    cls.clientReporter = CentralReporterClient(
                        reporterHost, reporterPort, location, testnames)
                # NOTE(review): bare except — a connection failure is only
                # logged and clientReporter stays None; confirm this
                # best-effort behavior is intended.
                except:
                    logger.error(
                        '[reporter=%s][host=%s][port=%d][location=%s][testnames=%s]'
                        % (reporter_tool, reporterHost, reporterPort, location,
                           testnames))
            elif reporter_tool == 'statsd':
                try:
                    # reporter_aggregate_value aimed at grouping values (the machine location is absent)
                    # test names are indexed by the process number index (process0=test0, ... )
                    location = '' if properties.getBoolean(
                        'reporter_aggregate_value', False) else '%s' % location

                    cls.clientReporter = StatsdClient(reporterHost,
                                                      reporterPort, location)
                except Exception, e:
                    logger.error(
                        'statsd reporter - Exception=%s\n[reporter=%s][host=%s][port=%d][location=%s]'
                        % (str(e), reporter_tool, reporterHost, reporterPort,
                           location))
                    # Unlike the central reporter, a statsd failure is fatal.
                    raise RuntimeError(
                        'Exception=%s\nUnable to start the statsd reporter ([reporter=%s][host=%s][port=%d][location=%s])'
                        % (str(e), reporter_tool, reporterHost, reporterPort,
                           location))
            # NOTE(review): 'carbonreporter' is declared in reporter_target
            # but has no construction branch here — verify where (or if)
            # CarbonCacheClient is ever instantiated.
Example n. 4
0
class Configuration:
    """Process-wide configuration resolved once from the grinder properties.

    Every attribute is a class-level value shared by all test threads of
    this worker process; ``initialize()`` completes the setup at startup.
    """

    # Some required declaration
    # Paths to the scenario data files and the payload templates.
    dataFilePath = properties.get('dataFilePath')
    templateFilePath = properties.get('templateFilePath')
    # Template (payload) manager — created in checkParameters().
    cmdMgr = None

    # Optional output tracing for validation (see checkParameters()).
    outTraceActivated = False
    fOut = None        # main trace file handle
    fOutExtra = None   # extra-level trace file handle
    cv_out = None      # lock serializing writes to fOut
    cv_ext = None      # lock serializing writes to fOutExtra
    extraLevel = False

    # get the delay time between commands
    interMessageDelay = properties.getDouble('messageDelay', 0.0)
    initialSleepTime = properties.getDouble('grinder.initialSleepTime', 0.0)

    #
    # because it is possible that each process (and therefore, each test_thread)
    # could be kicked off numerous times by the console without exiting, get the total number of threads
    # and processes, then modulo divide the process and test_thread number with the total (respective) number
    # this will correctly create the correct file name (file.X.Y) on multiple runs
    #
    numberOfThreads = properties.getInt('grinder.threads', 0)
    #     numberOfRuns = properties.getInt('grinder.runs', 0)
    asyncruns = 0
    numberOfProcess = properties.getInt('grinder.processes', 0)

    # When relaunching several times, the process number is incremented
    processNumber = CoreGrinder.getRealProcessNumber()

    # Zero-padding widths used to build process/thread/run identifiers.
    processNumberPadding = properties.getInt('processNumberPadding', 2)
    threadNumberPadding = properties.getInt('threadNumberPadding', 4)
    runNumberPadding = properties.getInt('runNumberPadding', 7)

    # ID format never changes, retrieve it once and for all.
    idFormat = toolbox.getidFormat()
    processIdFormat = '%%0%dd' % (processNumberPadding)
    runIDPadding = toolbox.getRunIDPadding()
    runIdFormat = '%%0%dd' % (runNumberPadding)
    threadIdFormat = '%%0%dd' % (threadNumberPadding)

    # 'stop.scenario.on.error' overrides the legacy 'stopOnError' property.
    grinderStopScenarioOnError = properties.getBoolean(
        'stop.scenario.on.error', properties.getBoolean('stopOnError', True))

    #
    # if displayReadResponse is set to 'True', then display:
    #    the first 256 bytes of the response received from the command
    #
    displayReadResponse = properties.getBoolean('displayReadResponse', False)
    displayReadResponseMaxLength = properties.getInt(
        'displayReadResponseMaxLength', 1024)

    # The way inFile (scenario) are set to process, thread for different UC processing
    # Each flag has a short name and a legacy 'grindertool.test.*' alias.
    oneFileByThread = properties.getBoolean(
        'oneFileByThread',
        properties.getBoolean('grindertool.test.scenarioPerThread', False))
    oneFileByProcess = properties.getBoolean(
        'oneFileByProcess',
        properties.getBoolean('grindertool.test.scenarioPerProcess', False))

    # Default mode: one single scenario list shared by everything.
    oneSingleList = not (oneFileByProcess or oneFileByThread)

    # Base name (no directory) of the grinder console properties file.
    shortFileName = properties.get('grinder.console.propertiesFile') or ''
    if shortFileName:
        shortFileName = shortFileName.split(os.sep)[-1]

    cacheProtocols = properties.getBoolean('cacheProtocols', False)

    # For Asynchronous management
    async = False
    routerContextClient = None

    # Http server callback for asynchronous flows
    http_graceful_period = properties.getInt('grindertool.http.stop.graceful',
                                             5000)

    # NOTE(review): duplicate of the 'async = False' assignment above —
    # harmless but redundant.
    async = False
    pureThreadMode = False
    validationMode = False
    scenarioList = None
    waiting_mode = False

    # SmscDriver start if required
    smpp_started = properties.getBoolean('grindertool.smsc.start', False)
    # graceful period before stopping Smsc server (5 seconds by default)
    smpp_graceful_period = properties.getInt('grindertool.smsc.stop.graceful',
                                             5)
    smscDriver = None

    # Metrics reporting (see __manageClientReporter).
    use_reporter = properties.getBoolean('reporter_activate', False)
    clientReporter = None

    metronom = None
    monitor = None
    use_throughput = properties.getBoolean('throughput_activate', False)
    # Throughput mode is pointless with a single thread: force it off.
    if numberOfThreads == 1:
        properties.setBoolean('throughput_activate', False)
        use_throughput = False
    use_regulator = properties.getBoolean('regulator_activate', False)

    # Defaults for the asynchronous callback listener.
    listener_host = 'localhost'
    listener_port = 9080
    listener_poolSize = 32
    listener_socketBacklog = 1024

    # TODO
    #-----------
    #  Add the grinder.threads as a Gauge
    #  Add the thread maximum from the parameter grindertool.threads.threshold_pct_active if thread_control_enable=True
    #

    # Active Session throttling (see activateSessionThrottler).
    thread_control_enable = False
    threshold_thread_active = 100
    thread_wait_milli = 10

    @classmethod
    def initialize(cls):
        logger.info(
            "********************* STARTING INITIALIZATION ***********************"
        )

        # Check some parameters
        cls.checkParameters()

        # Immutable scenario loading and flags relative to the different mode
        cls.manageScenarioAndFlags()

        # Asynchronous configuration
        if cls. async:
            cls.__manageAsyncConfiguration()

            # Asynchronous callback listener - must be started after the asynchronous configuration
            cls.__startListener()

        # Initialize the SMSCToolkit and wait after connection only for pureThreadMode
        cls.__manageSMSCToolkit()

        # Reporting monitoring data
        cls.__manageClientReporter()

        # Ramping up mode management & flow controller mode
        cls.manageThroughput()

        # thread throttling
        cls.activateSessionThrottler()

        logger.info(
            "********************* END OF INITIALIZATION ***********************"
        )

    @classmethod
    def activateSessionThrottler(cls):
        """Enable the active-thread throttling mechanism, if configured.

        When ``grindertool.threads.control_enable`` is True, computes the
        maximum number of concurrently active threads as a percentage
        (``grindertool.threads.threshold_pct_active``, default 100) of the
        configured grinder threads, and the sleep time applied while over
        that threshold.

        Raises:
            SyntaxError: if the computed threshold exceeds the number of
                configured grinder threads (percentage above 100).
        """
        # This is a throughput throttling mechanism
        #-----------------------------------------------------
        #   Threshold of active threads
        # thread throttling enable (True/False)
        cls.thread_control_enable = properties.getBoolean(
            'grindertool.threads.control_enable', False)
        if cls.thread_control_enable:

            # Percentage of grinder threads allowed to be active at once.
            # BUGFIX: the previous formula truncated pct/100 to an integer
            # *before* multiplying, which yielded a threshold of 0 for any
            # percentage below 100. Multiply first, then floor-divide.
            pct_active = properties.getInt(
                'grindertool.threads.threshold_pct_active', 100) or 100
            cls.threshold_thread_active = (
                pct_active * cls.numberOfThreads) // 100
            if cls.threshold_thread_active > cls.numberOfThreads:
                raise SyntaxError(
                    'Number of threshold thread (%d) cannot be above number of grinder threads (%d)'
                    % (cls.threshold_thread_active, cls.numberOfThreads))
            #   When over threshold, time sleeping doing nothing
            cls.thread_wait_milli = properties.getInt(
                'grindertool.threads.sleep', 10)

            logger.info(
                '[CONTROL] Switching to controlled rampup ( [Threshold=%d][Threads=%d][Sleep=%d]  ) ...'
                % (cls.threshold_thread_active, cls.numberOfThreads,
                   cls.thread_wait_milli))
            print(
                '\t[CONTROL] Switching to controlled rampup ( [Threshold=%d][Threads=%d][Sleep=%d]  ) ...'
                % (cls.threshold_thread_active, cls.numberOfThreads,
                   cls.thread_wait_milli))

    @classmethod
    def checkParameters(cls):
        """Create the template manager and open the optional trace outputs."""
        # TODO : should be part of the scenario immutable object
        # The template (payload) manager is a single shared instance.
        cls.cmdMgr = command.TemplateManager(cls.templateFilePath)

        # Validation trace: optionally dump exchanges to timestamped files.
        cls.outTraceActivated = properties.getBoolean('outTraceActivated',
                                                      False)
        if not cls.outTraceActivated:
            return

        baseDir = properties.get('logFilePath') or cls.dataFilePath
        baseName = properties.get('fileout') or 'default'
        outFile = '%s%s%s.%s' % (baseDir, os.sep, baseName,
                                 toolbox.getFileTimeStamp())
        cls.fOut = file(outFile, 'w')
        logger.info('outFile "%s"' % outFile)

        cls.extraLevel = properties.getBoolean('extraLevel', False)
        if cls.extraLevel:
            cls.fOutExtra = file('%s.EXTRA' % outFile, 'w')
            logger.info('outExtraFile "%s"' % cls.fOutExtra)

        # One lock per stream so threads never interleave their writes.
        cls.cv_out = Condition()
        cls.cv_ext = Condition()

    @classmethod
    def manageThroughput(cls):
        """Start the regulator or metronom and adjust runs/threads for it.

        Throughput mode forces grinder.runs=0 (run forever) and bumps the
        thread count to a minimum of 16.
        """
        logger.info('Throughput_mode: %s' % (str(cls.use_throughput)))

        if cls.use_regulator:
            # The regulator implies throughput mode.
            cls.use_throughput = True
            Configuration.__launchRegulator(cls.clientReporter)
        elif cls.use_throughput:
            # Plain throughput mode: the metronom paces the injection.
            Configuration.__launchMetronom()
            properties.setInt('grinder.runs', 0)

        if cls.use_throughput:
            logger.info(
                '[throughput_mode activated, so forcing parameter grinder.runs=0'
            )
            properties.setInt('grinder.runs', 0)
            if cls.numberOfThreads < 16:
                logger.info(
                    '[throughput_mode activated, number of threads [current=%d] should be at at least greater than 16, setting to 16'
                    % (cls.numberOfThreads))
                properties.setInt('grinder.threads', 16)

    @classmethod
    def manageScenarioAndFlags(cls):
        """Load the immutable scenario list and derive the execution flags.

        Sets the async / validation / waiting / pureThread mode flags from
        the scenario content and the grinder properties.
        """
        cls.scenarioList = ScenarioList(cls)
        # Async mode is dictated by the scenarios themselves.
        cls. async = cls.scenarioList.isAsync()

        # ValidationMode : in that case, the thread wait after the asynchronous operation termination
        if cls. async:
            if cls.numberOfThreads == 1 or cls.oneFileByThread or properties.getBoolean(
                    'grindertool.forceValidationMode', False):
                cls.validationMode = True
                cls.use_throughput = False

        # Grace period granted to pending async operations at shutdown.
        cls.graceful_async = 0
        if cls. async:
            cls.graceful_async = properties.getInt(
                'grindertool.graceful_async_time', 5000)

        # optimization for pure threading mode
        cls.waiting_mode = properties.getBoolean('waiting_mode', False)
        # Pure thread mode = neither throughput pacing nor async callbacks.
        cls.pureThreadMode = not cls.use_throughput and not cls. async