Code example #1
0
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
    """Plot one SVG graph per mbean attribute for each graph definition.

    For every graph definition, gathers the per-entity metric CSV files
    (entities whose CSV file is missing are skipped with a warning) and then
    renders one graph per attribute listed in the definition's comma-separated
    'attributes' field.

    Arguments:
      graphs      : list of graph-definition dicts with keys 'graph_name',
                    'y_label', 'bean_name' and 'attributes'
      entities    : list of entity dicts; 'entity_id' locates each entity's
                    metrics directory and labels its graph legend
      role        : role name used in output paths and legend labels
      testcaseEnv : test environment; supplies testCaseDashboardsDir and the
                    metrics log-directory lookup
    """
    for graph in graphs:
        inputCsvFiles = []
        graphLegendLabels = []
        for entity in entities:
            entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
            entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
            if not os.path.exists(entityMetricCsvFile):
                # A missing CSV is non-fatal: skip this entity, keep plotting the rest.
                logger.warning("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
            else:
                inputCsvFiles.append(entityMetricCsvFile)
                graphLegendLabels.append(role + "-" + entity['entity_id'])

        # Defined before the try so the except clause can always reference it
        # (previously a NameError if the failure happened before the loop ran).
        outputGraphFile = None
        try:
            # plot one graph per mbean attribute; 'y_label' and 'attributes'
            # are parallel comma-separated lists in the graph definition
            labels = graph['y_label'].split(',')
            attributes = graph['attributes'].split(',')
            fullyQualifiedAttributeNames = [graph['bean_name'] + ':' + attribute
                                            for attribute in attributes]
            for label, qualifiedName, attribute in zip(labels, fullyQualifiedAttributeNames, attributes):
                outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + qualifiedName + ".svg"
                plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + attribute,
                            "time", label, attribute, outputGraphFile)
        except Exception as e:
            logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
            traceback.print_exc()
Code example #2
0
File: metrics.py  Project: brndnmtthws/kafka
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
    """Plot one SVG graph per mbean attribute for each graph definition.

    For every graph definition, gathers the per-entity metric CSV files
    (entities whose CSV file is missing are skipped with a warning) and then
    renders one graph per attribute listed in the definition's comma-separated
    'attributes' field.

    Arguments:
      graphs      : list of graph-definition dicts with keys 'graph_name',
                    'y_label', 'bean_name' and 'attributes'
      entities    : list of entity dicts; 'entity_id' locates each entity's
                    metrics directory and labels its graph legend
      role        : role name used in output paths and legend labels
      testcaseEnv : test environment; supplies testCaseDashboardsDir and the
                    metrics log-directory lookup
    """
    for graph in graphs:
        inputCsvFiles = []
        graphLegendLabels = []
        for entity in entities:
            entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
            entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
            if not os.path.exists(entityMetricCsvFile):
                # A missing CSV is non-fatal: skip this entity, keep plotting the rest.
                logger.warning("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
            else:
                inputCsvFiles.append(entityMetricCsvFile)
                graphLegendLabels.append(role + "-" + entity['entity_id'])

        # Defined before the try so the except clause can always reference it
        # (previously a NameError if the failure happened before the loop ran).
        outputGraphFile = None
        try:
            # plot one graph per mbean attribute; 'y_label' and 'attributes'
            # are parallel comma-separated lists in the graph definition
            labels = graph['y_label'].split(',')
            attributes = graph['attributes'].split(',')
            fullyQualifiedAttributeNames = [graph['bean_name'] + ':' + attribute
                                            for attribute in attributes]
            for label, qualifiedName, attribute in zip(labels, fullyQualifiedAttributeNames, attributes):
                outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + qualifiedName + ".svg"
                plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + attribute,
                            "time", label, attribute, outputGraphFile)
        except Exception as e:
            logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
            traceback.print_exc()
Code example #3
0
File: metrics.py  Project: sreeramg970/kafka
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv,
                             testcaseEnv):
    """Start a remote JmxTool metrics collector for each mbean this role needs.

    For every mbean referenced by the role's dashboard definitions, launches
    (via ssh) kafka.tools.JmxTool on jmxHost in the background, piping its CSV
    output into the entity's metrics directory, then reads the collector's pid
    back over ssh and records it in testcaseEnv.entityJmxParentPidDict.

    Arguments:
      jmxHost, jmxPort : host/port of the JMX agent (jmxPort is a string)
      role             : entity role, selects the dashboard definitions
      entityId         : key for cluster-config lookups and the pid dict
      systemTestEnv    : supplies cluster config and METRICS_PATHNAME
      testcaseEnv      : supplies the log-dir lookup and entityJmxParentPidDict
    """
    logger.info("starting metrics collection on jmx port : " + jmxPort,
                extra=d)
    jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
    clusterConfig = systemTestEnv.clusterEntityConfigDictList
    metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
    entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(
        testcaseEnv, role, entityId, "metrics")
    dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
    mbeansForRole = get_mbeans_for_role(dashboardsForRole)

    kafkaHome = system_test_utils.get_data_by_lookup_keyval(
        clusterConfig, "entity_id", entityId, "kafka_home")
    javaHome = system_test_utils.get_data_by_lookup_keyval(
        clusterConfig, "entity_id", entityId, "java_home")

    for mbean in mbeansForRole:
        outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
        # Remote command: run JmxTool with "JMX_PORT= " (the space is
        # deliberate, leaving the variable empty — presumably so the launched
        # JVM does not open a JMX port of its own; TODO confirm), redirect the
        # CSV output, and stash the background pid in entity_pid.
        startMetricsCmdList = [
            "ssh " + jmxHost, "'JAVA_HOME=" + javaHome, "JMX_PORT= " +
            kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
            "--jmx-url " + jmxUrl, "--object-name " + mbean + " 1> ",
            outputCsvFile + " & echo pid:$! > ",
            entityMetricsDir + "/entity_pid'"
        ]

        startMetricsCommand = " ".join(startMetricsCmdList)
        logger.debug("executing command: [" + startMetricsCommand + "]",
                     extra=d)
        system_test_utils.async_sys_call(startMetricsCommand)
        # give the remote process a moment to start and write its pid file
        time.sleep(1)

        pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
        logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
        subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)

        # keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
        # testcaseEnv.entityJmxParentPidDict:
        #   key: entity_id
        #   val: list of JMX ppid associated to that entity_id
        #   { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
        for line in subproc.stdout.readlines():
            line = line.rstrip('\n')
            logger.debug("line: [" + line + "]", extra=d)
            if line.startswith("pid"):
                logger.debug("found pid line: [" + line + "]", extra=d)
                thisPid = line.split(':')[1]
                # setdefault replaces the manual "if key not in dict" dance
                testcaseEnv.entityJmxParentPidDict.setdefault(entityId, []).append(thisPid)
Code example #4
0
File: metrics.py  Project: brndnmtthws/kafka
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
    """Start a remote JmxTool metrics collector for each mbean this role needs.

    For every mbean referenced by the role's dashboard definitions, launches
    (via ssh) kafka.tools.JmxTool on jmxHost in the background, piping its CSV
    output into the entity's metrics directory, then reads the collector's pid
    back over ssh and records it in testcaseEnv.entityJmxParentPidDict.

    Arguments:
      jmxHost, jmxPort : host/port of the JMX agent (jmxPort is a string)
      role             : entity role, selects the dashboard definitions
      entityId         : key for cluster-config lookups and the pid dict
      systemTestEnv    : supplies cluster config and METRICS_PATHNAME
      testcaseEnv      : supplies the log-dir lookup and entityJmxParentPidDict
    """
    logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
    jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
    clusterConfig = systemTestEnv.clusterEntityConfigDictList
    metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
    entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
    dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
    mbeansForRole = get_mbeans_for_role(dashboardsForRole)

    kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
    javaHome  = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")

    for mbean in mbeansForRole:
        outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
        # Remote command: run JmxTool with "JMX_PORT= " (the space is
        # deliberate, leaving the variable empty — presumably so the launched
        # JVM does not open a JMX port of its own; TODO confirm), redirect the
        # CSV output, and stash the background pid in entity_pid.
        startMetricsCmdList = ["ssh " + jmxHost,
                               "'JAVA_HOME=" + javaHome,
                               "JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
                               "--jmx-url " + jmxUrl,
                               "--object-name " + mbean + " 1> ",
                                outputCsvFile + " & echo pid:$! > ",
                                entityMetricsDir + "/entity_pid'"]

        startMetricsCommand = " ".join(startMetricsCmdList)
        logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
        system_test_utils.async_sys_call(startMetricsCommand)
        # give the remote process a moment to start and write its pid file
        time.sleep(1)

        pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
        logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
        subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)

        # keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
        # testcaseEnv.entityJmxParentPidDict:
        #   key: entity_id
        #   val: list of JMX ppid associated to that entity_id
        #   { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
        for line in subproc.stdout.readlines():
            line = line.rstrip('\n')
            logger.debug("line: [" + line + "]", extra=d)
            if line.startswith("pid"):
                logger.debug("found pid line: [" + line + "]", extra=d)
                thisPid = line.split(':')[1]
                # setdefault replaces the manual "if key not in dict" dance
                testcaseEnv.entityJmxParentPidDict.setdefault(entityId, []).append(thisPid)