示例#1
0
    def updateWithNotSuccAppid(self, app):
        """Count one unsuccessful application in the RM interval stats.

        app -- application dict from the RM REST API; must contain
               'startedTime' and 'stats'.
        """
        happenTime = util.getIntervalTime(app['startedTime'])
        rmStats = self.getRm(self.recordTime, happenTime)

        rmStats.inc("appNum", 1)
        # Only these two terminal statuses feed a dedicated counter;
        # any other status just bumps the overall app count above.
        counterByStatus = {'KILLED': "killedApp", 'FAILED': "failedApp"}
        counterName = counterByStatus.get(app['stats'])
        if counterName is not None:
            rmStats.inc(counterName, 1)
示例#2
0
 def __init__(self, beginTime=None):
     """Initialize collector state for one collection window.

     beginTime -- epoch seconds marking the window start; defaults to
                  one collect_interval before *now*.

     Fix: the old default ``beginTime=(time.time()-config.collect_interval)``
     was evaluated once at import time, so every later instantiation
     reused the stale timestamp.  Computing it lazily gives each
     instance a fresh default while keeping the call signature
     backward-compatible.
     """
     if beginTime is None:
         beginTime = time.time() - config.collect_interval
     self.interval = config.collect_interval
     self.recordTime = util.getIntervalTime(beginTime)
     # Trailing space added: message previously read "...run at2024-...".
     msg = "begin to run at " + time.strftime('%Y-%m-%d %A %X', time.localtime(self.recordTime))
     logger.info(msg)
     # print(msg) is valid in both Python 2 and 3 for a single value.
     print(msg)
     self.rmList = {}
     self.nmList = {}
     self.appList = {}
示例#3
0
 def updateWithNotSuccAppid(self, app):
     """Record a killed or failed application in the RM interval stats.

     app -- application dict from the RM REST API; must contain
            'startedTime' and 'stats'.
     """
     intervalStart = util.getIntervalTime(app['startedTime'])
     rmRecord = self.getRm(self.recordTime, intervalStart)

     rmRecord.inc("appNum", 1)
     status = app['stats']
     # KILLED and FAILED each get their own counter; anything else
     # only contributes to the total application count.
     if status == 'KILLED':
         rmRecord.inc("killedApp", 1)
     elif status == 'FAILED':
         rmRecord.inc("failedApp", 1)
示例#4
0
 def __init__(self, beginTime=None):
     """Initialize collector state for one collection window.

     beginTime -- epoch seconds marking the window start; defaults to
                  one collect_interval before *now*.

     Fix: the previous default ``time.time() - config.collect_interval``
     was evaluated once when the module was imported, freezing the
     default for the life of the process.  Computing it inside the
     body keeps the signature backward-compatible while making the
     default current on every call.
     """
     if beginTime is None:
         beginTime = time.time() - config.collect_interval
     self.interval = config.collect_interval
     self.recordTime = util.getIntervalTime(beginTime)
     # Trailing space added: message previously read "...run at2024-...".
     msg = "begin to run at " + time.strftime(
         '%Y-%m-%d %A %X', time.localtime(self.recordTime))
     logger.info(msg)
     # print(msg) behaves identically on Python 2 and 3 for one value.
     print(msg)
     self.rmList = {}
     self.nmList = {}
     self.appList = {}
示例#5
0
    def updateWithAppid(self, app, jobHistory, jobCounter):
        """Fold one finished application into the nm/rm interval stats
        and into its per-application record.

        app        -- application dict from the RM REST API
        jobHistory -- history-server response; may or may not contain 'job'
        jobCounter -- job counters keyed by counter name, or falsy if absent

        Fix: ``dict.has_key()`` was removed in Python 3; replaced with
        the ``in`` operator, which works on Python 2 and 3 alike.
        """
        # update nm and rm
        amNode = self.getNodeFromAddress(app['amHostHttpAddress'])
        appHappenTime = util.getIntervalTime(app['startedTime'])
        nm = self.getNm(amNode, self.recordTime, appHappenTime)
        rm = self.getRm(self.recordTime, appHappenTime)

        # the ApplicationMaster itself occupies one container on its node
        nm.inc("containerNum", 1)
        nm.inc("amNum", 1)

        rm.inc("appNum", 1)
        rm.inc("finishedApp", 1)

        if app['finalStatus'] != "SUCCEEDED":
            rm.inc("notSuccApp", 1)
        # end update
        appid = app["id"]
        appRecord = self.getAppidRecord(appid)
        keyFromApp = [
            "user", "name", "queue", "startedTime", "finishedTime", "state",
            "finalStatus"
        ]
        for key in keyFromApp:
            # timestamps pass through util.getSecondTime (presumably a
            # ms -> s conversion; confirm against util) before storage
            if key == "startedTime" or key == "finishedTime":
                appRecord.set(key, util.getSecondTime(app[key]))
            else:
                appRecord.set(key, app[key])
        # TODO: record the real attempt count instead of hard-coding 1
        appRecord.set("attemptNumber", 1)
        keyFromHistory = [
            "mapsTotal", "mapsCompleted", "successfulMapAttempts",
            "killedMapAttempts", "failedMapAttempts", "avgMapTime",
            "reducesTotal", "reducesCompleted", "successfulReduceAttempts",
            "killedReduceAttempts", "failedReduceAttempts", "avgReduceTime"
        ]
        if 'job' in jobHistory:
            for key in keyFromHistory:
                appRecord.set(key, jobHistory['job'][key])
        # TODO "localMap","rackMap"
        keyMapFromCounters = {
            "DATA_LOCAL_MAPS": "localMap",
            "RACK_LOCAL_MAPS": "rackMap",
            "FILE_BYTES_READ": "fileRead",
            "FILE_BYTES_WRITTEN": "fileWrite",
            "HDFS_BYTES_READ": "hdfsRead",
            "HDFS_BYTES_WRITTEN": "hdfsWrite"
        }
        if jobCounter:
            for (key, value) in keyMapFromCounters.items():
                appRecord.set(value, jobCounter[key]['total'])
示例#6
0
    def updateWithAttempt(self, attempt, attemptCounter):
        """Fold one task attempt into the rm and nm interval stats.

        attempt        -- task-attempt dict (type, state, times, node address)
        attemptCounter -- counter response for the attempt; may be falsy or
                          missing the expected counter groups

        Fix: ``dict.has_key()`` was removed in Python 3; replaced with the
        ``in`` operator.  The name-by-name if/elif chain over file-system
        counters is replaced by an equivalent lookup table.
        """
        # update nm's containerNum, mapNum, reduceNum
        node = self.getNodeFromAddress(attempt['nodeHttpAddress'])
        happenTime = util.getIntervalTime(attempt['startTime'])
        rm = self.getRm(self.recordTime, happenTime)

        nm = self.getNm(node, self.recordTime, happenTime)
        # *********************************
        nm.inc("containerNum", 1)

        if attempt['type'] == 'MAP':
            rm.inc("mapNum", 1)
            nm.inc("mapNum", 1)
            rm.inc("mapTime", attempt['elapsedTime'])
            nm.inc("mapTime", attempt['elapsedTime'])
            if attempt['state'] != "SUCCEEDED":
                rm.inc("failMap", 1)
                nm.inc("failMap", 1)
        elif attempt['type'] == 'REDUCE':
            rm.inc("reduceNum", 1)
            nm.inc("reduceNum", 1)
            rm.inc("reduceTime", attempt['elapsedTime'])
            nm.inc("reduceTime", attempt['elapsedTime'])
            if attempt['state'] != "SUCCEEDED":
                rm.inc("failReduce", 1)
                nm.inc("failReduce", 1)
        # *********************************
        if (not attemptCounter
                or 'jobTaskAttemptCounters' not in attemptCounter
                or 'taskAttemptCounterGroup'
                not in attemptCounter['jobTaskAttemptCounters']):
            return
        # counter name -> stat name; unrecognized counters are ignored,
        # matching the original if/elif chain
        statByCounter = {
            'FILE_BYTES_READ': "fileRead",
            'FILE_BYTES_WRITTEN': "fileWrite",
            'HDFS_BYTES_READ': "hdfsRead",
            'HDFS_BYTES_WRITTEN': "hdfsWrite",
        }
        for group in attemptCounter['jobTaskAttemptCounters'][
                'taskAttemptCounterGroup']:
            if group['counterGroupName'] != \
                    "org.apache.hadoop.mapreduce.FileSystemCounter":
                continue
            for counter in group['counter']:
                stat = statByCounter.get(counter['name'])
                if stat is not None:
                    rm.inc(stat, counter["value"])
                    nm.inc(stat, counter["value"])
示例#7
0
 def updateWithAttempt(self, attempt, attemptCounter):
     """Fold one task attempt into the rm and nm interval stats.

     attempt        -- task-attempt dict (type, state, times, node address)
     attemptCounter -- counter response for the attempt; may be falsy or
                       missing the expected counter groups

     Fix: ``dict.has_key()`` was removed in Python 3; replaced with the
     ``in`` operator, which works on Python 2 and 3 alike.
     """
     # update nm's containerNum, mapNum, reduceNum
     node = self.getNodeFromAddress(attempt['nodeHttpAddress'])
     happenTime = util.getIntervalTime(attempt['startTime'])
     rm = self.getRm(self.recordTime, happenTime)

     nm = self.getNm(node, self.recordTime, happenTime)
     # *********************************
     nm.inc("containerNum", 1)

     if attempt['type'] == 'MAP':
         rm.inc("mapNum", 1)
         nm.inc("mapNum", 1)
         rm.inc("mapTime", attempt['elapsedTime'])
         nm.inc("mapTime", attempt['elapsedTime'])
         if attempt['state'] != "SUCCEEDED":
             rm.inc("failMap", 1)
             nm.inc("failMap", 1)
     elif attempt['type'] == 'REDUCE':
         rm.inc("reduceNum", 1)
         nm.inc("reduceNum", 1)
         rm.inc("reduceTime", attempt['elapsedTime'])
         nm.inc("reduceTime", attempt['elapsedTime'])
         if attempt['state'] != "SUCCEEDED":
             rm.inc("failReduce", 1)
             nm.inc("failReduce", 1)
     # *********************************
     if (not attemptCounter
             or 'jobTaskAttemptCounters' not in attemptCounter
             or 'taskAttemptCounterGroup'
             not in attemptCounter['jobTaskAttemptCounters']):
         return
     for taskAttemptCounterGroup in attemptCounter['jobTaskAttemptCounters']['taskAttemptCounterGroup']:
         # only file-system counters feed the read/write stats
         if taskAttemptCounterGroup['counterGroupName'] != "org.apache.hadoop.mapreduce.FileSystemCounter":
             continue
         for counter in taskAttemptCounterGroup['counter']:
             if counter['name'] == 'FILE_BYTES_READ':
                 rm.inc("fileRead", counter["value"])
                 nm.inc("fileRead", counter["value"])
             elif counter['name'] == 'FILE_BYTES_WRITTEN':
                 rm.inc("fileWrite", counter["value"])
                 nm.inc("fileWrite", counter["value"])
             elif counter['name'] == 'HDFS_BYTES_READ':
                 rm.inc("hdfsRead", counter["value"])
                 nm.inc("hdfsRead", counter["value"])
             elif counter['name'] == 'HDFS_BYTES_WRITTEN':
                 rm.inc("hdfsWrite", counter["value"])
                 nm.inc("hdfsWrite", counter["value"])
示例#8
0
    def updateWithAppid(self, app, jobHistory, jobCounter):
        """Fold one finished application into the nm/rm interval stats
        and into its per-application record.

        app        -- application dict from the RM REST API
        jobHistory -- history-server response; may or may not contain 'job'
        jobCounter -- job counters keyed by counter name, or falsy if absent

        Fixes: ``dict.has_key()`` was removed in Python 3 (replaced with
        ``in``); dropped the stray trailing semicolons.
        """
        # update nm and rm
        amNode = self.getNodeFromAddress(app['amHostHttpAddress'])
        appHappenTime = util.getIntervalTime(app['startedTime'])
        nm = self.getNm(amNode, self.recordTime, appHappenTime)
        rm = self.getRm(self.recordTime, appHappenTime)

        # the ApplicationMaster itself occupies one container on its node
        nm.inc("containerNum", 1)
        nm.inc("amNum", 1)

        rm.inc("appNum", 1)
        rm.inc("finishedApp", 1)

        if app['finalStatus'] != "SUCCEEDED":
            rm.inc("notSuccApp", 1)
        # end update
        appid = app["id"]
        appRecord = self.getAppidRecord(appid)
        keyFromApp = ["user", "name", "queue", "startedTime", "finishedTime",
                      "state", "finalStatus"]
        for key in keyFromApp:
            # timestamps pass through util.getSecondTime (presumably a
            # ms -> s conversion; confirm against util) before storage
            if key == "startedTime" or key == "finishedTime":
                appRecord.set(key, util.getSecondTime(app[key]))
            else:
                appRecord.set(key, app[key])
        # TODO: record the real attempt count instead of hard-coding 1
        appRecord.set("attemptNumber", 1)
        keyFromHistory = ["mapsTotal", "mapsCompleted", "successfulMapAttempts",
                          "killedMapAttempts", "failedMapAttempts", "avgMapTime",
                          "reducesTotal", "reducesCompleted", "successfulReduceAttempts",
                          "killedReduceAttempts", "failedReduceAttempts", "avgReduceTime"]
        if 'job' in jobHistory:
            for key in keyFromHistory:
                appRecord.set(key, jobHistory['job'][key])
        # TODO "localMap","rackMap"
        keyMapFromCounters = {"DATA_LOCAL_MAPS": "localMap",
                              "RACK_LOCAL_MAPS": "rackMap",
                              "FILE_BYTES_READ": "fileRead",
                              "FILE_BYTES_WRITTEN": "fileWrite",
                              "HDFS_BYTES_READ": "hdfsRead",
                              "HDFS_BYTES_WRITTEN": "hdfsWrite"}
        if jobCounter:
            for (key, value) in keyMapFromCounters.items():
                appRecord.set(value, jobCounter[key]['total'])