Example #1
# Assumed imports: Hue's tests use nose, and big_filesizeformat lives in
# desktop.lib.view_util.
from nose.tools import assert_equal

from desktop.lib.view_util import big_filesizeformat


def test_big_filesizeformat():
    assert_equal("N/A", big_filesizeformat(None))
    assert_equal("N/A", big_filesizeformat(""))
    assert_equal("0 B", big_filesizeformat(0))
    assert_equal("17 B", big_filesizeformat(17))
    assert_equal("1.0 KB", big_filesizeformat(1024))
    assert_equal("1.0 MB", big_filesizeformat(1024 * 1024))
    assert_equal("1.1 GB", big_filesizeformat(int(1.1 * 1024 * 1024 * 1024)))
    assert_equal("2.0 TB", big_filesizeformat(2 * 1024 * 1024 * 1024 * 1024))
    # Floor division keeps the argument an int (3 * 1024**5 is even, so the
    # result is exact); plain / would produce a float under Python 3.
    assert_equal("1.5 PB", big_filesizeformat(3 * 1024 * 1024 * 1024 * 1024 * 1024 // 2))
Example #2
File: yarn_models.py  Project: shanglai/hue
# Excerpted from a class in Hue's yarn_models.py; assumes module-level imports
# along the lines of:
#   from django.utils.translation import ugettext as _
#   from desktop.lib.view_util import big_filesizeformat, format_duration_in_millis
def _get_metrics(self):
    self.metrics = {}
    try:
        executors = self.history_server_api.executors(self.jobId, self.attempt_id)
        if executors:
            # Column headers for the executor metrics table
            self.metrics['headers'] = [
                _('Executor Id'),
                _('Address'),
                _('RDD Blocks'),
                _('Storage Memory'),
                _('Disk Used'),
                _('Active Tasks'),
                _('Failed Tasks'),
                _('Complete Tasks'),
                _('Task Time'),
                _('Input'),
                _('Shuffle Read'),
                _('Shuffle Write'),
                _('Logs')
            ]
            self.metrics['executors'] = []
            for e in executors:
                self.metrics['executors'].append([
                    e.get('id', 'N/A'),
                    e.get('hostPort', ''),
                    e.get('rddBlocks', ''),
                    # "used / max" storage memory, both human-readable
                    '%s / %s' % (big_filesizeformat(e.get('memoryUsed', 0)),
                                 big_filesizeformat(e.get('maxMemory', 0))),
                    big_filesizeformat(e.get('diskUsed', 0)),
                    e.get('activeTasks', ''),
                    e.get('failedTasks', ''),
                    e.get('completedTasks', ''),
                    format_duration_in_millis(e.get('totalDuration', 0)),
                    big_filesizeformat(e.get('totalInputBytes', 0)),
                    big_filesizeformat(e.get('totalShuffleRead', 0)),
                    big_filesizeformat(e.get('totalShuffleWrite', 0)),
                    e.get('executorLogs', '')
                ])
    except Exception as e:  # Python 3 syntax; the original used the Python 2 form
        LOG.error('Failed to get Spark Job executors: %s' % e)
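The method normalizes raw executor records from the Spark history server into a headers/rows table, with every byte-valued field passed through big_filesizeformat. A hypothetical example of one such record, using only the keys the loop above actually reads (all values are invented for illustration):

executor = {
    'id': 'driver',
    'hostPort': '10.0.0.5:44721',
    'rddBlocks': 12,
    'memoryUsed': 536870912,        # renders as "512.0 MB"
    'maxMemory': 2147483648,        # renders as "2.0 GB"
    'diskUsed': 0,
    'activeTasks': 3,
    'failedTasks': 0,
    'completedTasks': 118,
    'totalDuration': 95000,         # milliseconds
    'totalInputBytes': 1073741824,  # renders as "1.0 GB"
    'totalShuffleRead': 0,
    'totalShuffleWrite': 0,
    'executorLogs': {'stdout': '...', 'stderr': '...'},
}

For this record the Storage Memory column becomes "512.0 MB / 2.0 GB", and Task Time is produced by format_duration_in_millis from the 95000 ms total.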