Example #1
def add_job(tools_list,
            jobs_xml=JOBS_XML,
            parameter=default_parameter,
            job_attrib={},
            resultXmlName="results"):
    '''
    Create a new job.
    @param tools_list: list of test tools
    @type tools_list: list
    '''
    try:
        lptlog.info('''
                        ~~~~~~~~~~~~~~~~~~~~
                         Start creating test job
                        ~~~~~~~~~~~~~~~~~~~~''')
        lptlog.debug("Specified tool set: %s" % utils.list_to_str(tools_list))
        lptxml.add_job(jobs_xml,
                       tools_list,
                       parameter,
                       job_attrib=job_attrib,
                       resultXmlName=resultXmlName)
        lptlog.info('''
                        ++++++++++++++++++
                         Create test job: PASS
                        ++++++++++++++++++''')
    except CreateJobException as e:
        lptlog.debug(e)
        lptlog.error('''
                        ++++++++++++++++++
                         Create test job: FAIL
                        ++++++++++++++++++''')
Example #2
    def create_job(self,
                   tools_list,
                   parameter,
                   job_attrib={},
                   resultXmlName="results"):
        '''Build the jobs.xml structure.
        '''
        DateString = datetime.datetime.now().strftime('%y%m%d%H%M%S')
        #results = 'result_%s.xml' % DateString
        results = '%s_%s.xml' % (resultXmlName, DateString)

        job = self.create_node(
            'job', dict({
                'id': DateString,
                'status': "N/A"
            }, **job_attrib))
        lptlog.info('Job ID: %s' % DateString)

        self.create_element(job, 'resultsDB', results)
        lptlog.info('xml results file: %s' % results)

        lptlog.debug("Creating parameters")
        conftoxml = ConfigToXml(parameter, self.xml_file)
        conftoxml.add_test_group(tools_list)
        try:
            conftoxml.add_configparser_node(job, 'tool', {'status': 'no'})
        except Exception as e:
            #lptlog.exception('failed to convert parameter.conf to xml')
            #lptlog.error('failed to convert parameter.conf to xml')
            raise CreatNodeError('Failed to convert parameter.conf to xml: %s' % e)
Example #3
    def __match_index(self, file):

        if not os.path.isfile(file):
            return []

        lptlog.debug("在%s中搜索测试指标" % file)
        results_lines = utils.read_all_lines(file)

        result_list = []        

        labels = ('TCP_STREAM','UDP_STREAM', 'TCP_RR', 'UDP_RR', 'TCP_CRR')
        parallel_template = {'parallels': '1', 'parallel': '1', 'iter': '1', 'times': '2'}        

        result1 = (results_lines[6].split()[4],results_lines[13].split()[3],results_lines[21].split()[5],results_lines[29].split()[5],results_lines[37].split()[5])
        result2 = (results_lines[45].split()[4],results_lines[52].split()[3],results_lines[60].split()[5],results_lines[68].split()[5],results_lines[76].split()[5])
        tmp_list = []
        parallel_dict = copy.deepcopy(parallel_template)
        tmp_list.append(parallel_dict)
        tmp_list.append(self.dict_generator(labels,result1))
        result_list.append(tmp_list)
        tmp_list = []
        parallel_dict = copy.deepcopy(parallel_template)
        parallel_dict['iter'] = '2'
        tmp_list.append(parallel_dict)
        tmp_list.append(self.dict_generator(labels,result2))
        result_list.append(tmp_list)
        tmp_list = []
        parallel_dict = copy.deepcopy(parallel_template)
        parallel_dict['iter'] = 'Average'
        tmp_list.append(parallel_dict)
        tmp_list.append(self.dict_average(result_list[0][1],result_list[1][1]))
        result_list.append(tmp_list)
        return result_list
Example #4
def run(job_id=None, tools_list=None, jobs_xml=JOBS_XML, format='txt', clean=False, REBOOT=False):
    jobs = lptxml.Jobs(jobs_xml)
    if job_id is None:
        try:
            job_node = jobs.get_new_job()
        except IndexError as e:
            lptlog.debug("Job count is 0, expected non-zero")
            job_node = None
Example #5
 def check_tool_result_node(self):
     """检查jobs.xml文件中, 对应job_node中是否包含tool节点
     """
     tool_node = lptxml.get_tool_node(self.tool, self.jobs_xml)
     if tool_node is None:
         lptlog.critical("测试任务中不包含 %s 测试工具,请核对测试文件" % self.tool)
         raise ValueError()
     else:
         lptlog.debug("检查到 %s 测试任务,开始运行 %s 测试程序" % (self.tool, self.tool))
     return tool_node
Example #6
 def check_tool_result_node(self):
     '''Check whether the corresponding job_node in jobs.xml contains a tool node.
     '''
     tool_node = lptxml.get_tool_node(self.tool, self.jobs_xml)
     if tool_node is None:
         lptlog.critical('The test job does not contain the %s test tool, please check the test file' % self.tool)
         raise ValueError()
     else:
         lptlog.debug("Found %s test job, starting the %s test program" % (self.tool, self.tool))
     return tool_node
Example #7
 def __compile_x11perf(self, x11perf_tar="x11perf-1.5.3.tar.gz"):
     x11perf_tar_path = os.path.join(self.tools_dir, x11perf_tar)
     lptlog.info("解压x11perf压缩包")
     x11perf_srcdir = utils.extract_tarball_to_dir(x11perf_tar_path, self.src_dir)
     lptlog.info("x11per源目录: %s " % x11perf_srcdir)
     
     os.chdir(x11perf_srcdir)
     if os.path.isdir(x11perf_srcdir):
         lptlog.debug("编译x11perf测试程序")
         self.compile(configure_status=True, make_status=True, make_install_status=True) 
     #返回根lpt根目录
     os.chdir(self.lpt_root)
Example #8
def run_shell(cmd, args_list=[]):
    '''
    Run a shell command via os.system.
    '''
    args_string_list = map(str, args_list)
    commands = cmd + " " + " ".join(args_string_list)
    try:
        lptlog.debug("执行命令:%s" % commands)
        os.system(commands)
    except Exception:
        #lptlog.exception("执行 %s 发生Error:" % commands)
        lptlog.error("执行 %s 发生Error:" % commands)
        raise RunShellError()
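A short usage sketch for run_shell (the command and arguments are arbitrary examples; the helper simply joins them and hands the string to os.system as shown above):

# Illustrative only: runs "tar -xzf archive.tar.gz -C /tmp"
run_shell("tar", ["-xzf", "archive.tar.gz", "-C", "/tmp"])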
Example #9
def run_shell(cmd, args_list=[]):
    """
        采用os.system执行shell
    """
    args_string_list = map(str, args_list)
    commands = cmd + " " + " ".join(args_string_list)
    try:
        lptlog.debug("执行命令:%s" % commands)
        os.system(commands)
    except Exception:
        # lptlog.exception("执行 %s 发生Error:" % commands)
        lptlog.error("执行 %s 发生Error:" % commands)
        raise RunShellError()
Example #10
    def create_result(self):
        '''Build result_list.
        '''

        #labels = ("Throughtput", "clients", "max_latency")
        labels = ("Throughtput", "max_latency")
        parallelstring = ",".join(map(str, self.parallels))

        r = re.compile(
            r"Throughput\s+(\d+.\d+)\s+MB/sec\s+(\d+)\s+clients\s+\d+\s+procs\s+max_latency=(\d+.\d+)\s",
            re.I)
        for parallel in self.parallels:
            sum_dic = {}
            for iter in range(self.times):
                tmp_result_file = os.path.join(
                    self.tmp_dir,
                    "%s_%s_%s.out" % (self.tool, parallel, iter + 1))
                if not os.path.isfile(tmp_result_file):
                    lptlog.warning("%s 不存在" % tmp_result_file)
                    continue
                result_lines = utils.read_all_lines(tmp_result_file)
                for line in result_lines:
                    key_dic = {}
                    if r.match(line):
                        m = r.match(line)
                        #result_list = [m.group(1), m.group(2), m.group(3)]
                        result_list = [m.group(1), m.group(3)]
                        result_tuple = tuple(
                            [utils.change_type(i) for i in result_list])
                        for l, v in zip(labels, result_tuple):
                            key_dic[l] = "%d" % v
                        if not sum_dic:
                            sum_dic = key_dic.copy()
                        else:
                            sum_dic = method.append_sum_dict(sum_dic, key_dic)
                        self.result_list.append([
                            self.create_result_node_attrib(
                                iter + 1, self.times, parallel,
                                self.parallels), key_dic
                        ])

            if sum_dic:
                parallel_average_dic = method.append_average_dict(
                    sum_dic, self.times)
                lptlog.debug("%d 并行求平均值:PASS" % parallel)
                self.result_list.append([
                    self.create_result_node_attrib("Average", self.times,
                                                   parallel, self.parallels),
                    parallel_average_dic
                ])
Example #11
    def __compile_x11perf(self, x11perf_tar="x11perf-1.5.3.tar.gz"):
        x11perf_tar_path = os.path.join(self.tools_dir, x11perf_tar)
        lptlog.info("解压x11perf压缩包")
        x11perf_srcdir = utils.extract_tarball_to_dir(x11perf_tar_path,
                                                      self.src_dir)
        lptlog.info("x11per源目录: %s " % x11perf_srcdir)

        os.chdir(x11perf_srcdir)
        if os.path.isdir(x11perf_srcdir):
            lptlog.debug("编译x11perf测试程序")
            self.compile(configure_status=True,
                         make_status=True,
                         make_install_status=True)
        #返回根lpt根目录
        os.chdir(self.lpt_root)
Example #12
    def __match_index(self, game, file):
        '''Search for metrics.
        @return: the result_list for the given game, including the average
        '''
        labels = ('initialised', 'completed', 'total')
        result_list = []
        result_dict = {}
        if not os.path.isfile(file):
            lptlog.debug("%s 不存在")
            return result_list

        r_init_time = re.compile(
            r'(?P<thread>\d+) threads initialised in (?P<initialised>\d+) usec'
        )
        r_complete_time = re.compile(
            r"(?P<games>\d+) games completed in (?P<completed>\d+) usec")

        results_lines = lutils.read_all_lines(file)
        init_time_list = []
        complete_time_list = []
        # get the initialisation time and completion time, returning two lists
        for line in results_lines:
            if r_init_time.match(line):
                init_time = r_init_time.match(line).group("initialised")
                init_time_list.append(init_time)
            if r_complete_time.match(line):
                complete_time = r_complete_time.match(line).group("completed")
                complete_time_list.append(complete_time)
        # average the initialisation times
        init_time_average = lutils.average_list(
            lutils.string_to_float(init_time_list), bits=0)
        # average the completion times
        complete_time_average = lutils.average_list(
            lutils.string_to_float(complete_time_list), bits=0)
        sum_time = init_time_average + complete_time_average
        # build the result dict
        for l, v in zip(labels,
                        (init_time_average, complete_time_average, sum_time)):
            result_dict[l] = "%d" % v

        # build the result node attributes
        result_node_attrib = self.create_result_node_attrib(
            "Average", self.times, game * 2, [i * 2 for i in self.games])

        result_list.append(result_dict)
        result_list.insert(0, result_node_attrib)

        return result_list
Example #13
    def __match_index(self, file):
        '''
        @return: test metrics, dict
        @attention: adapted in part from autotest code
        '''

        keylist = {}
        if not os.path.isfile(file):
            return []

        lptlog.debug("在%s中搜索测试指标" % file)
        results_lines = utils.read_all_lines(file)

        if self.testmode == "speed":
            #labels = ('write', 'rewrite', 'read', 'reread', 'randread','randwrite',
            #          'bkwdread', 'recordrewrite', 'strideread', 'fwrite',
            #         'frewrite', 'fread', 'freread')

            labels = ('write', 'rewrite', 'read', 'reread', 'randread',
                      'randwrite')
            for line in results_lines:
                fields = line.split()
                if len(fields) != 8:
                    continue
                lptlog.debug("line.split==8: %s" % line)
                try:
                    fields = tuple([int(i) for i in fields])
                except Exception:
                    continue

                for l, v in zip(labels, fields[2:]):
                    key_name = "%s" % l
                    keylist[key_name] = "%d" % v
        else:
            child_regexp = re.compile(
                r'Children see throughput for[\s]+([\d]+)[\s]+([\S]+|[\S]+[\s][\S]+)[\s]+=[\s]+([\w]+)*'
            )
            section = None
            w_count = 0
            for line in results_lines:
                line = line.strip()

                # Check for the beginning of a new result section
                match = child_regexp.search(line)

                if match:
                    # Extract the section name and the worker count
                    w_count = int(match.group(1))
                    lptlog.debug("w_count:%s" % w_count)
                    section = self.__get_section_name(match.group(2))
                    lptlog.debug("section:%s" % section)

                    # Output the appropriate keyval pair
                    #key_name = '%s-kids' % section
                    #keylist[key_name] = match.group(3)
                    keylist[section] = match.group(3)

        return keylist
Example #14
 def get_config_array(self, tool_node, key, defaultValue):
     """转换 1,2,4字符类型为list
     """
     try:
         getList = lptxml.get_tool_parameter(tool_node, key)
         lptlog.debug("获取 %s :%s" % (key, getList))
         if getList is None:
             getList = defaultValue
         getList = utils.check_int_list(getList)
     except Exception:
         lptlog.warning("获取 %s error,将采用默认值" % key)
         getList = defaultValue
     finally:
         if not isinstance(getList, list):
             raise TypeError(getList)
         return getList
Example #15
 def clean(self):
     '''Clean up the test environment.
     '''
     try:
         if self.tar_src_dir:
             shutil.rmtree(self.tar_src_dir)
             lptlog.info("清理源目录 %s :PASS" % self.tar_src_dir)
         if self.processBin is not None and os.path.exists(self.processBin):
             os.remove(self.processBin)
             lptlog.info("清理Bin文件 %s :PASS" % self.processBin)
         if self.processBin2 is not None and os.path.exists(self.processBin2):
             os.remove(self.processBin2)
             lptlog.info("清理Bin文件 %s :PASS" % self.processBin2)
     except Exception, e:
         lptlog.warning('清理临时目录或文件:FAIL')
         lptlog.debug(e)
Example #16
 def get_config_array(self, tool_node, key, defaultValue):
     '''Convert a comma-separated string such as "1,2,4" into a list.
     '''
     try:
         getList = lptxml.get_tool_parameter(tool_node,  key)
         lptlog.debug('Got %s: %s' % (key, getList))
         if getList is None:
             getList = defaultValue
         getList = utils.check_int_list(getList)
     except Exception:
         lptlog.warning('Failed to get %s, falling back to the default value' % key)
         getList = defaultValue
     finally:
         if not isinstance(getList, list):
             raise TypeError(getList)
         return getList
Example #17
 def get_config_value(self, tool_node, key, defaultValue, valueType=str):
     '''Read the value from parameters.conf; if the read fails, assign defaultValue for key.
     '''
     try:
         getValue = lptxml.get_tool_parameter(tool_node, key)
         lptlog.debug('Got %s: %s' % (key, getValue))
         if getValue is None:
             getValue = defaultValue
     except Exception:
         lptlog.warning("获取 %s error,将采用默认值: %s" %(key, defaultValue))
         getValue = defaultValue
     finally:
         try:
             getValue = valueType(getValue)
         except Exception:
             raise FormatterError(getValue)
         return getValue
Example #18
 def clean(self):
     '''Clean up the test environment.
     '''
     try:
         if self.tar_src_dir:
             shutil.rmtree(self.tar_src_dir)
             lptlog.info("清理源目录 %s :PASS" % self.tar_src_dir)
         if self.processBin is not None and os.path.exists(self.processBin):
             os.remove(self.processBin)
             lptlog.info("清理Bin文件 %s :PASS" % self.processBin)
         if self.processBin2 is not None and os.path.exists(
                 self.processBin2):
             os.remove(self.processBin2)
             lptlog.info("清理Bin文件 %s :PASS" % self.processBin2)
     except Exception, e:
         lptlog.warning('清理临时目录或文件:FAIL')
         lptlog.debug(e)
Example #19
 def get_config_value(self, tool_node, key, defaultValue, valueType=str):
     """从parameters.conf文件中读取Value, 如果读取失败,赋予key, defaultValue
     """
     try:
         getValue = lptxml.get_tool_parameter(tool_node, key)
         lptlog.debug("获取 %s : %s" % (key, getValue))
         if getValue is None:
             getValue = defaultValue
     except Exception:
         lptlog.warning("获取 %s error,将采用默认值: %s" % (key, defaultValue))
         getValue = defaultValue
     finally:
         try:
             getValue = valueType(getValue)
         except Exception:
             raise FormatterError(getValue)
         return getValue
Example #20
    def __match_index(self, file):
        '''
        @return: test metrics, dict
        @attention: adapted in part from autotest code
        '''

        keylist = {}
        if not os.path.isfile(file):
            return []

        lptlog.debug("在%s中搜索测试指标" % file)
        results_lines = lutils.read_all_lines(file)

        if self.testmode == "speed":
        #labels = ('write', 'rewrite', 'read', 'reread', 'randread','randwrite', 
        #          'bkwdread', 'recordrewrite', 'strideread', 'fwrite', 
         #         'frewrite', 'fread', 'freread')

            labels = ('write', 'rewrite', 'read', 'reread', 'randread','randwrite')
            for line in results_lines:
                fields = line.split()
                if len(fields) != 8:
                    continue
                lptlog.debug("line.split==8: %s" %  line)
                try:
                    fields = tuple([int(i) for i in fields])
                except Exception:
                    continue

                for l, v in zip(labels, fields[2:]):
                    key_name = "%s" % l
                    keylist[key_name] = "%d" % v
        else:
            child_regexp  = re.compile(r'Children see throughput for[\s]+([\d]+)[\s]+([\S]+|[\S]+[\s][\S]+)[\s]+=[\s]+([\w]+)*')
            section = None
            w_count = 0
            for line in results_lines:
                #line = line.strip()

                # Check for the beginning of a new result section
                match = child_regexp.search(line)

                if match:
                    # Extract the section name and the worker count
                    w_count = int(match.group(1))
                    lptlog.debug("w_count:%s" % w_count)
                    section = self.__get_section_name(match.group(2))
                    lptlog.debug("section:%s" % section)

                    # Output the appropriate keyval pair
                    #key_name = '%s-kids' % section
                    #keylist[key_name] = match.group(3)
                    keylist[section] = match.group(3)

        return keylist
Example #21
    def __match_index(self, file):

        if not os.path.isfile(file):
            return []

        lptlog.debug("在%s中搜索测试指标" % file)
        results_lines = utils.read_all_lines(file)

        labels = ('io', 'aggrb', 'minb', 'maxb', 'mint','maxt')
        parallel_template = {'parallels': '1,2,3,4', 'parallel': '1', 'iter': '1', 'times': '2'}        

        result_list = []
        count = 0
        for line in results_lines:
            if 'READ:' in line:
                tmp_list = []
                parallel_dict = copy.deepcopy(parallel_template)
                parallel_dict['parallel'] = str(count / 2 + 1)
                parallel_dict['iter'] = 'READ'
                tmp_list.append(parallel_dict)
                tmp_list.append(self.dict_generator(labels,line))
                result_list.append(tmp_list)
                count = count + 1
            elif 'WRITE:' in line:
                tmp_list = []
                parallel_dict = copy.deepcopy(parallel_template)
                parallel_dict['parallel'] = str(count / 2 + 1)
                parallel_dict['iter'] = 'WRITE'
                tmp_list.append(parallel_dict)
                tmp_list.append(self.dict_generator(labels,line))
                result_list.append(tmp_list)
                count = count + 1
                if count in [2,4,6,8]:
                    tmp_list = []
                    dict2 = result_list[-1][1]
                    dict1 = result_list[-2][1]
                    parallel_dict = copy.deepcopy(parallel_template)
                    parallel_dict['parallel'] = str(count / 2)
                    parallel_dict['iter'] = 'Average'
                    tmp_list.append(parallel_dict)
                    tmp_list.append(self.dict_average(dict1, dict2))
                    result_list.append(tmp_list)

        return result_list
Example #22
    def get_config_devices(self, tool_node, testdir):
        """获取测试设备, 并挂载到testdir
        """
        devices = self.get_config_value(tool_node, "devices", "Nodevice", valueType=str)
        lptlog.info("测试分区: %s " % devices)
        if not os.path.exists(devices):
            lptlog.debug("%s 不存在" % devices)
            return False
        else:
            try:
                if not os.path.ismount(testdir):
                    utils.system("mount %s %s" % (devices, testdir))
                else:
                    lptlog.debug("%s 已经挂载到 %s 目录" % (devices, testdir))

                return True
            except Exception:
                lptlog.warning("mount %s %s 失败,请确认分区是否已经格式化!!" % (devices, testdir))
                return False
Example #23
 def get_config_devices(self, tool_node, testdir):
     '''Get the test device and mount it on testdir.
     '''
     devices = self.get_config_value(tool_node, "devices", "Nodevice", valueType=str)
     lptlog.info("测试分区: %s " % devices)
     if not os.path.exists(devices):
         lptlog.debug("%s 不存在" % devices)
         return False
     else:
         try:
             if not os.path.ismount(testdir):
                 utils.system("mount %s %s" % (devices, testdir))
             else:
                 lptlog.debug("%s 已经挂载到 %s 目录" % (devices, testdir))
                 
             return True
         except Exception:
             lptlog.warning("mount %s %s 失败,请确认分区是否已经格式化!!" % (devices, testdir))
             return False
Example #24
 def setup(self):
     '''Compile the source code and set up the program.
     '''
     if not self.check_bin(self.processBin):
         self.tar_src_dir = self.extract_bar()
         os.chdir(os.path.join(self.tar_src_dir, "src/current"))
         arch = sysinfo.get_current_kernel_arch()
         lptlog.debug("硬件架构: %s " % arch)
         if (arch == 'ppc'):
             para = 'linux-powerpc'
         elif (arch == 'ppc64'):
             para = 'linux-powerpc64'
         elif (arch == 'x86_64'):
             para = 'linux-AMD64'
         elif (arch == "mips64el"):
             para = "linux-AMD64"
         else:
             para = "linux"
             
         self.compile(make_status=True,  make_para=para)
         utils.copy(os.path.join(self.tar_src_dir, 'src/current/iozone'), self.processBin)
Example #25
 def clean(self):
     '''Clean up the test environment.
     '''
     try:
         if self.tar_src_dir:
             lptlog.info(self.tar_src_dir)
             shutil.rmtree(self.tar_src_dir)
             lptlog.info("清理源目录 %s :PASS" % self.tar_src_dir)
         if self.processBin is not None and os.path.exists(self.processBin):
             os.remove(self.processBin)
             lptlog.info("清理Bin文件 %s :PASS" % self.processBin)
         if self.processBin2 is not None and os.path.exists(
                 self.processBin2):
             os.remove(self.processBin2)
             lptlog.info("清理Bin文件 %s :PASS" % self.processBin2)
     except Exception as e:
         lptlog.warning('清理临时目录或文件:FAIL')
         lptlog.debug(e)
         #raise CleanError, e
     finally:
         os.chdir(self.lpt_root)
Example #26
    def __match_index(self, game, file):
        '''Search for metrics.
        @return: the result_list for the given game, including the average
        '''
        labels = ('initialised', 'completed', 'total')
        result_list = []
        result_dict = {}
        if not os.path.isfile(file):
            lptlog.debug("%s 不存在")
            return result_list
    
        r_init_time = re.compile(r'(?P<thread>\d+) threads initialised in (?P<initialised>\d+) usec')
        r_complete_time = re.compile(r"(?P<games>\d+) games completed in (?P<completed>\d+) usec")

        results_lines = utils.read_all_lines(file)
        init_time_list = []
        complete_time_list = []
        # get the initialisation time and completion time, returning two lists
        for line in results_lines:
            if r_init_time.match(line):
                init_time = r_init_time.match(line).group("initialised")
                init_time_list.append(init_time)
            if r_complete_time.match(line):
                complete_time = r_complete_time.match(line).group("completed")
                complete_time_list.append(complete_time)
        # average the initialisation times
        init_time_average = utils.average_list(utils.string_to_float(init_time_list), bits=0)
        # average the completion times
        complete_time_average = utils.average_list(utils.string_to_float(complete_time_list), bits=0)
        sum_time = init_time_average + complete_time_average
        # build the result dict
        for l, v in zip(labels, (init_time_average, complete_time_average, sum_time)):
            result_dict[l] = "%d" % v
        # build the result node attributes
        result_node_attrib = self.create_result_node_attrib("Average", self.times, game*2, [i*2 for i in self.games])
        
        result_list.append(result_dict)
        result_list.insert(0, result_node_attrib)
    
        return result_list
Example #27
 def create_result(self):
     '''Build result_list.
     '''
     key_dic = {}
     sum_dic = {}
     
     file = os.path.join(self.tmp_dir, "bonnie.out")
     lptlog.debug("读取 %s 文件" % file)
     if not os.path.isfile(file):
         raise IOError, "open %s Error" % file
     else:
         results_lines = utils.read_all_lines(file)
     
     labels = ["name","file_size","putc","putc_cpu","put_block","put_block_cpu","rewrite","rewrite_cpu",
             "getc","getc_cpu","get_block","get_block_cpu","seeks","seeks_cpu","num_files","seq_create",
             "seq_create_cpu","seq_stat","seq_stat_cpu","seq_del","seq_del_cpu","ran_create","ran_create_cpu",
             "ran_stat","ran_stat_cpu","ran_del","ran_del_cpu" ]
     keys = labels
     keys.pop(0)
     keys.pop(0)
     keys.pop(12)
     keys = tuple(keys)
     
     iter=0
     for line in results_lines:
         fields = line.split(',')
         if len(fields) != 27:
             continue
         if fields[0] == "name":
             continue
         else:
             iter += 1
             
         lptlog.debug("line.split==27: %s" %  line)
         
         #attrib_dic = {'iter':str(iter), 'times':str(self.times), 'parallel':'1', 'parallels':'1' ,
                       #"name":fields[0], "filesize":fields[1], "num_files":fields[14]}
         attrib_dic = self.create_result_node_attrib(iter, self.times, 1, [1])
         
         # remove redundant fields
         
         fields.pop(0)
         fields.pop(0)
         fields.pop(12)
         fields = tuple([utils.change_type(i) for i in fields])
         
         for l, v in zip(keys, fields):
             key_dic[l] = "%d" % v 
         
         if not sum_dic:
             sum_dic = key_dic.copy()
         else:
             sum_dic = method.append_sum_dict(sum_dic, key_dic) 
         self.result_list.append([attrib_dic, key_dic])
              
     if sum_dic:
         parallel_average_dic = method.append_average_dict(sum_dic, self.times)
         lptlog.debug("Average for parallel 1: PASS")
         sum_attrib_dic = self.create_result_node_attrib("Average", self.times, 1, [1])
         self.result_list.append([sum_attrib_dic, parallel_average_dic])
Example #28
 def create_job(self, tools_list, parameter, job_attrib={}, resultXmlName="results"):
     '''Build the jobs.xml structure.
     '''
     DateString = datetime.datetime.now().strftime('%y%m%d%H%M%S')
     #results = 'result_%s.xml' % DateString
     results = '%s_%s.xml' % (resultXmlName, DateString)
     
     job = self.create_node('job', dict({'id':DateString, 'status':"N/A"}, **job_attrib))
     lptlog.info('Job ID: %s' % DateString)
     
     self.create_element(job, 'resultsDB', results)
     lptlog.info('xml results file: %s' % results)
     
     lptlog.debug("创建参数")
     conftoxml = ConfigToXml(parameter, self.xml_file)
     conftoxml.add_test_group(tools_list)
     try:
         conftoxml.add_configparser_node(job, 'tool', {'status':'no'})
     except Exception as e:
         #lptlog.exception('failed to convert parameter.conf to xml')
         #lptlog.error('failed to convert parameter.conf to xml')
         raise CreatNodeError('Failed to convert parameter.conf to xml: %s' % e)
Example #29
    def setup(self):
        '''Compile the source code and set up the program.
        '''
        if not self.check_bin(self.processBin):
            self.tar_src_dir = self.extract_bar()
            os.chdir(os.path.join(self.tar_src_dir, "src/current"))
            arch = sysinfo.get_current_kernel_arch()
            lptlog.debug("硬件架构: %s " % arch)
            if (arch == 'ppc'):
                para = 'linux-powerpc'
            elif (arch == 'ppc64'):
                para = 'linux-powerpc64'
            elif (arch == 'x86_64'):
                para = 'linux-AMD64'
            elif (arch == "mips64el"):
                para = "linux-AMD64"
            else:
                para = "linux"

            self.compile(make_status=True, make_para=para)
            utils.copy(os.path.join(self.tar_src_dir, 'src/current/iozone'),
                       self.processBin)
Example #30
def add_job(tools_list, jobs_xml=JOBS_XML, parameter=default_parameter, job_attrib={}, resultXmlName="results"):
    '''
    Create a new job.
    @param tools_list: list of test tools
    @type tools_list: list
    '''
    try:
        lptlog.info('''
                        ~~~~~~~~~~~~~~~~~~~~
                         Start creating test job
                        ~~~~~~~~~~~~~~~~~~~~''')
        lptlog.debug("Specified tool set: %s" % utils.list_to_str(tools_list))
        lptxml.add_job(jobs_xml, tools_list, parameter, job_attrib=job_attrib, resultXmlName=resultXmlName)
        lptlog.info('''
                        ++++++++++++++++++
                         Create test job: PASS
                        ++++++++++++++++++''')
    except CreateJobException as e:
        lptlog.debug(e)
        lptlog.error('''
                        ++++++++++++++++++
                         Create test job: FAIL
                        ++++++++++++++++++''')
Example #31
def hash_file(filename, size=None, method="md5"):
    """
    Calculate the hash of filename.
    If size is not None, limit to first size bytes.
    Throw exception if something is wrong with filename.
    Can be also implemented with bash one-liner (assuming size%1024==0):
    dd if=filename bs=1024 count=size/1024 | sha1sum -

    :param filename: Path of the file that will have its hash calculated.
    :param method: Method used to calculate the hash. Supported methods:
            * md5
            * sha1
    :return: Hash of the file, if something goes wrong, return None.
    """
    chunksize = 4096
    fsize = os.path.getsize(filename)

    if not size or size > fsize:
        size = fsize
    try:
        hash = hash_method(method)
    except ValueError:
        lptlog.error("Unknown hash type %s, returning None" % method)
        return None

    f = open(filename, 'rb')

    while size > 0:
        if chunksize > size:
            chunksize = size
        data = f.read(chunksize)
        if len(data) == 0:
            lptlog.debug("Nothing left to read but size=%d" % size)
            break
        hash.update(data)
        size -= len(data)
    f.close()
    return hash.hexdigest()
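A usage sketch for hash_file, assuming hash_method maps "md5"/"sha1" to the matching hashlib constructor as the docstring implies; the path and size are illustrative:

# Hash only the first 1 MiB of an (illustrative) image file with SHA-1
digest = hash_file("/tmp/example.iso", size=1024 * 1024, method="sha1")
if digest is not None:
    lptlog.info("sha1 of first 1 MiB: %s" % digest)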
Example #32
def hash_file(filename, size=None, method="md5"):
    """
    Calculate the hash of filename.
    If size is not None, limit to first size bytes.
    Throw exception if something is wrong with filename.
    Can be also implemented with bash one-liner (assuming size%1024==0):
    dd if=filename bs=1024 count=size/1024 | sha1sum -

    :param filename: Path of the file that will have its hash calculated.
    :param method: Method used to calculate the hash. Supported methods:
            * md5
            * sha1
    :return: Hash of the file, if something goes wrong, return None.
    """
    chunksize = 4096
    fsize = os.path.getsize(filename)

    if not size or size > fsize:
        size = fsize
    f = open(filename, "rb")

    try:
        hash = hash_method(method)
    except ValueError:
        lptlog.error("Unknown hash type %s, returning None" % method)

    while size > 0:
        if chunksize > size:
            chunksize = size
        data = f.read(chunksize)
        if len(data) == 0:
            lptlog.debug("Nothing left to read but size=%d" % size)
            break
        hash.update(data)
        size -= len(data)
    f.close()
    return hash.hexdigest()
Example #33
 def create_result(self):
     '''Build result_list.
     '''
     
     #labels = ("Throughtput", "clients", "max_latency")
     labels = ("Throughtput",  "max_latency")
     parallelstring = ",".join(map(str, self.parallels))
     
     r = re.compile(r"Throughput\s+(\d+.\d+)\s+MB/sec\s+(\d+)\s+clients\s+\d+\s+procs\s+max_latency=(\d+.\d+)\s", re.I)
     for parallel in self.parallels:
         sum_dic = {}
         for iter in range(self.times):
             tmp_result_file = os.path.join(self.tmp_dir, "%s_%s_%s.out" % (self.tool, parallel, iter+1))
             if not os.path.isfile(tmp_result_file):
                 lptlog.warning("%s 不存在" % tmp_result_file)
                 continue
             result_lines = utils.read_all_lines(tmp_result_file)
             for line in result_lines:
                 key_dic = {}
                 if r.match(line):
                     m = r.match(line)
                     #result_list = [m.group(1), m.group(2), m.group(3)]
                     result_list = [m.group(1), m.group(3)]
                     result_tuple = tuple([utils.change_type(i) for i in result_list])
                     for l, v in zip(labels, result_tuple):
                         key_dic[l] = "%d" % v
                     if not sum_dic:
                         sum_dic = key_dic.copy()
                     else:
                         sum_dic = method.append_sum_dict(sum_dic, key_dic)
                     self.result_list.append([self.create_result_node_attrib(iter+1, self.times, parallel, self.parallels), key_dic])
                     
         if sum_dic:
             parallel_average_dic = method.append_average_dict(sum_dic, self.times)
             lptlog.debug("Average for parallel %d: PASS" % parallel)
             self.result_list.append([self.create_result_node_attrib("Average", self.times, parallel, self.parallels), parallel_average_dic])
Example #34
def run_cmd(command, args=[], timeout=None, ignore_status=False, output_tee=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param output_tee: if set, return the captured stdout instead of 0.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    :return: the command output if output_tee is set, otherwise 0

    :raise CalledProcessError: the exit code of the command execution was not 0
    """

    if not isinstance(args, list):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a list.')
    for arg in args:
        command += ' "%s"' % sh_escape(arg)
    lptlog.debug("执行命令:%s" % command)
    p = subprocess.Popen(command,  stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, output_err = p.communicate()
    
    if timeout and isinstance(timeout, int):
        time.sleep(timeout)
        retcode = p.poll()
        if retcode:
            lptlog.error('Ran for %d seconds and %s is not responding' % (timeout, command))
            raise CalledProcessError(retcode, command)
    
    if p.wait():
        lptlog.error("执行命令: %s, 输出错误信息:\n%s" %(command, output_err))
        if not ignore_status:
            raise CalledProcessError(p.poll(), command)
    elif output_tee:
        lptlog.debug("执行命令: %s, 输出信息:\n %s" %(command, output))
        return output 
    else:
        lptlog.debug("执行命令: %s, 输出信息:\n %s" %(command, output))
        return 0
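A usage sketch for run_cmd (the command is illustrative; as the code above shows, each element of args is escaped and appended in double quotes, and output_tee toggles whether the captured stdout is returned instead of 0):

# Illustrative only: lists /tmp and returns the captured output
out = run_cmd("ls -l", args=["/tmp"], output_tee=True)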
Example #35
def run_cmd(command, args=[], timeout=None, ignore_status=False, output_tee=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param output_tee: if set, return the captured stdout instead of 0.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    :return: the command output if output_tee is set, otherwise 0

    :raise CalledProcessError: the exit code of the command execution was not 0
    """

    if not isinstance(args, list):
        raise TypeError('Got a string for the "args" keyword argument, ' "need a list.")
    for arg in args:
        command += ' "%s"' % sh_escape(arg)
    lptlog.debug("执行命令:%s" % command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    output, output_err = p.communicate()

    if timeout and isinstance(timeout, int):
        time.sleep(timeout)
        retcode = p.poll()
        if retcode:
            lptlog.error("运行 %d 秒,%s 程序无响应" % (timeout, command))
            raise CalledProcessError()

    if p.wait():
        lptlog.error("执行命令: %s, 输出错误信息:\n%s" % (command, output_err))
        if not ignore_status:
            raise CalledProcessError(p.poll(), command)
    elif output_tee:
        lptlog.debug("执行命令: %s, 输出信息:\n %s" % (command, output))
        return output
    else:
        lptlog.debug("执行命令: %s, 输出信息:\n %s" % (command, output))
        return 0
Example #36
 def run(self):
 
     tool_node = self.check_tool_result_node()
         
     lptlog.info("----------开始获取测试参数")
     
     cmd = self.processBin
     args = ['-u', os.getuid(),  '-m', 'lpt', '-q']
     
         #获取测试目录
     testdir = self.get_config_value(tool_node, "testdir", os.path.join(self.tmp_dir, "testdir"), valueType=str)
     if os.path.exists(testdir):
         if not os.path.isdir(testdir):
             lptlog.warning("%s 不是有效目录,将采用 /home/%u/testdir 目录" % testdir)
             testdir = "/home/%s/testdir" % getpass.getuser()
             os.makedirs(testdir, stat.S_IRWXU)
     else:
         os.makedirs(testdir, stat.S_IRWXU)
         testdir = os.path.abspath(testdir)
     args.append("-d")
     args.append(testdir)
     lptlog.info("测试目录: %s" % testdir)
         
         #获取设备
         
     devices = self.get_config_value(tool_node, "devices", "Nodevice", valueType=str)
     lptlog.info("测试分区: %s " % devices)
     if not os.path.exists(devices):
         lptlog.debug("%s 不存在" % devices)
     else:
         try:
             if not os.path.ismount(testdir):
                 util.system("mount %s %s" % (devices, testdir))
             else:
                 lptlog.debug("%s 已经挂载到 %s 目录" % (devices, testdir))
         except Exception:
             lptlog.warning("mount %s %s 失败,请确认分区是否已经格式化!!" % (devices, testdir))
     
     # get the test memory size
     memory = self.get_config_value(tool_node, "memory", sysinfo.get_memory_size(), valueType=str)
     if not utils.check_size_format(memory, match=r"\d+[kKmMgGtT]?"):
         lptlog.warning("Invalid test memory configuration, using system memory instead")
         memory = sysinfo.get_memory_size()
         lptlog.debug("System memory size: %s" % memory)
     
               
     # get the test file size
     filesize = self.get_config_value(tool_node, "filesize", "10g", valueType=str)
     if not utils.check_size_format(filesize, match=r"\d+[kKmMgGtT]?"):
         lptlog.warning("%s has an invalid format, using the default size 10g" % filesize)
         filesize = "10g"
         
     if float(utils.custom_format(memory)) * 2 > float(utils.custom_format(filesize)):
         lptlog.warning("Test requirement: test memory * 2 should be smaller than the file size, but it is not; reducing test memory to 1/2 of the test file size")
         memory = float(utils.custom_format(filesize))/2
         memory = utils.custom_format(memory, auto=True)

     lptlog.info("Test memory size: %s" % memory)
     lptlog.info("Test file size: %s" % filesize)
     args.append("-r")
     args.append(memory)
     
     # get the block size
     blocksize = self.get_config_value(tool_node, "blocksize", "4k", valueType=str)
     if not utils.check_size_format(blocksize, match=r"\d+k?"):
         lptlog.warning("blocksize=%s has an invalid format, using the default size 8k" % blocksize)
         blocksize = "8k"

     args.append("-s")
     args.append("%s:%s" % (filesize, blocksize))
     lptlog.info("Test block size: %s" % blocksize)
     
     small_files_num = self.get_config_value(tool_node, "small_files_num", 0, valueType=int)                
     small_file_size = self.get_config_value(tool_node, "small_file_size", "1k", valueType=str) 
     if not small_file_size in ("1k", "2k", "4k", "8k", "16k", "32k", "64k", "128k", "256k"):
         lptlog.warning("small_file_size=%s 格式error,请输入整型数字, 将采用默认值1k" % small_file_size)
     else:
         small_file_size = "1k"
     
     small_files_dirs = self.get_config_value(tool_node, "small_files_dirs", 0, valueType=int)
                     
     if small_files_num == "0":
         args.append("-n")
         args.append("0")
     else:
         args.append("-n")
         args.append("%s:%s:%s:%d" %(small_files_num, small_file_size, small_file_size, small_files_dirs))
     
     lptlog.info("小文件数: %s k, 小文件大小: %s, 测试目录: %s" % (small_files_num, small_file_size,  small_files_dirs))
         
     self.times = self.get_config_value(tool_node, "times", 5, valueType=int)
     lptlog.info("测试次数: %d " % self.times)
     args.append("-x")
     args.append("%d" % self.times)            
         
     no_buffer = self.get_config_value(tool_node, "no_buffer", "False", valueType=str)
    
     if no_buffer == "True":
         lptlog.info("no_buffer=True")
         args.append("-b")
    
     direct_io = self.get_config_value(tool_node, "direct_io", "False")
     if direct_io == "True":
         args.append("-D")
         lptlog.info("direct_io=True")
     
            
     # run the test program and save the results to the tmp directory
     self.mainParameters["parameters"] = utils.list_to_str(["bonnie++"]+args, ops=" ")
     # drop the buffer cache
     method.clean_buffer()
     lptlog.info("----------Running the test script")
     utils.run_shell2(self.processBin, args_list=args, file=os.path.join(self.tmp_dir, "bonnie.out"))
     lptlog.info("%s test data is saved in %s" % (self.tool, os.path.join(self.tmp_dir, "bonnie.out")))
Example #37
def run(job_id=None,
        tools_list=None,
        jobs_xml=JOBS_XML,
        format='txt',
        clean=False,
        REBOOT=False):
    jobs = lptxml.Jobs(jobs_xml)
    if job_id is None:
        try:
            job_node = jobs.get_new_job()
        except IndexError as e:
            lptlog.debug("job任务数为0, 期望非0")
            job_node = None
    else:

        #python 2.7
        #job_node = jobs.search_job_node("job[@id='%s']" % job_id)
        #python 2.6
        job_nodes = jobs.search_job_nodes("job")
        if job_nodes is None:
            lptlog.debug("job任务数为0, 期望非0")
            job_node = None
        else:
            job_filter_nodes = [
                x for x in job_nodes if x.get("id") == str(job_id)
            ]
            if job_filter_nodes:
                job_node = job_filter_nodes[0]
            else:
                lptlog.debug("%s id不存在,请核对JOB ID" % job_id)
                job_node = None

    if job_node is None:
        #lptlog.error()
        raise MissXML("No matching job found, please check jobs.xml or create a new test job")

    # check whether all tools have already been executed
    no_exec_tools_nodes_list = jobs.get_noexec_tools_nodes(job_node)
    if not no_exec_tools_nodes_list:
        #lptlog.warning('all tools in the job have already run successfully, please create a new test job')
        raise TestOK("All tools in the job have already run successfully, please create a new test job")
    else:
        no_exec_tools = list(map(jobs.get_tool_name, no_exec_tools_nodes_list))

    if not tools_list:
        lptlog.debug("No test tools specified; defaulting to the job tools that have not yet run successfully")
        test_tools = list(map(jobs.get_tool_name, no_exec_tools_nodes_list))
    else:
        #python 2.7
        #tools = filter(lambda x:job_node.find("tool[@id='%s']" % x).get('status') == "no", tools_list)
        #python 2.6
        #no_exec_tools = map(lambda y:y.get('id'), jobs.get_noexec_tools_nodes(job_node))
        #tools = filter(lambda x:no_exec_tools.count(x)>0, tools_list)
        test_tools = [tool for tool in no_exec_tools if tool in tools_list]

        if not test_tools:
            #lptlog.warning('the specified test tools have all finished running, please create a new job')
            raise TestOK('The specified test tools have all finished running, please create a new job')
        else:
            tools_string = " ".join(test_tools)
            lptlog.debug("Test tools not yet finished: %s" % tools_string)

    for tool in test_tools:
        lptlog.info(__BEGIN_MSG % tool)
        try:
            control.run(tool, jobs_xml, job_node, clean=clean)
        except Exception as e:
            lptlog.debug(e)
            lptlog.error('''
                    ----------------------------------
                    +       %s test: FAIL    +
                    ----------------------------------
                    ''' % tool)
            lptlog.info(__END_MSG % tool)
            #lptlog.exception("")
            if test_tools[-1] == tool:
                raise TestOK("Test Over, but Some Case FAIL")
            else:
                continue
        else:
            #python 2.7
            #jobs.set_tool_status(job_node.find("tool[@id='%s']" % tool), 'ok')
            #python 2.6
            tool_node = [
                x for x in jobs.get_tools_nodes(job_node)
                if x.get("id") == tool
            ][0]
            jobs.set_tool_status(tool_node, 'ok')
            jobs.save_file()
            lptlog.info('''
                    ----------------------------------
                    +       %s test: PASS    +
                    ----------------------------------
                    ''' % tool)
            lptlog.info(__END_MSG % tool)

            if REBOOT:
                break
Example #38
                if opts.jobs_list:
                    # check whether the jobs xml file exists
                    if not os.path.isfile(self.jobs_xml):
                        #lptlog.warning("jobs file missing, please check the jobs file or create a new job")
                        #raise NameError("")
                        raise ValueError("The jobs file is missing, please check the jobs file or create a new job")
                    else:
                        self.list_jobs(self.jobs_xml)
                #else:
                 #   lptlog.warning("-h or --help show help message...")
        except KeyboardInterrupt:
            lptlog.warning("CTRL+C pressed, stopping the test program")
            sys.exit()
        except optparse.OptionValueError as e:
            lptlog.error("Bad option or value: %s" % e)
        except MissXML as e:
            lptlog.error(e)
        except TestOK:
            lptlog.info("ALL Test OK")
            sys.exit()
        except Exception as e:
            lptlog.exception('')
            lptlog.debug(e)

def main(argv=sys.argv):
    lpt = Lpt()
    lpt.parser_opts(argv)

if __name__ == '__main__':
    main()
Example #39
def run(job_id=None, tools_list=None, jobs_xml=JOBS_XML, format='txt', clean=False, REBOOT=False):
    jobs = lptxml.Jobs(jobs_xml)
    if job_id is None:
        try:
            job_node = jobs.get_new_job()
        except IndexError as e:
            lptlog.debug("Job count is 0, expected non-zero")
            job_node = None
    else:
        
         #python 2.7
        #job_node = jobs.search_job_node("job[@id='%s']" % job_id)
        #python 2.6
        job_nodes = jobs.search_job_nodes("job")
        if job_nodes is None:
            lptlog.debug("job任务数为0, 期望非0")
            job_node = None
        else:
            job_filter_nodes = filter(lambda x: x.get("id")==str(job_id), job_nodes)
            if job_filter_nodes:
                job_node = job_filter_nodes[0]
            else:
                lptlog.debug("%s id不存在,请核对JOB ID" % job_id)
                job_node = None
               
    if job_node is None:
        #lptlog.error()
        raise MissXML, "没有找到对应的job任务, 请核对jobs.xml或者重新创建测试任务"
    
    # check whether all tools have already been executed
    no_exec_tools_nodes_list = jobs.get_noexec_tools_nodes(job_node)