def compute_caliper_logs(target_exec_dir, flag=1):
    """Compute scores (or generate raw yaml) for every benchmark selected
    by the case-definition files of ``target_exec_dir``.

    :param target_exec_dir: execution directory whose case-definition
        files select the benchmarks to process
    :param flag: 1 means "generate raw yaml", any other value means
        "compute the score"; forwarded to compute_case_score
    :raises AttributeError: when a benchmark section lacks the
        'run'/'parser' options
    """
    # Parsed results produced earlier by the parsing stage.
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; the file is written by caliper itself, but
    # yaml.safe_load would still be preferable.
    with open(caliper_path.folder_ope.final_parser, 'r') as parser_fp:
        dic = yaml.load(parser_fp)
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    for i in range(0, len(config_files)):
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        # classify is 'common', 'server', ... taken from the file name
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        for j in range(0, len(sections)):
            try:
                run_file = config.get(sections[j], 'run')
                parser = config.get(sections[j], 'parser')
            except Exception:
                raise AttributeError("The is no option value of Computing")
            print_format()
            if flag == 1:
                logging.info("Generation raw yaml for %s" % sections[j])
            else:
                logging.info("Computing Score for %s" % sections[j])
            bench = os.path.join(classify, sections[j])
            # get the abspath, which is the filename of the run config for
            # the benchmark, then read its sections
            # BUGFIX: the original caught AttributeError only to raise a
            # bare AttributeError, discarding the message; let the
            # original exception propagate untouched instead.
            bench_conf_file = os.path.join(
                caliper_path.config_files.tests_cfg_dir, bench, run_file)
            configRun, sections_run = server_utils.read_config_file(
                bench_conf_file)
            for k in range(0, len(sections_run)):
                try:
                    category = configRun.get(sections_run[k], 'category')
                    scores_way = configRun.get(sections_run[k], 'scores_way')
                    command = configRun.get(sections_run[k], 'command')
                except Exception as e:
                    # BUGFIX: 'e' was previously unbound in this handler
                    # (no 'as e'), so the log call itself raised NameError.
                    logging.debug("no value for the %s" % sections_run[k])
                    logging.info(e)
                    continue
                try:
                    logging.debug(
                        "Computing the score of the result of command: %s"
                        % command)
                    flag_compute = compute_case_score(
                        dic[sections[j]][sections_run[k]]["value"],
                        category, scores_way, target_exec_dir, flag)
                except Exception as e:
                    logging.info("Error while computing the result of \"%s\""
                                 % sections_run[k])
                    logging.info(e)
                    continue
                else:
                    # BUGFIX: the original evaluated
                    # dic[bench][sections_run[k]["value"]], i.e. it indexed
                    # the section *name* (a string) with "value" — a
                    # TypeError — and keyed dic by 'bench' although dic is
                    # keyed by section name everywhere else. Use the same
                    # lookup as the compute_case_score call above.
                    if not flag_compute and \
                            dic[sections[j]][sections_run[k]]["value"]:
                        # BUGFIX: the message used to contain a stray
                        # backslash from a broken string continuation.
                        logging.info("Error while computing the result "
                                     "of \"%s\"" % command)
def get_selected_tools(summary_file, target):
    """Collect every tool/benchmark section defined for *target*.

    ``summary_file`` is accepted for interface compatibility but is not
    consulted here. Returns a flat list with the section names found in
    all of the target's case-definition files.
    """
    tools = []
    for cfg_path in server_utils.get_cases_def_files(target):
        _, section_names = server_utils.read_config_file(
            os.path.join(cfg_path))
        if section_names:
            tools.extend(section_names)
    return tools
def compute_caliper_logs(target_exec_dir, flag=1):
    """Compute scores (or generate raw yaml) for every benchmark selected
    by the case-definition files of ``target_exec_dir``.

    :param target_exec_dir: execution directory whose case-definition
        files select the benchmarks to process
    :param flag: 1 means "generate raw yaml", any other value means
        "compute the score"; forwarded to compute_case_score
    :raises AttributeError: when a benchmark section lacks the
        'run'/'parser' options
    """
    # Parsed results produced earlier by the parsing stage.
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; the file is written by caliper itself, but
    # yaml.safe_load would still be preferable.
    with open(caliper_path.folder_ope.final_parser, 'r') as parser_fp:
        dic = yaml.load(parser_fp)
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    for i in range(0, len(config_files)):
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        # classify is 'common', 'server', ... taken from the file name
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        for j in range(0, len(sections)):
            try:
                run_file = config.get(sections[j], 'run')
                parser = config.get(sections[j], 'parser')
            except Exception:
                raise AttributeError("The is no option value of Computing")
            print_format()
            if flag == 1:
                logging.info("Generation raw yaml for %s" % sections[j])
            else:
                logging.info("Computing Score for %s" % sections[j])
            bench = os.path.join(classify, sections[j])
            # get the abspath, which is the filename of the run config for
            # the benchmark, then read its sections
            # BUGFIX: the original caught AttributeError only to raise a
            # bare AttributeError, discarding the message; let the
            # original exception propagate untouched instead.
            bench_conf_file = os.path.join(
                caliper_path.config_files.tests_cfg_dir, bench, run_file)
            configRun, sections_run = server_utils.read_config_file(
                bench_conf_file)
            for k in range(0, len(sections_run)):
                try:
                    category = configRun.get(sections_run[k], 'category')
                    scores_way = configRun.get(sections_run[k], 'scores_way')
                    command = configRun.get(sections_run[k], 'command')
                except Exception as e:
                    # BUGFIX: 'e' was previously unbound in this handler
                    # (no 'as e'), so the log call itself raised NameError.
                    logging.debug("no value for the %s" % sections_run[k])
                    logging.info(e)
                    continue
                try:
                    logging.debug(
                        "Computing the score of the result of command: %s"
                        % command)
                    flag_compute = compute_case_score(
                        dic[sections[j]][sections_run[k]]["value"],
                        category, scores_way, target_exec_dir, flag)
                except Exception as e:
                    logging.info("Error while computing the result of \"%s\""
                                 % sections_run[k])
                    logging.info(e)
                    continue
                else:
                    # BUGFIX: the original evaluated
                    # dic[bench][sections_run[k]["value"]], i.e. it indexed
                    # the section *name* (a string) with "value" — a
                    # TypeError — and keyed dic by 'bench' although dic is
                    # keyed by section name everywhere else. Use the same
                    # lookup as the compute_case_score call above.
                    if not flag_compute and \
                            dic[sections[j]][sections_run[k]]["value"]:
                        # BUGFIX: the message used to contain a stray
                        # backslash from a broken string continuation.
                        logging.info("Error while computing the result "
                                     "of \"%s\"" % command)
def build_caliper(target_arch, flag=0):
    """Build every benchmark tool selected for ``target_arch``.

    target_arch means to build the caliper for the special arch
    flag mean build for the target or local machine (namely server)
        0: means for the target
        1: means for the server

    Returns a falsy value when every build succeeds (or failures are
    tolerated), otherwise the first failing build result.
    """
    if target_arch:
        arch = target_arch
    else:
        arch = 'x86_64'
    # get the files list of 'cfg'
    files_list = server_utils.get_cases_def_files(arch)
    logging.debug("config files are %s" % files_list)
    source_build_file = caliper_path.SOURCE_BUILD_FILE
    des_build_file = os.path.join(TMP_DIR, caliper_path.BUILD_FILE)
    logging.info("destination file of building is %s" % des_build_file)
    for i in range(0, len(files_list)):
        # get the directory, such as 'common','server' and so on
        dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
        config = ConfigParser.ConfigParser()
        config.read(files_list[i])
        sections = config.sections()
        # BUGFIX: the inner loop reused the index name 'i', shadowing the
        # outer file index; use a distinct index for clarity.
        for j in range(0, len(sections)):
            # start every tool from a fresh copy of the build script
            if os.path.exists(des_build_file):
                os.remove(des_build_file)
            shutil.copyfile(os.path.abspath(source_build_file),
                            des_build_file)
            try:
                result = generate_build(config, sections[j], dir_name,
                                        des_build_file)
            except Exception as e:
                # modernized from deprecated 'except Exception, e' syntax
                logging.info(e)
            else:
                if result:
                    return result
            result = build_each_tool(dir_name, sections[j], des_build_file,
                                     target_arch)
            if os.path.exists(des_build_file):
                os.remove(des_build_file)
            if result:
                # non-zero result means the build failed; honour the
                # fault-tolerance configuration before aborting
                build_flag = server_utils.get_fault_tolerance_config(
                    "fault_tolerance", "build_error_continue")
                if build_flag == 1:
                    continue
                else:
                    return result
def parsing_run(target_exec_dir, target):
    """Parse the results of every selected benchmark and dump the
    collected data to ``final_parsing_logs.yaml`` in the workspace.

    Returns 0 on success; a non-zero value when a benchmark fails to
    parse and fault tolerance is disabled.
    """
    # get the test cases defined files
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration are %s" % config_files)
    dic = {}
    for i in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)
        # get if it is the 'common' or 'arm' or 'android'
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)
        # BUGFIX: the inner loop reused the index name 'i', shadowing the
        # outer file index; use a distinct index for clarity.
        for j in range(0, len(sections)):
            dic[sections[j]] = {}
            # try to resolve the configuration of the configuration file
            try:
                run_file = config.get(sections[j], 'run')
                parser = config.get(sections[j], 'parser')
            except Exception:
                # BUGFIX: typo "The is" -> "There is"
                raise AttributeError("There is no option value of parser")
            print_format()
            logging.info("Parsing %s" % sections[j])
            bench = os.path.join(classify, sections[j])
            try:
                result = parse_all_cases(target_exec_dir, target, bench,
                                         sections[j], run_file, parser, dic)
            except Exception:
                logging.info("Running %s Exception" % sections[j])
                crash_handle.main()
                print_format()
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    # BUGFIX: the original returned 'result' here, which is
                    # unbound when parse_all_cases raises on the very first
                    # benchmark (UnboundLocalError) and stale otherwise;
                    # report the failure explicitly.
                    return 1
            else:
                logging.info("Parsing %s Finished" % sections[j])
                print_format()
    # persist the collected parsing results for the scoring stage
    out_path = os.path.join(
        caliper_path.folder_ope.workspace,
        caliper_path.folder_ope.name.strip() + "/final_parsing_logs.yaml")
    # BUGFIX: use a context manager so the file is closed even if the
    # dump raises.
    with open(out_path, 'w') as outfp:
        outfp.write(yaml.dump(dic, default_flow_style=False))
    return 0
def parsing_run(target_exec_dir, target):
    """Parse the results of every selected benchmark and dump the
    collected data to ``final_parsing_logs.yaml`` in the workspace.

    Returns 0 on success; a non-zero value when a benchmark fails to
    parse and fault tolerance is disabled.
    """
    # get the test cases defined files
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration are %s" % config_files)
    dic = {}
    for i in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)
        # get if it is the 'common' or 'arm' or 'android'
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)
        # BUGFIX: the inner loop reused the index name 'i', shadowing the
        # outer file index; use a distinct index for clarity.
        for j in range(0, len(sections)):
            dic[sections[j]] = {}
            # try to resolve the configuration of the configuration file
            try:
                run_file = config.get(sections[j], 'run')
                parser = config.get(sections[j], 'parser')
            except Exception:
                # BUGFIX: typo "The is" -> "There is"
                raise AttributeError("There is no option value of parser")
            print_format()
            logging.info("Parsing %s" % sections[j])
            bench = os.path.join(classify, sections[j])
            try:
                result = parse_all_cases(target_exec_dir, target, bench,
                                         sections[j], run_file, parser, dic)
            except Exception:
                logging.info("Running %s Exception" % sections[j])
                crash_handle.main()
                print_format()
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    # BUGFIX: the original returned 'result' here, which is
                    # unbound when parse_all_cases raises on the very first
                    # benchmark (UnboundLocalError) and stale otherwise;
                    # report the failure explicitly.
                    return 1
            else:
                logging.info("Parsing %s Finished" % sections[j])
                print_format()
    # persist the collected parsing results for the scoring stage
    out_path = os.path.join(
        caliper_path.folder_ope.workspace,
        caliper_path.folder_ope.name.strip() + "/final_parsing_logs.yaml")
    # BUGFIX: use a context manager so the file is closed even if the
    # dump raises.
    with open(out_path, 'w') as outfp:
        outfp.write(yaml.dump(dic, default_flow_style=False))
    return 0
def copy_build_caliper(target_arch, flag=0):
    """Wait for tools being built by other caliper processes and copy
    their binaries (and build logs) into this run's workspace.

    target_arch means to build the caliper for the special arch
    flag mean build for the target or local machine (namely server)
        0: means for the target
        1: means for the server

    Exits the process (sys.exit(1)) on unexpected errors while reading
    the per-tool build-mapping files. Returns 0 otherwise.
    """
    global GEN_DIR, WS_GEN_DIR, BUILD_MAPPING_FILE
    prev_build_files = []
    current_build_files = []
    WS_prev_build_files = []
    WS_current_build_files = []
    if target_arch:
        arch = target_arch
    else:
        arch = 'x86_64'
    # get the files list of 'cfg'
    build_folder = os.path.join(caliper_path.BUILD_LOGS, arch)
    files_list = server_utils.get_cases_def_files(arch)
    source_build_file = caliper_path.SOURCE_BUILD_FILE
    des_build_file = os.path.join(TMP_DIR, caliper_path.BUILD_FILE)
    logging.info("=" * 55)
    logging.info(
        "Please Wait while check and copy the files of the tools which was built by other process"
    )
    # Fetch details of previous builds
    for i in range(0, len(files_list)):
        # get the directory, such as 'common','server' and so on
        dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
        config = ConfigParser.ConfigParser()
        config.read(files_list[i])
        sections = config.sections()
        # NOTE(review): this inner loop reuses the index name 'i',
        # shadowing the outer file index; harmless here (the outer
        # range iterator re-binds it) but worth renaming.
        for i in range(0, len(sections)):
            # one build-mapping yaml per tool records who is building it
            # and which binaries it produced
            BUILD_MAPPING_FILE = os.path.join(BUILD_MAPPING_DIR,
                                              sections[i] + '.yaml')
            # POPULATING THE binary_mapping.yaml
            try:
                # take the file lock before reading the mapping
                with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                    fp = open(BUILD_MAPPING_FILE)
                    dic = yaml.load(fp)
                    fp.close()
                try:
                    # a ProcessID outside currentProcess means another
                    # caliper instance owns this tool's build
                    if dic[sections[i]]['ProcessID'] not in currentProcess:
                        logging.info("=" * 55)
                        logging.info(
                            "Please wait another process is building %s"
                            % sections[i])
                        # per-tool timeout budget (seconds) for waiting
                        # on the other process
                        temp = yaml.load(open(caliper_path.BUILD_TIME))
                        count = temp[sections[i]]
                        # countdown loop: re-read the mapping once per
                        # second until the other process releases the
                        # tool or the budget is exhausted
                        while dic[sections[i]][
                                'ProcessID'] not in currentProcess and count:
                            mins, secs = divmod(count, 60)
                            timeformat = '{:02d}:{:02d}'.format(mins, secs)
                            count = count - 1
                            sys.stdout.write("\r" + timeformat)
                            sys.stdout.flush()
                            time.sleep(1)
                            with client_utils.SimpleFlock(
                                    BUILD_MAPPING_FILE, 60):
                                fp = open(BUILD_MAPPING_FILE)
                                dic = yaml.load(fp)
                                fp.close()
                        if count == 0:
                            # timed out: drop the stale entry so a later
                            # run rebuilds the tool
                            with client_utils.SimpleFlock(
                                    BUILD_MAPPING_FILE, 60):
                                fp = open(BUILD_MAPPING_FILE)
                                dic = yaml.load(fp)
                                if type(dic) != dict:
                                    dic = {}
                                fp.close()
                                if sections[i] in dic.keys():
                                    del dic[sections[i]]
                                fp = open(BUILD_MAPPING_FILE, 'w')
                                fp.write(
                                    yaml.dump(dic, default_flow_style=False))
                                fp.close()
                    # empty 'binaries' means the owning process finished
                    # but produced nothing -> the build failed
                    # (raises KeyError if the entry was deleted above,
                    # which is caught below and skips this tool)
                    if not dic[sections[i]]['binaries']:
                        logging.info("%s BUILDING FAILED" % sections[i])
                except KeyError:
                    continue
            except Exception as e:
                logging.debug(e)
                sys.exit(1)
            temp = 0
            WS_current_build_files = getAllFilesRecursive(WS_GEN_DIR)
            # copy every recorded binary that is missing from the
            # workspace binary tree
            for j in dic[sections[i]]['binaries']:
                if j not in WS_current_build_files:
                    if j != BUILD_MAPPING_FILE:
                        # rebuild the relative path below the workspace
                        # binary dir; assumes paths have >=5 leading
                        # components — TODO confirm against GEN_DIR layout
                        WS_Dir = os.path.join(WS_GEN_DIR,
                                              '/'.join(j.split('/')[5:-1]))
                        try:
                            os.makedirs(WS_Dir)
                        except:
                            pass
                        # announce the copy once per tool (temp is the
                        # "already announced" latch)
                        if temp == 0:
                            temp = 1
                            logging.info("=" * 55)
                            logging.info("COPYING %s's Binaries"
                                         % sections[i])
                        shutil.copy(j, WS_Dir)
            # also bring over the tool's build logs
            for log in glob.glob(os.path.join(build_folder,
                                              sections[i] + '*')):
                shutil.copy(log, FOLDER.build_dir)
    logging.info("=" * 55)
    return 0
def build_caliper(target_arch, flag=0, clear=0):
    """Build every benchmark tool for ``target_arch``, coordinating with
    other concurrent caliper processes through per-tool build-mapping
    yaml files.

    target_arch means to build the caliper for the special arch
    flag mean build for the target or local machine (namely server)
        0: means for the target
        1: means for the server
    clear: when truthy, first delete all previously built binaries and
        reset the build-mapping entries (the -c option).

    Exits the process (sys.exit(1)) on unexpected errors while reading a
    build-mapping file; returns a failing build result when fault
    tolerance is disabled.
    """
    copy = 0
    global GEN_DIR, WS_GEN_DIR, BUILD_MAPPING_FILE, BUILD_MAPPING_DIR
    GEN_DIR = caliper_path.GEN_DIR
    WS_GEN_DIR = os.path.join(FOLDER.workspace, 'binary')
    prev_build_files = []
    current_build_files = []
    WS_prev_build_files = []
    WS_current_build_files = []
    if target_arch:
        arch = target_arch
    else:
        arch = 'x86_64'
    # get the files list of 'cfg'
    files_list = server_utils.get_cases_def_files(arch)
    logging.debug("config files are %s" % files_list)
    # mapping files live in a per-arch subdirectory
    BUILD_MAPPING_DIR = os.path.join(BUILD_MAPPING_DIR, arch)
    if not os.path.exists(BUILD_MAPPING_DIR):
        try:
            os.makedirs(BUILD_MAPPING_DIR)
        except:
            # tolerate a concurrent process creating it first
            pass
    source_build_file = caliper_path.SOURCE_BUILD_FILE
    des_build_file = os.path.join(TMP_DIR, caliper_path.BUILD_FILE)
    logging.info("destination file of building is %s" % des_build_file)
    set_signals()
    # check and delete those binaries if it is already built if -c is used
    if clear:
        logging.info("=" * 55)
        logging.info(
            "WARNING: Please wait, dont run any other instance of caliper")
        for i in range(0, len(files_list)):
            # get the directory, such as 'common','server' and so on
            dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
            config = ConfigParser.ConfigParser()
            config.read(files_list[i])
            sections = config.sections()
            # NOTE(review): inner loop reuses index name 'i' (shadows the
            # outer file index); harmless but confusing.
            for i in range(0, len(sections)):
                BUILD_MAPPING_FILE = os.path.join(BUILD_MAPPING_DIR,
                                                  sections[i] + '.yaml')
                # under the lock: wipe the recorded binaries and reset
                # the owner pid for this tool
                with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                    fp = open(BUILD_MAPPING_FILE)
                    dic = yaml.load(fp)
                    fp.close()
                    if type(dic) != dict:
                        dic = {}
                    if sections[i] in dic.keys():
                        for file in dic[sections[i]]['binaries']:
                            try:
                                shutil.rmtree(file)
                            except:
                                pass
                        dic[sections[i]]['binaries'] = []
                        dic[sections[i]]['ProcessID'] = 0
                    fp = open(BUILD_MAPPING_FILE, 'w')
                    fp.write(yaml.dump(dic, default_flow_style=False))
                    fp.close()
        logging.info("It is safe to run caliper now")
        logging.info("=" * 55)
    #STARING THE BUILD
    for i in range(0, len(files_list)):
        # get the directory, such as 'common','server' and so on
        dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
        config = ConfigParser.ConfigParser()
        config.read(files_list[i])
        sections = config.sections()
        # NOTE(review): inner loop again shadows 'i'.
        for i in range(0, len(sections)):
            # BUILD flag: 1 -> this process must build the tool,
            # 0 -> reuse binaries recorded in the mapping file
            BUILD = 0
            BUILD_MAPPING_FILE = os.path.join(BUILD_MAPPING_DIR,
                                              sections[i] + '.yaml')
            reset_binary_mapping()
            try:
                #Lock the file and modify it if this is the first process which is building the tool
                with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                    fp = open(BUILD_MAPPING_FILE)
                    dic = yaml.load(fp)
                    if type(dic) != dict:
                        dic = {}
                    fp.close()
                    # first process to claim this tool records its pid
                    if sections[i] not in dic.keys():
                        dic[sections[i]] = {}
                        dic[sections[i]]['binaries'] = []
                        dic[sections[i]]['ProcessID'] = os.getpid()
                        BUILD = 1
                        fp = open(BUILD_MAPPING_FILE, 'w')
                        fp.write(yaml.dump(dic, default_flow_style=False))
                        fp.close()
                #checking if binary field is empty, empty means that the previous build is a failure
                if not dic[sections[i]]['binaries']:
                    BUILD = 1
                # Checking if the tool if already built or is in the process of being built by another process
                if dic[sections[i]]['ProcessID'] not in currentProcess:
                    # We shall continue to build the next tools and we'll copy these binaries later
                    logging.info("=" * 55)
                    logging.info(
                        "%s is being built by someother process, we'll build the remaining tools"
                        % sections[i])
                    continue
            except Exception as e:
                logging.debug(e)
                sys.exit(1)
            if BUILD == 0:
                #Collecting the build files in the Workspace binary dir and main binary dir
                WS_prev_build_files = getAllFilesRecursive(WS_GEN_DIR)
                prev_build_files = getAllFilesRecursive(GEN_DIR)
                #checking if the required binaries are present or not in the main binary dir
                for j in dic[sections[i]]['binaries']:
                    if j not in prev_build_files:
                        if j != BUILD_MAPPING_FILE:
                            #the binaries are not present we have to build it
                            BUILD = 1
            if BUILD == 1:
                # start from a fresh copy of the build script
                if os.path.exists(des_build_file):
                    os.remove(des_build_file)
                shutil.copyfile(os.path.abspath(source_build_file),
                                des_build_file)
                try:
                    result = generate_build(config, sections[i], dir_name,
                                            des_build_file)
                except Exception, e:
                    logging.info(e)
                else:
                    if result:
                        return result
                result = build_each_tool(dir_name, sections[i],
                                         des_build_file, target_arch)
                if result:
                    # non-zero result means the build failed
                    build_flag = server_utils.get_fault_tolerance_config(
                        "fault_tolerance", "build_error_continue")
                    # release ownership and clear the (failed) binaries
                    with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                        dic[sections[i]]['ProcessID'] = 0
                        dic[sections[i]]['binaries'] = []
                        fp = open(BUILD_MAPPING_FILE, 'w')
                        fp.write(yaml.dump(dic, default_flow_style=False))
                        fp.close()
                    if build_flag == 1:
                        #Build has failed so delete the section entry in the build_mapping.yaml
                        continue
                    else:
                        return result
                if os.path.exists(des_build_file):
                    os.remove(des_build_file)
            else:
                #Copy the generated binaries to the Workspace binaries
                for j in dic[sections[i]]['binaries']:
                    if j not in WS_prev_build_files:
                        if j != BUILD_MAPPING_FILE:
                            # assumes binary paths have >=5 leading
                            # components — TODO confirm GEN_DIR layout
                            WS_Dir = os.path.join(
                                WS_GEN_DIR, '/'.join(j.split('/')[5:-1]))
                            try:
                                os.makedirs(WS_Dir)
                            except:
                                pass
                            shutil.copy(j, WS_Dir)
                logging.info("=" * 55)
                logging.info("%s is already build", sections[i])
            #get the binary present in the WS binary dir
            #WS_prev_build_dir - WS_current_build_dir = "TOOL CHAIN RELATED BINARIES"
            #Copy the ToolChainRelated binaries to the main binary folder
            WS_current_build_files = getAllFilesRecursive(WS_GEN_DIR)
            for files in WS_current_build_files:
                if files not in WS_prev_build_files:
                    deflocation = os.path.join(
                        str(GEN_DIR), '/'.join(files.split('/')[6:]))
                    try:
                        os.makedirs('/'.join(deflocation.split('/')[:-1]))
                    except:
                        pass
                    if not os.path.exists(deflocation):
                        shutil.copy(files, deflocation)
                    (dic[sections[i]]['binaries']).append(str(deflocation))
            # release ownership and merge our entry back into the shared
            # mapping file under the lock
            dic[sections[i]]['ProcessID'] = 0
            with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                fp = open(BUILD_MAPPING_FILE)
                temp = yaml.load(fp)
                if type(temp) != dict:
                    temp = {}
                fp.close()
                copy_dic(temp, dic, sections[i])
                fp = open(BUILD_MAPPING_FILE, 'w')
                fp.write(yaml.dump(dic, default_flow_style=False))
                fp.close()
def copy_build_caliper(target_arch, flag=0):
    """Wait for tools being built by other caliper processes and copy
    their binaries (and build logs) into this run's workspace.

    target_arch means to build the caliper for the special arch
    flag mean build for the target or local machine (namely server)
        0: means for the target
        1: means for the server

    Exits the process (sys.exit(1)) on unexpected errors while reading
    the per-tool build-mapping files. Returns 0 otherwise.
    """
    global GEN_DIR, WS_GEN_DIR, BUILD_MAPPING_FILE
    prev_build_files = []
    current_build_files = []
    WS_prev_build_files = []
    WS_current_build_files = []
    if target_arch:
        arch = target_arch
    else:
        arch = 'x86_64'
    # get the files list of 'cfg'
    build_folder = os.path.join(caliper_path.BUILD_LOGS, arch)
    files_list = server_utils.get_cases_def_files(arch)
    source_build_file = caliper_path.SOURCE_BUILD_FILE
    des_build_file = os.path.join(TMP_DIR, caliper_path.BUILD_FILE)
    logging.info("=" * 55)
    logging.info("Please Wait while check and copy the files of the tools which was built by other process")
    # Fetch details of previous builds
    for i in range(0, len(files_list)):
        # get the directory, such as 'common','server' and so on
        dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
        config = ConfigParser.ConfigParser()
        config.read(files_list[i])
        sections = config.sections()
        # NOTE(review): this inner loop reuses the index name 'i',
        # shadowing the outer file index; harmless but worth renaming.
        for i in range(0, len(sections)):
            # one build-mapping yaml per tool records who is building it
            # and which binaries it produced
            BUILD_MAPPING_FILE = os.path.join(BUILD_MAPPING_DIR,
                                              sections[i] + '.yaml')
            # POPULATING THE binary_mapping.yaml
            try:
                # take the file lock before reading the mapping
                with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                    fp = open(BUILD_MAPPING_FILE)
                    dic = yaml.load(fp)
                    fp.close()
                try:
                    # a ProcessID outside currentProcess means another
                    # caliper instance owns this tool's build
                    if dic[sections[i]]['ProcessID'] not in currentProcess:
                        logging.info("=" * 55)
                        logging.info("Please wait another process is building %s" % sections[i])
                        # per-tool timeout budget (seconds) for waiting
                        temp = yaml.load(open(caliper_path.BUILD_TIME))
                        count = temp[sections[i]]
                        # countdown: re-read the mapping once per second
                        # until the other process releases the tool or
                        # the budget is exhausted
                        while dic[sections[i]]['ProcessID'] not in currentProcess and count:
                            mins, secs = divmod(count, 60)
                            timeformat = '{:02d}:{:02d}'.format(mins, secs)
                            count = count - 1
                            sys.stdout.write("\r" + timeformat)
                            sys.stdout.flush()
                            time.sleep(1)
                            with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                                fp = open(BUILD_MAPPING_FILE)
                                dic = yaml.load(fp)
                                fp.close()
                        if count == 0:
                            # timed out: drop the stale entry so a later
                            # run rebuilds the tool
                            with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                                fp = open(BUILD_MAPPING_FILE)
                                dic = yaml.load(fp)
                                if type(dic) != dict:
                                    dic = {}
                                fp.close()
                                if sections[i] in dic.keys():
                                    del dic[sections[i]]
                                fp = open(BUILD_MAPPING_FILE, 'w')
                                fp.write(yaml.dump(dic, default_flow_style=False))
                                fp.close()
                    # empty 'binaries' means the owning process finished
                    # but produced nothing -> the build failed (raises
                    # KeyError if the entry was deleted above, which is
                    # caught below and skips this tool)
                    if not dic[sections[i]]['binaries']:
                        logging.info("%s BUILDING FAILED" % sections[i])
                except KeyError:
                    continue
            except Exception as e:
                logging.debug(e)
                sys.exit(1)
            temp = 0
            WS_current_build_files = getAllFilesRecursive(WS_GEN_DIR)
            # copy every recorded binary missing from the workspace tree
            for j in dic[sections[i]]['binaries']:
                if j not in WS_current_build_files:
                    if j != BUILD_MAPPING_FILE:
                        # assumes binary paths have >=5 leading
                        # components — TODO confirm GEN_DIR layout
                        WS_Dir = os.path.join(WS_GEN_DIR, '/'.join(j.split('/')[5:-1]))
                        try:
                            os.makedirs(WS_Dir)
                        except:
                            pass
                        # announce the copy once per tool (temp latch)
                        if temp == 0:
                            temp = 1
                            logging.info("=" * 55)
                            logging.info("COPYING %s's Binaries" % sections[i])
                        shutil.copy(j, WS_Dir)
            # also bring over the tool's build logs
            for log in glob.glob(os.path.join(build_folder, sections[i] + '*')):
                shutil.copy(log, FOLDER.build_dir)
    logging.info("=" * 55)
    return 0
def build_caliper(target_arch, flag=0, clear=0):
    """Build every benchmark tool for ``target_arch``, coordinating with
    other concurrent caliper processes through per-tool build-mapping
    yaml files.

    target_arch means to build the caliper for the special arch
    flag mean build for the target or local machine (namely server)
        0: means for the target
        1: means for the server
    clear: when truthy, first delete all previously built binaries and
        reset the build-mapping entries (the -c option).

    Exits the process (sys.exit(1)) on unexpected errors while reading a
    build-mapping file; returns a failing build result when fault
    tolerance is disabled.
    """
    copy = 0
    global GEN_DIR, WS_GEN_DIR, BUILD_MAPPING_FILE, BUILD_MAPPING_DIR
    GEN_DIR = caliper_path.GEN_DIR
    WS_GEN_DIR = os.path.join(FOLDER.workspace, 'binary')
    prev_build_files = []
    current_build_files = []
    WS_prev_build_files = []
    WS_current_build_files = []
    if target_arch:
        arch = target_arch
    else:
        arch = 'x86_64'
    # get the files list of 'cfg'
    files_list = server_utils.get_cases_def_files(arch)
    logging.debug("config files are %s" % files_list)
    # mapping files live in a per-arch subdirectory
    BUILD_MAPPING_DIR = os.path.join(BUILD_MAPPING_DIR, arch)
    if not os.path.exists(BUILD_MAPPING_DIR):
        try:
            os.makedirs(BUILD_MAPPING_DIR)
        except:
            # tolerate a concurrent process creating it first
            pass
    source_build_file = caliper_path.SOURCE_BUILD_FILE
    des_build_file = os.path.join(TMP_DIR, caliper_path.BUILD_FILE)
    logging.info("destination file of building is %s" % des_build_file)
    set_signals()
    # check and delete those binaries if it is already built if -c is used
    if clear:
        logging.info("=" * 55)
        logging.info("WARNING: Please wait, dont run any other instance of caliper")
        for i in range(0, len(files_list)):
            # get the directory, such as 'common','server' and so on
            dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
            config = ConfigParser.ConfigParser()
            config.read(files_list[i])
            sections = config.sections()
            # NOTE(review): inner loop reuses index name 'i' (shadows the
            # outer file index); harmless but confusing.
            for i in range(0, len(sections)):
                BUILD_MAPPING_FILE = os.path.join(BUILD_MAPPING_DIR,
                                                  sections[i] + '.yaml')
                # under the lock: wipe the recorded binaries and reset
                # the owner pid for this tool
                with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                    fp = open(BUILD_MAPPING_FILE)
                    dic = yaml.load(fp)
                    fp.close()
                    if type(dic) != dict:
                        dic = {}
                    if sections[i] in dic.keys():
                        for file in dic[sections[i]]['binaries']:
                            try:
                                shutil.rmtree(file)
                            except:
                                pass
                        dic[sections[i]]['binaries'] = []
                        dic[sections[i]]['ProcessID'] = 0
                    fp = open(BUILD_MAPPING_FILE, 'w')
                    fp.write(yaml.dump(dic, default_flow_style=False))
                    fp.close()
        logging.info("It is safe to run caliper now")
        logging.info("=" * 55)
    #STARING THE BUILD
    for i in range(0, len(files_list)):
        # get the directory, such as 'common','server' and so on
        dir_name = files_list[i].strip().split("/")[-1].split("_")[0]
        config = ConfigParser.ConfigParser()
        config.read(files_list[i])
        sections = config.sections()
        # NOTE(review): inner loop again shadows 'i'.
        for i in range(0, len(sections)):
            # BUILD flag: 1 -> this process must build the tool,
            # 0 -> reuse binaries recorded in the mapping file
            BUILD = 0
            BUILD_MAPPING_FILE = os.path.join(BUILD_MAPPING_DIR,
                                              sections[i] + '.yaml')
            reset_binary_mapping()
            try:
                #Lock the file and modify it if this is the first process which is building the tool
                with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                    fp = open(BUILD_MAPPING_FILE)
                    dic = yaml.load(fp)
                    if type(dic) != dict:
                        dic = {}
                    fp.close()
                    # first process to claim this tool records its pid
                    if sections[i] not in dic.keys():
                        dic[sections[i]] = {}
                        dic[sections[i]]['binaries'] = []
                        dic[sections[i]]['ProcessID'] = os.getpid()
                        BUILD = 1
                        fp = open(BUILD_MAPPING_FILE, 'w')
                        fp.write(yaml.dump(dic, default_flow_style=False))
                        fp.close()
                #checking if binary field is empty, empty means that the previous build is a failure
                if not dic[sections[i]]['binaries']:
                    BUILD = 1
                # Checking if the tool if already built or is in the process of being built by another process
                if dic[sections[i]]['ProcessID'] not in currentProcess:
                    # We shall continue to build the next tools and we'll copy these binaries later
                    logging.info("=" * 55)
                    logging.info("%s is being built by someother process, we'll build the remaining tools" % sections[i])
                    continue
            except Exception as e:
                logging.debug(e)
                sys.exit(1)
            if BUILD == 0:
                #Collecting the build files in the Workspace binary dir and main binary dir
                WS_prev_build_files = getAllFilesRecursive(WS_GEN_DIR)
                prev_build_files = getAllFilesRecursive(GEN_DIR)
                #checking if the required binaries are present or not in the main binary dir
                for j in dic[sections[i]]['binaries']:
                    if j not in prev_build_files:
                        if j != BUILD_MAPPING_FILE:
                            #the binaries are not present we have to build it
                            BUILD = 1
            if BUILD == 1:
                # start from a fresh copy of the build script
                if os.path.exists(des_build_file):
                    os.remove(des_build_file)
                shutil.copyfile(os.path.abspath(source_build_file), des_build_file)
                try:
                    result = generate_build(config, sections[i], dir_name, des_build_file)
                except Exception, e:
                    logging.info(e)
                else:
                    if result:
                        return result
                result = build_each_tool(dir_name, sections[i], des_build_file, target_arch)
                if result:
                    # non-zero result means the build failed
                    build_flag = server_utils.get_fault_tolerance_config("fault_tolerance", "build_error_continue")
                    # release ownership and clear the (failed) binaries
                    with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                        dic[sections[i]]['ProcessID'] = 0
                        dic[sections[i]]['binaries'] = []
                        fp = open(BUILD_MAPPING_FILE, 'w')
                        fp.write(yaml.dump(dic, default_flow_style=False))
                        fp.close()
                    if build_flag == 1:
                        #Build has failed so delete the section entry in the build_mapping.yaml
                        continue
                    else:
                        return result
                if os.path.exists(des_build_file):
                    os.remove(des_build_file)
            else:
                #Copy the generated binaries to the Workspace binaries
                for j in dic[sections[i]]['binaries']:
                    if j not in WS_prev_build_files:
                        if j != BUILD_MAPPING_FILE:
                            # assumes binary paths have >=5 leading
                            # components — TODO confirm GEN_DIR layout
                            WS_Dir = os.path.join(WS_GEN_DIR, '/'.join(j.split('/')[5:-1]))
                            try:
                                os.makedirs(WS_Dir)
                            except:
                                pass
                            shutil.copy(j, WS_Dir)
                logging.info("=" * 55)
                logging.info("%s is already build", sections[i])
            #get the binary present in the WS binary dir
            #WS_prev_build_dir - WS_current_build_dir = "TOOL CHAIN RELATED BINARIES"
            #Copy the ToolChainRelated binaries to the main binary folder
            WS_current_build_files = getAllFilesRecursive(WS_GEN_DIR)
            for files in WS_current_build_files:
                if files not in WS_prev_build_files:
                    deflocation = os.path.join(str(GEN_DIR), '/'.join(files.split('/')[6:]))
                    try:
                        os.makedirs('/'.join(deflocation.split('/')[:-1]))
                    except:
                        pass
                    if not os.path.exists(deflocation):
                        shutil.copy(files, deflocation)
                    (dic[sections[i]]['binaries']).append(str(deflocation))
            # release ownership and merge our entry back into the shared
            # mapping file under the lock
            dic[sections[i]]['ProcessID'] = 0
            with client_utils.SimpleFlock(BUILD_MAPPING_FILE, 60):
                fp = open(BUILD_MAPPING_FILE)
                temp = yaml.load(fp)
                if type(temp) != dict:
                    temp = {}
                fp.close()
                copy_dic(temp, dic, sections[i])
                fp = open(BUILD_MAPPING_FILE, 'w')
                fp.write(yaml.dump(dic, default_flow_style=False))
                fp.close()
def caliper_run(target_exec_dir, target):
    """Run every successfully-built benchmark selected for this target.

    :param target_exec_dir: execution directory whose case-definition
        files select the benchmarks to run
    :param target: remote target handle providing ``run()`` for shell
        commands on the device under test
    :returns: 0 on success; the failing run result when a benchmark
        raises and fault tolerance is disabled
    :raises AttributeError: when a section lacks the 'run'/'parser'
        options
    """
    # get the test cases defined files
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration are %s" % config_files)
    for i in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)
        # get if it is the 'common' or 'arm' or 'android'
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)
        # NOTE(review): this inner loop reuses the index name 'i',
        # shadowing the outer file index; harmless but worth renaming.
        for i in range(0, len(sections)):
            # run for each benchmark; skip tools whose build did not
            # leave a '<tool>_<arch>.suc' marker
            target_arch = server_utils.get_host_arch(target)
            build_name = sections[i] + '_' + target_arch + '.suc'
            build_suc = os.path.join(Folder.build_dir, build_name)
            if not os.path.exists(build_suc):
                continue
            # NOTE(review): unlike build_suc, this '.fail' marker is not
            # joined with Folder.build_dir, so the check runs against the
            # current working directory — looks like a bug, confirm.
            build_host_name = sections[i] + '_' + \
                server_utils.get_local_machine_arch() + '.fail'
            if os.path.exists(build_host_name):
                continue
            # try to resolve the configuration of the configuration file
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("The is no option value of parser")
            print_format()
            logging.info("Running %s" % sections[i])
            bench = os.path.join(classify, sections[i])
            try:
                result = run_all_cases(target_exec_dir, target, bench,
                                       sections[i], run_file)
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                # ltp mounts an NFS share on the target; make sure it is
                # unmounted even after a failed run (force-kill users of
                # the mount point if a plain umount fails)
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/; fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then fuser -km /mnt/caliper_nfs ;fi"
                        )
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    # NOTE(review): 'result' is unbound here when
                    # run_all_cases raised on the first benchmark —
                    # confirm intended failure value.
                    return result
            else:
                logging.info("Running %s Finished" % sections[i])
                # same NFS cleanup on the success path
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then fuser -km /mnt/caliper_nfs/ ;fi"
                        )
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                print_format()
    return 0
def caliper_run(target_exec_dir, server, target):
    """Run every successfully-built benchmark selected for this target,
    coordinating 'server'-classified benchmarks with a remote caliper
    server over ssh and a TCP socket.

    :param target_exec_dir: execution directory whose case-definition
        files select the benchmarks to run
    :param server: remote server handle (``run()``) used to launch
        server.py; may be falsy to skip server coordination
    :param target: remote target handle providing ``run()``
    :returns: 0 on success; the failing run result when a benchmark
        raises and fault tolerance is disabled
    :raises AttributeError: on missing 'run'/'parser' options or when
        the server connection cannot be established
    """
    # get the test cases defined files
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration are %s" % config_files)
    for i in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)
        # get if it is the 'common' or 'arm' or 'android'
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)
        if classify == "server" and server:
            # launch server.py on the remote server over ssh; the
            # benchmarks below then synchronise with it via a socket
            try:
                server_ip = settings.get_value("SERVER", "ip", type=str)
                server_port = settings.get_value("SERVER", "port", type=int)
                server_user = settings.get_value("SERVER", "user", type=str)
                logging.info(
                    "Please wait while caliper triggers the server.py script in the server"
                )
                server_pwd = server.run("pwd").stdout
                server_pwd = server_pwd.split("\n")[0]
                server_caliper_dir = os.path.join(server_pwd,
                                                  "caliper_server")
                server_caliper_dir = os.path.join(server_caliper_dir,
                                                  "server.py")
                server_user = server_user + '@' + server_ip
                script = server_caliper_dir + ' ' + str(server_port)
                # fire-and-forget: server.py keeps running while the
                # benchmarks execute
                subprocess.Popen(
                    ['ssh', '%s' % server_user, 'python %s' % script])
            except Exception as e:
                logging.info(e)
                raise AttributeError(
                    "Error in establising connection with server")
        # NOTE(review): this inner loop reuses the index name 'i',
        # shadowing the outer file index; harmless but worth renaming.
        for i in range(0, len(sections)):
            # run for each benchmark; skip tools whose build did not
            # leave a '<tool>_<arch>.suc' marker
            target_arch = server_utils.get_host_arch(target)
            build_name = sections[i] + '_' + target_arch + '.suc'
            build_suc = os.path.join(Folder.build_dir, build_name)
            if not os.path.exists(build_suc):
                continue
            # NOTE(review): unlike build_suc, this '.fail' marker is not
            # joined with Folder.build_dir — looks like a bug, confirm.
            build_host_name = sections[i] + '_' + \
                server_utils.get_local_machine_arch() + '.fail'
            if os.path.exists(build_host_name):
                continue
            # try to resolve the configuration of the configuration file
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("The is no option value of parser")
            print_format()
            logging.info("Running %s" % sections[i])
            bench = os.path.join(classify, sections[i])
            try:
                system_initialise(target)
                if classify == "server":
                    # block until the remote server.py grants access
                    logging.info("Waiting for server to grant access")
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.connect((server_ip, server_port))
                    logging.info("%s" % str(sock.recv(1024)))
                result = run_all_cases(target_exec_dir, target, bench,
                                       sections[i], run_file)
                if classify == "server":
                    # tell the server we are done with this benchmark
                    sock.send("1")
                    sock.close()
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                # ltp mounts an NFS share on the target; make sure it is
                # unmounted even after a failed run
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/; fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then fuser -km /mnt/caliper_nfs ;fi"
                        )
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    # NOTE(review): 'result' is unbound here when the
                    # exception fired before run_all_cases returned —
                    # confirm intended failure value.
                    return result
            else:
                logging.info("Running %s Finished" % sections[i])
                # same NFS cleanup on the success path
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then fuser -km /mnt/caliper_nfs/ ;fi"
                        )
                        unmount = target.run(
                            "if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                print_format()
    return 0
def caliper_run(target_exec_dir, target):
    """Run every successfully-built benchmark selected for this target,
    initialising the target system before each run.

    :param target_exec_dir: execution directory whose case-definition
        files select the benchmarks to run
    :param target: remote target handle providing ``run()`` for shell
        commands on the device under test
    :returns: 0 on success; the failing run result when a benchmark
        raises and fault tolerance is disabled
    :raises AttributeError: when a section lacks the 'run'/'parser'
        options
    """
    # get the test cases defined files
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration are %s" % config_files)
    for i in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        config_file = os.path.join(config_files[i])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)
        # get if it is the 'common' or 'arm' or 'android'
        classify = config_files[i].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)
        # NOTE(review): this inner loop reuses the index name 'i',
        # shadowing the outer file index; harmless but worth renaming.
        for i in range(0, len(sections)):
            # run for each benchmark; skip tools whose build did not
            # leave a '<tool>_<arch>.suc' marker
            target_arch = server_utils.get_host_arch(target)
            build_name = sections[i]+'_'+target_arch+'.suc'
            build_suc = os.path.join(Folder.build_dir, build_name)
            if not os.path.exists(build_suc):
                continue
            # NOTE(review): unlike build_suc, this '.fail' marker is not
            # joined with Folder.build_dir — looks like a bug, confirm.
            build_host_name = sections[i] + '_' + \
                server_utils.get_local_machine_arch() + '.fail'
            if os.path.exists(build_host_name):
                continue
            # try to resolve the configuration of the configuration file
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("The is no option value of parser")
            print_format()
            logging.info("Running %s" % sections[i])
            bench = os.path.join(classify, sections[i])
            try:
                # prepare the target system, then execute the benchmark
                system_initialise(target)
                result = run_all_cases(target_exec_dir, target, bench,
                                       sections[i], run_file)
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                # ltp mounts an NFS share on the target; make sure it is
                # unmounted even after a failed run (force-kill users of
                # the mount point if a plain umount fails)
                if sections[i] == "ltp":
                    try:
                        unmount = target.run("if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/; fi")
                    except Exception:
                        unmount = target.run("if df -h |grep caliper_nfs ; then fuser -km /mnt/caliper_nfs ;fi")
                        unmount = target.run("if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi")
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    # NOTE(review): 'result' is unbound here when the
                    # exception fired before run_all_cases returned —
                    # confirm intended failure value.
                    return result
            else:
                logging.info("Running %s Finished" % sections[i])
                # same NFS cleanup on the success path
                if sections[i] == "ltp":
                    try:
                        unmount = target.run("if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi")
                    except Exception:
                        unmount = target.run("if df -h |grep caliper_nfs ; then fuser -km /mnt/caliper_nfs/ ;fi")
                        unmount = target.run("if df -h |grep caliper_nfs ; then umount /mnt/caliper_nfs/ ;fi")
                print_format()
    return 0