def main(args): zipfilepath = args.zip if zipfilepath is None: print "pass arguements correctly!" exit(-1) xmlfilepath = args.xmlfile zip_path = zipfilepath if utils.valid_file(zip_path) is not True: print "bad zip" exit(-1) data_for_all_files = [] path_to_extract = utils.random_temp_path(TEMP_DIR) utils.extractor(zip_path, path_to_extract) list_of_all_files = utils.getListOfFiles(path_to_extract) for path_to_file in list_of_all_files: uid = utils.get_uuid() filename = utils.stripfilepath(path_to_file) rel_path = utils.get_relative_path(path_to_file, path_to_extract) md5hash = utils.md5sum(path_to_file) filesize = utils.get_file_size(filepath=path_to_file) data = FileDetails(file_uuid=uid, file_name=filename, file_full_path=path_to_file, relative_path=rel_path, file_md5hash=md5hash, file_size=filesize) data_for_all_files.append(data) XS.XMLSerialize(data_for_all_files, xmlfilepath) utils.cleanup(path_to_extract) exit(0)
def create_docker_compose(timestamp):
    """Write a docker-compose.yml for the given call-history timestamp.

    The file is assembled from the static COMPOSE_LINES_* templates with a
    volume mapping for ../call-history/<timestamp> spliced in between them.

    :Arguments:
        1. timestamp (str) = directory name under ../call-history to target
    :Returns:
        True on success
    :Raises:
        RuntimeError wrapping any underlying path/I-O error
    """
    try:
        file_dir = get_relative_path(
            '../call-history/{0}/docker-compose.yml'.format(timestamp))
        host_path = get_relative_path("../call-history/{0}".format(timestamp))
        volume_path_line = ' - {0}:/usr/share/filebeat/filebeat-volume\n'.format(
            host_path)
        # "with" guarantees the handle is closed even when a write raises;
        # the original leaked the open handle on error.
        with open(file_dir, 'w') as docker_compose_file:
            docker_compose_file.write(''.join(COMPOSE_LINES_BEFORE_VOLUME_PATH))
            docker_compose_file.write(volume_path_line)
            docker_compose_file.write(''.join(COMPOSE_LINES_AFTER_VOLUME_PATH))
    except Exception as e:
        raise RuntimeError(str(e))
    return True
def run_file(filename, counter): """ This function runs warhorn.py using the newly created xml file. :Returns: 1. counter (int) = Counter value changes to 1 if the user decides to run warhorn.py; otherwise remains 0. """ aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE) neg_pattern = re.compile("^(n|no)$", re.IGNORECASE) answer = raw_input("Do you want to run this file now? (yes[Enter]/no): ") if aff_pattern.match(answer): print "Running Warhorn" dir_path = os.path.dirname(os.path.realpath(sys.argv[0])) rel_path = get_relative_path(os.path.dirname(dir_path), "warhorn.py") subprocess.call(["python", rel_path, filename]) if counter == 0: print "You can view the config file in the logs folder." elif neg_pattern.match(answer): if counter == 0: print "File discarded." elif counter == 1: print "File saved for later use." else: print "The command was not recognized. Please answer 'yes' or 'no'." run_file(filename, counter)
def get_dir_path_to_save_file(new_file_name): """ This function prompts the user to enter the path to the directory in which he wants to save the newly created xml file. :Arguments: 1. new_file_name (str) = This is the new name of the xml file :Returns: 1. new_file_path (str) = This is the path of the xml file to where it will be saved """ aff_pattern = re.compile("^(|y|yes)$", re.IGNORECASE) neg_pattern = re.compile("^(n|no)$", re.IGNORECASE) answer = raw_input("Would you like to store " + new_file_name + " in the user_generated folder? (yes[Enter]/no): ") if aff_pattern.match(answer): dir_path = os.path.dirname(os.path.realpath(sys.argv[0])) rel_path = get_relative_path(os.path.dirname(dir_path), "user_generated", new_file_name) new_file_path = os.path.join(rel_path) elif neg_pattern.match(answer): path = raw_input("Please enter a new path. If nothing is entered, " + new_file_name + " will get stored in the home folder: ") path = validate_path(path) new_file_path = validate_new_file_name(path, new_file_name) else: print "The command was not recognized. Please answer 'yes' or 'no'." new_file_path = get_dir_path_to_save_file(new_file_name) return new_file_path
def create_filebeat_config(timestamp, index_pattern):
    """Write a filebeat.yml for the given call-history timestamp.

    Concatenates the input, logstash (parameterized by index_pattern) and
    kibana sections produced by the create_* helpers.

    :Arguments:
        1. timestamp (str) = directory name under ../call-history to target
        2. index_pattern (str) = index pattern passed to the logstash section
    :Returns:
        True on success
    :Raises:
        RuntimeError wrapping any underlying path/I-O error
    """
    try:
        file_dir = get_relative_path(
            '../call-history/{0}/filebeat.yml'.format(timestamp))
        # "with" guarantees the handle is closed even when a write raises;
        # the original leaked the open handle on error.
        with open(file_dir, 'w') as conf_file:
            conf_file.write(create_input_str())
            conf_file.write(create_logstash_str(index_pattern))
            conf_file.write(create_kibana())
    except Exception as e:
        raise RuntimeError(str(e))
    return True
def main():
    """
    Build the output xml by walking data.xml (located next to this script),
    pretty-print the result to temp.xml, and hand that file to save_file()
    for running/saving.
    """
    root = Element('data')
    dir_path = os.path.dirname(os.path.realpath(sys.argv[0]))
    rel_path = get_relative_path(dir_path, "data.xml")
    tree = xml.etree.ElementTree.parse(rel_path)
    input_root = tree.getroot()
    nodes = get_firstlevel_children(input_root, "tag")
    populate_xml(root, nodes)
    temp_xml = 'temp.xml'
    pretty_xml = minidom.parseString(xml.etree.ElementTree.tostring(root))\
        .toprettyxml(indent=" ")
    # "with" already flushes and closes on exit; the original's explicit
    # flush()/close() calls inside the block were redundant.
    with open(temp_xml, "w") as config_file:
        config_file.write(pretty_xml)
    save_file(temp_xml)
def minimize_file(filepath):
    """
    Run lithium on a java source file and return an object that contains
    the filepath and the similar loc in the original file:

        obj = {
            "class": ".../ClassA.java",
            "loc": [1, 50, 52, 54, ...]
        }
    """
    global project, bug_number, test_case, expected_msg_path
    output_lithium = {}  # structure to store the output of lithium

    # /tmp directories
    project_dir = tempfile.mkdtemp(prefix="lithium-slicer_")
    lithium_tmp = tempfile.mkdtemp(prefix="lithium-interesting_")

    # checkout the project - @TODO: not necessary anymore
    checkout_project(project, bug_number, project_dir)

    # update filepath path
    java_file = os.path.join(project_dir, filepath)
    filename = os.path.basename(java_file)
    # restored the {filename} placeholders below: the originals passed the
    # keyword to .format() but the format string had no field, so the log
    # lines never showed the file name
    logger.info("Minimizing {filename}".format(filename=filename))

    # saves original file in log_dir
    origin_filename = os.path.basename(java_file)
    origin_path = os.path.join(log_testcase_dir, origin_filename)
    copy(java_file, origin_path)

    # remove comments in original file
    if remove_comments:
        logger.info(
            "Removing comments for {filename}".format(filename=filename))
        uncomment_path = os.path.join(log_testcase_dir,
                                      "uncomment_" + origin_filename)
        if platform.system() == 'Darwin':
            bash_cmd = '/usr/libexec/java_home -v 1.8 --exec java -cp java-parser-comments-remover-1.0-SNAPSHOT-jar-with-dependencies.jar com.tqrg.cleaner.Cleaner ' + java_file + ' ' + uncomment_path
        elif platform.system() == 'Linux':
            bash_cmd = '/usr/lib/jvm/java-8-oracle/bin/java -cp java-parser-comments-remover-1.0-SNAPSHOT-jar-with-dependencies.jar com.tqrg.cleaner.Cleaner ' + java_file + ' ' + uncomment_path
        # NOTE(review): on any other platform bash_cmd is unbound and the
        # next line raises NameError -- confirm the supported platforms.
        process = subprocess.Popen(bash_cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        copy(uncomment_path, java_file)
        logger.info("Removing comments was finished for {filename}".format(
            filename=filename))

    # run lithium
    try:
        start_lithium = time.time()
        cmd_line = "python3 -m lithium --tempdir={TEMPDIR} interesting {PROJECTDIR} {TESTCASE} {EXPECTED_MSG_PATH} {FILE}"
        cmd_line = cmd_line.format(TEMPDIR=lithium_tmp,
                                   PROJECTDIR=project_dir,
                                   TESTCASE=test_case,
                                   FILE=java_file,
                                   EXPECTED_MSG_PATH=expected_msg_path)
        logger.info("Running lithium for {filename}".format(filename=filename))
        call(split(cmd_line), stderr=STDOUT)
        logger.info(
            "Lithium was finished for {filename}".format(filename=filename))

        # copy minimized file
        logger.info(
            "Copying minimized file to {log_dir}".format(log_dir=log_dir))
        minimized_filename = "lithium_" + filename
        minimized_path = os.path.join(log_testcase_dir, minimized_filename)
        copy(java_file, minimized_path)

        # update
        output_lithium["class"] = get_relative_path(project, java_file,
                                                    args.bug_number[0])
        output_lithium["loc"] = get_locs(
            origin_path, minimized_path) if not remove_comments else get_locs(
            uncomment_path, minimized_path)

        est_time = int((time.time() - start_lithium) / 60.0)
        logger.info(
            "The file {filename} was minimized in {time} minutes".format(
                filename=filename, time=est_time))
    except Exception as e:
        # was e.message: removed in Python 3 and deprecated since 2.6, so
        # the re-raise itself blew up; format the exception object instead
        raise Exception("Something happens {}".format(e))
    finally:
        # remove tmp directories
        rmtree(lithium_tmp, ignore_errors=True)
        rmtree(project_dir, ignore_errors=True)

    return output_lithium
def compute(self):
    """ compute() -> None
    Dispatch the display event to the spreadsheet with images and labels
    """
    model_workspace = utils.get_relative_path(self.get_input("ModelWorkspace"), self)
    if self.has_input("ModelWorkspace") and \
            utils.check_if_model_finished(model_workspace):
        window = spreadsheetController.findSpreadsheetWindow()
        model_dir_full = os.path.normcase(model_workspace)
        model_dir = os.path.split(model_dir_full)[1]
        # kept even though unused below: index() raising on a directory
        # name without '_' is part of the original behavior
        model_name = model_dir[:model_dir.index('_')]

        def _staged_copy(suffix):
            # locate the output file with this suffix under the model dir
            # and stage a local copy in the file pool; None when absent
            found = self.findFile(model_dir_full, suffix)
            if os.path.exists(found):
                return window.file_pool.make_local_copy(found)
            return None

        auc_graph = _staged_copy('_modelEvalPlot.png')
        text_output = _staged_copy('_output.txt')

        response_curves = []
        response_directory = os.path.join(model_dir_full, 'responseCurves')
        if os.path.exists(response_directory):
            for response_curve in os.listdir(response_directory):
                if response_curve != "Thumbs.db":  # Windows grief
                    response_curves.append(
                        os.path.join(response_directory, response_curve))

        calibration_graph = _staged_copy('_CalibrationPlot.png')
        confusion_graph = _staged_copy('.confusion.matrix.png')
        residuals_graph = _staged_copy('.resid.plot.png')
        variable_graph = _staged_copy('_variable.importance.png')

        model_label = model_dir.capitalize().replace('output', 'Output')
        utils.set_sheet_location(self)

        if self.has_input('InitialModelOutputDisplay'):
            initial_display = self.get_input('InitialModelOutputDisplay')
        else:
            initial_display = 'AUC'

        self.cellWidget = self.displayAndWait(
            SAHMOutputViewerCellWidget,
            (auc_graph, text_output, response_curves, calibration_graph,
             confusion_graph, residuals_graph, variable_graph, model_label,
             initial_display))
    else:
        fileValue = None
def compute(self): """ compute() -> None Dispatch the display event to the spreadsheet with images and labels """ model_workspaces = self.get_input_list("ModelWorkspaces") if len(model_workspaces) < 1 or len(model_workspaces) > 4: raise RuntimeError('Between 1 and 4 ModelWorkspaces must be supplied!') #TODO add in check to make sure all models finished successfully #if still running raise module suspended workspaces = [] for model_workspace in model_workspaces: rel_workspace = utils.get_relative_path(model_workspace, self) rel_workspace = os.path.join(rel_workspace, 'modelWorkspace') workspaces.append(os.path.normpath(rel_workspace)) self.location = utils.get_curve_sheet_location(self) # find the next available port port = 5678 while port_occupied('127.0.0.1', port): print "port ({}) occupied trying next".format(port) port += 1 if port > 6000: raise RuntimeError('Unable to find unoccupied port!') # launch the Shiny app args = {} args['port'] = str(port) args['wsList'] = ",".join(workspaces) script = "ResponseCurveShinyApp.r" cmd = utils.gen_R_cmd(script, args) if self.has_input('run_name_info'): runinfo = self.force_get_input('run_name_info') subfolder = runinfo.contents.get('subfolder', "") runname = runinfo.contents.get('runname', "") else: subfolders = [] runnames = [] for outdname in workspaces: _subfolder, _runname = utils.get_previous_run_info(os.path.split(outdname)[0]) subfolders.append(_subfolder) runnames.append(_runname) if all(x == subfolders[0] for x in subfolders): subfolder = subfolders[0] else: subfolder = '' if all(x == runnames[0] for x in runnames): runname = runnames[0] else: runname = '' outdname = os.path.join(utils.getrootdir(), subfolder, "ResponseCurveExplorerOutput") if not os.path.exists(outdname): os.makedirs(outdname) outdname = utils.mknextdir(prefix="ResponseCurveExplorerOutput", skipSequence=False, directory=os.path.join(utils.getrootdir(), subfolder)) outfname = utils.mknextfile("ResponseCurveExplorer_stdout", suffix=".txt", 
directory=outdname, runname=runname) errfname = utils.mknextfile("ResponseCurveExplorer_stderr", suffix=".txt", directory=outdname, runname=runname) utils.writetolog("\nStarting processing of " + script , True) utils.writetolog(" command used: \n" + utilities.convert_list_to_cmd_str(cmd), False, False) stdErrFile = open(errfname, 'a') stdErrFile.seek(0, os.SEEK_END) stdOutFile = open(outfname, 'a') stdOutFile.seek(0, os.SEEK_END) p = subprocess.Popen(cmd, stderr=stdErrFile, stdout=stdOutFile) start_time = time.clock() CUTOFF_SECONDS = 300 # 5min while True: with open(outfname) as stdout_f: stdout = stdout_f.read() try: listening = stdout.split("Listening on ") url = listening[-1].strip() except: pass with open(errfname) as stderr_f: stderr = stderr_f.read() if "Error" in stderr: raise RuntimeError("Running Shiny App resulted in an Error:\n{}".format(stderr)) else: listening = stderr.split("Listening on ") url = listening[-1].strip() if url.startswith("http:"): break time.sleep(5) if time.clock() - start_time > CUTOFF_SECONDS: msg = "Shiny App taking longer than expected to load!.\n" msg += "The R kernel generating the Shiny app will continue to run in the background.\n" msg += "Manually monitor the contents of {\n}\tfor the url which you can then open in a browser.".format(errfname) raise RuntimeError(msg) self.cellWidget = self.displayAndWait(responseCurveExplorerWidger, (p, url, None))