def install(self):
    """Install the automated workflow.

    Runs the install script, saving its stdout/stderr into log files in
    the staging directory. Scans both streams for error keywords and
    records a Warning (stdout hit) or Error (stderr hit), then collects
    the resulting fault/simulation directories from the runs directory.
    """
    script_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "../scripts/cybershake/install_cybershake.py",
    )
    cmd = "python {} {} {} {} --seed {} --stat_file_path {}".format(
        script_path,
        self.stage_dir,
        os.path.join(
            self.stage_dir,
            os.path.basename(self.config_dict[self.cf_fault_list_key]),
        ),
        self.version,
        self.config_dict[const.RootParams.seed.value],
        self.config_dict["stat_file"],
    )
    # Optional flags, only added when explicitly set to True in the config.
    if self.config_dict.get("extended_period") is True:
        cmd += " --extended_period"
    if self.config_dict.get("keep_dup_stations") is True:
        cmd += " --keep_dup_stations"

    print("Running install...\nCmd: {}".format(cmd))
    out_file = os.path.join(self.stage_dir, self.install_out_file)
    err_file = os.path.join(self.stage_dir, self.install_err_file)
    with open(out_file, "w") as out_f, open(err_file, "w") as err_f:
        exe(cmd, debug=False, stdout=out_f, stderr=err_f)

    # Check for errors
    # Get these straight from execution?
    # Read the logs back with context managers; the previous
    # open(...).read() calls leaked the file handles.
    with open(out_file, "r") as f:
        output = f.read()
    with open(err_file, "r") as f:
        error = f.read()

    if any(cur_str in output.lower() for cur_str in self.error_keywords):
        msg = "There appears to be errors in the install. Error keyword found in stdout!"
        print(msg)
        print("##### INSTALL OUTPUT #####")
        print(output)
        print("##########################")
        self.warnings.append(Warning("Install - Stdout", msg))
    if any(cur_str in error.lower() for cur_str in self.error_keywords):
        msg = "There appears to be errors in the install. Error keyword found in stderr!"
        print(msg)
        print("##### INSTALL OUTPUT #####")
        print(error)
        print("##########################")
        self.errors.append(Error("Install - Stderr", msg))

    self.fault_dirs, self.sim_dirs = get_sim_dirs(self.runs_dir)
def test_srf2corners(test_srf, filename, sample_cnr_file_path):
    """Generate a corners file from the SRF and diff it against the sample.

    NOTE: The testing was carried out based on the assumption that the
    hypocentre was correct; srf.srf2corners calls get_hypo internally,
    which provides the hypocentre value.
    """
    corners_path = os.path.join(DIR_NAME, filename)
    print("abs_filename: ", corners_path)

    srf.srf2corners(test_srf, cnrs=corners_path)

    diff_out, diff_err = shared.exe("diff " + sample_cnr_file_path + " " + corners_path)
    # Identical files produce no diff output and no error output.
    assert len(diff_out) == 0 and len(diff_err) == 0

    utils.remove_file(corners_path)
def check_data(s1, s2):
    """Load data of two timeseries objs to txt files and compare.

    Dumps both objects via all2txt into separate directories, diffs each
    pair of files, and asserts the dumps are identical before cleaning up.
    """
    utils.setup_dir(TXT_DIR_1)
    utils.setup_dir(TXT_DIR_2)
    s1.all2txt(prefix="./{}/".format(TXT_DIR_1))
    s2.all2txt(prefix="./{}/".format(TXT_DIR_2))
    for f in os.listdir(TXT_DIR_1)[:200]:
        out, err = shared.exe(
            "diff {} {}".format(os.path.join(TXT_DIR_1, f), os.path.join(TXT_DIR_2, f))
        )
        # Previously the diff result was discarded, so differing files never
        # failed the check; assert the two dumps match.
        assert len(out) == 0 and len(err) == 0
    shutil.rmtree(TXT_DIR_1)
    shutil.rmtree(TXT_DIR_2)
def set_up(request):
    """Pytest fixture: download and extract benchmark data for each realisation.

    Creates one "sample<i>" directory per (realisation, url) pair, yields the
    list of data directories to the tests, then removes them on teardown.
    Exits the run outright if a download or extraction fails.
    """
    test_data_save_dirs = []
    for i, (REALISATION, DATA_DOWNLOAD_PATH) in enumerate(REALISATIONS):
        data_store_path = os.path.join(os.getcwd(), "sample" + str(i))
        zip_download_path = os.path.join(data_store_path, REALISATION + ".zip")

        download_cmd = "wget -O {} {}".format(zip_download_path, DATA_DOWNLOAD_PATH)
        unzip_cmd = "unzip {} -d {}".format(zip_download_path, data_store_path)
        # print(DATA_STORE_PATH)
        test_data_save_dirs.append(data_store_path)
        if not os.path.isdir(data_store_path):
            os.makedirs(data_store_path, exist_ok=True)

            out, err = shared.exe(download_cmd, debug=False)
            if "error" in err:
                shutil.rmtree(data_store_path)
                sys.exit("{} failed to retrieve test data".format(err))
            # download_via_ftp(DATA_DOWNLOAD_PATH, zip_download_path)
            if not os.path.isfile(zip_download_path):
                sys.exit(
                    "File failed to download from {}. Exiting".format(
                        DATA_DOWNLOAD_PATH
                    )
                )

            out, err = shared.exe(unzip_cmd, debug=False)
            os.remove(zip_download_path)
            if "error" in err:
                shutil.rmtree(data_store_path)
                sys.exit("{} failed to extract data folder".format(err))
        else:
            # Fixed typo in message: "exits" -> "exists".
            print("Benchmark data folder already exists: ", data_store_path)

    # Run all tests
    yield test_data_save_dirs

    # Remove the test data directory
    for PATH in test_data_save_dirs:
        if os.path.isdir(PATH):
            shutil.rmtree(PATH, ignore_errors=True)
def test_set_up(realizations):
    """
    Downloads test data and places each set in a separate folder, then returns
    the list of test data locations

    :param realizations: A list of (realisation name, data zip url) pairs to
        download data for testing
    :return: A list of test data locations, containing the contents of the
        downloaded zip files
    """
    save_dirs = []
    for idx, (name, url) in enumerate(realizations):
        target_dir = os.path.join(os.getcwd(), "sample" + str(idx))
        archive_path = os.path.join(target_dir, name + ".zip")

        fetch_cmd = "wget -O {} {}".format(archive_path, url)
        extract_cmd = "unzip {} -d {}".format(archive_path, target_dir)
        # print(DATA_STORE_PATH)
        save_dirs.append(target_dir)

        # Skip the download when a previous run left the data in place.
        if os.path.isdir(target_dir):
            print("Benchmark data folder already exists: ", target_dir)
            continue

        os.makedirs(target_dir, exist_ok=True)
        out, err = shared.exe(fetch_cmd, debug=False)
        if "error" in err:
            rmtree(target_dir)
            sys.exit("{} failed to retrieve test data".format(err))
        # download_via_ftp(DATA_DOWNLOAD_PATH, zip_download_path)
        if not os.path.isfile(archive_path):
            sys.exit("File failed to download from {}. Exiting".format(url))

        out, err = shared.exe(extract_cmd, debug=False)
        os.remove(archive_path)
        if "error" in err:
            rmtree(target_dir)
            sys.exit("{} failed to extract data folder".format(err))

    print(save_dirs)
    # Run all tests
    return save_dirs
def test_e3ds(bench_path, test_path):
    """Compare each e3d segment in the test output against the benchmark.

    :param bench_path: path to benchmark OutBin folder
    :param test_path: path to the OutBin folder produced by the test run
    """
    print("{}testing e3d segments{}".format(DIVIDER, DIVIDER))
    bench_e3ds = sorted(os.listdir(bench_path))
    test_e3ds = sorted(os.listdir(test_path))
    assert len(bench_e3ds) == len(test_e3ds)
    for bench_name, test_name in zip(bench_e3ds, test_e3ds):
        out, err = shared.exe(
            "diff {} {}".format(
                os.path.join(bench_path, bench_name),
                os.path.join(test_path, test_name),
            )
        )
        # Previously the diff result was discarded, so mismatching segments
        # never failed the test; assert the files are identical.
        assert len(out) == 0 and len(err) == 0
def task_runner_no_debug(*args, **kwargs):
    """Invoke `exe` with debug output disabled; all other arguments pass through."""
    return exe(*args, **kwargs, debug=False)
from qcore.test.tool import utils from qcore import shared import numpy as np import getpass from datetime import datetime import sys import shutil import errno XYTS_DOWNLOAD_PATH = "https://www.dropbox.com/s/zge70zvntzxatpo/xyts.e3d?dl=0" XYTS_STORE_PATH = os.path.join("/home", getpass.getuser(), "xyts.e3d") DOWNLOAD_CMD = "wget -O {} {}".format(XYTS_STORE_PATH, XYTS_DOWNLOAD_PATH) if not os.path.isfile(XYTS_STORE_PATH): out, err = shared.exe(DOWNLOAD_CMD, debug=False) if "failed" in err: os.remove(XYTS_STORE_PATH) sys.exit("{} failted to download xyts benchmark file".format(err)) else: print("Successfully downloaded benchmark xyts.e3d") else: print("Benchmark xyts.e3d already exits") SAMPLE_OUT_DIR_PATH = os.path.join( os.path.abspath(os.path.dirname(__file__)), "sample1/output" ) XYTS_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), "sample1/xyts.e3d") try: os.symlink(XYTS_STORE_PATH, XYTS_FILE) except:
def _run_auto(self, sleep_time: int = 10):
    """
    Runs auto submit

    Parameters
    ----------
    sleep_time: int
        Time (in seconds) between progress checks

    Returns
    -------
    bool
        True on completion, False if the timeout expired first.
    """
    submit_cmd = "python {} {} --sleep_time 2".format(
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../scripts/cybershake/queue_monitor.py",
        ),
        self.stage_dir,
    )

    # Have to put this in a massive try block, to ensure that
    # the run_queue_and_auto_submit process is terminated on any errors.
    try:
        print("Starting cybershake wrapper...")
        p_submit = exe(
            submit_cmd,
            debug=False,
            non_blocking=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        self._processes.append(p_submit)
        p_submit_out_nbsr = NonBlockingStreamReader(p_submit.stdout)
        p_submit_err_nbsr = NonBlockingStreamReader(p_submit.stderr)

        # Create and open the log files. They are registered in
        # self._files so close() can release them later.
        out_submit_f = open(
            os.path.join(self.stage_dir, self.submit_out_file), "w")
        err_submit_f = open(
            os.path.join(self.stage_dir, self.submit_err_file), "w")
        self._files.extend((out_submit_f, err_submit_f))
        outputs_to_check = [
            (out_submit_f, p_submit_out_nbsr),
            (err_submit_f, p_submit_err_nbsr),
        ]

        # Monitor mgmt db
        print("Progress: ")
        start_time = time.time()
        while time.time() - start_time < self.timeout:
            try:
                comp_count, total_count = self.check_mgmt_db_progress()
            except sql.OperationalError as ex:
                print(
                    "Operational error while accessing database. "
                    "Retrying in {} seconds\n{}".format(sleep_time, ex)
                )
                time.sleep(sleep_time)
                continue

            print(
                "Created/Queued/Running/Completed/Total/Expected simulations: {}/{}/{}/{}/{}/{}"
                .format(*comp_count, total_count,
                        len(self.tasks) * self.realisations))

            # Get the log data
            for file, reader in outputs_to_check:
                lines = reader.readlines()
                if lines:
                    file.writelines(lines)
                    file.flush()

            for rel in self._generate_rel_list():
                self._add_task_to_queue(rel, self.task_state[rel])
                self.task_state[rel] += 1

            # comp_count's last entry is the completed count; done when it
            # reaches the expected total.
            if total_count == comp_count[-1]:
                break
            else:
                time.sleep(sleep_time)

        if time.time() - start_time >= self.timeout:
            print("The auto-submit timeout expired.")
            self.errors.append(
                Error("Auto-submit timeout", "The auto-submit timeout expired."))
            return False
    # Clean up. Exceptions still propagate with their original traceback
    # (the previous `except Exception as ex: raise ex` was a no-op that
    # rebuilt the traceback from here).
    finally:
        self.close()

    return True