def cleanup(): """ Clean-up the package including the tests. """ # Clean the tests (note cleanup_tests has a main() so it runs when imported. import tests.cleanup_tests as cleanup_tests cleanup_tests.cleanup_tests() from sbpipe.utils.io import files_with_pattern_recur # Remove all files with suffix .pyc recursively for f in files_with_pattern_recur(SBPIPE, '.pyc'): remove_file_silently(f) # Remove all temporary files (*~) recursively for f in files_with_pattern_recur(SBPIPE, '~'): remove_file_silently(f)
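# The two loops above rely on files_with_pattern_recur() from sbpipe.utils.io.
# A minimal sketch of what such a recursive lookup could look like, assuming it
# simply walks the tree and yields the paths whose names end with the given
# suffix (an illustration only, not the actual sbpipe implementation):
import os


def files_with_pattern_recur_sketch(folder, suffix):
    """Yield every file under `folder` whose name ends with `suffix`."""
    for dirpath, _dirnames, filenames in os.walk(folder):
        for name in filenames:
            if name.endswith(suffix):
                yield os.path.join(dirpath, name)
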
def sim(self, model, inputdir, outputdir, cluster="local", local_cpus=1, runs=1, output_msg=False):
    __doc__ = Simul.sim.__doc__

    # check the Copasi file
    if not self.model_checking(os.path.join(inputdir, model),
                               os.path.join(inputdir, model.replace('.cps', '_check.txt')),
                               'Time-Course'):
        return False
    if not self._run_par_comput(inputdir, model, outputdir, cluster, local_cpus, runs, output_msg):
        return False
    # remove the repeated Copasi files
    repeated_copasi_files = [f for f in os.listdir(inputdir)
                             if re.match(self._get_model_group(model) + '[0-9]+.*.cps', f)]
    for report in repeated_copasi_files:
        remove_file_silently(os.path.join(inputdir, report))
    return True

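# The list comprehension above filters the Copasi files generated for each
# parallel run. A small self-contained illustration of that filter, assuming
# _get_model_group() returns the model name without its .cps extension (an
# assumption made for this example only; the model name is also hypothetical):
import re


def repeated_copasi_files_sketch(filenames, model_group):
    """Return the files matching '<model_group><run number>*.cps'."""
    return [f for f in filenames if re.match(model_group + '[0-9]+.*.cps', f)]


# e.g. repeated_copasi_files_sketch(
#          ['insulin_receptor1.cps', 'insulin_receptor2.cps', 'insulin_receptor.cps'],
#          'insulin_receptor')
# keeps only the two numbered copies produced by the parallel runs.
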
def pe(self, model, inputdir, cluster, local_cpus, runs, outputdir, sim_data_dir, output_msg=False):
    __doc__ = Simul.pe.__doc__

    # check the Copasi file
    if not self.model_checking(os.path.join(inputdir, model),
                               os.path.join(inputdir, model.replace('.cps', '_check.txt')),
                               'Parameter Estimation'):
        return False
    if not self._run_par_comput(inputdir, model, sim_data_dir, cluster, local_cpus, runs, output_msg):
        return False
    # remove the repeated Copasi files
    repeated_copasi_files = [f for f in os.listdir(inputdir)
                             if re.match(self._get_model_group(model) + '[0-9]+.*.cps', f)]
    for report in repeated_copasi_files:
        remove_file_silently(os.path.join(inputdir, report))
    return True

def ps1(self, model, scanned_par, simulate_intervals, single_param_scan_intervals,
        inputdir, outputdir, cluster="local", local_cpus=1, runs=1, output_msg=False):
    __doc__ = Simul.ps1.__doc__

    # check the Copasi file
    if not self.model_checking(os.path.join(inputdir, model),
                               os.path.join(inputdir, model.replace('.cps', '_check.txt')),
                               'Scan'):
        return False
    if not self._run_par_comput(inputdir, model, outputdir, cluster, local_cpus, runs, output_msg):
        return False
    # remove the repeated Copasi files
    repeated_copasi_files = [f for f in os.listdir(inputdir)
                             if re.match(self._get_model_group(model) + '[0-9]+.*.cps', f)]
    for report in repeated_copasi_files:
        remove_file_silently(os.path.join(inputdir, report))
    self.ps1_postproc(model, scanned_par, simulate_intervals, single_param_scan_intervals, outputdir)
    return True

def cleanup_tests(): """ Clean up the test results. """ projects = [f for f in os.listdir(testpath) if isdir(join(testpath, f))] print('Cleaning tests:') for file in projects: if file == '__pycache__': shutil.rmtree(os.path.join(testpath, file), ignore_errors=True) continue print('- ' + file) if file == 'snakemake': print("cleaning output files...") shutil.rmtree(os.path.join(testpath, file, 'Results'), ignore_errors=True) shutil.rmtree(os.path.join(testpath, file, 'log'), ignore_errors=True) shutil.rmtree(os.path.join(testpath, file, '.snakemake'), ignore_errors=True) snake_files = glob.glob(os.path.join(testpath, file, "*.snake")) for f in snake_files: remove_file_silently(f) continue modelspath = join(testpath, file, 'Models') replicated_files = glob.glob(os.path.join(modelspath, "*[0-9].cps")) for f in replicated_files: remove_file_silently(f) check_files = glob.glob(os.path.join(modelspath, "*_check.txt")) for f in check_files: remove_file_silently(f) wfpath = join(testpath, file, 'Results') if file == 'interrupted': # We keep the generated data sets for these tests results = [os.path.join(dp, f) for dp, dn, filenames in os.walk(wfpath) for f in filenames] for f in results: if f.find('param_estim_data') == -1: remove_file_silently(f) continue shutil.rmtree(wfpath, ignore_errors=True)
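
# remove_file_silently() is used throughout the cleanup code. A minimal sketch of
# a helper with that behaviour, assuming "silently" means ignoring a file that is
# already gone (an illustration, not necessarily the sbpipe implementation):
import os


def remove_file_silently_sketch(path):
    """Delete `path` if it exists; do nothing if it does not."""
    try:
        os.remove(path)
    except OSError:
        pass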