def copy_support_files(file_path):
    """Copy the support files to a named destination

    file_path: path where you want the support files to be copied to

    Will raise EmperorSupportFilesError if a problem is found whilst trying
    to copy the files.
    """
    destination = join(file_path, 'emperor_required_resources')

    # make sure the destination folder exists before copying into it
    if not exists(destination):
        makedirs(destination)

    # shutil.copytree cannot merge the contents of a directory into another
    # existing directory, hence the system call; the paths are double-quoted
    # so spaces or other odd characters in them survive the shell
    command = 'cp -R "%s/"* "%s"' % (get_emperor_support_files_dir(),
                                     abspath(destination))
    out, err, status = qcli_system_call(command)

    if err:
        raise EmperorSupportFilesError("Error found whilst trying to copy "
                                       "the support files:\n%s\n Could not "
                                       "execute: %s" % (err, command))
    return
def main(): option_parser, opts, args =\ parse_command_line_parameters(**script_info) Query_collection=[] rank_collection=[] Query_dict=defaultdict(list) temp_dir_name=tempfile.mkdtemp(prefix='root_') level=0 for root,dirs,files in os.walk(opts.input_HMM_fp): print root #print dirs #print files #print ".................." path_to_db=os.path.join(root,'db') path_to_result=os.path.join(root,'result.out') if level==0: stdout,stderr,return_value = qcli_system_call('hmmscan '+path_to_db+' '+opts.input_query_fp+' > '+path_to_result) if return_value != 0: print 'Stdout:\n%s\nStderr:%s\n' % (stdout,stderr) exit(1) HMM_result=open(path_to_result,'U') HMM_choice_list,HMM_Query_list,HMM_choice_list_with_ID=search_HMM(HMM_result) create_temp_test_seq_file(temp_dir_name,HMM_choice_list_with_ID,open(opts.input_query_fp,'U')) rank_collection.extend(HMM_choice_list) Query_collection.extend(HMM_Query_list) for Query_ID, rank in HMM_choice_list_with_ID: Query_dict[Query_ID].append(rank) else: if os.path.basename(root) in rank_collection: path_to_test_seq=os.path.join(temp_dir_name,os.path.basename(root)+'.fasta') stdout,stderr,return_value = qcli_system_call('hmmscan '+path_to_db+' '+path_to_test_seq+' > '+path_to_result) if return_value != 0: print 'Stdout:\n%s\nStderr:%s\n' % (stdout,stderr) exit(1) HMM_result=open(path_to_result,'U') HMM_choice_list,HMM_Query_list,HMM_choice_list_with_ID=search_HMM(HMM_result) create_temp_test_seq_file(temp_dir_name,HMM_choice_list_with_ID,open(path_to_test_seq,'U')) rank_collection.extend(HMM_choice_list) for Query_ID, rank in HMM_choice_list_with_ID: Query_dict[Query_ID].append(rank) level+=1 shutil.rmtree(temp_dir_name) taxonomy_assignment_to_query_seq(Query_dict,Query_collection)
def get_emperor_library_version():
    """Get Emperor version and the git SHA + current branch (if applicable)"""
    project_dir = get_emperor_project_dir()
    version = emperor_library_version

    # ask git for the SHA of HEAD; more information could be retrieved
    # following this pattern
    sha_out = qcli_system_call(
        'git --git-dir %s/.git rev-parse HEAD' % (project_dir))[0]
    head_sha = sha_out.strip()

    # and for the name of the currently checked-out branch
    branch_out = qcli_system_call(
        'git --git-dir %s/.git rev-parse --abbrev-ref HEAD' % (project_dir))[0]
    branch_name = branch_out.strip()

    # only append the git information when both outputs validate; otherwise
    # (e.g. a release tarball with no .git directory) report the bare version
    if is_valid_git_refname(branch_name) and is_valid_git_sha1(head_sha):
        return '%s, %s@%s' % (version, branch_name, head_sha[:7])
    return '%s' % version
def run_script_usage_tests(test_data_dir, scripts_dir, working_dir, verbose=False, tests=None, failure_log_fp=None, force_overwrite=False, timeout=60): """ Test script_usage examples when test data is present in test_data_dir Returns a result summary string and the number of script usage examples (i.e. commands) that failed. """ # process input filepaths and directories test_data_dir = abspath(test_data_dir) working_dir = join(working_dir,'script_usage_tests') if force_overwrite and exists(working_dir): rmtree(working_dir) if failure_log_fp != None: failure_log_fp = abspath(failure_log_fp) if tests == None: tests = [split(d)[1] for d in sorted(glob('%s/*' % test_data_dir)) if isdir(d)] if verbose: print 'Tests to run:\n %s' % ' '.join(tests) addsitedir(scripts_dir) failed_tests = [] warnings = [] total_tests = 0 for test in tests: # import the usage examples - this is possible because we added # scripts_dir to the PYTHONPATH above script_fn = '%s/%s.py' % (scripts_dir,test) script = __import__(test) usage_examples = script.script_info['script_usage'] if verbose: print 'Testing %d usage examples from: %s' % (len(usage_examples),script_fn) # init the test environment test_input_dir = '%s/%s' % (test_data_dir,test) test_working_dir = '%s/%s' % (working_dir,test) copytree(test_input_dir,test_working_dir) chdir(test_working_dir) # remove pre-exisitng output files if any try: script_usage_output_to_remove = script.script_info['script_usage_output_to_remove'] except KeyError: script_usage_output_to_remove = [] for e in script_usage_output_to_remove: rmtree(e.replace('$PWD',getcwd()),ignore_errors=True) remove_files([e.replace('$PWD',getcwd())],error_on_missing=False) if verbose: print ' Running tests in: %s' % getcwd() print ' Tests:' for usage_example in usage_examples: if '%prog' not in usage_example[2]: warnings.append('%s usage examples do not all use %%prog to represent the command name. You may not be running the version of the command that you think you are!' 
% test) cmd = usage_example[2].replace('%prog',script_fn) if verbose: print ' %s' % cmd, timed_out = False initiate_timeout(timeout) try: stdout, stderr, return_value = qcli_system_call(cmd) except TimeExceededError: timed_out = True else: disable_timeout() total_tests += 1 if timed_out: # Add a string instead of return_value - if fail_tests ever ends # up being returned from this function we'll want to code this as # an int for consistency in the return value type. failed_tests.append((cmd, "", "", "None, time exceeded")) if verbose: print ": Timed out" elif return_value != 0: failed_tests.append((cmd, stdout, stderr, return_value)) if verbose: print ": Failed" else: pass if verbose: print ": Pass" if verbose: print '' if failure_log_fp: failure_log_f = open(failure_log_fp,'w') if len(failed_tests) == 0: failure_log_f.write('All script interface tests passed.\n') else: i = 1 for cmd, stdout, stderr, return_value in failed_tests: failure_log_f.write('**Failed test %d:\n%s\n\nReturn value: %s\n\nStdout:\n%s\n\nStderr:\n%s\n\n' % (i,cmd,str(return_value), stdout, stderr)) i += 1 failure_log_f.close() if warnings: print 'Warnings:' for warning in warnings: print ' ' + warning print '' result_summary = 'Ran %d commands to test %d scripts. %d of these commands failed.' % (total_tests,len(tests),len(failed_tests)) if len(failed_tests) > 0: failed_scripts = set([split(e[0].split()[0])[1] for e in failed_tests]) result_summary += '\nFailed scripts were: %s' % " ".join(failed_scripts) if failure_log_fp: result_summary += "\nFailures are summarized in %s" % failure_log_fp rmtree(working_dir) return result_summary, len(failed_tests)
def main(): option_parser, opts, args = parse_command_line_parameters(**script_info) unittest_glob = opts.unittest_glob temp_filepath = opts.temp_filepath script_usage_tests = opts.script_usage_tests suppress_unit_tests = opts.suppress_unit_tests suppress_script_usage_tests = opts.suppress_script_usage_tests suppress_javascript_unit_tests = opts.suppress_javascript_unit_tests # since the test data is in the tests folder just add scripts_test_data emperor_test_data_dir = join(abspath(dirname(__file__)), 'scripts_test_data/') # offer the option for the user to pass the scripts dir from the command # line since there is no other way to get the scripts dir. If not provided # the base structure of the repository will be assumed. Note that for both # cases we are using absolute paths, to avoid unwanted failures. if opts.emperor_scripts_dir is None: emperor_scripts_dir = abspath(join(get_emperor_project_dir(), 'scripts/')) # let's try to guess cases for qiime-deploy type of installs if get_emperor_project_dir().endswith('/lib'): emperor_scripts_dir = abspath(join(get_emperor_project_dir()[:-3], 'scripts/')) else: emperor_scripts_dir = abspath(opts.emperor_scripts_dir) # make a sanity check if (suppress_unit_tests and suppress_script_usage_tests and suppress_javascript_unit_tests): option_parser.error("All tests have been suppresed. 
Nothing to run.") test_dir = abspath(dirname(__file__)) unittest_good_pattern = re.compile('OK\s*$') application_not_found_pattern = re.compile('ApplicationNotFoundError') python_name = 'python' bad_tests = [] missing_application_tests = [] # Run through all of Emperor's unit tests, and keep track of any files which # fail unit tests, note that these are the unit tests only if not suppress_unit_tests: unittest_names = [] if not unittest_glob: for root, dirs, files in walk(test_dir): for name in files: if name.startswith('test_') and name.endswith('.py'): unittest_names.append(join(root,name)) else: for fp in glob(unittest_glob): fn = split(fp)[1] if fn.startswith('test_') and fn.endswith('.py'): unittest_names.append(abspath(fp)) unittest_names.sort() for unittest_name in unittest_names: print "Testing %s:\n" % unittest_name command = '%s %s -v' % (python_name, unittest_name) stdout, stderr, return_value = qcli_system_call(command) print stderr if not unittest_good_pattern.search(stderr): if application_not_found_pattern.search(stderr): missing_application_tests.append(unittest_name) else: bad_tests.append(unittest_name) script_usage_failures = 0 # choose to run some of the script usage tests or all the available ones if not suppress_script_usage_tests and exists(emperor_test_data_dir) and\ exists(emperor_scripts_dir): if script_usage_tests != None: script_tests = script_usage_tests.split(',') else: script_tests = None initial_working_directory = getcwd() # Run the script usage testing functionality; note that depending on the # module where this was imported, the name of the arguments will change # that's the reason why I added the name of the arguments in here script_usage_result_summary, script_usage_failures = \ run_script_usage_tests( emperor_test_data_dir, # test_data_dir emperor_scripts_dir, # scripts_dir temp_filepath, # working_dir True, # verbose script_tests, # tests None, # failure_log_fp False) # force_overwrite # running script usage tests breaks the 
current working directory chdir(initial_working_directory) if not suppress_javascript_unit_tests: runner = join(test_dir, 'javascript_tests', 'runner.js') index = join(test_dir, 'javascript_tests', 'index.html') o, e, r = qcli_system_call('phantomjs %s %s' % (runner, index)) if o: print o if e: print e # if all the tests passed javascript_tests_passed = True if r == 0 else False else: javascript_tests_passed = True print "==============\nResult summary\n==============" if not suppress_unit_tests: print "\nUnit test result summary\n------------------------\n" if bad_tests: print "\nFailed the following unit tests.\n%s" %'\n'.join(bad_tests) if missing_application_tests: print "\nFailed the following unit tests, in part or whole due "+\ "to missing external applications.\nDepending on the Emperor "+\ "features you plan to use, this may not be critical.\n%s"\ % '\n'.join(missing_application_tests) if not(missing_application_tests or bad_tests): print "\nAll unit tests passed.\n" if not suppress_script_usage_tests: if exists(emperor_test_data_dir) and exists(emperor_scripts_dir): print "\nScript usage test result summary"+\ "\n--------------------------------\n" print script_usage_result_summary else: print ("\nCould not run script usage tests.\nThe Emperor scripts " "directory could not be automatically located, try supplying " " it manually using the --emperor_scripts_dir option.") if not suppress_javascript_unit_tests: print ('\nJavaScript unit tests result summary\n' '------------------------------------\n') if javascript_tests_passed: print 'All JavaScript unit tests passed.\n' else: print 'JavaScript unit tests failed, check the summary above.' # In case there were no failures of any type, exit with a return code of 0 return_code = 1 if (len(bad_tests) == 0 and len(missing_application_tests) == 0 and script_usage_failures == 0 and javascript_tests_passed): return_code = 0 return return_code
def main(): option_parser, opts, args = parse_command_line_parameters(**script_info) unittest_glob = opts.unittest_glob temp_filepath = opts.temp_filepath script_usage_tests = opts.script_usage_tests suppress_unit_tests = opts.suppress_unit_tests suppress_script_usage_tests = opts.suppress_script_usage_tests # since the test data is in the tests folder just add scripts_test_data emperor_test_data_dir = join(abspath(dirname(__file__)), 'scripts_test_data/') # offer the option for the user to pass the scripts dir from the command # line since there is no other way to get the scripts dir. If not provided # the base structure of the repository will be assumed. Note that for both # cases we are using absolute paths, to avoid unwanted failures. if opts.emperor_scripts_dir is None: emperor_scripts_dir = abspath( join(get_emperor_project_dir(), 'scripts/')) # let's try to guess cases for qiime-deploy type of installs if get_emperor_project_dir().endswith('/lib'): emperor_scripts_dir = abspath( join(get_emperor_project_dir()[:-3], 'scripts/')) else: emperor_scripts_dir = abspath(opts.emperor_scripts_dir) # make a sanity check if (suppress_unit_tests and suppress_script_usage_tests): option_parser.error("All tests have been suppresed. 
Nothing to run.") test_dir = abspath(dirname(__file__)) unittest_good_pattern = re.compile('OK\s*$') application_not_found_pattern = re.compile('ApplicationNotFoundError') python_name = 'python' bad_tests = [] missing_application_tests = [] # Run through all of Emperor's unit tests, and keep track of any files which # fail unit tests, note that these are the unit tests only if not suppress_unit_tests: unittest_names = [] if not unittest_glob: for root, dirs, files in walk(test_dir): for name in files: if name.startswith('test_') and name.endswith('.py'): unittest_names.append(join(root, name)) else: for fp in glob(unittest_glob): fn = split(fp)[1] if fn.startswith('test_') and fn.endswith('.py'): unittest_names.append(abspath(fp)) unittest_names.sort() for unittest_name in unittest_names: print "Testing %s:\n" % unittest_name command = '%s %s -v' % (python_name, unittest_name) stdout, stderr, return_value = qcli_system_call(command) print stderr if not unittest_good_pattern.search(stderr): if application_not_found_pattern.search(stderr): missing_application_tests.append(unittest_name) else: bad_tests.append(unittest_name) script_usage_failures = 0 # choose to run some of the script usage tests or all the available ones if not suppress_script_usage_tests and exists(emperor_test_data_dir) and\ exists(emperor_scripts_dir): if script_usage_tests != None: script_tests = script_usage_tests.split(',') else: script_tests = None # Run the script usage testing functionality; note that depending on the # module where this was imported, the name of the arguments will change # that's the reason why I added the name of the arguments in here script_usage_result_summary, script_usage_failures = \ run_script_usage_tests( emperor_test_data_dir, # test_data_dir emperor_scripts_dir, # scripts_dir temp_filepath, # working_dir True, # verbose script_tests, # tests None, # failure_log_fp False) # force_overwrite print "==============\nResult summary\n==============" if not 
suppress_unit_tests: print "\nUnit test result summary\n------------------------\n" if bad_tests: print "\nFailed the following unit tests.\n%s" % '\n'.join( bad_tests) if missing_application_tests: print "\nFailed the following unit tests, in part or whole due "+\ "to missing external applications.\nDepending on the Emperor "+\ "features you plan to use, this may not be critical.\n%s"\ % '\n'.join(missing_application_tests) if not (missing_application_tests or bad_tests): print "\nAll unit tests passed.\n\n" if not suppress_script_usage_tests: if exists(emperor_test_data_dir) and exists(emperor_scripts_dir): print "\nScript usage test result summary"+\ "\n------------------------------------\n" print script_usage_result_summary else: print( "\nCould not run script usage tests.\nThe Emperor scripts " "directory could not be automatically located, try supplying " " it manually using the --emperor_scripts_dir option.") # In case there were no failures of any type, exit with a return code of 0 return_code = 1 if (len(bad_tests) == 0 and len(missing_application_tests) == 0 and script_usage_failures == 0): return_code = 0 return return_code
def main(): option_parser, opts, args = parse_command_line_parameters(**script_info) unittest_glob = opts.unittest_glob temp_filepath = opts.temp_filepath script_usage_tests = opts.script_usage_tests suppress_unit_tests = opts.suppress_unit_tests suppress_script_usage_tests = opts.suppress_script_usage_tests suppress_javascript_unit_tests = opts.suppress_javascript_unit_tests # since the test data is in the tests folder just add scripts_test_data ili_test_data_dir = join(abspath(dirname(__file__)), 'scripts_test_data/') # offer the option for the user to pass the scripts dir from the command # line since there is no other way to get the scripts dir. If not provided # the base structure of the repository will be assumed. Note that for both # cases we are using absolute paths, to avoid unwanted failures. if opts.ili_scripts_dir is None: ili_scripts_dir = abspath(join(get_ili_project_dir(), 'scripts/')) else: ili_scripts_dir = abspath(opts.ili_scripts_dir) # make a sanity check if (suppress_unit_tests and suppress_script_usage_tests and suppress_javascript_unit_tests): option_parser.error("All tests have been suppresed. 
Nothing to run.") test_dir = abspath(dirname(__file__)) unittest_good_pattern = re.compile('OK\s*$') application_not_found_pattern = re.compile('ApplicationNotFoundError') python_name = 'python' bad_tests = [] missing_application_tests = [] # Run through all of ili's unit tests, and keep track of any files # which fail unit tests, note that these are the unit tests only if not suppress_unit_tests: unittest_names = [] if not unittest_glob: for root, dirs, files in walk(test_dir): for name in files: if name.startswith('test_') and name.endswith('.py'): unittest_names.append(join(root, name)) else: for fp in glob(unittest_glob): fn = split(fp)[1] if fn.startswith('test_') and fn.endswith('.py'): unittest_names.append(abspath(fp)) unittest_names.sort() for unittest_name in unittest_names: print "Testing %s:\n" % unittest_name command = '%s %s -v' % (python_name, unittest_name) stdout, stderr, return_value = qcli_system_call(command) print stderr if not unittest_good_pattern.search(stderr): if application_not_found_pattern.search(stderr): missing_application_tests.append(unittest_name) else: bad_tests.append(unittest_name) script_usage_failures = 0 # choose to run some of the script usage tests or all the available ones if (not suppress_script_usage_tests and exists(ili_test_data_dir) and exists(ili_scripts_dir)): if script_usage_tests is not None: script_tests = script_usage_tests.split(',') else: script_tests = None initial_working_directory = getcwd() # Run the script usage testing functionality; note that depending on # the module where this was imported, the name of the arguments will # change that's the reason why I added the name of the arguments in # here script_usage_result_summary, script_usage_failures = \ run_script_usage_tests(ili_test_data_dir, # test_data_dir ili_scripts_dir, # scripts_dir temp_filepath, # working_dir True, # verbose script_tests, # tests None, # failure_log_fp False) # force_overwrite # running script usage tests breaks the current 
working directory chdir(initial_working_directory) if not suppress_javascript_unit_tests: runner = join(test_dir, 'javascript_tests', 'runner.js') index = join(test_dir, 'javascript_tests', 'index.html') o, e, r = qcli_system_call('phantomjs %s %s' % (runner, index)) if o: print o if e: print e # if all the tests passed javascript_tests_passed = True if r == 0 else False else: javascript_tests_passed = True print "==============\nResult summary\n==============" if not suppress_unit_tests: print "\nUnit test result summary\n------------------------\n" if bad_tests: print("\nFailed the following unit tests.\n%s" % '\n'.join(bad_tests)) if missing_application_tests: print( "\nFailed the following unit tests, in part or whole due " "to missing external applications.\nDepending on the " "ili features you plan to use, this may not be " "critical.\n%s" % '\n'.join(missing_application_tests)) if not (missing_application_tests or bad_tests): print "\nAll unit tests passed.\n" if not suppress_script_usage_tests: if exists(ili_test_data_dir) and exists(ili_scripts_dir): print( "\nScript usage test result summary" "\n--------------------------------\n") print script_usage_result_summary else: print( "\nCould not run script usage tests.\nThe ili scripts " "directory could not be automatically located, try " "supplying it manually using the --ili_scripts_dir " "option.") if not suppress_javascript_unit_tests: print( '\nJavaScript unit tests result summary\n' '------------------------------------\n') if javascript_tests_passed: print 'All JavaScript unit tests passed.\n' else: print 'JavaScript unit tests failed, check the summary above.' # In case there were no failures of any type, exit with a return code of 0 return_code = 1 if (len(bad_tests) == 0 and len(missing_application_tests) == 0 and script_usage_failures == 0 and javascript_tests_passed): return_code = 0 return return_code