def test_safe_exec_run(mocker):
    """Verify that safe_exec forwards its command to subprocess.run
    with check=True and both output streams PIPEd."""
    import subprocess
    args = ['some', 'command', 'line']
    mocker.patch('subprocess.run')
    safe_exec(args)
    # stdout=-1 / stderr=-1 are the numeric values of subprocess.PIPE
    subprocess.run.assert_called_with(args, check=True, stdout=-1, stderr=-1)
def test_cluster_error():
    """Check elastic-blast error handling when the GKE cluster is STOPPING.

    A fake ``gcloud`` wrapper script is placed first on PATH: it answers
    STOPPING to ``gcloud container clusters list`` and delegates every
    other invocation to the real gcloud executable.  The ``delete``,
    ``submit`` and ``status`` sub-commands must then exit with
    constants.CLUSTER_ERROR and print a clean (traceback-free) message.
    """
    p = safe_exec('which gcloud')
    # BUGFIX: strip the trailing newline printed by `which`; without it a
    # bare newline was embedded in the middle of the wrapper script below
    gcloud_exepath = p.stdout.decode().strip()
    # NOTE: inside this f-string ${1} renders as the literal $1 (Python
    # formats the int 1), while {gcloud_exepath} is interpolated.  Use
    # POSIX '=' rather than bash-only '==' so the script also works when
    # /bin/sh is dash.
    gcloud = f"""\
#!/bin/sh
if [ "${1} ${2} ${3}" = "container clusters list" ]; then
echo STOPPING
else
echo `{gcloud_exepath} $@`
fi"""
    env = dict(os.environ)
    with TemporaryDirectory() as d:
        # put the fake gcloud first on PATH
        env['PATH'] = d + ':' + env['PATH']
        gcloud_fname = os.path.join(d, 'gcloud')
        with open(gcloud_fname, 'wt') as f:
            f.write(gcloud)
        import stat
        os.chmod(gcloud_fname, stat.S_IRWXU)

        fn_config = os.path.join(TEST_DATA_DIR, 'cluster-error.ini')

        # elastic-blast.py delete --cfg tests/app/data/cluster-error.ini --logfile stderr
        p = subprocess.run(
            [ELB_EXENAME, 'delete', '--cfg', fn_config, '--logfile', 'stderr'],
            env=env, stderr=subprocess.PIPE)
        msg = p.stderr.decode()
        print(msg)
        assert p.returncode == constants.CLUSTER_ERROR
        assert 'Traceback' not in msg
        assert 'ERROR' in msg
        assert 'is already being deleted' in msg

        # elastic-blast.py submit --cfg tests/app/data/cluster-error.ini --logfile stderr
        p = subprocess.run(
            [ELB_EXENAME, 'submit', '--cfg', fn_config, '--logfile', 'stderr'],
            env=env, stderr=subprocess.PIPE)
        assert p.returncode == constants.CLUSTER_ERROR
        msg = p.stderr.decode()
        print(msg)
        assert 'Traceback' not in msg
        assert 'Previous instance of cluster' in msg
        assert 'is still STOPPING' in msg

        # elastic-blast.py status --cfg tests/app/data/cluster-error.ini --loglevel DEBUG --logfile stderr
        p = subprocess.run(
            [ELB_EXENAME, 'status', '--cfg', fn_config, '--logfile', 'stderr'],
            env=env, stderr=subprocess.PIPE)
        assert p.returncode == constants.CLUSTER_ERROR
        msg = p.stderr.decode()
        print(msg)
        assert 'Traceback' not in msg
def test_dependency_error():
    """Check that missing or non-executable prerequisites (gcloud,
    kubectl) make elastic-blast exit with constants.DEPENDENCY_ERROR
    and print a clean (traceback-free) diagnostic."""
    def submit_with_path(path):
        # Run 'elastic-blast.py submit --dry-run' with a restricted PATH;
        # return (exit code, decoded stderr) after echoing the stderr.
        proc = subprocess.run(
            [ELB_EXENAME, 'submit', '--cfg', INI_VALID, '--dry-run'],
            env={'PATH': path}, stderr=subprocess.PIPE)
        text = proc.stderr.decode()
        print(text)
        return proc.returncode, text

    elb_exe = safe_exec('which elastic-blast.py').stdout.decode()
    elb_dir = os.path.dirname(elb_exe)

    # Only elastic-blast.py on PATH: gcloud is reported as missing
    rc, msg = submit_with_path(elb_dir)
    assert rc == constants.DEPENDENCY_ERROR
    assert 'Traceback' not in msg
    assert "Required pre-requisite 'gcloud' doesn't work" in msg

    # Add gcloud's directory: now kubectl is reported as missing
    gcloud_exe = safe_exec('which gcloud').stdout.decode()
    rc, msg = submit_with_path(elb_dir + ':' + os.path.dirname(gcloud_exe))
    assert rc == constants.DEPENDENCY_ERROR
    assert 'Traceback' not in msg
    assert "Required pre-requisite 'kubectl' doesn't work" in msg

    from tempfile import TemporaryDirectory
    # A gcloud file that exists but is not executable must also fail
    with TemporaryDirectory() as d:
        safe_exec(f'touch {d}/gcloud')
        rc, msg = submit_with_path(elb_dir + ':' + d)
        assert rc == constants.DEPENDENCY_ERROR
        assert 'Traceback' not in msg
        assert "Required pre-requisite 'gcloud' doesn't work" in msg
def _read_job_logs_gcp(cfg):
    """Collect BLAST runtime statistics from job logs stored in GCP.

    Reads the BLAST_RUNTIME-*.out log files from the results bucket via
    ``gsutil cat`` and returns a Run object with the number of finished
    jobs, earliest start / latest end timestamps (seconds since epoch;
    the logs store nanoseconds), and the per-batch exit codes.

    Returns an empty Run() when results are not configured or when the
    cluster is in dry-run mode.
    """
    dry_run = cfg.cluster.dry_run
    njobs = start_time = end_time = 0
    exit_codes = []
    results = cfg.cluster.results
    if not results:
        return Run()
    log_uri = results + '/' + ELB_LOG_DIR + '/BLAST_RUNTIME-*.out'
    cmd = ['gsutil', 'cat', log_uri]
    if dry_run:
        logging.info(' '.join(cmd))
        return Run()
    proc = safe_exec(cmd)
    nread = 0
    njobs = 0
    for line in proc.stdout.decode().split('\n'):
        if not line:
            continue
        nread += 1
        parts = line.split()
        # Failing jobs generate invalid log entries which can
        # start not with a timestamp
        try:
            timestamp = float(parts[0]) / 1e9  # nanoseconds -> seconds
        except ValueError:
            continue
        # Skip truncated entries too short to carry a verb
        # (expected layout: timestamp subject verb batch [value])
        if len(parts) < 3:
            continue
        # parts[1] is the subject (should be 'run'); parts[3] the batch id
        verb = parts[2]
        # TODO: maybe makes sense to check that there are no duplicates,
        # also check gaps between one batch end and another batch start
        if verb == 'start' and (njobs == 0 or timestamp < start_time):
            start_time = timestamp
        elif verb == 'end' and (njobs == 0 or timestamp > end_time):
            end_time = timestamp
        if verb == 'end':
            njobs += 1
        if verb == 'exitCode' and len(parts) > 4:
            exit_codes.append(int(parts[4]))
    if not nread:
        # BUGFIX: proc.stderr is a bytes object on a completed-process
        # result (it has no .read()), and logging.error() accepts no
        # file= keyword -- the old line raised instead of logging
        logging.error(proc.stderr.decode().strip())
    return Run(njobs, start_time, end_time, exit_codes)
def test_safe_exec_cmd_not_a_list_or_string(self):
    """safe_exec must raise ValueError for a cmd argument that is
    neither a list nor a string (here: an int)."""
    self.assertRaises(ValueError, safe_exec, 1)
def test_safe_exec_permission_error(self):
    """A command naming a non-existent or non-executable binary
    ('date -o' as a single argv element) raises SafeExecError."""
    with self.assertRaises(SafeExecError):
        safe_exec(['date -o'])
def test_safe_exec_fail(self):
    """A command that exits with non-zero status ('date -o' is an
    invalid option) raises SafeExecError."""
    failing_cmd = ['date', '-o']
    with self.assertRaises(SafeExecError):
        safe_exec(failing_cmd)
def test_safe_exec(self):
    """A successful command runs to completion and its stdout is
    captured by safe_exec."""
    message = 'some cool text'
    proc = safe_exec(['echo'] + message.split())
    self.assertEqual(proc.returncode, 0)
    # echo appends a newline; strip it before comparing
    self.assertEqual(proc.stdout.decode().rstrip(), message)