def test_compose_query_params_raises_error_for_invalid_query_dict_that_has_multiple_values_for_exclusive_keys(
    self,
):
    query_dict = {
        'status': ['Running', 'Failed', 'Submitted'],
        'start': ['2018-01-01T00:00:00.000Z', '2018-01-02T00:00:00.000Z'],
        'end': '2018-01-01T12:00:00.000Z',
        'label': {'Comment1': 'test1', 'Comment2': 'test2', 'Comment3': 'test3'},
    }
    with self.assertRaises(ValueError):
        CromwellAPI._compose_query_params(query_dict)
def update_wf_batch_status(cromwell_auth, wf_id, include_in_batch=True):
    # Update workflow batch status to indicate whether the workflow should be
    # included in the final batch or not
    labels = {
        const.CROMWELL_BATCH_STATUS_FIELD: const.CROMWELL_BATCH_STATUS_INCLUDE_FLAG
    }
    if not include_in_batch:
        labels[const.CROMWELL_BATCH_STATUS_FIELD] = const.CROMWELL_BATCH_STATUS_EXCLUDE_FLAG
    CromwellAPI.patch_labels(wf_id, labels, cromwell_auth, raise_for_status=True)
def workflow_is_duplicate(self, workflow):
    hash_id = workflow.labels.get('hash-id')
    query_dict = {
        'label': f'hash-id:{hash_id}',
        'additionalQueryResultFields': 'labels',
    }
    response = CromwellAPI.query(query_dict, self.cromwell_auth, raise_for_status=True)
    results = response.json()['results']
    existing_workflows = [
        result for result in results if result['id'] != workflow.id
    ]
    if len(existing_workflows) > 0:
        # If there are other on-hold workflows with the same hash-id, the
        # workflow is a duplicate (and should not be run) if it has an older
        # bundle version than the others
        on_hold = [
            wf for wf in existing_workflows if wf['status'] == 'On Hold'
        ]
        if len(on_hold) == 0:
            return True
        on_hold.sort(
            key=lambda x: self.get_bundle_datetime(x['labels']['bundle-version']))
        workflow_bundle_version = self.get_bundle_datetime(workflow.bundle_version)
        return workflow_bundle_version < self.get_bundle_datetime(
            on_hold[-1]['labels']['bundle-version'])
    return False
def test_health_returns_200(self, mock_request):
    expected = {
        "DockerHub": {"ok": "true"},
        "Engine Database": {"ok": "true"},
        "PAPI": {"ok": "true"},
        "GCS": {"ok": "true"},
    }

    def _request_callback(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return expected

    for cromwell_auth in self.auth_options:
        mock_request.get('{0}/engine/v1/status'.format(cromwell_auth.url),
                         json=_request_callback)
        result = CromwellAPI.health(cromwell_auth)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.json(), expected)
def submit_wf_from_dict(cromwell_auth, wdl_workflow, input_dict,
                        dependencies=None, label_dict=None, options_file=None):
    # Write input and label dicts to temp files
    input_file = tempfile.NamedTemporaryFile()
    with open(input_file.name, "w") as fh:
        json.dump(input_dict, fh)

    if label_dict:
        label_file = tempfile.NamedTemporaryFile()
        with open(label_file.name, "w") as fh:
            json.dump(label_dict, fh)
    else:
        label_file = None

    try:
        # Submit workflow and return its id
        result = CromwellAPI.submit(cromwell_auth,
                                    wdl_workflow,
                                    input_file.name,
                                    dependencies=dependencies,
                                    label_file=label_file.name if label_file else None,
                                    options_file=options_file,
                                    raise_for_status=True)
        return result.json()["id"]
    finally:
        # Close temp files no matter what
        input_file.close()
        if label_file:
            label_file.close()
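# Usage sketch for submit_wf_from_dict: submits a WDL workflow with inputs and
# labels given as plain dicts. The URL, WDL path, and input/label keys below
# are illustrative assumptions, not values from the original code.
auth = cromwell.get_cromwell_auth(url="http://localhost:8000")
wf_id = submit_wf_from_dict(auth,
                            "workflow.wdl",
                            input_dict={"wf.sample_name": "sample1"},
                            label_dict={"batch-name": "batch1"})
logging.info("Submitted wf: {0}".format(wf_id))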
def retrieve_workflows(self, query_dict):
    """Retrieve the latest metadata of all "On Hold" workflows from Cromwell.

    Args:
        query_dict (dict): A dictionary of valid query parameters that are
            accepted by the Cromwell /query endpoint.

    Returns:
        workflow_metas (None or list): None if Cromwell returns a non-200
            status code, otherwise a list of workflow metadata blocks, e.g.
            ```
            [
                {
                    "name": "WorkflowName1",
                    "id": "xxx1",
                    "submission": "2018-01-01T23:49:40.620Z",
                    "status": "Succeeded",
                    "end": "2018-07-12T00:37:12.282Z",
                    "start": "2018-07-11T23:49:48.384Z"
                },
                {
                    "name": "WorkflowName2",
                    "id": "xxx2",
                    "submission": "2018-01-01T23:49:42.171Z",
                    "status": "Succeeded",
                    "end": "2018-07-12T00:31:27.273Z",
                    "start": "2018-07-11T23:49:48.385Z"
                }
            ]
            ```
    """
    workflow_metas = None
    query_dict["additionalQueryResultFields"] = "labels"
    try:
        response = CromwellAPI.query(auth=self.cromwell_auth,
                                     query_dict=query_dict)
        if response.status_code != 200:
            logger.warning(
                "QueueHandler | Failed to retrieve workflows from Cromwell | {0} | {1}".format(
                    response.text, datetime.now()))
        else:
            workflow_metas = response.json()["results"]
            num_workflows = len(workflow_metas)
            logger.info(
                "QueueHandler | Retrieved {0} workflows from Cromwell. | {1}".format(
                    num_workflows, datetime.now()))
            # TODO: remove this debug log or not?
            logger.debug("QueueHandler | {0} | {1}".format(
                workflow_metas, datetime.now()))
    except (
            requests.exceptions.ConnectionError,
            requests.exceptions.RequestException,
    ) as error:
        logger.error(
            "QueueHandler | Failed to retrieve workflows from Cromwell | {0} | {1}".format(
                error, datetime.now()))
    finally:
        return workflow_metas
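# Usage sketch for retrieve_workflows, assuming a queue-handler object that
# was constructed with a valid cromwell_auth (the `handler` name is
# hypothetical; the query key is a standard Cromwell /query parameter):
workflow_metas = handler.retrieve_workflows({"status": "On Hold"})
if workflow_metas is not None:
    for meta in workflow_metas:
        logger.info("{0} ({1}): {2}".format(
            meta["name"], meta["id"], meta["status"]))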
def get_wf_status(cromwell_auth, wf_id, log_on_fail=True):
    result = CromwellAPI.status(wf_id, cromwell_auth)
    try:
        result.raise_for_status()
    except requests.exceptions.HTTPError:
        err_msg = "Message from cromwell server: {0}".format(
            result.json()["message"])
        if log_on_fail:
            logging.error(err_msg)
        raise WFStatusCheckFailException(err_msg)
    return result.json()["status"]
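# Minimal sketch of polling get_wf_status until a workflow reaches a terminal
# state; the terminal-state set and poll interval are illustrative
# assumptions, not part of the original code.
terminal_states = {"Succeeded", "Failed", "Aborted"}
status = get_wf_status(cromwell_auth, wf_id)
while status not in terminal_states:
    time.sleep(30)
    status = get_wf_status(cromwell_auth, wf_id)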
def validate_cromwell_server(cromwell_auth):
    # Ping the cromwell server and check system health.
    # Raise an error if the server reports any issues.
    logging.info("Checking health of cromwell server...")
    result = CromwellAPI.health(cromwell_auth)
    try:
        result.raise_for_status()
    except requests.exceptions.HTTPError:
        logging.error("Cromwell server is reachable but not functional! "
                      "Message from server:\n{0}".format(result.json()))
        raise
    logging.info("Cromwell server is up and running!")
def test_release_workflow_that_is_not_on_hold_returns_error(self, mock_request):
    workflow_id = 'test'

    def _request_callback(request, context):
        context.status_code = 403
        context.headers['test'] = 'header'
        return {
            'status': 'error',
            'message': 'Couldn\'t change status of workflow {} to \'Submitted\' '
                       'because the workflow is not in \'On Hold\' state'.format(
                           request.url.split('/')[-2]),
        }

    for cromwell_auth in self.auth_options:
        mock_request.post(
            '{0}/api/workflows/v1/{1}/releaseHold'.format(
                cromwell_auth.url, workflow_id
            ),
            json=_request_callback,
        )
        with self.assertRaises(requests.exceptions.HTTPError):
            CromwellAPI.release_hold(workflow_id, cromwell_auth).raise_for_status()
def _submit_workflows(self, cromwell_auth, mock_request, _request_callback):
    mock_request.post(cromwell_auth.url + '/api/workflows/v1',
                      json=_request_callback)
    return CromwellAPI.submit(
        auth=cromwell_auth,
        wdl_file=self.wdl_file,
        inputs_files=self.inputs_file,
        options_file=self.options_file,
        dependencies=self.zip_file,
        label_file=self.label,
    )
def test_status(self, mock_request):
    def _request_callback_status(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return {'status': 'Succeeded'}

    workflow_id = "01234"
    for cromwell_auth in self.auth_options:
        mock_request.get(
            cromwell_auth.url + '/api/workflows/v1/{}/status'.format(workflow_id),
            json=_request_callback_status,
        )
        result = CromwellAPI.status(workflow_id, cromwell_auth)
        self.assertEqual(result.json()['status'], 'Succeeded')
def main():
    logger.info('Validating workflow...')
    CromwellAPI.validate_workflow(args.workflow_wdl, args.womtool_path)

    client = AdvisorClient.AdvisorClient(endpoint=args.advisor_server)

    with open(args.scan_json) as f:
        study_configuration = json.load(f)
    max_num_trials = study_configuration['maxTrials']

    with open(args.template_json) as f:
        template_values = json.load(f)

    study = client.get_or_create_study(args.study_name,
                                       study_configuration,
                                       algorithm=args.algorithm)
    logger.info(study)

    for i in range(max_num_trials):
        try:
            trial = client.get_suggestions(study.name)[0]
            logger.info(trial)
            scan_values = json.loads(trial.parameter_values)
            metric = calculate_metric(template_values, scan_values)
            if metric == bad_value:
                logger.info('Trial returned bad value, skipping...')
                continue
            trial = client.complete_trial_with_one_metric(trial, metric)
            logger.info('Objective value: ' + str(metric))
            logger.info('Trial completed.')
        except Exception as e:
            logger.info(e)
            logger.info('Problem with trial, skipping...')
            continue

    best_trial = client.get_best_trial(study.name)
    logger.info('Best trial: {}'.format(best_trial))
def query_workflows(cromwell_auth, query):
    # Return workflow ids matching conditions specified in the query dict
    # e.g. query = {"label": [{"run_id": "12"}, {"custom_label2": "barf"}]}
    # e.g. query = {"submission": "2020-01-10T14:53:48.128Z"}
    result = CromwellAPI.query(query, cromwell_auth)
    try:
        result.raise_for_status()
    except requests.exceptions.HTTPError:
        logging.error("Unable to run query: {0}".format(query))
        logging.error("Message from cromwell server:\n{0}".format(
            result.json()))
        raise
    return [
        wf["id"] for wf in result.json()['results']
        if "parentWorkflowId" not in wf
    ]
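# Usage sketch for query_workflows, mirroring the example queries in the
# comments above (the auth object and label values are illustrative):
batch_wf_ids = query_workflows(auth, {"label": {"batch-name": "batch1"}})
recent_wf_ids = query_workflows(auth, {"submission": "2020-01-10T14:53:48.128Z"})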
def test_abort(self, mock_request):
    workflow_id = "01234"
    expected = {"id": workflow_id, "status": "Aborting"}

    def _request_callback(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return expected

    for cromwell_auth in self.auth_options:
        mock_request.post(
            cromwell_auth.url + '/api/workflows/v1/{}/abort'.format(workflow_id),
            json=_request_callback,
        )
        result = CromwellAPI.abort(workflow_id, cromwell_auth)
        self.assertEqual(result.json(), expected)
def test_compose_query_params_can_convert_bools_within_query_dicts(self):
    query_dict = {
        'status': ['Running', 'Failed', 'Submitted'],
        'start': '2018-01-01T00:00:00.000Z',
        'end': '2018-01-01T12:00:00.000Z',
        'label': {
            'Comment1': 'test1',
            'Comment2': 'test2',
            'Comment3': 'test3',
        },
        'includeSubworkflows': True,
    }
    expect_params = [
        {'status': 'Running'},
        {'status': 'Failed'},
        {'status': 'Submitted'},
        {'start': '2018-01-01T00:00:00.000Z'},
        {'end': '2018-01-01T12:00:00.000Z'},
        {'label': 'Comment1:test1'},
        {'label': 'Comment2:test2'},
        {'label': 'Comment3:test3'},
        {'includeSubworkflows': 'true'},
    ]
    six.assertCountEqual(self,
                         CromwellAPI._compose_query_params(query_dict),
                         expect_params)
def test_release_onhold_returns_200(self, mock_request):
    workflow_id = '12345abcde'

    def _request_callback(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return {'id': request.url.split('/')[-2], 'status': 'Submitted'}

    for cromwell_auth in self.auth_options:
        mock_request.post(
            '{0}/api/workflows/v1/{1}/releaseHold'.format(
                cromwell_auth.url, workflow_id),
            json=_request_callback,
        )
        result = CromwellAPI.release_hold(workflow_id, cromwell_auth)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.json()['id'], workflow_id)
        self.assertEqual(result.json()['status'], 'Submitted')
def test_query_workflows_returns_200(self, mock_request):
    query_dict = {
        'status': ['Running', 'Failed'],
        'label': {
            'label_key1': 'label_value1',
            'label_key2': 'label_value2',
        },
    }

    def _request_callback(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return {
            'results': [
                {
                    'name': 'workflow1',
                    'submission': 'submission1',
                    'id': 'id1',
                    'status': 'Failed',
                    'start': 'start1',
                    'end': 'end1',
                },
                {
                    'name': 'workflow2',
                    'submission': 'submission2',
                    'id': 'id2',
                    'status': 'Running',
                    'start': 'start2',
                    'end': 'end2',
                },
            ],
            'totalResultsCount': 2,
        }

    for cromwell_auth in self.auth_options:
        mock_request.post(
            '{}/api/workflows/v1/query'.format(cromwell_auth.url),
            json=_request_callback,
        )
        result = CromwellAPI.query(query_dict, cromwell_auth)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.json()['totalResultsCount'], 2)
def run(auth: CromwellAuth, metadata: str = None, uuid: str = None):
    if metadata is not None:
        with open(metadata) as data_file:
            metadata = json.load(data_file)
    else:
        # cli top level parsing ensures uuid is set when metadata is not
        response = CromwellAPI.metadata(
            uuid=uuid,
            auth=auth,
            expandSubWorkflows=True,
            raise_for_status=True,
        )
        metadata = response.json()

    if auth.service_key_content is None:
        raise exceptions.CromwellAuthenticationError(
            "task_runtime requires a service account key")
    print_task_runtime_data(metadata, auth.service_key_content)
def get_wf_metadata(cromwell_auth, wf_id, include_keys=None, exclude_keys=None):
    result = CromwellAPI.metadata(wf_id,
                                  cromwell_auth,
                                  includeKey=include_keys,
                                  excludeKey=exclude_keys)
    try:
        result.raise_for_status()
    except requests.exceptions.HTTPError:
        logging.error("Unable to fetch metadata for wf: {0}".format(wf_id))
        logging.error("Message from cromwell server:\n{0}".format(
            result.json()))
        raise
    return result.json()
def submit_jobs(self):
    resp_list = []
    for _index, _args_dict in self.final_args_dict.items():
        new_inputs_dict = deepcopy(self.inputs_dict)
        new_inputs_dict["genericworkflow.GenericTask.shell_command"] = _args_dict['command']
        new_inputs_dict["genericworkflow.GenericTask.input_files"] = _args_dict['remote_input_files']
        temp_resp = cwt.submit(
            auth=self.auth_obj,
            wdl_file=io.BytesIO(self.wdl_text.encode()),
            inputs_files=io.BytesIO(json.dumps(new_inputs_dict).encode()),
            options_file=io.BytesIO(json.dumps(self.options_dict).encode()))
        resp_list.append(temp_resp.json()['id'])
    self.resp_list = resp_list
    return self.resp_list
def test_patch_labels_returns_200(self, mock_request):
    workflow_id = 'labeltest'
    new_label = {'foo': 'bar'}

    def _request_callback(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return {'id': request.url.split('/')[-2], 'labels': new_label}

    for cromwell_auth in self.auth_options:
        mock_request.patch(
            '{0}/api/workflows/v1/{1}/labels'.format(
                cromwell_auth.url, workflow_id
            ),
            json=_request_callback,
        )
        result = CromwellAPI.patch_labels(workflow_id, new_label, cromwell_auth)
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.json()['id'], workflow_id)
        self.assertEqual(result.json()['labels'], new_label)
def test_metadata_returns_200(self, mock_request):
    workflow_id = '12345abcde'
    test_include_key = 'workflow'

    def _request_callback(request, context):
        context.status_code = 200
        context.headers['test'] = 'header'
        return {'id': '12345abcde', 'actualWorkflowLanguageVersion': 'draft-2'}

    for cromwell_auth in self.auth_options:
        mock_request.get(
            '{0}/api/workflows/v1/{1}/metadata?expandSubWorkflows=false&includeKey={2}'.format(
                cromwell_auth.url, workflow_id, test_include_key
            ),
            json=_request_callback,
        )
        result = CromwellAPI.metadata(
            workflow_id, cromwell_auth, includeKey=test_include_key
        )
        self.assertEqual(result.status_code, 200)
        self.assertEqual(result.json()['id'], workflow_id)
def test_compose_query_params_can_compose_simple_query_dicts(self):
    query_dict = {
        'status': 'Running',
        'start': '2018-01-01T00:00:00.000Z',
        'end': '2018-01-01T12:00:00.000Z',
        'label': {'Comment': 'test'},
        'page': 1,
        'pageSize': 10,
    }
    expect_params = [
        {'status': 'Running'},
        {'start': '2018-01-01T00:00:00.000Z'},
        {'end': '2018-01-01T12:00:00.000Z'},
        {'label': 'Comment:test'},
        {'page': '1'},
        {'pageSize': '10'},
    ]
    six.assertCountEqual(
        self, CromwellAPI._compose_query_params(query_dict), expect_params
    )
def get_status(self):
    # Return a DataFrame of (workflow id, status) pairs for all submitted workflows
    return pd.DataFrame(
        [[_a, cwt.metadata(_a, self.auth_obj).json()['status']]
         for _a in self.resp_list])
def calculate_metric(template_values, scan_values):
    # Merge template and scan values (scan values override template values)
    merged_values = {**template_values, **scan_values}
    merged_json_fd, merged_json_path = tempfile.mkstemp()
    with open(merged_json_path, 'w') as f:
        json.dump(merged_values, f)

    cromwell_auth = CromwellAuth(url=args.cromwell_server,
                                 header={'Authorization': 'bearer fake_token'},
                                 auth=None)
    with open(args.workflow_wdl, 'r') as w, open(merged_json_path, 'r') as j:
        submit = CromwellAPI.submit(cromwell_auth, w, j)
    workflow_id = submit.json()['id']
    logger.info('Submitted workflow: ' + workflow_id)
    time.sleep(5)
    logger.info('Waiting for workflow to complete...')

    # Query workflow status indefinitely until success or failure is returned.
    # If success is returned, attempt to retrieve objective_value from metadata.
    # If failure is returned, or an exception is raised during metadata
    # retrieval, return bad_value.
    try:
        while True:
            try:
                CromwellAPI.wait([workflow_id],
                                 cromwell_auth,
                                 timeout_minutes=600,
                                 poll_interval_seconds=20,
                                 verbose=False)
                response = CromwellAPI.status(workflow_id, cromwell_auth)
                status = response.json()['status']
                if status == 'Succeeded':
                    logger.info('Workflow succeeded...')
                    break
            except WorkflowFailedException:
                logger.info('Workflow failed, returning bad value...')
                return bad_value
            except Exception as e:
                logger.info(e)
                logger.info('Cromwell exception, retrying wait and status check...')

        logger.info('Getting metadata...')
        session = retry_session(retries=10)
        metadata = session.post(
            url=cromwell_auth.url + CromwellAPI._metadata_endpoint.format(uuid=workflow_id),
            auth=cromwell_auth.auth,
            headers=cromwell_auth.header)
        workflow_name = metadata.json()['workflowName']
        objective_value = metadata.json()['outputs'][
            '{}.objective_value'.format(workflow_name)]
        return objective_value
    except Exception as e:
        logger.info(e)
        logger.info('Cromwell exception during metadata retrieval, returning bad value...')
        return bad_value
"final_workflow_outputs_dir": output_base_location, "use_relative_output_paths": True, "final_call_logs_dir": "{}/call_logs".format(output_base_location), "jes_gcs_root": cromwell_runs_bucket, "google_labels": { "pipeline-name": "gatk4-germline-snps-indels", "project-name": "comparing-gatk-sentieon-dragen" } } # - input_iobytes = io.BytesIO(json.dumps(jj_input_json).encode()) options_iobytes = io.BytesIO(json.dumps(jj_options_dict).encode()) jj_resp = cwt.submit(auth_obj, wdl_file = cromwell_functions.get_wdl_iobytes("gs://bioskryb_dev_wdl_and_inputs/gatk-workflows/gatk4-germline-snps-indels/2.0.0/JointGenotyping.wdl", storage_client), inputs_files = input_iobytes, options_file = options_iobytes ) jj_resp.content gvcf_filenames_n16 # ## Sentieon - gVCF generation # The same 16 samples from above (plus the reference) are joint genotyped using Sentieon # First, generate gVCFs r1_fastq_files = !gsutil ls gs://bioskryb-vumc-data/Nova181_H2VMKDSXY/*_R1_*fastq.gz r2_fastq_files = !gsutil ls gs://bioskryb-vumc-data/Nova182_H2VGNDSXY/*_R1_*fastq.gz
def sync_metadata(self):
    self.metadata = CromwellAPI.metadata(
        self.wf_id,
        self.auth,
        includeKey=["outputs", "labels", "status", "inputs"],
        raise_for_status=True).json()
def main():
    # Configure argparser
    argparser = get_argparser()

    # Parse the arguments
    args = argparser.parse_args()

    batch_name = args.batch_name
    cromwell_url = args.cromwell_url

    # Standardize url
    cromwell_url = utils.fix_url(cromwell_url)

    # Configure logging appropriate for verbosity
    utils.configure_logging(args.verbosity_level)

    # Authenticate and validate cromwell server
    auth = cromwell.get_cromwell_auth(url=cromwell_url)
    cromwell.validate_cromwell_server(auth)

    # Grab all of the workflows with the batch-name label
    batch_wfs = cromwell.query_workflows(
        auth, {"label": {const.CROMWELL_BATCH_LABEL: batch_name}})

    # Error out if batch doesn't actually exist
    if not batch_wfs:
        logging.error(
            "No batch exists on current cromwell server with batch-name '{0}'".format(
                batch_name))
        raise IOError

    # Terminal wf status codes
    terminal_states = [
        const.CROMWELL_ABORTING_STATUS, const.CROMWELL_ABORTED_STATUS,
        const.CROMWELL_SUCCESS_STATUS, const.CROMWELL_FAILED_STATUS
    ]

    logging.info("Aborting workflows...")
    aborted_wfs = 0
    running_wfs = 0
    for wf in batch_wfs:
        wf_status = cromwell.get_wf_status(auth, wf)
        if wf_status not in terminal_states:
            try:
                logging.info("Aborting wf: {0}".format(wf))
                CromwellAPI.abort(wf, auth, raise_for_status=True)
                aborted_wfs += 1
            except requests.exceptions.HTTPError:
                logging.warning(
                    "Unable to abort wf '{0}' for some reason...".format(wf))
            finally:
                running_wfs += 1

    success_rate = 0.0 if running_wfs == 0 else (aborted_wfs / (1.0 * running_wfs)) * 100
    logging.info(
        "{0}/{1} ({2}%) pending batch workflows successfully aborted!".format(
            aborted_wfs, running_wfs, success_rate))
def main():
    # Configure argparser
    argparser = get_argparser()

    # Parse the arguments
    args = argparser.parse_args()

    # Input files: json input/label files to be used for the batch
    batch_input_json = args.input_json
    batch_label_json = args.label_json
    wdl_workflow = args.wdl_workflow
    wdl_imports = args.wdl_imports
    wdl_options = args.wdl_options
    output_dir = args.output_dir
    batch_conflict_action = args.batch_conflict_action
    cromwell_url = args.cromwell_url

    # Standardize url
    cromwell_url = utils.fix_url(cromwell_url)

    # Configure logging appropriate for verbosity
    utils.configure_logging(args.verbosity_level)

    # Read in batch inputs
    with open(batch_input_json, "r") as fh:
        batch_inputs = json.load(fh)
    with open(batch_label_json, "r") as fh:
        batch_labels = json.load(fh)

    # Convert inputs/labels to lists if they're not already lists
    batch_inputs = [batch_inputs] if not isinstance(batch_inputs, list) else batch_inputs
    batch_labels = [batch_labels] if not isinstance(batch_labels, list) else batch_labels
    assert len(batch_inputs) == len(batch_labels), \
        "Batch label and input files are different sizes!"

    # Check to make sure all workflow labels have required label keys
    for wf_labels in batch_labels:
        wdl.validate_wf_labels(wf_labels)

    # Authenticate and validate cromwell server
    auth = cromwell.get_cromwell_auth(url=cromwell_url)
    cromwell.validate_cromwell_server(auth)

    # Create a report to detail what jobs were run/skipped/failed
    job_report = copy.deepcopy(batch_labels)
    batch_name = batch_labels[0][const.CROMWELL_BATCH_LABEL]
    report_file = "{0}/{1}.submit_batch.{2}.xlsx".format(
        output_dir, batch_name, time.strftime("%Y%m%d-%H%M%S"))

    # Loop through workflows to see if they need to be run/rerun
    submitted_wfs = 0
    for i in range(len(batch_inputs)):
        # Get inputs, labels, and batch_sample label for next workflow in batch
        wf_input = batch_inputs[i]
        wf_labels = batch_labels[i]
        batch_sample_label = wf_labels[const.CROMWELL_BATCH_SAMPLE_LABEL]

        # Try to run the workflow
        try:
            # Get list of previous workflows in this batch with the same
            # sample name (batch conflicts)
            batch_conflict_wfs = cromwell.get_batch_conflicts(
                auth, batch_sample_label)

            # Determine how to resolve each batch conflict
            can_submit_wf = True
            for batch_conflict_wf in batch_conflict_wfs:
                can_override_conflict_wf, abort_conflict_wf = resolve_batch_conflict(
                    auth, batch_conflict_wf, batch_conflict_action)

                # Exclude prior conflicting wf from the current active batch
                if can_override_conflict_wf:
                    cromwell.update_wf_batch_status(auth,
                                                    batch_conflict_wf,
                                                    include_in_batch=False)

                # Abort prior conflicting wf if in a non-terminal state
                # (i.e. not failed or succeeded)
                if abort_conflict_wf:
                    CromwellAPI.abort(batch_conflict_wf, auth, raise_for_status=True)
                    logging.warning(
                        "Aborted conflicting wf '{0}' "
                        "with duplicate batch_sample_id '{1}'".format(
                            batch_conflict_wf, batch_sample_label))

                # Workflow can only be submitted if it can override all prior workflows
                can_submit_wf = can_submit_wf and can_override_conflict_wf

            # Workflow id of submission and message to print in job report file.
            # Will be overwritten if job is submitted.
            wf_id = "Not submitted"
            msg = "Superseded WF is either running/submitted or was successful."

            # Run the workflow if you can
            if can_submit_wf:
                # Show some logging stuff
                logging.info(
                    "Submitting workflow for '{0}'".format(batch_sample_label))
                if batch_conflict_wfs:
                    logging.warning("Superseded workflows: {0}".format(
                        ", ".join(batch_conflict_wfs)))

                # Submit workflow and get id of the newly submitted workflow
                wf_id = cromwell.submit_wf_from_dict(auth,
                                                     wdl_workflow,
                                                     input_dict=wf_input,
                                                     dependencies=wdl_imports,
                                                     label_dict=wf_labels,
                                                     options_file=wdl_options)
                # Increment counter of successfully submitted workflows
                submitted_wfs += 1
                msg = ""

            # Update information for report
            job_report[i][const.CROMWELL_WF_ID_FIELD] = wf_id
            job_report[i][const.SUPERCEDED_WF_FIELD] = ", ".join(batch_conflict_wfs)
            job_report[i][const.REPORT_INFO_FIELD] = msg

        except BaseException:
            # Log any successful submissions and indicate which workflow caused the failure
            logging.info(
                "Successfully submitted {0} out of {1} workflows in batch!".format(
                    submitted_wfs, len(batch_inputs)))
            logging.info("Writing workflow report...")
            output_job_report(job_report, report_file, failed_workflow_index=i)
            raise

    # Log submission if all workflows submitted successfully
    logging.info(
        "Successfully submitted {0} out of {1} workflows in batch!".format(
            submitted_wfs, len(batch_inputs)))
    logging.info("Writing workflow report...")
    output_job_report(job_report, report_file)