def main(demux_fcid_dir, restrict_to_projects=None, restrict_to_samples=None):
    """One-off driver: submit a fixed list of demultiplexed flowcells for
    processing, then loop forever syncing job status to Charon and
    (re)launching sample-level analysis for projects found on disk.

    NOTE(review): all three parameters are ignored -- demux_fcid_dir is
    immediately overwritten with hard-coded INBOX paths below; confirm this
    is intentional for this driver script.
    """
    demux_fcid_dir = "/proj/a2014205/INBOX/140528_D00415_0049_BC423WACXX"  # G.Grigelioniene_14_01
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)
    time.sleep(60)  # pause one minute between flowcell submissions
    demux_fcid_dir = "/proj/a2014205/INBOX/140702_D00415_0052_AC41A2ANXX"  # M.Kaller_14_06 sample P1171_102, P1171_104, P1171_106, P1171_108
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)
    time.sleep(60)
    demux_fcid_dir = "/proj/a2014205/INBOX/140905_D00415_0057_BC45KVANXX"  # M.Kaller_14_06 sample P1171_102, P1171_104, P1171_106 -- rerun
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)
    time.sleep(60)
    demux_fcid_dir = "/proj/a2014205/INBOX/140815_SN1025_0222_AC4HA6ACXX"  # M.Kaller_14_05 sample P1170_101, P1170_103, P1170_105
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)  # M.Kaller_14_08 sample P1272_101, P1272_104
    time.sleep(60)
    demux_fcid_dir = "/proj/a2014205/INBOX/140815_SN1025_0223_BC4HAPACXX"  # M.Kaller_14_05 sample P1170_101, P1170_103, P1170_105
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)  # M.Kaller_14_08 sample P1272_101, P1272_104
    time.sleep(60)
    demux_fcid_dir = "/proj/a2014205/INBOX/140919_SN1018_0203_BHA3THADXX"  # M.Kaller_14_05 P1170_103, P1170_105 -- rerun
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)
    time.sleep(60)
    # UPPSALA flowcells
    demux_fcid_dir = "/proj/a2014205/INBOX/140821_D00458_0029_AC45JGANXX"  # uppsala run
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)
    time.sleep(60)
    demux_fcid_dir = "/proj/a2014205/INBOX/140917_D00458_0034_AC4FF3ANXX"  # -- rerun
    process_demultiplexed_flowcell(demux_fcid_dir, None, None)
    time.sleep(60)
    # Give the submitted jobs time to start before the first status poll,
    # then keep the local DB and Charon in sync indefinitely.
    time.sleep(3800)
    charon_session = CharonSession()
    while True:
        update_charon_with_local_jobs_status()  # updates local DB and Charon accordingly
        # Grab all projects from Charon and relaunch analysis for any that
        # have an analysis-ready DATA directory on the filesystem.
        projects_dict = charon_session.projects_get_all()['projects']
        for project_charon in projects_dict:
            project_name = project_charon["name"]
            project_dir = os.path.join(
                "/proj/a2014205/nobackup/NGI/analysis_ready/DATA", project_name)
            if os.path.isdir(project_dir):
                projectObj = recreate_project_from_filesystem(project_dir, None)
                launch_analysis_for_samples([projectObj])
        time.sleep(3800)
def main(demux_fcid_dir, restrict_to_projects=None, restrict_to_samples=None):
    """Submit a hard-coded batch of demultiplexed flowcells for processing,
    then poll forever: sync job status to Charon and (re)launch analysis for
    every project present under the analysis-ready DATA directory.

    NOTE(review): the arguments are unused -- the flowcell paths below are
    hard-coded; verify this driver behavior is intended.
    """
    flowcell_dirs = (
        "/proj/a2014205/INBOX/140528_D00415_0049_BC423WACXX",  # G.Grigelioniene_14_01
        "/proj/a2014205/INBOX/140702_D00415_0052_AC41A2ANXX",  # M.Kaller_14_06: P1171_102/104/106/108
        "/proj/a2014205/INBOX/140905_D00415_0057_BC45KVANXX",  # M.Kaller_14_06 rerun: P1171_102/104/106
        "/proj/a2014205/INBOX/140815_SN1025_0222_AC4HA6ACXX",  # M.Kaller_14_05 + M.Kaller_14_08
        "/proj/a2014205/INBOX/140815_SN1025_0223_BC4HAPACXX",  # M.Kaller_14_05 + M.Kaller_14_08
        "/proj/a2014205/INBOX/140919_SN1018_0203_BHA3THADXX",  # M.Kaller_14_05 rerun: P1170_103/105
        "/proj/a2014205/INBOX/140821_D00458_0029_AC45JGANXX",  # Uppsala run
        "/proj/a2014205/INBOX/140917_D00458_0034_AC4FF3ANXX",  # rerun
    )
    for demux_fcid_dir in flowcell_dirs:
        process_demultiplexed_flowcell(demux_fcid_dir, None, None)
        time.sleep(60)  # pause one minute between submissions
    # Let the submitted jobs spin up before the first poll, then loop
    # forever keeping the local DB and Charon in sync.
    time.sleep(3800)
    charon_session = CharonSession()
    while True:
        update_charon_with_local_jobs_status()  # updates local DB and Charon accordingly
        for project_charon in charon_session.projects_get_all()['projects']:
            project_dir = os.path.join(
                "/proj/a2014205/nobackup/NGI/analysis_ready/DATA",
                project_charon["name"])
            if os.path.isdir(project_dir):
                project_obj = recreate_project_from_filesystem(project_dir, None)
                launch_analysis_for_samples([project_obj])
        time.sleep(3800)
#!/bin/env python from __future__ import print_function import argparse import importlib from ngi_pipeline.engines.piper_ngi.local_process_tracking import update_charon_with_local_jobs_status from ngi_pipeline.engines.piper_ngi.database import SampleAnalysis, get_db_session if __name__=="__main__": parser = argparse.ArgumentParser("Show all the jobs currently running (currently just for Piper).") parser.add_argument("-q", "--quiet", action="store_true", help="Don't send notification emails on status changes.") args = parser.parse_args() update_charon_with_local_jobs_status(quiet=args.quiet) with get_db_session() as session: sample_jobs = session.query(SampleAnalysis).all() print("\nSample-level analysis jobs:") if sample_jobs: for sample_job in sample_jobs: print("\t{}".format(sample_job)) else: print("\tNone") print()
#!/bin/env python from __future__ import print_function import argparse import importlib from ngi_pipeline.engines.piper_ngi.local_process_tracking import update_charon_with_local_jobs_status from ngi_pipeline.engines.piper_ngi.database import SampleAnalysis, get_db_session if __name__=="__main__": parser = argparse.ArgumentParser("Show all the jobs currently running (currently just for Piper).") update_charon_with_local_jobs_status() with get_db_session() as session: sample_jobs = session.query(SampleAnalysis).all() print("\nSample-level analysis jobs:") if sample_jobs: for sample_job in sample_jobs: print("\t{}".format(sample_job)) else: print("\tNone") print()
def project_summarize(projects, verbosity=0):
    """Print (to stderr) a status summary of the given projects, pulled from
    Charon after syncing local job state.

    :param list projects: Project names/paths to summarize (resolved via
                          locate_project).
    :param int verbosity: 0 = per-status counts, 1 = counts plus item IDs,
                          2+ = full per-project/sample/libprep/seqrun dump.
    """
    if type(verbosity) is not int or verbosity < 0:
        # BUG FIX: the "{}" placeholder was never filled in -- .format() was
        # missing, so the literal braces were printed.
        print_stderr('Invalid verbosity level ("{}"); must be a positive '
                     'integer; falling back to 0'.format(verbosity))
        verbosity = 0
    update_charon_with_local_jobs_status(quiet=True)  # Don't send mails
    charon_session = CharonSession()
    projects_list = []
    # Gather project -> sample -> libprep -> seqrun info from Charon.
    for project in projects:
        try:
            project = os.path.basename(locate_project(project))
        except ValueError as e:
            print_stderr("Skipping project: {}".format(e))
            continue
        print_stderr('Gathering information for project "{}"...'.format(project))
        project_dict = {}
        try:
            project = charon_session.project_get(project)
        except CharonError as e:
            print_stderr('Project "{}" not found in Charon; skipping ({})'.format(project, e),
                         file=sys.stderr)
            continue
        project_dict['name'] = project['name']
        project_dict['id'] = project['projectid']
        project_dict['status'] = project['status']
        samples_list = project_dict['samples'] = []
        for sample in charon_session.project_get_samples(project['projectid']).get('samples', []):
            sample_dict = {}
            sample_dict['id'] = sample['sampleid']
            sample_dict['analysis_status'] = sample['analysis_status']
            sample_dict['coverage'] = sample['total_autosomal_coverage']
            libpreps_list = sample_dict['libpreps'] = []
            samples_list.append(sample_dict)
            for libprep in charon_session.sample_get_libpreps(project['projectid'],
                                                              sample['sampleid']).get('libpreps', []):
                libprep_dict = {}
                libprep_dict['id'] = libprep['libprepid']
                libprep_dict['qc'] = libprep['qc']
                seqruns_list = libprep_dict['seqruns'] = []
                libpreps_list.append(libprep_dict)
                for seqrun in charon_session.libprep_get_seqruns(project['projectid'],
                                                                 sample['sampleid'],
                                                                 libprep['libprepid']).get('seqruns', []):
                    seqrun_dict = {}
                    seqrun_dict['id'] = seqrun['seqrunid']
                    seqrun_dict['alignment_status'] = seqrun['alignment_status']
                    seqrun_dict['coverage'] = seqrun['mean_autosomal_coverage']
                    if seqrun.get('total_reads'):
                        seqrun_dict['total_reads'] = seqrun['total_reads']
                    seqruns_list.append(seqrun_dict)
        projects_list.append(project_dict)
    if verbosity in (0, 1):
        # Aggregate item IDs by status at each level.
        projects_status_list = []
        for project_dict in projects_list:
            project_status_dict = {}
            project_status_dict['name'] = "{} ({})".format(project_dict['name'], project_dict['id'])
            project_status_dict['status'] = project_dict['status']
            samples_by_status = project_status_dict['samples_by_status'] = collections.defaultdict(set)
            libpreps_by_status = project_status_dict['libpreps_by_status'] = collections.defaultdict(set)
            seqruns_by_status = project_status_dict['seqruns_by_status'] = collections.defaultdict(set)
            for sample_dict in project_dict.get('samples', []):
                sample_status = sample_dict['analysis_status']
                libpreps = sample_dict.get('libpreps')
                if libpreps:
                    # A sample with libpreps but no seqruns at all gets a
                    # synthetic status so it doesn't look analyzable.
                    if not any([libprep["seqruns"] for libprep in libpreps]):
                        sample_status = "NO_SEQRUNS"
                    else:
                        for libprep_dict in libpreps:
                            libpreps_by_status[libprep_dict['qc']].add(libprep_dict['id'])
                            for seqrun_dict in libprep_dict.get('seqruns', []):
                                seqruns_by_status[seqrun_dict['alignment_status']].add(seqrun_dict['id'])
                else:
                    sample_status = "NO_LIBPREPS"
                samples_by_status[sample_status].add(sample_dict['id'])
            projects_status_list.append(project_status_dict)
        print_items = (("Samples", "samples_by_status"),
                       ("Libpreps", "libpreps_by_status"),
                       ("Seqruns", "seqruns_by_status"),)
        for project_dict in projects_status_list:
            print_stderr("\nProject\n-------")
            print_stderr(" Name: {:>40}".format(project_dict['name']))
            print_stderr(" Status: {:>40}".format(project_dict['status']))
            for name, dict_key in print_items:
                status_dict = project_dict[dict_key]
                print_stderr("{}\n{}".format(name, "-" * len(name)))
                total_items = sum(map(len, status_dict.values()))
                # Sort by analysis status value.
                # BUG FIX: iteritems() is Python-2-only; items() behaves the
                # same here and works on both Python 2 and 3.
                for status, item_set in sorted(status_dict.items(),
                                               key=lambda key_value: key_value[0]):
                    num_items = len(item_set)
                    percent = (100.00 * num_items) / total_items
                    print_stderr(" Status: {:<20} ({:>3}/{:<3}) ({:>6.2f}%)".format(
                        status, num_items, total_items, percent))
                    if verbosity == 1:
                        for item in sorted(item_set):
                            print_stderr(" {}".format(item))
                print_stderr("")
    else:
        # Verbosity is 2+, maximum verbosity: dump everything, indenting
        # deeper for each level of the hierarchy.
        output_template = "{}{:<30}{:>{rspace}}"
        for project_dict in projects_list:
            offset = 0
            indent = " " * offset
            rspace = 80 - offset
            print_stderr(output_template.format(indent, "Project name:", project_dict['name'], rspace=rspace))
            print_stderr(output_template.format(indent, "Project ID:", project_dict['id'], rspace=rspace))
            print_stderr(output_template.format(indent, "Project status:", project_dict['status'], rspace=rspace))
            for sample_dict in project_dict['samples']:
                print_stderr("")
                offset = 4
                indent = " " * offset
                rspace = 80 - offset
                print_stderr(output_template.format(indent, "Sample ID:", sample_dict['id'], rspace=rspace))
                print_stderr(output_template.format(indent, "Sample analysis status:", sample_dict['analysis_status'], rspace=rspace))
                print_stderr(output_template.format(indent, "Sample coverage:", sample_dict['coverage'], rspace=rspace))
                for libprep_dict in sample_dict['libpreps']:
                    print_stderr("")
                    offset = 8
                    indent = " " * offset
                    rspace = 80 - offset
                    print_stderr(output_template.format(indent, "Libprep ID:", libprep_dict['id'], rspace=rspace))
                    print_stderr(output_template.format(indent, "Libprep qc status:", libprep_dict['qc'], rspace=rspace))
                    for seqrun_dict in libprep_dict['seqruns']:
                        print_stderr("")
                        offset = 12
                        indent = " " * offset
                        rspace = 80 - offset
                        print_stderr(output_template.format(indent, "Seqrun ID:", seqrun_dict['id'], rspace=rspace))
                        print_stderr(output_template.format(indent, "Seqrun alignment status:", seqrun_dict['alignment_status'], rspace=rspace))
                        print_stderr(output_template.format(indent, "Seqrun mean auto. coverage:", seqrun_dict['coverage'], rspace=rspace))
                        if "total_reads" in seqrun_dict:
                            print_stderr(output_template.format(indent, "Seqrun total reads:", seqrun_dict['total_reads'], rspace=rspace))
        print_stderr("\n")
def launch_analysis(level, projects_to_analyze, restart_failed_jobs=False,
                    config=None, config_file_path=None):
    """Launch the appropriate seqrun (flowcell-level) analysis for each fastq
    file in the project.

    :param str level: "sample" or "seqrun" -- the granularity to launch at.
    :param list projects_to_analyze: The list of projects (Project objects) to analyze
    :param bool restart_failed_jobs: Relaunch objects Charon marks FAILED.
    :param dict config: The parsed NGI configuration file; optional/has default.
    :param str config_file_path: The path to the NGI configuration file; optional/has default.
    :raises RuntimeError: If no analysis engine is configured for a workflow.
    """
    # Update Charon with the local state of all the jobs we're running
    update_charon_with_local_jobs_status()
    charon_session = CharonSession()
    for project in projects_to_analyze:
        # Get information from Charon regarding which workflows to run
        try:
            # E.g. "NGI" for NGI DNA Samples
            workflow = charon_session.project_get(project.project_id)["pipeline"]
        except (KeyError, CharonError) as e:
            # Workflow missing from Charon?
            LOG.error('Skipping project "{}" because of error: {}'.format(project, e))
            continue
        try:
            analysis_engine_module_name = config["analysis"]["workflows"][workflow]["analysis_engine"]
        except KeyError:
            error_msg = ("No analysis engine for workflow \"{}\" specified "
                         "in configuration file. Skipping this workflow "
                         "for project {}".format(workflow, project))
            LOG.error(error_msg)
            raise RuntimeError(error_msg)
        # Import the adapter module specified in the config file (e.g. piper_ngi)
        try:
            analysis_module = importlib.import_module(analysis_engine_module_name)
        except ImportError as e:
            error_msg = ('Skipping project "{}" workflow "{}": couldn\'t import '
                         'module "{}": {}'.format(project, workflow,
                                                  analysis_engine_module_name, e))
            LOG.error(error_msg)
            # Next project
            continue
        # Flatten the hierarchy into a uniform list of dicts so the launch
        # loop below can handle both levels the same way.
        objects_to_process = []
        if level == "sample":
            for sample in project:
                objects_to_process.append({"project": project, "sample": sample})
        elif level == "seqrun":
            for sample in project:
                for libprep in sample:
                    for seqrun in libprep:
                        objects_to_process.append({"project": project,
                                                   "sample": sample,
                                                   "libprep": libprep,
                                                   "seqrun": seqrun})
        for obj_dict in objects_to_process:
            project = obj_dict.get("project")
            sample = obj_dict.get("sample")
            libprep = obj_dict.get("libprep")
            seqrun = obj_dict.get("seqrun")
            try:
                if level == "seqrun":
                    charon_reported_status = charon_session.seqrun_get(
                        project.project_id, sample, libprep, seqrun)['alignment_status']
                else:  # sample-level
                    charon_reported_status = charon_session.sample_get(
                        project.project_id, sample)['status']
            except (CharonError, KeyError) as e:
                LOG.warn('Unable to get required information from Charon for '
                         'sample "{}" / project "{}" -- forcing it to new: {}'.format(sample, project, e))
                if level == "seqrun":
                    charon_session.seqrun_update(project.project_id, sample.name,
                                                 libprep.name, seqrun.name,
                                                 alignment_status="NEW")
                    charon_reported_status = charon_session.seqrun_get(
                        project.project_id, sample, libprep, seqrun)['alignment_status']
                else:
                    charon_session.sample_update(project.project_id, sample.name,
                                                 status="NEW")
                    charon_reported_status = charon_session.sample_get(
                        project.project_id, sample)['status']
            # Check Charon to ensure this hasn't already been processed
            if charon_reported_status in ("RUNNING", "DONE"):
                if level == "seqrun":
                    LOG.info('Charon reports seqrun analysis for project "{}" / sample "{}" '
                             '/ libprep "{}" / seqrun "{}" does not need processing '
                             ' (already "{}")'.format(project, sample, libprep, seqrun,
                                                      charon_reported_status))
                else:  # Sample
                    # BUG FIX: message previously said "seqrun analysis"
                    # (copy-paste) in the sample-level branch.
                    LOG.info('Charon reports sample analysis for project "{}" / sample "{}" '
                             'does not need processing '
                             ' (already "{}")'.format(project, sample, charon_reported_status))
                continue
            elif charon_reported_status == "FAILED":
                if not restart_failed_jobs:
                    if level == "seqrun":
                        LOG.error('FAILED: Project "{}" / sample "{}" / library "{}" '
                                  '/ flowcell "{}": Charon reports FAILURE, manual '
                                  'investigation needed!'.format(project, sample, libprep, seqrun))
                    else:  # Sample
                        # BUG FIX: two placeholders but four format args were
                        # passed; the extras were silently ignored.
                        LOG.error('FAILED: Project "{}" / sample "{}" Charon reports FAILURE, manual '
                                  'investigation needed!'.format(project, sample))
                    continue
            try:
                # The engines themselves know which sub-workflows
                # they need to execute for a given level. For example,
                # with DNA Variant Calling on the sequencing run
                # level, we need to execute basic alignment and QC.
                if level == "seqrun":
                    LOG.info('Attempting to launch seqrun analysis for '
                             'project "{}" / sample "{}" / libprep "{}" '
                             '/ seqrun "{}", workflow "{}"'.format(project, sample, libprep,
                                                                   seqrun, workflow))
                    analysis_module.analyze_seqrun(project=project, sample=sample,
                                                   libprep=libprep, seqrun=seqrun)
                else:  # sample level
                    LOG.info('Attempting to launch sample analysis for '
                             'project "{}" / sample "{}" / workflow '
                             '"{}"'.format(project, sample, workflow))
                    analysis_module.analyze_sample(project=project, sample=sample)
            except Exception as e:
                # BUG FIX: a stray bare `raise` here made this logging
                # unreachable and aborted the whole launch loop on the first
                # engine error; log the failure and move on to the next
                # object instead. (The unused set_new_seqrun_status
                # assignment was also removed.)
                LOG.error('Cannot process project "{}" / sample "{}" / '
                          'libprep "{}" / seqrun "{}" / workflow '
                          '"{}" : {}'.format(project, sample, libprep, seqrun, workflow, e))
                continue
def project_summarize(projects, verbosity=0):
    """Print (to stderr) a status summary of the given projects, fetched from
    Charon after syncing local job state.

    :param list projects: Project names/paths to summarize (resolved via
                          locate_project).
    :param int verbosity: 0 = per-status counts, 1 = counts plus item IDs,
                          2+ = full per-project/sample/libprep/seqrun dump.
    """
    if type(verbosity) is not int or verbosity < 0:
        # BUG FIX: .format(verbosity) was missing, so the "{}" placeholder
        # was printed literally.
        print_stderr('Invalid verbosity level ("{}"); must be a positive '
                     'integer; falling back to 0'.format(verbosity))
        verbosity = 0
    update_charon_with_local_jobs_status(quiet=True)  # Don't send mails
    charon_session = CharonSession()
    projects_list = []
    # Build the project -> sample -> libprep -> seqrun hierarchy from Charon.
    for project in projects:
        try:
            project = os.path.basename(locate_project(project))
        except ValueError as e:
            print_stderr("Skipping project: {}".format(e))
            continue
        print_stderr(
            'Gathering information for project "{}"...'.format(project))
        project_dict = {}
        try:
            project = charon_session.project_get(project)
        except CharonError as e:
            print_stderr(
                'Project "{}" not found in Charon; skipping ({})'.format(
                    project, e), file=sys.stderr)
            continue
        project_dict['name'] = project['name']
        project_dict['id'] = project['projectid']
        project_dict['status'] = project['status']
        samples_list = project_dict['samples'] = []
        for sample in charon_session.project_get_samples(
                project['projectid']).get('samples', []):
            sample_dict = {}
            sample_dict['id'] = sample['sampleid']
            sample_dict['analysis_status'] = sample['analysis_status']
            sample_dict['coverage'] = sample['total_autosomal_coverage']
            libpreps_list = sample_dict['libpreps'] = []
            samples_list.append(sample_dict)
            for libprep in charon_session.sample_get_libpreps(
                    project['projectid'],
                    sample['sampleid']).get('libpreps', []):
                libprep_dict = {}
                libprep_dict['id'] = libprep['libprepid']
                libprep_dict['qc'] = libprep['qc']
                seqruns_list = libprep_dict['seqruns'] = []
                libpreps_list.append(libprep_dict)
                for seqrun in charon_session.libprep_get_seqruns(
                        project['projectid'], sample['sampleid'],
                        libprep['libprepid']).get('seqruns', []):
                    seqrun_dict = {}
                    seqrun_dict['id'] = seqrun['seqrunid']
                    seqrun_dict['alignment_status'] = seqrun[
                        'alignment_status']
                    seqrun_dict['coverage'] = seqrun['mean_autosomal_coverage']
                    if seqrun.get('total_reads'):
                        seqrun_dict['total_reads'] = seqrun['total_reads']
                    seqruns_list.append(seqrun_dict)
        projects_list.append(project_dict)
    if verbosity in (0, 1):
        # Aggregate item IDs by status at each level of the hierarchy.
        projects_status_list = []
        for project_dict in projects_list:
            project_status_dict = {}
            project_status_dict['name'] = "{} ({})".format(
                project_dict['name'], project_dict['id'])
            project_status_dict['status'] = project_dict['status']
            samples_by_status = project_status_dict[
                'samples_by_status'] = collections.defaultdict(set)
            libpreps_by_status = project_status_dict[
                'libpreps_by_status'] = collections.defaultdict(set)
            seqruns_by_status = project_status_dict[
                'seqruns_by_status'] = collections.defaultdict(set)
            for sample_dict in project_dict.get('samples', []):
                sample_status = sample_dict['analysis_status']
                libpreps = sample_dict.get('libpreps')
                if libpreps:
                    # Samples with libpreps but no seqruns at all get a
                    # synthetic status so they don't look analyzable.
                    if not any([libprep["seqruns"] for libprep in libpreps]):
                        sample_status = "NO_SEQRUNS"
                    else:
                        for libprep_dict in libpreps:
                            libpreps_by_status[libprep_dict['qc']].add(
                                libprep_dict['id'])
                            for seqrun_dict in libprep_dict.get('seqruns', []):
                                seqruns_by_status[
                                    seqrun_dict['alignment_status']].add(
                                        seqrun_dict['id'])
                else:
                    sample_status = "NO_LIBPREPS"
                samples_by_status[sample_status].add(sample_dict['id'])
            projects_status_list.append(project_status_dict)
        print_items = (
            ("Samples", "samples_by_status"),
            ("Libpreps", "libpreps_by_status"),
            ("Seqruns", "seqruns_by_status"),
        )
        for project_dict in projects_status_list:
            print_stderr("\nProject\n-------")
            print_stderr(" Name: {:>40}".format(project_dict['name']))
            print_stderr(" Status: {:>40}".format(project_dict['status']))
            for name, dict_key in print_items:
                status_dict = project_dict[dict_key]
                print_stderr("{}\n{}".format(name, "-" * len(name)))
                total_items = sum(map(len, status_dict.values()))
                # Sort by analysis status value.
                # BUG FIX: iteritems() is Python-2-only; items() behaves the
                # same here and works on both Python 2 and 3.
                for status, item_set in sorted(
                        status_dict.items(),
                        key=lambda key_value: key_value[0]):
                    num_items = len(item_set)
                    percent = (100.00 * num_items) / total_items
                    print_stderr(
                        " Status: {:<20} ({:>3}/{:<3}) ({:>6.2f}%)".format(
                            status, num_items, total_items, percent))
                    if verbosity == 1:
                        for item in sorted(item_set):
                            print_stderr(" {}".format(item))
                print_stderr("")
    else:
        # Verbosity is 2+, maximum verbosity: full dump, indenting deeper
        # for each level of the hierarchy.
        output_template = "{}{:<30}{:>{rspace}}"
        for project_dict in projects_list:
            offset = 0
            indent = " " * offset
            rspace = 80 - offset
            print_stderr(
                output_template.format(indent, "Project name:",
                                       project_dict['name'], rspace=rspace))
            print_stderr(
                output_template.format(indent, "Project ID:",
                                       project_dict['id'], rspace=rspace))
            print_stderr(
                output_template.format(indent, "Project status:",
                                       project_dict['status'], rspace=rspace))
            for sample_dict in project_dict['samples']:
                print_stderr("")
                offset = 4
                indent = " " * offset
                rspace = 80 - offset
                print_stderr(
                    output_template.format(indent, "Sample ID:",
                                           sample_dict['id'], rspace=rspace))
                print_stderr(
                    output_template.format(indent, "Sample analysis status:",
                                           sample_dict['analysis_status'],
                                           rspace=rspace))
                print_stderr(
                    output_template.format(indent, "Sample coverage:",
                                           sample_dict['coverage'],
                                           rspace=rspace))
                for libprep_dict in sample_dict['libpreps']:
                    print_stderr("")
                    offset = 8
                    indent = " " * offset
                    rspace = 80 - offset
                    print_stderr(
                        output_template.format(indent, "Libprep ID:",
                                               libprep_dict['id'],
                                               rspace=rspace))
                    print_stderr(
                        output_template.format(indent, "Libprep qc status:",
                                               libprep_dict['qc'],
                                               rspace=rspace))
                    for seqrun_dict in libprep_dict['seqruns']:
                        print_stderr("")
                        offset = 12
                        indent = " " * offset
                        rspace = 80 - offset
                        print_stderr(
                            output_template.format(indent, "Seqrun ID:",
                                                   seqrun_dict['id'],
                                                   rspace=rspace))
                        print_stderr(
                            output_template.format(
                                indent, "Seqrun alignment status:",
                                seqrun_dict['alignment_status'],
                                rspace=rspace))
                        print_stderr(
                            output_template.format(
                                indent, "Seqrun mean auto. coverage:",
                                seqrun_dict['coverage'], rspace=rspace))
                        if "total_reads" in seqrun_dict:
                            print_stderr(
                                output_template.format(
                                    indent, "Seqrun total reads:",
                                    seqrun_dict['total_reads'],
                                    rspace=rspace))
        print_stderr("\n")
#!/bin/env python from __future__ import print_function import argparse import importlib from ngi_pipeline.engines.piper_ngi.local_process_tracking import update_charon_with_local_jobs_status from ngi_pipeline.engines.piper_ngi.database import SampleAnalysis, get_db_session if __name__ == "__main__": parser = argparse.ArgumentParser( "Show all the jobs currently running (currently just for Piper).") update_charon_with_local_jobs_status() with get_db_session() as session: sample_jobs = session.query(SampleAnalysis).all() print("\nSample-level analysis jobs:") if sample_jobs: for sample_job in sample_jobs: print("\t{}".format(sample_job)) else: print("\tNone") print()
from __future__ import print_function import argparse import importlib from ngi_pipeline.engines.piper_ngi.local_process_tracking import update_charon_with_local_jobs_status from ngi_pipeline.engines.piper_ngi.database import SampleAnalysis, get_db_session if __name__ == "__main__": parser = argparse.ArgumentParser( "Show all the jobs currently running (currently just for Piper).") parser.add_argument( "-q", "--quiet", action="store_true", help="Don't send notification emails on status changes.") args = parser.parse_args() update_charon_with_local_jobs_status(quiet=args.quiet) with get_db_session() as session: sample_jobs = session.query(SampleAnalysis).all() print("\nSample-level analysis jobs:") if sample_jobs: for sample_job in sample_jobs: print("\t{}".format(sample_job)) else: print("\tNone") print()