def _get_parser():
    """Build the argument parser for the dataset-editing tool."""
    parser = get_base_parser(__doc__)
    parser.add_argument("dataset_xml", help="Path to input dataset")
    parser.add_argument("output_file", help="Name of output dataset file")
    parser.add_argument("--biosample-name", default=None,
                        help="New BioSample Name")
    parser.add_argument("--library-name", default=None,
                        help="New Library Name")
    return parser
def get_parser_impl(constants):
    """Build an export parser from a tool-specific ``constants`` object.

    ``constants`` must expose ``FORMAT_NAME`` (used to derive the output
    argument name) and ``DESC`` (the parser description).
    """
    fmt = constants.FORMAT_NAME
    parser = get_base_parser(constants.DESC)
    parser.add_argument("bam", help="Input PacBio dataset XML or BAM file")
    # Positional output argument is named after the format, e.g. "fasta_out".
    parser.add_argument(
        "{f}_out".format(f=fmt),
        help="Exported {f} as ZIP archive".format(f=fmt.upper()))
    return parser
def _get_parser():
    """Build the argument parser for the datastore-to-FASTQ conversion."""
    parser = get_base_parser(__doc__)
    parser.add_argument("datastore_in",
                        help="DataStore JSON of ConsensusReadSet files")
    parser.add_argument("datastore_out",
                        help="DataStore JSON of FASTQ files")
    parser.add_argument("--nproc", type=int, default=1,
                        help="Number of processors to use")
    return parser
def _get_parser():
    """Build the argument parser for the CCS report tool."""
    parser = get_base_parser(__doc__)
    parser.add_argument("ccsxml", help="Path to input dataset")
    # action="store" is argparse's default and is kept only for parity
    # with the original definition.
    parser.add_argument("--zmws-json", action="store", default=None,
                        help="Path to zmws.json.gz")
    parser.add_argument("--report-ccs-processing", action="store", default=None,
                        help="Path to ccs_processing.report.json")
    return parser
def _get_parser():
    """Build the argument parser for barcode/read dataset inputs.

    ``--symmetric`` and ``--asymmetric`` write to a single tri-state
    destination ``symmetric_barcodes``: None when neither flag is given,
    True for --symmetric, False for --asymmetric.
    """
    parser = get_base_parser(__doc__)
    parser.add_argument("barcodes", help="BarcodeSet XML or FASTA")
    parser.add_argument("dataset",
                        help="Reads (Subreads or CCS) PacBio dataset XML")
    parser.add_argument("--symmetric", action="store_true",
                        dest="symmetric_barcodes", default=None,
                        help="Symmetric barcode pairs")
    parser.add_argument("--asymmetric", action="store_false",
                        dest="symmetric_barcodes",
                        help="Asymmetric barcode pairs")
    return parser
def _get_parser():
    """Build the argument parser for per-sample Iso-Seq output collection."""
    parser = get_base_parser(__doc__)
    parser.add_argument("flnc_ccs",
                        help="Full-Length Non-Chimeric CCS Reads")
    parser.add_argument("--cluster-csv", default=None,
                        help="Cluster Report CSV")
    parser.add_argument("--transcripts-hq", default=None,
                        help="HQ TranscriptSet")
    parser.add_argument("--transcripts-lq", default=None,
                        help="LQ TranscriptSet")
    parser.add_argument("--collapse-fasta", default=None,
                        help="Collapsed Isoforms")
    # A --collapse-gff option ("Collapsed Isoform GFF") was present but is
    # currently disabled.
    parser.add_argument("--read-stats", default=None,
                        help="Collapsed Isoform Read Info")
    parser.add_argument("--datastore-prefix", default="sample",
                        help="Prefix for output per-sample datastore.json files")
    return parser
def _get_parser():
    """Build the argument parser for dataset size/index inspection."""
    parser = get_base_parser(__doc__)
    parser.add_argument("dataset", help="PacBio dataset XML")
    # NOTE(review): skip_counts already defaults to True, so passing
    # --skip-counts on the command line is a no-op; the flag exists as the
    # explicit counterpart of --no-skip-counts. Changing the default here
    # would change CLI behavior, so it is left as-is.
    parser.add_argument("--skip-counts", action="store_true", default=True,
                        help="Don't load database indices")
    parser.add_argument("--no-skip-counts", action="store_false",
                        dest="skip_counts",
                        help="Load database indices and recalculate size")
    parser.add_argument("--get-index-size", action="store_true",
                        help="Compute file index size")
    return parser
def _get_parser():
    """Build the argument parser for collecting per-sample output files."""
    parser = get_base_parser(__doc__)
    parser.add_argument("samples_file", help="BAM file with sample name")
    # One optional flag per known file ID, mapping e.g. foo_bar -> --foo-bar.
    for file_id, _, label in FILE_IDS_AND_NAMES:
        flag = "--{}".format(file_id.replace("_", "-"))
        parser.add_argument(flag, default=None, help=label)
    parser.add_argument(
        "--single-sample", action="store_true", default=False,
        help="Indicates whether the analysis has only one sample")
    parser.add_argument(
        "--all-samples", action="store_true", default=False,
        help="Indicates whether the outputs are for all samples in a multi-sample experiment")
    parser.add_argument(
        "--datastore", default="isoseq_sample.datastore.json",
        help="Output JSON file name")
    return parser
def get_parser():
    """Build the argument parser for renaming transcript reads by biosample.

    Inputs (positional, in order):
      0. SubreadSet (carries the biosample metadata)
      1. Gathered HQ TranscriptSet
      2. Gathered LQ TranscriptSet
    Outputs (positional, in order):
      0. HQ TranscriptSet whose read names carry a biosample_HQ prefix
      1. LQ TranscriptSet whose read names carry a biosample_LQ prefix
      2. DataStore for the output HQ TranscriptSet BAM file
      3. DataStore for the output LQ TranscriptSet BAM file
    """
    parser = get_base_parser(__doc__)
    parser.add_argument("subreads", help="SubreadSet with biosample metadata.")
    parser.add_argument("hq_ds_in", help="Gathered HQ transcripts")
    parser.add_argument("lq_ds_in", help="Gathered LQ transcripts")
    parser.add_argument("hq_ds_out", help="Output HQ transcripts")
    parser.add_argument("lq_ds_out", help="Output LQ transcripts")
    parser.add_argument("hq_datastore",
                        help="Datastore containing HQ transcripts BAM")
    parser.add_argument("lq_datastore",
                        help="Datastore containing LQ transcripts BAM")
    return parser
def _get_parser():
    """Build the argument parser for the BarcodeSet tool."""
    parser = get_base_parser(__doc__)
    parser.add_argument("barcodeset", help="BarcodeSet XML")
    return parser
def get_parser():
    """Build the argument parser for subreads-to-FASTQ export."""
    parser = get_base_parser(__doc__)
    parser.add_argument("subreads", help="Input SubreadSet XML")
    parser.add_argument("fastq_out", help="Exported FASTQ")
    return parser
def get_parser():
    """Build the command-line parser for the file validation tool.

    Covers BAM, FASTA, and DataSet XML validation options, plus the
    format-specific option groups contributed by the ``bam`` and ``fasta``
    modules.
    """
    p = get_base_parser(__doc__, log_level="CRITICAL")
    p.add_argument('file', help="BAM, FASTA, or DataSet XML file")
    p.add_argument(
        "--quick", dest="quick", action="store_true",
        help="Limits validation to the first 100 records (plus file header); "
             "equivalent to --max-records=100")
    p.add_argument(
        "--max", dest="max_errors", action="store", type=int,
        help="Exit after MAX_ERRORS have been recorded "
             "(DEFAULT: check entire file)")
    p.add_argument(
        "--max-records", dest="max_records", action="store", type=int,
        help="Exit after MAX_RECORDS have been inspected "
             "(DEFAULT: check entire file)")
    p.add_argument(
        "--type", dest="file_type", action="store",
        choices=["BAM", "Fasta"] + dataset.DatasetTypes.ALL,
        help="Use the specified file type instead of guessing")
    p.add_argument(
        "--index", dest="validate_index", action="store_true",
        help="Require index files (.fai or .pbi); this is always enforced for dataset XML")
    p.add_argument(
        "--strict", dest="strict", action="store_true",
        help="Turn on additional validation, primarily for DataSet XML")
    p.add_argument(
        "--instrument", dest="instrument_mode", action="store_true",
        help="Indicates that the dataset is delivered from a PacBio instrument and should be checked for metadata consistency")
    p.add_argument(
        "-x", "--xunit-out", dest="xunit_out", action="store", default=None,
        help="Xunit test results for Jenkins")
    p.add_argument(
        "--alarms", dest="alarms_out", action="store", default=None,
        help="alarms.json for errors")
    # Format-specific options live in their own argument groups, populated
    # by the respective validator modules.
    bam_group = p.add_argument_group('bam', "BAM options")
    fasta_group = p.add_argument_group('fasta', "Fasta options")
    bam.get_format_specific_args(bam_group)
    fasta.get_format_specific_args(fasta_group)
    return p
def _get_parser():
    """Build the argument parser for gathering chunked datasets."""
    parser = get_base_parser(__doc__)
    # nargs="+" requires at least one dataset path.
    parser.add_argument("datasets", nargs="+",
                        help="Paths to chunked datasets")
    return parser
def _get_parser():
    """Build the argument parser for the trimmed-reads datastore tool."""
    parser = get_base_parser(__doc__)
    parser.add_argument("datastore", help="Datastore of trimmed reads")
    parser.add_argument("ccs_in", help="Input (untrimmed) CCS reads")
    return parser