def rename(args): """ %prog rename in.gff3 switch.ids > reindexed.gff3 Change the IDs within the gff3. """ from jcvi.formats.base import DictFile p = OptionParser(rename.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ingff3, switch = args switch = DictFile(switch) gff = Gff(ingff3) for g in gff: id, = g.attributes["ID"] newname = switch.get(id, id) g.attributes["ID"] = [newname] if "Parent" in g.attributes: parents = g.attributes["Parent"] g.attributes["Parent"] = [switch.get(x, x) for x in parents] g.update_attributes() print g
def libsvm(args): """ %prog libsvm csvfile prefix.ids Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping. Ga -1 Gr 1 So the feature in the first column of csvfile get scanned with the prefix and mapped to different classes. Formatting spec: http://svmlight.joachims.org/ """ from jcvi.formats.base import DictFile p = OptionParser(libsvm.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) csvfile, prefixids = args d = DictFile(prefixids) fp = open(csvfile) fp.next() for row in fp: atoms = row.split() klass = atoms[0] kp = klass.split("_")[0] klass = d.get(kp, "0") feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])] print " ".join([klass] + feats)
def top10(args): """ %prog top10 blastfile.best Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened the get the best match. You can also provide an .ids file to query the ids. For example the ids file can contain the seqid to species mapping. The ids file is two-column, and can sometimes be generated by `jcvi.formats.fasta ids --description`. """ from jcvi.formats.base import DictFile p = OptionParser(top10.__doc__) p.add_option("--ids", default=None, help="Two column ids file to query seqid [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args mapping = DictFile(opts.ids, delimiter="\t") if opts.ids else {} cmd = "cut -f2 {0}".format(blastfile) cmd += " | sort | uniq -c | sort -k1,1nr | head" fp = popen(cmd) for row in fp: count, seqid = row.split() nseqid = mapping.get(seqid, seqid) print "\t".join((count, nseqid))
def header(args): """ %prog header map conversion_table Rename lines in the map header. The mapping of old names to new names are stored in two-column `conversion_table`. """ from jcvi.formats.base import DictFile p = OptionParser(header.__doc__) p.add_option("--prefix", default="", help="Prepend text to line number [default: %default]") p.add_option("--ids", help="Write ids to file [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) mstmap, conversion_table = args data = MSTMap(mstmap) hd = data.header conversion = DictFile(conversion_table) newhd = [opts.prefix + conversion.get(x, x) for x in hd] print "\t".join(hd) print "--->" print "\t".join(newhd) ids = opts.ids if ids: fw = open(ids, "w") print >> fw, "\n".join(newhd) fw.close()
def top10(args): """ %prog top10 blastfile.best Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened the get the best match. You can also provide an .ids file to query the ids. For example the ids file can contain the seqid to species mapping. The ids file is two-column, and can sometimes be generated by `jcvi.formats.fasta ids --description`. """ from jcvi.formats.base import DictFile p = OptionParser(top10.__doc__) p.add_option("--top", default=10, type="int", help="Top N taxa to extract [default: %default]") p.add_option("--ids", default=None, help="Two column ids file to query seqid [default: %default]") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) blastfile, = args mapping = DictFile(opts.ids, delimiter="\t") if opts.ids else {} cmd = "cut -f2 {0}".format(blastfile) cmd += " | sort | uniq -c | sort -k1,1nr | head -n {0}".format(opts.top) fp = popen(cmd) for row in fp: count, seqid = row.split() nseqid = mapping.get(seqid, seqid) print "\t".join((count, nseqid))
def covlen(args):
    """
    %prog covlen covfile fastafile

    Plot coverage vs length. `covfile` is two-column listing contig id and
    depth of coverage.
    """
    import numpy as np
    import pandas as pd
    import seaborn as sns
    from jcvi.formats.base import DictFile

    p = OptionParser(covlen.__doc__)
    p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
    # Fixed copy-paste bug: help text previously said "Max contig size"
    p.add_option("--maxcov", default=100, type="int", help="Max depth of coverage")
    p.add_option("--color", default='m', help="Color of the data points")
    p.add_option("--kind", default="scatter",
                 choices=("scatter", "reg", "resid", "kde", "hex"),
                 help="Kind of plot to draw")
    opts, args, iopts = p.set_image_options(args, figsize="8x8")

    if len(args) != 2:
        sys.exit(not p.print_help())

    covfile, fastafile = args
    # contig id -> depth of coverage; contigs absent from covfile count as 0
    cov = DictFile(covfile, cast=float)
    s = Sizes(fastafile)
    data = []
    maxsize, maxcov = opts.maxsize, opts.maxcov
    for ctg, size in s.iter_sizes():
        c = cov.get(ctg, 0)
        # Drop points outside the requested plotting window
        if size > maxsize or c > maxcov:
            continue
        data.append((size, c))

    x, y = zip(*data)
    x = np.array(x)
    y = np.array(y)
    logging.debug("X size {0}, Y size {1}".format(x.size, y.size))

    df = pd.DataFrame()
    xlab, ylab = "Length", "Coverage of depth (X)"
    df[xlab] = x
    df[ylab] = y
    sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
                  xlim=(0, maxsize), ylim=(0, maxcov),
                  stat_func=None, edgecolor="w", color=opts.color)

    figname = covfile + ".pdf"
    savefig(figname, dpi=iopts.dpi, iopts=iopts)
def summary(args):
    """
    %prog summary diploid.napus.fractionation gmap.status

    Provide summary of fractionation. `fractionation` file is generated with
    loss(). `gmap.status` is generated with genestatus().
    """
    from jcvi.formats.base import DictFile
    from jcvi.utils.cbook import percentage, Registry

    p = OptionParser(summary.__doc__)
    p.add_option("--extra", help="Cross with extra tsv file [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    frfile, statusfile = args
    # gene -> GMAP status string ('complete' / 'pseudogene' / 'partial')
    status = DictFile(statusfile)
    fp = open(frfile)
    registry = Registry()  # keeps all the tags for any given gene
    for row in fp:
        seqid, gene, tag = row.split()
        if tag == '.':
            # '.' means the gene falls outside any synteny block
            registry[gene].append("outside")
        else:
            registry[gene].append("inside")
            if tag[0] == '[':
                # Bracketed tags mean no syntenic gene model at this position
                registry[gene].append("no_syntenic_model")
                if tag.startswith("[S]"):
                    # Sequence match in the syntenic location; refine with
                    # the GMAP status for this gene
                    registry[gene].append("[S]")
                    gstatus = status.get(gene, None)
                    if gstatus == 'complete':
                        registry[gene].append("complete")
                    elif gstatus == 'pseudogene':
                        registry[gene].append("pseudogene")
                    elif gstatus == 'partial':
                        registry[gene].append("partial")
                    else:
                        # Gene missing from gmap.status altogether
                        registry[gene].append("gmap_fail")
                elif tag.startswith("[NS]"):
                    # Sequence match in a non-syntenic location; distinguish
                    # unordered scaffolds from real transpositions
                    registry[gene].append("[NS]")
                    if "random" in tag or "Scaffold" in tag:
                        registry[gene].append("random")
                    else:
                        registry[gene].append("real_ns")
                elif tag.startswith("[NF]"):
                    # No sequence match at all
                    registry[gene].append("[NF]")
            else:
                # Unbracketed tag: a syntenic gene model exists
                registry[gene].append("syntenic_model")

    # Aggregate tag counts for the report
    inside = registry.count("inside")
    outside = registry.count("outside")
    syntenic = registry.count("syntenic_model")
    non_syntenic = registry.count("no_syntenic_model")
    s = registry.count("[S]")
    ns = registry.count("[NS]")
    nf = registry.count("[NF]")
    complete = registry.count("complete")
    pseudogene = registry.count("pseudogene")
    partial = registry.count("partial")
    gmap_fail = registry.count("gmap_fail")
    random = registry.count("random")
    real_ns = registry.count("real_ns")

    # Gene lists per category, written to files below
    complete_models = registry.get_tag("complete")
    pseudogenes = registry.get_tag("pseudogene")
    partial_deletions = registry.get_tag("partial")

    m = "{0} inside synteny blocks\n".format(inside)
    m += "{0} outside synteny blocks\n".format(outside)
    m += "{0} has syntenic gene\n".format(syntenic)
    m += "{0} lack syntenic gene\n".format(non_syntenic)
    m += "{0} has sequence match in syntenic location\n".format(s)
    m += "{0} has sequence match in non-syntenic location\n".format(ns)
    m += "{0} has sequence match in un-ordered scaffolds\n".format(random)
    m += "{0} has sequence match in real non-syntenic location\n".format(real_ns)
    m += "{0} has no sequence match\n".format(nf)
    m += "{0} syntenic sequence - complete model\n".format(percentage(complete, s))
    m += "{0} syntenic sequence - partial model\n".format(percentage(partial, s))
    m += "{0} syntenic sequence - pseudogene\n".format(percentage(pseudogene, s))
    m += "{0} syntenic sequence - gmap fail\n".format(percentage(gmap_fail, s))
    print >> sys.stderr, m

    # Dump each gene list into a file named after its category
    aa = ["complete_models", "partial_deletions", "pseudogenes"]
    bb = [complete_models, partial_deletions, pseudogenes]
    for a, b in zip(aa, bb):
        fw = open(a, "w")
        print >> fw, "\n".join(b)
        fw.close()

    extra = opts.extra
    if extra:
        registry.update_from(extra)

    # Re-read the fractionation file and emit the per-gene tag registry
    fp.seek(0)
    fw = open("registry", "w")
    for row in fp:
        seqid, gene, tag = row.split()
        ts = registry[gene]
        print >> fw, "\t".join((seqid, gene, tag, "-".join(ts)))
    fw.close()
    logging.debug("Registry written.")
def merge(args): """ %prog merge protein-quartets registry LOST Merge protein quartets table with dna quartets registry. This is specific to the napus project. """ from jcvi.formats.base import DictFile p = OptionParser(merge.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) quartets, registry, lost = args qq = DictFile(registry, keypos=1, valuepos=3) lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|') qq.update(lost) fp = open(quartets) cases = { "AN,CN": 4, "BO,AN,CN": 8, "BO,CN": 2, "BR,AN": 1, "BR,AN,CN": 6, "BR,BO": 3, "BR,BO,AN": 5, "BR,BO,AN,CN": 9, "BR,BO,CN": 7, } ip = { "syntenic_model": "Syntenic_model_excluded_by_OMG", "complete": "Predictable", "partial": "Truncated", "pseudogene": "Pseudogene", "random": "Match_random", "real_ns": "Transposed", "gmap_fail": "GMAP_fail", "AN LOST": "AN_LOST", "CN LOST": "CN_LOST", "BR LOST": "BR_LOST", "BO LOST": "BO_LOST", "outside": "Outside_synteny_blocks", "[NF]": "Not_found", } for row in fp: atoms = row.strip().split("\t") genes = atoms[:4] tag = atoms[4] a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes] qqs = [c, d, a, b] for i, q in enumerate(qqs): if atoms[i] != '.': qqs[i] = "syntenic_model" # Make comment comment = "Case{0}".format(cases[tag]) dots = sum([1 for x in genes if x == '.']) if dots == 1: idx = genes.index(".") status = qqs[idx] status = ip[status] comment += "-" + status print row.strip() + "\t" + "\t".join(qqs + [comment])
def sizes(args): """ %prog sizes gaps.bed a.fasta b.fasta Take the flanks of gaps within a.fasta, map them onto b.fasta. Compile the results to the gap size estimates in b. The output is detailed below: Columns are: 1. A scaffold 2. Start position 3. End position 4. Gap identifier 5. Gap size in A (= End - Start) 6. Gap size in B (based on BLAST, see below) For each gap, I extracted the left and right sequence (mostly 2Kb, but can be shorter if it runs into another gap) flanking the gap. The flanker names look like gap.00003L and gap.00003R means the left and right flanker of this particular gap, respectively. The BLAST output is used to calculate the gap size. For each flanker sequence, I took the best hit, and calculate the inner distance between the L match range and R range. The two flankers must map with at least 98% identity, and in the same orientation. NOTE the sixth column in the list file is not always a valid number. Other values are: - na: both flankers are missing in B - Singleton: one flanker is missing - Different chr: flankers map to different scaffolds - Strand +|-: flankers map in different orientations - Negative value: the R flanker map before L flanker """ from jcvi.formats.base import DictFile from jcvi.apps.align import blast p = OptionParser(sizes.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) gapsbed, afasta, bfasta = args pf = gapsbed.rsplit(".", 1)[0] extbed = pf + ".ext.bed" extfasta = pf + ".ext.fasta" if need_update(gapsbed, extfasta): extbed, extfasta = flanks([gapsbed, afasta]) q = op.basename(extfasta).split(".")[0] r = op.basename(bfasta).split(".")[0] blastfile = "{0}.{1}.blast".format(q, r) if need_update([extfasta, bfasta], blastfile): blastfile = blast([bfasta, extfasta, "--wordsize=50", "--pctid=98"]) labelsfile = blast_to_twobeds(blastfile) labels = DictFile(labelsfile, delimiter='\t') bed = Bed(gapsbed) for b in bed: b.score = b.span accn = b.accn print "\t".join((str(x) for x in 
(b.seqid, b.start - 1, b.end, accn, b.score, labels.get(accn, "na"))))
def summary(args):
    """
    %prog summary diploid.napus.fractionation gmap.status

    Provide summary of fractionation. `fractionation` file is generated with
    loss(). `gmap.status` is generated with genestatus().
    """
    from jcvi.formats.base import DictFile
    from jcvi.utils.cbook import percentage, Registry

    p = OptionParser(summary.__doc__)
    p.add_option("--extra", help="Cross with extra tsv file [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    frfile, statusfile = args
    # gene -> GMAP status string ('complete' / 'pseudogene' / 'partial')
    status = DictFile(statusfile)
    fp = open(frfile)
    registry = Registry()  # keeps all the tags for any given gene
    for row in fp:
        seqid, gene, tag = row.split()
        if tag == '.':
            # '.' means the gene falls outside any synteny block
            registry[gene].append("outside")
        else:
            registry[gene].append("inside")
            if tag[0] == '[':
                # Bracketed tags mean no syntenic gene model at this position
                registry[gene].append("no_syntenic_model")
                if tag.startswith("[S]"):
                    # Sequence match in the syntenic location; refine with
                    # the GMAP status for this gene
                    registry[gene].append("[S]")
                    gstatus = status.get(gene, None)
                    if gstatus == 'complete':
                        registry[gene].append("complete")
                    elif gstatus == 'pseudogene':
                        registry[gene].append("pseudogene")
                    elif gstatus == 'partial':
                        registry[gene].append("partial")
                    else:
                        # Gene missing from gmap.status altogether
                        registry[gene].append("gmap_fail")
                elif tag.startswith("[NS]"):
                    # Sequence match in a non-syntenic location; distinguish
                    # unordered scaffolds from real transpositions
                    registry[gene].append("[NS]")
                    if "random" in tag or "Scaffold" in tag:
                        registry[gene].append("random")
                    else:
                        registry[gene].append("real_ns")
                elif tag.startswith("[NF]"):
                    # No sequence match at all
                    registry[gene].append("[NF]")
            else:
                # Unbracketed tag: a syntenic gene model exists
                registry[gene].append("syntenic_model")

    # Aggregate tag counts for the report
    inside = registry.count("inside")
    outside = registry.count("outside")
    syntenic = registry.count("syntenic_model")
    non_syntenic = registry.count("no_syntenic_model")
    s = registry.count("[S]")
    ns = registry.count("[NS]")
    nf = registry.count("[NF]")
    complete = registry.count("complete")
    pseudogene = registry.count("pseudogene")
    partial = registry.count("partial")
    gmap_fail = registry.count("gmap_fail")
    random = registry.count("random")
    real_ns = registry.count("real_ns")

    # Gene lists per category, written to files below
    complete_models = registry.get_tag("complete")
    pseudogenes = registry.get_tag("pseudogene")
    partial_deletions = registry.get_tag("partial")

    m = "{0} inside synteny blocks\n".format(inside)
    m += "{0} outside synteny blocks\n".format(outside)
    m += "{0} has syntenic gene\n".format(syntenic)
    m += "{0} lack syntenic gene\n".format(non_syntenic)
    m += "{0} has sequence match in syntenic location\n".format(s)
    m += "{0} has sequence match in non-syntenic location\n".format(ns)
    m += "{0} has sequence match in un-ordered scaffolds\n".format(random)
    m += "{0} has sequence match in real non-syntenic location\n".format(
        real_ns)
    m += "{0} has no sequence match\n".format(nf)
    m += "{0} syntenic sequence - complete model\n".format(
        percentage(complete, s))
    m += "{0} syntenic sequence - partial model\n".format(
        percentage(partial, s))
    m += "{0} syntenic sequence - pseudogene\n".format(
        percentage(pseudogene, s))
    m += "{0} syntenic sequence - gmap fail\n".format(percentage(gmap_fail, s))
    print >> sys.stderr, m

    # Dump each gene list into a file named after its category
    aa = ["complete_models", "partial_deletions", "pseudogenes"]
    bb = [complete_models, partial_deletions, pseudogenes]
    for a, b in zip(aa, bb):
        fw = open(a, "w")
        print >> fw, "\n".join(b)
        fw.close()

    extra = opts.extra
    if extra:
        registry.update_from(extra)

    # Re-read the fractionation file and emit the per-gene tag registry
    fp.seek(0)
    fw = open("registry", "w")
    for row in fp:
        seqid, gene, tag = row.split()
        ts = registry[gene]
        print >> fw, "\t".join((seqid, gene, tag, "-".join(ts)))
    fw.close()
    logging.debug("Registry written.")