def libsvm(args):
    """
    %prog libsvm csvfile prefix.ids

    Convert csv file to LIBSVM format. `prefix.ids` contains the prefix mapping.
    Ga -1
    Gr 1

    So the feature in the first column of csvfile gets scanned with the prefix
    and mapped to different classes. Formatting spec:

    http://svmlight.joachims.org/
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(libsvm.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    csvfile, prefixids = args
    d = DictFile(prefixids)
    fp = open(csvfile)
    fp.next()
    for row in fp:
        atoms = row.split()
        klass = atoms[0]
        kp = klass.split("_")[0]
        klass = d.get(kp, "0")
        feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])]
        print " ".join([klass] + feats)
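# A minimal sketch (not from the original source) of the conversion that
# libsvm() performs above: the prefix of the first column is looked up in the
# `prefix.ids` mapping to produce the class label, and the remaining columns
# become 1-based "index:value" features, per the LIBSVM/SVMlight format.
# The helper name, sample row, and mapping below are hypothetical.
def _to_libsvm_row(row, prefix_map):
    atoms = row.split()
    klass = prefix_map.get(atoms[0].split("_")[0], "0")
    feats = ["{0}:{1}".format(i + 1, x) for i, x in enumerate(atoms[1:])]
    return " ".join([klass] + feats)

# _to_libsvm_row("Ga_scaffold1 0.2 0.5", {"Ga": "-1", "Gr": "1"})
# -> "-1 1:0.2 2:0.5"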
def header(args):
    """
    %prog header map conversion_table

    Rename lines in the map header. The mapping of old names to new names is
    stored in two-column `conversion_table`.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(header.__doc__)
    p.add_option("--prefix", default="",
                 help="Prepend text to line number [default: %default]")
    p.add_option("--ids", help="Write ids to file [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    mstmap, conversion_table = args
    data = MSTMap(mstmap)
    hd = data.header
    conversion = DictFile(conversion_table)
    newhd = [opts.prefix + conversion.get(x, x) for x in hd]

    print "\t".join(hd)
    print "--->"
    print "\t".join(newhd)

    ids = opts.ids
    if ids:
        fw = open(ids, "w")
        print >> fw, "\n".join(newhd)
        fw.close()
def rename(args):
    """
    %prog rename in.gff3 switch.ids > reindexed.gff3

    Change the IDs within the gff3.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(rename.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    ingff3, switch = args
    switch = DictFile(switch)

    gff = Gff(ingff3)
    for g in gff:
        id, = g.attributes["ID"]
        newname = switch.get(id, id)
        g.attributes["ID"] = [newname]
        if "Parent" in g.attributes:
            parents = g.attributes["Parent"]
            g.attributes["Parent"] = [switch.get(x, x) for x in parents]
        g.update_attributes()
        print g
def fillrbh(args):
    from jcvi.formats.base import DictFile

    p = OptionParser(fillrbh.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    blocksfile, rbhfile, orthofile = args

    # Generate mapping both ways
    adict = DictFile(rbhfile)
    bdict = DictFile(rbhfile, keypos=1, valuepos=0)
    adict.update(bdict)

    fp = open(blocksfile)
    fw = open(orthofile, "w")
    nrecruited = 0
    for row in fp:
        a, b = row.split()
        c = '.'
        if b == '.':
            if a in adict:
                b = adict[a]
                nrecruited += 1
                c = 'rbh'
        else:
            c = 'syntelog'
        print("\t".join((a, b, c)), file=fw)

    logging.debug("Recruited {0} pairs from RBH.".format(nrecruited))
    fp.close()
    fw.close()
def top10(args):
    """
    %prog top10 blastfile.best

    Count the most frequent 10 hits. Usually the BLASTFILE needs to be screened
    to get the best match. You can also provide an .ids file to query the ids.
    For example the ids file can contain the seqid to species mapping.

    The ids file is two-column, and can sometimes be generated by
    `jcvi.formats.fasta ids --description`.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(top10.__doc__)
    p.add_option("--top", default=10, type="int",
                 help="Top N taxa to extract [default: %default]")
    p.add_option("--ids", default=None,
                 help="Two column ids file to query seqid [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    mapping = DictFile(opts.ids, delimiter="\t") if opts.ids else {}

    cmd = "cut -f2 {0}".format(blastfile)
    cmd += " | sort | uniq -c | sort -k1,1nr | head -n {0}".format(opts.top)
    fp = popen(cmd)
    for row in fp:
        count, seqid = row.split()
        nseqid = mapping.get(seqid, seqid)
        print "\t".join((count, nseqid))
def fillrbh(args):
    from jcvi.formats.base import DictFile

    p = OptionParser(fillrbh.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    blocksfile, rbhfile, orthofile = args

    # Generate mapping both ways
    adict = DictFile(rbhfile)
    bdict = DictFile(rbhfile, keypos=1, valuepos=0)
    adict.update(bdict)

    fp = open(blocksfile)
    fw = open(orthofile, "w")
    nrecruited = 0
    for row in fp:
        a, b = row.split()
        if b == '.':
            if a in adict:
                b = adict[a]
                nrecruited += 1
                b += "'"
        print("\t".join((a, b)), file=fw)

    logging.debug("Recruited {0} pairs from RBH.".format(nrecruited))
    fp.close()
    fw.close()
def annotation(args):
    """
    %prog annotation blastfile > annotations

    Create simple two column files from the first two columns in blastfile. Use
    --queryids and --subjectids to switch IDs or descriptions.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(annotation.__doc__)
    p.add_option("--queryids",
                 help="Query IDS file to switch [default: %default]")
    p.add_option("--subjectids",
                 help="Subject IDS file to switch [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args

    d = "\t"
    qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None
    sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None
    blast = Blast(blastfile)
    for b in blast:
        query, subject = b.query, b.subject
        if qids:
            query = qids[query]
        if sids:
            subject = sids[subject]
        print "\t".join((query, subject))
def covlen(args):
    """
    %prog covlen covfile fastafile

    Plot coverage vs length. `covfile` is two-column listing contig id and
    depth of coverage.
    """
    import numpy as np
    import pandas as pd
    import seaborn as sns
    from jcvi.formats.base import DictFile

    p = OptionParser(covlen.__doc__)
    p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
    p.add_option("--maxcov", default=100, type="int", help="Max coverage")
    p.add_option("--color", default='m', help="Color of the data points")
    p.add_option("--kind", default="scatter",
                 choices=("scatter", "reg", "resid", "kde", "hex"),
                 help="Kind of plot to draw")
    opts, args, iopts = p.set_image_options(args, figsize="8x8")

    if len(args) != 2:
        sys.exit(not p.print_help())

    covfile, fastafile = args
    cov = DictFile(covfile, cast=float)
    s = Sizes(fastafile)
    data = []
    maxsize, maxcov = opts.maxsize, opts.maxcov
    for ctg, size in s.iter_sizes():
        c = cov.get(ctg, 0)
        if size > maxsize:
            continue
        if c > maxcov:
            continue
        data.append((size, c))

    x, y = zip(*data)
    x = np.array(x)
    y = np.array(y)
    logging.debug("X size {0}, Y size {1}".format(x.size, y.size))

    df = pd.DataFrame()
    xlab, ylab = "Length", "Depth of coverage (X)"
    df[xlab] = x
    df[ylab] = y
    sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
                  xlim=(0, maxsize), ylim=(0, maxcov),
                  stat_func=None, edgecolor="w", color=opts.color)

    figname = covfile + ".pdf"
    savefig(figname, dpi=iopts.dpi, iopts=iopts)
def flanking(args):
    """
    %prog flanking SI.ids liftover.bed master.txt master-removed.txt

    Extract flanking genes for given SI loci.
    """
    p = OptionParser(flanking.__doc__)
    p.add_option("-N", default=50, type="int",
                 help="How many genes on both directions")
    opts, args = p.parse_args(args)

    if len(args) != 4:
        sys.exit(not p.print_help())

    SI, liftover, master, te = args
    N = opts.N
    SI = SetFile(SI, column=0, delimiter='.')
    liftover = Bed(liftover)
    order = liftover.order
    neighbors = set()
    for s in SI:
        si, s = order[s]
        LB = max(si - N, 0)
        RB = min(si + N, len(liftover))
        for j in xrange(LB, RB + 1):
            a = liftover[j]
            if a.seqid != s.seqid:
                continue
            neighbors.add(a.accn)

    dmain = DictFile(master, keypos=0, valuepos=None, delimiter='\t')
    dte = DictFile(te, keypos=0, valuepos=None, delimiter='\t')
    header = open(master).next()
    print "\t".join(("SI/Neighbor", "Gene/TE", header.strip()))
    for a in liftover:
        s = a.accn
        if s not in neighbors:
            continue

        tag = "SI" if s in SI else "neighbor"
        if s in dmain:
            d = dmain[s]
            print "\t".join([tag, "gene"] + d)
        elif s in dte:
            d = dte[s]
            print "\t".join([tag, "TE"] + d)
def some(args):
    """
    %prog some idsfile afastq [bfastq]

    Select a subset of the reads with ids present in the idsfile.
    `bfastq` is optional (only if reads are paired)
    """
    p = OptionParser(some.__doc__)
    opts, args = p.parse_args(args)

    if len(args) not in (2, 3):
        sys.exit(not p.print_help())

    idsfile, afastq, = args[:2]
    bfastq = args[2] if len(args) == 3 else None

    ids = DictFile(idsfile, valuepos=None)

    ai = iter_fastq(open(afastq))
    arec = ai.next()
    if bfastq:
        bi = iter_fastq(open(bfastq))
        brec = bi.next()

    while arec:
        if arec.name[1:] in ids:
            print arec
            if bfastq:
                print brec

        arec = ai.next()
        if bfastq:
            brec = bi.next()
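# Note on the DictFile(idsfile, valuepos=None) call above: only membership of
# the read names (the first column of idsfile) is tested, so the object is
# effectively used as a set of ids. A minimal, hypothetical stand-in with the
# same behavior for this particular function would be:
def _load_ids(idsfile):
    # keep just the first column of each non-empty line
    return set(row.split()[0] for row in open(idsfile) if row.strip())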
def logodds(args):
    """
    %prog logodds cnt1 cnt2

    Compute log likelihood between two db.
    """
    from math import log
    from jcvi.formats.base import DictFile

    p = OptionParser(logodds.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    cnt1, cnt2 = args
    d = DictFile(cnt2)
    fp = open(cnt1)
    for row in fp:
        scf, c1 = row.split()
        c2 = d[scf]
        c1, c2 = float(c1), float(c2)
        c1 += 1
        c2 += 1
        score = int(100 * (log(c1) - log(c2)))
        print("{0}\t{1}".format(scf, score))
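# Worked example for the score above (counts are made up): with c1 = 9 in cnt1
# and c2 = 99 in cnt2, the add-one smoothing gives 10 and 100, so
# score = int(100 * (log(10) - log(100))) = int(-230.26) = -230,
# i.e. roughly 100x the natural-log odds of the cnt1 count over the cnt2 count.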
def _draw_trees(trees, nrow=1, ncol=1, rmargin=0.3, iopts=None, outdir=".",
                shfile=None, **kwargs):
    """
    Draw one or multiple trees on one plot.
    """
    from jcvi.graphics.tree import draw_tree

    if shfile:
        SHs = DictFile(shfile, delimiter="\t")

    ntrees = len(trees)
    n = nrow * ncol
    for x in range(int(ceil(float(ntrees) / n))):
        fig = plt.figure(1, (iopts.w, iopts.h)) if iopts \
            else plt.figure(1, (5, 5))
        root = fig.add_axes([0, 0, 1, 1])

        xiv = 1.0 / ncol
        yiv = 1.0 / nrow
        xstart = list(np.arange(0, 1, xiv)) * nrow
        ystart = list(chain(*zip(*[list(np.arange(0, 1, yiv))[::-1]] * ncol)))
        for i in range(n * x, n * (x + 1)):
            if i == ntrees:
                break
            ax = fig.add_axes([xstart[i % n], ystart[i % n], xiv, yiv])
            f = trees.keys()[i]
            tree = trees[f]
            try:
                SH = SHs[f]
            except:
                SH = None
            draw_tree(ax, tree, rmargin=rmargin, reroot=False,
                      supportcolor="r", SH=SH, **kwargs)

        root.set_xlim(0, 1)
        root.set_ylim(0, 1)
        root.set_axis_off()

        format = iopts.format if iopts else "pdf"
        dpi = iopts.dpi if iopts else 300
        if n == 1:
            image_name = f.rsplit(".", 1)[0] + "." + format
        else:
            image_name = "trees{0}.{1}".format(x, format)
        image_name = op.join(outdir, image_name)
        savefig(image_name, dpi=dpi, iopts=iopts)
        plt.clf()
def geneinfo(args):
    """
    %prog geneinfo pineapple.20141004.bed liftover.bed pineapple.20150413.bed \
        note.txt interproscan.txt

    Build gene info table from various sources. The three beds contain
    information on the original scaffolds, linkage groups, and final selected
    loci (after removal of TEs and split loci). The final two text files
    contain AHRD and domain data.
    """
    p = OptionParser(geneinfo.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 5:
        sys.exit(not p.print_help())

    scfbed, liftoverbed, lgbed, note, ipr = args
    note = DictFile(note, delimiter="\t")
    scfbed = Bed(scfbed)
    lgorder = Bed(lgbed).order
    liftover = Bed(liftoverbed).order
    header = ("Accession Scaffold-position LG-position "
              "Description Interpro-domain Interpro-description "
              "GO-term KEGG".split())
    ipr = read_interpro(ipr)

    fw_clean = must_open("master.txt", "w")
    fw_removed = must_open("master-removed.txt", "w")
    for fw in (fw_clean, fw_removed):
        print("\t".join(header), file=fw)

    for b in scfbed:
        accession = b.accn
        scaffold_position = b.tag
        if accession in liftover:
            lg_position = liftover[accession][-1].tag
        else:
            lg_position = "split"
        fw = fw_clean if accession in lgorder else fw_removed
        description = note[accession]
        interpro = interpro_description = go = kegg = ""
        if accession in ipr:
            interpro, interpro_description, go, kegg = ipr[accession]
        print(
            "\t".join((
                accession, scaffold_position, lg_position, description,
                interpro, interpro_description, go, kegg,
            )),
            file=fw,
        )
    fw.close()
def make_ortholog(blocksfile, rbhfile, orthofile):
    from jcvi.formats.base import DictFile

    # Generate mapping both ways
    adict = DictFile(rbhfile)
    bdict = DictFile(rbhfile, keypos=1, valuepos=0)
    adict.update(bdict)

    fp = open(blocksfile)
    fw = open(orthofile, "w")
    nrecruited = 0
    for row in fp:
        a, b = row.split()
        if b == '.':
            if a in adict:
                b = adict[a]
                nrecruited += 1
                b += "'"
        print >> fw, "\t".join((a, b))

    logging.debug("Recruited {0} pairs from RBH.".format(nrecruited))
    fp.close()
    fw.close()
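# The two DictFile() calls above build the reciprocal-best-hit lookup in both
# directions from the same two-column rbhfile: the first call maps column 0 to
# column 1, the second (keypos=1, valuepos=0) maps column 1 back to column 0,
# and update() merges them into one dict. A minimal plain-dict sketch of the
# same idea (helper name is hypothetical):
def _two_way_map(rbhfile):
    forward, backward = {}, {}
    for row in open(rbhfile):
        a, b = row.split()[:2]
        forward[a] = b
        backward[b] = a
    forward.update(backward)  # one dict answering lookups from either side
    return forward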
def gaps(args):
    """
    %prog gaps idsfile fractionationfile gapsbed

    Check gene locations against gaps. `idsfile` contains a list of IDs to
    query into `fractionationfile` in order to get expected locations.
    """
    from jcvi.formats.base import DictFile
    from jcvi.apps.base import popen
    from jcvi.utils.cbook import percentage

    p = OptionParser(gaps.__doc__)
    p.add_option("--bdist", default=0, type="int",
                 help="Base pair distance [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    idsfile, frfile, gapsbed = args
    bdist = opts.bdist
    d = DictFile(frfile, keypos=1, valuepos=2)
    bedfile = idsfile + ".bed"
    fw = open(bedfile, "w")
    fp = open(idsfile)
    total = 0
    for row in fp:
        id = row.strip()
        hit = d[id]
        tag, pos = get_tag(hit, None)
        seqid, start, end = pos
        start, end = max(start - bdist, 1), end + bdist
        print >> fw, "\t".join(str(x) for x in (seqid, start - 1, end, id))
        total += 1
    fw.close()

    cmd = "intersectBed -a {0} -b {1} -v | wc -l".format(bedfile, gapsbed)
    not_in_gaps = popen(cmd).read()
    not_in_gaps = int(not_in_gaps)
    in_gaps = total - not_in_gaps
    print >> sys.stderr, "Ids in gaps: {1}".\
        format(total, percentage(in_gaps, total))
def sort_layout(thread, listfile, column=0):
    """
    Sort the syntelog table according to chromosomal positions. First orient
    the contents against threadbed, then for contents not in threadbed, insert
    to the nearest neighbor.
    """
    from jcvi.formats.base import DictFile

    outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
    threadorder = thread.order
    fw = open(outfile, "w")
    lt = DictFile(listfile, keypos=column, valuepos=None)
    threaded = []
    imported = set()
    for t in thread:
        accn = t.accn
        if accn not in lt:
            continue

        imported.add(accn)
        atoms = lt[accn]
        threaded.append(atoms)

    assert len(threaded) == len(imported)

    total = sum(1 for x in open(listfile))
    logging.debug("Total: {0}, currently threaded: {1}".format(
        total, len(threaded)))
    fp = open(listfile)
    for row in fp:
        atoms = row.split()
        accn = atoms[0]
        if accn in imported:
            continue
        insert_into_threaded(atoms, threaded, threadorder)

    for atoms in threaded:
        print >> fw, "\t".join(atoms)

    fw.close()
    logging.debug("File `{0}` sorted to `{1}`.".format(outfile, thread.filename))
def agp(args):
    """
    %prog agp tpffile certificatefile agpfile

    Build agpfile from overlap certificates.

    Tiling Path File (tpf) is a file that lists the component and the gaps.
    It is a three-column file similar to below, also see jcvi.formats.agp.tpf():

    telomere        chr1    na
    AC229737.8      chr1    +
    AC202463.29     chr1    +

    Note: the orientation of the component is only used as a guide. If the
    orientation is derivable from a terminal overlap, it will use it regardless
    of what the tpf says.

    See jcvi.assembly.goldenpath.certificate() which generates a list of
    certificates based on agpfile. At first, it seems counter-productive to
    convert first agp to certificates then certificates back to agp.

    The certificates provide a way to edit the overlap information, so that the
    agpfile can be corrected (without changing agpfile directly).
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(agp.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    tpffile, certificatefile, agpfile = args
    orientationguide = DictFile(tpffile, valuepos=2)
    cert = Certificate(certificatefile)
    cert.write_AGP(agpfile, orientationguide=orientationguide)
def gss(args): """ %prog gss fastafile plateMapping Generate sequence files and metadata templates suited for gss submission. The FASTA file is assumed to be exported from the JCVI data delivery folder which looks like: >1127963806024 /library_name=SIL1T054-B-01-120KB /clear_start=0 /clear_end=839 /primer_id=1049000104196 /trace_id=1064147620169 /trace_file_id=1127963805941 /clone_insert_id=1061064364776 /direction=reverse /sequencer_run_id=1064147620155 /sequencer_plate_barcode=B906423 /sequencer_plate_well_coordinates=C3 /sequencer_plate_96well_quadrant=1 /sequencer_plate_96well_coordinates=B02 /template_plate_barcode=CC0251602AB /growth_plate_barcode=BB0273005AB AGCTTTAGTTTCAAGGATACCTTCATTGTCATTCCCGGTTATGATGATATCATCAAGATAAACAAGAATG ACAATGATACCTGTTTGGTTCTGAAGTGTAAAGAGGGTATGTTCAGCTTCAGATCTTCTAAACCCTTTGT CTAGTAAGCTGGCACTTAGCTTCCTATACCAAACCCTTTGTGATTGCTTCAGTCCATAAATTGCCTTTTT Plate mapping file maps the JTC `sequencer_plate_barcode` to external IDs. For example: B906423 SIL-001 """ p = OptionParser(gss.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, mappingfile = args seen = defaultdict(int) clone = defaultdict(set) plateMapping = DictFile(mappingfile) fw = open("MetaData.txt", "w") print(PublicationTemplate.format(**vars), file=fw) print(LibraryTemplate.format(**vars), file=fw) print(ContactTemplate.format(**vars), file=fw) logging.debug("Meta data written to `{0}`".format(fw.name)) fw = open("GSS.txt", "w") fw_log = open("GSS.log", "w") for rec in SeqIO.parse(fastafile, "fasta"): # First pass just check well number matchings and populate sequences in # the same clone description = rec.description a = parse_description(description) direction = a["direction"][0] sequencer_plate_barcode = a["sequencer_plate_barcode"][0] sequencer_plate_well_coordinates = a[ "sequencer_plate_well_coordinates"][0] sequencer_plate_96well_quadrant = a["sequencer_plate_96well_quadrant"][ 0] sequencer_plate_96well_coordinates = a[ "sequencer_plate_96well_coordinates"][0] # Check the 96-well ID is correctly converted to 384-well ID w96 = sequencer_plate_96well_coordinates w96quad = int(sequencer_plate_96well_quadrant) w384 = sequencer_plate_well_coordinates assert convert_96_to_384(w96, w96quad) == w384 plate = sequencer_plate_barcode assert plate in plateMapping, "{0} not found in `{1}` !".format( plate, mappingfile) plate = plateMapping[plate] d = Directions[direction] cloneID = "{0}{1}".format(plate, w384) gssID = "{0}{1}".format(cloneID, d) seen[gssID] += 1 if seen[gssID] > 1: gssID = "{0}{1}".format(gssID, seen[gssID]) seen[gssID] += 1 clone[cloneID].add(gssID) seen = defaultdict(int) for rec in SeqIO.parse(fastafile, "fasta"): # need to populate gssID, mateID, cloneID, seq, plate, row, column description = rec.description a = parse_description(description) direction = a["direction"][0] sequencer_plate_barcode = a["sequencer_plate_barcode"][0] sequencer_plate_well_coordinates = a[ "sequencer_plate_well_coordinates"][0] w384 = sequencer_plate_well_coordinates plate = sequencer_plate_barcode plate = plateMapping[plate] d = Directions[direction] cloneID = "{0}{1}".format(plate, w384) gssID = "{0}{1}".format(cloneID, d) seen[gssID] += 1 if seen[gssID] > 1: logging.error("duplicate key {0} found".format(gssID)) gssID = "{0}{1}".format(gssID, seen[gssID]) othergss = clone[cloneID] - set([gssID]) othergss = ", ".join(sorted(othergss)) vars.update(locals()) print(GSSTemplate.format(**vars), file=fw) # Write conversion logs to log file print("{0}\t{1}".format(gssID, 
description), file=fw_log) print("=" * 60, file=fw_log) logging.debug("A total of {0} seqs written to `{1}`".format( len(seen), fw.name)) fw.close() fw_log.close()
def update_from(self, filename):
    from jcvi.formats.base import DictFile

    d = DictFile(filename)
    for k, v in d.items():
        self[k].append(v)
def summary(args):
    """
    %prog summary diploid.napus.fractionation gmap.status

    Provide summary of fractionation. `fractionation` file is generated with
    loss(). `gmap.status` is generated with genestatus().
    """
    from jcvi.formats.base import DictFile
    from jcvi.utils.cbook import percentage, Registry

    p = OptionParser(summary.__doc__)
    p.add_option("--extra", help="Cross with extra tsv file [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    frfile, statusfile = args
    status = DictFile(statusfile)
    fp = open(frfile)
    registry = Registry()  # keeps all the tags for any given gene
    for row in fp:
        seqid, gene, tag = row.split()
        if tag == '.':
            registry[gene].append("outside")
        else:
            registry[gene].append("inside")
            if tag[0] == '[':
                registry[gene].append("no_syntenic_model")
                if tag.startswith("[S]"):
                    registry[gene].append("[S]")
                    gstatus = status.get(gene, None)
                    if gstatus == 'complete':
                        registry[gene].append("complete")
                    elif gstatus == 'pseudogene':
                        registry[gene].append("pseudogene")
                    elif gstatus == 'partial':
                        registry[gene].append("partial")
                    else:
                        registry[gene].append("gmap_fail")
                elif tag.startswith("[NS]"):
                    registry[gene].append("[NS]")
                    if "random" in tag or "Scaffold" in tag:
                        registry[gene].append("random")
                    else:
                        registry[gene].append("real_ns")
                elif tag.startswith("[NF]"):
                    registry[gene].append("[NF]")
            else:
                registry[gene].append("syntenic_model")

    inside = registry.count("inside")
    outside = registry.count("outside")
    syntenic = registry.count("syntenic_model")
    non_syntenic = registry.count("no_syntenic_model")
    s = registry.count("[S]")
    ns = registry.count("[NS]")
    nf = registry.count("[NF]")
    complete = registry.count("complete")
    pseudogene = registry.count("pseudogene")
    partial = registry.count("partial")
    gmap_fail = registry.count("gmap_fail")
    random = registry.count("random")
    real_ns = registry.count("real_ns")

    complete_models = registry.get_tag("complete")
    pseudogenes = registry.get_tag("pseudogene")
    partial_deletions = registry.get_tag("partial")

    m = "{0} inside synteny blocks\n".format(inside)
    m += "{0} outside synteny blocks\n".format(outside)
    m += "{0} has syntenic gene\n".format(syntenic)
    m += "{0} lack syntenic gene\n".format(non_syntenic)
    m += "{0} has sequence match in syntenic location\n".format(s)
    m += "{0} has sequence match in non-syntenic location\n".format(ns)
    m += "{0} has sequence match in un-ordered scaffolds\n".format(random)
    m += "{0} has sequence match in real non-syntenic location\n".format(real_ns)
    m += "{0} has no sequence match\n".format(nf)
    m += "{0} syntenic sequence - complete model\n".format(percentage(complete, s))
    m += "{0} syntenic sequence - partial model\n".format(percentage(partial, s))
    m += "{0} syntenic sequence - pseudogene\n".format(percentage(pseudogene, s))
    m += "{0} syntenic sequence - gmap fail\n".format(percentage(gmap_fail, s))
    print >> sys.stderr, m

    aa = ["complete_models", "partial_deletions", "pseudogenes"]
    bb = [complete_models, partial_deletions, pseudogenes]
    for a, b in zip(aa, bb):
        fw = open(a, "w")
        print >> fw, "\n".join(b)
        fw.close()

    extra = opts.extra
    if extra:
        registry.update_from(extra)

    fp.seek(0)
    fw = open("registry", "w")
    for row in fp:
        seqid, gene, tag = row.split()
        ts = registry[gene]
        print >> fw, "\t".join((seqid, gene, tag, "-".join(ts)))
    fw.close()
    logging.debug("Registry written.")
def pastegenes(args): """ %prog pastegenes coverage.list old.genes.bed new.genes.bed old.assembly Paste in zero or low coverage genes. For a set of neighboring genes missing, add the whole cassette as unplaced scaffolds. For singletons the program will try to make a patch. """ from jcvi.formats.base import DictFile from jcvi.utils.cbook import gene_name p = OptionParser(pastegenes.__doc__) p.add_option( "--cutoff", default=90, type="int", help="Coverage cutoff to call gene missing", ) p.add_option( "--flank", default=2000, type="int", help="Get the seq of size on two ends", ) p.add_option( "--maxsize", default=50000, type="int", help="Maximum size of patchers to be replaced", ) opts, args = p.parse_args(args) if len(args) != 4: sys.exit(not p.print_help()) coveragefile, oldbed, newbed, oldassembly = args cutoff = opts.cutoff flank = opts.flank maxsize = opts.maxsize coverage = DictFile(coveragefile, valuepos=2, cast=float) obed = Bed(oldbed) order = obed.order bed = [x for x in obed if x.accn in coverage] key = lambda x: coverage[x.accn] >= cutoff extrabed = "extra.bed" extendbed = "extend.bed" pastebed = "paste.bed" fw = open(extrabed, "w") fwe = open(extendbed, "w") fwp = open(pastebed, "w") fw_ids = open(extendbed + ".ids", "w") singletons, large, large_genes = 0, 0, 0 for chr, chrbed in groupby(bed, key=lambda x: x.seqid): chrbed = list(chrbed) for good, beds in groupby(chrbed, key=key): if good: continue beds = list(beds) blocksize = len(set([gene_name(x.accn) for x in beds])) if blocksize == 1: singletons += 1 accn = beds[0].accn gi, gb = order[accn] leftb = obed[gi - 1] rightb = obed[gi + 1] leftr = leftb.range rightr = rightb.range cur = gb.range distance_to_left, oo = range_distance(leftr, cur) distance_to_right, oo = range_distance(cur, rightr) span, oo = range_distance(leftr, rightr) if distance_to_left <= distance_to_right and distance_to_left > 0: label = "LEFT" else: label = "RIGHT" if 0 < span <= maxsize: print( "\t".join( str(x) for x in (chr, leftb.start, rightb.end, gb.accn) ), file=fwp, ) print(leftb, file=fwe) print(gb, file=fwe) print(rightb, file=fwe) print( "L:{0} R:{1} [{2}]".format( distance_to_left, distance_to_right, label ), file=fwe, ) print(gb.accn, file=fw_ids) continue large += 1 large_genes += blocksize ranges = [(x.start, x.end) for x in beds] rmin, rmax = range_minmax(ranges) rmin -= flank rmax += flank name = "-".join((beds[0].accn, beds[-1].accn)) print("\t".join(str(x) for x in (chr, rmin - 1, rmax, name)), file=fw) fw.close() fwe.close() extrabed = mergeBed(extrabed, d=flank, nms=True) fastaFromBed(extrabed, oldassembly, name=True) summary([extrabed]) logging.debug("Singleton blocks : {0}".format(singletons)) logging.debug("Large blocks : {0} ({1} genes)".format(large, large_genes))
def genestats(args): """ %prog genestats gffile Print summary stats, including: - Number of genes - Number of single-exon genes - Number of multi-exon genes - Number of distinct exons - Number of genes with alternative transcript variants - Number of predicted transcripts - Mean number of distinct exons per gene - Mean number of transcripts per gene - Mean gene locus size (first to last exon) - Mean transcript size (UTR, CDS) - Mean exon size Stats modeled after barley genome paper Table 1. A physical, genetic and functional sequence assembly of the barley genome """ p = OptionParser(genestats.__doc__) p.add_option("--groupby", default="conf_class", help="Print separate stats groupby") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gff_file, = args gb = opts.groupby g = make_index(gff_file) tf = "transcript.sizes" if need_update(gff_file, tf): fw = open(tf, "w") for feat in g.features_of_type("mRNA"): fid = feat.id conf_class = feat.attributes.get(gb, "all") tsize = sum((c.stop - c.start + 1) for c in g.children(fid, 1) \ if c.featuretype == "exon") print >> fw, "\t".join((fid, str(tsize), conf_class)) fw.close() tsizes = DictFile(tf, cast=int) conf_classes = DictFile(tf, valuepos=2) logging.debug("A total of {0} transcripts populated.".format(len(tsizes))) genes = [] for feat in g.features_of_type("gene"): fid = feat.id transcripts = [c.id for c in g.children(fid, 1) \ if c.featuretype == "mRNA"] transcript_sizes = [tsizes[x] for x in transcripts] exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \ if c.featuretype == "exon") conf_class = conf_classes[transcripts[0]] gs = GeneStats(feat, conf_class, transcript_sizes, exons) genes.append(gs) r = {} # Report distinct_groups = set(conf_classes.values()) for g in distinct_groups: num_genes = num_single_exon_genes = num_multi_exon_genes = 0 num_genes_with_alts = num_transcripts = num_exons = 0 cum_locus_size = cum_transcript_size = cum_exon_size = 0 for gs in genes: if gs.conf_class != g: continue num_genes += 1 if gs.num_exons == 1: num_single_exon_genes += 1 else: num_multi_exon_genes += 1 num_exons += gs.num_exons if gs.num_transcripts > 1: num_genes_with_alts += 1 num_transcripts += gs.num_transcripts cum_locus_size += gs.locus_size cum_transcript_size += gs.cum_transcript_size cum_exon_size += gs.cum_exon_size mean_num_exons = num_exons * 1. / num_genes mean_num_transcripts = num_transcripts * 1. / num_genes mean_locus_size = cum_locus_size * 1. / num_genes mean_transcript_size = cum_transcript_size * 1. / num_transcripts mean_exon_size = cum_exon_size * 1. / num_exons r[("Number of genes", g)] = num_genes r[("Number of single-exon genes", g)] = \ percentage(num_single_exon_genes, num_genes, mode=1) r[("Number of multi-exon genes", g)] = \ percentage(num_multi_exon_genes, num_genes, mode=1) r[("Number of distinct exons", g)] = num_exons r[("Number of genes with alternative transcript variants", g)] = \ percentage(num_genes_with_alts, num_genes, mode=1) r[("Number of predicted transcripts", g)] = num_transcripts r[("Mean number of distinct exons per gene", g)] = mean_num_exons r[("Mean number of transcripts per gene", g)] = mean_num_transcripts r[("Mean gene locus size (first to last exon)", g)] = mean_locus_size r[("Mean transcript size (UTR, CDS)", g)] = mean_transcript_size r[("Mean exon size", g)] = mean_exon_size print >> sys.stderr, tabulate(r)
def __init__(self, fig, root, datafile, bedfile, layoutfile,
             switch=None, tree=None, extra_features=None,
             chr_label=True, loc_label=True, pad=.04):
    w, h = fig.get_figwidth(), fig.get_figheight()
    bed = Bed(bedfile)
    order = bed.order
    bf = BlockFile(datafile)
    self.layout = lo = Layout(layoutfile)
    switch = DictFile(switch, delimiter="\t") if switch else None
    if extra_features:
        extra_features = Bed(extra_features)

    exts = []
    extras = []
    for i in xrange(bf.ncols):
        ext = bf.get_extent(i, order)
        exts.append(ext)
        if extra_features:
            start, end, si, ei, chr, orientation, span = ext
            start, end = start.start, end.end  # start, end coordinates
            ef = list(extra_features.extract(chr, start, end))

            # Pruning removes minor features with < 0.1% of the region
            ef_pruned = [x for x in ef if x.span >= span / 1000]
            print >> sys.stderr, "Extracted {0} features "\
                "({1} after pruning)".format(len(ef), len(ef_pruned))
            extras.append(ef_pruned)

    maxspan = max(exts, key=lambda x: x[-1])[-1]
    scale = maxspan / .65

    self.gg = gg = {}
    self.rr = []
    ymids = []
    vpad = .012 * w / h
    for i in xrange(bf.ncols):
        ext = exts[i]
        ef = extras[i] if extras else None
        r = Region(root, ext, lo[i], bed, scale, switch,
                   chr_label=chr_label, loc_label=loc_label,
                   vpad=vpad, extra_features=ef)
        self.rr.append(r)
        # Use tid and accn to store gene positions
        gg.update(dict(((i, k), v) for k, v in r.gg.items()))
        ymids.append(r.y)

    for i, j in lo.edges:
        for ga, gb, h in bf.iter_pairs(i, j):
            a, b = gg[(i, ga)], gg[(j, gb)]
            ymid = (ymids[i] + ymids[j]) / 2
            Shade(root, a, b, ymid, fc="gainsboro", lw=0, alpha=1)

        for ga, gb, h in bf.iter_pairs(i, j, highlight=True):
            a, b = gg[(i, ga)], gg[(j, gb)]
            ymid = (ymids[i] + ymids[j]) / 2
            Shade(root, a, b, ymid, alpha=1, highlight=h, zorder=2)

    if tree:
        from jcvi.graphics.tree import draw_tree, read_trees

        trees = read_trees(tree)
        ntrees = len(trees)
        logging.debug("A total of {0} trees imported.".format(ntrees))
        xiv = 1. / ntrees
        yiv = .3
        xstart = 0
        ystart = min(ymids) - .4
        for i in xrange(ntrees):
            ax = fig.add_axes([xstart, ystart, xiv, yiv])
            label, outgroup, tx = trees[i]
            draw_tree(ax, tx, outgroup=outgroup, rmargin=.4, leaffont=11)
            xstart += xiv
            RoundLabel(ax, .5, .3, label, fill=True, fc="lavender", color="r")
def omgprepare(args):
    """
    %prog omgprepare ploidy anchorsfile blastfile

    Prepare to run Sankoff's OMG algorithm to get orthologs.
    """
    from jcvi.formats.blast import cscore
    from jcvi.formats.base import DictFile

    p = OptionParser(omgprepare.__doc__)
    p.add_option("--norbh", action="store_true",
                 help="Disable RBH hits [default: %default]")
    p.add_option("--pctid", default=0, type="int",
                 help="Percent id cutoff for RBH hits [default: %default]")
    p.add_option("--cscore", default=90, type="int",
                 help="C-score cutoff for RBH hits [default: %default]")
    p.set_stripnames()
    p.set_beds()

    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    ploidy, anchorfile, blastfile = args
    norbh = opts.norbh
    pctid = opts.pctid
    cs = opts.cscore
    qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)

    fp = open(ploidy)
    genomeidx = dict((x.split()[0], i) for i, x in enumerate(fp))
    fp.close()

    ploidy = DictFile(ploidy)

    geneinfo(qbed, qorder, genomeidx, ploidy)
    geneinfo(sbed, sorder, genomeidx, ploidy)

    pf = blastfile.rsplit(".", 1)[0]
    cscorefile = pf + ".cscore"
    cscore([blastfile, "-o", cscorefile, "--cutoff=0", "--pct"])
    ac = AnchorFile(anchorfile)
    pairs = set((a, b) for a, b, i in ac.iter_pairs())
    logging.debug("Imported {0} pairs from `{1}`.".format(
        len(pairs), anchorfile))

    weightsfile = pf + ".weights"
    fp = open(cscorefile)
    fw = open(weightsfile, "w")
    npairs = 0
    for row in fp:
        a, b, c, pct = row.split()
        c, pct = float(c), float(pct)
        c = int(c * 100)
        if (a, b) not in pairs:
            if norbh:
                continue
            if c < cs:
                continue
            if pct < pctid:
                continue
            c /= 10  # This severely penalizes RBH against synteny

        print >> fw, "\t".join((a, b, str(c)))
        npairs += 1
    fw.close()

    logging.debug("Write {0} pairs to `{1}`.".format(npairs, weightsfile))
def draw_tree(ax, tx, rmargin=.3, treecolor="k", leafcolor="k", supportcolor="k", outgroup=None, reroot=True, gffdir=None, sizes=None, trunc_name=None, SH=None, scutoff=0, barcodefile=None, leafcolorfile=None, leaffont=12): """ main function for drawing phylogenetic tree """ t = Tree(tx) if reroot: if outgroup: R = t.get_common_ancestor(*outgroup) else: # Calculate the midpoint node R = t.get_midpoint_outgroup() if R != t: t.set_outgroup(R) farthest, max_dist = t.get_farthest_leaf() margin = .05 xstart = margin ystart = 1 - margin canvas = 1 - rmargin - 2 * margin tip = .005 # scale the tree scale = canvas / max_dist num_leaves = len(t.get_leaf_names()) yinterval = canvas / (num_leaves + 1) # get exons structures, if any structures = {} if gffdir: gffiles = glob("{0}/*.gff*".format(gffdir)) setups, ratio = get_setups(gffiles, canvas=rmargin / 2, noUTR=True) structures = dict((a, (b, c)) for a, b, c in setups) if sizes: sizes = Sizes(sizes).mapping if barcodefile: barcodemap = DictFile(barcodefile, delimiter="\t") if leafcolorfile: leafcolors = DictFile(leafcolorfile, delimiter="\t") coords = {} i = 0 for n in t.traverse("postorder"): dist = n.get_distance(t) xx = xstart + scale * dist if n.is_leaf(): yy = ystart - i * yinterval i += 1 if trunc_name: name = truncate_name(n.name, rule=trunc_name) else: name = n.name if barcodefile: name = decode_name(name, barcodemap) sname = name.replace("_", "-") try: lc = leafcolors[n.name] except Exception: lc = leafcolor else: # if color is given as "R,G,B" if "," in lc: lc = map(float, lc.split(",")) ax.text(xx + tip, yy, sname, va="center", fontstyle="italic", size=leaffont, color=lc) gname = n.name.split("_")[0] if gname in structures: mrnabed, cdsbeds = structures[gname] ExonGlyph(ax, 1 - rmargin / 2, yy, mrnabed, cdsbeds, align="right", ratio=ratio) if sizes and gname in sizes: size = sizes[gname] size = size / 3 - 1 # base pair converted to amino acid size = "{0}aa".format(size) ax.text(1 - rmargin / 2 + tip, yy, size, size=leaffont) else: children = [coords[x] for x in n.get_children()] children_x, children_y = zip(*children) min_y, max_y = min(children_y), max(children_y) # plot the vertical bar ax.plot((xx, xx), (min_y, max_y), "-", color=treecolor) # plot the horizontal bar for cx, cy in children: ax.plot((xx, cx), (cy, cy), "-", color=treecolor) yy = sum(children_y) * 1. / len(children_y) support = n.support if support > 1: support = support / 100. if not n.is_root(): if support > scutoff / 100.: ax.text(xx, yy + .005, "{0:d}".format(int(abs(support * 100))), ha="right", size=leaffont, color=supportcolor) coords[n] = (xx, yy) # scale bar br = .1 x1 = xstart + .1 x2 = x1 + br * scale yy = ystart - i * yinterval ax.plot([x1, x1], [yy - tip, yy + tip], "-", color=treecolor) ax.plot([x2, x2], [yy - tip, yy + tip], "-", color=treecolor) ax.plot([x1, x2], [yy, yy], "-", color=treecolor) ax.text((x1 + x2) / 2, yy - tip, "{0:g}".format(br), va="top", ha="center", size=leaffont, color=treecolor) if SH is not None: xs = x1 ys = (margin + yy) / 2. ax.text(xs, ys, "SH test against ref tree: {0}".format(SH), ha="left", size=leaffont, color="g") normalize_axes(ax)
def get_hg38_chromsizes(filename=datafile("hg38.chrom.sizes")):
    chromsizes = DictFile(filename)
    chromsizes = dict((k, int(v)) for k, v in chromsizes.items())
    return chromsizes
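# Hypothetical usage of the helper above, assuming `hg38.chrom.sizes` is the
# standard two-column <chromosome><TAB><length> file: the values are cast to
# int so they can be used directly in arithmetic.
#
# chromsizes = get_hg38_chromsizes()
# chromsizes["chr1"]  # -> length of chr1 as an int, e.g. 248956422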
def main(): """ %prog bedfile id_mappings Takes a bedfile that contains the coordinates of features to plot on the chromosomes, and `id_mappings` file that map the ids to certain class. Each class will get assigned a unique color. `id_mappings` file is optional (if omitted, will not paint the chromosome features, except the centromere). """ p = OptionParser(main.__doc__) p.add_option("--title", default="Medicago truncatula v3.5", help="title of the image [default: `%default`]") p.add_option("--gauge", default=False, action="store_true", help="draw a gauge with size label [default: %default]") p.add_option("--imagemap", default=False, action="store_true", help="generate an HTML image map associated with the image [default: %default]") p.add_option("--winsize", default=50000, type="int", help="if drawing an imagemap, specify the window size (bases) of each map element " "[default: %default bp]") p.add_option("--empty", help="Write legend for unpainted region") opts, args, iopts = p.set_image_options(figsize="6x6", dpi=300) if len(args) not in (1, 2): sys.exit(p.print_help()) bedfile = args[0] mappingfile = None if len(args) == 2: mappingfile = args[1] winsize = opts.winsize imagemap = opts.imagemap w, h = iopts.w, iopts.h dpi = iopts.dpi prefix = bedfile.rsplit(".", 1)[0] figname = prefix + "." + opts.format if imagemap: imgmapfile = prefix + '.map' mapfh = open(imgmapfile, "w") print >> mapfh, '<map id="' + prefix + '">' if mappingfile: mappings = DictFile(mappingfile, delimiter="\t") classes = sorted(set(mappings.values())) logging.debug("A total of {0} classes found: {1}".format(len(classes), ','.join(classes))) else: mappings = {} classes = [] logging.debug("No classes registered (no id_mappings given).") mycolors = "rgbymc" class_colors = dict(zip(classes, mycolors)) bed = Bed(bedfile) chr_lens = {} centromeres = {} for b, blines in groupby(bed, key=(lambda x: x.seqid)): blines = list(blines) maxlen = max(x.end for x in blines) chr_lens[b] = maxlen for b in bed: accn = b.accn if accn == "centromere": centromeres[b.seqid] = b.start if accn in mappings: b.accn = mappings[accn] else: b.accn = '-' chr_number = len(chr_lens) if centromeres: assert chr_number == len(centromeres) fig = plt.figure(1, (w, h)) root = fig.add_axes([0, 0, 1, 1]) r = .7 # width and height of the whole chromosome set xstart, ystart = .15, .85 xinterval = r / chr_number xwidth = xinterval * .5 # chromosome width max_chr_len = max(chr_lens.values()) ratio = r / max_chr_len # canvas / base # first the chromosomes for a, (chr, clen) in enumerate(sorted(chr_lens.items())): xx = xstart + a * xinterval + .5 * xwidth root.text(xx, ystart + .01, chr, ha="center") if centromeres: yy = ystart - centromeres[chr] * ratio ChromosomeWithCentromere(root, xx, ystart, yy, ystart - clen * ratio, width=xwidth) else: Chromosome(root, xx, ystart, ystart - clen * ratio, width=xwidth) chr_idxs = dict((a, i) for i, a in enumerate(sorted(chr_lens.keys()))) alpha = .75 # color the regions for chr in sorted(chr_lens.keys()): segment_size, excess = 0, 0 bac_list = [] for b in bed.sub_bed(chr): clen = chr_lens[chr] idx = chr_idxs[chr] klass = b.accn start = b.start end = b.end xx = xstart + idx * xinterval yystart = ystart - end * ratio yyend = ystart - start * ratio root.add_patch(Rectangle((xx, yystart), xwidth, yyend - yystart, fc=class_colors.get(klass, "w"), lw=0, alpha=alpha)) if imagemap: """ `segment` : size of current BAC being investigated + `excess` `excess` : left-over bases from the previous BAC, as a result of iterating over 
`winsize` regions of `segment` """ if excess == 0: segment_start = start segment = (end - start + 1) + excess while True: if segment < winsize: bac_list.append(b.accn) excess = segment break segment_end = segment_start + winsize - 1 tlx, tly, brx, bry = xx, (1 - ystart) + segment_start * ratio, \ xx + xwidth, (1 - ystart) + segment_end * ratio print >> mapfh, '\t' + write_ImageMapLine(tlx, tly, brx, bry, \ w, h, dpi, chr+":"+",".join(bac_list), segment_start, segment_end) segment_start += winsize segment -= winsize bac_list = [] if imagemap and excess > 0: bac_list.append(b.accn) segment_end = end tlx, tly, brx, bry = xx, (1 - ystart) + segment_start * ratio, \ xx + xwidth, (1 - ystart) + segment_end * ratio print >> mapfh, '\t' + write_ImageMapLine(tlx, tly, brx, bry, \ w, h, dpi, chr+":"+",".join(bac_list), segment_start, segment_end) if imagemap: print >> mapfh, '</map>' mapfh.close() logging.debug("Image map written to `{0}`".format(mapfh.name)) if opts.gauge: xstart, ystart = .9, .85 Gauge(root, xstart, ystart - r, ystart, max_chr_len) # class legends, four in a row xstart = .1 xinterval = .2 xwidth = .04 yy = .08 for klass, cc in sorted(class_colors.items()): if klass == '-': continue root.add_patch(Rectangle((xstart, yy), xwidth, xwidth, fc=cc, lw=0, alpha=alpha)) root.text(xstart + xwidth + .01, yy, klass, fontsize=10) xstart += xinterval empty = opts.empty if empty: root.add_patch(Rectangle((xstart, yy), xwidth, xwidth, fill=False, lw=1)) root.text(xstart + xwidth + .01, yy, empty, fontsize=10) root.text(.5, .95, opts.title, fontstyle="italic", ha="center", va="center") root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() savefig(figname, dpi=dpi, iopts=iopts)
def merge(args):
    """
    %prog merge protein-quartets registry LOST

    Merge protein quartets table with dna quartets registry. This is specific
    to the napus project.
    """
    from jcvi.formats.base import DictFile

    p = OptionParser(merge.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    quartets, registry, lost = args
    qq = DictFile(registry, keypos=1, valuepos=3)
    lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|')
    qq.update(lost)
    fp = open(quartets)
    cases = {
        "AN,CN": 4,
        "BO,AN,CN": 8,
        "BO,CN": 2,
        "BR,AN": 1,
        "BR,AN,CN": 6,
        "BR,BO": 3,
        "BR,BO,AN": 5,
        "BR,BO,AN,CN": 9,
        "BR,BO,CN": 7,
    }
    ip = {
        "syntenic_model": "Syntenic_model_excluded_by_OMG",
        "complete": "Predictable",
        "partial": "Truncated",
        "pseudogene": "Pseudogene",
        "random": "Match_random",
        "real_ns": "Transposed",
        "gmap_fail": "GMAP_fail",
        "AN LOST": "AN_LOST",
        "CN LOST": "CN_LOST",
        "BR LOST": "BR_LOST",
        "BO LOST": "BO_LOST",
        "outside": "Outside_synteny_blocks",
        "[NF]": "Not_found",
    }

    for row in fp:
        atoms = row.strip().split("\t")
        genes = atoms[:4]
        tag = atoms[4]
        a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
        qqs = [c, d, a, b]
        for i, q in enumerate(qqs):
            if atoms[i] != '.':
                qqs[i] = "syntenic_model"

        # Make comment
        comment = "Case{0}".format(cases[tag])
        dots = sum([1 for x in genes if x == '.'])
        if dots == 1:
            idx = genes.index(".")
            status = qqs[idx]
            status = ip[status]
            comment += "-" + status
        print row.strip() + "\t" + "\t".join(qqs + [comment])
def htg(args): """ %prog htg fastafile template.sbt Prepare sqn files for GenBank HTG submission to update existing records. `fastafile` contains the records to update; multiple records are allowed (each one generates a separate sqn file in the sqn/ folder). The record defline has the accession ID. For example, >AC148290.3 Internally, this generates two additional files (phasefile and namesfile) and downloads records from GenBank. Implementation details are below: `phasefile` contains, for each accession, phase information. For example: AC148290.3 3 HTG 2 mth2-45h12 which means this is a Phase-3 BAC. Records with only a single contig will be labeled as Phase-3 regardless of the info in the `phasefile`. The template file is the GenBank sbt template. See jcvi.formats.sbt for generation of such files. Another problem is that GenBank requires the name of the sequence to stay the same when updating, and will kick back a table of name conflicts. For example: We are unable to process the updates for these entries for the following reason: Seqname has changed Accession Old seq_name New seq_name --------- ------------ ------------ AC239792 mtg2_29457 AC239792.1 To prepare a submission, this script downloads the GenBank and asn.1 formats, and generates the phase file and the names file (using formats.agp.phase() and apps.gbsubmit.asn(), respectively). These steps run automatically. However, use --phases if the GenBank files contain outdated information, for example after clone name changes or phase upgrades. In this case, run formats.agp.phase() manually, modify the phasefile and use --phases to override. """ from jcvi.formats.fasta import sequin, ids from jcvi.formats.agp import phase from jcvi.apps.fetch import entrez p = OptionParser(htg.__doc__) p.add_option( "--phases", default=None, help="Use another phasefile to override", ) p.add_option("--comment", default="", help="Comments for this update") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, sbtfile = args pf = fastafile.rsplit(".", 1)[0] idsfile = pf + ".ids" phasefile = pf + ".phases" namesfile = pf + ".names" ids([fastafile, "--outfile={0}".format(idsfile)]) asndir = "asn.1" mkdir(asndir) entrez([idsfile, "--format=asn.1", "--outdir={0}".format(asndir)]) asn(glob("{0}/*".format(asndir)) + ["--outfile={0}".format(namesfile)]) if opts.phases is None: gbdir = "gb" mkdir(gbdir) entrez([idsfile, "--format=gb", "--outdir={0}".format(gbdir)]) phase( glob("{0}/*".format(gbdir)) + ["--outfile={0}".format(phasefile)]) else: phasefile = opts.phases assert op.exists(namesfile) and op.exists(phasefile) newphasefile = phasefile + ".new" newphasefw = open(newphasefile, "w") comment = opts.comment fastadir = "fasta" sqndir = "sqn" mkdir(fastadir) mkdir(sqndir) from jcvi.graphics.histogram import stem_leaf_plot names = DictFile(namesfile) assert len(set(names.keys())) == len(set(names.values())) phases = DictFile(phasefile) ph = [int(x) for x in phases.values()] # vmin 1, vmax 4, bins 3 stem_leaf_plot(ph, 1, 4, 3, title="Counts of phases before updates") logging.debug("Information loaded for {0} records.".format(len(phases))) assert len(names) == len(phases) newph = [] cmd = "faSplit byname {0} {1}/".format(fastafile, fastadir) sh(cmd, outfile="/dev/null", errfile="/dev/null") acmd = "tbl2asn -a z -p fasta -r {sqndir}" acmd += " -i {splitfile} -t {sbtfile} -C tigr" acmd += ' -j "{qualifiers}"' acmd += " -A {accession_nv} -o {sqndir}/{accession_nv}.sqn -V Vbr" acmd += ' -y "{comment}" -W T -T T' qq = "[tech=htgs {phase}] [organism=Medicago truncatula] [strain=A17]" nupdated = 0 for row in open(phasefile): atoms = row.rstrip().split("\t") # see formats.agp.phase() for column contents accession, phase, clone = atoms[0], atoms[1], atoms[-1] fafile = op.join(fastadir, accession + ".fa") accession_nv = accession.split(".", 1)[0] newid = names[accession_nv] newidopt = "--newid={0}".format(newid) cloneopt = "--clone={0}".format(clone) splitfile, gaps = sequin([fafile, newidopt, cloneopt]) splitfile = op.basename(splitfile) phase = int(phase) assert phase in (1, 2, 3) oldphase = phase if gaps == 0 and phase != 3: phase = 3 if gaps != 0 and phase == 3: phase = 2 print("{0}\t{1}\t{2}".format(accession_nv, oldphase, phase), file=newphasefw) newph.append(phase) qualifiers = qq.format(phase=phase) if ";" in clone: qualifiers += " [keyword=HTGS_POOLED_MULTICLONE]" cmd = acmd.format( accession=accession, accession_nv=accession_nv, sqndir=sqndir, sbtfile=sbtfile, splitfile=splitfile, qualifiers=qualifiers, comment=comment, ) sh(cmd) verify_sqn(sqndir, accession) nupdated += 1 stem_leaf_plot(newph, 1, 4, 3, title="Counts of phases after updates") print("A total of {0} records updated.".format(nupdated), file=sys.stderr)
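The phase bookkeeping inside the loop above is the subtle part, so here is a small, self-contained sketch of just that rule (the helper name is hypothetical, not part of jcvi): a record whose split sequence contains no gaps is promoted to Phase 3, while a record that still contains gaps cannot remain Phase 3 and is demoted to Phase 2.

def adjust_phase(phase, gaps):
    # Mirrors the phase adjustment in htg(): gapless records become Phase 3,
    # gapped records are demoted from Phase 3 to Phase 2, others are unchanged.
    assert phase in (1, 2, 3)
    if gaps == 0 and phase != 3:
        return 3
    if gaps != 0 and phase == 3:
        return 2
    return phase

assert adjust_phase(1, gaps=0) == 3   # single contig, promote to Phase 3
assert adjust_phase(3, gaps=2) == 2   # still gapped, demote to Phase 2
assert adjust_phase(2, gaps=5) == 2   # unchanged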
def draw_chromosomes( root, bedfile, sizes, iopts, mergedist, winsize, imagemap, mappingfile=None, gauge=False, legend=True, empty=False, title=None, ): bed = Bed(bedfile) prefix = bedfile.rsplit(".", 1)[0] if imagemap: imgmapfile = prefix + ".map" mapfh = open(imgmapfile, "w") print('<map id="' + prefix + '">', file=mapfh) if mappingfile: mappings = DictFile(mappingfile, delimiter="\t") classes = sorted(set(mappings.values())) preset_colors = (DictFile( mappingfile, keypos=1, valuepos=2, delimiter="\t") if DictFile.num_columns(mappingfile) >= 3 else {}) else: classes = sorted(set(x.accn for x in bed)) mappings = dict((x, x) for x in classes) preset_colors = {} logging.debug("A total of {} classes found: {}".format( len(classes), ",".join(classes))) # Assign colors to classes ncolors = max(3, min(len(classes), 12)) palette = set1_n if ncolors <= 8 else set3_n colorset = palette(number=ncolors) colorset = sample_N(colorset, len(classes)) class_colors = dict(zip(classes, colorset)) class_colors.update(preset_colors) logging.debug("Assigned colors: {}".format(class_colors)) chr_lens = {} centromeres = {} if sizes: chr_lens = Sizes(sizes).sizes_mapping else: for b, blines in groupby(bed, key=(lambda x: x.seqid)): blines = list(blines) maxlen = max(x.end for x in blines) chr_lens[b] = maxlen for b in bed: accn = b.accn if accn == "centromere": centromeres[b.seqid] = b.start if accn in mappings: b.accn = mappings[accn] else: b.accn = "-" chr_number = len(chr_lens) if centromeres: assert chr_number == len( centromeres), "chr_number = {}, centromeres = {}".format( chr_number, centromeres) r = 0.7 # width and height of the whole chromosome set xstart, ystart = 0.15, 0.85 xinterval = r / chr_number xwidth = xinterval * 0.5 # chromosome width max_chr_len = max(chr_lens.values()) ratio = r / max_chr_len # canvas / base # first the chromosomes for a, (chr, clen) in enumerate(sorted(chr_lens.items())): xx = xstart + a * xinterval + 0.5 * xwidth root.text(xx, ystart + 0.01, str(get_number(chr)), ha="center") if centromeres: yy = ystart - centromeres[chr] * ratio ChromosomeWithCentromere(root, xx, ystart, yy, ystart - clen * ratio, width=xwidth) else: Chromosome(root, xx, ystart, ystart - clen * ratio, width=xwidth) chr_idxs = dict((a, i) for i, a in enumerate(sorted(chr_lens.keys()))) alpha = 1 # color the regions for chr in sorted(chr_lens.keys()): segment_size, excess = 0, 0 bac_list = [] prev_end, prev_klass = 0, None for b in bed.sub_bed(chr): clen = chr_lens[chr] idx = chr_idxs[chr] klass = b.accn if klass == "centromere": continue start = b.start end = b.end if start < prev_end + mergedist and klass == prev_klass: start = prev_end xx = xstart + idx * xinterval yystart = ystart - end * ratio yyend = ystart - start * ratio root.add_patch( Rectangle( (xx, yystart), xwidth, yyend - yystart, fc=class_colors.get(klass, "lightslategray"), lw=0, alpha=alpha, )) prev_end, prev_klass = b.end, klass if imagemap: """ `segment` : size of current BAC being investigated + `excess` `excess` : left-over bases from the previous BAC, as a result of iterating over `winsize` regions of `segment` """ if excess == 0: segment_start = start segment = (end - start + 1) + excess while True: if segment < winsize: bac_list.append(b.accn) excess = segment break segment_end = segment_start + winsize - 1 tlx, tly, brx, bry = ( xx, (1 - ystart) + segment_start * ratio, xx + xwidth, (1 - ystart) + segment_end * ratio, ) print( "\t" + write_ImageMapLine( tlx, tly, brx, bry, iopts.w, iopts.h, iopts.dpi, chr + ":" + ",".join(bac_list), segment_start, segment_end, ), file=mapfh, ) segment_start += winsize segment -= winsize bac_list = [] if imagemap and excess > 0: bac_list.append(b.accn) segment_end = end tlx, tly, brx, bry = ( xx, (1 - ystart) + segment_start * ratio, xx + xwidth, (1 - ystart) + segment_end * ratio, ) print( "\t" + write_ImageMapLine( tlx, tly, brx, bry, iopts.w, iopts.h, iopts.dpi, chr + ":" + ",".join(bac_list), segment_start, segment_end, ), file=mapfh, ) if imagemap: print("</map>", file=mapfh) mapfh.close() logging.debug("Image map written to `{0}`".format(mapfh.name)) if gauge: xstart, ystart = 0.9, 0.85 Gauge(root, xstart, ystart - r, ystart, max_chr_len) if "centromere" in class_colors: del class_colors["centromere"] # class legends, four in a row if legend: xstart = 0.1 xinterval = 0.8 / len(class_colors) xwidth = 0.04 yy = 0.08 for klass, cc in sorted(class_colors.items()): if klass == "-": continue root.add_patch( Rectangle((xstart, yy), xwidth, xwidth, fc=cc, lw=0, alpha=alpha)) root.text(xstart + xwidth + 0.01, yy, latex(klass), fontsize=10) xstart += xinterval if empty: root.add_patch( Rectangle((xstart, yy), xwidth, xwidth, fill=False, lw=1)) root.text(xstart + xwidth + 0.01, yy, empty, fontsize=10) if title: root.text(0.5, 0.95, markup(title), ha="center", va="center")
def __init__( self, fig, root, datafile, bedfile, layoutfile, switch=None, tree=None, extra_features=None, chr_label=True, loc_label=True, genelabelsize=0, pad=0.05, vpad=0.015, scalebar=False, shadestyle="curve", glyphstyle="arrow", glyphcolor: BasePalette = OrientationPalette(), ): _, h = fig.get_figwidth(), fig.get_figheight() bed = Bed(bedfile) order = bed.order bf = BlockFile(datafile) self.layout = lo = Layout(layoutfile) switch = DictFile(switch, delimiter="\t") if switch else None if extra_features: extra_features = Bed(extra_features) exts = [] extras = [] for i in range(bf.ncols): ext = bf.get_extent(i, order) exts.append(ext) if extra_features: start, end, si, ei, chr, orientation, span = ext start, end = start.start, end.end # start, end coordinates ef = list(extra_features.extract(chr, start, end)) # Pruning removes minor features with < 0.1% of the region ef_pruned = [x for x in ef if x.span >= span / 1000] print( "Extracted {0} features " "({1} after pruning)".format(len(ef), len(ef_pruned)), file=sys.stderr, ) extras.append(ef_pruned) maxspan = max(exts, key=lambda x: x[-1])[-1] scale = maxspan / 0.65 self.gg = gg = {} self.rr = [] ymids = [] glyphcolor = ( OrientationPalette() if glyphcolor == "orientation" else OrthoGroupPalette(bf.grouper()) ) for i in range(bf.ncols): ext = exts[i] ef = extras[i] if extras else None r = Region( root, ext, lo[i], bed, scale, switch, genelabelsize=genelabelsize, chr_label=chr_label, loc_label=loc_label, vpad=vpad, extra_features=ef, glyphstyle=glyphstyle, glyphcolor=glyphcolor, ) self.rr.append(r) # Use tid and accn to store gene positions gg.update(dict(((i, k), v) for k, v in r.gg.items())) ymids.append(r.y) def offset(samearc): if samearc == "above": return 2 * pad if samearc == "above2": return 4 * pad if samearc == "below": return -2 * pad if samearc == "below2": return -4 * pad for i, j, blockcolor, samearc in lo.edges: for ga, gb, h in bf.iter_pairs(i, j): a, b = gg[(i, ga)], gg[(j, gb)] if samearc is not None: ymid = ymids[i] + offset(samearc) else: ymid = (ymids[i] + ymids[j]) / 2 Shade(root, a, b, ymid, fc=blockcolor, lw=0, alpha=1, style=shadestyle) for ga, gb, h in bf.iter_pairs(i, j, highlight=True): a, b = gg[(i, ga)], gg[(j, gb)] if samearc is not None: ymid = ymids[i] + offset(samearc) else: ymid = (ymids[i] + ymids[j]) / 2 Shade( root, a, b, ymid, alpha=1, highlight=h, zorder=2, style=shadestyle ) if scalebar: print("Build scalebar (scale={})".format(scale), file=sys.stderr) # Find the best length of the scalebar ar = [1, 2, 5] candidates = ( [1000 * x for x in ar] + [10000 * x for x in ar] + [100000 * x for x in ar] ) # Find the one that's close to an optimal canvas size dists = [(abs(x / scale - 0.12), x) for x in candidates] dist, candidate = min(dists) dist = candidate / scale x, y, yp = 0.22, 0.92, 0.005 a, b = x - dist / 2, x + dist / 2 lsg = "lightslategrey" root.plot([a, a], [y - yp, y + yp], "-", lw=2, color=lsg) root.plot([b, b], [y - yp, y + yp], "-", lw=2, color=lsg) root.plot([a, b], [y, y], "-", lw=2, color=lsg) root.text( x, y + 0.02, human_size(candidate, precision=0), ha="center", va="center", ) if tree: from jcvi.graphics.tree import draw_tree, read_trees trees = read_trees(tree) ntrees = len(trees) logging.debug("A total of {0} trees imported.".format(ntrees)) xiv = 1.0 / ntrees yiv = 0.3 xstart = 0 ystart = min(ymids) - 0.4 for i in range(ntrees): ax = fig.add_axes([xstart, ystart, xiv, yiv]) label, outgroup, color, tx = trees[i] draw_tree( ax, tx, outgroup=outgroup, rmargin=0.4, leaffont=11, treecolor=color, supportcolor=color, leafcolor=color, ) xstart += xiv RoundLabel(ax, 0.5, 0.3, label, fill=True, fc="lavender", color=color)
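The scalebar above is sized by picking, from the 1/2/5 x 10^k bp candidates, the length that comes closest to spanning 0.12 canvas units at the current scale. A standalone sketch of that selection (the helper name is hypothetical, not part of jcvi):

def pick_scalebar(scale, target=0.12):
    # Return the 1/2/5 x 10^k candidate (in bp) whose drawn length,
    # candidate / scale, is closest to `target` canvas units.
    ar = [1, 2, 5]
    candidates = [m * x for m in (1000, 10000, 100000) for x in ar]
    return min(candidates, key=lambda x: abs(x / scale - target))

# With scale = maxspan / 0.65 and a 2 Mb maximum block span:
print(pick_scalebar(2000000 / 0.65))   # 500000, i.e. a 500 kb bar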
def sizes(args): """ %prog sizes gaps.bed a.fasta b.fasta Take the flanks of gaps within a.fasta and map them onto b.fasta. Compile the results into gap size estimates in b. The output is detailed below: Columns are: 1. A scaffold 2. Start position 3. End position 4. Gap identifier 5. Gap size in A (= End - Start) 6. Gap size in B (based on BLAST, see below) For each gap, I extracted the left and right sequences (mostly 2Kb, but they can be shorter if they run into another gap) flanking the gap. The flanker names look like gap.00003L and gap.00003R, meaning the left and right flankers of this particular gap, respectively. The BLAST output is used to calculate the gap size. For each flanker sequence, I took the best hit and calculated the inner distance between the L match range and the R match range. The two flankers must map with at least 98% identity, and in the same orientation. NOTE: the sixth column in the list file is not always a valid number. Other values are: - na: both flankers are missing in B - Singleton: one flanker is missing - Different chr: flankers map to different scaffolds - Strand +|-: flankers map in different orientations - Negative value: the R flanker maps before the L flanker """ from jcvi.formats.base import DictFile from jcvi.apps.align import blast p = OptionParser(sizes.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) gapsbed, afasta, bfasta = args pf = gapsbed.rsplit(".", 1)[0] extbed = pf + ".ext.bed" extfasta = pf + ".ext.fasta" if need_update(gapsbed, extfasta): extbed, extfasta = flanks([gapsbed, afasta]) q = op.basename(extfasta).split(".")[0] r = op.basename(bfasta).split(".")[0] blastfile = "{0}.{1}.blast".format(q, r) if need_update([extfasta, bfasta], blastfile): blastfile = blast([bfasta, extfasta, "--wordsize=50", "--pctid=98"]) labelsfile = blast_to_twobeds(blastfile) labels = DictFile(labelsfile, delimiter='\t') bed = Bed(gapsbed) for b in bed: b.score = b.span accn = b.accn print("\t".join((str(x) for x in (b.seqid, b.start - 1, b.end, accn, b.score, labels.get(accn, "na")))))
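To make the sixth-column semantics concrete, here is a minimal sketch (illustrative only, not the jcvi implementation) of how a gap label could be derived from the two flanker placements; the hit tuples and the helper name are hypothetical, and the real pipeline enforces the 98% identity cutoff at the BLAST step.

def gap_estimate(left_hit, right_hit):
    # left_hit / right_hit: (seqid, start, end, strand) of the best hit of the
    # L and R flankers in B, or None if that flanker did not map.
    if left_hit is None and right_hit is None:
        return "na"
    if left_hit is None or right_hit is None:
        return "Singleton"
    lid, lstart, lend, lstrand = left_hit
    rid, rstart, rend, rstrand = right_hit
    if lid != rid:
        return "Different chr"
    if lstrand != rstrand:
        return "Strand {0}|{1}".format(lstrand, rstrand)
    # Inner distance between the two match ranges; negative if the R flanker
    # lands before the L flanker.
    return str(rstart - lend - 1)

print(gap_estimate(("scf1", 100, 2100, "+"), ("scf1", 4601, 6600, "+")))  # 2500
print(gap_estimate(("scf1", 100, 2100, "+"), None))                       # Singleton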