def publish():
    '''Publish files.

    Uploads the report via ``P.publish_report`` and then symlinks the
    exported interval files (bed files plus their tabix indexes) into
    the configured web directory.
    '''
    # publish web pages
    P.publish_report()

    # publish additional data
    web_dir = PARAMS["web_dir"]

    # target subdirectory -> files to export
    # (removed an unused ``project_id`` local and a ``bams`` list that was
    # appended to but never read)
    exportfiles = {
        "intervals":
        glob.glob(os.path.join(PARAMS["exportdir"], "bed", "*.bed.gz")) +
        glob.glob(os.path.join(PARAMS["exportdir"], "bed", "*.bed.gz.tbi")),
    }

    for targetdir, filenames in exportfiles.items():
        if not filenames:
            E.warn("no files for target '%s'" % targetdir)

        for src in filenames:
            dest = "%s/%s/%s" % (web_dir, targetdir, os.path.basename(src))
            dest = os.path.abspath(dest)

            # make sure the target directory exists before linking
            destdir = os.path.dirname(dest)
            if not os.path.exists(destdir):
                os.makedirs(destdir)

            if not os.path.exists(dest):
                E.debug("creating symlink from %s to %s" % (src, dest))
                os.symlink(os.path.abspath(src), dest)
def publish():
    '''Publish files.

    Uploads the report via ``P.publish_report`` and links the final
    lncRNA gtf file into the configured web directory.
    '''
    # publish web pages
    P.publish_report()

    # publish additional data - i.e. the final lncRNA gtf file
    web_dir = PARAMS["web_dir"]

    # BUG FIX: the original called
    #   os.path.exists(os.path.join(web_dir), "lncrna_final.class.gtf.gz")
    # which passes two arguments to os.path.exists and raises a TypeError;
    # the filename belongs inside os.path.join.
    dest = os.path.abspath(
        os.path.join(web_dir, "lncrna_final.class.gtf.gz"))
    if not os.path.exists(dest):
        # NOTE(review): the link source is relative, as in the original -
        # it resolves relative to the directory containing the link.
        os.symlink("lncrna_final.class.gtf.gz", dest)
def publish():
    '''Publish files.

    Uploads the report, symlinks bam/bigwig files into the web
    directory and prints UCSC track definition lines for them.
    '''
    # publish web pages
    P.publish_report()

    # publish additional data
    web_dir = PARAMS["web_dir"]
    project_id = P.getProjectId()

    ucsc_urls = {
        "bam":
        """track type=bam name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/%(dirname)s/%(filename)s""",
        "bigwig":
        """track type=bigWig name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/%(dirname)s/%(filename)s""",
    }

    # (target directory, files, UCSC data type)
    exportfiles = (
        ("bamfiles",
         glob.glob("*/*.genome.bam") + glob.glob("*/*.genome.bam.bai"),
         "bam"),
        ("bamfiles",
         glob.glob("*/*.prep.bam") + glob.glob("*/*.prep.bam.bai"),
         "bam"),
        ("medips", glob.glob("*/*.bigwig"), "bigwig"),
    )

    ucsc_files = []

    for targetdir, filenames, datatype in exportfiles:
        for src in filenames:
            filename = os.path.basename(src)
            dest = "%s/%s/%s" % (web_dir, targetdir, filename)

            # BUG FIX: os.path.splitext returns a (root, ext) tuple, so the
            # previous ``suffix in ucsc_urls`` test was always False and no
            # UCSC track lines were ever emitted. Compare the bare extension
            # instead; index files (.bai) are still excluded as intended.
            suffix = os.path.splitext(src)[1].lstrip(".")
            if suffix in ucsc_urls:
                ucsc_files.append((datatype, targetdir, filename))

            dest = os.path.abspath(dest)
            # BUG FIX: os.symlink fails if the target directory does not
            # exist - create it first (matches the other publish targets).
            destdir = os.path.dirname(dest)
            if not os.path.exists(destdir):
                os.makedirs(destdir)
            if not os.path.exists(dest):
                os.symlink(os.path.abspath(src), dest)

    # output ucsc links
    for ucsctype, dirname, filename in ucsc_files:
        filename = os.path.basename(filename)
        # NOTE(review): P.snip receives "bam"/"bigwig" without a leading
        # dot - confirm this strips the intended suffix from the filename.
        track = P.snip(filename, ucsctype)
        print(ucsc_urls[ucsctype] % locals())
def publish():
    '''Publish files.

    Exports bigwig tracks (minus any files matching the configured
    ``ucsc_exclude`` patterns), then publishes the report and the
    UCSC data hub.
    '''
    # target type -> files to export
    export_files = {"bigwigfiles": glob.glob("*/*.bigwig")}

    # drop every file matching one of the exclusion patterns
    if PARAMS['ucsc_exclude']:
        patterns = P.asList(PARAMS['ucsc_exclude'])
        for filetype, files in export_files.items():
            kept = set(files)
            for fname in files:
                if any(re.match(rx, fname) for rx in patterns):
                    kept.discard(fname)
            export_files[filetype] = list(kept)

    # publish web pages
    E.info("publishing report")
    P.publish_report(export_files=export_files)

    E.info("publishing UCSC data hub")
    P.publish_tracks(export_files)
def publish():
    '''Publish files by uploading the report.'''
    P.publish_report()
def publish_report():
    '''Upload the report to the publishing location.'''
    E.info("publishing report")
    P.publish_report()
def publish_report():
    '''Publish the report in the cgat downloads directory.'''
    E.info("publishing report")
    P.publish_report()
def publish():
    '''Publish files.'''
    # publish web pages
    P.publish_report()
def publish():
    '''Publish the report and associated data.'''
    E.info("publishing report")
    P.publish_report()