Example 1
0
def runDirectExport(dataHub):
    """Export the rendered views to the destination given by ``--export``.

    In batch mode ``--export`` names a directory (created if missing) and each
    variant is written to ``<dir>/<variant>.<format>``; otherwise ``--export``
    is the output file itself.  Non-SVG formats are converted from the SVG
    produced by the track compositor before being written.
    """
    if dataHub.args.export:
        logging.info("* Exporting views *")
        ensureExportData(dataHub)

        exportFormat = export.getExportFormat(dataHub.args)

        if dataHub.args.type == "batch":
            if not os.path.exists(dataHub.args.export):
                os.makedirs(dataHub.args.export)
            elif not os.path.isdir(dataHub.args.export):
                logging.error("In batch mode, --export must be passed as a directory, not a file: '{}'".format(dataHub.args.export))
                sys.exit(1)
            path = os.path.join(dataHub.args.export, "{}.{}".format(dataHub.variant.shortName(), exportFormat))
        else:
            path = dataHub.args.export

        exportData = dataHub.trackCompositor.render()
        filemode = "w"
        if exportFormat != "svg":
            converter = export.getExportConverter(dataHub.args, exportFormat)
            exportData = export.convertSVG(exportData, exportFormat, converter)
            # converted formats (eg png, pdf) are binary; writing them in
            # text mode fails under python 3
            filemode = "wb"

        with open(path, filemode) as outf:
            outf.write(exportData)

        if dataHub.args.open_exported:
            utilities.launchFile(dataHub.args.export)

        if dataHub.args.dotplots:
            dotplotPath = os.path.splitext(path)[0] + ".dotplot.png"
            with open(dotplotPath, "wb") as dotplotFile:
                dotplotFile.write(dataHub.dotplots["ref vs ref"])
Example 2
0
def runDirectExport(dataHub):
    """Export the rendered views to the destination given by ``--export``.

    When in batch mode, or when an explicit ``--format`` was given,
    ``--export`` is treated as a directory (created if missing) and each
    variant is written to ``<dir>/<variant>.<format>``; otherwise
    ``--export`` is the output file itself.
    """
    if dataHub.args.export:
        logging.info("* Exporting views *")
        ensureExportData(dataHub)

        exportFormat = export.getExportFormat(dataHub.args)

        if dataHub.args.type == "batch" or dataHub.args.format is not None:
            if not os.path.exists(dataHub.args.export):
                os.makedirs(dataHub.args.export)
            path = os.path.join(dataHub.args.export, "{}.{}".format(dataHub.variant.shortName(), exportFormat))
        else:
            path = dataHub.args.export

        exportData = dataHub.trackCompositor.render()
        filemode = "w"
        if exportFormat != "svg":
            converter = export.getExportConverter(dataHub.args, exportFormat)
            exportData = export.convertSVG(exportData, exportFormat, converter)
            # converted formats (eg png, pdf) are binary; writing them in
            # text mode fails under python 3
            filemode = "wb"

        # the context manager guarantees the handle is closed even if the
        # write raises (the original open/write/close leaked it on error)
        with open(path, filemode) as outf:
            outf.write(exportData)

        if dataHub.args.open_exported:
            utilities.launchFile(dataHub.args.export)
Example 3
0
def runDirectExport(dataHub):
    """Export the rendered views to the destination given by ``--export``.

    When in batch mode, or when an explicit ``--format`` was given,
    ``--export`` is treated as a directory (created if missing) and each
    variant is written to ``<dir>/<variant>.<format>``; otherwise
    ``--export`` is the output file itself.
    """
    if dataHub.args.export:
        logging.info("* Exporting views *")
        ensureExportData(dataHub)

        exportFormat = export.getExportFormat(dataHub.args)

        if dataHub.args.type == "batch" or dataHub.args.format is not None:
            if not os.path.exists(dataHub.args.export):
                os.makedirs(dataHub.args.export)
            path = os.path.join(
                dataHub.args.export,
                "{}.{}".format(dataHub.variant.shortName(), exportFormat))
        else:
            path = dataHub.args.export

        exportData = dataHub.trackCompositor.render()
        filemode = "w"
        if exportFormat != "svg":
            converter = export.getExportConverter(dataHub.args, exportFormat)
            exportData = export.convertSVG(exportData, exportFormat, converter)
            # converted formats (eg png, pdf) are binary; writing them in
            # text mode fails under python 3
            filemode = "wb"

        # the context manager guarantees the handle is closed even if the
        # write raises (the original open/write/close leaked it on error)
        with open(path, filemode) as outf:
            outf.write(exportData)

        if dataHub.args.open_exported:
            utilities.launchFile(dataHub.args.export)
Example 4
0
File: app.py Project: svviz/svviz
def runDirectExport(dataHub):
    """Export the rendered views plus optional auxiliary plots.

    In batch mode ``--export`` names a directory (created if missing) and
    each variant is written to ``<dir>/<variant>.<format>``; otherwise
    ``--export`` is the output file itself.  Non-SVG formats are converted
    from the compositor's SVG and written in binary mode.  Dotplots and
    per-sample insert-size distributions are written next to the main
    export when the corresponding flags are set.
    """
    if dataHub.args.export:
        logging.info("* Exporting views *")
        ensureExportData(dataHub)

        exportFormat = export.getExportFormat(dataHub.args)

        if dataHub.args.type == "batch":
            if not os.path.exists(dataHub.args.export):
                os.makedirs(dataHub.args.export)
            elif not os.path.isdir(dataHub.args.export):
                logging.error("In batch mode, --export must be passed as a directory, not a file: '{}'".format(dataHub.args.export))
                sys.exit(1)
            path = os.path.join(dataHub.args.export, "{}.{}".format(dataHub.variant.shortName(), exportFormat))
        else:
            path = dataHub.args.export

        exportData = dataHub.trackCompositor.render()
        filemode = "w"
        if exportFormat != "svg":
            converter = export.getExportConverter(dataHub.args, exportFormat)
            exportData = export.convertSVG(exportData, exportFormat, converter)
            # converted formats (eg png, pdf) are binary
            filemode = "wb"

        with open(path, filemode) as outf:
            outf.write(exportData)

        if dataHub.args.open_exported:
            utilities.launchFile(dataHub.args.export)

        outbasepath = os.path.splitext(path)[0]
        if dataHub.args.dotplots:
            dotplotPath = outbasepath + ".dotplot.png"
            with open(dotplotPath, "wb") as dotplotFile:
                dotplotFile.write(dataHub.dotplots["ref vs ref"])

        if dataHub.args.export_insert_sizes:
            didExportISD = False

            plotInsertSizeDistributions(dataHub)
            for name, sample in dataHub.samples.items():
                if sample.insertSizePlot is not None:
                    outpath = outbasepath + ".insertsizes.{}.png".format(name)
                    # png image data is binary, like the dotplot above; the
                    # original opened this in text mode ("w"), which breaks
                    # under python 3
                    with open(outpath, "wb") as isdfile:
                        isdfile.write(sample.insertSizePlot)
                    didExportISD = True

            if not didExportISD:
                print("** Failed to plot the insert size distributions; please make sure the **")
                print("** rpy2 is installed, your input bam files have sufficient numbers of **")
                print("** reads (> 50,000), and that the reads are paired-ended eg Illumina  **")
                print("** and not PacBio                                                     **")
Example 5
0
def runDirectExport(dataHub):
    """Export the rendered views plus optional auxiliary plots.

    In batch mode ``--export`` names a directory (created if missing) and
    each variant is written to ``<dir>/<variant>.<format>``; otherwise
    ``--export`` is the output file itself.  Non-SVG formats are converted
    from the compositor's SVG and written in binary mode.  Dotplots and
    per-sample insert-size distributions are written next to the main
    export when the corresponding flags are set.
    """
    if dataHub.args.export:
        logging.info("* Exporting views *")
        ensureExportData(dataHub)

        exportFormat = export.getExportFormat(dataHub.args)

        if dataHub.args.type == "batch":
            if not os.path.exists(dataHub.args.export):
                os.makedirs(dataHub.args.export)
            elif not os.path.isdir(dataHub.args.export):
                logging.error(
                    "In batch mode, --export must be passed as a directory, not a file: '{}'"
                    .format(dataHub.args.export))
                sys.exit(1)
            path = os.path.join(
                dataHub.args.export,
                "{}.{}".format(dataHub.variant.shortName(), exportFormat))
        else:
            path = dataHub.args.export

        exportData = dataHub.trackCompositor.render()
        filemode = "w"
        if exportFormat != "svg":
            converter = export.getExportConverter(dataHub.args, exportFormat)
            exportData = export.convertSVG(exportData, exportFormat, converter)
            # converted formats (eg png, pdf) are binary
            filemode = "wb"

        with open(path, filemode) as outf:
            outf.write(exportData)

        if dataHub.args.open_exported:
            utilities.launchFile(dataHub.args.export)

        outbasepath = os.path.splitext(path)[0]
        if dataHub.args.dotplots:
            dotplotPath = outbasepath + ".dotplot.png"
            with open(dotplotPath, "wb") as dotplotFile:
                dotplotFile.write(dataHub.dotplots["ref vs ref"])

        if dataHub.args.export_insert_sizes:
            didExportISD = False

            plotInsertSizeDistributions(dataHub)
            for name, sample in dataHub.samples.items():
                if sample.insertSizePlot is not None:
                    outpath = outbasepath + ".insertsizes.{}.png".format(name)
                    # png image data is binary, like the dotplot above; the
                    # original opened this in text mode ("w"), which breaks
                    # under python 3
                    with open(outpath, "wb") as isdfile:
                        isdfile.write(sample.insertSizePlot)
                    didExportISD = True

            if not didExportISD:
                print(
                    "** Failed to plot the insert size distributions; please make sure the **"
                )
                print(
                    "** rpy2 is installed, your input bam files have sufficient numbers of **"
                )
                print(
                    "** reads (> 50,000), and that the reads are paired-ended eg Illumina  **"
                )
                print(
                    "** and not PacBio                                                     **"
                )
Example 6
0
def run():
    """Run each render regression test and compare against stored outputs.

    For each test case, renders the pickled dataHub fixture, exports it, and
    compares the result byte-for-byte against the previously stored export.
    Render timings are compared against (and recorded in) a JSON file.

    Returns a ``(success, message)`` tuple: ``(True, "")`` when every export
    matches its stored original, otherwise ``(False, "files differ: <name>")``
    for the last test that differed.
    """
    timings = {}
    # track overall status across all tests; the original initialized and
    # reset this inside the loop, so an early failure was overwritten by
    # later passing tests (and an empty test list raised NameError)
    no_changes = True
    message = ""

    for testName in [
            "mei", "inv", "ins_moleculo", "ins_pacbio", "del_chr1",
            "translocation"
    ]:
        print(">", testName, "<")

        exportPath = "renderTests/export_{}_new.svg".format(testName)
        originalPath = "renderTests/export_{}_original.svg".format(testName)

        # fixtures were pickled under python 2: the encoding argument is
        # needed under python 3 and raises TypeError under python 2
        with gzip.open("renderTests/{}.pickle.gz".format(testName),
                       "rb") as fixtureFile:
            try:
                dataHub = pickle.load(fixtureFile, encoding="latin1")
            except TypeError:
                dataHub = pickle.load(fixtureFile)

        dataHub.args = MockArgs()
        dataHub.args.thicker_lines = False
        dataHub.args.export = exportPath
        dataHub.args.context = 0

        t0 = time.time()
        app.renderSamples(dataHub)
        app.ensureExportData(dataHub)
        app.runDirectExport(dataHub)
        t1 = time.time()
        timings[testName] = t1 - t0

        if not os.path.exists(originalPath):
            print("  first time running; nothing to compare against")
            shutil.copy(exportPath, originalPath)
        else:
            if filecmp.cmp(originalPath, exportPath, shallow=False):
                print("  files identical!")
            else:
                with open(originalPath) as origFile, \
                        open(exportPath) as newFile:
                    for a, b in zip(origFile.readlines(),
                                    newFile.readlines()):
                        if a != b:
                            # the original assigned a (False, msg) tuple here,
                            # which is truthy, so callers checking the first
                            # returned value always saw success
                            no_changes = False
                            message = "files differ: {}".format(testName)
                            print("FILES DIFFER! First line that differs:")
                            print("Original:", a.strip())
                            print("New:     ", b.strip())
                            print("...")

                            time.sleep(3)
                            utilities.launchFile(exportPath)
                            utilities.launchFile(originalPath)

                            break

    timingsPath = "renderTests/renderTimings.json.txt"
    regenerateTimings = False
    try:
        with open(timingsPath) as timingsFile:
            oldTimings = json.load(timingsFile)
        print("{:<20}{:>20}{:>20}".format("Test Name", "Previous", "New"))
        for testName in sorted(timings):
            try:
                remark = "ok"
                # flag tests that got more than 10% slower than last run
                if timings[testName] > oldTimings[testName] * 1.1:
                    remark = "** slower! **"
                print("{:<20}{:>19.2f}s{:>19.2f}s\t{}".format(
                    testName, oldTimings[testName], timings[testName], remark))
            except KeyError:
                # test with no previous timing; re-record timings below
                print("{:<20}{:>20}s{:>19.2f}s".format(testName, "",
                                                       timings[testName]))
                regenerateTimings = True

    except IOError:
        print("unable to load previous timings...")

    if not os.path.exists(timingsPath) or regenerateTimings:
        print("overwriting previous timings...")
        with open(timingsPath, "w") as timingsFile:
            json.dump(timings, timingsFile)

    return no_changes, message
Example 7
0
def run():
    """Run each render regression test and compare against stored outputs.

    For each test case, renders the pickled dataHub fixture, exports it, and
    compares the result byte-for-byte against the previously stored export.
    Render timings are compared against (and recorded in) a JSON file.

    Returns a ``(success, message)`` tuple: ``(True, "")`` when every export
    matches its stored original, otherwise ``(False, "files differ: <name>")``
    for the last test that differed.
    """
    timings = {}
    # track overall status across all tests; the original initialized and
    # reset this inside the loop, so an early failure was overwritten by
    # later passing tests (and an empty test list raised NameError)
    no_changes = True
    message = ""

    for testName in ["mei", "inv", "ins_moleculo", "ins_pacbio", "del_chr1", "translocation"]:
        print(">", testName, "<")

        exportPath = "renderTests/export_{}_new.svg".format(testName)
        originalPath = "renderTests/export_{}_original.svg".format(testName)

        # fixtures were pickled under python 2: the encoding argument is
        # needed under python 3 and raises TypeError under python 2
        with gzip.open("renderTests/{}.pickle.gz".format(testName), "rb") as fixtureFile:
            try:
                dataHub = pickle.load(fixtureFile, encoding="latin1")
            except TypeError:
                dataHub = pickle.load(fixtureFile)

        dataHub.args = MockArgs()
        dataHub.args.thicker_lines = False
        dataHub.args.export = exportPath
        dataHub.args.context = 0

        t0 = time.time()
        app.renderSamples(dataHub)
        app.ensureExportData(dataHub)
        app.runDirectExport(dataHub)
        t1 = time.time()
        timings[testName] = t1 - t0

        if not os.path.exists(originalPath):
            print("  first time running; nothing to compare against")
            shutil.copy(exportPath, originalPath)
        else:
            if filecmp.cmp(originalPath, exportPath, shallow=False):
                print("  files identical!")
            else:
                with open(originalPath) as origFile, open(exportPath) as newFile:
                    for a, b in zip(origFile.readlines(), newFile.readlines()):
                        if a != b:
                            # the original assigned a (False, msg) tuple here,
                            # which is truthy, so callers checking the first
                            # returned value always saw success
                            no_changes = False
                            message = "files differ: {}".format(testName)
                            print("FILES DIFFER! First line that differs:")
                            print("Original:", a.strip())
                            print("New:     ", b.strip())
                            print("...")

                            time.sleep(3)
                            utilities.launchFile(exportPath)
                            utilities.launchFile(originalPath)

                            break

    timingsPath = "renderTests/renderTimings.json.txt"
    regenerateTimings = False
    try:
        with open(timingsPath) as timingsFile:
            oldTimings = json.load(timingsFile)
        print("{:<20}{:>20}{:>20}".format("Test Name", "Previous", "New"))
        for testName in sorted(timings):
            try:
                remark = "ok"
                # flag tests that got more than 10% slower than last run
                if timings[testName] > oldTimings[testName] * 1.1:
                    remark = "** slower! **"
                print("{:<20}{:>19.2f}s{:>19.2f}s\t{}".format(testName, oldTimings[testName], timings[testName], remark))
            except KeyError:
                # test with no previous timing; re-record timings below
                print("{:<20}{:>20}s{:>19.2f}s".format(testName, "", timings[testName]))
                regenerateTimings = True

    except IOError:
        print("unable to load previous timings...")

    if not os.path.exists(timingsPath) or regenerateTimings:
        print("overwriting previous timings...")
        with open(timingsPath, "w") as timingsFile:
            json.dump(timings, timingsFile)

    return no_changes, message
Example 8
0
    if not os.path.exists(originalPath):
        print "  first time running; nothing to compare against"
        shutil.copy(exportPath, originalPath)
    else:
        if filecmp.cmp(originalPath, exportPath, shallow=False):
            print "  files identical!"
        else:
            for a, b in zip(open(originalPath).readlines(), open(exportPath).readlines()):
                if a != b:
                    print "FILES DIFFER! First line that differs:"
                    print "Original:", a.strip()
                    print "New:     ", b.strip()
                    print "..."

                    time.sleep(3)
                    utilities.launchFile(exportPath)
                    utilities.launchFile(originalPath)

                    break


timingsPath = "tests/renderTimings.json.txt"
regenerateTimings = False
try:
    oldTimings = json.load(open(timingsPath))
    print "{:<20}{:>20}{:>20}".format("Test Name", "Previous", "New")
    for testName in sorted(timings):
        try:
            remark = "ok"
            if timings[testName] > oldTimings[testName] * 1.1:
                remark = "** slower! **"
Example 9
0
        shutil.copy(exportPath, originalPath)
    else:
        if filecmp.cmp(originalPath, exportPath, shallow=False):
            print "  files identical!"
        else:
            for a, b in zip(
                    open(originalPath).readlines(),
                    open(exportPath).readlines()):
                if a != b:
                    print "FILES DIFFER! First line that differs:"
                    print "Original:", a.strip()
                    print "New:     ", b.strip()
                    print "..."

                    time.sleep(3)
                    utilities.launchFile(exportPath)
                    utilities.launchFile(originalPath)

                    break

timingsPath = "tests/renderTimings.json.txt"
regenerateTimings = False
try:
    oldTimings = json.load(open(timingsPath))
    print "{:<20}{:>20}{:>20}".format("Test Name", "Previous", "New")
    for testName in sorted(timings):
        try:
            remark = "ok"
            if timings[testName] > oldTimings[testName] * 1.1:
                remark = "** slower! **"
            print "{:<20}{:>19.2f}s{:>19.2f}s\t{}".format(