Example 1
def _install_data(data_dir, path_flavor, args):
    """Upgrade required genome data files in place.
    """
    try:
        from bcbio import install as bcb
    except ImportError:
        raise ImportError("bcbio is required for the quick installation.")

    bio_data = op.join(path_flavor, "../biodata.yaml")
    s = {
        "flavor": path_flavor,
        # "target": "[brew, conda]",
        "vm_provider": "novm",
        "hostname": "localhost",
        "fabricrc_overrides": {
            "edition": "minimal",
            "use_sudo": "false",
            "keep_isolated": "true",
            "conda_cmd": bcb._get_conda_bin(),
            "distribution": "__auto__",
            "dist_name": "__auto__"
        }
    }
    s["actions"] = ["setup_biodata"]
    s["fabricrc_overrides"]["data_files"] = data_dir
    s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy")
    cbl = bcb.get_cloudbiolinux(bcb.REMOTES)
    s["genomes"] = _get_biodata(bio_data, args)
    sys.path.insert(0, cbl["dir"])
    cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
    cbl_deploy.deploy(s)
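
Note: __import__("cloudbio.deploy", fromlist=["deploy"]) returns the cloudbio.deploy submodule directly rather than the top-level cloudbio package. A minimal sketch of the equivalent importlib idiom, assuming the same cloudbiolinux checkout directory cbl["dir"] used above:

import importlib
import sys

def _load_cbl_deploy(cbl_dir):
    # Make the cloudbiolinux checkout importable, then load cloudbio.deploy;
    # importlib.import_module returns the submodule itself, which is what the
    # fromlist trick with __import__ achieves in the example above.
    sys.path.insert(0, cbl_dir)
    return importlib.import_module("cloudbio.deploy")
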
Example 2
def _install(path, args):
    """
    small helper for installation in case outside bcbio
    """
    try:
        from bcbio import install as bcb
    except ImportError:
        raise ImportError("bcbio is required for the quick installation.")

    path_flavor = _get_flavor()
    s = {"fabricrc_overrides": {"system_install": path,
                                "local_install": os.path.join(path, "local_install"),
                                "use_sudo": "false",
                                "edition": "minimal"}}
    s = {"flavor": path_flavor,
         # "target": "[brew, conda]",
         "vm_provider": "novm",
         "hostname": "localhost",
         "fabricrc_overrides": {"edition": "minimal",
                                "use_sudo": "false",
                                "keep_isolated": "true",
                                "conda_cmd": bcb._get_conda_bin(),
                                "distribution": "__auto__",
                                "dist_name": "__auto__"}}


    s["actions"] = ["install_biolinux"]
    s["fabricrc_overrides"]["system_install"] = path
    s["fabricrc_overrides"]["local_install"] = os.path.join(path, "local_install")
    cbl = bcb.get_cloudbiolinux(bcb.REMOTES)
    sys.path.insert(0, cbl["dir"])
    cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
    cbl_deploy.deploy(s)
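
A hypothetical invocation of _install, assuming the surrounding script parses its own command-line arguments (the argument names here are illustrative, not taken from the original script):

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Quick bcbio tool installation")
    parser.add_argument("path", help="installation prefix, e.g. /usr/local/share/bcbio")
    cli_args = parser.parse_args()
    # _install builds the cloudbiolinux deploy settings and runs install_biolinux.
    _install(cli_args.path, cli_args)
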
Example 3
def _install_data(data_dir, path_flavor, args):
    """Upgrade required genome data files in place.
    """
    try:
        from bcbio import install as bcb
    except ImportError:
        raise ImportError("bcbio is required for the quick installation.")

    bio_data = op.join(path_flavor, "../biodata.yaml")
    s = {"flavor": path_flavor,
         # "target": "[brew, conda]",
         "vm_provider": "novm",
         "hostname": "localhost",
         "fabricrc_overrides": {"edition": "minimal",
                                "use_sudo": "false",
                                "keep_isolated": "true",
                                "conda_cmd": bcb._get_conda_bin(),
                                "distribution": "__auto__",
                                "dist_name": "__auto__"}}
    s["actions"] = ["setup_biodata"]
    s["fabricrc_overrides"]["data_files"] = data_dir
    s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy")
    cbl = bcb.get_cloudbiolinux(bcb.REMOTES)
    s["genomes"] = _get_biodata(bio_data, args)
    sys.path.insert(0, cbl["dir"])
    cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
    cbl_deploy.deploy(s)
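
The _get_biodata(bio_data, args) helper is not shown in these examples. A rough sketch of what such a helper could look like, assuming biodata.yaml lists genome builds and that args carries hypothetical genomes and aligners attributes (an illustrative assumption, not the actual bcbio implementation):

import yaml

def _get_biodata_sketch(yaml_file, args):
    # Load the biodata description and keep only the genome builds and
    # aligner indexes requested on the command line (hypothetical attributes).
    with open(yaml_file) as in_handle:
        config = yaml.safe_load(in_handle)
    config["genomes"] = [g for g in config.get("genomes", [])
                         if g.get("dbkey") in args.genomes]
    config["genome_indexes"] = args.aligners
    return config
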
Example 4
def _install(path, args):
    """
    small helper for installation in case outside bcbio
    """
    try:
        from bcbio import install as bcb
    except ImportError:
        raise ImportError("bcbio is required for the quick installation.")

    path_flavor = _get_flavor()
    # NOTE: this settings dict is immediately overwritten by the assignment
    # below; system_install and local_install are re-added to it afterwards.
    s = {
        "fabricrc_overrides": {
            "system_install": path,
            "local_install": os.path.join(path, "local_install"),
            "use_sudo": "false",
            "edition": "minimal"
        }
    }
    s = {
        "flavor": path_flavor,
        # "target": "[brew, conda]",
        "vm_provider": "novm",
        "hostname": "localhost",
        "fabricrc_overrides": {
            "edition": "minimal",
            "use_sudo": "false",
            "keep_isolated": "true",
            "conda_cmd": bcb._get_conda_bin(),
            "distribution": "__auto__",
            "dist_name": "__auto__"
        }
    }

    s["actions"] = ["install_biolinux"]
    s["fabricrc_overrides"]["system_install"] = path
    s["fabricrc_overrides"]["local_install"] = os.path.join(
        path, "local_install")
    cbl = bcb.get_cloudbiolinux(bcb.REMOTES)
    sys.path.insert(0, cbl["dir"])
    cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
    cbl_deploy.deploy(s)
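
If both sets of fabricrc overrides were meant to apply, a dictionary merge would express that more directly than rebinding s and re-adding keys afterwards; a sketch under that assumption:

    base_overrides = {"system_install": path,
                      "local_install": os.path.join(path, "local_install"),
                      "use_sudo": "false",
                      "edition": "minimal"}
    s = {"flavor": path_flavor,
         "vm_provider": "novm",
         "hostname": "localhost",
         "fabricrc_overrides": dict(base_overrides,
                                    keep_isolated="true",
                                    conda_cmd=bcb._get_conda_bin(),
                                    distribution="__auto__",
                                    dist_name="__auto__")}
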
Example 5
    parser.add_argument("--ercc",
                        action='store_true',
                        default=False,
                        help="Add ERCC spike-ins.")
    parser.add_argument("--mirbase",
                        help="species in mirbase for smallRNAseq data.")
    parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")

    args = parser.parse_args()
    if any([args.mirbase, args.srna_gtf]) and not all([args.mirbase, args.srna_gtf]):
        raise ValueError("--mirbase and --srna_gtf both need a value.")

    env.hosts = ["localhost"]
    env.cores = args.cores
    os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
    cbl = get_cloudbiolinux(REMOTES)
    sys.path.insert(0, cbl["dir"])
    genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
    # monkey patch cloudbiolinux to use this indexing command instead
    genomes = getattr(genomemod, 'genomes')
    genomes._index_w_command = _index_w_command
    fabmod = __import__("cloudbio", fromlist=["fabutils"])
    fabutils = getattr(fabmod, 'fabutils')
    fabutils.configure_runsudo(env)

    system_config = os.path.join(_get_data_dir(), "galaxy",
                                 "bcbio_system.yaml")
    with open(system_config) as in_handle:
        config = yaml.safe_load(in_handle)
    env.picard_home = config_utils.get_program("picard", config, ptype="dir")
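
The fragment above monkey patches cloudbiolinux at runtime by rebinding genomes._index_w_command to a local function, so callers inside cloudbiolinux pick up the replacement. The same pattern in isolation, on a throwaway module object (a self-contained sketch, not bcbio code):

import types

# Stand-in module with a function we want to override at runtime.
fake_genomes = types.ModuleType("fake_genomes")
fake_genomes.index = lambda ref: "default indexing of %s" % ref

def custom_index(ref):
    # Replacement behaviour, analogous to the local _index_w_command above.
    return "custom indexing of %s" % ref

# Monkey patch: rebind the module attribute so existing callers use the new code.
fake_genomes.index = custom_index
print(fake_genomes.index("hg19"))  # -> custom indexing of hg19
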
                        help="Name of organism, for example Hsapiens.")
    parser.add_argument("-b", "--build", required=True,
                        help="Build of genome, for example hg19.")
    parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
                        default=["seq"], help="Space separated list of indexes to make")
    parser.add_argument("--ercc", action='store_true', default=False,
                        help="Add ERCC spike-ins.")
    parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
    parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")

    args = parser.parse_args()
    # if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
    #     raise ValueError("--mirbase and --srna_gtf both need a value.")

    os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
    cbl = get_cloudbiolinux(REMOTES)
    sys.path.insert(0, cbl["dir"])
    genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
    # monkey patch cloudbiolinux to use this indexing command instead
    genomes = getattr(genomemod, 'genomes')
    genomes._index_w_command = _index_w_command

    genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
    args.fasta = os.path.abspath(args.fasta)
    if not file_exists(args.fasta):
        print("%s does not exist, exiting." % args.fasta)
        sys.exit(1)

    args.gtf = os.path.abspath(args.gtf) if args.gtf else None
    if args.gtf and not file_exists(args.gtf):
        print("%s does not exist, exiting." % args.gtf)
Example 7
    required.add_argument("-f", "--fasta", required=True,
                        help="FASTA file of the genome.")
    required.add_argument("-g", "--gtf", default=None,
                        help="GTF file of the transcriptome")
    required.add_argument("-n", "--name", required=True,
                        help="Name of organism, for example Hsapiens.")
    required.add_argument("-b", "--build", required=True,
                        help="Build of genome, for example hg19.")
    parser.add_argument("--cloudbiolinux", help="Specify a cloudbiolinux git commit hash or tag to install",
                            default="master")
    args = parser.parse_args()
    # if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
    #     raise ValueError("--mirbase and --srna_gtf both need a value.")

    os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
    cbl = get_cloudbiolinux(args, REMOTES)
    sys.path.insert(0, cbl["dir"])
    genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
    # monkey patch cloudbiolinux to use this indexing command instead
    genomes = getattr(genomemod, 'genomes')
    genomes._index_w_command = _index_w_command

    genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
    args.fasta = os.path.abspath(args.fasta)
    if not file_exists(args.fasta):
        print("%s does not exist, exiting." % args.fasta)
        sys.exit(1)

    args.gtf = os.path.abspath(args.gtf) if args.gtf else None
    if args.gtf and not file_exists(args.gtf):
        print("%s does not exist, exiting." % args.gtf)