Example #1
configfile = args.config_file
cluster_file = args.cluster_file
cluster_type = args.cluster_type
email = args.email
print_only = args.print_only
verbose = args.verbose

version = "--version " + TopmedPipeline.__version__
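# the version flag is forwarded to each R script through the args list below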

cluster = TopmedPipeline.ClusterFactory.createCluster(cluster_type,
                                                      cluster_file, verbose)

pipeline = cluster.getPipelinePath()
submitPath = cluster.getSubmitPath()
driver = os.path.join(submitPath, "runRscript.sh")

configdict = TopmedPipeline.readConfig(configfile)
configdict = TopmedPipeline.directorySetup(configdict,
                                           subdirs=["log", "plots"])

# analysis init
cluster.analysisInit(print_only=print_only)

job = "locuszoom"

rscript = os.path.join(pipeline, "R", job + ".R")

# find number of jobs to submit by counting lines in file
n = TopmedPipeline.countLines(configdict["locus_file"])
range = "1-" + str(n - 1)

args = ["-s", rscript, configfile, version]
jobid = cluster.submitJob(job_name=job,
                          cmd=driver,
                          args=args,
                          array_range=range,
                          email=email,
                          print_only=print_only)

Example #2
configfile = args.config_file
cluster_file = args.cluster_file
cluster_type = args.cluster_type
ncores = args.ncores
email = args.email
print_only = args.print_only
verbose = args.verbose

version = "--version " + TopmedPipeline.__version__

cluster = TopmedPipeline.ClusterFactory.createCluster(cluster_type, cluster_file, verbose)

pipeline = cluster.getPipelinePath()
driver = os.path.join(pipeline, "runRscript.sh")

configdict = TopmedPipeline.readConfig(configfile)
configdict = TopmedPipeline.directorySetup(configdict, subdirs=["config", "data", "log", "plots", "report"])

# hold is a list of submit IDs. A submit ID is a dict:
#     {jobname: [jobids]}
hold_null_agg = []
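# e.g. after the null model and a later aggregation step have been submitted,
# hold_null_agg might look like this (illustrative IDs, hypothetical second
# job name):
#     [{"null_model": ["12345"]}, {"aggregate_list": ["12346", "12347"]}]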

# null model
job = "null_model"

# if a null model file is given in the config, skip this step
run_null_model = "null_model_file" not in configdict
if run_null_model:

    rscript = os.path.join(pipeline, "R", job + ".R")

    config = deepcopy(configdict)
Example #3
configfile = args.config_file
cluster_file = args.cluster_file
cluster_type = args.cluster_type
ncores = args.ncores
email = args.email
print_only = args.print_only
verbose = args.verbose

version = "--version " + TopmedPipeline.__version__

cluster = TopmedPipeline.ClusterFactory.createCluster(cluster_type,
                                                      cluster_file, verbose)

pipeline = cluster.getPipelinePath()
submitPath = cluster.getSubmitPath()
driver = os.path.join(submitPath, "runRscript.sh")

configdict = TopmedPipeline.readConfig(configfile)
configdict = TopmedPipeline.directorySetup(
    configdict, subdirs=["config", "data", "log", "report"])

# analysis init
cluster.analysisInit(print_only=print_only)

# null model
job = "null_model"

rscript = os.path.join(pipeline, "R", job + ".R")

config = deepcopy(configdict)
config["out_prefix"] = configdict["data_prefix"] + "_null_model"
config["out_phenotype_file"] = configdict["data_prefix"] + "_phenotypes.RData"
configfile = configdict["config_prefix"] + "_" + job + ".config"
TopmedPipeline.writeConfig(config, configfile)
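# readConfig/writeConfig presumably round-trip simple key/value text files, so
# the file written above would contain lines like (illustrative):
#     out_prefix <data_prefix>_null_model
#     out_phenotype_file <data_prefix>_phenotypes.RData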
Example #4
configfile = args.config_file
cluster_file = args.cluster_file
cluster_type = args.cluster_type
email = args.email
print_only = args.print_only
verbose = args.verbose

version = "--version " + TopmedPipeline.__version__

cluster = TopmedPipeline.ClusterFactory.createCluster(cluster_type, cluster_file, verbose)

pipeline = cluster.getPipelinePath()
driver = os.path.join(pipeline, "runRscript.sh")

configdict = TopmedPipeline.readConfig(configfile)
configdict = TopmedPipeline.directorySetup(configdict, subdirs=["log", "plots"])


job = "locuszoom"

rscript = os.path.join(pipeline, "R", job + ".R")

# find number of jobs to submit by counting lines in file
n = TopmedPipeline.countLines(configdict["locus_file"])
range = "1-" + str(n-1)

args = ["-s", rscript, configfile, version]
jobid = cluster.submitJob(job_name=job, cmd=driver, args=args, array_range=range, email=email, print_only=print_only)
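# submitJob returns the scheduler job ID for the array job; it is passed as
# holdid below so cleanup only runs after all locuszoom tasks finish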


cluster.submitJob(job_name="cleanup", cmd=os.path.join(pipeline, "cleanup.sh"), holdid=jobid, print_only=print_only)