Example #1
import itertools
import os
from typing import Dict

# log_state, smashing_utils, and SMASHING_DIR come from the surrounding
# module and are assumed to be in scope.

def _prepare_input(job_context: Dict) -> Dict:
    start_time = log_state("prepare input", job_context["job"].id)

    job_context["primary_organism"] = max(
        job_context["samples"],
        key=lambda organism: len(job_context["samples"][organism]))
    job_context["all_organisms"] = job_context["samples"].keys()
    all_samples = list(itertools.chain(*job_context["samples"].values()))
    job_context["samples"] = {job_context["primary_organism"]: all_samples}

    # Here we'll store the accession codes of every sample that didn't make it
    # into the compendium, along with the reason it was excluded.
    job_context["filtered_samples"] = {}

    job_context = smashing_utils.prepare_files(job_context)

    # Compendia jobs run for a single organism, so the only group-by
    # key will be the organism name, unless the job has already failed.
    if job_context["job"].success is not False:
        job_context["organism_name"] = job_context["group_by_keys"][0]

        # TEMPORARY: to iterate on compendia more quickly, reuse the same
        # directory for every job rather than downloading the data from S3
        # on each run.
        job_context["old_work_dir"] = job_context["work_dir"]
        job_context["work_dir"] = SMASHING_DIR + job_context["organism_name"] + "/"
        os.makedirs(job_context["work_dir"], exist_ok=True)

    log_state("prepare input done", job_context["job"].id, start_time)
    return job_context
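
As a sanity check, the organism-merging logic at the top of this function can be run on its own. This is a minimal sketch using plain strings as hypothetical stand-ins for the real sample objects (the dictionary shape is the same):

import itertools

# Hypothetical input: samples grouped by organism, as strings for brevity.
samples = {
    "HOMO_SAPIENS": ["sample_1", "sample_2", "sample_3"],
    "MUS_MUSCULUS": ["sample_4"],
}

# The organism with the most samples becomes the primary organism.
primary_organism = max(samples, key=lambda organism: len(samples[organism]))

# Snapshot the organism list before the dict is replaced.
all_organisms = list(samples.keys())

# Flatten every organism's samples into one list keyed by the primary organism.
all_samples = list(itertools.chain(*samples.values()))
samples = {primary_organism: all_samples}

print(primary_organism)  # HOMO_SAPIENS
print(all_organisms)     # ['HOMO_SAPIENS', 'MUS_MUSCULUS']
print(samples)           # {'HOMO_SAPIENS': ['sample_1', ..., 'sample_4']}

Keying the merged list by the primary organism lets the downstream smashing code keep treating the job as single-organism while still covering every sample.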
Example #2
import time
from typing import Dict

# smashing_utils comes from the surrounding module and is assumed to be in scope.

def _prepare_input(job_context: Dict) -> Dict:

    # We're going to use the smasher outside of the smasher.
    # I'm not crazy about this yet. Maybe refactor later,
    # but I need the data now.
    job_context = smashing_utils.prepare_files(job_context)

    # work_dir was already created by smashing_utils.prepare_files above.
    # Name the output after the current Unix timestamp (whole seconds).
    outfile_base = job_context["work_dir"] + str(int(time.time()))
    job_context["target_file"] = outfile_base + "_target.tsv"

    return job_context
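
The only work this second variant does itself is pick an output path. Here is a quick sketch of the timestamp-based naming, assuming a placeholder work_dir since the real one is set up by prepare_files:

import time

work_dir = "/tmp/smashing/"  # hypothetical; normally created by prepare_files
outfile_base = work_dir + str(int(time.time()))  # whole-second Unix timestamp
target_file = outfile_base + "_target.tsv"
print(target_file)  # e.g. /tmp/smashing/1712345678_target.tsv

The timestamp gives each run a distinct target file (at one-second granularity), so repeated jobs sharing a work directory won't clobber each other's output.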