import sys

from firecloud import api as fapi


def create_job_submission(billing_project, workspace_name, workflow_name,
                          workflow_repo, entity_id):
    response = fapi.get_entities_with_type(billing_project, workspace_name)
    entities = response.json()

    # Walk the entities in the workspace (the bound values are not used
    # below; the loop illustrates the shape of each entity record).
    for ent in entities:
        ent_name = ent['name']
        ent_type = ent['entityType']
        ent_attrs = ent['attributes']

    submission_response = fapi.create_submission(billing_project,
                                                 workspace_name,
                                                 workflow_repo,
                                                 workflow_name,
                                                 entity=entity_id,
                                                 etype="participant_lane_set",
                                                 expression=None,
                                                 use_callcache=True)

    if submission_response.status_code != 201:
        print(submission_response.content)
        sys.exit(1)
    else:
        print("Successfully created submission")
        with open('response.txt', 'w') as fout:
            fout.write(submission_response.json()['submissionId'] + '\n')
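A minimal usage sketch for the helper above; the billing project, workspace, workflow, and entity names are hypothetical placeholders:

# Hypothetical values for illustration only.
create_job_submission("my-billing-project", "my-workspace", "Optimus",
                      "my-workflow-repo", entity_id="lane_set_1")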
Example #2
from firecloud import api as fapi


def new(wnamespace,
        workspace,
        cnamespace,
        config,
        entity_id,
        etype,
        expression,
        api_url=fapi.PROD_API_ROOT):
    r = fapi.create_submission(wnamespace, workspace, cnamespace, config,
                               entity_id, etype, expression, api_url)
    # _check_response_code raises FireCloudServerError on a non-201 response
    fapi._check_response_code(r, 201)
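Example #3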
from firecloud import api as fapi


def submit_a_job_to_terra(workspace_namespace: str,
                          workspace_name: str,
                          config_namespace: str,
                          config_name: str,
                          use_callcache: bool = True) -> str:
    """Create a job submission to Terra and if success, return a URL for checking job status.
    """
    launch_submission = fapi.create_submission(workspace_namespace,
                                               workspace_name,
                                               config_namespace,
                                               config_name,
                                               use_callcache=use_callcache)
    if launch_submission.status_code != 201:
        raise ValueError(
            f"Unable to launch submission - {launch_submission.json()}!")

    submission_id = launch_submission.json()["submissionId"]
    status_url = f"https://app.terra.bio/#workspaces/{workspace_namespace}/{workspace_name}/job_history/{submission_id}"
    return status_url
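A minimal usage sketch, assuming a workspace and method config already exist; all names below are hypothetical placeholders:

# Hypothetical names for illustration only.
status_url = submit_a_job_to_terra("my-billing-project", "my-workspace",
                                   "my-config-namespace", "my-config")
print("Track the submission at:", status_url)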
Example #4
z = fapi.get_workspace_acl(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE)
print('WORKSPACE ACL:', z, z.json())


# z = fapi.overwrite_workspace_config(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE,
#                                     cnamespace=SEL_NAMESPACE, configname=TERRA_CONFIG_NAME, body=config_json)
# print('OVERWROTE', z, z.json())

z = fapi.get_workspace_config(workspace=SEL_WORKSPACE, namespace=SEL_NAMESPACE,
                              config=TERRA_CONFIG_NAME, cnamespace=SEL_NAMESPACE)

print('CONFIG_NOW_IS_2', z, z.json())


z = fapi.create_submission(wnamespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE,
                           cnamespace=SEL_NAMESPACE, config=TERRA_CONFIG_NAME)
print('SUBMISSION IS', z, z.json())

# sys.exit(0)

# def dump_file(fname, value):
#     """store string in file"""
#     with open(fname, 'w')  as out:
#         out.write(str(value))

# #z = fapi.create_submission(wnamespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE,
# #                           cnamespace=SEL_NAMESPACE, config=TERRA_CONFIG_NAME)
# #print('SUBMISSION IS', z, z.json())

# #z = fapi.get_config_template(namespace='dockstore', method=TERRA_CONFIG_NAME, version=1)
# #print(z.json())
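Example #5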
def main():
    global billing_project
    global template_workspace_name

    parser = argparse.ArgumentParser(prog="python " + sys.argv[0],
                                     add_help=False)
    subparser = parser.add_subparsers(dest="cmd")

    delete_workspace = subparser.add_parser('delete_workspace',
                                            help='delete workspace')
    delete_workspace.add_argument('--workspace-name',
                                  dest="workspace_name",
                                  help="name of the workspace")

    clone_workspace = subparser.add_parser(
        'clone_workspace', help='clone from existing workspace')
    clone_workspace.add_argument('--source-work-space',
                                 dest='src_work_space',
                                 help="name of source workspace")
    clone_workspace.add_argument('--destination-work-space',
                                 dest='dest_work_space',
                                 help="name of destination workspace")

    get_data_info = subparser.add_parser('get_participant_table',
                                         help='get participant.tsv')
    get_data_info.add_argument('--workspace-name',
                               dest="workspace_name",
                               help="name of the workspace")
    get_data_info.add_argument('--participant-table-name',
                               dest="participant_table_name",
                               help="name of the participant table")
    get_data_info.add_argument('--output-name',
                               dest="output_table_name",
                               required=False,
                               default="participant.tsv",
                               help="name of output tsv")

    create_participant_lane = subparser.add_parser(
        'create_participant_lane',
        help='create participant_lane/lane_set_id tables')
    create_participant_lane.add_argument('--input-name',
                                         dest="input_participant_table_name",
                                         required=False,
                                         default="participant.tsv",
                                         help="input participant table  name")

    create_participant_lane.add_argument(
        '--output-prefix',
        dest="output_prefix",
        required=False,
        help="name of output prefix for the lanes")

    upload_participant_lane = subparser.add_parser(
        'upload_participant',
        help='uploads the participant_lane_set, _lane_membership and '
             '_lane_entity files')
    upload_participant_lane.add_argument('--workspace-name',
                                         dest="workspace_name",
                                         help="name of the workspace")
    upload_participant_lane.add_argument('--input-prefix',
                                         dest="input_prefix",
                                         help="name of the input prefix")

    upload_workflow = subparser.add_parser(
        'upload_workflow', help='uploads wdl to --workspace-name')
    upload_workflow.add_argument('--workspace-name',
                                 dest="workspace_name",
                                 help="name of the workspace")
    upload_workflow.add_argument('--method',
                                 dest="method",
                                 help="name of the method")
    upload_workflow.add_argument('--wdl',
                                 dest="wdl",
                                 help="path to the WDL file")

    upload_config = subparser.add_parser('upload_config',
                                         help='upload config information')
    upload_config.add_argument('--workspace-name',
                               dest="workspace_name",
                               help="name of the workspace")
    upload_config.add_argument('--chemistry',
                               dest="chemistry",
                               choices=["V2", "V3"],
                               help="chemistry")
    upload_config.add_argument(
        '--counting-mode',
        dest="counting_mode",
        choices=["sc_rna", "sn_rna"],
        help="counting mode: whether to count intronic alignments")
    upload_config.add_argument('--species',
                               dest="species",
                               choices=["human", "mouse"],
                               help="species")

    submit_workflow = subparser.add_parser('submit_workflow',
                                           help='submit a workflow run')
    submit_workflow.add_argument('--workspace-name',
                                 dest="workspace_name",
                                 help="name of the workspace")
    submit_workflow.add_argument('--workflow-repo',
                                 dest="workflow_repo",
                                 help="workflow repo name")
    submit_workflow.add_argument('--workflow-name',
                                 dest="workflow_name",
                                 help="workflow name")
    submit_workflow.add_argument('--entity-id',
                                 dest="entity_id",
                                 help="entity id")

    get_status = subparser.add_parser('get_status',
                                      help='get status of a submission')
    get_status.add_argument('--workspace-name',
                            dest="workspace_name",
                            help="name of the workspace")
    get_status.add_argument('--submission-id',
                            dest="submission_id",
                            help="submission_id")

    # show help when no arguments supplied
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    args = parser.parse_args()

    # new_workspace_name = "DCP2_Optimus_template_KMK_v1"
    if args.cmd == 'delete_workspace':
        print("Delete existing workspace ", args.workspace_name)
        delete_status = fapi.delete_workspace(billing_project,
                                              args.workspace_name)

    elif args.cmd == 'clone_workspace':
        print("Cloning a new workspace from template", args.src_work_space)
        status = create_newworkspace(billing_project, args.src_work_space,
                                     args.dest_work_space)

    elif args.cmd == 'get_participant_table':
        print("Get information from workspace", args.workspace_name)
        r = fapi.get_entities_tsv(billing_project, args.workspace_name,
                                  args.participant_table_name)
        with open(args.output_table_name, 'w') as fout:
            fout.write(r.content.decode())

    elif args.cmd == 'create_participant_lane':
        parse_terra.create_output_files(args.input_participant_table_name,
                                        args.output_prefix)

    elif args.cmd == 'upload_participant':
        upload_tables(args.input_prefix + ".tsv", billing_project,
                      args.workspace_name)
        upload_tables(args.input_prefix + "_membership.tsv", billing_project,
                      args.workspace_name)
        upload_tables(args.input_prefix + "_entity.tsv", billing_project,
                      args.workspace_name)
    elif args.cmd == 'upload_workflow':
        # Note: the synopsis, documentation, and comment arguments are passed
        # as placeholder literals here rather than real values.
        r = fapi.update_repository_method(args.workspace_name, args.method,
                                          "args.synopsis", args.wdl,
                                          "comment.txt", "args.comment")
        with open("response.txt", 'w') as fout:
            fout.write(r.content.decode())

    elif args.cmd == 'upload_config':

        work_space_config = fapi.get_workspace_config(billing_project,
                                                      args.workspace_name,
                                                      args.workspace_name,
                                                      "Optimus")
        work_space_json = work_space_config.json()
        work_space_json['inputs'] = json_templates.optimus_inputs
        work_space_json['outputs'] = json_templates.optimus_outputs

        if args.chemistry == "V2":
            work_space_json['inputs']['Optimus.chemistry'] = '\"tenX_v2\"'
            work_space_json['inputs'][
                'Optimus.whitelist'] = 'workspace.whitelist_v2'
        if args.chemistry == "V3":
            work_space_json['inputs']['Optimus.chemistry'] = '\"tenX_v3\"'
            work_space_json['inputs'][
                'Optimus.whitelist'] = 'workspace.whitelist_v3'

        if args.chemistry == "sn_rna":
            work_space_json['inputs']['Optimus.counting_mode'] = "\"sn_rna\""
        if args.chemistry == "sc_rna":
            work_space_json['inputs']['Optimus.counting_mode'] = "\"sc_rna\""

        if args.species == "human":
            work_space_json['inputs'][
                'Optimus.annotations_gtf'] = 'workspace.human_annotations_gtf'
            work_space_json['inputs'][
                'Optimus.ref_genome_fasta'] = 'workspace.human_ref_genome_fasta'
            work_space_json['inputs'][
                'Optimus.tar_star_reference'] = 'workspace.human_tar_star_reference'
        if args.species == "mouse":
            work_space_json['inputs'][
                'Optimus.annotations_gtf'] = 'workspace.mouse_annotations_gtf'
            work_space_json['inputs'][
                'Optimus.ref_genome_fasta'] = 'workspace.mouse_ref_genome_fasta'
            work_space_json['inputs'][
                'Optimus.tar_star_reference'] = 'workspace.mouse_tar_star_reference'

        updated_workflow = fapi.update_workspace_config(
            billing_project, args.workspace_name, args.workspace_name,
            "Optimus", work_space_json)

        if updated_workflow.status_code != 200:
            print("ERROR: " + updated_workflow.content.decode())
            sys.exit(1)
        else:
            print("updated successfully")

    elif args.cmd == 'submit_workflow':
        # Launching the Updated Monitor Submission Workflow
        response = fapi.get_entities_with_type(billing_project,
                                               args.workspace_name)
        entities = response.json()

        # Walk the entities (the bound values are not used below).
        for ent in entities:
            ent_name = ent['name']
            ent_type = ent['entityType']
            ent_attrs = ent['attributes']

        submission_response = fapi.create_submission(
            billing_project,
            args.workspace_name,
            args.workflow_repo,
            args.workflow_name,
            entity=args.entity_id,
            etype="participant_lane_set",
            expression=None,
            use_callcache=True)

        if submission_response.status_code != 201:
            print(submission_response.content)
            sys.exit(1)
        else:
            print("Successfully created submission")
            with open('response.txt', 'w') as fout:
                # json.dump(submission_response.json(), fout)
                fout.write(submission_response.json()['submissionId'] + '\n')
        # r = create_workspace_config("broadgdac", args.workspace_name, body):
        # print(r.content.decode())
    elif args.cmd == 'get_status':
        res = fapi.get_submission(billing_project, args.workspace_name,
                                  args.submission_id)
        print(res.content.decode())
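A sketch of how the subcommands above might be invoked, assuming the script is saved as terra_tool.py (a hypothetical file name):

if __name__ == '__main__':
    # Hypothetical invocations:
    #   python terra_tool.py get_participant_table --workspace-name my-ws
    #   python terra_tool.py submit_workflow --workspace-name my-ws \
    #       --workflow-repo my-repo --workflow-name Optimus --entity-id lane_set_1
    main()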
Example #6
import json
import logging
import time

from firecloud import api as fapi


def supervise_until_complete(monitor_data, dependencies, args, recovery_file):
    """Supervisor loop. Loop until all tasks have been evaluated or completed."""
    project = args['project']
    workspace = args['workspace']
    namespace = args['namespace']
    sample_sets = args['sample_sets']
    recovery_data = {'args': args}

    if not validate_monitor_tasks(dependencies, args):
        logging.error("Errors found, aborting...")
        return

    while True:
        # There are 4 possible states for each node:
        #   1. Not Started -- In this state, check all the dependencies for the
        #         node (possibly 0). If all of them have been evaluated, and the
        #         satisfiedMode is met, start the task, change to "Running". If
        #         satisfiedMode is not met, change to "Evaluated".
        #
        #   2. Running -- Submitted in FC. Check the submission endpoint, and
        #         if it has completed, change to "Completed", set evaluated=True,
        #         and whether the task succeeded
        #         Otherwise, do nothing
        #
        #   3. Completed -- Job ran in FC and either succeeded or failed. Do nothing
        #   4. Evaluated -- All dependencies evaluated, but this task did not
        #         run; do nothing

        # Keep a tab of the number of jobs in each category
        running = 0
        waiting = 0
        completed = 0

        # Get the submissions
        r = fapi.list_submissions(project, workspace)
        sub_list = r.json()
        #TODO: filter this list by submission time first?
        sub_lookup = {s["submissionId"]: s for s in sub_list}

        # Keys of dependencies is the list of tasks to run
        for n in dependencies:
            for sset in sample_sets:
                task_data = monitor_data[n][sset]
                if task_data['state'] == "Not Started":
                    # See if all of the dependencies have been evaluated
                    upstream_evaluated = True
                    for dep in dependencies[n]:
                        # Look up the status of the task
                        upstream_task_data = monitor_data[
                            dep['upstream_task']][sset]
                        if not upstream_task_data.get('evaluated'):
                            upstream_evaluated = False

                    # if all of the dependencies have been evaluated, we can evaluate
                    # this node
                    if upstream_evaluated:
                        # Now check the satisfied Mode of all the dependencies
                        should_run = True
                        for dep in dependencies[n]:
                            upstream_task_data = monitor_data[
                                dep['upstream_task']][sset]
                            mode = dep['satisfiedMode']

                            # Task must have succeeded for OnComplete
                            if mode == '"OnComplete"' and not upstream_task_data[
                                    'succeeded']:
                                should_run = False
                            # 'Always' and 'Optional' run once the deps have been
                            # evaluated

                        if should_run:
                            # Submit the workflow to FC
                            fc_config = n
                            logging.info("Starting workflow " + fc_config +
                                         " on " + sset)

                            # How to handle errors at this step?
                            for retry in range(3):
                                r = fapi.create_submission(project,
                                                           workspace,
                                                           namespace,
                                                           fc_config,
                                                           sset,
                                                           etype="sample_set",
                                                           expression=None)
                                if r.status_code == 201:
                                    task_data['submissionId'] = \
                                        r.json()['submissionId']
                                    task_data['state'] = "Running"
                                    running += 1
                                    break
                                else:
                                    # There was an error, under certain circumstances retry
                                    logging.debug(
                                        "create_submission for " + fc_config +
                                        " failed on " + sset +
                                        " with the following response: " +
                                        r.content.decode() + "\nRetrying...")

                            else:
                                # None of the attempts above succeeded, log an error, mark as failed
                                logging.error("Maximum retries exceeded")
                                task_data['state'] = 'Completed'
                                task_data['evaluated'] = True
                                task_data['succeeded'] = False

                        else:
                            # This task will never be able to run, mark evaluated
                            task_data['state'] = "Evaluated"
                            task_data['evaluated'] = True
                            completed += 1
                    else:
                        waiting += 1

                elif task_data['state'] == "Running":
                    submission = sub_lookup[task_data['submissionId']]
                    status = submission['status']

                    if status == "Done":
                        # Look at the individual workflows to see if there were
                        # failures
                        logging.info("Workflow " + n + " completed for " +
                                     sset)
                        success = 'Failed' not in submission[
                            'workflowStatuses']
                        task_data['evaluated'] = True
                        task_data['succeeded'] = success
                        task_data['state'] = "Completed"

                        completed += 1
                    else:
                        # Submission isn't done, don't do anything
                        running += 1

                else:
                    # Either Completed or evaluated
                    completed += 1

                # Save the state of the monitor for recovery purposes
                # Have to do this for every workflow + sample_set so we don't lose track of any
                recovery_data['monitor_data'] = monitor_data
                recovery_data['dependencies'] = dependencies

                with open(recovery_file, 'w') as rf:
                    json.dump(recovery_data, rf)

        logging.info("{0} Waiting, {1} Running, {2} Completed".format(
            waiting, running, completed))

        # If all tasks have been evaluated, we are done
        if all(monitor_data[n][sset]['evaluated'] for n in monitor_data
               for sset in monitor_data[n]):
            logging.info("DONE.")
            break
        time.sleep(30)
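The loop above assumes particular shapes for monitor_data and dependencies; a minimal sketch inferred from the field accesses in the code (task and sample-set names are hypothetical):

# Hypothetical task graph: qc_task runs only after align_task succeeds.
dependencies = {
    'align_task': [],
    'qc_task': [{'upstream_task': 'align_task',
                 'satisfiedMode': '"OnComplete"'}],
}
monitor_data = {task: {'sample_set_1': {'state': 'Not Started'}}
                for task in dependencies}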
Example #7
def do_fc_run(method: str, workspace: str, wdl_inputs: Union[str, dict],
              out_json: str, bucket_folder: str) -> str:
    """Run a FireCloud method.

    Args:
        method: method namespace/name/version. Version is optional.
        workspace: workspace namespace/name.
        wdl_inputs: WDL input JSON (a file path or a dict).
        out_json: if not None, upload the inputs and write the resolved input JSON to this path.
        bucket_folder: the folder under the Google bucket for uploading files.

    Returns:
        URL to check submission status.
    """
    inputs = kco.get_wdl_inputs(wdl_inputs)
    method_namespace, method_name, method_version = kco.fs_split(method)
    if method_version is None:
        version = -1
        list_methods = fapi.list_repository_methods(method_name)
        if list_methods.status_code != 200:
            raise ValueError('Unable to list methods - ' +
                             str(list_methods.json()))
        methods = list_methods.json()
        # Find the latest snapshot of the method in this namespace.
        for m in methods:
            if m['namespace'] == method_namespace:
                version = max(version, m['snapshotId'])
        if version == -1:
            raise ValueError(method_name + ' not found')
        method_version = version

    root_entity = None
    launch_entity = None
    workspace_namespace, workspace_name, workspace_version = kco.fs_split(
        workspace)
    kco.get_or_create_workspace(workspace_namespace, workspace_name)

    if out_json is not None:
        kco.do_fc_upload(inputs, workspace, False, bucket_folder)
        with open(out_json, 'w') as fout:
            json.dump(inputs, fout)
    config_namespace = method_namespace
    config_name = method_name

    method_body = {
        'name': config_name,
        'namespace': config_namespace,
        'methodRepoMethod': {
            'methodNamespace': method_namespace,
            'methodName': method_name,
            'methodVersion': method_version,
            'sourceRepo': 'agora',
            'methodUri': 'agora://{0}/{1}/{2}'.format(
                method_namespace, method_name, method_version)
        },
        'rootEntityType': root_entity,
        'prerequisites': {},
        'inputs': convert_inputs(inputs),
        'outputs': {},
        'methodConfigVersion': 1,
        'deleted': False
    }

    config_exists = fapi.get_workspace_config(workspace_namespace,
                                              workspace_name, config_namespace,
                                              config_name)

    if config_exists.status_code == 200:
        config_submission = fapi.update_workspace_config(
            workspace_namespace, workspace_name, config_namespace, config_name,
            method_body)
        if config_submission.status_code != 200:
            raise ValueError('Unable to update workspace config. Response: ' +
                             str(config_submission.status_code))

    else:
        config_submission = fapi.create_workspace_config(
            workspace_namespace, workspace_name, method_body)
        if config_submission.status_code != 201:
            raise ValueError('Unable to create workspace config - ' +
                             str(config_submission.json()))

    launch_submission = fapi.create_submission(workspace_namespace,
                                               workspace_name,
                                               config_namespace, config_name,
                                               launch_entity, root_entity, "")

    if launch_submission.status_code == 201:
        submission_id = launch_submission.json()['submissionId']
        url = 'https://portal.firecloud.org/#workspaces/{}/{}/monitor/{}'.format(
            workspace_namespace, workspace_name, submission_id)

        return url
    else:
        raise ValueError('Unable to launch submission - ' +
                         str(launch_submission.json()))
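A minimal usage sketch for do_fc_run; the method, workspace, and file names are hypothetical placeholders:

# Hypothetical identifiers for illustration only.
status_url = do_fc_run(method='my-namespace/my-method/4',
                       workspace='my-namespace/my-workspace',
                       wdl_inputs='inputs.json',
                       out_json='resolved_inputs.json',
                       bucket_folder='uploads')
print(status_url)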
Example #8
# (truncated: config_json is constructed earlier in this script, ending with outputs={})

z = fapi.create_workspace_config(namespace=SEL_NAMESPACE,
                                 workspace=SEL_WORKSPACE,
                                 body=config_json)
print('CREATED CONFIG:', z, z.json())

z = fapi.get_workspace_config(workspace=SEL_WORKSPACE,
                              namespace=SEL_NAMESPACE,
                              config='test-cosi2-method-01',
                              cnamespace=SEL_NAMESPACE)

print('CONFIG_NOW_IS', z, z.json())

z = fapi.create_submission(wnamespace=SEL_NAMESPACE,
                           workspace=SEL_WORKSPACE,
                           cnamespace=SEL_NAMESPACE,
                           config='test-cosi2-method-01')
print('SUBMISSION IS', z, z.json())

sys.exit(0)


def dump_file(fname, value):
    """store string in file"""
    with open(fname, 'w') as out:
        out.write(str(value))


#z = fapi.create_submission(wnamespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE,
#                           cnamespace=SEL_NAMESPACE, config='dockstore-tool-cosi2')
#print('SUBMISSION IS', z, z.json())
Example #9
from firecloud import api as fapi
from firecloud import errors as ferrors

# Updating the inputs in the JSON
workflow_json['inputs'] = {
    "monitor_submission.submission_id": f'"{submission_id}"',
    "monitor_submission.terra_project": f'"{workspace_project}"',
    "monitor_submission.terra_workspace": f'"{workspace_name}"'
}
updated_workflow = fapi.update_workspace_config(workspace_project,
                                                workspace_name, workflow_repo,
                                                workflow_name, workflow_json)
if updated_workflow.status_code != 200:
    print(updated_workflow.content)
    raise ferrors.FireCloudServerError(updated_workflow.status_code,
                                       updated_workflow.content)

# Launching the Updated Monitor Submission Workflow
create_submission_response = fapi.create_submission(workspace_project,
                                                    workspace_name,
                                                    workflow_repo,
                                                    workflow_name,
                                                    entity=None,
                                                    etype=None,
                                                    expression=None,
                                                    use_callcache=True)
if create_submission_response.status_code != 201:
    print(create_submission_response.content)
    raise ferrors.FireCloudServerError(create_submission_response.status_code,
                                       create_submission_response.content)
else:
    print("Successfully created submission")
    print(create_submission_response.json())
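Once created, the submission can be polled with fapi.get_submission until it reaches the "Done" status; a minimal sketch reusing the variables from the snippet above:

import time

submission_id = create_submission_response.json()['submissionId']
while True:
    status = fapi.get_submission(workspace_project, workspace_name,
                                 submission_id).json()['status']
    print('Submission status:', status)
    if status == 'Done':
        break
    time.sleep(30)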