def cli(ctx, workflow_dict):
    """Import a new workflow from a dictionary representing a previously exported workflow.

    Output:

    """
    # Decode the JSON payload first, then hand the dict to the Galaxy client.
    workflow = json_loads(workflow_dict)
    return ctx.gi.workflows.import_workflow_dict(workflow)
def cli(ctx, job_info):
    """Return jobs for the current user based payload content.

    Output:

    """
    # Parse the caller-supplied JSON search payload before querying Galaxy.
    search_payload = json_loads(job_info)
    return ctx.gi.jobs.search_jobs(search_payload)
def cli(ctx, history_id, tool_id, tool_inputs):
    """Run the tool identified by ``tool_id`` in the history ``history_id``, using the ``dict`` ``tool_inputs`` as inputs.

    Output:

    """
    # tool_inputs arrives as a JSON string; decode it into the dict the API expects.
    inputs = json_loads(tool_inputs)
    return ctx.gi.tools.run_tool(history_id, tool_id, inputs)
def cli(ctx, job_info):
    """Return jobs for the current user based payload content.

    Output:

        list of dictionaries containing summary job information of the jobs
        that match the requested job run

    This method scans the list of previously run jobs for records whose input
    parameters and datasets exactly match the payload. It can be used to avoid
    repeating work by simply recycling the old results.
    """
    # Decode the JSON search criteria, then delegate to the jobs client.
    criteria = json_loads(job_info)
    return ctx.gi.jobs.search_jobs(criteria)
def cli(ctx, workflow_dict, publish=False):
    """Import a new workflow from a dictionary representing a previously exported workflow.

    Output:

        Information about the imported workflow.
        For example::

            {u'name': 'Training: 16S rRNA sequencing with mothur: main tutorial',
             u'tags': [],
             u'deleted': false,
             u'latest_workflow_uuid': '368c6165-ccbe-4945-8a3c-d27982206d66',
             u'url': '/api/workflows/94bac0a90086bdcf',
             u'number_of_steps': 44,
             u'published': false,
             u'owner': 'jane-doe',
             u'model_class': 'StoredWorkflow',
             u'id': '94bac0a90086bdcf'}
    """
    # Decode the exported-workflow JSON, then import it, forwarding the
    # publish flag unchanged to the Galaxy client.
    workflow = json_loads(workflow_dict)
    return ctx.gi.workflows.import_workflow_dict(workflow, publish=publish)
def cli(ctx, history_id, tool_id, tool_inputs):
    """Run the tool identified by ``tool_id`` in the history ``history_id``, using the ``dict`` ``tool_inputs`` as inputs.

    Output:

        Information about outputs and job
        For example::

            {
              "outputs": [
                {
                  "misc_blurb": "queued",
                  "peek": null,
                  "update_time": "2019-05-08T12:26:16.069798",
                  "data_type": "galaxy.datatypes.tabular.Tabular",
                  "tags": [],
                  "deleted": false,
                  "history_id": "df8fe5ddadbf3ab1",
                  "metadata_column_names": null,
                  "metadata_delimiter": "	",
                  "visible": true,
                  "genome_build": "?",
                  "create_time": "2019-05-08T12:26:15.997739",
                  "hid": 42,
                  "file_size": 0,
                  "metadata_data_lines": null,
                  "file_ext": "tabular",
                  "id": "aeb65580396167f3",
                  "misc_info": null,
                  "hda_ldda": "hda",
                  "history_content_type": "dataset",
                  "name": "Cut on data 1",
                  "metadata_columns": null,
                  "uuid": "d91d10af-7546-45be-baa9-902010661466",
                  "state": "new",
                  "metadata_comment_lines": null,
                  "model_class": "HistoryDatasetAssociation",
                  "metadata_dbkey": "?",
                  "output_name": "out_file1",
                  "purged": false,
                  "metadata_column_types": null
                }
              ],
              "implicit_collections": [],
              "jobs": [
                {
                  "tool_id": "cut1",
                  "update_time": "2019-05-08T12:26:16.067389",
                  "exit_code": null,
                  "state": "new",
                  "create_time": "2019-05-08T12:26:16.067372",
                  "model_class": "Job",
                  "id": "7dd125b61b35d782"
                }
              ],
              "output_collections": []
            }

        The ``tool_inputs`` dict should contain input datasets and parameters
        in the (largely undocumented) format used by the Galaxy API.
        Some examples can be found in `Galaxy's API test suite
        <https://github.com/galaxyproject/galaxy/blob/dev/test/api/test_tools.py>`_.
    """
    # Decode the JSON tool-inputs string, then dispatch the tool run.
    inputs = json_loads(tool_inputs)
    return ctx.gi.tools.run_tool(history_id, tool_id, inputs)