def ping(ctx, access_token):  # noqa: D301
    """Check connection to REANA server.

    The `ping` command allows to test connection to REANA server.

    Examples: \n
    \t $ reana-client ping

    :param ctx: click context (used to exit with status 1 on failure).
    :param access_token: access token of the current user.
    """
    try:
        from reana_client.api.client import ping as rs_ping
        from reana_client.utils import get_api_url
        logging.info('Connecting to {0}'.format(get_api_url()))
        response = rs_ping(access_token)
        # Red output signals a server-reported error, green a healthy server.
        msg_color = 'red' if response.get('error') else 'green'
        click.echo(
            click.style('REANA server: {0}\n'
                        'Authenticated as: {1} <{2}>\n'
                        'Status: {3}'.format(get_api_url(),
                                             response.get('full_name') or '',
                                             response.get('email'),
                                             response.get('status')),
                        fg=msg_color))
        logging.debug('Server response:\n{}'.format(response))
    except Exception as e:
        # Any failure (import, connection, bad response) is reported the same
        # way and terminates the command with exit code 1.
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        error_msg = ('Could not connect to the selected REANA cluster '
                     'server at {0}:\n{1}'.format(get_api_url(), e))
        click.echo(click.style(error_msg, fg='red'), err=True)
        ctx.exit(1)
def upload_file(workflow, file_, file_name, access_token):
    """Upload file to workflow workspace.

    :param workflow: name or id which identifies the workflow.
    :param file_: content of a file that will be uploaded.
    :param file_name: name of a file that will be uploaded.
    :param access_token: access token of the current user.
    :return: decoded JSON response from the server on a successful upload.
    :raises Exception: on connection, timeout, HTTP or any other request
        error, or when the server replies with a non-OK status.
    """
    from reana_client.utils import get_api_url
    try:
        # Build the endpoint path from the API client's operation spec.
        endpoint = current_rs_api_client.api.upload_file.operation.path_name.format(
            workflow_id_or_name=workflow)
        http_response = requests.post(
            urljoin(get_api_url(), endpoint),
            data=file_,
            params={
                "file_name": file_name,
                "access_token": access_token
            },
            headers={"Content-Type": "application/octet-stream"},
            # NOTE(review): TLS certificate verification is disabled here —
            # confirm this is intentional for the target deployment.
            verify=False,
        )
        if http_response.ok:
            return http_response.json()
        # Non-OK responses are surfaced via the server-provided message.
        raise Exception(http_response.json().get("message"))
    except requests.exceptions.ConnectionError:
        logging.debug("File could not be uploaded.", exc_info=True)
        raise Exception("Could not connect to the server {}".format(
            get_api_url()))
    except requests.exceptions.HTTPError as e:
        logging.debug("The server responded with an HTTP error code.",
                      exc_info=True)
        raise Exception("Unexpected response from the server: \n{}".format(
            e.response))
    except requests.exceptions.Timeout:
        logging.debug("Timeout while trying to establish connection.",
                      exc_info=True)
        raise Exception("The request to the server has timed out.")
    except requests.exceptions.RequestException:
        logging.debug("Something went wrong while connecting to the server.",
                      exc_info=True)
        raise Exception(
            "The request to the server has failed for an unknown reason.")
    except Exception as e:
        # Re-raise anything else (including the Exceptions raised above).
        raise e
def wrapper(*args, **kwargs):
    """Run ``func`` only when a REANA cluster URL is configured.

    Prints an error and exits with status 1 otherwise.
    """
    from reana_client.utils import get_api_url
    if get_api_url():
        return func(*args, **kwargs)
    click.secho(
        'REANA client is not connected to any REANA cluster.',
        fg='red', err=True)
    sys.exit(1)
def upload_file(workflow_id, file_, file_name, access_token):
    """Upload file to workflow workspace.

    :param workflow_id: UID which identifies the workflow.
    :param file_: content of a file that will be uploaded.
    :param file_name: name of a file that will be uploaded.
    :param access_token: access token of the current user.
    :return: decoded JSON response from the server.
    :raises Exception: on connection, timeout, HTTP or other request errors.
    """
    from reana_client.utils import get_api_url
    try:
        # Build the endpoint path from the API client's operation spec.
        endpoint = \
            current_rs_api_client.api.upload_file.operation.path_name.format(
                workflow_id_or_name=workflow_id)
        http_response = requests.post(
            urljoin(get_api_url(), endpoint),
            data=file_,
            params={
                'file_name': file_name,
                'access_token': access_token
            },
            headers={'Content-Type': 'application/octet-stream'},
            # NOTE(review): TLS certificate verification is disabled here —
            # confirm this is intentional.
            verify=False)
        # NOTE(review): the response status is not checked; error responses
        # are decoded and returned as if they succeeded — verify callers
        # handle this, or consider checking `http_response.ok`.
        return http_response.json()
    except requests.exceptions.ConnectionError:
        logging.debug('File could not be uploaded.', exc_info=True)
        raise Exception('Could not connect to the server {}'.format(
            get_api_url()))
    except requests.exceptions.HTTPError as e:
        logging.debug('The server responded with an HTTP error code.',
                      exc_info=True)
        raise Exception('Unexpected response from the server: \n{}'.format(
            e.response))
    except requests.exceptions.Timeout:
        logging.debug('Timeout while trying to establish connection.',
                      exc_info=True)
        raise Exception('The request to the server has timed out.')
    except requests.exceptions.RequestException:
        logging.debug('Something went wrong while connecting to the server.',
                      exc_info=True)
        raise Exception('The request to the server has failed for an '
                        'unknown reason.')
    except Exception as e:
        # Re-raise anything else (including the Exceptions raised above).
        raise e
def wrapper(*args, **kwargs):
    """Run ``func`` only when a REANA cluster URL is configured.

    Displays an error message and exits with status 1 otherwise.
    """
    from reana_client.utils import get_api_url
    if get_api_url():
        return func(*args, **kwargs)
    display_message(
        "REANA client is not connected to any REANA cluster.",
        msg_type="error",
    )
    sys.exit(1)
def ping(ctx, access_token):  # noqa: D301
    """Check connection to REANA server.

    The ``ping`` command allows to test connection to REANA server.

    Examples: \n
    \t $ reana-client ping

    :param ctx: click context (used to exit with status 1 on failure).
    :param access_token: access token of the current user.
    """
    try:
        from reana_client.api.client import ping as rs_ping
        from reana_client.utils import get_api_url

        logging.info("Connecting to {0}".format(get_api_url()))
        response = rs_ping(access_token)
        # Red output signals a server-reported error, green a healthy server.
        msg_color = "red" if response.get("error") else "green"
        click.secho(
            "REANA server: {0}\n"
            "REANA server version: {1}\n"
            "REANA client version: {2}\n"
            "Authenticated as: {3} <{4}>\n"
            "Status: {5}".format(
                get_api_url(),
                response.get("reana_server_version", ""),
                __version__,
                response.get("full_name", ""),
                response.get("email"),
                response.get("status"),
            ),
            fg=msg_color,
        )
        logging.debug("Server response:\n{}".format(response))
    except Exception as e:
        # Any failure (import, connection, bad response) is reported the same
        # way and terminates the command with exit code 1.
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "Could not connect to the selected REANA cluster "
            "server at {0}:\n{1}".format(get_api_url(), e),
            msg_type="error",
        )
        ctx.exit(1)
def workflow_create(ctx, file, name, skip_validation, access_token):  # noqa: D301
    """Create a new workflow.

    The `create` command allows to create a new workflow from reana.yaml
    specifications file. The file is expected to be located in the current
    working directory, or supplied via command-line -f option, see examples
    below.

    Examples: \n
    \t $ reana-client create\n
    \t $ reana-client create -w myanalysis\n
    \t $ reana-client create -w myanalysis -f myreana.yaml\n

    :param ctx: click context.
    :param file: path to the REANA specification file.
    :param name: name to give to the new workflow.
    :param skip_validation: whether to skip specification validation.
    :param access_token: access token of the current user.
    """
    from reana_client.api.client import create_workflow
    from reana_client.utils import get_api_url

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))

    # Check that name is not an UUIDv4.
    # Otherwise it would mess up `--workflow` flag usage because no distinction
    # could be made between the name and actual UUID of workflow.
    if is_uuid_v4(name):
        click.echo(click.style("Workflow name cannot be a valid UUIDv4", fg="red"),
                   err=True)
        # FIX: abort here — previously the command fell through and created
        # the workflow with the invalid name despite the error message.
        sys.exit(1)
    try:
        reana_specification = load_reana_spec(click.format_filename(file),
                                              skip_validation)
        logging.info("Connecting to {0}".format(get_api_url()))
        response = create_workflow(reana_specification, name, access_token)
        click.echo(click.style(response["workflow_name"], fg="green"))
        # Check if command is called from wrapper command: if so, propagate
        # the created workflow name to the parent context.
        if "invoked_by_subcommand" in ctx.parent.__dict__:
            ctx.parent.workflow_name = response["workflow_name"]
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style("Cannot create workflow {}: \n{}".format(name, str(e)),
                        fg="red"),
            err=True,
        )
        if "invoked_by_subcommand" in ctx.parent.__dict__:
            sys.exit(1)
def download_file(workflow, file_name, access_token):
    """Download the requested file if it exists.

    :param workflow: name or id which identifies the workflow.
    :param file_name: file name or path to the file requested.
    :param access_token: access token of the current user.
    :return: Tuple containing file binary content and filename.
    :raises Exception: on HTTP errors or non-200 responses (with the
        server-provided message when available).
    """
    try:
        from reana_client.utils import get_api_url

        # Raise urllib3's log threshold so only CRITICAL records get through.
        logging.getLogger("urllib3").setLevel(logging.CRITICAL)
        # Build the endpoint path from the API client's operation spec.
        endpoint = current_rs_api_client.api.download_file.operation.path_name.format(
            workflow_id_or_name=workflow, file_name=file_name)
        http_response = requests.get(
            urljoin(get_api_url(), endpoint),
            params={
                "file_name": file_name,
                "access_token": access_token
            },
            # NOTE(review): TLS certificate verification is disabled here —
            # confirm this is intentional.
            verify=False,
        )
        # Prefer the filename advertised by the server (Content-Disposition)
        # over the requested name, when one is present.
        if "Content-Disposition" in http_response.headers:
            content_disposition = http_response.headers.get(
                "Content-Disposition")
            value, params = cgi.parse_header(content_disposition)
            file_name = params.get("filename", "downloaded_file")
        if http_response.status_code == 200:
            return http_response.content, file_name
        else:
            raise Exception("Error {status_code} {reason} {message}".format(
                status_code=http_response.status_code,
                reason=http_response.reason,
                message=http_response.json().get("message"),
            ))
    except HTTPError as e:
        logging.debug("Output file could not be downloaded: "
                      "\nStatus: {}\nReason: {}\n"
                      "Message: {}".format(e.response.status_code,
                                           e.response.reason,
                                           e.response.json()["message"]))
        raise Exception(e.response.json()["message"])
    except Exception as e:
        # Re-raise anything else (including the Exceptions raised above).
        raise e
def workflow_delete(ctx, workflow, all_runs, workspace, access_token):  # noqa: D301
    """Delete a workflow.

    The `delete` command allows to remove workflow runs from the database and
    the workspace. By default, the command removes the workflow and all its
    cached information and hides the workflow from the workflow list. Note
    that workflow workspace will still be accessible until you use
    `--include-workspace` flag. Note also that you can remove all past runs of
    a workflow by specifying `--include-all-runs` flag.

    Example: \n
    \t $ reana-client delete -w myanalysis.42 \n
    \t $ reana-client delete -w myanalysis.42 --include-all-runs \n
    \t $ reana-client delete -w myanalysis.42 --include-workspace

    :param ctx: click context.
    :param workflow: name or id which identifies the workflow.
    :param all_runs: whether to delete all runs of the workflow.
    :param workspace: whether to delete the workspace as well.
    :param access_token: access token of the current user.
    """
    from reana_client.api.client import delete_workflow, get_workflow_status
    from reana_client.utils import get_api_url

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            # FIX: the server response was previously bound to an unused
            # local variable; the call is kept for its side effect only.
            delete_workflow(workflow, all_runs, workspace, access_token)
            if all_runs:
                # "name.run_number" -> report on the bare workflow name.
                message = "All workflows named '{}' have been deleted.".format(
                    workflow.split(".")[0])
            else:
                message = get_workflow_status_change_msg(workflow, "deleted")
            click.secho(message, fg="green")
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style("Cannot delete workflow {} \n{}".format(
                    workflow, str(e)),
                            fg="red"),
                err=True,
            )
def download_file(workflow_id, file_name, access_token):
    """Download the requested file if it exists.

    :param workflow_id: UUID which identifies the workflow.
    :param file_name: File name or path to the file requested.
    :param access_token: access token of the current user.
    :returns: binary content of the requested file.
    :raises Exception: on HTTP errors or non-200 responses (with the
        server-provided message when available).
    """
    try:
        from reana_client.utils import get_api_url

        # Raise urllib3's log threshold so only CRITICAL records get through.
        logging.getLogger("urllib3").setLevel(logging.CRITICAL)
        # Build the endpoint path from the API client's operation spec.
        endpoint = current_rs_api_client.api.download_file.operation.path_name.format(
            workflow_id_or_name=workflow_id, file_name=file_name)
        http_response = requests.get(
            urljoin(get_api_url(), endpoint),
            params={
                "file_name": file_name,
                "access_token": access_token
            },
            # NOTE(review): TLS certificate verification is disabled here —
            # confirm this is intentional.
            verify=False,
        )
        if http_response.status_code == 200:
            return http_response.content
        else:
            raise Exception("Error {status_code} {reason} {message}".format(
                status_code=http_response.status_code,
                reason=http_response.reason,
                message=http_response.json().get("message"),
            ))
    except HTTPError as e:
        logging.debug("Output file could not be downloaded: "
                      "\nStatus: {}\nReason: {}\n"
                      "Message: {}".format(e.response.status_code,
                                           e.response.reason,
                                           e.response.json()["message"]))
        raise Exception(e.response.json()["message"])
    except Exception as e:
        # Re-raise anything else (including the Exceptions raised above).
        raise e
def cwl_runner(ctx, quiet, outdir, basedir, processfile, jobfile, access_token):
    """Run CWL files in a standard format <workflow.cwl> <job.json>.

    Creates a workflow from the given CWL process file and job file, uploads
    its file dependencies, starts it, polls its logs until completion and
    writes the final JSON output to stdout.
    """
    import json

    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        create_workflow,
        get_workflow_logs,
        start_workflow,
        upload_file,
    )

    logging.basicConfig(
        format="[%(levelname)s] %(message)s",
        stream=sys.stderr,
        level=logging.INFO if quiet else logging.DEBUG,
    )
    try:
        # Default the base directory to the process file's directory.
        basedir = basedir or os.path.abspath(os.path.dirname(processfile))
        reana_spec = {"workflow": {"type": "cwl"}}
        job = {}
        if jobfile:
            with open(jobfile) as f:
                job = yaml.load(f, Loader=yaml.FullLoader)
        if processfile:
            reana_spec["inputs"] = {"parameters": job}
            reana_spec["workflow"]["specification"] = load_workflow_spec(
                reana_spec["workflow"]["type"], processfile)
        # Rewrite file locations inside the CWL spec for server-side use.
        reana_spec["workflow"]["specification"] = replace_location_in_cwl_spec(
            reana_spec["workflow"]["specification"])
        logging.info("Connecting to {0}".format(get_api_url()))
        # Round-trip through JSON to normalize the specification structure.
        reana_specification = json.loads(json.dumps(reana_spec, sort_keys=True))
        response = create_workflow(reana_specification, "cwl-test", access_token)
        logging.error(response)
        workflow_name = response["workflow_name"]
        workflow_id = response["workflow_id"]
        logging.info("Workflow {0}/{1} has been created.".format(
            workflow_name, workflow_id))
        # Collect and upload the file dependencies of both CWL inputs.
        file_dependencies_list = []
        for cwlobj in [processfile, jobfile]:
            if not cwlobj:
                continue
            file_dependencies_obj = get_file_dependencies_obj(cwlobj, basedir)
            file_dependencies_list.append(file_dependencies_obj)
        files_to_upload = findfiles(file_dependencies_list)
        upload_files(files_to_upload, basedir, workflow_id, access_token)
        response = start_workflow(workflow_id, access_token,
                                  reana_spec["inputs"]["parameters"])
        logging.error(response)
        first_logs = ""
        # Poll the workflow logs once per second, printing only the newly
        # appended portion, until a terminal marker appears in the logs.
        while True:
            sleep(1)
            logging.error("Polling workflow logs")
            response = get_workflow_logs(workflow_id, access_token)
            logs = response["logs"]
            if logs != first_logs:
                logging.error(logs[len(first_logs):])
            first_logs = logs
            if ("Final process status" in logs
                    or "Traceback (most recent call last)" in logs):
                # click.echo(response['status'])
                break
        try:
            import ast

            # Extract the JSON payload delimited by "FinalOutput" markers.
            out = (re.search(r"FinalOutput[\s\S]*?FinalOutput",
                             logs).group().replace("FinalOutput", ""))
            json_output = out.encode("utf8").decode("unicode_escape")
        except AttributeError:
            # re.search returned None: no final output found in the logs.
            logging.error("Workflow execution failed")
            sys.exit(1)
        except Exception:
            logging.error(traceback.format_exc())
            sys.exit(1)
        sys.stdout.write(json_output)
        sys.stdout.write("\n")
        sys.stdout.flush()
    except HTTPServerError as e:
        logging.error(traceback.print_exc())
        logging.error(e)
    except Exception:
        logging.error(traceback.print_exc())
def cwl_runner(ctx, quiet, outdir, basedir, processfile, jobfile, access_token):
    """Run CWL files in a standard format <workflow.cwl> <job.json>.

    Creates a workflow from the given CWL process/job files, uploads its file
    dependencies, starts it, polls its logs until completion and writes the
    final JSON output to stdout.
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (create_workflow, get_workflow_logs,
                                         start_workflow, upload_file)
    logging.basicConfig(format='[%(levelname)s] %(message)s',
                        stream=sys.stderr,
                        level=logging.INFO if quiet else logging.DEBUG)
    try:
        # Default the base directory to the process file's directory.
        basedir = basedir or os.path.abspath(os.path.dirname(processfile))
        if processfile:
            with open(jobfile) as f:
                reana_spec = {
                    "workflow": {
                        "type": "cwl"
                    },
                    "inputs": {
                        "parameters": {
                            "input": yaml.load(f, Loader=yaml.FullLoader)
                        }
                    }
                }
            reana_spec['workflow']['spec'] = load_workflow_spec(
                reana_spec['workflow']['type'],
                processfile,
            )
        else:
            # Jobfile-only mode: the tool reference is taken from 'cwl:tool'.
            with open(jobfile) as f:
                job = yaml.load(f, Loader=yaml.FullLoader)
            # FIX: the placeholder parameters must live under an 'inputs'
            # key; the previous top-level "parameters" key made the later
            # reana_spec['inputs']['parameters'] assignment raise KeyError.
            reana_spec = {
                "workflow": {
                    "type": "cwl"
                },
                "inputs": {
                    "parameters": {
                        "input": ""
                    }
                }
            }
            reana_spec['workflow']['spec'] = load_workflow_spec(
                reana_spec['workflow']['type'], job['cwl:tool'])
            del job['cwl:tool']
            reana_spec['inputs']['parameters'] = {'input': job}
        # Rewrite file locations inside the CWL spec for server-side use.
        reana_spec['workflow']['spec'] = replace_location_in_cwl_spec(
            reana_spec['workflow']['spec'])
        logging.info('Connecting to {0}'.format(get_api_url()))
        response = create_workflow(reana_spec, 'cwl-test', access_token)
        logging.error(response)
        workflow_name = response['workflow_name']
        workflow_id = response['workflow_id']
        logging.info('Workflow {0}/{1} has been created.'.format(
            workflow_name, workflow_id))
        # Collect and upload the file dependencies of both CWL inputs.
        file_dependencies_list = []
        for cwlobj in [processfile, jobfile]:
            file_dependencies_list.append(
                get_file_dependencies_obj(cwlobj, basedir))
        files_to_upload = findfiles(file_dependencies_list)
        for cwl_file_object in files_to_upload:
            file_path = cwl_file_object.get('location')
            abs_file_path = os.path.join(basedir, file_path)
            with open(abs_file_path, 'r') as f:
                upload_file(workflow_id, f, file_path, access_token)
                logging.error('File {} uploaded.'.format(file_path))
        response = start_workflow(workflow_id, access_token,
                                  reana_spec['inputs']['parameters'])
        logging.error(response)
        first_logs = ""
        # Poll the workflow logs once per second, printing only the newly
        # appended portion, until a terminal marker appears in the logs.
        while True:
            sleep(1)
            logging.error('Polling workflow logs')
            response = get_workflow_logs(workflow_id, access_token)
            logs = response['logs']
            if logs != first_logs:
                logging.error(logs[len(first_logs):])
            first_logs = logs
            if "Final process status" in logs or \
                    "Traceback (most recent call last)" in logs:
                # click.echo(response['status'])
                break
        try:
            # FIX: use a raw string for the regex (avoids invalid escape
            # sequence warnings for \S and \s).
            out = re.search(r"success{[\S\s]*",
                            logs).group().replace("success", "")
            import ast
            import json
            json_output = json.dumps(ast.literal_eval(str(out)))
        except AttributeError:
            # re.search returned None: no success payload found in the logs.
            logging.error("Workflow execution failed")
            sys.exit(1)
        except Exception:
            logging.error(traceback.format_exc())
            sys.exit(1)
        sys.stdout.write(json_output)
        sys.stdout.write("\n")
        sys.stdout.flush()
    except HTTPServerError as e:
        logging.error(traceback.print_exc())
        logging.error(e)
    except Exception:
        logging.error(traceback.print_exc())
def workflow_restart(
    ctx, workflow, access_token, parameters, options, file
):  # noqa: D301
    """Restart previously run workflow.

    The ``restart`` command allows to restart a previous workflow on the same
    workspace.

    Note that workflow restarting can be used in a combination with operational
    options ``FROM`` and ``TARGET``. You can also pass a modified workflow
    specification with ``-f`` or ``--file`` flag.

    You can furthermore use modified input prameters using ``-p`` or
    ``--parameters`` flag and by setting additional operational options using
    ``-o`` or ``--options``.  The input parameters and operational options can
    be repetitive.

    Examples: \n
    \t $ reana-client restart -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client restart -w myanalysis.42 -p myparam=myvalue\n
    \t $ reana-client restart -w myanalysis.42 -o TARGET=gendata\n
    \t $ reana-client restart -w myanalysis.42 -o FROM=fitdata
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        get_workflow_parameters,
        get_workflow_status,
        start_workflow,
    )

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))

    parsed_parameters = {
        "input_parameters": parameters,
        "operational_options": options,
        "restart": True,
    }
    if file:
        parsed_parameters["reana_specification"] = load_reana_spec(
            click.format_filename(file)
        )
    if workflow:
        if parameters or options:
            try:
                # Prefer the locally supplied specification (if any) over the
                # parameters stored on the server when validating input.
                if "reana_specification" in parsed_parameters:
                    workflow_type = parsed_parameters["reana_specification"][
                        "workflow"
                    ]["type"]
                    original_parameters = (
                        parsed_parameters["reana_specification"]
                        .get("inputs", {})
                        .get("parameters", {})
                    )
                else:
                    response = get_workflow_parameters(workflow, access_token)
                    workflow_type = response["type"]
                    original_parameters = response["parameters"]
                parsed_parameters["operational_options"] = validate_operational_options(
                    workflow_type, parsed_parameters["operational_options"]
                )
                parsed_parameters["input_parameters"] = validate_input_parameters(
                    parsed_parameters["input_parameters"], original_parameters
                )
            except REANAValidationError as e:
                display_message(e.message, msg_type="error")
                sys.exit(1)
            except Exception as e:
                # Non-validation failures are reported but do not abort.
                display_message(
                    "Could not apply given input parameters: "
                    "{0} \n{1}".format(parameters, str(e)),
                    msg_type="error",
                )
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            response = start_workflow(workflow, access_token, parsed_parameters)
            # A restart creates a new run: rebuild "name.run_number".
            workflow = response["workflow_name"] + "." + str(response["run_number"])
            current_status = get_workflow_status(workflow, access_token).get("status")
            display_message(
                get_workflow_status_change_msg(workflow, current_status),
                msg_type="success",
            )
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            display_message(
                "Cannot start workflow {}: \n{}".format(workflow, str(e)),
                msg_type="error",
            )
            if "invoked_by_subcommand" in ctx.parent.__dict__:
                sys.exit(1)
def workflow_start(
    ctx, workflow, access_token, parameters, options, follow
):  # noqa: D301
    """Start previously created workflow.

    The ``start`` command allows to start previously created workflow. The
    workflow execution can be further influenced by passing input prameters
    using ``-p`` or ``--parameters`` flag and by setting additional operational
    options using ``-o`` or ``--options``.  The input parameters and operational
    options can be repetitive. For example, to disable caching for the Serial
    workflow engine, you can set ``-o CACHE=off``.

    Examples: \n
    \t $ reana-client start -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client start -w myanalysis.42 -p myparam1=myvalue1 -o CACHE=off
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        get_workflow_parameters,
        get_workflow_status,
        start_workflow,
    )

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))

    parsed_parameters = {"input_parameters": parameters, "operational_options": options}
    if workflow:
        if parameters or options:
            try:
                # Validate supplied parameters against the server-side spec.
                response = get_workflow_parameters(workflow, access_token)
                workflow_type = response["type"]
                original_parameters = response["parameters"]
                validate_operational_options(
                    workflow_type, parsed_parameters["operational_options"]
                )
                parsed_parameters["input_parameters"] = validate_input_parameters(
                    parsed_parameters["input_parameters"], original_parameters
                )
            except REANAValidationError as e:
                display_message(e.message, msg_type="error")
                sys.exit(1)
            except Exception as e:
                # Non-validation failures are reported but do not abort.
                display_message(
                    "Could not apply given input parameters: "
                    "{0} \n{1}".format(parameters, str(e)),
                    msg_type="error",
                )
        try:
            logging.info("Connecting to {0}".format(get_api_url()))
            response = start_workflow(workflow, access_token, parsed_parameters)
            current_status = get_workflow_status(workflow, access_token).get("status")
            display_message(
                get_workflow_status_change_msg(workflow, current_status),
                msg_type="success",
            )
            # With --follow, poll until the workflow leaves "running".
            if follow:
                while "running" in current_status:
                    time.sleep(TIMECHECK)
                    current_status = get_workflow_status(workflow, access_token).get(
                        "status"
                    )
                    display_message(
                        get_workflow_status_change_msg(workflow, current_status),
                        msg_type="success",
                    )
                    if "finished" in current_status:
                        if follow:
                            display_message(
                                "Listing workflow output files...",
                                msg_type="info",
                            )
                            ctx.invoke(
                                get_files,
                                workflow=workflow,
                                access_token=access_token,
                                output_format="url",
                            )
                        sys.exit(0)
                    elif "failed" in current_status or "stopped" in current_status:
                        sys.exit(1)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            display_message(
                "Cannot start workflow {}: \n{}".format(workflow, str(e)),
                msg_type="error",
            )
            if "invoked_by_subcommand" in ctx.parent.__dict__:
                sys.exit(1)
def workflow_start(ctx, workflow, access_token,
                   parameters, options, follow):  # noqa: D301
    """Start previously created workflow.

    The `start` command allows to start previously created workflow. The
    workflow execution can be further influenced by passing input prameters
    using `-p` or `--parameters` flag and by setting additional operational
    options using `-o` or `--options`.  The input parameters and operational
    options can be repetitive. For example, to disable caching for the Serial
    workflow engine, you can set `-o CACHE=off`.

    Examples: \n
    \t $ reana-client start -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client start -w myanalysis.42 -p myparam1=myvalue1 -o CACHE=off
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (get_workflow_parameters,
                                         get_workflow_status, start_workflow)
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))

    parsed_parameters = {
        'input_parameters': parameters,
        'operational_options': options
    }
    if workflow:
        if parameters or options:
            try:
                # Validate supplied parameters against the server-side spec.
                response = get_workflow_parameters(workflow, access_token)
                workflow_type = response['type']
                original_parameters = response['parameters']
                validate_operational_options(
                    workflow_type, parsed_parameters['operational_options'])
                parsed_parameters['input_parameters'] = \
                    validate_input_parameters(
                        parsed_parameters['input_parameters'],
                        original_parameters)
            except REANAValidationError as e:
                click.secho(e.message, err=True, fg='red')
                sys.exit(1)
            except Exception as e:
                # Non-validation failures are reported but do not abort.
                click.secho('Could not apply given input parameters: '
                            '{0} \n{1}'.format(parameters, str(e)), err=True)
        try:
            logging.info('Connecting to {0}'.format(get_api_url()))
            response = start_workflow(workflow, access_token,
                                      parsed_parameters)
            current_status = get_workflow_status(workflow,
                                                 access_token).get('status')
            click.secho(get_workflow_status_change_msg(workflow,
                                                       current_status),
                        fg='green')
            # With --follow, poll until the workflow leaves "running".
            if follow:
                while 'running' in current_status:
                    time.sleep(TIMECHECK)
                    current_status = get_workflow_status(
                        workflow, access_token).get('status')
                    click.secho(get_workflow_status_change_msg(
                        workflow, current_status), fg='green')
                    if 'finished' in current_status:
                        if follow:
                            click.secho(
                                '[INFO] Listing workflow output '
                                'files...', bold=True)
                            ctx.invoke(get_files,
                                       workflow=workflow,
                                       access_token=access_token,
                                       output_format='url')
                        sys.exit(0)
                    elif 'failed' in current_status or \
                            'stopped' in current_status:
                        sys.exit(1)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style('Cannot start workflow {}: \n{}'.format(
                workflow, str(e)), fg='red'), err=True)
            if 'invoked_by_subcommand' in ctx.parent.__dict__:
                sys.exit(1)
def __init__(self):
    """Initialize config variables."""
    # Base URL of the REANA server this client talks to.
    self.reana_server_url = get_api_url()
def workflow_restart(ctx, workflow, access_token,
                     parameters, options, file):  # noqa: D301
    """Restart previously run workflow.

    The `restart` command allows to restart a previous workflow on the same
    workspace.

    Note that workflow restarting can be used in a combination with operational
    options ``FROM`` and ``TARGET``. You can also pass a modified workflow
    specification with ``-f`` or `--file`` flag.

    You can furthermore use modified input prameters using `-p` or
    `--parameters` flag and by setting additional operational options using
    `-o` or `--options`.  The input parameters and operational options can be
    repetitive.

    Examples: \n
    \t $ reana-client restart -w myanalysis.42 -p sleeptime=10 -p myparam=4 \n
    \t $ reana-client restart -w myanalysis.42 -p myparam=myvalue\n
    \t $ reana-client restart -w myanalysis.42 -o TARGET=gendata\n
    \t $ reana-client restart -w myanalysis.42 -o FROM=fitdata
    """
    from reana_client.utils import get_api_url
    from reana_client.api.client import (get_workflow_parameters,
                                         get_workflow_status, start_workflow)
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))

    parsed_parameters = {
        'input_parameters': parameters,
        'operational_options': options,
        'restart': True
    }
    if file:
        parsed_parameters['reana_specification'] = \
            load_reana_spec(click.format_filename(file))
    if workflow:
        if parameters or options:
            try:
                # Prefer the locally supplied specification (if any) over the
                # parameters stored on the server when validating input.
                if 'reana_specification' in parsed_parameters:
                    workflow_type = \
                        parsed_parameters['reana_specification']['workflow'][
                            'type']
                    original_parameters = \
                        parsed_parameters['reana_specification'].get(
                            'inputs', {}).get('parameters', {})
                else:
                    response = get_workflow_parameters(workflow, access_token)
                    workflow_type = response['type']
                    original_parameters = response['parameters']
                parsed_parameters['operational_options'] = \
                    validate_operational_options(
                        workflow_type,
                        parsed_parameters['operational_options'])
                parsed_parameters['input_parameters'] = \
                    validate_input_parameters(
                        parsed_parameters['input_parameters'],
                        original_parameters)
            except REANAValidationError as e:
                click.secho(e.message, err=True, fg='red')
                sys.exit(1)
            except Exception as e:
                # Non-validation failures are reported but do not abort.
                click.secho('Could not apply given input parameters: '
                            '{0} \n{1}'.format(parameters, str(e)), err=True)
        try:
            logging.info('Connecting to {0}'.format(get_api_url()))
            response = start_workflow(workflow, access_token,
                                      parsed_parameters)
            # A restart creates a new run: rebuild "name.run_number".
            workflow = response['workflow_name'] + '.' + \
                str(response['run_number'])
            current_status = get_workflow_status(workflow,
                                                 access_token).get('status')
            click.secho(get_workflow_status_change_msg(workflow,
                                                       current_status),
                        fg='green')
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style('Cannot start workflow {}: \n{}'.format(
                workflow, str(e)), fg='red'), err=True)
            if 'invoked_by_subcommand' in ctx.parent.__dict__:
                sys.exit(1)