def list_users(ctx, id, email, user_access_token, admin_access_token, output_format):
    """List users according to the search criteria."""
    try:
        users = _get_users(id, email, user_access_token, admin_access_token)
        headers = ["id", "email", "access_token", "access_token_status"]
        # One display row per user; ids/tokens are stringified for output.
        data = [
            (
                str(user.id_),
                user.email,
                str(user.access_token),
                str(user.access_token_status),
            )
            for user in users
        ]
        if output_format:
            # Export via tablib in the requested serialization format.
            dataset = tablib.Dataset()
            dataset.headers = headers
            for row in data:
                dataset.append(row)
            click.echo(dataset.export(output_format))
        else:
            click_table_printer(headers, [], data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style("User could not be retrieved: \n{}".format(str(e)), fg="red"),
            err=True,
        )
def workflow_disk_usage(ctx, workflow, access_token, summarize):
    """Get disk usage of a workflow."""
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for name in ctx.params:
        logging.debug('{param}: {value}'.format(param=name,
                                                value=ctx.params[name]))
    if not access_token:
        # An access token is mandatory; abort early with exit code 1.
        click.echo(click.style(ERROR_MESSAGES['missing_access_token'],
                               fg='red'), err=True)
        sys.exit(1)
    if not workflow:
        return
    try:
        response = get_workflow_disk_usage(
            workflow, {'summarize': summarize}, access_token)
        # Each entry is rendered as a relative path prefixed with '.'.
        rows = [
            [entry['size'], '.{}'.format(entry['name'])]
            for entry in response['disk_usage_info']
        ]
        click_table_printer(['size', 'name'], [], rows)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Disk usage could not be retrieved: \n{}'.format(str(e)),
            fg='red'), err=True)
def workflow_disk_usage(ctx, workflow, access_token, summarize, block_size):  # noqa: D301
    """Get workspace disk usage.

    The `du` command allows to check the disk usage of given workspace.

    Examples: \n
    \t $ reana-client du -w myanalysis.42 -s \n
    \t $ reana-client du -w myanalysis.42 --bytes
    """
    from reana_client.api.client import get_workflow_disk_usage
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            parameters = {'summarize': summarize, 'block_size': block_size}
            response = get_workflow_disk_usage(workflow, parameters,
                                               access_token)
            headers = ['size', 'name']
            data = []
            for disk_usage_info in response['disk_usage_info']:
                # Skip entries whose name starts with a FILES_BLACKLIST prefix.
                if not disk_usage_info['name'].startswith(FILES_BLACKLIST):
                    # Prefix with '.' so names render as workspace-relative paths.
                    data.append([disk_usage_info['size'],
                                 '.{}'.format(disk_usage_info['name'])])
            click_table_printer(headers, [], data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style('Disk usage could not be retrieved: \n{}'
                            .format(str(e)), fg='red'),
                err=True)
def secrets_list(access_token):  # noqa: D301
    """List user secrets.

    Examples: \n
    \t $ reana-client secrets-list
    """
    try:
        # Probe the API client configuration; the value itself is unused.
        _url = current_rs_api_client.swagger_spec.api_url
    except MissingAPIClientConfiguration:
        # Fix: the exception was bound to an unused name (`as e`).
        click.secho('REANA client is not connected to any REANA cluster.',
                    fg='red', err=True)
        sys.exit(1)
    if not access_token:
        click.echo(click.style(ERROR_MESSAGES['missing_access_token'],
                               fg='red'), err=True)
        sys.exit(1)
    try:
        secrets = list_secrets(access_token)
        headers = ['name', 'type']
        data = []
        for secret_ in secrets:
            data.append(list(map(str, [secret_['name'], secret_['type']])))
        # Headers double as the filter list, so exactly these columns print.
        click_table_printer(headers, headers, data)
    except Exception as e:
        logging.debug(str(e), exc_info=True)
        click.echo(click.style('Something went wrong while listing secrets',
                               fg='red'), err=True)
def get_users(ctx, id, email, user_access_token, admin_access_token,
              output_format):
    """Return user information. Requires the token of an administrator."""
    try:
        users = _get_users(id, email, user_access_token, admin_access_token)
        headers = ['id', 'email', 'access_token']
        rows = [(str(u.id_), u.email, u.access_token) for u in users]
        if output_format:
            # Serialize through tablib when a format (json/csv/...) was asked.
            dataset = tablib.Dataset()
            dataset.headers = headers
            for row in rows:
                dataset.append(row)
            click.echo(dataset.export(output_format))
        else:
            click_table_printer(headers, [], rows)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style('User could not be retrieved: \n{}'
                        .format(str(e)), fg='red'), err=True)
def test_click_table_printer_filter_wrong_header(capsys):
    """Test click_table_printer with filter when header is non existing."""
    # A filter naming no real header suppresses every column, leaving
    # only the blank lines in the captured output.
    column_names = ['header_one', 'header_two']
    rows = [
        ['very_very_long_row_one', 'second_column'],
        ['very_very_long_row_two', 'second_column'],
    ]
    click_table_printer(column_names, ['badheader'], rows)
    captured = capsys.readouterr()
    assert captured.out == '\n\n\n'
def test_click_table_printer_filter(capsys):
    """Test click_table_printer with filter."""
    column_names = ['header_one', 'header_two']
    rows = [
        ['very_very_long_row_one', 'second_column'],
        ['very_very_long_row_two', 'second_column'],
    ]
    # Keep only the second column via the filter argument.
    click_table_printer(column_names, [column_names[1]], rows)
    captured = capsys.readouterr()
    assert captured.out == 'HEADER_TWO \nsecond_column\nsecond_column\n'
def test_click_table_printer(capsys):
    """Test click_table_printer."""
    column_names = ['header_one']
    rows = [['very_very_long_row_one'], ['very_very_long_row_two']]
    click_table_printer(column_names, [], rows)
    captured = capsys.readouterr()
    # Header is upper-cased and padded to the widest cell.
    expected = 'HEADER_ONE \nvery_very_long_row_one\nvery_very_long_row_two\n'
    assert captured.out == expected
def test_click_table_printer(capsys):
    """Test click_table_printer."""
    column_names = ["header_one"]
    rows = [["very_very_long_row_one"], ["very_very_long_row_two"]]
    click_table_printer(column_names, [], rows)
    captured = capsys.readouterr()
    # Header is upper-cased and padded to the widest cell in the column.
    expected = (
        "HEADER_ONE \n"
        "very_very_long_row_one\n"
        "very_very_long_row_two\n"
    )
    assert captured.out == expected
def cli_verify_backend(ctx):
    """Verify configuration of running cluster backend."""
    logging.debug(ctx.obj.backend.cluster_spec)
    compatibility = ctx.obj.backend.verify_backend()
    headers = ['kubernetes version', 'is compatible']
    # Single summary row: detected k8s version and the compatibility verdict.
    row = [str(compatibility['current_version']),
           str(compatibility['is_compatible'])]
    click_table_printer(headers, [], [row])
def test_click_table_printer_filter_wrong_header(capsys):
    """Test click_table_printer with filter when header is non existing."""
    # Filtering on a header that does not exist drops every column,
    # so only empty lines are printed.
    column_names = ["header_one", "header_two"]
    rows = [
        ["very_very_long_row_one", "second_column"],
        ["very_very_long_row_two", "second_column"],
    ]
    click_table_printer(column_names, ["badheader"], rows)
    captured = capsys.readouterr()
    assert captured.out == "\n\n\n"
def test_click_table_printer_filter(capsys):
    """Test click_table_printer with filter."""
    column_names = ["header_one", "header_two"]
    rows = [
        ["very_very_long_row_one", "second_column"],
        ["very_very_long_row_two", "second_column"],
    ]
    # Only the filtered column should survive in the output.
    click_table_printer(column_names, [column_names[1]], rows)
    captured = capsys.readouterr()
    assert captured.out == "HEADER_TWO \nsecond_column\nsecond_column\n"
def cli_verify_components(ctx, namespace):
    """Verify configuration of components deployed in a running cluster."""
    logging.debug(ctx.obj.backend.cluster_spec)
    matching_components = ctx.obj.backend.verify_components(namespace)
    headers = ['component', 'image']
    rows = []
    for name, image_ok in matching_components.items():
        # A truthy value means the deployed image matches the spec.
        verdict = 'match' if image_ok else 'mismatch'
        rows.append([str(name), str(verdict)])
    click_table_printer(headers, [], rows)
def create_user(ctx, email, user_access_token, admin_access_token):
    """Create a new user. Requires the token of an administrator."""
    try:
        new_user = _create_user(email, user_access_token, admin_access_token)
        columns = ['id', 'email', 'access_token']
        row = (str(new_user.id_), new_user.email, new_user.access_token)
        click.echo(click.style('User was successfully created.', fg='green'))
        click_table_printer(columns, [], [row])
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style('User could not be created: \n{}'.format(str(e)),
                        fg='red'),
            err=True)
def inputs_list(ctx, user, organization, workflow, _filter, output_format):
    """List input files of a workflow.

    Prints a name/size/last-modified table of the workflow's input files,
    optionally filtered to selected columns and/or exported via tablib.
    """
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            response = ctx.obj.client.get_analysis_inputs(
                user, organization, workflow)
            headers = ['name', 'size', 'last-modified']
            data = []
            for file_ in response:
                # Stringify all cells so table/tablib rendering is uniform.
                data.append(
                    list(
                        map(str, [
                            file_['name'], file_['size'],
                            file_['last-modified']
                        ])))
            if output_format:
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                if _filter:
                    # Column subset: keep only the requested headers.
                    tablib_data = tablib_data.subset(rows=None,
                                                     cols=list(_filter))
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Something went wrong while retrieving input file list'
                ' for workflow {0}:\n{1}'.format(workflow, str(e)),
                fg='red'), err=True)
    else:
        # No workflow given on CLI and none resolved from the environment.
        click.echo(click.style(
            'Workflow name must be provided either with '
            '`--workflow` option or with REANA_WORKON '
            'environment variable', fg='red'), err=True)
def status(ctx, namespace, component):
    """Display the status of cluster components and if the cluster is ready.

    Prints a component/status table, then a readiness summary. Exits with
    status 1 when any component is not Running or a necessary one is missing.
    """
    components_status = ctx.obj.backend\
        .get_components_status(namespace, component)
    # detect if all components are in running state:
    all_running = True
    data = []
    headers = ['component', 'status']
    for component_name in components_status:
        data.append(
            list(map(str,
                     [component_name, components_status[component_name]])))
        if components_status[component_name] != 'Running':
            all_running = False
    # detect if all necessary components are present:
    all_present = True
    if component:
        # A single component was requested: only that one must be present.
        if component not in components_status:
            all_present = False
    else:
        # Otherwise every necessary component must be present.
        for component_name in reana_cluster_ready_necessary_components:
            if component_name not in components_status:
                all_present = False
    # print component status table:
    click_table_printer(headers, [], data)
    # produce final report:
    if all_running and all_present:
        if component:
            click.echo(
                click.style('REANA component {0} is ready.'.format(component),
                            fg='green'))
        else:
            click.echo(click.style('REANA cluster is ready.', fg='green'))
    else:
        if component:
            click.echo(
                click.style(
                    'REANA component {0} is not ready.'.format(component),
                    fg='yellow'))
        else:
            click.echo(click.style('REANA cluster is not ready.',
                                   fg='yellow'))
        # Non-zero exit so scripts can detect a not-ready cluster.
        sys.exit(1)
def workflow_disk_usage(ctx, workflow, access_token, summarize, filters,
                        human_readable_or_raw):  # noqa: D301
    """Get workspace disk usage.

    The ``du`` command allows to check the disk usage of given workspace.

    Examples: \n
    \t $ reana-client du -w myanalysis.42 -s \n
    \t $ reana-client du -w myanalysis.42 -s --human-readable \n
    \t $ reana-client du -w myanalysis.42 --filter name=data/
    """
    from reana_client.api.client import get_workflow_disk_usage
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    search_filter = None
    headers = ["size", "name"]
    if filters:
        # Only the search part is used here; the first returned value
        # (status filters) is discarded.
        _, search_filter = parse_filter_parameters(filters, headers)
    if workflow:
        try:
            parameters = {"summarize": summarize, "search": search_filter}
            response = get_workflow_disk_usage(workflow, parameters,
                                               access_token)
            if not response["disk_usage_info"]:
                # Nothing matched the filter: report and exit non-zero.
                display_message("No files matching filter criteria.",
                                msg_type="error")
                sys.exit(1)
            data = []
            for disk_usage_info in response["disk_usage_info"]:
                # Skip entries whose name starts with a FILES_BLACKLIST prefix.
                if not disk_usage_info["name"].startswith(FILES_BLACKLIST):
                    # Pick the human-readable or raw size representation.
                    data.append([
                        disk_usage_info["size"][human_readable_or_raw],
                        ".{}".format(disk_usage_info["name"]),
                    ])
            click_table_printer(headers, [], data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            display_message(
                "Disk usage could not be retrieved: \n{}".format(e),
                msg_type="error",
            )
def workflow_disk_usage(
    ctx, workflow, access_token, summarize, human_readable_or_raw
):  # noqa: D301
    """Get workspace disk usage.

    The `du` command allows to check the disk usage of given workspace.

    Examples: \n
    \t $ reana-client du -w myanalysis.42 -s \n
    \t $ reana-client du -w myanalysis.42 -s --human-readable
    """
    from reana_client.api.client import get_workflow_disk_usage
    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            parameters = {"summarize": summarize}
            response = get_workflow_disk_usage(workflow, parameters,
                                               access_token)
            headers = ["size", "name"]
            data = []
            for disk_usage_info in response["disk_usage_info"]:
                # Skip entries whose name starts with a FILES_BLACKLIST prefix.
                if not disk_usage_info["name"].startswith(FILES_BLACKLIST):
                    data.append(
                        [
                            # Size comes in both representations; select one.
                            disk_usage_info["size"][human_readable_or_raw],
                            ".{}".format(disk_usage_info["name"]),
                        ]
                    )
            click_table_printer(headers, [], data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(
                click.style(
                    "Disk usage could not be retrieved: \n{}".format(str(e)),
                    fg="red"
                ),
                err=True,
            )
def workflow_list(ctx, user, organization, _filter, output_format):
    """List all workflows user has.

    Prints a table of workflows (name, run number, id, user, organization,
    status), optionally filtered to selected columns and/or exported via
    tablib in ``output_format``.
    """
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    try:
        response = ctx.obj.client.get_all_analyses(user, organization)
        headers = [
            'name', 'run_number', 'id', 'user', 'organization', 'status'
        ]
        data = []
        for analysis in response:
            # Split "name.run_number" into its two display columns.
            name, run_number = get_workflow_name_and_run_number(
                analysis['name'])
            data.append(
                list(
                    map(str, [
                        name, run_number, analysis['id'], analysis['user'],
                        analysis['organization'], analysis['status']
                    ])))
        if output_format:
            tablib_data = tablib.Dataset()
            tablib_data.headers = headers
            for row in data:
                tablib_data.append(row)
            if _filter:
                tablib_data = tablib_data.subset(rows=None,
                                                 cols=list(_filter))
            click.echo(tablib_data.export(output_format))
        else:
            click_table_printer(headers, _filter, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        # Fix: typo in user-facing message ("culd" -> "could").
        click.echo(click.style(
            'Workflow list could not be retrieved: \n{}'.format(str(e)),
            fg='red'), err=True)
def secrets_list(access_token):  # noqa: D301
    """List user secrets.

    Examples: \n
    \t $ reana-client secrets-list
    """
    from reana_client.api.client import list_secrets
    try:
        user_secrets = list_secrets(access_token)
        columns = ['name', 'type']
        rows = [[str(s['name']), str(s['type'])] for s in user_secrets]
        # Columns double as the filter list, so exactly these columns print.
        click_table_printer(columns, columns, rows)
    except Exception as e:
        logging.debug(str(e), exc_info=True)
        click.echo(
            click.style('Something went wrong while listing secrets',
                        fg='red'),
            err=True)
def secrets_list(access_token):  # noqa: D301
    """List user secrets.

    Examples: \n
    \t $ reana-client secrets-list
    """
    from reana_client.api.client import list_secrets

    try:
        user_secrets = list_secrets(access_token)
        columns = ["name", "type"]
        rows = [[str(s["name"]), str(s["type"])] for s in user_secrets]
        # Columns double as the filter list, so exactly these columns print.
        click_table_printer(columns, columns, rows)
    except Exception as e:
        logging.debug(str(e), exc_info=True)
        display_message(
            "Something went wrong while listing secrets",
            msg_type="error",
        )
def workflow_workflows(ctx, sessions, _filter, output_format, access_token,
                       show_all, verbose):
    """List all workflows user has.

    Lists batch workflows by default, or interactive sessions when
    ``sessions`` is set. Deleted workflows are hidden unless ``show_all``.
    """
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    type = 'interactive' if sessions else 'batch'
    try:
        # Probe the API client configuration; the value itself is unused.
        _url = current_rs_api_client.swagger_spec.api_url
    except MissingAPIClientConfiguration as e:
        click.secho('REANA client is not connected to any REANA cluster.',
                    fg='red', err=True)
        sys.exit(1)
    if not access_token:
        click.echo(click.style(ERROR_MESSAGES['missing_access_token'],
                               fg='red'), err=True)
        sys.exit(1)
    if _filter:
        parsed_filters = parse_parameters(_filter)
    try:
        response = get_workflows(access_token, type, bool(verbose))
        verbose_headers = ['id', 'user', 'size']
        # Column sets differ between batch workflows and sessions.
        headers = {
            'batch': ['name', 'run_number', 'created', 'status'],
            'interactive': ['name', 'run_number', 'created', 'session_type',
                            'session_uri']
        }
        if verbose:
            headers[type] += verbose_headers
        data = []
        for workflow in response:
            if workflow['status'] == 'deleted' and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(
                workflow['name'])
            workflow['name'] = name
            workflow['run_number'] = run_number
            if type == 'interactive':
                # Turn the session path into a full clickable URI.
                workflow['session_uri'] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow['session_uri'],
                    access_token=access_token)
            data.append([str(workflow[k]) for k in headers[type]])
        # Sort by run number (column index 1, numerically).
        data = sorted(data, key=lambda x: int(x[1]))
        workflow_ids = ['{0}.{1}'.format(w[0], w[1]) for w in data]
        # Mark the currently active workflow (REANA_WORKON) with ' *'.
        if os.getenv('REANA_WORKON', '') in workflow_ids:
            active_workflow_idx = \
                workflow_ids.index(os.getenv('REANA_WORKON', ''))
            for idx, row in enumerate(data):
                if idx == active_workflow_idx:
                    data[idx][headers[type].index('run_number')] += ' *'
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _filter:
            tablib_data, filtered_headers = \
                filter_data(parsed_filters, headers[type], tablib_data)
            if output_format:
                click.echo(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers,
                                    tablib_data)
        else:
            if output_format:
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _filter, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Workflow list could not be retrieved: \n{}'.format(str(e)),
            fg='red'), err=True)
def workflow_workflows(ctx, sessions, _filter, output_format, access_token,
                       show_all, verbose, block_size,
                       sort_columm_name):  # noqa: D301
    """List all workflows and sessions.

    The `list` command lists workflows and sessions. By default, the list of
    workflows is returned. If you would like to see the list of your open
    interactive sessions, you need to pass the `--sessions` command-line
    option.

    Example: \n
    \t $ reana-client list --all \n
    \t $ reana-client list --sessions \n
    \t $ reana-client list --verbose --bytes
    """
    import tablib
    from reana_client.api.client import get_workflows
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    type = 'interactive' if sessions else 'batch'
    if _filter:
        parsed_filters = parse_parameters(_filter)
    try:
        # block_size only matters for the verbose 'size' column.
        if not verbose:
            block_size = None
        response = get_workflows(access_token, type, bool(verbose),
                                 block_size)
        verbose_headers = ['id', 'user', 'size']
        headers = {
            'batch': ['name', 'run_number', 'created', 'started', 'ended',
                      'status'],
            'interactive': ['name', 'run_number', 'created', 'session_type',
                            'session_uri']
        }
        if verbose:
            headers[type] += verbose_headers
        data = []
        for workflow in response:
            if workflow['status'] == 'deleted' and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(
                workflow['name'])
            workflow['name'] = name
            workflow['run_number'] = run_number
            if type == 'interactive':
                workflow['session_uri'] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow['session_uri'],
                    access_token=access_token)
            row = []
            for header in headers[type]:
                # started/ended map onto progress timestamps.
                if header == 'started':
                    header = 'run_started_at'
                elif header == 'ended':
                    header = 'run_finished_at'
                value = workflow.get(header)
                if not value:
                    value = workflow.get('progress', {}).get(header) or '-'
                row.append(value)
            data.append(row)
        # Default sort column is 'created' (index 2).
        sort_column_id = 2
        if sort_columm_name.lower() in headers[type]:
            sort_column_id = headers[type].index(sort_columm_name.lower())
        data = sorted(data, key=lambda x: x[sort_column_id], reverse=True)
        workflow_ids = ['{0}.{1}'.format(w[0], w[1]) for w in data]
        if os.getenv('REANA_WORKON', '') in workflow_ids:
            active_workflow_idx = \
                workflow_ids.index(os.getenv('REANA_WORKON', ''))
            for idx, row in enumerate(data):
                if idx == active_workflow_idx:
                    # NOTE(review): the ' *' marker is appended to a local
                    # variable only and never written back into `data`, so
                    # the active workflow is not actually marked in the
                    # output — likely a regression; confirm intent.
                    run_number = \
                        str(data[idx][headers[type].index('run_number')])
                    run_number += ' *'
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _filter:
            tablib_data, filtered_headers = \
                filter_data(parsed_filters, headers[type], tablib_data)
            if output_format:
                click.echo(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers,
                                    tablib_data)
        else:
            if output_format:
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _filter, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Workflow list could not be retrieved: \n{}'.format(str(e)),
            fg='red'), err=True)
def workflow_status(  # noqa: C901
    ctx, workflow, _format, output_format, access_token, verbose
):  # noqa: D301
    """Get status of a workflow.

    The ``status`` command allow to retrieve status of a workflow. The status
    can be created, queued, running, failed, etc. You can increase verbosity
    or filter retrieved information by passing appropriate command-line
    options.

    Examples: \n
    \t $ reana-client status -w myanalysis.42 \n
    \t $ reana-client status -w myanalysis.42 -v --json
    """
    import tablib
    from reana_client.api.client import get_workflow_status

    def render_progress(finished_jobs, total_jobs):
        # "finished/total", or "-/-" when total is unknown.
        if total_jobs:
            return "{0}/{1}".format(finished_jobs, total_jobs)
        else:
            return "-/-"

    def add_data_from_reponse(row, data, headers):
        # Flatten one status payload into a table row, extending `headers`
        # in place when optional columns (progress/started/ended) apply.
        name, run_number = get_workflow_name_and_run_number(row["name"])
        total_jobs = row["progress"].get("total")
        if total_jobs:
            total_jobs = total_jobs.get("total")
        else:
            total_jobs = 0
        finished_jobs = row["progress"].get("finished")
        if finished_jobs:
            finished_jobs = finished_jobs.get("total")
        else:
            finished_jobs = 0
        parsed_response = list(
            map(str, [name, run_number, row["created"], row["status"]])
        )
        # NOTE(review): `or` binds looser than `>`, so this parses as
        # `total or (0 > 0)`, i.e. a plain truthiness test of the total;
        # `(total or 0) > 0` was probably intended — confirm.
        if row["progress"]["total"].get("total") or 0 > 0:
            if "progress" not in headers:
                headers += ["progress"]
            parsed_response.append(render_progress(finished_jobs, total_jobs))
        if row["status"] in ["running", "finished", "failed", "stopped"]:
            started_at = row["progress"].get("run_started_at")
            finished_at = row["progress"].get("run_finished_at")
            if started_at:
                # Insert 'started' right after 'created'.
                after_created_pos = headers.index("created") + 1
                headers.insert(after_created_pos, "started")
                parsed_response.insert(after_created_pos, started_at)
            if finished_at:
                # Insert 'ended' right after 'started'.
                after_started_pos = headers.index("started") + 1
                headers.insert(after_started_pos, "ended")
                parsed_response.insert(after_started_pos, finished_at)
        data.append(parsed_response)
        return data

    def add_verbose_data_from_response(response, verbose_headers, headers,
                                       data):
        # Append verbose columns (id, user, command) to the last row.
        for k in verbose_headers:
            if k == "command":
                current_command = response["progress"]["current_command"]
                if current_command:
                    if current_command.startswith('bash -c "cd '):
                        # Strip the `bash -c "cd <dir>; ..."` wrapper so only
                        # the actual user command remains.
                        current_command = current_command[
                            current_command.index(";") + 2 : -2
                        ]
                    data[-1] += [current_command]
                else:
                    if "current_step_name" in response["progress"] and response[
                        "progress"
                    ].get("current_step_name"):
                        current_step_name = response["progress"].get(
                            "current_step_name"
                        )
                        data[-1] += [current_step_name]
                    else:
                        # No command nor step name: drop the column entirely.
                        headers.remove("command")
            else:
                data[-1] += [response.get(k)]
        return data

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            if _format:
                parsed_filters = parse_format_parameters(_format)
                _format = [item["column_name"] for item in parsed_filters]
            response = get_workflow_status(workflow, access_token)
            headers = ["name", "run_number", "created", "status"]
            verbose_headers = ["id", "user", "command"]
            data = []
            if not isinstance(response, list):
                response = [response]
            for workflow in response:
                add_data_from_reponse(workflow, data, headers)
                if verbose:
                    headers += verbose_headers
                    add_verbose_data_from_response(
                        workflow, verbose_headers, headers, data
                    )
            if output_format:
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                if _format:
                    tablib_data = tablib_data.subset(rows=None,
                                                     cols=list(_format))
                display_message(tablib_data.export(output_format))
            else:
                click_table_printer(headers, _format, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            display_message(
                "Cannot retrieve the status of a workflow {}: \n"
                "{}".format(workflow, str(e)),
                msg_type="error",
            )
def workflow_status(ctx, workflow, _filter, output_format, access_token,
                    verbose):
    """Get status of previously created workflow."""
    def render_progress(finished_jobs, total_jobs):
        # "finished/total", or "-/-" when total is unknown.
        if total_jobs:
            return '{0}/{1}'.format(finished_jobs, total_jobs)
        else:
            return '-/-'

    def add_data_from_reponse(row, data, headers):
        # Flatten one status payload into a table row.
        name, run_number = get_workflow_name_and_run_number(row['name'])
        total_jobs = row['progress'].get('total')
        if total_jobs:
            total_jobs = total_jobs.get('total')
        else:
            total_jobs = 0
        finished_jobs = row['progress'].get('finished')
        if finished_jobs:
            finished_jobs = finished_jobs.get('total')
        else:
            finished_jobs = 0
        # NOTE(review): `or` binds looser than `>`, so this parses as
        # `total or (0 > 0)`, i.e. a plain truthiness test of the total;
        # `(total or 0) > 0` was probably intended — confirm.
        if row['progress']['total'].get('total') or 0 > 0:
            if 'progress' not in headers:
                headers += ['progress']
        data.append(
            list(
                map(str, [
                    name, run_number, row['created'], row['status'],
                    render_progress(finished_jobs, total_jobs)
                ])))
        return data

    def add_verbose_data_from_response(response, verbose_headers,
                                       headers, data):
        # Append verbose columns (id, user, command) to the last row.
        for k in verbose_headers:
            if k == 'command':
                current_command = response['progress']['current_command']
                if current_command:
                    if current_command.startswith('bash -c "cd '):
                        # Strip the `bash -c "cd <dir>; ..."` wrapper.
                        current_command = current_command[current_command.
                                                          index(';') + 2:-2]
                    data[-1] += [current_command]
                else:
                    if 'current_step_name' in response['progress'] and \
                            response['progress'].get('current_step_name'):
                        current_step_name = response['progress'].\
                            get('current_step_name')
                        data[-1] += [current_step_name]
                    else:
                        # No command nor step name: drop the column.
                        headers.remove('command')
            else:
                data[-1] += [response.get(k)]
        return data

    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if not access_token:
        click.echo(click.style(ERROR_MESSAGES['missing_access_token'],
                               fg='red'), err=True)
        sys.exit(1)
    if workflow:
        try:
            response = get_workflow_status(workflow, access_token)
            headers = ['name', 'run_number', 'created', 'status', 'progress']
            verbose_headers = ['id', 'user', 'command']
            data = []
            if not isinstance(response, list):
                response = [response]
            for workflow in response:
                add_data_from_reponse(workflow, data, headers)
                if verbose:
                    headers += verbose_headers
                    add_verbose_data_from_response(workflow, verbose_headers,
                                                   headers, data)
            if output_format:
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                if _filter:
                    tablib_data = tablib_data.subset(rows=None,
                                                     cols=list(_filter))
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Workflow status could not be retrieved: \n{}'
                .format(str(e)),
                fg='red'), err=True)
def workflow_workflows(  # noqa: C901
    ctx,
    workflow,
    sessions,
    _format,
    output_format,
    access_token,
    show_all,
    verbose,
    human_readable_or_raw,
    sort_columm_name,
    page,
    size,
    filters,
    include_progress,
    include_workspace_size,
):  # noqa: D301
    """List all workflows and sessions.

    The ``list`` command lists workflows and sessions. By default, the list
    of workflows is returned. If you would like to see the list of your open
    interactive sessions, you need to pass the ``--sessions`` command-line
    option.

    Example: \n
    \t $ reana-client list --all \n
    \t $ reana-client list --sessions \n
    \t $ reana-client list --verbose --bytes
    """
    import tablib
    from reana_client.api.client import get_workflows

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
    type = "interactive" if sessions else "batch"
    status_filter = None
    search_filter = None
    if filters:
        # Split CLI filters into status filters and a name search term.
        filter_names = ["name", "status"]
        status_filter, search_filter = parse_filter_parameters(filters,
                                                               filter_names)
    if _format:
        parsed_format_filters = parse_format_parameters(_format)
    try:
        response = get_workflows(
            access_token,
            type,
            verbose=bool(verbose),
            page=page,
            size=size,
            status=status_filter,
            search=search_filter,
            include_progress=include_progress,
            include_workspace_size=include_workspace_size,
            workflow=workflow,
        )
        verbose_headers = ["id", "user"]
        workspace_size_header = ["size"]
        progress_header = ["progress"]
        headers = {
            "batch": ["name", "run_number", "created", "started", "ended",
                      "status"],
            "interactive": [
                "name",
                "run_number",
                "created",
                "session_type",
                "session_uri",
                "session_status",
            ],
        }
        # Optional columns are appended depending on the flags given.
        if verbose:
            headers[type] += verbose_headers
        if verbose or include_workspace_size:
            headers[type] += workspace_size_header
        if verbose or include_progress:
            headers[type] += progress_header
        data = []
        for workflow in response:
            # Size comes in both representations; pick the requested one.
            workflow["size"] = workflow["size"][human_readable_or_raw]
            if workflow["status"] == "deleted" and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(
                workflow["name"])
            workflow["name"] = name
            workflow["run_number"] = run_number
            if type == "interactive":
                workflow["session_uri"] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow["session_uri"],
                    access_token=access_token,
                )
            row = []
            for header in headers[type]:
                value = None
                if header in progress_header:
                    value = get_formatted_progress(workflow.get("progress"))
                elif header in ["started", "ended"]:
                    # started/ended map onto progress timestamps.
                    _key = (
                        "run_started_at" if header == "started"
                        else "run_finished_at"
                    )
                    value = workflow.get("progress", {}).get(_key) or "-"
                if not value:
                    value = workflow.get(header)
                row.append(value)
            data.append(row)
        # Default sort column is 'created' (index 2).
        sort_column_id = 2
        if sort_columm_name.lower() in headers[type]:
            sort_column_id = headers[type].index(sort_columm_name.lower())
        data = sorted(data, key=lambda x: x[sort_column_id], reverse=True)
        workflow_ids = ["{0}.{1}".format(w[0], w[1]) for w in data]
        if os.getenv("REANA_WORKON", "") in workflow_ids:
            active_workflow_idx = workflow_ids.index(
                os.getenv("REANA_WORKON", ""))
            for idx, row in enumerate(data):
                if idx == active_workflow_idx:
                    # NOTE(review): the ' *' marker is appended to a local
                    # variable only and never written back into `data`, so
                    # the active workflow is not actually marked in the
                    # output — likely a regression; confirm intent.
                    run_number = str(
                        data[idx][headers[type].index("run_number")])
                    run_number += " *"
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _format:
            tablib_data, filtered_headers = format_data(
                parsed_format_filters, headers[type], tablib_data
            )
            if output_format:
                display_message(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers,
                                    tablib_data)
        else:
            if output_format:
                display_message(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _format, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        display_message(
            "Workflow list could not be retrieved: \n{}".format(str(e)),
            msg_type="error",
        )
def list_quota_usage(ctx, id, email, user_access_token, admin_access_token,
                     output_format, human_readable):
    """List quota usage of users.

    Rows are coloured by the worst of each user's disk/CPU quota health and
    sorted most-critical first.
    """
    try:
        response = _get_users(id, email, user_access_token,
                              admin_access_token)
        headers = [
            "id", "email", "cpu-used", "cpu-limit", "disk-used", "disk-limit"
        ]
        # Severity ranking used both for per-user colour and global sorting.
        health_order = {
            QuotaHealth.healthy.name: 0,
            QuotaHealth.warning.name: 1,
            QuotaHealth.critical.name: 2,
        }
        data = []
        colours = []
        health = []
        for user in response:
            quota_usage = user.get_quota_usage()
            disk, cpu = quota_usage.get("disk"), quota_usage.get("cpu")
            data.append((
                str(user.id_),
                user.email,
                # `human_readable` selects the representation key.
                cpu.get("usage").get(human_readable),
                cpu.get("limit", {}).get(human_readable) or "-",
                disk.get("usage").get(human_readable),
                disk.get("limit", {}).get(human_readable) or "-",
            ))
            # Worst of the two resource healths decides the row colour.
            health_ordered = max(
                [
                    disk.get("health", QuotaHealth.healthy.name),
                    cpu.get("health", QuotaHealth.healthy.name),
                ],
                key=lambda key: health_order[key],
            )
            colours.append(REANA_RESOURCE_HEALTH_COLORS[health_ordered])
            health.append(health_ordered)
        if data and colours and health:
            # Sort the three parallel lists together by severity (desc);
            # the health list is only needed as the sort key.
            data, colours, _ = (list(t) for t in zip(*sorted(
                zip(data, colours, health),
                key=lambda t: health_order[t[2]],
                reverse=True,
            )))
        if output_format:
            tablib_data = tablib.Dataset()
            tablib_data.headers = headers
            for row in data:
                tablib_data.append(row)
            click.echo(tablib_data.export(output_format))
        else:
            click_table_printer(headers, [], data, colours)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(
            click.style("User could not be retrieved: \n{}".format(str(e)),
                        fg="red"),
            err=True,
        )
def get_files(ctx, workflow, _filter, output_format, access_token):  # noqa: D301
    """List workspace files.

    The `ls` command lists workspace files of a workflow specified by the
    environment variable REANA_WORKON or provided as a command-line flag
    `--workflow` or `-w`.

    Examples: \n
    \t $ reana-client ls --workflow myanalysis.42
    """
    # Local imports keep CLI start-up fast: tablib and the API client are
    # only needed once this command actually runs.
    import tablib
    from reana_client.api.client import current_rs_api_client, list_files

    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if _filter:
        # Parse "--filter key=value" strings once, before fetching files.
        parsed_filters = parse_parameters(_filter)
    if workflow:
        # NOTE(review): no else branch — with no workflow selected the
        # command silently does nothing (unlike workflow_status, which
        # prints an error). Confirm this is intended.
        logging.info('Workflow "{}" selected'.format(workflow))
        try:
            response = list_files(workflow, access_token)
            headers = ['name', 'size', 'last-modified']
            data = []
            # Resolve the server's download-file URL template from the
            # swagger spec so printed URLs match the connected cluster.
            file_path = get_path_from_operation_id(
                current_rs_api_client.swagger_spec.spec_dict['paths'],
                'download_file')
            urls = []
            for file_ in response:
                # Skip internal/blacklisted workspace entries.
                if not file_['name'].startswith(FILES_BLACKLIST):
                    data.append(
                        list(
                            map(str, [
                                file_['name'], file_['size'],
                                file_['last-modified']
                            ])))
                    urls.append(ctx.obj.reana_server_url + file_path.format(
                        workflow_id_or_name=workflow,
                        file_name=file_['name']))
            tablib_data = tablib.Dataset()
            tablib_data.headers = headers
            for row in data:
                tablib_data.append(row)
            # Output selection: URL listing wins over filters; filtered
            # output may still be exported as JSON or printed as a table.
            if output_format == URL:
                click.echo('\n'.join(urls))
            elif _filter:
                tablib_data, filtered_headers = \
                    filter_data(parsed_filters, headers, tablib_data)
                if output_format == JSON:
                    click.echo(json.dumps(tablib_data))
                else:
                    # filter_data returns dicts; flatten back to rows.
                    tablib_data = [list(item.values()) for item in tablib_data]
                    click_table_printer(filtered_headers, filtered_headers,
                                        tablib_data)
            else:
                if output_format == JSON:
                    click.echo(tablib_data.export(output_format))
                else:
                    click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Something went wrong while retrieving file list'
                ' for workflow {0}:\n{1}'.format(workflow, str(e)),
                fg='red'), err=True)
def workflow_status(ctx, user, organization, workflow, _filter, output_format):
    """Get status of previously created analysis workflow.

    :param ctx: click context (holds the API client on ``ctx.obj.client``).
    :param user: user identifier passed to the status endpoint.
    :param organization: organization the workflow belongs to.
    :param workflow: workflow name or UUID; required (taken from the
        ``--workflow`` option or the REANA_WORKON environment variable).
    :param _filter: optional list of column names to restrict the output to.
    :param output_format: optional tablib export format (e.g. ``json``);
        falls back to a plain table when empty.
    """
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            response = ctx.obj.client.get_analysis_status(
                user, organization, workflow)
            headers = [
                'name', 'run_number', 'id', 'user', 'organization', 'status',
                'command', 'progress'
            ]
            data = []
            # The endpoint may return either a list of analyses or a single
            # analysis dict; the two shapes also differ in where progress
            # information lives (top-level keys vs a 'progress' sub-dict).
            if isinstance(response, list):
                for analysis in response:
                    name, run_number = get_workflow_name_and_run_number(
                        analysis['name'])
                    data.append(
                        list(
                            map(str, [
                                name, run_number, analysis['id'],
                                analysis['user'], analysis['organization'],
                                analysis['status'],
                                analysis['current_command'],
                                '{0}/{1}'.format(
                                    analysis['current_command_idx'],
                                    analysis['total_commands'])
                            ])))
            else:
                name, run_number = get_workflow_name_and_run_number(
                    response['name'])
                data.append(
                    list(
                        map(str, [
                            name, run_number, response['id'],
                            response['user'], response['organization'],
                            response['status'],
                            response['progress'].get('current_command'),
                            '{0}/{1}'.format(
                                response['progress'].get(
                                    'current_command_idx'),
                                response['progress'].get('total_commands'))
                        ])))
            if output_format:
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                # Bug fix: subset/export must act on the tablib Dataset,
                # not on the plain ``data`` list (which has neither method
                # and raised AttributeError for any --format output).
                if _filter:
                    tablib_data = tablib_data.subset(
                        rows=None, cols=list(_filter))
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Workflow status could not be retrieved: \n{}'.format(str(e)),
                fg='red'), err=True)
    else:
        click.echo(click.style(
            'Workflow name must be provided either with '
            '`--workflow` option or with REANA_WORKON '
            'environment variable', fg='red'), err=True)
def get_files(ctx, workflow, _filter, output_format, access_token):
    """List workflow workspace files."""
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for param in ctx.params:
        logging.debug('{param}: {value}'.format(
            param=param, value=ctx.params[param]))

    # Guard 1: a configured API client is required before anything else.
    try:
        _url = current_rs_api_client.swagger_spec.api_url
    except MissingAPIClientConfiguration:
        click.secho('REANA client is not connected to any REANA cluster.',
                    fg='red', err=True)
        sys.exit(1)

    # Guard 2: an access token is mandatory.
    if not access_token:
        click.echo(
            click.style(ERROR_MESSAGES['missing_access_token'], fg='red'),
            err=True)
        sys.exit(1)

    parsed_filters = parse_parameters(_filter) if _filter else None

    if workflow:
        logging.info('Workflow "{}" selected'.format(workflow))
        try:
            headers = ['name', 'size', 'last-modified']
            # One stringified row per workspace file.
            data = [
                [str(entry['name']), str(entry['size']),
                 str(entry['last-modified'])]
                for entry in list_files(workflow, access_token)
            ]
            dataset = tablib.Dataset()
            dataset.headers = headers
            for row in data:
                dataset.append(row)

            if _filter:
                dataset, filtered_headers = filter_data(
                    parsed_filters, headers, dataset)
                if output_format:
                    click.echo(json.dumps(dataset))
                else:
                    # filter_data yields dicts; flatten them back into rows.
                    rows = [list(item.values()) for item in dataset]
                    click_table_printer(
                        filtered_headers, filtered_headers, rows)
            elif output_format:
                click.echo(dataset.export(output_format))
            else:
                click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Something went wrong while retrieving file list'
                ' for workflow {0}:\n{1}'.format(workflow, str(e)),
                fg='red'), err=True)