def workflow_workflows(ctx, sessions, _filter, output_format, access_token,
                       show_all, verbose):
    """List all workflows the user has."""
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    type = 'interactive' if sessions else 'batch'
    try:
        _url = current_rs_api_client.swagger_spec.api_url
    except MissingAPIClientConfiguration as e:
        click.secho('REANA client is not connected to any REANA cluster.',
                    fg='red', err=True)
        sys.exit(1)
    if not access_token:
        click.echo(
            click.style(ERROR_MESSAGES['missing_access_token'], fg='red'),
            err=True)
        sys.exit(1)
    if _filter:
        parsed_filters = parse_parameters(_filter)
    try:
        response = get_workflows(access_token, type, bool(verbose))
        verbose_headers = ['id', 'user', 'size']
        headers = {
            'batch': ['name', 'run_number', 'created', 'status'],
            'interactive': ['name', 'run_number', 'created', 'session_type',
                            'session_uri']
        }
        if verbose:
            headers[type] += verbose_headers
        data = []
        for workflow in response:
            if workflow['status'] == 'deleted' and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(
                workflow['name'])
            workflow['name'] = name
            workflow['run_number'] = run_number
            if type == 'interactive':
                workflow['session_uri'] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow['session_uri'],
                    access_token=access_token)
            data.append([str(workflow[k]) for k in headers[type]])
        data = sorted(data, key=lambda x: int(x[1]))
        workflow_ids = ['{0}.{1}'.format(w[0], w[1]) for w in data]
        # Mark the workflow selected via REANA_WORKON with a trailing ' *'.
        if os.getenv('REANA_WORKON', '') in workflow_ids:
            active_workflow_idx = \
                workflow_ids.index(os.getenv('REANA_WORKON', ''))
            for idx, row in enumerate(data):
                if idx == active_workflow_idx:
                    data[idx][headers[type].index('run_number')] += ' *'
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _filter:
            tablib_data, filtered_headers = \
                filter_data(parsed_filters, headers[type], tablib_data)
            if output_format:
                click.echo(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers,
                                    tablib_data)
        else:
            if output_format:
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _filter, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Workflow list could not be retrieved: \n{}'.format(str(e)),
            fg='red'), err=True)

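# --- Illustrative sketch (not part of reana-client) -------------------------
# workflow_workflows() above sorts rows by run_number and marks the workflow
# selected via the REANA_WORKON environment variable with a trailing ' *'.
# A minimal, self-contained reproduction of that logic on made-up rows,
# assuming each row is a [name, run_number, created, status] list:
def _demo_mark_active_workflow():
    headers = ['name', 'run_number', 'created', 'status']
    data = [['myanalysis', '2', '2020-01-02', 'finished'],
            ['myanalysis', '1', '2020-01-01', 'failed']]
    data = sorted(data, key=lambda x: int(x[1]))
    workflow_ids = ['{0}.{1}'.format(w[0], w[1]) for w in data]
    active = 'myanalysis.2'  # stands in for os.getenv('REANA_WORKON', '')
    if active in workflow_ids:
        idx = workflow_ids.index(active)
        data[idx][headers.index('run_number')] += ' *'
    return data  # the active run now reads '2 *' in the table
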
def get_files(ctx, workflow, _filter, output_format, access_token):
    """List workflow workspace files."""
    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    try:
        _url = current_rs_api_client.swagger_spec.api_url
    except MissingAPIClientConfiguration as e:
        click.secho('REANA client is not connected to any REANA cluster.',
                    fg='red', err=True)
        sys.exit(1)
    if not access_token:
        click.echo(
            click.style(ERROR_MESSAGES['missing_access_token'], fg='red'),
            err=True)
        sys.exit(1)
    if _filter:
        parsed_filters = parse_parameters(_filter)
    if workflow:
        logging.info('Workflow "{}" selected'.format(workflow))
        try:
            response = list_files(workflow, access_token)
            headers = ['name', 'size', 'last-modified']
            data = []
            for file_ in response:
                data.append(list(map(str, [file_['name'],
                                           file_['size'],
                                           file_['last-modified']])))
            tablib_data = tablib.Dataset()
            tablib_data.headers = headers
            for row in data:
                tablib_data.append(row)
            if _filter:
                tablib_data, filtered_headers = \
                    filter_data(parsed_filters, headers, tablib_data)
                if output_format:
                    click.echo(json.dumps(tablib_data))
                else:
                    tablib_data = [list(item.values())
                                   for item in tablib_data]
                    click_table_printer(filtered_headers, filtered_headers,
                                        tablib_data)
            else:
                if output_format:
                    click.echo(tablib_data.export(output_format))
                else:
                    click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Something went wrong while retrieving file list'
                ' for workflow {0}:\n{1}'.format(workflow, str(e)),
                fg='red'), err=True)

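# --- Illustrative sketch (not part of reana-client) -------------------------
# get_files() above feeds its rows into a tablib.Dataset so that the same
# table can either be printed or exported.  A minimal, self-contained version
# of that flow with made-up file entries; dataset.export('json') is the path
# taken when output_format is set:
def _demo_files_dataset():
    import tablib
    headers = ['name', 'size', 'last-modified']
    rows = [['data.root', '1024', '2020-01-01T00:00:00'],
            ['plot.png', '2048', '2020-01-02T00:00:00']]
    dataset = tablib.Dataset()
    dataset.headers = headers
    for row in rows:
        dataset.append(row)
    return dataset.export('json')
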
def workflow_workflows(ctx, sessions, _filter, output_format, access_token,
                       show_all, verbose, block_size,
                       sort_columm_name):  # noqa: D301
    """List all workflows and sessions.

    The `list` command lists workflows and sessions. By default, the list of
    workflows is returned. If you would like to see the list of your open
    interactive sessions, you need to pass the `--sessions` command-line
    option.

    Examples: \n
    \t $ reana-client list --all \n
    \t $ reana-client list --sessions \n
    \t $ reana-client list --verbose --bytes
    """
    import tablib
    from reana_client.api.client import get_workflows

    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    type = 'interactive' if sessions else 'batch'
    if _filter:
        parsed_filters = parse_parameters(_filter)
    try:
        if not verbose:
            block_size = None
        response = get_workflows(access_token, type, bool(verbose),
                                 block_size)
        verbose_headers = ['id', 'user', 'size']
        headers = {
            'batch': ['name', 'run_number', 'created', 'started', 'ended',
                      'status'],
            'interactive': ['name', 'run_number', 'created', 'session_type',
                            'session_uri']
        }
        if verbose:
            headers[type] += verbose_headers
        data = []
        for workflow in response:
            if workflow['status'] == 'deleted' and not show_all:
                continue
            name, run_number = get_workflow_name_and_run_number(
                workflow['name'])
            workflow['name'] = name
            workflow['run_number'] = run_number
            if type == 'interactive':
                workflow['session_uri'] = format_session_uri(
                    reana_server_url=ctx.obj.reana_server_url,
                    path=workflow['session_uri'],
                    access_token=access_token)
            row = []
            for header in headers[type]:
                if header == 'started':
                    header = 'run_started_at'
                elif header == 'ended':
                    header = 'run_finished_at'
                value = workflow.get(header)
                if not value:
                    value = workflow.get('progress', {}).get(header) or '-'
                row.append(value)
            data.append(row)
        # Sort by the requested column, falling back to 'created'.
        sort_column_id = 2
        if sort_columm_name.lower() in headers[type]:
            sort_column_id = headers[type].index(sort_columm_name.lower())
        data = sorted(data, key=lambda x: x[sort_column_id], reverse=True)
        workflow_ids = ['{0}.{1}'.format(w[0], w[1]) for w in data]
        if os.getenv('REANA_WORKON', '') in workflow_ids:
            active_workflow_idx = \
                workflow_ids.index(os.getenv('REANA_WORKON', ''))
            for idx, row in enumerate(data):
                if idx == active_workflow_idx:
                    run_number = \
                        str(data[idx][headers[type].index('run_number')])
                    run_number += ' *'
                    # Write the marker back so it actually shows in the table.
                    data[idx][headers[type].index('run_number')] = run_number
        tablib_data = tablib.Dataset()
        tablib_data.headers = headers[type]
        for row in data:
            tablib_data.append(row=row, tags=row)
        if _filter:
            tablib_data, filtered_headers = \
                filter_data(parsed_filters, headers[type], tablib_data)
            if output_format:
                click.echo(json.dumps(tablib_data))
            else:
                tablib_data = [list(item.values()) for item in tablib_data]
                click_table_printer(filtered_headers, filtered_headers,
                                    tablib_data)
        else:
            if output_format:
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers[type], _filter, data)
    except Exception as e:
        logging.debug(traceback.format_exc())
        logging.debug(str(e))
        click.echo(click.style(
            'Workflow list could not be retrieved: \n{}'.format(str(e)),
            fg='red'), err=True)

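# --- Illustrative sketch (not part of reana-client) -------------------------
# The newer workflow_workflows() above sorts by a user-chosen column name and
# falls back to column 2 ('created') when the name is not a known header.
# A reduced, stand-alone version of that lookup; headers and rows are made up:
def _demo_sort_by_column(rows, headers, sort_column_name='created'):
    sort_column_id = 2  # default: 'created'
    if sort_column_name.lower() in headers:
        sort_column_id = headers.index(sort_column_name.lower())
    return sorted(rows, key=lambda x: x[sort_column_id], reverse=True)
# Example:
#   _demo_sort_by_column([['a', '1', '2020-01-01', 'finished'],
#                         ['a', '2', '2020-01-02', 'running']],
#                        ['name', 'run_number', 'created', 'status'],
#                        sort_column_name='status')
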
def get_files(ctx, workflow, _filter, output_format,
              access_token):  # noqa: D301
    """List workspace files.

    The `ls` command lists workspace files of a workflow specified by the
    environment variable REANA_WORKON or provided as a command-line flag
    `--workflow` or `-w`.

    Examples: \n
    \t $ reana-client ls --workflow myanalysis.42
    """
    import tablib
    from reana_client.api.client import current_rs_api_client, list_files

    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if _filter:
        parsed_filters = parse_parameters(_filter)
    if workflow:
        logging.info('Workflow "{}" selected'.format(workflow))
        try:
            response = list_files(workflow, access_token)
            headers = ['name', 'size', 'last-modified']
            data = []
            file_path = get_path_from_operation_id(
                current_rs_api_client.swagger_spec.spec_dict['paths'],
                'download_file')
            urls = []
            for file_ in response:
                if not file_['name'].startswith(FILES_BLACKLIST):
                    data.append(list(map(str, [file_['name'],
                                               file_['size'],
                                               file_['last-modified']])))
                    urls.append(
                        ctx.obj.reana_server_url +
                        file_path.format(workflow_id_or_name=workflow,
                                         file_name=file_['name']))
            tablib_data = tablib.Dataset()
            tablib_data.headers = headers
            for row in data:
                tablib_data.append(row)
            if output_format == URL:
                click.echo('\n'.join(urls))
            elif _filter:
                tablib_data, filtered_headers = \
                    filter_data(parsed_filters, headers, tablib_data)
                if output_format == JSON:
                    click.echo(json.dumps(tablib_data))
                else:
                    tablib_data = [list(item.values())
                                   for item in tablib_data]
                    click_table_printer(filtered_headers, filtered_headers,
                                        tablib_data)
            else:
                if output_format == JSON:
                    click.echo(tablib_data.export(output_format))
                else:
                    click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Something went wrong while retrieving file list'
                ' for workflow {0}:\n{1}'.format(workflow, str(e)),
                fg='red'), err=True)

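# --- Illustrative sketch (not part of reana-client) -------------------------
# The newer get_files() above turns the swagger path template resolved for
# the 'download_file' operation into one URL per workspace file via
# str.format().  The template string below is a made-up example; the real one
# comes from get_path_from_operation_id() and the server's OpenAPI spec.
def _demo_build_file_urls(server_url, workflow, file_names):
    file_path = '/api/workflows/{workflow_id_or_name}/files/{file_name}'
    return [server_url + file_path.format(workflow_id_or_name=workflow,
                                          file_name=name)
            for name in file_names]
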
def workflow_status(ctx, workflow, _filter, output_format, access_token,
                    verbose):  # noqa: D301
    """Get status of a workflow.

    The `status` command allows you to retrieve the status of a workflow. The
    status can be created, queued, running, failed, etc. You can increase
    verbosity or filter retrieved information by passing appropriate
    command-line options.

    Examples: \n
    \t $ reana-client status -w myanalysis.42 \n
    \t $ reana-client status -w myanalysis.42 -v --json
    """
    import tablib
    from reana_client.api.client import get_workflow_status

    def render_progress(finished_jobs, total_jobs):
        if total_jobs:
            return '{0}/{1}'.format(finished_jobs, total_jobs)
        else:
            return '-/-'

    def add_data_from_response(row, data, headers):
        name, run_number = get_workflow_name_and_run_number(row['name'])
        total_jobs = row['progress'].get('total')
        if total_jobs:
            total_jobs = total_jobs.get('total')
        else:
            total_jobs = 0
        finished_jobs = row['progress'].get('finished')
        if finished_jobs:
            finished_jobs = finished_jobs.get('total')
        else:
            finished_jobs = 0
        parsed_response = list(
            map(str, [name, run_number, row['created'], row['status']]))
        # Show the progress column only when the total job count is known.
        if (total_jobs or 0) > 0:
            if 'progress' not in headers:
                headers += ['progress']
            parsed_response.append(
                render_progress(finished_jobs, total_jobs))
        if row['status'] in ['running', 'finished', 'failed', 'stopped']:
            started_at = row['progress'].get('run_started_at')
            finished_at = row['progress'].get('run_finished_at')
            if started_at:
                after_created_pos = headers.index('created') + 1
                headers.insert(after_created_pos, 'started')
                parsed_response.insert(after_created_pos, started_at)
                if finished_at:
                    after_started_pos = headers.index('started') + 1
                    headers.insert(after_started_pos, 'ended')
                    parsed_response.insert(after_started_pos, finished_at)
        data.append(parsed_response)
        return data

    def add_verbose_data_from_response(response, verbose_headers,
                                       headers, data):
        for k in verbose_headers:
            if k == 'command':
                current_command = response['progress']['current_command']
                if current_command:
                    if current_command.startswith('bash -c "cd '):
                        current_command = current_command[
                            current_command.index(';') + 2:-2]
                    data[-1] += [current_command]
                else:
                    if 'current_step_name' in response['progress'] and \
                            response['progress'].get('current_step_name'):
                        current_step_name = response['progress'].\
                            get('current_step_name')
                        data[-1] += [current_step_name]
                    else:
                        headers.remove('command')
            else:
                data[-1] += [response.get(k)]
        return data

    logging.debug('command: {}'.format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))
    if workflow:
        try:
            if _filter:
                parsed_filters = parse_parameters(_filter)
                _filter = [item['column_name'] for item in parsed_filters]
            response = get_workflow_status(workflow, access_token)
            headers = ['name', 'run_number', 'created', 'status']
            verbose_headers = ['id', 'user', 'command']
            data = []
            if not isinstance(response, list):
                response = [response]
            for workflow in response:
                add_data_from_response(workflow, data, headers)
                if verbose:
                    headers += verbose_headers
                    add_verbose_data_from_response(workflow, verbose_headers,
                                                   headers, data)
            if output_format:
                tablib_data = tablib.Dataset()
                tablib_data.headers = headers
                for row in data:
                    tablib_data.append(row)
                if _filter:
                    tablib_data = tablib_data.subset(rows=None,
                                                     cols=list(_filter))
                click.echo(tablib_data.export(output_format))
            else:
                click_table_printer(headers, _filter, data)
        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))
            click.echo(click.style(
                'Cannot retrieve the status of a workflow {}: \n{}'.format(
                    workflow, str(e)),
                fg='red'), err=True)

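# --- Illustrative sketch (not part of reana-client) -------------------------
# workflow_status() above widens the status table at run time: a 'progress'
# column is appended when the total job count is known, and 'started'/'ended'
# columns are inserted right after 'created'.  A reduced version of that
# header/row bookkeeping on made-up values, showing why both lists must be
# updated in lockstep to keep columns aligned:
def _demo_status_row():
    headers = ['name', 'run_number', 'created', 'status']
    row = ['myanalysis', '42', '2020-01-01T00:00:00', 'running']
    finished_jobs, total_jobs = 3, 10
    started_at = '2020-01-01T00:05:00'
    if total_jobs:
        headers.append('progress')
        row.append('{0}/{1}'.format(finished_jobs, total_jobs))
    if started_at:
        pos = headers.index('created') + 1
        headers.insert(pos, 'started')
        row.insert(pos, started_at)
    return headers, row  # e.g. (..., 'started', 'status', 'progress')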