def request_data_http(url: str, resource: str) -> None:
    """Fetch the raw data for a resource over HTTP and print it as JSON.

    :param url: The base URL
    :param resource: The resource ID
    """
    client = HttpClient(url)
    payload = client.request_data(resource)
    print(json.dumps(payload))
def request_events_http(url: str, resource: str) -> None:
    """Fetch the events for a resource over HTTP and print them as a table.

    :param url: The base URL
    :param resource: The resource ID
    """
    response = HttpClient(url).request_events(resource)
    rows = list(map(_result2event, response['events']))
    print_table(rows)
def create_job_http(url: str, jwt_token: str, name: str) -> None:
    """Ask the remote odin server to create a new job and print its response.

    :param url: Base url of the remote odin server
    :param jwt_token: Your JWT authentication token
    :param name: The name of the job you want to create
    """
    client = HttpClient(url, jwt_token)
    print(json.dumps(client.create_job(name)))
def request_nodes_http(url: str) -> None:
    """Fetch cluster hardware status over HTTP and print one row per GPU.

    :param url: the base URL
    """
    cluster = HttpClient(url).request_cluster_hw_status()
    # Flatten every GPU of every node into a single table of rows
    gpu_rows = [
        _gpu2row(gpu, node['host'])
        for node in cluster
        for gpu in node['gpus']
    ]
    print_table(gpu_rows)
def schedule_pipeline_http(url: str, jwt_token: str, work: str, context: Dict) -> None:
    """Schedule a pipeline over HTTP and print the server's JSON response.

    :param url: the base URL
    :param jwt_token: The JWT token representing this authentication
    :param work: The pipeline ID
    :param context: Extra context sent along with the scheduling request
    """
    client = HttpClient(url, jwt_token=jwt_token)
    response = client.schedule_pipeline(work, context)
    print(json.dumps(response))
def request_cleanup_http(url: str, jwt_token: str, work: str, purge_db: bool = False, purge_fs: bool = False) -> None:
    """Delete a pipeline over HTTP and print a table of what was cleaned up.

    :param url: the base URL
    :param jwt_token: The token for last user auth
    :param work: The pipeline ID
    :param purge_db: Should we delete the pipeline from the jobs db too?
    :param purge_fs: Should we remove pipeline file system artifacts?
    """
    client = HttpClient(url, jwt_token=jwt_token)
    response = client.delete_pipeline(work, purge_db, purge_fs)
    rows = list(map(_result2cleanup, response['cleanups']))
    print("Results of this request:")
    print_table(rows)
def request_status_http(url: str, work: str, columns: Set[str], all_cols: bool = False) -> None:
    """Fetch the status of a pipeline over HTTP and display each result.

    :param url: the base URL
    :param work: The pipeline ID
    :param columns: A set of columns to include in the output
    :param all_cols: Show every column; when True, ``columns`` is ignored
    """
    for pipe_result in HttpClient(url).request_status(work):
        task_rows = [_task2row(task) for task in pipe_result['tasks']]
        show_status(_result2pipe(pipe_result), task_rows, columns, all_cols)
def push_file_maybe_create_job(url: str, jwt_token: str, job: str, file_name: str, file_contents: str, create_job: bool) -> None:
    """Push a file to update a remote pipeline, optionally creating the job first.

    :param url: The odin-http endpoint
    :param jwt_token: The jwt token used to auth with odin
    :param job: The job definition that will be updated
    :param file_name: The name to save the file as on the remote server
    :param file_contents: The content of the file we want to upload
    :param create_job: Should we create the job before pushing the file?
    """
    client = HttpClient(url, jwt_token=jwt_token)
    if create_job:
        results = client.create_job(job)
        # A 'status' key in the create response signals failure
        # (e.g. the job already exists on the server)
        if 'status' in results:
            status = color(
                'Failed to create a new job. If the job already exists, do not try to create',
                Colors.RED
            )
            print(json.dumps(results))
            print(status)
            return
    results = client.push_file(job, file_name, file_contents)
    print(json.dumps(results))
def log_all_children_http(url, resource, namespace):
    """Print the logs for a resource, falling back to the logs of its child jobs.

    :param url: The base URL of the odin server
    :param resource: The resource ID whose logs we want
    :param namespace: The namespace the resource runs in
    """
    client = HttpClient(url=url)
    try:
        print(client.request_logs(resource, namespace))
    except Exception:
        # The resource itself yielded no logs: walk its children and
        # print whatever logs each one has.
        head = find_head(resource)
        data = client.request_data(head)
        for job in data['jobs']['jobs']:
            rid = client.request_data(job)['jobs']['resource_id']
            print('================')
            print(rid)
            print('----------------')
            try:
                logs = client.request_logs(rid, namespace)
                print(logs)
            except Exception:
                try:
                    # If everything still fails, it could be a Kubeflow job child
                    logs = client.request_logs(f'{rid}-master-0', namespace)
                    print(logs)
                except Exception:
                    print('Failed to get log')