def fetch_job_datasets(endpoint, token):
    """Fetch all job datasets from ragnarok.

    Args:
        endpoint: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Returns:
        List of job-dataset records from the 'results' field of the
        response, or an empty list if the request fails.
    """
    endpoint = f'{endpoint}/api/v1/job-data-sets/'
    r = requests.get(endpoint, headers=auth_headers(token))
    if r.status_code != 200:
        log.warning('Unable to fetch job datasets')
        return []
    # r.json() parses the body directly; equivalent to json.loads(r.text)
    return r.json()['results']
def create_uploaded_dataset(name, path, url, token):
    """Upload a local dataset file to ragnarok.

    Args:
        name: name to give the uploaded dataset.
        path: local filesystem path of the file to upload.
        url: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Logs a warning on failure; logs an info message on success.
    """
    endpoint = f'{url}/api/v1/uploaded-data-sets/'
    data = {'name': name}
    # Context manager closes the file handle even if the POST raises;
    # the original leaked the handle by never closing it.
    with open(path, 'rb') as upload_file:
        files = {'file': upload_file}
        r = requests.post(endpoint, data=data, headers=auth_headers(token), files=files)
    if r.status_code != 201:
        log.warning(f'unable to create dataset {name} from {path}')
        return
    log.info(f'created dataset {name} from {path}')
def fetch_dataset(name, path, dataset_type, url, token):
    """Fetch a dataset from ragnarok and download it as a zip archive.

    Looks up the dataset by name, requests a download link from the API,
    and downloads the archive into `path` under a slugified filename.

    Args:
        name: dataset name to search for (must match exactly one record).
        path: local directory the zip is downloaded into.
        dataset_type: dataset flavor used in the API route (e.g. 'generated').
        url: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Logs a warning and returns early on any failure.
    """
    endpoint = f'{url}/api/v1/{dataset_type}-data-sets/'
    params = {'name': name}
    r = requests.get(endpoint, params=params, headers=auth_headers(token))
    if r.status_code != 200:
        log.warning(f'Unable to fetch {dataset_type} datasets')
        return
    response = r.json()
    # Require an unambiguous match before downloading anything
    if response['count'] != 1:
        log.warning(f'Unable to find {dataset_type} dataset with name "{name}"')
        return
    dataset = response['results'][0]
    # Use the server-reported dataset_type for the download route,
    # not the caller-supplied argument.
    endpoint = f"{url}/api/v1/{dataset['dataset_type']}-data-sets/{dataset['id']}/download"
    r = requests.get(endpoint, headers=auth_headers(token))
    if r.status_code != 200:
        log.warning(f"Unable to get download link for dataset {dataset['id']}")
        return
    response = r.json()
    # e.g. "my_dataset-1a2b3c4d.zip" — first 8 id chars keep filenames distinct
    name_slug = f"{dataset['name'].replace(' ', '_')}-{dataset['id'][:8]}.zip"
    output_path = to_pathlib_path(path) / name_slug
    download_url(response['redirect_link'], output_path)
def fetch_scene(name, path, url, token):
    """Fetch a scene from ragnarok and download it as a zip archive.

    Looks up the scene by name, requests a download link from the API,
    and downloads the archive into `path` under a slugified filename.

    Args:
        name: scene name to search for (must match exactly one record).
        path: local directory the zip is downloaded into.
        url: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Logs a warning and returns early on any failure.
    """
    endpoint = f'{url}/api/v1/scenes/'
    params = {'name': name}
    r = requests.get(endpoint, params=params, headers=auth_headers(token))
    if r.status_code != 200:
        # f-prefix removed: message has no placeholders
        log.warning('Unable to fetch scenes')
        return
    response = r.json()
    # Require an unambiguous match before downloading anything
    if response['count'] != 1:
        log.warning(f'Unable to find scene with name "{name}"')
        return
    scene = response['results'][0]
    endpoint = f"{url}/api/v1/scenes/{scene['id']}/download"
    r = requests.get(endpoint, headers=auth_headers(token))
    if r.status_code != 200:
        log.warning(f"Unable to get download link for scene {scene['id']}")
        return
    response = r.json()
    # e.g. "my_scene-1a2b3c4d.zip" — first 8 id chars keep filenames distinct
    name_slug = f"{scene['name'].replace(' ', '_')}-{scene['id'][:8]}.zip"
    output_path = to_pathlib_path(path) / name_slug
    download_url(response['redirect_link'], output_path)
def filter_dataset_url(field, pattern, regex, url, token):
    """ filter generated dataset """
    # Django-style filter query: e.g. ?name__icontains=foo
    endpoint = f'{url}?{field}__{pattern}={regex}'
    resp = requests.get(endpoint, headers=auth_headers(token))
    if resp.status_code != 200:
        log.warning(f"Unable to filter {url}")
        return []
    results = json.loads(resp.text)['results']
    # Collect names purely for the log line; ids are the return value
    names = [entry['name'] for entry in results]
    datasets = [entry['id'] for entry in results]
    log.info(f'filter <{endpoint}> found {names}')
    return datasets
def fetch_jobs(endpoint, token):
    """Fetch all jobs from ragnarok and log them as a table.

    Args:
        endpoint: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Logs a warning and returns early if the request fails.
    """
    endpoint = f'{endpoint}/api/v1/jobs/'
    r = requests.get(endpoint, headers=auth_headers(token))
    if r.status_code != 200:
        log.warning('Unable to fetch jobs')
        return
    jobs = r.json()['results']
    tbl = TableLogger(columns='state,name,operation,created', default_colwidth=30)
    if not jobs:
        # Original logged the literal None here, which rendered as "None"
        log.info('no jobs found')
    for j in jobs:
        tbl(j['state'], j['name'], j['operation'], j['created_at'])
def create_generated_dataset(name, scene_name, config, url, token):
    """Create a generated dataset on ragnarok from an existing scene.

    Looks up the scene by name, then POSTs a new generated-data-set
    record referencing that scene with the given config.

    Args:
        name: name for the new dataset.
        scene_name: name of the scene to generate from (must match exactly one).
        config: generation config; JSON-serialized into the form payload.
        url: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Logs a warning and returns early on any failure.
    """
    endpoint = f'{url}/api/v1/scenes/'
    params = {'name': scene_name}
    r = requests.get(endpoint, params=params, headers=auth_headers(token))
    if r.status_code != 200:
        # f-prefix removed: message has no placeholders
        log.warning('unable to fetch scenes')
        return
    response = r.json()
    # Require an unambiguous scene match before creating anything
    if response['count'] != 1:
        log.warning(f'unable to find scene with name {scene_name}')
        return
    scene = response['results'][0]
    endpoint = f'{url}/api/v1/generated-data-sets/'
    data = {
        'scene': scene['id'],
        # config is serialized because the request is form-encoded
        'config': json.dumps(config),
        'name': name,
    }
    r = requests.post(endpoint, data=data, headers=auth_headers(token))
    if r.status_code != 201:
        log.warning(f'Unable to create dataset {name} for scene {scene_name} with config {config}')
        return
    log.info(f'created dataset {name} for scene {scene_name} with config {config}')
def create_new_job(name, operation, config, datasets, url, token):
    """ create job on ragnarok """
    job_endpoint = f'{url}/api/v1/jobs/'
    # Form-encoded payload; config must be serialized to a JSON string
    payload = {
        'operation': operation,
        'name': name,
        'input_data_sets': datasets,
        'config': json.dumps(config),
    }
    response = requests.post(job_endpoint, data=payload, headers=auth_headers(token))
    if response.status_code == 201:
        log.info(f'created {operation} job {name} {config} on datasets {datasets}')
    else:
        log.warning(
            f'Unable to create {operation} job {name} on datasets {datasets}')
def fetch_scenes(endpoint, token):
    """Fetch all scenes from ragnarok and log them as a table.

    Args:
        endpoint: base URL of the ragnarok API.
        token: auth token used by auth_headers() to build request headers.

    Logs a warning and returns early if the request fails.
    """
    endpoint = f'{endpoint}/api/v1/scenes/'
    r = requests.get(endpoint, headers=auth_headers(token))
    if r.status_code != 200:
        log.warning('Unable to fetch scenes')
        return
    scenes = r.json()['results']
    tbl = TableLogger(columns='state,name,zpy_version,blender_version,created',
                      default_colwidth=30)
    if not scenes:
        # Original logged the literal None here, which rendered as "None"
        log.info('no scenes found')
    for s in scenes:
        tbl(s['state'], s['name'], s['zpy_version'], s['blender_version'], s['created_at'])