def download_path_from_cluster(cluster, girder_token, parent, path,
                               upload=False, include=None, exclude=None):
    """
    Import a path that lives on a cluster into a Girder assetstore.

    :params cluster: The cluster holding the path to import.
    :params girder_token: The Girder token used to access Girder.
    :params parent: The destination folder for the import.
    :params path: The remote path on the cluster to download.
    :params upload: If True the file data is uploaded as well as the
        metadata; defaults to False (metadata only).
    :params include: List of include regexs
    :params exclude: List of exclude regexs,
    """
    # Resolve which assetstore backs this cluster before opening the
    # connection.
    store_url_base = get_assetstore_url_base(cluster)
    store_id = get_assetstore_id(girder_token, cluster)

    with get_connection(girder_token, cluster) as conn:
        download_path(conn, girder_token, parent, path, store_url_base,
                      store_id, upload=upload, include=include,
                      exclude=exclude)
def test_delete_assetstore(self, delete_key, generate_key):
    """Deleting a cluster must also delete the assetstore tied to it."""
    # Create a traditional cluster through the REST API.
    cluster_body = {
        'type': 'trad',
        'name': 'my trad cluster',
        'config': {
            'ssh': {
                'user': '******'
            },
            'host': 'myhost'
        }
    }
    r = self.request('/clusters', method='POST', type='application/json',
                     body=json.dumps(cluster_body), user=self._user)
    self.assertStatus(r, 201)
    cluster = r.json
    cluster_id = cluster['_id']

    # Create an assetstore for this cluster
    assetstore_body = {
        'name': cluster['_id'],
        'host': cluster['config']['host'],
        'user': '******',
        'authKey': cluster['_id']
    }
    create_url = '/%s' % get_assetstore_url_base(cluster)
    r = self.request(create_url, type='application/json', method='POST',
                     body=json.dumps(assetstore_body), user=self._user)
    self.assertStatusOk(r)
    cluster['assetstoreId'] = r.json['_id']

    # Patch the cluster so it is associated with the assetstore
    patch_cluster = {
        'assetstoreId': str(r.json['_id'])
    }
    r = self.request('/clusters/%s' % str(cluster_id),
                     type='application/json', method='PATCH',
                     body=json.dumps(patch_cluster), user=self._user)
    self.assertStatusOk(r)

    # Delete the cluster and confirm it is gone.
    r = self.request('/clusters/%s' % str(cluster_id), method='DELETE',
                     user=self._user)
    self.assertStatusOk(r)
    r = self.request('/clusters/%s' % str(cluster_id), method='GET',
                     user=self._user)
    self.assertStatus(r, 404)

    # Assert that assetstore is gone
    self.assertIsNone(
        self.model('assetstore').load(cluster['assetstoreId']))
# NOTE(review): this definition appears twice in the file (identical copies)
# — the duplicate should probably be removed; confirm against version control.
def test_delete_assetstore (self, delete_key, generate_key):
    """
    Verify that deleting a cluster also removes its associated assetstore.

    delete_key / generate_key are injected by the test's mock decorators
    (not visible in this chunk) and are not used directly in the body.
    """
    # Create a traditional ('trad') cluster through the REST API.
    body = {
        'type': 'trad',
        'name': 'my trad cluster',
        'config': {
            'ssh': {
                'user': '******'
            },
            'host': 'myhost'
        }
    }
    json_body = json.dumps(body)
    r = self.request('/clusters', method='POST', type='application/json',
                     body=json_body, user=self._user)
    self.assertStatus(r, 201)
    cluster = r.json
    cluster_id = cluster['_id']

    # Create an assetstore for this cluster
    url_base = get_assetstore_url_base(cluster)
    create_url = '/%s' % url_base
    body = {
        'name': cluster['_id'],
        'host': cluster['config']['host'],
        'user': '******',
        'authKey': cluster['_id']
    }
    json_body = json.dumps(body)
    r = self.request(create_url, type='application/json', method='POST',
                     body=json_body, user=self._user)
    self.assertStatusOk(r)
    cluster['assetstoreId'] = r.json['_id']

    # Patch the cluster so it is associated with the assetstore
    patch_cluster = {
        'assetstoreId': str(r.json['_id'])
    }
    r = self.request('/clusters/%s' % str(cluster_id),
                     type='application/json', method='PATCH',
                     body=json.dumps(patch_cluster), user=self._user)
    self.assertStatusOk(r)

    # Delete the cluster and confirm a subsequent GET 404s.
    r = self.request('/clusters/%s' % str(cluster_id), method='DELETE',
                     user=self._user)
    self.assertStatusOk(r)
    r = self.request('/clusters/%s' % str(cluster_id), method='GET',
                     user=self._user)
    self.assertStatus(r, 404)

    # Assert that assetstore is gone
    self.assertIsNone(self.model('assetstore').load(cluster['assetstoreId']))
def upload_job_output_to_folder(cluster, job, log_write_url=None,
                                job_dir=None, girder_token=None):
    """
    Import a job's declared output paths from the cluster into Girder
    folders, then advance the job's state.

    :param cluster: The cluster the job ran on.
    :param job: The job document; each entry in job['output'] carrying both
        'folderId' and 'path' is downloaded.
    :param log_write_url: Url passed through to the job status machinery.
    :param job_dir: Directory the job ran in; defaults to job['dir'].
    :param girder_token: The Girder token to use to access Girder.
    """
    status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job['_id'])
    headers = {'Girder-Token': girder_token}
    assetstore_base_url = get_assetstore_url_base(cluster)
    assetstore_id = get_assetstore_id(girder_token, cluster)

    if not job_dir:
        job_dir = job['dir']

    try:
        with get_connection(girder_token, cluster) as conn:
            for output in job['output']:
                # Only outputs that name both a target folder and a path
                # are importable; others are skipped silently.
                if 'folderId' in output and 'path' in output:
                    folder_id = output['folderId']
                    path = os.path.join(job_dir, output['path'])
                    download_path(conn, girder_token, folder_id, path,
                                  assetstore_base_url, assetstore_id)
    except HttpError as e:
        # On a Girder HTTP failure mark the job errored (locally and via
        # PATCH) and log the response body; execution then continues below,
        # so an errored job can still trigger cluster termination.
        job['status'] = JobState.ERROR
        url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
        logger = get_post_logger('job', girder_token, url)
        logger.exception(e.responseText)
        r = requests.patch(status_url, headers=headers,
                           json={'status': JobState.ERROR})
        check_status(r)

    if _get_on_complete(job) == 'terminate':
        cluster_log_url = '%s/clusters/%s/log' % \
            (cumulus.config.girder.baseUrl, cluster['_id'])
        command.send_task('cumulus.tasks.cluster.terminate_cluster',
                          args=(cluster,),
                          kwargs={'log_write_url': cluster_log_url,
                                  'girder_token': girder_token})

    # If we were uploading, move the job to the complete state.
    # NOTE(review): conn is referenced here after the `with` block above has
    # exited, so the connection may already be closed when passed to
    # from_string — confirm get_connection's context-exit semantics.
    if job['status'] == JobState.UPLOADING:
        job_status = from_string(job['status'], task=None, cluster=cluster,
                                 job=job, log_write_url=log_write_url,
                                 girder_token=girder_token, conn=conn)
        job_status = Complete(job_status)
        job_status = job_status.next(JobQueueState.COMPLETE)
        job_status.run()
        r = requests.patch(status_url, headers=headers,
                           json={'status': str(job_status)})
        check_status(r)
# NOTE(review): this definition appears twice in the file (identical copies)
# — the duplicate should probably be removed; confirm against version control.
def upload_job_output_to_folder(cluster, job, log_write_url=None,
                                job_dir=None, girder_token=None):
    """
    Import a job's declared output paths from the cluster into Girder
    folders, then advance the job's state.

    :param cluster: The cluster the job ran on.
    :param job: The job document; each entry in job['output'] carrying both
        'folderId' and 'path' is downloaded.
    :param log_write_url: Url passed through to the job status machinery.
    :param job_dir: Directory the job ran in; defaults to job['dir'].
    :param girder_token: The Girder token to use to access Girder.
    """
    status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job['_id'])
    headers = {'Girder-Token': girder_token}
    assetstore_base_url = get_assetstore_url_base(cluster)
    assetstore_id = get_assetstore_id(girder_token, cluster)

    if not job_dir:
        job_dir = job['dir']

    try:
        with get_connection(girder_token, cluster) as conn:
            for output in job['output']:
                # Only outputs that name both a target folder and a path
                # are importable; others are skipped silently.
                if 'folderId' in output and 'path' in output:
                    folder_id = output['folderId']
                    path = os.path.join(job_dir, output['path'])
                    download_path(conn, girder_token, folder_id, path,
                                  assetstore_base_url, assetstore_id)
    except HttpError as e:
        # On a Girder HTTP failure mark the job errored (locally and via
        # PATCH) and log the response body; execution then continues below,
        # so an errored job can still trigger cluster termination.
        job['status'] = JobState.ERROR
        url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
        logger = get_post_logger('job', girder_token, url)
        logger.exception(e.responseText)
        r = requests.patch(status_url, headers=headers,
                           json={'status': JobState.ERROR})
        check_status(r)

    if _get_on_complete(job) == 'terminate':
        cluster_log_url = '%s/clusters/%s/log' % \
            (cumulus.config.girder.baseUrl, cluster['_id'])
        command.send_task('cumulus.tasks.cluster.terminate_cluster',
                          args=(cluster,),
                          kwargs={'log_write_url': cluster_log_url,
                                  'girder_token': girder_token})

    # If we were uploading, move the job to the complete state.
    # NOTE(review): conn is referenced here after the `with` block above has
    # exited, so the connection may already be closed when passed to
    # from_string — confirm get_connection's context-exit semantics.
    if job['status'] == JobState.UPLOADING:
        job_status = from_string(job['status'], task=None, cluster=cluster,
                                 job=job, log_write_url=log_write_url,
                                 girder_token=girder_token, conn=conn)
        job_status = Complete(job_status)
        job_status = job_status.next(JobQueueState.COMPLETE)
        job_status.run()
        r = requests.patch(status_url, headers=headers,
                           json={'status': str(job_status)})
        check_status(r)
# NOTE(review): this definition appears twice in the file (identical copies)
# — the duplicate should probably be removed; confirm against version control.
def download_path_from_cluster(cluster, girder_token, parent, path,
                               upload=False, include=None, exclude=None):
    """
    Download a given path on a cluster into an assetstore.

    :params cluster: The cluster to download the path from.
    :params girder_token: The Girder token to use to access Girder.
    :params parent: The target folder to import the path into.
    :params path: The path on the cluster to download.
    :params upload: Indicate if the import should upload the file data or
        just the metadata, the default is False.
    :params include: List of include regexs
    :params exclude: List of exclude regexs,
    """
    # Resolve the assetstore endpoint and id associated with this cluster.
    assetstore_base_url = get_assetstore_url_base(cluster)
    assetstore_id = get_assetstore_id(girder_token, cluster)

    with get_connection(girder_token, cluster) as conn:
        download_path(conn, girder_token, parent, path, assetstore_base_url,
                      assetstore_id, upload=upload, include=include,
                      exclude=exclude)