def globus_download_files(client: "globus_sdk.TransferClient", endpoint_id: str, files: tuple) -> None:
    """Download any files in the list that do not already exist locally.

    Each remote file is transferred individually from the given endpoint to
    GLOBUS_LOCAL_ENDPOINT_ID, saved under LOCAL_SAVE_PATH using its base name.
    Files whose local copy already exists are skipped.

    Arguments:
        client: the Globus transfer client to use
        endpoint_id: the ID of the source endpoint to access
        files: the remote file paths to fetch

    Raises:
        RuntimeError: if one or more files could not be retrieved; each
            individual failure is also logged as a warning first.
    """
    # Map remote path -> local save path for files we don't have yet
    file_transfers = {}
    for one_file in files:
        globus_save_path = os.path.join(LOCAL_SAVE_PATH, os.path.basename(one_file))
        if not os.path.exists(globus_save_path):
            file_transfers[one_file] = globus_save_path

    if not file_transfers:
        return

    have_exception = False
    # Transfer one file at a time so a single failure doesn't abort the rest
    for cnt, (remote_path, save_path) in enumerate(file_transfers.items(), start=1):
        try:
            logging.info("Trying transfer %s: %s", str(cnt), str(remote_path))
            transfer_setup = globus_sdk.TransferData(
                client, endpoint_id, GLOBUS_LOCAL_ENDPOINT_ID,
                label="Get image file", sync_level="checksum")
            transfer_setup.add_item(remote_path, save_path)
            transfer_request = client.submit_transfer(transfer_setup)
            # task_wait() returns False when the task is still active at timeout
            task_result = client.task_wait(
                transfer_request['task_id'], timeout=600, polling_interval=5)
            if not task_result:
                # Message fixed: this retrieves image files, not JSON metadata
                raise RuntimeError("Unable to retrieve file: %s" % remote_path)
            if not os.path.exists(save_path):
                raise RuntimeError(
                    "Unable to find downloaded file at: %s" % save_path)
        except RuntimeError as ex:
            have_exception = True
            logging.warning("Failed to get image: %s", str(ex))
    if have_exception:
        raise RuntimeError("Unable to retrieve all files individually")
class UOCloudTransferClient:
    """Drives Globus transfers between two named endpoints, authenticating
    with client credentials taken from a ``UOCloudSyncConfig``."""

    def __init__(self, config: UOCloudSyncConfig):
        # Exchange the configured client id/secret for a transfer-scoped
        # authorizer and build the client reused by every operation.
        app_client = ConfidentialAppAuthClient(
            client_id=config.get_client_id(),
            client_secret=config.get_client_secret())
        transfer_scope = "urn:globus:auth:scope:transfer.api.globus.org:all"
        authorizer = ClientCredentialsAuthorizer(app_client, transfer_scope)
        self._transfer_client = TransferClient(authorizer=authorizer)
        self._src_endpoint = None
        self._dest_endpoint = None

    def get_endpoint_id(self, endpoint_name: str):
        """Return the id of the first endpoint matched by a full-text search
        for ``endpoint_name``, or None when nothing matches.

        NOTE: the first search hit is assumed to be the right endpoint.
        """
        hits = self._transfer_client.endpoint_search(filter_fulltext=endpoint_name)
        return next((hit['id'] for hit in hits), None)

    def transfer_data(self, src_endpoint: str, src_path: Union[str, Path, PathLike],
                      dest_endpoint: str, dest_path: Union[str, Path, PathLike]):
        """Recursively transfer ``src_path`` on ``src_endpoint`` to
        ``dest_path`` on ``dest_endpoint``, blocking until the task completes
        and then listing the transferred files."""
        self._src_endpoint = src_endpoint
        self._dest_endpoint = dest_endpoint

        src_endpoint_id = self.get_endpoint_id(src_endpoint)
        if not src_endpoint_id:
            print(f'ERROR: Unable to find source endpoint id for: "{self._src_endpoint}"')
            return

        dest_endpoint_id = self.get_endpoint_id(dest_endpoint)
        if not dest_endpoint_id:
            print(f'ERROR: Unable to find destination endpoint id for: "{self._dest_endpoint}"')
            return

        # Always encrypt data in flight.
        transfer_spec = TransferData(self._transfer_client, src_endpoint_id,
                                     dest_endpoint_id, encrypt_data=True)
        transfer_spec.add_item(src_path, dest_path, recursive=True)
        try:
            print(
                f'Submitting a transfer task from {self._src_endpoint}:{src_path} to {self._dest_endpoint}:{dest_path}')
            task = self._transfer_client.submit_transfer(transfer_spec)
        except TransferAPIError as e:
            print(str(e))
            sys.exit(1)

        task_id = task['task_id']
        print(f'\tWaiting for transfer to complete with task_id: {task_id}')
        # task_wait() returns False while the task is still active; print a
        # progress dot for each hour-long wait cycle.
        while not self._transfer_client.task_wait(task_id=task_id,
                                                  timeout=3600,
                                                  polling_interval=60):
            print('.', end='')

        print('Transferred files:')
        for info in self._transfer_client.task_successful_transfers(
                task_id=task_id, num_results=None):
            print("\t{} -> {}".format(info["source_path"], info["destination_path"]))
def _upload_to_irods(save_path: str) -> None:
    """Upload a single downloaded file to iRODS with `iput -K -f`.

    Runs `iput` from the file's directory and restores the previous working
    directory afterwards, even on unexpected exceptions (the original code
    leaked the chdir on non-RuntimeError failures).

    Raises:
        RuntimeError: if `iput` reports a non-zero exit status.
    """
    local_dir = os.getcwd()
    os.chdir(os.path.dirname(save_path))
    try:
        # Was print("...: %s", save_path), which printed a literal "%s";
        # use logging with lazy %-args like the rest of this module
        logging.info("Uploading file to irods: %s", save_path)
        resp = subprocess.run(
            ['iput', '-K', '-f', os.path.basename(save_path)],
            stdout=subprocess.PIPE)
        if resp.returncode != 0:
            raise RuntimeError("Unable to load file to iRODS %s" % save_path)
    finally:
        os.chdir(local_dir)


def globus_download_files(client: "globus_sdk.TransferClient", endpoint_id: str, files: tuple) -> None:
    """Download files via Globus and push each one into iRODS.

    Files already present locally are skipped. Each remaining file is
    transferred individually from the given endpoint into LOCAL_SAVE_PATH,
    uploaded to the iRODS collection IRODS_LOCATION, and then removed from
    local disk.

    Arguments:
        client: the Globus transfer client to use
        endpoint_id: the ID of the source endpoint to access
        files: the remote file paths to fetch

    Raises:
        RuntimeError: if the iRODS collection can't be selected, or if one
            or more files could not be retrieved/uploaded (individual
            failures are logged as warnings first).
    """
    # Map remote path -> local save path for files not already downloaded
    file_transfers = {}
    for one_file in files:
        globus_save_path = os.path.join(LOCAL_SAVE_PATH, os.path.basename(one_file))
        if not os.path.exists(globus_save_path):
            file_transfers[one_file] = globus_save_path

    if not file_transfers:
        return

    # Select the destination iRODS collection before any uploads
    resp = subprocess.run(['icd', IRODS_LOCATION], stdout=subprocess.PIPE)
    if resp.returncode != 0:
        raise RuntimeError("Unable to change to iRODS location %s" % IRODS_LOCATION)

    have_exception = False
    for cnt, (remote_path, save_path) in enumerate(file_transfers.items(), start=1):
        try:
            logging.info("Trying transfer %s: %s", str(cnt), str(remote_path))
            transfer_setup = globus_sdk.TransferData(
                client, endpoint_id, GLOBUS_LOCAL_ENDPOINT_ID,
                label="Get image file", sync_level="checksum")
            transfer_setup.add_item(remote_path, save_path)
            transfer_request = client.submit_transfer(transfer_setup)
            # task_wait() returns False when the task is still active at timeout
            task_result = client.task_wait(
                transfer_request['task_id'], timeout=600, polling_interval=5)
            if not task_result:
                raise RuntimeError("Unable to retrieve file: %s" % remote_path)
            if not os.path.exists(save_path):
                raise RuntimeError(
                    "Unable to find downloaded file at: %s" % save_path)
            _upload_to_irods(save_path)
            print(" removing uploaded file")
            os.remove(save_path)
        except RuntimeError as ex:
            have_exception = True
            logging.warning("Failed to get image: %s", str(ex))
    if have_exception:
        raise RuntimeError("Unable to retrieve all files individually")
class GlobusStorageManager:
    """Globus transfer-API wrapper.

    Handles native-app OAuth (the refresh token is persisted in
    dj.config['custom']['globus.token']) and exposes basic endpoint and
    file operations (ls/mkdir/rmdir/cp/rename).

    See https://globus-sdk-python.readthedocs.io/en/stable/clients/transfer/
    """

    app_id = 'b2fe5703-edb0-4f7f-80a6-2147c8ae35f0'  # map transfer app id

    class GlobusQueue:
        """Placeholder for globus async helpers."""
        pass

    def __init__(self):
        # Start a native-app OAuth2 flow; reuse the stored refresh token
        # when one exists, otherwise run the interactive login.
        self.auth_client = NativeAppAuthClient(self.app_id)
        self.auth_client.oauth2_start_flow(refresh_tokens=True)
        self.xfer_client = None
        custom = dj.config.get('custom', None)
        if custom and 'globus.token' in custom:
            self.refresh()
        else:
            self.login()

    # authentication methods

    def login(self):
        """Interactively fetch a refresh token and persist it in
        dj.config['custom']['globus.token']."""
        auth_client = self.auth_client
        print('Please login via: {}'.format(
            auth_client.oauth2_get_authorize_url()))
        code = input('and enter code:').strip()
        tokens = auth_client.oauth2_exchange_code_for_tokens(code)

        xfer_cfg = tokens.by_resource_server['transfer.api.globus.org']
        authorizer = RefreshTokenAuthorizer(
            xfer_cfg['refresh_token'], auth_client,
            access_token=xfer_cfg['access_token'],
            expires_at=xfer_cfg['expires_at_seconds'])
        self.xfer_client = TransferClient(authorizer=authorizer)

        custom = dj.config.get('custom', {})
        custom['globus.token'] = xfer_cfg['refresh_token']
        dj.config['custom'] = custom

    def refresh(self):
        """Build the transfer client from the stored refresh token."""
        authorizer = RefreshTokenAuthorizer(
            dj.config['custom']['globus.token'], self.auth_client,
            access_token=None, expires_at=None)
        self.xfer_client = TransferClient(authorizer=authorizer)

    # endpoint management / utility methods

    @classmethod
    def ep_parts(cls, endpoint_path):
        """Split an 'endpoint:/path' string into (endpoint, path)."""
        endpoint, _, path = endpoint_path.partition(':')
        return endpoint, path

    def activate_endpoint(self, endpoint):
        """Auto-activate an endpoint; raise when activation fails outright."""
        client = self.xfer_client
        r = client.endpoint_autoactivate(endpoint, if_expires_in=3600)
        log.debug('activate_endpoint() code: {}'.format(r['code']))
        if r['code'] == 'AutoActivationFailed':
            print('Endpoint({}) Not Active! Error! Source message: {}'
                  .format(endpoint, r['message']))
            raise Exception('globus endpoint activation failure')
        knownok = ('AutoActivated' in r['code']
                   or 'AlreadyActivated' in r['code'])
        if not knownok:
            log.debug('activate_endpoint(): not knownok response')

    def _wait(self, task, timeout=10, polling_interval=10):
        """Transfer-client common task_wait wrapper."""
        return self.xfer_client.task_wait(task, timeout, polling_interval)

    def _tasks(self):
        """
        >>> tl = tc.task_list(num_results=25, filter="type:TRANSFER,DELETE")
        >>> _ = [print(t["task_id"], t["type"], t["status"]) for t in tl]
        """
        pass

    def _task_info(self):
        """
        >>> for event in tc.task_event_list(task_id):
        >>>     print("Event on Task({}) at {}:\n{}".format(
        >>>         task_id, event["time"], event["description"])

        or get_task
        """
        pass

    # transfer methods

    def ls(self, endpoint_path):
        """List 'endpoint:/path'; returns the transfer API 'file_list'
        response — a dict whose 'DATA' entries describe files/dirs (name,
        type, size, permissions, last_modified, ...)."""
        endpoint, path = self.ep_parts(endpoint_path)
        return self.xfer_client.operation_ls(endpoint, path=path)

    def mkdir(self, ep_path):
        """Create a directory at 'endpoint:/path'."""
        endpoint, path = self.ep_parts(ep_path)
        return self.xfer_client.operation_mkdir(endpoint, path=path)

    def rmdir(self, ep_path, recursive=False):
        """Remove a directory at 'endpoint:/path'; blocks via _wait()."""
        client = self.xfer_client
        endpoint, path = self.ep_parts(ep_path)
        ddata = DeleteData(client, endpoint, recursive=recursive)
        ddata.add_item(path)
        submitted = client.submit_delete(ddata)
        return self._wait(submitted['task_id'])

    def cp(self, src_ep_path, dst_ep_path, recursive=False):
        """Copy one 'endpoint:/path' to another; blocks via _wait().

        todo: support label, sync_level, etc?
        sync_level: ["exists", "size", "mtime", "checksum"]
        """
        client = self.xfer_client
        src_endpoint, src_path = self.ep_parts(src_ep_path)
        dst_endpoint, dst_path = self.ep_parts(dst_ep_path)
        td = TransferData(client, src_endpoint, dst_endpoint)
        td.add_item(src_path, dst_path, recursive=recursive)
        submitted = client.submit_transfer(td)
        return self._wait(submitted['task_id'])

    def rename(self, src_ep_path, dst_ep_path):
        """Rename a file/path within a single endpoint."""
        client = self.xfer_client
        src_endpoint, src_path = self.ep_parts(src_ep_path)
        dst_endpoint, dst_path = self.ep_parts(dst_ep_path)
        if src_endpoint != dst_endpoint:
            raise Exception('rename between two different endpoints')
        return client.operation_rename(src_endpoint, src_path, dst_path)
# Create the transfer client; abort if the provided auth info is rejected.
# (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
try:
    tc = TransferClient(authorizer=authorizer)
except Exception:
    print(
        "ERROR: TransferClient() call failed! Unable to call the Globus transfer interface with the provided auth info!"
    )
    sys.exit(-1)

# Now we should have auth, try setting up a transfer.
tdata = TransferData(tc, source_endpoint_id, destination_endpoint_id,
                     label="DCDE Relion transfer", sync_level="size")
tdata.add_item(source_dir, dest_dir, recursive=True)
transfer_result = tc.submit_transfer(tdata)
print("task_id =", transfer_result["task_id"])

# task_wait() returns False while the task is still active; print a progress
# dot for each 20-minute wait cycle.
while not tc.task_wait(
        transfer_result['task_id'], timeout=1200, polling_interval=10):
    print(".", end="")
print("\n{} completed!".format(transfer_result['task_id']))
# NOTE(review): result is discarded — presumably just forces a directory
# refresh/check on the destination; confirm whether it should be printed.
os.listdir(path=dest_dir)
def do_job(self, tokens, task_color, stage_in_source, stage_in_dest, stage_out_dest, stage_in_source_path, stage_in_dest_path, stage_out_dest_path):
    """Run one staged job: stage-in transfer, fake work, stage-out transfer.

    Progress (and refreshed access tokens) is reported to the local
    messenger service; a failed transfer rejects the celery task without
    requeueing.
    """
    messenger_url = 'http://localhost:8081/api/messenger'
    json_headers = {'content-type': 'application/json'}

    def post_refresh_message(token_data):
        # on_refresh hook: forward the new access token to the messenger.
        print("I got called")
        requests.post(messenger_url, headers=json_headers,
                      data=json.dumps({
                          'key_message': token_data.by_resource_server['transfer.api.globus.org']['access_token'],
                          'task_id': task_id,
                          'step': '1',
                          'task_color': task_color}))

    auth_client = dill.loads(redis_store.get('auth_client'))
    xfer_tokens = tokens['transfer.api.globus.org']
    authorizer = globus_sdk.RefreshTokenAuthorizer(
        xfer_tokens['refresh_token'], auth_client,
        xfer_tokens['access_token'],
        expires_at=xfer_tokens['expires_at_seconds'],
        on_refresh=post_refresh_message)

    stage_in_destination = stage_in_dest
    stage_out_destination = stage_out_dest
    stage_in_destination_path = stage_in_dest_path
    stage_out_destination_path = stage_out_dest_path
    task_id = do_job.request.id

    def post_status(message, step):
        # Progress report to the messenger service (same payload shape as
        # the original inline requests.post calls).
        requests.post(messenger_url, headers=json_headers,
                      data=json.dumps({'message': message,
                                       'task_id': task_id,
                                       'step': step,
                                       'task_color': task_color}))

    tc = TransferClient(authorizer=authorizer)

    # --- stage in ---
    data = globus_sdk.TransferData(tc, stage_in_source, stage_in_destination,
                                   label="stagein")
    data.add_item(stage_in_source_path, stage_in_destination_path, True)
    status = tc.submit_transfer(data)
    post_status('['+task_id+']Queue wait is done, now initiating Stage in....', '2')
    tc.task_wait(status["task_id"])
    result_in = tc.get_task(status["task_id"])
    complete_status = result_in['status']
    print("The complete status is :")
    print(complete_status)
    if complete_status == "SUCCEEDED":
        post_status('['+task_id+'] Stage In succeeded', '2')
    else:
        post_status('['+task_id+'] Stage In failed, canceling the job..... ', '2')
        # stop and delete the job
        raise Reject("Stage in Failed", requeue=False)

    # --- fake job ---
    post_status('['+task_id+']Running the job', '3')
    time.sleep(3)
    post_status('['+task_id+']Job is done', '3')

    # --- stage out ---
    post_status('['+task_id+'] Initiating Stage out.... ', '4')
    data = globus_sdk.TransferData(tc, stage_in_destination,
                                   stage_out_destination, label="stageout")
    data.add_item(stage_in_destination_path, stage_out_destination_path, True)
    status = tc.submit_transfer(data)
    tc.task_wait(status["task_id"])
    result_in = tc.get_task(status["task_id"])
    complete_status = result_in['status']
    if complete_status == "SUCCEEDED":
        post_status('['+task_id+'] Stage Out succeeded ', '4')
    else:
        post_status('['+task_id+'] Stage Out failed, canceling the job.....', '4')
        raise Reject("Stage out Failed", requeue=False)
def globus_transfer(  # noqa: C901
        remote_endpoint, remote_path, name, transfer_type, non_blocking=False):
    """Transfer `name` between the local machine and a remote HPSS endpoint.

    Arguments:
        remote_endpoint: remote endpoint UUID, or a key of hpss_endpoint_map
        remote_path: remote directory the archive lives in
        name: file name to transfer
        transfer_type: "get" downloads remote_path/name into the CWD;
            any other value uploads CWD/name to remote_path
        non_blocking: when True, return right after submitting the task
            instead of waiting for it to finish

    The local globus endpoint UUID is read from ~/.zstash.ini. If the ini
    file does not exist, an ini file with an empty value is created, and
    the UUID is looked up from the FQDN via regex_endpoint_map. Exits the
    process on unrecoverable errors.
    """
    ini_path = os.path.expanduser("~/.zstash.ini")
    ini = configparser.ConfigParser()
    local_endpoint = None
    if ini.read(ini_path) and "local" in ini.sections():
        local_endpoint = ini["local"].get("globus_endpoint_uuid")
    else:
        # BUG FIX: previously the empty template was only written when the
        # file existed but lacked a [local] section; a missing ini file was
        # never created, contradicting the documented behavior. Now both
        # cases get the template (existing sections are preserved because
        # they were read into `ini` above).
        ini["local"] = {"globus_endpoint_uuid": ""}
        try:
            with open(ini_path, "w") as f:
                ini.write(f)
        except Exception as e:
            logger.error(e)
            sys.exit(1)
    if not local_endpoint:
        # Fall back to mapping the fully-qualified domain name to a known UUID
        fqdn = socket.getfqdn()
        for pattern in regex_endpoint_map.keys():
            if re.fullmatch(pattern, fqdn):
                local_endpoint = regex_endpoint_map.get(pattern)
                break
    if not local_endpoint:
        logger.error(
            "{} does not have the local Globus endpoint set".format(ini_path))
        sys.exit(1)

    # Allow symbolic HPSS endpoint names (e.g. site shorthand) as well as UUIDs
    if remote_endpoint.upper() in hpss_endpoint_map.keys():
        remote_endpoint = hpss_endpoint_map.get(remote_endpoint.upper())

    if transfer_type == "get":
        src_ep = remote_endpoint
        src_path = os.path.join(remote_path, name)
        dst_ep = local_endpoint
        dst_path = os.path.join(os.getcwd(), name)
    else:
        src_ep = local_endpoint
        src_path = os.path.join(os.getcwd(), name)
        dst_ep = remote_endpoint
        dst_path = os.path.join(remote_path, name)

    # Task label: "<sanitized remote subdir> <archive base name>"
    subdir = os.path.basename(os.path.normpath(remote_path))
    subdir_label = re.sub("[^A-Za-z0-9_ -]", "", subdir)
    filename = name.split(".")[0]
    label = subdir_label + " " + filename

    native_client = NativeClient(
        client_id="6c1629cf-446c-49e7-af95-323c6412397f",
        app_name="Zstash",
        default_scopes="openid urn:globus:auth:scope:transfer.api.globus.org:all",
    )
    native_client.login(no_local_server=True, refresh_tokens=True)
    transfer_authorizer = native_client.get_authorizers().get(
        "transfer.api.globus.org")
    tc = TransferClient(transfer_authorizer)

    # Both endpoints must be activated before submitting the transfer
    for ep_id in [src_ep, dst_ep]:
        r = tc.endpoint_autoactivate(ep_id, if_expires_in=600)
        if r.get("code") == "AutoActivationFailed":
            logger.error(
                "The {} endpoint is not activated or the current activation expires soon. Please go to https://app.globus.org/file-manager/collections/{} and (re)activate the endpoint."
                .format(ep_id, ep_id))
            sys.exit(1)

    td = TransferData(
        tc,
        src_ep,
        dst_ep,
        label=label,
        sync_level="checksum",
        verify_checksum=True,
        preserve_timestamp=True,
        fail_on_quota_errors=True,
    )
    td.add_item(src_path, dst_path)
    try:
        task = tc.submit_transfer(td)
    except TransferAPIError as e:
        if e.code == "NoCredException":
            logger.error(
                "{}. Please go to https://app.globus.org/endpoints and activate the endpoint."
                .format(e.message))
        else:
            logger.error(e)
        sys.exit(1)
    except Exception as e:
        logger.error("Exception: {}".format(e))
        sys.exit(1)

    if non_blocking:
        return

    try:
        task_id = task.get("task_id")
        # task_wait returns False while the task is still ACTIVE; poll in
        # 20-second cycles until it reaches SUCCEEDED or FAILED.
        while not tc.task_wait(task_id, 20, 20):
            pass
        task = tc.get_task(task_id)
        if task["status"] == "SUCCEEDED":
            logger.info(
                "Globus transfer {}, from {}{} to {}{} succeeded".format(
                    task_id, src_ep, src_path, dst_ep, dst_path))
        else:
            logger.error("Transfer FAILED")
    except TransferAPIError as e:
        if e.code == "NoCredException":
            logger.error(
                "{}. Please go to https://app.globus.org/endpoints and activate the endpoint."
                .format(e.message))
        else:
            logger.error(e)
        sys.exit(1)
    except Exception as e:
        logger.error("Exception: {}".format(e))
        sys.exit(1)
class TestGlobus(TestZstash):
    def preactivate_globus(self):
        """
        Read the local globus endpoint UUID from ~/.zstash.ini.
        If the ini file does not exist, create an ini file with empty
        values, and try to find the local endpoint UUID based on the FQDN.
        Then log in and pre-activate both endpoints, skipping or failing
        the test when that isn't possible.
        """
        local_endpoint = None
        ini_path = os.path.expanduser("~/.zstash.ini")
        ini = configparser.ConfigParser()
        if ini.read(ini_path):
            if "local" in ini.sections():
                local_endpoint = ini["local"].get("globus_endpoint_uuid")
            else:
                ini["local"] = {"globus_endpoint_uuid": ""}
                try:
                    with open(ini_path, "w") as f:
                        ini.write(f)
                except Exception as e:
                    self.fail(e)
        if not local_endpoint:
            # Fall back to matching this machine's FQDN against known patterns
            fqdn = socket.getfqdn()
            for pattern in regex_endpoint_map.keys():
                if re.fullmatch(pattern, fqdn):
                    local_endpoint = regex_endpoint_map.get(pattern)
                    break
        if not local_endpoint:
            # self.fail("{} does not have the local Globus endpoint set".format(ini_path))
            self.skipTest(
                "{} does not have the local Globus endpoint set".format(
                    ini_path))

        native_client = NativeClient(
            client_id="6c1629cf-446c-49e7-af95-323c6412397f",
            app_name="Zstash",
            default_scopes="openid urn:globus:auth:scope:transfer.api.globus.org:all",
        )
        native_client.login(no_local_server=True, refresh_tokens=True)
        transfer_authorizer = native_client.get_authorizers().get(
            "transfer.api.globus.org")
        self.transfer_client = TransferClient(transfer_authorizer)

        for ep_id in [hpss_globus_endpoint, local_endpoint]:
            resp = self.transfer_client.endpoint_autoactivate(
                ep_id, if_expires_in=600)
            if resp.get("code") == "AutoActivationFailed":
                self.fail(
                    "The {} endpoint is not activated or the current activation expires soon. Please go to https://app.globus.org/file-manager/collections/{} and (re)-activate the endpoint."
                    .format(ep_id, ep_id))

    def delete_files_globus(self):
        """Delete /~/zstash_test/ on the HPSS Globus endpoint, waiting for
        the delete task and handling paused or stuck tasks."""
        ep_id = hpss_globus_endpoint
        resp = self.transfer_client.endpoint_autoactivate(ep_id,
                                                          if_expires_in=60)
        if resp.get("code") == "AutoActivationFailed":
            self.fail(
                "The {} endpoint is not activated. Please go to https://app.globus.org/file-manager/collections/{} and activate the endpoint."
                .format(ep_id, ep_id))

        ddata = DeleteData(self.transfer_client, hpss_globus_endpoint,
                           recursive=True)
        ddata.add_item("/~/zstash_test/")
        try:
            task = self.transfer_client.submit_delete(ddata)
            task_id = task.get("task_id")
            # Poll every 5 seconds; leave the wait loop early when an
            # administrator pause rule stalls the task.
            while not self.transfer_client.task_wait(task_id, 5, 5):
                task = self.transfer_client.get_task(task_id)
                if task.get("is_paused"):
                    break
            # The wait either finished (SUCCEEDED/FAILED) or bailed out
            # while the task is still ACTIVE.
            task = self.transfer_client.get_task(task_id)
            final_status = task["status"]
            if final_status == "ACTIVE":
                if task.get("is_paused"):
                    pause_info = self.transfer_client.task_pause_info(task_id)
                    paused_rules = pause_info.get("pause_rules")
                    reason = paused_rules[0].get("message")
                    message = "The task was paused. Reason: {}".format(reason)
                    print(message)
                else:
                    message = "The task reached a {} second deadline\n".format(
                        24 * 3600)
                    print(message)
                self.transfer_client.cancel_task(task_id)
            elif final_status != "SUCCEEDED":
                print("Globus delete FAILED")
        except TransferAPIError as e:
            if e.code == "NoCredException":
                self.fail(
                    "{}. Please go to https://app.globus.org/endpoints and activate the endpoint."
                    .format(e.message))
            else:
                self.fail(e)
        except Exception as e:
            self.fail("{} - exception: {}".format(self, e))

    def tearDown(self):
        """
        Tear down a test. This is run after every test method.

        After the script has failed or completed, remove all created
        files, even those on the HPSS repo.
        """
        os.chdir(TOP_LEVEL)
        print("Removing test files, both locally and at the HPSS repo")
        # self.cache may appear in any of these directories, but should not
        # appear at the same level as these, so it needs no explicit removal.
        for directory in [self.test_dir, self.backup_dir]:
            if os.path.exists(directory):
                shutil.rmtree(directory)
        if self.hpss_path and self.hpss_path.lower().startswith("globus:"):
            self.delete_files_globus()

    def helperLsGlobus(self, test_name, hpss_path, cache=None,
                       zstash_path=ZSTASH_PATH):
        """
        Test `zstash ls --hpss=globus://...`.
        """
        self.preactivate_globus()
        self.hpss_path = hpss_path
        if cache:
            # Override the default cache location
            self.cache = cache
            cache_option = " --cache={}".format(self.cache)
        else:
            cache_option = ""
        use_hpss = self.setupDirs(test_name)
        self.create(use_hpss, zstash_path, cache=self.cache)
        self.assertWorkspace()
        os.chdir(self.test_dir)
        for option in ["", "-v", "-l"]:
            print_starred("Testing zstash ls {}".format(option))
            cmd = "{}zstash ls{} {} --hpss={}".format(
                zstash_path, cache_option, option, self.hpss_path)
            output, err = run_cmd(cmd)
            self.check_strings(cmd, output + err, ["file0.txt"], ["ERROR"])
        os.chdir(TOP_LEVEL)

    def testLs(self):
        self.helperLsGlobus(
            "testLsGlobus",
            f"globus://{hpss_globus_endpoint}/~/zstash_test/")