def __init__(self, manager, result=None):
    self.manager = manager
    if result:
        self.id = result[Collection.id]
        self.path = result[Collection.name]
        self.name = irods_basename(result[Collection.name])
    self._meta = None

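# These snippets lean on two small path helpers, irods_dirname() and
# irods_basename(). A minimal sketch of what they are assumed to do, for
# reference only; the real implementations may differ:
def irods_dirname(path):
    """Parent collection of an iRODS logical path."""
    return path.rsplit('/', 1)[0]

def irods_basename(path):
    """Last component of an iRODS logical path."""
    return path.rsplit('/', 1)[1]
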
def _download(self, obj, local_path, **options):
    if os.path.isdir(local_path):
        file = os.path.join(local_path, irods_basename(obj))
    else:
        file = local_path

    # Check for force flag if file exists
    if os.path.exists(file) and kw.FORCE_FLAG_KW not in options:
        raise ex.OVERWRITE_WITHOUT_FORCE_FLAG

    with open(file, 'wb') as f, self.open(obj, 'r', **options) as o:
        for chunk in chunks(o, self.READ_BUFFER_SIZE):
            f.write(chunk)

def get(self, path, file=None, **options):
    parent = self.sess.collections.get(irods_dirname(path))

    # TODO: optimize
    if file:
        self._download(path, file, **options)

    query = self.sess.query(DataObject)\
        .filter(DataObject.name == irods_basename(path))\
        .filter(DataObject.collection_id == parent.id)

    results = query.all()  # get up to max_rows replicas
    if len(results) <= 0:
        raise ex.DataObjectDoesNotExist()
    return iRODSDataObject(self, parent, results)

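# Hypothetical usage sketch for the get()/_download() pair above (not part of
# the original snippets). `sess` stands for an already-connected iRODSSession;
# the logical and local paths are placeholders.
import irods.keywords as kw

obj = sess.data_objects.get('/tempZone/home/alice/data.txt')             # fetch replicas/metadata only
sess.data_objects.get('/tempZone/home/alice/data.txt', '/tmp/data.txt',  # download; the force flag
                      **{kw.FORCE_FLAG_KW: ''})                          # allows overwriting a local file
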
def do_mkdir(self, line):
    try:
        opts, args = self.parse_command("mkdir", "", line)
    except self._ConnectionError:
        return

    for arg in args:
        path = self.get_path(arg)
        try:
            self.session.collections.create(path)
        except CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME:
            self.println(
                "... mkdir: cannot create collection `{:}`:"
                " Object exists", irods_basename(path))
            break

def get(self, path, file=None, **options):
    parent = self.sess.collections.get(irods_dirname(path))

    # TODO: optimize
    if file:
        self._download(path, file, **options)

    query = self.sess.query(DataObject)\
        .filter(DataObject.name == irods_basename(path))\
        .filter(DataObject.collection_id == parent.id)\
        .add_keyword(kw.ZONE_KW, path.split('/')[1])

    results = query.all()  # get up to max_rows replicas
    if len(results) <= 0:
        raise ex.DataObjectDoesNotExist()

    # workaround incompatibility of irodsclient-0.8.2 and iRODS server 3.x
    for r in results:
        if DataObject.resc_hier not in r:
            r[DataObject.resc_hier] = None

    return iRODSDataObject(self, parent, results)

def do_cd(self, line):
    """Change the current irods collection."""
    # Parse the new path
    try:
        opts, args = self.parse_command("cd", "", line, noargs=True)
    except self._ConnectionError:
        return

    if not args:
        path = HOME
    else:
        path = self.get_path(args[0])

    # Fetch the corresponding irods collection
    try:
        self.cursor = self.session.collections.get(path)
    except CollectionDoesNotExist:
        self.println("... path `{:}` does not exist",
                     args[0] if args else path)
    else:
        # Update the prompt
        current = irods_basename(self.cursor.path)
        self.prompt = "[trirods@ccirods {:}]$ ".format(current)

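# Hypothetical context for the do_mkdir / do_cd handlers above: they look like
# methods of a shell built on the standard-library cmd.Cmd class, where every
# `do_<name>` method becomes an interactive command and `self.prompt` is the
# string printed before each input line. This sketch only assumes that
# convention; parse_command, get_path, println and self.session are provided
# elsewhere in the real class.
import cmd

class IRodsShellSketch(cmd.Cmd):
    prompt = "[trirods@ccirods ~]$ "

    def do_quit(self, line):
        """Exit the shell."""
        return True

# IRodsShellSketch().cmdloop()  # dispatches `cd foo` to do_cd("foo"), etc.
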
def get(self, path, local_path=None, num_threads=DEFAULT_NUMBER_OF_THREADS,
        **options):
    parent = self.sess.collections.get(irods_dirname(path))

    # TODO: optimize
    if local_path:
        self._download(path, local_path, num_threads=num_threads, **options)

    query = self.sess.query(DataObject)\
        .filter(DataObject.name == irods_basename(path))\
        .filter(DataObject.collection_id == parent.id)\
        .add_keyword(kw.ZONE_KW, path.split('/')[1])

    results = query.all()  # get up to max_rows replicas
    if len(results) <= 0:
        raise ex.DataObjectDoesNotExist()
    return iRODSDataObject(self, parent, results)

def _download(self, obj, local_path, num_threads, **options):
    if os.path.isdir(local_path):
        local_file = os.path.join(local_path, irods_basename(obj))
    else:
        local_file = local_path

    # Check for force flag if local_file exists
    if os.path.exists(local_file) and kw.FORCE_FLAG_KW not in options:
        raise ex.OVERWRITE_WITHOUT_FORCE_FLAG

    with open(local_file, 'wb') as f, self.open(obj, 'r', **options) as o:
        if self.should_parallelize_transfer(num_threads, o):
            f.close()
            if not self.parallel_get(
                    (obj, o), local_path, num_threads=num_threads,
                    target_resource_name=options.get(kw.RESC_NAME_KW, '')):
                raise RuntimeError("parallel get failed")
        else:
            for chunk in chunks(o, self.READ_BUFFER_SIZE):
                f.write(chunk)

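# Hypothetical usage sketch for the threaded get()/_download() variant above
# (not part of the original snippets). `sess` stands for an already-connected
# iRODSSession; the paths and thread count are placeholders. Transfers that
# qualify go through parallel_get(); the rest fall back to the chunked copy.
import irods.keywords as kw

sess.data_objects.get('/tempZone/home/alice/big.dat', '/tmp/big.dat',
                      num_threads=4, **{kw.FORCE_FLAG_KW: ''})
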
def data_access_query(self, path):
    cn = irods_dirname(path)
    dn = irods_basename(path)
    return self.sess.query(DataObject, DataAccess).filter(
        Collection.name == cn, DataObject.name == dn)

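# Hypothetical usage sketch for data_access_query() above (not part of the
# original snippets). `manager` stands for whatever object owns the method,
# and the exact DataAccess column names are an assumption based on the
# irods.models classes used in the query.
for row in manager.data_access_query('/tempZone/home/alice/data.txt'):
    print(row[DataObject.name], row[DataAccess.name])
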
def get_collection_tree(self, base, path, sessions_cleanup=True):
    """
    Lists the folder and file attributes at the input 'path'.

    Parameters
    ----------
    base : str
        The base path to validate; e.g. P000000001/C000000001
    path : str
        The collection's path; e.g. P000000001/C000000001/SubFolder1/Experiment1/
    sessions_cleanup : bool
        If true, the session will be closed after retrieving the values.

    Returns
    -------
    list
        The folder and file attributes at the requested path
    """
    output = []
    base_path = "/nlmumc/projects/" + base
    absolute_path = "/nlmumc/projects/" + path

    if not is_safe_path(base_path, absolute_path):
        raise CAT_NO_ACCESS_PERMISSION

    collection = self.session.collections.get(absolute_path)

    for coll in collection.subcollections:
        # query extra collection info: ctime
        query = self.session.query(iRODSCollection).filter(
            iRODSCollection.id == coll.id)
        try:
            result = query.one()
        except NoResultFound:
            raise CollectionDoesNotExist()
        name = irods_basename(result[iRODSCollection.name])
        ctime = result[iRODSCollection.create_time]
        relative_path = path + "/" + name

        folder_node = {
            "name": name,
            "path": relative_path,
            "type": "folder",
            "size": "--",
            "rescname": "--",
            "ctime": ctime.strftime("%Y-%m-%d %H:%M:%S"),
        }
        output.append(folder_node)

    for data in collection.data_objects:
        # query extra data info: ctime
        query = self.session.query(DataObject).filter(
            DataObject.id == data.id)
        result = query.first()
        if result is None:
            raise DataObjectDoesNotExist()
        ctime = result[DataObject.create_time]
        relative_path = path + "/" + data.name

        data_node = {
            "name": data.name,
            "path": relative_path,
            "type": "file",
            "size": data.size,
            "rescname": data.resource_name,
            "offlineResource": data.resource_name == "arcRescSURF01",
            "ctime": ctime.strftime("%Y-%m-%d %H:%M:%S"),
        }
        output.append(data_node)

    if sessions_cleanup:
        self.session.cleanup()

    return output

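# Hypothetical usage sketch for get_collection_tree() above (not part of the
# original snippets). `api` stands for whatever object owns the method; the
# project/collection identifiers are placeholders.
tree = api.get_collection_tree("P000000001/C000000001",
                               "P000000001/C000000001/SubFolder1")
for node in tree:
    print(node["type"], node["path"], node["ctime"])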