# NOTE(review): this chunk starts at `is_folder`; the sibling copy of this
# code carries @classmethod on it -- confirm the decorator sits just above
# this chunk in the full source before relying on the call convention.
def is_folder(cls, reference):
    """Plain HTTP has no folder concept, so nothing is ever a folder."""
    return False

@classmethod
def get_mtime(cls, reference):
    """Return the Last-Modified time of *reference* decoded via HTTPDate,
    or None when the server did not send the header."""
    response = cls._head(reference)
    mtime = response.getheader('last-modified')
    if mtime is None:
        return None
    return HTTPDate.decode(mtime)

@classmethod
def get_mimetype(cls, reference):
    """Return the media type of *reference* with any parameters (e.g.
    ';charset=...') stripped, or None when no Content-Type was sent."""
    response = cls._head(reference)
    ctype = response.getheader('content-type')
    # Guard: a missing Content-Type header previously raised
    # AttributeError (None.split); report "unknown" as None instead.
    if ctype is None:
        return None
    return ctype.split(';')[0]

@classmethod
def get_size(cls, reference):
    """Return the Content-Length of *reference* as an int, or None when
    the server did not send the header."""
    response = cls._head(reference)
    size = response.getheader('content-length')
    # Guard: int(None) previously raised TypeError when the header was
    # absent (e.g. chunked transfer encoding).
    if size is None:
        return None
    return int(size)

@classmethod
def open(cls, reference, mode=None):
    """Open *reference* for reading.  *mode* is accepted for interface
    compatibility with the other file systems but is ignored -- this
    file system is read-only."""
    reference = str(reference)
    return urlopen(reference)

register_file_system('http', HTTPReadOnlyFS)
@classmethod
def get_names(cls, ref):
    """Return the names of the entries directly below the folder *ref*.

    NOTE(review): the signature is reconstructed -- the original chunk
    begins mid-method; confirm against the full source.
    """
    ref, status, responses = cls._propfind(ref)
    prefix, response = cls._get_response_from_ref(ref, responses)
    prefix_count = len(prefix)
    filenames = []
    # Only the paths (keys) are needed here; the per-path metadata in
    # the values is unused, so iterate the mapping directly instead of
    # over iteritems().
    for path in responses:
        if path.startswith(prefix):  # Can this ever not happen?
            filename = path[prefix_count:]
            # Strip a single leading separator between prefix and name.
            if filename.startswith("/"):
                filename = filename[1:]
            if filename:
                filenames.append(filename)
    # FIXME: since we have the metadata, it makes sense to store it in
    # the cache
    return filenames

import urlparse
urlparse.uses_relative.append('webdav')
urlparse.uses_netloc.append('webdav')
urlparse.uses_query.append('webdav')
urlparse.uses_params.append('webdav')
urlparse.uses_fragment.append('webdav')
register_file_system('webdav', WebDavFS)
@classmethod
def is_folder(cls, reference):
    """Plain HTTP has no folder concept, so nothing is ever a folder."""
    return False

@classmethod
def get_mtime(cls, reference):
    """Return the Last-Modified time of *reference* decoded via HTTPDate,
    or None when the server did not send the header."""
    response = cls._head(reference)
    mtime = response.getheader('last-modified')
    if mtime is None:
        return None
    return HTTPDate.decode(mtime)

@classmethod
def get_mimetype(cls, reference):
    """Return the media type of *reference* with any parameters (e.g.
    ';charset=...') stripped, or None when no Content-Type was sent."""
    response = cls._head(reference)
    ctype = response.getheader('content-type')
    # Guard: a missing Content-Type header previously raised
    # AttributeError (None.split); report "unknown" as None instead.
    if ctype is None:
        return None
    return ctype.split(';')[0]

@classmethod
def get_size(cls, reference):
    """Return the Content-Length of *reference* as an int, or None when
    the server did not send the header."""
    response = cls._head(reference)
    size = response.getheader('content-length')
    # Guard: int(None) previously raised TypeError when the header was
    # absent (e.g. chunked transfer encoding).
    if size is None:
        return None
    return int(size)

@classmethod
def open(cls, reference, mode=None):
    """Open *reference* for reading.  *mode* is accepted for interface
    compatibility with the other file systems but is ignored -- this
    file system is read-only."""
    reference = str(reference)
    return urlopen(reference)

register_file_system('http', HTTPReadOnlyFS)
@classmethod
def get_names(cls, ref):
    """List the entry names immediately under the collection *ref*.

    NOTE(review): signature reconstructed -- the original chunk starts
    mid-method; verify against the full source.
    """
    ref, status, responses = cls._propfind(ref)
    prefix, response = cls._get_response_from_ref(ref, responses)
    cut = len(prefix)

    def relative_name(path):
        # Drop the prefix plus at most one leading slash.
        rest = path[cut:]
        return rest[1:] if rest.startswith("/") else rest

    # Can a returned path ever NOT start with the prefix?
    candidates = [relative_name(p) for p in responses if p.startswith(prefix)]
    # FIXME: since we have the metadata, it makes sense to store it in
    # the cache
    return [name for name in candidates if name]

import urlparse
urlparse.uses_relative.append("webdav")
urlparse.uses_netloc.append("webdav")
urlparse.uses_query.append("webdav")
urlparse.uses_params.append("webdav")
urlparse.uses_fragment.append("webdav")
register_file_system("webdav", WebDavFS)
# NOTE(review): `move` appears undecorated at the start of this chunk; its
# sibling methods are classmethods, so the decorator likely sits just above
# the visible text -- confirm in the full source.
def move(cls, source, target):
    """Rename *source* to *target* on the remote host.

    Refuses to move onto an existing regular file, mirroring the local
    filesystem's "[Errno 20] Not a directory" error.
    """
    if cls.is_file(target):
        raise OSError("[Errno 20] Not a directory: '%s'" % target)
    connection = cls._get_client(source)
    connection.rename(str(source.path), str(target.path))

@classmethod
def get_names(cls, ref):
    """Return the list of entry names directly below the folder *ref*."""
    # Guard clauses: the reference must exist and must be a folder,
    # reported with the same errors the local filesystem would raise.
    if not cls.exists(ref):
        raise OSError("[Errno 2] No such file or directory: '%s'" % ref)
    if not cls.is_folder(ref):
        raise OSError("[Errno 20] Not a directory: '%s'" % ref)
    connection = cls._get_client(ref)
    names = connection.listdir(str(ref.path))
    if cls.debug:
        dprint(names)
    return names

try:
    import paramiko
    import urlparse
    urlparse.uses_relative.append('sftp')
    urlparse.uses_netloc.append('sftp')
    urlparse.uses_query.append('sftp')
    urlparse.uses_params.append('sftp')
    urlparse.uses_fragment.append('sftp')
    register_file_system('sftp', SFTPFS)
except ImportError:
    # paramiko is optional: without it the sftp scheme simply is not
    # registered.
    pass