Example #1
 def __init__(self, client, name, max_buffer=MAX_BUFFER):
     self.client = client
     r = self.client.get_file(name)
     self.bytes = int(r.getheader('Content-Length'))
     # Spill to a real temporary file when the download exceeds the in-memory buffer limit.
     if self.bytes > max_buffer:
         temp = tempfile.TemporaryFile()
     else:
         temp = StringIO()
     shutil.copyfileobj(r, temp)
     temp.seek(0)
     super(SpooledReader, self).__init__(temp, name)
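Note: the constructor above hand-rolls the spill-to-disk logic; the standard library's tempfile.SpooledTemporaryFile provides the same behaviour. A minimal sketch (the threshold value and the spool_response name are illustrative, not part of the original code):

    import shutil
    import tempfile

    def spool_response(response, max_buffer=5 * 1024 * 1024):
        # Buffer a read()-able response in memory, spilling to a real
        # temporary file once more than max_buffer bytes are written.
        temp = tempfile.SpooledTemporaryFile(max_size=max_buffer)
        shutil.copyfileobj(response, temp)
        temp.seek(0)
        return temp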
Example #2
 def setcontents(self, path, data, chunk_size=1024*64):
     if not isinstance(data, six.binary_type):        
         return super(MemoryFS, self).setcontents(path, data, chunk_size)        
     if not self.exists(path):      
         self.open(path, 'wb').close()                    
         
     dir_entry = self._get_dir_entry(path)
     if not dir_entry.isfile():
         raise ResourceInvalidError('Not a file %(path)s', path)
     new_mem_file = StringIO()        
     new_mem_file.write(data)
     dir_entry.mem_file = new_mem_file                        
    def setcontents(self, path, data=b"", encoding=None, errors=None, chunk_size=1024 * 64):
        if isinstance(data, six.binary_type):
            if not self.exists(path):
                self.open(path, "wb").close()
            dir_entry = self._get_dir_entry(path)
            if not dir_entry.isfile():
                raise ResourceInvalidError("Not a directory %(path)s", path)
            new_mem_file = StringIO()
            new_mem_file.write(data)
            dir_entry.mem_file = new_mem_file
            return len(data)

        return super(MemoryFS, self).setcontents(
            path, data=data, encoding=encoding, errors=errors, chunk_size=chunk_size
        )
Example #4
 def upload(self, path, f):
     if isinstance(f, basestring):
         # upload given string as file's contents.
         f = StringIO(f)
     # Determine the payload length: try len(), then fstat(), then seek()/tell().
     l = None
     try:
         l = len(f)
     except Exception:
         try:
             l = os.fstat(f.fileno()).st_size
         except Exception:
             try:
                 f.seek(0, 2)
                 l = f.tell()
                 f.seek(0)
             except Exception:
                 raise Exception('Could not determine length of file!')
     dirname, basename = pathsplit(path)
     # Resolve the target: the file itself if it exists, otherwise its parent directory.
     try:
         info = self.info(path)
     except Exception:
         try:
             info = self.info(dirname)
         except Exception:
             raise Exception('Cannot upload to non-existent directory!')
     url = '%s/%s/%s' % (ULURL, self.auth_token, info['inode'])
     host = urlparse.urlparse(url).hostname
     # Port 443 implies TLS, so open an HTTPS connection rather than plain HTTP.
     conn = httplib.HTTPSConnection(host, 443)
     boundary = mimetools.choose_boundary()
     fields = {
         'boundary': boundary,
         'mime': mimetypes.guess_type(basename)[0] or 'application/octet-stream',
         'name': basename,
     }
     head = MULTIPART_HEAD % fields
     tail = MULTIPART_TAIL % fields
     l += len(head) + len(tail)
     headers = {
         'Content-Length': l,
         'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
     }
     conn.request('POST', url, '', headers)
     # now stream the file to box.net.
     conn.send(head)
     while True:
         data = f.read(4096)
         if not data:
             break
         conn.send(data)
     conn.send(tail)
     r = conn.getresponse()
     if r.status != 200:
         raise Exception('Error uploading data!')
Example #5
 def __setstate__(self, state):
     self.__dict__.update(state)
     if self.type == 'file':
         self.lock = threading.RLock()
     else:
         self.lock = None
     if self.mem_file is not None:
         data = self.mem_file
         self.mem_file = StringIO()
         self.mem_file.write(data)
Example #6
    def __init__(self, fs, path, mode, rfile=None, write_on_flush=True):
        """RemoteFileBuffer constructor.

        The owning filesystem, path and mode must be provided.  If the
        optional argument 'rfile' is provided, it must be a read()-able
        object or a string containing the initial file contents.
        """
        wrapped_file = SpooledTemporaryFile(max_size=self.max_size_in_memory)
        self.fs = fs
        self.path = path
        self.write_on_flush = write_on_flush
        self._changed = False
        self._readlen = 0  # How many bytes already loaded from rfile
        self._rfile = None  # Reference to remote file object
        self._eof = False  # Reached end of rfile?
        if getattr(fs, "_lock", None) is not None:
            self._lock = fs._lock.__class__()
        else:
            self._lock = threading.RLock()

        if "r" in mode or "+" in mode or "a" in mode:
            if rfile is None:
                # File was just created, force to write anything
                self._changed = True
                self._eof = True

            if not hasattr(rfile, "read"):
                #rfile = StringIO(unicode(rfile))
                rfile = StringIO(rfile)

            self._rfile = rfile
        else:
            # Do not use remote file object
            self._eof = True
            self._rfile = None
            self._changed = True
            if rfile is not None and hasattr(rfile,"close"):
                rfile.close()
        super(RemoteFileBuffer,self).__init__(wrapped_file,mode)
        # FIXME: What if mode with position on eof?
        if "a" in mode:
            # Not good enough...
            self.seek(0, SEEK_END)
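Note: the docstring above says rfile may be either a read()-able object or a string; the constructor normalizes the two cases with a hasattr(rfile, "read") check. A standalone sketch of that duck-typing idiom, using the standard library's BytesIO for illustration (the as_readable name is hypothetical):

    from io import BytesIO

    def as_readable(obj):
        # Accept either a file-like object or raw bytes and always
        # return something with a read() method.
        if hasattr(obj, "read"):
            return obj
        return BytesIO(obj)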
Example #7
    def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
        # TODO: chunked transport of large files
        epath = self.encode_path(path)
        if "w" in mode:
            self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
        if "r" in mode or "a" in mode or "+" in mode:
            try:
                data = self.proxy.get_contents(epath, "rb").data
            except IOError:
                if "w" not in mode and "a" not in mode:
                    raise ResourceNotFoundError(path)
                if not self.isdir(dirname(path)):
                    raise ParentDirectoryMissingError(path)
                # The file did not exist yet; create it empty and start from empty contents.
                self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
                data = b("")
        else:
            data = b("")
        f = StringIO(data)
        if "a" not in mode:
            f.seek(0, 0)
        else:
            f.seek(0, 2)
        oldflush = f.flush
        oldclose = f.close
        oldtruncate = f.truncate

        def newflush():
            self._lock.acquire()
            try:
                oldflush()
                self.proxy.set_contents(epath, xmlrpclib.Binary(f.getvalue()))
            finally:
                self._lock.release()

        def newclose():
            self._lock.acquire()
            try:
                f.flush()
                oldclose()
            finally:
                self._lock.release()

        def newtruncate(size=None):
            self._lock.acquire()
            try:
                oldtruncate(size)
                f.flush()
            finally:
                self._lock.release()

        f.flush = newflush
        f.close = newclose
        f.truncate = newtruncate
        return f
Example #8
 def __setstate__(self, state):
     state["bucket"] = RiakBucket(state.pop("bucket"), state.pop("host"), state.pop("port"), state.pop("transport"))
     self.__dict__.update(state)
     if self.type == "file":
         self.lock = threading.RLock()
     else:
         self.lock = None
     if self._mem_file is not None:
         data = self._mem_file
         self._mem_file = StringIO()
         self._mem_file.write(data)
Example #9
    def __init__(self, type, name, contents=None):

        assert type in ("dir", "file"), "Type must be dir or file!"

        self.type = type
        self.name = name

        if contents is None and type == "dir":
            contents = {}

        self.open_files = []
        self.contents = contents
        self.mem_file = None
        self.created_time = datetime.datetime.now()
        self.modified_time = self.created_time
        self.accessed_time = self.created_time

        self.xattrs = {}

        self.lock = None
        if self.type == 'file':
            self.mem_file = StringIO()
            self.lock = threading.RLock()
Example #10
    def open(self,
             path,
             mode='r',
             buffering=-1,
             encoding=None,
             errors=None,
             newline=None,
             line_buffering=False,
             **kwargs):
        path = normpath(relpath(path))

        if 'r' in mode:
            if self.zip_mode not in 'ra':
                raise OperationFailedError(
                    "open file",
                    path=path,
                    msg="Zip file must be opened for reading ('r') or appending ('a')"
                )
            try:
                if hasattr(self.zf, 'open') and self._zip_file_string:
                    #return self.zf.open(self._encode_path(path), "r")
                    return self.zf.open(self._encode_path(path),
                                        'rU' if 'U' in mode else 'r')
                else:
                    contents = self.zf.read(self._encode_path(path))
            except KeyError:
                raise ResourceNotFoundError(path)
            return StringIO(contents)

        if 'w' in mode:
            if self.zip_mode not in 'wa':
                raise OperationFailedError(
                    "open file",
                    path=path,
                    msg="Zip file must be opened for writing ('w') or appending ('a')"
                )
            dirname, _filename = pathsplit(path)
            if dirname:
                self.temp_fs.makedir(dirname,
                                     recursive=True,
                                     allow_recreate=True)

            self._add_resource(path)
            f = _TempWriteFile(self.temp_fs, path, self._on_write_close)
            return f

        raise ValueError("Mode must contain be 'r' or 'w'")
Example #11
    def setcontents(self,
                    path,
                    data=b'',
                    encoding=None,
                    errors=None,
                    chunk_size=1024 * 64,
                    **kwargs):
        if isinstance(data, six.binary_type):
            if not self.exists(path):
                self.open(path, 'wb').close()
            dir_entry = self._get_dir_entry(path)
            if not dir_entry.isfile():
                raise ResourceInvalidError('Not a file %(path)s', path)
            new_mem_file = StringIO()
            new_mem_file.write(data)
            dir_entry.mem_file = new_mem_file
            return len(data)

        return super(MemoryFS, self).setcontents(path,
                                                 data=data,
                                                 encoding=encoding,
                                                 errors=errors,
                                                 chunk_size=chunk_size,
                                                 **kwargs)
Example #12
    def open(self, path, mode="r"):
        # TODO: chunked transport of large files
        path = self.encode_path(path)
        if "w" in mode:
            self.proxy.set_contents(path, xmlrpclib.Binary(""))
        if "r" in mode or "a" in mode or "+" in mode:
            try:
                data = self.proxy.get_contents(path).data
            except IOError:
                if "w" not in mode and "a" not in mode:
                    raise ResourceNotFoundError(path)
                if not self.isdir(dirname(path)):
                    raise ParentDirectoryMissingError(path)
                # The file did not exist yet; create it empty and start from empty contents.
                self.proxy.set_contents(path, xmlrpclib.Binary(""))
                data = ""
        else:
            data = ""
        f = StringIO(data)
        if "a" not in mode:
            f.seek(0, 0)
        else:
            f.seek(0, 2)
        oldflush = f.flush
        oldclose = f.close
        oldtruncate = f.truncate

        def newflush():
            oldflush()
            self.proxy.set_contents(path, xmlrpclib.Binary(f.getvalue()))

        def newclose():
            f.flush()
            oldclose()

        def newtruncate(size=None):
            oldtruncate(size)
            f.flush()

        f.flush = newflush
        f.close = newclose
        f.truncate = newtruncate
        return f
Example #13
    def setcontents(self,
                    path,
                    data=b'',
                    encoding=None,
                    errors=None,
                    chunk_size=1024 * 64):
        # Remove then write contents.  There is no method to erase the contents
        # of a file when writing to it using pysmb.
        try:
            self.remove(path)
        except ResourceNotFoundError:
            pass

        if not hasattr(data, 'read'):
            data = StringIO(data)
        self.conn.storeFile(self.share, path, data)
Example #14
 def open(self, path, mode="r"):
     # TODO: chunked transport of large files
     path = self.encode_path(path)
     if "w" in mode:
         self.proxy.set_contents(path, xmlrpclib.Binary(""))
     if "r" in mode or "a" in mode or "+" in mode:
         try:
             data = self.proxy.get_contents(path).data
         except IOError:
             if "w" not in mode and "a" not in mode:
                 raise ResourceNotFoundError(path)
             if not self.isdir(dirname(path)):
                 raise ParentDirectoryMissingError(path)
             # The file did not exist yet; create it empty and start from empty contents.
             self.proxy.set_contents(path, xmlrpclib.Binary(""))
             data = ""
     else:
         data = ""
     f = StringIO(data)
     if "a" not in mode:
         f.seek(0,0)
     else:
         f.seek(0,2)
     oldflush = f.flush
     oldclose = f.close
     oldtruncate = f.truncate
     def newflush():
         oldflush()
         self.proxy.set_contents(path, xmlrpclib.Binary(f.getvalue()))
     def newclose():
         f.flush()
         oldclose()
     def newtruncate(size=None):
         oldtruncate(size)
         f.flush()
     f.flush = newflush
     f.close = newclose
     f.truncate = newtruncate
     return f
Example #15
    def __init__(self, type, name, contents=None):

        assert type in ("dir", "file"), "Type must be dir or file!"

        self.type = type
        self.name = name

        if contents is None and type == "dir":
            contents = {}

        self.open_files = []
        self.contents = contents
        self.mem_file = None
        self.created_time = datetime.datetime.now()
        self.modified_time = self.created_time
        self.accessed_time = self.created_time

        self.xattrs = {}

        self.lock = None
        if self.type == 'file':
            self.mem_file = StringIO()
            self.lock = threading.RLock()
Example #16
class DirEntry(object):

    def sync(f):
        def deco(self, *args, **kwargs):
            if self.lock is not None:
                try:
                    self.lock.acquire()
                    return f(self, *args, **kwargs)
                finally:
                    self.lock.release()
            else:
                return f(self, *args, **kwargs)
        return deco

    def __init__(self, type, name, contents=None):

        assert type in ("dir", "file"), "Type must be dir or file!"

        self.type = type
        self.name = name

        if contents is None and type == "dir":
            contents = {}

        self.open_files = []
        self.contents = contents
        self.mem_file = None
        self.created_time = datetime.datetime.now()
        self.modified_time = self.created_time
        self.accessed_time = self.created_time

        self.xattrs = {}

        self.lock = None
        if self.type == 'file':
            self.mem_file = StringIO()
            self.lock = threading.RLock()

    def get_value(self):
        self.lock.acquire()
        try:
            return self.mem_file.getvalue()
        finally:
            self.lock.release()
    data = property(get_value)

    def desc_contents(self):
        if self.isfile():
            return "<file %s>" % self.name
        elif self.isdir():
            return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())

    def isdir(self):
        return self.type == "dir"

    def isfile(self):
        return self.type == "file"

    def __str__(self):
        return "%s: %s" % (self.name, self.desc_contents())

    @sync
    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('lock')
        if self.mem_file is not None:
            state['mem_file'] = self.data
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        if self.type == 'file':
            self.lock = threading.RLock()
        else:
            self.lock = None
        if self.mem_file is not None:
            data = self.mem_file
            self.mem_file = StringIO()
            self.mem_file.write(data)
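Note: the sync decorator above simply runs a method while holding the entry's RLock when one exists. A minimal standalone sketch of the same pattern (class and method names are illustrative):

    import threading

    def sync(method):
        # Run the wrapped method under self.lock if the instance has one.
        def wrapper(self, *args, **kwargs):
            if self.lock is None:
                return method(self, *args, **kwargs)
            with self.lock:
                return method(self, *args, **kwargs)
        return wrapper

    class Counter(object):
        def __init__(self):
            self.lock = threading.RLock()
            self.value = 0

        @sync
        def increment(self):
            self.value += 1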
Example #17
    def open(self,
             path,
             mode='r',
             buffering=-1,
             encoding=None,
             errors=None,
             newline=None,
             line_buffering=False,
             **kwargs):
        # TODO: chunked transport of large files
        epath = self.encode_path(path)
        if "w" in mode:
            self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
        if "r" in mode or "a" in mode or "+" in mode:
            try:
                data = self.proxy.get_contents(epath, "rb").data
            except IOError:
                if "w" not in mode and "a" not in mode:
                    raise ResourceNotFoundError(path)
                if not self.isdir(dirname(path)):
                    raise ParentDirectoryMissingError(path)
                # The file did not exist yet; create it empty and start from empty contents.
                self.proxy.set_contents(epath, xmlrpclib.Binary(b("")))
                data = b("")
        else:
            data = b("")
        f = StringIO(data)
        if "a" not in mode:
            f.seek(0, 0)
        else:
            f.seek(0, 2)
        oldflush = f.flush
        oldclose = f.close
        oldtruncate = f.truncate

        def newflush():
            self._lock.acquire()
            try:
                oldflush()
                self.proxy.set_contents(epath, xmlrpclib.Binary(f.getvalue()))
            finally:
                self._lock.release()

        def newclose():
            self._lock.acquire()
            try:
                f.flush()
                oldclose()
            finally:
                self._lock.release()

        def newtruncate(size=None):
            self._lock.acquire()
            try:
                oldtruncate(size)
                f.flush()
            finally:
                self._lock.release()

        f.flush = newflush
        f.close = newclose
        f.truncate = newtruncate
        return f
Example #18
 def _get_file(self):
     if self.type == "file" and self._mem_file is None:
         bytes = self.bucket.get_binary(self.path).get_data()
         self._mem_file = StringIO(bytes)
     return self._mem_file
Example #19
class DirEntry(object):

    def sync(f):
        def deco(self, *args, **kwargs):
            if self.lock is not None:
                try:
                    self.lock.acquire()
                    return f(self, *args, **kwargs)
                finally:
                    self.lock.release()
            else:
                return f(self, *args, **kwargs)
        return deco

    def __init__(self, type, name, contents=None):

        assert type in ("dir", "file"), "Type must be dir or file!"

        self.type = type
        self.name = name

        if contents is None and type == "dir":
            contents = {}

        self.open_files = []
        self.contents = contents
        self.mem_file = None
        self.created_time = datetime.datetime.now()
        self.modified_time = self.created_time
        self.accessed_time = self.created_time

        self.xattrs = {}

        self.lock = None
        if self.type == 'file':
            self.mem_file = StringIO()
            self.lock = threading.RLock()

    def get_value(self):
        self.lock.acquire()
        try:
            return self.mem_file.getvalue()
        finally:
            self.lock.release()
    data = property(get_value)

    def desc_contents(self):
        if self.isfile():
            return "<file %s>" % self.name
        elif self.isdir():
            return "<dir %s>" % "".join("%s: %s" % (k, v.desc_contents()) for k, v in self.contents.iteritems())

    def isdir(self):
        return self.type == "dir"

    def isfile(self):
        return self.type == "file"

    def __str__(self):
        return "%s: %s" % (self.name, self.desc_contents())

    @sync
    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('lock')
        if self.mem_file is not None:
            state['mem_file'] = self.data
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        if self.type == 'file':
            self.lock = threading.RLock()
        else:
            self.lock = None
        if self.mem_file is not None:
            data = self.mem_file
            self.mem_file = StringIO()
            self.mem_file.write(data)
Example #20
class RiakFSObject(DirEntry):
    """
    Represents a filesystem "node", either a directory or a file.

    A directory node may have sub-nodes in a contents dictionary.

    Has more responsibility than the `DirEntry` class, for example the
    `remove` and `_make_dir_entry` methods. Also has a `path` attribute
    which, in the case of a file, is the key of the object in the Riak
    store. TODO: look into moving the responsibility back to the FS object,
    make this class dumber.
    """

    @classmethod
    def from_dict(cls, bucket, data):
        def obj_from_dict(d):
            type = d.pop("type")
            name = d.pop("name")
            prefix = d.pop("prefix", None)
            contents = d.pop("contents", {})
            obj = cls(bucket, type, name, prefix)
            obj.xattrs = d["xattrs"]
            obj.timestamps = d["timestamps"]
            for k, v in contents.items():
                obj.contents[k] = obj_from_dict(v)
            return obj

        return obj_from_dict(data)

    def to_dict(self):
        ignore = set(["bucket", "contents", "lock", "open_files"])

        def serialize(obj):
            d = {}
            for k, v in obj.__dict__.iteritems():
                if k in ignore or k.startswith("_"):
                    continue
                if k == "xattrs" and not v:
                    continue
                d[k] = v
            if obj.contents:
                d["contents"] = dict((k, serialize(v)) for k, v in obj.contents.items())
            return d

        return serialize(self)

    # Datetime instances don't json serialize, so we maintain them as lists
    # under the hood and "rehydrate" on demand
    def _get_ct(self):
        return datetime.fromtimestamp(time.mktime(self.timestamps["ctime"]))

    def _get_mt(self):
        return datetime.fromtimestamp(time.mktime(self.timestamps["mtime"]))

    def _get_at(self):
        return datetime.fromtimestamp(time.mktime(self.timestamps["atime"]))

    def _set_ct(self, val):
        self.timestamps["ctime"] = list(val.timetuple())

    def _set_mt(self, val):
        self.timestamps["mtime"] = list(val.timetuple())

    def _set_at(self, val):
        self.timestamps["atime"] = list(val.timetuple())

    created_time = property(_get_ct, _set_ct)
    modified_time = property(_get_mt, _set_mt)
    accessed_time = property(_get_at, _set_at)

    def _get_file(self):
        if self.type == "file" and self._mem_file is None:
            bytes = self.bucket.get_binary(self.path).get_data()
            self._mem_file = StringIO(bytes)
        return self._mem_file

    def _set_file(self, stream):
        self._mem_file = stream

    mem_file = property(_get_file, _set_file)

    def __init__(self, bucket, type, name, prefix=None, contents=None):
        assert type in ("dir", "file"), "Type must be dir or file!"
        self.bucket = bucket
        self.type = type
        self.name = name.rstrip("/")
        if prefix:
            prefix = prefix.strip("/") + "/"
        else:
            prefix = ""
        self.prefix = prefix
        self.path = prefix + name
        if type == "dir":
            self.path += "/"
            if contents is None:
                contents = {}
        self.open_files = []
        self.contents = contents

        now = list(datetime.now().timetuple())
        self.timestamps = {"ctime": now, "mtime": now, "atime": now}

        self.xattrs = {}

        self.lock = None
        self._mem_file = None
        if self.type == "file":
            self.lock = threading.RLock()
        # problem of having a `path` attribute - if there are contents, their
        # paths may be different.

    #        if contents:
    #            def update_paths(entries, prefix):
    #                prefix = '/' + prefix.strip('/') + '/'
    #                for entry in entries:
    #                    entry.prefix = prefix
    #                    entry.path = prefix + entry.name
    #                    if entry.contents:
    #                        update_paths(entry.contents.values(), entry.path)
    #            update_paths(contents.values(), self.path)

    def _make_dir_entry(self, type, name, contents=None):
        child = self.__class__(self.bucket, type, name, prefix=self.path, contents=contents)
        self.contents[name] = child
        return child

    def remove(self, name):
        entry = self.contents[name]
        if entry.isfile():
            key = self.path + name
            obj = self.bucket.get(key)
            obj.delete()
        else:
            for child in entry.contents.keys():
                entry.remove(child)
        del self.contents[name]

    def __getstate__(self):
        state = self.__dict__.copy()
        del state["lock"]
        bucket = state.pop("bucket")
        state["bucket"] = bucket.get_name()
        state["host"] = bucket._client._host
        state["port"] = bucket._client._port
        state["transport"] = bucket._client._transport.__class__.__name__[4:-9].upper()
        if self._mem_file is not None:
            state["_mem_file"] = self.data
        return state

    def __setstate__(self, state):
        state["bucket"] = RiakBucket(state.pop("bucket"), state.pop("host"), state.pop("port"), state.pop("transport"))
        self.__dict__.update(state)
        if self.type == "file":
            self.lock = threading.RLock()
        else:
            self.lock = None
        if self._mem_file is not None:
            data = self._mem_file
            self._mem_file = StringIO()
            self._mem_file.write(data)
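Note: the __getstate__/__setstate__ pair above exists because thread locks (and the live Riak bucket handle) cannot be pickled, so they are dropped before pickling and rebuilt afterwards. A minimal sketch of the same pattern in isolation (the Node class is illustrative):

    import pickle
    import threading

    class Node(object):
        def __init__(self, name):
            self.name = name
            self.lock = threading.RLock()

        def __getstate__(self):
            # Drop the unpicklable lock, keep everything else.
            state = self.__dict__.copy()
            del state["lock"]
            return state

        def __setstate__(self, state):
            # Restore the attributes and recreate a fresh lock.
            self.__dict__.update(state)
            self.lock = threading.RLock()

    restored = pickle.loads(pickle.dumps(Node("example")))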
Example #21
    def decompress(self, g, wrapAsFile=True):
        buf = g.read(2)
        magic = unpack(">H", buf)[0]
        if (magic & 0x3EFF) == 0x10FB:
            # it is compressed
            if magic & 0x8000:
                outputSize = unpack(">I", g.read(4))[0]
                if magic & 0x100:
                    unknown1 = unpack(">I", g.read(4))[0]
            else:
                outputSize = unpack(">I", "\0" + g.read(3))[0]
                if magic & 0x100:
                    unknown1 = unpack(">I", "\0" + g.read(3))[0]

        output = []
        while True:
            opcode = unpack("B", g.read(1))[0]
            if not (opcode & 0x80):  # opcode: bit7==0 to get here
                # read second opcode
                opcode2 = unpack("B", g.read(1))[0]
                #print "0x80", toBits(opcode), toBits(opcode2), opcode & 0x03, (((opcode & 0x60) << 3) | opcode2) + Q, ((opcode & 0x1C) >> 2) + 2 + R

                # copy at most 3 bytes to output stream (lowest 2 bits of opcode)
                count = opcode & 0x03
                for i in range(count):
                    output.append(g.read(1))

                # you always have to look at least one byte, hence the +1
                # use bit6 and bit5 (bit7=0 to trigger the if-statement) of opcode, and 8 bits of opcode2 (10-bits)
                lookback = (((opcode & 0x60) << 3) | opcode2) + 1

                # use bit4..2 of opcode
                count = ((opcode & 0x1C) >> 2) + 3

                for i in range(count):
                    output.append(output[-lookback])
            elif not (opcode & 0x40):  # opcode: bit7..6==10 to get here
                opcode2 = unpack("B", g.read(1))[0]
                opcode3 = unpack("B", g.read(1))[0]
                #print "0x40", toBits(opcode), toBits(opcode2), toBits(opcode3)

                # copy count bytes (upper 2 bits of opcode2)
                count = opcode2 >> 6
                for i in range(count):
                    output.append(g.read(1))

                # look back again (lower 6 bits of opcode2, all 8 bits of opcode3, total 14-bits)
                lookback = (((opcode2 & 0x3F) << 8) | opcode3) + 1
                # lower 6 bits of opcode are the count to copy
                count = (opcode & 0x3F) + 4

                for i in range(count):
                    output.append(output[-lookback])
            elif not (opcode & 0x20):  # opcode: bit7..5=110 to get here
                opcode2 = unpack("B", g.read(1))[0]
                opcode3 = unpack("B", g.read(1))[0]
                opcode4 = unpack("B", g.read(1))[0]

                # copy at most 3 bytes to output stream (lowest 2 bits of opcode)
                count = opcode & 0x03
                for i in range(count):
                    output.append(g.read(1))

                # look back: bit4 of opcode, all bits of opcode2 and opcode3, total 17-bits
                lookback = (((opcode & 0x10) >> 4) << 16) | (
                    opcode2 << 8) | (opcode3) + 1
                # bit3..2 of opcode and the whole of opcode4
                count = (((((opcode & 0x0C) >> 2) << 8)) | opcode4) + 5

                #print "0x20", toBits(opcode), toBits(opcode2), toBits(opcode3), toBits(opcode4), lookback, count

                for i in range(count):
                    output.append(output[-lookback])
            else:  # opcode: bit7..5==111 to get here
                # use lowest 5 bits for count
                count = ((opcode & 0x1F) << 2) + 4
                if count > 0x70:  # this is end of input
                    # turn into a small-copy
                    count = opcode & 0x03
                    #print "0xEXITCOPY", count
                    for i in range(count):
                        output.append(g.read(1))
                    break

                # "big copy" operation: up to 112 bytes (minumum of 4, multiple of 4)
                for i in range(count):
                    output.append(g.read(1))
                #print "0xLO", toBits(opcode), count

        if wrapAsFile:
            return StringIO("".join(output))
        else:
            return "".join(output)
Example #22
import threading

import paramiko

from fs.base import flags_to_mode
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
from fs.filelike import StringIO
from fs.utils import isdir

# Default host key used by BaseSFTPServer
#
DEFAULT_HOST_KEY = paramiko.RSAKey.from_private_key(
    StringIO(
        "-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKCAIEAl7sAF0x2O/HwLhG68b1uG8KHSOTqe3Cdlj5i/1RhO7E2BJ4B\n3jhKYDYtupRnMFbpu7fb21A24w3Y3W5gXzywBxR6dP2HgiSDVecoDg2uSYPjnlDk\nHrRuviSBG3XpJ/awn1DObxRIvJP4/sCqcMY8Ro/3qfmid5WmMpdCZ3EBeC0CAwEA\nAQKCAIBSGefUs5UOnr190C49/GiGMN6PPP78SFWdJKjgzEHI0P0PxofwPLlSEj7w\nRLkJWR4kazpWE7N/bNC6EK2pGueMN9Ag2GxdIRC5r1y8pdYbAkuFFwq9Tqa6j5B0\nGkkwEhrcFNBGx8UfzHESXe/uE16F+e8l6xBMcXLMJVo9Xjui6QJBAL9MsJEx93iO\nzwjoRpSNzWyZFhiHbcGJ0NahWzc3wASRU6L9M3JZ1VkabRuWwKNuEzEHNK8cLbRl\nTyH0mceWXcsCQQDLDEuWcOeoDteEpNhVJFkXJJfwZ4Rlxu42MDsQQ/paJCjt2ONU\nWBn/P6iYDTvxrt/8+CtLfYc+QQkrTnKn3cLnAkEAk3ixXR0h46Rj4j/9uSOfyyow\nqHQunlZ50hvNz8GAm4TU7v82m96449nFZtFObC69SLx/VsboTPsUh96idgRrBQJA\nQBfGeFt1VGAy+YTLYLzTfnGnoFQcv7+2i9ZXnn/Gs9N8M+/lekdBFYgzoKN0y4pG\n2+Q+Tlr2aNlAmrHtkT13+wJAJVgZATPI5X3UO0Wdf24f/w9+OY+QxKGl86tTQXzE\n4bwvYtUGufMIHiNeWP66i6fYCucXCMYtx6Xgu2hpdZZpFw==\n-----END RSA PRIVATE KEY-----\n"
    ))


def report_sftp_errors(func):
    """Decorator to catch and report FS errors as SFTP error codes.

    Any FSError exceptions are caught and translated into an appropriate
    return code, while other exceptions are passed through untouched.
    """
    @wraps(func)
    def wrapper(*args, **kwds):
        try:
            return func(*args, **kwds)
        except ResourceNotFoundError, e:
            return paramiko.SFTP_NO_SUCH_FILE