Example #1
0
def fix_savedata(dir):
    if (not os.path.isdir(dir) or not os.path.isfile(dir+"/SYS.BIN") ): 
        ErrorMessageBox("Ŀ¼´íÎó")
        
    import mmap
    fd = os.open(dir+"/SYS.BIN", os.O_RDWR)
    buf = mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_WRITE)
    if (buf[0:8] != "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"): 
        print "Bad savedata or not decrypted. SYS.BIN"
        ErrorMessageBox("´æµµ´íÎó")
    for pos in range(0x269480, 0x269480 + 0x1258 * 100) :
        if buf[pos:pos+4] == "\0\0\0\2" :
            buf[pos+0x18:pos+0x58] = "\0\0\0\0" * 0x10
        pos+=0x1258
    os.close(fd)
    print 'Fix SYS.BIN.'
    
    import fnmatch
    zstr = "\0\0\0\0" * ((0x8A358 - 0x46358) / 4)
    for directory, subdirectories, files in os.walk(dir):
      for file in files:
        if fnmatch.fnmatch(file, 'SAVE???.BIN'):
            fd = os.open(os.path.join(directory, file), os.O_RDWR)
            buf = mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_WRITE)
            if (buf[0:4] != "\0\0\0\2") :
                print "Bad savedata or not decrypted. %s" % file
                ErrorMessageBox("´æµµ´íÎó»òδ½âÃÜ")
            buf[0x18:0x58] = "\0\0\0\0" * 0x10
            buf[0x46358:0x8A358] = zstr
            os.close(fd)
            print 'Fix %s.' % (file)
    windll.user32.MessageBoxA(None, "´æµµÐÞÕýÍê³É!", EXE_TITLE, 0)
Example #2
0
def truncate_and_unlink(path, increment=256 * 1024 ** 2, delay=0.2):
    """Truncate a file to zero bytes before unlinking it.

    The file is shrunk by ``increment`` bytes at a time, sleeping
    ``delay`` seconds between truncation steps so the filesystem can
    reclaim space gradually.  Once the file reaches zero bytes it is
    removed/unlinked from the filesystem.

    :param path: path of the file to remove
    :param increment: bytes trimmed per step (default 256 MiB)
    :param delay: seconds to sleep between steps
    :raises: OSError on error
    """
    LOG.debug("truncate_and_unlink(path=%r, increment=%r, delay=%.2fs)", path, format_bytes(increment), delay)
    fd = os.open(path, os.O_RDWR)
    # BUG FIX: the original never closed ``fd``, leaking one descriptor
    # per call; release it in a finally block.
    try:
        size = os.fstat(fd).st_size
        while size > 0:
            start = time.time()
            # Clamp so ftruncate never receives a negative length.
            length = max(size - increment, 0)
            os.ftruncate(fd, length)
            LOG.debug("truncate(%s, length=%s) in %.2fs", path, format_bytes(length), time.time() - start)
            time.sleep(delay)
            size = os.fstat(fd).st_size
    finally:
        os.close(fd)
    os.unlink(path)
    LOG.debug("unlink(%s)", path)
Example #3
0
    def read_forever(self):
        """
        This function reads the log file one line at a time, reopening it if it
        changes (due to logrotate for example).
        This function will loop until the program ends or is explicitly told to
        stop reading the log file (``self.log_read`` becomes false).

        Each complete line read is passed to ``self.handle_message``.
        """
        try:
            self.mc_log_fh = open(self.mc_log, "r")

            # Remember the inode so a logrotate swap can be detected below.
            self.curinode = os.fstat(self.mc_log_fh.fileno()).st_ino

            while self.log_read:
                # Drain everything currently available in the file.
                while self.log_read:
                    self.buf = self.mc_log_fh.readline()
                    if self.buf == "":
                        break

                    self.handle_message(self.buf, False)
                try:
                    if os.stat(self.mc_log).st_ino != self.curinode:
                        # The path now points at a new file: switch over.
                        self.mc_new_fh = open(self.mc_log, "r")
                        self.mc_log_fh.close()
                        # BUG FIX: the original assigned ``self.mc_new_fg``
                        # (a typo), which would raise AttributeError here.
                        self.mc_log_fh = self.mc_new_fh
                        self.curinode = os.fstat(self.mc_log_fh.fileno()).st_ino
                        continue  # dont bother sleeping since there is a new log file
                except IOError:
                    pass
                time.sleep(0.1)
        except:
            self.log.error("Craftoria: unable to open or read log file %s: %s", sys.exc_type, sys.exc_value)
Example #4
0
    def actionStreamFile(self, params):
        """Stream a chunk of a site file back to the requesting peer.

        params keys: "site" (site address), "inner_path" (file path inside
        the site) and "location" (byte offset to start from).  Sends a
        header dict (size, new location, stream_bytes) followed by up to
        FILE_BUFF raw bytes; returns False on error.
        """
        site = self.sites.get(params["site"])
        if not site or not site.settings["serving"]:  # Site unknown or not serving
            self.response({"error": "Unknown site"})
            return False
        try:
            if config.debug_socket:
                self.log.debug("Opening file: %s" % params["inner_path"])
            with site.storage.open(params["inner_path"]) as file:
                file.seek(params["location"])
                # Never promise more bytes than remain after the seek point.
                stream_bytes = min(FILE_BUFF, os.fstat(file.fileno()).st_size - params["location"])
                back = {
                    "size": os.fstat(file.fileno()).st_size,
                    "location": min(file.tell() + FILE_BUFF, os.fstat(file.fileno()).st_size),
                    "stream_bytes": stream_bytes,
                }
                if config.debug_socket:
                    self.log.debug(
                        "Sending file %s from position %s to %s"
                        % (params["inner_path"], params["location"], back["location"])
                    )
                # Header first, then the raw file bytes.
                self.response(back)
                self.sendRawfile(file, read_bytes=FILE_BUFF)
            if config.debug_socket:
                self.log.debug("File %s sent" % params["inner_path"])

            # Add peer to site if not added before
            connected_peer = site.addPeer(self.connection.ip, self.connection.port)
            if connected_peer:  # Just added
                connected_peer.connect(self.connection)  # Assign current connection to peer

        except Exception, err:
            self.log.debug("GetFile read error: %s" % Debug.formatException(err))
            self.response({"error": "File read error: %s" % Debug.formatException(err)})
            return False
Example #5
0
 def _fd_closed(fd):
     """Return True if *fd* is no longer a valid file descriptor.

     ``os.fstat`` raising EBADF is the definitive signal that the
     descriptor is closed; any other OSError/IOError is re-raised.
     """
     try:
         os.fstat(fd)
     except (OSError, IOError) as exc:
         if exc.errno == errno.EBADF:
             return True
         raise
     # BUG FIX: the original fell off the end and returned None for a
     # live descriptor; return an explicit False instead.  (The py2-only
     # ``except ..., exc`` syntax was updated to ``as exc``, accepted by
     # Python 2.6+ and 3.)
     return False
Example #6
0
    def test_object_run_fast_track_non_zero(self):
        """Fast-track audit (zero_byte_fps) must skip non-zero objects even
        when their ETag is stale; a full audit pass must quarantine them."""
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        data = "0" * 1024
        etag = md5()
        with self.disk_file.mkstemp() as fd:
            os.write(fd, data)
            etag.update(data)
            etag = etag.hexdigest()
            metadata = {
                "ETag": etag,
                "X-Timestamp": str(normalize_timestamp(time.time())),
                "Content-Length": str(os.fstat(fd).st_size),
                "Original-Content-Length": str(os.fstat(fd).st_size),
            }
            self.disk_file.put(fd, metadata)
            # Rewrite the metadata with an ETag that does NOT match the
            # stored body, so a full audit should quarantine the object.
            etag = md5()
            etag.update("1" + "0" * 1023)
            etag = etag.hexdigest()
            metadata["ETag"] = etag
            write_metadata(fd, metadata)

        quarantine_path = os.path.join(self.devices, "sda", "quarantined", "objects")
        # Zero-byte-only pass: the 1 KiB object is ignored, no quarantine.
        self.auditor.run_once(zero_byte_fps=50)
        self.assertFalse(os.path.isdir(quarantine_path))
        self.auditor.run_once()
        self.assertTrue(os.path.isdir(quarantine_path))
def split_multimol2(multimol2):
    """Split a multi-mol2 file (a mol2 file consisting of multiple mol2
    entries) into individual mol2-file contents.

    Arguments:
        multimol2 (string): path to the multi-mol2 file

    Returns:
        A list with one sublist per extracted mol2 entry.  Each sublist
        holds the molecule ID (the line following ``@<TRIPOS>MOLECULE``)
        and the full mol2 text of that entry, e.g.
        [['ID1234', '@<TRIPOS>MOLECULE...'], ['ID1235', ...], ...]
    """
    with open(multimol2, "r") as mol2file:
        single_mol2s = []
        line = mol2file.readline()

        # End-of-file is detected by comparing the read position with the
        # file size reported by fstat.
        while not mol2file.tell() == os.fstat(mol2file.fileno()).st_size:
            if line.startswith("@<TRIPOS>MOLECULE"):
                mol2cont = ""
                mol2cont += line
                line = mol2file.readline()
                molecule_id = line.strip()

                # Accumulate lines until the next record header (or EOF).
                while not line.startswith("@<TRIPOS>MOLECULE"):
                    mol2cont += line
                    line = mol2file.readline()
                    if mol2file.tell() == os.fstat(mol2file.fileno()).st_size:
                        mol2cont += line
                        break

                single_mol2s.append([molecule_id, mol2cont])
            else:
                # BUG FIX: the original never advanced ``line`` when it did
                # not start a molecule record, so any leading comment or
                # blank line made this loop spin forever.  Skip such lines
                # until the next record header.
                line = mol2file.readline()
    return single_mol2s
Example #8
0
    def _open_next(self, fd):
        """Open the log file that follows the one currently open on *fd*.

        Rotated multilog files match ``@*.[su]`` and sort chronologically;
        "current" is the live file and is appended last.  The previous file
        is located by inode, then the next one in order is opened.  Falls
        back to "current" when the previous file cannot be found or is
        already the newest.  Returns ``(inode, fd)`` of the file to read;
        *fd* is always closed.
        """
        current = os.path.join(self.dir, "current")
        current_fd = _open_nonblock(current)

        pat = os.path.join(self.dir, "@*.[su]")
        files = glob.glob(pat)
        files.sort()
        files.append(current)

        # Identify the file we were reading by inode before closing it.
        inode = os.fstat(fd).st_ino
        os.close(fd)

        for idx, f in enumerate(files):
            if os.stat(f).st_ino == inode:
                break
        else:
            # Previous file disappeared entirely: resume from "current".
            log.warn("multilog: can't find before file. (inode=%d)", inode)
            return os.fstat(current_fd).st_ino, current_fd

        idx += 1
        if len(files) == idx:
            # We were already reading "current"; keep reading it.
            log.warn("multilog: rotate current => current?")
            return os.fstat(current_fd).st_ino, current_fd

        next_file = files[idx]
        if next_file == current:
            return os.fstat(current_fd).st_ino, current_fd

        # A rotated successor exists; switch to it and drop current_fd.
        os.close(current_fd)
        return os.stat(next_file).st_ino, _open_nonblock(next_file)
Example #9
0
    def __init__(self, fd, args=None, timeout=30, maxread=2000, searchwindowsize=None, logfile=None):
        """This takes a file descriptor (an int) or an object that support the
        fileno() method (returning an int). All Python file-like objects
        support fileno().

        Raises ExceptionPexpect when *fd* is neither an int nor file-like,
        or when it is not a valid (open) descriptor. """

        # Unwrap file-like objects to their underlying descriptor.
        if not isinstance(fd, int) and hasattr(fd, "fileno"):
            fd = fd.fileno()

        # IDIOM/CONSISTENCY FIX: the original tested ``type(fd) != type(0)``;
        # use isinstance() as the sibling fdspawn constructor in this file
        # already does.
        if not isinstance(fd, int):
            raise ExceptionPexpect(
                "The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn."
            )

        try:  # make sure fd is a valid file descriptor
            os.fstat(fd)
        except OSError:
            raise ExceptionPexpect("The fd argument is not a valid file descriptor.")

        self.args = None
        self.command = None
        SpawnBase.__init__(self, timeout, maxread, searchwindowsize, logfile)
        self.child_fd = fd
        self.own_fd = False
        self.closed = False
        self.name = "<file descriptor %d>" % fd
Example #10
0
def _set_data_field(fields, args):
    """Populate fields["data"] with the image payload, if any.

    Leaves ``fields`` untouched when a "location" or "copy_from" source is
    already present.  Otherwise the data comes from ``args.file`` or,
    failing that, from standard input when something is piped in.
    """
    if "location" in fields or "copy_from" in fields:
        return
    if args.file:
        fields["data"] = open(args.file, "rb")
        return
    # distinguish cases where:
    # (1) stdin is not valid (as in cron jobs):   glance ... <&-
    # (2) image data is piped/redirected in:      cat /tmp/file | glance ...
    # (3) no image data provided:                 glance ...
    try:
        os.fstat(0)
    except OSError:
        # (1) stdin is not valid (closed...)
        fields["data"] = None
        return
    if sys.stdin.isatty():
        # (3) no image data provided
        fields["data"] = None
    else:
        # (2) image data is provided through standard input
        if msvcrt:
            msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        fields["data"] = sys.stdin
Example #11
0
    def followTail(self, fileobj=None, fstat=None):
        """Poll the tailed file once and reschedule itself on the reactor.

        Reads any newly appended data and dispatches it via dataReceived().
        When the file's identity (per fileIdentity()) changes -- e.g. after
        logrotate -- the file is reopened and fileReset() is called.
        Re-arms itself every ``self.delay`` seconds while
        ``self.keeprunning`` is set.
        """
        if fileobj is None:
            fileobj = open(self.filename)
            if self.seekend:
                # Start tailing from the current end of the file.
                fileobj.seek(0, 2)

        line = fileobj.read()

        if line:
            self.dataReceived(line)

        if fstat is None:
            fstat = os.fstat(fileobj.fileno())

        # NOTE(review): bare except -- any stat() failure falls back to the
        # cached fstat, i.e. the file is treated as unchanged.
        try:
            stat = os.stat(self.filename)
        except:
            stat = fstat

        if self.fileIdentity(stat) != self.fileIdentity(fstat):
            # The path now points at a different file; reopen and reset.
            fileobj = open(self.filename)
            fstat = os.fstat(fileobj.fileno())
            self.fileReset()

        if self.keeprunning:
            reactor.callLater(self.delay, lambda: self.followTail(fileobj, fstat))
Example #12
0
File: tail.py Project: actank/zmon
 def __iter__(self):
     """Yield complete (newline-terminated) lines appended to the tailed
     file since the last iteration, buffering any trailing partial line
     in ``self.buf``.  Reopens the file when it was rotated/replaced, or
     when it grew by more than ``self.max_size`` (in which case buffered
     data is dropped and the position jumps to the end)."""
     if not self.file:
         self._reopen(True)
     # NOTE(review): bare except -- any stat failure (file vanished,
     # handle dead) is treated as "needs reopen".
     try:
         need_reopen = not os.path.samestat(os.fstat(self.file.fileno()), os.stat(self.fname))
     except:
         need_reopen = True
     if self.file:
         newpos = os.fstat(self.file.fileno()).st_size
         if newpos > self.pos + self.max_size:
             # Too much new data: skip ahead rather than flooding consumers.
             self.pos = newpos
             self.buf = ""
             return
         self.file.seek(self.pos)
         if self.buf:
             # Try to complete the partial line left over from last time.
             self.buf += self.file.readline()
         if self.buf.endswith("\n") or len(self.buf) > self.max_size:
             yield self.buf
             self.buf = ""
         line = None
         for line in self.file:
             if line.endswith("\n"):
                 yield line
         if line and not line.endswith("\n"):
             # Keep the trailing partial line for the next iteration.
             self.buf = line
         self.pos = self.file.tell()
     if need_reopen:
         self._reopen(True)
Example #13
0
def file_contents_ro(fd, stream=False, allow_mmap=True):
    """:return: read-only contents of the file represented by the file descriptor fd

    :param fd: file descriptor opened for reading
    :param stream: if False, random access is provided, otherwise the stream
        interface is provided.
    :param allow_mmap: if True, the contents may be memory-mapped, which
        handles large files efficiently.  The file descriptor's position
        changes only when this is False (the fallback uses os.read)."""
    if allow_mmap:
        try:
            try:
                # A memory map supports both stream and random access.
                return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
            except EnvironmentError:
                # Python 2.4 required the actual size instead of 0.
                return mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_READ)
        except OSError:
            # Mapping failed entirely; fall through to a plain read.
            pass

    # Manual read from the descriptor's current position.
    contents = os.read(fd, os.fstat(fd).st_size)
    return _RandomAccessBytesIO(contents) if stream else contents
Example #14
0
    def __init__(self, fd, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None):

        """This takes a file descriptor (an int) or an object that support the
        fileno() method (returning an int). All Python file-like objects
        support fileno().

        Raises ExceptionPexpect when *fd* is neither an int nor file-like,
        or when it is not a valid (open) descriptor. """

        # TODO: Add better handling of trying to use fdspawn in place of spawn
        # TODO: (overload to allow fdspawn to also handle commands as spawn does.

        # NOTE(review): mutable default ``args=[]`` is shared across calls;
        # it is only forwarded to spawn.__init__ below, but worth confirming
        # spawn never mutates it.

        # Unwrap file-like objects to their underlying descriptor.
        if not isinstance(fd, int) and hasattr(fd, "fileno"):
            fd = fd.fileno()

        if not isinstance(fd, int):
            raise ExceptionPexpect(
                "The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn."
            )

        try:  # make sure fd is a valid file descriptor
            os.fstat(fd)
        except OSError:
            raise ExceptionPexpect("The fd argument is not a valid file descriptor.")

        self.args = None
        self.command = None
        spawn.__init__(self, None, args, timeout, maxread, searchwindowsize, logfile)
        self.child_fd = fd
        self.own_fd = False
        self.closed = False
        self.name = "<file descriptor %d>" % fd
Example #15
0
def multitail(filenames):
    """Tail several files at once, yielding (filename, line) tuples.

    Infinite generator.  Files that cannot be opened are retried on each
    pass; a file is reopened when it stopped yielding data and its inode
    changed on disk (logrotate).  Sleeps one second whenever no file
    produced any data.
    """
    files = {}
    for fn in filenames:
        try:
            handle = open(fn)
            handle.seek(0, 2)  # start at end-of-file: only new data is tailed
            inode = os.fstat(handle.fileno()).st_ino
        except IOError:
            handle = None
            inode = None
        files[fn] = dict(handle=handle, last=0, inode=inode)

    # NOTE(review): ``all`` shadows the builtin; left unchanged here.
    all = files.items()
    while 1:
        read = False
        for fn, info in all:
            l = info["handle"] and info["handle"].readline()
            if l:
                yield fn, l
                info["last"] = time.time()
                info["read"] = read = True
            else:
                info["read"] = False
        # Reopen any file that went quiet and was replaced on disk.
        for fn, info in all:
            if not info["handle"] or (info["read"] == False and info["inode"] != stat_inode(fn)):
                if info["handle"]:
                    info["handle"].close()
                try:
                    info["handle"] = open(fn)
                    info["inode"] = os.fstat(info["handle"].fileno()).st_ino
                except IOError:
                    info["handle"] = None
        if not read:
            time.sleep(1)
Example #16
0
    def test_contextmanager(self):
        """Image.open used as a context manager must close the underlying
        file: fstat works inside the with-block and raises OSError after."""
        fn = None
        with Image.open("Tests/images/hopper.gif") as im:
            fn = im.fp.fileno()
            os.fstat(fn)

        self.assertRaises(OSError, lambda: os.fstat(fn))
Example #17
0
    def __init__(self, fh, mimetype, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
        """Constructor.

    Args:
      fh: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking mode.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
        self._fh = fh
        self._mimetype = mimetype
        self._chunksize = chunksize
        self._resumable = resumable
        # Size is probed lazily below; it stays None for sources whose
        # length cannot be determined.
        self._size = None
        try:
            fileno_attr = getattr(self._fh, "fileno", None)
            if fileno_attr is not None:
                descriptor = fileno_attr()

                # Pipes and such show up as 0 length files.
                if os.fstat(descriptor).st_size:
                    self._size = os.fstat(descriptor).st_size
        except IOError:
            pass
Example #18
0
    def pipe_read( self, pipe, minimum_to_read ):
        """Poll *pipe* and read whatever bytes are currently available.

        Reads one byte at a time while os.fstat reports a non-zero size.
        When ``minimum_to_read`` is non-zero, reading stops after a newline
        once at least that many bytes have been collected.  Returns the
        collected string (possibly empty).
        """

        ##  Hackaround since Windows doesn't support select( ) except for sockets.

        dbg_print( 'pipe_read: minimum to read is ' + str( minimum_to_read ) )

        # Give the producer a moment to emit output before polling.
        time.sleep( self.delay )

        count = 0
        count = os.fstat( pipe )[stat.ST_SIZE]

        data = ''

        while ( count > 0 ):
            data += os.read( pipe, 1 )
            count = os.fstat( pipe )[stat.ST_SIZE]

            ##  Be sure to break the read, if asked to do so,
            ##  after we've read in a line termination.

            if minimum_to_read != 0 and data[ len(data) -1 ] == '\n':
                if len( data ) >= minimum_to_read:
                    dbg_print( 'pipe_read: read at least the minimum asked for' )
                    break

        return data
Example #19
0
def test_contextmanager():
    """Image.open used as a context manager must close the underlying file:
    fstat succeeds inside the with-block and raises OSError afterwards."""
    fn = None
    with Image.open("Images/lena.gif") as im:
        fn = im.fp.fileno()
        assert_no_exception(lambda: os.fstat(fn))

    assert_exception(OSError, lambda: os.fstat(fn))
Example #20
0
    def test_object_audit_diff_data(self):
        """object_audit must leave a consistent object alone and quarantine
        one whose stored ETag no longer matches its data."""
        self.auditor = auditor.AuditorWorker(self.conf, self.logger)
        data = "0" * 1024
        etag = md5()
        timestamp = str(normalize_timestamp(time.time()))
        with self.disk_file.mkstemp() as fd:
            os.write(fd, data)
            etag.update(data)
            etag = etag.hexdigest()
            metadata = {
                "ETag": etag,
                "X-Timestamp": timestamp,
                "Content-Length": str(os.fstat(fd).st_size),
                "Original-Content-Length": str(os.fstat(fd).st_size),
            }
            self.disk_file.put(fd, metadata)
            pre_quarantines = self.auditor.quarantines
            # remake so it will have metadata
            self.disk_file = DiskFile(self.devices, "sda", "0", "a", "c", "o", self.logger)

            # Matching ETag: no new quarantine expected.
            self.auditor.object_audit(os.path.join(self.disk_file.datadir, timestamp + ".data"), "sda", "0")
            self.assertEquals(self.auditor.quarantines, pre_quarantines)
            # Rewrite metadata with an ETag computed from different content.
            etag = md5()
            etag.update("1" + "0" * 1023)
            etag = etag.hexdigest()
            metadata["ETag"] = etag
            write_metadata(fd, metadata)

            # Mismatch detected: quarantine count must grow by one.
            self.auditor.object_audit(os.path.join(self.disk_file.datadir, timestamp + ".data"), "sda", "0")
            self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
Example #21
0
    def actionGetFile(self, params):
        """Send a FILE_BUFF-sized chunk of a site file to the peer.

        params keys: "site" (site address), "inner_path" (file path inside
        the site) and "location" (byte offset).  The chunk is streamed as
        the msgpack "body" field; returns False on error.
        """
        site = self.sites.get(params["site"])
        if not site or not site.settings["serving"]:  # Site unknown or not serving
            self.response({"error": "Unknown site"})
            return False
        try:
            file_path = site.storage.getPath(params["inner_path"])
            if config.debug_socket:
                self.log.debug("Opening file: %s" % file_path)
            with StreamingMsgpack.FilePart(file_path, "rb") as file:
                file.seek(params["location"])
                # Cap how much of the file this response may stream.
                file.read_bytes = FILE_BUFF
                back = {
                    "body": file,
                    "size": os.fstat(file.fileno()).st_size,
                    "location": min(file.tell() + FILE_BUFF, os.fstat(file.fileno()).st_size),
                }
                if config.debug_socket:
                    self.log.debug(
                        "Sending file %s from position %s to %s" % (file_path, params["location"], back["location"])
                    )
                self.response(back, streaming=True)
            if config.debug_socket:
                self.log.debug("File %s sent" % file_path)

            # Add peer to site if not added before
            connected_peer = site.addPeer(self.connection.ip, self.connection.port)
            if connected_peer:  # Just added
                connected_peer.connect(self.connection)  # Assign current connection to peer

        except Exception, err:
            self.log.debug("GetFile read error: %s" % Debug.formatException(err))
            self.response({"error": "File read error: %s" % Debug.formatException(err)})
            return False
def split_multimol2(multimol2):
    """
    Splits a multi-mol2 file.

    Parameters
    ----------
    multimol2 : str
      Path to the multi-mol2 file.

    Returns
    ----------
    A generator object of lists, one per extracted mol2 entry.  Lists
      contain the molecule ID and the mol2 file contents,
      e.g., ['ID1234', '@<TRIPOS>MOLECULE...'

    """
    with open(multimol2, "r") as mol2file:
        line = mol2file.readline()

        # End-of-file is detected by comparing the read position with the
        # file size reported by fstat.
        while not mol2file.tell() == os.fstat(mol2file.fileno()).st_size:
            if line.startswith("@<TRIPOS>MOLECULE"):
                mol2cont = []
                mol2cont.append(line)
                line = mol2file.readline()
                molecule_id = line.strip()

                # Accumulate lines until the next record header (or EOF).
                while not line.startswith("@<TRIPOS>MOLECULE"):
                    mol2cont.append(line)
                    line = mol2file.readline()
                    if mol2file.tell() == os.fstat(mol2file.fileno()).st_size:
                        mol2cont.append(line)
                        break
                mol2cont[-1] = mol2cont[-1].rstrip()  # removes blank line at file end
                yield [molecule_id, "".join(mol2cont)]
            else:
                # BUG FIX: the original never advanced ``line`` for
                # non-header lines, so a leading comment or blank line made
                # this loop spin forever.  Skip ahead to the next header.
                line = mol2file.readline()
Example #23
0
def get_data_file(args):
    """Return a readable binary stream holding the image data, or None.

    Prefers the path given via ``args.file``; otherwise falls back to
    standard input when data is piped/redirected in.
    """
    if args.file:
        return open(args.file, "rb")
    # distinguish cases where:
    # (1) stdin is not valid (as in cron jobs):   glance ... <&-
    # (2) image data is piped/redirected in:      cat /tmp/file | glance ...
    # (3) no image data provided:                 glance ...
    try:
        os.fstat(0)
    except OSError:
        # (1) stdin is not valid (closed...)
        return None
    if sys.stdin.isatty():
        # (3) no image data provided
        return None
    # (2) image data arrives on stdin; prefer the raw byte buffer if present.
    image = getattr(sys.stdin, "buffer", sys.stdin)
    if msvcrt:
        msvcrt.setmode(image.fileno(), os.O_BINARY)
    return image
Example #24
0
    def test_valid_different_image_type_valid_etag(self):
        """PUT with a matching If-Match etag may replace a GIF with a PNG;
        the response must describe the newly uploaded image."""
        dummy_cache = cache.get_cache("django.core.cache.backends.dummy.DummyCache")
        with patch.object(models, "cache", dummy_cache):
            with self.settings(MEDIA_ROOT=self.TEMP_DIR):
                c = Client()
                # Fetch the current resource to obtain its etag.
                response = c.get(self.gif_url)
                etag = response["etag"]

                f = open("%s/../resources/test_png.png" % TEST_ROOT)
                f2 = open("%s/../resources/test_gif.gif" % TEST_ROOT)

                new_name = "testing PUT name: {0}".format(random.random())

                response = c.put(self.gif_url, {"description": new_name, "image": f}, If_Match=etag)
                self.assertEquals(response.status_code, 200)
                self.assertEquals(int(response["content-length"]), os.fstat(f.fileno()).st_size, "Loaded the new image")
                self.assertNotEqual(
                    int(response["content-length"]),
                    os.fstat(f2.fileno()).st_size,
                    "Size doesn't match the original image",
                )
                self.assertEquals(response["content-type"], "image/png", "Has the right content type")

                # Just to be sure
                response = c.get(self.gif_url)
                self.assertEquals(response["content-type"], "image/png", "Has the right content type")
Example #25
0
def _putFile(inPath, outUrl, method="PUT", session=None, callback=None):
    """Upload a local file or file-like object to *outUrl*.

    ``inPath`` may be a filesystem path or any object with ``read``; the
    body length is derived from a ``size`` attribute, ``getvalue()`` or
    ``fileno()``/fstat, in that order.  Raises TypeError when the size
    cannot be determined and RuntimeError unless the server answers
    200/201.

    NOTE(review): paths are opened in text mode here -- confirm binary
    mode ("rb") is not required for this transport on Windows.
    """
    if hasattr(inPath, "read"):
        inFile = inPath
        if hasattr(inPath, "size"):
            size = inFile.size
        elif hasattr(inPath, "getvalue"):
            size = len(inPath.getvalue())
        elif hasattr(inPath, "fileno"):
            size = os.fstat(inPath.fileno()).st_size
        else:
            raise TypeError("Can't determine size of file")
    else:
        inFile = open(inPath)
        size = os.fstat(inFile.fileno()).st_size

    headers = {}
    if session:
        # Authenticate against the vSphere SOAP endpoint via session cookie.
        headers["Cookie"] = "vmware_soap_session=%s; $Path=/" % session
    response = _makeConnection(outUrl, method, headers, inFile, bodyLength=size, callback=callback)

    if response and response.status not in (200, 201):
        raise RuntimeError("%s failed: %d - %s" % (method, response.status, response.reason))
    elif not response:
        raise RuntimeError("%s failed" % method)
    response.close()
    def getReferencedAttributes(self, obj):
        """Collect blob data for every named blob field on *obj*.

        Returns a dict mapping "<schema>.<field>" dotted names to blob
        objects.  When a field's content is byte-identical to the prior
        revision's, the prior revision's blob is referenced instead of
        copying the data.
        """
        file_data = {}
        # Try to get last revision, only store a new blob if the
        # contents differ from the prior one, otherwise store a
        # reference to the prior one.
        # The implementation is mostly based on CMFEditions's CloneBlobs
        # modifier.
        repo = getToolByName(obj, "portal_repository")
        try:
            prior_rev = repo.retrieve(obj)
        except ArchivistRetrieveError:
            prior_rev = None

        for schemata in iterSchemata(obj):
            for name, field in getFields(schemata).items():
                if INamedBlobFileField.providedBy(field) or INamedBlobImageField.providedBy(field):
                    try:
                        # field.get may raise an AttributeError if the field
                        # is provided by a behavior and hasn't been
                        # initialized yet
                        field_value = field.get(field.interface(obj))
                    except AttributeError:
                        field_value = None
                    if field_value is None:
                        continue
                    blob_file = field_value.open()
                    save_new = True
                    dotted_name = ".".join([schemata.__identifier__, name])

                    if prior_rev is not None:
                        prior_obj = prior_rev.object
                        prior_blob = field.get(field.interface(prior_obj))
                        if prior_blob is not None:
                            prior_file = prior_blob.open()

                            # Check for file size differences
                            if os.fstat(prior_file.fileno()).st_size == os.fstat(blob_file.fileno()).st_size:
                                # Files are the same size, compare line by line
                                for line, prior_line in izip(blob_file, prior_file):
                                    if line != prior_line:
                                        break
                                else:
                                    # The files are the same, save a reference
                                    # to the prior versions blob on this
                                    # version
                                    file_data[dotted_name] = prior_blob._blob
                                    save_new = False

                    if save_new:
                        # Contents differ (or there is no prior revision):
                        # copy the bytes into a fresh blob.
                        new_blob = file_data[dotted_name] = Blob()
                        new_blob_file = new_blob.open("w")
                        try:
                            blob_file.seek(0)
                            new_blob_file.writelines(blob_file)
                        finally:
                            blob_file.close()
                            new_blob_file.close()

        return file_data
Example #27
0
 def isalive(self):
     """Return True while the wrapped descriptor can still be stat'ed.

     A ``child_fd`` of -1 means the descriptor was already detached.
     """
     if self.child_fd == -1:
         return False
     # os.fstat raises OSError (EBADF) once the descriptor is closed.
     try:
         os.fstat(self.child_fd)
     except OSError:
         return False
     return True
Example #28
0
 def func(no):
     """Exercise mmap resize on descriptor *no*: file_size() must track
     os.fstat's st_size both before (6 bytes) and after resize(10)."""
     m = mmap.mmap(no, 6, access=mmap.ACCESS_WRITE)
     f_size = os.fstat(no).st_size
     assert intmask(m.file_size()) == f_size == 6
     # resize() grows the underlying file along with the mapping.
     m.resize(10)
     f_size = os.fstat(no).st_size
     assert intmask(m.file_size()) == f_size == 10
     m.close()
 def isFileHandleAtPath(self, fileHandle, filePath):
     """Return True if the open *fileHandle* refers to the same on-disk
     file as *filePath*.

     Identity is (st_ino, st_dev), i.e. the same file regardless of which
     path was used to open it.
     """
     # IDIOM FIX: os.path.samestat performs exactly the original
     # hand-rolled (st_ino, st_dev) comparison, with one stat()/fstat()
     # call each instead of two, and returns a real bool.
     return os.path.samestat(os.fstat(fileHandle.fileno()), os.stat(filePath))
Example #30
0
 def start(self):
     """ Start capturing on targetfd using memorized tmpfile. """
     # Validate the saved descriptor first: if it was closed in the
     # meantime, the capture could never be undone later.
     try:
         os.fstat(self.targetfd_save)
     except (AttributeError, OSError):
         raise ValueError("saved filedescriptor not valid anymore")
     # Redirect targetfd into the tmpfile and start the sys-level capture.
     os.dup2(self.tmpfile_fd, self.targetfd)
     self.syscapture.start()