Example #1
 def truncate(self, filepath, size):
     with self._get_lock():
         ascii_path = filepath.encode('ascii', 'ignore')
         localfs_path = self._make_localfs_path(ascii_path)
         fd = os.open(localfs_path, os.O_WRONLY | os.O_CREAT)
         os.ftruncate(fd, size)
         os.close(fd)
Example #2
    def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
                           native_sysroot, pseudo):
        """
        Prepare content for an ext2/3/4 rootfs partition.
        """
        du_cmd = "du -ks %s" % rootfs_dir
        out = exec_cmd(du_cmd)
        actual_rootfs_size = int(out.split()[0])

        rootfs_size = self.get_rootfs_size(actual_rootfs_size)

        with open(rootfs, 'w') as sparse:
            os.ftruncate(sparse.fileno(), rootfs_size * 1024)

        extraopts = self.mkfs_extraopts or "-F -i 8192"

        label_str = ""
        if self.label:
            label_str = "-L %s" % self.label

        mkfs_cmd = "mkfs.%s %s %s %s -d %s" % \
            (self.fstype, extraopts, rootfs, label_str, rootfs_dir)
        exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)

        mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
        exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
Example #3
    def __init__(self, pid_path):
        self._pid_path = pid_path
        self._other_running = False
        ensuredirs(self._pid_path)
        self._lockfile = None

        try:
            self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
        except:
            raise SoleError('Cannot open lockfile (path = %s)' % self._pid_path)

        try:
            fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            
        except IOError:
            self._other_running = True
            try:
                f = open(self._pid_path, 'r')
                pid = f.read().strip()
                f.close()
            except:
                pid = '?'

            raise SoleError('Other instance is running (pid = %s)' % pid)

        try:
            os.ftruncate(self._lockfile, 0)
            os.write(self._lockfile, '%i\n' % os.getpid())
            os.fsync(self._lockfile)

        except:
            pass # the pid is only stored for user information, so this is allowed to fail
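A minimal standalone sketch of the same pidfile-locking idiom, with an illustrative path: the lock is tied to the open descriptor, so it is released automatically when the process exits.

import fcntl
import os

fd = os.open("/tmp/demo.pid", os.O_CREAT | os.O_WRONLY, 0o644)
try:
    # raises OSError (IOError) if another process already holds the lock
    fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
    raise SystemExit("another instance is running")
os.ftruncate(fd, 0)                              # clear any stale pid
os.write(fd, ("%d\n" % os.getpid()).encode())    # record our pid for humans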
Example #4
def truncate_and_unlink(path, increment=256*1024**2, delay=0.2):
    """Truncate a file to zero bytes before unlinking

    Truncation is done in ``increment`` bytes with a sleep delay
    of ``delay`` seconds between each truncation step.

    Once the file is zero bytes in size it will be removed/unlinked
    from the filesystem.

    :raises: OSError on error
    """
    LOG.debug("truncate_and_unlink(path=%r, increment=%r, delay=%.2fs)",
              path, format_bytes(increment), delay)
    fd = os.open(path, os.O_RDWR)

    size = os.fstat(fd).st_size
    while size > 0:
        start = time.time()
        length = size - increment
        if length < 0:
            length = 0
        os.ftruncate(fd, length)
        LOG.debug("truncate(%s, length=%s) in %.2fs",
                  path, format_bytes(length), time.time() - start)
        time.sleep(delay)
        size = os.fstat(fd).st_size
    os.unlink(path)
    LOG.debug("unlink(%s)", path)
Example #5
def _general_print(head, color, msg=None, stream=None, level="normal"):
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    # encode raw 'unicode' str to utf8 encoded str
    if msg and isinstance(msg, unicode):
        msg = msg.encode("utf-8", "ignore")

    errormsg = ""
    if CATCHERR_BUFFILE_FD > 0:
        size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    # append error msg to LOG
    if errormsg:
        LOG_CONTENT += errormsg

    # append normal msg to LOG
    save_msg = msg.strip() if msg else None
    if save_msg:
        global HOST_TIMEZONE
        timestr = time.strftime("[%m/%d %H:%M:%S] ", time.gmtime(time.time() - HOST_TIMEZONE))
        LOG_CONTENT += timestr + save_msg + "\n"

    if errormsg:
        _color_print("", NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
Example #6
    def expand(self, create = False, size = None):
        flags = os.O_WRONLY
        if create:
            flags |= os.O_CREAT
            if not os.path.exists(self.lofile):
                makedirs(os.path.dirname(self.lofile))

        if size is None:
            size = self.size

        msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
        if create:
            fd = os.open(self.lofile, flags, 0644)
        else:
            fd = os.open(self.lofile, flags)

        if size <= 0:
            size = 1
        try:
            os.ftruncate(fd, size)
        except:
            # may be limited by 2G in 32bit env
            os.ftruncate(fd, 2**31L)

        os.close(fd)
Example #7
File: db.py Project: bebule/swift
    def _commit_puts(self, item_list=None):
        """
        Scan for .pending files and commit the found records by feeding them
        to merge_items(). Assume that lock_parent_directory has already been
        called.

        :param item_list: A list of items to commit in addition to .pending
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        self._preallocate()
        if not os.path.getsize(self.pending_file):
            if item_list:
                self.merge_items(item_list)
            return
        with open(self.pending_file, 'r+b') as fp:
            for entry in fp.read().split(':'):
                if entry:
                    try:
                        self._commit_puts_load(item_list, entry)
                    except Exception:
                        self.logger.exception(
                            _('Invalid pending entry %(file)s: %(entry)s'),
                            {'file': self.pending_file, 'entry': entry})
            if item_list:
                self.merge_items(item_list)
            try:
                os.ftruncate(fp.fileno(), 0)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
Example #8
def count(delta, file_path):
    """
    Increments counter file and returns the max number of times the file
    has been modified. Counter data must be in the form:
    concurrent tasks, max concurrent tasks (counter should be initialized to 0,0)

    :param int delta: increment value
    :param str file_path: path to shared counter file
    :return: max concurrent tasks (int)
    """
    fd = os.open(file_path, os.O_RDWR)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX)
        try:
            s = os.read(fd, 10)
            value, maxValue = map(int, s.split(','))
            value += delta
            if value > maxValue: maxValue = value
            os.lseek(fd, 0, 0)
            os.ftruncate(fd, 0)
            os.write(fd, ','.join(map(str, (value, maxValue))))
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
    finally:
        os.close(fd)
    return maxValue
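A hedged setup sketch for count() above: per the docstring, the shared file must exist and be initialized to "0,0" before any worker touches it (the path is illustrative).

import os

path = "/tmp/task_counter"
fd = os.open(path, os.O_CREAT | os.O_WRONLY, 0o600)
os.write(fd, b"0,0")      # concurrent tasks, max concurrent tasks
os.close(fd)
# each worker then brackets its task with:
#   count(1, path)    # entering: bumps the live count (and possibly the max)
#   count(-1, path)   # leaving: decrements the live count, max is preserved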
Example #9
    def _create_local_file(self, progresscb, size_bytes):
        """
        Helper function which attempts to build self.path
        """
        fd = None
        path = self._path
        sparse = self._sparse

        try:
            try:
                fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_DSYNC)

                if sparse:
                    os.ftruncate(fd, size_bytes)
                else:
                    # 1 meg of nulls
                    mb = 1024 * 1024
                    buf = '\x00' * mb

                    left = size_bytes
                    while left > 0:
                        if left < mb:
                            buf = '\x00' * left
                        left = max(left - mb, 0)

                        os.write(fd, buf)
                        progresscb.update(size_bytes - left)
            except OSError, e:
                raise RuntimeError(_("Error creating diskimage %s: %s") %
                                   (path, str(e)))
        finally:
            if fd is not None:
                os.close(fd)
            progresscb.end(size_bytes)
Example #10
def write_pid_file(pid_file, pid):
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        comm.print_exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % comm.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, comm.to_bytes(str(pid)))
    return 0
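A hedged usage sketch (`comm` above is a helper module from the surrounding project): a daemon would call this once at startup and exit if the lock is already held.

import os

if write_pid_file("/tmp/mydaemon.pid", os.getpid()) != 0:
    raise SystemExit(1)   # another instance holds the lock; the error was logged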
Example #11
def updatemanifest(manifestfile, versionname, versioncode):
    dom1 = parse(manifestfile)
    dom1.documentElement.setAttribute("android:versionName", versionname)
    dom1.documentElement.setAttribute("android:versionCode", versioncode)
    f = os.open(manifestfile, os.O_RDWR)
    os.ftruncate(f, 0)
    os.write(f, dom1.toxml())
    os.close(f)
Example #12
 def _read_write(self, data):
     total_bytes = reduce(operator.add, map(len, data))
     n = coro.many_lio_writes(self.fd, 0, data)
     self.assertEqual(n, total_bytes)
     a = coro.many_lio_reads(self.fd, 0, total_bytes)
     self.assertEqual(a, data)
     os.ftruncate(self.fd, 0)
Example #13
    def download_to_vpool(url, path, overwrite_if_exists=False):
        """
        Special method to download to vpool because voldrv does not support extending file at write
        :param url: URL to download from
        :type url: str

        :param path: Path to download to
        :type path: str

        :param overwrite_if_exists: Overwrite if file already exists
        :type overwrite_if_exists: bool

        :return: None
        """
        print url
        print path
        if os.path.exists(path) and not overwrite_if_exists:
            return
        u = urllib.urlopen(url)
        file_size = u.info()['Content-Length']
        bsize = 4096 * 1024
        VDiskController.create_volume(path, 0)
        with open(path, "wb") as f:
            size_written = 0
            os.ftruncate(f.fileno(), int(file_size))
            while 1:
                s = u.read(bsize)
                size_written += len(s)
                f.write(s)
                if len(s) < bsize:
                    break
        u.close()
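The pre-truncation above is the point of this example: the full size is reserved before any data arrives because the backend cannot grow the file on write. A minimal sketch of the same reserve-then-stream pattern on a plain local file (path and size are illustrative):

import os

with open("/tmp/download.bin", "wb") as f:
    os.ftruncate(f.fileno(), 10 * 1024 * 1024)   # reserve 10 MiB up front
    f.write(b"\x00" * 4096)                      # streamed writes fill the reserved space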
Example #14
def _general_print(head, color, msg=None, stream=None, level='normal'):
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    # encode raw 'unicode' str to utf8 encoded str
    if msg and isinstance(msg, unicode):
        msg = msg.encode('utf-8', 'ignore')

    errormsg = ''
    if CATCHERR_BUFFILE_FD > 0:
        size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    # append error msg to LOG
    if errormsg:
        LOG_CONTENT += errormsg

    # append normal msg to LOG
    save_msg = msg.strip() if msg else None
    if save_msg:
        timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
        LOG_CONTENT += timestr + save_msg + '\n'

    if errormsg:
        _color_print('', NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
Example #15
  def truncateFile(self, fusepath, size):
    '''
    Unconditionally truncate the file. Don't check to see if the user has
    access.
    '''

    try:
      self.lockFile(fusepath)

      opcodes = self._genCacheOpcodes(fusepath)
      self._validateCache(fusepath, opcodes)
      realpath = self._generatePath(fusepath, opcodes)

      logger.debug('Truncating %s to %d bytes.' % (realpath, size))

      fd = os.open(realpath, os.O_RDWR)
      os.ftruncate(fd, size)
      os.close(fd)

      # Since we wrote to the file, invalidate the stat cache if it exists.
      self._invalidateStatCache(realpath)

      return 0

    finally:
      self.unlockFile(fusepath)
Example #16
def with_pid_file(pid_file, pid):
    """"""
    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        logging.error(e.message)
        return False

    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1

    flags = fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
    assert flags != -1

    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        pid = os.read(fd, 32)
        logging.error('already started %(pid)s' % {'pid': pid})
        os.close(fd)
        return False

    os.ftruncate(fd, 0)
    os.write(fd, str(pid))
    return True
Example #17
 def _create_file(self, filename, size, sparse=True):
     try:
         f = open(filename, "w+")
     except (OSError, IOError):
         raise ExecutionError("Could not open %s" % filename)
     try:
         if sparse:
             try:
                 os.posix_fallocate(f.fileno(), 0, size)
             except AttributeError:
                 # Prior to version 3.3, Python does not provide fallocate
                 os.ftruncate(f.fileno(), size)
         else:
             self.shell.log.info("Writing %d bytes" % size)
             while size > 0:
                 write_size = min(size, 1024)
                 f.write("\0" * write_size)
                 size -= write_size
     except (OSError, IOError):
         os.remove(filename)
         raise ExecutionError("Could not expand file to %d bytes" % size)
     except OverflowError:
         raise ExecutionError("The file size is too large (%d bytes)" % size)
     finally:
         f.close()
Example #18
File: magic.py Project: zjc5415/pyq
def forward_outputs(outs):
    for fd in (1, 2):
        if fd in outs:
            os.lseek(fd, 0, os.SEEK_SET)
            with io.open(fd, closefd=False) as f:
                STD_STREAM[fd].writelines(f)
            os.ftruncate(fd, 0)
Example #19
def create_anon_file(sz):
    fd, fn = tempfile.mkstemp(dir=XDG_RUNTIME_DIR) # create temporary file
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
    os.unlink(fn) # make it anonymous
    os.ftruncate(fd, sz) # limit the size
    return fd
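A hedged follow-on showing what such an anonymous, size-limited fd is typically used for: mapping it into memory (and, in Wayland-style protocols, passing it to another process over a Unix socket). The sketch below is self-contained, with /tmp standing in for XDG_RUNTIME_DIR:

import mmap
import os
import tempfile

fd, name = tempfile.mkstemp(dir="/tmp")   # /tmp stands in for XDG_RUNTIME_DIR
os.unlink(name)                           # anonymous: no path, storage freed on close
os.ftruncate(fd, 4096)                    # give the fd a usable size before mapping
buf = mmap.mmap(fd, 4096)                 # map it; a peer process could map it too
buf[:5] = b"hello"
buf.close()
os.close(fd)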
Example #20
def _init_mmap(path=None, filename=None, size=PAGESIZE):
    """Given path, filename => filename, size, mmap"""
    if path is None:
        path = DEFAULT_PATH

    if filename is None:
        filename = 'mmstats-%d' % os.getpid()
        tid = libgettid.gettid()
        if tid:
            filename += '-%d' % tid

    full_path = os.path.join(path, filename)

    # Create new empty file to back memory map on disk
    fd = os.open(full_path, os.O_CREAT | os.O_TRUNC | os.O_RDWR)
    if size > PAGESIZE:
        if size % PAGESIZE:
            size = size + (PAGESIZE - (size % PAGESIZE))
    else:
        size = PAGESIZE

    # Zero out the file
    os.ftruncate(fd, size)

    m = mmap.mmap(fd, size, mmap.MAP_SHARED, mmap.PROT_WRITE)
    return (full_path, size, m)
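The size rounding above matters because mmap maps whole pages; a standalone sketch of the same arithmetic:

import mmap

PAGESIZE = mmap.PAGESIZE
size = 5000
if size > PAGESIZE:
    if size % PAGESIZE:
        size = size + (PAGESIZE - (size % PAGESIZE))
else:
    size = PAGESIZE
print(size)   # 8192 on systems with 4 KiB pages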
Example #21
        def __init__(self, size, fd=-1):
            self.size = size
            self.fd = fd
            if fd == -1:
                if PY3:
                    self.fd, name = tempfile.mkstemp(
                        prefix='pym-%d-' % (os.getpid(),),
                        dir=util.get_temp_dir(),
                    )

                    os.unlink(name)
                    util.Finalize(self, os.close, (self.fd,))
                    with io.open(self.fd, 'wb', closefd=False) as f:
                        bs = 1024 * 1024
                        if size >= bs:
                            zeros = b'\0' * bs
                            for _ in range(size // bs):
                                f.write(zeros)
                            del(zeros)
                        f.write(b'\0' * (size % bs))
                        assert f.tell() == size
                else:
                    name = tempfile.mktemp(
                        prefix='pym-%d-' % (os.getpid(),),
                        dir=util.get_temp_dir(),
                    )
                    self.fd = os.open(
                        name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600,
                    )
                    util.Finalize(self, os.close, (self.fd,))
                    os.unlink(name)
                    os.ftruncate(self.fd, size)
            self.buffer = mmap.mmap(self.fd, self.size)
Example #22
File: db.py Project: JioCloud/swift
    def _commit_puts(self, item_list=None):
        """
        Scan for .pending files and commit the found records by feeding them
        to merge_items().

        :param item_list: A list of items to commit in addition to .pending
        """
        if self.db_file == ":memory:" or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            self._preallocate()
            if not os.path.getsize(self.pending_file):
                if item_list:
                    self.merge_items(item_list)
                return
            with open(self.pending_file, "r+b") as fp:
                for entry in fp.read().split(":"):
                    if entry:
                        try:
                            self._commit_puts_load(item_list, entry)
                        except Exception:
                            self.logger.exception(
                                _("Invalid pending entry %(file)s: %(entry)s"),
                                {"file": self.pending_file, "entry": entry},
                            )
                if item_list:
                    self.merge_items(item_list)
                try:
                    os.ftruncate(fp.fileno(), 0)
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        raise
Example #23
    def __init__(self, u_boot_console):
        """Initialize a new GptTestDiskImage object.

        Args:
            u_boot_console: A U-Boot console.

        Returns:
            Nothing.
        """

        filename = 'test_gpt_disk_image.bin'

        persistent = u_boot_console.config.persistent_data_dir + '/' + filename
        self.path = u_boot_console.config.result_dir  + '/' + filename

        if os.path.exists(persistent):
            u_boot_console.log.action('Disk image file ' + persistent +
                ' already exists')
        else:
            u_boot_console.log.action('Generating ' + persistent)
            fd = os.open(persistent, os.O_RDWR | os.O_CREAT)
            os.ftruncate(fd, 4194304)
            os.close(fd)
            cmd = ('sgdisk', '-U', '375a56f7-d6c9-4e81-b5f0-09d41ca89efe',
                persistent)
            u_boot_utils.run_and_log(u_boot_console, cmd)
            cmd = ('sgdisk', '--new=1:2048:2560', '-c 1:part1', persistent)
            u_boot_utils.run_and_log(u_boot_console, cmd)
            cmd = ('sgdisk', '--new=2:4096:4608', '-c 2:part2', persistent)
            u_boot_utils.run_and_log(u_boot_console, cmd)
            cmd = ('sgdisk', '-l', persistent)
            u_boot_utils.run_and_log(u_boot_console, cmd)

        cmd = ('cp', persistent, self.path)
        u_boot_utils.run_and_log(u_boot_console, cmd)
Example #24
 def tidy(self):
     '''
     Remove dead space.
     '''
     INTSIZE = (self.xwidth - self.width) // 2
     entries = []
     data = []
     
     # Fetch content, and update pointers without saving to the file yet.
     ptr = self.offset + self.size * self.xwidth
     for i in range(self.items):
         entry = self.__read(self.xwidth - self.width, self.offset + i * self.xwidth + self.width)
         entry = entry.decode('utf-8', 'strict')
         offset = int(entry[:len(entry) // 2])
         length = int(entry[len(entry) // 2:])
         if length > 0:
             data.append(self.__read(length, offset))
         entries.append((ptr if length > 0 else 0, length))
         ptr += length
     
     # Update stored content.
     ptr = self.offset + self.size * self.xwidth
     for datum in data:
         self.__write(datum, ptr)
         ptr += len(datum)
     os.ftruncate(self.fd, ptr)
     
     # Update all pointers in the entry list.
     for i, (ptr, length) in enumerate(entries):
         entry = ('%0*i%0*i' % (INTSIZE, ptr, INTSIZE, length)).encode('utf-8')
         self.__write(entry, self.offset + i * self.xwidth + self.width)
Example #25
def modify(guid, worker, filename, key):
    obj = dict(guid=guid, total=0, workers=[], extraTotal=0, extraWorkers=[])

    fd = os.open(filename, os.O_CREAT|os.O_RDWR)

    raw  = os.read(fd, 1000000)

    if (raw is None) or ('' == raw):
        os.ftruncate(fd, 0)
    else:
        tmp = json.loads(raw)
        if ('guid' not in tmp) or (guid != tmp['guid']):
            os.ftruncate(fd, 0)
        else:
            obj = tmp

    if 'main' == key:
        obj['total'] += 1
        obj['workers'].append(worker)
    else:
        obj['extraTotal'] += 1
        obj['extraWorkers'].append(worker)

    os.lseek(fd, 0, os.SEEK_SET)
    os.write(fd, json.dumps(obj, indent=4, sort_keys=True))
Example #26
def __lock_pidfile(filename, verbose=True):
    fd = os.open(filename, os.O_CREAT|os.O_RDWR)

    try:
        pid = 0
        # Lock for read, to read PID
        fcntl.flock(fd, fcntl.LOCK_SH)

        pid = 0
        str_pid = os.read(fd, 100)
        try:
            pid = int(str_pid)
        except ValueError:
            # XXX bad pidfile - clean trash
            os.ftruncate(fd, 0)
            if verbose: sys.stderr.write("Bad Pidfile!\nPidfile contains trash: \"%s\"\nPidfile has been cleaned!\n" % str_pid)
        
        # reLock for write or exit
        fcntl.flock(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
        # save PID
        process_id = os.getpid()
        os.ftruncate(fd, 0)
        os.lseek(fd, 0, os.SEEK_SET)        
        os.write(fd, str(process_id).encode('utf-8'))
        os.fsync(fd)
        
        # Re-lock for read to make the PID accessible
        fcntl.flock(fd, fcntl.LOCK_SH)
        return fd, pid
    except IOError:
        os.close(fd)        
        return None, pid
Example #27
def _general_print(head, color, msg = None, stream = None, level = 'normal'):
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    errormsg = ''
    if CATCHERR_BUFFILE_FD > 0:
        size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    if LOG_FILE_FP:
        if errormsg:
            LOG_CONTENT += errormsg

        save_msg = msg.strip() if msg else None
        if save_msg:
            global HOST_TIMEZONE
            timestr = time.strftime("[%m/%d %H:%M:%S] ",
                                    time.gmtime(time.time() - HOST_TIMEZONE))
            LOG_CONTENT += timestr + save_msg + '\n'

    if errormsg:
        _color_print('', NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
Example #28
    def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
                             native_sysroot, pseudo):
        """
        Prepare content for a btrfs rootfs partition.

        Currently handles ext2/3/4 and btrfs.
        """
        du_cmd = "du -ks %s" % rootfs_dir
        out = exec_cmd(du_cmd)
        actual_rootfs_size = int(out.split()[0])

        extra_blocks = self.get_extra_block_count(actual_rootfs_size)
        if extra_blocks < self.extra_space:
            extra_blocks = self.extra_space

        rootfs_size = actual_rootfs_size + extra_blocks
        rootfs_size *= self.overhead_factor

        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
                    (extra_blocks, self.mountpoint, rootfs_size))

        with open(rootfs, 'w') as sparse:
            os.ftruncate(sparse.fileno(), rootfs_size * 1024)

        label_str = ""
        if self.label:
            label_str = "-L %s" % self.label

        mkfs_cmd = "mkfs.%s -b %d -r %s %s %s" % \
            (self.fstype, rootfs_size * 1024, rootfs_dir, label_str, rootfs)
        exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
Example #29
        def release(self, fd, dest, fsize, filename, created):
            """release handler.

            @param fd file discripter
            @param writelen size of data to be written
            """
            os.lseek(fd, 0, os.SEEK_SET)
            DRDFSLog.debug("fd = " + str(fd))
            if created == False:
                self.repl_info.ReplInfoWhenClose(filename, dest, 
                                                 self.access_info)

            else:
                try:
                    buf = os.read(fd, conf.bufsize)
                except os.error, e:
                    print "OSError in release (%s)" %(e)
                l = buf.rsplit(',')
                size = string.atol(l[2])
                try:
                    buf = l[0] + ',' + l[1] + ',' + str(fsize) + ',\n'
                    DRDFSLog.debug("write to meta file %s" % buf)
                    
                    os.ftruncate(fd, len(buf))
                    os.lseek(fd, 0, os.SEEK_SET)
                    os.write(fd, buf)
                    os.fsync(fd)
                except os.error, e:
                    print "OSError in release (%s)" %(e)
Example #30
def _general_print(head, color, msg=None, stream=None, level="normal"):
    global LOG_CONTENT
    if not stream:
        stream = sys.stdout

    if LOG_LEVELS[level] > LOG_LEVEL:
        # skip
        return

    errormsg = ""
    if CATCHERR_BUFFILE_FD > 0:
        size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
        os.ftruncate(CATCHERR_BUFFILE_FD, 0)

    if LOG_FILE_FP:
        if errormsg:
            LOG_CONTENT += errormsg

        save_msg = msg.strip()
        if save_msg:
            timestr = datetime.now().strftime("[%m/%d %H:%M:%S] ")
            LOG_CONTENT += timestr + save_msg + "\n"

    if errormsg:
        _color_print("", NO_COLOR, errormsg, stream, level)

    _color_print(head, color, msg, stream, level)
Example #31
    def _clone_local(self, meter, size_bytes):
        if self._input_path == "/dev/null":
            # Not really sure why this check is here,
            # but keeping for compat
            logging.debug("Source dev was /dev/null. Skipping")
            return
        if self._input_path == self._output_path:
            logging.debug("Source and destination are the same. Skipping.")
            return

        # If the destination file already exists, it takes priority over the
        # sparse flag and we fall through to a plain block copy.

        if (not os.path.exists(self._output_path) and self._sparse):
            clone_block_size = 4096
            sparse = True
            fd = None
            try:
                fd = os.open(self._output_path, os.O_WRONLY | os.O_CREAT, 0640)
                os.ftruncate(fd, size_bytes)
            finally:
                if fd is not None:
                    os.close(fd)
        else:
            clone_block_size = 1024 * 1024 * 10
            sparse = False

        logging.debug("Local Cloning %s to %s, sparse=%s, block_size=%s",
                      self._input_path, self._output_path, sparse,
                      clone_block_size)

        zeros = '\0' * 4096

        src_fd, dst_fd = None, None
        try:
            try:
                src_fd = os.open(self._input_path, os.O_RDONLY)
                dst_fd = os.open(self._output_path, os.O_WRONLY | os.O_CREAT,
                                 0640)

                i = 0
                while 1:
                    l = os.read(src_fd, clone_block_size)
                    s = len(l)
                    if s == 0:
                        meter.end(size_bytes)
                        break
                    # check sequence of zeros
                    if sparse and zeros == l:
                        os.lseek(dst_fd, s, 1)
                    else:
                        b = os.write(dst_fd, l)
                        if s != b:
                            meter.end(i)
                            break
                    i += s
                    if i < size_bytes:
                        meter.update(i)
            except OSError, e:
                raise RuntimeError(
                    _("Error cloning diskimage %s to %s: %s") %
                    (self._input_path, self._output_path, str(e)))
        finally:
            if src_fd is not None:
                os.close(src_fd)
            if dst_fd is not None:
                os.close(dst_fd)
Example #32
 def f(fi, len):
     os.ftruncate(fi, len)
Example #33
    def copy(self, sync=True, verify=True):
        """
        Copy the image to the destination file using bmap. The 'sync' argument
        defines whether the destination file has to be synchronized upon
        return.  The 'verify' argument defines whether the checksum has to be
        verified while copying.
        """

        # Create the queue for block batches and start the reader thread, which
        # will read the image in batches and put the results to '_batch_queue'.
        self._batch_queue = Queue.Queue(self._batch_queue_len)
        thread.start_new_thread(self._get_data, (verify, ))

        blocks_written = 0
        bytes_written = 0
        fsync_last = 0

        self._progress_started = False
        self._progress_index = 0
        self._progress_time = datetime.datetime.now()

        # Read the image in '_batch_blocks' chunks and write them to the
        # destination file
        while True:
            batch = self._batch_queue.get()
            if batch is None:
                # No more data, the image is written
                break
            elif batch[0] == "error":
                # The reader thread encountered an error and passed us the
                # exception.
                exc_info = batch[1]
                raise exc_info[0], exc_info[1], exc_info[2]

            (start, end, buf) = batch[1:4]

            assert len(buf) <= (end - start + 1) * self.block_size
            assert len(buf) > (end - start) * self.block_size

            self._f_dest.seek(start * self.block_size)

            # Synchronize the destination file if we reached the watermark
            if self._dest_fsync_watermark:
                if blocks_written >= fsync_last + self._dest_fsync_watermark:
                    fsync_last = blocks_written
                    self.sync()

            try:
                self._f_dest.write(buf)
            except IOError as err:
                raise Error("error while writing blocks %d-%d of '%s': %s" %
                            (start, end, self._dest_path, err))

            self._batch_queue.task_done()
            blocks_written += (end - start + 1)
            bytes_written += len(buf)

            self._update_progress(blocks_written)

        if not self.image_size:
            # The image size was unknown up until now, set it
            self._set_image_size(bytes_written)

        # This is just a sanity check - we should have written exactly
        # 'mapped_cnt' blocks.
        if blocks_written != self.mapped_cnt:
            raise Error("wrote %u blocks from image '%s' to '%s', but should "
                        "have %u - bmap file '%s' does not belong to this "
                        "image" %
                        (blocks_written, self._image_path, self._dest_path,
                         self.mapped_cnt, self._bmap_path))

        if self._dest_is_regfile:
            # Make sure the destination file has the same size as the image
            try:
                os.ftruncate(self._f_dest.fileno(), self.image_size)
            except OSError as err:
                raise Error("cannot truncate file '%s': %s" %
                            (self._dest_path, err))

        try:
            self._f_dest.flush()
        except IOError as err:
            raise Error("cannot flush '%s': %s" % (self._dest_path, err))

        if sync:
            self.sync()
Example #34
 def write(self, pid):
     os.ftruncate(self.fd, 0)
     os.write(self.fd, b"%d" % pid)
     os.fsync(self.fd)
Example #35
def mksparse(outfile, size):
    '''use os.ftruncate to create a sparse file of the given size.'''
    fobj = open(outfile, "w")
    os.ftruncate(fobj.fileno(), size)
    fobj.close()
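A hedged usage sketch: the truncated file has a large apparent size but allocates no data blocks until written, which os.stat can confirm (st_blocks counts 512-byte units).

import os

mksparse("/tmp/disk.img", 1024**3)   # 1 GiB apparent size
st = os.stat("/tmp/disk.img")
print(st.st_size)                    # 1073741824
print(st.st_blocks * 512)            # near zero: nothing is allocated yet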
Example #36
    def write(self, target, expand):
        """Write disk image to the media or file."""
        def write_sfdisk_script(outf, parts):
            for key, val in parts['partitiontable'].items():
                if key in ("partitions", "device", "firstlba", "lastlba"):
                    continue
                if key == "id":
                    key = "label-id"
                outf.write("{}: {}\n".format(key, val))
            outf.write("\n")
            for part in parts['partitiontable']['partitions']:
                line = ''
                for name in ('attrs', 'name', 'size', 'type', 'uuid'):
                    if name == 'size' and part['type'] == 'f':
                        # don't write size for extended partition
                        continue
                    val = part.get(name)
                    if val:
                        line += '{}={}, '.format(name, val)
                if line:
                    line = line[:-2]  # strip ', '
                if part.get('bootable'):
                    line += ' ,bootable'
                outf.write("{}\n".format(line))
            outf.flush()

        def read_ptable(path):
            out = exec_cmd("{} -dJ {}".format(self.sfdisk, path))
            return json.loads(out)

        def write_ptable(parts, target):
            with tempfile.NamedTemporaryFile(prefix="wic-sfdisk-",
                                             mode='w') as outf:
                write_sfdisk_script(outf, parts)
                cmd = "{} --no-reread {} < {} ".format(self.sfdisk, target,
                                                       outf.name)
                exec_cmd(cmd, as_shell=True)

        if expand is None:
            sparse_copy(self.imagepath, target)
        else:
            # copy first sectors that may contain bootloader
            sparse_copy(self.imagepath,
                        target,
                        length=2048 * self._lsector_size)

            # copy source partition table to the target
            parts = read_ptable(self.imagepath)
            write_ptable(parts, target)

            # get size of unpartitioned space
            free = None
            for line in exec_cmd("{} -F {}".format(self.sfdisk,
                                                   target)).splitlines():
                if line.startswith("Unpartitioned space ") and line.endswith(
                        "sectors"):
                    free = int(line.split()[-2])
                    # Align free space to a 2048 sector boundary. YOCTO #12840.
                    free = free - (free % 2048)
            if free is None:
                raise WicError("Can't get size of unpartitioned space")

            # calculate expanded partitions sizes
            sizes = {}
            num_auto_resize = 0
            for num, part in enumerate(parts['partitiontable']['partitions'],
                                       1):
                if num in expand:
                    if expand[num] != 0:  # don't resize partition if size is set to 0
                        sectors = expand[num] // self._lsector_size
                        free -= sectors - part['size']
                        part['size'] = sectors
                        sizes[num] = sectors
                elif part['type'] != 'f':
                    sizes[num] = -1
                    num_auto_resize += 1

            for num, part in enumerate(parts['partitiontable']['partitions'],
                                       1):
                if sizes.get(num) == -1:
                    part['size'] += free // num_auto_resize

            # write resized partition table to the target
            write_ptable(parts, target)

            # read resized partition table
            parts = read_ptable(target)

            # copy partitions content
            for num, part in enumerate(parts['partitiontable']['partitions'],
                                       1):
                pnum = str(num)
                fstype = self.partitions[pnum].fstype

                # copy unchanged partition
                if part['size'] == self.partitions[pnum].size // self._lsector_size:
                    logger.info("copying unchanged partition {}".format(pnum))
                    sparse_copy(self._get_part_image(pnum),
                                target,
                                seek=part['start'] * self._lsector_size)
                    continue

                # resize or re-create partitions
                if fstype.startswith('ext') or fstype.startswith('fat') or \
                   fstype.startswith('linux-swap'):

                    partfname = None
                    with tempfile.NamedTemporaryFile(
                            prefix="wic-part{}-".format(pnum)) as partf:
                        partfname = partf.name

                    if fstype.startswith('ext'):
                        logger.info("resizing ext partition {}".format(pnum))
                        partimg = self._get_part_image(pnum)
                        sparse_copy(partimg, partfname)
                        exec_cmd("{} -pf {}".format(self.e2fsck, partfname))
                        exec_cmd("{} {} {}s".format(\
                                 self.resize2fs, partfname, part['size']))
                    elif fstype.startswith('fat'):
                        logger.info(
                            "copying content of the fat partition {}".format(
                                pnum))
                        with tempfile.TemporaryDirectory(
                                prefix='wic-fatdir-') as tmpdir:
                            # copy content to the temporary directory
                            cmd = "{} -snompi {} :: {}".format(
                                self.mcopy, self._get_part_image(pnum), tmpdir)
                            exec_cmd(cmd)
                            # create new msdos partition
                            label = part.get("name")
                            label_str = "-n {}".format(label) if label else ''

                            cmd = "{} {} -C {} {}".format(
                                self.mkdosfs, label_str, partfname,
                                part['size'])
                            exec_cmd(cmd)
                            # copy content from the temporary directory to the new partition
                            cmd = "{} -snompi {} {}/* ::".format(
                                self.mcopy, partfname, tmpdir)
                            exec_cmd(cmd, as_shell=True)
                    elif fstype.startswith('linux-swap'):
                        logger.info("creating swap partition {}".format(pnum))
                        label = part.get("name")
                        label_str = "-L {}".format(label) if label else ''
                        uuid = part.get("uuid")
                        uuid_str = "-U {}".format(uuid) if uuid else ''
                        with open(partfname, 'w') as sparse:
                            os.ftruncate(sparse.fileno(),
                                         part['size'] * self._lsector_size)
                        exec_cmd("{} {} {} {}".format(self.mkswap, label_str,
                                                      uuid_str, partfname))
                    sparse_copy(partfname,
                                target,
                                seek=part['start'] * self._lsector_size)
                    os.unlink(partfname)
                elif part['type'] != 'f':
                    logger.warning(
                        "skipping partition {}: unsupported fstype {}".format(
                            pnum, fstype))
Example #37
    def copy(self, sync=True, verify=True):
        """ Copy the image to the destination file using bmap. The sync
        argument defines whether the destination file has to be synchronized
        upon return.  The 'verify' argument defines whether the SHA1 checksum
        has to be verified while copying. """

        # Save file positions in order to restore them at the end
        image_pos = self._f_image.tell()
        dest_pos = self._f_dest.tell()
        if self._f_bmap:
            bmap_pos = self._f_bmap.tell()

        # Create the queue for block batches and start the reader thread, which
        # will read the image in batches and put the results to '_batch_queue'.
        self._batch_queue = Queue.Queue(self._batch_queue_len)
        thread.start_new_thread(self._get_data, (verify, ))

        blocks_written = 0
        bytes_written = 0
        fsync_last = 0

        # Read the image in '_batch_blocks' chunks and write them to the
        # destination file
        while True:
            batch = self._batch_queue.get()
            if batch is None:
                # No more data, the image is written
                break
            elif batch[0] == "error":
                # The reader thread encountered an error and passed us the
                # exception.
                exc_info = batch[1]
                raise exc_info[1].with_traceback(exc_info[2])

            (start, end, buf) = batch[1:4]

            assert len(buf) <= (end - start + 1) * self.block_size
            assert len(buf) > (end - start) * self.block_size

            self._f_dest.seek(start * self.block_size)

            # Synchronize the destination file if we reached the watermark
            if self._dest_fsync_watermark:
                if blocks_written >= fsync_last + self._dest_fsync_watermark:
                    fsync_last = blocks_written
                    self.sync()

            try:
                self._f_dest.write(buf)
            except IOError as err:
                raise Error("error while writing blocks %d-%d of '%s': %s" \
                            % (start, end, self._dest_path, err))

            self._batch_queue.task_done()
            blocks_written += (end - start + 1)
            bytes_written += len(buf)

        if not self.image_size:
            # The image size was unknown up until now, probably because this is
            # a compressed image. Initialize the corresponding class attributes
            # now, when we know the size.
            self._initialize_sizes(bytes_written)

        # This is just a sanity check - we should have written exactly
        # 'mapped_cnt' blocks.
        if blocks_written != self.mapped_cnt:
            raise Error("wrote %u blocks, but should have %u - inconsistent " \
                       "bmap file" % (blocks_written, self.mapped_cnt))

        if self._dest_is_regfile:
            # Make sure the destination file has the same size as the image
            try:
                os.ftruncate(self._f_dest.fileno(), self.image_size)
            except OSError as err:
                raise Error("cannot truncate file '%s': %s" \
                            % (self._dest_path, err))

        try:
            self._f_dest.flush()
        except IOError as err:
            raise Error("cannot flush '%s': %s" % (self._dest_path, err))

        if sync:
            self.sync()

        # Restore file positions
        self._f_image.seek(image_pos)
        self._f_dest.seek(dest_pos)
        if self._f_bmap:
            self._f_bmap.seek(bmap_pos)
Example #38
 async def resize(self, length):
     await self.file_transactions.fd_resize(self.fd, length)
     os.ftruncate(self.file_oracle_fd, length)
Example #39
def padToBlockSize(path):
    with open(path, 'a') as f:
        size = os.fstat(f.fileno()).st_size
        newSize = 512 * ((size + 511) // 512)
        log.debug("Truncating file %s to %d bytes", path, newSize)
        os.ftruncate(f.fileno(), newSize)
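Worked instances of the padding formula, for clarity:

# 512 * ((size + 511) // 512) rounds size up to the next 512-byte boundary:
#   size = 1000 -> 512 * (1511 // 512) = 512 * 2 = 1024
#   size = 1024 -> 512 * (1535 // 512) = 512 * 2 = 1024  (already aligned)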
Example #40
    def __init__(self, name=None, create=False, size=0):
        if not size >= 0:
            raise ValueError("'size' must be a positive integer")
        if create:
            self._flags = _O_CREX | os.O_RDWR
            if size == 0:
                raise ValueError(
                    "'size' must be a positive number different from zero")
        if name is None and not self._flags & os.O_EXCL:
            raise ValueError("'name' can only be None if create=True")

        if _USE_POSIX:

            # POSIX Shared Memory

            if name is None:
                while True:
                    name = _make_filename()
                    try:
                        self._fd = _posixshmem.shm_open(name,
                                                        self._flags,
                                                        mode=self._mode)
                    except FileExistsError:
                        continue
                    self._name = name
                    break
            else:
                name = "/" + name if self._prepend_leading_slash else name
                self._fd = _posixshmem.shm_open(name,
                                                self._flags,
                                                mode=self._mode)
                self._name = name
            try:
                if create and size:
                    os.ftruncate(self._fd, size)
                stats = os.fstat(self._fd)
                size = stats.st_size
                self._mmap = mmap.mmap(self._fd, size)
            except OSError:
                self.unlink()
                raise

            resource_tracker.register(self._name, "shared_memory")

        else:

            # Windows Named Shared Memory

            if create:
                while True:
                    temp_name = _make_filename() if name is None else name
                    # Create and reserve shared memory block with this name
                    # until it can be attached to by mmap.
                    h_map = _winapi.CreateFileMapping(
                        _winapi.INVALID_HANDLE_VALUE, _winapi.NULL,
                        _winapi.PAGE_READWRITE, (size >> 32) & 0xFFFFFFFF,
                        size & 0xFFFFFFFF, temp_name)
                    try:
                        last_error_code = _winapi.GetLastError()
                        if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
                            if name is not None:
                                raise FileExistsError(
                                    errno.EEXIST, os.strerror(errno.EEXIST),
                                    name, _winapi.ERROR_ALREADY_EXISTS)
                            else:
                                continue
                        self._mmap = mmap.mmap(-1, size, tagname=temp_name)
                    finally:
                        _winapi.CloseHandle(h_map)
                    self._name = temp_name
                    break

            else:
                self._name = name
                # Dynamically determine the existing named shared memory
                # block's size which is likely a multiple of mmap.PAGESIZE.
                h_map = _winapi.OpenFileMapping(_winapi.FILE_MAP_READ, False,
                                                name)
                try:
                    p_buf = _winapi.MapViewOfFile(h_map, _winapi.FILE_MAP_READ,
                                                  0, 0, 0)
                finally:
                    _winapi.CloseHandle(h_map)
                size = _winapi.VirtualQuerySize(p_buf)
                self._mmap = mmap.mmap(-1, size, tagname=name)

        self._size = size
        self._buf = memoryview(self._mmap)
Example #41
#!/usr/bin/python3
import os

# open the file
fd = os.open("foo.txt", os.O_RDWR | os.O_CREAT)

# write a string
os.write(fd, "This is test - This is test".encode())

# truncate the file to 10 bytes with ftruncate()
os.ftruncate(fd, 10)

# seek back to the start and read the contents
os.lseek(fd, 0, 0)
data = os.read(fd, 100)
print("String read:", data)

# close the file
os.close(fd)

print("File closed successfully!!")

Example #42
def replaceFdContent(fd, content):
    os.lseek(fd, 0, os.SEEK_SET)
    os.ftruncate(fd, 0)
    os.write(fd, bytes(content, "utf-8"))
Example #43
 def rollback(self):
     """Truncate the file back to last_offset."""
     # os.ftruncate() requires the descriptor as its first argument; the
     # original snippet omitted it. `self.fd` is an assumed attribute name.
     os.ftruncate(self.fd, self.last_offset)
Example #44
def CreateEmptyFile(fd, size):
    if (size > 0):
        os.ftruncate(fd, size)
Example #45
#!/usr/bin/env python

import os
import mmap

bin = file("/usr/bin/id").read()

fd = os.open("id", os.O_RDWR | os.O_CREAT | os.O_EXCL, 02750)
os.ftruncate(fd, len(bin))

m = mmap.mmap(fd, len(bin))
m[:] = bin
m.flush()
Example #46
def delete_content(fd):
    os.ftruncate(fd, 0)
    os.lseek(fd, 0, os.SEEK_SET)
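A hedged illustration of why the final lseek matters: os.ftruncate() does not move the file offset, so without the rewind a later write would land at the old offset and leave a hole of NUL bytes.

import os

fd = os.open("/tmp/demo.txt", os.O_RDWR | os.O_CREAT, 0o600)
os.write(fd, b"old contents")   # offset is now 12
delete_content(fd)              # file is empty and the offset is back at 0
os.write(fd, b"new")            # lands at the start, not at offset 12
os.close(fd)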
Example #47
def init_shmem_file(filepath, size):
    with open(filepath, "w+b") as f:
        os.ftruncate(f.fileno(), size)
Example #48
    def _do_file(self):
        """Operate on a file, create, read, truncate, etc."""
        self.absfile = ""
        # Number of files available
        nlen = len(self.n_files)
        if self.up_minfiles == 0 and nlen > self.minfiles:
            self.minfiles = self.bot_minfiles
            self.up_minfiles = 1
        if self.up_minfiles > 0 and nlen < self.minfiles:
            self.minfiles = self.top_minfiles
            self.up_minfiles = 0

        if nlen > self.minfiles and self._percent(self.trunc):
            # Truncate file using the file name
            fileobj = self._get_fileobj()
            self.absfile = os.path.join(self.datadir, fileobj.name)
            # Choose new size at random
            nsize = self.random.randint(0, fileobj.size + self.wsizedev)
            self._dprint(
                "DBG2",
                "TRUNC   %s %d -> %d" % (fileobj.name, fileobj.size, nsize))
            out = self.libc.truncate(self.absfile, nsize)
            if out == -1:
                err = ctypes.get_errno()
                if hasattr(fileobj, 'srcname') and err == errno.ENOENT:
                    # Make sure not to fail if it is a broken symbolic link
                    self._dprint(
                        "DBG2",
                        "TRUNC   %s: broken symbolic link" % fileobj.name)
                    return
                raise OSError(err, os.strerror(err), fileobj.name)
            else:
                self.ntrunc += 1
                fileobj.size = nsize
            return

        if nlen > self.minfiles and self._percent(self.rename):
            # Rename file
            fileobj = self._get_fileobj()
            name = self._newname()
            self.absfile = os.path.join(self.datadir, fileobj.name)
            newfile = os.path.join(self.datadir, name)
            self._dprint("DBG2", "RENAME  %s -> %s" % (fileobj.name, name))
            os.rename(self.absfile, newfile)
            self.nrename += 1
            fileobj.name = name
            return

        if nlen > self.minfiles and self._percent(self.remove):
            # Remove file
            fileobj = self._get_fileobj()
            self.absfile = os.path.join(self.datadir, fileobj.name)
            self._dprint("DBG2", "REMOVE  %s" % fileobj.name)
            os.unlink(self.absfile)
            self.nremove += 1
            self.n_files.remove(fileobj)
            return

        if nlen > self.minfiles and self._percent(self.link):
            # Create hard link
            name = self._newname()
            self.absfile = os.path.join(self.datadir, name)
            try:
                fileobj = self._get_fileobj(
                    condition=lambda f: not hasattr(f, 'srcname'))
            except NoFilesMatchingCondition:
                raise Exception(
                    "Unable to find a valid source file for hard link")
            srcfile = os.path.join(self.datadir, fileobj.name)
            self._dprint("DBG2", "LINK    %s -> %s" % (name, fileobj.name))
            os.link(srcfile, self.absfile)
            self.nlink += 1
            linkobj = FileObj(name=name, size=fileobj.size)
            self.n_files.append(linkobj)
            return

        if nlen > self.minfiles and self._percent(self.slink):
            # Create symbolic link
            name = self._newname()
            self.absfile = os.path.join(self.datadir, name)
            try:
                fileobj = self._get_fileobj(
                    condition=lambda f: not hasattr(f, 'srcname'))
            except NoFilesMatchingCondition:
                raise Exception(
                    "Unable to find a valid source file for symbolic link")
            self._dprint("DBG2", "SLINK   %s -> %s" % (name, fileobj.name))
            os.symlink(fileobj.name, self.absfile)
            self.nslink += 1
            slinkobj = FileObj(name=name,
                               size=fileobj.size,
                               srcname=fileobj.name)
            self.n_files.append(slinkobj)
            return

        if nlen > self.minfiles and self._percent(self.readdir):
            # Read directory
            count = self.random.randint(1, 99)
            self._dprint("DBG2",
                         "READDIR %s maxentries: %d" % (self.datadir, count))
            self.absfile = self.datadir
            fd = self.libc.opendir(self.datadir)
            index = 0
            while True:
                dirent = self.libc.readdir(fd)
                if dirent == 0 or index >= count:
                    break
                index += 1
            out = self.libc.closedir(fd)
            self.nreaddir += 1
            return

        # Select type of open: read, write or rdwr
        total = self.read + self.write
        rn = self.random.randint(0, 99)
        if rn < self.read:
            oflags = os.O_RDONLY
            oflist = ["O_RDONLY"]
        elif rn < total:
            oflags = os.O_WRONLY
            oflist = ["O_WRONLY"]
        else:
            oflags = os.O_RDWR
            oflist = ["O_RDWR"]

        # Set create file flag
        if nlen < self.minfiles:
            # Create at least self.minfiles before any other operation
            cflag = True
        else:
            cflag = self._percent(self.create)

        if cflag:
            # Create new name
            name = self._newname()
            fileobj = FileObj(name=name, size=0)
            self.n_files.append(fileobj)
            if oflags == os.O_RDONLY:
                # Creating file, must be able to write
                oflags = os.O_WRONLY
                oflist = ["O_WRONLY"]
            oflags |= os.O_CREAT
            oflist.append("O_CREAT")
        else:
            # Use name chosen at random
            fileobj = self._get_fileobj()

        if "O_RDONLY" not in oflist and self._percent(self.osync):
            # Add O_SYNC flag when opening file for writing
            oflags |= os.O_SYNC
            oflist.append("O_SYNC")
            self.nosync += 1

        if self.direct:
            # Open file for direct I/O
            oflags |= os.O_DIRECT
            oflist.append("O_DIRECT")

        # Select random or sequential I/O
        sstr = "sequen"
        if self._percent(self.randio):
            sstr = "random"

        ostr = "|".join(oflist)

        fd = None
        index = 0
        is_symlink = False
        while fd is None:
            try:
                index += 1
                if hasattr(fileobj, 'srcname'):
                    is_symlink = True
                self.absfile = os.path.join(self.datadir, fileobj.name)
                self._dprint("DBG2",
                             "OPEN    %s %s %s" % (fileobj.name, sstr, ostr))
                fd = os.open(self.absfile, oflags)
                st = os.fstat(fd)
                if is_symlink:
                    self._dprint(
                        "DBG6", "OPEN    %s inode:%d symlink" %
                        (fileobj.name, st.st_ino))
                    absfile = os.path.join(self.datadir, fileobj.srcname)
                    st = os.stat(absfile)
                    self._dprint(
                        "DBG6", "OPEN    %s inode:%d src:%s" %
                        (fileobj.name, st.st_ino, fileobj.srcname))
                else:
                    self._dprint(
                        "DBG6",
                        "OPEN    %s inode:%d" % (fileobj.name, st.st_ino))
            except OSError as openerr:
                if is_symlink and openerr.errno == errno.ENOENT:
                    self._dprint(
                        "DBG2",
                        "OPEN    %s: broken symbolic link" % fileobj.name)
                    if index >= 10:
                        # Do not exit execution, just return to select another operation
                        return
                    # Choose a new name at random
                    fileobj = self._get_fileobj()
                    is_symlink = False
                else:
                    # Unknown error
                    raise
        self.nopen += 1

        # Get file size for writing
        size = int(abs(self.random.gauss(self.fsizeavg, self.fsizedev)))

        odgrade = False
        if oflags & os.O_WRONLY == os.O_WRONLY:
            lock_type = fcntl.F_WRLCK
            iolist = self._getiolist(size, True)
        elif oflags & os.O_RDWR == os.O_RDWR:
            lock_type = None
            iolist = self._getiolist(size, True)
            iolist += self._getiolist(size, False)
            if self._percent(self.odgrade):
                odgrade = True
        else:
            lock_type = fcntl.F_RDLCK
            size = fileobj.size
            if size == 0:
                # File does not have any data, at least try to read one block
                size = self.rsize
            iolist = self._getiolist(size, False)

        if sstr == "random":
            # Shuffle I/O list for random access
            self.random.shuffle(iolist)

        # Lock full file if necessary
        lockfull = False
        if self._percent(self.lockfull):
            lockfull = True
            lockfout = self._getlock(fileobj.name,
                                     fd,
                                     lock_type=lock_type,
                                     offset=0,
                                     length=0)

        if nlen > self.minfiles and "O_RDONLY" not in oflist and self._percent(
                self.ftrunc):
            # Truncate file using the file descriptor
            # Choose new size at random
            nsize = self.random.randint(0, fileobj.size + self.wsizedev)
            self._dprint(
                "DBG2",
                "FTRUNC  %s %d -> %d" % (fileobj.name, fileobj.size, nsize))
            os.ftruncate(fd, nsize)
            self.nftrunc += 1
            fileobj.size = nsize

        # Read or write the file
        for item in iolist:
            if self.runtime > 0 and time.time() >= self.s_time + self.runtime:
                # Runtime has been reached
                break
            self._do_io(fd=fd, fileobj=fileobj, lockfull=lockfull, **item)

        if lockfull:
            # Unlock full file
            self._getlock(fileobj.name,
                          fd,
                          lock_type=fcntl.F_UNLCK,
                          offset=0,
                          length=0,
                          lock=lockfout)

        fdr = None
        fdroffset = 0
        if odgrade:
            # Open downgrade: the file was opened for both read and write;
            # open it again read-only, then close the read/write descriptor
            # so that only the read-only descriptor remains.
            self._dprint("DBG2", "OPENDGR %s" % fileobj.name)
            fdr = os.open(self.absfile, os.O_RDONLY)
            self.nopendgr += 1
            count = self._do_io(fd=fdr,
                                offset=fdroffset,
                                size=self.rsize,
                                fileobj=fileobj)
            fdroffset += count

        # Close main file descriptor
        self._dprint("DBG3", "CLOSE   %s" % fileobj.name)
        os.close(fd)
        self.nclose += 1

        if odgrade:
            for _ in range(10):
                count = self._do_io(fd=fdr,
                                    offset=fdroffset,
                                    size=self.rsize,
                                    fileobj=fileobj)
                fdroffset += count
            self._dprint("DBG3", "CLOSE   %s" % fileobj.name)
            os.close(fdr)
            self.nclose += 1

        return
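The full-file lock requested above (offset 0, length 0) is the standard POSIX idiom: a byte-range lock of length 0 covers the whole file, including anything appended later. A minimal standalone sketch of that pattern with fcntl.lockf() (path and payload are made up):

import fcntl
import os

fd = os.open("/tmp/lock_demo", os.O_RDWR | os.O_CREAT, 0o644)
fcntl.lockf(fd, fcntl.LOCK_EX, 0, 0, os.SEEK_SET)   # length 0 = whole file
try:
    os.ftruncate(fd, 0)             # safe to rewrite while holding the lock
    os.write(fd, b"payload\n")
finally:
    fcntl.lockf(fd, fcntl.LOCK_UN, 0, 0, os.SEEK_SET)
    os.close(fd)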
Example #49
0
def recvSkipHandler(stream, length, opaque):
    fd = opaque
    cur = os.lseek(fd, length, os.SEEK_CUR)
    return os.ftruncate(fd, cur)
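recvSkipHandler() works because ftruncate() may also grow a file: seeking past the end consumes no space by itself, and truncating up to the new offset turns the skipped region into a hole on filesystems that support sparse files. A minimal sketch of the same idiom (path made up):

import os

fd = os.open("/tmp/sparse_demo", os.O_RDWR | os.O_CREAT, 0o644)
try:
    os.write(fd, b"header")
    cur = os.lseek(fd, 1024 * 1024, os.SEEK_CUR)  # skip 1 MiB without writing
    os.ftruncate(fd, cur)          # grow the file; the gap becomes a hole
    print(os.fstat(fd).st_size)    # logical size: 1 MiB + 6 bytes
finally:
    os.close(fd)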
Example #50
0
    def _clone_local(self, meter, size_bytes):
        if self._input_path == "/dev/null":  # pragma: no cover
            # Not really sure why this check is here,
            # but keeping for compat
            log.debug("Source dev was /dev/null. Skipping")
            return
        if self._input_path == self._output_path:
            log.debug("Source and destination are the same. Skipping.")
            return

        # Create the destination as a sparse file, but only when it does not
        # already exist and the sparse flag is set.
        if not os.path.exists(self._output_path) and self._sparse:
            clone_block_size = 4096
            sparse = True
            fd = None
            try:
                fd = os.open(self._output_path, os.O_WRONLY | os.O_CREAT,
                             0o640)
                os.ftruncate(fd, size_bytes)
            finally:
                if fd:
                    os.close(fd)
        else:
            clone_block_size = 1024 * 1024 * 10
            sparse = False

        log.debug("Local Cloning %s to %s, sparse=%s, block_size=%s",
                      self._input_path, self._output_path,
                      sparse, clone_block_size)

        zeros = b'\0' * 4096

        src_fd, dst_fd = None, None
        try:
            try:
                src_fd = os.open(self._input_path, os.O_RDONLY)
                dst_fd = os.open(self._output_path,
                                 os.O_WRONLY | os.O_CREAT, 0o640)
                i = 0
                while True:
                    buf = os.read(src_fd, clone_block_size)
                    s = len(buf)
                    if s == 0:
                        meter.end(size_bytes)
                        break
                    # write the block, but seek over runs of zeros when the
                    # destination is sparse so holes stay holes
                    if sparse and zeros == buf:
                        os.lseek(dst_fd, s, os.SEEK_CUR)
                    else:
                        b = os.write(dst_fd, buf)
                        if s != b:  # pragma: no cover
                            meter.end(i)
                            break
                    i += s
                    if i < size_bytes:
                        meter.update(i)
            except OSError as e:  # pragma: no cover
                log.debug("Error while cloning", exc_info=True)
                msg = (_("Error cloning diskimage "
                         "%(inputpath)s to %(outputpath)s: %(error)s") %
                         {"inputpath": self._input_path,
                          "outputpath": self._output_path,
                          "error": str(e)})
                raise RuntimeError(msg) from None
        finally:
            if src_fd is not None:
                os.close(src_fd)
            if dst_fd is not None:
                os.close(dst_fd)
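On Linux with Python 3.8+, a loop like the one above can often be replaced by os.copy_file_range(), which copies inside the kernel; note it does not skip zero blocks by itself, so it is not a drop-in for the sparse path. A hedged sketch of that variant (helper name and paths are made up):

import os

def clone_in_kernel(src_path, dst_path, mode=0o640):
    src_fd = os.open(src_path, os.O_RDONLY)
    dst_fd = os.open(dst_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode)
    try:
        remaining = os.fstat(src_fd).st_size
        while remaining > 0:
            # the kernel copies up to 'remaining' bytes and reports the count
            copied = os.copy_file_range(src_fd, dst_fd, remaining)
            if copied == 0:        # unexpected end of the source file
                break
            remaining -= copied
    finally:
        os.close(src_fd)
        os.close(dst_fd)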
Example #51
0
    def _truncate(self, size):
        os.ftruncate(self.fd, size)
Example #52
0
    def __init__(self, seed, enable_graphviz=False, flush=True):
        global KaflNodeID

        self.level = 0
        self.max_level = 0
        self.cycles = -1
        self.all_nodes = []
        self.references = {}
        self.current = self.MASTER_NODE_ID
        self.random_shuffled = False

        self.favorite_buf = []
        self.favorite_unfinished_buf = []
        self.regular_buf = []
        self.regular_unfinished_buf = []
        self.finished_buf = []

        self.bitmap_size = (
            FuzzerConfiguration().config_values['BITMAP_SHM_SIZE'])

        self.buckets = [0x0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80]

        self.fav_bitmap = []
        self.fav_bitmap_updated = False

        self.bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] + "/bitmap",
            os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.crash_bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] +
            "/crash_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.kasan_bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] +
            "/kasan_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)
        self.timeout_bitmap_fd = os.open(
            FuzzerConfiguration().argument_values['work_dir'] +
            "/timeout_bitmap", os.O_RDWR | os.O_SYNC | os.O_CREAT)

        os.ftruncate(self.bitmap_fd, self.bitmap_size)
        os.ftruncate(self.crash_bitmap_fd, self.bitmap_size)
        os.ftruncate(self.kasan_bitmap_fd, self.bitmap_size)
        os.ftruncate(self.timeout_bitmap_fd, self.bitmap_size)

        self.bitmap = mmap.mmap(self.bitmap_fd, self.bitmap_size,
                                mmap.MAP_SHARED,
                                mmap.PROT_WRITE | mmap.PROT_READ)
        self.crash_bitmap = mmap.mmap(self.crash_bitmap_fd, self.bitmap_size,
                                      mmap.MAP_SHARED,
                                      mmap.PROT_WRITE | mmap.PROT_READ)
        self.kasan_bitmap = mmap.mmap(self.kasan_bitmap_fd, self.bitmap_size,
                                      mmap.MAP_SHARED,
                                      mmap.PROT_WRITE | mmap.PROT_READ)
        self.timeout_bitmap = mmap.mmap(self.timeout_bitmap_fd,
                                        self.bitmap_size, mmap.MAP_SHARED,
                                        mmap.PROT_WRITE | mmap.PROT_READ)

        if flush:
            for i in range(self.bitmap_size):
                self.bitmap[i] = '\x00'
                self.crash_bitmap[i] = '\x00'
                self.kasan_bitmap[i] = '\x00'
                self.timeout_bitmap[i] = '\x00'
        for i in range(self.bitmap_size):
            self.fav_bitmap.append(None)

        self.graph = KaflGraph([], enabled=enable_graphviz)
        self.favorites = 0
        self.favorites_in_progress = 0
        self.favorites_finished = 0
        self.paths = len(seed)
        self.paths_in_progress = 0
        self.paths_finished = 0

        self.score_changed = False

        self.payload_hashes = {}

        for payload, bitmap in seed:
            node = KaflNode(self.level,
                            payload,
                            bitmap,
                            node_type=KaflNodeType.favorite)
            self.__append_to_level(node)
            self.favorites += 1

        if self.all_nodes:
            self.current = 0  #self.__get_ref(self.all_nodes[0])
        self.__restore_state()
        self.__restore_graph()
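The bitmaps above follow the usual shared-memory recipe: open a backing file, ftruncate() it to the mapping size (mapping past end-of-file fails), then mmap it MAP_SHARED so every cooperating process sees the same bytes. A minimal Unix-only sketch (path and size made up):

import mmap
import os

size = 4096
fd = os.open("/tmp/bitmap_demo", os.O_RDWR | os.O_CREAT, 0o644)
os.ftruncate(fd, size)            # grow the file to the full mapping size
buf = mmap.mmap(fd, size, mmap.MAP_SHARED,
                mmap.PROT_WRITE | mmap.PROT_READ)
buf[0] = 0xFF                     # visible to every process mapping the file
buf.close()
os.close(fd)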
Example #53
0
    def create(self):
        logger.debug("Creating sparse file %s", self.path)
        with open(self.path, 'w') as sparse:
            os.ftruncate(sparse.fileno(), self.min_size)

        logger.debug("Initializing partition table for %s", self.path)
        exec_native_cmd("parted -s %s mklabel %s" %
                        (self.path, self.ptable_format), self.native_sysroot)

        logger.debug("Set disk identifier %x", self.identifier)
        with open(self.path, 'r+b') as img:
            img.seek(0x1B8)
            img.write(self.identifier.to_bytes(4, 'little'))

        logger.debug("Creating partitions")

        for part in self.partitions:
            if part.num == 0:
                continue

            if self.ptable_format == "msdos" and part.num == 5:
                # Create an extended partition (note: extended
                # partition is described in MBR and contains all
                # logical partitions). The logical partitions save a
                # sector for an EBR just before the start of a
                # partition. The extended partition must start one
                # sector before the start of the first logical
                # partition. This way the first EBR is inside of the
                # extended partition. Since the extended partitions
                # starts a sector before the first logical partition,
                # add a sector at the back, so that there is enough
                # room for all logical partitions.
                self._create_partition(self.path, "extended",
                                       None, part.start - 1,
                                       self.offset - part.start + 1)

            if part.fstype == "swap":
                parted_fs_type = "linux-swap"
            elif part.fstype == "vfat":
                parted_fs_type = "fat32"
            elif part.fstype == "msdos":
                parted_fs_type = "fat16"
                if not part.system_id:
                    part.system_id = '0x6' # FAT16
            else:
                # Type for ext2/ext3/ext4/btrfs
                parted_fs_type = "ext2"

            # Boot ROM of OMAP boards require vfat boot partition to have an
            # even number of sectors.
            if part.mountpoint == "/boot" and part.fstype in ["vfat", "msdos"] \
               and part.size_sec % 2:
                logger.debug("Subtracting one sector from '%s' partition to "
                             "get even number of sectors for the partition",
                             part.mountpoint)
                part.size_sec -= 1

            self._create_partition(self.path, part.type,
                                   parted_fs_type, part.start, part.size_sec)

            if part.part_name:
                logger.debug("partition %d: set name to %s",
                             part.num, part.part_name)
                exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
                                         (part.num, part.part_name,
                                          self.path), self.native_sysroot)

            if part.part_type:
                logger.debug("partition %d: set type UID to %s",
                             part.num, part.part_type)
                exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
                                         (part.num, part.part_type,
                                          self.path), self.native_sysroot)

            if part.uuid and self.ptable_format == "gpt":
                logger.debug("partition %d: set UUID to %s",
                             part.num, part.uuid)
                exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
                                (part.num, part.uuid, self.path),
                                self.native_sysroot)

            if part.label and self.ptable_format == "gpt":
                logger.debug("partition %d: set name to %s",
                             part.num, part.label)
                exec_native_cmd("parted -s %s name %d %s" % \
                                (self.path, part.num, part.label),
                                self.native_sysroot)

            if part.active:
                flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
                logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
                             flag_name, part.num, self.path)
                exec_native_cmd("parted -s %s set %d %s on" % \
                                (self.path, part.num, flag_name),
                                self.native_sysroot)
            if part.system_id:
                exec_native_cmd("sfdisk --part-type %s %s %s" % \
                                (self.path, part.num, part.system_id),
                                self.native_sysroot)
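create() pokes the 32-bit disk identifier straight into the MBR: the signature field lives at offset 0x1B8 and is stored little-endian. A small sketch that reads it back the same way (image path assumed):

with open("/tmp/disk.img", "rb") as img:
    img.seek(0x1B8)                                   # MBR disk-signature field
    identifier = int.from_bytes(img.read(4), "little")
print(hex(identifier))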
Example #54
0
def ar_open(root, path, flags, mode=None, create=False, truncate=False):
    """A function similar to os.open() that ensures that the path
        we're accessing resides within a specified directory subtree.

        'root' is a directory that path must reside in.

        'path' is a path that is interpreted relative to 'root'.  i.e., 'root'
        is prepended to path.  'path' can not contain any symbolic links that
        would cause an access to be redirected outside of 'root'.  If this
        happens we'll raise an OSError exception with errno set to EREMOTE

        'mode' optional permissions mask used if we create 'path'

        'create' optional flag indicating if we should create 'path'

        'truncate' optional flag indicating if we should truncate 'path' after
        opening it."""

    # all paths must be absolute
    assert os.path.isabs(root)

    # only allow read/write flags
    assert (flags & ~(os.O_WRONLY | os.O_RDONLY)) == 0

    # we can't truncate a file unless we open it for writing
    assert not truncate or (flags & os.O_WRONLY)

    # if create is true the user must supply a mode mask
    assert not create or mode is not None

    # we're going to update root and path so prepare an error
    # message with the existing values now.
    eremote = _("Path outside alternate root: root={root}, "
                "path={path}").format(root=root, path=path)

    # make target into a relative path
    if os.path.isabs(path):
        path = __path_abs_to_relative(path)

    # now open the alternate root and get its path
    # done to eliminate any links/mounts/etc in the path
    root_fd = os.open(root, os.O_RDONLY)
    try:
        root = __fd_to_path(root_fd)
    except OSError as e:
        if e.errno != errno.ENOENT:
            os.close(root_fd)
            raise e
    os.close(root_fd)

    # now open the target file, get its path, and make sure it
    # lives in the alternate root
    path_fd = None
    try:
        path_tmp = os.path.join(root, path)
        path_fd = os.open(path_tmp, flags)
    except OSError as e:
        if e.errno != errno.ENOENT or not create:
            raise e

    assert path_fd or create
    if not path_fd:
        # the file doesn't exist so we should try to create it.
        # we'll do this by first opening the directory which
        # will contain the file and then using openat within
        # that directory.
        path_dir = os.path.dirname(path)
        path_file = os.path.basename(path)
        try:
            path_dir_fd = \
                ar_open(root, path_dir, os.O_RDONLY)
        except OSError as e:
            if e.errno != errno.EREMOTE:
                raise e
            raise OSError(errno.EREMOTE, eremote)

        # we opened the directory, now create the file
        try:
            path_fd = sat.openat(path_dir_fd, path_file,
                                 flags | os.O_CREAT | os.O_EXCL, mode)
        except OSError as e:
            os.close(path_dir_fd)
            raise e

        # we created the file
        assert path_fd
        os.close(path_dir_fd)

    # verify that the file we opened lives in the alternate root
    try:
        path = __fd_to_path(path_fd)
    except OSError as e:
        if e.errno != errno.ENOENT:
            os.close(path_fd)
            raise e
        path = os.path.join(root, path)

    if not path.startswith(root):
        os.close(path_fd)
        raise OSError(errno.EREMOTE, eremote)

    if truncate:
        # the user wanted us to truncate the file
        try:
            os.ftruncate(path_fd, 0)
        except OSError as e:
            os.close(path_fd)
            raise e

    return path_fd
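A hypothetical call to ar_open() above: open etc/motd inside an alternate root for writing, creating it if needed and truncating any previous contents (root path, file path, and data are made up):

import os

fd = ar_open("/var/alt_root", "etc/motd", os.O_WRONLY,
             mode=0o644, create=True, truncate=True)
try:
    os.write(fd, b"welcome\n")    # lands in /var/alt_root/etc/motd
finally:
    os.close(fd)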
Example #55
0
# polling loop: the enclosing "while" header is not part of this snippet, so
# it is reconstructed here (together with the exit check after the truncate)
while True:
    process_running = False
    try:
        process.wait(timeout=2.0 if prev_line_count == 0 else 15.0)
    except subprocess.TimeoutExpired:
        process_running = True

    with open(local_filename, "rt") as f:
        # read new lines
        new_lines = f.readlines()
        if not new_lines:
            continue
        output = "".join(new_lines)
        print(output)
        # update task comment with jupyter notebook server links
        if prev_line_count == 0:
            task.comment += "\n" + "".join(
                line for line in new_lines
                if "http://" in line or "https://" in line)
        prev_line_count += len(new_lines)

    os.lseek(fd, 0, 0)
    os.ftruncate(fd, 0)   # reset the scratch log file for the next poll

    if not process_running:
        break

# cleanup
os.close(fd)
# noinspection PyBroadException
try:
    os.unlink(local_filename)
except Exception:
    pass
Example #56
0
    def write(self, data):
        os.lseek(self.fd, 0, os.SEEK_SET)
        os.ftruncate(self.fd, 0)
        os.write(self.fd, data)
        os.fsync(self.fd)
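write() above is the rewrite-in-place idiom: rewind, drop the old contents, write the new ones, and fsync() so the data reaches stable storage before the call returns. A standalone sketch of the same sequence (function and file names made up):

import os

def rewrite_fd(fd, data):
    os.lseek(fd, 0, os.SEEK_SET)   # rewind to the start
    os.ftruncate(fd, 0)            # discard the previous contents
    os.write(fd, data)
    os.fsync(fd)                   # flush to disk before returning

fd = os.open("/tmp/state_demo", os.O_RDWR | os.O_CREAT, 0o600)
rewrite_fd(fd, b"state v2\n")
os.close(fd)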
Example #57
0
def _truncate(fn, length):
    fd = os.open(fn, os.O_RDWR)
    os.ftruncate(fd, length)
    os.close(fd)
Example #58
0
'''
Overview
    os.ftruncate() truncates the file referred to by file descriptor fd
    so that it is at most `length` bytes in size.
    Available on Unix.

Syntax
    The ftruncate() method has the following form:
    os.ftruncate(fd, length)

Parameters
    fd -- the file descriptor.
    length -- the size to truncate the file to, in bytes.

Return value
    This method has no return value.
'''

import os

# open the file (created if missing)
fd = os.open('/tmp/foo.txt', os.O_RDWR | os.O_CREAT)

# write some content; os.write() takes bytes, not str
os.write(fd, 'new-generation text content'.encode('utf-8'))

# truncate the file down to 20 bytes
os.ftruncate(fd, 20)

# rewind the file offset to the beginning
os.lseek(fd, 0, 0)
print('file contents: %s' % os.read(fd, 100))

os.close(fd)
Example #59
0
def decode_xmlrpc_chunk(chunk_start,
                        chunk_len,
                        chunk_checksum,
                        encoded_chunk,
                        write_to=None,
                        mode=0o644):
    """
    Decode a data chunk and optionally write it to a file.

    @param chunk_start: chunk start position in the file (-1 for append)
    @type  chunk_start: str
    @param chunk_len: chunk length
    @type  chunk_len: str
    @param chunk_checksum: sha256 checksum (lower case)
    @type  chunk_checksum: str
    @param encoded_chunk: base64 encoded chunk
    @type  encoded_chunk: str
    @param write_to: path to a file in which the decoded data will be written
    @type  write_to: str
    @param mode: file permissions (example: 0644)
    @type  mode: int
    @return: decoded data
    @rtype:  str
    """

    chunk_start = int(chunk_start)
    chunk_len = int(chunk_len)
    decode_func = base64.decodebytes if hasattr(
        base64, "decodebytes") else base64.decodestring

    if isinstance(encoded_chunk, xmlrpclib.Binary):
        chunk = decode_func(encoded_chunk.data)
    else:
        chunk = decode_func(encoded_chunk)

    if chunk_len not in (-1, len(chunk)):
        raise ValueError("Chunk length doesn't match.")

    if chunk_len == -1:
        chunk = ""
    elif chunk_checksum != hashlib.sha256(chunk).hexdigest().lower():
        raise ValueError("Chunk checksum doesn't match.")

    if not write_to:
        return chunk

    # code below handles writing to a file

    target_dir = os.path.dirname(write_to)
    if not os.path.isdir(target_dir):
        try:
            os.makedirs(target_dir, mode=0o755)
        except OSError as ex:
            if ex.errno != 17:  # EEXIST: the directory already exists
                raise

    fd = os.open(write_to, os.O_RDWR | os.O_CREAT, mode)
    fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    try:
        if chunk_start != -1:
            os.ftruncate(fd, chunk_start)
        os.lseek(fd, 0, 2)  # 2=os.SEEK_END
        os.write(fd, chunk)
    finally:
        fcntl.lockf(fd, fcntl.LOCK_UN)
        os.close(fd)

    if chunk_start != -1 and chunk_len == -1:
        # final chunk, compute checksum of whole file
        file_checksum = kobo.shortcuts.compute_file_checksums(
            write_to, ["sha256"])["sha256"]
        if file_checksum != chunk_checksum:
            raise ValueError("File checksum does not match.")

    return chunk
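A hypothetical round trip for decode_xmlrpc_chunk() above: base64-encode a chunk, then append it to a scratch file (path made up; a chunk_start of -1 selects append mode, so no ftruncate happens):

import base64
import hashlib

data = b"hello chunk"
encoded = base64.encodebytes(data)              # what the sender transmits
checksum = hashlib.sha256(data).hexdigest()     # lower-case sha256
decode_xmlrpc_chunk(-1, len(data), checksum,
                    encoded, write_to="/tmp/chunk_demo")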
Example #60
0
    def write_data(self, data, thefile, shred=True, mode=0o600):
        # TODO: add docstrings for arg types since this code is picky about that
        """Write the data bytes to given path

        This is used to write a byte string to a file or stdout. It is used for
        writing the results of vault encryption or decryption. It is used for
        saving the ciphertext after encryption and it is also used for saving the
        plaintext after decrypting a vault. The type of the 'data' arg should be bytes,
        since in the plaintext case, the original contents can be of any text encoding
        or arbitrary binary data.

        When used to write the result of vault encryption, the val of the 'data' arg
        should be a utf-8 encoded byte string and not a text type.

        When used to write the result of vault decryption, the val of the 'data' arg
        should be a byte string and not a text type.

        :arg data: the byte string (bytes) data
        :arg thefile: file descriptor or filename to save 'data' to.
        :arg shred: if shred==True, make sure that the original data is first shredded so that it cannot be recovered.
        :returns: None
        """
        # FIXME: do we need this now? data_bytes should always be a utf-8 byte string
        b_file_data = to_bytes(data, errors='strict')

        # check if we have a file descriptor instead of a path
        is_fd = False
        try:
            is_fd = (isinstance(thefile, int) and fcntl.fcntl(thefile, fcntl.F_GETFD) != -1)
        except Exception:
            pass

        if is_fd:
            # if passed descriptor, use that to ensure secure access, otherwise it is a string.
            # assumes the fd is securely opened by caller (mkstemp)
            os.ftruncate(thefile, 0)
            os.write(thefile, b_file_data)
        elif thefile == '-':
            # get a ref to either sys.stdout.buffer for py3 or plain old sys.stdout for py2
            # We need sys.stdout.buffer on py3 so we can write bytes to it since the plaintext
            # of the vaulted object could be anything/binary/etc
            output = getattr(sys.stdout, 'buffer', sys.stdout)
            output.write(b_file_data)
        else:
            # file names are insecure and prone to race conditions, so remove and create securely
            if os.path.isfile(thefile):
                if shred:
                    self._shred_file(thefile)
                else:
                    os.remove(thefile)

            # when setting new umask, we get previous as return
            current_umask = os.umask(0o077)
            try:
                try:
                    # create file with secure permissions
                    fd = os.open(thefile, os.O_CREAT | os.O_EXCL | os.O_RDWR | os.O_TRUNC, mode)
                except OSError as ose:
                    # Want to catch FileExistsError, which doesn't exist in Python 2, so catch OSError
                    # and compare the error number to get equivalent behavior in Python 2/3
                    if ose.errno == errno.EEXIST:
                        raise AnsibleError('Vault file got recreated while we were operating on it: %s' % to_native(ose))

                    raise AnsibleError('Problem creating temporary vault file: %s' % to_native(ose))

                try:
                    # now write to the file and ensure ours is only data in it
                    os.ftruncate(fd, 0)
                    os.write(fd, b_file_data)
                except OSError as e:
                    raise AnsibleError('Unable to write to temporary vault file: %s' % to_native(e))
                finally:
                    # Make sure the file descriptor is always closed and reset umask
                    os.close(fd)
            finally:
                os.umask(current_umask)
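As a hypothetical usage of write_data() above (the vault_editor instance name is assumed): passing '-' as the destination streams the bytes to stdout and bypasses the shred/recreate path entirely.

vault_editor.write_data(b"example ciphertext\n", '-')   # '-' writes to stdout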