Example #1
def get_default_sector_size():
    with NamedTemporaryFile() as fp:
        # Truncate to zero, so that extending the size in the next call
        # will cause all the bytes to read as zero.  Stevens $4.13
        os.truncate(fp.name, 0)
        os.truncate(fp.name, MiB(1))
        return Device(fp.name).sectorSize
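A minimal standalone sketch of the truncate-to-extend trick used above (assuming a POSIX filesystem; the MiB helper and Device class from the example are not needed here): truncating an existing file to a larger size makes every new byte read back as zero without writing any data.
import os
from tempfile import NamedTemporaryFile

with NamedTemporaryFile() as fp:
    os.truncate(fp.name, 0)            # the file must already exist
    os.truncate(fp.name, 1024 * 1024)  # extend to 1 MiB; new bytes are zeros
    assert os.path.getsize(fp.name) == 1024 * 1024
    assert fp.read(4) == b'\x00' * 4   # nothing was written, yet reads return zeros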
Example #2
def tst_truncate_path(mnt_dir):
    assert len(TEST_DATA) > 1024

    filename = pjoin(mnt_dir, name_generator())
    with open(filename, 'wb') as fh:
        fh.write(TEST_DATA)

    fstat = os.stat(filename)
    size = fstat.st_size
    assert size == len(TEST_DATA)

    # Add zeros at the end
    os.truncate(filename, size + 1024)
    assert os.stat(filename).st_size == size + 1024
    with open(filename, 'rb') as fh:
        assert fh.read(size) == TEST_DATA
        assert fh.read(1025) == b'\0' * 1024

    # Truncate data
    os.truncate(filename, size - 1024)
    assert os.stat(filename).st_size == size - 1024
    with open(filename, 'rb') as fh:
        assert fh.read(size) == TEST_DATA[:size-1024]

    os.unlink(filename)
Example #3
def nvperf(mode, verbose=False):

    state_file = os.fdopen(os.open(NVPERF_FILE, os.O_RDWR | os.O_CREAT), 'rb+')
    fcntl.flock(state_file, fcntl.LOCK_EX)

    try:

        counter = int(b'0' + state_file.read())

        if mode in ('?', 'query', None):
            if counter == 0:
                level = NVPERF_LEVELS[0]
            else:
                level = NVPERF_LEVELS[1]
            print(level)
            return

        if mode in ('-', 'off', 'adapt'):
            new_counter = counter - 1
        elif mode in ('+', 'on', 'max'):
            new_counter = counter + 1
        else:
            return

        if new_counter < 0:
            new_counter = 0

        if new_counter == counter:
            # No change in level needed
            return

        if new_counter == 1 and counter == 0:
            # Switch to maximum level.
            new_level = 1
        elif new_counter == 0:
            # Switch to adaptive level.
            new_level = 0
        else:
            # Already at maximum level.
            new_level = None

        if new_level is not None:

            if verbose:
                level = NVPERF_LEVELS[new_level]
                print('switching powermizer performance level to %s' % level)

            subprocess.check_call(('nvidia-settings', '-a', 'GPUPowerMizerMode=%u' % new_level))

        state_file.seek(0)
        state_file.write(b'%u' % new_counter)
        os.truncate(state_file.fileno(), state_file.tell())

    finally:

        fcntl.flock(state_file, fcntl.LOCK_UN)
        state_file.close()
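A hypothetical usage sketch for nvperf() above (run_benchmark is a stand-in name): the reference count keeps the GPU at the maximum PowerMizer level only while at least one caller still needs it.
nvperf('+', verbose=True)      # counter 0 -> 1: switch to the maximum level
try:
    run_benchmark()            # assumed workload, not part of the example
finally:
    nvperf('-', verbose=True)  # counter back to 0: return to adaptive mode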
Example #4
    def handle(self, *args, **options):
        election = Election.objects.get()
        election.state = Election.NOT_STARTED
        election.start_time = None
        election.save()

        Session.objects.exclude(state=Session.BANNED).delete()
        logging_dir = settings.LOGGING_DIR
        for f in os.listdir(logging_dir):
            os.truncate(os.path.join(logging_dir, f), 0)
Example #5
def create_gdb_init() -> None:
    # deny all access to outside users
    os.makedirs(_GDB_INIT_PATH, mode=0o700, exist_ok=True)
    os.chmod(_GDB_INIT_PATH,
             mode=stat.S_IRWXU,
             follow_symlinks=False)
    target = _GDB_INIT_PATH + _GDB_INIT_FILE_NAME
    with secure_open(target, "w") as init:
        os.truncate(target, 0)  # wipe the init file
        init.write("python import morunner.debugger.gdb.connect")
        init.write("python morunner.debugger.gdb.connect.accept_task()")
Example #6
    def test_sparse_copy(self):
        with ExitStack() as resources:
            tmpdir = resources.enter_context(TemporaryDirectory())
            sparse_file = os.path.join(tmpdir, 'sparse.dat')
            fp = resources.enter_context(open(sparse_file, 'w'))
            os.truncate(fp.fileno(), 1000000)
            # This file is sparse.
            self.assertTrue(is_sparse(sparse_file))
            copied_file = os.path.join(tmpdir, 'copied.dat')
            sparse_copy(sparse_file, copied_file)
            self.assertTrue(is_sparse(copied_file))
Example #7
    def test_mount_possible_after_corrupt_directory_and_cached_next_inode_number(
        self
    ) -> None:
        test_dir_path = self.mount_path / "test_dir"
        test_dir_path.mkdir()
        test_dir_overlay_file_path = self.overlay.materialize_dir(test_dir_path)

        self.eden.unmount(self.mount_path)
        os.truncate(test_dir_overlay_file_path, 0)
        self.overlay.delete_cached_next_inode_number()

        self.eden.mount(self.mount_path)
Example #8
def create_gdb_init(gdb_conf: configparser.SectionProxy) -> None:
    """
    Temp function.  Just a quick hack to build the file containing initial
    commands for GDBProcesses.
    """
    # deny all access to outside users
    os.makedirs(gdb_conf["config-path"], mode=0o700, exist_ok=True)
    os.chmod(gdb_conf["config-path"],
             mode=stat.S_IRWXU,
             follow_symlinks=False)
    target = gdb_conf["init-path"]
    with authentication.secure_open(target, "w") as init:
        os.truncate(target, 0)  # wipe the init file
        init.write("python import morunner.debugger.gdb.connect")
        init.write("python morunner.debugger.gdb.connect.accept_task()")
Example #9
    def test_copy_symlink(self):
        with ExitStack() as resources:
            tmpdir = resources.enter_context(TemporaryDirectory())
            sparse_file = os.path.join(tmpdir, 'sparse.dat')
            fp = resources.enter_context(open(sparse_file, 'w'))
            os.truncate(fp.fileno(), 1000000)
            # This file is sparse.
            self.assertTrue(is_sparse(sparse_file))
            # Create a symlink to the sparse file.
            linked_file = os.path.join(tmpdir, 'linked.dat')
            os.symlink(sparse_file, linked_file)
            self.assertTrue(os.path.islink(linked_file))
            copied_link = os.path.join(tmpdir, 'copied.dat')
            sparse_copy(linked_file, copied_link, follow_symlinks=False)
            self.assertTrue(os.path.islink(copied_link))
Example #10
    def daemond(self):
        pid = os.fork()
        if pid != 0:
            sys.exit(1)
        if os.setsid() == -1:
            print('setsid error')
            sys.exit(1)
        if self._fd is not None:
            stdfd = [s.fileno() for s in [sys.stdin, sys.stdout, sys.stderr]]
            for ofd in stdfd:
                os.dup2(self._fd, ofd)

        if self._pidfd:
            os.truncate(self._pidfd, 0)
            os.write(self._pidfd, str(os.getpid()).encode())
            os.close(self._pidfd)
Example #11
    def truncate(self, filename, size, **args):
        line = "truncate " + filename + " " + str(size)
        dentry = self.vfs_op_prelude(line, filename, args)

        try:
            if sys.version_info[0] == 2:
                self.verbose("os.ftruncate(", filename, ",", size, ")\n")
                truncate_fd = os.open(filename, os.O_RDWR)
                os.ftruncate(truncate_fd, size)
                os.close(truncate_fd)
            else:
                self.verbose("os.truncate(", filename, ",", size, ")\n")
                os.truncate(filename, size)
            self.vfs_op_success(filename, dentry, args, copy_up=True)
        except OSError as oe:
            self.vfs_op_error(oe, filename, dentry, args)
Example #12
def create_lock_file(lock_file_path):
    """Create a lock file with the current process ID as it's contents.

    Arguments:
        lock_file_path {string} -- path to the lock file
    Returns:
        lock file descriptor
    """

    # open or create the lock file in rw mode and if creating it
    # set the read and write user permission flags
    fd = os.open(lock_file_path, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)

    if fd < 0:
        syslog(LOG_ERR, "failed to open lock file: {}".format(lock_file_path))
        sys.exit(1)

    # set the close-on-exec file descriptor flag which will close the file
    # descriptor the next time exec() is called, useful for daemons that
    # reload themselves by re-execing
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    if flags < 0:
        syslog(LOG_ERR, "failed to open lock file: {}".format(lock_file_path))
        sys.exit(1)

    flags |= fcntl.FD_CLOEXEC
    if fcntl.fcntl(fd, fcntl.F_SETFD, flags) < 0:
        syslog(LOG_ERR, "failed to open lock file: {}".format(lock_file_path))
        sys.exit(1)

    # lock the entire contents of the lock file to prevent another process
    # from writing to it while we're running
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except (IOError, OSError):
        syslog("Cannot lock {}".format(lock_file_path))
        sys.exit(1)

    # truncate the contents of the lock file
    os.truncate(fd, 0)

    # write the current process ID to the lock file
    with open(lock_file_path, 'w') as f:
        f.write('{}'.format(os.getpid()))

    # return the lock file descriptor
    return fd
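A hypothetical usage sketch for create_lock_file() above (the path and do_work are placeholders): hold the lock for the lifetime of the daemon and release it explicitly on shutdown.
lock_fd = create_lock_file("/var/run/example.pid")  # placeholder path
try:
    do_work()                           # assumed main loop
finally:
    fcntl.flock(lock_fd, fcntl.LOCK_UN)
    os.close(lock_fd)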
Example #13
    def test_does_not_fit(self):
        # The contents of a structure is too large for the image size.
        workdir = self._resources.enter_context(TemporaryDirectory())
        # See LP: #1666580
        main(('snap', '--workdir', workdir,
              '--thru', 'load_gadget_yaml',
              self.model_assertion))
        # Make the gadget's mbr contents too big.
        path = os.path.join(workdir, 'unpack', 'gadget', 'pc-boot.img')
        os.truncate(path, 512)
        mock = self._resources.enter_context(patch(
            'ubuntu_image.__main__._logger.error'))
        code = main(('snap', '--workdir', workdir, '--resume'))
        self.assertEqual(code, 1)
        self.assertEqual(
            mock.call_args_list[-1],
            call('Volume contents do not fit (72B over): '
                 'volumes:<pc>:structure:<mbr> [#0]'))
Example #14
    def test_truncated_hg_dirstate_is_a_problem(self) -> None:
        dirstate_path = self.checkout.path / ".hg" / "dirstate"
        os.truncate(dirstate_path, dirstate_path.stat().st_size - 1)

        out = self.cure_what_ails_you(dry_run=True)
        self.assertEqual(
            f"""\
Checking {self.checkout.path}
<yellow>- Found problem:<reset>
Found inconsistent/missing data in {self.checkout.path}/.hg:
  error parsing .hg/dirstate: Reached EOF while reading checksum \
hash in {self.checkout.path}/.hg/dirstate.

Would repair hg directory contents for {self.checkout.path}

<yellow>Discovered 1 problem during --dry-run<reset>
""",
            out.getvalue(),
        )
Example #15
    def __init__(self, path, size):
        """Initialize an image file to a given size in bytes.

        :param path: Path to image file on the file system.
        :type path: str
        :param size: Size in bytes to set the image file to.
        :type size: int

        Public attributes:

        * path       - Path to the image file.
        """
        self.path = path
        # Create an empty image file of a fixed size.  Unlike
        # truncate(1) --size 0, os.truncate(path, 0) doesn't touch the
        # file; i.e. it must already exist.
        with open(path, 'wb'):
            pass
        # Truncate to zero, so that extending the size in the next call
        # will cause all the bytes to read as zero.  Stevens $4.13
        os.truncate(path, 0)
        os.truncate(path, size)
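A small follow-up sketch (path is a placeholder for an image created as above) to verify what the two truncate calls produce: the apparent size matches the requested size while the allocated block count stays near zero on filesystems that support holes, i.e. the image is sparse.
import os

st = os.stat(path)                             # path: an image created as above
print('apparent size:', st.st_size)
print('allocated bytes:', st.st_blocks * 512)  # st_blocks counts 512-byte units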
Example #16
    def setattr(self, inode, attr, fields):
        path = self._inode_to_path(inode)

        try:
            if fields.update_size:
                os.truncate(path, attr.st_size)

            if fields.update_mode:
                os.chmod(path, ~stat_m.S_IFMT & attr.st_mode,
                         follow_symlinks=False)

            if fields.update_uid or fields.update_gid:
                os.chown(path, attr.st_uid, attr.st_gid, follow_symlinks=False)

            if fields.update_atime or fields.update_mtime:
                os.utime(path, None, follow_symlinks=False,
                         ns=(attr.st_atime_ns, attr.st_mtime_ns))

        except OSError as exc:
            raise FUSEError(exc.errno)

        return self.getattr(inode)
Example #17
    def __init__(self, path, size, schema=None):
        """Initialize an image file to a given size in bytes.

        :param path: Path to image file on the file system.
        :type path: str
        :param size: Size in bytes to set the image file to.
        :type size: int
        :param schema: The partitioning schema of the volume.
        :type schema: VolumeSchema

        Public attributes:

        * path - Path to the image file.
        """
        self.path = path
        # Create an empty image file of a fixed size.  Unlike
        # truncate(1) --size 0, os.truncate(path, 0) doesn't touch the
        # file; i.e. it must already exist.
        with open(path, 'wb'):
            pass
        # Truncate to zero, so that extending the size in the next call
        # will cause all the bytes to read as zero.  Stevens $4.13
        os.truncate(path, 0)
        os.truncate(path, size)
        # Prepare the device and disk objects for parted to be used for all
        # future partition() calls.  Only do it if we actually care about the
        # partition table.
        if schema is None:
            self.sector_size = 512
            self.device = None
            self.disk = None
            self.schema = None
        else:
            self.device = parted.Device(self.path)
            label = 'msdos' if schema is VolumeSchema.mbr else 'gpt'
            self.schema = schema
            self.disk = parted.freshDisk(self.device, label)
            self.sector_size = self.device.sectorSize
Example #18
    def _corrupt_files(self) -> List[pathlib.Path]:
        """Corrupt some files inside the mount.
        Returns relative paths to these files inside the mount.
        """
        # Corrupt 3 separate files.  2 are tracked by mercurial, one is not.
        # We will corrupt 2 of them by truncating the overlay file, and one by
        # completely removing the overlay file.  (In practice an unclean reboot often
        # leaves overlay files that exist but have 0 length.)
        tracked_path = pathlib.Path("src/committed_file")
        untracked_path = pathlib.Path("src/new_file")
        readme_path = pathlib.Path("readme.txt")

        tracked_overlay_file_path = self.overlay.materialize_file(tracked_path)
        untracked_overlay_file_path = self.overlay.materialize_file(untracked_path)
        readme_overlay_file_path = self.overlay.materialize_file(readme_path)

        self.eden.unmount(self.mount_path)
        os.truncate(tracked_overlay_file_path, 0)
        os.unlink(untracked_overlay_file_path)
        os.truncate(readme_overlay_file_path, 0)
        self.eden.mount(self.mount_path)

        return [tracked_path, untracked_path, readme_path]
Example #19
    def mount_btrfs(self):
        mountpoint = tempfile.mkdtemp()
        try:
            with tempfile.NamedTemporaryFile(delete=False) as f:
                os.truncate(f.fileno(), 1024 * 1024 * 1024)
                image = f.name
        except Exception as e:
            os.rmdir(mountpoint)
            raise e

        if os.path.exists('../../mkfs.btrfs'):
            mkfs = '../../mkfs.btrfs'
        else:
            mkfs = 'mkfs.btrfs'
        try:
            subprocess.check_call([mkfs, '-q', image])
            subprocess.check_call(['mount', '-o', 'loop', '--', image, mountpoint])
        except Exception as e:
            os.rmdir(mountpoint)
            os.remove(image)
            raise e

        self._mountpoints.append((mountpoint, image))
        return mountpoint, image
Example #20
    def post(self, upload_uuid=None, offset=None, upload_from_db=None):
        upload_dir = os.path.join(config.STORAGE_PATH, 'uploads')
        os.makedirs(upload_dir, exist_ok=True)

        upload_path = os.path.join(upload_dir, upload_from_db.uuid)
        os.truncate(upload_path, int(offset))
Example #21
def server_thread():
    global q

    fd_cnt = randint(0, 9999)

    file_fd_dict = {}  # maps each filename to [real fd, fd handed to the client]

    while True:
        message, client_address = q.get()  # take a message from the queue

        flag, rest_packet = message.split(b',', 1)  # dispatch on the message flag

        # message format: 'O,fd,flags'
        if flag == b'O':  # open request
            rest_packet = rest_packet.decode("utf-8")

            rest_packet, temp_fd = rest_packet.rsplit(',', 1)

            temp_fd = int(temp_fd)

            try:  # check whether open flags were supplied
                fname, rest_packet = rest_packet.split(',', 1)
                fname = directory + fname
                flags_array = rest_packet.split(',')

                if fname not in file_fd_dict:  # the file is not open yet
                    # handle every flag combination
                    if 'O_CREAT' in flags_array and 'O_EXCL' in flags_array:
                        if 'O_TRUNC' in flags_array:
                            try:
                                fd = os.open(
                                    fname, os.O_EXCL | os.O_CREAT | os.O_TRUNC
                                    | os.O_RDWR)
                            except OSError:
                                fd = -1  # signals a server-side error
                        else:
                            try:
                                fd = os.open(
                                    fname, os.O_EXCL | os.O_CREAT | os.O_RDWR)
                            except OSError:
                                fd = -1  # signals a server-side error
                    elif 'O_CREAT' in flags_array:
                        if 'O_TRUNC' in flags_array:
                            try:
                                fd = os.open(
                                    fname, os.O_TRUNC | os.O_CREAT | os.O_RDWR)
                            except OSError:
                                fd = -1  # signals a server-side error
                        else:
                            try:
                                fd = os.open(fname, os.O_CREAT | os.O_RDWR)
                            except OSError:
                                fd = -1  # signals a server-side error
                    elif 'O_TRUNC' in flags_array:
                        try:
                            fd = os.open(fname, os.O_TRUNC | os.O_RDWR)
                        except OSError:
                            fd = -1

                    if fd != -1:  # on success, update the table
                        if temp_fd == -1:  # acts as a marker for a server reboot
                            file_fd_dict[fname] = [fd, fd_cnt]
                            fd_cnt += 1
                        else:
                            # the file already exists on the server side with fd == temp_fd
                            file_fd_dict[fname] = [fd, temp_fd]

                else:  # the file is already open but O_TRUNC was requested
                    if 'O_TRUNC' in flags_array:
                        if 'O_CREAT' in flags_array and 'O_EXCL' in flags_array:  # error: the file already exists
                            s.sendto(b'O,-1', client_address)
                            continue
                        # truncate the file
                        os.truncate(file_fd_dict[fname][0], 0)
            except:  # no flags given: plain O_RDONLY / O_WRONLY / O_RDWR
                fname = rest_packet
                fname = directory + fname
                if fname not in file_fd_dict:
                    try:
                        fd = os.open(fname, os.O_RDWR)
                    except OSError:
                        fd = -1  # signals a server-side error

                    if fd != -1:  # on success, update the table
                        if temp_fd == -1:
                            file_fd_dict[fname] = [fd, fd_cnt]
                            fd_cnt += 1
                        else:
                            file_fd_dict[fname] = [fd, temp_fd]

            # build the reply packet
            if fd != -1:
                ret_fd = file_fd_dict[fname][1]  # return the fd recorded in the table
                packet = ('O,' + str(ret_fd)).encode("utf-8")
            else:
                packet = ('O,-1').encode("utf-8")  # otherwise report failure

            s.sendto(packet, client_address)
        # message format: 'R,fd,size,pos'
        elif flag == b'R':
            rest_packet = rest_packet.decode("utf-8")

            server_fd, rest_packet = rest_packet.split(',', 1)
            file_size, file_pos = rest_packet.split(',', 1)

            server_fd = int(server_fd)
            file_size = int(file_size)
            file_pos = int(file_pos)

            for x in file_fd_dict:  # find the entry matching the requested server fd
                if server_fd != file_fd_dict[x][1]:
                    ret = -2  # error: the requested fd does not exist
                else:
                    fd = file_fd_dict[x][0]  # found it; use the real fd
                    ret = 0
                    break

            # if the requested fd exists
            if ret == 0:
                os.lseek(fd, file_pos, 0)  # seek to the position the client requested

                data = os.read(fd, file_size)

                # reply packet with the corresponding code
                packet = ('R,' + str(ret) + ',').encode("utf-8") + data
            elif ret == -2:
                packet = ('R,' + str(ret)).encode("utf-8")

            s.sendto(packet, client_address)  # send the reply back to the client
        # message format: 'W,fd,pos,data'
        elif flag == b'W':
            server_fd, rest_packet = rest_packet.split(b',', 1)
            file_pos, data = rest_packet.split(b',', 1)

            server_fd = server_fd.decode("utf-8")
            file_pos = file_pos.decode("utf-8")

            server_fd = int(server_fd)
            file_pos = int(file_pos)

            for x in file_fd_dict:  # find the entry matching the requested server fd
                if server_fd != file_fd_dict[x][1]:
                    ret = -2  # the fd was not found, report an error
                else:
                    fd = file_fd_dict[x][0]
                    ret = 0
                    break

            # if the requested fd exists
            if ret == 0:
                os.lseek(fd, file_pos, 0)  # seek to the position the client requested

                bytes_written = os.write(fd, data)

                packet = ('W,' + str(ret) + ',' +
                          str(bytes_written)).encode("utf-8")
            elif ret == -2:
                packet = ('W,' + str(ret)).encode("utf-8")

            # packet carries the corresponding status code
            s.sendto(packet, client_address)
        # message format: 'S,fd'
        elif flag == b'S':
            rest_packet = rest_packet.decode("utf-8")

            fd = int(rest_packet)

            for x in file_fd_dict:  # find the entry matching the requested server fd
                if fd != file_fd_dict[x][1]:
                    ret = -2
                else:
                    file_name = x
                    file_size = os.stat(file_name).st_size  # size of the whole file
                    ret = file_size
                    break

            # reply with the file size
            packet = ('S,' + str(ret)).encode("utf-8")
            s.sendto(packet, client_address)

        print("----- file_fd_dict: ", file_fd_dict)
Example #22
def _truncate(path, length):
    fd = os.open(path, os.O_RDWR)
    try:
        os.truncate(fd, length)
    finally:
        os.close(fd)
Example #23
def test_zip_path_replace(tmpdir):
    zipped_path = zip_path(str(tmpdir))
    os.truncate(zipped_path, 0)
    assert os.path.getsize(zipped_path) == 0
    zip_path(str(tmpdir), replace=True)
    assert os.path.getsize(zipped_path) > 0
Example #24
def _test_storage(st, driver, tmpdir, storage_config):
    scratch = tmpdir.join("scratch")
    compat.makedirs(str(scratch), exist_ok=True)

    # File not found cases
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_metadata_for_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.delete_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_file("NONEXISTENT", str(scratch.join("a")))
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_fileobj("NONEXISTENT", BytesIO())
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_string("NONEXISTENT")
    assert st.list_path("") == []
    assert st.list_path("NONEXISTENT") == []
    st.store_file_from_memory("NONEXISTENT-a/x1", b"dummy", None)
    dummy_file = str(scratch.join("a"))
    with open(dummy_file, "wb") as fp:
        fp.write(b"dummy")
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, None)
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, {"x": 1})

    st.delete_key("NONEXISTENT-b/x1")
    st.delete_key("NONEXISTENT-a/x1")

    # Other basic cases
    from_disk_file = str(scratch.join("a"))
    input_data = b"from disk"
    if driver == "local":
        input_data = input_data * 150000
    with open(from_disk_file, "wb") as fp:
        fp.write(input_data)
    st.store_file_from_disk("test1/x1", from_disk_file, None)
    out = BytesIO()

    reported_positions = []

    def progress_callback(pos, total):
        reported_positions.append((pos, total))

    assert st.get_contents_to_fileobj("test1/x1", out, progress_callback=progress_callback) == {}
    assert out.getvalue() == input_data
    if driver == "local":
        input_size = len(input_data)
        assert reported_positions == [(1024 * 1024, input_size), (input_size, input_size)]

    if driver == "s3":
        response = st.s3_client.head_object(
            Bucket=st.bucket_name,
            Key=st.format_key_for_backend("test1/x1"),
        )
        assert bool(response.get("ServerSideEncryption")) == bool(storage_config.get('encrypted'))

    st.store_file_from_memory("test1/x1", b"dummy", {"k": "v"})
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {"k": "v"}
    assert out.getvalue() == b"dummy"

    # Copy file
    st.copy_file(source_key="test1/x1", destination_key="test_copy/copy1")
    assert st.get_contents_to_string("test_copy/copy1") == (b"dummy", {"k": "v"})
    st.copy_file(source_key="test1/x1", destination_key="test_copy/copy2", metadata={"new": "meta"})
    assert st.get_contents_to_string("test_copy/copy2") == (b"dummy", {"new": "meta"})

    st.store_file_from_memory("test1/x1", b"l", {"fancymetadata": "value"})
    assert st.get_contents_to_string("test1/x1") == (b"l", {"fancymetadata": "value"})

    st.store_file_from_memory("test1/x1", b"1", None)
    assert st.get_contents_to_string("test1/x1") == (b"1", {})

    st.store_file_from_memory("test1/td", b"to disk", {"to-disk": "42"})
    to_disk_file = str(scratch.join("b"))
    assert st.get_contents_to_file("test1/td", to_disk_file) == {"to-disk": "42"}

    created_keys = {"test1/x1", "test1/td"}

    if driver == "s3":
        response = st.s3_client.head_object(
            Bucket=st.bucket_name,
            Key=st.format_key_for_backend("test1/x1"),
        )
        assert bool(response.get("ServerSideEncryption")) == bool(storage_config.get('encrypted'))

    assert st.list_path("") == []  # nothing at top level (directories not listed)
    if driver == "local":
        # create a dot-file (hidden), this must be ignored
        target_file = os.path.join(st.prefix, "test1/.null")
        with open(target_file, "w"):
            pass

    tlist = st.list_path("test1")
    assert len(tlist) == 2
    for fe in tlist:
        assert isinstance(fe["last_modified"], datetime.datetime)
        assert fe["last_modified"].tzinfo is not None
        if fe["name"] == "test1/x1":
            assert fe["size"] == 1
            assert fe["metadata"] == {}
        elif fe["name"] == "test1/td":
            assert fe["size"] == len(b"to disk")
            assert fe["metadata"] == {"to-disk": "42"}
        else:
            assert 0, "unexpected name in directory"

    assert set(st.iter_prefixes("test1")) == set()

    for key in ["test1/sub1/sub1.1", "test1/sub2/sub2.1/sub2.1.1", "test1/sub3"]:
        st.store_file_from_memory(key, b"1", None)
        created_keys.add(key)

    if driver == "local":
        # sub3 is a file. Actual object storage systems support this, but a file system does not
        with pytest.raises(NotADirectoryError):
            st.store_file_from_memory("test1/sub3/sub3.1/sub3.1.1", b"1", None)
    else:
        st.store_file_from_memory("test1/sub3/sub3.1/sub3.1.1", b"1", None)
        created_keys.add("test1/sub3/sub3.1/sub3.1.1")

    if driver == "local":
        assert set(st.iter_prefixes("test1")) == {"test1/sub1", "test1/sub2"}
    else:
        assert set(st.iter_prefixes("test1")) == {"test1/sub1", "test1/sub2", "test1/sub3"}
    assert {item["name"] for item in st.list_path("test1")} == {"test1/x1", "test1/td", "test1/sub3"}
    assert set(st.iter_prefixes("test1/sub1")) == set()
    assert {item["name"] for item in st.list_path("test1/sub1")} == {"test1/sub1/sub1.1"}
    assert {item["name"] for item in st.list_path("test1/sub2")} == set()
    assert {item["name"] for item in st.list_path("test1/sub3")} == set()
    assert set(st.iter_prefixes("test1/sub2")) == {"test1/sub2/sub2.1"}
    if driver == "local":
        assert set(st.iter_prefixes("test1/sub3")) == set()  # sub3 is a file
    else:
        assert set(st.iter_prefixes("test1/sub3")) == {"test1/sub3/sub3.1"}
    assert set(st.iter_prefixes("test1/sub3/3.1")) == set()

    expected_deep_iter_test1_names = {
        "test1/x1",
        "test1/td",
        "test1/sub1/sub1.1",
        "test1/sub2/sub2.1/sub2.1.1",
        "test1/sub3",
    }
    if driver != "local":
        expected_deep_iter_test1_names.add("test1/sub3/sub3.1/sub3.1.1")

    assert {item["name"] for item in st.list_path("test1", deep=True)} == expected_deep_iter_test1_names

    def _object_names(iterable):
        names = set()
        for item in iterable:
            assert item.type == KEY_TYPE_OBJECT
            names.add(item.value["name"])
        return names

    deep_names_with_key = _object_names(st.iter_key("test1/sub3", deep=True, include_key=True))
    deep_names_without_key = _object_names(st.iter_key("test1/sub3", deep=True, include_key=False))

    if driver == "local":
        assert deep_names_with_key == {"test1/sub3"}
        assert deep_names_without_key == set()
    else:
        assert deep_names_with_key == {"test1/sub3", "test1/sub3/sub3.1/sub3.1.1"}
        assert deep_names_without_key == {"test1/sub3/sub3.1/sub3.1.1"}

    if driver == "google":
        # test extra props for cacheControl in google
        st.store_file_from_memory("test1/x1", b"no cache test",
                                  metadata={"test": "value"},
                                  extra_props={"cacheControl": "no-cache"})

    if driver == "local":
        # test LocalFileIsRemoteFileError for local storage
        target_file = os.path.join(st.prefix, "test1/x1")
        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.store_file_from_disk("test1/x1", target_file, {"local": True})
        assert st.get_contents_to_string("test1/x1") == (b"1", {"local": "True"})

        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.get_contents_to_file("test1/x1", target_file)

        # Missing metadata is an error situation that should fail
        os.unlink(target_file + ".metadata")
        with pytest.raises(errors.FileNotFoundFromStorageError):
            st.get_metadata_for_key("test1/x1")

    for key in created_keys:
        st.delete_key(key)
    assert st.list_path("test1") == []  # empty again

    for name in ["test2/foo", "test2/suba/foo", "test2/subb/bar", "test2/subb/subsub/zob"]:
        st.store_file_from_memory(name, b"somedata")
    names = sorted(item["name"] for item in st.list_path("test2", deep=True))
    assert names == ["test2/foo", "test2/suba/foo", "test2/subb/bar", "test2/subb/subsub/zob"]

    st.delete_tree("test2")
    assert st.list_path("test2", deep=True) == []

    test_hash = hashlib.sha256()
    test_file = str(scratch.join("30m"))
    test_size_send = 0
    with open(test_file, "wb") as fp:
        chunk = b"30m file" * 10000
        while test_size_send < 30 * 1024 * 1024:
            test_hash.update(chunk)
            fp.write(chunk)
            test_size_send += len(chunk)
    test_hash_send = test_hash.hexdigest()

    st.store_file_from_disk("test1/30m", test_file, multipart=True,
                            metadata={"thirtymeg": "data", "size": test_size_send, "key": "value-with-a-hyphen"})

    os.unlink(test_file)

    expected_meta = {"thirtymeg": "data", "size": str(test_size_send), "key": "value-with-a-hyphen"}
    meta = st.get_metadata_for_key("test1/30m")
    assert meta == expected_meta

    progress_reports = []

    def dl_progress(current_pos, expected_max):
        progress_reports.append((current_pos, expected_max))

    with open(test_file, "wb") as fp:
        assert st.get_contents_to_fileobj("test1/30m", fp, progress_callback=dl_progress) == expected_meta

    assert len(progress_reports) > 0
    assert progress_reports[-1][0] == progress_reports[-1][1]

    test_hash = hashlib.sha256()
    test_size_rec = 0
    with open(test_file, "rb") as fp:
        while True:
            chunk = fp.read(1024 * 1024)
            if not chunk:
                break
            test_hash.update(chunk)
            test_size_rec += len(chunk)
    test_hash_rec = test_hash.hexdigest()
    assert test_hash_rec == test_hash_send
    assert test_size_rec == test_size_send

    tlist = st.list_path("test1")
    assert len(tlist) == 1
    assert tlist[0]["name"] == "test1/30m"
    assert tlist[0]["size"] == test_size_rec

    if driver == "swift":
        segments = test_size_send // st.segment_size
        segment_list = st.list_path("test1_segments/30m")
        assert len(segment_list) >= segments

        if segments >= 2:
            # reupload a file with the same name but with less chunks
            os.truncate(test_file, st.segment_size + 1)
            test_size_send = os.path.getsize(test_file)
            st.store_file_from_disk("test1/30m", test_file, multipart=True,
                                    metadata={"30m": "less data", "size": test_size_send})

            segment_list = st.list_path("test1_segments/30m")
            assert len(segment_list) == 2
            assert len(st.list_path("test1")) == 1

    st.delete_key("test1/30m")
    assert st.list_path("test1") == []

    if driver == "swift":
        assert st.list_path("test1_segments/30m") == []

    progress_reports = []

    def upload_progress(progress):
        progress_reports.append(progress)

    for size in (300, 3 * 1024 * 1024, 11 * 1024 * 1024):
        progress_reports = []
        rds = RandomDataSource(size)
        key = "test1/{}b".format(size)
        st.store_file_object(key, rds, upload_progress_fn=upload_progress)
        # Progress may be reported after each chunk and chunk size depends on available memory
        # on current machine so there's no straightforward way of checking reasonable progress
        # updates were made. Just ensure they're ordered correctly if something was provided
        assert sorted(progress_reports) == progress_reports
        bio = BytesIO()
        st.get_contents_to_fileobj(key, bio)
        buffer = bio.getbuffer()
        assert len(buffer) == size
        assert buffer == rds.data
        st.delete_key(key)
Example #25
    def truncate(self, dentry: Dentry, size: int):
        os.truncate(dentry.host_path, size)
Example #26
def _test_storage(st, driver, tmpdir, storage_config):
    scratch = tmpdir.join("scratch")
    compat.makedirs(str(scratch), exist_ok=True)

    # File not found cases
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_metadata_for_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.delete_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_file("NONEXISTENT", str(scratch.join("a")))
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_fileobj("NONEXISTENT", BytesIO())
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_string("NONEXISTENT")
    assert st.list_path("") == []
    assert st.list_path("NONEXISTENT") == []
    st.store_file_from_memory("NONEXISTENT-a/x1", b"dummy", None)
    dummy_file = str(scratch.join("a"))
    with open(dummy_file, "wb") as fp:
        fp.write(b"dummy")
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, None)
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, {"x": 1})

    st.delete_key("NONEXISTENT-b/x1")
    st.delete_key("NONEXISTENT-a/x1")

    # Other basic cases
    from_disk_file = str(scratch.join("a"))
    with open(from_disk_file, "wb") as fp:
        fp.write(b"from disk")
    st.store_file_from_disk("test1/x1", from_disk_file, None)
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {}
    assert out.getvalue() == b"from disk"

    if driver == "s3":
        response = st.s3_client.head_object(
            Bucket=st.bucket_name,
            Key=st.format_key_for_backend("test1/x1"),
        )
        assert bool(response.get("ServerSideEncryption")) == bool(
            storage_config.get('encrypted'))

    st.store_file_from_memory("test1/x1", b"dummy", {"k": "v"})
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {"k": "v"}
    assert out.getvalue() == b"dummy"

    st.store_file_from_memory("test1/x1", b"l", {"fancymetadata": "value"})
    assert st.get_contents_to_string("test1/x1") == (b"l", {
        "fancymetadata": "value"
    })

    st.store_file_from_memory("test1/x1", b"1", None)
    assert st.get_contents_to_string("test1/x1") == (b"1", {})

    st.store_file_from_memory("test1/td", b"to disk", {"to-disk": "42"})
    to_disk_file = str(scratch.join("b"))
    assert st.get_contents_to_file("test1/td", to_disk_file) == {
        "to-disk": "42"
    }

    if driver == "s3":
        response = st.s3_client.head_object(
            Bucket=st.bucket_name,
            Key=st.format_key_for_backend("test1/x1"),
        )
        assert bool(response.get("ServerSideEncryption")) == bool(
            storage_config.get('encrypted'))

    assert st.list_path("") == [
    ]  # nothing at top level (directories not listed)
    if driver == "local":
        # create a dot-file (hidden), this must be ignored
        target_file = os.path.join(st.prefix, "test1/.null")
        with open(target_file, "w"):
            pass

    tlist = st.list_path("test1")
    assert len(tlist) == 2
    for fe in tlist:
        assert isinstance(fe["last_modified"], datetime.datetime)
        assert fe["last_modified"].tzinfo is not None
        if fe["name"] == "test1/x1":
            assert fe["size"] == 1
            assert fe["metadata"] == {}
        elif fe["name"] == "test1/td":
            assert fe["size"] == len(b"to disk")
            assert fe["metadata"] == {"to-disk": "42"}
        else:
            assert 0, "unexpected name in directory"

    if driver == "google":
        # test extra props for cacheControl in google
        st.store_file_from_memory("test1/x1",
                                  b"no cache test",
                                  metadata={"test": "value"},
                                  extra_props={"cacheControl": "no-cache"})

    if driver == "local":
        # test LocalFileIsRemoteFileError for local storage
        target_file = os.path.join(st.prefix, "test1/x1")
        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.store_file_from_disk("test1/x1", target_file, {"local": True})
        assert st.get_contents_to_string("test1/x1") == (b"1", {
            "local": "True"
        })

        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.get_contents_to_file("test1/x1", target_file)

        # unlink metadata file, this shouldn't break anything
        os.unlink(target_file + ".metadata")
        assert st.get_metadata_for_key("test1/x1") == {}

    st.delete_key("test1/x1")
    st.delete_key("test1/td")
    assert st.list_path("test1") == []  # empty again

    test_hash = hashlib.sha256()
    test_file = str(scratch.join("30m"))
    test_size_send = 0
    with open(test_file, "wb") as fp:
        chunk = b"30m file" * 10000
        while test_size_send < 30 * 1024 * 1024:
            test_hash.update(chunk)
            fp.write(chunk)
            test_size_send += len(chunk)
    test_hash_send = test_hash.hexdigest()

    st.store_file_from_disk("test1/30m",
                            test_file,
                            multipart=True,
                            metadata={
                                "thirtymeg": "data",
                                "size": test_size_send,
                                "key": "value-with-a-hyphen"
                            })

    os.unlink(test_file)

    expected_meta = {
        "thirtymeg": "data",
        "size": str(test_size_send),
        "key": "value-with-a-hyphen"
    }
    meta = st.get_metadata_for_key("test1/30m")
    assert meta == expected_meta

    progress_reports = []

    def dl_progress(current_pos, expected_max):
        progress_reports.append((current_pos, expected_max))

    with open(test_file, "wb") as fp:
        assert st.get_contents_to_fileobj(
            "test1/30m", fp, progress_callback=dl_progress) == expected_meta

    assert len(progress_reports) > 0
    assert progress_reports[-1][0] == progress_reports[-1][1]

    test_hash = hashlib.sha256()
    test_size_rec = 0
    with open(test_file, "rb") as fp:
        while True:
            chunk = fp.read(1024 * 1024)
            if not chunk:
                break
            test_hash.update(chunk)
            test_size_rec += len(chunk)
    test_hash_rec = test_hash.hexdigest()
    assert test_hash_rec == test_hash_send
    assert test_size_rec == test_size_send

    tlist = st.list_path("test1")
    assert len(tlist) == 1
    assert tlist[0]["name"] == "test1/30m"
    assert tlist[0]["size"] == test_size_rec

    if driver == "swift":
        segments = test_size_send // st.segment_size
        segment_list = st.list_path("test1_segments/30m")
        assert len(segment_list) >= segments

        if segments >= 2:
            # reupload a file with the same name but with less chunks
            os.truncate(test_file, st.segment_size + 1)
            test_size_send = os.path.getsize(test_file)
            st.store_file_from_disk("test1/30m",
                                    test_file,
                                    multipart=True,
                                    metadata={
                                        "30m": "less data",
                                        "size": test_size_send
                                    })

            segment_list = st.list_path("test1_segments/30m")
            assert len(segment_list) == 2
            assert len(st.list_path("test1")) == 1

    st.delete_key("test1/30m")
    assert st.list_path("test1") == []

    if driver == "swift":
        assert st.list_path("test1_segments/30m") == []
Example #27
def create_complete_sparse_file(filename, filesize):
    file_path = os.path.join(test_directory_path, filename)
    sparse = Path(file_path)
    sparse.touch()
    os.truncate(str(sparse), filesize)
    return file_path
Example #28
class DirSheet(Sheet):
    'Sheet displaying directory, using ENTER to open a particular file.  Edited fields are applied to the filesystem.'
    rowtype = 'files'  # rowdef: Path
    columns = [
        DeferredSetColumn(
            'directory',
            getter=lambda col, row: row.parent.relpath(col.sheet.source.
                                                       resolve()),
            setter=lambda col, row, val: col.sheet.moveFile(row, val)),
        DeferredSetColumn(
            'filename',
            getter=lambda col, row: row.name + row.ext,
            setter=lambda col, row, val: col.sheet.renameFile(row, val)),
        DeferredSetColumn(
            'pathname',
            width=0,
            getter=lambda col, row: row.resolve(),
            setter=lambda col, row, val: os.rename(row.resolve(), val)),
        Column('ext',
               getter=lambda col, row: row.is_dir() and '/' or row.suffix),
        DeferredSetColumn(
            'size',
            type=int,
            getter=lambda col, row: row.stat().st_size,
            setter=lambda col, row, val: os.truncate(row.resolve(), int(val))),
        DeferredSetColumn(
            'modtime',
            type=date,
            getter=lambda col, row: row.stat().st_mtime,
            setter=lambda col, row, val: os.utime(
                row.resolve(), times=((row.stat().st_atime, float(val))))),
        DeferredSetColumn(
            'owner',
            width=0,
            getter=lambda col, row: pwd.getpwuid(row.stat().st_uid).pw_name,
            setter=lambda col, row, val: os.chown(row.resolve(),
                                                  pwd.getpwnam(val).pw_uid, -1
                                                  )),
        DeferredSetColumn(
            'group',
            width=0,
            getter=lambda col, row: grp.getgrgid(row.stat().st_gid).gr_name,
            setter=lambda col, row, val: os.chown(row.resolve(), -1,
                                                  grp.getgrnam(val).gr_gid)),
        DeferredSetColumn(
            'mode',
            width=0,
            getter=lambda col, row: '{:o}'.format(row.stat().st_mode),
            setter=lambda col, row, val: os.chmod(row.resolve(), int(val, 8))),
        Column('filetype',
               width=0,
               cache=True,
               getter=lambda col, row: subprocess.Popen(
                   ['file', '--brief', row.resolve()],
                   stdout=subprocess.PIPE,
                   stderr=subprocess.PIPE).communicate()[0].strip()),
    ]
    colorizers = [
        #        CellColorizer(4, None, lambda s,c,r,v: s.colorOwner(s,c,r,v)),
        CellColorizer(8, 'color_change_pending',
                      lambda s, c, r, v: s.changed(c, r)),
        RowColorizer(9, 'color_delete_pending',
                     lambda s, c, r, v: r in s.toBeDeleted),
    ]
    nKeys = 2

    @staticmethod
    def colorOwner(sheet, col, row, val):
        ret = ''
        if col.name == 'group':
            mode = row.stat().st_mode
            if mode & stat.S_IXGRP: ret = 'bold '
            if mode & stat.S_IWGRP: return ret + 'green'
            if mode & stat.S_IRGRP: return ret + 'yellow'
        elif col.name == 'owner':
            mode = row.stat().st_mode
            if mode & stat.S_IXUSR: ret = 'bold '
            if mode & stat.S_IWUSR: return ret + 'green'
            if mode & stat.S_IRUSR: return ret + 'yellow'

    def changed(self, col, row):
        try:
            return isinstance(col, DeferredSetColumn) and col.changed(row)
        except Exception:
            return False

    def deleteFiles(self, rows):
        for r in rows:
            if r not in self.toBeDeleted:
                self.toBeDeleted.append(r)

    def moveFile(self, row, val):
        fn = row.name + row.ext
        newpath = os.path.join(val, fn)
        if not newpath.startswith('/'):
            newpath = os.path.join(self.source.resolve(), newpath)

        parent = Path(newpath).parent
        if parent.exists():
            if not parent.is_dir():
                error('destination %s not a directory' % parent)
        else:
            with contextlib.suppress(FileExistsError):
                os.makedirs(parent.resolve())

        os.rename(row.resolve(), newpath)
        row.fqpn = newpath
        self.restat(row)

    def renameFile(self, row, val):
        newpath = row.with_name(val)
        os.rename(row.resolve(), newpath.resolve())
        row.fqpn = newpath
        self.restat(row)

    def removeFile(self, path):
        if path.is_dir():
            os.rmdir(path.resolve())
        else:
            os.remove(path.resolve())

    def undoMod(self, row):
        for col in self.visibleCols:
            if getattr(col, '_modifiedValues',
                       None) and id(row) in col._modifiedValues:
                del col._modifiedValues[id(row)]

        if row in self.toBeDeleted:
            self.toBeDeleted.remove(row)
        self.restat(row)

    def save(self, *rows):
        changes = []
        deletes = {}
        for r in list(
                rows
                or self.rows):  # copy list because elements may be removed
            if r in self.toBeDeleted:
                deletes[id(r)] = r
            else:
                for col in self.visibleCols:
                    if self.changed(col, r):
                        changes.append((col, r))

        if not changes and not deletes:
            fail('nothing to save')

        cstr = ''
        if changes:
            cstr += 'change %d attributes' % len(changes)

        if deletes:
            if cstr: cstr += ' and '
            cstr += 'delete %d files' % len(deletes)

        confirm('really %s? ' % cstr)

        self._commit(changes, deletes)

    @asyncthread
    def _commit(self, changes, deletes):
        oldrows = self.rows
        self.rows = []
        for r in oldrows:
            try:
                if id(r) in deletes:
                    self.removeFile(r)
                else:
                    self.rows.append(r)
            except Exception as e:
                exceptionCaught(e)

        for col, row in changes:
            try:
                col.realsetter(col, row, col._modifiedValues[id(row)])
                self.restat(r)
            except Exception as e:
                exceptionCaught(e)

    @asyncthread
    def reload(self):
        self.toBeDeleted = []
        self.rows = []
        basepath = self.source.resolve()
        for folder, subdirs, files in os.walk(basepath):
            subfolder = folder[len(basepath) + 1:]
            if subfolder.startswith('.'): continue
            for fn in files:
                if fn.startswith('.'): continue
                p = Path(os.path.join(folder, fn))
                self.rows.append(p)

        # sort by modtime initially
        self.rows.sort(key=lambda row: row.stat().st_mtime, reverse=True)

    def restat(self, row):
        row.stat(force=True)
Example #29
    def start(self):
        self.mtda.debug(3, "power.qemu.start()")

        if self.pidOfQemu is not None:
            return True
        if os.path.exists("/tmp/qemu-mtda.in"):
            os.unlink("/tmp/qemu-mtda.in")
        if os.path.exists("/tmp/qemu-mtda.out"):
            os.unlink("/tmp/qemu-mtda.out")
        if os.path.exists("/tmp/qemu-serial.in"):
            os.unlink("/tmp/qemu-serial.in")
        if os.path.exists("/tmp/qemu-serial.out"):
            os.unlink("/tmp/qemu-serial.out")
        os.mkfifo("/tmp/qemu-mtda.in")
        os.mkfifo("/tmp/qemu-mtda.out")
        os.mkfifo("/tmp/qemu-serial.in")
        os.mkfifo("/tmp/qemu-serial.out")

        atexit.register(self.stop)

        # base options
        options = "-daemonize -S -m %d" % self.memory
        options += " -chardev pipe,id=monitor,path=/tmp/qemu-mtda -monitor chardev:monitor"
        options += " -serial pipe:/tmp/qemu-serial"
        options += " -device e1000,netdev=net0"
        options += " -netdev user,id=net0,hostfwd=tcp::2222-:22,hostname={0}".format(
            self.hostname)
        options += " -usb"
        options += " -vnc :0"

        # extra options
        if self.bios is not None:
            options += " -bios %s" % self.bios
        if self.cpu is not None:
            options += " -cpu %s" % self.cpu
        if self.machine is not None:
            options += " -machine %s" % self.machine
        if self.pflash_ro is not None:
            options += " -drive if=pflash,format=raw,readonly,file=%s" % self.pflash_ro
        if self.pflash_rw is not None:
            options += " -drive if=pflash,format=raw,file=%s" % self.pflash_rw
        if self.storage is not None:
            options += " -drive file=%s,media=disk,format=raw" % self.storage
            if os.path.exists(self.storage) == False:
                sparse = pathlib.Path(self.storage)
                sparse.touch()
                os.truncate(str(sparse), 16 * 1024 * 1024 * 1024)
        if self.watchdog is not None:
            options += " -watchdog %s" % self.watchdog

        # swtpm options
        if self.swtpm is not None:
            with tempfile.NamedTemporaryFile() as pidfile:
                os.makedirs("/tmp/qemu-swtpm", exist_ok=True)
                result = os.system(
                    self.swtpm + " socket -d" +
                    " --tpmstate dir=/tmp/qemu-swtpm" +
                    " --ctrl type=unixio,path=/tmp/qemu-swtpm/sock" +
                    " --pid file=%s --tpm2" % pidfile.name)
                if result == 0:
                    self.pidOfSwTpm = self.getpid(pidfile.name)
                    self.mtda.debug(
                        2, "power.qemu.start(): swtpm process started [%d]" %
                        self.pidOfSwTpm)
                else:
                    self.mtda.debug(
                        1, "power.qemu.start(): swtpm process failed (%d)" %
                        result)
                    return False

                options += " -chardev socket,id=chrtpm,path=/tmp/qemu-swtpm/sock"
                options += " -tpmdev emulator,id=tpm0,chardev=chrtpm"
                options += " -device tpm-tis,tpmdev=tpm0"

        with tempfile.NamedTemporaryFile() as pidfile:
            options += " -pidfile {0}".format(pidfile.name)
            result = os.system("%s %s" % (self.executable, options))
            if result == 0:
                self.pidOfQemu = self.getpid(pidfile.name)
                self.mtda.debug(
                    2, "power.qemu.start(): qemu process started [%d]" %
                    self.pidOfQemu)
                return True
            else:
                self.mtda.debug(
                    1, "power.qemu.start(): qemu process failed (%d)" % result)
        return False
Example #30
# Parse the UE4 version information
rootDir = sys.argv[1]
version = json.loads(readFile(join(rootDir, 'Engine', 'Build', 'Build.version')))

# Determine if we are excluding debug symbols
truncateDebug = len(sys.argv) > 2 and sys.argv[2] == '1'
if truncateDebug == True:
	
	# Truncate all PDB files to save space whilst avoiding the issues that would be caused by the files being missing
	log('User opted to exclude debug symbols, truncating all PDB files.')
	log('Scanning for PDB files in directory {}...'.format(rootDir))
	pdbFiles = glob.glob(join(rootDir, '**', '*.pdb'), recursive=True)
	for pdbFile in pdbFiles:
		log('Truncating PDB file {}...'.format(pdbFile))
		try:
			os.truncate(pdbFile, 0)
		except:
			log('  Warning: failed to truncate PDB file {}.'.format(pdbFile))
	
	# Under UE4.19, we need to delete the PDB files for AutomationTool entirely, since truncated files cause issues
	if version['MinorVersion'] < 20:
		pdbFiles = glob.glob(join(rootDir, 'Engine', 'Source', 'Programs', 'AutomationTool', '**', '*.pdb'), recursive=True)
		for pdbFile in pdbFiles:
			log('Removing PDB file {}...'.format(pdbFile))
			try:
				os.unlink(pdbFile)
			except:
				log('  Warning: failed to remove PDB file {}.'.format(pdbFile))

# Determine if we are excluding the Engine's template projects and samples
excludeTemplates = len(sys.argv) > 3 and sys.argv[3] == '1'
Example #31
    def startup_walk_for_missed_files(self):
        """Check xlog and xlog_incoming directories for files that receivexlog has received but not yet
        compressed as well as the files we have compressed but not yet uploaded and process them."""
        for site in self.config["backup_sites"]:
            compressed_xlog_path, _ = self.create_backup_site_paths(site)
            uncompressed_xlog_path = compressed_xlog_path + "_incoming"

            # Process uncompressed files (ie WAL pg_receivexlog received)
            for filename in os.listdir(uncompressed_xlog_path):
                full_path = os.path.join(uncompressed_xlog_path, filename)
                if wal.PARTIAL_WAL_RE.match(filename):
                    # pg_receivewal may have been in the middle of storing WAL file when PGHoard was stopped.
                    # If the file is 0 or 16 MiB in size it will continue normally but in some cases the file can be
                    # incomplete causing pg_receivewal to halt processing. Truncating the file to zero bytes correctly
                    # makes it continue streaming from the beginning of that segment.
                    file_size = os.stat(full_path).st_size
                    if file_size in {0, wal.WAL_SEG_SIZE}:
                        self.log.info("Found partial file %r, size %d bytes",
                                      full_path, file_size)
                    else:
                        self.log.warning(
                            "Found partial file %r with unexpected size %d, truncating to zero bytes",
                            full_path, file_size)
                        # Make a copy of the file for safekeeping. The data should still be available on PG
                        # side but just in case it isn't the incomplete segment could still be relevant for
                        # manual processing later
                        shutil.copyfile(full_path, full_path + "_incomplete")
                        self.metrics.increase(
                            "pghoard.incomplete_partial_wal_segment")
                        os.truncate(full_path, 0)
                    continue

                if not wal.WAL_RE.match(
                        filename) and not wal.TIMELINE_RE.match(filename):
                    self.log.warning(
                        "Found invalid file %r from incoming xlog directory",
                        full_path)
                    continue

                compression_event = {
                    "delete_file_after_compression": True,
                    "full_path": full_path,
                    "site": site,
                    "src_path": "{}.partial",
                    "type": "MOVE",
                }
                self.log.debug(
                    "Found: %r when starting up, adding to compression queue",
                    compression_event)
                self.compression_queue.put(compression_event)

            # Process compressed files (ie things we've processed but not yet uploaded)
            for filename in os.listdir(compressed_xlog_path):
                if filename.endswith(".metadata"):
                    continue  # silently ignore .metadata files, they're expected and processed below
                full_path = os.path.join(compressed_xlog_path, filename)
                metadata_path = full_path + ".metadata"
                is_xlog = wal.WAL_RE.match(filename)
                is_timeline = wal.TIMELINE_RE.match(filename)
                if not ((is_xlog or is_timeline)
                        and os.path.exists(metadata_path)):
                    self.log.warning(
                        "Found invalid file %r from compressed xlog directory",
                        full_path)
                    continue
                with open(metadata_path, "r") as fp:
                    metadata = json.load(fp)

                transfer_event = {
                    "file_size": os.path.getsize(full_path),
                    "filetype": "xlog" if is_xlog else "timeline",
                    "local_path": full_path,
                    "metadata": metadata,
                    "site": site,
                    "type": "UPLOAD",
                }
                self.log.debug(
                    "Found: %r when starting up, adding to transfer queue",
                    transfer_event)
                self.transfer_queue.put(transfer_event)
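
The comments above describe the key decision: partial segments that are 0 bytes or exactly one WAL segment long are left alone, while anything in between is copied aside and truncated to zero so pg_receivewal can restart the segment cleanly. A standalone sketch of that check, assuming the default 16 MiB segment size (the constant and function names here are illustrative, not pghoard's):

import os
import shutil

WAL_SEG_SIZE = 16 * 1024 * 1024  # default PostgreSQL WAL segment size


def reset_incomplete_partial(path):
    # Files that are empty or exactly one segment long are fine; anything
    # else is an interrupted write, so keep a copy and truncate to zero.
    size = os.stat(path).st_size
    if size in (0, WAL_SEG_SIZE):
        return False
    shutil.copyfile(path, path + "_incomplete")
    os.truncate(path, 0)
    return True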
Ejemplo n.º 32
0
 def _overwrite_file_with_content(self, name, content=""):
     handler = os.open(name, os.O_RDWR)
     os.truncate(name, 0)
     os.write(handler, str.encode(content))
     os.close(handler)
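
The pattern above (truncate first, then write through a raw descriptor) is what prevents leftover bytes when the new content is shorter than the old. A standalone sketch of the same idea on a throwaway file (the path is made up for the example):

import os
import tempfile

path = os.path.join(tempfile.gettempdir(), "overwrite-demo.txt")
with open(path, "w") as fp:
    fp.write("old content that is fairly long")

handler = os.open(path, os.O_RDWR)
os.truncate(path, 0)          # drop the old bytes first
os.write(handler, b"new")     # the descriptor still points at offset 0
os.close(handler)

with open(path) as fp:
    assert fp.read() == "new"
os.remove(path)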
Ejemplo n.º 33
0
def convert_seed_inputs(ktest_tool, input_klee, input_corpus):
    """
    Convert seeds to a format KLEE understands.

    Returns the number of converted seeds.
    """

    print('[run_fuzzer] Converting seed files...')

    # We put the file data into the symbolic buffer
    # and set model_version to 1 for uc-libc.
    model = struct.pack('@i', 1)
    files = glob.glob(os.path.join(input_corpus, '*'))
    n_converted = 0

    for seedfile in files:
        if '.ktest' in seedfile:
            continue

        if not os.path.isfile(seedfile):
            continue

        # Truncate the seed to the max size for the benchmark
        file_size = os.path.getsize(seedfile)
        benchmark_size = get_size_for_benchmark()
        if file_size > benchmark_size:
            print('[run_fuzzer] Truncating {path} ({file_size}) to '
                  '{benchmark_size}'.format(path=seedfile,
                                            file_size=file_size,
                                            benchmark_size=benchmark_size))
            os.truncate(seedfile, benchmark_size)

        seed_in = '{seed}.ktest'.format(seed=seedfile)
        seed_out = os.path.join(input_klee, os.path.basename(seed_in))

        # Create file for symbolic buffer
        input_file = '{seed}.ktest.{symbolic}'.format(seed=seedfile,
                                                      symbolic=SYMBOLIC_BUFFER)
        output_kfile = '{seed}.ktest'.format(seed=seedfile)
        shutil.copyfile(seedfile, input_file)
        os.rename(seedfile, input_file)

        # Create file for model version
        model_input_file = '{seed}.ktest.{symbolic}'.format(
            seed=seedfile, symbolic=MODEL_VERSION)
        with open(model_input_file, 'wb') as mfile:
            mfile.write(model)

        # Run conversion tool
        convert_cmd = [
            ktest_tool, 'create', output_kfile, '--args', seed_out,
            '--objects', MODEL_VERSION, SYMBOLIC_BUFFER
        ]

        run(convert_cmd)

        # Move the resulting file to klee corpus dir
        os.rename(seed_in, seed_out)

        n_converted += 1

    print('[run_fuzzer] Converted {converted} seed files'.format(
        converted=n_converted))

    return n_converted
Ejemplo n.º 34
0
    lenght = 0
    old_time = 0

    flag_urg = 0
    flag_ack = 0
    flag_psh = 0
    flag_rst = 0
    flag_syn = 0
    flag_fin = 0

    API = "http://127.0.0.1:4020/API/v1/getTrace"

    PARAMS = {'authkey': "vzkTx3652MKsLNmV4wH3oaGSzsfMGP"}

    with requests.get(url=API, params=PARAMS, stream=True) as r:
        os.truncate(fl, lhf)
        os.lseek(fl, 0, 0)
        os.write(fl, r.content)
        lhf = len(r.content)

    try:
        pkts = rdpcap(filename)
    except Exception:
        continue

    for pkt in pkts:
        if pkt.haslayer(IP):
            if pkt.haslayer(TCP):
                F = pkt[TCP].flags  # this should give you an integer

                if F & URG:
Ejemplo n.º 35
0
 def _prepare_one_volume(self, volume_index, name, volume):
     volume.part_images = []
     farthest_offset = 0
     for partnum, part in enumerate(volume.structures):
         part_img = os.path.join(
             volume.basedir, 'part{}.img'.format(partnum))
         if part.role is StructureRole.system_data:
             # The image for the root partition.
             if part.size is None:
                 part.size = self.rootfs_size
             elif part.size < self.rootfs_size:
                 _logger.warning('rootfs partition size ({}) smaller than '
                                 'actual rootfs contents {}'.format(
                                     part.size, self.rootfs_size))
                 part.size = self.rootfs_size
             # We defer creating the root file system image because we have
             # to populate it at the same time.  See mkfs.ext4(8) for
             # details.
             Path(part_img).touch()
             os.truncate(part_img, part.size)
         else:
             run('dd if=/dev/zero of={} count=0 bs={} seek=1'.format(
                 part_img, part.size))
             if part.filesystem is FileSystemType.vfat:
                 label_option = (
                     '-n {}'.format(part.filesystem_label)
                     # TODO: I think this could be None or the empty string,
                     # but this needs verification.
                     if part.filesystem_label
                     else '')
                 # TODO: hard-coding of sector size.
                 run('mkfs.vfat -s 1 -S 512 -F 32 {} {}'.format(
                     label_option, part_img))
         volume.part_images.append(part_img)
         farthest_offset = max(farthest_offset, (part.offset + part.size))
     # Calculate or check the final image size.
     #
     # TODO: Hard-codes last 34 512-byte sectors for backup GPT,
     # empirically derived from sgdisk behavior.
     calculated = ceil(farthest_offset / 1024 + 17) * 1024
     if self.args.image_size is None:
         volume.image_size = calculated
     elif isinstance(self.args.image_size, int):
         # One size to rule them all.
         if self.args.image_size < calculated:
             _logger.warning(
                 'Ignoring image size smaller '
                 'than minimum required size: vol[{}]:{} '
                 '{} < {}'.format(volume_index, name,
                                  self.args.given_image_size, calculated))
             volume.image_size = calculated
         else:
             volume.image_size = self.args.image_size
     else:
         # The --image-size arguments are a dictionary, so look up the
         # one used for this volume.
         size_by_index = self.args.image_size.get(volume_index)
         size_by_name = self.args.image_size.get(name)
         if size_by_index is not None and size_by_name is not None:
             _logger.warning(
                 'Ignoring ambiguous volume size; index+name given')
             volume.image_size = calculated
         else:
             image_size = (size_by_index
                           if size_by_name is None
                           else size_by_name)
             if image_size < calculated:
                 _logger.warning(
                     'Ignoring image size smaller '
                     'than minimum required size: vol[{}]:{} '
                     '{} < {}'.format(volume_index, name,
                                      self.args.given_image_size,
                                      calculated))
                 volume.image_size = calculated
             else:
                 volume.image_size = image_size
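
Both branches above end up with sparse files of the requested size: `Path(...).touch()` plus `os.truncate()` for the deferred rootfs image, and `dd ... count=0 bs=<size> seek=1` for the other partitions. A minimal sketch of the pure-Python variant, with an arbitrary example size, showing that the apparent size is set without allocating data blocks:

import os
from pathlib import Path

part_img = "part0.img"
size = 8 * 1024 * 1024  # 8 MiB, arbitrary example size

Path(part_img).touch()
os.truncate(part_img, size)

assert os.path.getsize(part_img) == size   # apparent size
print(os.stat(part_img).st_blocks)         # usually 0 on a sparse-capable filesystem
os.remove(part_img)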
Ejemplo n.º 36
0
def check_replace(string, table_word_list):
    if word_exist(table_word_list, string):
        return '' # ignore sqlblock with these tables.
    else:
        name = get_name(string)
        string = replace(string, name)
        outputfile(string)
        return ''

def main(filename, table_word_list):
    with open(filename, 'r', encoding='utf-8-sig') as f:
        string = ''
        for line in f:
            if ';' in line:
                string = string + line
                string = check_replace(string, table_word_list)
            elif line != '\n':
                string = string + line

if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.exit("Please include an input filename")
    else:
        filename = sys.argv[1]
        table_word_list = sys.argv[2:]
    if os.path.exists('output.sql'):
        os.truncate('output.sql', 0)
    main(filename, table_word_list)


Ejemplo n.º 37
0
 def run(self, id, path, size):
     path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
     with open(path, 'ab') as device:
         os.truncate(device.fileno(), size)
Ejemplo n.º 38
0
def coalescer_main():

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--service-only",
        type=str,
        help="Only run this space separated list of services",
    )
    parser.add_argument(
        "-x",
        "--exclude-services",
        type=str,
        help="Exclude running this space separated list of services",
    )

    parser.add_argument("-c",
                        "--config",
                        type=str,
                        help="alternate config file")
    parser.add_argument(
        "--run-once",
        default=False,
        help='Run the coalescer once and exit',
        action='store_true',
    )
    parser.add_argument(
        "-p",
        "--period",
        type=str,
        help=('Override the period specified in config file with this. '
              'Format is <period><m|h|d|w>. 1h is 1 hour, 2w is 2 weeks etc.'))
    parser.add_argument("--no-sqpoller",
                        action='store_true',
                        help=argparse.SUPPRESS)

    userargs = parser.parse_args()

    cfg = load_sq_config(config_file=userargs.config)
    if not cfg:
        print(f'Invalid Suzieq config file {userargs.config}')
        sys.exit(1)

    logfile, loglevel, logsize, log_stdout = get_log_params(
        'coalescer', cfg, '/tmp/sq-coalescer.log')
    logger = init_logger('suzieq.coalescer', logfile, loglevel, logsize,
                         log_stdout)

    # Ensure we're the only coalescer running
    coalesce_dir = cfg.get('coalescer', {})\
        .get('coalesce-directory',
             f'{cfg.get("data-directory")}/coalesced')

    fd = ensure_single_instance(f'{coalesce_dir}/.sq-coalescer.pid', False)
    if not fd:
        print('ERROR: Another coalescer process present')
        logger.error('Another coalescer process present')
        sys.exit(errno.EBUSY)

    timestr = userargs.period or (cfg.get('coalescer', {
        'period': '1h'
    }).get('period', '1h'))

    schemas = Schema(cfg.get('schema-directory'))
    if userargs.service_only or userargs.exclude_services:
        tables = [
            x for x in schemas.tables()
            if (schemas.type_for_table(x) != "derivedRecord")
        ]
        if userargs.service_only:
            tables = [x for x in tables if x in userargs.service_only.split()]
        if userargs.exclude_services:
            tables = [
                x for x in tables
                if x not in userargs.exclude_services.split()
            ]
    else:
        tables = []

    run_coalescer(cfg, tables, timestr, userargs.run_once, logger,
                  userargs.no_sqpoller or False)
    os.truncate(fd, 0)
    try:
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
    except OSError:
        pass

    sys.exit(0)
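
`ensure_single_instance()` is imported from elsewhere; judging by the cleanup at the end, it returns a locked file descriptor for a pid file. A rough sketch of how such a helper could be built with `fcntl.flock`, offered as an assumption rather than suzieq's actual implementation:

import fcntl
import os


def ensure_single_instance(pidfile, block=False):
    # Open (or create) the pid file and take an exclusive lock on it.
    # Returns the locked fd, or 0 if another instance already holds it.
    fd = os.open(pidfile, os.O_RDWR | os.O_CREAT, 0o644)
    try:
        flags = fcntl.LOCK_EX | (0 if block else fcntl.LOCK_NB)
        fcntl.flock(fd, flags)
    except OSError:
        os.close(fd)
        return 0
    os.truncate(fd, 0)
    os.write(fd, str(os.getpid()).encode())
    return fd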
Ejemplo n.º 39
0
def locker(file_path, password, remove=True):
    """Provides file locking/unlocking mechanism
    This function either encrypts or decrypts the file - *file_path*.
    Encryption or decryption depends upon the file's extension.
    The user's encryption or decryption task is almost automated since
    *encryption* or *decryption* is determined by the file's extension.


    Usage
    -----
    file_path = File to be written on.

    password  = Key to be used for encryption/decryption.

    remove    = If set to True, the file that is being
                encrypted or decrypted will be removed.
                (Default: True).
    """

    # The file is being decrypted
    try:
        if file_path.endswith(EXT):
            method = 'decrypt'
            flag = False
            new_file = os.path.splitext(file_path)[0]

            # Retrieve the nonce and remove it from the
            # encrypted file

            with open(file_path, 'rb+') as f:
                f.seek(-(NONCE_SIZE + SALT_LEN), 2)
                nonce, salt = unpack('<{}s{}s'.format(NONCE_SIZE, SALT_LEN),
                                     f.read())

            orig_size = os.path.getsize(file_path) - (NONCE_SIZE + SALT_LEN)
            os.truncate(file_path, orig_size)

        # The file is being encrypted
        else:
            method = 'encrypt'
            flag = True
            new_file = file_path + EXT

            salt = os.urandom(SALT_LEN)
            nonce = os.urandom(NONCE_SIZE)

        # Create a cipher with the required method

        key = hashlib.pbkdf2_hmac('sha3-256', password, salt, 10000, 32)
        cipher = getattr(AESGCM(key), method)

        # Create a partial function with default values.

        crp = partial(cipher, nonce=nonce, associated_data=None)

        # Read from *file_path* and write to the *new_file*
        try:
            _writer(
                file_path,
                new_file,
                crp,
                flag,
                nonce=nonce,
                salt=salt,
            )
        except InvalidTag as err:
            os.remove(new_file)
            raise InvalidTag('Invalid Password or tampered data.')

        if remove:
            os.remove(file_path)

    except Exception as err:
        raise err
Ejemplo n.º 40
0
def _truncate(path, length):
    fd = os.open(path, os.O_RDWR)
    try:
        os.truncate(fd, length)
    finally:
        os.close(fd)
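
A short usage sketch for the fd-based helper above. `os.truncate` works on both paths and open descriptors, and it can shrink as well as extend a file; extension pads with zero bytes:

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as fp:
    fp.write(b"0123456789")
    path = fp.name

_truncate(path, 4)      # shrink: keeps only the first 4 bytes
with open(path, "rb") as fp:
    assert fp.read() == b"0123"

_truncate(path, 8)      # extend: the new tail reads as zero bytes
with open(path, "rb") as fp:
    assert fp.read() == b"0123\x00\x00\x00\x00"

os.remove(path)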
Ejemplo n.º 41
0
File: test.py Project: kvap/raft
def clear_logs():
    for filename in logfiles:
        try:
            os.truncate(filename, 0)
        except OSError:
            pass
Ejemplo n.º 42
0
def clearFile(fName):
    if os.path.isfile(fName):
        os.truncate(fName,0)
    else:
        with open(fName,'wb') as f:
            pass
Ejemplo n.º 43
0
def main():
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--clang')
    parser.add_argument('--save-ast')
    parser.add_argument('--save-pp')
    parser.add_argument('--load-pp')
    parser.add_argument('--load-ast')
    parser.add_argument('--output')
    parser.add_argument('options')
    parser.add_argument('all_file')
    args = parser.parse_args()

    with open(args.options) as fp:
        compiler_params = fp.read().split('\n')
    compiler_params.append('-std=c++17')

    parser = ClangJsonAstParser(args.all_file, compiler_params, args.clang)
    if args.load_pp:
        logging.info(f'Load {args.load_pp}')
        parser._source_code = open(args.load_pp, 'rb').read()
    else:
        logging.info(f'Preprocess {args.all_file}')
        parser.preprocess()

    if args.load_ast:
        logging.info(f'Load {args.load_ast}')
        parser._json_ast = json.load(open(args.load_ast))
    else:
        logging.info(f'Parse {args.all_file}')
        parser.parse_json_ast()

    # Parse json ast and create a convenient object tree.
    root_node = parser.create_ast()

    # Truncate pregenerated files
    for file_name in os.listdir(args.output):
        os.truncate(os.path.join(args.output, file_name), 0)

    # Run passes on AST
    logging.info(f'Run AST passes')
    writer = InterfaceWriter(os.path.join(args.output))
    bind_lang = 'cs'
    try:
        passes = [
            TrimNodesPass(parser, writer, bind_lang),
            DefineConstantsPass(parser, writer, bind_lang),
            DefineRefCountedPass(parser, writer, bind_lang),
            DefineEnumValuesPass(parser, writer, bind_lang),
            DiscoverInterfacesPass(parser, writer, bind_lang),
            DefinePropertiesPass(parser, writer, bind_lang),
            DefineEventsPass(parser, writer, bind_lang),
        ]
        for pass_instance in passes:
            pass_instance.on_start()
            pass_instance.walk_ast(root_node)
            pass_instance.on_finish()
    finally:
        writer.close()

    # Save AST or preprocessed source code (for debugging)
    if args.save_ast is not None:
        logging.info(f'Save {args.save_ast}')
        parser.save_ast(args.save_ast)
    if args.save_pp:
        logging.info(f'Save {args.save_pp}')
        parser.save_preprocessed_source_code(args.save_pp)
Ejemplo n.º 44
0
 def _prepare_one_volume(self, volume_index, name, volume):
     volume.part_images = []
     farthest_offset = 0
     for partnum, part in enumerate(volume.structures):
         part_img = os.path.join(volume.basedir,
                                 'part{}.img'.format(partnum))
         # The system-data and system-seed partitions do not have to have
         # an explicit size set.
         if part.role in (StructureRole.system_data,
                          StructureRole.system_seed):
             if part.size is None:
                 part.size = self.rootfs_size
             elif part.size < self.rootfs_size:
                 _logger.warning('rootfs partition size ({}) smaller than '
                                 'actual rootfs contents {}'.format(
                                     part.size, self.rootfs_size))
                 part.size = self.rootfs_size
         # Create the actual image files now.
         if part.role is StructureRole.system_data:
             # The image for the root partition.
             # We defer creating the root file system image because we have
             # to populate it at the same time.  See mkfs.ext4(8) for
             # details.
             Path(part_img).touch()
             os.truncate(part_img, part.size)
         else:
             run('dd if=/dev/zero of={} count=0 bs={} seek=1'.format(
                 part_img, part.size))
             if part.filesystem is FileSystemType.vfat:
                 label_option = (
                     '-n {}'.format(part.filesystem_label)
                     # TODO: I think this could be None or the empty string,
                     # but this needs verification.
                     if part.filesystem_label else '')
                 # TODO: hard-coding of sector size.
                 run('mkfs.vfat -s 1 -S 512 -F 32 {} {}'.format(
                     label_option, part_img))
         volume.part_images.append(part_img)
         farthest_offset = max(farthest_offset, (part.offset + part.size))
     # Calculate or check the final image size.
     #
     # TODO: Hard-codes last 34 512-byte sectors for backup GPT,
     # empirically derived from sgdisk behavior.
     calculated = ceil(farthest_offset / 1024 + 17) * 1024
     if self.args.image_size is None:
         volume.image_size = calculated
     elif isinstance(self.args.image_size, int):
         # One size to rule them all.
         if self.args.image_size < calculated:
             _logger.warning('Ignoring image size smaller '
                             'than minimum required size: vol[{}]:{} '
                             '{} < {}'.format(volume_index, name,
                                              self.args.given_image_size,
                                              calculated))
             volume.image_size = calculated
         else:
             volume.image_size = self.args.image_size
     else:
         # The --image-size arguments are a dictionary, so look up the
         # one used for this volume.
         size_by_index = self.args.image_size.get(volume_index)
         size_by_name = self.args.image_size.get(name)
         if size_by_index is not None and size_by_name is not None:
             _logger.warning(
                 'Ignoring ambiguous volume size; index+name given')
             volume.image_size = calculated
         else:
             image_size = (size_by_index
                           if size_by_name is None else size_by_name)
             if image_size < calculated:
                 _logger.warning('Ignoring image size smaller '
                                 'than minimum required size: vol[{}]:{} '
                                 '{} < {}'.format(
                                     volume_index, name,
                                     self.args.given_image_size,
                                     calculated))
                 volume.image_size = calculated
             else:
                 volume.image_size = image_size
Ejemplo n.º 45
0
def clear_logs():
    for filename in logfiles:
        try:
            os.truncate(filename, 0)
        except OSError:
            pass
Ejemplo n.º 46
0
os.rmdir(path="path")  # $ getAPathArgument="path"

os.scandir("path")  # $ getAPathArgument="path"
os.scandir(path="path")  # $ getAPathArgument="path"

os.stat("path")  # $ getAPathArgument="path"
os.stat(path="path")  # $ getAPathArgument="path"

os.statvfs("path")  # $ getAPathArgument="path"
os.statvfs(path="path")  # $ getAPathArgument="path"

os.symlink("src", "dst")  # $ getAPathArgument="src" getAPathArgument="dst"
os.symlink(src="src",
           dst="dst")  # $ getAPathArgument="src" getAPathArgument="dst"

os.truncate("path", 42)  # $ getAPathArgument="path"
os.truncate(path="path", length=42)  # $ getAPathArgument="path"

os.unlink("path")  # $ getAPathArgument="path"
os.unlink(path="path")  # $ getAPathArgument="path"

os.utime("path")  # $ getAPathArgument="path"
os.utime(path="path")  # $ getAPathArgument="path"

os.walk("top")  # $ getAPathArgument="top"
os.walk(top="top")  # $ getAPathArgument="top"

os.fwalk("top")  # $ getAPathArgument="top"
os.fwalk(top="top")  # $ getAPathArgument="top"

# Linux only
Ejemplo n.º 47
0
        mbr = ifh.read(446)
        ifh.seek(0, 2)
        cur_filesize = ifh.tell()

    ####### check what to do

    if len(sys.argv) > 2:
        if sys.argv[2][0] == '+':
            image_newsize = cur_filesize + int(sys.argv[2][1:])
        else:
            image_newsize = int(sys.argv[2])

    ####### resize image file #######

    if image_newsize and cur_filesize < image_newsize:
        os.truncate(image_filepath, image_newsize)

    ####### partion image file #######

    parted_dev = parted.getDevice(image_filepath)
    parted_disk = parted.Disk(parted_dev)

    if len(parted_disk.partitions) + 1 > parted_disk.maxSupportedPartitionCount:
        sys.exit(1)

    # resizeToMaxExt4PartitionOptimizedForMicroSDHC(parted_disk, part_home_size) #part3 Home
    resizeToMaxExt4PartitionOptimizedForMicroSDHC(
        parted_disk)  #part4 Var, max remaining size
    parted_disk.commit()
Ejemplo n.º 48
0
def questionnaire(slug):
    data = _get_questionnaire_data(slug)

    if request.method == 'GET':
        return render_template('questionnaire.html',
                               questionnaire=data,
                               slug=slug)

    if request.cookies.get(slug) == 'voted':
        flash('请勿重复提交喔 😯')
        return redirect(url_for('ques.square'))

    form = request.form
    print(f"form: {form}")
    result_file_path = os.path.join(_get_option('RESULTS_DIR'), slug + '.json')

    assert (Path(result_file_path).is_file())
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(5)

    fd = os.open(result_file_path, os.O_RDWR)  # the return value is an int file descriptor
    fcntl.flock(fd, fcntl.LOCK_EX)

    result_dict = defaultdict(lambda: (None, None))
    result_str = os.read(fd, 100000).decode("utf-8")
    result_dict = json.loads(result_str)
    if result_dict['total'] == 0:
        result_dict['total'] = 1
        q_list = list()
        # For each question in the questionnaire, collect it into q_list and
        # then assign the list to result_dict['questions'].
        for question in data.get('questions', []):
            q = dict()
            q['label'] = question['label']
            q['type'] = question['type']

            if q['type'] == 'text':  # text question
                t_list = list()
                t_list.append(form[q['label']])
                q['texts'] = t_list
            else:  # multiple-choice question
                # If "Other" was selected, append the free text to other_options.
                if form[q['label']] == 'Other':
                    q['other_options'] = [form['other_option']]
                o_list = list()
                for option in question['options']:
                    o = dict()
                    o['option'] = option
                    # count & percentage
                    if option in form[q['label']]:
                        o['count'] = 1
                        if q['type'] == 'radio':
                            o['percentage'] = 100
                    else:
                        o['count'] = 0
                        if q['type'] == 'radio':
                            o['percentage'] = 0
                    o_list.append(o)
                q['result'] = o_list
            q_list.append(q)

        result_dict['questions'] = q_list
    else:
        result_dict['total'] = result_dict['total'] + 1
        # Append text answers, increment the count of chosen options, and reset all percentages.
        for q in result_dict['questions']:
            if 'texts' in q:  # text question
                if form[q['label']] != '':
                    q['texts'].append(form[q['label']])
            elif 'result' in q:  # multiple-choice question
                for r in q['result']:
                    if r['option'] in form[q['label']]:
                        r['count'] = r['count'] + 1
                    if 'percentage' in r:
                        r['percentage'] = 0  # zero out all option percentages first
                # If "Other" was selected, append the free text to other_options.
                if form[q['label']] == 'Other':
                    q['other_options'].append(form['other_option'])
            else:
                raise Error('No matching question type in result json.')
        # Update the percentage for radio (single-choice) questions (count / total).
        for q in result_dict['questions']:
            if q['type'] == 'radio':
                for r in q['result']:
                    r['percentage'] = format(
                        int(r['count']) / int(result_dict['total']) * 100,
                        '0.2f')

    os.lseek(fd, 0, os.SEEK_SET)  # seek back to the beginning of the file
    os.write(
        fd, bytes(json.dumps(result_dict, indent=4, ensure_ascii=False),
                  'utf8'))
    cur_pos = os.lseek(fd, 0, os.SEEK_CUR)
    os.truncate(fd, cur_pos)
    fcntl.flock(fd, fcntl.LOCK_UN)
    os.close(fd)
    signal.alarm(0)

    resp = make_response(redirect(url_for('ques.square')))
    resp.set_cookie(slug, 'voted')
    flash('谢谢参与!')
    return resp
Ejemplo n.º 49
0
def locker(file_path, password, remove=True):
    """Provides file locking/unlocking mechanism
    This function either encrypts or decrypts the file - *file_path*.
    Encryption or decryption depends upon the file's extension.
    The user's encryption or decryption task is almost automated since
    *encryption* or *decryption* is determined by the file's extension.
  
    Added:
        After *file_path* is decrypted, the decrypted file is verified.
        If verification fails, either the password is incorrect or the
        encrypted data has been tampered with.

    Usage
    -----
    file_path = File to be written on.

    password  = Key to be used for encryption/decryption.
                - Raises DecryptionError if *password* is incorrect
                  or the encrypted data has been tampered with.

    remove    = If set to True, the file that is being
                encrypted or decrypted will be removed.
                (Default: True).
    """

    try:

        # The file is being decrypted

        if file_path.endswith(EXT):
            method = 'decrypt'
            flag = False

            # Read the *nonce* and *mac* values.
            # Please note that we are receiving the *nonce*
            # and *mac* values.

            with open(file_path, 'rb+') as f:
                f.seek(-(NONCE_SIZE + MAC_LEN + SALT_LEN), 2)
                (nonce, mac, salt) = unpack('<{}s{}s{}s'.format(NONCE_SIZE, 
                                                                MAC_LEN, 
                                                                SALT_LEN),
                                            f.read())

            # Remove the *mac* and *nonce* from the encrypted file.
            # If not removed, incorrect decryption will occur.

            orig_file_size = os.path.getsize(file_path) - (NONCE_SIZE + 
                                                           MAC_LEN + 
                                                           SALT_LEN)
            os.truncate(file_path, orig_file_size)
            new_file = os.path.splitext(file_path)[0]

        else:

            # The file is being encrypted.

            method = 'encrypt'
            flag = True
            new_file = file_path + EXT

            # Generate a *nonce* and set the mac to None,
            # As the *mac* ***will not be received*** this time
            # but it will be generated after encryption.
            #
            # Generation will take place in _writer(...)
            nonce = os.urandom(NONCE_SIZE)
            salt = os.urandom(SALT_LEN)
            mac = None

        key = hashlib.pbkdf2_hmac('sha512', password, salt, 50000, 32)

        # ============ CIPHER GENERATION PORTION ===============
        # A cipher object will take care of the all
        # the required mac_tag and verification.
        # AES-GCM-256 chosen for security and authentication

        cipher_obj = AES.new(key, AES.MODE_GCM, nonce)
        crp = getattr(cipher_obj, method)
        mac_func = getattr(cipher_obj, 'digest')
        verifier = getattr(cipher_obj, 'verify')

        # =============== FILE WRITING PORTION =================
        # Read from the *file_path* and,
        # write to the *new_file* using _writer defined above.

        _writer(file_path,
                new_file,
                crp,
                flag,
                nonce=nonce,
                mac_function=mac_func,
                mac_value=mac,
                salt=salt, )

        # ================ VERIFICATION PORTION ================
        # Verify the file for integrity if the
        # current file is being decrypted.

        if not flag:
            try:
                verifier(mac)

            except ValueError:

                # Remove the incorrectly decrypted file
                # and raise DataDecryptionError.

                os.remove(new_file)

                raise DecryptionError("Invalid password or "
                                      "tampered data.")

        # ======================================================

        # If remove set to True, delete the file
        # that is being worked upon.

        if remove:
            os.remove(file_path)

    except FileNotFoundError:
        pass
    
    except IsADirectoryError:
        pass
Ejemplo n.º 50
0
 def test_load_empty_file(self):
     if not os.path.exists(self.manager._store._path):
         open(self.manager._store._path, 'w+').close()
     os.truncate(self.manager._store._path, 0)
     self.manager._store.load()
Ejemplo n.º 51
0
 def __exit__(self, *exc_info):
     #fcntl.flock(self.fd, fcntl.LOCK_UN)
     os.truncate(self.fd, 0)
     os.fsync(self.fd)
     os.close(self.fd)
     self.fd = -1
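
This `__exit__` looks like one half of a lock-file context manager; the matching `__enter__` (and the commented-out unlock) are not shown. A minimal sketch of the full pattern, assuming an flock-based lock (the class name and locking choice are assumptions, not the original project's):

import fcntl
import os


class LockFile:
    """Hold an exclusive lock on *path* for the duration of a with-block."""

    def __init__(self, path):
        self.path = path
        self.fd = -1

    def __enter__(self):
        self.fd = os.open(self.path, os.O_RDWR | os.O_CREAT, 0o644)
        fcntl.flock(self.fd, fcntl.LOCK_EX)
        return self

    def __exit__(self, *exc_info):
        # Same teardown as the example above: empty, flush, close.
        os.truncate(self.fd, 0)
        os.fsync(self.fd)
        os.close(self.fd)
        self.fd = -1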
Ejemplo n.º 52
0
 def truncateFile(self, path, size):
     os.truncate(self.source_dir + path, size)
Ejemplo n.º 53
0
def create_disk():
    """Create 16M sparse file"""
    pathlib.Path(DISK_PATH).touch()
    os.truncate(DISK_PATH, 1 << 24)
    yield
    os.remove(DISK_PATH)
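
`1 << 24` is 16,777,216 bytes, i.e. 16 MiB, which matches the docstring. The yield suggests this is a pytest fixture; a hedged sketch of how it might be wired up (the DISK_PATH value and the fixture decoration are assumptions for illustration):

import os
import pathlib

import pytest

DISK_PATH = "/tmp/test-disk.img"  # assumed location, not from the original


@pytest.fixture
def disk():
    """Create a 16 MiB sparse file, hand it to the test, then remove it."""
    pathlib.Path(DISK_PATH).touch()
    os.truncate(DISK_PATH, 1 << 24)   # 1 << 24 == 16 * 1024 * 1024
    yield DISK_PATH
    os.remove(DISK_PATH)


def test_disk_size(disk):
    assert os.path.getsize(disk) == 16 * 1024 * 1024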
Ejemplo n.º 54
0
 def test_truncate_file(self):
     fname, text = self.test_write_to_file()
     os.truncate(fname, 0)
     self.assertEqual(os.stat(fname).st_size, 0, 'Truncated file still has non-zero length!')
Ejemplo n.º 55
0
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))

import logging
LOG_LEVEL = logging.DEBUG
log_filename = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'logs', os.path.basename(__file__)[:-3] + '.log'))
if os.path.isfile(log_filename):
    os.truncate(log_filename, 0)
logging.basicConfig(filename=log_filename, level=LOG_LEVEL, style='{',
                    format='[{name}] {levelname}: {message}')
logger = logging.getLogger(__name__)

from datetime import datetime
import tempfile
import random
import re
import unittest

from apache_log_filter import ApacheLogFilter
from apache_log_filter.filters import DictFilter, DictFilterSet, RegExFilterSet


# noinspection PyClassHasNoInit,PyPep8Naming
class SimpleLogFile():
    def setUp(self):
        number_of_test_log_files = 4
        number_of_lines_per_file = 12

        self.format_string = '%h <<%P>> %t %Dus \"%r\" %>s %b  \"%{Referer}i\" \"%{User-Agent}i\" %l %u'
        self.valid_logline = '127.0.0.1 <<6113>> [16/Aug/2013:15:45:34 +0000] 1966093us "GET / HTTP/1.1" 200 3478  "https://example.com/" "Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.18)" - -'
Ejemplo n.º 56
0
def _test_storage(st, driver, tmpdir):
    scratch = tmpdir.join("scratch")
    os.makedirs(str(scratch), exist_ok=True)

    # File not found cases
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_metadata_for_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.delete_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_file("NONEXISTENT", str(scratch.join("a")))
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_fileobj("NONEXISTENT", BytesIO())
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_string("NONEXISTENT")
    assert st.list_path("") == []
    assert st.list_path("NONEXISTENT") == []
    st.store_file_from_memory("NONEXISTENT-a/x1", b"dummy", None)
    dummy_file = str(scratch.join("a"))
    with open(dummy_file, "wb") as fp:
        fp.write(b"dummy")
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, None)
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, {"x": 1})

    st.delete_key("NONEXISTENT-b/x1")
    st.delete_key("NONEXISTENT-a/x1")

    # Other basic cases
    from_disk_file = str(scratch.join("a"))
    with open(from_disk_file, "wb") as fp:
        fp.write(b"from disk")
    st.store_file_from_disk("test1/x1", from_disk_file, None)
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {}
    assert out.getvalue() == b"from disk"

    st.store_file_from_memory("test1/x1", b"dummy", {"k": "v"})
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {"k": "v"}
    assert out.getvalue() == b"dummy"

    st.store_file_from_memory("test1/x1", b"1", None)
    assert st.get_contents_to_string("test1/x1") == (b"1", {})

    st.store_file_from_memory("test1/td", b"to disk", {"to-disk": "42"})
    to_disk_file = str(scratch.join("b"))
    assert st.get_contents_to_file("test1/td", to_disk_file) == {
        "to-disk": "42"
    }

    assert st.list_path("") == [
    ]  # nothing at top level (directories not listed)
    if driver == "local":
        # create a dot-file (hidden), this must be ignored
        target_file = os.path.join(st.prefix, "test1/.null")
        with open(target_file, "w"):
            pass

    tlist = st.list_path("test1")
    assert len(tlist) == 2
    for fe in tlist:
        assert isinstance(fe["last_modified"], datetime.datetime)
        assert fe["last_modified"].tzinfo is not None
        if fe["name"] == "test1/x1":
            assert fe["size"] == 1
            assert fe["metadata"] == {}
        elif fe["name"] == "test1/td":
            assert fe["size"] == len(b"to disk")
            assert fe["metadata"] == {"to-disk": "42"}
        else:
            assert 0, "unexpected name in directory"

    if driver == "google":
        # test extra props for cacheControl in google
        st.store_file_from_memory("test1/x1",
                                  b"no cache test",
                                  metadata={"test": "value"},
                                  extra_props={"cacheControl": "no-cache"})

    if driver == "local":
        # test LocalFileIsRemoteFileError for local storage
        target_file = os.path.join(st.prefix, "test1/x1")
        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.store_file_from_disk("test1/x1", target_file, {"local": True})
        assert st.get_contents_to_string("test1/x1") == (b"1", {
            "local": "True"
        })

        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.get_contents_to_file("test1/x1", target_file)

        # unlink metadata file, this shouldn't break anything
        os.unlink(target_file + ".metadata")
        assert st.get_metadata_for_key("test1/x1") == {}

    st.delete_key("test1/x1")
    st.delete_key("test1/td")
    assert st.list_path("test1") == []  # empty again

    test_hash = hashlib.sha256()
    test_file = str(scratch.join("30m"))
    test_size_send = 0
    with open(test_file, "wb") as fp:
        chunk = b"30m file" * 10000
        while test_size_send < 30 * 1024 * 1024:
            test_hash.update(chunk)
            fp.write(chunk)
            test_size_send += len(chunk)
    test_hash_send = test_hash.hexdigest()

    if driver == "s3":
        # inject a failure in multipart uploads
        def failing_new_key(key_name):  # pylint: disable=unused-argument
            # fail after the second call, restore functionality after the third
            fail_calls[0] += 1
            if fail_calls[0] > 3:
                st.bucket.new_key = orig_new_key
            if fail_calls[0] > 2:
                raise Exception("multipart upload failure!")

        fail_calls = [0]
        orig_new_key = st.bucket.new_key
        st.bucket.new_key = failing_new_key

        st.store_file_from_disk("test1/30m",
                                test_file,
                                multipart=True,
                                metadata={
                                    "30m": "data",
                                    "size": test_size_send
                                })

        assert fail_calls[0] > 3
    else:
        st.store_file_from_disk("test1/30m",
                                test_file,
                                multipart=True,
                                metadata={
                                    "30m": "data",
                                    "size": test_size_send
                                })

    os.unlink(test_file)

    expected_meta = {"30m": "data", "size": str(test_size_send)}
    meta = st.get_metadata_for_key("test1/30m")
    assert meta == expected_meta

    with open(test_file, "wb") as fp:
        assert st.get_contents_to_fileobj("test1/30m", fp) == expected_meta
    test_hash = hashlib.sha256()
    test_size_rec = 0
    with open(test_file, "rb") as fp:
        while True:
            chunk = fp.read(1024 * 1024)
            if not chunk:
                break
            test_hash.update(chunk)
            test_size_rec += len(chunk)
    test_hash_rec = test_hash.hexdigest()
    assert test_hash_rec == test_hash_send
    assert test_size_rec == test_size_send

    tlist = st.list_path("test1")
    assert len(tlist) == 1
    assert tlist[0]["name"] == "test1/30m"
    assert tlist[0]["size"] == test_size_rec

    if driver == "swift":
        segments = test_size_send // st.segment_size
        segment_list = st.list_path("test1_segments/30m")
        assert len(segment_list) >= segments

        if segments >= 2:
            # reupload a file with the same name but with less chunks
            os.truncate(test_file, st.segment_size + 1)
            test_size_send = os.path.getsize(test_file)
            st.store_file_from_disk("test1/30m",
                                    test_file,
                                    multipart=True,
                                    metadata={
                                        "30m": "less data",
                                        "size": test_size_send
                                    })

            segment_list = st.list_path("test1_segments/30m")
            assert len(segment_list) == 2
            assert len(st.list_path("test1")) == 1

    st.delete_key("test1/30m")
    assert st.list_path("test1") == []

    if driver == "swift":
        assert st.list_path("test1_segments/30m") == []
Ejemplo n.º 57
0
 def truncate(self, length):
     os.truncate(self, length)
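
Passing `self` directly to `os.truncate` only works if the instance is itself path-like, for example a `str` subclass or a class implementing `__fspath__`. A small sketch of such a wrapper (the class is illustrative, not the original project's):

import os
import tempfile


class FilePath(str):
    """A plain str subclass that represents a filesystem path."""

    def truncate(self, length):
        # os.truncate accepts any path-like object, and a str subclass is one.
        os.truncate(self, length)


with tempfile.NamedTemporaryFile(delete=False) as fp:
    fp.write(b"hello world")
    name = fp.name

p = FilePath(name)
p.truncate(5)
with open(p, "rb") as fp:
    assert fp.read() == b"hello"
os.remove(p)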
Ejemplo n.º 58
0
##### controller
# it starts the processes and, while managing space on the device, suspends them

import psutil
import os
import time
import sys

DB_BASE_PATH = os.path.join(os.getcwd(), 'DB', 'WOW')

print('Truncating DB objects...')
# Truncate DB file content
try:
    for dirname, dirnames, filenames in os.walk(DB_BASE_PATH):
        if 'data' in dirname:
            continue
        print(dirname)
        for filename in filenames:
            try:
                if filename.endswith('.json'):
                    file_path = os.path.join(dirname, filename)
                    os.truncate(file_path, 0)
            except os.error as err:
                print(str(err) + ' -- line: ' + str(sys.exc_info()[-1].tb_lineno))
except os.error as err:
    print(str(err) + ' -- line: ' + str(sys.exc_info()[-1].tb_lineno))
    time.sleep(60)
print('...DONE --> usage: ' + str(psutil.disk_usage('.')[-1]))
Ejemplo n.º 59
0
def _test_storage(st, driver, tmpdir):
    scratch = tmpdir.join("scratch")
    os.makedirs(str(scratch), exist_ok=True)

    # File not found cases
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_metadata_for_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.delete_key("NONEXISTENT")
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_file("NONEXISTENT", str(scratch.join("a")))
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_fileobj("NONEXISTENT", BytesIO())
    with pytest.raises(errors.FileNotFoundFromStorageError):
        st.get_contents_to_string("NONEXISTENT")
    assert st.list_path("") == []
    assert st.list_path("NONEXISTENT") == []
    st.store_file_from_memory("NONEXISTENT-a/x1", b"dummy", None)
    dummy_file = str(scratch.join("a"))
    with open(dummy_file, "wb") as fp:
        fp.write(b"dummy")
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, None)
    st.store_file_from_disk("NONEXISTENT-b/x1", dummy_file, {"x": 1})

    st.delete_key("NONEXISTENT-b/x1")
    st.delete_key("NONEXISTENT-a/x1")

    # Other basic cases
    from_disk_file = str(scratch.join("a"))
    with open(from_disk_file, "wb") as fp:
        fp.write(b"from disk")
    st.store_file_from_disk("test1/x1", from_disk_file, None)
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {}
    assert out.getvalue() == b"from disk"

    st.store_file_from_memory("test1/x1", b"dummy", {"k": "v"})
    out = BytesIO()
    assert st.get_contents_to_fileobj("test1/x1", out) == {"k": "v"}
    assert out.getvalue() == b"dummy"

    st.store_file_from_memory("test1/x1", b"1", None)
    assert st.get_contents_to_string("test1/x1") == (b"1", {})

    st.store_file_from_memory("test1/td", b"to disk", {"to-disk": "42"})
    to_disk_file = str(scratch.join("b"))
    assert st.get_contents_to_file("test1/td", to_disk_file) == {"to-disk": "42"}

    assert st.list_path("") == []  # nothing at top level (directories not listed)
    if driver == "local":
        # create a dot-file (hidden), this must be ignored
        target_file = os.path.join(st.prefix, "test1/.null")
        with open(target_file, "w"):
            pass

    tlist = st.list_path("test1")
    assert len(tlist) == 2
    for fe in tlist:
        assert isinstance(fe["last_modified"], datetime.datetime)
        assert fe["last_modified"].tzinfo is not None
        if fe["name"] == "test1/x1":
            assert fe["size"] == 1
            assert fe["metadata"] == {}
        elif fe["name"] == "test1/td":
            assert fe["size"] == len(b"to disk")
            assert fe["metadata"] == {"to-disk": "42"}
        else:
            assert 0, "unexpected name in directory"

    if driver == "google":
        # test extra props for cacheControl in google
        st.store_file_from_memory("test1/x1", b"no cache test",
                                  metadata={"test": "value"},
                                  extra_props={"cacheControl": "no-cache"})

    if driver == "local":
        # test LocalFileIsRemoteFileError for local storage
        target_file = os.path.join(st.prefix, "test1/x1")
        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.store_file_from_disk("test1/x1", target_file, {"local": True})
        assert st.get_contents_to_string("test1/x1") == (b"1", {"local": "True"})

        with pytest.raises(errors.LocalFileIsRemoteFileError):
            st.get_contents_to_file("test1/x1", target_file)

        # unlink metadata file, this shouldn't break anything
        os.unlink(target_file + ".metadata")
        assert st.get_metadata_for_key("test1/x1") == {}

    st.delete_key("test1/x1")
    st.delete_key("test1/td")
    assert st.list_path("test1") == []  # empty again

    test_hash = hashlib.sha256()
    test_file = str(scratch.join("30m"))
    test_size_send = 0
    with open(test_file, "wb") as fp:
        chunk = b"30m file" * 10000
        while test_size_send < 30 * 1024 * 1024:
            test_hash.update(chunk)
            fp.write(chunk)
            test_size_send += len(chunk)
    test_hash_send = test_hash.hexdigest()

    if driver == "s3":
        # inject a failure in multipart uploads
        def failing_new_key(key_name):  # pylint: disable=unused-argument
            # fail after the second call, restore functionality after the third
            fail_calls[0] += 1
            if fail_calls[0] > 3:
                st.bucket.new_key = orig_new_key
            if fail_calls[0] > 2:
                raise Exception("multipart upload failure!")

        fail_calls = [0]
        orig_new_key = st.bucket.new_key
        st.bucket.new_key = failing_new_key

        st.store_file_from_disk("test1/30m", test_file, multipart=True,
                                metadata={"30m": "data", "size": test_size_send})

        assert fail_calls[0] > 3
    else:
        st.store_file_from_disk("test1/30m", test_file, multipart=True,
                                metadata={"30m": "data", "size": test_size_send})

    os.unlink(test_file)

    expected_meta = {"30m": "data", "size": str(test_size_send)}
    meta = st.get_metadata_for_key("test1/30m")
    assert meta == expected_meta

    with open(test_file, "wb") as fp:
        assert st.get_contents_to_fileobj("test1/30m", fp) == expected_meta
    test_hash = hashlib.sha256()
    test_size_rec = 0
    with open(test_file, "rb") as fp:
        while True:
            chunk = fp.read(1024 * 1024)
            if not chunk:
                break
            test_hash.update(chunk)
            test_size_rec += len(chunk)
    test_hash_rec = test_hash.hexdigest()
    assert test_hash_rec == test_hash_send
    assert test_size_rec == test_size_send

    tlist = st.list_path("test1")
    assert len(tlist) == 1
    assert tlist[0]["name"] == "test1/30m"
    assert tlist[0]["size"] == test_size_rec

    if driver == "swift":
        segments = test_size_send // st.segment_size
        segment_list = st.list_path("test1_segments/30m")
        assert len(segment_list) >= segments

        if segments >= 2:
            # reupload a file with the same name but with less chunks
            os.truncate(test_file, st.segment_size + 1)
            test_size_send = os.path.getsize(test_file)
            st.store_file_from_disk("test1/30m", test_file, multipart=True,
                                    metadata={"30m": "less data", "size": test_size_send})

            segment_list = st.list_path("test1_segments/30m")
            assert len(segment_list) == 2
            assert len(st.list_path("test1")) == 1

    st.delete_key("test1/30m")
    assert st.list_path("test1") == []

    if driver == "swift":
        assert st.list_path("test1_segments/30m") == []
Ejemplo n.º 60
0
def TODO_i18n_fetch_error(output_path: Path, message: str):
    os.truncate(output_path, 0)
    return [I18nMessage("TODO_i18n", {"text": message}, None)]