def combiner(name, size, part_size):
    """Reassemble part files from ./temp into a single output file.

    Preallocates `name` to `size` bytes, then starts one Writer thread per
    part file (in sorted filename order, which is assumed to be part order)
    and waits for all of them to finish.

    Args:
        name: Path of the output file to create.
        size: Total number of bytes to preallocate for the output file.
        part_size: Spacing basis for writer offsets; part i writes at
            offset i * (part_size + 1).
            # NOTE(review): the "+ 1" looks odd — confirm parts really are
            # part_size + 1 bytes apart, otherwise outputs will be skewed.
    """
    # Preallocate the destination so every writer can seek into it safely.
    with open(name, "w+b") as f:
        fallocate(f, 0, size)

    # Collect the part files in sorted order; enumerate() replaces the
    # original hand-maintained `k` counter.
    part_names = sorted(
        p for p in listdir("./temp") if isfile(join("./temp", p))
    )
    writers = [
        Writer("./temp/" + part, name, idx * (part_size + 1))
        for idx, part in enumerate(part_names)
    ]

    for w in writers:
        w.start()

    # Block until every part has been written back.
    for w in writers:
        w.join()
Beispiel #2
0
def make_extra_swap():
    """Create and enable a 1.5 GiB swap file at /swapfile.

    Preallocates the file with fallocate, restricts permissions to
    root-only (swapon refuses world-readable swap files), formats it as
    swap, and enables it.  Requires root.

    Raises:
        subprocess.CalledProcessError: if mkswap or swapon fails.
    """
    import subprocess  # local import: keeps the file's top-level imports untouched

    # 1610612736 bytes = 1.5 GiB, reserved without writing zeroes.
    with open("/swapfile", "w+b") as f:
        fallocate(f, 0, 1610612736)

    os.chmod('/swapfile', 0o600)
    # Argument lists (shell=False) instead of os.system shell strings, and
    # check=True so a failed mkswap/swapon is no longer silently ignored.
    subprocess.run(['mkswap', '/swapfile'], check=True)
    subprocess.run(['swapon', '/swapfile'], check=True)
def reallocation_func(target_file, start, size):
    """Punch a hole over [start, start + size) and then re-reserve it.

    Deallocates the range while keeping the file size, then immediately
    allocates the same range again so the filesystem hands back blocks.
    """
    punch_mode = (fallocate.FALLOC_FL_PUNCH_HOLE
                  | fallocate.FALLOC_FL_KEEP_SIZE)
    fallocate.fallocate(target_file, start, size, mode=punch_mode)
    # mode=0 is a plain allocation of the same byte range.
    fallocate.fallocate(target_file, start, size, mode=0)
    def create_test_files(self, backup_extension=".tar.gz"):
        """Create 100 dummy backup files in each configured backup dir.

        Files are named "<i>-test<backup_extension>", sized (i + 1) KiB via
        fallocate, and back-dated by i hours so the cloud archive manager
        has a spread of mtimes to work with.

        Args:
            backup_extension: Suffix appended to every generated file name.
        """
        config_path = self.create_test_config()
        cfg = get_config(config_path)
        cfg['backup_root'] = self.test_dir

        for backup_dir in cfg['backup_dirs']:
            full_dir = os.path.join(cfg['backup_root'], backup_dir)
            if not os.path.exists(full_dir):
                os.makedirs(full_dir)
            for i in range(100):
                filename = str(i) + "-test%s" % backup_extension
                filepath = os.path.join(full_dir, filename)

                # fallocate creates a file of the requested size instantly
                # without writing the bytes.  One binary open is sufficient;
                # the original opened the file twice and called close()
                # redundantly inside the with-block.
                size = (i + 1) * 1024
                with open(filepath, "w+b") as f:
                    fallocate(f, 0, size)

                # Keep atime, push mtime back i hours so there are newer
                # and older files.  File 99 ends up oldest, which is a bit
                # counter-intuitive.
                stat = os.stat(filepath)
                os.utime(filepath, (stat.st_atime, stat.st_mtime - i * 3600))
Beispiel #5
0
 def fallocate_punch_hole_test():
     """Punching a hole with KEEP_SIZE zeroes the range but keeps the size."""
     with tempfile.NamedTemporaryFile() as tmp:
         # A fresh temp file starts out empty.
         assert os.path.getsize(tmp.name) == 0
         tmp.write(b"Hello World")
         tmp.flush()
         tmp.seek(0)
         assert tmp.read() == b"Hello World"
         # Zero out bytes 6..9 ("Worl") in place; the trailing "d" survives
         # and the file length is unchanged.
         fallocate(tmp.fileno(), 6, 4, mode=FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)
         tmp.seek(0)
         assert tmp.read() == b"Hello \x00\x00\x00\x00d"
Beispiel #6
0
 def fallocate_punch_hole_test():
     """Verify FALLOC_FL_PUNCH_HOLE | KEEP_SIZE zeroes a mid-file range."""
     with tempfile.NamedTemporaryFile() as scratch:
         assert os.path.getsize(scratch.name) == 0
         scratch.write(b"Hello World")
         scratch.flush()
         scratch.seek(0)
         # Sanity check before punching.
         assert scratch.read() == b"Hello World"
         # Punch 4 bytes starting at offset 6: "Worl" becomes zeroes while
         # the file size stays the same.
         fallocate(scratch.fileno(), 6, 4, mode=FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)
         scratch.seek(0)
         assert scratch.read() == b"Hello \x00\x00\x00\x00d"
Beispiel #7
0
def test_file_tail(nydus_anchor: NydusAnchor, nydus_scratch_image: RafsImage,
                   backend):
    """
    description: Read data from file tail
        - Create several files of different sizes
        - Punch hole to each file of which some should have hole tail
        - Create rafs image from test scratch directory.
        - Mount rafs
        - Do some test.
    """
    # A spread of sizes; presumably chosen to cross chunk boundaries in the
    # rafs image — TODO confirm against the image's chunk size.
    file_size_list = [
        Size(1, Unit.KB),
        Size(6, Unit.KB),
        Size(2, Unit.MB),
        Size(10034, Unit.KB),
    ]
    file_list = []

    dist = Distributor(nydus_anchor.scratch_dir, 2, 2)
    dist.generate_tree()

    for f_s in file_size_list:
        f_name = dist.put_single_file(f_s)
        file_list.append(f_name)
        # Punch hole
        # The 1000-byte hole starts 500 bytes before EOF, so with KEEP_SIZE
        # the last 500 bytes of each file become a hole at its tail.
        with utils.pushd(nydus_anchor.scratch_dir):
            with open(f_name, "a+b") as f:
                fallocate(
                    f,
                    f_s.B - 500,
                    1000,
                    mode=FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                )

    nydus_scratch_image.set_backend(backend).create_image()

    rafs_conf = RafsConf(nydus_anchor, nydus_scratch_image)
    rafs_conf.set_rafs_backend(backend, image=nydus_scratch_image)

    rafs = RafsMount(nydus_anchor, nydus_scratch_image, rafs_conf)
    rafs.mount()

    # Read each file's tail through the mounted rafs filesystem.
    with utils.pushd(nydus_anchor.mount_point):
        for name in file_list:
            with open(name, "rb") as f:
                size = os.stat(name).st_size
                # Ask for 1000 bytes starting 300 before EOF: the short read
                # must stop exactly at EOF and return the 300-byte tail.
                f.seek(size - 300)
                buf = f.read(1000)
                assert len(buf) == 300

    # Full-content verification of every punched file against the rootfs.
    wg = WorkloadGen(nydus_anchor.mount_point, nydus_scratch_image.rootfs())
    for f in file_list:
        wg.verify_single_file(os.path.join(nydus_anchor.mount_point, f))

    assert wg.io_error == False
Beispiel #8
0
    def fallocate_collapse_size_test():
        """COLLAPSE_SIZE removes a range and shifts the tail down to fill it."""
        try:
            from fallocate import FALLOC_FL_COLLAPSE_SIZE
        except ImportError:
            # Bare `except:` would swallow SystemExit/KeyboardInterrupt too;
            # only a missing symbol should skip this test.
            return  # this installation doesn't have access to FALLOC_FL_COLLAPSE_SIZE, skip

        with tempfile.NamedTemporaryFile() as ntf:
            assert os.path.getsize(ntf.name) == 0
            ntf.write(b"Hello World")
            ntf.flush()
            ntf.seek(0)
            assert ntf.read() == b"Hello World"
            # Collapse the first 6 bytes ("Hello "): the remainder slides to
            # offset 0 and the file shrinks accordingly.
            fallocate(ntf.fileno(), 0, 6, mode=FALLOC_FL_COLLAPSE_SIZE)
            ntf.seek(0)
            assert ntf.read() == b"World"
Beispiel #9
0
    def fallocate_collapse_size_test():
        """Verify FALLOC_FL_COLLAPSE_SIZE cuts a leading range out of a file."""
        try:
            from fallocate import FALLOC_FL_COLLAPSE_SIZE
        except ImportError:
            # Catch only the missing symbol — a bare `except:` also hides
            # SystemExit/KeyboardInterrupt and real bugs.
            return # this installation doesn't have access to FALLOC_FL_COLLAPSE_SIZE, skip

        with tempfile.NamedTemporaryFile() as ntf:
            assert os.path.getsize(ntf.name) == 0
            ntf.write(b"Hello World")
            ntf.flush()
            ntf.seek(0)
            assert ntf.read() == b"Hello World"
            # Remove bytes 0..5 ("Hello "); the file both shifts and shrinks.
            fallocate(ntf.fileno(), 0, 6, mode=FALLOC_FL_COLLAPSE_SIZE)
            ntf.seek(0)
            assert ntf.read() == b"World"
    def setUp(self):
        """Build a fresh nested test directory with three fixed-size files.

        Files are created oldest-first with a short pause between them so
        their modification times are strictly ordered.
        """
        self.top_dir = 'temp_testing_directory'
        # Start from a clean slate if a previous run left the tree behind.
        if os.path.exists(self.top_dir):
            shutil.rmtree(self.top_dir)

        self.nested_dir = 'nested'
        self.dirs = os.path.join(self.top_dir, self.nested_dir)
        os.makedirs(self.dirs)

        base_names = ['oldest', 'middle', 'youngest']
        self.file_names = [
            os.path.join(self.dirs, base) + '.tmp' for base in base_names
        ]
        self.file_size = 1024

        for path in self.file_names:
            with open(path, "w+b") as fh:
                fallocate(fh, 0, self.file_size)
                # Space out creation so mtime ordering matches name order.
                time.sleep(0.1)
        self.dsm = DirectorySizeManager(self.top_dir, self.file_size)
    def test_bad_file_names(self):
        """Test that only the backup_extension files are picked up.

        Drops decoy files with assorted extensions into every backup dir
        and verifies get_files() still returns exactly the 100 real backups
        and none of the decoys.
        """
        archive = self.generic_archive()

        # FIXME: a better way to create these kind of files?
        bad_filename = "shouldntexist"
        file_extension = ['.tar.gz', '.tgz', '.zip', '.txt', '.rar', '.rpm']

        # If the custom backup extension is in the list, remove it
        try:
            file_extension.remove(archive.backup_extension)
        except ValueError:
            # Extension wasn't among the decoys; nothing to remove.
            pass

        for backup_dir in archive.backup_dirs:
            # Create the decoy files.  One binary open is enough; the
            # original opened each file twice and closed the handle
            # redundantly inside the with-block.  `ext` also no longer
            # shares a name with the file handle.
            for ext in file_extension:
                bf = "%s%s" % (bad_filename, ext)
                filepath = os.path.join(archive.backup_root, backup_dir, bf)
                with open(filepath, "w+b") as f:
                    fallocate(f, 0, 1024)

            files = archive.get_files(backup_dir)
            self.assertEqual(len(files), 100)

            # Bad files should not be included in the files list
            for ext in file_extension:
                bf = "%s%s" % (bad_filename, ext)
                self.assertNotIn(bf, files)
Beispiel #12
0
def simple_fallocate_1kb_test():
    """fallocate on an empty temp file grows it to exactly 1 KiB."""
    with tempfile.NamedTemporaryFile() as tmp:
        # The temp file must start empty for the size check to be meaningful.
        size_before = os.path.getsize(tmp.name)
        assert size_before == 0
        fallocate(tmp, 0, 1024)
        assert os.path.getsize(tmp.name) == 1024
Beispiel #13
0
def simple_fallocate_1kb_test():
    """Allocating 1024 bytes into a fresh temp file sets its size to 1024."""
    with tempfile.NamedTemporaryFile() as scratch:
        path = scratch.name
        assert os.path.getsize(path) == 0
        # Reserve 1 KiB starting at offset 0.
        fallocate(scratch, 0, 1024)
        assert os.path.getsize(path) == 1024
Beispiel #14
0
# Rewrite the file chunk by chunk: punch a hole over each chunk, reallocate
# the range, and write the original data back, skipping `size` bytes between
# chunks.  NOTE(review): this fragment depends on `size` and `targetFile_f`
# being defined earlier, outside this excerpt — confirm before reuse.
seq_size = size * 32
data = targetFile_f.read(size)
start = targetFile_f.tell() - size  # file offset where `data` begins
count = 1

# seek(0, 2) moves to end-of-file and returns the new offset, i.e. the size.
fileSize = targetFile_f.seek(0, 2)

# Preallocate the whole file up front via the fallocate(1) CLI.
subprocess.check_call(
    ["fallocate", "-o",
     str(0), "-l",
     str(fileSize),
     str(targetFile_f.name)])
while data:
    # Deallocate the chunk's blocks without changing the file size...
    fallocate.fallocate(targetFile_f,
                        start,
                        size,
                        mode=fallocate.FALLOC_FL_PUNCH_HOLE
                        | fallocate.FALLOC_FL_KEEP_SIZE)
    # ...then allocate fresh blocks for the same range.
    fallocate.fallocate(targetFile_f, start, size, mode=0)
    # Equivalent CLI forms of the two calls above, kept for reference:
    #    subprocess.check_call(["fallocate", "-p", "-o", str(start), "-l", str(size), str(targetFile_f.name)])
    #    subprocess.check_call(["fallocate", "-o", str(start), "-l", str(size), str(targetFile_f.name)])
    targetFile_f.seek(start, 0)
    targetFile_f.write(data)
    #    os.fsync(targetFile_f.fileno())

    # Skip ahead `size` bytes, then read the next chunk to process.
    targetFile_f.seek(size, 1)
    data = targetFile_f.read(size)
    start = targetFile_f.tell() - size

# New pass: jump to the start of the next sequence region.
targetFile_f.seek(seq_size, 0)