Example #1
    def test_expire_race(self):
        # Create element
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)
        self.cache.upload(fh)

        # Make sure entry will be expired
        self.cache.cache.max_entries = 0
        def e_w_l():
            with llfuse.lock:
                self.cache.expire()

        # Lock it
        self.cache._lock_entry(inode, blockno, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(e_w_l)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(e_w_l)
            t2.start()

            # Release lock
            with llfuse.lock_released:
                safe_sleep(0.1)
                self.cache._unlock_entry(inode, blockno)
                t1.join_and_raise()
                t2.join_and_raise()

            assert len(self.cache.cache) == 0
        finally:
            self.cache._unlock_entry(inode, blockno, release_global=True,
                                     noerror=True)
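
The tests above exercise AsyncFn only through a small surface: it is constructed with a callable and its arguments, started like a thread, optionally marked as daemon, and join_and_raise() re-raises any exception that occurred in the worker. A minimal sketch of such a helper, written purely from that observed interface (the actual S3QL class may differ in detail), could look like this:

import sys
import threading

class AsyncFn(threading.Thread):
    """Sketch: run fn(*args) in a thread and re-raise its exception on join.

    Only the interface used in the examples above is reproduced here
    (constructor, start(), daemon, is_alive(), join_and_raise()).
    """

    def __init__(self, fn, *args):
        super().__init__()
        self.fn = fn
        self.args = args
        self.exc_info = None

    def run(self):
        try:
            self.fn(*self.args)
        except BaseException:
            # Remember the exception so that the caller can re-raise it
            self.exc_info = sys.exc_info()

    def join_and_raise(self, timeout=None):
        '''Wait for the thread, then re-raise any exception it stored'''
        self.join(timeout)
        if self.exc_info is not None:
            raise self.exc_info[1].with_traceback(self.exc_info[2])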
Example #2
    def test_parallel_expire(self):
        # Create elements
        inode = self.inode
        for i in range(5):
            data1 = self.random_data(int(0.4 * self.max_obj_size))
            with self.cache.get(inode, i) as fh:
                fh.write(data1)

        # We want to expire just one element, but have
        # several threads running expire() simultaneously
        self.cache.cache.max_entries = 4
        def e_w_l():
            with llfuse.lock:
                self.cache.expire()

        # Lock first element so that we have time to start threads
        self.cache._lock_entry(inode, 0, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(e_w_l)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(e_w_l)
            t2.start()

            # Release lock
            with llfuse.lock_released:
                safe_sleep(0.1)
                self.cache._unlock_entry(inode, 0)
                t1.join_and_raise()
                t2.join_and_raise()

            assert len(self.cache.cache) == 4
        finally:
            self.cache._unlock_entry(inode, 0, release_global=True,
                                     noerror=True)
Example #3
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    try:
        options.storage_url = options.src_storage_url
        src_backend_factory = get_backend_factory(options, plain=True)

        options.storage_url = options.dst_storage_url
        dst_backend_factory = get_backend_factory(options, plain=True)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    src_backends = [ src_backend_factory() for _ in range(options.threads) ]
    dst_backends = [ dst_backend_factory() for _ in range(options.threads) ]

    queue = Queue(maxsize=options.threads)
    threads = []
    for (src_backend, dst_backend) in zip(src_backends, dst_backends):
        t = AsyncFn(copy_loop, queue, src_backend, dst_backend)
        # Don't wait for worker threads on interpreter exit; otherwise we
        # deadlock if the main thread terminates with an exception
        t.daemon = True
        t.start()
        threads.append(t)

    # List the source objects with a dedicated backend instance, so that no
    # connection is shared with one of the worker threads
    stamp1 = 0
    for (i, key) in enumerate(src_backend_factory()):
        stamp2 = time.time()
        if stamp2 - stamp1 > 1:
            stamp1 = stamp2
            sys.stdout.write('\rCopied %d objects so far...' % i)
            sys.stdout.flush()

            # Terminate early if any thread failed with an exception
            for t in threads:
                if not t.is_alive():
                    t.join_and_raise()

        # Avoid blocking if all threads terminated
        while True:
            try:
                queue.put(key, timeout=1)
            except QueueFull:
                pass
            else:
                break
            for t in threads:
                if not t.is_alive():
                    t.join_and_raise()
    sys.stdout.write('\n')

    queue.maxsize += len(threads)
    for t in threads:
        queue.put(None)

    for t in threads:
        t.join_and_raise()
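
The main() variants in this and the following examples use a few names that are not defined in the snippets themselves. Queue and QueueFull are presumably the standard library's queue.Queue and queue.Full (aliased so that the put/retry loop can catch the timeout), while AsyncFn, QuietError and DanglingStorageURLError come from the S3QL package. A plausible import header, with the exact S3QL module paths left out because they are not shown here:

import sys
import time
from queue import Queue, Full as QueueFull  # stdlib queue; Full aliased for the retry loop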
Example #4
def main(args=None):
    options = parse_args(args)
    setup_logging(options)

    try:
        options.storage_url = options.src_storage_url
        src_backend_factory = get_backend_factory(options.src_storage_url,
                                                  options.backend_options,
                                                  options.authfile,
                                                  raw=True)

        options.storage_url = options.dst_storage_url
        dst_backend_factory = get_backend_factory(options.dst_storage_url,
                                                  options.backend_options,
                                                  options.authfile,
                                                  raw=True)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    queue = Queue(maxsize=options.threads)
    threads = []
    for _ in range(options.threads):
        t = AsyncFn(copy_loop, queue, src_backend_factory, dst_backend_factory)
        # Don't wait for worker threads on interpreter exit; otherwise we
        # deadlock if the main thread terminates with an exception
        t.daemon = True
        t.start()
        threads.append(t)

    with src_backend_factory() as backend:
        stamp1 = 0
        for (i, key) in enumerate(backend):
            stamp2 = time.time()
            if stamp2 - stamp1 > 1:
                stamp1 = stamp2
                sys.stdout.write('\rCopied %d objects so far...' % i)
                sys.stdout.flush()

                # Terminate early if any thread failed with an exception
                for t in threads:
                    if not t.is_alive():
                        t.join_and_raise()

            # Avoid blocking if all threads terminated
            while True:
                try:
                    queue.put(key, timeout=1)
                except QueueFull:
                    pass
                else:
                    break
                for t in threads:
                    if not t.is_alive():
                        t.join_and_raise()
    sys.stdout.write('\n')

    queue.maxsize += len(threads)
    for t in threads:
        queue.put(None)

    for t in threads:
        t.join_and_raise()
Example #5
    def test_issue_241(self):

        inode = self.inode

        # Create block
        with self.cache.get(inode, 0) as fh:
            fh.write(self.random_data(500))

        # "Fill" cache
        self.cache.cache.max_entries = 0

        # Mock locking to reproduce race condition
        mlock = MockMultiLock(self.cache.mlock)
        with patch.object(self.cache, 'mlock', mlock):
            # Start first expiration run, will block in upload
            thread1 = AsyncFn(self.cache.expire)
            thread1.start()

            # Remove the object while the expiration thread waits
            # for it to become available.
            thread2 = AsyncFn(self.cache.remove, inode, 0, 1)
            thread2.start()
            mlock.yield_to(thread2)
            thread2.join_and_raise(timeout=10)
            assert not thread2.is_alive()

        # Create a new object for the same block
        with self.cache.get(inode, 0) as fh:
            fh.write(self.random_data(500))

        # Continue first expiration run
        mlock.yield_to(thread1, block=False)
        thread1.join_and_raise(timeout=10)
        assert not thread1.is_alive()
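
Example #5 relies on a MockMultiLock test double that is patched in place of the cache's mlock so the test controls exactly when each thread may acquire the per-object lock. Only its constructor and yield_to(thread, block=...) appear above; a rough sketch reconstructed from that usage alone (the real helper in the S3QL test suite may well differ) could be:

import threading

class MockMultiLock:
    """Sketch: lock stand-in that blocks each thread until the test yields to it.

    Interface reconstructed from test_issue_241 only; assumes the cache
    acquires the lock via "with self.mlock(obj_id): ...".
    """

    def __init__(self, real_mlock):
        self.real_mlock = real_mlock      # kept only for reference
        self.cond = threading.Condition()
        self.cleared = set()              # threads currently allowed to proceed

    def yield_to(self, thread, block=True):
        '''Allow *thread* to acquire; if *block*, wait until it has released again'''
        with self.cond:
            self.cleared.add(thread)
            self.cond.notify_all()
            if block:
                self.cond.wait_for(lambda: thread not in self.cleared, timeout=10)

    def __call__(self, *key):
        return self

    def __enter__(self):
        me = threading.current_thread()
        with self.cond:
            self.cond.wait_for(lambda: me in self.cleared, timeout=10)

    def __exit__(self, *exc_info):
        with self.cond:
            self.cleared.discard(threading.current_thread())
            self.cond.notify_all()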
Example #6
    def test_parallel_expire(self):
        # Create elements
        inode = self.inode
        for i in range(5):
            data1 = self.random_data(int(0.4 * self.max_obj_size))
            with self.cache.get(inode, i) as fh:
                fh.write(data1)

        # We want to expire just one element, but have
        # several threads running expire() simultaneously
        self.cache.cache.max_entries = 4

        # Lock first element so that we have time to start threads
        self.cache._lock_entry(inode, 0, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(self.cache.expire)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(self.cache.expire)
            t2.start()

            # Release lock
            self.cache._unlock_entry(inode, 0)
            t1.join_and_raise()
            t2.join_and_raise()

            assert len(self.cache.cache) == 4
        finally:
            self.cache._unlock_entry(inode,
                                     0,
                                     release_global=True,
                                     noerror=True)
Example #7
    def test_expire_race(self):
        # Create element
        inode = self.inode
        blockno = 1
        data1 = self.random_data(int(0.4 * self.max_obj_size))
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data1)
        assert self.cache.upload_if_dirty(fh)

        # Make sure entry will be expired
        self.cache.cache.max_entries = 0

        # Lock it
        self.cache._lock_entry(inode, blockno, release_global=True)

        try:
            # Start expiration, will block on lock
            t1 = AsyncFn(self.cache.expire)
            t1.start()

            # Start second expiration, will block
            t2 = AsyncFn(self.cache.expire)
            t2.start()

            # Release lock
            self.cache._unlock_entry(inode, blockno)
            t1.join_and_raise()
            t2.join_and_raise()

            assert len(self.cache.cache) == 0
        finally:
            self.cache._unlock_entry(inode,
                                     blockno,
                                     release_global=True,
                                     noerror=True)
Example #8
def main(args=None):
    options = parse_args(args)

    src_backend_factory = options.src_backend_factory
    dst_backend_factory = options.dst_backend_factory

    # Try to access both backends before starting threads
    try:
        src_backend_factory().lookup('s3ql_metadata')
        try:
            dst_backend_factory().lookup('some random object')
        except NoSuchObject:
            pass
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    queue = Queue(maxsize=options.threads)
    threads = []
    for _ in range(options.threads):
        t = AsyncFn(copy_loop, queue, src_backend_factory,
                    dst_backend_factory)
        # Don't wait for worker threads on interpreter exit; otherwise we
        # deadlock if the main thread terminates with an exception
        t.daemon = True
        t.start()
        threads.append(t)

    with src_backend_factory() as backend:
        stamp1 = 0
        for (i, key) in enumerate(backend):
            stamp2 = time.time()
            if stamp2 - stamp1 > 1:
                stamp1 = stamp2
                sys.stdout.write('\rCopied %d objects so far...' % i)
                sys.stdout.flush()

                # Terminate early if any thread failed with an exception
                for t in threads:
                    if not t.is_alive():
                        t.join_and_raise()

            # Avoid blocking if all threads terminated
            while True:
                try:
                    queue.put(key, timeout=1)
                except QueueFull:
                    pass
                else:
                    break
                for t in threads:
                    if not t.is_alive():
                        t.join_and_raise()
    sys.stdout.write('\n')

    queue.maxsize += len(threads)
    for t in threads:
        queue.put(None)

    for t in threads:
        t.join_and_raise()
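
The main() variants hand the work queue plus either backend instances (Example #3) or backend factories (Examples #4 and #8) to a copy_loop worker that is not shown. From the way the factory-based form is driven here (one object key per queue.put(), then one None sentinel per worker thread, with the backends usable as context managers), it presumably has roughly the following shape; the per-object transfer calls are placeholders, since the real backend API is not visible in these snippets:

def copy_loop(queue, src_backend_factory, dst_backend_factory):
    '''Sketch: copy the objects named on *queue* until a None sentinel arrives'''
    with src_backend_factory() as src_backend, dst_backend_factory() as dst_backend:
        while True:
            key = queue.get()
            if key is None:
                # Sentinel put by main() once per worker after all keys were queued
                break
            # Hypothetical transfer methods -- the actual backend interface
            # is not part of the examples above
            (data, metadata) = src_backend.fetch(key)
            dst_backend.store(key, data, metadata)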