Example #1
    # Excerpt from S3QL's test suite: AsyncFn runs a function in a separate
    # thread, MockMultiLock wraps the cache's lock so the test can control
    # which thread may proceed (yield_to), and patch is unittest.mock.patch.
    def test_issue_241(self):
        inode = self.inode

        # Create block
        with self.cache.get(inode, 0) as fh:
            fh.write(self.random_data(500))

        # "Fill" cache
        self.cache.cache.max_entries = 0

        # Mock locking to reproduce race condition
        mlock = MockMultiLock(self.cache.mlock)
        with patch.object(self.cache, 'mlock', mlock):
            # Start first expiration run, will block in upload
            thread1 = AsyncFn(self.cache.expire)
            thread1.start()

            # Remove the object while the expiration thread waits
            # for it to become available.
            thread2 = AsyncFn(self.cache.remove, inode, 0, 1)
            thread2.start()
            mlock.yield_to(thread2)
            thread2.join_and_raise(timeout=10)
            assert not thread2.is_alive()

        # Create a new object for the same block
        with self.cache.get(inode, 0) as fh:
            fh.write(self.random_data(500))

        # Continue first expiration run
        mlock.yield_to(thread1, block=False)
        thread1.join_and_raise(timeout=10)
        assert not thread1.is_alive()
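
Both this test and the examples that follow rely on AsyncFn propagating worker exceptions to the calling thread through join_and_raise(). The following is a minimal sketch of such a helper, inferred only from the behavior visible in these snippets; the actual S3QL implementation may differ in details such as timeout handling:

import threading

class AsyncFnSketch(threading.Thread):
    """Run fn(*args) in a thread; re-raise its exception on join_and_raise()."""

    def __init__(self, fn, *args):
        super().__init__()
        self._fn = fn
        self._args = args
        self._exc = None

    def run(self):
        try:
            self._fn(*self._args)
        except BaseException as exc:
            # Remember the exception so the parent thread can re-raise it.
            self._exc = exc

    def join_and_raise(self, timeout=None):
        self.join(timeout)
        if self._exc is not None:
            raise self._exc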
Example #2
import sys
import time
from queue import Full as QueueFull, Queue

# AsyncFn, QuietError, copy_loop, parse_args, NoSuchObject and
# DanglingStorageURLError are defined in the surrounding S3QL code base.

def main(args=None):
    options = parse_args(args)

    src_backend_factory = options.src_backend_factory
    dst_backend_factory = options.dst_backend_factory

    # Try to access both backends before starting threads
    try:
        src_backend_factory().lookup('s3ql_metadata')
        try:
            dst_backend_factory().lookup('some random object')
        except NoSuchObject:
            pass
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    queue = Queue(maxsize=options.threads)
    threads = []
    for _ in range(options.threads):
        t = AsyncFn(copy_loop, queue, src_backend_factory,
                    dst_backend_factory)
        # Make the workers daemon threads: waiting for them at interpreter
        # exit would deadlock if the main thread terminates with an exception.
        t.daemon = True
        t.start()
        threads.append(t)

    with src_backend_factory() as backend:
        stamp1 = 0
        for (i, key) in enumerate(backend):
            stamp2 = time.time()
            if stamp2 - stamp1 > 1:
                stamp1 = stamp2
                sys.stdout.write('\rCopied %d objects so far...' % i)
                sys.stdout.flush()

                # Terminate early if any thread failed with an exception
                for t in threads:
                    if not t.is_alive():
                        t.join_and_raise()

            # Avoid blocking if all threads terminated
            while True:
                try:
                    queue.put(key, timeout=1)
                except QueueFull:
                    pass
                else:
                    break
                for t in threads:
                    if not t.is_alive():
                        t.join_and_raise()
    sys.stdout.write('\n')

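    # Raising maxsize lets the sentinel puts below succeed without blocking,
    # even if all workers have already terminated.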
    queue.maxsize += len(threads)
    for t in threads:
        queue.put(None)

    for t in threads:
        t.join_and_raise()
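
copy_loop itself is not shown in these snippets. A worker that matches the queue protocol used above (object keys in, None as the shutdown sentinel, one backend per worker opened from the factories) might look like the sketch below; copy_object is a hypothetical placeholder, since the snippets do not show S3QL's backend API:

def copy_loop(queue, src_backend_factory, dst_backend_factory):
    # Hypothetical worker, inferred from the call sites in main().
    with src_backend_factory() as src, dst_backend_factory() as dst:
        while True:
            key = queue.get()
            if key is None:
                # Shutdown sentinel enqueued by main() after the listing loop.
                break
            copy_object(src, dst, key)

def copy_object(src, dst, key):
    # Placeholder: the actual object-copy call is not part of the snippets.
    raise NotImplementedError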
Example #3
import sys
import time
from queue import Full as QueueFull, Queue

# A variant of the same function that opens one backend per worker up front.
# setup_logging, get_backend_factory, parse_args, AsyncFn, copy_loop,
# QuietError and DanglingStorageURLError again come from the S3QL code base.

def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    try:
        options.storage_url = options.src_storage_url
        src_backend_factory = get_backend_factory(options, plain=True)

        options.storage_url = options.dst_storage_url
        dst_backend_factory = get_backend_factory(options, plain=True)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    src_backends = [src_backend_factory() for _ in range(options.threads)]
    dst_backends = [dst_backend_factory() for _ in range(options.threads)]

    queue = Queue(maxsize=options.threads)
    threads = []
    for (src_backend, dst_backend) in zip(src_backends, dst_backends):
        t = AsyncFn(copy_loop, queue, src_backend, dst_backend)
        # Make the workers daemon threads: waiting for them at interpreter
        # exit would deadlock if the main thread terminates with an exception.
        t.daemon = True
        t.start()
        threads.append(t)

    stamp1 = 0
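    # Note that src_backends[-1] is also held by the last worker thread,
    # so the listing below shares a backend instance with that worker.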
    for (i, key) in enumerate(src_backends[-1]):
        stamp2 = time.time()
        if stamp2 - stamp1 > 1:
            stamp1 = stamp2
            sys.stdout.write('\rCopied %d objects so far...' % i)
            sys.stdout.flush()

            # Terminate early if any thread failed with an exception
            for t in threads:
                if not t.is_alive():
                    t.join_and_raise()

        # Avoid blocking if all threads terminated
        while True:
            try:
                queue.put(key, timeout=1)
            except QueueFull:
                pass
            else:
                break
            for t in threads:
                if not t.is_alive():
                    t.join_and_raise()
    sys.stdout.write('\n')

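    # As in Example #2: raising maxsize lets the sentinel puts succeed
    # without blocking, even if all workers have already terminated.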
    queue.maxsize += len(threads)
    for t in threads:
        queue.put(None)

    for t in threads:
        t.join_and_raise()
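
Compared with Example #2, this variant creates one backend instance per worker up front and passes the instances to copy_loop directly, instead of handing each worker a factory. It also performs the object listing on src_backends[-1], an instance the last worker received as well; the factory-based version sidesteps that sharing by opening a dedicated backend for the listing in a with block.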