    def test_is_error_to_release_unknown_sequence_number(self):
        sem = SlidingWindowSemaphore(3)
        sem.acquire('a', blocking=False)
        # Only sequence numbers that were actually acquired for the tag
        # can be released.
        with self.assertRaises(ValueError):
            sem.release('a', 1)
    def test_error_to_release_unknown_tag(self):
        sem = SlidingWindowSemaphore(3)
        # Releasing for a tag that never acquired anything is an error.
        with self.assertRaises(ValueError):
            sem.release('a', 0)
    def test_can_handle_multiple_tags_released(self):
        sem = SlidingWindowSemaphore(4)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
        sem.acquire('b', blocking=False)
        sem.acquire('b', blocking=False)

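        # Sequence numbers are tracked per tag, but capacity is shared:
        # releasing sequence 1 for either tag frees nothing while that
        # tag still has sequence 0 outstanding.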
        sem.release('b', 1)
        sem.release('a', 1)
        self.assertEqual(sem.current_count(), 0)

        sem.release('b', 0)
        self.assertEqual(sem.acquire('a', blocking=False), 2)

        sem.release('a', 0)
        self.assertEqual(sem.acquire('b', blocking=False), 2)
    def test_release_counters_can_increment_counter_repeatedly(self):
        sem = SlidingWindowSemaphore(3)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)

        # These two releases don't increment the counter
        # because we're waiting on 0.
        sem.release('a', 1)
        sem.release('a', 2)
        self.assertEqual(sem.current_count(), 0)
        # But as soon as we release 0, we free up 0, 1, and 2.
        sem.release('a', 0)
        self.assertEqual(sem.current_count(), 3)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
    def test_counter_release_only_on_min_element(self):
        sem = SlidingWindowSemaphore(3)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)

        # The count only increases when we free the min
        # element.  This means if we're currently failing to
        # acquire now:
        with self.assertRaises(NoResourcesAvailable):
            sem.acquire('a', blocking=False)

        # Then freeing a non-min element:
        sem.release('a', 1)

        # doesn't change anything.  We still fail to acquire.
        with self.assertRaises(NoResourcesAvailable):
            sem.acquire('a', blocking=False)
        self.assertEqual(sem.current_count(), 0)
    def test_can_acquire_a_range(self):
        sem = SlidingWindowSemaphore(3)
        self.assertEqual(sem.acquire('a', blocking=False), 0)
        self.assertEqual(sem.acquire('a', blocking=False), 1)
        self.assertEqual(sem.acquire('a', blocking=False), 2)
        sem.release('a', 0)
        sem.release('a', 1)
        sem.release('a', 2)
        # Now the window is fully released, so we can acquire a full
        # range again; sequence numbers keep increasing.
        self.assertEqual(sem.acquire('a', blocking=False), 3)
        self.assertEqual(sem.acquire('a', blocking=False), 4)
        self.assertEqual(sem.acquire('a', blocking=False), 5)
        self.assertEqual(sem.current_count(), 0)
    def test_can_check_in_partial_range(self):
        sem = SlidingWindowSemaphore(4)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)
        sem.acquire('a', blocking=False)

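        # Nothing is freed by the out-of-order releases of 1 and 3; once 0
        # (the minimum outstanding element) is released, the window slides
        # past 0 and 1 but stops at the still-outstanding 2, freeing two
        # slots.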
        sem.release('a', 1)
        sem.release('a', 3)
        sem.release('a', 0)
        self.assertEqual(sem.current_count(), 2)
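
# Illustrative sketch only, not ibm_s3transfer's implementation: a minimal
# rendering of the sliding-window behavior the tests above exercise.
# acquire() hands out increasing per-tag sequence numbers, and release()
# only frees capacity once the lowest outstanding sequence number for a
# tag is released, then slides forward over any numbers already released
# out of order. NoResourcesAvailable is the exception the tests expect;
# SlidingWindowSketch itself is a hypothetical stand-in, not the library's
# SlidingWindowSemaphore.
class SlidingWindowSketch:
    def __init__(self, count):
        self._count = count
        self._next_seq = {}   # tag -> next sequence number to hand out
        self._lowest = {}     # tag -> lowest sequence number not yet released
        self._released = {}   # tag -> sequence numbers released out of order

    def current_count(self):
        return self._count

    def acquire(self, tag, blocking=True):
        # Only the non-blocking path is sketched here.
        if self._count == 0:
            raise NoResourcesAvailable()
        self._count -= 1
        seq = self._next_seq.get(tag, 0)
        self._next_seq[tag] = seq + 1
        self._lowest.setdefault(tag, 0)
        self._released.setdefault(tag, set())
        return seq

    def release(self, tag, seq):
        if tag not in self._next_seq or not (
                self._lowest[tag] <= seq < self._next_seq[tag]):
            raise ValueError('Unknown tag or sequence number')
        if seq != self._lowest[tag]:
            # Not the minimum outstanding element; remember the release
            # but do not free any capacity yet.
            self._released[tag].add(seq)
            return
        # The minimum element was released: slide the window forward over
        # every contiguously released number, freeing one slot per number.
        self._lowest[tag] = seq + 1
        self._count += 1
        while self._lowest[tag] in self._released[tag]:
            self._released[tag].discard(self._lowest[tag])
            self._lowest[tag] += 1
            self._count += 1
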
    def __init__(self, client, config=None, osutil=None, executor_cls=None):
        """A transfer manager interface for Amazon S3

        :param client: Client to be used by the manager
        :param config: TransferConfig to associate specific configurations.
            If not provided, a default TransferConfig is used.
        :param osutil: OSUtils object to use for OS-related behavior when
            used with the transfer manager. If not provided, a default
            OSUtils object is used.

        :type executor_cls: ibm_s3transfer.futures.BaseExecutor
        :param executor_cls: The class of executor to use with the transfer
            manager. By default, concurrent.futures.ThreadPoolExecutor is used.
        """
        self._client = client
        self._config = config
        if config is None:
            self._config = TransferConfig()
        self._osutil = osutil
        if osutil is None:
            self._osutil = OSUtils()
        self._coordinator_controller = TransferCoordinatorController()
        # A counter to create unique IDs for each transfer submitted.
        self._id_counter = 0

        # The executor responsible for making S3 API transfer requests
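        # Tagged semaphores bound how many upload and download chunks may
        # be buffered in memory at once; the download side uses the
        # SlidingWindowSemaphore exercised by the tests above.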
        self._request_executor = BoundedExecutor(
            max_size=self._config.max_request_queue_size,
            max_num_threads=self._config.max_request_concurrency,
            tag_semaphores={
                IN_MEMORY_UPLOAD_TAG: TaskSemaphore(
                    self._config.max_in_memory_upload_chunks),
                IN_MEMORY_DOWNLOAD_TAG: SlidingWindowSemaphore(
                    self._config.max_in_memory_download_chunks)
            },
            executor_cls=executor_cls
        )

        # The executor responsible for submitting the necessary tasks to
        # perform the desired transfer
        self._submission_executor = BoundedExecutor(
            max_size=self._config.max_submission_queue_size,
            max_num_threads=self._config.max_submission_concurrency,
            executor_cls=executor_cls
        )

        # There is one thread available for writing to disk. It will handle
        # downloads for all files.
        self._io_executor = BoundedExecutor(
            max_size=self._config.max_io_queue_size,
            max_num_threads=1,
            executor_cls=executor_cls
        )

        # The component responsible for limiting bandwidth usage if it
        # is configured.
        self._bandwidth_limiter = None
        if self._config.max_bandwidth is not None:
            logger.debug(
                'Setting max_bandwidth to %s', self._config.max_bandwidth)
            leaky_bucket = LeakyBucket(self._config.max_bandwidth)
            self._bandwidth_limiter = BandwidthLimiter(leaky_bucket)

        self._register_handlers()
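
# A minimal usage sketch, not from the source: it assumes ibm_s3transfer
# mirrors the upstream s3transfer API (TransferManager.upload() returning
# a future whose result() blocks until the transfer completes) and that an
# ibm_boto3 S3 client is available; the bucket, key, and file name below
# are placeholders.
#
#     client = ibm_boto3.client('s3')
#     config = TransferConfig(max_request_concurrency=10)
#     with TransferManager(client, config=config) as manager:
#         future = manager.upload('/tmp/example.txt', 'my-bucket',
#                                 'example.txt')
#         future.result()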