Exemplo n.º 1
0
    def _fetch_list_locked(self, url, query_string, query_params, headers):
        """Submit a single fetch job to the finder's worker pool.

        Returns a FetchInProgress wrapping the result queue so the caller
        can collect the fetch result later.
        """
        full_url = "%s?%s" % (url, query_string)

        job_list = [(self._fetch, url, query_string, query_params, headers)]
        result_queue = pool_apply(self.store.finder.worker_pool(), job_list)

        log.debug('RemoteReader:: Storing FetchInProgress for %s' % full_url)
        return FetchInProgress(_Results(result_queue))
Exemplo n.º 2
0
    def _fetch_list_locked(self, url, query_string, query_params, headers):
        # Hand the fetch off to the finder's worker pool; the caller gets a
        # FetchInProgress wrapper it can poll for the eventual result.
        result_queue = pool_apply(
            self.store.finder.worker_pool(),
            [(self._fetch, url, query_string, query_params, headers)])

        log.debug('RemoteReader:: Storing FetchInProgress for %s'
                  % ('%s?%s' % (url, query_string)))
        return FetchInProgress(_Results(result_queue))
Exemplo n.º 3
0
    def test_named(self):
        """A pool requested by name is distinct from the default pool."""
        default_pool = pool.get_pool()
        named_pool = pool.get_pool(name='test')

        self.assertNotEqual(default_pool, named_pool)

        result_queue = pool.pool_apply(named_pool, [(lambda v: v, 'a')])

        self.assertEqual(result_queue.get(True, 1), 'a')
Exemplo n.º 4
0
    def test_named(self):
        # The pool requested by name must not be the default pool, and a
        # job applied to it must deliver its result through the queue.
        unnamed = pool.get_pool()
        named = pool.get_pool(name='test')
        self.assertNotEqual(unnamed, named)

        identity = lambda item: item
        result = pool.pool_apply(named, [(identity, 'a')])
        self.assertEqual(result.get(True, 1), 'a')
Exemplo n.º 5
0
    def test_named_no_worker_pool(self):
        """With no worker pool configured, get_pool returns None for both
        the default and a named pool, yet pool_apply still delivers the
        job result immediately."""
        default_pool = pool.get_pool()
        named_pool = pool.get_pool(name='test')

        self.assertIsNone(named_pool)
        self.assertIsNone(default_pool)

        result_queue = pool.pool_apply(named_pool, [(lambda v: v, 'a')])

        self.assertEqual(result_queue.get_nowait(), 'a')
Exemplo n.º 6
0
    def test_named_no_worker_pool(self):
        # When pooling is disabled both lookups come back as None, and
        # pool_apply must still make the result available without waiting.
        default = pool.get_pool()
        named = pool.get_pool(name='test')

        for candidate in (named, default):
            self.assertIsNone(candidate)

        echo = lambda value: value
        queue = pool.pool_apply(named, [(echo, 'a')])
        self.assertEqual(queue.get_nowait(), 'a')
Exemplo n.º 7
0
    def find_all(self, query):
        """Fan *query* out to every eligible finder and merge the results.

        Finder jobs run on the shared worker pool; results are collected
        until every job has reported or settings.REMOTE_FIND_TIMEOUT has
        elapsed.  Matching nodes are grouped by path and handed to
        self._list_nodes for the final listing.
        """
        start = time.time()
        jobs = []

        # Start local searches
        for finder in self.finders:
            # Support legacy finders by defaulting to 'local = True'
            is_local = not hasattr(finder, 'local') or finder.local
            if query.local and not is_local:
                continue
            # Skip finders explicitly flagged as disabled.
            if getattr(finder, 'disabled', False):
                continue
            jobs.append((finder.find_nodes, query))

        result_queue = pool_apply(get_pool(), jobs)

        # Group matching nodes by their path
        nodes_by_path = defaultdict(list)

        timeout = settings.REMOTE_FIND_TIMEOUT
        deadline = start + timeout
        done = 0
        total = len(jobs)

        while done < total:
            # Remaining time budget; may already be <= 0 near the deadline.
            wait_time = deadline - time.time()
            nodes = []

            try:
                nodes = result_queue.get(True, wait_time)

            # ValueError could happen if due to really unlucky timing wait_time
            # is negative
            except (Queue.Empty, ValueError):
                if time.time() > deadline:
                    log.debug("Timed out in find_nodes after %fs" % timeout)
                    break
                else:
                    # Woke early with time still on the clock; keep waiting.
                    continue

            log.debug("Got a find result after %fs" % (time.time() - start))
            done += 1
            for node in nodes or []:
                nodes_by_path[node.path].append(node)

        log.debug("Got all find results in %fs" % (time.time() - start))
        return self._list_nodes(query, nodes_by_path)
Exemplo n.º 8
0
    def find_all(self, query):
        """Fan *query* out to every eligible finder and merge the results.

        Finder jobs run on the shared worker pool; results are collected
        until every job has reported or settings.REMOTE_FIND_TIMEOUT has
        elapsed.  Matching nodes are grouped by path and handed to
        self._list_nodes for the final listing.
        """
        start = time.time()
        jobs = []

        # Start local searches
        for finder in self.finders:
            # Support legacy finders by defaulting to 'local = True'
            is_local = not hasattr(finder, 'local') or finder.local
            if query.local and not is_local:
                continue
            # Consistency fix: skip finders flagged as disabled, matching
            # the sibling implementation of find_all in this file.
            if getattr(finder, 'disabled', False):
                continue
            jobs.append((finder.find_nodes, query))

        result_queue = pool_apply(get_pool(), jobs)

        # Group matching nodes by their path
        nodes_by_path = defaultdict(list)

        timeout = settings.REMOTE_FIND_TIMEOUT
        deadline = start + timeout
        done = 0
        total = len(jobs)

        while done < total:
            # Remaining time budget; may already be <= 0 near the deadline.
            wait_time = deadline - time.time()
            nodes = []

            try:
                nodes = result_queue.get(True, wait_time)

            # ValueError could happen if due to really unlucky timing wait_time
            # is negative
            except (Queue.Empty, ValueError):
                if time.time() > deadline:
                    log.debug("Timed out in find_nodes after %fs" % timeout)
                    break
                else:
                    # Woke early with time still on the clock; keep waiting.
                    continue

            log.debug("Got a find result after %fs" % (time.time() - start))
            done += 1
            for node in nodes or []:
                nodes_by_path[node.path].append(node)

        log.debug("Got all find results in %fs" % (time.time() - start))
        return self._list_nodes(query, nodes_by_path)
Exemplo n.º 9
0
    def find_nodes(self, query):
        """Yield nodes for *query* from every available remote store.

        Store jobs run on the worker pool and results are yielded as they
        arrive, giving up once settings.REMOTE_FIND_TIMEOUT has elapsed.
        """
        start = time.time()
        jobs = []
        # Randomize the store order so the same store is not always
        # queried first.
        random.shuffle(self.remote_stores)
        for store in self.remote_stores:
            if store.available:
                jobs.append((store.find, query))

        queue = pool_apply(self.worker_pool(), jobs)

        timeout = settings.REMOTE_FIND_TIMEOUT
        deadline = start + timeout
        done = 0
        total = len(jobs)

        while done < total:
            # Remaining time budget; may already be <= 0 near the deadline.
            wait_time = deadline - time.time()
            nodes = []

            try:
                nodes = queue.get(True, wait_time)

            # ValueError could happen if due to really unlucky timing wait_time
            # is negative.
            except (Queue.Empty, ValueError):
                if time.time() > deadline:
                    log.debug("Timed out in find_nodes after %fs" % timeout)
                    break
                else:
                    # Woke early with time still on the clock; keep waiting.
                    continue

            log.debug("Got a remote find result after %fs" %
                      (time.time() - start))
            done += 1
            for node in nodes or []:
                yield node

        log.debug("Got all remote find results in %fs" % (time.time() - start))
Exemplo n.º 10
0
    def find_nodes(self, query):
        """Yield nodes matching *query* from all available remote stores.

        Store order is randomized before dispatch; results are yielded as
        they arrive until every job has reported or
        settings.REMOTE_FIND_TIMEOUT has elapsed.
        """
        started_at = time.time()
        random.shuffle(self.remote_stores)
        jobs = [(store.find, query)
                for store in self.remote_stores if store.available]

        results = pool_apply(self.worker_pool(), jobs)

        timeout = settings.REMOTE_FIND_TIMEOUT
        deadline = started_at + timeout
        remaining = len(jobs)

        while remaining > 0:
            try:
                nodes = results.get(True, deadline - time.time())
            # ValueError covers the rare case where the computed wait time
            # has already gone negative.
            except (Queue.Empty, ValueError):
                if time.time() > deadline:
                    log.debug("Timed out in find_nodes after %fs" % timeout)
                    break
                continue

            log.debug("Got a remote find result after %fs" % (time.time() - started_at))
            remaining -= 1
            for node in nodes or []:
                yield node

        log.debug("Got all remote find results in %fs" % (time.time() - started_at))
Exemplo n.º 11
0
 def test_basic(self):
     """Applying an empty job list to the default pool must not raise."""
     worker_pool = pool.get_pool()
     pool.pool_apply(worker_pool, [])
Exemplo n.º 12
0
 def test_basic(self):
     # Smoke test: dispatching zero jobs is a no-op and should succeed.
     pool.pool_apply(pool.get_pool(), [])