Example #1
    def test_put_off(self):
        def _test_put(i):
            # PUT sets the node's power state to "off"
            requests.put('%s/nodes/fakenode%d?state=off' % (self.url, i))

        futures = []
        for i in range(self.count):
            future = self._executor.submit(_test_put, i)
            futures.append(future)
        waiters.wait_for_all(futures, 3600)
Example #2
    def test_delete(self):
        def _test_delete(i):
            requests.delete('%s/nodes/fakenode%d' % (self.host, i))

        futures = []
        for i in range(self.count):
            future = self._executor.submit(_test_delete, i)
            futures.append(future)
        waiters.wait_for_all(futures, 3600)
Example #3
    def test_show(self):
        def _test_get(i):
            requests.get('%s/nodes/fakenode%d' % (self.url, i))

        futures = []
        for i in range(self.count):
            future = self._executor.submit(_test_get, i)
            futures.append(future)
        waiters.wait_for_all(futures, 3600)
Example #4
File: nova.py Project: laashub-soa/watcher
    def _add_physical_layer(self):
        """Collects all information on compute nodes and instances

        Will collect all required compute node and instance information based
        on the host aggregates and availability zones. If aggregates and zones
        do not specify any compute nodes, all nodes are retrieved instead.

        The collection of information happens concurrently using the
        DecisionEngineThreadpool. The collection is parallelized in three
        steps: first, information about aggregates and zones is gathered.
        Second, for each compute node a task is submitted to get detailed
        information about that node. Finally, each of these submitted tasks
        will submit an additional task if the compute node contains
        instances. Before returning, this function waits for all instance
        tasks to complete.
        """

        compute_nodes = set()
        host_aggregates = self.model_scope.get("host_aggregates")
        availability_zones = self.model_scope.get("availability_zones")

        """Submit tasks to gather compute nodes from availability zones and
        host aggregates. Each task adds compute nodes to the set, this set is
        threadsafe under the assumption that CPython is used with the GIL
        enabled."""
        zone_aggregate_futures = {
            self.executor.submit(
                self._collect_aggregates, host_aggregates, compute_nodes),
            self.executor.submit(
                self._collect_zones, availability_zones, compute_nodes)
        }
        waiters.wait_for_all(zone_aggregate_futures)

        # if zones and aggregates did not contain any nodes get every node.
        if not compute_nodes:
            self.no_model_scope_flag = True
            all_nodes = self.call_retry(
                f=self.nova_helper.get_compute_node_list)
            compute_nodes = set(
                [node.hypervisor_hostname for node in all_nodes])
        LOG.debug("compute nodes: %s", compute_nodes)

        node_futures = [self.executor.submit(
            self.nova_helper.get_compute_node_by_name,
            node, servers=True, detailed=True)
            for node in compute_nodes]
        LOG.debug("submitted {0} jobs".format(len(compute_nodes)))

        # Futures will concurrently be added, only safe with CPython GIL
        future_instances = []
        self.executor.do_while_futures_modify(
            node_futures, self._compute_node_future, future_instances)

        # Wait for all instance jobs to finish
        waiters.wait_for_all(future_instances)
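
The docstring above describes a three-step fan-out: gather node names concurrently, submit one detail task per node, then wait on the instance tasks those lookups spawn. A minimal sketch of the same shape with plain futurist primitives (fetch_detail is a hypothetical stand-in for nova_helper.get_compute_node_by_name):

import futurist
from futurist import waiters


def fetch_detail(node):
    # hypothetical stand-in for nova_helper.get_compute_node_by_name
    return {'name': node, 'instances': []}


executor = futurist.ThreadPoolExecutor(max_workers=4)

# Step 1: concurrent collectors add names to a shared set
# (safe under the CPython GIL, as the example above notes).
nodes = set()
collectors = {
    executor.submit(nodes.update, {'node-a', 'node-b'}),
    executor.submit(nodes.update, {'node-c'}),
}
waiters.wait_for_all(collectors)

# Step 2: one detail task per discovered node.
detail_futures = [executor.submit(fetch_detail, n) for n in nodes]

# Step 3: wait for every detail task before continuing.
done, not_done = waiters.wait_for_all(detail_futures)
assert not not_done
executor.shutdown()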
Example #5
    def _do_collection(self, metrics, timestamp):
        def _get_result(metric):
            try:
                return self._collect(metric, timestamp)
            except collector.NoDataCollected:
                LOG.info(self._log_prefix + 'No data collected '
                         'for metric {metric} at timestamp {ts}'.format(
                             metric=metric, ts=timestamp))
                return metric, None
            except Exception as e:
                LOG.exception(
                    self._log_prefix + 'Error while collecting'
                    ' metric {metric} at timestamp {ts}: {e}. Exiting.'.format(
                        metric=metric, ts=timestamp, e=e))
                # FIXME(peschk_l): here we just exit, and the
                # collection will be retried during the next collect
                # cycle. In the future, we should implement a retrying
                # system in workers
                sys.exit(1)

        with futurist.ThreadPoolExecutor(
                max_workers=CONF.orchestrator.max_threads) as tpool:
            futs = [tpool.submit(_get_result, metric) for metric in metrics]
            LOG.debug(self._log_prefix +
                      'Collecting {} metrics.'.format(len(metrics)))
            results = [r.result() for r in waiters.wait_for_all(futs).done]
            LOG.debug(self._log_prefix + 'Collecting {} metrics took {}s '
                      'total, with {}s average'.format(
                          tpool.statistics.executed, tpool.statistics.runtime,
                          tpool.statistics.average_runtime))
        return dict(filter(lambda x: x[1] is not None, results))
Example #6
def _parallel_create(func, nodes, total, split_num):
    import eventlet
    eventlet.monkey_patch(os=False)
    import futurist
    from futurist import waiters
    data = []
    i = 0
    per_split = total // split_num  # integer division: slice bounds must be ints
    while i < split_num - 1:
        data.append({'nodes': nodes[i * per_split:(i + 1) * per_split]})
        i += 1
    data.append({'nodes': nodes[i * per_split:]})
    _executor = futurist.GreenThreadPoolExecutor(max_workers=split_num)
    j = 0
    futures = []
    while j < split_num:
        future = _executor.submit(func, data[j])
        futures.append(future)
        j += 1

    done, not_done = waiters.wait_for_all(futures, 3600)
    result = {'nodes': {}}
    for r in done:
        result['nodes'].update(r.result()['nodes'])

    return result
Example #7
def _parallel_update(func, names, patch, total, split_num):
    import eventlet
    eventlet.monkey_patch(os=False)
    import futurist
    from futurist import waiters
    patch_dicts = []
    patch_dict = {'nodes': [], "patches": patch}
    i = 0
    per_split = total // split_num  # integer division for the chunk size
    while i < total:
        if i != 0 and i % per_split == 0 and i != split_num * per_split:
            patch_dicts.append(patch_dict)
            patch_dict = {'nodes': [], "patches": patch}

        patch_dict['nodes'].append({'name': names[i]})
        i += 1
    patch_dicts.append(patch_dict)

    _executor = futurist.GreenThreadPoolExecutor(max_workers=split_num)
    futures = []
    for patch_dict in patch_dicts:
        future = _executor.submit(func, patch_dict)
        futures.append(future)

    done, not_done = waiters.wait_for_all(futures, 3600)
    result = {'nodes': {}}
    for r in done:
        result['nodes'].update(r.result()['nodes'])

    return result
Example #8
 def test_wait_for_all(self):
     fs = []
     for _i in range(0, 10):
         fs.append(self.executor.submit(
             mini_delay, use_eventlet_sleep=self.use_eventlet_sleep))
     done_fs, not_done_fs = waiters.wait_for_all(fs)
     self.assertEqual(len(fs), sum(f.result() for f in done_fs))
     self.assertEqual(0, len(not_done_fs))
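
wait_for_all returns a DoneAndNotDoneFutures named tuple, so its result can be unpacked as in the test above or read by attribute. When a timeout is passed (the 3600 in several examples here is seconds), any unfinished futures come back in not_done rather than an exception being raised. A small sketch, assuming a hypothetical slow_task helper that outlives the timeout:

import time

import futurist
from futurist import waiters


def slow_task():
    # hypothetical helper that sleeps past the short timeout below
    time.sleep(5)
    return 'done'


with futurist.ThreadPoolExecutor(max_workers=1) as executor:
    fs = [executor.submit(slow_task)]
    result = waiters.wait_for_all(fs, 0.1)
    # attribute access on the named tuple...
    print(len(result.done), len(result.not_done))  # 0 1
    # ...or tuple unpacking, as in the test above
    done, not_done = waiters.wait_for_all(fs)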
Example #9
    def test_post(self):
        def _test_post(data):
            requests.post("%s/nodes" % self.host, data=json.dumps(data))

        futures = []
        for i in range(self.count):
            data = {
                'name': 'fakenode%d' % i,
                'driver': 'ssh',
                'ondemand': True,
                'params': {
                    'user': '******',
                    'host': '11.114.120.101',
                    'port': '22',
                    'private_key': '/Users/longcheng/.ssh/id_rsa'
                }
            }
            future = self._executor.submit(_test_post, data)
            futures.append(future)
        waiters.wait_for_all(futures, 3600)
Example #10
    def _do_execute_collection(self, _get_result, metrics):
        """Execute the metric measurement collection

        When executing this method, a ZeroDivisionError might be raised.
        This happens when no executions have taken place in the
        `futurist.ThreadPoolExecutor`; then, when calling
        `average_runtime`, the exception is thrown. In such a case, there is
        no need for further action, and we can ignore the error.

        :param _get_result: the method to execute and get the metrics
        :param metrics: the list of metrics to be collected
        :return: the metrics measurements
        """
        results = []
        try:
            with futurist.ThreadPoolExecutor(
                    max_workers=CONF.orchestrator.max_threads) as tpool:

                futs = [tpool.submit(_get_result, metric)
                        for metric in metrics]

                LOG.debug(self._log_prefix +
                          'Collecting [{}] metrics.'.format(metrics))

                results = [r.result() for r in waiters.wait_for_all(futs).done]

                log_message = self._log_prefix + \
                    "Collecting {} metrics took {}s total, with {}s average"

                LOG.debug(log_message.format(tpool.statistics.executed,
                                             tpool.statistics.runtime,
                                             tpool.statistics.average_runtime))

        except ZeroDivisionError as zero_division_error:
            LOG.debug("Ignoring ZeroDivisionError for metrics [%s]: [%s].",
                      metrics, zero_division_error)

        return dict(filter(lambda x: x[1] is not None, results))
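
The broad try/except above is needed because, as the docstring notes, futurist's average_runtime statistic divides total runtime by the number of executed tasks and raises ZeroDivisionError when nothing has run. A narrower sketch that guards the statistics read instead of wrapping the whole collection:

import futurist

with futurist.ThreadPoolExecutor(max_workers=4) as tpool:
    pass  # nothing submitted, so tpool.statistics.executed == 0

stats = tpool.statistics
if stats.executed:
    # average_runtime is only safe once at least one task has executed
    print(stats.executed, stats.runtime, stats.average_runtime)
else:
    print('no executions; skipping average_runtime')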
Example #11
import futurist
from futurist import waiters
import random


def compute():
    return sum([random.randint(1, 100) for i in range(10000)])


with futurist.ThreadPoolExecutor(max_workers=8) as executor:
    futures = [executor.submit(compute) for _ in range(8)]
    print(executor.statistics)

results = waiters.wait_for_all(futures)
print(executor.statistics)

print(f"Results: {[r.result() for r in results.done]}")
Example #12
from futurist import waiters
import futurist
import random


#   pip install futurist


def compute():
    return sum([random.randint(1, 100) for i in range(1000000)])


with futurist.ThreadPoolExecutor(max_workers=8) as executor:
    futures = [executor.submit(compute) for _ in range(8)]


results = waiters.wait_for_all(futures)
print(executor.statistics)


print('Results: {}'.format([r.result() for r in results.done]))
Example #13
File: ovo_rpc.py Project: cubeek/neutron
 def wait(self):
     """Waits for all outstanding events to be dispatched."""
     done, not_done = waiters.wait_for_all(self.fts)
     if not not_done:
         del self.fts[:]
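
Because no timeout is passed, wait_for_all blocks until every future completes, so not_done is always empty here and the guard is purely defensive. A hedged variant, assuming a timeout is wanted, in which the guard becomes meaningful:

 def wait(self, timeout=None):
     """Waits up to timeout seconds for outstanding events to dispatch."""
     done, not_done = waiters.wait_for_all(self.fts, timeout)
     if not not_done:
         # only forget the futures once everything has been dispatched
         del self.fts[:]
     return not not_done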