Example #1
def test_get_hosts_no_tty_force(isatty, stderr):
    """Calling get_hosts() with or without a TTY with --force should return the list of hosts."""
    args = cli.parse_args(['--force', 'D{host1}', 'command1'])
    config = {'backend': 'direct'}
    assert cli.get_hosts(args, config) == nodeset('host1')
    isatty.return_value = True
    assert cli.get_hosts(args, config) == nodeset('host1')
    assert stderr.called
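
The `isatty` and `stderr` arguments suggest `mock.patch` decorators that this listing has stripped. A plausible reconstruction, assuming the patch targets live in `cumin.cli` (with stacked decorators, the bottom-most patch maps to the first test argument):

# Hypothetical reconstruction; the decorators are not shown in the listing.
@mock.patch('cumin.cli.stderr')              # injected as the second argument
@mock.patch('cumin.cli.sys.stdout.isatty')   # injected as the first argument
def test_get_hosts_no_tty_force(isatty, stderr):
    ...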
Example #2
    def test_instantiation(self):
        """Raise if instantiated directly, should return an instance of BaseWorker if inherited."""
        target = transports.Target(cumin.nodeset('node1'))
        with pytest.raises(TypeError):
            transports.BaseWorker({}, target)  # pylint: disable=abstract-class-instantiated

        assert isinstance(
            ConcreteBaseWorker({},
                               transports.Target(cumin.nodeset('node[1-2]'))),
            transports.BaseWorker)
Example #3
def test_execute_complex_global():
    """Executing a valid complex query should return the matching hosts."""
    query = Query({})
    hosts = query.execute(
        '(D{(host1 or host2) and host[1-5]}) or ((D{host[100-150]} and not D{host1[20-30]}) and D{host1[01,15,30]})'
    )
    assert hosts == nodeset('host[1-2,101,115]')
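
Walking through the expected result step by step:

# D{(host1 or host2) and host[1-5]}         -> host[1-2]
# D{host[100-150]} and not D{host1[20-30]}  -> host[100-119,131-150]
# ... and D{host1[01,15,30]}                -> host[101,115]  (host130 was removed above)
# union of the two sides                    -> host[1-2,101,115]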
Example #4
    def ev_timer(self, timer):
        """Schedule the current command on the next node or the next command on the first batch of nodes.

        This callback is triggered by `ClusterShell` when a scheduled `Task.timer()` goes off.

        :Parameters:
            according to parent :py:meth:`ClusterShell.Event.EventHandler.ev_timer`.
        """
        success_ratio = 1 - ((self.counters['failed'] + self.counters['timeout']) / self.counters['total'])

        node = None
        if success_ratio >= self.success_threshold:
            # Success ratio is still good, looking for the next node
            with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
                for new_node in self.nodes.values():
                    if new_node.state.is_pending:
                        # Found the next node where to execute all the commands
                        node = new_node
                        node.state.update(State.scheduled)
                        break

        if node is not None:
            # Schedule the execution of the first command on the next node with ClusterShell
            command = node.commands[0]
            self.logger.debug("next_node=%s, timeout=%s, command='%s'", node.name, command.timeout, command.command)
            Task.task_self().shell(
                command.command, handler=timer.eh, timeout=command.timeout, nodes=nodeset(node.name))
        else:
            self.logger.debug('No more nodes left')
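
As a quick sanity check of the ratio computed above (the threshold values are illustrative, not cumin defaults):

counters = {'failed': 1, 'timeout': 1, 'total': 10}
success_ratio = 1 - ((counters['failed'] + counters['timeout']) / counters['total'])
assert success_ratio == 0.8  # would pass a 0.75 success_threshold, fail a 0.9 one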
Example #5
    def test_execute_all(self, keystone_identity, keystone_session,
                         keystone_client, nova_client):
        """Calling execute() with a query that select all hosts should return the list of all hosts."""
        keystone_client.return_value.projects.list.return_value = [
            Project('project1'), Project('project2')
        ]
        nova_client.return_value.servers.list.side_effect = [[
            Server('host1'), Server('host2')
        ], [Server('host1'), Server('host2')]]

        hosts = self.query.execute('*')
        assert hosts == nodeset('host[1-2].project[1-2]')

        assert keystone_identity.call_count == 3
        assert keystone_session.call_count == 3
        keystone_client.assert_called_once_with(session=keystone_session(),
                                                timeout=10)
        assert nova_client.call_args_list == [
            mock.call('2',
                      endpoint_type='public',
                      session=keystone_session(),
                      timeout=10),
            mock.call('2',
                      endpoint_type='public',
                      session=keystone_session(),
                      timeout=10)
        ]
        assert nova_client().servers.list.call_args_list == [
            mock.call(search_opts={
                'vm_state': 'ACTIVE',
                'status': 'ACTIVE'
            })
        ] * 2
Example #6
    def __init__(self, config):
        """Query constructor for the test external backend.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQuery.__init__`.

        """
        super().__init__(config)
        self.hosts = nodeset()
Example #7
    def test_ev_read_single_host(self, tqdm):
        """Calling ev_read() should print the worker message if matching a single host."""
        self.target = Target(nodeset('node1'))
        self.handler = ConcreteBaseEventHandler(self.target, self.commands)

        output = b'node1 output'
        self.worker.nodes = self.target.hosts
        self.handler.ev_read(self.worker, self.target.hosts[0], self.worker.SNAME_STDOUT, output)
        assert tqdm.write.call_args[0][0] == output.decode()
Example #8
    def setup_method(self, *args):  # pylint: disable=arguments-differ
        """Initialize default properties and instances."""
        self.target = Target(nodeset('node[1-2]'))
        self.commands = [Command('command1', ok_codes=[0, 100]), Command('command2', timeout=5)]
        self.worker = mock.MagicMock()
        self.worker.current_node = 'node1'
        self.worker.command = 'command1'
        self.worker.nodes = self.target.hosts
        self.handler = None
        self.args = args
Example #9
    def setup_method(self, _):
        """Initialize default properties and instances."""
        # pylint: disable=attribute-defined-outside-init
        self.worker = ConcreteBaseWorker(
            {}, transports.Target(cumin.nodeset('node[1-2]')))
        self.commands = [transports.Command('command1'), transports.Command('command2')]
Example #10
    def test_init(self):
        """Constructor should save config and set environment variables."""
        env_dict = {'ENV_VARIABLE': 'env_value'}
        config = {'transport': 'test_transport', 'environment': env_dict}

        assert transports.os.environ == {}
        worker = ConcreteBaseWorker(
            config, transports.Target(cumin.nodeset('node[1-2]')))
        assert transports.os.environ == env_dict
        assert worker.config == config
Example #11
def test_complex_query(query_requests):
    """Calling execute() with a complex query should return the exptected structure."""
    category = 'R'
    endpoint = query_requests[0].endpoints[category]
    key = query_requests[0].hosts_keys[endpoint]
    query_requests[1].register_uri('GET', query_requests[0].url + endpoint + '?query=', status_code=200, json=[
        {key: endpoint + '_host1', 'key': 'value1'}, {key: endpoint + '_host2', 'key': 'value2'}])

    hosts = query_requests[0].execute('(resources_host1 or resources_host2) and R:Class = MyClass')
    assert hosts == nodeset('resources_host[1-2]')
    assert query_requests[1].call_count == 1
Example #12
    def _parse_token(self, token):
        """Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQuery._parse_token`.
        """
        if isinstance(token, str):
            return

        token_dict = token.asDict()
        self.hosts |= nodeset(token_dict['hosts'])
Example #13
def test_execute_valid_global_with_nested_aliases():
    """Executing a valid query with nested aliases should return the matching hosts."""
    query = Query({
        'aliases': {
            'group1': 'D{host1 or host2}',
            'group2': 'D{host3 or host4}',
            'all': 'A:group1 or A:group2',
        }
    })
    hosts = query.execute('A:all')
    assert hosts == nodeset('host[1-4]')
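
The test shows that aliases may reference other aliases. A naive fixed-point expansion, offered purely as an illustration of these semantics (cumin resolves aliases through its query grammar, not like this):

def expand_aliases(query, aliases):
    """Illustrative sketch: substitute 'A:name' tokens until none remain.

    Note: no cycle detection, so a self-referencing alias would loop forever.
    """
    changed = True
    while changed:
        changed = False
        for name, replacement in aliases.items():
            token = 'A:' + name
            if token in query:
                query = query.replace(token, '(' + replacement + ')')
                changed = True
    return query

assert expand_aliases('A:all', {
    'group1': 'D{host1 or host2}',
    'group2': 'D{host3 or host4}',
    'all': 'A:group1 or A:group2',
}) == '((D{host1 or host2}) or (D{host3 or host4}))'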
Example #14
    def _execute(self):
        """Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQuery._execute`.
        """
        hosts = nodeset()
        self._loop_stack(
            hosts, self.stack
        )  # The hosts NodeSet is updated in place while looping the stack
        self.logger.debug('Found %d hosts', len(hosts))

        return hosts
Example #15
    def test_ev_hup_ok(self):
        """Calling ev_hup with a worker that has zero exit status should enqueue the next command."""
        self.handler.ev_pickup(self.worker, self.worker.current_node)
        self.handler.ev_hup(self.worker, self.worker.current_node, 0)
        self.worker.task.shell.assert_called_once_with(
            'command2', handler=self.handler, timeout=5, stdin=False, nodes=nodeset(self.worker.current_node))

        # Calling it again
        self.worker.command = 'command2'
        self.handler.ev_pickup(self.worker, self.worker.current_node)
        self.handler.ev_hup(self.worker, self.worker.current_node, 0)
        assert self.handler.counters['success'] == 1
        assert self.handler.progress.update_success.called
Example #16
    def test_execute_project_domain(self, keystone_identity, keystone_session,
                                    keystone_client, nova_client):
        """When the domain suffix is configured, it should append it to all hosts."""
        nova_client.return_value.servers.list.return_value = [
            Server('host1'), Server('host2')
        ]
        self.config['openstack']['domain_suffix'] = 'servers.local'
        query = openstack.OpenStackQuery(self.config)

        hosts = query.execute('project:project1')
        assert hosts == nodeset('host[1-2].project1.servers.local')

        assert keystone_identity.call_count == 1
        assert keystone_session.call_count == 1
        keystone_client.assert_not_called()
Example #17
    def test_execute_query_params(self, keystone_identity, keystone_session,
                                  keystone_client, nova_client):
        """When the query_params are set, they must be loaded automatically."""
        nova_client.return_value.servers.list.return_value = [
            Server('host1'), Server('host2')
        ]
        self.config['openstack']['query_params'] = {'project': 'project1'}
        query = openstack.OpenStackQuery(self.config)

        hosts = query.execute('*')
        assert hosts == nodeset('host[1-2].project1')

        assert keystone_identity.call_count == 1
        assert keystone_session.call_count == 1
        keystone_client.assert_not_called()
Example #18
    def _loop_stack(self, hosts, stack_element):
        """Loop the stack generated while parsing the query and aggregate the results.

        Arguments:
            hosts (ClusterShell.NodeSet.NodeSet): the hosts to be updated with the current stack element results. This
                object is updated in place by reference.
            stack_element (dict): the stack element to iterate.

        """
        if stack_element['hosts'] is None:
            element_hosts = nodeset()
            for child in stack_element['children']:
                self._loop_stack(element_hosts, child)
        else:
            element_hosts = stack_element['hosts']

        self._aggregate_hosts(hosts, element_hosts, stack_element['bool'])
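
A plausible sketch of the `_aggregate_hosts()` companion called on the last line, assuming it maps the parsed boolean operator onto ClusterShell NodeSet set operations; updating `hosts` in place is what makes the pass-by-reference contract above work:

    def _aggregate_hosts(self, hosts, element_hosts, bool_operator):
        """Sketch (assumed implementation): fold element_hosts into hosts in place."""
        if bool_operator == 'or' or bool_operator is None:  # first element carries no operator
            hosts |= element_hosts  # union
        elif bool_operator == 'and':
            hosts &= element_hosts  # intersection
        elif bool_operator == 'and not':
            hosts -= element_hosts  # difference
        elif bool_operator == 'xor':
            hosts ^= element_hosts  # symmetric difference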
Example #19
    def test_execute(self):
        """Calling execute() should return the list of hosts."""
        assert self.query.execute('host1 or host2') == nodeset('host[1-2]')
        assert self.query.execute('host1 and host2') == nodeset()
        assert self.query.execute('host1 and not host2') == nodeset('host1')
        assert self.query.execute('host[1-5] xor host[3-7]') == nodeset('host[1-2,6-7]')
        assert self.query.execute('host1 or (host[10-20] and not host15)') == nodeset('host[1,10-14,16-20]')
        assert self.query.execute('(host1 or host[2-3]) and not (host[3-9] or host2)') == nodeset('host1')
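
The expected results follow directly from ClusterShell's NodeSet set operators, onto which the query keywords of this backend presumably map:

from ClusterShell.NodeSet import NodeSet

assert NodeSet('host[1-5]') ^ NodeSet('host[3-7]') == NodeSet('host[1-2,6-7]')     # xor
assert NodeSet('host[10-20]') - NodeSet('host15') == NodeSet('host[10-14,16-20]')  # and not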
Example #20
    def _execute(self):
        """Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQuery._execute`.

        Returns:
            ClusterShell.NodeSet.NodeSet: with the FQDNs of the matching hosts.

        """
        if self.search_project is None:
            hosts = nodeset()
            for project in self._get_projects():
                hosts |= self._get_project_hosts(project)
        else:
            hosts = self._get_project_hosts(self.search_project)

        return hosts
Example #21
    def setup_method(self, _, task_self):  # pylint: disable=arguments-differ
        """Initialize default properties and instances."""
        self.config = {
            'clustershell': {
                'ssh_options': ['-o StrictHostKeyChecking=no', '-o BatchMode=yes'],
                'fanout': 3}}

        self.target = Target(nodeset('node[1-2]'))
        self.worker = clustershell.worker_class(self.config, self.target)
        self.commands = [Command('command1'), Command('command2', ok_codes=[0, 100], timeout=5)]
        self.task_self = task_self
        # Mock default handlers
        clustershell.DEFAULT_HANDLERS = {
            'sync': mock.MagicMock(spec_set=clustershell.SyncEventHandler),
            'async': mock.MagicMock(spec_set=clustershell.AsyncEventHandler)}

        # Initialize the worker
        self.worker.commands = self.commands
Example #22
    def _parse_token(self, token):
        """Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQuery._parse_token`.

        Raises:
            cumin.backends.InvalidQueryError: on internal parsing error.

        """
        if isinstance(token, str):
            return

        token_dict = token.asDict()
        # post-process types
        if 'quoted' in token_dict:
            token_dict['value'] = ParsedString(token_dict['quoted'], True)
            del token_dict['quoted']
        elif 'value' in token_dict:
            token_dict['value'] = ParsedString(token_dict['value'], False)

        # Based on the token type build the corresponding query object
        if 'open_subgroup' in token_dict:
            self._open_subgroup()
            for subtoken in token:
                self._parse_token(subtoken)
            self._close_subgroup()

        elif 'bool' in token_dict:
            self._add_bool(token_dict['bool'])

        elif 'hosts' in token_dict:
            token_dict['hosts'] = nodeset(token_dict['hosts'])
            self._add_hosts(**token_dict)

        elif 'category' in token_dict:
            self._add_category(**token_dict)

        else:  # pragma: no cover - this should never happen
            raise InvalidQueryError(
                "No valid key found in token, one of bool|hosts|category expected: {token}"
                .format(token=token_dict))
Example #23
    def test_execute_project(self, keystone_identity, keystone_session,
                             keystone_client, nova_client):
        """Calling execute() with a query that select all hosts in a project should return the list of hosts."""
        nova_client.return_value.servers.list.return_value = [
            Server('host1'), Server('host2')
        ]

        hosts = self.query.execute('project:project1')
        assert hosts == nodeset('host[1-2].project1')

        assert keystone_identity.call_count == 1
        assert keystone_session.call_count == 1
        keystone_client.assert_not_called()
        nova_client.assert_called_once_with('2',
                                            endpoint_type='public',
                                            session=keystone_session(),
                                            timeout=10)
        nova_client().servers.list.assert_called_once_with(search_opts={
            'vm_state': 'ACTIVE',
            'status': 'ACTIVE'
        })
Example #24
    def ev_hup(self, worker, node, rc):
        """Command execution completed on a node.

        This callback is triggered by ClusterShell for each node when it completes the execution of a command.
        Enqueue the next command if the success criteria are met, track the failure otherwise. Update the progress
        bars accordingly.

        :Parameters:
            according to parent :py:meth:`ClusterShell.Event.EventHandler.ev_hup`.
        """
        self.logger.debug("node=%s, rc=%d, command='%s'", node, rc, worker.command)

        schedule_next = False
        schedule_timer = False
        with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
            curr_node = self.nodes[node]

            ok_codes = curr_node.commands[curr_node.running_command_index].ok_codes
            if rc in ok_codes or not ok_codes:
                if curr_node.running_command_index == (len(curr_node.commands) - 1):
                    self.progress.update_success()
                    self.counters['success'] += 1
                    curr_node.state.update(State.success)
                    schedule_timer = True  # Continue the execution on other nodes if criteria are met
                else:
                    schedule_next = True  # Continue the execution in the current node with the next command
            else:
                self.progress.update_failed()
                self.counters['failed'] += 1
                curr_node.state.update(State.failed)
                schedule_timer = True  # Continue the execution on other nodes if criteria are met

        if schedule_next:
            # Schedule the execution of the next command on this node with ClusterShell
            command = curr_node.commands[curr_node.running_command_index + 1]
            worker.task.shell(
                command.command, nodes=nodeset(node), handler=worker.eh, timeout=command.timeout, stdin=False)
        elif schedule_timer:
            # Schedule a timer to allow running all the commands on the next available node
            worker.task.timer(self.target.batch_sleep, worker.eh)
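
Note the success test `rc in ok_codes or not ok_codes` above: an empty ok_codes list accepts every exit code. A minimal illustration of that predicate:

def rc_is_ok(rc, ok_codes):
    """Mirror of the check in ev_hup: empty ok_codes means any exit code is OK."""
    return rc in ok_codes or not ok_codes

assert rc_is_ok(100, [0, 100])    # explicitly allowed
assert rc_is_ok(42, [])           # empty list: everything passes
assert not rc_is_ok(1, [0, 100])  # not in the allowed list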
Example #25
def test_get_hosts_ok(isatty, mocked_input, stderr):
    """Calling get_hosts() should query the backend and return the list of hosts."""
    args = cli.parse_args(['D{host1}', 'command1'])
    config = {'backend': 'direct'}
    isatty.return_value = True

    mocked_input.return_value = 'y'
    assert cli.get_hosts(args, config) == nodeset('host1')

    mocked_input.return_value = 'n'
    with pytest.raises(cli.KeyboardInterruptError):
        cli.get_hosts(args, config)

    mocked_input.return_value = 'invalid_answer'
    with pytest.raises(cli.KeyboardInterruptError):
        cli.get_hosts(args, config)

    mocked_input.return_value = ''
    with pytest.raises(cli.KeyboardInterruptError):
        cli.get_hosts(args, config)

    assert stderr.called
Example #26
def test_execute_valid_global():
    """Executing a valid query should return the matching hosts."""
    query = Query({})
    hosts = query.execute('D{(host1 or host2) and host[1-5]}')
    assert hosts == nodeset('host[1-2]')
Example #27
def test_nodeset_empty():
    """Calling nodeset() without parameter should return an instance of ClusterShell NodeSet with no resolver."""
    nodeset = cumin.nodeset()
    assert isinstance(nodeset, NodeSet)
    assert nodeset == NodeSet()
    assert nodeset._resolver is None  # pylint: disable=protected-access
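
A minimal sketch of a nodeset() helper that would satisfy this test, assuming ClusterShell's RESOLVER_NOGROUP (which disables group resolution and leaves _resolver set to None):

from ClusterShell.NodeSet import NodeSet, RESOLVER_NOGROUP

def nodeset(nodes=None):
    """Sketch of cumin.nodeset: a NodeSet with no group resolver."""
    return NodeSet(nodes, resolver=RESOLVER_NOGROUP)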
Example #28
def test_execute_subgroups():
    """Executing a query with multiple subgroups should return the matching hosts."""
    query = Query({})
    hosts = query.execute('(D{host1} or D{host2}) and not (D{host1})')
    assert hosts == nodeset('host2')
Example #29
def test_endpoints(query_requests, query, expected):
    """Calling execute() with a query that goes to the nodes endpoint should return the list of hosts."""
    hosts = query_requests[0].execute(query)
    assert hosts == nodeset(expected)
    assert query_requests[1].call_count == 1
Example #30
    def ev_timer(self, timer):  # noqa, mccabe: MC0001 too complex (15) FIXME
        """Schedule the current command on the next node or the next command on the first batch of nodes.

        This callback is triggered by `ClusterShell` when a scheduled `Task.timer()` goes off.

        :Parameters:
            according to parent :py:meth:`ClusterShell.Event.EventHandler.ev_timer`.
        """
        success_ratio = 1 - ((self.counters['failed'] + self.counters['timeout']) / self.counters['total'])

        node = None
        if success_ratio >= self.success_threshold:
            # Success ratio is still good, looking for the next node
            with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
                for new_node in self.nodes.values():
                    if new_node.state.is_pending:
                        # Found the next node where to execute the command
                        node = new_node
                        node.state.update(State.scheduled)
                        break

        if node is not None:
            # Schedule the execution of the current command on the next node found above with ClusterShell
            command = self.nodes[node.name].commands[self.nodes[node.name].running_command_index + 1]
            self.logger.debug("next_node=%s, timeout=%s, command='%s'", node.name, command.timeout, command.command)
            Task.task_self().shell(command.command, handler=timer.eh, timeout=command.timeout, nodes=nodeset(node.name))
            return

        # No more nodes were left for the execution of the current command
        with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
            try:
                command = self.commands[self.current_command_index].command
            except IndexError:
                command = None  # Last command reached

            # Get a list of the nodes still in pending state
            pending = [pending_node.name for pending_node in self.nodes.values() if pending_node.state.is_pending]
            # Nodes in running are still running the command and nodes in scheduled state will execute the command
            # anyway, they were already offloaded to ClusterShell
            accounted = len(pending) + self.counters['failed'] + self.counters['success'] + self.counters['timeout']

            # Avoid race conditions
            if self.aborted or accounted != self.counters['total'] or command is None or self.global_timedout:
                self.logger.debug("Skipped timer")
                return

            if pending:
                # This usually happens when executing in batches
                self.logger.warning("Command '%s' was not executed on: %s", command, nodeset_fromlist(pending))

            self.logger.info("Completed command '%s'", command)
            restart = self.end_command()
            self.current_command_index += 1  # Move the global pointer of the command in execution

            if restart:
                for node in self.nodes.values():
                    if node.state.is_success:
                        # Only nodes in pending state will be scheduled for the next command
                        node.state.update(State.pending)

        if restart:
            self.start_command(schedule=True)
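
A worked example of the bookkeeping in the race-condition guard above (numbers are illustrative):

counters = {'failed': 1, 'success': 5, 'timeout': 0, 'total': 10}
pending = ['node8', 'node9']  # nodes still waiting for this command
accounted = len(pending) + counters['failed'] + counters['success'] + counters['timeout']
assert accounted == 8  # != total (10): two nodes are still running or scheduled,
                       # so this timer fired early and is skipped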