Example #1
    def _parse_token(self, token):
        """Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQueryAggregator._parse_token`.
        """
        if not isinstance(token, pp.ParseResults):  # pragma: no cover - this should never happen
            raise InvalidQueryError('Expecting ParseResults object, got {type}: {token}'.format(
                type=type(token), token=token))

        token_dict = token.asDict()
        self.logger.trace('Token is: %s | %s', token_dict, token)

        if 'hosts' in token_dict:
            element = self._get_stack_element()
            element['hosts'] = nodeset_fromlist(token_dict['hosts'])
            if 'bool' in token_dict:
                element['bool'] = token_dict['bool']
            self.stack_pointer['children'].append(element)
        elif 'open_subgroup' in token_dict and 'close_subgroup' in token_dict:
            self._open_subgroup()
            if 'bool' in token_dict:
                self.stack_pointer['bool'] = token_dict['bool']
            for subtoken in token:
                if isinstance(subtoken, str):  # Grammar literals, boolean operators and parentheses
                    continue
                self._parse_token(subtoken)
            self._close_subgroup()
        else:  # pragma: no cover - this should never happen
            raise InvalidQueryError('Got unexpected token: {token}'.format(token=token))
Example #2
    def test_first_batch(self):
        """The first_batch property should return the first batch of hosts."""
        size = 5
        target = transports.Target(self.hosts, batch_size=size)
        assert len(target.first_batch) == size
        assert target.first_batch == cumin.nodeset_fromlist(self.hosts[:size])
        assert isinstance(target.first_batch, NodeSet)
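For context, a minimal sketch of what the first_batch property exercised above might look like inside Target is shown below; it assumes the nodeset_fromlist helper used throughout these examples and that self.hosts supports slicing (both list and NodeSet do). Treat it as an illustration under those assumptions, not necessarily the actual implementation.

    @property
    def first_batch(self):
        """Return the first batch of hosts to target, as a ClusterShell NodeSet."""
        # Sketch: batch_size is assumed to have been validated/capped in __init__.
        return nodeset_fromlist(self.hosts[:self.batch_size])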
Example #3
    def start_command(self, schedule=False):
        """Initialize progress bars and variables for this command execution.

        Executed at the start of each command.

        Arguments:
            schedule (bool, optional): whether the next command should be sent to ClusterShell for execution or not.

        """
        self.counters['success'] = 0

        self.progress.init(self.counters['total'])

        # Schedule the next command; the first one was already scheduled by ClusterShellWorker.execute()
        if schedule:
            with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
                # Available nodes for the next command execution were already updated back to the pending state
                remaining_nodes = [node.name for node in self.nodes.values() if node.state.is_pending]
                first_batch = remaining_nodes[:self.target.batch_size]
                first_batch_set = nodeset_fromlist(first_batch)
                for node_name in first_batch:
                    self.nodes[node_name].state.update(State.scheduled)

            command = self.commands[self.current_command_index]
            self.logger.debug(
                "command='%s', timeout=%s, first_batch=%s", command.command, command.timeout, first_batch_set)

            # Schedule the command for execution in ClusterShell
            Task.task_self().flush_buffers()
            Task.task_self().shell(command.command, nodes=first_batch_set, handler=self, timeout=command.timeout)
Example #4
    def get_results(self):
        """Get the results of the last task execution.

        Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.transports.BaseWorker.get_results`.
        """
        for output, nodelist in self.task.iter_buffers():
            yield nodeset_fromlist(nodelist), output
Example #5
    def __init__(self, hosts, batch_size=None, batch_size_ratio=None, batch_sleep=None):
        """Constructor, inizialize the Target with the list of hosts and additional parameters.

        Arguments:
            hosts (ClusterShell.NodeSet.NodeSet, list): hosts that will be targeted, both
                :py:class:`ClusterShell.NodeSet.NodeSet` and :py:class:`list` are accepted and converted automatically
                to :py:class:`ClusterShell.NodeSet.NodeSet` internally.
            batch_size (int, optional): set the batch size so that no more than this number of hosts are targeted
                at any given time. It must be a positive integer. If greater than the number of hosts it will be
                auto-resized to the number of hosts.
            batch_size_ratio (float, optional): set the batch size as a ratio so that no more than this fraction
                of hosts is targeted at any given time. It must be a float between 0 and 1 and will raise an
                exception if, after rounding, it selects 0 hosts.
            batch_sleep (float, optional): sleep time in seconds between the end of execution on one host in the
                batch and the start on the next host. It must be a positive float.

        Raises:
            cumin.transports.WorkerError: if the `hosts` parameter is empty or invalid, if both the `batch_size` and
                `batch_size_ratio` parameters are set or if the `batch_size_ratio` selects no hosts.

        """
        self.logger = logging.getLogger('.'.join((self.__module__, self.__class__.__name__)))

        message = "must be a non-empty ClusterShell NodeSet or list"
        if not hosts:
            raise_error('hosts', message, hosts)
        elif isinstance(hosts, NodeSet):
            self.hosts = hosts
        elif isinstance(hosts, list):
            self.hosts = nodeset_fromlist(hosts)
        else:
            raise_error('hosts', message, hosts)

        if batch_size is not None and batch_size_ratio is not None:
            raise WorkerError(("The 'batch_size' and 'batch_size_ratio' parameters are mutually exclusive but they're "
                               "both set."))

        if batch_size_ratio is not None:
            if not isinstance(batch_size_ratio, float) or not 0.0 <= batch_size_ratio <= 1.0:
                raise_error('batch_size_ratio', 'must be a float between 0.0 and 1.0', batch_size_ratio)

            batch_size = round(len(self.hosts) * batch_size_ratio)
            if batch_size == 0:
                raise_error('batch_size_ratio', 'has generated a batch_size of 0 hosts', batch_size_ratio)

        self.batch_size = self._compute_batch_size(batch_size, self.hosts)
        self.batch_sleep = Target._compute_batch_sleep(batch_sleep)
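A short usage sketch of the constructor documented above, with hypothetical host names; it assumes Target is imported from cumin.transports, as in the test example earlier.

from cumin.transports import Target

# Target at most 2 hosts at a time, sleeping 1.5 seconds between the end of one host and the start of the next.
target = Target(['host1', 'host2', 'host3', 'host4'], batch_size=2, batch_sleep=1.5)
# target.hosts is now a ClusterShell NodeSet; batch_size would have been capped at len(hosts) if larger.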
Example #6
def get_hosts(args, config):
    """Resolve the hosts selection into a list of hosts and return it. Raises KeyboardInterruptError.

    Arguments:
        args: ArgumentParser instance with parsed command line arguments
        config: a dictionary with the parsed configuration file

    """
    hosts = query.Query(config).execute(args.hosts)

    if not hosts:
        stderr('No hosts found that match the query')
        return hosts

    stderr('{num} hosts will be targeted:'.format(num=len(hosts)))
    stderr(Colored.cyan(cumin.nodeset_fromlist(hosts)))

    if args.dry_run:
        stderr('DRY-RUN mode enabled, aborting')
        return []

    if args.force:
        stderr('FORCE mode enabled, continuing without confirmation')
        return hosts

    if not sys.stdout.isatty():  # pylint: disable=no-member
        message = 'Not in a TTY but neither DRY-RUN nor FORCE mode was specified.'
        stderr(message)
        raise cumin.CuminError(message)

    for i in range(10):
        stderr('Confirm to continue [y/n]?', end=' ')
        answer = input()  # nosec
        if not answer:
            continue

        if answer in 'yY':
            break
        if answer in 'nN':
            raise KeyboardInterruptError
    else:
        stderr('Got an invalid answer {i} times'.format(i=i))
        raise KeyboardInterruptError

    return hosts
Example #7
    def _execute(self):
        """Concrete implementation of parent abstract method.

        :Parameters:
            according to parent :py:meth:`cumin.backends.BaseQuery._execute`.

        Returns:
            ClusterShell.NodeSet.NodeSet: with the FQDNs of the matching hosts.

        """
        query = self._get_query_string(group=self.grouped_tokens).format(
            host_key=self.hosts_keys[self.endpoint])
        hosts = self._api_call(query)
        unique_hosts = nodeset_fromlist(
            [host[self.hosts_keys[self.endpoint]] for host in hosts])
        self.logger.debug("Queried puppetdb for '%s', got '%d' results.",
                          query, len(unique_hosts))

        return unique_hosts
Example #8
    def _get_ec2_hosts(self):
        """Return a NodeSet with the list of matching hosts based on the parameters.
        Returns:
            ClusterShell.NodeSet.NodeSet: with the FQDNs of the matching hosts.
        """

        ec2_client = session.Session(profile_name=self.ec2_profile,
                                     region_name=self.ec2_region,
                                     aws_access_key_id=self.ec2_access_key,
                                     aws_secret_access_key=self.ec2_secret_key).client('ec2')
        ec2_paginator = ec2_client.get_paginator('describe_instances')
        ec2_filters = self._ec2_build_filters()

        # response is an iterator
        response = ec2_paginator.paginate(Filters=ec2_filters)
        hosts = []
        for item in response:
            hosts += self._parse_response(item)
        return nodeset_fromlist(hosts)
Example #9
    def _get_project_hosts(self, project):
        """Return a NodeSet with the list of matching hosts based for the project based on the search parameters.

        Arguments:
            project (str): the project name where to get the list of hosts.

        Returns:
            ClusterShell.NodeSet.NodeSet: with the FQDNs of the matching hosts.

        """
        client = _get_nova_client(self.openstack_config, project)

        domain = ''
        domain_suffix = self.openstack_config.get('domain_suffix', None)
        if domain_suffix is not None:
            if domain_suffix[0] != '.':
                domain = '.{suffix}'.format(suffix=domain_suffix)
            else:
                domain = domain_suffix

        return nodeset_fromlist(
            '{host}.{project}{domain}'.format(
                host=server.name, project=project, domain=domain)
            for server in client.servers.list(search_opts=self.search_params))
Example #10
    def _get_log_message(self, num, message, nodes=None):
        """Get a pre-formatted message suitable for logging or printing.

        Arguments:
            num (int): the number of affected nodes.
            message (str): the message to print.
            nodes (list, optional): the list of nodes affected.

        Returns:
            tuple: a tuple of ``(logging message, NodeSet of the affected nodes)``.

        """
        if nodes is None:
            nodes_string = ''
            message_end = ''
        else:
            nodes_string = nodeset_fromlist(nodes)
            message_end = ': '

        tot = self.counters['total']
        log_message = '{perc:.1%} ({num}/{tot}) {message}{message_end}'.format(
            perc=(num / tot), num=num, tot=tot, message=message, message_end=message_end)

        return (log_message, str(nodes_string))
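As a worked example of the format string above, with hypothetical values (3 affected nodes out of a total of 10):

import cumin

num, tot = 3, 10
nodes_string = cumin.nodeset_fromlist(['host1', 'host2', 'host3'])  # folds to 'host[1-3]'
log_message = '{perc:.1%} ({num}/{tot}) {message}{message_end}'.format(
    perc=(num / tot), num=num, tot=tot, message='success', message_end=': ')
print(log_message, str(nodes_string))  # -> 30.0% (3/10) success:  host[1-3]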
Example #11
def test_nodeset_fromlist_empty():
    """Calling nodeset_fromlist() with empty list should return an instance of ClusterShell NodeSet with no resolver."""
    nodeset = cumin.nodeset_fromlist([])
    assert isinstance(nodeset, NodeSet)
    assert nodeset == NodeSet()
    assert nodeset._resolver is None  # pylint: disable=protected-access
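For reference, the helper exercised by this test is a thin wrapper around ClusterShell. A minimal sketch of what it plausibly looks like is shown below, assuming ClusterShell's NodeSet.fromlist() classmethod and its RESOLVER_NOGROUP constant; treat it as an illustration rather than the verbatim cumin code.

from ClusterShell.NodeSet import NodeSet, RESOLVER_NOGROUP


def nodeset_fromlist(nodes_list):
    """Return a NodeSet built from a list of node names, with group resolution disabled."""
    # Passing RESOLVER_NOGROUP leaves the internal group resolver unset, as the test above asserts.
    return NodeSet.fromlist(nodes_list, resolver=RESOLVER_NOGROUP)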
Example #12
    def ev_timer(self, timer):  # noqa, mccabe: MC0001 too complex (15) FIXME
        """Schedule the current command on the next node or the next command on the first batch of nodes.

        This callback is triggered by `ClusterShell` when a scheduled `Task.timer()` goes off.

        :Parameters:
            according to parent :py:meth:`ClusterShell.Event.EventHandler.ev_timer`.
        """
        success_ratio = 1 - ((self.counters['failed'] + self.counters['timeout']) / self.counters['total'])

        node = None
        if success_ratio >= self.success_threshold:
            # Success ratio is still good, looking for the next node
            with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
                for new_node in self.nodes.values():
                    if new_node.state.is_pending:
                        # Found the next node where to execute the command
                        node = new_node
                        node.state.update(State.scheduled)
                        break

        if node is not None:
            # Schedule the execution of the current command on the next node found above via ClusterShell
            command = self.nodes[node.name].commands[self.nodes[node.name].running_command_index + 1]
            self.logger.debug("next_node=%s, timeout=%s, command='%s'", node.name, command.command, command.timeout)
            Task.task_self().shell(command.command, handler=timer.eh, timeout=command.timeout, nodes=nodeset(node.name))
            return

        # No more nodes were left for the execution of the current command
        with self.lock:  # Avoid modifications of the same data from other callbacks triggered by ClusterShell
            try:
                command = self.commands[self.current_command_index].command
            except IndexError:
                command = None  # Last command reached

            # Get a list of the nodes still in pending state
            pending = [pending_node.name for pending_node in self.nodes.values() if pending_node.state.is_pending]
            # Nodes in the running state are still executing the command and nodes in the scheduled state will
            # execute it anyway; they were already offloaded to ClusterShell
            accounted = len(pending) + self.counters['failed'] + self.counters['success'] + self.counters['timeout']

            # Avoid race conditions
            if self.aborted or accounted != self.counters['total'] or command is None or self.global_timedout:
                self.logger.debug("Skipped timer")
                return

            if pending:
                # This usually happens when executing in batches
                self.logger.warning("Command '%s' was not executed on: %s", command, nodeset_fromlist(pending))

            self.logger.info("Completed command '%s'", command)
            restart = self.end_command()
            self.current_command_index += 1  # Move the global pointer of the command in execution

            if restart:
                for node in self.nodes.values():
                    if node.state.is_success:
                        # Only nodes in pending state will be scheduled for the next command
                        node.state.update(State.pending)

        if restart:
            self.start_command(schedule=True)
Example #13
    def _commands_output_report(self, buffer_iterator, command=None):
        """Print the commands output in a colored and tqdm-friendly way.

        Arguments:
            buffer_iterator (mixed): any `ClusterShell` object that implements ``iter_buffers()`` like
                :py:class:`ClusterShell.Task.Task` and all the `Worker` objects.
            command (str, optional): the command the output is referring to.

        """
        if not self.deduplicate_output:
            tqdm.write(Colored.blue('================'), file=sys.stdout)
            return

        nodelist = None
        if command is not None:
            output_message = "----- OUTPUT of '{command}' -----".format(command=self._get_short_command(command))
        else:
            output_message = '----- OUTPUT -----'

        for output, nodelist in buffer_iterator.iter_buffers():
            tqdm.write(Colored.blue('===== NODE GROUP ====='), file=sys.stdout)
            tqdm.write(Colored.cyan('({num}) {nodes}'.format(num=len(nodelist), nodes=nodeset_fromlist(nodelist))),
                       file=sys.stdout)
            tqdm.write(Colored.blue(output_message), file=sys.stdout)
            tqdm.write(output.message().decode(), file=sys.stdout)

        if nodelist is None:
            message = '===== NO OUTPUT ====='
        else:
            message = '================'

        tqdm.write(Colored.blue(message), file=sys.stdout)
Example #14
    def setup_method(self, _):
        """Initialize default properties and instances."""
        # pylint: disable=attribute-defined-outside-init
        self.hosts_list = ['host' + str(i) for i in range(10)]
        self.hosts = cumin.nodeset_fromlist(self.hosts_list)