Example #1
    def status(_):
        """Check the status of the playbook execution and update the status
        and result of the underlying Completion object.
        """

        status = pb_execution.get_status()

        if status in (ExecutionStatusCode.SUCCESS, ExecutionStatusCode.ERROR):
            if status == ExecutionStatusCode.ERROR:
                raw_result = pb_execution.get_result([
                    "runner_on_failed", "runner_on_unreachable",
                    "runner_on_no_hosts", "runner_on_async_failed",
                    "runner_item_on_failed"
                ])
            else:
                raw_result = pb_execution.get_result(event_filter_list)

            if output_wizard:
                processed_result = output_wizard.process(
                    pb_execution.play_uuid, raw_result)
            else:
                processed_result = raw_result

            # Clean hosts if operation is successful
            if status == ExecutionStatusCode.SUCCESS:
                assert clean_hosts_on_success is not None
                clean_inventory(client, clean_hosts_on_success)

            return processed_result
        else:
            return orchestrator.Completion(on_complete=status)
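On its own this callback closes over pb_execution, output_wizard, event_filter_list, clean_hosts_on_success and client from an enclosing scope; a minimal sketch of that wiring, mirroring the full version in Example #3:

    # sketch: launch the playbook, then evaluate its status; all names
    # here come from the enclosing playbook_operation (see Example #3)
    pb_execution = PlayBookExecution(client, playbook, result_pattern,
                                     params, querystr_dict)
    return orchestrator.Completion(
        on_complete=lambda _: pb_execution.launch()).then(status)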
Example #2
    def update_mons(self, num, host_specs):
        # type: (int, List[orchestrator.HostSpec]) -> orchestrator.Completion
        """
        Adjust the number of cluster monitors.
        """
        # current support limited to adding monitors.
        mon_map = self.get("mon_map")
        num_mons = len(mon_map["mons"])
        if num == num_mons:
            return orchestrator.Completion(value="The requested number of monitors exist.")
        if num < num_mons:
            raise NotImplementedError("Removing monitors is not supported.")

        self.log.debug("Trying to update monitors on: {}".format(host_specs))
        # check that all the hosts are registered
        for host in host_specs:
            self._require_hosts(host.hostname)

        # current support requires a network to be specified
        for host, network, _ in host_specs:
            if not network:
                raise RuntimeError("Host '{}' is missing a network spec".format(host))

        def update_mons_with_daemons(daemons):
            for _, _, name in host_specs:
                if name and any(d.service_instance == name for d in daemons):
                    raise RuntimeError("name '{}' already exists".format(name))

            # explicit placement: enough hosts provided?
            num_new_mons = num - num_mons
            if len(host_specs) < num_new_mons:
                raise RuntimeError("Error: {} hosts provided, expected {}".format(
                    len(host_specs), num_new_mons))

            self.log.info("creating {} monitors on hosts: '{}'".format(
                num_new_mons, ",".join(":".join(h) for h in host_specs)))

            # TODO: we may want to chain the creation of the monitors so they join
            # the quorum one at a time.
            return self._create_mon(host_specs)
        return self._get_services('mon').then(update_mons_with_daemons)
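A hedged usage sketch of the chaining above; the module instance `mgr` and the HostSpec construction are hypothetical (the unpacking in update_mons implies hostname, network and name fields):

# hypothetical call site; only update_mons() itself is from the source
specs = [orchestrator.HostSpec("mon1", "10.0.0.0/24", "")]  # assumed fields
completion = mgr.update_mons(3, specs)
# the Completion resolves once _get_services('mon') delivers the current
# daemons and update_mons_with_daemons has created the new monitors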
Example #3
def playbook_operation(
    client,  # type: Client
    playbook,  # type: str
    result_pattern,  # type: str
    params,  # type: dict
    event_filter_list=None,  # type: Optional[List[str]]
    querystr_dict=None,  # type: Optional[dict]
    output_wizard=None,  # type: Optional[OutputWizard]
    clean_hosts_on_success=None  # type: Optional[dict]
):
    # type: (...) -> orchestrator.Completion
    """
    :param client        : Ansible Runner Service Client
    :param playbook      : The playbook to execute
    :param result_pattern: The "pattern" to discover what execution events
                           have the information deemed as result
    :param params        : http request payload for the playbook execution
    :param event_filter_list: An additional filter of result events based on
                           the event type
    :param querystr_dict : http request querystring for the playbook
                           execution (DO NOT MODIFY HERE)
    :param output_wizard : Optional processor applied to the raw result
                           events before they are returned
    :param clean_hosts_on_success: A dict with groups and hosts to remove
        from the inventory if the operation is successful.
        Ex: {"group1": ["host1"], "group2": ["host3", "host4"]}
    """

    querystr_dict = querystr_dict or {}
    event_filter_list = event_filter_list or [""]
    clean_hosts_on_success = clean_hosts_on_success or {}

    def status(_):
        """Check the status of the playbook execution and update the status
        and result of the underlying Completion object.
        """

        status = pb_execution.get_status()

        if status in (ExecutionStatusCode.SUCCESS, ExecutionStatusCode.ERROR):
            if status == ExecutionStatusCode.ERROR:
                raw_result = pb_execution.get_result([
                    "runner_on_failed", "runner_on_unreachable",
                    "runner_on_no_hosts", "runner_on_async_failed",
                    "runner_item_on_failed"
                ])
            else:
                raw_result = pb_execution.get_result(event_filter_list)

            if output_wizard:
                processed_result = output_wizard.process(
                    pb_execution.play_uuid, raw_result)
            else:
                processed_result = raw_result

            # Clean hosts if operation is successful
            if status == ExecutionStatusCode.SUCCESS:
                assert clean_hosts_on_success is not None
                clean_inventory(client, clean_hosts_on_success)

            return processed_result
        else:
            return orchestrator.Completion(on_complete=status)

    pb_execution = PlayBookExecution(client, playbook, result_pattern, params,
                                     querystr_dict)

    return orchestrator.Completion(
        on_complete=lambda _: pb_execution.launch()).then(status)
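A hedged usage sketch, assuming a configured Client; the playbook name, result pattern, and payload are hypothetical placeholders:

# hypothetical invocation; only playbook_operation's signature is real
completion = playbook_operation(
    client=client,
    playbook="osd-configure.yml",  # hypothetical playbook name
    result_pattern="RESULTS",      # hypothetical result pattern
    params={},                     # hypothetical request payload
    clean_hosts_on_success={"osds": ["host1"]})
# the playbook launches when the Completion executes; `status` then reads
# the execution state and returns the processed result on SUCCESS/ERROR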
Example #4
def wrapper(*args, **kwargs):
    return orchestrator.Completion(
        on_complete=lambda _: f(*args, **kwargs))
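The fragment reads like the body of a decorator; a minimal self-contained sketch under that assumption, with the decorator name `deferred` chosen here for illustration:

import functools

def deferred(f):  # hypothetical name; the inner wrapper is from the source
    """Wrap f so that calling it returns a Completion that defers the
    actual work until the Completion executes."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return orchestrator.Completion(
            on_complete=lambda _: f(*args, **kwargs))
    return wrapper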
Example #5
    def _update_mgrs(self, num, host_specs, daemons):
        num_mgrs = len(daemons)
        if num == num_mgrs:
            return orchestrator.Completion(value="The requested number of managers exist.")

        self.log.debug("Trying to update managers on: {}".format(host_specs))
        # check that all the hosts are registered
        for host in host_specs:
            self._require_hosts(host.hostname)

        if num < num_mgrs:
            num_to_remove = num_mgrs - num

            # first try to remove unconnected mgr daemons that the
            # cluster doesn't see
            connected = []
            mgr_map = self.get("mgr_map")
            if mgr_map.get("active_name"):
                connected.append(mgr_map["active_name"])
            for standby in mgr_map.get('standbys', []):
                connected.append(standby.get('name', ''))
            to_remove_daemons = []
            for d in daemons:
                if d.service_instance not in connected:
                    to_remove_daemons.append(
                        ('%s.%s' % (d.service_type, d.service_instance),
                         d.nodename))
                    num_to_remove -= 1
                    if num_to_remove == 0:
                        break

            # otherwise, remove *any* mgr not already selected above
            if num_to_remove > 0:
                selected = {name for name, _ in to_remove_daemons}
                for d in daemons:
                    name = '%s.%s' % (d.service_type, d.service_instance)
                    if name in selected:
                        continue
                    to_remove_daemons.append((name, d.nodename))
                    num_to_remove -= 1
                    if num_to_remove == 0:
                        break
            return self._remove_daemon(to_remove_daemons)

        else:
            # we assume explicit placement: the number of hosts specified
            # should match the increase in the number of daemons.
            num_new_mgrs = num - num_mgrs
            if len(host_specs) < num_new_mgrs:
                raise RuntimeError(
                    "Error: {} hosts provided, expected {}".format(
                        len(host_specs), num_new_mgrs))

            for host_spec in host_specs:
                if host_spec.name and any(d.service_instance == host_spec.name
                                          for d in daemons):
                    raise RuntimeError(
                        "name '{}' already exists".format(host_spec.name))

            self.log.info("creating {} managers on hosts: '{}'".format(
                num_new_mgrs, ",".join([spec.hostname for spec in host_specs])))

            args = []
            for host_spec in host_specs:
                name = host_spec.name or self.get_unique_name(daemons)
                host = host_spec.hostname
                args.append((host, name))
            return self._create_mgr(args)
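Given the daemons argument, _update_mgrs is presumably invoked through the same .then() chaining used for monitors in Example #2; a hedged sketch of such a wrapper:

    # hypothetical wrapper mirroring update_mons() in Example #2
    def update_mgrs(self, num, host_specs):
        return self._get_services('mgr').then(
            lambda daemons: self._update_mgrs(num, host_specs, daemons))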