Example #1
def salt_proxy():
    '''
    Start a proxy minion.
    '''
    import salt.cli.daemons
    import salt.utils.platform
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.platform.is_windows():
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:  # pylint: disable=broad-except
            # Creating a multiprocessing.Queue() can fail in some container
            # environments; fall back to running the proxy minion in-process.
            proxyminion = salt.cli.daemons.ProxyMinion()
            proxyminion.start()
            return
        process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
        process.start()
        try:
            process.join()
            try:
                restart_delay = queue.get(block=False)
            except Exception:  # pylint: disable=broad-except
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                sys.exit(process.exitcode)
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
Example #2
    def start_daemon(self, cls, opts, start_fun):
        def start(cls, opts, start_fun):
            salt.utils.appendproctitle('{0}-{1}'.format(
                self.__class__.__name__, cls.__name__))
            daemon = cls(opts)
            getattr(daemon, start_fun)()

        process = multiprocessing.Process(target=start,
                                          args=(cls, opts, start_fun))
        process.start()
        return process
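
For reference, here is a minimal, self-contained sketch of the same spawn-by-name pattern. DummyDaemon and the simplified start_daemon below are hypothetical stand-ins for illustration, not Salt APIs; the real helper above also sets the process title via salt.utils.appendproctitle.

import multiprocessing


class DummyDaemon:
    '''Hypothetical stand-in for a salt.cli.daemons class.'''

    def __init__(self, opts):
        self.opts = opts

    def run(self):
        print('daemon running with opts:', self.opts)


def _start(cls, opts, start_fun):
    # Build the daemon inside the child process and call the requested entry point.
    daemon = cls(opts)
    getattr(daemon, start_fun)()


def start_daemon(cls, opts, start_fun):
    # Same spawn-by-name pattern as above, minus the Salt-specific proctitle call.
    process = multiprocessing.Process(target=_start, args=(cls, opts, start_fun))
    process.start()
    return process


if __name__ == '__main__':
    proc = start_daemon(DummyDaemon, {'id': 'demo'}, 'run')
    proc.join()

Keeping the target function and the daemon class at module level keeps them picklable, so the sketch also works where multiprocessing uses the spawn start method (for example on Windows).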
Example #3
File: scripts.py Project: bryson/salt
def salt_proxy_minion():
    '''
    Start a proxy minion.
    '''
    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.is_windows():
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:
            # Creating a multiprocessing.Queue() can fail in some container
            # environments; fall back to running the proxy minion in-process.
            proxyminion = salt.cli.daemons.ProxyMinion()
            proxyminion.start()
            return
        process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
        process.start()
        try:
            process.join()
            try:
                restart_delay = queue.get(block=False)
            except Exception:
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                sys.exit(process.exitcode)
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
Example #4
    def action(self):
        '''
        Pull the queue for functions to execute
        '''
        while self.fun.value:
            msg = self.fun.value.popleft()
            data = msg.get('pub')
            match = getattr(
                    self.matcher.value,
                    '{0}_match'.format(
                        data.get('tgt_type', 'glob')
                        )
                    )(data['tgt'])
            if not match:
                continue
            if 'user' in data:
                log.info(
                        'User {0[user]} Executing command {0[fun]} with jid '
                        '{0[jid]}'.format(data))
            else:
                log.info(
                        'Executing command {0[fun]} with jid {0[jid]}'.format(data)
                        )
            log.debug('Command details {0}'.format(data))

            if is_windows():
                # SaltRaetNixJobber is not picklable. Pickling is necessary
                # when spawning a process in Windows. Since the process will
                # be spawned and joined on non-Windows platforms, instead of
                # this, just run the function directly and absorb any thrown
                # exceptions.
                try:
                    self.proc_run(msg)
                except Exception as exc:
                    log.error(
                            'Exception caught by jobber: {0}'.format(exc),
                            exc_info=True)
            else:
                process = multiprocessing.Process(
                        target=self.proc_run,
                        kwargs={'msg': msg}
                        )
                process.start()
                process.join()
Example #5
def salt_minion():
    '''
    Start the salt minion in a subprocess.
    Auto restart minion on error.
    '''
    import signal

    import salt.utils.platform
    import salt.utils.process
    salt.utils.process.notify_systemd()

    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.platform.is_windows():
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    def escalate_signal_to_process(pid, signum, sigframe):  # pylint: disable=unused-argument
        '''
        Escalate the signal received to the multiprocessing process that
        is actually running the minion
        '''
        # escalate signal
        os.kill(pid, signum)

    # keep one minion subprocess running
    prev_sigint_handler = signal.getsignal(signal.SIGINT)
    prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
    while True:
        try:
            process = multiprocessing.Process(target=minion_process)
            process.start()
            signal.signal(
                signal.SIGTERM,
                functools.partial(escalate_signal_to_process, process.pid))
            signal.signal(
                signal.SIGINT,
                functools.partial(escalate_signal_to_process, process.pid))
            signal.signal(
                signal.SIGHUP,
                functools.partial(escalate_signal_to_process, process.pid))
        except Exception:  # pylint: disable=broad-except
            # if multiprocessing does not work
            minion = salt.cli.daemons.Minion()
            minion.start()
            break

        process.join()

        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST reset signal handling to the previous handlers.
        signal.signal(signal.SIGINT, prev_sigint_handler)
        signal.signal(signal.SIGTERM, prev_sigterm_handler)

        if process.exitcode != salt.defaults.exitcodes.SALT_KEEPALIVE:
            sys.exit(process.exitcode)
        # on top of the random_reauth_delay already performed,
        # delay extra to reduce flooding and free resources
        # NOTE: values are static but should be fine.
        time.sleep(2 + randint(1, 10))
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
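
The restart loop above can be distilled into a short, standalone sketch. KEEPALIVE_EXITCODE and child are made-up stand-ins (the real code compares against salt.defaults.exitcodes.SALT_KEEPALIVE and runs minion_process); signal escalation and log-handler cleanup are omitted for brevity.

import multiprocessing
import sys
import time

KEEPALIVE_EXITCODE = 99  # stand-in value, not the real SALT_KEEPALIVE


def child(attempt):
    # Pretend the first two runs ask to be restarted, then exit cleanly.
    sys.exit(KEEPALIVE_EXITCODE if attempt < 3 else 0)


if __name__ == '__main__':
    attempt = 0
    while True:
        attempt += 1
        process = multiprocessing.Process(target=child, args=(attempt,))
        process.start()
        process.join()
        if process.exitcode != KEEPALIVE_EXITCODE:
            # Any other exit code is a real exit; stop the supervisor loop.
            print('child exited with code', process.exitcode)
            break
        # Back off briefly before restarting, as the Salt loop above does.
        time.sleep(1)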
Example #6
File: proxy.py Project: jodok/salt
def handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.
    '''
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if six.PY2:
        data = salt.utils.data.decode(data, keep=True)
    if 'user' in data:
        log.info(
            'User %s Executing command %s with jid %s',
            data['user'], data['fun'], data['jid']
        )
    else:
        log.info(
            'Executing command %s with jid %s',
            data['fun'], data['jid']
        )
    log.debug('Command details %s', data)

    # Don't duplicate jobs
    log.trace('Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data['jid'] in self.jid_queue:
            return
        else:
            self.jid_queue.append(data['jid'])
            if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                self.jid_queue.pop(0)

    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    process_count_max = self.opts.get('process_count_max')
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
            yield tornado.gen.sleep(10)
            process_count = len(salt.utils.minion.running(self.opts))

    # We stash an instance reference to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingProcess(
                target=self._target,
                name='ProcessPayload',
                args=(instance, self.opts, data, self.connected)
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data['jid']
        )

    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()
    process.name = '{}-Job-{}'.format(process.name, data['jid'])
    self.subprocess_list.add(process)
Example #7
def salt_minion():
    """
    Start the salt minion in a subprocess.
    Auto restart minion on error.
    """
    import signal

    import salt.utils.platform
    import salt.utils.process

    salt.utils.process.notify_systemd()

    import salt.cli.daemons
    import multiprocessing

    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != "__main__":
        sys.modules["__main__"] = sys.modules[__name__]

    if "" in sys.path:
        sys.path.remove("")

    if salt.utils.platform.is_windows():
        minion = salt.cli.daemons.Minion()
        minion.start()
        return
    # REMOVEME after Python 2.7 support is dropped (also the six import)
    elif six.PY2:
        from salt.utils.versions import warn_until

        # Message borrowed from pip's deprecation warning
        warn_until(
            "3001",
            "Python 2.7 will reach the end of its life on January 1st,"
            " 2020. Please upgrade your Python as Python 2.7 won't be"
            " maintained after that date.  Salt will drop support for"
            " Python 2.7 in the 3001 release or later.",
        )
    # END REMOVEME

    if "--disable-keepalive" in sys.argv:
        sys.argv.remove("--disable-keepalive")
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    def escalate_signal_to_process(pid, signum, sigframe):  # pylint: disable=unused-argument
        """
        Escalate the signal received to the multiprocessing process that
        is actually running the minion
        """
        # escalate signal
        os.kill(pid, signum)

    # keep one minion subprocess running
    prev_sigint_handler = signal.getsignal(signal.SIGINT)
    prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
    while True:
        try:
            process = multiprocessing.Process(target=minion_process)
            process.start()
            signal.signal(
                signal.SIGTERM,
                functools.partial(escalate_signal_to_process, process.pid),
            )
            signal.signal(
                signal.SIGINT,
                functools.partial(escalate_signal_to_process, process.pid),
            )
            signal.signal(
                signal.SIGHUP,
                functools.partial(escalate_signal_to_process, process.pid),
            )
        except Exception:  # pylint: disable=broad-except
            # if multiprocessing does not work
            minion = salt.cli.daemons.Minion()
            minion.start()
            break

        process.join()

        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST reset signal handling to the previous handlers.
        signal.signal(signal.SIGINT, prev_sigint_handler)
        signal.signal(signal.SIGTERM, prev_sigterm_handler)

        if process.exitcode != salt.defaults.exitcodes.SALT_KEEPALIVE:
            sys.exit(process.exitcode)
        # on top of the random_reauth_delay already performed,
        # delay extra to reduce flooding and free resources
        # NOTE: values are static but should be fine.
        time.sleep(2 + randint(1, 10))
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()
Example #8
def handle_decoded_payload(self, data):
    """
    Override this method if you wish to handle the decoded data
    differently.
    """
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if "user" in data:
        log.info(
            "User %s Executing command %s with jid %s",
            data["user"],
            data["fun"],
            data["jid"],
        )
    else:
        log.info("Executing command %s with jid %s", data["fun"], data["jid"])
    log.debug("Command details %s", data)

    # Don't duplicate jobs
    log.trace("Started JIDs: %s", self.jid_queue)
    if self.jid_queue is not None:
        if data["jid"] in self.jid_queue:
            return
        else:
            self.jid_queue.append(data["jid"])
            if len(self.jid_queue) > self.opts["minion_jid_queue_hwm"]:
                self.jid_queue.pop(0)

    if isinstance(data["fun"], str):
        if data["fun"] == "sys.reload_modules":
            (
                self.functions,
                self.returners,
                self.function_errors,
                self.executors,
            ) = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    process_count_max = self.opts.get("process_count_max")
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            log.warning(
                "Maximum number of processes reached while executing jid %s, waiting...",
                data["jid"],
            )
            yield salt.ext.tornado.gen.sleep(10)
            process_count = len(salt.utils.minion.running(self.opts))

    # We stash an instance reference to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get("multiprocessing", True)
    if multiprocessing_enabled:
        if sys.platform.startswith("win"):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingProcess(
                target=self._target,
                name="ProcessPayload",
                args=(instance, self.opts, data, self.connected),
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data["jid"],
        )

    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()
    process.name = "{}-Job-{}".format(process.name, data["jid"])
    self.subprocess_list.add(process)
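
A stripped-down sketch of the process-versus-thread dispatch used above. The job and dispatch names are hypothetical; the real code targets Minion._target, resets signal handlers around Process.start(), and drops the instance reference on Windows because the minion object cannot be pickled there.

import multiprocessing
import threading


def job(jid, fun):
    # Stand-in for the real payload handler.
    print('running {} for jid {}'.format(fun, jid))


def dispatch(data, multiprocessing_enabled=True):
    # Pick a Process or a Thread for the job, mirroring the pattern above,
    # then tag the worker's name with the jid for easier debugging.
    args = (data['jid'], data['fun'])
    if multiprocessing_enabled:
        worker = multiprocessing.Process(target=job, args=args)
    else:
        worker = threading.Thread(target=job, args=args)
    worker.start()
    worker.name = '{}-Job-{}'.format(worker.name, data['jid'])
    return worker


if __name__ == '__main__':
    worker = dispatch({'jid': '20240101000000000000', 'fun': 'test.ping'})
    worker.join()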
Example #9
def handle_decoded_payload(self, data):
    """
    Override this method if you wish to handle the decoded data
    differently.
    """
    if "user" in data:
        log.info(
            "User %s Executing command %s with jid %s",
            data["user"],
            data["fun"],
            data["jid"],
        )
    else:
        log.info("Executing command %s with jid %s", data["fun"], data["jid"])
    log.debug("Command details %s", data)

    # Don"t duplicate jobs
    log.trace("Started JIDs: %s", self.jid_queue)
    if self.jid_queue is not None:
        if data["jid"] in self.jid_queue:
            return
        else:
            self.jid_queue.append(data["jid"])
            if len(self.jid_queue) > self.opts["minion_jid_queue_hwm"]:
                self.jid_queue.pop(0)

    if isinstance(data["fun"], str):
        if data["fun"] == "sys.reload_modules":
            (
                self.functions,
                self.returners,
                self.function_errors,
                self.executors,
            ) = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    process_count_max = self.opts.get("process_count_max")
    if process_count_max > 0:
        process_count = self.subprocess_list.count
        once_logged = False
        while process_count >= process_count_max:
            if once_logged is False:
                log.debug(
                    "Maximum number of processes reached while executing jid %s, waiting...",
                    data["jid"],
                )
                once_logged = True
            yield salt.ext.tornado.gen.sleep(0.5)
            process_count = self.subprocess_list.count

    # We stash an instance reference to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get("multiprocessing", True)
    name = "ProcessPayload(jid={})".format(data["jid"])
    if multiprocessing_enabled:
        if sys.platform.startswith("win"):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        process = SignalHandlingProcess(
            target=target,
            args=(self, instance, instance.opts, data, self.connected),
            name=name,
        )
    else:
        process = threading.Thread(
            target=target,
            args=(self, instance, instance.opts, data, self.connected),
            name=name,
        )

    process.start()
    process.name = "{}-Job-{}".format(process.name, data["jid"])
    self.subprocess_list.add(process)
Example #10
File: scripts.py Project: bryson/salt
def salt_minion():
    '''
    Start the salt minion in a subprocess.
    Auto restart minion on error.
    '''
    import signal

    import salt.utils.process
    salt.utils.process.notify_systemd()

    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.is_windows():
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    if '--disable-keepalive' in sys.argv:
        sys.argv.remove('--disable-keepalive')
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    def escalate_signal_to_process(pid, signum, sigframe):  # pylint: disable=unused-argument
        '''
        Escalate the signal received to the multiprocessing process that
        is actually running the minion
        '''
        # escalate signal
        os.kill(pid, signum)

    # keep one minion subprocess running
    prev_sigint_handler = signal.getsignal(signal.SIGINT)
    prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
    while True:
        try:
            process = multiprocessing.Process(target=minion_process)
            process.start()
            signal.signal(signal.SIGTERM,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGINT,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGHUP,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
        except Exception:  # pylint: disable=broad-except
            # if multiprocessing does not work
            minion = salt.cli.daemons.Minion()
            minion.start()
            break

        process.join()

        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST reset signal handling to the previous handlers.
        signal.signal(signal.SIGINT, prev_sigint_handler)
        signal.signal(signal.SIGTERM, prev_sigterm_handler)

        if process.exitcode != salt.defaults.exitcodes.SALT_KEEPALIVE:
            sys.exit(process.exitcode)
        # on top of the random_reauth_delay already performed,
        # delay extra to reduce flooding and free resources
        # NOTE: values are static but should be fine.
        time.sleep(2 + randint(1, 10))
        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()