Example 1
class ThreadedStatistics(Statistics, Threaded, threading.Thread):
    """ ThreadedStatistics plugins process client statistics in a
    separate thread. """
    def __init__(self, core, datastore):
        Statistics.__init__(self, core, datastore)
        Threaded.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = os.path.join(datastore, "etc",
                                         "%s.pending" % self.name)
        self.daemon = False

    def start_threads(self):
        self.start()

    def _save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, xdata) = self.work_queue.get_nowait()
                data = \
                    lxml.etree.tostring(xdata,
                                        xml_declaration=False).decode("UTF-8")
                pending_data.append((metadata.hostname, data))
        except Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            cPickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.name)
        except (IOError, TypeError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to save pending data: %s" % err)

    def _load(self):
        """Load any pending data from a file."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = cPickle.load(savefile)
            savefile.close()
        except (IOError, cPickle.UnpicklingError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to load pending data: %s" % err)
            return False
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except MetadataRuntimeError:
                        pass

                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False

                self.work_queue.put_nowait(
                    (metadata,
                     lxml.etree.XML(pdata, parser=Bcfg2.Server.XMLParser)))
            except Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError:
                lxml_error = sys.exc_info()[1]
                self.logger.error("Unable to load saved interaction: %s" %
                                  lxml_error)
            except MetadataConsistencyError:
                self.logger.error("Unable to load metadata for saved "
                                  "interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except OSError:
            self.logger.error("Failed to unlink save file: %s" %
                              self.pending_file)
        self.logger.info("Loaded pending %s data" % self.name)
        return True

    def run(self):
        if not self._load():
            return
        while not self.terminate.isSet() and self.work_queue is not None:
            try:
                (client, xdata) = self.work_queue.get(block=True, timeout=2)
            except Empty:
                continue
            except:
                err = sys.exc_info()[1]
                self.logger.error("ThreadedStatistics: %s" % err)
                continue
            self.handle_statistic(client, xdata)
        if self.work_queue is not None and not self.work_queue.empty():
            self._save()

    def process_statistics(self, metadata, data):
        try:
            self.work_queue.put_nowait((metadata, copy.copy(data)))
        except Full:
            self.logger.warning("%s: Queue is full.  Dropping interactions." %
                                self.name)

    def handle_statistic(self, metadata, data):
        """ Process the given XML statistics data for the specified
        client object.  This differs from the
        :func:`Statistics.process_statistics` method only in that
        ThreadedStatistics first adds the data to a queue, and then
        processes them in a separate thread.

        :param metadata: The client metadata
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param data: The statistics data
        :type data: lxml.etree._Element
        :return: None
        """
        raise NotImplementedError
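
Since handle_statistic is the only abstract piece above, a concrete plugin only has to supply that one method; the queue, the worker thread, and the pending-file persistence all come from the base class. A minimal sketch of such a subclass (the plugin name, the log message, and the ".//Bad" lookup are illustrative, not taken from the Bcfg2 source):

class StatLogger(ThreadedStatistics):
    """ Hypothetical plugin that just logs a summary of each
    interaction handed to it by the worker thread. """

    def handle_statistic(self, metadata, data):
        # 'data' is the lxml.etree._Element queued by process_statistics()
        bad = len(data.findall(".//Bad"))
        self.logger.info("%s: client %s reported %d bad entries" %
                         (self.name, metadata.hostname, bad))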
Example 2
class ThreadedStatistics(Statistics, Threaded, threading.Thread):
    """ ThreadedStatistics plugins process client statistics in a
    separate thread. """

    def __init__(self, core, datastore):
        Statistics.__init__(self, core, datastore)
        Threaded.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = os.path.join(datastore, "etc",
                                         "%s.pending" % self.name)
        self.daemon = False

    def start_threads(self):
        self.start()

    def _save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, data) = self.work_queue.get_nowait()
                pending_data.append(
                    (metadata.hostname,
                     lxml.etree.tostring(
                            data,
                            xml_declaration=False).decode("UTF-8")))
        except Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            cPickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.name)
        except (IOError, TypeError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to save pending data: %s" % err)

    def _load(self):
        """Load any pending data from a file."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = cPickle.load(savefile)
            savefile.close()
        except (IOError, cPickle.UnpicklingError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to load pending data: %s" % err)
            return False
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except MetadataRuntimeError:
                        pass

                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False

                self.work_queue.put_nowait(
                    (metadata,
                     lxml.etree.XML(pdata, parser=Bcfg2.Server.XMLParser)))
            except Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError:
                lxml_error = sys.exc_info()[1]
                self.logger.error("Unable to load saved interaction: %s" %
                                  lxml_error)
            except MetadataConsistencyError:
                self.logger.error("Unable to load metadata for saved "
                                  "interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except OSError:
            self.logger.error("Failed to unlink save file: %s" %
                              self.pending_file)
        self.logger.info("Loaded pending %s data" % self.name)
        return True

    def run(self):
        if not self._load():
            return
        while not self.terminate.isSet() and self.work_queue is not None:
            try:
                (client, xdata) = self.work_queue.get(block=True, timeout=2)
            except Empty:
                continue
            except:
                err = sys.exc_info()[1]
                self.logger.error("ThreadedStatistics: %s" % err)
                continue
            self.handle_statistic(client, xdata)
        if self.work_queue is not None and not self.work_queue.empty():
            self._save()

    def process_statistics(self, metadata, data):
        try:
            self.work_queue.put_nowait((metadata, copy.copy(data)))
        except Full:
            self.logger.warning("%s: Queue is full.  Dropping interactions." %
                                self.name)

    def handle_statistic(self, metadata, data):
        """ Process the given XML statistics data for the specified
        client object.  This differs from the
        :func:`Statistics.process_statistics` method only in that
        ThreadedStatistics first adds the data to a queue, and then
        processes them in a separate thread.

        :param metadata: The client metadata
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param data: The statistics data
        :type data: lxml.etree._Element
        :return: None
        """
        raise NotImplementedError
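
The _save/_load pair above persists the work queue as a pickled list of (hostname, xml-string) tuples. The round trip can be reproduced in isolation; this sketch uses Python 3's pickle, a made-up path, and a made-up element, whereas the plugin itself uses cPickle and a path derived from its datastore:

import pickle
import lxml.etree

pending_file = "/tmp/Example.pending"   # hypothetical path

# Save: serialize each element to a string, as _save() does
element = lxml.etree.XML('<Statistics client="c1"/>')
pending_data = [("c1",
                 lxml.etree.tostring(element,
                                     xml_declaration=False).decode("UTF-8"))]
with open(pending_file, "wb") as savefile:
    pickle.dump(pending_data, savefile)

# Load: parse each string back into an element, as _load() does
with open(pending_file, "rb") as savefile:
    for hostname, xml in pickle.load(savefile):
        print(hostname, lxml.etree.XML(xml).tag)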
Example 3
class DirectStore(TransportBase, threading.Thread):
    def __init__(self, setup):
        TransportBase.__init__(self, setup)
        threading.Thread.__init__(self)
        self.save_file = os.path.join(self.data, ".saved")
        self.storage = load_storage_from_config(setup)
        self.queue = Queue(100000)
        self.terminate = threading.Event()
        self.start()

    def shutdown(self):
        self.terminate.set()

    def store(self, hostname, metadata, stats):
        try:
            self.queue.put_nowait(dict(
                    hostname=hostname,
                    metadata=metadata,
                    stats=stats))
        except Full:
            self.logger.warning("Reporting: Queue is full, "
                                "dropping statistics")

    def run(self):
        if not self._load():
            return
        while not self.terminate.isSet() and self.queue is not None:
            try:
                interaction = self.queue.get(block=True,
                                             timeout=self.timeout)
                self.storage.import_interaction(interaction)
            except Empty:
                continue
            except:
                err = sys.exc_info()[1]
                self.logger.error("Reporting: Could not import interaction: %s"
                                  % err)
                continue
        if self.queue is not None and not self.queue.empty():
            self._save()

    def fetch(self):
        """ no collector is necessary with this backend """
        pass

    def start_monitor(self, collector):
        """ no collector is necessary with this backend """
        pass

    def rpc(self, method, *args, **kwargs):
        try:
            return getattr(self.storage, method)(*args, **kwargs)
        except:  # pylint: disable=W0702
            msg = "Reporting: RPC method %s failed: %s" % (method,
                                                           sys.exc_info()[1])
            self.logger.error(msg)
            raise TransportError(msg)

    def _save(self):
        """ Save any saved data to a file """
        saved_data = []
        try:
            while not self.queue.empty():
                saved_data.append(self.queue.get_nowait())
        except Empty:
            pass

        try:
            savefile = open(self.save_file, 'w')
            cPickle.dump(saved_data, savefile)
            savefile.close()
            self.logger.info("Saved pending Reporting data")
        except (IOError, TypeError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to save pending data: %s" % err)

    def _load(self):
        """ Load any saved data from a file """
        if not os.path.exists(self.save_file):
            return True
        saved_data = []
        try:
            savefile = open(self.save_file, 'r')
            saved_data = cPickle.load(savefile)
            savefile.close()
        except (IOError, cPickle.UnpicklingError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to load saved data: %s" % err)
            return False
        for interaction in saved_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                self.queue.put_nowait(interaction)
            except Full:
                self.logger.warning("Reporting: Queue is full, failed to "
                                    "load saved interaction data")
                break
        try:
            os.unlink(self.save_file)
        except OSError:
            self.logger.error("Reporting: Failed to unlink save file: %s" %
                              self.save_file)
        self.logger.info("Reporting: Loaded saved interaction data")
        return True
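
From the caller's side the transport is driven by three calls: store() enqueues an interaction (dropping it with a warning when the queue is full), rpc() forwards a method call to the storage backend, and shutdown() stops the worker thread, which then pickles anything still queued to <data>/.saved. A hedged usage sketch, assuming a valid Bcfg2 reporting setup object and ignoring how it is built; the metadata, stats, and RPC method names are illustrative:

# 'setup' is assumed to be an already-parsed Bcfg2 reporting configuration
transport = DirectStore(setup)          # __init__ starts the worker thread

# Queue one interaction; metadata and stats values here are illustrative
transport.store("client1.example.com",
                metadata={"profile": "web"},
                stats="<Statistics/>")

# Forward an arbitrary call to the storage backend (method name illustrative):
# result = transport.rpc("SomeStorageMethod")

transport.shutdown()                    # sets the terminate event
transport.join()                        # wait for run() to drain or _save()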
Example 4
class Core(BuiltinCore):
    """ A multiprocessing core that delegates building the actual
    client configurations to
    :class:`Bcfg2.Server.MultiprocessingCore.ChildCore` objects.  The
    parent process doesn't build any children itself; all calls to
    :func:`GetConfig` are delegated to children. All other calls are
    handled by the parent process. """

    #: How long to wait for a child process to shut down cleanly
    #: before it is terminated.
    shutdown_timeout = 10.0

    def __init__(self, setup):
        BuiltinCore.__init__(self, setup)
        if setup['children'] is None:
            setup['children'] = multiprocessing.cpu_count()

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = Queue(maxsize=self.setup['children'])

        # sigh.  multiprocessing was added in py2.6, which is when the
        # camelCase methods for threading objects were deprecated in
        # favor of the Pythonic under_score methods.  So
        # multiprocessing.Event *only* has is_set(), while
        # threading.Event has *both* isSet() and is_set().  In order
        # to make the core work with Python 2.4+, and with both
        # multiprocessing and threading Event objects, we just
        # monkeypatch self.terminate to have isSet().
        self.terminate = DualEvent(threading_event=self.terminate)

    def _run(self):
        for cnum in range(self.setup['children']):
            name = "Child-%s" % cnum
            (mainpipe, childpipe) = multiprocessing.Pipe()
            self.pipes[name] = mainpipe
            self.logger.debug("Starting child %s" % name)
            childcore = ChildCore(self.setup, childpipe, self.terminate)
            child = multiprocessing.Process(target=childcore.run, name=name)
            child.start()
            self.logger.debug("Child %s started with PID %s" % (name,
                                                                child.pid))
            self.available_children.put(name)
        return BuiltinCore._run(self)

    def shutdown(self):
        BuiltinCore.shutdown(self)
        for child in multiprocessing.active_children():
            self.logger.debug("Shutting down child %s" % child.name)
            child.join(self.shutdown_timeout)
            if child.is_alive():
                self.logger.error("Waited %s seconds to shut down %s, "
                                  "terminating" % (self.shutdown_timeout,
                                                   child.name))
                child.terminate()
            else:
                self.logger.debug("Child %s shut down" % child.name)
        self.logger.debug("All children shut down")

    @exposed
    def GetConfig(self, address):
        client = self.resolve_client(address)[0]
        childname = self.available_children.get()
        self.logger.debug("Building configuration on child %s" % childname)
        pipe = self.pipes[childname]
        pipe.send(client)
        config = pipe.recv()
        self.available_children.put_nowait(childname)
        return config
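
GetConfig() above implements the parent side of a very small protocol: pop an idle child off available_children, send the resolved client name down its pipe, and block on recv() until the rendered configuration comes back. The ChildCore counterpart is not included in this example; a rough sketch of the receiving loop it implies (the BuildConfiguration call and the poll timeout are assumptions, only the pipe protocol is taken from the code above):

def child_loop(core, pipe, terminate):
    # Hypothetical child-side counterpart to Core.GetConfig()
    while not terminate.isSet():
        if not pipe.poll(1):                       # don't block forever on recv()
            continue
        client = pipe.recv()                       # client name sent by GetConfig()
        config = core.BuildConfiguration(client)   # assumed build entry point
        pipe.send(config)                          # answered by GetConfig()'s recv()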
Example 5
class Core(BuiltinCore):
    """ A multiprocessing core that delegates building the actual
    client configurations to
    :class:`Bcfg2.Server.MultiprocessingCore.ChildCore` objects.  The
    parent process doesn't build any children itself; all calls to
    :func:`GetConfig` are delegated to children. All other calls are
    handled by the parent process. """

    #: How long to wait for a child process to shut down cleanly
    #: before it is terminated.
    shutdown_timeout = 10.0

    def __init__(self, setup):
        BuiltinCore.__init__(self, setup)
        if setup['children'] is None:
            setup['children'] = multiprocessing.cpu_count()

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = Queue(maxsize=self.setup['children'])

        # sigh.  multiprocessing was added in py2.6, which is when the
        # camelCase methods for threading objects were deprecated in
        # favor of the Pythonic under_score methods.  So
        # multiprocessing.Event *only* has is_set(), while
        # threading.Event has *both* isSet() and is_set().  In order
        # to make the core work with Python 2.4+, and with both
        # multiprocessing and threading Event objects, we just
        # monkeypatch self.terminate to have isSet().
        self.terminate = DualEvent(threading_event=self.terminate)

    def _run(self):
        for cnum in range(self.setup['children']):
            name = "Child-%s" % cnum
            (mainpipe, childpipe) = multiprocessing.Pipe()
            self.pipes[name] = mainpipe
            self.logger.debug("Starting child %s" % name)
            childcore = ChildCore(self.setup, childpipe, self.terminate)
            child = multiprocessing.Process(target=childcore.run, name=name)
            child.start()
            self.logger.debug("Child %s started with PID %s" %
                              (name, child.pid))
            self.available_children.put(name)
        return BuiltinCore._run(self)

    def shutdown(self):
        BuiltinCore.shutdown(self)
        for child in multiprocessing.active_children():
            self.logger.debug("Shutting down child %s" % child.name)
            child.join(self.shutdown_timeout)
            if child.is_alive():
                self.logger.error("Waited %s seconds to shut down %s, "
                                  "terminating" %
                                  (self.shutdown_timeout, child.name))
                child.terminate()
            else:
                self.logger.debug("Child %s shut down" % child.name)
        self.logger.debug("All children shut down")

    @exposed
    def GetConfig(self, address):
        client = self.resolve_client(address)[0]
        childname = self.available_children.get()
        self.logger.debug("Building configuration on child %s" % childname)
        pipe = self.pipes[childname]
        pipe.send(client)
        config = pipe.recv()
        self.available_children.put_nowait(childname)
        return config
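
The DualEvent wrapper that the long comment in __init__ refers to is not part of this example. As an approximation of the behavior that comment describes, wrapping both Event flavors and answering to isSet() as well as is_set(), it could be sketched like this (the real Bcfg2.Server.MultiprocessingCore.DualEvent may differ in detail):

import multiprocessing
import threading

class DualEvent(object):
    """ Approximation only: one object backed by both a threading and a
    multiprocessing Event, exposing the camelCase isSet() alias. """

    def __init__(self, threading_event=None, multiprocessing_event=None):
        self._threading = threading_event or threading.Event()
        self._multiproc = multiprocessing_event or multiprocessing.Event()

    def is_set(self):
        return self._threading.is_set() or self._multiproc.is_set()

    isSet = is_set          # camelCase alias for older callers

    def set(self):
        self._threading.set()
        self._multiproc.set()

    def clear(self):
        self._threading.clear()
        self._multiproc.clear()

    def wait(self, timeout=None):
        return self._threading.wait(timeout=timeout)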