Example #1
    def __init__(self, setup):
        BuiltinCore.__init__(self, setup)
        if setup['children'] is None:
            setup['children'] = multiprocessing.cpu_count()

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = Queue(maxsize=self.setup['children'])

        # sigh.  multiprocessing was added in py2.6, which is when the
        # camelCase methods for threading objects were deprecated in
        # favor of the Pythonic under_score methods.  So
        # multiprocessing.Event *only* has is_set(), while
        # threading.Event has *both* isSet() and is_set().  In order
        # to make the core work with Python 2.4+, and with both
        # multiprocessing and threading Event objects, we just
        # monkeypatch self.terminate to have isSet().
        self.terminate = DualEvent(threading_event=self.terminate)
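
The DualEvent class referenced above is not part of this listing. As a rough
sketch of the idea only (not the actual Bcfg2 implementation), a wrapper that
keeps a threading.Event and a multiprocessing.Event in sync and answers to
both isSet() and is_set() could look like this:

import multiprocessing
import threading


class DualEventSketch(object):
    """Sketch only: mirror one flag onto a threading.Event and a
    multiprocessing.Event, exposing both isSet() and is_set()."""

    def __init__(self, threading_event=None):
        self._threading_event = threading_event or threading.Event()
        self._multiproc_event = multiprocessing.Event()
        if self._threading_event.is_set():
            self._multiproc_event.set()

    def is_set(self):
        # The multiprocessing event is visible to child processes,
        # so treat it as authoritative.
        return self._multiproc_event.is_set()

    isSet = is_set  # camelCase alias for callers of the pre-2.6 API

    def set(self):
        self._threading_event.set()
        self._multiproc_event.set()

    def clear(self):
        self._threading_event.clear()
        self._multiproc_event.clear()

    def wait(self, timeout=None):
        return self._multiproc_event.wait(timeout=timeout)
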
Example #2
    def __init__(self, setup):
        TransportBase.__init__(self, setup)
        threading.Thread.__init__(self)
        self.save_file = os.path.join(self.data, ".saved")
        self.storage = load_storage_from_config(setup)
        self.queue = Queue(100000)
        self.terminate = threading.Event()
        self.start()
Example #3
    def __init__(self, core, datastore):
        Statistics.__init__(self, core, datastore)
        Threaded.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = os.path.join(datastore, "etc",
                                         "%s.pending" % self.name)
        self.daemon = False
Example #4
    def __init__(self, setup):
        TransportBase.__init__(self, setup)
        threading.Thread.__init__(self)
        self.save_file = os.path.join(self.data, ".saved")

        self.storage = load_storage_from_config(setup)
        self.storage.validate()

        self.queue = Queue(100000)
        self.terminate = threading.Event()
        self.debug_log("Reporting: Starting %s thread" %
                       self.__class__.__name__)
        self.start()
Example #5
    def __init__(self):
        TransportBase.__init__(self)
        threading.Thread.__init__(self)
        self.save_file = os.path.join(self.data, ".saved")

        self.storage = Bcfg2.Options.setup.reporting_storage()
        self.storage.validate()

        self.queue = Queue(100000)
        self.terminate = threading.Event()
        self.debug_log("Reporting: Starting %s thread" %
                       self.__class__.__name__)
        self.start()
Example #6
    def __init__(self, setup):
        BuiltinCore.__init__(self, setup)
        if setup['children'] is None:
            setup['children'] = multiprocessing.cpu_count()

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = Queue(maxsize=self.setup['children'])

        # sigh.  multiprocessing was added in py2.6, which is when the
        # camelCase methods for threading objects were deprecated in
        # favor of the Pythonic under_score methods.  So
        # multiprocessing.Event *only* has is_set(), while
        # threading.Event has *both* isSet() and is_set().  In order
        # to make the core work with Python 2.4+, and with both
        # multiprocessing and threading Event objects, we just
        # monkeypatch self.terminate to have isSet().
        self.terminate = DualEvent(threading_event=self.terminate)
Example #7
    def __init__(self):
        BuiltinCore.__init__(self)

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = \
            Queue(maxsize=Bcfg2.Options.setup.core_children)

        #: The flag that indicates when to stop child threads and
        #: processes
        self.terminate = DualEvent(threading_event=self.terminate)

        #: A :class:`Bcfg2.Server.MultiprocessingCore.RPCQueue` object
        #: used to send or publish commands to children.
        self.rpc_q = RPCQueue()

        #: A list of children that will be cycled through
        self._all_children = []

        #: An iterator that each child will be taken from in sequence,
        #: to provide a round-robin distribution of render requests
        self.children = None
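
The round-robin iterator described by the last two docstrings is commonly
built with itertools.cycle; a minimal sketch (the child names are invented
for illustration):

import itertools

# Cycle endlessly over the pool of child names, handing out the next
# child for each render request.
all_children = ["Child-0", "Child-1", "Child-2"]
children = itertools.cycle(all_children)

print(next(children))  # Child-0
print(next(children))  # Child-1
print(next(children))  # Child-2
print(next(children))  # back to Child-0
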
Example #8
    def __init__(self, core, datastore):
        Statistics.__init__(self, core, datastore)
        Threaded.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = os.path.join(datastore, "etc",
                                         "%s.pending" % self.name)
        self.daemon = False
Example #9
    def __init__(self):
        TransportBase.__init__(self)
        threading.Thread.__init__(self)
        self.save_file = os.path.join(self.data, ".saved")

        self.storage = Bcfg2.Options.setup.reporting_storage()
        self.storage.validate()

        self.queue = Queue(100000)
        self.terminate = threading.Event()
        self.debug_log("Reporting: Starting %s thread" %
                       self.__class__.__name__)
        self.start()
Example #10
class ThreadedStatistics(Statistics, Threaded, threading.Thread):
    """ ThreadedStatistics plugins process client statistics in a
    separate thread. """

    def __init__(self, core, datastore):
        Statistics.__init__(self, core, datastore)
        Threaded.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = os.path.join(datastore, "etc",
                                         "%s.pending" % self.name)
        self.daemon = False

    def start_threads(self):
        self.start()

    def _save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, data) = self.work_queue.get_nowait()
                pending_data.append(
                    (metadata.hostname,
                     lxml.etree.tostring(
                            data,
                            xml_declaration=False).decode("UTF-8")))
        except Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            cPickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.name)
        except (IOError, TypeError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to save pending data: %s" % err)

    def _load(self):
        """Load any pending data from a file."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = cPickle.load(savefile)
            savefile.close()
        except (IOError, cPickle.UnpicklingError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to load pending data: %s" % err)
            return False
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except MetadataRuntimeError:
                        pass

                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False

                self.work_queue.put_nowait(
                    (metadata,
                     lxml.etree.XML(pdata, parser=Bcfg2.Server.XMLParser)))
            except Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError:
                lxml_error = sys.exc_info()[1]
                self.logger.error("Unable to load saved interaction: %s" %
                                  lxml_error)
            except MetadataConsistencyError:
                self.logger.error("Unable to load metadata for save "
                                  "interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except OSError:
            self.logger.error("Failed to unlink save file: %s" %
                              self.pending_file)
        self.logger.info("Loaded pending %s data" % self.name)
        return True

    def run(self):
        if not self._load():
            return
        while not self.terminate.isSet() and self.work_queue is not None:
            try:
                (client, xdata) = self.work_queue.get(block=True, timeout=2)
            except Empty:
                continue
            except:
                err = sys.exc_info()[1]
                self.logger.error("ThreadedStatistics: %s" % err)
                continue
            self.handle_statistic(client, xdata)
        if self.work_queue is not None and not self.work_queue.empty():
            self._save()

    def process_statistics(self, metadata, data):
        try:
            self.work_queue.put_nowait((metadata, copy.copy(data)))
        except Full:
            self.logger.warning("%s: Queue is full.  Dropping interactions." %
                                self.name)

    def handle_statistic(self, metadata, data):
        """ Process the given XML statistics data for the specified
        client object.  This differs from the
        :func:`Statistics.process_statistics` method only in that
        ThreadedStatistics first adds the data to a queue, and then
        processes them in a separate thread.

        :param metadata: The client metadata
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param data: The statistics data
        :type data: lxml.etree._Element
        :return: None
        """
        raise NotImplementedError
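
handle_statistic() is the only method a concrete plugin has to supply. A
minimal, hypothetical subclass of the ThreadedStatistics class shown above
(the plugin name, the XPath expressions, and the counting logic are invented
for illustration only) might look like:

class EntryCountStatistics(ThreadedStatistics):
    """Hypothetical plugin: count bad/modified entries per client."""

    def handle_statistic(self, metadata, data):
        # ``data`` is the lxml element queued by process_statistics()
        bad = len(data.findall(".//Bad/*"))
        modified = len(data.findall(".//Modified/*"))
        self.logger.info("%s: %d bad, %d modified entries" %
                         (metadata.hostname, bad, modified))
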
Example #11
class Core(BuiltinCore):
    """ A multiprocessing core that delegates building the actual
    client configurations to
    :class:`Bcfg2.Server.MultiprocessingCore.ChildCore` objects.  The
    parent process doesn't build any children itself; all calls to
    :func:`GetConfig` are delegated to children. All other calls are
    handled by the parent process. """

    #: How long to wait for a child process to shut down cleanly
    #: before it is terminated.
    shutdown_timeout = 10.0

    def __init__(self, setup):
        BuiltinCore.__init__(self, setup)
        if setup['children'] is None:
            setup['children'] = multiprocessing.cpu_count()

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = Queue(maxsize=self.setup['children'])

        # sigh.  multiprocessing was added in py2.6, which is when the
        # camelCase methods for threading objects were deprecated in
        # favor of the Pythonic under_score methods.  So
        # multiprocessing.Event *only* has is_set(), while
        # threading.Event has *both* isSet() and is_set().  In order
        # to make the core work with Python 2.4+, and with both
        # multiprocessing and threading Event objects, we just
        # monkeypatch self.terminate to have isSet().
        self.terminate = DualEvent(threading_event=self.terminate)

    def _run(self):
        for cnum in range(self.setup['children']):
            name = "Child-%s" % cnum
            (mainpipe, childpipe) = multiprocessing.Pipe()
            self.pipes[name] = mainpipe
            self.logger.debug("Starting child %s" % name)
            childcore = ChildCore(self.setup, childpipe, self.terminate)
            child = multiprocessing.Process(target=childcore.run, name=name)
            child.start()
            self.logger.debug("Child %s started with PID %s" % (name,
                                                                child.pid))
            self.available_children.put(name)
        return BuiltinCore._run(self)

    def shutdown(self):
        BuiltinCore.shutdown(self)
        for child in multiprocessing.active_children():
            self.logger.debug("Shutting down child %s" % child.name)
            child.join(self.shutdown_timeout)
            if child.is_alive():
                self.logger.error("Waited %s seconds to shut down %s, "
                                  "terminating" % (self.shutdown_timeout,
                                                   child.name))
                child.terminate()
            else:
                self.logger.debug("Child %s shut down" % child.name)
        self.logger.debug("All children shut down")

    @exposed
    def GetConfig(self, address):
        client = self.resolve_client(address)[0]
        childname = self.available_children.get()
        self.logger.debug("Building configuration on child %s" % childname)
        pipe = self.pipes[childname]
        pipe.send(client)
        config = pipe.recv()
        self.available_children.put_nowait(childname)
        return config
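
The ChildCore end of the Pipe protocol used by GetConfig() is not shown in
this listing. As a hedged sketch, each child would loop roughly as follows
until the shared terminate event is set (build_configuration is an invented
stand-in for whatever renders the client's configuration):

def child_loop(pipe, terminate, build_configuration):
    """Sketch of the child end of the Pipe used by GetConfig() above."""
    while not terminate.is_set():
        # Poll with a timeout so the loop can notice terminate being set.
        if not pipe.poll(1.0):
            continue
        client = pipe.recv()       # hostname sent by the parent's GetConfig()
        config = build_configuration(client)
        pipe.send(config)          # the parent blocks on pipe.recv() for this
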
Example #12
class Snapshots(Bcfg2.Server.Plugin.Statistics):
    name = 'Snapshots'
    deprecated = True

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
        self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
        self.work_queue = Queue()
        self.loader = threading.Thread(target=self.load_snapshot)

    def start_threads(self):
        self.loader.start()

    def load_snapshot(self):
        while self.running:
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=5)
            except:
                continue
            self.statistics_from_old_stats(metadata, data)

    def process_statistics(self, metadata, data):
        return self.work_queue.put((metadata, data))

    def statistics_from_old_stats(self, metadata, xdata):
        # entries are name -> (modified, correct, start, desired, end)
        # not sure we can get all of this from old format stats
        t1 = time.time()
        entries = dict([('Package', dict()),
                        ('Service', dict()), ('Path', dict())])
        extra = dict([('Package', dict()), ('Service', dict()),
                      ('Path', dict())])
        bad = []
        state = xdata.find('.//Statistics')
        correct = state.get('state') == 'clean'
        revision = u_str(state.get('revision', '-1'))
        for entry in state.find('.//Bad'):
            data = [False, False, u_str(entry.get('name'))] \
                   + build_snap_ent(entry)
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            entries[etag][entry.get('name')] = data
        for entry in state.find('.//Modified'):
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            if entry.get('name') in entries[etag]:
                data = [True, False, u_str(entry.get('name'))] + \
                       build_snap_ent(entry)
            else:
                data = [True, False, u_str(entry.get('name'))] + \
                       build_snap_ent(entry)
        for entry in state.find('.//Extra'):
            if entry.tag in datafields:
                data = build_snap_ent(entry)[1]
                ename = u_str(entry.get('name'))
                data['name'] = ename
                extra[entry.tag][ename] = data
            else:
                print("extra", entry.tag, entry.get('name'))
        t2 = time.time()
        snap = Snapshot.from_data(self.session, correct, revision,
                                  metadata, entries, extra)
        self.session.add(snap)
        self.session.commit()
        t3 = time.time()
        logger.info("Snapshot storage took %fs" % (t3 - t2))
        return True
Example #13
class DirectStore(TransportBase, threading.Thread):
    def __init__(self, setup):
        TransportBase.__init__(self, setup)
        threading.Thread.__init__(self)
        self.save_file = os.path.join(self.data, ".saved")
        self.storage = load_storage_from_config(setup)
        self.queue = Queue(100000)
        self.terminate = threading.Event()
        self.start()

    def shutdown(self):
        self.terminate.set()

    def store(self, hostname, metadata, stats):
        try:
            self.queue.put_nowait(dict(
                    hostname=hostname,
                    metadata=metadata,
                    stats=stats))
        except Full:
            self.logger.warning("Reporting: Queue is full, "
                                "dropping statistics")

    def run(self):
        if not self._load():
            return
        while not self.terminate.isSet() and self.queue is not None:
            try:
                interaction = self.queue.get(block=True,
                                             timeout=self.timeout)
                self.storage.import_interaction(interaction)
            except Empty:
                continue
            except:
                err = sys.exc_info()[1]
                self.logger.error("Reporting: Could not import interaction: %s"
                                  % err)
                continue
        if self.queue is not None and not self.queue.empty():
            self._save()

    def fetch(self):
        """ no collector is necessary with this backend """
        pass

    def start_monitor(self, collector):
        """ no collector is necessary with this backend """
        pass

    def rpc(self, method, *args, **kwargs):
        try:
            return getattr(self.storage, method)(*args, **kwargs)
        except:  # pylint: disable=W0702
            msg = "Reporting: RPC method %s failed: %s" % (method,
                                                           sys.exc_info()[1])
            self.logger.error(msg)
            raise TransportError(msg)

    def _save(self):
        """ Save any saved data to a file """
        saved_data = []
        try:
            while not self.queue.empty():
                saved_data.append(self.queue.get_nowait())
        except Empty:
            pass

        try:
            savefile = open(self.save_file, 'w')
            cPickle.dump(saved_data, savefile)
            savefile.close()
            self.logger.info("Saved pending Reporting data")
        except (IOError, TypeError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to save pending data: %s" % err)

    def _load(self):
        """ Load any saved data from a file """
        if not os.path.exists(self.save_file):
            return True
        saved_data = []
        try:
            savefile = open(self.save_file, 'r')
            saved_data = cPickle.load(savefile)
            savefile.close()
        except (IOError, cPickle.UnpicklingError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to load saved data: %s" % err)
            return False
        for interaction in saved_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                self.queue.put_nowait(interaction)
            except Full:
                self.logger.warning("Reporting: Queue is full, failed to "
                                    "load saved interaction data")
                break
        try:
            os.unlink(self.save_file)
        except OSError:
            self.logger.error("Reporting: Failed to unlink save file: %s" %
                              self.save_file)
        self.logger.info("Reporting: Loaded saved interaction data")
        return True
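
_save() and _load() above implement a simple spill-to-disk pattern for the
in-memory queue. Isolated from the transport, the same round trip looks
roughly like this (a sketch in Python 3 spelling, where cPickle is pickle
and the file must be opened in binary mode):

import os
import pickle
from queue import Empty, Full


def spill_queue(queue, path):
    """Drain a queue into a pickle file (cf. DirectStore._save above)."""
    items = []
    try:
        while not queue.empty():
            items.append(queue.get_nowait())
    except Empty:
        pass
    with open(path, "wb") as savefile:  # binary mode is required by pickle
        pickle.dump(items, savefile)


def restore_queue(queue, path):
    """Refill a queue from a pickle file (cf. DirectStore._load above)."""
    if not os.path.exists(path):
        return True
    with open(path, "rb") as savefile:
        items = pickle.load(savefile)
    for item in items:
        try:
            queue.put_nowait(item)
        except Full:
            return False
    os.unlink(path)
    return True
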
Example #14
    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
        self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
        self.work_queue = Queue()
        self.loader = threading.Thread(target=self.load_snapshot)
        self.loader.start()
Example #15
    def __init__(self):
        Bcfg2.Server.Plugin.Debuggable.__init__(self)
        self._terminate = threading.Event()
        self._queues = dict()
        self._available_listeners = Queue()
        self._blocking_listeners = []
Example #16
class ThreadedStatistics(Statistics, Threaded, threading.Thread):
    """ ThreadedStatistics plugins process client statistics in a
    separate thread. """
    def __init__(self, core, datastore):
        Statistics.__init__(self, core, datastore)
        Threaded.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = os.path.join(datastore, "etc",
                                         "%s.pending" % self.name)
        self.daemon = False

    def start_threads(self):
        self.start()

    def _save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, xdata) = self.work_queue.get_nowait()
                data = \
                    lxml.etree.tostring(xdata,
                                        xml_declaration=False).decode("UTF-8")
                pending_data.append((metadata.hostname, data))
        except Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            cPickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.name)
        except (IOError, TypeError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to save pending data: %s" % err)

    def _load(self):
        """Load any pending data from a file."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = cPickle.load(savefile)
            savefile.close()
        except (IOError, cPickle.UnpicklingError):
            err = sys.exc_info()[1]
            self.logger.warning("Failed to load pending data: %s" % err)
            return False
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except MetadataRuntimeError:
                        pass

                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False

                self.work_queue.put_nowait(
                    (metadata,
                     lxml.etree.XML(pdata, parser=Bcfg2.Server.XMLParser)))
            except Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError:
                lxml_error = sys.exc_info()[1]
                self.logger.error("Unable to load saved interaction: %s" %
                                  lxml_error)
            except MetadataConsistencyError:
                self.logger.error("Unable to load metadata for save "
                                  "interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except OSError:
            self.logger.error("Failed to unlink save file: %s" %
                              self.pending_file)
        self.logger.info("Loaded pending %s data" % self.name)
        return True

    def run(self):
        if not self._load():
            return
        while not self.terminate.isSet() and self.work_queue is not None:
            try:
                (client, xdata) = self.work_queue.get(block=True, timeout=2)
            except Empty:
                continue
            except:
                err = sys.exc_info()[1]
                self.logger.error("ThreadedStatistics: %s" % err)
                continue
            self.handle_statistic(client, xdata)
        if self.work_queue is not None and not self.work_queue.empty():
            self._save()

    def process_statistics(self, metadata, data):
        try:
            self.work_queue.put_nowait((metadata, copy.copy(data)))
        except Full:
            self.logger.warning("%s: Queue is full.  Dropping interactions." %
                                self.name)

    def handle_statistic(self, metadata, data):
        """ Process the given XML statistics data for the specified
        client object.  This differs from the
        :func:`Statistics.process_statistics` method only in that
        ThreadedStatistics first adds the data to a queue, and then
        processes them in a separate thread.

        :param metadata: The client metadata
        :type metadata: Bcfg2.Server.Plugins.Metadata.ClientMetadata
        :param data: The statistics data
        :type data: lxml.etree._Element
        :return: None
        """
        raise NotImplementedError
Example #17
    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
        self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
        self.work_queue = Queue()
        self.loader = threading.Thread(target=self.load_snapshot)
Example #18
class Core(BuiltinCore):
    """ A multiprocessing core that delegates building the actual
    client configurations to
    :class:`Bcfg2.Server.MultiprocessingCore.ChildCore` objects.  The
    parent process doesn't build any children itself; all calls to
    :func:`GetConfig` are delegated to children. All other calls are
    handled by the parent process. """

    #: How long to wait for a child process to shut down cleanly
    #: before it is terminated.
    shutdown_timeout = 10.0

    def __init__(self, setup):
        BuiltinCore.__init__(self, setup)
        if setup['children'] is None:
            setup['children'] = multiprocessing.cpu_count()

        #: A dict of child name -> one end of the
        #: :class:`multiprocessing.Pipe` object used to communicate
        #: with that child.  (The child is given the other end of the
        #: Pipe.)
        self.pipes = dict()

        #: A queue that keeps track of which children are available to
        #: render a configuration.  A child is popped from the queue
        #: when it starts to render a config, then it's pushed back on
        #: when it's done.  This lets us use a blocking call to
        #: :func:`Queue.Queue.get` when waiting for an available
        #: child.
        self.available_children = Queue(maxsize=self.setup['children'])

        # sigh.  multiprocessing was added in py2.6, which is when the
        # camelCase methods for threading objects were deprecated in
        # favor of the Pythonic under_score methods.  So
        # multiprocessing.Event *only* has is_set(), while
        # threading.Event has *both* isSet() and is_set().  In order
        # to make the core work with Python 2.4+, and with both
        # multiprocessing and threading Event objects, we just
        # monkeypatch self.terminate to have isSet().
        self.terminate = DualEvent(threading_event=self.terminate)

    def _run(self):
        for cnum in range(self.setup['children']):
            name = "Child-%s" % cnum
            (mainpipe, childpipe) = multiprocessing.Pipe()
            self.pipes[name] = mainpipe
            self.logger.debug("Starting child %s" % name)
            childcore = ChildCore(self.setup, childpipe, self.terminate)
            child = multiprocessing.Process(target=childcore.run, name=name)
            child.start()
            self.logger.debug("Child %s started with PID %s" %
                              (name, child.pid))
            self.available_children.put(name)
        return BuiltinCore._run(self)

    def shutdown(self):
        BuiltinCore.shutdown(self)
        for child in multiprocessing.active_children():
            self.logger.debug("Shutting down child %s" % child.name)
            child.join(self.shutdown_timeout)
            if child.is_alive():
                self.logger.error("Waited %s seconds to shut down %s, "
                                  "terminating" %
                                  (self.shutdown_timeout, child.name))
                child.terminate()
            else:
                self.logger.debug("Child %s shut down" % child.name)
        self.logger.debug("All children shut down")

    @exposed
    def GetConfig(self, address):
        client = self.resolve_client(address)[0]
        childname = self.available_children.get()
        self.logger.debug("Building configuration on child %s" % childname)
        pipe = self.pipes[childname]
        pipe.send(client)
        config = pipe.recv()
        self.available_children.put_nowait(childname)
        return config
Example #19
class Snapshots(Bcfg2.Server.Plugin.Statistics):
    name = 'Snapshots'
    deprecated = True

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Statistics.__init__(self, core, datastore)
        self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
        self.work_queue = Queue()
        self.loader = threading.Thread(target=self.load_snapshot)
        self.loader.start()

    def load_snapshot(self):
        while self.running:
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=5)
            except:
                continue
            self.statistics_from_old_stats(metadata, data)

    def process_statistics(self, metadata, data):
        return self.work_queue.put((metadata, data))

    def statistics_from_old_stats(self, metadata, xdata):
        # entries are name -> (modified, correct, start, desired, end)
        # not sure we can get all of this from old format stats
        t1 = time.time()
        entries = dict([('Package', dict()), ('Service', dict()),
                        ('Path', dict())])
        extra = dict([('Package', dict()), ('Service', dict()),
                      ('Path', dict())])
        bad = []
        state = xdata.find('.//Statistics')
        correct = state.get('state') == 'clean'
        revision = u_str(state.get('revision', '-1'))
        for entry in state.find('.//Bad'):
            data = [False, False, u_str(entry.get('name'))] \
                   + build_snap_ent(entry)
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            entries[etag][entry.get('name')] = data
        for entry in state.find('.//Modified'):
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            if entry.get('name') in entries[etag]:
                data = [True, False, u_str(entry.get('name'))] + \
                       build_snap_ent(entry)
            else:
                data = [True, False, u_str(entry.get('name'))] + \
                       build_snap_ent(entry)
        for entry in state.find('.//Extra'):
            if entry.tag in datafields:
                data = build_snap_ent(entry)[1]
                ename = u_str(entry.get('name'))
                data['name'] = ename
                extra[entry.tag][ename] = data
            else:
                print("extra", entry.tag, entry.get('name'))
        t2 = time.time()
        snap = Snapshot.from_data(self.session, correct, revision, metadata,
                                  entries, extra)
        self.session.add(snap)
        self.session.commit()
        t3 = time.time()
        logger.info("Snapshot storage took %fs" % (t3 - t2))
        return True
Example #20
class RPCQueue(Bcfg2.Server.Plugin.Debuggable):
    """ An implementation of a :class:`multiprocessing.Queue` designed
    for several additional use patterns:

    * Random-access reads, based on a key that identifies the data;
    * Publish-subscribe, where a datum is sent to all hosts.

    The subscribers can deal with this as a normal Queue with no
    special handling.
    """
    poll_wait = 3.0

    def __init__(self):
        Bcfg2.Server.Plugin.Debuggable.__init__(self)
        self._terminate = threading.Event()
        self._queues = dict()
        self._available_listeners = Queue()
        self._blocking_listeners = []

    def add_subscriber(self, name):
        """ Add a subscriber to the queue.  This returns the
        :class:`multiprocessing.Queue` object that the subscriber
        should read from.  """
        self._queues[name] = multiprocessing.Queue()
        return self._queues[name]

    def publish(self, method, args=None, kwargs=None):
        """ Publish an RPC call to the queue for consumption by all
        subscribers. """
        for queue in self._queues.values():
            queue.put((None, (method, args or [], kwargs or dict())))

    def rpc(self, dest, method, args=None, kwargs=None):
        """ Make an RPC call to the named subscriber, expecting a
        response.  This opens a
        :class:`multiprocessing.connection.Listener` and passes the
        Listener address to the child as part of the RPC call, so that
        the child can connect to the Listener to submit its results.

        Listeners are reused when possible to minimize overhead.
        """
        try:
            listener = self._available_listeners.get_nowait()
            self.logger.debug("Reusing existing RPC listener at %s" %
                              listener.address)
        except Empty:
            listener = Listener()
            self.logger.debug("Created new RPC listener at %s" %
                              listener.address)
        self._blocking_listeners.append(listener)
        try:
            self._queues[dest].put((listener.address,
                                    (method, args or [], kwargs or dict())))
            conn = listener.accept()
            self._blocking_listeners.remove(listener)
            try:
                while not self._terminate.is_set():
                    if conn.poll(self.poll_wait):
                        return conn.recv()
            finally:
                conn.close()
        finally:
            self._available_listeners.put(listener)

    def close(self):
        """ Close queues and connections. """
        self._terminate.set()
        self.logger.debug("Closing RPC queues")
        for name, queue in self._queues.items():
            self.logger.debug("Closing RPC queue to %s" % name)
            queue.close()

        # close any listeners that are waiting for connections
        self.logger.debug("Closing RPC connections")
        for listener in self._blocking_listeners:
            self.logger.debug("Closing RPC connection at %s" %
                              listener.address)
            listener.close()

        self.logger.debug("Closing RPC listeners")
        try:
            while True:
                listener = self._available_listeners.get_nowait()
                self.logger.debug("Closing RPC listener at %s" %
                                  listener.address)
                listener.close()
        except Empty:
            pass
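
The subscriber end of the rpc() handshake is not included in this listing.
As a rough sketch only, a child consuming the queue returned by
add_subscriber() would dispatch each call and, when a Listener address is
supplied, connect back to deliver the result (dispatch is an invented
stand-in for the child's method lookup; the Empty import uses the Python 3
spelling of the Queue module):

from multiprocessing.connection import Client
from queue import Empty


def subscriber_loop(queue, dispatch, terminate):
    """Sketch of a subscriber consuming the queue from add_subscriber().

    Each item is (listener_address, (method, args, kwargs)); a None
    address means the call came from publish() and no reply is expected.
    """
    while not terminate.is_set():
        try:
            address, (method, args, kwargs) = queue.get(timeout=3.0)
        except Empty:
            continue
        result = dispatch(method, *args, **kwargs)
        if address is not None:
            # Connect back to the parent's Listener; rpc() above receives
            # this value via conn.recv().
            conn = Client(address)
            try:
                conn.send(result)
            finally:
                conn.close()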