Code example #1
File: Plugin.py Project: jjneely/bcfg2-solj
 def __init__(self, core, datastore):
     Statistics.__init__(self)
     threading.Thread.__init__(self)
     # Event from the core signaling an exit
     self.terminate = core.terminate
     self.work_queue = Queue(100000)
     self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
     self.daemon = True
     self.start()
Code example #2
File: Snapshots.py Project: m4z/bcfg2
 def __init__(self, core, datastore):
     Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
     Bcfg2.Server.Plugin.Statistics.__init__(self)
     self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
     self.work_queue = Queue()
     self.loader = threading.Thread(target=self.load_snapshot)
     self.loader.start()
Code example #3
File: Plugin.py Project: espro/bcfg2
 def __init__(self, core, datastore):
     Statistics.__init__(self)
     threading.Thread.__init__(self)
     # Event from the core signaling an exit
     self.terminate = core.terminate
     self.work_queue = Queue(100000)
     self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
     self.daemon = True
     self.start()
Code example #4
File: Plugin.py Project: stpierre/bcfg2
 def __init__(self, core, datastore):
     Statistics.__init__(self, core, datastore)
     threading.Thread.__init__(self)
     # Event from the core signaling an exit
     self.terminate = core.terminate
     self.work_queue = Queue(100000)
     self.pending_file = os.path.join(datastore, "etc",
                                      "%s.pending" % self.name)
     self.daemon = False
     self.start()
Code example #5
File: Plugin.py Project: espro/bcfg2
class ThreadedStatistics(Statistics,
                         threading.Thread):
    """Threaded statistics handling capability."""
    def __init__(self, core, datastore):
        Statistics.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
        self.daemon = True
        self.start()

    def save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, data) = self.work_queue.get_nowait()
                try:
                    pending_data.append((metadata.hostname, lxml.etree.tostring(data)))
                except:
                    self.logger.warning("Dropping interaction for %s" % metadata.hostname)
        except Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            pickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.__class__.__name__)
        except:
            self.logger.warning("Failed to save pending data")

    def load(self):
        """Load any pending data to a file."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = pickle.load(savefile)
            savefile.close()
        except Exception:
            e = sys.exc_info()[1]
            self.logger.warning("Failed to load pending data: %s" % e)
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
                        pass

                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False

                self.work_queue.put_nowait((metadata, lxml.etree.fromstring(pdata)))
            except Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError:
                lxml_error = sys.exc_info()[1]
                self.logger.error("Unable to load save interaction: %s" % lxml_error)
            except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
                self.logger.error("Unable to load metadata for save interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except:
            self.logger.error("Failed to unlink save file: %s" % self.pending_file)
        self.logger.info("Loaded pending %s data" % self.__class__.__name__)
        return True

    def run(self):
        if not self.load():
            return
        while not self.terminate.isSet():
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=2)
            except Empty:
                continue
            except Exception:
                e = sys.exc_info()[1]
                self.logger.error("ThreadedStatistics: %s" % e)
                continue
            self.handle_statistics(metadata, data)
        if not self.work_queue.empty():
            self.save()

    def process_statistics(self, metadata, data):
        warned = False
        try:
            self.work_queue.put_nowait((metadata, copy.copy(data)))
            warned = False
        except Full:
            if not warned:
                self.logger.warning("%s: Queue is full.  Dropping interactions." % self.__class__.__name__)
            warned = True

    def handle_statistics(self, metadata, data):
        """Handle stats here."""
        pass
Code example #6
File: Plugin.py Project: jjneely/bcfg2-solj
class ThreadedStatistics(Statistics,
                         threading.Thread):
    """Threaded statistics handling capability."""
    def __init__(self, core, datastore):
        Statistics.__init__(self)
        threading.Thread.__init__(self)
        # Event from the core signaling an exit
        self.terminate = core.terminate
        self.work_queue = Queue(100000)
        self.pending_file = "%s/etc/%s.pending" % (datastore, self.__class__.__name__)
        self.daemon = True
        self.start()

    def save(self):
        """Save any pending data to a file."""
        pending_data = []
        try:
            while not self.work_queue.empty():
                (metadata, data) = self.work_queue.get_nowait()
                try:
                    pending_data.append((metadata.hostname, lxml.etree.tostring(data)))
                except:
                    self.logger.warning("Dropping interaction for %s" % metadata.hostname)
        except Empty:
            pass

        try:
            savefile = open(self.pending_file, 'w')
            pickle.dump(pending_data, savefile)
            savefile.close()
            self.logger.info("Saved pending %s data" % self.__class__.__name__)
        except:
            self.logger.warning("Failed to save pending data")

    def load(self):
        """Load any pending data to a file."""
        if not os.path.exists(self.pending_file):
            return True
        pending_data = []
        try:
            savefile = open(self.pending_file, 'r')
            pending_data = pickle.load(savefile)
            savefile.close()
        except Exception:
            e = sys.exc_info()[1]
            self.logger.warning("Failed to load pending data: %s" % e)
        for (pmetadata, pdata) in pending_data:
            # check that shutdown wasn't called early
            if self.terminate.isSet():
                return False

            try:
                while True:
                    try:
                        metadata = self.core.build_metadata(pmetadata)
                        break
                    except Bcfg2.Server.Plugins.Metadata.MetadataRuntimeError:
                        pass

                    self.terminate.wait(5)
                    if self.terminate.isSet():
                        return False

                self.work_queue.put_nowait((metadata, lxml.etree.fromstring(pdata)))
            except Full:
                self.logger.warning("Queue.Full: Failed to load queue data")
                break
            except lxml.etree.LxmlError:
                lxml_error = sys.exc_info()[1]
                self.logger.error("Unable to load save interaction: %s" % lxml_error)
            except Bcfg2.Server.Plugins.Metadata.MetadataConsistencyError:
                self.logger.error("Unable to load metadata for save interaction: %s" % pmetadata)
        try:
            os.unlink(self.pending_file)
        except:
            self.logger.error("Failed to unlink save file: %s" % self.pending_file)
        self.logger.info("Loaded pending %s data" % self.__class__.__name__)
        return True

    def run(self):
        if not self.load():
            return
        while not self.terminate.isSet():
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=2)
            except Empty:
                continue
            except Exception:
                e = sys.exc_info()[1]
                self.logger.error("ThreadedStatistics: %s" % e)
                continue
            self.handle_statistics(metadata, data)
        if not self.work_queue.empty():
            self.save()

    def process_statistics(self, metadata, data):
        warned = False
        try:
            self.work_queue.put_nowait((metadata, copy.deepcopy(data)))
            warned = False
        except Full:
            if not warned:
                self.logger.warning("%s: Queue is full.  Dropping interactions." % self.__class__.__name__)
            warned = True

    def handle_statistics(self, metadata, data):
        """Handle stats here."""
        pass
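The ThreadedStatistics listings above leave handle_statistics() as a stub: process_statistics() runs on the server side and only enqueues the interaction, while the worker thread started in __init__() drains work_queue and hands each (metadata, data) pair to handle_statistics(). The two full listings differ only in whether process_statistics() copies the uploaded element with copy.copy (espro/bcfg2) or copy.deepcopy (jjneely/bcfg2-solj). A minimal sketch of a concrete subclass follows; the class name ExampleStatistics and its log message are illustrative only and are not part of Bcfg2.

import Bcfg2.Server.Plugin


class ExampleStatistics(Bcfg2.Server.Plugin.ThreadedStatistics):
    """Hypothetical plugin sketch built on the ThreadedStatistics base above."""

    def handle_statistics(self, metadata, data):
        # Runs on the worker thread for each (metadata, data) pair that
        # process_statistics() queued: 'data' is the copied lxml element
        # uploaded by the client, 'metadata' is that client's metadata.
        self.logger.info("Handled statistics upload from %s" % metadata.hostname)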
Code example #7
File: Snapshots.py Project: m4z/bcfg2
class Snapshots(Bcfg2.Server.Plugin.Statistics,
                Bcfg2.Server.Plugin.Plugin):
    name = 'Snapshots'
    experimental = True

    def __init__(self, core, datastore):
        Bcfg2.Server.Plugin.Plugin.__init__(self, core, datastore)
        Bcfg2.Server.Plugin.Statistics.__init__(self)
        self.session = Bcfg2.Server.Snapshots.setup_session(core.cfile)
        self.work_queue = Queue()
        self.loader = threading.Thread(target=self.load_snapshot)
        self.loader.start()

    def load_snapshot(self):
        while self.running:
            try:
                (metadata, data) = self.work_queue.get(block=True, timeout=5)
            except:
                continue
            self.statistics_from_old_stats(metadata, data)

    def process_statistics(self, metadata, data):
        return self.work_queue.put((metadata, data))

    def statistics_from_old_stats(self, metadata, xdata):
        # entries are name -> (modified, correct, start, desired, end)
        # not sure we can get all of this from old format stats
        t1 = time.time()
        entries = dict([('Package', dict()),
                        ('Service', dict()), ('Path', dict())])
        extra = dict([('Package', dict()), ('Service', dict()),
                      ('Path', dict())])
        bad = []
        state = xdata.find('.//Statistics')
        correct = state.get('state') == 'clean'
        revision = u_str(state.get('revision', '-1'))
        for entry in state.find('.//Bad'):
            data = [False, False, u_str(entry.get('name'))] \
                   + build_snap_ent(entry)
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            entries[etag][entry.get('name')] = data
        for entry in state.find('.//Modified'):
            if entry.tag in ftypes:
                etag = 'Path'
            else:
                etag = entry.tag
            if entry.get('name') in entries[etag]:
                data = [True, False, u_str(entry.get('name'))] + \
                       build_snap_ent(entry)
            else:
                data = [True, False, u_str(entry.get('name'))] + \
                       build_snap_ent(entry)
        for entry in state.find('.//Extra'):
            if entry.tag in datafields:
                data = build_snap_ent(entry)[1]
                ename = u_str(entry.get('name'))
                data['name'] = ename
                extra[entry.tag][ename] = data
            else:
                print("extra", entry.tag, entry.get('name'))
        t2 = time.time()
        snap = Snapshot.from_data(self.session, correct, revision,
                                  metadata, entries, extra)
        self.session.add(snap)
        self.session.commit()
        t3 = time.time()
        logger.info("Snapshot storage took %fs" % (t3 - t2))
        return True
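statistics_from_old_stats() reads the old upload format straight from the lxml tree: the .//Statistics element supplies the state and revision attributes, Bad entries are stored under their name attribute (with file-like tags folded into 'Path' via the module-level ftypes list), and Extra entries are kept only when their tag appears in datafields. A minimal sketch of the document shape it expects follows; the wrapper element, the tag choices, and all attribute values here are invented for illustration.

import lxml.etree

# Hypothetical old-format statistics upload: only the element and attribute
# names that statistics_from_old_stats() reads are meaningful; the concrete
# tags and values are made up.
OLD_STATS = """
<ClientStatistics>
  <Statistics state="dirty" revision="1234">
    <Bad>
      <Package name="openssh"/>
    </Bad>
    <Modified>
      <Service name="sshd"/>
    </Modified>
    <Extra>
      <Package name="telnet"/>
    </Extra>
  </Statistics>
</ClientStatistics>
"""

xdata = lxml.etree.fromstring(OLD_STATS)
state = xdata.find('.//Statistics')
print(state.get('state') == 'clean')                   # False: the interaction was not clean
print(state.get('revision', '-1'))                     # '1234'
print([e.get('name') for e in state.find('.//Bad')])   # ['openssh']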