Example #1
    def import_cluster_data(self, db, cluster_name):
        """Import from sources all data specific to a cluster."""

        cluster = None

        #
        # The Architecture Importer is responsible both for importing and
        # updating data about the cluster and its nodes in the database, and
        # for creating the Cluster object used by the other importers.
        #
        logger.info("updating architecture for cluster %s", cluster_name)
        self.arch = \
          ArchitectureImporterFactory.factory(self, db, self.conf,
                                              cluster_name)
        self.arch.load()
        self.arch.update()

        cluster = self.arch.cluster

        # check that cluster has been properly created and initialized
        if cluster is None or cluster.cluster_id is None:
            raise HPCStatsRuntimeError("problem in DB with cluster %s" %
                                       (str(cluster)))

        logger.info("updating users for cluster %s", cluster.name)
        self.users = \
          UserImporterFactory.factory(self, db, self.conf, cluster)
        self.users.load()
        self.users.update()

        logger.info("updating filesystem usage for cluster %s", cluster.name)
        self.fsusage = \
          FSUsageImporterFactory.factory(self, db, self.conf, cluster)
        self.fsusage.load()
        self.fsusage.update()

        logger.info("updating filesystem quota for cluster %s", cluster.name)
        self.fsquota = \
          FSQuotaImporterFactory.factory(self, db, self.conf, cluster)
        self.fsquota.load()
        self.fsquota.update()

        logger.info("updating events for cluster %s", cluster.name)
        self.events = \
          EventImporterFactory.factory(self, db, self.conf, cluster)
        self.events.load()
        self.events.update()

        logger.info("updating jobs for cluster %s", cluster.name)
        self.jobs = JobImporterFactory.factory(self, db, self.conf, cluster)
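        # unlike the other importers, jobs are loaded and updated through an
        # update window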
        self.jobs.load_update_window()
Example #2
    def import_cluster_data(self, db, cluster_name):
        """Import from sources all data specific to a cluster."""

        cluster = None

        #
        # The Architecture Importer is responsible both for importing and
        # updating data about the cluster and its nodes in the database, and
        # for creating the Cluster object used by the other importers.
        #
        logger.info("updating architecture for cluster %s", cluster_name)
        self.arch = \
          ArchitectureImporterFactory.factory(self, db, self.conf,
                                              cluster_name)
        self.arch.load()
        self.arch.update()

        cluster = self.arch.cluster

        # check that cluster has been properly created and initialized
        if cluster is None or cluster.cluster_id is None:
            raise HPCStatsRuntimeError("problem in DB with cluster %s" %
                                       (str(cluster)))

        logger.info("updating users for cluster %s", cluster.name)
        self.users = \
          UserImporterFactory.factory(self, db, self.conf, cluster)
        self.users.load()
        self.users.update()

        logger.info("updating filesystem usage for cluster %s", cluster.name)
        self.fsusage = \
          FSUsageImporterFactory.factory(self, db, self.conf, cluster)
        self.fsusage.load()
        self.fsusage.update()

        logger.info("updating events for cluster %s", cluster.name)
        self.events = \
          EventImporterFactory.factory(self, db, self.conf, cluster)
        self.events.load()
        self.events.update()

        logger.info("updating jobs for cluster %s", cluster.name)
        self.jobs = JobImporterFactory.factory(self, db, self.conf, cluster)
        self.jobs.load_update_window()
Example #3
    def check_cluster_sources(self, db, cluster_name):
        """Check data sources for a cluster."""

        cluster = None

        logger.info("checking architecture source for cluster %s",
                    cluster_name)
        self.arch = \
          ArchitectureImporterFactory.factory(self, db, self.conf,
                                              cluster_name)
        self.arch.check()

        cluster = Cluster(cluster_name)

        logger.info("checking users source for cluster %s", cluster.name)
        self.users = \
          UserImporterFactory.factory(self, db, self.conf, cluster)
        self.users.check()

        logger.info("checking filesystem usage source for cluster %s",
                    cluster.name)
        self.fsusage = \
          FSUsageImporterFactory.factory(self, db, self.conf, cluster)
        self.fsusage.check()

        logger.info("checking filesystem quota source for cluster %s",
                    cluster.name)
        self.fsquota = \
          FSQuotaImporterFactory.factory(self, db, self.conf, cluster)
        self.fsquota.check()

        logger.info("checking events source for cluster %s", cluster.name)
        self.events = \
          EventImporterFactory.factory(self, db, self.conf, cluster)
        self.events.check()

        logger.info("checking jobs source for cluster %s", cluster.name)
        self.jobs = \
          JobImporterFactory.factory(self, db, self.conf, cluster)
        self.jobs.check()

        logger.info("every sources are properly available")
Example #4
    def check_cluster_sources(self, db, cluster_name):
        """Check data sources for a cluster."""

        cluster = None

        logger.info("checking architecture source for cluster %s",
                    cluster_name)
        self.arch = \
          ArchitectureImporterFactory.factory(self, db, self.conf,
                                              cluster_name)
        self.arch.check()

        cluster = Cluster(cluster_name)

        logger.info("checking users source for cluster %s",
                    cluster.name)
        self.users = \
          UserImporterFactory.factory(self, db, self.conf, cluster)
        self.users.check()

        logger.info("checking filesystem usage source for cluster %s",
                    cluster.name)
        self.fsusage = \
          FSUsageImporterFactory.factory(self, db, self.conf, cluster)
        self.fsusage.check()

        logger.info("checking events source for cluster %s",
                    cluster.name)
        self.events = \
          EventImporterFactory.factory(self, db, self.conf, cluster)
        self.events.check()

        logger.info("checking jobs source for cluster %s",
                    cluster.name)
        self.jobs = \
          JobImporterFactory.factory(self, db, self.conf, cluster)
        self.jobs.check()

        logger.info("every sources are properly available")
Example #5
def main(args=sys.argv):

    # Command line argument parser
    usage = "%prog [options] command"
    parser = StatsOptionParser(usage)
    (options, args) = parser.parse_args(args[1:])

    # validate options
    parser.validate(options)

    # configure logging
    logging_level = logging.INFO
    if options.debug:
        logging_level = logging.DEBUG
    logging.basicConfig(format='%(levelname)s: %(filename)s: %(message)s',
                        level=logging_level,
                        stream=sys.stdout)
    
    # Config file argument parser
    config = HPCStatsConfig(options)

    # dump entire config file
    for section in config.sections():
        logging.debug(section)
        for option in config.options(section):
            logging.debug(" %s = %s", option, config.get(section, option))

    # Instantiate connection to DB
    db_section = "hpcstatsdb"
    dbhostname = config.get(db_section, "hostname")
    dbport = config.get(db_section, "port")
    dbname = config.get(db_section, "dbname")
    dbuser = config.get(db_section, "user")
    dbpass = config.get(db_section, "password")
    db = HPCStatsdb(dbhostname, dbport, dbname, dbuser, dbpass)
    db.bind()
    
    logging.debug("db information %s %s %s %s %s" % db.infos())
    
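    # retrieve the Cluster object matching the name given on the command line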
    cluster_finder = ClusterFinder(db)
    cluster = cluster_finder.find(options.clustername)

    if options.arch:
        logging.info("=> Updating architecture for cluster %s", options.clustername)
        try:
            arch_importer = ArchitectureImporterFactory().factory(db, config, cluster.get_name())
            arch_importer.update_architecture()
            db.commit()
        except RuntimeError:
            logging.error("error occurred on architecture update.")

    if options.events:
        logging.info("=> Updating events for cluster %s", options.clustername)
        try:
            event_importer = EventImporterFactory().factory(db, config, cluster.get_name())
            event_importer.update_events()
            db.commit()
        except RuntimeError:
            logging.error("error occurred on events update.")

    if options.users:
        logging.info("=> Updating users for cluster %s", options.clustername)
        try:
            user_importer = UserImporterFactory().factory(db, config, cluster.get_name())
            user_importer.update_users()
            db.commit()
        except RuntimeError:
            logging.error("error occurred on users update.")

    if options.jobs:
        logging.info("=> Updating jobs for cluster %s", options.clustername)
        try:
            job_importer = JobImporterFactory().factory(db, config, cluster.get_name())
            # The last updated job in hpcstatsdb for this cluster
            last_updated_id = job_importer.get_last_job_id()
            # The unfinished jobs in hpcstatsdb for this cluster
            ids = job_importer.get_unfinished_job_id()
    
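            # non-empty sentinel so the while loop over new_jobs below is
            # entered on the first iteration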
            jobs_to_update = ['not_empty']
            new_jobs = ['not_empty']
    
            nb_threads = 4
    
            offset = 0
            max_jobs = 100000
    
            logging.debug("Get jobs to update")
            jobs_to_update = job_importer.get_job_information_from_dbid_job_list(ids)
            for job in jobs_to_update:
                offset = offset + 1
                if not offset % 10:
                    logging.debug("update job push %d" % offset)
                job.update(db)
    
            offset = 0
    
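            # fetch new jobs in batches of max_jobs until the source returns
            # no more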
            while new_jobs:
                logging.debug("Get %d new jobs starting at offset %d" % (max_jobs, offset))
                new_jobs = job_importer.get_job_for_id_above(last_updated_id, offset, max_jobs)
                for job in new_jobs:
                    offset = offset + 1
                    if not offset % 10000:
                        logging.debug("create job push %d" % offset)
                    job.save(db)
    
            db.commit()
        except RuntimeError:
            logging.error("error occured on jobs update.")
        
    db.unbind()