Example #1
    def setUp(self):
        self.filename = 'fake'
        self.cluster = Cluster('testcluster')
        HPCStatsConf.__bases__ = (MockConfigParser, object)
        self.conf = HPCStatsConf(self.filename, self.cluster.name)
        self.conf.conf = CONFIG
        self.app = None
        self.db = None
        self.importer = BusinessCodeImporterCSV(self.app, self.db, self.conf)
        init_reqs()
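These setUp fixtures swap HPCStatsConf's base class at runtime so that configuration lookups are served from an in-memory dict instead of a real INI file. A minimal sketch of what such a MockConfigParser might look like (the class name comes from the examples; its internals here are an assumption):

    class MockConfigParser(object):
        """Dict-backed stand-in for ConfigParser, for illustration only."""

        def __init__(self, *args, **kwargs):
            # tests fill this afterwards, e.g. self.conf.conf = CONFIG
            self.conf = {}

        def get(self, section, option):
            # serve values from the nested dict instead of parsing a file
            return self.conf[section][option]

        def has_option(self, section, option):
            return section in self.conf and option in self.conf[section]

With HPCStatsConf.__bases__ reassigned to (MockConfigParser, object), every inherited get() call is answered from the dict, so the tests never touch the filesystem.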
Example #2
    def setUp(self):
        self.filename = 'fake'
        self.cluster = Cluster('testcluster')
        self.cluster.cluster_id = 0
        HPCStatsConf.__bases__ = (MockConfigParser, object)
        self.conf = HPCStatsConf(self.filename, self.cluster)
        self.conf.conf = CONFIG.copy()
        self.db = HPCStatsDB(self.conf)
        self.db.bind()
        self.app = MockApp(self.db, self.conf, self.cluster)
        self.importer = UserImporterLdap(self.app,
                                         self.db,
                                         self.conf,
                                         self.cluster)
        init_reqs()
Example #3
    def setUp(self):
        self.filename = 'fake'
        self.cluster = Cluster('testcluster')
        self.cluster.cluster_id = 0
        HPCStatsConf.__bases__ = (MockConfigParser, object)
        self.conf = HPCStatsConf(self.filename, self.cluster)
        self.conf.conf = CONFIG.copy()
        self.db = HPCStatsDB(self.conf)
        self.db.bind()
        self.app = MockApp(self.db, self.conf, self.cluster)
        self.importer = UserImporterLdapSlurm(self.app,
                                              self.db,
                                              self.conf,
                                              self.cluster)
        # Disable strict_user_membership to avoid raising an exception when
        # a user is found in Slurm but not in LDAP.
        self.importer.strict_user_membership = False
        init_reqs()
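The comment above hints at how strict_user_membership is meant to behave: when a user appears in Slurm but not in LDAP, strict mode raises while lenient mode only warns. A self-contained sketch of that gating logic, with the helper name and exception class assumed for illustration:

    import logging

    logger = logging.getLogger(__name__)

    class HPCStatsSourceError(Exception):
        """Stand-in for the real HPCStats source error class."""

    def check_user_membership(strict, user, ldap_users):
        # hypothetical helper: raise in strict mode when a Slurm user is
        # absent from LDAP, otherwise log a warning and carry on
        if user not in ldap_users:
            if strict:
                raise HPCStatsSourceError("user %s not found in LDAP" % user)
            logger.warning("user %s not found in LDAP, skipping", user)

    check_user_membership(False, 'jdoe', ['asmith'])  # warns only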
Example #4
    def test_update_2(self):
        """ProjectImporterCSV.update() detect existing cluster and node
        """

        cluster1 = Cluster('cluster1')
        node1 = Node('node1', cluster1, 'model1', 'test_partition', 12, 6 * 1024 ** 3, 1)

        MockPg2.PG_REQS['find_cluster'].set_assoc(
          params=( cluster1.name, ),
          result=[ [ 1 ] ]
        )
        MockPg2.PG_REQS['find_node'].set_assoc(
          params=( node1.name, cluster1.cluster_id, ),
          result=[ [ 1 ] ]
        )
        self.importer.cluster = cluster1
        self.importer.nodes = [ node1 ]

        self.importer.update()
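MockPg2.PG_REQS used above maps each known SQL request to canned results keyed by its parameter tuple, which is how this test simulates a cluster and node that already exist in the database. A rough sketch of such an associative mock request (only set_assoc() with its params/result keywords appears in the examples; everything else is an assumption):

    class MockRequest(object):
        """Associates parameter tuples with canned query results."""

        def __init__(self):
            self.assocs = {}

        def set_assoc(self, params, result):
            # register the result returned for this exact parameter tuple
            self.assocs[params] = result

        def get_result(self, params):
            # an empty result lets tests simulate "not found in database"
            return self.assocs.get(params, [])

    find_cluster = MockRequest()
    find_cluster.set_assoc(params=('cluster1',), result=[[1]])
    assert find_cluster.get_result(('cluster1',)) == [[1]]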
Example #5
    def check_cluster_sources(self, db, cluster_name):
        """Check data sources for a cluster."""

        cluster = None

        logger.info("checking architecture source for cluster %s",
                    cluster_name)
        self.arch = \
          ArchitectureImporterFactory.factory(self, db, self.conf,
                                              cluster_name)
        self.arch.check()

        cluster = Cluster(cluster_name)

        logger.info("checking users source for cluster %s", cluster.name)
        self.users = \
          UserImporterFactory.factory(self, db, self.conf, cluster)
        self.users.check()

        logger.info("checking filesystem usage source for cluster %s",
                    cluster.name)
        self.fsusage = \
          FSUsageImporterFactory.factory(self, db, self.conf, cluster)
        self.fsusage.check()

        logger.info("checking filesystem quota source for cluster %s",
                    cluster.name)
        self.fsquota = \
          FSQuotaImporterFactory.factory(self, db, self.conf, cluster)
        self.fsquota.check()

        logger.info("checking events source for cluster %s", cluster.name)
        self.events = \
          EventImporterFactory.factory(self, db, self.conf, cluster)
        self.events.check()

        logger.info("checking jobs source for cluster %s", cluster.name)
        self.jobs = \
          JobImporterFactory.factory(self, db, self.conf, cluster)
        self.jobs.check()

        logger.info("every sources are properly available")
    def test_update(self):
        """ProjectImporterCSV.update() creates cluster and node if not existing
        """

        cluster1 = Cluster('cluster1')
        node1 = Node('node1', cluster1, 'model1', 'test_partition', 12, 6 * 1024 ** 3, 1)

        MockPg2.PG_REQS['save_cluster'].set_assoc(
          params=( cluster1.name, ),
          result=[ [ 1 ] ]
        )
        MockPg2.PG_REQS['save_node'].set_assoc(
          params=( node1.name, cluster1.cluster_id, node1.partition,
                   node1.cpu, node1.memory, node1.flops ),
          result=[ [ 1 ] ]
        )
        self.importer.cluster = cluster1
        self.importer.nodes = [ node1 ]

        self.importer.update()
Example #7
    def setUp(self):
        # setup conf
        self.filename = 'fake'
        self.cluster = Cluster('testcluster')
        HPCStatsConf.__bases__ = (MockConfigParser, object)
        self.conf = HPCStatsConf(self.filename, self.cluster)
        self.conf.conf = CONFIG.copy()
        # setup importer
        self.db = HPCStatsDB(self.conf)
        self.db.bind()
        self.app = MockApp(self.db, self.conf, self.cluster)
        self.importer = EventImporterSlurm(self.app, self.db, self.conf,
                                           self.cluster)
        init_reqs()
        # setup logger
        logging.setLoggerClass(HPCStatsLogger)
        self.logger = logging.getLogger(__name__)
        self.handler = MockLoggingHandler()
        self.logger.addHandler(self.handler)
        self.handler.reset()
        HPCStatsLogger.set_error_mgr(HPCStatsErrorMgr(self.conf))
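The MockLoggingHandler registered in this fixture captures emitted records so the test can assert on log output. A plausible minimal version (the real class ships with the HPCStats test utilities; this one is an assumption):

    import logging

    class MockLoggingHandler(logging.Handler):
        """Stores rendered messages per level for later assertions."""

        def __init__(self, *args, **kwargs):
            logging.Handler.__init__(self, *args, **kwargs)
            self.messages = {}
            self.reset()

        def emit(self, record):
            # keep the rendered message under its lowercased level name
            self.messages.setdefault(record.levelname.lower(), []) \
                         .append(record.getMessage())

        def reset(self):
            # called between tests to start from a clean slate
            self.messages = {'debug': [], 'info': [],
                             'warning': [], 'error': []}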
Example #8
    def load(self):
        """Load Cluster, Nodes and partitions from Architecture files. Raises
           HPCStatsRuntimeError or HPCStatsSourceError if error is encountered
           while loading data from sources. It sets attributes cluster, nodes
           and partitions with loaded data.
        """

        self.cluster = Cluster(self.cluster_name)
        self.nodes = []
        self.partitions = {}

        self.read_arch()
        config_get = self.config_get
        partitions = config_get(self.cluster.name, "partitions").split(',')

        for partition in partitions:

            part_sect = self.cluster.name + "/" + partition

            nodegroups = config_get(part_sect, "nodegroups").split(',')
            job_partitions = config_get(part_sect, "job_partitions") \
                               .split(',')

            nodeset_part = NodeSet() # nodeset for the partitions attribute

            for nodegroup in nodegroups:

                nodegroup_sect = self.cluster.name + "/" + partition \
                                 + "/" + nodegroup
                nodenames = config_get(nodegroup_sect, "names")
                nodeset_part.add(nodenames)

                sockets = config_get(nodegroup_sect, "sockets", isint=True)
                cores_per_socket = config_get(nodegroup_sect,
                                              "corespersocket",
                                              isint=True)
                cpu = sockets * cores_per_socket

                float_instructions = config_get(nodegroup_sect,
                                                "floatinstructions",
                                                isint=True)

                freq_str = config_get(nodegroup_sect, "frequency")
                freq = ArchitectureImporterArchfile.convert_freq(freq_str)
                if freq is None:
                    raise HPCStatsSourceError( \
                            "format of frequency for nodeset %s/%s/%s (%s) " \
                            "'%s' is not valid" \
                              % ( self.cluster.name,
                                  partition,
                                  nodegroup,
                                  nodenames,
                                  freq_str ))

                flops = sockets * cores_per_socket * float_instructions * freq

                mem_str = config_get(nodegroup_sect, "memory")
                mem = ArchitectureImporterArchfile.convert_mem(mem_str)
                if mem is None:
                    raise HPCStatsSourceError( \
                            "format of memory for nodeset %s/%s/%s (%s) " \
                            "'%s' is not valid" \
                              % ( self.cluster.name,
                                  partition,
                                  nodegroup,
                                  nodenames,
                                  mem_str ))

                model = config_get(nodegroup_sect, "model")
            
                nodeset_group = NodeSet(nodenames)
                for nodename in nodeset_group:
                    # create and append node
                    new_node = Node(name=nodename,
                                    cluster=self.cluster,
                                    model=model,
                                    partition=partition,
                                    cpu=cpu,
                                    memory=mem,
                                    flops=flops)
                    self.nodes.append(new_node)

            self.partitions[str(nodeset_part)] = job_partitions
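load() relies on convert_freq() and convert_mem() to turn strings like '2.27GHz' or '24GB' into raw numbers, returning None on invalid input so the HPCStatsSourceError above can fire. The accepted formats are not shown in the example, so the following parser is a sketch under that assumption:

    import re

    def convert_freq(freq_str):
        # assumed format: a number followed by GHz or MHz; returns Hz or None
        match = re.match(r'^\s*([\d.]+)\s*(GHz|MHz)\s*$', freq_str,
                         re.IGNORECASE)
        if match is None:
            return None
        value = float(match.group(1))
        factor = 10 ** 9 if match.group(2).lower() == 'ghz' else 10 ** 6
        return int(value * factor)

    print(convert_freq('2.27GHz'))  # 2270000000
    print(convert_freq('bogus'))    # None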
Example #9
    def run(self):
        """Run HPCStats Reporter application."""

        self.run_check()

        logger.debug("running on cluster %s with interval %s" \
                     % (self.cluster_name, self.interval))

        db = self.new_db()

        cluster = Cluster(self.cluster_name)
        self.cluster = cluster
        # check if cluster really exists
        if not cluster.find(db):
            raise HPCStatsRuntimeError( \
                      "cluster %s does not exist in database" \
                        % (cluster.name))

        # get the total number of cpus inside the cluster
        logger.debug("main: getting nb cpus on cluster %s" % (cluster.name))
        nb_cpus_cluster = cluster.get_nb_cpus(db)

        results = []

        # get datetime of the first job
        min_datetime = cluster.get_min_datetime(db)
        #min_datetime = datetime(2011,5,1,0,0,0)
        max_datetime = datetime.now()
        tmp_datetime = min_datetime

        db.unbind()

        userstats_global = {}
        groupstats_global = {}
        processes_args = []

        # construct intervals with process information mapping
        while tmp_datetime < max_datetime:

            # get the exacts beginning and end of the step sized interval
            # around the tmp datetime
            (begin,end) = self.get_interval_begin_end(tmp_datetime)

            # construct an array of args for each process/interval
            process_info = []

            process_info.append(begin)
            process_info.append(end)

            # finally append this interval's info to the global array
            processes_args.append(process_info)

            # going to next interval
            interval = self.get_interval_timedelta()
            tmp_datetime += interval

            logger.debug("intervals so far: %s", processes_args)

        # launch processes with their corresponding arguments
        parallel = True
        processes_results = []
        if parallel:
            pool = Pool(4)
            processes_results = pool.map(self.run_interval, processes_args)
        else:
            for process_info in processes_args:
                process_results = self.run_interval(process_info)
                processes_results.append(process_results)

        # then get results
        for result in processes_results:
            str_date = result[0]
            groupstats = result.pop()
            userstats = result.pop()
            userstats_global[str_date] = userstats
            groupstats_global[str_date] = groupstats
            results.append(result)

            logger.debug("usersstats", userstats_global)
            logger.debug("groupsstats", groupstats_global)

        # print results using template
        mytemplate = Template(filename=self.get_template_filename(),
                              input_encoding='utf-8',
                              output_encoding='utf-8',
                              default_filters=['decode.utf8'],
                              encoding_errors='ignore')
        print(mytemplate.render(cluster=cluster,
                                step=self.interval,
                                results=results,
                                userstats_global=userstats_global,
                                groupstats_global=groupstats_global))
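run() fans one argument list per interval out to a multiprocessing Pool and then unpacks each worker's result, whose last two elements are the per-user and per-group stats. A toy sketch of that map/collect shape, with the interval math simplified and the worker body a placeholder:

    from datetime import datetime, timedelta
    from multiprocessing import Pool

    def run_interval(args):
        # placeholder worker: the real one queries the database for the
        # begin/end window and computes user and group statistics
        begin, end = args
        userstats, groupstats = {}, {}
        return [begin.strftime('%Y-%m-%d'), userstats, groupstats]

    def build_intervals(start, stop, step):
        # one (begin, end) pair per step-sized window, mirroring the
        # while loop in the example above
        args = []
        current = start
        while current < stop:
            args.append((current, min(current + step, stop)))
            current += step
        return args

    if __name__ == '__main__':
        intervals = build_intervals(datetime(2011, 5, 1),
                                    datetime(2011, 8, 1),
                                    timedelta(days=30))
        pool = Pool(4)
        results = pool.map(run_interval, intervals)
        print(len(results))  # one result list per interval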