Example 1
0
    def get_queues_info(self):
        """Parse the SGE XML tree and summarize queue occupancy.

        Reads the tree/root prepared by self.sge_stat_maker, extracts the
        per-queue dicts, stringifies their counters, and appends a synthetic
        'Pending' pseudo-queue holding the total queued-job count.

        Returns:
            (total_running_jobs, total_queued_jobs, qstatq_list) where the
            first two are ints and qstatq_list is a list of dicts.

        Raises:
            fileutils.FileEmptyError: if the sge file is empty (propagated).
        """
        logging.debug("Parsing tree of %s" % self.sge_file)
        fileutils.check_empty_file(self.sge_file)

        tree, root = self.sge_stat_maker.tree, self.sge_stat_maker.root

        qstatq_list = self._extract_queues('queue_info/Queue-List', root)

        # 'run' counters are numeric at this point; summed before stringifying.
        total_running_jobs = sum(d['run'] for d in qstatq_list)
        logging.info('Total running jobs found: %s' % total_running_jobs)

        # Downstream consumers expect string counters.
        for d in qstatq_list:
            d['run'] = str(d['run'])
            d['queued'] = str(d['queued'])

        total_queued_jobs = self._get_total_queued_jobs(
            'job_info/job_list', root)

        qstatq_list.append({
            'run': '0',
            'queued': total_queued_jobs,
            'queue_name': 'Pending',
            'state': 'Q',
            'lm': '0'
        })
        logging.debug('qstatq_list contains %s elements' % len(qstatq_list))
        # TODO: check validity. 'state' shouldnt just be 'Q'!
        logging.debug("Closing %s" % self.sge_file)

        # BUGFIX: was int(eval(str(total_queued_jobs))) — eval() on parsed data
        # is unsafe and redundant; int() performs the same int-or-digit-string
        # conversion.
        return total_running_jobs, int(total_queued_jobs), qstatq_list
Example 2
0
    def extract_qstat(self, orig_file):
        """Extract per-job values from a qstat output file.

        Skips the two header lines, then parses every job line with
        self._process_qstat_line. The first job line decides which of the
        two known qstat formats the file follows (a failed match raises
        AttributeError, triggering the 'prior' fallback pattern); all
        remaining lines are parsed with that same pattern.

        Returns a list of the extracted value sets, or an empty list when
        the file is empty.
        """
        try:
            fileutils.check_empty_file(orig_file)
        except fileutils.FileEmptyError:
            return []

        extracted = []
        with open(orig_file, 'r') as fin:
            fin.readline()  # header
            fin.readline()
            first_line = fin.readline()
            positions = ('job_id', 'user', 'state', 'queue_name'
                         )  # was: (1, 5, 7, 8), (1, 4, 5, 8)
            try:  # first qstat line determines which format qstat follows.
                pattern = self.user_q_search
                extracted.append(
                    self._process_qstat_line(pattern, first_line, positions))
                # unused: _job_nr, _ce_name, _name, _time_use = m.group(2), m.group(3), m.group(4), m.group(6)
            except AttributeError:  # 'prior' exists in qstat: alternate format.
                pattern = self.user_q_search_prior
                extracted.append(
                    self._process_qstat_line(pattern, first_line, positions))
                # unused:  _prior, _name, _submit, _start_at, _queue_domain, _slots, _ja_taskID =
                # m.group(2), m.group(3), m.group(6), m.group(7), m.group(9), m.group(10), m.group(11)

            # The rest of the lines follow whichever format matched above.
            for job_line in fin:
                extracted.append(
                    self._process_qstat_line(pattern, job_line, positions))

        return extracted
Example 3
0
File: pbs.py Project: qtop/qtop
    def extract_qstat(self, orig_file):
        """Extract per-job values from a qstat output file.

        Skips the two header lines, then parses every job line with
        self._process_qstat_line. The first job line decides which of the
        two known qstat formats the file follows; all remaining lines are
        parsed with that same pattern.

        Returns a list of the extracted value sets, or an empty list when
        the file is empty.
        """
        try:
            fileutils.check_empty_file(orig_file)
        except fileutils.FileEmptyError:
            logging.error('File %s seems to be empty.' % orig_file)
            all_qstat_values = []
        else:
            all_qstat_values = []
            with open(orig_file, 'r') as fin:
                _ = fin.readline()  # header
                fin.readline()
                line = fin.readline()
                re_match_positions = ('job_id', 'user', 'state', 'queue_name')  # was: (1, 5, 7, 8), (1, 4, 5, 8)
                try:  # first qstat line determines which format qstat follows.
                    re_search = self.user_q_search
                    qstat_values = self._process_qstat_line(re_search, line, re_match_positions)
                    # unused: _job_nr, _ce_name, _name, _time_use = m.group(2), m.group(3), m.group(4), m.group(6)
                except AttributeError:  # this means 'prior' exists in qstat, it's another format
                    re_search = self.user_q_search_prior
                    qstat_values = self._process_qstat_line(re_search, line, re_match_positions)
                    # unused:  _prior, _name, _submit, _start_at, _queue_domain, _slots, _ja_taskID =
                    # m.group(2), m.group(3), m.group(6), m.group(7), m.group(9), m.group(10), m.group(11)
                # BUGFIX: this append used to sit in a `finally:` clause — if BOTH
                # format attempts raised AttributeError, qstat_values was unbound
                # and the finally block masked the real error with a NameError.
                # Now the AttributeError from the fallback propagates as-is.
                all_qstat_values.append(qstat_values)

                # hence the rest of the lines should follow either try's or except's same format
                for line in fin:
                    qstat_values = self._process_qstat_line(re_search, line, re_match_positions)
                    all_qstat_values.append(qstat_values)

        return all_qstat_values
Example 4
0
    def extract_qstat(self, orig_file):
        """Extract per-job values from a qstat output file.

        Skips the two header lines, then parses every remaining line with
        the self.user_q_search pattern via self._process_qstat_line.

        Returns a list of the extracted value sets, or an empty list when
        the file is empty.
        """
        try:
            fileutils.check_empty_file(orig_file)
        except fileutils.FileEmptyError:
            logging.error('File %s seems to be empty.' % orig_file)
            return []

        pattern = self.user_q_search
        positions = ('job_id', 'user', 'state', 'queue_name'
                     )  # was: (1, 5, 7, 8), (1, 4, 5, 8)
        collected = []
        with open(orig_file, 'r') as fin:
            fin.readline()  # header
            fin.readline()
            first_line = fin.readline()
            collected.append(
                self._process_qstat_line(pattern, first_line, positions))

            # All remaining lines follow the same format as the first one.
            for job_line in fin:
                collected.append(
                    self._process_qstat_line(pattern, job_line, positions))

        return collected
Example 5
0
File: pbs.py Project: qtop/qtop
    def extract_qstatq(self, orig_file):
        """
        reads QSTATQ_ORIG_FN sequentially and returns useful data
        Searches for lines in the following format:
        biomed             --      --    72:00:00   --   31   0 --   E R
        (except for the last line, which contains two sums and is parsed separately)

        Returns a list of per-queue dicts (queue_name/run/queued/lm/state,
        all strings) followed by one {'Total_running', 'Total_queued'} dict
        when the summary line was found; an empty list for an empty file.
        """
        all_values = []
        try:
            fileutils.check_empty_file(orig_file)
        except fileutils.FileEmptyError:
            return all_values

        anonymize = self.anonymize_func()
        queue_search = r'^(?P<queue_name>[\w.-]+)\s+' \
                       r'(?:--|[0-9]+[mgtkp]b[a-z]*)\s+' \
                       r'(?:--|\d+:\d+:?\d*:?)\s+' \
                       r'(?:--|\d+:\d+:?\d+:?)\s+(--)\s+' \
                       r'(?P<run>\d+)\s+' \
                       r'(?P<queued>\d+)\s+' \
                       r'(?P<lm>--|\d+)\s+' \
                       r'(?P<state>[DE] R)'
        # BUGFIX: now a raw string — bare '\s'/'\d' escapes are invalid escape
        # sequences on modern Python. This picks up the last line contents.
        run_qd_search = r'^\s*(?P<tot_run>\d+)\s+(?P<tot_queued>\d+)'

        totals = None  # (tot_run, tot_queued) once the summary line is seen
        with open(orig_file, 'r') as fin:
            # BUGFIX: fin.next() is Python-2-only; next() works on 2.6+ and 3.x.
            # Skip the 4 header lines + the column-headers line (the headers
            # line should later define the keys in temp_dict, should they be
            # different).
            for _ in range(5):
                next(fin, None)
            for line in fin:
                line = line.strip()
                m = re.search(queue_search, line)
                if m is not None:
                    queue_name = m.group('queue_name') if not self.options.ANONYMIZE else anonymize(m.group('queue_name'), 'qs')
                    all_values.append({'queue_name': queue_name,
                                       'run': m.group('run'),
                                       'queued': m.group('queued'),
                                       'lm': m.group('lm'),
                                       'state': m.group('state')})
                else:
                    n = re.search(run_qd_search, line)
                    if n is not None:
                        totals = (n.group('tot_run'), n.group('tot_queued'))
            # BUGFIX: the totals dict used to be appended unconditionally and a
            # missing summary line raised NameError, silently swallowed by a
            # `return` inside `finally:` (which also hid every other error).
            # Appending only when found preserves the old observable output.
            if totals is not None:
                all_values.append({'Total_running': totals[0], 'Total_queued': totals[1]})
        return all_values
Example 6
0
File: pbs.py Project: tin6150/qtop
    def get_worker_nodes(self, job_ids, job_queues, options):
        """Parse pbsnodes blocks into a list of per-node dicts.

        Each dict carries: domainname (anonymized on demand), state (first
        char of the node state, 'f' mapped to '-'), np (falling back to
        torque's 'pcpus'), optionally gpus, and a core_job_map (possibly
        empty) built from the node's 'jobs' field.

        Returns an empty list when the pbsnodes file is empty.
        """
        try:
            fileutils.check_empty_file(self.pbsnodes_file)
        except fileutils.FileEmptyError:
            all_pbs_values = []
            return all_pbs_values

        raw_blocks = self._read_all_blocks(self.pbsnodes_file)
        all_pbs_values = []
        anonymize = self.qstat_maker.anonymize_func()
        for block in raw_blocks:
            pbs_values = dict()
            pbs_values['domainname'] = block[
                'domainname'] if not self.options.ANONYMIZE else anonymize(
                    block['domainname'], 'wns')

            nextchar = block['state'][0]
            # BUGFIX: was the fragile `(cond) and "-" or nextchar` idiom.
            state = "-" if nextchar == 'f' else nextchar

            pbs_values['state'] = state
            try:
                pbs_values['np'] = block['np']
            except KeyError:
                pbs_values['np'] = block[
                    'pcpus']  # handle torque cases  # todo : to check

            # BUGFIX: was `block.get('gpus') > 0`, which raises TypeError on
            # Python 3 when the key is absent (None > 0) and relied on py2's
            # str-vs-int ordering when present. Block values come from parsed
            # text, so a presence check matches the old behavior for strings.
            if 'gpus' in block:  # this should be rare.
                pbs_values['gpus'] = block['gpus']

            try:  # this should turn up more often, hence the try/except.
                jobs_field = block['jobs']
            except KeyError:
                pbs_values['core_job_map'] = dict(
                )  # change of behaviour: all entries should contain the key even if no value
            else:
                # jobs = re.split(r'(?<=[A-Za-z0-9]),\s?', block['jobs'])
                jobs = re.findall(r'[0-9][0-9,-]*/[^,]+', jobs_field)
                pbs_values['core_job_map'] = dict(
                    (core, job) for job, core in self._get_jobs_cores(jobs))
            all_pbs_values.append(pbs_values)

        all_pbs_values = self.ensure_worker_nodes_have_qnames(
            all_pbs_values, job_ids, job_queues)
        return all_pbs_values
Example 7
0
File: pbs.py Project: qtop/qtop
    def get_worker_nodes(self, job_ids, job_queues, options):
        """Parse pbsnodes blocks into a list of per-node dicts.

        Each dict carries: domainname (anonymized on demand), state (first
        char of the node state, 'f' mapped to '-'), np (falling back to
        torque's 'pcpus'), optionally gpus, and a core_job_map (possibly
        empty) built from the node's 'jobs' field.

        Returns an empty list when the pbsnodes file is empty.
        """
        try:
            fileutils.check_empty_file(self.pbsnodes_file)
        except fileutils.FileEmptyError:
            all_pbs_values = []
            return all_pbs_values

        raw_blocks = self._read_all_blocks(self.pbsnodes_file)
        all_pbs_values = []
        anonymize = self.qstat_maker.anonymize_func()
        for block in raw_blocks:
            pbs_values = dict()
            pbs_values['domainname'] = block['domainname'] if not self.options.ANONYMIZE else anonymize(block['domainname'], 'wns')

            nextchar = block['state'][0]
            # BUGFIX: was the fragile `(cond) and "-" or nextchar` idiom.
            state = "-" if nextchar == 'f' else nextchar

            pbs_values['state'] = state
            try:
                pbs_values['np'] = block['np']
            except KeyError:
                pbs_values['np'] = block['pcpus']  # handle torque cases  # todo : to check

            # BUGFIX: was `block.get('gpus') > 0`, which raises TypeError on
            # Python 3 when the key is absent (None > 0) and relied on py2's
            # str-vs-int ordering when present. Block values come from parsed
            # text, so a presence check matches the old behavior for strings.
            if 'gpus' in block:  # this should be rare.
                pbs_values['gpus'] = block['gpus']

            try:  # this should turn up more often, hence the try/except.
                jobs_field = block['jobs']
            except KeyError:
                pbs_values['core_job_map'] = dict()  # change of behaviour: all entries should contain the key even if no value
            else:
                # jobs = re.split(r'(?<=[A-Za-z0-9]),\s?', block['jobs'])
                jobs = re.findall(r'[0-9][0-9,-]*/[^,]+', jobs_field)
                pbs_values['core_job_map'] = dict((core, job) for job, core in self._get_jobs_cores(jobs))
            all_pbs_values.append(pbs_values)

        all_pbs_values = self.ensure_worker_nodes_have_qnames(all_pbs_values, job_ids, job_queues)
        return all_pbs_values
Example 8
0
File: sge.py Project: qtop/qtop
    def get_queues_info(self):
        """Parse the SGE XML tree and summarize queue occupancy.

        Reads the tree/root prepared by self.sge_stat_maker, extracts the
        per-queue dicts, stringifies their counters, and appends a synthetic
        'Pending' pseudo-queue holding the total queued-job count.

        Returns:
            (total_running_jobs, total_queued_jobs, qstatq_list) where the
            first two are ints and qstatq_list is a list of dicts.

        Raises:
            fileutils.FileEmptyError: if the sge file is empty (propagated).
        """
        logging.debug("Parsing tree of %s" % self.sge_file)
        fileutils.check_empty_file(self.sge_file)

        tree, root = self.sge_stat_maker.tree, self.sge_stat_maker.root

        qstatq_list = self._extract_queues('queue_info/Queue-List', root)

        # 'run' counters are numeric at this point; summed before stringifying.
        total_running_jobs = sum(d['run'] for d in qstatq_list)
        logging.info('Total running jobs found: %s' % total_running_jobs)

        # Downstream consumers expect string counters.
        for d in qstatq_list:
            d['run'] = str(d['run'])
            d['queued'] = str(d['queued'])

        total_queued_jobs = self._get_total_queued_jobs('job_info/job_list', root)

        qstatq_list.append({'run': '0', 'queued': total_queued_jobs, 'queue_name': 'Pending', 'state': 'Q', 'lm': '0'})
        logging.debug('qstatq_list contains %s elements' % len(qstatq_list))
        # TODO: check validity. 'state' shouldnt just be 'Q'!
        logging.debug("Closing %s" % self.sge_file)

        # BUGFIX: was int(eval(str(total_queued_jobs))) — eval() on parsed data
        # is unsafe and redundant; int() performs the same conversion.
        return total_running_jobs, int(total_queued_jobs), qstatq_list
Example 9
0
    def extract_qstatq(self, orig_file):
        """
        reads QSTATQ_ORIG_FN sequentially and returns useful data
        Searches for lines in the following format:
        biomed             --      --    72:00:00   --   31   0 --   E R
        (except for the last line, which contains two sums and is parsed separately)

        Returns a list of per-queue dicts (queue_name/run/queued/lm/state,
        all strings) followed by one {'Total_running', 'Total_queued'} dict
        when the summary line was found; an empty list for an empty file.
        """
        all_values = []
        try:
            fileutils.check_empty_file(orig_file)
        except fileutils.FileEmptyError:
            return all_values

        anonymize = self.anonymize_func()
        queue_search = r'^(?P<queue_name>[\w.-]+)\s+' \
                       r'(?:--|[0-9]+[mgtkp]b[a-z]*)\s+' \
                       r'(?:--|\d+:\d+:?\d*:?)\s+' \
                       r'(?:--|\d+:\d+:?\d+:?)\s+(--)\s+' \
                       r'(?P<run>\d+)\s+' \
                       r'(?P<queued>\d+)\s+' \
                       r'(?P<lm>--|\d+)\s+' \
                       r'(?P<state>[DE] R)'
        # BUGFIX: now a raw string — bare '\s'/'\d' escapes are invalid escape
        # sequences on modern Python. This picks up the last line contents.
        run_qd_search = r'^\s*(?P<tot_run>\d+)\s+(?P<tot_queued>\d+)'

        totals = None  # (tot_run, tot_queued) once the summary line is seen
        with open(orig_file, 'r') as fin:
            # BUGFIX: fin.next() is Python-2-only; next() works on 2.6+ and 3.x.
            # Skip the 4 header lines + the column-headers line (the headers
            # line should later define the keys in temp_dict, should they be
            # different).
            for _ in range(5):
                next(fin, None)
            for line in fin:
                line = line.strip()
                m = re.search(queue_search, line)
                if m is not None:
                    queue_name = m.group(
                        'queue_name'
                    ) if not self.options.ANONYMIZE else anonymize(
                        m.group('queue_name'), 'qs')
                    all_values.append({'queue_name': queue_name,
                                       'run': m.group('run'),
                                       'queued': m.group('queued'),
                                       'lm': m.group('lm'),
                                       'state': m.group('state')})
                else:
                    n = re.search(run_qd_search, line)
                    if n is not None:
                        totals = (n.group('tot_run'), n.group('tot_queued'))
            # BUGFIX: the totals dict used to be appended unconditionally and a
            # missing summary line raised NameError, silently swallowed by a
            # `return` inside `finally:` (which also hid every other error).
            # Appending only when found preserves the old observable output.
            if totals is not None:
                all_values.append({
                    'Total_running': totals[0],
                    'Total_queued': totals[1]
                })
        return all_values