def generateUsageRecords(self, hostname, user_map, project_map):
    """
    Starts the UR generation process.

    Pulls log entries from the Slurm backend one at a time, converts each
    into a usage record and writes it via common.writeUr. The backend
    parser state is persisted only if at least one record was written.

    hostname    -- machine name used in the generated records.
    user_map    -- mapping of local user names to global identities.
    project_map -- mapping of local account names to projects.
    """
    self.missing_user_mappings = {}

    tlp = SlurmBackend(self.state, self.cfg.getConfigValue(SECTION, MAX_DAYS, MAX_DAYS_DEFAULT))
    count = 0
    while True:
        log_entry = tlp.getNextLogEntry()
        if log_entry is None:
            break  # no more log entries

        ur = self.createUsageRecord(log_entry, hostname, user_map, project_map)
        if ur is not None:
            common.writeUr(ur, self.cfg)
            count += 1

    # only update state if an entry was written
    if count > 0:
        self.state = tlp.end_str
    # lazy %-args: formatting is deferred until the message is actually emitted
    logging.info('Total number of UR written = %d', count)
def generateUsageRecords(self, hostname, user_map, project_map):
    """
    Starts the UR generation process.

    Iterates over the Slurm backend's log entries until exhausted, writing
    a usage record for each entry that converts successfully. The backend
    state is saved only when something was actually written.
    """
    self.missing_user_mappings = {}

    max_days = self.cfg.getConfigValue(SECTION, MAX_DAYS, MAX_DAYS_DEFAULT)
    backend = SlurmBackend(self.state, max_days)

    written = 0
    log_entry = backend.getNextLogEntry()
    while log_entry is not None:
        ur = self.createUsageRecord(log_entry, hostname, user_map, project_map)
        if ur is not None:
            common.writeUr(ur, self.cfg)
            written = written + 1
        log_entry = backend.getNextLogEntry()

    # advance persisted state only if an entry was written
    if written > 0:
        self.state = backend.end_str
    logging.info('Total number of UR written = %d' % written)
def generateUsageRecords(self, hostname, user_map, project_map):
    """
    Starts the UR generation process.

    Test backend: generates five synthetic usage records with fixed
    timestamps and 'default' user/account values, advancing self.state
    once per record so job ids stay unique across runs.

    hostname    -- machine name used in the generated records.
    user_map    -- mapping of local user names to global identities.
    project_map -- mapping of local account names to projects.
    """
    self.missing_user_mappings = {}

    # Creates 5 Usage Record objects
    for _ in range(5):
        self.state = self.state + 1

        # create some data at random...
        job_id = str(self.state)
        account_name = 'default'
        user_name = 'default'
        submit_time = time.mktime(common.datetimeFromIsoStr('2012-01-01T00:00:00').timetuple())
        start_time = time.mktime(common.datetimeFromIsoStr('2012-01-02T01:23:45').timetuple())
        end_time = time.mktime(common.datetimeFromIsoStr('2012-01-02T02:34:56').timetuple())

        # clean data and create various composite entries from the work load trace
        fqdn_job_id = hostname + ':' + job_id
        if self.idtimestamp:
            # strip separator characters from the ISO timestamp before embedding it in the id
            record_id_timestamp = re.sub("[-:TZ]", "", usagerecord.epoch2isoTime(start_time))
            record_id = fqdn_job_id + ':' + record_id_timestamp
        else:
            record_id = fqdn_job_id

        if user_name not in user_map.getMapping():
            self.missing_user_mappings[user_name] = True

        vo_info = []
        if account_name is not None:
            mapped_project = project_map.get(account_name)
            if mapped_project is not None:
                voi = usagerecord.VOInformation()
                voi.type = 'lrmsurgen-projectmap'
                voi.name = mapped_project
                vo_info = [voi]

        ## fill in usage record fields
        ur = usagerecord.UsageRecord()
        ur.record_id = record_id
        ur.local_job_id = job_id
        ur.global_job_id = fqdn_job_id
        ur.local_user_id = user_name
        ur.global_user_name = user_map.get(user_name)
        ur.machine_name = hostname
        ur.queue = 'default'
        ur.processors = 1
        ur.node_count = 1
        ur.host = hostname
        ur.submit_time = usagerecord.epoch2isoTime(submit_time)
        ur.start_time = usagerecord.epoch2isoTime(start_time)
        ur.end_time = usagerecord.epoch2isoTime(end_time)
        ur.cpu_duration = 90
        ur.wall_duration = 100
        ur.project_name = account_name
        ur.vo_info += vo_info

        common.writeUr(ur, self.cfg)
def generateUsageRecords(self, hostname, user_map, vo_map):
    """
    Starts the UR generation process.

    Walks the Torque accounting logs day by day, resuming from the saved
    state (state_log_file / state_job_id), writing a usage record per log
    entry and persisting generator state after each written record so the
    process can resume after interruption.

    hostname -- machine name used in the generated records.
    user_map -- mapping of local user names to global identities.
    vo_map   -- mapping used for VO information.
    """
    torque_spool_dir = self.cfg.getConfigValue(SECTION, SPOOL_DIR, DEFAULT_SPOOL_DIR)
    torque_accounting_dir = os.path.join(torque_spool_dir, 'server_priv', 'accounting')
    torque_date_today = time.strftime(TORQUE_DATE_FORMAT, time.gmtime())

    # set initial state
    job_id = self.state_job_id
    torque_date = self.state_log_file
    self.missing_user_mappings = {}

    while True:
        log_file = os.path.join(torque_accounting_dir, torque_date)
        tlp = TorqueLogParser(log_file)
        if job_id is not None:
            try:
                # resume: skip entries up to and including the last processed job
                tlp.spoolToEntry(job_id)
            except IOError as e:  # "except IOError, e" is Python-2-only syntax; "as" works on 2.6+/3
                logging.error('Error spooling log file at %s for date %s to %s (%s)' % (log_file, torque_date, job_id, str(e)))
                job_id = None
                continue

        while True:
            try:
                log_entry = tlp.getNextLogEntry()
            except IOError as e:
                if torque_date == torque_date_today:  # todays entry might not exist yet
                    break
                logging.error('Error reading log file at %s for date %s (%s)' % (log_file, torque_date, str(e)))
                break
            if log_entry is None:
                break  # no more log entries

            job_id = log_entry['jobid']
            ur = self.createUsageRecord(log_entry, hostname, user_map, vo_map)
            common.writeUr(ur, self.cfg)

            # dump state so generation can resume where it left off
            self.state_job_id = job_id
            self.state_log_file = torque_date
            common.writeGeneratorState(self)

        # stop once today's (possibly still growing) log has been processed;
        # otherwise advance to the next day's log — without this the outer
        # loop would never terminate (mirrors the Maui backend's structure)
        if torque_date == torque_date_today:
            break
        torque_date = common.getIncrementalDate(torque_date, TORQUE_DATE_FORMAT)

        job_id = None
def generateUsageRecords(self, hostname, user_map, vo_map):
    """
    Starts the UR generation process.

    Walks the Maui stats files day by day, resuming from the saved state
    (state_log_file / state_job_id), writing a usage record per valid log
    entry and persisting generator state after each written record.

    hostname -- machine name used in the generated records.
    user_map -- mapping of local user names to global identities.
    vo_map   -- mapping used for VO information.
    """
    maui_spool_dir = self.cfg.getConfigValue(SECTION, SPOOL_DIR, DEFAULT_SPOOL_DIR)
    maui_server_host = self.getMauiServer(maui_spool_dir)
    maui_date_today = time.strftime(MAUI_DATE_FORMAT, time.gmtime())

    # set initial job_id
    job_id = self.state_job_id
    maui_date = self.state_log_file
    self.missing_user_mappings = {}

    while True:
        log_file = os.path.join(maui_spool_dir, STATS_DIR, maui_date)
        mlp = MauiLogParser(log_file)
        if job_id is not None:
            # resume: skip entries up to and including the last processed job
            mlp.spoolToEntry(job_id)

        while True:
            try:
                log_entry = mlp.getNextLogEntry()
            except IOError:
                if maui_date == maui_date_today: # todays entry might not exist yet
                    break
                logging.error('Error opening log file at %s for date %s' % (log_file, maui_date))
                break
            if log_entry is None:
                break # no more log entries

            # entries with other than 44 fields are rejected as malformed
            if len(log_entry) != 44:
                logging.error('Read entry with an invalid number fields:')
                logging.error(' - File %s contains entry with %i fields. First field: %s' % (log_file, len(log_entry), log_entry[0]))
                logging.error(' - No usage record will be generated from this line')
                continue

            job_id = log_entry[0]
            if not self.shouldGenerateUR(log_entry, user_map):
                logging.debug('Job %s: No UR will be generated.' % job_id)
                continue

            ur = self.createUsageRecord(log_entry, hostname, user_map, vo_map, maui_server_host)
            common.writeUr(ur,self.cfg)

            # write generated state
            # NOTE(review): other backends in this file persist the log-entry
            # job id here; confirm UsageRecord actually exposes 'job_id' —
            # records elsewhere only set local_job_id / global_job_id.
            self.state_job_id = ur.job_id
            self.state_log_file = maui_date
            common.writeGeneratorState(self)

            job_id = None

        # stop after processing today's (possibly still growing) log file
        if maui_date == maui_date_today:
            break

        maui_date = common.getIncrementalDate(maui_date, MAUI_DATE_FORMAT)
        job_id = None
def generateUsageRecords(self, hostname, user_map, project_map):
    """
    Starts the UR generation process.

    Test implementation: emits five synthetic usage records with fixed
    timestamps and 'default' user/account data, bumping self.state once
    per record so job ids remain unique between runs.
    """
    self.missing_user_mappings = {}

    def _iso2epoch(iso_str):
        # convert an ISO timestamp string to a unix epoch value
        return time.mktime(common.datetimeFromIsoStr(iso_str).timetuple())

    # Creates 5 Usage Record objects
    for _step in (1, 2, 3, 4, 5):
        self.state += 1

        # synthetic job data
        job_id = str(self.state)
        account_name = 'default'
        user_name = 'default'
        submit_time = _iso2epoch('2012-01-01T00:00:00')
        start_time = _iso2epoch('2012-01-02T01:23:45')
        end_time = _iso2epoch('2012-01-02T02:34:56')

        # composite identifiers derived from the synthetic data
        fqdn_job_id = '%s:%s' % (hostname, job_id)
        record_id = fqdn_job_id
        if self.idtimestamp:
            # strip separator characters from the ISO timestamp before embedding
            stamp = re.sub("[-:TZ]", "", usagerecord.epoch2isoTime(start_time))
            record_id = '%s:%s' % (fqdn_job_id, stamp)

        if user_name not in user_map.getMapping():
            self.missing_user_mappings[user_name] = True

        vo_info = []
        if account_name is not None:
            mapped = project_map.get(account_name)
            if mapped is not None:
                voi = usagerecord.VOInformation()
                voi.type = 'lrmsurgen-projectmap'
                voi.name = mapped
                vo_info.append(voi)

        ## fill in usage record fields
        fields = {
            'record_id': record_id,
            'local_job_id': job_id,
            'global_job_id': fqdn_job_id,
            'local_user_id': user_name,
            'global_user_name': user_map.get(user_name),
            'machine_name': hostname,
            'queue': 'default',
            'processors': 1,
            'node_count': 1,
            'host': hostname,
            'submit_time': usagerecord.epoch2isoTime(submit_time),
            'start_time': usagerecord.epoch2isoTime(start_time),
            'end_time': usagerecord.epoch2isoTime(end_time),
            'cpu_duration': 90,
            'wall_duration': 100,
            'project_name': account_name,
        }
        ur = usagerecord.UsageRecord()
        for attr, value in fields.items():
            setattr(ur, attr, value)
        ur.vo_info += vo_info

        common.writeUr(ur, self.cfg)