def __init__ (self, project, command='', run=0, subrun=0, email='', server='', runtable='', sleep=0, period=0, enable=True, resource=None, ver=-1):
    """Validate and store one project's configuration.

    All values are cast to their canonical types. Raises DSException
    (after logging under the caller's name) when a cast fails, when
    resource is not a dict, or when run/subrun/sleep/period is negative.
    """
    # resource=None default avoids the shared-mutable-default pitfall;
    # falsy input (None or {}) still yields a fresh empty dict, as before.
    if not resource:
        resource = {}
    try:
        # isinstance is the idiomatic type check (also accepts dict subclasses).
        if not isinstance(resource, dict):
            raise ValueError
        self._project = str(project)
        self._command = str(command)
        self._run = int(run)
        self._subrun = int(subrun)
        self._email = str(email)
        self._server = str(server)
        self._sleep = int(sleep)
        self._runtable = str(runtable)
        self._period = int(period)
        self._enable = bool(enable)
        # Shallow copy so later caller-side mutation cannot leak in.
        self._resource = copy.copy(resource)
        self._ver = int(ver)
        # Negative counters/intervals are treated as invalid input.
        if self._run < 0 or self._subrun < 0 or self._sleep < 0 or self._period < 0:
            raise ValueError
    except ValueError:
        # Log under the caller's function name so the log points at the misuse site.
        name = '%s' % inspect.stack()[1][3]
        pub_logger.get_logger(name).critical('Invalid value type!')
        raise DSException()
def __init__(self,logger_name=None):
    """Attach a pub_logger instance to this object and re-export its
    message functions (debug/info/warning/error/critical).

    The logger is named after the class unless logger_name is given.
    """
    name = logger_name or self.__class__.__name__
    self._logger = pub_logger.get_logger(name)
    # Short-hands so subclasses can call self.debug(...) etc. directly;
    # each takes a string message, matching the pub_logger API.
    for fn_name in ('debug', 'info', 'warning', 'error', 'critical'):
        setattr(self, fn_name, getattr(self._logger, fn_name))
def __init__(self, conn_info = None, logger = None):
    """Store DB connection parameters and attach a logger.

    Falls back to the class-level _conn_info when conn_info is falsy.
    A supplied logger must be a logging.Logger, otherwise DBException
    is raised after a critical log message.
    """
    if not conn_info:
        conn_info = self.__class__._conn_info
    self._cursor = None
    self._conn_info = conn_info
    self._n_retrial = 10  # number of DB retry attempts
    self._sleep = 10      # seconds to wait between retries
    self._logger = logger
    if not self._logger:
        # No logger supplied: create one named after the concrete class.
        self._logger = pub_logger.get_logger(self.__class__.__name__)
    elif not isinstance(logger, logging.Logger):
        pub_logger.get_logger('pubdb').critical('Invalid logger!')
        raise DBException()
    # Re-export the logger's message functions on the instance.
    for level in ('debug', 'info', 'warning', 'error', 'critical'):
        setattr(self, level, getattr(self._logger, level))
def __init__ (self, server, max_proj_ctr = 10, lifetime = 3600*24, log_lifetime = 3600*24, runsync_time = 120, update_time = 60, cleanup_time = 120, email = '', enable = True):
    """Validate and store daemon configuration values.

    Raises DSException (after logging under the caller's name) when a
    value cannot be cast or a negative lifetime is given.
    """
    try:
        self._server = str(server)
        self._max_proj_ctr = int(max_proj_ctr)
        self._lifetime = int(lifetime)
        self._log_lifetime = int(log_lifetime)
        self._runsync_time = int(runsync_time)
        self._update_time = int(update_time)
        self._cleanup_time = int(cleanup_time)
        self._email = str(email)
        self._enable = bool(enable)
        # Negative lifetimes make no sense; treat them as value errors.
        if min(self._lifetime, self._log_lifetime) < 0:
            raise ValueError
    except ValueError:
        # Attribute the failure to the calling function in the log.
        caller = '%s' % inspect.stack()[1][3]
        pub_logger.get_logger(caller).critical('Invalid value type!')
        raise DSException()
def __eq__(self,other):
    """Compare two pubdb_conn_info configurations field by field.

    Raises DBException for a cross-type comparison, or when every field
    except the password matches (almost certainly a config mistake).
    """
    # Guard clause: only pubdb_conn_info instances are comparable.
    if not isinstance(other, pubdb_conn_info):
        pub_logger.get_logger('pubdb').exception('Invalid type comparison!')
        raise DBException()
    issame = all(getattr(self, attr) == getattr(other, attr)
                 for attr in ('_host', '_port', '_db', '_user',
                              '_ntrial', '_sleep', '_role'))
    # Same connection parameters but a different password is suspicious.
    if issame and self._passwd != other._passwd:
        pub_logger.get_logger('pubdb').exception('Same configuration but different password!')
        raise DBException()
    return issame
def log(self, proj_ctr, uptime, log, logtime=None,max_proj_ctr=None,lifetime=None):
    """Record daemon runtime log information on this instance.

    Optional fields are only stored when explicitly supplied. Raises
    DSException (after logging) when a value cannot be cast.
    """
    try:
        self._proj_ctr = int(proj_ctr)
        self._uptime = int(uptime)
        self._log = dict(log)
        # Optional overrides: None means "leave the current value alone".
        if max_proj_ctr is not None:
            self._max_proj_ctr = int(max_proj_ctr)
        if lifetime is not None:
            self._lifetime = int(lifetime)
        if logtime is not None:
            self._logtime = float(logtime)
    except ValueError:
        # Attribute the failure to the calling function in the log.
        caller = '%s' % inspect.stack()[1][3]
        pub_logger.get_logger(caller).critical('Invalid value type!')
        raise DSException()
def __init__ (self, project = '', run = -1, subrun = -1, seq = -1, status = -1, enable = True, data = ''):
    """Hold status information for one (project, run, subrun, seq) entry.

    On a conversion failure the fields are reset to well-defined
    "invalid" values before DSException is raised.
    """
    try:
        self._project = str(project)
        self._run = int(run)
        self._subrun = int(subrun)
        self._seq = int(seq)
        self._status = int(status)
        self._enable = int(enable)  # stored as an int flag, not bool
        self._data = str(data)
    except ValueError:
        pub_logger.get_logger(inspect.stack()[1][3]).critical('Invalid value type!')
        # Leave the object in a consistent invalid state before raising.
        self._project = ''
        self._run = self._subrun = self._seq = self._status = -1
        raise DSException()
def __init__(self, conn_info=None, logger=None):
    """Initialize DB-interface state: connection info, retry policy,
    cursor placeholder, and logging short-hands.

    A supplied logger must be a logging.Logger or DBException is raised.
    """
    self._cursor = None
    # Fall back to the class-wide default connection info if none given.
    self._conn_info = conn_info or self.__class__._conn_info
    self._logger = logger
    self._n_retrial = 10  # DB retry attempts
    self._sleep = 10      # seconds between retries
    if not self._logger:
        self._logger = pub_logger.get_logger(self.__class__.__name__)
    elif not isinstance(logger, logging.Logger):
        pub_logger.get_logger('pubdb').critical('Invalid logger!')
        raise DBException()
    # Expose the logger's message functions directly on the instance.
    self.debug    = self._logger.debug
    self.info     = self._logger.info
    self.warning  = self._logger.warning
    self.error    = self._logger.error
    self.critical = self._logger.critical
def __init__(self, project_info, logger=None):
    """Prepare per-project execution state and load project_info."""
    # Use the provided logger, or create one named after the class.
    self._logger = logger or pub_logger.get_logger(self.__class__.__name__)
    self._info = None      # project information (filled by set_info below)
    self._proc = None      # process handle
    self._running = False  # whether the managed process is running
    # Validate/store the given project information.
    self.set_info(project_info)
def __init__(self, project_info,logger=None):
    """Set up process bookkeeping, attach a logger, and load project_info."""
    if logger:
        self._logger = logger
    else:
        # Default logger carries the concrete class's name.
        self._logger = pub_logger.get_logger(self.__class__.__name__)
    ## Project information (filled by set_info below)
    self._info = None
    ## Process handle
    self._proc = None
    ## Boolean for running process
    self._running = False
    self.set_info(project_info)
#!/usr/bin/env python from pub_util import pub_logger, pub_env from pub_dbi import pubdb_conn_info from dstream import ds_daemon from dstream.ds_api import ds_master import os, sys logger = pub_logger.get_logger('register_daemon') # DB interface for altering ProcessTable conn = ds_master(pubdb_conn_info.admin_info(), logger) # Connect to DB conn.connect() def parse(contents): new_contents = [] daemon_v = [] ctr = 0 for line in contents.split('\n'): ctr += 1 tmpline = line.strip(' ') tmpline = tmpline.rstrip(' ') if tmpline.startswith('#'): continue if tmpline.find('#') >= 0: tmpline = tmpline[0:tmpline.find('#')] if not tmpline: continue
# NOTE(review): fragment — this excerpt opens inside the body of a
# registration function (its "def" line is not visible here).
    for p in projects:
        # Update the version of an already-registered project;
        # otherwise register it as a new project. "status" accumulates
        # success across all projects.
        if conn.project_exist(p._project):
            status = status and conn.project_version_update(p)
        else:
            status = status and conn.define_project(p)
    return status

if __name__ == '__main__':

    logger = pub_logger.get_logger('register_project')

    # DB interface for altering ProcessTable
    conn=ds_master(pubdb_conn_info.admin_info(),logger)

    # Connect to DB
    conn.connect()

    # Require a configuration file argument.
    if len(sys.argv)<2:
        print 'Usage:',sys.argv[0],'$CONFIG_FILE'
        sys.exit(1)

    # Read the whole configuration file, parse it, and register projects.
    c = open(sys.argv[1],'r').read()
    projects = register_project.parse(conn,logger,c)
    register(conn,logger,projects)
#!/usr/bin/env python # python import import sys, os # dstream import from dstream.ds_api import ds_reader # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('cfg_dump_project') if not len(sys.argv) in [2, 3]: logger.error('Invalid argument. Usage: %s OUTPUT_FILENAME [SERVER]' % sys.argv[0]) sys.exit(1) out_file = sys.argv[1] if os.path.isfile(out_file) or os.path.isdir(out_file): logger.error('File/Dir already exists: %s' % out_file) sys.exit(1) # DB interface for altering ProcessTable k = ds_reader(pubdb_conn_info.reader_info(), logger) # Connect to DB k.connect() project_info_v = [] for p in k.list_all_projects():
#!/usr/bin/env python # python import import sys # dstream import from dstream.ds_api import ds_reader # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('list_log') # DB interface for altering ProcessTable k = ds_reader(pubdb_conn_info.reader_info(), logger) # Connect to DB k.connect() # Define a project projects = k.list_daemon_log() if not projects: print 'No project found... aborting!' print sys.exit(1) for x in projects: msg = x.dump_log() if msg: print msg
# NOTE(review): fragment — this excerpt opens inside a preceding
# myparser.add_argument(...) call for the '--email' option.
                      dest='email', action='store',
                      default='', type=str,
                      help='contact email address')

myparser.add_argument('--enable',
                      dest='enable', action='store',
                      default='DEFAULT', type=str,
                      help='enable this project')

args = myparser.parse_args()

logger = pub_logger.get_logger('update_project')

# DB interface for altering ProcessTable
k = ds_master(pubdb_conn_info.writer_info(), logger)

# Connect to DB
k.connect()

# The project must already exist to be updated.
if not k.project_exist(args.name):
    logger.critical('Project %s does not exist!' % args.name)
    sys.exit(1)

orig_info = k.project_info(args.name)

# Override only the fields the user explicitly supplied.
if args.cmd:
    orig_info._command = args.cmd
#!/usr/bin/env python # python import import sys, os # dstream import from dstream.ds_api import ds_reader # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('cfg_dump_daemon') if not len(sys.argv) in [2, 3]: logger.error('Invalid argument. Usage: %s OUTPUT_FILENAME [SERVER]' % sys.argv[0]) sys.exit(1) out_file = sys.argv[1] if os.path.isfile(out_file) or os.path.isdir(out_file): logger.error('File/Dir already exists: %s' % out_file) sys.exit(1) # DB interface for altering ProcessTable k = ds_reader(pubdb_conn_info.reader_info(), logger) # Connect to DB k.connect() daemon_info_v = [] if len(sys.argv) == 3: info = k.daemon_info(sys.argv[2])
#!/usr/bin/env python
# Script: remove a project from the ProcessTable.
# python import
import sys
# dstream import
from dstream.ds_api import ds_master
# pub_util import
from pub_util import pub_logger
# pub_dbi import
from pub_dbi import pubdb_conn_info

# NOTE(review): logger is named 'define_project' although this script
# removes one — kept byte-identical for log continuity; confirm intent.
logger = pub_logger.get_logger('define_project')

# Exactly one argument: the project name to remove.
if len(sys.argv) != 2:
    logger.error('Usage: %s $PROJECT_NAME' % sys.argv[0])
    sys.exit(1)

# Admin-level DB interface for altering ProcessTable
k=ds_master(pubdb_conn_info.admin_info(), logger)
k.connect()

# Drop the requested project and exit cleanly.
k.remove_project(sys.argv[1])
sys.exit(0)
#!/usr/bin/env python # python import import sys # dstream import from dstream.ds_api import ds_reader # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('list_daemon') # DB interface for altering ProcessTable k = ds_reader(pubdb_conn_info.reader_info(), logger) # Connect to DB k.connect() # Define a project daemons = k.list_daemon() if not daemons: print 'No daemon found... aborting!' print sys.exit(1) for x in daemons: print print x
def __init__(self):
    """Create an empty data holder with a 'pubdb' logger attached."""
    self._data = None
    self._logger = pub_logger.get_logger('pubdb')
#!/usr/bin/env python # python import import sys # dstream import from dstream.ds_api import ds_reader # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('list_all_projects') # DB interface for altering ProcessTable k = ds_reader(pubdb_conn_info.reader_info(), logger) # Connect to DB k.connect() # Define a project projects = k.list_all_projects() if not projects: print 'No project found... aborting!' print sys.exit(1) for x in projects: print print x
def process_newruns(self):
    """Scan the configured data directories for closed DAQ files and
    register any not-yet-recorded (run, subrun) pairs into the run table.

    Walks self._data_dir, parses run/subrun out of file names of the form
    NoiseRun-YYYY_M_DD_HH_MM_SS-RUN-SUBRUN.ubdaq, skips pairs already in
    the DB (and the largest pair found, which may still be open), then
    writes the rest via the death_star admin API.
    """
    # Attempt to connect DB. If failure, abort
    if not self.connect():
        self.error('Cannot connect to DB! Aborting...')
        return
    # loop through all directories in which data is to be found
    for path_num in xrange(len(self._data_dir)):
        data_path = self._data_dir[path_num]
        self.info('Start access in data directory %s' % data_path)
        run_lim = None
        # if we are not yet at the last directory (for which there should
        # be no run limit), pick up this directory's run upper bound
        if (path_num < len(self._run_bound)):
            run_lim = self._run_bound[path_num]
            self.info('Run limit for this directory: %i' % run_lim)
        # get ALL closed data files in DATADIR
        if (os.path.isdir(data_path) == False):
            self.error('DATA DIR %s does not exist' % data_path)
            return
        self.info('Looking for data files in: %s' % data_path)
        dircontents = os.listdir(data_path)
        # create a dictionary to keep track of
        # - file name ----- NAME
        # - run number ---- RUN
        # - subrun number - SUBRUN
        # - time-create --- TIMEC
        # - time-modify --- TIMEM
        # : dictionary key: ------ (RUN,SUBRUN)
        # : dictionary content: -- (NAME,TIMEC,TIMEM)
        file_info = {}
        for f in dircontents:
            filepath = data_path + '/' + f
            # check that this is a file (skip sub-directories etc.)
            if (os.path.isfile(filepath) == False):
                continue
            try:
                time_create = os.path.getctime(filepath)
                time_modify = os.path.getmtime(filepath)
                # file format:
                # NoiseRun-YYYY_M_DD_HH_MM_SS-RUN-SUBRUN.ubdaq
                run = int(f.replace('.ubdaq', '').split('-')[-2])
                subrun = int(f.replace('.ubdaq', '').split('-')[-1])
                #self.info('found run (%i, %i)'%(run,subrun))
                file_info[tuple((run, subrun))] = [f, time_create, time_modify]
            # NOTE(review): bare except — any parse/stat failure lands here.
            except:
                # if file-name is .ubdaq then we have a problem:
                # we were not able to read run-subrun info from the file.
                # NOTE(review): str.find returns -1 (truthy) when '.ubdaq'
                # is absent and 0 (falsy) when it is the prefix — this
                # condition looks inverted; confirm intended behavior.
                if (f.find('.ubdaq')):
                    self.info('Could not read RUN/SUBRUN info for file %s' % f)
        # sort the dictionary keys (ascending (run, subrun) tuples).
        # we want to ignore the largest run/subrun information:
        # this will prevent us from potentially logging info
        # for a file that has not yet been closed
        sorted_file_info = sorted(file_info)
        # get tuple with largest run/subrun info found in files
        # NOTE(review): raises IndexError when the directory yielded no
        # parsable files (empty dict) — confirm this cannot happen here.
        max_file_info = sorted_file_info[-1]
        # fetch from database the last run/subrun number recorded
        logger = pub_logger.get_logger('register_new_run')
        reader = ds_api.ds_reader(pubdb_conn_info.reader_info(), logger)
        last_recorded_info = reader.get_last_run_subrun(self._runtable)
        # log which (run,subrun) pair was added last
        self.info('last recorded (run,subrun) is (%d,%d)' % (int(last_recorded_info[0]), int(last_recorded_info[1])))
        self.info('No run with (run,subrun) smaller than this will be added to the RunTable')
        # if we made it this far the file info needs to be
        # recorded to the database
        # DANGER *** DANGER *** DANGER *** DANGER
        # we will now invoke the death_start
        # this API will access the RUN table end edit
        # informaton, which is exactly what we need to do
        # however, this is dangerous and you should not
        # copy this code and re-use it somewhere
        # if you do, the Granduca's wrath will be upon you
        # lucikly for you, the Granduca was the first to
        # abolish the death penalty on November 30th 1786
        # http://en.wikipedia.org/wiki/Grand_Duchy_of_Tuscany#Reform
        # However, the imperial army may be less mercyful.
        # DANGER *** DANGER *** DANGER *** DANGER
        logger = pub_logger.get_logger('death_star')
        rundbWriter = ds_api.death_star(pubdb_conn_info.admin_info(), logger)
        # loop through dictionary keys and write to DB info
        # for runs/subruns not yet stored
        for info in sorted_file_info:
            # this key needs to be larger than the last logged value
            # but less than the last element in the dictionary
            if (info >= max_file_info):
                continue
            # respect this directory's run upper bound, when set
            if (run_lim):
                if (info[0] > run_lim):
                    continue
            # skip pairs already recorded in the DB
            if (info <= last_recorded_info):
                continue
            self.info('Trying to add to RunTable (run,subrun) = (%d,%d)' % (int(info[0]), int(info[1])))
            try:
                # info is key (run,subrun)
                # dictionary value @ key is array
                # [file name, time_create, time_modify]
                run = info[0]
                subrun = info[1]
                run_info = file_info[info]
                # format the file timestamps as DB-friendly UTC strings
                file_creation = time.gmtime(int(run_info[1]))
                file_closing = time.gmtime(int(run_info[2]))
                file_creation = time.strftime('%Y-%m-%d %H:%M:%S', file_creation)
                file_closing = time.strftime('%Y-%m-%d %H:%M:%S', file_closing)
                self.info('filling death star...')
                # insert into the death star
                rundbWriter.insert_into_death_star(self._runtable, info[0], info[1], file_creation, file_closing)
                # Report starting
                self.info('recording info for new run: run=%d, subrun=%d ...' % (int(run), int(subrun)))
            # NOTE(review): bare except hides the actual DB failure reason.
            except:
                # we did not succeed in adding this (run,subrun)
                self.info('FAILED to add run=%d, subrun=%d to RunTable' % (int(run), int(subrun)))
# Script fragment: fill a run table with fake sub-run entries for testing.
myparser = argparse.ArgumentParser(description='Filling a run table w/ new run/subrun')
myparser.add_argument('--name', dest='name', action='store',
                      default='TestRun', type=str,
                      help='Name of a run table to create/alter')
myparser.add_argument('--run', dest='run', action='store',
                      default=0, type=int,
                      help='Run number to be added')
myparser.add_argument('--nsubruns',dest='nsubruns',action='store',
                      default=0, type=int,
                      help='Number of sub-runs to be added')

args = myparser.parse_args()

logger = pub_logger.get_logger('death_star')
# Admin-level writer for the run table.
k=death_star( pubdb_conn_info.admin_info(), logger )
if not k.connect():
    sys.exit(1)

# fake time stamp (same value used for both start and end times below)
ts = time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime( time.time() ) )

# Insert one row per sub-run of the requested run.
for subrun in xrange(args.nsubruns):
    k.insert_into_death_star(args.name,args.run,subrun,ts,ts)
#!/usr/bin/env python # python import import argparse, sys # dstream import from dstream.ds_api import ds_master # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('update_daemon') check = True if not len(sys.argv) == 3 and not (len(sys.argv) == 4 and sys.argv[3] in ['0', '1']): logger.error('Invalid argument. Usage: %s PROJECT_NAME VALUE [0|1]' % sys.argv[0]) sys.exit(1) daemon = sys.argv[1] value = sys.argv[2] if len(sys.argv) == 4: check = bool(int(sys.argv[3])) # DB interface for altering ProcessTable k = ds_master(pubdb_conn_info.writer_info(), logger) # Connect to DB k.connect() if not k.daemon_exist(daemon):
class pubdb_conn(object):
    """Class-level factory/registry of psycopg2 DB connections.

    Connections are cached in _conn_v, keyed by position against the
    parallel list _conn_info_v of pubdb_conn_info descriptors. All
    functionality is class-level; instantiating this class is pointless
    and only triggers a warning.
    """

    _conn_v = []       # cached psycopg2 connection objects
    _conn_info_v = []  # parallel list of their pubdb_conn_info descriptors
    _logger = pub_logger.get_logger(__name__)
    # Class-level logging short-hands used by all classmethods below.
    debug = _logger.debug
    info = _logger.info
    warning = _logger.warning
    error = _logger.error
    critical = _logger.critical

    def __init__(self):
        # Instances carry no state: warn whoever constructed one.
        self.warning('%s instance created by %s ' % (self.__class__, inspect.stack()[1][3]))
        self.warning('This is not needed as this is a factory class!')

    @classmethod
    def _check_conn_info_type(cls, conn_info):
        # Guard: reject anything that is not a pubdb_conn_info.
        if not isinstance(conn_info, pubdb_conn_info):
            cls.critical('Input conn_info: %s not pubdb_conn_info type!' % conn_info)
            raise DBException()

    @classmethod
    def check_conn_info_exist(cls, conn_info, throw=False):
        """Return the registry index for conn_info, or -1 when unknown.

        With throw=True an unknown conn_info raises DBException instead.
        """
        cls._check_conn_info_type(conn_info)
        conn_index = -1
        for x in xrange(len(cls._conn_info_v)):
            if cls._conn_info_v[x] == conn_info:
                conn_index = x
                break
        if throw and conn_index < 0:
            # Password is deliberately masked as XXX in the log.
            cls.critical('Never existed connection: (%s,%s,%s,%s,XXX)' % (conn_info._host, conn_info._port, conn_info._db, conn_info._user))
            raise DBException('Invalid conn string provided')
        return conn_index

    @classmethod
    def close(cls, conn_info):
        """Close the registered connection; True when it ends up closed."""
        if cls.closed(conn_info):
            return True
        # NOTE(review): closed() above already raises for an unknown
        # conn_info, so conn_index < 0 is unreachable here — confirm.
        conn_index = cls.check_conn_info_exist(conn_info, True)
        if conn_index < 0:
            return False
        else:
            cls._conn_v[conn_index].close()
            return bool(cls._conn_v[conn_index].closed)

    @classmethod
    def closed(cls, conn_info):
        """Return the psycopg2 'closed' flag; raises for unknown conn_info."""
        conn_index = cls.check_conn_info_exist(conn_info, True)
        return cls._conn_v[conn_index].closed

    @classmethod
    def is_connected(cls, conn_info):
        """Probe the connection with SELECT 1; False on any failure."""
        if cls.closed(conn_info):
            return False
        conn_index = cls.check_conn_info_exist(conn_info, True)
        valid_conn = False
        try:
            c = cls._conn_v[conn_index].cursor()
            c.execute('SELECT 1;')
            valid_conn = bool(c.rowcount)
            c.close()
        except Exception as e:
            # Probe failure of any kind means "not connected".
            pass
        return valid_conn

    @classmethod
    def _connect(cls, conn_info):
        """Single connection attempt; returns the registry index or -1.

        Reuses a live cached connection when possible; otherwise opens a
        new psycopg2 connection, optionally sets the configured ROLE, and
        stores (or replaces) it in the registry.
        """
        conn_index = cls.check_conn_info_exist(conn_info)
        if conn_index >= 0 and cls.is_connected(conn_info):
            return conn_index
        now_str = time.strftime('%Y-%m-%d %H:%M:%S')
        try:
            # Port is optional; omit it from the connect call when unset.
            if conn_info._port:
                conn = psycopg2.connect(host=conn_info._host, port=conn_info._port, database=conn_info._db, user=conn_info._user, password=conn_info._passwd)
            else:
                conn = psycopg2.connect(host=conn_info._host, database=conn_info._db, user=conn_info._user, password=conn_info._passwd)
            cls.info('Connected to DB: (%s,%s,%s,%s,XXX) @ %s' % (conn_info._host, conn_info._port, conn_info._db, conn_info._user, now_str))
            if conn_info._role:
                cursor = conn.cursor()
                try:
                    cls.info('Setting ROLE = %s' % conn_info._role)
                    cursor.execute('SET ROLE %s;' % conn_info._role)
                    cursor.close()
                    del cursor
                except psycopg2.ProgrammingError as e:
                    # ROLE change failed: tear the connection down and
                    # return the (possibly -1 / stale) index unchanged.
                    cls.error(e.pgerror)
                    cursor.close()
                    cls.close(conn_info)
                    conn.close()
                    return conn_index
            if conn_index < 0:
                # First connection for this conn_info: append to registry.
                conn_index = len(cls._conn_v)
                cls._conn_v.append(conn)
                cls._conn_info_v.append(copy.copy(conn_info))
            else:
                # Replace a stale cached connection in place.
                cls._conn_v[conn_index] = conn
        except psycopg2.OperationalError as e:
            cls.error('Connection failed (%s,%s,%s,%s,XXX) @ %s ' % (conn_info._host, conn_info._port, conn_info._db, conn_info._user, now_str))
        return conn_index

    @classmethod
    def connect(cls, conn_info):
        """Connect with retries (conn_info._ntrial attempts, _sleep apart).

        Returns True on a verified live connection, False otherwise.
        """
        conn_index = cls._connect(conn_info)
        connected = bool(conn_index >= 0 and cls.is_connected(conn_info))
        ctr = conn_info._ntrial
        while not connected and ctr > 0:
            time.sleep(conn_info._sleep)
            conn_index = cls._connect(conn_info)
            connected = bool(conn_index >= 0 and cls.is_connected(conn_info))
            ctr -= 1
        return connected

    @classmethod
    def reconnect(cls, conn_info):
        """Close then re-open the registered connection."""
        if not cls.close(conn_info):
            raise Exception()
        return cls.connect(conn_info)

    @classmethod
    def commit(cls, conn_info):
        """Commit the current transaction; False when not connected."""
        if not cls.is_connected(conn_info):
            return False
        conn_index = cls.check_conn_info_exist(conn_info, True)
        cls._conn_v[conn_index].commit()
        return True

    @classmethod
    def cursor(cls, conn_info):
        """Return a fresh cursor, (re)connecting as needed; None on failure."""
        if not cls.connect(conn_info):
            return None
        conn_index = cls.check_conn_info_exist(conn_info, True)
        return cls._conn_v[conn_index].cursor()
#!/usr/bin/env python import sys from dstream.ds_api import ds_master from pub_dbi import pubdb_conn_info from pub_util import pub_logger import time if not len(sys.argv) == 2: print print 'Usage: %s PROJECT_NAME' % sys.argv[0] print sys.exit(1) logger = pub_logger.get_logger('ds_master') k = ds_master(pubdb_conn_info.admin_info(), logger) if not k.connect(): sys.exit(1) k.project_version_update(sys.argv[1]) sys.exit(0)
# NOTE(review): fragment — the argument-count check that opens this
# usage-message branch begins above this excerpt.
    print ""
    print "Usage: correct_file_status.py <table> <old_status> <new_status> <run> {subrun}"
    print ""
    print "The subrun is optional and if not specified indicates all subruns in the run."
    print "EXITING WITH STATUS = 1"
    sys.exit(1)

# Positional arguments: table, old status code, new status code, run.
table = sys.argv[1]
old_status = int(sys.argv[2])
new_status = int(sys.argv[3])
run = int(sys.argv[4])
# Optional fifth argument: a single subrun; -1 means "all subruns".
subrun = -1
if len(sys.argv) == 6:
    subrun = int(sys.argv[5])

logger = pub_logger.get_logger('table')
reader = ds_reader(pubdb_conn_info.reader_info(), logger)
writer = ds_writer(pubdb_conn_info.writer_info(), logger)

# NOTE(review): this only prints a warning — execution continues even
# when the table does not exist; confirm that is intended.
if not reader.project_exist(table):
    print 'The table you gave me does not exist: %s' % table

# Walk all (run, subrun) entries currently at old_status.
for x in reader.get_runs(table, old_status):
    if run == x[0]:
        if subrun == -1:
            logger.info('In table %s, changing status of run %d, subrun %d from old_status=%d to new_status=%d' % (table, int(x[0]), int(x[1]), old_status, new_status))
            # NOTE(review): excerpt ends inside this call — remaining
            # keyword arguments are not visible here.
            updated_status = ds_status(project=table,
#!/usr/bin/env python # python import import argparse, sys # dstream import from dstream.ds_api import ds_master # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('update_daemon') check = True if not len(sys.argv) == 3 and not (len(sys.argv)==4 and sys.argv[3] in ['0','1']): logger.error('Invalid argument. Usage: %s PROJECT_NAME VALUE [0|1]' % sys.argv[0]) sys.exit(1) daemon = sys.argv[1] value = sys.argv[2] if len(sys.argv) == 4: check = bool(int(sys.argv[3])) # DB interface for altering ProcessTable k=ds_master(pubdb_conn_info.writer_info(), logger) # Connect to DB k.connect() if not k.daemon_exist(daemon): logger.error('Daemon %s does not exist!' % daemon) sys.exit(1)
#!/usr/bin/env python # python import import sys # dstream import from dstream.ds_api import ds_reader # pub_util import from pub_util import pub_logger # pub_dbi import from pub_dbi import pubdb_conn_info logger = pub_logger.get_logger('list_project') # DB interface for altering ProcessTable k = ds_reader(pubdb_conn_info.reader_info(), logger) # Connect to DB k.connect() # Define a project projects = k.list_projects() if not projects: print 'No project found... aborting!' print sys.exit(1) for x in projects: print print x