Пример #1
0
    def __init__(self):
        """Initialize the daemon: DB API handle, per-project bookkeeping, and runtime state."""
        super(proc_daemon, self).__init__()

        # Timestamp taken at construction
        self._creation_ts = time.time()

        # ds_master is the admin API through which proc_daemon
        # interacts with the ProcessTable
        self._api = ds_master(pubdb_conn_info.admin_info(), logger=self._logger)

        # Per-project bookkeeping, each keyed by project:
        # action instance, last-execution timestamp, execution counter
        self._project_v = {}
        self._exe_time_v = {}
        self._exe_ctr_v = {}

        # Name of the network node this daemon runs on
        self._server = pub_env.kSERVER_NAME

        # Server configuration and log holder; both are
        # instantiated later, inside load_daemon
        self._config = None
        self._log = None

        # Earliest time at which the next processing pass may run
        self._next_exec_time = time.time()

        # Set True by the SIGINT handler to request a clean shutdown
        self._exit_routine = False

        # Optional external logging callback
        self._logger_func = None

        # Handler used to report server status
        self._server_handler = None
Пример #2
0
    def __init__(self):
        # Call the base-class constructor first.
        super(proc_daemon, self).__init__()

        # Record when this daemon instance was created.
        self._creation_ts = time.time()

        # Admin API used by proc_daemon to manipulate the ProcessTable.
        self._api = ds_master(pubdb_conn_info.admin_info(),
                              logger=self._logger)

        # One entry per project: action instance ...
        self._project_v = {}
        # ... last execution timestamp ...
        self._exe_time_v = {}
        # ... and execution counter.
        self._exe_ctr_v = {}

        # Host name of the node this daemon is running on.
        self._server = pub_env.kSERVER_NAME

        # Server configuration; instantiated within load_daemon.
        self._config = None

        # Log; instantiated within load_daemon.
        self._log = None

        # Next execution timestamp for processing.
        self._next_exec_time = time.time()

        # Flag flipped by the SIGINT handler to request shutdown.
        self._exit_routine = False

        # Optional external logging callback.
        self._logger_func = None

        # Handler used to report server status.
        self._server_handler = None
Пример #3
0
#!/usr/bin/env python
import sys
from dstream.ds_api import ds_master
from pub_dbi import pubdb_conn_info
from pub_util import pub_logger
import time

if not len(sys.argv) == 2:
    print
    print 'Usage: %s PROJECT_NAME' % sys.argv[0]
    print
    sys.exit(1)

logger = pub_logger.get_logger('ds_master')
k = ds_master(pubdb_conn_info.admin_info(), logger)

if not k.connect():
    sys.exit(1)

k.project_version_update(sys.argv[1])

sys.exit(0)
        if conn.project_exist(p._project):

            status = status and conn.project_version_update(p)

        else:

            status = status and conn.define_project(p)

    return status


if __name__ == '__main__':

    logger = pub_logger.get_logger('register_project')
    # DB interface for altering ProcessTable
    conn=ds_master(pubdb_conn_info.admin_info(),logger)
    
    # Connect to DB
    conn.connect()
    
    if len(sys.argv)<2:
        print 'Usage:',sys.argv[0],'$CONFIG_FILE'
        sys.exit(1)

    c = open(sys.argv[1],'r').read()

    projects = register_project.parse(conn,logger,c)

    register(conn,logger,projects)

Пример #5
0
#!/usr/bin/env python
"""Remove (drop) a run table from the DB via the death_star admin API."""
import argparse, sys
from dstream.ds_api import death_star
from pub_dbi        import pubdb_conn_info
from pub_util       import pub_logger

# The parser was previously constructed but never consulted (sys.argv was
# inspected by hand, leaving --help and validation dead). Wire it up: one
# required positional argument, the name of the table to remove.
myparser = argparse.ArgumentParser(description='Remove a run table.')
myparser.add_argument('table_name', type=str,
                      help='Name of the run table to remove')
args = myparser.parse_args()

tname = args.table_name
logger = pub_logger.get_logger('death_star')
k = death_star(pubdb_conn_info.admin_info(),
               logger)

# Abort if the DB connection cannot be established.
if not k.connect():
    sys.exit(1)

# Drop the requested run table.
k.end_of_galaxy(tname)

Пример #6
0
# Command-line interface: table name, run number, and subrun count.
myparser = argparse.ArgumentParser(description='Filling a run table w/ new run/subrun')

myparser.add_argument('--name', dest='name', action='store',
                      default='TestRun', type=str,
                      help='Name of a run table to create/alter')
myparser.add_argument('--run', dest='run', action='store',
                      default=0, type=int,
                      help='Run number to be added')
myparser.add_argument('--nsubruns', dest='nsubruns', action='store',
                      default=0, type=int,
                      help='Number of sub-runs to be added')

args = myparser.parse_args()

# Admin-privilege handle for editing the run table.
logger = pub_logger.get_logger('death_star')
k = death_star(pubdb_conn_info.admin_info(), logger)

# Bail out if the DB connection cannot be established.
if not k.connect():
    sys.exit(1)

# Fake time stamp: the same wall-clock time is used for both the
# start and end columns of every inserted row.
ts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))

# Insert one row per requested subrun under the given run number.
for subrun in xrange(args.nsubruns):
    k.insert_into_death_star(args.name, args.run, subrun, ts, ts)

Пример #7
0
    def process_newruns(self):
        """Scan the data directories and register new (run,subrun) pairs.

        For each configured data directory, file names matching
        NoiseRun-YYYY_M_DD_HH_MM_SS-RUN-SUBRUN.ubdaq are parsed for their
        (run, subrun) pair. Pairs newer than the last pair already recorded
        in the RunTable -- but strictly older than the newest pair found on
        disk, since that file may still be open -- are inserted into the
        run table through the death_star admin API.
        """

        # Attempt to connect DB. If failure, abort
        if not self.connect():
            self.error('Cannot connect to DB! Aborting...')
            return

        # loop through all directories in which data is to be found
        for path_num in xrange(len(self._data_dir)):

            data_path = self._data_dir[path_num]

            self.info('Start access in data directory %s' % data_path)

            run_lim = None
            # if we are not yet at the last directory (for which there
            # should be no run limit)
            if path_num < len(self._run_bound):
                run_lim = self._run_bound[path_num]
                self.info('Run limit for this directory: %i' % run_lim)

            # get ALL closed data files in DATADIR
            if not os.path.isdir(data_path):
                self.error('DATA DIR %s does not exist' % data_path)
                return

            self.info('Looking for data files in: %s' % data_path)

            dircontents = os.listdir(data_path)

            # dictionary keyed by (RUN, SUBRUN), holding
            # [file name, creation time, modification time]
            file_info = {}

            for f in dircontents:

                filepath = data_path + '/' + f

                # check that this is a file
                if not os.path.isfile(filepath):
                    continue

                try:
                    time_create = os.path.getctime(filepath)
                    time_modify = os.path.getmtime(filepath)

                    # file format:
                    # NoiseRun-YYYY_M_DD_HH_MM_SS-RUN-SUBRUN.ubdaq
                    run = int(f.replace('.ubdaq', '').split('-')[-2])
                    subrun = int(f.replace('.ubdaq', '').split('-')[-1])
                    file_info[(run, subrun)] = [f, time_create, time_modify]

                except Exception:
                    # BUG FIX: the original tested `if (f.find('.ubdaq')):`,
                    # which is truthy (-1) exactly when '.ubdaq' is ABSENT
                    # and falsy (0) when it is a prefix -- the inverse of
                    # the intent. Warn only for genuine .ubdaq files whose
                    # run/subrun could not be parsed.
                    if '.ubdaq' in f:
                        self.info(
                            'Could not read RUN/SUBRUN info for file %s' % f)

            # sort the dictionary keys; skip this directory entirely if no
            # parsable file was found (the original raised IndexError on
            # `sorted_file_info[-1]` for an empty directory)
            sorted_file_info = sorted(file_info)
            if not sorted_file_info:
                continue
            # get tuple with largest run/subrun info found in files; that
            # file may not yet be closed, so it is never logged below
            max_file_info = sorted_file_info[-1]

            # fetch from database the last run/subrun number recorded
            logger = pub_logger.get_logger('register_new_run')
            reader = ds_api.ds_reader(pubdb_conn_info.reader_info(), logger)
            last_recorded_info = reader.get_last_run_subrun(self._runtable)

            # log which (run,subrun) pair was added last
            self.info('last recorded (run,subrun) is (%d,%d)' %
                      (int(last_recorded_info[0]), int(last_recorded_info[1])))
            self.info(
                'No run with (run,subrun) smaller than this will be added to the RunTable'
            )

            # DANGER: death_star edits the RUN table directly. This is
            # exactly what is needed here, but do NOT copy this pattern
            # elsewhere -- it bypasses the usual safeguards.
            logger = pub_logger.get_logger('death_star')
            rundbWriter = ds_api.death_star(pubdb_conn_info.admin_info(),
                                            logger)

            # loop through dictionary keys and write to DB info
            # for runs/subruns not yet stored
            for info in sorted_file_info:

                # this key needs to be larger than the last logged value
                # but less than the last element in the dictionary
                if info >= max_file_info:
                    continue
                if run_lim is not None and info[0] > run_lim:
                    continue
                if info <= last_recorded_info:
                    continue

                self.info('Trying to add to RunTable (run,subrun) = (%d,%d)' %
                          (int(info[0]), int(info[1])))

                try:
                    # dictionary value @ key (run,subrun) is
                    # [file name, time_create, time_modify]
                    run_info = file_info[info]
                    file_creation = time.gmtime(int(run_info[1]))
                    file_closing = time.gmtime(int(run_info[2]))
                    file_creation = time.strftime('%Y-%m-%d %H:%M:%S',
                                                  file_creation)
                    file_closing = time.strftime('%Y-%m-%d %H:%M:%S',
                                                 file_closing)

                    self.info('filling death star...')
                    # insert into the death star run table
                    rundbWriter.insert_into_death_star(self._runtable, info[0],
                                                       info[1], file_creation,
                                                       file_closing)
                    # Report starting
                    self.info(
                        'recording info for new run: run=%d, subrun=%d ...' %
                        (int(info[0]), int(info[1])))

                except Exception:
                    # we did not succeed in adding this (run,subrun);
                    # report using `info` directly (the loop-local `run` /
                    # `subrun` of the original could be unbound here)
                    self.info('FAILED to add run=%d, subrun=%d to RunTable' %
                              (int(info[0]), int(info[1])))
Пример #8
0
    def process_newruns(self):
        """Scan data directories and register new (run,subrun) pairs in the RunTable.

        File names of the form NoiseRun-...-RUN-SUBRUN.ubdaq are parsed; pairs
        newer than the last recorded pair but older than the newest on disk
        (which may still be open) are inserted via the death_star admin API.
        """

        # Attempt to connect DB. If failure, abort
        # (NOTE: the original mixed a literal TAB into the indentation
        # of the error call below; normalized to spaces here)
        if not self.connect():
            self.error('Cannot connect to DB! Aborting...')
            return

        # loop through all directories in which data is to be found
        for path_num in xrange(len(self._data_dir)):

            data_path = self._data_dir[path_num]

            self.info('Start access in data directory %s'%data_path)

            run_lim = None
            # if we are not yet at the last directory (for which there should be no run limit)
            if path_num < len(self._run_bound):
                run_lim = self._run_bound[path_num]
                self.info('Run limit for this directory: %i'%run_lim)

            # get ALL closed data files in DATADIR
            if not os.path.isdir(data_path):
                self.error('DATA DIR %s does not exist'%data_path)
                return

            self.info('Looking for data files in: %s'%data_path)

            dircontents = os.listdir(data_path)

            # dictionary keyed by (RUN,SUBRUN) holding [NAME,TIMEC,TIMEM]
            file_info = {}

            for f in dircontents:

                filepath = data_path+'/'+f

                # check that this is a file
                if not os.path.isfile(filepath):
                    continue

                try:

                    time_create = os.path.getctime(filepath)
                    time_modify = os.path.getmtime(filepath)

                    # file format:
                    # NoiseRun-YYYY_M_DD_HH_MM_SS-RUN-SUBRUN.ubdaq
                    run    = int(f.replace('.ubdaq','').split('-')[-2])
                    subrun = int(f.replace('.ubdaq','').split('-')[-1])
                    file_info[(run,subrun)] = [f,time_create,time_modify]

                except Exception:

                    # BUG FIX: `f.find('.ubdaq')` is truthy (-1) exactly when
                    # the substring is ABSENT and falsy (0) when it is a
                    # prefix; use a containment test to match the intent.
                    if '.ubdaq' in f:
                        self.info('Could not read RUN/SUBRUN info for file %s'%f)

            # sort the keys; skip this directory if nothing parsable was
            # found (the original raised IndexError on an empty directory)
            sorted_file_info = sorted(file_info)
            if not sorted_file_info:
                continue
            # get tuple with largest run/subrun info found in files; that
            # file may not yet be closed, so it is never logged below
            max_file_info = sorted_file_info[-1]

            # fetch from database the last run/subrun number recorded
            logger = pub_logger.get_logger('register_new_run')
            reader = ds_api.ds_reader(pubdb_conn_info.reader_info(), logger)
            last_recorded_info = reader.get_last_run_subrun(self._runtable)

            # log which (run,subrun) pair was added last
            self.info('last recorded (run,subrun) is (%d,%d)'%(int(last_recorded_info[0]),int(last_recorded_info[1])))
            self.info('No run with (run,subrun) smaller than this will be added to the RunTable')

            # DANGER: death_star edits the RUN table directly. Exactly what
            # is needed here, but do NOT copy this pattern elsewhere.
            logger = pub_logger.get_logger('death_star')
            rundbWriter = ds_api.death_star(pubdb_conn_info.admin_info(),logger)

            # loop through dictionary keys and write to DB info
            # for runs/subruns not yet stored
            for info in sorted_file_info:

                # this key needs to be larger than the last logged value
                # but less than the last element in the dictionary
                if info >= max_file_info:
                    continue
                if run_lim is not None and info[0] > run_lim:
                    continue
                if info <= last_recorded_info:
                    continue

                self.info('Trying to add to RunTable (run,subrun) = (%d,%d)'%(int(info[0]),int(info[1])))

                try:

                    # dictionary value @ key (run,subrun) is
                    # [file name, time_create, time_modify]
                    run_info      = file_info[info]
                    file_creation = time.gmtime(int(run_info[1]))
                    file_closing  = time.gmtime(int(run_info[2]))
                    file_creation = time.strftime('%Y-%m-%d %H:%M:%S',file_creation)
                    file_closing  = time.strftime('%Y-%m-%d %H:%M:%S',file_closing)

                    self.info('filling death star...')
                    # insert into the death star run table
                    rundbWriter.insert_into_death_star(self._runtable,info[0],info[1],file_creation, file_closing)
                    # Report starting
                    self.info('recording info for new run: run=%d, subrun=%d ...' % (int(info[0]),int(info[1])))

                except Exception:

                    # we did not succeed in adding this (run,subrun); report
                    # using `info` directly (the loop-local `run`/`subrun`
                    # of the original could be unbound here)
                    self.info('FAILED to add run=%d, subrun=%d to RunTable'%(int(info[0]),int(info[1])))