Example no. 1
def main(argv):
    # allow us to run over just one run at a time
    MIN_RUN = -1
    MAX_RUN = 1000000
    parser = OptionParser(usage="process_runlog_files.py [options]")
    parser.add_option("-b",
                      "--min_run",
                      dest="min_run",
                      help="Minimum run number to process")
    parser.add_option("-e",
                      "--max_run",
                      dest="max_run",
                      help="Maximum run number to process")
    (options, args) = parser.parse_args(argv)
    if options.min_run:
        MIN_RUN = int(options.min_run)
    if options.max_run:
        MAX_RUN = int(options.max_run)

    # run over all files
    LOOKBACKTIME = 3 * 60 * 60
    current_time = int(time.time())
    dirs_on_disk = [
        join(RAWDATA_DIR, d) for d in listdir(RAWDATA_DIR)
        if os.path.isdir(join(RAWDATA_DIR, d)) and (
            (current_time -
             os.path.getmtime(join(RAWDATA_DIR, d))) < LOOKBACKTIME)
    ]
    dirs_on_disk += [
        join(RAWDATA2_DIR, d) for d in listdir(RAWDATA2_DIR)
        if os.path.isdir(join(RAWDATA2_DIR, d)) and (
            (current_time -
             os.path.getmtime(join(RAWDATA2_DIR, d))) < LOOKBACKTIME)
    ]
    #runs_on_disk = [ int(d[3:]) for d in dirs_on_disk ]
    runs_on_disk = {}
    for d in dirs_on_disk:
        try:
            # directory names are expected to look like "RunXXXXXX"
            path = d.split('/')
            run = int(path[-1][3:])
            runs_on_disk[run] = d
        except (IndexError, ValueError):
            print "invalid directory = " + d
    #runs_on_disk.sort()

    # Add information to DB
    ## initialize DB
    db = datamon_db()
    for (run, d) in sorted(runs_on_disk.items()):
        if run < MIN_RUN or run > MAX_RUN:
            continue
        print "processing run %s ..." % (run)
        ## add blank run to DB if it doesn't exist
        if (db.GetRunID(run) < 0):
            db.CreateRun(run)

        # get info from logs and EPICS archive and EVIO files
        process_logs(db, run, d)
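
For reference, the run-number parsing above relies on raw-data directories being named "RunXXXXXX". A minimal, self-contained sketch of that convention (the path below is purely illustrative):

import os.path

def run_number_from_dir(path):
    # assumes directory names of the form RunXXXXXX, e.g. Run001234
    name = os.path.basename(path.rstrip('/'))
    if not name.startswith("Run"):
        raise ValueError("not a run directory: " + path)
    return int(name[3:])

print run_number_from_dir("/raid/rawdata/Run001234")   # -> 1234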
Example no. 2
def main(argv):
    if len(argv) <= 1:
        print_usage()
        return
    cmd = argv[0]
    if cmd not in VALID_COMMANDS:
        print "Invalud command = " + cmd
        print_usage()
        return
        
    if cmd == "add":
        if len(argv) < 2:
            print_usage()
            return
        filename = argv[1]
    elif cmd == "update":
        if len(argv) < 3:
            print_usage()
            return
        try:
            version_id = int(argv[1])
        except ValueError:
            print "Bad version ID = " + argv[1]
            return
        filename = argv[2]

    # set up defaults
    version_properties = init_property_mapping()
    # read data from file
    input_properties = parse_version_file(filename)

    # do some sanity checking
    if input_properties['data_type'] not in VALID_DATA_TYPES:
        print "Invalid data_type specified = " + input_properties['data_type']
        print "Valid data types = " + " ".join(VALID_DATA_TYPES)
        return
    try:
        revision = int(input_properties['revision'])
    except ValueError:
        print "Bad revision value = " + input_properties['revision']
        return


    # build final version info
    for key in version_properties:
        if key in input_properties:
            version_properties[key] = input_properties[key]

    
    # input to database
    db = datamon_db()
    if cmd == "add":
        version_id = db.AddVersionInfo(version_properties)
        print "Your new version number is:  " + str(int(version_id[0]))
    elif cmd == "update":
        db.UpdateVersionInfo(version_id, version_properties)
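
parse_version_file and init_property_mapping are defined elsewhere in this package. A minimal sketch of parse_version_file, under the assumption that version files hold one "key = value" pair per line (the format is an assumption, not confirmed by this listing):

def parse_version_file(filename):
    # hypothetical sketch: one "key = value" pair per line, '#' starts a comment
    properties = {}
    f = open(filename)
    for line in f:
        line = line.strip()
        if line == "" or line.startswith("#"):
            continue
        key, sep, value = line.partition("=")
        if sep:
            properties[key.strip()] = value.strip()
    f.close()
    return properties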
Example no. 4
    def __init__(self, logfile=""):
        ## initialize DB
        self.db = datamon_db()

        self.BASE_ONLINEMON_DIR = "/work/halld/online_monitoring"
        self.PROCESSED_RUN_LIST_FILE = "processedrun.lst.online"
        self.ONLINE_ROOT_DIR = self.BASE_ONLINEMON_DIR + '/root'
        self.ONLINE_CONDITION_DIR = self.BASE_ONLINEMON_DIR + '/conditions'

        self.MIN_RUN_NUMBER = 30001
        #self.MAX_RUN_NUMBER = 9000
        self.MAX_RUN_NUMBER = 1000000
        self.VERSION_NUMBER = 88  ## hardcoded default - need to change this
        self.MONITORING_OUTPUT_DIR = "/work/halld2/data_monitoring"
        self.RUN_PERIOD = "RunPeriod-2017-01"

        self.MAKE_PLOTS = True
        self.MAKE_DB_SUMMARY = True
        #self.MAKE_PLOTS = False
        #self.MAKE_DB_SUMMARY = False
        #self.MAKE_RUN_CONDITIONS = False
        self.MAKE_RUN_CONDITIONS = True

        self.NEWDIR_MODE = "775"
        self.VERBOSE = 1
        self.FORCE_PROCESSING = False

        # limit ROOT output
        gROOT.ProcessLine("gErrorIgnoreLevel = 2001;")   # only output at least "Error" level messages

        # optionally log the output to a file
        # use separate log files for ROOT and all other output
        # append by default
        self.LOGFILE = logfile
        if self.LOGFILE != "":
            logging.basicConfig(filename=self.LOGFILE)
            gSystem.RedirectOutput(self.LOGFILE)
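
A short usage sketch for this configuration class (the class name ProcessMonDataConfig is taken from Example no. 5, where an instance drives the offline processing; the log path is illustrative):

config = ProcessMonDataConfig(logfile="/tmp/monitoring.log")  # hypothetical path
print config.RUN_PERIOD        # -> RunPeriod-2017-01
print config.MAX_RUN_NUMBER    # -> 1000000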
Example no. 5
def main():
    # Define command line options
    parser = OptionParser(
        usage="process_new_offline_data.py input_directory output_directory")
    parser.add_option("-p",
                      "--disable_plots",
                      dest="disable_plotting",
                      action="store_true",
                      help="Don't make PNG files for web display")
    parser.add_option(
        "-d",
        "--disable_summary",
        dest="disable_db_summary",
        action="store_true",
        help="Don't calculate summary information and store it in the DB")
    parser.add_option(
        "-s",
        "--disable_hadd",
        dest="disable_hadd",
        action="store_true",
        help="Don't sum ouptut histograms into one combined file.")
    parser.add_option("-f",
                      "--force",
                      dest="force",
                      action="store_true",
                      help="Ignore list of already processed runs")
    parser.add_option("-R",
                      "--run_number",
                      dest="run_number",
                      help="Process only this particular run number")
    parser.add_option("-V",
                      "--version_number",
                      dest="version_number",
                      help="Save summary results with this DB version ID")
    parser.add_option(
        "-v",
        "--version",
        dest="version_string",
        help=
        "Save summary results with a particular data version, specified using the string \"RunPeriod,Revision\", e.g., \"RunPeriod-2014-10,5\""
    )
    parser.add_option("-b",
                      "--min_run",
                      dest="min_run",
                      help="Minimum run number to process")
    parser.add_option("-e",
                      "--max_run",
                      dest="max_run",
                      help="Maximum run number to process")
    parser.add_option("-L",
                      "--logfile",
                      dest="logfile",
                      help="Base file name to save logs to")
    parser.add_option("-t",
                      "--nthreads",
                      dest="nthreads",
                      help="Number of threads to use")
    parser.add_option("-A",
                      "--parallel",
                      dest="parallel",
                      action="store_true",
                      help="Enable parallel processing.")
    parser.add_option("-S",
                      "--save_rest",
                      dest="save_rest",
                      action="store_true",
                      help="Save REST files to conventional location.")
    parser.add_option(
        "-M",
        "--merge-incrementally",
        dest="merge_increment",
        action="store_true",
        help="Merge ROOT files incrementally and delete old ones.")
    parser.add_option("-E",
                      "--no-end-of-job-processing",
                      dest="noendofjob_processing",
                      action="store_true",
                      help="Disable end of run processing.")
    parser.add_option("--merge-trees",
                      dest="root_trees_to_merge",
                      help="Merge these ROOT trees.")
    parser.add_option("--merge-skims",
                      dest="evio_skims_to_merge",
                      help="Merge these EVIO skims.")
    parser.add_option("--merge-hddms",
                      dest="hddm_files_to_merge",
                      help="Merge these HDDM files.")
    parser.add_option("-T",
                      "--merged-root-output-dir",
                      dest="root_output_dir",
                      help="Directory to save merged ROOT files")
    parser.add_option(
        "-B",
        "--batchfarm-tempdir",
        dest="batch_tempdir",
        action="store_true",
        help=
        "Merge ROOT files in the local directory, as required by current batch farm configuration"
    )

    (options, args) = parser.parse_args(sys.argv)

    if (len(args) < 3):
        parser.print_help()
        sys.exit(0)

    # initialize configuration
    config = ProcessMonDataConfig()
    db = datamon_db()
    config.ProcessCommandline(args, options, db)

    # try to connect to RCDB
    rcdb_conn = None
    try:
        rcdb_conn = rcdb.RCDBProvider("mysql://rcdb@hallddb/rcdb")
    except Exception, e:
        print "Could not connect to RCDB: " + str(e)

    # Set up directories and any other prep work that needs to be done
    config.BuildEnvironment()

    # Check which runs have been already processed
    rundirs_on_disk = config.BuildListOfProcessedRunsOnDisk()

    # For each run, check to see if there are any ROOT files we haven't processed yet
    # If that is true, then we need to process the run - N.B. most of our outputs
    # depend on the full results from a run
    runs_to_process = []
    for rundir in rundirs_on_disk:
        try:
            runnum = int(rundir)
        except ValueError:
            continue

        # handle any options about which runs to process
        # specifying a particular run to process beats specifying a range
        if config.RUN_NUMBER is not None:
            if runnum != config.RUN_NUMBER:
                continue
        else:
            if runnum < config.MIN_RUN or runnum > config.MAX_RUN:
                continue

        if config.VERBOSE > 0:
            logging.info("checking run " + str(runnum))

        ## add blank run to DB if it doesn't exist
        if (db.GetRunID(runnum) < 0):
            db.CreateRun(runnum)
            # skip run if it's not in RCDB for some weird reason
            # (also guards against the RCDB connection failing above)
            rcdb_run = rcdb_conn.get_run(runnum) if rcdb_conn else None
            if rcdb_run is None:
                continue
            # add run start time, needed for monitoring web pages
            run_properties = {}
            run_properties['start_time'] = rcdb_run.start_time
            run_properties['num_events'] = rcdb_run.get_condition_value(
                'event_count')
            db.UpdateRunInfo(runnum, run_properties)

        ## make sure we have a directory to store some meta-information
        rootfiles_already_processed = []  # let's not do this anymore
        if config.REVISION == "mc":
            log_dir = join(config.OUTPUT_DIRECTORY, "log", rundir)
        else:
            log_dir = join(config.OUTPUT_DIRECTORY, "log", rundir)
        if isfile(join(log_dir, "processed_files.dat")):
            rootfiles_already_processed = pickle.load(
                open(join(log_dir, "processed_files.dat"), "r"))

        #if config.REVISION == "mc":
        #    misc_dir = join(config.INPUT_DIRECTORY,"misc","%06d"%(int(rundir)))
        #else:
        #    misc_dir = join(config.INPUT_DIRECTORY,config.REVISION,"misc",rundir)
        #rootfiles_already_processed = []
        #if not os.path.exists(misc_dir):
        #    os.system("mkdir -p " + misc_dir)
        #if not os.path.isdir(misc_dir):
        #    logging.error("file %s exists and is not a directory, skipping this run ..."%misc_dir)
        #    continue
        #else:
        #    # check to see if we've already processed some of the ROOT files
        #    if isfile(join(misc_dir,"processed_files.dat")):
        #        rootfiles_already_processed = pickle.load( open(join(misc_dir,"processed_files.dat"),"r") )
        #    #except Exception, e:
        #    #    logging.error("Couldn't load list of processed files: %s"%str(e))

        ## figure out which files for this run are currently on disk
        rootfile_map = config.BuildROOTFileList(rundir)
        if not config.FORCE_PROCESSING and len(rootfile_map) == 0:
            continue

        ## figure out which files are new from the last run
        rootfiles_to_process = [
            f for f in sorted(rootfile_map.keys())
            if f not in rootfiles_already_processed
        ]

        #print "ROOTFILES_ALREADY_PROCESSED = " + str(rootfiles_already_processed)
        #print "ROOTFILE_MAP = " + str(rootfile_map)
        #print "ROOTFILES_TO_PROCESS = " + str(rootfiles_to_process)

        ## if there's new information, or if the user wants us to anyway,
        ## add the run to the list of the ones we should process
        if config.FORCE_PROCESSING or len(rootfiles_to_process) > 0:
            runs_to_process.append((runnum, config, rootfile_map))

    ## loop DONE

    ## Start processing all the runs!
    if config.VERBOSE > 0:
        logging.info("%d runs to process..." % (len(runs_to_process)))
    if len(runs_to_process) == 0:
        sys.exit(0)

    if options.parallel is None:
        # process serially
        for run_args in runs_to_process:
            ProcessOfflineData(run_args)
    else:
        # process in parallel
        p = multiprocessing.Pool(config.NTHREAD)
        p.map(ProcessOfflineData, runs_to_process)

    # save tarballs of log files and PNGs
    if config.EOR_PROCESSING and len(runs_to_process) > 0:
        # save log files
        logdir = join(config.INPUT_SMALLFILE_DIRECTORY, config.REVISION, "log")
        if isdir(logdir):
            os.system("tar cf %s/%s/log.tar %s" %
                      (config.INPUT_DIRECTORY, config.REVISION,
                       logdir))  # overwrite any existing file
        os.system("jcache put %s/%s/log.tar" %
                  (config.INPUT_DIRECTORY, config.REVISION))  # save to tape
        # save IDXA files (for EventStore)
        idxadir = join(config.INPUT_SMALLFILE_DIRECTORY, config.REVISION,
                       "IDXA")
        if isdir(idxadir):
            os.system("tar cf %s/%s/IDXA.tar %s" %
                      (config.INPUT_DIRECTORY, config.REVISION,
                       idxadir))  # overwrite any existing file
        os.system("jcache put %s/%s/IDXA.tar" %
                  (config.INPUT_DIRECTORY, config.REVISION))  # save to tape
        # save web figures
        os.system("tar cf %s/%s/web_figures.tar %s/Run*" %
                  (config.INPUT_DIRECTORY, config.REVISION,
                   config.OUTPUT_DIRECTORY))  # overwrite any existing file
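
ProcessOfflineData is defined elsewhere; because multiprocessing.Pool.map hands each element of runs_to_process to the worker as a single argument, it has to accept one tuple and unpack it. A minimal sketch of that calling convention (the body is hypothetical):

def ProcessOfflineData(run_args):
    # Pool.map passes one (runnum, config, rootfile_map) tuple per call
    (runnum, config, rootfile_map) = run_args
    if config.VERBOSE > 0:
        logging.info("processing run %d ..." % runnum)
    # ... per-run histogram merging, plotting, and DB summaries go here ...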
Example no. 6
def main(argv):
    global RUN_NUMBER,VERSION_NUMBER,FILE_NUMBER,ROOTDIR_PREFIX
    global OUTPUT_DIRECTORY  # assigned below, so it must be declared global too
    
    init()
    # read in command line args
    parser = OptionParser(usage = "process_monitoring_data.py [options] run_number version_number file.root")
    parser.add_option("-D","--output_dir", dest="output_dir",
                      help="Directory where output files will be stored")
    parser.add_option("-F","--file_number", dest="file_number",
                      help="Specify the file number in the run that we are monitoring")
    parser.add_option("-R","--root_dir", dest="root_dir",
                      help="The base ROOT directory in which the histograms are stored")


    (options, args) = parser.parse_args(argv)

    if(len(args) < 3):
        parser.print_help()
        return 
        #sys.exit(0)


    run_number = int(args[0])
    version_number = int(args[1])
    file_name = str(args[2])

    if(options.file_number):
        FILE_NUMBER = int(options.file_number)
    if(options.output_dir):
        if(os.path.isdir(options.output_dir)):
            OUTPUT_DIRECTORY = options.output_dir
        else:
            print "WARNING: Output directory '"+options.output_dir+"' does not exist, defaulting to current directory..."
    if(options.root_dir):
        ROOTDIR_PREFIX = options.root_dir

    # sanity checks
    if(run_number > 0):
        RUN_NUMBER = run_number
    if(version_number > 0):
        VERSION_NUMBER = version_number

    if(not os.path.isfile(file_name)):
        print " no such file: " + file_name
        return
    if(RUN_NUMBER < 1):
        print " bad run number! = " + str(RUN_NUMBER)
        return
    if(VERSION_NUMBER < 1):
        print " bad version number! = " + str(VERSION_NUMBER)
        return
    if(FILE_NUMBER < 1):
        print " bad file number! = " + str(FILE_NUMBER)
        return

    
    # initializing monitoring DB connection
    mondb = datamon_db()

    # run over data to make some plots
    root_file = TFile(file_name)

    ## calculate number of events??

    # Do subdetector-specific tasks
    try:
        ProcessCDC(mondb, root_file)
    except MySQLdb.Error, e:
        print_mysql_error(e)
    parser = OptionParser(usage="datamon_db_tool.py [options] [command]")
    parser.add_option("-I",
                      "--interactive",
                      dest="interactive",
                      action="store_true",
                      help="Interactive mode")

    (options, args) = parser.parse_args()

    if (len(args) < 1):
        parser.print_help()
        print_commands()
        exit(0)

    ## initialize DB
    db = datamon_db()

    ## figure out what we want to do
    cmd = args[0]
    if (cmd == "initDB"):
        print "initializng database..."
        db.DefineTables()
    elif (cmd == "clearDB"):
        print "clearing DB..."
        if (query_yes_no("Are you REALLY sure?? This is dangerous!!", "no")):
            db.ClearDB()
            ## handcraft something here
            #print "doesn't do anything sorry!"
    elif (cmd == "AddRun"):
        # NEEDS TO UPDATE
        # extract info
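
query_yes_no is a small helper defined elsewhere; a minimal sketch of the behavior the clearDB branch relies on (prompt wording and loop details are assumptions):

def query_yes_no(question, default="yes"):
    # keep asking until the user answers yes or no;
    # an empty answer returns the default
    valid = {"yes": True, "y": True, "no": False, "n": False}
    while True:
        answer = raw_input(question + " [y/n] ").strip().lower()
        if answer == "" and default is not None:
            return valid[default]
        if answer in valid:
            return valid[answer]
        print "Please answer 'yes' or 'no'."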
Example no. 8
def main(argv):
    global CANVAS_WIDTH,CANVAS_HEIGHT,OUTPUT_DIRECTORY
    
    init()
    # initialize lists used to store data
    hists_to_plot = []
    hists_to_sum  = []
    macros_to_run = []
    
    # read in command line args
    parser = OptionParser(usage = "make_monitoring_plots.py [options] <list of files to process>")
    parser.add_option("-D","--output_dir", dest="output_dir",
                      help="Directory where output files will be stored")
    parser.add_option("-S","--canvas_size", dest="canvas_size",
                      help="Size of canvas in the format '[width]x[height]', default = 800x600")
    parser.add_option("-f","--file_list", dest="file_list",
                      help="Specify the list of files to process")
    parser.add_option("-H","--histogram_list", dest="hname_list",
                      help="Specify the list of names of histograms to plot")
    parser.add_option("-M","--macro_list", dest="macroname_list",
                      help="Specify the list of macro files to make plots of")
#    parser.add_option("-i","--input_file", dest="input_file",
#                      help="Specify the file name to process.")

    (options, args) = parser.parse_args(argv)

#    if( options.file_list is None and  options.input_file is None ):
#        print "Either an input file (-i) or file list (-f) must be specified!"
#        parser.print_help()
#        exit(0)

#    if( options.file_list is not None and options.input_file is not None ):
#        print "Please specify either -i or -f options, not both!"
#        parser.print_help()
#        exit(0)


    if(options.output_dir):
        if(os.path.isdir(options.output_dir)):
            OUTPUT_DIRECTORY = options.output_dir
        else:
            print "WARNING: Output directory '"+options.output_dir+"' does not exist, defaulting to current directory..."
    if(options.canvas_size):
        try:
            (new_width, new_height) = options.canvas_size.split("x")
            CANVAS_WIDTH  = int(new_width)
            CANVAS_HEIGHT = int(new_height)
        except ValueError:
            print "Invalid canvas size specified, using defaults..."
        
    
    ## build List of files to run over
    file_list = []
    # get files passed in on command line
    for line in args:
        file_name = line.strip()
        if file_name == "":
            continue
        if file_name[0] == '#':
            continue
        if os.path.isfile(file_name):
            file_list.append(file_name)
        else:
            print "input file does not exist: " + file_name
    if(options.file_list):
        f = open(options.file_list)
        for line in f:
            file_name = line.strip()
            if file_name == "":
                continue
            if file_name[0] == '#':
                continue
            if os.path.isfile(file_name):
                file_list.append(file_name)
            else:
                print "input file does not exist: " + file_name
        f.close()

    ## load lists of objects to save
    if(options.hname_list):
        histf = open(options.hname_list)
        for line in histf:
            hname = line.strip()
            if hname == "":
                continue
            if hname[0] == '#':
                continue
            hists_to_plot.append(hname)
        histf.close()
    if(options.macroname_list):
        macrof = open(options.macroname_list)
        for line in macrof:
            macroname = line.strip()
            if macroname == "":
                continue
            if macroname[0] == '#':
                continue
            macros_to_run.append(macroname)
        macrof.close()

    ## sanity checks
    if(len(file_list) == 0):
        print "No input files given!"
        return
        #sys.exit(0)
    if(len(hists_to_plot) == 0):
        print "No histograms to save!"
    if(len(macros_to_run) == 0):
        print "No macros to save!"


    ## we need to sum both the histograms that we are plotting by themselves
    ## and the histograms used by the macros we want
    hists_to_sum = list(hists_to_plot)   # copy, don't alias
    for macro in macros_to_run:
        new_macro_hists = extract_macro_hists(macro)
        if not new_macro_hists:
            continue
        # merge lists without duplicates
        hists_to_sum = list( set(hists_to_sum) | set(new_macro_hists) )

    ## initializing monitoring DB connection
    mondb = datamon_db()

    ## save mapping of  "hname or hnamepath => ROOT histogram object"
    sum_hists = {}
    sum_dir = TMemFile(".monitor_tmp.root","RECREATE")

    ## run over data to make some plots
    for filename in file_list:
        root_file = TFile(filename)
        # TFile doesn't return None on failure, so check IsZombie() instead
        if (root_file is None) or root_file.IsZombie():
            print "Could not open file: " + filename
            continue
        print "processing file " + filename + " ..."
        AccessHistogramsRecursively(root_file, "", sum_hists, sum_dir, hists_to_sum)
        root_file.Close()


    ## finally, make the plots and save them as files
    ClearPad(c1)
    c1.SetCanvasSize(CANVAS_WIDTH,CANVAS_HEIGHT)
    SavePlots(sum_hists, sum_dir, hists_to_plot, macros_to_run)

    ## clean up some memory
    sum_dir.Close()
    del sum_hists
    del hists_to_sum
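
extract_macro_hists is defined elsewhere; a sketch assuming the RootSpy-style convention in which monitoring macros declare the histograms they need in comment lines of the form "// hnamepath: /path/to/hist" (the convention is an assumption here):

def extract_macro_hists(macro_filename):
    # collect the hnamepaths declared in the macro's header comments
    macro_hists = []
    try:
        macrof = open(macro_filename)
    except IOError:
        print "could not open macro file: " + macro_filename
        return macro_hists
    for line in macrof:
        line = line.strip()
        if line.startswith("// hnamepath:"):
            macro_hists.append(line[len("// hnamepath:"):].strip())
    macrof.close()
    return macro_hists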
Example no. 9
def main(argv):
    # configuration vars
    RUN_PERIOD = "RunPeriod-2014-10"
    CONDITION_FILE_FORMAT = "/work/halld/online_monitoring/conditions/run_conditions%06d.dat"
    # assume that the files are loaded on the cache disk
    RAWDATA_DIR_FORMAT = "/cache/mss/halld/RunPeriod-2014-10/rawdata/Run%06d"

    # read in run number from command line
    try:
        run_number = int(argv[0])
    except (IndexError, ValueError):
        print "Need to pass the run number to process as a command line argument!"
        return

    run_properties = init_property_mapping()

    run_conditions = parse_condition_file(CONDITION_FILE_FORMAT % (run_number))
    if run_conditions is None:
        return

    # start extracting saved EPICS values
    #run_number = run_conditions['RUN']     ## check this?
    run_properties['beam_current'] = run_conditions['IBCAD00CRCUR6']
    run_properties['start_time'] = run_conditions['TIME']
    run_properties['solenoid_current'] = run_conditions[
        'HallD-PXI:Data:I_Shunt']

    # figure out which radiator was used
    # save luminosity factor = current * radiator thickness
    amorphous_radiator_position = float(
        run_conditions['hd:radiator:motor.RBV'])
    if fabs(amorphous_radiator_position - 135.948) < RADIATOR_TOLERANCE:
        run_properties['radiator_type'] = '2x10-5 RL'
        run_properties['luminosity'] = 1.7e-5 * float(
            run_properties['beam_current'])
    elif fabs(amorphous_radiator_position - 166.095) < RADIATOR_TOLERANCE:
        run_properties['radiator_type'] = '1x10-4 RL'
        run_properties['luminosity'] = 11.2e-5 * float(
            run_properties['beam_current'])
    elif fabs(amorphous_radiator_position - 196.262) < RADIATOR_TOLERANCE:
        run_properties['radiator_type'] = '3x10-4 RL'
        run_properties['luminosity'] = 22.7e-5 * float(
            run_properties['beam_current'])
    else:
        run_properties['radiator_type'] = 'None'
        #run_properties['luminosity'] = run_properties['beam_current']
        run_properties['luminosity'] = 0.

    # parse EVIO files to extract useful information
    # eventually the DAQ will report this properly?
    rawdata_evio_dir = RAWDATA_DIR_FORMAT % (run_number)
    if os.path.isdir(rawdata_evio_dir):
        filelist = [
            join(rawdata_evio_dir, f) for f in listdir(rawdata_evio_dir)
            if f.startswith("hd_raw") and f.endswith(".evio")
        ]
        filelist.sort()
        file_properties = ParseEVIOFiles(filelist)
        if len(file_properties) > 0:
            run_properties['num_events'] = file_properties['num_events']
            run_properties['num_files'] = file_properties['num_files']
            run_properties['start_time'] = file_properties['start_time']
            run_properties['end_time'] = file_properties['end_time']

    # pull out target information from the CCDB
    # load CCDB connection
    ccdb_context = InitCCDB()

    # read target index -> name mapping definition in from the CCDB
    target_types = load_target_types(ccdb_context, run_number)

    # make temp file to store CCDB info in
    fconst = NamedTemporaryFile()
    ccdb_context.process_command_line("dump /TARGET/target_parms:" +
                                      str(run_number) + " > " + fconst.name)

    # read in info
    fconst.flush()
    const_lines = fconst.readlines()

    if len(const_lines) < 2:
        print "Problem writing out CCDB constants to file!"
    else:
        # the first line of the output file from CCDB is junk, and our numbers are on the second line
        vals = const_lines[1].split()
        target_index = int(vals[0])
        if target_index in target_types:
            run_properties['target_type'] = target_types[target_index]
        else:
            print "Invalid target index from CCDB = " + str(target_index)
    fconst.close()   # closing the NamedTemporaryFile also deletes it

    if VERBOSE:
        print "RUN PROPERTIES FOR RUN " + str(run_number)
        print str(run_properties)

    # Add information to DB
    ## initialize DB
    db = datamon_db()
    ## add blank run to DB if it doesn't exist
    if (db.GetRunID(run_number) < 0):
        db.CreateRun(run_number)
    db.UpdateRunInfo(run_number, run_properties)
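
parse_condition_file is defined elsewhere; a minimal sketch assuming the run-conditions files hold one "NAME VALUE" pair per line, keyed by EPICS variable name (the file format is an assumption):

def parse_condition_file(filename):
    # e.g. a line might read:  IBCAD00CRCUR6 142.3
    try:
        f = open(filename)
    except IOError:
        print "Could not open run conditions file: " + filename
        return None    # callers check for None
    conditions = {}
    for line in f:
        tokens = line.strip().split(None, 1)
        if len(tokens) == 2:
            conditions[tokens[0]] = tokens[1]
    f.close()
    return conditions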
Example no. 10
# four identical 28-element lists of dummy hit counts
cdc_test_ring = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] * 2 + [1, 2, 3, 4, 5, 6, 7, 8]
cdc_test_data = [list(cdc_test_ring) for _ in range(4)]

mondb = datamon_db.datamon_db("test.sqlite")
mondb.DefineTables()

#mondb.db.executemany('INSERT INTO run_info VALUES (?,?,?,?,?)', run_test_data)
#mondb.db.executemany('INSERT INTO sc_hits VALUES (?,?,?, ?,?,?,?,?,?,?,?,?,?, ?,?,?,?,?,?,?,?,?,?, ?,?,?,?,?,?,?,?,?,? )', sc_test_data)

mondb.AddRunInfo(1, 9000, 100, 1, 1)
#print "run  = " + str(mondb.GetRunID(9000))
#print "run  = " + str(mondb.GetRunID(9001))

mondb.AddCDCRawHits(1, 1, 1, 100, cdc_test_data[0])

mondb.CloseDB()
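
Since datamon_db is backed by SQLite here, the test database can be inspected directly afterwards. A quick check of what the smoke test wrote (the table name run_info is taken from the commented-out INSERT above):

import sqlite3

conn = sqlite3.connect("test.sqlite")
for row in conn.execute("SELECT * FROM run_info"):
    print row
conn.close()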
Example no. 11
                  help="Minimum run number to process")
parser.add_option("-e","--max_run", dest="max_run",
                  help="Maximum run number to process")

(options, args) = parser.parse_args(sys.argv)

if(len(args) < 4):   # args[0] is the script name, and we need three more
    parser.print_help()
    sys.exit(0)

REVISION = args[1]
INPUT_DIRECTORY = args[2]
OUTPUT_DIRECTORY = args[3]

# initialize DB
db = datamon_db()

if(options.disable_plotting):
    MAKE_PLOTS = False
if(options.disable_db_summary):
    MAKE_DB_SUMMARY = False
#if(options.disable_run_conditions):
#    MAKE_RUN_CONDITIONS = False
if(options.process_run_conditions):
    MAKE_RUN_CONDITIONS = True
if(options.force):
    FORCE_PROCESSING = True
if(options.run_number):
    try:
        RUN_NUMBER = int(options.run_number)
    except ValueError: