def boundary_plots(args):
    nactions = sum([args.add, args.remove, args.update])
    if nactions > 1:
        fatal('Cannot add, remove, or update boundary plots simultaneously')
    run = args.boundary_run
    plot = args.plot_name

    # Print boundary plots if given a boundary and no actions
    if not run:
        return pjson(ret_dict(failure='Boundary plot not specified'))

    # Print list of plots defined on the boundary if no plot name and no
    # actions are given
    if run and not plot and nactions == 0:
        with ReferenceDatabase(Config().reference_db_path) as db:
            pjson(find_boundary_plots(db, run))
        return

    # Everything past this point requires a plot name and an action
    if not plot:
        return pjson(ret_dict(failure='Plot name not specified'))
    if nactions == 0:
        return pjson(ret_dict(failure='No action specified, e.g. --add'))

    with ReferenceDatabase(Config().reference_db_path) as db:
        up, down = args.up_run, args.down_run
        if args.add:
            pjson(add_boundary_plot(db, run, plot, up, down))
        elif args.remove:
            pjson(remove_boundary_plot(db, run, plot))
        elif args.update:
            pjson(update_boundary_plot(db, run, plot, up, down))
def main():
    parser = create_parser()
    args = parser.parse_args()
    runs = frozenset(args.runs)
    run_data_dir = args.run_data_dir
    output_dir = args.output_dir
    if not output_dir:
        output_dir = run_file_path(args.runs[0])

    # Change the run data directory to the user-specified one
    Config().run_data_dir = run_data_dir
    configWrapper = AnalysisConfigWrapper(Config().analysis_config)

    # Create the DB if it doesn't exist already
    file = Config().dq_db_file_path
    if os.path.isfile(file):
        db = DQDB(Config().dq_db_file_path)
        existing_runs = frozenset(db.get_runs())
        db.close()
    else:
        db = DQDB(file, 'CREATE')
        db.close()
        existing_runs = frozenset()

    runs_to_process = sorted(list(runs - existing_runs))
    for run in runs_to_process:
        combinerTrunk = configWrapper.getTrunkForRun(run)
        combinerTrunk.evaluate()
        combinerTrunk.write_to_db()
def setUp(self):
    """Use a temporary run directory for the tests."""
    self.old_rdd = Config().run_data_dir
    Config().run_data_dir = tempfile.mkdtemp()
    with open(Config().processed_run_list_path, "w") as f:
        for r in RUNS:
            f.write("{0}\n".format(r))
def test_property_values_persist_across_instances_on_init(self):
    """Multiple instances of Config should have equal property values."""
    key = self.test_key
    value = self.test_value
    # Instantiate the first Config with a custom property value
    Config(**{key: value})
    # Instantiate a second instance, which should have the custom value
    self.assertEqual(getattr(Config(), key), value)
def test_property_values_persist_across_instances_on_assignment(self):
    """Multiple instances of Config should have equal property values."""
    key = self.test_key
    value = self.test_value
    # Instantiate the first Config, then set the custom value as a property
    c1 = Config()
    setattr(c1, key, value)
    # Instantiate a second instance, which should have the custom value
    self.assertEqual(getattr(Config(), key), value)
def veloview_configuration(param, run_data_dir):
    from veloview.runview import utils

    # Change the run data directory to the user-specified one
    Config().run_data_dir = run_data_dir

    if param == "run_view_config":
        sys.stdout.write(json.dumps(Config().run_view_pages))
    elif param == "run_list":
        sys.stdout.write("\n".join([str(r) for r in utils.run_list()]))
def setUp(self):
    """Create dictionaries needed by combiners."""
    dirname = os.path.join(os.path.dirname(__file__), 'fixtures')
    orfdata = TFile(
        os.path.abspath(os.path.join(dirname, 'dqm_data.root')), 'read')
    orfref = TFile(
        os.path.abspath(os.path.join(dirname, 'dqm_ref.root')), 'read')
    # valid ROOT files
    assert (not orfdata.IsZombie())
    assert (not orfref.IsZombie())

    self.tdir = tempfile.mkdtemp()
    self.rfdata = TFile(os.path.join(self.tdir, 'fdata.root'), 'recreate')
    self.rfref = TFile(os.path.join(self.tdir, 'fref.root'), 'recreate')

    hist_recipes = [
        (get_avg_trend, 'Vetra/NoiseMon/ADCCMSuppressed',
         ('RMSNoise_vs_ChipChannel', 'AvgRMSNoise_trend')),
        (get_avg_hist, 'Vetra/NoiseMon/ADCCMSuppressed',
         ('RMSNoise_vs_ChipChannel', 'AvgRMSNoise_all')),
        (get_avg_hist, 'Vetra/NoiseMon/ADCCMSuppressed',
         ('RMSNoise_vs_ChipChannel', 'AvgRMSNoise_R', 'r')),
        (get_avg_hist, 'Vetra/NoiseMon/ADCCMSuppressed',
         ('RMSNoise_vs_ChipChannel', 'AvgRMSNoise_Phi', 'p')),
        # (get_avg_hist, 'Vetra/VeloPedestalSubtractorMoni',
        #  ('Ped_Sub_ADCs_Profile', 'Ped_Sub_ADCs_all'))
    ]
    # histograms: make, save, and cleanup
    for recipe in hist_recipes:
        href = recipe[0](orfref.GetDirectory(recipe[1]), *recipe[2])
        self.rfref.WriteTObject(href)
        del href
        hdata = recipe[0](orfdata.GetDirectory(recipe[1]), *recipe[2])
        self.rfdata.WriteTObject(hdata)
        del hdata
    self.rfref.Close()
    self.rfdata.Close()

    # Write DQ database to temp directory (rather than run directory)
    Config().dq_db_file_path = os.path.join(self.tdir, Config().dq_db)

    configfile = os.path.join(os.path.dirname(__file__),
                              'analysis_config_test.py')
    with open(configfile, 'r') as inputFile:
        exec(inputFile.read())
    config = AnalysisConfigWrapper(
        (analysis_config_branches, analysis_config_leaves))
    self.mycombiner = config.getTrunk(orfdata.GetName(), orfref.GetName())

    # Results to compare against
    self.results = {"score": Score(70.62594356001006), "lvl": ERROR}
def setUp(self):
    """Check that the value of the test configuration key isn't the test value.

    If this isn't true, methods that test changing configuration values
    will always pass.
    """
    self.assertNotEqual(getattr(Config(), self.test_key), self.test_value)
def test_all_default_properties_are_available(self):
    """All keys on DEFAULTS should be properties on Config instances."""
    c = Config()
    for key in DEFAULTS:
        try:
            getattr(c, key)
        except:
            self.fail('Could not retrieve `{0}` from Config'.format(key))
def test_can_instantiate(self):
    """Config should instantiate without errors or warnings."""
    c = Config()
    # Duck typing: should be able to access a key of DEFAULTS as a property
    try:
        self.assertIsNotNone(getattr(c, self.test_key))
    except AttributeError:
        self.fail('Could not access DEFAULTS members on Config()')
def test_default_properties_set_correctly(self):
    """The values in DEFAULTS should be correctly set on Config.

    This assumes that nothing else in VeloAnalysisFramework will modify
    Config.
    """
    c = Config()
    for key, value in DEFAULTS.iteritems():
        self.assertEqual(getattr(c, key), value)
def test_can_set_property_value_on_instantiation(self):
    """Should be able to override DEFAULTS with keyword arguments."""
    # Get some key to test with
    key = self.test_key
    value = 'MyTestValue'
    # Ensure the test value isn't the default value (sanity check)
    self.assertNotEqual(value, DEFAULTS[key])
    # Instantiate the Config object with the custom property value
    c = Config(**{key: value})
    self.assertEqual(getattr(c, key), value)
def create_parser():
    parser = argparse.ArgumentParser(
        description=__doc__.split("\n")[0],
        epilog="\n".join(__doc__.split("\n")[1:]))
    parser.add_argument("runs", type=int, nargs='+',
                        help="Run numbers")
    parser.add_argument("--run-data-dir", default=Config().run_data_dir,
                        help="Directory to search for run list and data")
    parser.add_argument("--output-dir", default=None,
                        help="Directory to write output to")
    return parser
def dereference(args):
    if not args.run:
        fatal('Nominal run not specified')
    run = args.run
    plot = args.plot
    polarity = RunDB().polarity(run)
    if not polarity:
        fatal('Could not resolve polarity of run {0}'.format(run))
    if run and not plot:
        with ReferenceDatabase(Config().reference_db_path) as db:
            run = db.reference_run(run, polarity)
    if run and plot:
        with ReferenceDatabase(Config().reference_db_path) as db:
            run = db.reference_run_for_plot(run, plot, polarity)
    ret = ret_dict(success='')
    ret['data'] = run
    pjson(ret)
def run_boundaries(args):
    if sum([args.add, args.remove, args.update]) > 1:
        fatal('Cannot add, remove, or update boundaries simultaneously')

    # Print list of run boundaries in the DB if we're not manipulating them
    if not args.run_number:
        with ReferenceDatabase(Config().reference_db_path) as db:
            pjson(db.run_boundaries())
        return

    with ReferenceDatabase(Config().reference_db_path) as db:
        run = args.run_number
        up, down = args.up_run, args.down_run
        if args.add:
            pjson(add_boundary_run(db, run, up, down))
        elif args.remove:
            pjson(remove_boundary_run(db, run))
        elif args.update:
            pjson(update_boundary_run(db, run, up, down))
        else:
            pjson(find_boundary_runs(db, run))
def get_trending_plot(name, runRange, formatter=dictionary_formatter):
    """Get a trending plot, showing a certain variable plotted against run number.

    @param name the name of the variable to plot.
    @param runRange list of all run numbers to plot. If this contains exactly
        two items, it is treated as a range instead, plotting all runs with
        run number greater than or equal to the first item but not greater
        than the second.
    """
    db = DQDB(Config().dq_db_file_path)
    data = db.trend(name, runRange)
    db.close()
    return formatter(dict(name=name,
                          title='run number versus {0}'.format(name),
                          xLabel="run number",
                          yLabel=name,
                          data=data))
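# Usage sketch for get_trending_plot, illustrating the two interpretations of
# runRange described in the docstring. The variable name and run numbers are
# hypothetical placeholders and must correspond to entries in the DQ database.
#
# An explicit list (more than two items) plots exactly these runs:
trend_explicit = get_trending_plot('example_noise_score',
                                   [154030, 154032, 154050])
# A two-item list is treated as a range: every stored run with
# 154000 <= run number <= 155000 is plotted.
trend_ranged = get_trending_plot('example_noise_score', [154000, 155000])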
def get_2d_trending_plot(nameX, nameY, runRange,
                         formatter=dictionary_formatter):
    """Get a trending plot, showing two variables plotted against each other.

    @param nameX the name of the first variable to plot.
    @param nameY the name of the second variable to plot.
    @param runRange list of all run numbers to plot. If this contains exactly
        two items, it is treated as a range instead, plotting all runs with
        run number greater than or equal to the first item but not greater
        than the second.
    """
    db = DQDB(Config().dq_db_file_path)
    data = db.trend2d(nameX, nameY, runRange)
    db.close()
    return formatter(dict(name='{0};{1}'.format(nameX, nameY),
                          title='{0} versus {1}'.format(nameX, nameY),
                          xLabel=nameX,
                          yLabel=nameY,
                          data=data))
def create_parser():
    parser = argparse.ArgumentParser(
        description=__doc__.split("\n")[0],
        epilog="\n".join(__doc__.split("\n")[1:]),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("param", type=str,
                        choices=["run_view_config", "run_list"],
                        help="Configuration parameter to return")
    parser.add_argument("--run-data-dir", default=Config().run_data_dir,
                        help="Directory to search for run list and data")
    return parser
def create_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--run-data-dir", default=Config().run_data_dir,
                        help="Directory to search for run list and data")
    subparsers = parser.add_subparsers()

    parser_rb = subparsers.add_parser('run_boundaries',
                                      help='View and modify run boundaries')
    parser_rb.add_argument('--add', action='store_true',
                           help='Add a run boundary')
    parser_rb.add_argument('--remove', action='store_true',
                           help='Remove a run boundary')
    parser_rb.add_argument('--update', action='store_true',
                           help='Edit the reference runs for a run boundary')
    parser_rb.add_argument('run_number', type=int, nargs='?',
                           help='Boundary run number to process (optional)')
    parser_rb.add_argument('--up-run', type=int,
                           help='Run number for magnet up reference run')
    parser_rb.add_argument('--down-run', type=int,
                           help='Run number for magnet down reference run')
    parser_rb.set_defaults(func=run_boundaries)

    parser_bp = subparsers.add_parser('boundary_plots',
                                      help='View and modify boundary plots')
    parser_bp.add_argument('--add', action='store_true',
                           help='Add a boundary plot')
    parser_bp.add_argument('--remove', action='store_true',
                           help='Remove a boundary plot')
    parser_bp.add_argument('--update', action='store_true',
                           help='Edit the reference runs for a boundary plot')
    parser_bp.add_argument('boundary_run', type=int, nargs='?',
                           help='Boundary run number to process (optional)')
    parser_bp.add_argument('plot_name', type=str, nargs='?',
                           help='Plot name to process (optional)')
    parser_bp.add_argument('--up-run', type=int,
                           help='Run number for magnet up reference run')
    parser_bp.add_argument('--down-run', type=int,
                           help='Run number for magnet down reference run')
    parser_bp.set_defaults(func=boundary_plots)

    parser_deref = subparsers.add_parser(
        'dereference',
        help='Find the reference run for a given plot and/or run')
    parser_deref.add_argument('--run', help='Run number')
    parser_deref.add_argument('--plot', help='Plot name')
    parser_deref.set_defaults(func=dereference)

    return parser
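# Minimal wiring sketch for the parser above (an assumption about how the
# entry point could look, not the actual script body): parse the arguments,
# apply the chosen run data directory, then dispatch to the handler that each
# subparser registered via set_defaults(func=...).
if __name__ == '__main__':
    args = create_parser().parse_args()
    Config().run_data_dir = args.run_data_dir
    args.func(args)

# Example command lines accepted by the subparsers ("refdb.py" is a
# placeholder script name; run numbers and plot name are arbitrary):
#   refdb.py run_boundaries
#   refdb.py run_boundaries 165000 --add --up-run 164001 --down-run 164002
#   refdb.py boundary_plots 165000 SomePlotName --update --up-run 164003
#   refdb.py dereference --run 165100 --plot SomePlotName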
def create_parser():
    parser = argparse.ArgumentParser(
        description=__doc__.split("\n")[0],
        epilog="\n".join(__doc__.split("\n")[1:]))
    parser.add_argument("run", type=int, help="Run number")
    parser.add_argument("plot", type=str, help="Plot name in run file")
    parser.add_argument("sensor", type=int, nargs="?", default=0,
                        help="Sensor number")
    parser.add_argument("--run-data-dir", default=Config().run_data_dir,
                        help="Directory to search for run list and data")
    parser.add_argument("--no-reference", action="store_true",
                        help="Omit the reference plot")
    return parser
def runview_plot(run, name, sensor, run_data_dir, refRun='Auto', getRef=False,
                 normalise=False, notifyBox=None):
    Config().run_data_dir = run_data_dir
    err = False
    # Need to append the sensor number to the name.
    # if not utils.valid_run(run):
    #     err = True
    #     msg = "Invalid run number provided: {0}".format(run)
    #     print msg
    if not utils.valid_sensor(sensor):
        err = True
        msg = "Invalid sensor number provided: {0}".format(sensor)
        print msg
    name = name.format(sensor)
    if getRef:
        if refRun == 'Auto':
            print 'Getting auto ref'
            return plots.get_run_plot_with_reference(name, run,
                                                     normalise=normalise,
                                                     notifyBox=notifyBox)
        else:
            print 'Getting specified ref'
            return plots.get_run_plot_with_reference(name, run, refRun=refRun,
                                                     normalise=normalise,
                                                     notifyBox=notifyBox)
    else:
        return plots.get_run_plot(name, run, normalise=normalise,
                                  notifyBox=notifyBox)
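# Illustrative calls to runview_plot covering its three modes (a sketch; the
# run numbers, data directory, and plot name are placeholders, and the
# "{0:03d}" specifier assumes a sensor-dependent plot name).
PLOT = 'Velo/ExampleMonitor/noise_sensor_{0:03d}'
# Nominal plot only:
nominal = runview_plot(165000, PLOT, 5, '/path/to/run_data')
# Nominal plus automatically resolved reference:
with_auto_ref = runview_plot(165000, PLOT, 5, '/path/to/run_data',
                             getRef=True)
# Nominal plus an explicitly chosen reference run:
with_fixed_ref = runview_plot(165000, PLOT, 5, '/path/to/run_data',
                              refRun=164000, getRef=True)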
def retrieve_run_view_plot(run, plot, sensor, noreference, run_data_dir):
    import json
    from veloview.runview import plots, response_formatters, utils

    # Change the run data directory to the user-specified one
    Config().run_data_dir = run_data_dir

    # Check all arguments have valid values
    err = False
    if not utils.valid_run(run):
        err = True
        msg = "Invalid run number provided: {0}".format(run)
    if not utils.valid_sensor(sensor):
        err = True
        msg = "Invalid sensor number provided: {0}".format(sensor)
    if err:
        exit_with_error(msg)

    # Format the plot name with the sensor number
    # str.format will work even with no format specifiers in the string
    plot = plot.format(sensor)

    # Try to get the plot object, formatting it to JSON
    try:
        if noreference:
            # Return a 2-tuple to be consistent with the nominal+reference case
            response = (plots.get_run_plot(
                plot, run, reference=False,
                formatter=response_formatters.json_formatter),
                json.dumps(None))
        else:
            response = plots.get_run_plot_with_reference(
                plot, run, formatter=response_formatters.json_formatter)
    except KeyError as e:
        err = True
        msg = ("Invalid plot name provided: {0}. "
               "Exception caught: {1}.").format(plot, e)
def run_list():
    """Return a list of run numbers as integers sorted high-to-low."""
    run_file_lines = tuple(open(Config().processed_run_list_path, "r"))
    return sorted([int(l.strip()) for l in run_file_lines], reverse=True)
def reference_run_file(run):
    """Return the reference run file for the given run."""
    refdb = ReferenceDatabase(Config().reference_db_path)
    polarity = rundb.RunDB().polarity(run)
    ref_run = refdb.reference_run(run, polarity)
    return run_file(ref_run)
def run_file_path(run):
    """Return TFile object directory path for the given run."""
    return paths.make_dir_tree(run, Config().run_data_dir)
def reference_run(plot, run):
    """Return the reference run number for the plot and nominal run number."""
    refdb = ReferenceDatabase(Config().reference_db_path)
    polarity = rundb.RunDB().polarity(run)
    return refdb.reference_run_for_plot(run, plot, polarity)
import copy
import unittest
from numbers import Real

from veloview.config import Config

# Copy the dict so we can safely mutate it
run_view_pages = copy.deepcopy(Config().run_view_pages)
# The IV scans dictionary is a special case, currently handled only by the
# offline GUI, so we'll ignore it in these tests
run_view_pages.pop('IV', None)

# Required keys for the page dictionary
REQ_PAGE_KEYS = ['title']
# Value types for a given key within a page dictionary
PAGE_VALUE_TYPES = {'title': str, 'layout': tuple}
# Key name that holds the plot dictionary within a page dictionary
PLOTS_KEY = 'plots'
# Key name that holds the plot options dictionary within a plot dictionary
OPTIONS_KEY = 'options'
# Required keys for the plot dictionary
REQ_PLOT_KEYS = ['title', 'name']
# Value types for a given key within a plot dictionary
PLOT_VALUE_TYPES = {
    'title': str,
    'name': str,
    'short': str,
    'sensor_dependent': bool,
def get_dq_variables(self):
    return self.__add_variables_branch(
        Config().analysis_config, '',
        Config().analysis_config[0]['MasterCombiner'])
def get_dq_values(runnr):
    db = DQDB(Config().dq_db_file_path)
    values = db.read(runnr)
    db.close()
    return values
def set_comment(runnr, comment):
    db = DQDB(Config().dq_db_file_path)
    db.set_comment(runnr, comment)
    db.close()