class TimingCut_Run_Channel(Serializable, db.Model):
    """Per-channel TimingCut reading for one online run.

    One row = the TimingCut DQM histogram bin content for a single HCAL
    channel in a single run. Rows are zero-suppressed on insert.
    """
    __tablename__ = 'timingcut_run_channel'

    id = db.Column(db.Integer, primary_key=True)
    run = db.Column(db.Integer, db.ForeignKey('online_run.run'), index=True)
    channel_id = db.Column(db.BigInteger, db.ForeignKey('channel.id'), index=True)
    value = db.Column(db.Float)

    def __repr__(self):
        return "id {}, channel {}, run {} => {}".format(
            self.id, self.channel_id, self.run, self.value)

    def extract(self, run, emap_version="2018", overwrite=False):
        """Pull the TimingCut/depth histograms for *run* from DQM and
        store one row per non-zero channel.

        :param run: run number to extract
        :param emap_version: electronics-map version used to select channels
        :param overwrite: if True, replace any existing rows for this run
        """
        print("[TimingCut_Run_Channel::extract] Extracting for run {}".format(
            run))

        # Bail out if this run is already in the DB (unless overwriting).
        if not check_overwrite(
                TimingCut_Run_Channel, run, emap_version, overwrite=overwrite):
            return

        # The run itself must exist in the run table first.
        add_run(run, overwrite=False)

        dqm_data = load_dqm_object(run, "Hcal/DigiTask/TimingCut/depth")
        if len(dqm_data) == 0:
            print(
                "[TimingCut_Run_Channel::extract] ERROR : DQM data is empty! Skipping."
            )
            return

        # One 2D histogram per HCAL depth (1..7).
        depth_hists = {
            depth: dqm_data["depth{}".format(depth)] for depth in range(1, 8)
        }

        # Read every channel of this emap out of its depth histogram.
        for channel in Channel.query.filter(
                Channel.emap_version == emap_version):
            xbin, ybin = detid_to_histbins(channel.subdet, channel.ieta,
                                           channel.iphi)
            reading_value = depth_hists[channel.depth].GetBinContent(xbin, ybin)
            # Zero suppress. This plot monitors drifts, not errors.
            if reading_value == 0:
                continue
            db.session.add(
                TimingCut_Run_Channel(run=run,
                                      value=reading_value,
                                      channel_id=channel.id))
        db.session.commit()
class PedestalRMS_Run_Channel(Serializable, db.Model):
    """Per-channel pedestal RMS for one local (PEDESTAL) run.

    One row = the PedestalTask RMS histogram bin content for a single HCAL
    channel in a single run. Rows are zero-suppressed on insert.
    """
    __tablename__ = 'pedestal_rms_run_channel'

    id = db.Column(db.Integer, primary_key=True)
    run = db.Column(db.Integer, db.ForeignKey('local_run.run'), index=True)
    channel_id = db.Column(db.BigInteger, db.ForeignKey('channel.id'), index=True)
    value = db.Column(db.Float, nullable=False)

    def __repr__(self):
        # BUG FIX: the format string had two placeholders for three
        # arguments, so channel_id was silently printed in the "run" slot.
        # Now matches the "id, channel, run" convention of the sibling
        # *_Run_Channel classes.
        return "id {}, channel {}, run {}".format(self.id, self.channel_id,
                                                  self.run)

    def extract(self, run, emap_version="2017J", overwrite=False):
        """Pull the PedestalTask/RMS/depth histograms for *run* from DQM and
        store one row per non-zero channel.

        :param run: run number to extract
        :param emap_version: electronics-map version; also selects which
            PEDESTAL dataset era to read from
        :param overwrite: if True, replace any existing rows for this run
        """
        print("[PedestalRMS_Run_Channel::extract] Extracting for run {}".format(run))

        # Check that this run is not already in DB
        if not check_overwrite(PedestalRMS_Run_Channel, run, emap_version,
                               overwrite=overwrite):
            return

        # Make sure run is in the run database
        add_run(run, overwrite=False)

        # The 2017J emap corresponds to the 2016 commissioning dataset.
        if emap_version == "2017J":
            dataset = "PEDESTAL/Commissioning2016/DQMIO"
        else:
            dataset = "PEDESTAL/Commissioning2018/DQMIO"
        dqm_data = load_dqm_object(run, dataset, "Hcal/PedestalTask/RMS/depth")
        # Guard against an empty DQM payload (consistent with
        # TimingCut_Run_Channel.extract); previously this fell through to a
        # KeyError below.
        if len(dqm_data) == 0:
            print(
                "[PedestalRMS_Run_Channel::extract] ERROR : DQM data is empty! Skipping."
            )
            return

        # One 2D histogram per HCAL depth (1..7).
        hist_pedestal_rms = {
            depth: dqm_data["depth{}".format(depth)] for depth in range(1, 8)
        }

        # Extract all pedestal RMS values from the DQM histograms.
        channels = Channel.query.filter(Channel.emap_version == emap_version)
        for channel in channels:
            # Only real detector subdetectors carry pedestal RMS here.
            if channel.subdet not in ("HB", "HE", "HF", "HO", "HEP17"):
                continue
            xbin, ybin = detid_to_histbins(channel.subdet, channel.ieta,
                                           channel.iphi)
            this_value = hist_pedestal_rms[channel.depth].GetBinContent(
                xbin, ybin)
            # Zero suppress
            if this_value == 0:
                continue
            db.session.add(
                PedestalRMS_Run_Channel(run=run,
                                        value=this_value,
                                        channel_id=channel.id))
        db.session.commit()
class Channel(Serializable, db.Model):
    """One HCAL readout channel for a given electronics-map version.

    Identified both by detector coordinates (subdet, ieta, iphi, depth) and
    by electronics coordinates (crate, slot, dcc, spigot, fiber,
    fiber_channel). Per-run readings reference this table via backrefs.
    """
    __tablename__ = "channel"
    # Primary key is a hash of the detector coordinates + emap version
    # (see __init__), not an autoincrement.
    id = db.Column(db.BigInteger, primary_key=True)
    subdet = db.Column(db.String(8), nullable=False)
    ieta = db.Column(db.SmallInteger, nullable=False)
    iphi = db.Column(db.SmallInteger, nullable=False)
    depth = db.Column(db.SmallInteger, nullable=False)
    crate = db.Column(db.SmallInteger, nullable=False)
    slot = db.Column(db.SmallInteger, nullable=False)
    dcc = db.Column(db.SmallInteger, nullable=False)
    spigot = db.Column(db.SmallInteger, nullable=False)
    fiber = db.Column(db.SmallInteger, nullable=False)
    fiber_channel = db.Column(db.SmallInteger, nullable=False)
    emap_version = db.Column(db.String(8), nullable=False)

    # Backrefs: per-run readings that point back at this channel.
    pedestal_mean_run_channel = db.relationship('PedestalMean_Run_Channel',
                                                backref='channel',
                                                lazy='dynamic')
    pedestal_rms_run_channel = db.relationship('PedestalRMS_Run_Channel',
                                               backref='channel',
                                               lazy='dynamic')
    #sipmgaina_run_channel = db.relationship('SiPMGainA_Run_Channel', backref='channel', lazy='dynamic')
    tdctime_run_channel = db.relationship('TDCTime_Run_Channel',
                                          backref='channel',
                                          lazy='dynamic')
    timingcut_run_channel = db.relationship('TimingCut_Run_Channel',
                                            backref='channel',
                                            lazy='dynamic')

    def __init__(self, subdet, ieta, iphi, depth, crate, slot, dcc, spigot,
                 fiber, fiber_channel, emap_version):
        """Create a channel; its id is derived from (subdet, ieta, iphi,
        depth, emap_version), so detector coordinates define identity and
        electronics coordinates are payload."""
        self.subdet = subdet
        self.ieta = ieta
        self.iphi = iphi
        self.depth = depth
        self.crate = crate
        self.slot = slot
        self.dcc = dcc
        self.spigot = spigot
        self.fiber = fiber
        self.fiber_channel = fiber_channel
        self.emap_version = emap_version
        # NOTE: python has the unfortunate feature hash(-1) == hash(-2).
        # So here, we convert ieta to a string first.
        # NOTE(review): in Python 3, str hashing is salted per process
        # (PYTHONHASHSEED), so this id is NOT reproducible across
        # interpreter runs unless PYTHONHASHSEED is pinned. Changing to a
        # deterministic digest (e.g. hashlib) would orphan existing rows —
        # confirm how ids are regenerated before touching this.
        self.id = hash((subdet, str(ieta), iphi, depth, emap_version))

    def __repr__(self):
        return "Detector: ({}, {}, {}, {}) | Electronics: ({}, {}, {}, {}) | emap {}".format(
            self.subdet, self.ieta, self.iphi, self.depth, self.crate,
            self.slot, self.fiber, self.fiber_channel, self.emap_version)

    # Short string for plot legends
    def get_label(self):
        return "{} | ieta={} | iphi={} | depth={}".format(
            self.subdet, self.ieta, self.iphi, self.depth)

    @property
    def as_dict(self):
        # Detector coordinates only (no electronics), e.g. for JSON output.
        return {
            "subdet": self.subdet,
            "ieta": self.ieta,
            "iphi": self.iphi,
            "depth": self.depth
        }
class OnlineRun(db.Model):
    """One online (global) run, with metadata scraped from WBM runinfo and
    year-to-date luminosity from brilcalc."""
    __tablename__ = "online_run"

    run = db.Column(db.Integer, primary_key=True)
    start_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    ytd_lumi = db.Column(db.Float)
    mode = db.Column(db.String, nullable=True)
    events = db.Column(db.Integer, nullable=False)

    # On-disk cache location for per-run data.
    _cache = cache_directory + "/runs/"

    def __repr__(self):
        return str(self.run)

    def query_runinfo(self, run, keys=None, overwrite_runinfo_cache=True):
        """Scrape run parameters from the WBM RunParameters servlet.

        :param run: run number to query
        :param keys: runinfo keys to extract; defaults to start time,
            trigger mode, and number of events built
        :param overwrite_runinfo_cache: if True (default), re-download even
            if a cached copy exists
        :return: dict mapping each found key to its table cell

        Alternative idea: sql query. This doesn't work on the VM, because
        the DB can't be found... might work with a tunnel, but that's
        annoying. SQL query for runinfo DB:
        sqlplus -S cms_hcl_runinfo/run2009info@cms_rcms @/WBM/sql_templates/query.sql STRING_VALUE CMS.HCAL%:LOCAL_RUN_KEY runnumber
        """
        # BUG FIX: was a mutable default argument ([...]) shared across calls.
        if keys is None:
            keys = [
                "CMS.LVL0:START_TIME_T", "CMS.LVL0:TRIGGER_MODE_AT_START",
                "CMS.DAQ:NB_EVENTS_BUILT"
            ]
        runinfo_url = "https://cmswbm.cern.ch/cmsdb/servlet/RunParameters?RUN={}&FORMAT=XML".format(
            run)
        # requests doesn't seem to pick up the SSL cookie, so shell out to
        # wget instead.
        runinfo_filename = "{}/runinfo/{}.dat".format(cache_directory, run)
        if not os.path.exists(runinfo_filename) or overwrite_runinfo_cache:
            runinfo_command = "wget '{}' -O {} --no-check-certificate".format(
                runinfo_url, runinfo_filename)
            print(runinfo_command)
            os.system(runinfo_command)
        print("[debug] Runinfo saved to {}".format(runinfo_filename))
        # BUG FIX: html was only initialized inside the download branch, so
        # reading from a warm cache hit an UnboundLocalError. Always read
        # the (possibly cached) file here.
        with open(runinfo_filename) as runinfo_file:
            html = runinfo_file.read()

        # Parsing example from
        # https://stackoverflow.com/questions/23377533/python-beautifulsoup-parsing-table
        table_data = {}
        # Explicit parser avoids bs4's "no parser specified" warning;
        # html.parser is the stdlib default bs4 falls back to anyway.
        soup = BeautifulSoup(html, "html.parser")
        table = soup.find('table', attrs={'class': 'example'})
        print(table)
        table_body = table.find('tbody')
        rows = table_body.find_all('tr')
        for row in rows:
            cols = row.find_all('td')
            for key in keys:
                # BUG FIX: was `col[0]` / `col[1]` — `col` was never
                # defined (NameError); the loop variable is `cols`.
                # NOTE(review): `key in cols[0]` tests membership on a bs4
                # Tag; presumably `key in cols[0].get_text()` was intended
                # — confirm against the actual WBM page layout.
                if key in cols[0]:
                    table_data[key] = cols[1]
        return table_data

    def get_ytd_lumi(self, run, overwrite_lumi_cache=False):
        """Return the year-to-date recorded luminosity (/fb) up to this
        run's start time, via brilcalc (cached on disk).

        :param run: run number (used only for the cache filename)
        :param overwrite_lumi_cache: if True, rerun brilcalc even if cached
        """
        brilcalc_filename = "{}/brilcalc/{}.dat".format(cache_directory, run)
        if not os.path.exists(brilcalc_filename) or overwrite_lumi_cache:
            # BUG FIX: `start_time` was an unresolved bare name — use the
            # instance attribute. Also, the end-time format used
            # `%h:%m:%s` (month/invalid directives) where hour:minute:second
            # (`%H:%M:%S`) was clearly intended.
            brilcalc_command = r"brilcalc lumi -u /fb --begin 01/01/{}\ 00:00:00 --end {} -o {}".format(
                self.start_time.strftime(r"%y"),
                self.start_time.strftime(r"%m/%d/%y\ %H:%M:%S"),
                brilcalc_filename)
            print(brilcalc_command)
            os.system(brilcalc_command)
        # BUG FIX: check_output returns bytes in Python 3; decode before
        # splitting on a str separator.
        summary_line = subprocess.check_output(
            ['tail', '-1', brilcalc_filename]).decode()
        # Summary line format:
        # #nfill,nrun,nls,ncms,totdelivered(/fb),totrecorded(/fb)
        return float(summary_line.split(",")[4])