コード例 #1
0
ファイル: utils.py プロジェクト: loiccoyle/pylossmap
def get_timber_db(*args, **kwargs):
    """Fetch a pytimber DB instance, cached in the module-level ``DB``.

    All arguments are forwarded to ``pytimber.LoggingDB``.  If the
    construction fails with AttributeError/TypeError the error is
    printed and ``DB`` is left as None.
    """
    global DB
    if DB is not None:
        # Already constructed once; reuse the cached handle.
        return DB
    try:
        DB = pytimber.LoggingDB(*args, **kwargs)
    except (AttributeError, TypeError) as err:
        print(err)
        DB = None
    return DB
コード例 #2
0
ファイル: test_loggingdb.py プロジェクト: piotrsow/pytimber
def test_search():
    # Live query against the logging database.
    ldb = pytimber.LoggingDB()

    # The HX:BETA% pattern must resolve to exactly the four
    # beta-star variables, in this order.
    expected = ["HX:BETASTAR_IP%d" % ip for ip in (1, 2, 5, 8)]
    assert ldb.search("HX:BETA%") == expected
コード例 #3
0
ファイル: LHCBSRT.py プロジェクト: nkarast/pytimber
    def fromdb(cls, t1, t2, beam='B1', db=None, verbose=False):
        """Build a BSRT instance from timber data.

        Retrieves data between ``t1`` and ``t2`` and calculates
        normalized emittances from the extracted values.
        Note: all values in self.emitfit are deleted.

        Example:
        --------
          To extract the data from timber:

            t1=pytimber.parsedate("2016-08-24 00:58:00.000")
            t2=pytimber.parsedate("2016-08-24 00:59:00.000")
            bsrt=pytimber.BSRT.fromdb(t1,t2,beam='B1')

        Parameters:
        -----------
        db : pytimber or pagestore database (a default
             pytimber.LoggingDB() is created when None)
        beam : either 'B1' or 'B2'
        t1,t2 : start and end time of extracted data in unix time
        verbose: verbose mode, default verbose = False

        Returns:
        --------
        class: BSRT class instance with dictionary of normalized
               emittances stored in self.emit, sorted after slot number
              {slot: [time [s],emith [um],emitv[um],sigh[mm],sigv[mm],
                      lsfh [mm], lsfv[mm], beth[mm], betv[mm],energy[GeV]]}
        """
        if beam not in ('B1', 'B2'):
            raise ValueError("beam = %s must be either 'B1' or 'B2'" % beam)
        if db is None:
            # No database given: create a default one to extract data.
            db = pytimber.LoggingDB()
            if verbose:
                print('... no database given, creating default database '
                      'pytimber.LoggingDB()')
        if verbose:
            print('... extracting data from timber')
            print('... calculating emittance for non-empty slots')
        # Fetch the raw timber data, then convert it into the
        # per-slot emittance dictionary.
        raw_data = _get_timber_data(beam=beam, t1=t1, t2=t2, db=db)
        emittances = _timber_to_emit(raw_data)
        return cls(db=db, emit=emittances, emitfit=None,
                   t_start=t1, t_end=t2, beam=beam)
コード例 #4
0
ファイル: timber_extract.py プロジェクト: pylhc/Beta-Beat.src
def lhc_fill_to_tfs(fill_number, keys=None, names=None):
    """ Extracts data for keys of fill from timber.

    Args:
        fill_number: fill number
        keys: list of data to extract
        names: dict to map keys to column names

    Returns: tfs pandas dataframe.
    """
    # Resolve the fill's time window, then delegate the actual
    # extraction to extract_between_times.
    db = pytimber.LoggingDB()
    start, end = get_fill_times(db, fill_number)
    return extract_between_times(start, end, keys, names)
def foo(q, vv):
    """Fetch variable *vv* from NXCALS and put the result on queue *q*.

    Intended to run as a worker (progress is printed along the way and
    the extracted data is handed back through ``q``).

    Args:
        q: a queue-like object with a ``put`` method.
        vv: name of the logging-database variable to extract.

    NOTE(review): relies on module-level ``t_fill_start`` and
    ``t_fill_end`` for the extraction window -- confirm they are
    defined by the enclosing module.
    """
    print(f'{vv} Here 0')
    # Imported locally so the (slow) pytimber/JVM setup happens in the
    # worker, not at module import time.  `time` was previously used
    # without any visible import -- imported here alongside pytimber.
    import time
    import pytimber
    print(f'{vv} Here 1')
    #db = pytimber.nxcals.NXCals()
    db = pytimber.LoggingDB(source='nxcals',
                            #sparkconf='large'
                            )
    print(f'{vv} Here 2')
    t1 = time.time()
    res = db.get([vv], t_fill_start, t_fill_end)
    t2 = time.time()
    print(f'elapsed {t2-t1}s')
    print(f'{vv} Here 3')
    q.put(res)
コード例 #6
0
 def read_timber(self, scale, offset, observable=None, plot_me=True, pickle_me=False, ax_obj=None):
     """Extract logged data for *observable* over a trailing time window.

     The window ends now and starts ``offset`` units of *scale* in the
     past.

     Parameters
     ----------
     scale : str
         One of "days", "hours", "minutes".
     offset : number
         Non-negative size of the window, in units of *scale*.
     observable : list of str, optional
         Logging-database variable names.  Defaults to
         ['IP.NSRCGEN:SOURCEHTAQNI'].
     plot_me : bool
         If True, plot the resulting dataframe.
     pickle_me : bool
         If True, pickle the dataframe to '<last timestamp>.pickle'.
     ax_obj : matplotlib axes, optional
         Axes object to plot into.

     Returns
     -------
     pandas.DataFrame indexed by human-readable timestamps, one column
     per observable.
     """
     # Avoid the mutable-default-argument pitfall: build the default here.
     if observable is None:
         observable = ['IP.NSRCGEN:SOURCEHTAQNI']
     # Validate inputs up front (raise instead of assert, which is
     # stripped under `python -O`).
     if offset < 0:
         raise ValueError("Please provide a finite positive offset.")
     if scale == 'hours':
         delta = datetime.timedelta(hours=offset)
     elif scale == 'minutes':
         delta = datetime.timedelta(minutes=offset)
     elif scale == 'days':
         delta = datetime.timedelta(days=offset)
     else:
         raise NameError('Wrong input for datetime conversion. Choose between "days","hours","minutes"')

     t2 = datetime.datetime.now()
     t1 = t2 - delta

     db = pytimber.LoggingDB()

     # Fetch all observables in a single query (the original issued one
     # query to build the index and then re-fetched every column).
     data = db.get(observable, t1, t2)

     # Index from the first observable's timestamps, rendered as dates.
     calendar = [pytimber.dumpdate(ts) for ts in data[observable[0]][0]]

     df = pd.DataFrame(index=calendar, columns=observable)
     for obs in observable:
         df[obs] = data[obs][1]

     if pickle_me:
         df.to_pickle(calendar[-1] + '.pickle')

     if plot_me:
         df.plot(ax=ax_obj, marker='')
         plt.show()

     return df
コード例 #7
0
def extract_between_times(t_start, t_end, keys=None, names=None):
    """
    Extracts data for keys between t_start and t_end from timber.

    Args:
        t_start: starting time in local time or timestamp.
        t_end: end time in local time or timestamp.
        keys: list of data to extract.
        names: dict to map keys to column names.

    Returns: tfs pandas dataframe.
    """
    db = pytimber.LoggingDB()
    if keys is None:
        keys = get_tune_and_coupling_variables(db)

    extracted = db.get(keys, t_start, t_end)

    out_df = tfs.TfsDataFrame()
    for key in keys:
        values = extracted[key.upper()]
        if values[1][0].size > 1:
            raise NotImplementedError(
                "Multidimensional variables are not implemented yet.")

        # (timestamps, values) rows become two columns, indexed by time.
        column = names.get(key, key)
        frame = tfs.TfsDataFrame(
            np.asarray(values).transpose(),
            columns=[TIME_COL, column],
        ).set_index(TIME_COL)

        out_df = out_df.merge(frame, how="outer",
                              left_index=True, right_index=True)

    # Convert raw timestamps to CERN datetimes and record the window
    # in the headers as UTC strings.
    out_df.index = [CERNDatetime.from_timestamp(i) for i in out_df.index]
    out_df.headers[START_TIME] = CERNDatetime.from_timestamp(
        t_start).cern_utc_string()
    out_df.headers[END_TIME] = CERNDatetime.from_timestamp(
        t_end).cern_utc_string()
    return out_df
コード例 #8
0
def lhc_fill_to_tfs(fill_number, keys=None, names=None):
    """ Extracts data for keys of fill from timber.

    Args:
        fill_number: fill number
        keys: list of data to extract
        names: dict to map keys to column names

    Returns: tfs pandas dataframe.
    """
    db = pytimber.LoggingDB()
    t_start, t_end = get_fill_times(db, fill_number)

    if keys is None:
        keys = get_tune_and_coupling_variables(db)

    extracted = db.get(keys, t_start, t_end)

    out_df = tfs.TfsDataFrame()
    for key in keys:
        values = extracted[key.upper()]
        if values[1][0].size > 1:
            raise NotImplementedError(
                "Multidimensional variables are not implemented yet.")

        # (timestamps, values) rows become two columns, indexed by time.
        column = names.get(key, key)
        frame = tfs.TfsDataFrame(
            np.asarray(values).transpose(),
            columns=[TIME_COL, column],
        ).set_index(TIME_COL)

        out_df = out_df.merge(frame, how="outer",
                              left_index=True, right_index=True)

    # Headers keep the raw fill times (no datetime conversion here).
    out_df.headers[START_TIME] = t_start
    out_df.headers[END_TIME] = t_end
    return out_df
コード例 #9
0
# Output locations for the per-fill HDF5 files.
h5_folder = 'fill_basic_data_h5s'
filepath = h5_folder + '/basic_data_fill'

if not os.path.isdir(h5_folder):
    os.mkdir(h5_folder)

# Fill/beam-mode metadata produced by an earlier step of the workflow.
fills_json_name = 'fills_and_bmodes.json'
dict_fill_bmodes = load_fill_dict_from_json(fills_json_name)

# Book-keeping file of the fills already saved.
saved_json = h5_folder + '/saved_fills.json'

# Variables to extract: beam energy and beam-current transformers.
varlist = []
varlist += Energy.variable_list()
varlist += BCT.variable_list()

# Switch between cals and nxcals
import pytimber
db = pytimber.LoggingDB(source='nxcals')
#db = pytimber.LoggingDB(source='ldb')

#from LHCMeasurementTools.TimberManager import NXCalsFastQuery
#db = NXCalsFastQuery(system='CMW')

# Extract and save everything, batching the variable list.
save_variables_and_json(varlist=varlist,
                        file_path_prefix=filepath,
                        save_json=saved_json,
                        fills_dict=dict_fill_bmodes,
                        db=db,
                        n_vars_per_extraction=1000)
コード例 #10
0
import pytimber
from datetime import datetime, timedelta

# Query all available backends at once.
log = pytimber.LoggingDB(source='all')

now = datetime.now()
# Restrict the query to cycles whose fundamental matches SFTPRO.
fundamental = 'CPS:%:SFTPRO%'
log.get('CPS.TGM:USER%',
        now - timedelta(minutes=10),
        now,
        fundamental=fundamental)

print(log.searchFundamental('%', now - timedelta(minutes=10)))
コード例 #11
0
import pandas as pd
import numpy as np
import os
import inspect
# Fundamental contribution by R. De Maria et al.
import pytimber

# TODO: discuss about the possible problem if the user has already defined a variable named 'cals'
# Module-level handle to the logging database, shared by the helpers below.
cals = pytimber.LoggingDB()


def _smartList(myList):
    '''
    Return a list with no duplicate and resolve the '%' search pattern.
    
    ===Example===
    _smartList(['CPS.LSA:%','TFB-DSPU-%-NEW:OPERATION:BLOWUPENABLE'])
    _smartList('CPS.LSA:%')

    '''
    if isinstance(myList, str):
        if '%' in myList:
            return cals.search(myList)
        else:
            return [myList]

    newList = []
    for i in myList:
        if '%' in i:
            newList = newList + cals.search(i)
        else:
コード例 #12
0
  def fromdb(cls,t1,t2,beam='B1',db=None,verbose=False):
    """
    retrieve data using timber and calculate normalized emittances 
    from extracted values.
    Note: all values in self.emitfit are deleted

    Example:
    --------
      To extract the data from timber:

        t1=pytimber.parsedate("2016-08-24 00:58:00.000")
        t2=pytimber.parsedate("2016-08-24 00:59:00.000")
        bsrt=pytimber.BSRT.fromdb(t1,t2,beam='B1')

    Parameters:
    -----------
    db : pytimber or pagestore database
    beam : either 'B1' or 'B2'
    t1,t2 : start and end time of extracted data
           in unix time
    verbose: verbose mode, default verbose = False

    Returns:
    -------
    class: BSRT class instance with dictionary of normalized emittances
           stored in self.emit. self.emit is sorted after slot number
          {slot: [time [s],emith [um],emitv[um]]}
    """
    # if no database is given create dummy database to extract data
    if db is None:
      db = pytimber.LoggingDB()
      if verbose:
        print('... no database given, creating default database ' +
        'pytimber.LoggingDB()')
    if verbose:
      print('... extracting data from timber')
    # -- get timber data
    bsrt_array = _get_timber_data(beam=beam,t1=t1,t2=t2,db=db)
    # -- calculate emittances, store them in 
    #    dictionary self.emit = emit
    if verbose:
      print('... calculating emittance for non-empty slots')
    # create dictionary indexed with slot number
    emit_dict = {}
    # loop over slots
    for j in set(bsrt_array['gate']):
      # data for slot j
      bsrt_slot = bsrt_array[bsrt_array['gate']==j]
      bsrt_emit = []
      # loop over all timestamps for slot j
      for k in set(bsrt_slot['time']):
        # data for slot j and timestamp k
        bsrt_aux = bsrt_slot[bsrt_slot['time']==k]
        # gives back several values per timestamp -> take the mean value
        # energy [GeV]
        energy_aux = np.mean(bsrt_aux['energy'])
        # geometric emittance [um]
        # (sig and lsf in the same length unit and bet consistent with
        # them -- NOTE(review): units follow _get_timber_data, confirm)
        emith_aux  = np.mean((bsrt_aux['sigh']**2
                             -bsrt_aux['lsfh']**2)/bsrt_aux['beth'])
        emitv_aux  = np.mean((bsrt_aux['sigv']**2
                             -bsrt_aux['lsfv']**2)/bsrt_aux['betv'])
        # normalized emittance
        emith = emitnorm(emith_aux, energy_aux)
        emitv = emitnorm(emitv_aux, energy_aux)
        bsrt_emit.append((k,emith,emitv))
      # sort after the time
      # (np.sort on a structured array compares records field by field,
      # so 'time' -- the first field -- is the primary sort key)
      emit_dict[j] = np.sort(np.array(bsrt_emit,
        dtype=[('time',float),('emith',float),('emitv',float)]),
        axis=0)
    return cls(db=db,emit=emit_dict,emitfit=None,t_start=t1,t_end=t2)
コード例 #13
0
ファイル: LHCBWS.py プロジェクト: nkarast/pytimber
def _get_timber_data(beam, t1, t2, db=None):
    """Retrieve the timber data needed for the BWS emittance calculation.

    Parameters:
    ----------
    db    : timber database (a default pytimber.LoggingDB() is created
            when None)
    beam  : either 'B1' or 'B2'
    t1,t2 : start and end time of extracted data in unix time

    Returns:
    -------
    data : dict mapping variable name -> (timestamps, values), covering
           the wire-scanner gate/gain/bunch-selection/profile variables,
           the BETA and EMITTANCE_NORM variables and
           LHC.BOFSU:OFC_ENERGY.

    NOTE(review): the original docstring described BSRT variables
    (%LHC%BSRT%...), apparently copied from the BSRT twin module;
    the code actually queries %LHC%BWS% variables.
    """
    if db is None:
        db = pytimber.LoggingDB()
    # -- some checks
    if t2 < t1:
        raise ValueError('End time smaller than start time, t2 = ' +
                         '%s > %s = t1' % (t2, t1))
    name = '%LHC%BWS%' + beam.upper()
    # check for which wire we have data
    data = db.get(db.search(name + '%NB_GATES%'), t1, t2)
    var_names = []
    for plane in 'HV':
        nm = name + plane.upper()
        wire = ''
        # A wire counts as "used" when its NB_GATES variable has data;
        # missing variables simply leave the flag string unchanged.
        try:
            if len(data[db.search(nm + '1%NB_GATES%')[0]][1]) != 0:
                wire += '1'
        except (KeyError, IndexError):
            pass
        try:
            if len(data[db.search(nm + '2%NB_GATES%')[0]][1]) != 0:
                wire += '2'
        except (KeyError, IndexError):
            pass
        # Exactly one wire must be in use.
        if wire == '1' or wire == '2': pass
        elif wire == '':
            raise ValueError("No data found for wire 1 or wire 2 as " +
                             "db.search('%s') is empty!" %
                             (name + "%NB_GATES%"))
        elif wire == '12':
            raise ValueError(
                "Both wires appear to be used! This class " +
                " assumes that only one wire is used!" +
                "db.search('%s') = %s!" %
                (name + "%NB_GATES%", db.search(name + '%NB_GATES%')))
        else:
            raise ValueError(
                "This completely failed! wire = %s " % wire +
                "and db.search('%s') = %s!" %
                (name + "%NB_GATES%", db.search(name + '%NB_GATES%')))
        # extract variable names for wires from database
        for var in [
                'NB_GATES', 'GAIN', 'BUNCH_SELECTION', 'PROF_POSITION_',
                'PROF_DATA_'
        ]:
            nm = name + plane.upper() + wire
            var_names.extend(db.search(nm + '%' + var + '%'))
    for var in ['BETA', 'EMITTANCE_NORM']:
        var_names.extend(db.search(name + '%' + var + '%'))
    var_names.extend(['LHC.BOFSU:OFC_ENERGY'])
    # check that variable names haven't changed
    var_check = _bws_timber_variables()  # hardcoded names
    flag_check = True
    for var in var_names:
        if var not in var_check[beam.upper()]:
            print('WARNING: variable name %s changed!' % var)
            flag_check = False
    if flag_check is False:
        print('Hardcoded variable names are: %s' % var_check)
    # get data
    data = db.get(var_names, t1, t2)
    # check that there is an energy value smaller than t1
    var_egev = 'LHC.BOFSU:OFC_ENERGY'
    degev = data[var_egev]
    t1_new = t1
    # make sure data is not empty: step the start time back one day at
    # a time until the energy variable has at least one sample
    # (the original code had an unreachable `return` after each raise
    # below -- removed)
    while (degev[0].size == 0):
        if (np.abs(t1_new - t1) > 30 * 24 * 60 * 60):
            raise ValueError('Last logging time for LHC.BOFSU:OFC_ENERGY' +
                             ' exceeds 1 month! Check your data!!!')
        t1_new = t1_new - 24 * 60 * 60
        degev = db.get([var_egev], t1_new, t2)[var_egev]
    # then check that first time stamp is smaller than t1
    while (degev[0][0] > t1):
        if (np.abs(t1_new - t1) > 30 * 24 * 60 * 60):
            raise ValueError('Last logging time for LHC.BOFSU:OFC_ENERGY' +
                             ' exceeds 1 month! Check your data!!!')
        t1_new = t1_new - 24 * 60 * 60
        degev = db.get([var_egev], t1_new, t2)[var_egev]
    # update data with the (possibly extended) energy record
    data['LHC.BOFSU:OFC_ENERGY'] = degev
    return data
コード例 #14
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging

# should be done before importing pytimber
logging.basicConfig(level=logging.INFO)

import pytimber

# Shared connection to the CALS ("ldb") backend used by the tests below.
ldb = pytimber.LoggingDB(source="ldb")


def test_search():
    # HX:BETA% must match exactly the four beta-star variables.
    found = ldb.search("HX:BETA%")
    assert len(found) == 4
    assert "HX:BETASTAR_IP1" in found


def test_get_simple():
    # Two-day window known to contain exactly six fill-number samples.
    data = ldb.get("HX:FILLN",
                   "2015-05-13 12:00:00.000",
                   "2015-05-15 00:00:00.000")

    timestamps, values = data["HX:FILLN"]
    assert len(timestamps) == 6
    assert len(values) == 6

    # First sample: fill 3715 at a known unix timestamp.
    assert timestamps[0] == 1431523684.764
    assert values[0] == 3715.0
コード例 #15
0
ファイル: utils.py プロジェクト: loiccoyle/blm_header
def get_timber_db(*args, **kwargs):
    """Fetch a pytimber DB instance.

    All arguments are forwarded to ``pytimber.LoggingDB``.  On
    AttributeError/TypeError the error is printed and None is returned
    implicitly (best-effort behaviour).
    """
    try:
        return pytimber.LoggingDB(*args, **kwargs)
    except (AttributeError, TypeError) as e:
        print(e)
コード例 #16
0
def doMakeBeamCurrentFile(ConfigInfo):
    """Build per-scan beam-current tables (DCCT/FBCT) for a fill.

    Python 2 code.  Reads scan metadata from the pickled InputScanFile,
    gathers bunch currents either from timber (when ReadFromTimber is
    set in ConfigInfo) or from central files, runs the FBCT calibration
    check and returns (table, csvtable).
    """

    import csv, pickle

    # Paths and options from the configuration dictionary.
    AnalysisDir = str(ConfigInfo['AnalysisDir'])
    InputCentralPath = str(ConfigInfo['InputCentralPath'])
    InputScanFile = './' + AnalysisDir + '/' + str(ConfigInfo['InputScanFile'])
    OutputSubDir = str(ConfigInfo['OutputSubDir'])

    outpath = './' + AnalysisDir + '/' + OutputSubDir

    # Optionally read currents from timber instead of central files.
    # NOTE(review): bare `except:` swallows any error here (including
    # pytimber import failures), leaving ReadFromTimber possibly True
    # with no `db` defined -- confirm intended.
    ReadFromTimber = False
    try:
        if 'ReadFromTimber' in ConfigInfo:
            ReadFromTimber = ConfigInfo['ReadFromTimber']
            import pytimber as pytimber
            db = pytimber.LoggingDB()
    except:
        print "makeBeamCurrentFileII: add ReadFromTimber argument in makeBeamCurrentFileConfig"

    CalibrateFBCTtoDCCT = False
    CalibrateFBCTtoDCCT = ConfigInfo['CalibrateFBCTtoDCCT']

    # Scan metadata produced by a previous step of the workflow.
    with open(InputScanFile, 'rb') as f:
        scanInfo = pickle.load(f)

    Fill = scanInfo["Fill"]
    ScanNames = scanInfo["ScanNames"]

    CollidingBunches = scanInfo["CollidingBunches"]
    FilledBunchesB1 = scanInfo["FilledBunchesB1"]
    FilledBunchesB2 = scanInfo["FilledBunchesB2"]

    table = {}
    csvtable = []
    #    csvtable.append(["ScanNumber, ScanNames, ScanPointNumber, avrgdcct1, avrgdcct2, sum(avrgfbctB1), sum(avrgfbctB2), sumColl(avrgfbct1), sumColl(avrgfbct2), fbct1 per Bx, fbct2 per BX"])
    csvtable.append([
        "ScanNumber, ScanNames, ScanPointNumber, avrgdcct1, avrgdcct2, sum(avrgfbctB1), sum(avrgfbctB2), fbct1 per Bx, fbct2 per BX"
    ])

    # Per-scan, per-scan-point current averages.
    for i in range(len(ScanNames)):
        key = "Scan_" + str(i + 1)
        scanpoints = scanInfo[key]
        table["Scan_" + str(i + 1)] = []
        for j, sp in enumerate(scanpoints):
            if (ReadFromTimber):
                avrgdcct1, avrgdcct2, avrgfbct1, avrgfbct2 = getCurrentsFromTimber(
                    sp[3:], int(Fill), db, pytimber)
            else:
                avrgdcct1, avrgdcct2, avrgfbct1, avrgfbct2, FilledBunchesB1, FilledBunchesB2, CollidingBunches = getCurrents(
                    InputCentralPath, sp[3:], int(Fill))

#Sums over all filled bunches
            sumavrgfbct1 = sumCurrents(avrgfbct1, FilledBunchesB1)
            sumavrgfbct2 = sumCurrents(avrgfbct2, FilledBunchesB2)
            #Sums over all colliding bunches
            sumCollavrgfbct1 = sumCurrents(avrgfbct1, CollidingBunches)
            sumCollavrgfbct2 = sumCurrents(avrgfbct2, CollidingBunches)
            avrgfbct1['sum'] = sumCollavrgfbct1
            avrgfbct2['sum'] = sumCollavrgfbct2

            print "Scan point", j, sp
            row = [
                i + 1,
                str(ScanNames[i]), j + 1, avrgdcct1, avrgdcct2, sumavrgfbct1,
                sumavrgfbct2, avrgfbct1, avrgfbct2
            ]
            table["Scan_" + str(i + 1)].append(row)

    # FBCT/DCCT calibration-check plots, one pair per scan.
    canvas = ROOT.TCanvas()

    ROOT.gStyle.SetOptFit(111)
    ROOT.gStyle.SetOptStat(0)

    h_ratioB1 = ROOT.TGraph()
    h_ratioB2 = ROOT.TGraph()

    outpdf = outpath + '/checkFBCTcalib_' + str(Fill) + '.pdf'
    for i in range(len(ScanNames)):
        key = "Scan_" + str(i + 1)
        [h_ratioB1, h_ratioB2,
         table[key]] = checkFBCTcalib(table[key], CalibrateFBCTtoDCCT)
        h_ratioB1.Draw("AP")
        canvas.SaveAs(outpdf + '(')
        h_ratioB2.Draw("AP")
        canvas.SaveAs(outpdf + '(')

    canvas.SaveAs(outpdf + ']')

    # Flatten the table into CSV rows, one section per scan.
    for i in range(len(ScanNames)):
        key = "Scan_" + str(i + 1)
        csvtable.append([str(key)])
        for idx, entry in enumerate(table[key]):
            row = [
                entry[0], entry[1], entry[2], entry[3], entry[4], entry[5],
                entry[6], entry[7], entry[8]
            ]
            csvtable.append(row)

    return table, csvtable
コード例 #17
0
    .orderBy("acqStamp", ascending=False)
    .na()
    .drop()
)
[(r.get(0), r.get(1)) for r in rows.collect()]

ts, val = nxcals.getVariable("RPMBB.UA87.RSF2.A81B1:I_MEAS", t1, t2)


# simple comparison
import pytimber

# One-minute window used to compare the two backends.
t1 = "2018-06-15 23:00:00.000"
t2 = "2018-06-15 23:01:00.000"
vname = "RPMBB.UA87.RSF2.A81B1:I_MEAS"
# Fetch the same variable through CALS and through NXCALS and print both.
cals = pytimber.LoggingDB()
print(cals.getVariable(vname, t1, t2))
nxcals = pytimber.NXCals()
print(nxcals.getVariable(vname, t1, t2))


# from doc
import time
import numpy as np

nxcals = pytimber.NXCals()
start = time.time()
ds = (
    nxcals.DevicePropertyQuery.system("CMW")
    .startTime("2018-11-27 01:00:00.000")
    .endTime("2018-11-27 01:10:00.000")
コード例 #18
0
import LHCMeasurementTools.LHC_BCT as BCT
import LHCMeasurementTools.LHC_Energy as Energy
import LHCMeasurementTools.LHC_BSRT as BSRT
import LHCMeasurementTools.TimberManager as tm
import LHCMeasurementTools.mystyle as ms
import BSRT_calib
import numpy as np
import pylab as pl
import pickle
import sys, time
from colorsys import hsv_to_rgb
import os

import pytimber

# Connection to the logging database.
ldb = pytimber.LoggingDB()

# Resampling step for the extracted signals [s].
t_step_resample_s = 2 * 60.

# Analysis settings: fill number, delay cut and beam to process.
filln = 6071
t_cut_delay_h = 0
beam = 1

# Emittance-growth evaluation window [s].
DT_Demitt_s = 3600.

# Threshold used for scan detection.
scan_thrld = 70

average_repeated_meas = False

# Optional command-line override of the fill number.
if len(sys.argv) > 1:
    print('--> Processing fill {:s}'.format(sys.argv[1]))
コード例 #19
0
ファイル: LHCBWS.py プロジェクト: nkarast/pytimber
    def fromdb(cls, t1, t2, beam='B1', db=None, verbose=False):
        """Build a BWS instance from timber data.

        Retrieves wire-scanner data between ``t1`` and ``t2`` and
        calculates normalized emittances from the extracted values.
        Note: all values in self.emitfit are deleted.

        Example:
        --------
          To extract the data from timber:

            t1=pytimber.parsedate("2016-08-24 00:58:00.000")
            t2=pytimber.parsedate("2016-08-24 00:59:00.000")
            bws=pytimber.BWS.fromdb(t1,t2,beam='B1')

        Parameters:
        -----------
        db : pytimber or pagestore database (a default
             pytimber.LoggingDB() is created when None)
        beam : either 'B1' or 'B2'
        t1,t2 : start and end time of extracted data in unix time
        verbose: verbose mode, default verbose = False

        Returns:
        --------
        class: BWS class instance with dictionary of normalized
               emittances, profiles and other relevant parameters
               stored in self.emit, sorted after slot number.
               'IN' and 'OUT' refer to the wire moving inwards and
               outwards respectively.
              {slot: [time [s],emith [um],emitv[um],sigh[mm],sigv[mm],
                      lsfh [mm], lsfv[mm], beth[mm], betv[mm],energy[GeV]]}
        """
        if beam not in ('B1', 'B2'):
            raise ValueError("beam = %s must be either 'B1' or 'B2'" % beam)
        if db is None:
            # No database given: create a default one to extract data.
            db = pytimber.LoggingDB()
            if verbose:
                print('... no database given, creating default database '
                      'pytimber.LoggingDB()')
        if verbose:
            print('... extracting data from timber')
        timber_data = _get_timber_data(beam=beam, t1=t1, t2=t2, db=db)
        timber_vars = timber_data.keys()
        # One entry per plane, split by wire direction.
        data = {}
        for plane in 'HV':
            data[plane] = {
                io: _timber_to_dict(beam=beam,
                                    plane=plane,
                                    direction=io,
                                    data=timber_data,
                                    db=db)
                for io in ('IN', 'OUT')
            }
        return cls(db=db, timber_vars=timber_vars, data=data,
                   t_start=t1, t_end=t2, beam=beam)
コード例 #20
0
class CollFillFunction(CollFunction):
    """Collimator motor functions for one collimator in one LHC fill.

    Fetches the logged motor variables for the fill's RAMP beam mode
    from timber and caches them on disk as pickles.
    """

    # Shared logging-database handle.
    # NOTE(review): constructed at class-definition time, so importing
    # this module opens a DB connection -- confirm intended.
    db = pytimber.LoggingDB()
    # Directory for the pickled per-fill/per-collimator cache files.
    cache_dir = "cache"

    def __init__(self, fill_nbr, cname):
        # fill_nbr: LHC fill number; cname: collimator name.
        super(CollFillFunction, self).__init__(cname)
        self.fill_nbr = fill_nbr

    def fetch(self, forced=False, cache=True):
        """ Fetch all logged motor functions for collimator 'cname'
            and fill 'nbr'.

            Will try to cache the data if possible, as fetching from Timber can be slow.
        """
        print("loading collimator {}... ".format(self.cname), end=" ")
        if not forced and os.path.isfile(self.cache_file()):
            # Cache hit: restore dmap/data from the pickle and stop.
            print("from cache")
            with open(self.cache_file(), 'rb') as f:
                self.unpack(pickle.loads(f.read()))
            return

        print("fetching", end=" ")
        # Fill data: locate the RAMP beam-mode window of the fill.
        meta = self.db.getLHCFillData(self.fill_nbr)
        ramp_mode = next(
            (item for item in meta['beamModes'] if item['mode'] == "RAMP"),
            None)
        if not ramp_mode:
            raise Exception("did not find ramp beam mode in fill")
        start = ramp_mode['startTime']
        end = ramp_mode['endTime']

        # Collimator motor data: one timber variable per motor quantity,
        # plus the measured profile time used for alignment below.
        variables = [
            "{}:{}".format(self.cname, v)
            for v in CollTrimVMap.fillVars[CollTrimVMap.fillVars != "t"]
        ]
        variables.append("{}:MEAS_PROFILE_TIME".format(self.cname))
        print(variables)
        response = self.db.getAligned(variables, start, end)
        dmap = []
        data = []
        print(response.keys())
        # NOTE(review): i_align is only assigned when a "PROFILE" key is
        # present in the response; otherwise the alignment below raises
        # NameError -- confirm the profile variable is always returned.
        for key in response:
            if "PROFILE" in key:
                i_align = np.argmin(response[key])
                continue
            elif key == "timestamps":
                dmap.append('t')
            else:
                dmap.append(key.split(":")[1])
            data.append(response[key])
        self.dmap = np.array(dmap)
        self.data = np.array(data)

        # align w.r.t. profile function
        t = np.where(self.dmap == 't')[0]
        self.data[t] -= self.data[t, i_align]

        if cache:
            with open(self.cache_file(), 'wb') as f:
                pickle.dump(self.pack(), f)

    def pack(self):
        # Serializable snapshot of the fetched state.
        return {'dmap': self.dmap, 'data': self.data}

    def unpack(self, dump):
        # Inverse of pack(): restore the fetched state.
        self.dmap = dump['dmap']
        self.data = dump['data']

    def cache_file(self):
        # Cache path: <cache_dir>/<fill>_<collimator>.dat
        return "{}/{}_{}.dat".format(self.cache_dir, self.fill_nbr, self.cname)
コード例 #21
0
import pytimber as pt
import matplotlib.pyplot as plt
import numpy as np


def savedata():
    """Dump the SPS temperature/time arrays to .npy files.

    Uses raw strings for the Windows paths so that backslash sequences
    (e.g. backslash-A, backslash-M) are never parsed as string escapes;
    the resulting paths are byte-identical to the originals.

    NOTE(review): relies on module-level arrays ``timestamps2``,
    ``temperatures``, ``temperatures2`` and ``timestamps3`` being
    defined before this is called -- confirm against the full script.
    """
    np.save(r'E:\Ansys\MKPL/SPS_timedata.npy', timestamps2)
    np.save(r'E:\Ansys\MKPL/SPS_tempdata.npy', temperatures)
    np.save(r'E:\Ansys\MKPL/SPS_tempdata2.npy', temperatures2)
    np.save(r'E:\Ansys\MKPL/SPS_timedata2.npy', timestamps3)


# Connection to the logging database.
db = pt.LoggingDB()

# Active extraction window; the commented segments below are
# alternative windows used in previous runs.
START_TIMESTAMP = '2015-06-17 02:00:00'
STOP_TIMESTAMP = '2015-06-17 02:03:00'

#FIRST SEGMENT
# START_TIMESTAMP = '2015-06-17 00:00:00'
# STOP_TIMESTAMP = '2015-06-17 04:00:00'

#SECOND SEGMENT
# START_TIMESTAMP = '2015-06-17 04:00:00'
# STOP_TIMESTAMP = '2015-06-17 06:03:00'

#THIRD SEGMENT
# START_TIMESTAMP = '2015-06-17 06:03:00'
# STOP_TIMESTAMP = '2015-06-17 08:00:00'

#FOURTH SEGMENT
# START_TIMESTAMP = '2015-06-17 08:00:00'
コード例 #22
0
            self[k] = v

    def __getstate__(self):
        # Pickle the mapping itself; attributes live in the same dict
        # (see __setstate__, which aliases __dict__ to the mapping).
        return self

    def __setstate__(self, state):
        # Restore the entries, then alias the attribute dict to the
        # mapping so attribute and item access stay in sync.
        self.update(state)
        self.__dict__ = self


# In[ ]:

# In[1]:

# Create the logging-DB handle only if it does not exist yet (e.g. when
# the notebook cell is re-run).  NOTE(review): at module level locals()
# is the same dict as globals(), so this works, but globals() would
# state the intent more clearly.
if 'log' not in locals():
    log = pytimber.LoggingDB()


class myToolbox:
    import functools

    speedOfLight = 299792458

    @staticmethod
    def plotSamplerFromObject(myobject, scale=1):
        info = myobject
        unitFactor = info.Samples.value.timeUnitFactor
        firstSampleTime = info.Samples.value.firstSampleTime
        samplingTrain = info.Samples.value.samplingTrain
        data = info.Samples.value.samples * scale
        x = np.arange(
コード例 #23
0
def nxcals():
    """Return a pytimber LoggingDB backed by the NXCALS source."""
    return pytimber.LoggingDB(source="nxcals")
コード例 #24
0
ファイル: oml.py プロジェクト: Pingul/cern
class Fill:
    """Logged data for one LHC fill and one beam.

    Fetches the Timber variables listed in BEAM{1,2}_VARIABLES and
    COMB_VARIABLES between two beam modes, caches the result on disk with
    pickle, and provides accessors plus simple queries used for
    off-momentum-loss (OML) analysis.
    """

    class STATUS:
        # Outcome of the fetch: data OK, no ramp found in fill, or error.
        OK = 'OK'
        NORAMP = 'NORAMP'
        ERROR = 'ERROR'
    #### class STATUS

    class Variable:
        """ Purely a wrapper so we can get named variables instead of [0] and [1] """
        def __init__(self, data):
            # data = [[x1, x2, ..], [y1, y2, ...]]
            #
            # Using x and y because it's easy to write and quickly shows intent,
            # even though our x almost exclusively is time
            self.x = data[0]
            self.y = data[1]

        def __len__(self):
            return 2

        def __getitem__(self, key):
            # Tuple-like access so the wrapper can be unpacked with *var.
            if key == 0:
                return self.x
            elif key == 1:
                return self.y
            else:
                raise IndexError("not valid key '{}'".format(key))

        def __setitem__(self, key, value):
            if key == 0:
                self.x = value
            elif key == 1:
                self.y = value
            else:
                raise IndexError("not valid key '{}'".format(key))

        def index_for_time(self, timestamps):
            """ Helper function: find the index in the variable corresponding to the given timestamp """
            try:
                # Iterable of timestamps: recurse per element.
                indices = []
                for t in timestamps:
                    indices.append(self.index_for_time(t))
                return indices
            except TypeError:
                index = bisect.bisect_left(self.x, timestamps)
                # Clamp to the valid range: bisect_left can return 0 or
                # len(self.x); the fancy index below would otherwise wrap
                # around to the last element (index-1 == -1) or run out of
                # bounds at the upper end.
                if index == 0:
                    return 0
                if index >= len(self.x):
                    return len(self.x) - 1

                # Sometimes there's jump in the data sets -- take the value closest to the given timestamp
                ts = np.abs(self.x[[index - 1, index]] - timestamps)
                return index - 1 if ts[0] < ts[1] else index
    #### class Variable

    ## Variables in lists will be fetched aligned with each other
    BEAM1_VARIABLES = {
        'intensity_b1' : 'LHC.BCTFR.A6R4.B1:BEAM_INTENSITY',
        # 'beta_coll_b1' : ['BLMTI.06L7.B1E10_TCP.B6L7.B1:LOSS_RS09', 'BLMTI.06L7.B1E10_TCP.C6L7.B1:LOSS_RS09', 'BLMTI.06L7.B1E10_TCP.D6L7.B1:LOSS_RS09'],
        'beta_coll_b1' : 'BLMTI.06L7.B1E10_TCP.C6L7.B1:LOSS_RS09',
        'synch_coll_b1' : 'BLMTI.06L3.B1I10_TCP.6L3.B1:LOSS_RS09',
        'abort_gap_int_b1' : 'LHC.BSRA.US45.B1:ABORT_GAP_INTENSITY',

        'motor_ir3_b1' : ['TCP.6L3.B1:MEAS_MOTOR_LU', 'TCP.6L3.B1:MEAS_MOTOR_RU'],
        'motor_ir7_b1' : ['TCP.C6L7.B1:MEAS_MOTOR_LU', 'TCP.C6L7.B1:MEAS_MOTOR_RU'],
    }

    BEAM2_VARIABLES = {
        'intensity_b2' : 'LHC.BCTFR.A6R4.B2:BEAM_INTENSITY',
        # 'beta_coll_b2' : ['BLMTI.06R7.B2I10_TCP.B6R7.B2:LOSS_RS09', 'BLMTI.06R7.B2I10_TCP.C6R7.B2:LOSS_RS09', 'BLMTI.06R7.B2I10_TCP.D6R7.B2:LOSS_RS09'],
        'beta_coll_b2' : 'BLMTI.06R7.B2I10_TCP.C6R7.B2:LOSS_RS09',
        'synch_coll_b2' : 'BLMTI.06R3.B2E10_TCP.6R3.B2:LOSS_RS09',
        'abort_gap_int_b2' : 'LHC.BSRA.US45.B2:ABORT_GAP_INTENSITY',

        'motor_ir3_b2' : ['TCP.6R3.B2:MEAS_MOTOR_LU', 'TCP.6R3.B2:MEAS_MOTOR_RU'],
        'motor_ir7_b2' : ['TCP.C6R7.B2:MEAS_MOTOR_LU', 'TCP.C6R7.B2:MEAS_MOTOR_RU'],
    }

    COMB_VARIABLES = {
        'energy' : 'LHC.BOFSU:OFSU_ENERGY',
        'ramp_mode' : 'HX:BMODE_RAMP',
        'motor_start' : 'TCP.C6L7.B1:MEAS_PROFILE_TIME',
    }

    # Shared Timber client, created once at class-definition time.
    db = pytimber.LoggingDB()

    OML_period_file = "fills/spikes.dat"

    def __init__(self, nbr, fetch=True, beam=settings.BEAM):
        """ Note -- the default beam is decided upon loading this file """

        if beam not in (1, 2):
            raise Exception("Beam can only be 1 or 2")
        self.nbr = nbr
        self.beam = beam
        self.data = {}
        self.meta = {}
        self.status = Fill.STATUS.OK
        self.time_correction = 0

        # Per-beam variables plus the ones shared by both beams.
        if self.beam == 1:
            self.timber_var_map = {**self.BEAM1_VARIABLES, **self.COMB_VARIABLES}
        else:
            self.timber_var_map = {**self.BEAM2_VARIABLES, **self.COMB_VARIABLES}

        if fetch:
            self.fetch()
            self.normalize_intensity()
            self.beta_coll_merge()
            self.offset_time()


    def fetch(self, forced=False, cache=True):
        """Fetch the default PRERAMP..RAMP window of this fill."""
        self.fetch_range("PRERAMP", "RAMP", forced, cache)

    def fetch_range(self, start, stop, forced=False, cache=True):
        """Fetch all mapped variables between beam modes 'start' and 'stop'.

        Uses the on-disk cache unless 'forced'; only variables missing from
        the cache are fetched. Sets status to NORAMP when the requested
        modes are absent from the fill metadata.
        """
        l = ["INJPROT", "INJPHYS", "PRERAMP", "RAMP", "FLATTOP", "SQUEEZE", "ADJUST", "STABLE", "BEAMDUMP", "RAMPDOWN"]
        if start not in l or stop not in l:
            raise Exception("fetch range [{} - {}] is not allowed".format(start, stop))

        to_fetch = self.timber_var_map.keys()
        cache_file = store_file_for_fill(self.nbr)
        if not forced and os.path.isfile(cache_file):
            self.load_cache()
            cached_variables = list(self.data.keys())
            non_cached_variables = [var for var in self.timber_var_map.keys() if var not in cached_variables]
            to_fetch = non_cached_variables

        if not to_fetch:
            return

        # List-valued map entries are fetched time-aligned with each other;
        # plain strings are fetched individually.
        aligned_var = [var for var in to_fetch if isinstance(self.timber_var_map[var], list)]
        non_aligned_var = [var for var in to_fetch if isinstance(self.timber_var_map[var], str)]
        if not (len(aligned_var) + len(non_aligned_var)) == len(to_fetch):
            # Sanity check
            raise Exception("Sanity check failed")

        self.meta = self.db.getLHCFillData(self.nbr)
        start_t = self.meta['startTime']
        end_t = self.meta['endTime']
        start_mode = next((item for item in self.meta['beamModes'] if item['mode'] == start), None)
        end_mode = next((item for item in self.meta['beamModes'] if item['mode'] == stop), None)
        if not end_mode or not start_mode:
            self.status = Fill.STATUS.NORAMP
        else:

            # It's nice to have some context just before the ramp starts
            # Also, it's quite inexact
            # start_t = end_mode['startTime']
            start_t = start_mode['startTime']
            end_t = end_mode['endTime']

        # The actual data fetching
        if non_aligned_var:
            lg.log("fetching: {}...".format(", ".join(non_aligned_var)), end=" ")
            timber_vars = [self.timber_var_map[var] for var in non_aligned_var]
            data = self.db.get(timber_vars, start_t, end_t)
            for name, timber_var in self.timber_var_map.items():
                if not isinstance(timber_var, list) and timber_var in data:
                    self.data[name] = Fill.Variable(data[timber_var])
            lg.log("done!", log_level=LogLevel.success, module_prestring=False)

        for var in aligned_var:
            lg.log("fetching aligned: {}...".format(var), end=' ')
            timber_vars = self.timber_var_map[var]
            data = self.db.getAligned(timber_vars, start_t, end_t)

            if len(data) == 0:
                raise Exception("No data found")
            xdata = data.pop('timestamps')
            # np.stack requires a real sequence -- passing a bare generator
            # raises TypeError on modern numpy versions.
            ydata = np.stack([data[y] for y in data])
            self.data[var] = Fill.Variable((xdata, ydata))
            lg.log("done!", log_level=LogLevel.success, module_prestring=False)

        if cache:
            self.cache()

    def pack(self):
        """Bundle the picklable state into a plain dict."""
        return {
            'nbr' : self.nbr,
            'data' : self.data,
            'meta' : self.meta,
            'status' : self.status,
            'time_correction' : self.time_correction
        }

    def unpack(self, dump):
        """Restore state from a dict produced by pack().

        'time_correction' is optional for backward compatibility with
        cache files written before it existed.
        """
        self.nbr = dump['nbr']
        self.data = dump['data']
        self.meta = dump['meta']
        self.status = dump['status']
        self.time_correction = dump['time_correction'] if 'time_correction' in dump else self.time_correction

    def clear_cache(self):
        """Truncate this fill's cache file."""
        lg.log('clearing cache {}'.format(self.nbr))
        # Opening for write truncates the file; use 'with' so the handle
        # is closed instead of leaked.
        with open(store_file_for_fill(self.nbr), 'wb'):
            pass

    def cache(self):
        """Pickle the current state to this fill's cache file."""
        lg.log('caching {}'.format(self.nbr))
        with open(store_file_for_fill(self.nbr), 'wb') as f:
            pickle.dump(self.pack(), f)

    def load_cache(self):
        """Load previously cached state for this fill."""
        lg.log('loading {}'.format(self.nbr))
        with open(store_file_for_fill(self.nbr), 'rb') as f:
            self.unpack(pickle.loads(f.read()))

    # Data access methods: thin views into self.data keyed by beam number.
    def intensity(self):
        return self.data['intensity_b{}'.format(self.beam)]
    def blm_ir3(self):
        return self.data['synch_coll_b{}'.format(self.beam)]
    def blm_ir7(self):
        return self.data['beta_coll_b{}'.format(self.beam)]
    def motor_ir3(self):
        return self.data['motor_ir3_b{}'.format(self.beam)]
    def motor_ir7(self):
        return self.data['motor_ir7_b{}'.format(self.beam)]
    def motor_start(self):
        return self.data['motor_start']
    def abort_gap(self):
        return self.data['abort_gap_int_b{}'.format(self.beam)]
    def energy(self):
        return self.data['energy']

    def oml(self, aligned=False):
        # NOTE(review): hard-coded to beam-1 keys, unlike the accessors
        # above -- presumably intentional, verify against callers.
        if aligned:
            return self.data['A_synch_coll_b1']
        else:
            return self.data['synch_coll_b1']
    ####


    ### Operations that affect the data somehow
    def normalize_intensity(self):
        """Scale the beam intensity so its maximum is 1."""
        var = 'intensity_b{}'.format(self.beam)
        self.data[var].y = self.data[var].y/np.max(self.data[var].y)

    def offset_time(self, align_mode="time_corr", t=0):
        """Shift all time axes by -t, where t depends on 'align_mode'."""
        # start, end = self.OML_period()
        # t = self.blm_ir3().x[start]

        if align_mode == "ramp":
            t = self.blm_ir3().x[0]
        elif align_mode == "ramp_mode":
            t = self.data['ramp_mode'].x[1]
        elif align_mode == "energy":
            t = self.energy().x[0]
        elif align_mode == "peak":
            # align w.r.t. beam 1 x peak
            beam = self.beam
            self.beam = 1
            # Previously we did alignment based of start of ramp which normally put the peak
            # at ~10 s. As we now align w.r.t. the peak, we move it 10 extra seconds so we
            # get similar results as before
            t = self.OML_peak()['t'] - 10
            self.beam = beam
        elif align_mode == "time_corr":
            t = self.time_correction
        elif align_mode == "meas_time":
            # Motor profile times are given relative to the measurement in
            # ns; reconstruct the absolute start time per sample.
            ts = np.empty(self.motor_start().x.size)
            for i, v in enumerate(zip(*self.motor_start())):
                ts[i] = v[0] - v[1]*1e-9
            # t = stats.mode(ts)[0]
            t = np.median(ts)
            # print(t, self.motor_start().x[0])
        elif align_mode == "manual":
            pass  # use the 't' argument exactly as given
        else:
            raise Exception("align_mode does not exist")

        for v in self.data:
            self.data[v].x -= t
        self._timeshift = t

    def beta_coll_merge(self):
        # Is made obsolete -- kept as a no-op for callers.
        return

        var = 'beta_coll_b{}'.format(self.beam)
        if self.data[var].y.shape[0] == 3:
            self.data[var].y = np.sum(self.data[var].y, axis=0)

    ### Data queries
    def has_off_momentum_loss(self):
        """ Just some arbitrary test to see if we have a spike """
        max_loss = max(self.blm_ir3().y)
        mean_loss = np.mean(self.blm_ir3().y)
        if max_loss < 0 or mean_loss < 0:
            return False
        return max_loss/mean_loss > 10

    def OML_period(self):
        """ The period is defined as
                start: t = 0
                end: t = cross over point
            returns (start, end) indices
        """
        i_co = self.crossover_point()['i']
        i_start = self.blm_ir3().index_for_time(0)
        return [i_start, i_co]

    def OML_peak(self):
        """ The timestamp and index for the maximum OML peak
            returns {'i' : ..., 't' : ...}
        """
        i_peak = imax(self.blm_ir3().y)[1]
        return {'i' : i_peak, 't' : self.blm_ir3().x[i_peak]}

    def crossover_point(self):
        """ Look for the point after OML spike when transversal losses starts
            to dominate the momentum losses
        """
        # Common time grid restricted to where both BLM signals exist.
        x = np.union1d(self.blm_ir3().x, self.blm_ir7().x)
        x = x[(x > self.blm_ir3().x.min())*(x < self.blm_ir3().x.max())]
        x = x[(x > self.blm_ir7().x.min())*(x < self.blm_ir7().x.max())]

        blm_ir3y = interpolate.interp1d(*self.blm_ir3())(x)
        blm_ir7y = interpolate.interp1d(*self.blm_ir7())(x)

        # Walk right from the IR3 peak until IR7 losses take over.
        i = imax(blm_ir3y)[1]
        while i < len(x) - 1 and blm_ir3y[i] > blm_ir7y[i]: i += 1
        return {'t' : x[i], 'i' : self.blm_ir3().index_for_time(x[i])}
コード例 #25
0
def XMPP_ProfilePlot(plotax,fig,fixedaxes,japc,vec,prfLaser,MarkerLaserStageSetValmm,textPos,StagePositionZeroValmm):
    """Plot the streak-camera temporal and spatial profiles, fit the marker
    laser line with a Gaussian, and move the laser delay stage if requested.

    Parameters (as used below -- confirm against callers):
        plotax: matplotlib axes the temporal profile is drawn on.
        fig: unused here; kept for interface compatibility.
        fixedaxes: sequence; fixedaxes[0] is the spatial axis vector.
        japc: pyjapc instance used for device access.
        vec: overwritten below with a freshly acquired streak image.
        prfLaser: (start, stop) pixel columns containing the laser line.
        MarkerLaserStageSetValmm: requested stage position [mm]; -1 parks
            the stage at the zero-delay position.
        textPos: (x, y) data coordinates for the info text box.
        StagePositionZeroValmm: stage position [mm] of zero delay.
    """
    laserSetVal=MarkerLaserStageSetValmm
    import scipy.constants as spc
    import time
    # Give the streak camera time to deliver a fresh image before reading.
    time.sleep(1)
    plotax.clear()
    timeVal=japc.getParam('XMPP-STREAK/StreakImage#streakImageTimeValues')
    # Subtract the constant camera baseline (400 counts).
    vec=japc.getParam('XMPP-STREAK/StreakImage#streakImageData')-400
    currentPos=japc.getParam('AIRTR01/Acq#numericPosition')
    
    '''
    Unverschobener laserwert!
    '''
    # (German above: "unshifted laser value") -- the stage position that
    # corresponds to zero delay, and its equivalent in picoseconds.
    delayZeroPos=StagePositionZeroValmm #mm
    delayZeroPs=delayZeroPos*1e-3/spc.c/1e-12 #ps
    # Calibration (pixel value, fine delay) for the current time range.
    ZeroPxVal,ZeroFineDelay=LaserZeroValDict[japc.getParam('XMPP-STREAK/StreakImage#streakImageTimeRange')]
    '''Calc difference'''
    # Factor 2: the light travels the stage offset twice (there and back).
    psShiftDelay=2*(currentPos-delayZeroPos)*1e-3/spc.c/1e-12 # in ps, 2* weil zweifacher weg
    print(psShiftDelay)
    # laserSetVal in ps, aber translator ist in mm
    # (laserSetVal is in ps, but the translator stage moves in mm)
    setMMpos=laserSetVal#spc.c/1e12/1e3*laserSetVal+delayZeroPos
    
    # Move the stage only when a new position was requested; -1 means
    # "park at the zero-delay position".
    if laserSetVal != -1 and laserSetVal != currentPos:
        # setze auf zerovalue! (set to zero value)
        japc.setParam('AIRTR01/Setting#positionIn',setMMpos)
    if laserSetVal ==-1:
        japc.setParam('AIRTR01/Setting#positionIn',delayZeroPos)
    
    def gaussFIT1D(prm,x,y):
        # Residuals of a Gaussian with amplitude prm[0], sigma prm[1],
        # centre prm[2], offset prm[3]; used by least_squares below.
        return ((prm[0]/np.sqrt(2*prm[1]**2)*np.exp( - (x-prm[2])**2 /(2*prm[1]**2)) + prm[3]) -y).ravel()
    
    # Temporal profile: average the laser pixel columns of the 512x672
    # image, then normalise to the peak.
    vecP=vec.reshape(512,672)[:,prfLaser[0]:prfLaser[1]].sum(1)/(prfLaser[1]-prfLaser[0])
    vecP=vecP/np.max(vecP)
    # First time sample is unreliable; duplicate the second one instead.
    timeVal=np.append(timeVal[1],timeVal[1:])
    plobj1=plotax.plot(np.flipud(timeVal),np.flipud(vecP),c='r',linewidth=2,label='temporal Profile')
    try:
        # Reuse the secondary x-axis if the figure already carries one,
        # otherwise create a twin axis for the spatial profile.
        parentFig=plotax.get_figure()
        if len(parentFig.axes)>3:
            ax2=parentFig.axes[3]
            ax2.clear()
        else:
            ax2=plotax.twiny()
        # Spatial profile: average over all 512 time rows.
        vecP2=vec.reshape(512,672).sum(0)/(512)
        plobj2=ax2.plot(fixedaxes[0],vecP2/np.max(vecP2),label='Spatial Profile')
    except:
        # NOTE(review): bare except deliberately keeps plotting best-effort,
        # but it also hides real errors.
        print('no standard')
        
    try:
        import scipy as sp
        # Start guess: half the signal span, 1% of the window as sigma,
        # centre at mid-image, small offset.
        startGuess=[(np.max(vecP)-np.min(vecP))/2,1/100*(timeVal[-1]-timeVal[0]),timeVal[255],10]
        optimres=sp.optimize.least_squares(gaussFIT1D,startGuess,args=(np.flipud(timeVal),np.flipud(vecP)))
  
        print('Finished fit')
        '''Calc TimeWindow Shift'''
        import pytimber
        ldb=pytimber.LoggingDB()
        # Latest logged fine-delay setting of the streak-tube trigger.
        FineDelayStreak=ldb.get('MPPAWAKE:FASTTRIG-1:STREAKTUBE-FINEDELAY',time.strftime('%Y-%m-%d %H:%M:%S'))['MPPAWAKE:FASTTRIG-1:STREAKTUBE-FINEDELAY'][1][0]
         
        print('Finished getting ldb finedelay value:{0:1.2f}'.format(FineDelayStreak))
        FineDelay=FineDelayStreak-ZeroFineDelay # set shift
        relShift=optimres.x[2]-ZeroPxVal #relative shift measured by laser
        totalShift=FineDelay-(FineDelay+relShift)+psShiftDelay

        print('trying to plot')
        plotax.text(textPos[0],textPos[1],'StageCurrentPosition is {4:3.2f}mm\nStageZeroPosition is {3:3.2f}mm\nMeasured delay shift is:{0:3.0f}ps, set is {1:1.2f}ps\nmarker laser stage shift is:{2:3.0f}ps'.format(totalShift,FineDelay,psShiftDelay,StagePositionZeroValmm,currentPos),bbox=dict(facecolor='red', alpha=0.5))
        
        '''PLot'''
        plobj3=plotax.plot(np.flipud(timeVal),np.flipud(gaussFIT1D(optimres.x,timeVal,0)),c='g',linestyle='dotted',linewidth=1.5,label='Gauss fit: sigma={0:1.2f}ps,   pos in image is {1:3.0f}ps'.format(np.abs(optimres.x[1]),optimres.x[2]))
        legendAll=[l.get_label() for l in plobj1+plobj2+plobj3]
        plotax.legend(plobj1+plobj2+plobj3,legendAll)
    except:
        # NOTE(review): bare except also swallows a NameError when plobj2
        # was never created in the block above.
        print('no fitplot')
    #plotax.set_ylim(np.min(vec),1.05*np.max(vec))
    plotax.set_ylim(0,1.05)
コード例 #26
0
import time

import numpy as np

# File with one Timber/NXCALS variable name per line.
varfname = 'hlvarnames.txt'

# Fill boundaries as unix timestamps (seconds).
t_fill_start = 1527662512.98
t_fill_end = 1527704298.4750001

with open(varfname, 'r') as fid:
    varlist = [vv.replace('\n', '') for vv in fid.readlines()]

# Switch between cals and nxcals
import pytimber
nxcals = pytimber.LoggingDB(source='nxcals')
cals = pytimber.LoggingDB(source='ldb')

# Fetch each variable from both backends and time the extraction so the
# two data sources can be compared for coverage and speed.
for ivv, vv in enumerate(varlist):
    t_start_cals = time.time()
    data_cals = cals.get([vv], t_fill_start, t_fill_end)
    t_end_cals = time.time()
    dt_cals = t_end_cals - t_start_cals

    t_start_nxcals = time.time()
    data_nxcals = nxcals.get([vv], t_fill_start, t_fill_end)
    t_end_nxcals = time.time()
    dt_nxcals = t_end_nxcals - t_start_nxcals

    # Whether each backend returned any data for this variable name.
    found_in_cals = vv in data_cals.keys()
    found_in_nxcals = vv in data_nxcals.keys()
コード例 #27
0
#~ filln=6677
#~ filln=6681
#~ filln=6714

# Output folder for the squeeze-analysis results.
ff = './results_squeeze/'

# NOTE(review): 'filln' is only assigned when a fill number is given on the
# command line -- presumably it is defined earlier in the full script
# (see the commented defaults above); verify before running standalone.
if len(sys.argv)>1:
   filln = int(sys.argv[1])

# Observation beta* in cm.
beta_obs_cm = 33.
#beta_obs_cm = 43.



import pytimber
ldb = pytimber.LoggingDB(source='ldb')
mdb = pytimber.LoggingDB(source='mdb')

# Time window: from FLATTOP start to 5 minutes past the end of ADJUST.
fillinfo = mdb.getLHCFillData(filln)
bmodes = fillinfo['beamModes']
for bm in bmodes:
    if bm['mode'] == 'FLATTOP':
        t_start = bm['startTime']
    if bm['mode'] == 'ADJUST':
        t_stop = bm['endTime']+5*60.
        break

#hack
# Override: clip the window to 20 minutes after FLATTOP start.
t_stop = t_start+20*60.
        
コード例 #28
0
import pytimber

# Clients for the old CALS ('ldb') and new NXCALS backends, used to
# cross-check that both return the same data.
cals = pytimber.LoggingDB(source="ldb")
nxca = pytimber.LoggingDB(source="nxcals")

# Scalar variable: fetch timestamps/values from both backends.
t1 = "2018-11-27 01:00:00.000"
t2 = "2018-11-27 01:10:00.000"
name = "LHC.BLM.LIFETIME:LUMINOSITY_LOSS"
ts1, ds1 = cals.get(name, t1, t2)[name]
ts2, ds2 = nxca.get(name, t1, t2)[name]

# Vector-numeric variable (BBQ acquisition) over an earlier window.
t1 = "2016-08-07 17:27:00"
t2 = "2016-08-07 17:37:00"
name = "LHC.BQBBQ.CONTINUOUS_HS.B1:ACQ_DATA_H"

ts1, ds1 = cals.get(name, t1, t2)[name]
ts2, ds2 = nxca.get(name, t1, t2)[name]

# Wildcard search should yield comparable variable lists on both backends.
cals.search("%LHC%LUMI%")
nxca.search("%LHC%LUMI%")