def FilledSlotsAtTime(tt):
    '''
    Obtain the list of filled BID (bucket ID) slots at a time
    (as index arrays and as 0/1 filled-slot patterns).

    Args:
        tt : timestamp accepted by importData.cals2pd; the last logged
             fill pattern at/before this time is used ('last' query).

    Returns:
        fb1/fb2 [n]   : arrays with the filled bucket IDs for B1/B2
        b1/b2 [3564]  : arrays with the per-bucket filled flags
        fslots        : dictionary with all of the above
    '''
    # vlist = ['LHC.BQM.B1:NO_BUNCHES','LHC.BQM.B2:NO_BUNCHES',
    #          'LHC.BQM.B1:FILLED_BUCKETS','LHC.BQM.B2:FILLED_BUCKETS']
    # - use the BCT data that are more reliable
    vlist = ['LHC.BCTFR.A6R4.B1:BUNCH_FILL_PATTERN',
             'LHC.BCTFR.A6R4.B2:BUNCH_FILL_PATTERN']
    _fbct = importData.cals2pd(vlist, tt, 'last')
    beam1 = _fbct.iloc[0]['LHC.BCTFR.A6R4.B1:BUNCH_FILL_PATTERN']
    beam2 = _fbct.iloc[0]['LHC.BCTFR.A6R4.B2:BUNCH_FILL_PATTERN']
    # Use the scalar year of the (single) returned row: `_fbct.index.year`
    # is an array, and an array in boolean context is ambiguous/deprecated.
    if _fbct.index[0].year <= 2015:
        # --- it seems for 2015 the B1 and B2 had a +1 difference
        beam2 = np.roll(beam2, -1)
    b1 = np.array(beam1)
    b2 = np.array(beam2)
    fb1 = np.where(b1 > 0)[0]
    fb2 = np.where(b2 > 0)[0]
    fslots = {}
    fslots['B1'] = {}
    fslots['B2'] = {}
    # independent copies so callers can mutate fslots without touching b1/b2
    fslots['B1']['Filled'] = np.array(beam1)
    fslots['B2']['Filled'] = np.array(beam2)
    fslots['B1']['FilledBID'] = fb1
    fslots['B2']['FilledBID'] = fb2
    return fb1, fb2, b1, b2, fslots
def InjectionsPerFill(fno):
    '''
    Get the number of injections per beam for the selected fill(s).

    Returns separately the probe (INJPROT) and physics (INJPHYS) bunch
    injections. For fills with multiple injection periods the last is
    considered and the numbers have a negative sign.

    Filling pattern from fast-BCT device:
    'LHC.BCTFR.A6R4.B1:BUNCH_FILL_PATTERN'
    'LHC.BCTFR.A6R4.B2:BUNCH_FILL_PATTERN'
    '''
    counts = {'b1': {'INJPROT': 0, 'INJPHYS': 0},
              'b2': {'INJPROT': 0, 'INJPHYS': 0}}
    filldf = importData.LHCFillsByNumber(fno)
    for mode in ['INJPROT', 'INJPHYS']:
        periods = filldf[filldf['mode'].str.contains(mode)]
        if periods.empty:
            continue
        # last injection period of this mode
        t_start = periods['startTime'].iloc[-1]
        t_end = periods['endTime'].iloc[-1]
        duration = periods['duration'].iloc[-1]
        # negative sign flags that multiple injection periods existed
        sign = -1 if periods.shape[0] > 1 else 1
        for beam in ['b1', 'b2']:
            var = 'LHC.BCTFR.A6R4.%s:BUNCH_FILL_PATTERN' % beam.upper()
            fbdf = importData.cals2pd(
                var, t_start, t_end,
                split=int(duration / pd.Timedelta(20, 'm')))
            # total number of filled buckets at each sample
            fbdf['nfb'] = fbdf.apply(lambda row: np.sum(row[var]), axis=1)
            # each increase of the filled-bucket count is one injection
            increases = np.diff(fbdf['nfb'].values)
            counts[beam][mode] = sign * len(np.where(increases > 0)[0])
    return counts
def addColumnsFromCals(myDF, listOfVariables):
    '''
    Add columns to a previously initialized DF by downloading the listed
    variables over the DF's existing time span. This will not update in time.

    Args:
        myDF            : DataFrame with a time index (uses first/last index
                          as the query interval)
        listOfVariables : CALS variable names to download as new columns

    Returns:
        a new DataFrame with the extra columns joined alongside myDF
    '''
    aux = importData.cals2pd(listOfVariables, myDF.index[0], myDF.index[-1])
    # axis=1: join as new COLUMNS. The previous row-wise concat (axis=0)
    # stacked the new variables below the existing index instead of
    # adding them beside it, contradicting the function's purpose.
    myDF = pd.concat([myDF, aux], axis=1)
    return myDF
def addRowsFromCals(myDF, deltaTime=pd.offsets.Minute(2)):
    '''
    Append rows to a previously initialized DF, downloading the same
    variables for an extra window of time. By default it adds two minutes
    starting from the last index of the input DF.

    Args:
        myDF      : DataFrame whose column names are CALS variables
        deltaTime : pandas offset; extra time span to fetch (default 2 min)

    Returns:
        a new DataFrame with the freshly downloaded rows appended
    '''
    last_ts = myDF.index[-1]
    # column names are the CALS variable names to re-query
    new_rows = importData.cals2pd(list(myDF), last_ts, last_ts + deltaTime)
    return pd.concat([myDF, new_rows])
def initializeDF(variables, LastMinutesToConsider=2., startTime=None):
    '''
    Initialize a DF, downloading the data from CALS for a given amount of
    time. By default it downloads the data for the last two minutes
    counting from now.

    Args:
        variables             : CALS variable name(s) to download
        LastMinutesToConsider : length of the window in minutes (default 2)
        startTime             : end of the window; defaults to the CURRENT
                                time (CET) at each call

    Returns:
        DataFrame with the downloaded data
    '''
    # The old default `startTime=pd.Timestamp.now(tz='CET')` was evaluated
    # ONCE at import time, freezing "now" forever. Use a None sentinel and
    # compute the current time per call instead.
    if startTime is None:
        startTime = pd.Timestamp.now(tz='CET')
    return importData.cals2pd(
        variables,
        startTime - pd.offsets.Minute(LastMinutesToConsider),
        startTime)
def getData(self, time_list, return_status=False, for_beam='both', for_plane='both', remove_overlap=False, span=3, buffer_size=2048, skip=0):
    """Download BBQ continuous-HS acquisition buffers (plus RF frequency) for
    the given time windows and return them per beam/plane.

    Args:
        time_list     : iterable of (t_start, t_end) pairs to query.
        return_status : if True, map each timestamp through FindStatus into a
                        'status' column.
        for_beam      : 'B1', 'B2' or 'both'.
        for_plane     : 'H', 'V' or 'both'.
        remove_overlap: if True, de-overlap consecutive buffers via
                        self.flattenoverlap and re-chunk the flat signal.
        span          : window length in buffers for the re-chunking.
        buffer_size   : expected samples per acquisition buffer; rows with a
                        different length are dropped.
        skip          : extra stride between successive chunks.

    Returns:
        dotdict keyed 'at<beam><plane>' (e.g. 'atB1H') of DataFrames.
    """
    df = dotdict.dotdict()
    if for_beam == 'both':
        beams = ['B1', 'B2']
    else:
        beams = [for_beam]
    if for_plane == 'both':
        planes = ['H', 'V']
    else:
        planes = [for_plane]
    for beam in beams:
        for plane in planes:
            df['at%s%s' % (beam, plane)] = pd.DataFrame()
            # var[0]: turn-by-turn acquisition buffer, var[1]: RF frequency
            var = [
                'LHC.BQBBQ.CONTINUOUS_HS.%s:ACQ_DATA_%s' % (beam, plane),
                'ALB.SR4.%s:FGC_FREQ' % beam
            ]
            for time in time_list:
                raw_data = importData.cals2pd(var, time[0], time[1])
                if return_status:
                    raw_data['status'] = raw_data.index.map(FindStatus)
                # RF frequency is logged sparsely: interpolate onto the
                # acquisition timestamps before deriving frev
                raw_data[var[1]] = raw_data[var[1]].interpolate(
                    limit_direction='both')
                # revolution frequency = f_RF / harmonic number (35640)
                raw_data['frev'] = raw_data[var[1]] / 35640.
                raw_data.dropna(subset=[var[0]], inplace=True)
                # keep only complete acquisition buffers
                raw_data['shape'] = raw_data[var[0]].apply(
                    lambda x: len(x))
                raw_data = raw_data[raw_data['shape'] == buffer_size]
                if not remove_overlap:
                    df['at%s%s' % (beam, plane)] = pd.concat(
                        [df['at%s%s' % (beam, plane)], raw_data])
                elif not raw_data.empty:
                    ### Remove overlap: flatten the per-row buffers into one
                    ### continuous signal, then re-chunk it
                    data = []
                    for i in raw_data[var[0]]:
                        data.append(i)
                    to_flatten = tuple([
                        np.array(raw_data.index),
                        np.array(data),
                        np.array(raw_data[var[1]])
                    ])
                    test = {var[0]: to_flatten}
                    flatten = {}
                    # single-entry dict; flattenoverlap removes duplicated
                    # samples between consecutive buffers
                    for name, (timestamps, values, values2) in test.items():
                        flatten[
                            name], timestamps2, frf2 = self.flattenoverlap(
                                values, timestamps, values2)
                    step = 1 + skip
                    n = span * buffer_size
                    turns = np.arange(0, len(flatten[var[0]]))
                    # NOTE(review): chunk_t/chunk_time/chunk_frf take the
                    # SCALAR at each window start while chunk_var takes the
                    # full slice — presumably one timestamp/turn label per
                    # chunk is intended; confirm (cf. get_data, which slices
                    # all four).
                    chunk_t = [
                        turns[x] for x in xrange(0, len(turns) - n, step)
                    ]
                    chunk_var = [
                        flatten[var[0]][x:x + n]
                        for x in xrange(0, len(flatten[var[0]]) - n, step)
                    ]
                    chunk_time = [
                        timestamps2[x]
                        for x in xrange(0, len(timestamps2) - n, step)
                    ]
                    chunk_frf = [
                        frf2[x] for x in xrange(0, len(frf2) - n, step)
                    ]
                    raw_data2 = pd.DataFrame(
                        {
                            var[0]: chunk_var,
                            'turns': chunk_t,
                            var[1]: chunk_frf
                        },
                        index=chunk_time)
                    raw_data2['frev'] = raw_data2[var[1]] / 35640.
                    raw_data2['shape'] = raw_data2[var[0]].apply(
                        lambda x: len(x))
                    # NOTE(review): assignment (not concat) — with
                    # remove_overlap=True only the LAST time window is kept;
                    # verify this is intended.
                    df['at%s%s' % (beam, plane)] = raw_data2
    return df
def get_data(modes, time, rename_duplicates=False, remove_overlap=False, n=8000):
    """ TbT data for the modes and time specified.

    If the same mode is specified more than once and rename_duplicates is
    True, subsequent occurrences are renamed (mode1, mode2, ...).
    If remove_overlap is True, it will remove the overlap between
    consecutive acquisition buffers and combine the data with a sliding
    window of n (default 8000) turns.

    Input:  list of modes & dictionary `time` with (start, end) per key;
            a start of 'all' means "whole beam mode" via LHCCals2pd.
    Output: df -> beam ('B1','B2') -> plane ('H','V') -> 'tbt' -> mode
            (TbT data & RF frequency interpolated)
    """
    df = dotdict.dotdict()
    beams = ['B1', 'B2']
    planes = ['H', 'V']
    for beam in beams:
        df[beam] = dotdict.dotdict()
        for plane in planes:
            df[beam][plane] = dotdict.dotdict()
            df[beam][plane]['tbt'] = dotdict.dotdict()
            # var[0]: turn-by-turn acquisition buffer, var[1]: RF frequency
            var = [
                'LHC.BQBBQ.CONTINUOUS_HS.%s:ACQ_DATA_%s' % (beam, plane),
                'ALB.SR4.%s:FGC_FREQ' % beam
            ]
            if rename_duplicates:
                counter_dup = 0
            for mode in modes:
                # duplicate mode: store under a numbered key instead
                if mode in df[beam][plane]['tbt'] and rename_duplicates:
                    print "Renaming key..."
                    counter_dup += 1
                    new_mode = '%s%s' % (mode, str(counter_dup))
                    df[beam][plane]['tbt'][new_mode] = dotdict.dotdict()
                else:
                    df[beam][plane]['tbt'][mode] = dotdict.dotdict()
                    new_mode = mode
                if time[new_mode][0] == 'all':
                    # NOTE(review): uses time[mode][1] here but
                    # time[new_mode] above — asymmetry is suspicious for
                    # renamed duplicates; confirm against callers.
                    raw_data = importData.LHCCals2pd(var, time[mode][1],
                                                     beamModeList=mode)
                else:
                    t1 = time[new_mode][0]
                    t2 = time[new_mode][1]
                    raw_data = importData.cals2pd(var, t1, t2)
                raw_data['status'] = new_mode
                # RF frequency is logged sparsely: interpolate onto the
                # acquisition timestamps
                raw_data['ALB.SR4.%s:FGC_FREQ' %
                         beam] = raw_data['ALB.SR4.%s:FGC_FREQ' %
                                          beam].interpolate(
                                              limit_direction='both')
                if not remove_overlap:
                    df[beam][plane]['tbt'][new_mode] = raw_data
                else:
                    # flatten the per-row buffers into one continuous
                    # signal, then re-chunk with a sliding window of n turns
                    raw_data = raw_data.dropna(subset=[var[0]])
                    m = []
                    for i in raw_data[var[0]]:
                        m.append(i)
                    m = np.array(m)
                    test2 = tuple([
                        np.array(raw_data.index), m,
                        np.array(raw_data[var[1]])
                    ])
                    test = {var[0]: test2}
                    flatten = {}
                    # single-entry dict; flattenoverlap removes duplicated
                    # samples between consecutive buffers
                    for name, (timestamps, values, values2) in test.items():
                        flatten[name], timestamps2, frf2 = flattenoverlap(
                            values, timestamps, values2)
                    step = 1
                    #n = 8000
                    turns = np.arange(0, len(flatten[var[0]]))
                    chunk_t = [
                        turns[x:x + n] for x in xrange(0, len(turns) -
                                                       n, step)
                    ]
                    chunk_var = [
                        flatten[var[0]][x:x + n]
                        for x in xrange(0, len(flatten[var[0]]) - n, step)
                    ]
                    chunk_time = [
                        timestamps2[x:x + n]
                        for x in xrange(0, len(timestamps2) - n, step)
                    ]
                    chunk_frf = [
                        frf2[x:x + n]
                        for x in xrange(0, len(frf2) - n, step)
                    ]
                    raw_data2 = pd.DataFrame({
                        var[0]: chunk_var,
                        'turns': chunk_t,
                        'timestamps': chunk_time,
                        var[1]: chunk_frf,
                        'status': new_mode
                    })
                    df[beam][plane]['tbt'][new_mode] = raw_data2
    return df