def store_special_qbs(self, filln, qbs_ob):
    """Write the special-cell QBS data for a fill to its h5 file.

    Arguments:
        - filln: fill number (determines the target file via
          self.get_special_qbs_file)
        - qbs_ob: object exposing ``timestamps``, ``variables`` and
          ``data`` attributes (aligned timber data)
    """
    qbs_file = self.get_special_qbs_file(filln)
    out_dir = os.path.dirname(qbs_file)
    # os.makedirs also creates missing intermediate directories;
    # the previous os.mkdir failed when the parent did not exist.
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    with h5py.File(qbs_file, 'w') as h5_handle:
        h5_handle.create_dataset('timestamps', data=qbs_ob.timestamps)
        h5_handle.create_dataset('variables', data=qbs_ob.variables)
        qbs_dataset = h5_handle.create_dataset('data', data=qbs_ob.data)
        # Stamp the dataset with its creation time (UTC timber string).
        qbs_dataset.attrs['time_created'] = \
            tm.UnixTimeStamp2UTCTimberTimeString(time.time())
def get_fill_dict(filln, h5_storage=None, use_dP=True):
    """Load the recalculated QBS data of a fill as a dict of timber variables.

    Arguments:
        - filln: fill number
        - h5_storage: H5_storage instance locating the recalculated h5 files
          (required; the historical default of None always crashed)
        - use_dP: whether to use the pressure-drop variant of the file

    Returns a dict mapping variable name -> tm.timber_variable_list.
    """
    if h5_storage is None:
        # The old default dereferenced None below, raising an opaque
        # AttributeError; fail with an explicit message instead.
        raise ValueError('get_fill_dict requires an h5_storage instance')
    fname = h5_storage.get_qbs_file(filln, use_dP=use_dP)
    obhl = mfm.h5_to_obj(fname)
    # Milliseconds column is not stored in the h5; use zeros of the
    # same length as the timestamps.
    ms0 = 0. * obhl.timestamps
    dict_out = {}
    for ii, vv in enumerate(obhl.variables):
        tv = tm.timber_variable_list()
        tv.t_stamps = obhl.timestamps
        tv.ms = ms0
        tv.values = obhl.data[:, ii]
        dict_out[vv] = tv
    return dict_out
# Optional command-line flags controlling tracing/averaging behaviour.
if '--notrace' in sys.argv:
    list_scan_times = []
if '--plotave' in sys.argv:
    plotave = True
if '--plotaveonly' in sys.argv:
    plotaveonly = True

# Load fill data, trying the newer directory layouts first.
fill_dict = {}
if os.path.isdir(data_folder_fill+'/fill_basic_data_csvs'):
    # 2016+ structure
    fill_dict.update(tm.parse_timber_file(
        data_folder_fill+'/fill_basic_data_csvs/basic_data_fill_%d.csv'%filln,
        verbose=True))
    fill_dict.update(tm.parse_timber_file(
        (data_folder_fill
         + '/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv'%filln),
        verbose=True))
elif os.path.isdir(data_folder_fill+'/fill_basic_data_h5s'):
    # 2016+ structure
    fill_dict.update(tm.CalsVariables_from_h5(
        data_folder_fill+'/fill_basic_data_h5s/basic_data_fill_%d.h5'%filln))
    fill_dict.update(tm.CalsVariables_from_h5(
        (data_folder_fill
         + '/fill_bunchbybunch_data_h5s/bunchbybunch_data_fill_%d.h5'%filln)))
else:
    # 2015 structure (single flat csv per fill)
    fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_csvs/fill_%d.csv'%filln,
        verbose=True))
# Convert impedance/SR heat loads fill by fill, newest fills first.
# Fills already converted or flagged incomplete are skipped.
for filln in sorted(dict_fill_bmodes.keys())[::-1]:
    print('Fill %i' % filln)
    h5filename = h5folder + '/imp_and_SR_fill_%d.h5' % filln
    if dict_fill_bmodes[filln]['flag_complete'] is False:
        print("Fill incomplete --> no h5 convesion")
        continue
    if os.path.isfile(h5filename):
        print("Already complete and in h5")
        continue
    try:
        # Standard per-fill data needed by the calculators.
        fill_dict = {}
        fill_dict.update(
            tm.CalsVariables_from_h5(
                'fill_basic_data_h5s/basic_data_fill_%d.h5' % filln))
        fill_dict.update(
            tm.CalsVariables_from_h5(
                'fill_bunchbybunch_data_h5s/bunchbybunch_data_fill_%d.h5' % filln))
        # Per-beam intensity and bunch-length objects.
        fbct_bx = {}
        bct_bx = {}
        blength_bx = {}
        for beam_n in (1, 2):
            fbct_bx[beam_n] = FBCT(fill_dict, beam=beam_n)
            bct_bx[beam_n] = BCT(fill_dict, beam=beam_n)
            blength_bx[beam_n] = blength(fill_dict, beam=beam_n)
        hl_imped_fill = fc.HeatLoad_calculated_fill(fill_dict, hli_calculator,
else:
    # No fill given on the command line: fall back to the latest fill.
    filln = max(dict_fill_bmodes.keys())
    print('--> Processing latest fill: %d' % filln)

# get location of current data
data_folder_fill = dict_fill_bmodes[filln]['data_folder']
t_ref = dict_fill_bmodes[filln]['t_startfill']
tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref))

# Load bunch-by-bunch data (csv layout preferred, then h5).
# NOTE(review): the directory test checks fill_basic_data_csvs but only
# bunch-by-bunch data is loaded here — presumably basic data is not
# needed by this script; confirm.
fill_dict = {}
if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
            '/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln,
            verbose=False))
elif os.path.isdir(data_folder_fill + '/fill_basic_data_h5s'):
    fill_dict.update(
        tm.CalsVariables_from_h5(
            data_folder_fill +
            '/fill_bunchbybunch_data_h5s/bunchbybunch_data_fill_%d.h5' % filln,
        ))
else:
    raise ValueError('What?!')

i_fig = 0
plt.close('all')

# Loop over beams
beam_col = ['b', 'r']
# Python 2 script fragment.
fill_info = Fills_Info(dict_fill_bmodes)
fill_list = fill_info.fills_in_time_window(t_start_unix, t_end_unix)

# find offset to remove
if zero_at is not None:
    print 'Evaluating offsets'
    # zero_at is either an absolute 'dd-mm-yyyy,HH:MM' time or a number
    # of hours relative to t_ref_unix.
    if ':' in zero_at:
        t_zero_unix = time.mktime(time.strptime(zero_at, '%d-%m-%Y,%H:%M'))
    else:
        t_zero_unix = t_ref_unix + float(zero_at)*3600.
    filln_offset = fill_info.filln_at_time(t_zero_unix)
    data_folder_fill = dict_fill_bmodes[filln_offset]['data_folder']
    # Prefer the consolidated h5 file, fall back to the per-fill csvs.
    try:
        fill_dict = tm.timber_variables_from_h5(data_folder_fill+'/heatloads_fill_h5s/heatloads_all_fill_%d.h5'%filln_offset)
        print 'From h5!'
    except IOError:
        print "h5 file not found, using csvs"
        fill_dict = {}
        fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_basic_data_csvs/basic_data_fill_%d.csv'%filln_offset, verbose=False))
        fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_heatload_data_csvs/heatloads_fill_%d.csv'%filln_offset, verbose=False))
    if args.use_recalc:
        # Recalculated QBS data overrides the logged values.
        fill_dict.update(qf.get_fill_dict(filln_offset,h5_storage=H5_storage(recalc_h5_folder),use_dP=True))
    # Per-variable heat-load value at t_zero, interpolated in time.
    dict_offsets={}
    for kk in hl_varlist:
        dict_offsets[kk] = np.interp(t_zero_unix, np.float_(np.array(fill_dict[kk].t_stamps)), fill_dict[kk].float_values())
def load_special_data_file(self, filln):
    """Read the special-cell data file for *filln* and return it as AlignedTimberData."""
    fname = self.get_special_data_file(filln)
    raw = mfm.h5_to_obj(fname)
    return tm.AlignedTimberData(raw.timestamps, raw.data, raw.variables)
def load_special_qbs(self, filln):
    """Read the special-cell QBS file for *filln* and return it as AlignedTimberData."""
    fname = self.get_special_qbs_file(filln)
    raw = mfm.h5_to_obj(fname)
    return tm.AlignedTimberData(raw.timestamps, raw.data, raw.variables)
        # Tag each fill with the folder it was loaded from, then merge
        # into the global dict (indentation reconstructed — the loop
        # headers are outside this view).
        this_dict_fill_bmodes[kk]['data_folder'] = df
    dict_fill_bmodes.update(this_dict_fill_bmodes)

# Python 2 fragment (xrange): iterate over the requested snapshots.
N_snapshots = len(snapshots)
for i_snapshot in xrange(N_snapshots):
    filln = snapshots[i_snapshot]['filln']
    t_sample_h = snapshots[i_snapshot]['t_h']
    t_offset_h = snapshots[i_snapshot]['t_offs_h']
    # --zeroat on the command line overrides the per-snapshot offset.
    if args.zeroat is not None:
        t_offset_h = None
    if from_csv:
        fill_file = 'fill_heatload_data_csvs/hl_all_cells_fill_%d.csv' % filln
        hid = tm.parse_timber_file(fill_file, verbose=args.v)
    else:
        hid = qf.get_fill_dict(filln, h5_storage=H5_storage(recalc_h5_folder))

    # get location of current data
    data_folder_fill = dict_fill_bmodes[filln]['data_folder']
    t_fill_st = dict_fill_bmodes[filln]['t_startfill']
    t_fill_end = dict_fill_bmodes[filln]['t_endfill']
    t_ref = t_fill_st
    tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref))
    tref_string_short = time.strftime("%d %b %Y %H:%M", time.localtime(t_ref))

    # extract standard fill data
    fill_dict = {}
    if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
        # 2016 structure
# Per-snapshot parameters (fragment: enclosing loop header outside view).
filln = snapshots[i_snapshot]['filln']
t_sample_h = snapshots[i_snapshot]['t_h']
t_offset_h = snapshots[i_snapshot]['t_offs_h']

# get location of current data
data_folder_fill = dict_fill_bmodes[filln]['data_folder']
t_fill_st = dict_fill_bmodes[filln]['t_startfill']
t_fill_end = dict_fill_bmodes[filln]['t_endfill']
t_ref = t_fill_st
tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref))
tref_string_short = time.strftime("%d %b %Y %H:%M", time.localtime(t_ref))

# Cell-by-cell heat loads: published h5 file or recalculated on the fly.
if from_published:
    fill_file = f"{data_folder_fill}/fill_cell_by_cell_heatload_data_h5s/cell_by_cell_heatloads_fill_{filln}.h5"
    hid = tm.CalsVariables_from_h5(fill_file)
else:
    hid = qf.get_fill_dict(filln, h5_storage=H5_storage(recalc_h5_folder))

# extract standard fill data
fill_dict = {}
if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
    # 2016 structure
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
            '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
            verbose=args.v))
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
def recalc_multiple_circuits(raw_data_object, calibration, circuit_selection,
                             with_P_drop):
    """Recompute beam-screen heat loads (QBS) for a set of circuits.

    Arguments:
        - raw_data_object: aligned raw sensor data; variables are looked up
          through its ``dictionary`` attribute
        - calibration: provides per-circuit calibration via get_circuit()
        - circuit_selection: 'full_lhc' (all calibrated circuits, plus arc
          averages) or 'all_instrumented' (instrumented cells, with
          per-magnet decomposition)
        - with_P_drop: forwarded to the heat-load solver

    Returns (obhl_store, other) where obhl_store is an AlignedTimberData
    with recalculated loads (+ arc averages + instrumented-cell variables)
    and other['issues'] lists per-circuit solver issues.
    """
    if circuit_selection == 'full_lhc':
        circuits = calibration.circuits
    elif circuit_selection == 'all_instrumented':
        circuits = sorted(instrumented_cells_config.keys())
    else:
        raise ValueError('Not implemented!')

    obraw = raw_data_object
    qbs_recalc = []
    issues = []
    instrum_cell_recalc_dict = {}
    for ii, circuit in enumerate(circuits):
        # Progress reporting: terse every 20 circuits for long lists,
        # verbose per circuit otherwise.
        if len(circuits) > 100:
            if np.mod(ii, 20) == 0:
                print('Circuit %d/%d' % (ii, len(circuits)))
        else:
            print(ii, circuit)
        cell_calib = calibration.get_circuit(circuit)
        # Raw sensor channels for this circuit (temperatures, pressures,
        # control valve, electrical heater).
        T1 = obraw.dictionary[cell_calib['T1']]
        T3 = obraw.dictionary[cell_calib['T3']]
        P1 = obraw.dictionary[cell_calib['P1']]
        P4 = obraw.dictionary[cell_calib['P4']]
        CV = obraw.dictionary[cell_calib['CV']]
        EH = obraw.dictionary[cell_calib['EH']]
        # T2 = obraw.dictionary[cell_calib['T2']]
        Q_bs, other = hlr.compute_heat_load(
            P1, T1, T3, P4, CV, EH,
            Qs_calib=cell_calib['Qs_calib'],
            Kv_calib=cell_calib['Kv_calib'],
            R_calib=cell_calib['R_calib'],
            cell_length=cell_calib['length'],
            n_channels=cell_calib['n_channels_tot'],
            channel_radius=cell_calib['channel_radius'],
            channel_roughness=cell_calib['roughness'],
            with_P_drop=with_P_drop,
            N_iter_max=100, scale_correction=0.3,
            iter_toll=1e-3)
        qbs_recalc.append(Q_bs)
        if len(other['issues']) > 0:
            print('Issues found for circuit %s:' % circuit)
            print('\n'.join(other['issues']))
            issues.append([circuit, other['issues']])

        if circuit_selection == 'all_instrumented':
            instrum_cell_config = instrumented_cells_config[circuit]
            (n_channels_circuits, magnet_lengths_circuits,
             in_sensor_names_circuits, out_sensor_names_circuits
             ) = hlr.extract_info_from_instrum_config_dict(
                 config_dict=instrum_cell_config)
            # NOTE(review): the comprehension variable ii reuses the name of
            # the outer loop index; harmless in Python 3 (comprehensions
            # have their own scope) but would clobber it under Python 2.
            t_out_magnets_circuits = [[
                obraw.dictionary[vv] for vv in out_sensor_names_circuits[ii]
            ] for ii in [0, 1]]
            t_in_magnets_circuits = [[
                obraw.dictionary[vv] for vv in in_sensor_names_circuits[ii]
            ] for ii in [0, 1]]
            qbs_magnets_circuits, other_instr = \
                hlr.compute_heat_loads_instrumented_cell(
                    mass_flow = other['mass_flow'], P1=P1,
                    T_in_magnets_circuits=t_in_magnets_circuits,
                    T_out_magnets_circuits=t_out_magnets_circuits,
                    magnet_lengths_circuits=magnet_lengths_circuits,
                    n_channels_circuits=n_channels_circuits,
                    channel_radius=cell_calib['channel_radius'],
                    channel_roughness=cell_calib['roughness'],
                    dp_toll = 0.001, N_iter_max=200)
            # NOTE(review): ['A', 'A'] looks like a typo for ['A', 'B']
            # (the config has circuit_A_beam / circuit_B_beam); the result
            # is never used below, so behavior is unaffected — confirm
            # before fixing or removing.
            magnet_beam_circuits = [
                instrum_cell_config['circuit_%s_beam' % cc] for cc in ['A', 'A']
            ]
            dict_output = hlr.build_instrumented_hl_dict(
                config_dict=instrum_cell_config, circuit=circuit,
                Qbs_magnets_circuits=qbs_magnets_circuits)
            instrum_cell_recalc_dict.update(dict_output)

    avg_loads = []
    avg_varnames = []
    if circuit_selection == 'full_lhc':
        # Build temporary object to compute arc averages
        obhl = tm.AlignedTimberData(timestamps=obraw.timestamps,
                                    data=np.array(qbs_recalc).T,
                                    variables=calibration.circuits)
        # Compute arc averages
        for arc in '12 23 34 45 56 67 78 81'.split():
            arc_circuits = HL.arc_cells_by_sector['S' + arc]
            arc_loads = np.array([obhl.dictionary[kk] for kk in arc_circuits])
            avg_load = np.nanmean(arc_loads, axis=0)
            avg_loads.append(avg_load)
            avg_varnames.append('S%s_QBS_AVG_ARC.POSST' % arc)

    # Instrumented-cell variables, in deterministic (sorted) order.
    instrum_varnames = sorted(instrum_cell_recalc_dict.keys())
    instrum_qbs_recalc = [
        instrum_cell_recalc_dict[kk] for kk in instrum_varnames
    ]

    # Assemble the object to be stored: per-circuit loads, arc averages,
    # then instrumented-cell results, columns matching `variables`.
    obhl_store = tm.AlignedTimberData(
        timestamps=obraw.timestamps,
        data=np.array(qbs_recalc + avg_loads + instrum_qbs_recalc).T,
        variables=(circuits + avg_varnames + instrum_varnames))
    other = {}
    other['issues'] = issues
    return obhl_store, other
        df+'/fills_and_bmodes.json')
    # Tag each fill with its source folder and merge into the global dict.
    for kk in this_dict_fill_bmodes:
        this_dict_fill_bmodes[kk]['data_folder'] = df
    dict_fill_bmodes.update(this_dict_fill_bmodes)

# get location of current data
data_folder_fill = dict_fill_bmodes[filln]['data_folder']

#load data
if os.path.isdir(data_folder_fill+'/fill_basic_data_csvs'):
    # 2016+ structure
    fill_dict = {}
    fill_dict.update(tm.parse_timber_file(
        data_folder_fill+'/fill_basic_data_csvs/basic_data_fill_%d.csv'%filln,
        verbose=True))
    fill_dict.update(tm.parse_timber_file(
        data_folder_fill + ('/fill_bunchbybunch_data_csvs/'
                            'bunchbybunch_data_fill_%d.csv'%filln),
        verbose=True))
    # Logged heat loads only needed when not using recalculated values.
    if not use_recalculated:
        fill_dict.update(tm.parse_timber_file(
            data_folder_fill + ('/fill_heatload_data_csvs/'
                                'heatloads_fill_%d.csv'%filln),
            verbose=False))
elif os.path.isdir(data_folder_fill+'/fill_basic_data_h5s'):
    # 2016+ structure
    fill_dict = {}
    fill_dict.update(tm.CalsVariables_from_h5(
        data_folder_fill+'/fill_basic_data_h5s/basic_data_fill_%d.h5'%filln))
    fill_dict.update(tm.CalsVariables_from_h5(
        data_folder_fill + ('/fill_bunchbybunch_data_h5s/'
            channel_roughness=cell_calib['roughness'],
            with_P_drop=with_P_drop,
            N_iter_max=100, scale_correction=0.3,
            iter_toll=1e-3)
        qbs_recalc.append(Q_bs)
        if len(other['issues']) > 0:
            print('Issues found for circuit %s:' % circuit)
            print('\n'.join(other['issues']))
            issues.append([circuit, other['issues']])

    # Build temporary object to compute arc averages
    obhl = tm.AlignedTimberData(timestamps=obraw.timestamps,
                                data=np.array(qbs_recalc).T,
                                variables=calibration.circuits)
    avg_loads = []
    avg_varnames = []
    # Compute arc averages
    for arc in '12 23 34 45 56 67 78 81'.split():
        arc_circuits = HL.arc_cells_by_sector['S' + arc]
        arc_loads = np.array([obhl.dictionary[kk] for kk in arc_circuits])
        avg_load = np.nanmean(arc_loads, axis=0)
        avg_loads.append(avg_load)
        avg_varnames.append('S%s_QBS_AVG_ARC.POSST' % arc)

    # Assemble the object holding per-circuit loads plus arc averages
    # for storage (comment was a stale copy of the one above).
    obhl_store = tm.AlignedTimberData(timestamps=obraw.timestamps,
# Standalone script: compute impedance and synchrotron-radiation heat
# loads for a single hard-coded fill.
import sys, os
BIN = os.path.expanduser("../")
sys.path.append(BIN)

# Load fills
import LHCMeasurementTools.TimberManager as tm

filln = 6740
fill_dict = {}
fill_dict.update(
    tm.CalsVariables_from_h5(
        ('../../LHC_followup_download_scripts/fill_basic_data_h5s'
         '/basic_data_fill_%d.h5' % filln)))
fill_dict.update(
    tm.CalsVariables_from_h5(
        ('../../LHC_followup_download_scripts/fill_bunchbybunch_data_h5s/'
         'bunchbybunch_data_fill_%d.h5' % filln)))

# Build heat load calculators
import impedance_heatload as ihl
import synchrotron_radiation_heatload as srhl

hli_calculator = ihl.HeatLoadCalculatorImpedanceLHCArc()
hlsr_calculator = srhl.HeatLoadCalculatorSynchrotronRadiationLHCArc()

# Use fill calculator
import FillCalculator as fc

hl_imped_fill = fc.HeatLoad_calculated_fill(fill_dict, hli_calculator)
hl_sr_fill = fc.HeatLoad_calculated_fill(fill_dict, hlsr_calculator)
import LHCMeasurementTools.TimberManager as tm

# Collect the full list of variable names present in the per-fill h5
# files of one reference fill.
data_folder_fill = './'
filln = 6741
varlist = []

data = tm.CalsVariables_from_h5(data_folder_fill
    + '/fill_basic_data_h5s/basic_data_fill_%d.h5' % filln)
varlist += data.keys()

data = tm.CalsVariables_from_h5(
    data_folder_fill
    + '/fill_bunchbybunch_data_h5s/bunchbybunch_data_fill_%d.h5' % filln)
varlist += data.keys()

data = tm.CalsVariables_from_h5(
    data_folder_fill + '/fill_heatload_data_h5s/heatloads_fill_%d.h5' % filln)
varlist += data.keys()

data = tm.CalsVariables_from_h5(data_folder_fill
    + '/fill_extra_data_h5s/extra_data_fill_%d.h5' % filln)
varlist += data.keys()

import LHCMeasurementTools.LHC_Fills as Fills
varlist += Fills.get_varlist()

# Variables for heat load recalculation
varfiles = [
    '../tools/GasFlowHLCalculator/variable_list_complete.txt',
    # Python 2 fragment: the enclosing `with open(...)` is outside view.
    dict_fill_bmodes = pickle.load(fid)

# Fill number from the command line, or latest known fill by default.
if len(sys.argv) > 1:
    print '--> Processing fill {:s}'.format(sys.argv[1])
    filln = int(sys.argv[1])
else:
    filln = max(dict_fill_bmodes.keys())
    print '--> Processing latest fill: %d' % filln

t_ref = dict_fill_bmodes[filln]['t_startfill']
tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref))

fill_dict = {}
fill_dict.update(
    tm.parse_timber_file(
        'fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln,
        verbose=False))

# Thresholds for selecting filled bunches / non-empty data.
bint_thresh = 8e9
totint_thresh = 2e11
t_inter = 60. #seconds

i_fig = 0
plt.close('all')

# Loop over beams
beam_col = ['b', 'r']
for beam in [1, 2]:
    print '\nPreparing plot beam %d...' % beam
    fbct = FBCT.FBCT(fill_dict, beam=beam)
    # Tail of an argparse add_argument call started outside this view.
    help='Plot a legend for Imp/SR', action='store_true')
args = parser.parse_args()

filln = args.filln
t1 = args.t
min_scale = args.min_scale
max_scale = args.max_scale
tagfname = args.tag
#if args.t_offset:
#    t_offset = args.t_offset
t_offset = None

# T3 temperatures for all cells of the requested fill.
fill_file = 'fill_heatload_data_csvs/t3_all_cells_fill_%d.csv' % filln
hid = tm.parse_timber_file(fill_file, verbose=args.v)
varlist = cq.config_qbs.TT94x_list
hid_set = shv.SetOfHomogeneousNumericVariables(varlist, hid)

# merge pickles and add info on location
dict_fill_bmodes = {}
for df in data_folder_list:
    with open(df + '/fills_and_bmodes.pkl', 'rb') as fid:
        this_dict_fill_bmodes = pickle.load(fid)
    for kk in this_dict_fill_bmodes:
        this_dict_fill_bmodes[kk]['data_folder'] = df
    dict_fill_bmodes.update(this_dict_fill_bmodes)

# get location of current data
#~ filln=4252 #~ filln=4260 #~ filln=4261 if len(sys.argv) > 1: print('--> Processing fill {:s}'.format(sys.argv[1])) filln = int(sys.argv[1]) myfontsz = 16 with open('fills_and_bmodes.pkl', 'rb') as fid: dict_fill_bmodes = pickle.load(fid) dict_fill_data = {} dict_fill_data.update( tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv' % filln, verbose=True)) dict_fill_data.update( tm.parse_timber_file('fill_extra_data_csvs/extra_data_fill_%d.csv' % filln, verbose=True)) dict_beam = dict_fill_data dict_fbct = dict_fill_data colstr = {} colstr[1] = 'b' colstr[2] = 'r' energy = Energy.energy(dict_fill_data, beam=1) t_fill_st = dict_fill_bmodes[filln]['t_startfill'] t_fill_end = dict_fill_bmodes[filln]['t_endfill']
#pkl_name = 'fills_and_bmodes.pkl' json_name = 'fills_and_bmodes.json' ldb = pytimber.LoggingDB(source='nxcals') #ldb = pytimber.LoggingDB() dict_fill_info = {} for period in periods: t_start_string = period.split('!')[0] t_stop_string = period.split('!')[1] t_start = th.localtime2unixstamp(t_start_string) t_stop = th.localtime2unixstamp(t_stop_string) # Get data from database data_fnum = tm.CalsVariables_from_pytimber( ldb.get(['HX:FILLN'], t_start, t_stop)) list_bmodes = ldb.getLHCFillsByTime(t_start, t_stop) # Generate dictionary dict_fill_info.update( Fills.make_fill_info_dict(data_fnum, list_bmodes, t_stop)) # with open(pkl_name, 'wb') as fid: # pickle.dump(dict_fill_info, fid) import json with open(json_name, 'w') as fid: json.dump(dict_fill_info, fid)
sp_blen_vs_int.grid('on')
sp_blen_vs_int.set_xlabel('Bunch intensity [p+]')
sp_blen_vs_int.set_ylabel('Bunch length [ns]')

hli_calculator = ihl.HeatLoadCalculatorImpedanceLHCArc()
hlsr_calculator = srhl.HeatLoadCalculatorSynchrotronRadiationLHCArc()

# Load data for every requested fill; fills_string accumulates a suffix
# used elsewhere (e.g. for output filenames).
fills_string = ''
for i_fill, filln in enumerate(filln_list):
    data_folder_fill = dict_fill_bmodes[filln]['data_folder']
    fills_string += '_%d' % filln
    fill_dict = {}
    if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
        fill_dict.update(
            tm.parse_timber_file(
                data_folder_fill +
                '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
                verbose=False))
        fill_dict.update(
            tm.parse_timber_file(
                data_folder_fill +
                '/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln,
                verbose=False))
        if use_recalculated:
            # NOTE(review): called without h5_storage, unlike the other
            # call sites in this codebase — confirm qf.get_fill_dict
            # handles the default correctly here.
            fill_dict.update(qf.get_fill_dict(filln))
        else:
            fill_dict.update(
                tm.parse_timber_file(
                    data_folder_fill +
                    '/fill_heatload_data_csvs/heatloads_fill_%d.csv' % filln,
                    verbose=False))
# Default averaging time: start of STABLE beams relative to fill start,
# falling back to 0.5 h when STABLE was never reached.
if avg_time_hrs == -1.:
    if dict_fill_bmodes[filln]['t_start_STABLE'] != -1:
        avg_time_hrs = (dict_fill_bmodes[filln]['t_start_STABLE'] -
                        dict_fill_bmodes[filln]['t_startfill']) / 3600.
    else:
        print('Warning: Avg time hrs = 0.5')
        avg_time_hrs = 0.5

# get location of current data
data_folder_fill = dict_fill_bmodes[filln]['data_folder']

fill_dict = {}
if filln < 4857:
    # Pre-2016 fills live in a fixed AFS location.
    fill_dict.update(
        tm.parse_timber_file(
            '/afs/cern.ch/project/spsecloud/LHC_2015_PhysicsAfterTS2/fill_csvs/fill_%d.csv' % filln,
            verbose=False))
    # NOTE(review): an .h5 path is passed to parse_timber_file, which the
    # other scripts use only for csvs — looks like it should be
    # tm.timber_variables_from_h5; confirm before relying on this branch.
    fill_dict.update(
        tm.parse_timber_file(
            '/afs/cern.ch/project/spsecloud/LHC_2015_PhysicsAfterTS2/heatloads_fill_h5s/heatloads_all_fill_%i.h5' % filln,
            verbose=False))
else:
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
            '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
            verbose=False))
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
def load_qbs(self, filln, use_dP):
    """Load the recalculated QBS heat loads of *filln* as AlignedTimberData.

    use_dP selects the pressure-drop variant of the stored file.
    """
    fname = self.get_qbs_file(filln, use_dP=use_dP)
    raw = mfm.h5_to_obj(fname)
    return tm.AlignedTimberData(raw.timestamps, raw.data, raw.variables)
# Convert logged heat-load csvs to per-fill h5 files, skipping fills that
# are incomplete or already converted.
for filln in sorted(dict_fill_bmodes.keys()):
    print('Fill n.',filln)
    h5filename = h5folder+'/heatloads_all_fill_%d.h5'%filln
    if dict_fill_bmodes[filln]['flag_complete'] is False:
        print("Fill incomplete --> no h5 convesion")
        continue
    # NOTE(review): the `flag_complete is True` re-check is redundant —
    # the guard above already skipped incomplete fills.
    if os.path.isfile(h5filename) and dict_fill_bmodes[filln]['flag_complete'] is True:
        print("Already complete and in h5")
        continue
    try:
        dict_fill_data = {}
        dict_fill_data.update(tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv'%filln, verbose=False))
        dict_fill_data.update(tm.parse_timber_file('fill_heatload_data_csvs/heatloads_fill_%d.csv'%filln, verbose=False))
        # Variables to be stored: beam current, energy and all heat loads.
        varlist = []
        varlist += LHC_BCT.variable_list()
        varlist += LHC_Energy.variable_list()
        for kk in list(LHC_Heatloads.variable_lists_heatloads.keys()):
            varlist+=LHC_Heatloads.variable_lists_heatloads[kk]
        dict_to_h5 = {}
        for varname in varlist:
            #~ print varname
fill_info = Fills_Info(dict_fill_bmodes)
fill_list = fill_info.fills_in_time_window(t_start_unix, t_end_unix)

# find offset to remove
if zero_at is not None:
    print('Evaluating offsets')
    # zero_at is either an absolute 'dd-mm-yyyy,HH:MM' time or a number
    # of hours relative to t_ref_unix.
    if ':' in zero_at:
        t_zero_unix = time.mktime(time.strptime(zero_at, '%d-%m-%Y,%H:%M'))
    else:
        t_zero_unix = t_ref_unix + float(zero_at)*3600.
    filln_offset = fill_info.filln_at_time(t_zero_unix)
    data_folder_fill = dict_fill_bmodes[filln_offset]['data_folder']
    # Prefer the consolidated h5 file; fall back to per-category h5s.
    try:
        fill_dict = tm.timber_variables_from_h5(data_folder_fill+'/heatloads_fill_h5s/heatloads_all_fill_%d.h5'%filln_offset)
        print('From h5!')
    except IOError:
        print("h5 file not found, using h5s :-P")
        fill_dict = {}
        fill_dict.update(tm.CalsVariables_from_h5(
            data_folder_fill + ('/fill_basic_data_h5s/'
                                'basic_data_fill_%d.h5'%filln_offset)))
        fill_dict.update(tm.CalsVariables_from_h5(
            data_folder_fill + ('/fill_heatload_data_h5s/'
                                'heatloads_fill_%d.h5'%filln_offset)))
    if args.use_recalc:
        # Recalculated QBS data overrides the logged values.
        fill_dict.update(qf.get_fill_dict(filln_offset,
                                          h5_storage=H5_storage(recalc_h5_folder),
                                          use_dP=True))
    # Tail of an if-branch whose header is outside this view: a manual
    # time interval was requested on the command line.
    print('Interval manually set: %.2fh to %.2fh' % (t_start_man, t_end_man))
    if N_traces_set == None:
        N_traces_set = 30
    list_scan_times = np.linspace(t_start_man, t_end_man, N_traces_set)
    xlim = t_start_man, t_end_man
else:
    xlim = None, None

if '--notrace' in sys.argv:
    list_scan_times = []

# Load basic fill data (csv layout preferred, then h5).
fill_dict = {}
if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
            '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
            verbose=True))
elif os.path.isdir(data_folder_fill + '/fill_basic_data_h5s'):
    fill_dict.update(
        tm.CalsVariables_from_h5(
            data_folder_fill +
            '/fill_basic_data_h5s/basic_data_fill_%d.h5' % filln,
        ))

pl.close('all')
sp_ploss = None

# START PLOT
fig_h = pl.figure(1, figsize=(8 * 1.6, 6 * 1.3))
fig_h.patch.set_facecolor('w')
ms.mystyle_arial(fontsz=16, dist_tick_lab=5)
fig_h.suptitle('Fill %d: started on %s' % (filln, tref_string), fontsize=18)
# Group heat-load variable lists by magnet family / interaction region.
dict_hl_groups['Q6s_IR28'] = HL.variable_lists_heatloads[
    'Q6s_IR2'] + HL.variable_lists_heatloads['Q6s_IR8']
dict_hl_groups['special_HC_Q1'] = HL.variable_lists_heatloads['special_HC_Q1']
dict_hl_groups['special_HC_dipoles'] = HL.variable_lists_heatloads['special_HC_D2']+\
    HL.variable_lists_heatloads['special_HC_D3']+HL.variable_lists_heatloads['special_HC_D4']
dict_hl_groups['Q4D2s_IR15'] = HL.variable_lists_heatloads[
    'Q4D2s_IR1'] + HL.variable_lists_heatloads['Q4D2s_IR5']
dict_hl_groups['Q4D2s_IR28'] = HL.variable_lists_heatloads[
    'Q4D2s_IR2'] + HL.variable_lists_heatloads['Q4D2s_IR8']

with open('fills_and_bmodes.pkl', 'rb') as fid:
    dict_fill_bmodes = pickle.load(fid)

# Load basic, heat-load and bunch-by-bunch data for the fill.
fill_dict = {}
fill_dict.update(
    tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
                         verbose=False))
fill_dict.update(
    tm.parse_timber_file('fill_heatload_data_csvs/heatloads_fill_%d.csv' % filln,
                         verbose=False))
fill_dict.update(
    tm.parse_timber_file(
        'fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln,
        verbose=False))

dict_beam = fill_dict
dict_fbct = fill_dict

# Per-beam plot colors.
colstr = {}
colstr[1] = 'b'
colstr[2] = 'r'
    # Python 2 fragment: the enclosing `for df in ...` loop header is
    # outside this view.
    with open(df+'/fills_and_bmodes.pkl', 'rb') as fid:
        this_dict_fill_bmodes = pickle.load(fid)
    for kk in this_dict_fill_bmodes:
        this_dict_fill_bmodes[kk]['data_folder'] = df
    dict_fill_bmodes.update(this_dict_fill_bmodes)

# get location of current data
data_folder_fill = dict_fill_bmodes[filln]['data_folder']

#load data
if os.path.isdir(data_folder_fill+'/fill_basic_data_csvs'):
    # 2016+ structure
    fill_dict = {}
    fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_basic_data_csvs/basic_data_fill_%d.csv'%filln, verbose=True))
    fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv'%filln, verbose=True))
    # Logged heat loads only needed when not using recalculated values.
    if not use_recalculated:
        fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_heatload_data_csvs/heatloads_fill_%d.csv'%filln, verbose=False))
else:
    # 2015 structure
    fill_dict = {}
    fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_csvs/fill_%d.csv'%filln, verbose=True))

if use_recalculated:
    print 'Using recalc data'
    # remove db values from dictionary (for 2015 cases)
    for kk in fill_dict.keys():
        if 'QBS' in kk and '.POSST'in kk:
# get location of current data data_folder_fill = dict_fill_bmodes[filln]['data_folder'] t_start_fill = dict_fill_bmodes[filln]['t_startfill'] t_end_fill = dict_fill_bmodes[filln]['t_endfill'] t_fill_len = t_end_fill - t_start_fill t_ref = t_start_fill n_traces = 50. fill_dict = {} if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'): # 2016 structure fill_dict.update( tm.parse_timber_file( data_folder_fill + '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln, verbose=True)) fill_dict.update( tm.parse_timber_file( data_folder_fill + '/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln, verbose=True)) else: # 2015 structure fill_dict.update( tm.parse_timber_file(data_folder_fill + '/fill_csvs/fill_%d.csv' % filln, verbose=True)) beam_col = ['k', 'b', 'r']
time_conv = TH.TimeConverter(time_in, t_ref_unix, t_plot_tick_h=t_plot_tick_h)
tc = time_conv.from_unix

fill_info = Fills_Info('fills_and_bmodes.pkl')
fill_list = fill_info.fills_in_time_window(t_start_unix, t_end_unix)

# find offset to remove
if zero_at is not None:
    # zero_at is either an absolute 'dd-mm-yyyy,HH:MM' time or a number
    # of hours relative to t_ref_unix.
    if ':' in zero_at:
        t_zero_unix = time.mktime(time.strptime(zero_at, '%d-%m-%Y,%H:%M'))
    else:
        t_zero_unix = t_ref_unix + float(zero_at)*3600.
    filln_offset = fill_info.filln_at_time(t_zero_unix)
    fill_dict = {}
    fill_dict.update(tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv'%filln_offset, verbose=False))
    fill_dict.update(tm.parse_timber_file('fill_heatload_data_csvs/heatloads_fill_%d.csv'%filln_offset, verbose=False))
    # Per-variable heat-load value at t_zero, interpolated in time.
    dict_offsets={}
    for kk in hl_varlist:
        dict_offsets[kk] = np.interp(t_zero_unix,
            np.float_(np.array(fill_dict[kk].t_stamps)),
            fill_dict[kk].float_values())

pl.close('all')
ms.mystyle_arial(fontsz=fontsz, dist_tick_lab=9)
#ms.mystyle(fontsz=fontsz)

# Three stacked subplots sharing the time axis.
fig = pl.figure(1, figsize=figsz)
fig.patch.set_facecolor('w')
ax1 = fig.add_subplot(311)
ax11 = ax1.twinx()
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)
traces_times = []

plt.rcParams.update({'axes.labelsize': 18,
                     'axes.linewidth': 2,
                     'xtick.labelsize': 'large',
                     'ytick.labelsize': 'large',
                     'xtick.major.pad': 14,
                     'ytick.major.pad': 14})
format_datetime = mdt.DateFormatter('%m-%d %H:%M')

fill_dict = {}
fill_dict.update(tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv'%filln, verbose=False))
fill_dict.update(tm.parse_timber_file('fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv'%filln, verbose=False))

n_traces = len(traces_times)
# Threshold on bunch intensity for selecting filled slots.
bint_thresh = 8e9

i_fig = 0
plt.close('all')

beam_col = ['b','r']

for beam in [1,2]:
    fbct = FBCT.FBCT(fill_dict, beam=beam)
    bct = BCT.BCT(fill_dict, beam=beam)
    fig1 = plt.figure(i_fig, figsize=(14, 8), tight_layout=False)
def __getitem__(self, kk):
    """Fetch variable *kk* from the database over [t_start, t_end].

    Returns a tm.timber_variable_list with t_stamps and values filled in.
    """
    query_result = self.db.get(kk, self.t_start, self.t_end)
    out = tm.timber_variable_list()
    out.t_stamps, out.values = query_result[kk]
    return out