# --- Flattened script fragment (newlines/indentation lost in extraction; kept verbatim). ---
# Sets the fill reference time from t_fill_st and formats it for titles, resets all
# matplotlib figures, applies the house plot style, then builds per-beam FBCT, BCT and
# (optionally) bunch-length objects from fill_dict. dict_hl_data aliases fill_dict
# (the csv-parsing alternative is commented out). Finally it opens one figure per
# heat-load group; the subplot handle sp1 is threaded through so later subplots share
# the x-axis.
# NOTE(review): `xrange` is Python 2 only — replace with `range` when porting to Python 3.
# NOTE(review): the chunk ends inside the `for ii in xrange(N_figures)` loop; the loop
# body continues beyond this view, so the loop extent cannot be confirmed from here.
t_ref = t_fill_st tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref)) pl.close('all') ms.mystyle_arial(fontsz=myfontsz, dist_tick_lab=8) fbct_bx = {} bct_bx = {} blength_bx = {} for beam_n in beams_list: fbct_bx[beam_n] = FBCT(fill_dict, beam=beam_n) bct_bx[beam_n] = BCT(fill_dict, beam=beam_n) if flag_bunch_length: blength_bx[beam_n] = blength(fill_dict, beam=beam_n) #dict_hl_data = tm.parse_timber_file('heatloads_fill_csvs/heatloads_all_special_fill_%d.csv'%filln) dict_hl_data = fill_dict group_names = dict_hl_groups.keys() N_figures = len(group_names) sp1 = None for ii in xrange(N_figures): fig_h = pl.figure(ii, figsize=(12, 10)) fig_h.patch.set_facecolor('w') sptotint = pl.subplot(3, 1, 1, sharex=sp1) sp1 = sptotint
# --- Flattened script fragment (newlines/indentation lost in extraction; kept verbatim). ---
# Near-duplicate of the preceding setup chunk, minus the `t_ref = t_fill_st` assignment:
# resets figures, applies plot style, builds per-beam FBCT/BCT/blength objects, aliases
# dict_hl_data to fill_dict, and opens one figure per heat-load group.
# NOTE(review): `xrange` is Python 2 only.
# NOTE(review): ends right after creating the top subplot (`sptotint`) inside the figure
# loop — the `sp1 = sptotint` re-assignment and the rest of the loop body lie beyond
# this view, so the loop extent cannot be confirmed from here.
pl.close('all') ms.mystyle_arial(fontsz=myfontsz, dist_tick_lab=8) fbct_bx = {} bct_bx = {} blength_bx = {} for beam_n in beams_list: fbct_bx[beam_n] = FBCT(fill_dict, beam = beam_n) bct_bx[beam_n] = BCT(fill_dict, beam = beam_n) if flag_bunch_length: blength_bx[beam_n] = blength(fill_dict, beam = beam_n) #dict_hl_data = tm.parse_timber_file('heatloads_fill_csvs/heatloads_all_special_fill_%d.csv'%filln) dict_hl_data = fill_dict group_names = dict_hl_groups.keys() N_figures = len(group_names) sp1 = None for ii in xrange(N_figures): fig_h = pl.figure(ii, figsize=(12, 10)) fig_h.patch.set_facecolor('w') sptotint = pl.subplot(3,1,1, sharex=sp1)
def __init__(self, fill_dict, heat_load_calculator, Dt_calc=60.,
             fbct_dict=None, bct_dict=None, blength_dict=None):
    """Compute the calculated heat load over a whole fill.

    Returns the half cell heat load for a fill dict, which has to consist
    of basic and bunch-by-bunch data.

    Parameters
    ----------
    fill_dict : dict
        Timber-style fill data; must contain BCT, FBCT, bunch-length and
        energy variables.
    heat_load_calculator : object
        Must expose ``calculate_P_Wm(ppb, sigma_t, energy_eV=...)``
        returning the heat load per metre.
    Dt_calc : float
        Spacing of the evaluation time grid, presumably seconds
        (same unit as ``bct.t_stamps``) — TODO confirm.
    fbct_dict, bct_dict, blength_dict : dict or None
        Optional pre-built per-beam objects keyed by beam number (1, 2);
        when None they are constructed here from ``fill_dict``.

    Attributes set
    --------------
    t_stamps : np.ndarray
        Evaluation times, derived from the beam-1 BCT time span.
    heat_load_calculated_per_beam_Wm : dict
        Keys ``'beam_1'``/``'beam_2'`` with the per-beam result.
    heat_load_calculated_total : np.ndarray
        Sum of the two per-beam results.
    """
    self.heat_load_calculator = heat_load_calculator
    ene = energy(fill_dict, beam=1)
    # Different for both beams
    self.heat_load_calculated_per_beam_Wm = {}
    for beam_ctr in (1, 2):
        # Use caller-supplied measurement objects when given, else build them.
        if bct_dict is None:
            bct = BCT(fill_dict, beam=beam_ctr)
        else:
            bct = bct_dict[beam_ctr]
        if fbct_dict is None:
            fbct = FBCT(fill_dict, beam_ctr)
        else:
            fbct = fbct_dict[beam_ctr]
        if blength_dict is None:
            bunch_length = blength(fill_dict, beam=beam_ctr)
        else:
            bunch_length = blength_dict[beam_ctr]
        # Time grid is fixed once, from the beam-1 BCT span; beam 2 reuses it
        # so both beams are evaluated on identical time stamps.
        if beam_ctr == 1:
            self.t_stamps = np.arange(bct.t_stamps[0], bct.t_stamps[-1], Dt_calc)
        ppb = []
        energy_eV = []
        sigma_t = []
        for tt in self.t_stamps:
            fbct_trace = fbct.nearest_older_sample(tt)
            summed = np.sum(fbct_trace)
            # Rescale the bunch-by-bunch pattern so its total matches the
            # (more accurate) BCT total intensity; skip when the FBCT is empty.
            if summed != 0.:
                fbct_trace *= bct.nearest_older_sample(tt) / summed
            bl_trace = bunch_length.nearest_older_sample(tt)
            # Slots with (near-)zero measured length hold no bunch: give them a
            # dummy length of 1 s to avoid division issues downstream and zero
            # their intensity so they do not contribute to the heat load.
            mask_no_bunch = bl_trace < 1e-15
            bl_trace[mask_no_bunch] = 1.
            fbct_trace[mask_no_bunch] = 0.
            ppb.append(fbct_trace)
            # 4-sigma bunch length -> rms sigma_t.
            sigma_t.append(bl_trace / 4.)
            # Energy stored in GeV -> eV for the calculator.
            energy_eV.append(ene.nearest_older_sample(tt) * 1e9)
        ppb = np.array(ppb)
        sigma_t = np.array(sigma_t)
        energy_eV = np.array(energy_eV)
        self.heat_load_calculated_per_beam_Wm[
            'beam_%d' % beam_ctr] = self.heat_load_calculator.calculate_P_Wm(
            ppb, sigma_t, energy_eV=energy_eV)
    self.heat_load_calculated_total = self.heat_load_calculated_per_beam_Wm[
        'beam_1'] + self.heat_load_calculated_per_beam_Wm['beam_2']
# --- Flattened script fragment (newlines/indentation lost in extraction; kept verbatim). ---
# Formats the fill reference time, then per beam: builds the FBCT object, counts filled
# bunches (slots whose intensity exceeds 10% of the maximum, taking the max over time of
# the per-sample count), and builds BCT and optional bunch-length objects. Composes a
# "B1: Nb, B2: Nb" string and, for a single-fill run (i_fill == 0 and one fill in
# filln_list), sets the suptitle on the intensity and bunch-length figures. Finally
# opens the per-fill figure.
# NOTE(review): the printed message says "Choosing beam 1" but the code actually takes
# the *average* of the two beams' bunch counts — message and behaviour disagree; confirm
# which is intended before relying on `n_bunches`.
# NOTE(review): `(... + ...) / 2.` yields a float bunch count by construction.
tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref)) fbct_bx = {} bct_bx = {} blength_bx = {} n_bunches_bx = {} for beam_n in (1, 2): fbct_bx[beam_n] = FBCT(fill_dict, beam=beam_n) bint = fbct_bx[beam_n].bint min_int = 0.1 * np.max(bint) mask_filled = bint > min_int n_bunches_bx[beam_n] = np.max(np.sum(mask_filled, axis=1)) bct_bx[beam_n] = BCT(fill_dict, beam=beam_n) if flag_bunch_length: blength_bx[beam_n] = blength(fill_dict, beam=beam_n) n_bunches_string = 'B1: %ib, B2: %ib ' % (n_bunches_bx[1], n_bunches_bx[2]) if i_fill == 0 and len(filln_list) == 1: for fig_ in fig_vs_int, fig_blen_vs_int: fig_.suptitle( ' Fill. %d started on %s\n%s %s (%s)' % (filln, tref_string, n_bunches_string, 'Arcs', title_string)) print((filln, 'Number of bunches:', n_bunches_bx)) if n_bunches_bx[1] != n_bunches_bx[2]: print('Not the same number of bunches! Choosing beam 1.') n_bunches = (n_bunches_bx[1] + n_bunches_bx[2]) / 2. fig_h = pl.figure(i_fill, figsize=(8, 6)) fig_h.patch.set_facecolor('w')
def extract_and_compute_extra_fill_data(fill_dict, t_ref, t_sample_h, thresh_bint=3e10):
    """Sample beam observables and calculated heat loads at one instant of a fill.

    Parameters
    ----------
    fill_dict : dict
        Timber-style fill data with BCT, FBCT, bunch-length and energy variables.
    t_ref : float
        Fill reference time stamp (same unit as the measurement time stamps,
        presumably seconds since epoch — TODO confirm).
    t_sample_h : float
        Sampling time in hours relative to ``t_ref``.
    thresh_bint : float
        Minimum bunch intensity for a slot to count as a filled bunch.

    Returns
    -------
    tuple
        (intensity_b1, intensity_b2, bl_ave_b1, bl_ave_b2,
         n_bunches_b1, n_bunches_b2, energy_GeV,
         hl_imped_sample, hl_sr_sample)
        where the two heat-load samples are scaled by the average-arc magnet
        length taken from the module-level ``hl`` object.
    """
    from LHCMeasurementTools.LHC_FBCT import FBCT
    from LHCMeasurementTools.LHC_BCT import BCT
    from LHCMeasurementTools.LHC_BQM import blength
    from LHCMeasurementTools.LHC_Energy import energy
    import HeatLoadCalculators.impedance_heatload as ihl
    import HeatLoadCalculators.synchrotron_radiation_heatload as srhl
    import HeatLoadCalculators.FillCalculator as fc

    # Build the per-beam measurement objects once and share them between the
    # two heat-load fill calculations below.
    fbct_bx = {}
    bct_bx = {}
    blength_bx = {}
    for beam_n in [1, 2]:
        fbct_bx[beam_n] = FBCT(fill_dict, beam=beam_n)
        bct_bx[beam_n] = BCT(fill_dict, beam=beam_n)
        blength_bx[beam_n] = blength(fill_dict, beam=beam_n)

    hli_calculator = ihl.HeatLoadCalculatorImpedanceLHCArc()
    hlsr_calculator = srhl.HeatLoadCalculatorSynchrotronRadiationLHCArc()

    hl_imped_fill = fc.HeatLoad_calculated_fill(fill_dict, hli_calculator,
        bct_dict=bct_bx, fbct_dict=fbct_bx, blength_dict=blength_bx)
    hl_sr_fill = fc.HeatLoad_calculated_fill(fill_dict, hlsr_calculator,
        bct_dict=bct_bx, fbct_dict=fbct_bx, blength_dict=blength_bx)

    # Interpolate each calculated heat load at t_sample_h (hours from t_ref)
    # and scale from W/m to W per average arc half cell.
    hl_imped_sample = hl.magnet_length['AVG_ARC'][0] * np.interp(
        t_sample_h, (hl_imped_fill.t_stamps - t_ref) / 3600,
        hl_imped_fill.heat_load_calculated_total)
    # FIX: the SR sample previously interpolated on hl_imped_fill.t_stamps
    # (copy-paste slip). Both fills share the same BCT-derived grid today,
    # but each result must be paired with its own time axis.
    hl_sr_sample = hl.magnet_length['AVG_ARC'][0] * np.interp(
        t_sample_h, (hl_sr_fill.t_stamps - t_ref) / 3600,
        hl_sr_fill.heat_load_calculated_total)

    intensity_b1 = np.interp(t_sample_h, (bct_bx[1].t_stamps - t_ref) / 3600,
                             bct_bx[1].values)
    intensity_b2 = np.interp(t_sample_h, (bct_bx[2].t_stamps - t_ref) / 3600,
                             bct_bx[2].values)
    bl_ave_b1 = np.interp(t_sample_h, (blength_bx[1].t_stamps - t_ref) / 3600,
                          blength_bx[1].avblen)
    bl_ave_b2 = np.interp(t_sample_h, (blength_bx[2].t_stamps - t_ref) / 3600,
                          blength_bx[2].avblen)

    # Count bunches above threshold in the FBCT trace nearest (older) to the
    # absolute sampling time.
    n_bunches_b1 = np.sum(fbct_bx[1].nearest_older_sample(t_sample_h * 3600 + t_ref) > thresh_bint)
    n_bunches_b2 = np.sum(fbct_bx[2].nearest_older_sample(t_sample_h * 3600 + t_ref) > thresh_bint)
    energy_GeV = energy(fill_dict, beam=1).nearest_older_sample(t_sample_h * 3600 +
        t_ref)

    return intensity_b1, intensity_b2, bl_ave_b1, bl_ave_b2, n_bunches_b1, n_bunches_b2, energy_GeV, hl_imped_sample, hl_sr_sample