# Timestamps of the traces to plot; populated later in the script (empty here).
traces_times = []



# Enlarge axis labels, tick labels and tick padding for the overview figures.
plt.rcParams.update({'axes.labelsize': 18,
                     'axes.linewidth': 2,
                     'xtick.labelsize': 'large',
                     'ytick.labelsize': 'large',
                     'xtick.major.pad': 14,
                     'ytick.major.pad': 14})

format_datetime = mdt.DateFormatter('%m-%d %H:%M')

# Load the basic and the bunch-by-bunch TIMBER exports for the selected fill.
fill_dict = {}
for _csv_tmpl in ('fill_basic_data_csvs/basic_data_fill_%d.csv',
                  'fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv'):
    fill_dict.update(tm.parse_timber_file(_csv_tmpl % filln, verbose=False))

n_traces = len(traces_times)
bint_thresh = 8e9  # single-bunch intensity threshold [p+]

# Figure counter and per-beam plotting loop (one figure per beam).
i_fig = 0
plt.close('all')
beam_col = ['b','r']  # beam 1 -> blue, beam 2 -> red
for beam in [1,2]:
    # Bunch-by-bunch (FBCT) and total (BCT) beam-current readings for this beam.
    fbct = FBCT.FBCT(fill_dict, beam=beam)
    bct = BCT.BCT(fill_dict, beam=beam)
    
    fig1 = plt.figure(i_fig, figsize=(14, 8), tight_layout=False)
# ---- Beispiel #2 (snippet separator) ----
# Locate this fill's data folder and its start/end times.
data_folder_fill = dict_fill_bmodes[filln]['data_folder']

t_start_fill = dict_fill_bmodes[filln]['t_startfill']
t_end_fill = dict_fill_bmodes[filln]['t_endfill']
t_fill_len = t_end_fill - t_start_fill
t_ref = t_start_fill
n_traces = 50.

# Load the TIMBER data; the on-disk layout changed between 2015 and 2016.
fill_dict = {}
if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
    # 2016 structure: basic and bunch-by-bunch data live in separate folders.
    for _csv_path in (data_folder_fill +
                      '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
                      data_folder_fill +
                      '/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' %
                      filln):
        fill_dict.update(tm.parse_timber_file(_csv_path, verbose=True))
else:
    # 2015 structure: a single csv per fill.
    fill_dict.update(
        tm.parse_timber_file(data_folder_fill +
                             '/fill_csvs/fill_%d.csv' % filln,
                             verbose=True))

beam_col = ['k', 'b', 'r']
# ---- Beispiel #3 (snippet separator) ----
    print 'Evaluating offsets'
    # 'zero_at' is either an absolute 'dd-mm-yyyy,HH:MM' time stamp or a
    # number of hours relative to the reference time.
    if ':' in zero_at:
        t_zero_unix = time.mktime(time.strptime(zero_at, '%d-%m-%Y,%H:%M'))
    else:
        t_zero_unix  = t_ref_unix + float(zero_at)*3600.
    filln_offset = fill_info.filln_at_time(t_zero_unix)

    data_folder_fill = dict_fill_bmodes[filln_offset]['data_folder']

    # Prefer the consolidated h5 dump; fall back to the raw csv exports.
    try:
        fill_dict = tm.timber_variables_from_h5(data_folder_fill+'/heatloads_fill_h5s/heatloads_all_fill_%d.h5'%filln_offset)
        print 'From h5!'
    except IOError:
        print "h5 file not found, using csvs"
        fill_dict = {}
        fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_basic_data_csvs/basic_data_fill_%d.csv'%filln_offset, verbose=False))
        fill_dict.update(tm.parse_timber_file(data_folder_fill+'/fill_heatload_data_csvs/heatloads_fill_%d.csv'%filln_offset, verbose=False))

    # Optionally override with recalculated heat loads (pressure-drop corrected).
    if args.use_recalc:
        #import GasFlowHLCalculator.qbs_fill as qf
        fill_dict.update(qf.get_fill_dict(filln_offset,h5_storage=H5_storage(recalc_h5_folder),use_dP=True))


    # Sample every heat-load variable at the zero time; these values are
    # later subtracted as offsets.
    dict_offsets={}
    for kk in hl_varlist:
        dict_offsets[kk] = np.interp(t_zero_unix, np.float_(np.array(fill_dict[kk].t_stamps)), fill_dict[kk].float_values())


pl.close('all')
ms.mystyle_arial(fontsz=fontsz, dist_tick_lab=9)
fig = pl.figure(1, figsize=figsz)
# Merge the per-IR heat-load variable lists into combined plotting groups.
# NOTE(review): the single-source group ('special_HC_Q1') aliases the original
# list (no copy) — confirm nothing mutates these lists downstream.
dict_hl_groups['Q6s_IR28'] = HL.variable_lists_heatloads[
    'Q6s_IR2'] + HL.variable_lists_heatloads['Q6s_IR8']
dict_hl_groups['special_HC_Q1'] = HL.variable_lists_heatloads['special_HC_Q1']
dict_hl_groups['special_HC_dipoles'] = HL.variable_lists_heatloads['special_HC_D2']+\
                  HL.variable_lists_heatloads['special_HC_D3']+HL.variable_lists_heatloads['special_HC_D4']
dict_hl_groups['Q4D2s_IR15'] = HL.variable_lists_heatloads[
    'Q4D2s_IR1'] + HL.variable_lists_heatloads['Q4D2s_IR5']
dict_hl_groups['Q4D2s_IR28'] = HL.variable_lists_heatloads[
    'Q4D2s_IR2'] + HL.variable_lists_heatloads['Q4D2s_IR8']

# Fill/beam-mode metadata produced by an earlier processing step.
with open('fills_and_bmodes.pkl', 'rb') as fid:
    dict_fill_bmodes = pickle.load(fid)

# Basic, heat-load and bunch-by-bunch TIMBER data for the selected fill.
fill_dict = {}
fill_dict.update(
    tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
                         verbose=False))
fill_dict.update(
    tm.parse_timber_file('fill_heatload_data_csvs/heatloads_fill_%d.csv' %
                         filln,
                         verbose=False))
fill_dict.update(
    tm.parse_timber_file(
        'fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln,
        verbose=False))

# Aliases used by the plotting helpers below.
dict_beam = fill_dict
dict_fbct = fill_dict

# Plot colours per beam: beam 1 blue, beam 2 red.
colstr = {}
colstr[1] = 'b'
colstr[2] = 'r'
# ---- Beispiel #5 (snippet separator) ----
# Convert each complete fill's csv exports to a single HDF5 file, skipping
# fills that are incomplete or already converted.
for filln in sorted(dict_fill_bmodes.keys()):
    print('Fill n.',filln)
    h5filename = h5folder+'/heatloads_all_fill_%d.h5'%filln

    if dict_fill_bmodes[filln]['flag_complete'] is False:
        print("Fill incomplete --> no h5 convesion")
        continue

    # Skip fills whose h5 is already present and whose data is final.
    if os.path.isfile(h5filename) and dict_fill_bmodes[filln]['flag_complete'] is True:
        print("Already complete and in h5")
        continue

    try:
        dict_fill_data = {}
        dict_fill_data.update(tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv'%filln, verbose=False))
        dict_fill_data.update(tm.parse_timber_file('fill_heatload_data_csvs/heatloads_fill_%d.csv'%filln, verbose=False))


        # Collect every variable name to store: beam currents, beam energy and
        # all heat-load channels.
        varlist = []

        varlist += LHC_BCT.variable_list()
        varlist += LHC_Energy.variable_list()
        for kk in list(LHC_Heatloads.variable_lists_heatloads.keys()):
            varlist+=LHC_Heatloads.variable_lists_heatloads[kk]


        dict_to_h5 = {}

        for varname in varlist:
            #~ print varname
sp_blen_vs_int.grid('on')
sp_blen_vs_int.set_xlabel('Bunch intensity [p+]')
sp_blen_vs_int.set_ylabel('Bunch length [ns]')

# Analytic heat-load models for an LHC arc: impedance and synchrotron radiation.
hli_calculator = ihl.HeatLoadCalculatorImpedanceLHCArc()
hlsr_calculator = srhl.HeatLoadCalculatorSynchrotronRadiationLHCArc()

# Accumulate a '_<fill>' suffix per fill (used for naming outputs) while
# loading every fill's TIMBER data.
fills_string = ''
for i_fill, filln in enumerate(filln_list):
    data_folder_fill = dict_fill_bmodes[filln]['data_folder']
    fills_string += '_%d' % filln
    fill_dict = {}
    if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
        # 2016 folder layout.
        basic_csv = (data_folder_fill +
                     '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln)
        bbb_csv = (data_folder_fill +
                   '/fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv'
                   % filln)
        fill_dict.update(tm.parse_timber_file(basic_csv, verbose=False))
        fill_dict.update(tm.parse_timber_file(bbb_csv, verbose=False))
        if use_recalculated:
            # Heat loads recomputed by the gas-flow calculator.
            fill_dict.update(qf.get_fill_dict(filln))
        else:
            hl_csv = (data_folder_fill +
                      '/fill_heatload_data_csvs/heatloads_fill_%d.csv' % filln)
            fill_dict.update(tm.parse_timber_file(hl_csv, verbose=False))
# ---- Beispiel #7 (snippet separator) ----
# If no averaging time was given (-1 sentinel), default to the start of
# STABLE beams relative to fill start; fall back to 0.5 h when the fill
# never reached STABLE.
if avg_time_hrs == -1.:
    t_stable = dict_fill_bmodes[filln]['t_start_STABLE']
    if t_stable != -1:
        avg_time_hrs = (t_stable -
                        dict_fill_bmodes[filln]['t_startfill']) / 3600.
    else:
        print('Warning: Avg time hrs = 0.5')
        avg_time_hrs = 0.5

# get location of current data
data_folder_fill = dict_fill_bmodes[filln]['data_folder']

# Load the fill data; fills before 4857 live in the old 2015 AFS location.
fill_dict = {}
if filln < 4857:
    fill_dict.update(
        tm.parse_timber_file(
            '/afs/cern.ch/project/spsecloud/LHC_2015_PhysicsAfterTS2/fill_csvs/fill_%d.csv'
            % filln,
            verbose=False))
    # NOTE(review): this passes an .h5 path to tm.parse_timber_file, which is
    # used for csv files everywhere else — confirm this is intended.
    fill_dict.update(
        tm.parse_timber_file(
            '/afs/cern.ch/project/spsecloud/LHC_2015_PhysicsAfterTS2/heatloads_fill_h5s/heatloads_all_fill_%i.h5'
            % filln,
            verbose=False))
else:
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
            '/fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
            verbose=False))
    fill_dict.update(
        tm.parse_timber_file(
            data_folder_fill +
# ---- Beispiel #8 (snippet separator) ----
# Fill selection: use the fill number given on the command line, otherwise
# fall back to the most recent fill recorded in the bmodes pickle (same
# behaviour as the companion scripts; previously the no-argument case left
# 'filln' undefined unless hard-coded).
with open('fills_and_bmodes.pkl', 'rb') as fid:
    dict_fill_bmodes = pickle.load(fid)

if len(sys.argv) > 1:
    print('--> Processing fill {:s}'.format(sys.argv[1]))
    filln = int(sys.argv[1])
else:
    filln = max(dict_fill_bmodes.keys())
    print('--> Processing latest fill: %d' % filln)

myfontsz = 16

# Basic and extra TIMBER data for the selected fill.
dict_fill_data = {}
dict_fill_data.update(
    tm.parse_timber_file('fill_basic_data_csvs/basic_data_fill_%d.csv' % filln,
                         verbose=True))
dict_fill_data.update(
    tm.parse_timber_file('fill_extra_data_csvs/extra_data_fill_%d.csv' % filln,
                         verbose=True))

# Aliases used by the plotting code below.
dict_beam = dict_fill_data
dict_fbct = dict_fill_data

# Plot colours per beam: beam 1 blue, beam 2 red.
colstr = {1: 'b', 2: 'r'}

energy = Energy.energy(dict_fill_data, beam=1)

# Fill start/end timestamps (unix seconds).
t_fill_st = dict_fill_bmodes[filln]['t_startfill']
t_fill_end = dict_fill_bmodes[filln]['t_endfill']
# ---- Beispiel #9 (snippet separator) ----
    dict_fill_bmodes = pickle.load(fid)

# Fill selection: command-line argument wins, otherwise use the latest fill.
# The 'print' statements were converted to the single-argument function form,
# which behaves identically on Python 2 and 3 and matches the rest of the file.
if len(sys.argv) > 1:
    print('--> Processing fill {:s}'.format(sys.argv[1]))
    filln = int(sys.argv[1])
else:
    filln = max(dict_fill_bmodes.keys())
    print('--> Processing latest fill: %d' % filln)

t_ref = dict_fill_bmodes[filln]['t_startfill']
tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref))

# Bunch-by-bunch intensity data for the selected fill.
fill_dict = {}
fill_dict.update(
    tm.parse_timber_file(
        'fill_bunchbybunch_data_csvs/bunchbybunch_data_fill_%d.csv' % filln,
        verbose=False))

bint_thresh = 8e9     # single-bunch intensity threshold [p+]
totint_thresh = 2e11  # total beam intensity threshold [p+]

t_inter = 60.  #seconds

i_fig = 0
plt.close('all')
# Loop over beams
beam_col = ['b', 'r']
for beam in [1, 2]:
    print('\nPreparing plot beam %d...' % beam)

    fbct = FBCT.FBCT(fill_dict, beam=beam)
time_conv = TH.TimeConverter(time_in, t_ref_unix, t_plot_tick_h=t_plot_tick_h)
tc = time_conv.from_unix

fill_info = Fills_Info('fills_and_bmodes.pkl')
fill_list = fill_info.fills_in_time_window(t_start_unix, t_end_unix)

# Baseline subtraction: when requested, sample every heat-load variable at
# the reference instant and remember those values as offsets.
if zero_at is not None:
    # 'zero_at' is either an absolute 'dd-mm-yyyy,HH:MM' stamp or a number
    # of hours relative to the reference time.
    if ':' in zero_at:
        t_zero_unix = time.mktime(time.strptime(zero_at, '%d-%m-%Y,%H:%M'))
    else:
        t_zero_unix = t_ref_unix + 3600. * float(zero_at)
    filln_offset = fill_info.filln_at_time(t_zero_unix)
    fill_dict = {}
    for _fname in ('fill_basic_data_csvs/basic_data_fill_%d.csv' % filln_offset,
                   'fill_heatload_data_csvs/heatloads_fill_%d.csv' % filln_offset):
        fill_dict.update(tm.parse_timber_file(_fname, verbose=False))
    dict_offsets = {}
    for kk in hl_varlist:
        dict_offsets[kk] = np.interp(
            t_zero_unix,
            np.float_(np.array(fill_dict[kk].t_stamps)),
            fill_dict[kk].float_values())


pl.close('all')
ms.mystyle_arial(fontsz=fontsz, dist_tick_lab=9)
#ms.mystyle(fontsz=fontsz)
fig = pl.figure(1, figsize=figsz)
fig.patch.set_facecolor('w')
# Three stacked time-sharing subplots; ax11 is a twin y-axis on the top one.
ax1 = fig.add_subplot(311)
ax11 = ax1.twinx()
ax2 = fig.add_subplot(312, sharex=ax1)
ax3 = fig.add_subplot(313, sharex=ax1)
                    help='Plot a legend for Imp/SR',
                    action='store_true')

# Unpack the parsed command-line options into module-level names.
args = parser.parse_args()
filln, t1 = args.filln, args.t
min_scale, max_scale = args.min_scale, args.max_scale
tagfname = args.tag

# Time offset is currently disabled.
t_offset = None

# Cell-by-cell TT94x temperature data for the requested fill.
fill_file = 'fill_heatload_data_csvs/t3_all_cells_fill_%d.csv' % filln
hid = tm.parse_timber_file(fill_file, verbose=args.v)

varlist = cq.config_qbs.TT94x_list

hid_set = shv.SetOfHomogeneousNumericVariables(varlist, hid)

# Merge the per-folder 'fills_and_bmodes' pickles into one dictionary,
# tagging every fill with the data folder it came from.
dict_fill_bmodes = {}
for df in data_folder_list:
    with open(df + '/fills_and_bmodes.pkl', 'rb') as fid:
        partial_bmodes = pickle.load(fid)
    for kk in partial_bmodes:
        partial_bmodes[kk]['data_folder'] = df
    dict_fill_bmodes.update(partial_bmodes)

# get location of current data
# ---- Beispiel #12 (snippet separator) ----
            this_dict_fill_bmodes[kk]['data_folder'] = df
        dict_fill_bmodes.update(this_dict_fill_bmodes)

# One pass per requested snapshot; each snapshot specifies a fill number,
# a sample time and an optional offset time (all in hours).
N_snapshots = len(snapshots)

for i_snapshot in xrange(N_snapshots):

    filln = snapshots[i_snapshot]['filln']
    t_sample_h = snapshots[i_snapshot]['t_h']
    t_offset_h = snapshots[i_snapshot]['t_offs_h']
    # An explicit --zeroat option overrides the per-snapshot offset.
    if args.zeroat is not None:
        t_offset_h = None

    # Cell-by-cell heat loads: either from the csv dump or from the
    # recalculated h5 storage.
    if from_csv:
        fill_file = 'fill_heatload_data_csvs/hl_all_cells_fill_%d.csv' % filln
        hid = tm.parse_timber_file(fill_file, verbose=args.v)
    else:
        hid = qf.get_fill_dict(filln, h5_storage=H5_storage(recalc_h5_folder))

    # get location of current data
    data_folder_fill = dict_fill_bmodes[filln]['data_folder']
    t_fill_st = dict_fill_bmodes[filln]['t_startfill']
    t_fill_end = dict_fill_bmodes[filln]['t_endfill']
    t_ref = t_fill_st
    tref_string = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(t_ref))
    tref_string_short = time.strftime("%d %b %Y %H:%M", time.localtime(t_ref))

    # extract standard fill data
    fill_dict = {}
    if os.path.isdir(data_folder_fill + '/fill_basic_data_csvs'):
        # 2016 structure