def main():
    """
    Code for varying bias runs: 1174-1176
    """
    run_db, cal_db = "runDB.json", "calDB.json"

    par = argparse.ArgumentParser(description="A/E cut for MJ60")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-db", "--writeDB", action=st, help="store results in DB")
    args = vars(par.parse_args())

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db)  # , tier_dir=tier_dir

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, cal=cal_db)

    # resolution(ds, args["writeDB"])
    baseline_noise(ds)

def main():
    run_db, cal_db = "runDB.json", "calDB.json"

    par = argparse.ArgumentParser(description="calibration suite for MJ60")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    args = vars(par.parse_args())

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db)

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), sub='none', md=run_db, cal=cal_db)

    # gain_shift(ds)
    # get_power_spectrum(ds)
    baseline_noise(ds)

def main():
    """
    Code to implement an A/E cut
    """
    # global runDB
    # with open("runDB.json") as f:
    #     runDB = json.load(f)
    # global tier_dir
    # tier_dir = runDB["tier_dir"]
    # global meta_dir
    # meta_dir = runDB["meta_dir"]

    run_db, cal_db = "runDB.json", "calDB.json"

    par = argparse.ArgumentParser(description="A/E cut for MJ60")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-db", "--writeDB", action=st, help="store results in DB")
    args = vars(par.parse_args())

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db)  # , tier_dir=tier_dir

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, cal=cal_db)

    find_cut(ds, ds_lo, args["writeDB"])

def main():
    """
    perform automatic calibration of pygama DataSets.
    command line options to specify the DataSet are the same as in processing.py
    save results in a JSON database for access by other routines.
    """
    run_db, cal_db = "runDB.json", "calDB.json"

    par = argparse.ArgumentParser(description="calibration suite for MJ60")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-s", "--spec", action=st, help="print simple spectrum")
    arg("-p1", "--pass1", action=st, help="run pass-1 (linear) calibration")
    arg("-p2", "--pass2", action=st, help="run pass-2 (peakfit) calibration")
    arg("-e", "--etype", nargs=1, help="custom energy param (default is e_ftp)")
    arg("-t", "--test", action=st, help="set verbose (testing) output")
    arg("-db", "--writeDB", action=st, help="store results in DB")
    arg("-pr", "--printDB", action=st, help="print calibration results in DB")
    args = vars(par.parse_args())

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db, v=args["test"])

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), sub='none', md=run_db, cal=cal_db,
                     v=args["test"])

    # -- start calibration routines --
    etype = args["etype"][0] if args["etype"] else "e_ftp"

    if args["spec"]:
        show_spectrum(ds, etype)

    if args["pass1"]:
        calibrate_pass1(ds, etype, args["writeDB"], args["test"])

    if args["pass2"]:
        calibrate_pass2(ds, args["test"])

    # fit to germanium peakshape function goes here -- take from matthew's code
    # if args["pass3"]:
    #     calibrate_pass3(ds)

    if args["printDB"]:
        show_calDB(cal_db)

def main(argv):
    """
    Uses pygama's amazing DataSet class to process runs
    for different data sets, with arbitrary configuration options
    defined in a JSON file.
    C. Wiseman, 2019/04/09
    Modified for HADES data, A. Zschocke
    """
    # -- parse args --
    par = argparse.ArgumentParser(description="test data processing suite")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-t0", "--tier0", action=st, help="run ProcessTier0 on list")
    arg("-t1", "--tier1", action=st, help="run ProcessTier1 on list")
    arg("-t", "--test", action=st, help="test mode, don't run")
    arg("-n", "--nevt", nargs='?', default=np.inf, help="limit max num events")
    arg("-i", "--ioff", nargs='?', default=0, help="start at index [i]")
    arg("-v", "--verbose", action=st, help="set verbose output")
    arg("-o", "--ovr", action=st, help="overwrite existing files")
    arg("-m", "--nomp", action=sf, help="don't use multiprocessing")
    arg("-s", "--sub", nargs=1, help="number of subfiles")
    arg("-db", "--db", nargs=1, help="/path/to/runDB.json")
    args = vars(par.parse_args())

    if args["db"]:
        run_db = args["db"][0] + "/runDB.json"

    if args["sub"]:
        sub = int(args["sub"][0])

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(sub, ds_lo, ds_hi, md=run_db, v=args["verbose"])

    if args["run"]:
        ds = DataSet(sub, run=int(args["run"][0]), md=run_db, v=args["verbose"])

    # -- start processing --
    if args["tier0"] and args["sub"]:
        sub = int(args["sub"][0])
        tier0(ds, sub, args["ovr"], args["nevt"], args["verbose"], args["test"])

    if args["tier1"]:
        tier1(ds, sub, args["ovr"], args["nevt"], args["ioff"], args["nomp"],
              args["verbose"], args["test"])

def main(argv):
    """
    Uses pygama's amazing DataSet class to process runs
    for different data sets and arbitrary configuration options
    defined in a JSON file.
    """
    # datadir = os.environ["CAGEDATA"]
    run_db, cal_db = './meta/runDB.json', './meta/calDB.json'

    # -- parse args --
    par = argparse.ArgumentParser(description="data processing suite for CAGE")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-d2r", "--daq_to_raw", action=st, help="run daq_to_raw on list")
    arg("-r2d", "--raw_to_dsp", action=st, help="run raw_to_dsp on list")
    arg("-t", "--test", action=st, help="test mode, don't run")
    arg("-n", "--nevt", nargs='?', default=np.inf, help="limit max num events")
    arg("-i", "--ioff", nargs='?', default=0, help="start at index [i]")
    arg("-o", "--ovr", action=st, help="overwrite existing files")
    arg('-v', '--verbose', default=2, type=int,
        help="Verbosity level: 0=silent, 1=basic warnings, 2=verbose output, 3=debug. Default is 2.")
    arg('-b', '--block', default=8, type=int,
        help="Number of waveforms to process simultaneously. Default is 8.")
    arg('-g', '--group', default='',
        help="Name of group in LH5 file. By default process all base groups. Supports wildcards.")
    args = par.parse_args()
    d_args = vars(par.parse_args())
    # ds = pu.get_dataset_from_cmdline(d_args, run_db, cal_db)

    # -- declare the DataSet --
    if d_args["ds"]:
        ds_lo = int(d_args["ds"][0])
        try:
            ds_hi = int(d_args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(1, ds_lo, ds_hi, md=run_db, v=d_args["verbose"])

    if d_args["run"]:
        ds = DataSet(1, run=int(d_args["run"][0]), md=run_db, v=d_args["verbose"])

    # print(ds.runs)
    # pprint(ds.paths)

    # -- start processing --
    if args.daq_to_raw:
        daq_to_raw(ds, args.ovr, args.nevt, args.verbose, args.test)

    if args.raw_to_dsp:
        raw_to_dsp(ds, args.ovr, args.nevt, args.test, args.verbose, args.block,
                   args.group)

def window_ds():
    """
    Take a single DataSet and window it so that the file only contains
    events near an expected peak location.
    Create some temporary in/out files s/t the originals aren't overwritten.
    """
    # run = 42
    # ds = DataSet(run=run, md="runDB.json")
    ds_num = 3
    ds = DataSet(ds_num, md="runDB.json")

    # specify temporary I/O locations
    p_tmp = "~/Data/cage"
    f_tier1 = "~/Data/cage/cage_ds3_t1.h5"
    f_tier2 = "~/Data/cage/cage_ds3_t2.h5"

    # figure out the uncalibrated energy range of the K40 peak
    # xlo, xhi, xpb = 0, 2e6, 2000  # show phys. spectrum (top feature is 2615 pk)
    xlo, xhi, xpb = 990000, 1030000, 250  # K40 peak, DS 3

    t2df = ds.get_t2df()
    hE, xE = ph.get_hist(t2df["energy"], range=(xlo, xhi), dx=xpb)
    plt.semilogy(xE, hE, ls='steps', lw=1, c='r')

    import matplotlib.ticker as ticker
    plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.4e'))
    plt.locator_params(axis='x', nbins=5)

    plt.xlabel("Energy (uncal.)", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.savefig(f"./plots/cage_ds{ds_num}_winK40.pdf")
    # exit()

    # write a windowed tier 1 file containing only waveforms near the peak
    t1df = pd.DataFrame()
    for run in ds.paths:
        ft1 = ds.paths[run]["t1_path"]
        print(f"Scanning ds {ds_num}, run {run}\n    file: {ft1}")
        for chunk in pd.read_hdf(ft1, 'ORSIS3302DecoderForEnergy', chunksize=5e4):
            t1df_win = chunk.loc[(chunk.energy > xlo) & (chunk.energy < xhi)]
            print(t1df_win.shape)
            t1df = pd.concat([t1df, t1df_win], ignore_index=True)

    # -- save to HDF5 output file --
    h5_opts = {
        "mode": "w",  # overwrite existing
        "append": False,
        "format": "table",
        "complib": "blosc:zlib",
        "complevel": 1,
        "data_columns": ["ievt"]
    }
    t1df.reset_index(inplace=True)
    t1df.to_hdf(f_tier1, key="df_windowed", **h5_opts)
    print("wrote file:", f_tier1)

def main():
    """
    tumbsi analysis suite
    """
    global display
    display = 1  # allow displaying intermediate distributions for control

    run_db, cal_db = "runDB.json", "calDB.json"

    with open(run_db) as f:
        runDB = json.load(f)

    global tier_dir
    tier_dir = runDB["tier_dir"]
    global meta_dir
    meta_dir = runDB["meta_dir"]
    global dep_line
    dep_line = 1592.5
    global dep_acc
    dep_acc = 0.9

    # peaks_of_interest = sorted(runDB["peaks_of_interest"], reverse=True)
    peaks_of_interest = [2614.5, 1460.8, 583.2]

    # take calibration parameters from the 'calibration.py' output
    # with open(cal_db) as f:
    #     calDB = json.load(f)

    par = argparse.ArgumentParser(description="calibration suite for tumbsi")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    args = vars(par.parse_args())

    ecal = np.zeros(3)
    eres = np.zeros(2)

    # which run number is being analyzed
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db)
        run = ds_lo

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, cal=cal_db)

    print("")
    print("Start Pulse Shape Analysis")
    print("")

    psa(run, ds, ecal, eres, peaks_of_interest, ds, ds_lo)

def process_ds(rise_times):
    """
    and determine the trapezoid parameters that minimize
    the FWHM of the peak (fitting to the peakshape function).
    """
    from pygama.dsp.base import Intercom
    from pygama.io.tier1 import ProcessTier1
    import pygama.io.decoders.digitizers as pgd

    ds_num = 3
    ds = DataSet(ds_num, md="runDB.json")
    first_run = ds.runs[0]

    # specify temporary I/O locations
    out_dir = os.path.expanduser('~') + "/Data/cage"
    t1_file = f"{out_dir}/cage_ds3_t1.h5"
    t2_file = f"{out_dir}/cage_ds3_t2.h5"
    opt_file = f"{out_dir}/cage_ds3_optimize.h5"

    if os.path.exists(opt_file):
        os.remove(opt_file)

    # check the windowed file
    tmp = pd.read_hdf(t1_file)
    nevt = len(tmp)

    rc_decay = 72

    for i, rt in enumerate(rise_times):

        # custom tier 1 processor list -- very minimal
        proc_list = {
            "clk": 100e6,
            "fit_bl": {"ihi": 500, "order": 1},
            "blsub": {},
            "trap": [
                {"wfout": "wf_etrap", "wfin": "wf_blsub",
                 "rise": rt, "flat": 2.5, "decay": rc_decay},
                {"wfout": "wf_atrap", "wfin": "wf_blsub",
                 "rise": 0.04, "flat": 0.1, "fall": 2}
            ],
            "get_max": [{"wfin": "wf_etrap"}, {"wfin": "wf_atrap"}],
            # "ftp": {"test": 1}
            "ftp": {}
        }
        proc = Intercom(proc_list)

        dig = pgd.SIS3302Decoder
        dig.decoder_name = "df_windowed"
        dig.class_name = None

        ProcessTier1(t1_file, proc, output_dir=out_dir, overwrite=True,
                     verbose=False, multiprocess=True, nevt=np.inf, ioff=0,
                     chunk=ds.config["chunksize"], run=first_run,
                     t2_file=t2_file, digitizers=[dig])

        # load the temporary file and append to the main output file
        df_key = f"opt_{i}"
        t2df = pd.read_hdf(t2_file)
        t2df.to_hdf(opt_file, df_key)

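# Hypothetical driver for the rise-time scan above: process_ds expects an
# iterable of trapezoid rise times. The grid below is illustrative only and
# not taken from the original script.
if __name__ == "__main__":
    rise_times = np.arange(1, 6, 0.5)  # example rise-time grid to scan
    process_ds(rise_times)
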
def get_spectra():
    ds = DataSet(runlist=[143, 144, 145], md='./runDB.json', tier_dir=tier_dir)
    t2df = ds.get_t2df()

    xlo, xhi, xpb = 0, 10000, 10
    xP, hP = get_hist(t2df["trap_max"], xlo, xhi, xpb)

    plt.plot(xP, hP, ls='steps', lw=1.5, c='m',
             label="pygama trap_max, {} cts".format(sum(hP)))
    plt.xlabel("Energy (uncal)", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.legend()
    plt.tight_layout()
    plt.show()

def main(argv):
    """
    Uses pygama's amazing DataSet class to process runs
    for different data sets, with arbitrary configuration options
    defined in a JSON file.
    C. Wiseman, 2019/04/09
    """
    run_db = './runDB.json'

    # -- parse args --
    par = argparse.ArgumentParser(description="test data processing suite")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-t0", "--daq_to_raw", action=st, help="run ProcessRaw on list")
    arg("-t1", "--raw_to_dsp", action=st, help="run RunDSP on list")
    arg("-t", "--test", action=st, help="test mode, don't run")
    arg("-n", "--nevt", nargs='?', default=np.inf, help="limit max num events")
    arg("-i", "--ioff", nargs='?', default=0, help="start at index [i]")
    arg("-v", "--verbose", action=st, help="set verbose output")
    arg("-o", "--ovr", action=st, help="overwrite existing files")
    arg("-m", "--nomp", action=sf, help="don't use multiprocessing")
    args = vars(par.parse_args())

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, v=args["verbose"])

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, v=args["verbose"])

    # -- start processing --
    if args["daq_to_raw"]:
        daq_to_raw(ds, args["ovr"], args["nevt"], args["verbose"], args["test"])

    if args["raw_to_dsp"]:
        raw_to_dsp(ds, args["ovr"], args["nevt"], args["ioff"], args["nomp"],
                   args["verbose"], args["test"])

def tier2_AoverE():
    """
    show the A/E distribution.
    """
    run = 42
    ds = DataSet(run=run, md="runDB.json")
    t2df = ds.get_t2df()

    aoe = t2df["current_max"] / t2df["e_ftp"]

    # # 1d
    # xlo, xhi, xpb = -2000, 2000, 10
    # h, x = ph.get_hist(aoe, range=(xlo, xhi), dx=xpb)
    # plt.semilogy(x, h, ls='steps', lw=1, c='r', label=f'run {run}')
    # plt.xlabel("A/E (uncal.)", ha='right', x=1)
    # plt.ylabel("Counts", ha='right', y=1)
    # plt.grid(linestyle=':')
    # plt.legend()
    # # plt.show()
    # plt.cla()

    # 2d vs E
    xlo, xhi, xpb = 0, 6000, 5
    # ylo, yhi, ypb = 0.6, 1.2, 0.001
    ylo, yhi, ypb = 0, 0.1, 0.001
    nbx, nby = int((xhi - xlo) / xpb), int((yhi - ylo) / ypb)

    from matplotlib.colors import LogNorm
    plt.hist2d(t2df["e_ftp"], aoe, bins=(nbx, nby),
               range=((xlo, xhi), (ylo, yhi)), norm=LogNorm(), cmap='jet')

    # cb = plt.colorbar()
    # cb.set_label("Counts", ha='right', y=1)
    plt.xlabel("e_ftp (uncal.)", ha='right', x=1)
    plt.ylabel("A/E", ha='right', y=1)
    # plt.grid(which='both', linestyle=':')
    plt.grid()
    plt.savefig(f"./plots/cage_run{run}_AE.png", dpi=200)

def get_dataset_from_cmdline(args, run_db, cal_db):
    """
    make it easier to call this from argparse:
        arg("-ds", nargs='*', action="store", help="load runs for a DS")
        arg("-r", "--run", nargs=1, help="load a single run")
    """
    from pygama import DataSet

    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db, v=args["verbose"])

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, cal=cal_db,
                     v=args["verbose"])

    return ds

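# A minimal usage sketch for the helper above (it is imported elsewhere as
# pu.get_dataset_from_cmdline, per the commented-out call in the CAGE
# processing script). The parser must define -ds, -r/--run, and -v/--verbose
# so the keys this function reads are present; the DB paths are assumptions.
import argparse

if __name__ == "__main__":
    par = argparse.ArgumentParser(description="example driver")
    arg, st = par.add_argument, "store_true"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-v", "--verbose", action=st, help="set verbose output")
    args = vars(par.parse_args())

    ds = get_dataset_from_cmdline(args, "runDB.json", "calDB.json")
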
def tier2_spec():
    """
    show a few examples of energy spectra (onboard E and offline E)
    """
    run = 42
    ds = DataSet(run=run, md="runDB.json")
    t2df = ds.get_t2df()
    # print(t2df.columns)

    # onboard E
    ene = "energy"
    # xlo, xhi, xpb = 0, 20e6, 5000  # show muon peak (full dyn. range)
    xlo, xhi, xpb = 0, 2e6, 2000  # show phys. spectrum (top feature is 2615 pk)

    # # trap_max E
    # ene = "etrap_max"
    # xlo, xhi, xpb = 0, 50000, 100  # muon peak
    # xlo, xhi, xpb = 0, 6000, 10  # gamma spectrum

    # # fixed time pickoff E
    # ene = "e_ftp"
    # # xlo, xhi, xpb = 0, 50000, 100  # muon peak
    # xlo, xhi, xpb = 0, 6000, 10  # gamma spectrum

    # get histogram
    hE, xE = ph.get_hist(t2df[ene], range=(xlo, xhi), dx=xpb)

    # make the plot
    plt.semilogy(xE, hE, ls='steps', lw=1, c='r', label=f'run {run}')
    plt.xlabel("Energy (uncal.)", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)

    # show a couple formatting tricks
    import matplotlib.ticker as ticker
    plt.gca().xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1e'))
    plt.locator_params(axis='x', nbins=5)
    plt.grid(linestyle=':')

    plt.legend()
    # plt.show()
    plt.savefig(f"./plots/cage_run{run}_{ene}.pdf")

def main():
    """
    mj60 waveform viewer
    """
    run_db, cal_db = "runDB.json", "calDB.json"

    par = argparse.ArgumentParser(description="waveform viewer for mj60")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-db", "--writeDB", action=st, help="store results in DB")
    args = vars(par.parse_args())

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db)  # , tier_dir=tier_dir

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, cal=cal_db)

    # which run number is being analyzed
    # run = 249
    # run = 214
    # run = 204
    # run = 278

    # working on analysis for the AvsE cut in mj60
    # t1df, t2df = chunker(run)
    # cutwf, t2cut = cutter(t1df, t2df, run)
    # histograms(cutwf, t2cut, run)
    # histograms(ds)
    drift_correction(ds)

def main():
    """
    perform automatic calibration of pygama DataSets.
    command line options to specify the DataSet are the same as in processing.py
    save results in a JSON database for access by other routines.
    """
    par = argparse.ArgumentParser(description="calibration suite for tumbsi")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-s", "--spec", action=st, help="print simple spectrum")
    arg("-sc", "--cal", action=st, help="print calibrated spectrum")
    arg("-p0", "--pass0", action=st, help="run pass-0 (single peak) calibration")
    arg("-p1", "--pass1", action=st, help="run pass-1 (linear) calibration")
    arg("-p2", "--pass2", action=st, help="run pass-2 (peakfit) calibration")
    arg("-e", "--etype", nargs=1, help="custom energy param (default is e_ftp)")
    arg("-t", "--test", action=st, help="set verbose (testing) output")
    arg("-w", "--writeDB", action=st, help="store results in DB")
    arg("-pr", "--printDB", action=st, help="print calibration results in DB")
    arg("-pa", "--path", nargs=1, help="set path to runDB.json file")
    arg("-db", "--db", nargs=1, help="path to runDB.json and calDB.json")
    arg("-sub", "--sub", nargs=1, help="number of subfiles")
    args = vars(par.parse_args())

    etype = args["etype"][0] if args["etype"] else "e_ftp"

    if args["db"]:
        path_to_files = args["db"][0]
        run_db, cal_db = path_to_files + "/runDB.json", path_to_files + "/calDB.json"

    # -- declare the DataSet --
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, 1, md=run_db, cal=cal_db, v=args["test"])

    if args["run"]:
        run = int(args["run"][0])
        ds = DataSet(1, run, md=run_db, cal=cal_db, v=args["test"])

    fp = ds.paths[run]["t2_path"].split("t2")[0]
    t2_file = ds.get_t2df()

    if args["sub"]:
        subNumber = int(args["sub"][0])
        counter = 0
        for p, d, files in os.walk(ds.tier2_dir):
            for f in files:
                if any("{}-".format(r) in f for r in [run]):
                    if counter < subNumber:
                        t2_file = t2_file.append(pd.read_hdf(fp + f))
                        counter += 1
                        print("Whaat")

    if args["spec"]:
        his = t2_file.hist("e_ftp", bins=2000)
        plt.yscale('log')
        plt.savefig(path_to_files + 'plots/Raw.png', bbox_inches='tight',
                    transparent=True)
        plt.show()

    if args["pass0"]:
        calibrate_pass0(ds, t2_file, etype, args["writeDB"])

    if args["pass1"]:
        calibrate_pass1(ds, t2_file, etype, args["writeDB"], args["test"])

    if args["pass2"]:
        calibrate_pass2(ds, t2_file, run, cal_db, run_db, args["writeDB"])

    if args["printDB"]:
        show_calDB(cal_db)

    if args["cal"]:
        show_calspectrum(ds, t2_file, cal_db, etype, run, args["pass1"], args["pass2"])

def tier1_wfs():
    """
    show some waveforms, with an example of a data cleaning cut.
    """
    run = 42
    iwf_max = 100000  # tier 1 files can be a lot to load into memory

    ds = DataSet(run=run, md="runDB.json")
    ft1 = ds.paths[run]["t1_path"]
    t1df = pd.read_hdf(ft1, "ORSIS3302DecoderForEnergy", where=f"ievt < {iwf_max}")
    t1df.reset_index(inplace=True)  # required step -- fix pygama "append" bug

    # get waveform dataframe
    wf_cols = []
    for col in t1df.columns:
        if isinstance(col, int):
            wf_cols.append(col)
    wfs = t1df[wf_cols]

    # apply a cut based on the t1 columns
    # idx = t1df.index[(t1df.energy > 1.5e6) & (t1df.energy < 2e6)]

    # apply a cut based on the t2 columns
    ft2 = ds.paths[run]['t2_path']
    t2df = pd.read_hdf(ft2, where=f"ievt < {iwf_max}")
    t2df.reset_index(inplace=True)

    # t2df['AoE'] = t2df.current_max / t2df.e_ftp  # scipy method
    t2df['AoE'] = t2df.atrap_max / t2df.e_ftp  # trapezoid method

    idx = t2df.index[(t2df.AoE < 0.7)
                     & (t2df.e_ftp > 1000) & (t2df.e_ftp < 10000)
                     & (t2df.index < iwf_max)]

    wfs = wfs.loc[idx]
    wf_idxs = wfs.index.values  # kinda like a TEntryList

    # make sure the cut output makes sense
    cols = ['ievt', 'timestamp', 'energy', 'e_ftp', 'atrap_max', 'current_max',
            't0', 't_ftp', 'AoE', 'tslope_pz', 'tail_tau']
    print(t2df.loc[idx][cols].head())
    print(t1df.loc[idx].head())
    print(wfs.head())

    # iterate over the waveform block
    iwf = -1
    while True:
        if iwf != -1:
            inp = input()
            if inp == "q":
                exit()
            if inp == "p":
                iwf -= 2
        iwf += 1
        iwf_cut = wf_idxs[iwf]

        # get waveform and dsp values
        wf = wfs.iloc[iwf]
        dsp = t2df.iloc[iwf_cut]
        ene = dsp.e_ftp
        aoe = dsp.AoE
        ts = np.arange(len(wf))

        # nice horizontal print of a pd.Series
        print(iwf, iwf_cut)
        print(wf.to_frame().T)
        print(t2df.iloc[iwf_cut][cols].to_frame().T)

        plt.cla()
        plt.plot(ts, wf, "-b", alpha=0.9, label=f'e: {ene:.1f}, a/e: {aoe:.1f}')

        # savitzky-golay smoothed
        # wfsg = signal.savgol_filter(wf, 47, 2)
        wfsg = signal.savgol_filter(wf, 47, 1)
        plt.plot(ts, wfsg, "-r", label='savitzky-golay filter')

        plt.xlabel("clock ticks", ha='right', x=1)
        plt.ylabel("ADC", ha='right', y=1)
        plt.legend()
        plt.tight_layout()
        plt.show(block=False)
        plt.pause(0.01)

import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from pygama import DataSet

with open("runDB.json") as f:
    runDB = json.load(f)
tier_dir = runDB["tier_dir"]

ds0 = DataSet(runlist=[554], md='./runDB.json', tier_dir=tier_dir)
t2df_0 = ds0.get_t2df()
ds1 = DataSet(runlist=[555], md='./runDB.json', tier_dir=tier_dir)
t2df_1 = ds1.get_t2df()
ds2 = DataSet(runlist=[556], md='./runDB.json', tier_dir=tier_dir)
t2df_2 = ds2.get_t2df()

e_0 = t2df_0["energy"]
e_1 = t2df_1["energy"]
e_2 = t2df_2["energy"]

e_full = [0, 3.3e6]
e_pks = [1.2e6, 2.6e6]
e_K = [1.3e6, 1.36e6]
e_T = [2.35e6, 2.42e6]

h_0, edg_0 = np.histogram(e_0, bins=5000, range=e_full)
x_0 = (edg_0[:-1] + edg_0[1:]) / 2
# h_0_K, edg_0_K = np.histogram(e_0, bins=500, range=e_K)

def histograms(run):
    ds = DataSet(runlist=[run], md='./runDB.json', tier_dir=tier_dir)
    t2 = ds.get_t2df()

    t2df = os.path.expandvars('{}/Spectrum_{}.hdf5'.format(meta_dir, run))
    t2df = pd.read_hdf(t2df, key="df")

    # n = "tslope_savgol"
    # n = "current_max"
    # n = "tslope_pz"
    n = "tail_tau"
    # n = "tail_amp"
    e = "e_cal"
    x = t2df[e]
    # y = t2df[n]
    y = t2df[n] / x

    plt.clf()
    # H, xedges, yedges = np.histogram2d(t2df["tail_tau"], t2df["e_ftp"],
    #                                    bins=[2000, 200], range=[[0, 6600], [0, 5]])
    plt.hist2d(x, y, bins=[1000, 200], range=[[0, 200], [0, .001]],
               norm=LogNorm(), cmap='jet')
    # plt.hist2d(x, y, bins=[1000, 1000], norm=LogNorm())
    # plt.scatter(H[0], H[1])

    # f = plt.figure(figsize=(20, 5))
    # p1 = f.add_subplot(111, title='Test', xlabel='Energy (keV)', ylabel=n)
    # h1, xedg1, yedg1 = np.histogram2d(x, y, bins=[1000, 200], range=[[0, 2000], [0, 100]])
    # h1 = h1.T
    # # hMin, hMax = np.amin(h1), np.amax(h1)
    # # im1 = p1.imshow(h1, cmap='jet', vmin=hMin, vmax=hMax, aspect='auto')  # norm=LogNorm()
    # im1 = p1.imshow(h1, cmap='jet', origin='lower', aspect='auto', norm=LogNorm(),
    #                 extent=[xedg1[0], xedg1[-1], yedg1[0], yedg1[-1]])
    # cb1 = f.colorbar(im1, ax=p1)  # , fraction=0.037, pad=0.04

    cbar = plt.colorbar()

    # plt.xscale('symlog')
    # plt.yscale('symlog')
    plt.title("Run {}".format(run))
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel(n, ha='right', y=1)
    # cbar.ax.set_ylabel('Counts')
    # plt.ylabel("tslope_savgol", ha='right', y=1)
    # plt.ylabel("A/E_ftp", ha='right', y=1)
    # plt.tight_layout()
    # plt.savefig('./plots/meeting_plots/run{}_{}_vs_{}.png'.format(run, n, e))
    # plt.show()

    # xlo, xhi, xpb = 0, 10000, 10
    # xP, hP = get_hist(t2df["trap_max"], xlo, xhi, xpb)
    #
    # plt.plot(xP, hP, ls='steps', lw=1.5, c='m',
    #          label="pygama trap_max, {} cts".format(sum(hP)))
    # plt.xlabel("Energy (uncal)", ha='right', x=1)
    # plt.ylabel("Counts", ha='right', y=1)
    # plt.legend()

    plt.tight_layout()
    plt.show()

testDB = json.load(f)

print("-- Top-level information -- ")
for key in testDB:
    if not isinstance(testDB[key], dict):
        print(key, ":", testDB[key])

print("-- Data set definitions -- ")
pprint(testDB["ds"])

try:
    xrun = int(sys.argv[1])
except:
    print("You have to give a run number as an argument!")
    exit(0)

ds = DataSet(run=xrun, md=db_file, v=True)  # can also use a list of run numbers

# print some of the DataSet attributes
print("raw dir : ", ds.raw_dir)
print("tier1 dir : ", ds.tier1_dir)
print("t1 file prefix :", ds.t1pre)
print("t2 file prefix :", ds.t2pre)
print("current run list :", ds.runs)
print("current file paths :")
pprint(ds.paths)
print("IF YOUR t0_path IS EMPTY, CROSS-CHECK $DATADIR AND FILE NAMING")

"""
Show waveforms from the Tier 1 file.
NOTE: pygama.DataSet has a convenience function "get_t1df",
but it is undeveloped.
"""

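# A minimal sketch of the note above: since DataSet.get_t1df is undeveloped,
# the tier 1 file can be read directly with pandas, as the waveform-viewer
# function elsewhere in these scripts does. The decoder key and the small
# ievt window are assumptions borrowed from that example.
import pandas as pd

ft1 = ds.paths[xrun]["t1_path"]
t1df = pd.read_hdf(ft1, "ORSIS3302DecoderForEnergy", where="ievt < 1000")
t1df.reset_index(inplace=True)
print(t1df.head())
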
def calibrate():
    """
    do a rough energy calibration
    "automatic": based on finding ratios
    """
    from scipy.signal import medfilt, find_peaks_cwt
    from scipy.stats import linregress

    pks_lit = [239, 911, 1460.820, 1764, 2614.511]

    # ds = DataSet(11, md='./runDB.json', tier_dir=tier_dir)
    ds = DataSet(run=204, md='./runDB.json', tier_dir=tier_dir)
    t2df = ds.get_t2df()
    rt = ds.get_runtime() / 3600  # hrs
    ene = t2df["e_ftp"]

    xlo, xhi, xpb = 0, 10000, 10  # damn, need to remove the overflow peak
    nbins = int((xhi - xlo) / xpb)
    hE, xE, _ = get_hist(ene, nbins, (xlo, xhi))
    # xE, hE = get_hist(ene, xlo, xhi, xpb)

    # -- pygama's cal routine needs some work ... --
    # need to manually remove the overflow peak?
    # data_peaks = get_most_prominent_peaks(ene, xlo, xhi, xpb, test=True)
    # ene_peaks = get_calibration_energies("uwmjlab")
    # ene_peaks = get_calibration_energies("th228")
    # best_m, best_b = match_peaks(data_peaks, ene_peaks)
    # ecal = best_m * t2df["trap_max"] + best_b

    # -- test out a rough automatic calibration here --
    npks = 15
    hE_med = medfilt(hE, 21)
    hE_filt = hE - hE_med
    pk_width = np.arange(1, 10, 0.1)
    pk_idxs = find_peaks_cwt(hE_filt, pk_width, min_snr=5)
    pks_data = xE[pk_idxs]
    pk_counts = hE[pk_idxs]
    idx_sorted = np.argsort(pk_counts)
    pk_idx_max = pk_idxs[idx_sorted[-npks:]]
    pks_data = np.sort(xE[pk_idx_max])

    r0 = pks_lit[4] / pks_lit[2]

    # this is pretty ad hoc, should use more of the match_peaks function
    found_match = False
    for pk1 in pks_data:
        for pk2 in pks_data:
            r = pk1 / pk2
            if np.fabs(r - r0) < 0.005:
                print("found match to peak list:\n    "
                      "r0 {:.3f}  r {:.3f}  pk1 {:.0f}  pk2 {:.0f}".format(
                          r0, r, pk1, pk2))
                found_match = True
                # be careful, there might be more than one
                break
        if found_match:
            break

    # # check uncalibrated spectrum
    # plt.plot(xE, hE, ls='steps', lw=1, c='b')
    # # plt.plot(xE, hE_filt, ls='steps', lw=1, c='b')
    # # for pk in pks_data:
    # #     plt.axvline(pk, color='r', lw=1, alpha=0.6)
    # plt.axvline(pk1, color='r', lw=1)
    # plt.axvline(pk2, color='r', lw=1)
    # plt.show()
    # exit()

    # two-point calibration
    data = np.array(sorted([pk1, pk2]))
    lit = np.array([pks_lit[2], pks_lit[4]])
    m, b, _, _, _ = linregress(data, y=lit)
    print("Paste this into runDB.json:\n   ", m, b)
    # err = np.sum((lit - (m * data + b))**2)

    # plt.plot(data, lit, '.b', label="E = {:.2e} x + {:.2e}".format(m, b))
    # xf = np.arange(data[0], data[1], 1)
    # plt.plot(xf, m * xf + b, "-r")
    # plt.legend()
    # plt.show()

    # apply calibration
    ecal = m * ene + b

    # check calibrated spectrum
    xlo, xhi, xpb = 0, 3000, 1
    hC, xC, _ = get_hist(ecal, int((xhi - xlo) / xpb), (xlo, xhi))
    hC = np.concatenate((hC, [0]))  # FIXME: annoying - have to add an extra zero

    plt.semilogy(xC, hC / rt, c='b', ls='steps', lw=1,
                 label="MJ60 data, {:.2f} hrs".format(rt))
    plt.axvline(pks_lit[2], c='r', lw=3, alpha=0.7, label="40K, 1460.820 keV")
    plt.axvline(pks_lit[4], c='m', lw=3, alpha=0.7, label="208Tl, 2614.511 keV")
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("Counts / hr / {:.2f} keV".format(xpb), ha='right', y=1)
    plt.legend()
    plt.tight_layout()
    # plt.show()
    plt.savefig("./plots/surface_spec.pdf")
    # exit()

    # check low-e spectrum
    plt.figure()
    xlo, xhi, xpb = 0, 50, 0.1
    hC, xC, _ = get_hist(ecal, int((xhi - xlo) / xpb), (xlo, xhi))
    hC = np.concatenate((hC, [0]))  # FIXME: annoying - have to add an extra zero

    plt.plot(xC, hC, c='b', ls='steps', lw=1, label="Kr83 data")
    plt.axvline(9.4057, color='r', lw=1.5, alpha=0.6, label="9.4057 keV")  # kr83 line
    plt.axvline(12.651, color='g', lw=1.5, alpha=0.6, label="12.651 keV")  # kr83 line
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("Counts / {:.2f} keV".format(xpb), ha='right', y=1)
    plt.legend()
    plt.tight_layout()
    # plt.show()
    plt.savefig("./plots/test_kr83_cal.pdf")

def resolution():
    """
    fit the 208Tl 2615 keV peak and give me the resolution
    test out pygama's peak fitting routines
    """
    ds_num = 11
    ds = DataSet(ds_num, md='./runDB.json', tier_dir=tier_dir)
    t2df = ds.get_t2df()
    ene = t2df["energy"].values
    rt = ds.get_runtime() / 3600  # hrs

    # apply calibration
    cal = runDB["cal_onboard"][str(ds_num)]
    m, b = cal[0], cal[1]
    ene = m * ene + b

    # zoom in to the area around the 2615 peak
    xlo, xhi, xpb = 2565, 2665, 0.5
    ene2 = ene[np.where((ene > xlo) & (ene < xhi))]
    xE, hE = get_hist(ene, xlo, xhi, xpb)

    # set peak bounds
    guess_ene = 2615
    guess_sig = 5
    idxpk = np.where((xE > guess_ene - guess_sig) & (xE < guess_ene + guess_sig))
    guess_area = np.sum(hE[idxpk])

    # radford_peak function pars: mu, sigma, hstep, htail, tau, bg0, a
    p0 = [guess_ene, guess_sig, 1E-3, 0.7, 5, 0, guess_area]
    bnd = [[0.9 * guess_ene, 0.5 * guess_sig, 0, 0, 0, 0, 0],
           [1.1 * guess_ene, 2 * guess_sig, 0.1, 0.75, 10, 10, 5 * guess_area]]
    pars = fit_binned(radford_peak, hE, xE, p0)  # , bounds=bnd

    print("mu:", pars[0], "\n",
          "sig:", pars[1], "\n",
          "hstep:", pars[2], "\n",
          "htail:", pars[3], "\n",
          "tau:", pars[4], "\n",
          "bg0:", pars[5], "\n",
          "a:", pars[6])

    plt.plot(xE, hE, c='b', ls='steps', lw=1,
             label="MJ60 data, {:.2f} hrs".format(rt))
    plt.plot(xE, radford_peak(xE, *pars), color="r", alpha=0.7,
             label=r"Radford peak, $\sigma$={:.2f} keV".format(pars[1]))
    plt.axvline(2614.511, color='r', alpha=0.6, lw=1, label=r"$E_{lit}$=2614.511")
    plt.axvline(pars[0], color='g', alpha=0.6, lw=1, label=r"$E_{fit}$=%.3f" % (pars[0]))
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("Counts / {:.2f} keV".format(xpb), ha='right', y=1)
    plt.legend()
    plt.tight_layout()
    # plt.show()
    plt.savefig("./plots/kr83_resolution.pdf")

def get_multiple_spectra():
    # energy (onboard)
    # xlo, xhi, xpb = 0, 2000000, 1000
    # xlo, xhi, xpb = 0, 500000, 1000
    # xlo, xhi, xpb = 0, 50000, 100

    # energy (onboard, calibrated)
    xlo, xhi, xpb = 0, 40, 0.1

    # trap_max
    # xlo, xhi, xpb = 0, 10000, 10
    # xlo, xhi, xpb = 0, 300, 0.3
    # xlo, xhi, xpb = 0, 80, 0.2
    # xlo, xhi, xpb = 0, 40, 0.1

    # ds = DataSet(run=147, md='./runDB.json', tier_dir=tier_dir)

    # get calibration
    cal = runDB["cal_onboard"]["11"]
    m, b = cal[0], cal[1]

    ds = DataSet(10, md='./runDB.json', tier_dir=tier_dir)
    rt1 = ds.get_runtime() / 3600
    t2df = ds.get_t2df()
    ene1 = m * t2df["energy"] + b
    x, h1 = get_hist(ene1, xlo, xhi, xpb)
    # x, h1 = get_hist(t2df["trap_max"], xlo, xhi, xpb)
    h1 = np.divide(h1, rt1)

    ds2 = DataSet(11, md='./runDB.json', tier_dir=tier_dir)
    t2df2 = ds2.get_t2df()
    rt2 = ds2.get_runtime() / 3600
    ene2 = m * t2df2["energy"] + b
    x, h2 = get_hist(ene2, xlo, xhi, xpb)
    # x, h2 = get_hist(t2df2["trap_max"], xlo, xhi, xpb)
    h2 = np.divide(h2, rt2)

    plt.figure(figsize=(7, 5))
    plt.plot(x, h1, ls='steps', lw=1, c='b', label="bkg, {:.2f} hrs".format(rt1))
    plt.plot(x, h2, ls='steps', lw=1, c='r', label="Kr83, {:.2f} hrs".format(rt2))
    plt.axvline(9.4057, color='m', lw=2, alpha=0.4, label="9.4057 keV")  # kr83 line
    plt.axvline(12.651, color='g', lw=2, alpha=0.4, label="12.651 keV")  # kr83 line
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("Counts / hr / {:.2f} keV".format(xpb), ha='right', y=1)
    plt.legend()
    plt.tight_layout()
    # plt.show()
    # plt.savefig("./plots/krSpec_{:.0f}_{:.0f}_onboard.pdf".format(xlo, xhi))
    # plt.savefig("./plots/krSpec_{:.0f}_{:.0f}_uncal.pdf".format(xlo, xhi))
    plt.savefig("./plots/krSpec_{:.0f}_{:.0f}_cal.pdf".format(xlo, xhi))

def get_spectra():
    with open("runDB.json") as f:
        runDB = json.load(f)
    tier_dir = runDB["tier_dir"]

    ds = DataSet(runlist=[555], md='./runDB.json', tier_dir=tier_dir)
    t2df = ds.get_t2df()
    t2df = t2df.loc[t2df.e_ftp > 500]  # low energy cut
    # print(t2df.columns)
    # print(t2df)
    # exit()

    # 4 to 36 pF variable cap
    rise_time = t2df["tp90"] - t2df["tp10"]

    ds2 = DataSet(runlist=[556], md='./runDB.json', tier_dir=tier_dir)
    t2df_2 = ds2.get_t2df()
    t2df_2 = t2df_2.loc[t2df_2.e_ftp > 500]
    rise_time2 = t2df_2["tp90"] - t2df_2["tp10"]

    ds3 = DataSet(runlist=[554], md='./runDB.json', tier_dir=tier_dir)
    t2df_3 = ds3.get_t2df()
    t2df_3 = t2df_3.loc[t2df_3.e_ftp > 500]
    rise_time3 = t2df_3["tp90"] - t2df_3["tp10"]

    xlo, xhi, xpb = 0., 500., 1
    hP, xP, _ = get_hist(rise_time, range=(xlo, xhi), dx=xpb)
    hP2, xP2, _ = get_hist(rise_time2, range=(xlo, xhi), dx=xpb)
    hP3, xP3, _ = get_hist(rise_time3, range=(xlo, xhi), dx=xpb)

    # Note to self: for risetime histograms, use similar to above, but replace
    # the first parameter with rise_time!
    plt.semilogy(xP[:-1] * 0.423, hP, ls='steps', lw=1.5, c='k',
                 label="Rise Time, Preamp 1".format(sum(hP)))
    # hist = plt.hist(rise_time, bins=1000)
    plt.semilogy(xP2[:-1] * 0.423, hP2, ls='steps', lw=1.5, c='c',
                 label="Rise Time, Preamp 2".format(sum(hP)))
    plt.semilogy(xP3[:-1] * 0.423, hP3, ls='steps', lw=1.5, c='0.5',
                 label="Rise Time, Preamp 0".format(sum(hP)))
    plt.xlabel("Rise Time", ha='right', x=1)
    plt.ylabel("Counts", ha='right', y=1)
    plt.legend()
    plt.tight_layout()
    plt.show()
    plt.savefig("Rise Time Comparison")

def main():
    """
    tumbsi analysis suite
    """
    global display
    display = 1  # allow displaying intermediate distributions for control

    run_db, cal_db = "runDB.json", "calDB.json"

    with open(run_db) as f:
        runDB = json.load(f)

    global tier_dir
    tier_dir = runDB["tier_dir"]
    global meta_dir
    meta_dir = runDB["meta_dir"]
    global dep_line
    dep_line = 1592.5
    global dep_acc
    dep_acc = 0.9

    peaks_of_interest = sorted(runDB["peaks_of_interest"], reverse=True)

    # take calibration parameters from the 'calibration.py' output
    with open(cal_db) as f:
        calDB = json.load(f)

    par = argparse.ArgumentParser(description="calibration suite for tumbsi")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    args = vars(par.parse_args())

    ecal = np.zeros(3)
    ecal[0] = calDB["cal_pass1"]["1"]["p1cal"]
    if "cal_pass2" in calDB:
        ecal[1] = calDB["cal_pass2"]["1"]["p2acal"]
        ecal[2] = calDB["cal_pass2"]["1"]["p2bcal"]

    eres = np.zeros(2)
    if "eres_curve" in calDB:
        eres[0] = calDB["eres_curve"]["1"]["acal"]
        eres[1] = calDB["eres_curve"]["1"]["bcal"]
    else:
        print("You must run a calibration to get the energy resolution curve. Exit.")
        sys.exit()

    # which run number is being analyzed
    if args["ds"]:
        ds_lo = int(args["ds"][0])
        try:
            ds_hi = int(args["ds"][1])
        except:
            ds_hi = None
        ds = DataSet(ds_lo, ds_hi, md=run_db, cal=cal_db)
        run = ds_lo

    if args["run"]:
        ds = DataSet(run=int(args["run"][0]), md=run_db, cal=cal_db)

    print("")
    print("Start Pulse Shape Analysis")
    print("")

    psa(run, ds, ecal, eres, peaks_of_interest)

def process_data():
    from pygama import DataSet
    ds = DataSet(0, md="config.json")
    ds.daq_to_raw(overwrite=True, test=False)

def psa(run, dataset, ecal, eres, peaks_of_interest):
    # ds = DataSet(runlist=[191], md='./runDB.json', tier_dir=tier_dir)
    ds = DataSet(ds_lo=0, md='./runDB.json', tier_dir=tier_dir)
    t2 = ds.get_t2df()
    # t2df = os.path.expandvars('{}/Spectrum_{}.hdf5'.format(meta_dir, run))
    # t2df = pd.read_hdf(t2df, key="df")
    # t2df = t2df.reset_index(drop=True)
    t2 = t2.reset_index(drop=True)

    print("  Energy calibration:")
    cal = ecal[0] * np.asarray(t2["e_ftp"])
    print("  -> 1st pass linear energy calibration done")
    if ecal[1]:  # apply the 2nd-pass correction only if those parameters were set
        cal = cal / ecal[1] - ecal[2]
        print("  -> 2nd pass linear energy calibration done")

    n = "current_max"
    e = "e_cal"
    e_over_unc = cal / np.asarray(t2["e_ftp"])
    aoe0 = np.asarray(t2[n])

    print("  Apply quality cuts")
    Nall = len(cal)
    bl0 = np.asarray(t2["bl0"])
    bl1 = np.asarray(t2["bl1"])
    e_over_unc = e_over_unc[(bl1 - bl0) < 2]
    cal = cal[(bl1 - bl0) < 2]
    aoe0 = aoe0[(bl1 - bl0) < 2]
    Nqc_acc = len(cal)
    print("  -> Total number of events: ", Nall)
    print("  -> After quality cuts    : ", Nqc_acc)
    print("  -> Quality cuts rejection: ", 100 * float(Nqc_acc) / float(Nall), "%")

    aoe = aoe0 * e_over_unc / cal

    print("  Compute AoE normalization curve")
    aoe_norm = AoEcorrection(cal, aoe)
    print("  -> parameters (a x E + b):", aoe_norm[0], aoe_norm[1])
    aoe = aoe / (aoe_norm[0] * cal + aoe_norm[1])

    print("  Find the low-side A/E cut for ", 100 * dep_acc, "% 208Tl DEP acceptance")
    cut = get_aoe_cut(cal, aoe, dep_line, eres)
    print("  -> cut: ", '{:1.3f}'.format(cut))
    if cut == 0:
        print("  -> cut not found. Exit.")
        sys.exit()

    print("  Compute energy spectrum after A/E cut")
    cal_cut = cal[aoe >= cut]

    print("  Compute survival fractions: ")
    sf = np.zeros(len(peaks_of_interest))
    sferr = np.zeros(len(peaks_of_interest))
    for i, peak in enumerate(peaks_of_interest):
        sf[i], sferr[i] = get_sf(cal, aoe, cut, peak, eres)
        print("  -> ", peak, '{:2.1f}'.format(100. * sf[i]), " +/- ",
              '{:2.1f}'.format(100. * sferr[i]), "%")

    print("  Display histograms")
    plt.figure(2)
    plt.hist2d(cal, aoe, bins=[2000, 400], range=[[0, 3000], [0, 1.5]],
               norm=LogNorm(), cmap='jet')
    cbar = plt.colorbar()
    plt.title("Dataset {}".format(dataset))
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("A/E (a.u.)", ha='right', y=1)
    cbar.ax.set_ylabel('Counts')
    plt.tight_layout()
    plt.savefig('./plots/aoe_versus_energy.pdf', bbox_inches='tight',
                transparent=True)
    plt.show()

    plt.figure(3)
    hist, bins = np.histogram(cal, bins=3000, range=[0, 3000])
    hist1, bins1 = np.histogram(cal_cut, bins=3000, range=[0, 3000])
    plt.clf()
    plt.plot(bins[1:], hist, color='black', ls="steps", linewidth=1.5,
             label='all events')
    plt.plot(bins1[1:], hist1, '-r', ls="steps", linewidth=1.5,
             label='after A/E cut')
    plt.ylabel('Counts', ha='right', y=1)
    plt.xlabel('Energy (keV)', ha='right', x=1)
    plt.legend(title='Calibrated Energy')
    plt.yscale('log')
    plt.savefig('./plots/calEnergy_spectrum_after_psa.pdf', bbox_inches='tight',
                transparent=True)
    plt.show()

    print("")
    print("  Normal termination")
    print("")