Example #1
def write_harpy_output(main_input, harpy_data_frame, headers, spectrum, plane):
    output_file = get_outpath_with_suffix(
        main_input.file, main_input.outputdir, ".lin" + plane
    )
    tfs_pandas.write_tfs(output_file, harpy_data_frame, headers)
    if not main_input.skip_files:
        _write_full_spectrum(main_input, spectrum, plane)
Example #2
def clean_tunes(files, limit=DEF_LIMIT):
    for file in files:
        file_df = tfs_pandas.read_tfs(file)
        mask = _get_mask(file_df, limit)
        file_df = file_df.loc[mask, :]
        _recompute_tune_stats(file_df)
        tfs_pandas.write_tfs(file, file_df)
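The helpers _get_mask and _recompute_tune_stats are not shown in this listing. As a rough, hypothetical sketch only, a mask of this kind could keep the BPMs whose measured tune lies within `limit` of the file's mean tune, and the header update could mirror what remove_bpms_from_file does further down; the column and header names below follow the .lin format used in the other examples, not the original helpers:

import numpy as np

def _example_tune_mask(file_df, limit, plane="X"):
    # Hypothetical stand-in for _get_mask: keep rows whose TUNEX/TUNEY value
    # deviates from the file's mean tune by less than `limit`.
    tunes = file_df["TUNE" + plane]
    return (tunes - tunes.mean()).abs() < limit

def _example_recompute_tune_stats(file_df, plane="X", plane_number="1"):
    # Hypothetical stand-in for _recompute_tune_stats: refresh the Q1/Q1RMS
    # headers from the rows that survived the cut (cf. remove_bpms_from_file).
    file_df.headers["Q" + plane_number] = file_df["TUNE" + plane].mean()
    file_df.headers["Q" + plane_number + "RMS"] = np.std(file_df["TUNE" + plane])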
Example #3
def _write_coupling_diff_file(meas_path, meas, model):
    LOG.debug("Calculating coupling diff.")
    tw = pd.merge(meas.coupling, model, how='inner', on='NAME')
    out_columns = ['NAME', 'S']
    for rdt in ['F1001', 'F1010']:
        tw[rdt + 're'] = tw.loc[:, rdt + 'R']
        tw[rdt + 'im'] = tw.loc[:, rdt + 'I']
        tw[rdt + 'e'] = tw.loc[:, 'FWSTD1']
        tw[rdt + 're_m'] = np.real(tw.loc[:, rdt + '_c'])
        tw[rdt + 'im_m'] = np.imag(tw.loc[:, rdt + '_c'])
        tw[rdt + 're_prediction'] = tw.loc[:, rdt + 're'] - tw.loc[:, rdt + 're_m']
        tw[rdt + 'im_prediction'] = tw.loc[:, rdt + 'im'] - tw.loc[:, rdt + 'im_m']
        tw[rdt + 'W_prediction'] = np.sqrt(
            np.square(tw[rdt + 're_prediction']) +
            np.square(tw[rdt + 'im_prediction']))

        out_columns += [
            rdt + 're', rdt + 'im', rdt + 'e', rdt + 're_m', rdt + 'im_m',
            rdt + 'W', rdt + 'W_prediction', rdt + 're_prediction',
            rdt + 'im_prediction'
        ]

    tw['in_use'] = 1
    out_columns += ['in_use']
    write_tfs(join(meas_path, 'couple.out'), tw.loc[:, out_columns])
Example #4
def write_bad_bpms(first_file, bad_bpms_to_write):
    meas_dir = os.path.abspath(os.path.join(first_file, os.pardir))
    for plane in PLANE:
        bad_bpms_summary_path = os.path.join(
            meas_dir, "bad_bpms_iforest_{}.tfs".format(plane))
        tfs_pandas.write_tfs(bad_bpms_summary_path, bad_bpms_to_write[plane])
    LOGGER.info("Bad BPMs summary from Isolation Forest written to: %s",
                meas_dir)
Example #5
def _write_table(results, output):
    rows = [[
        BEAM_STR[beam], SIDE_STR[side], results[(beam, side, HOR)][0],
        results[(beam, side, HOR)][1], results[(beam, side, VER)][0],
        results[(beam, side, VER)][1]
    ] for beam in BEAMS for side in SIDES]
    data = pd.DataFrame(data=rows,
                        columns=("BEAM", "SIDE", "OFFSETX", "ERROFFSETX",
                                 "OFFSETY", "ERROFFSETY"))
    tfs_pandas.write_tfs(output, data)
Example #6
def _write_full_spectrum(main_input, spectrum, plane):
    spectr_amps_files = get_outpath_with_suffix(
        main_input.file, main_input.outputdir, ".amps" + plane
    )
    amps_df = spectrum["COEFS"].abs().T
    tfs_pandas.write_tfs(spectr_amps_files, amps_df)
    spectr_freqs_files = get_outpath_with_suffix(
        main_input.file, main_input.outputdir, ".freqs" + plane
    )
    freqs_df = spectrum["FREQS"].T
    tfs_pandas.write_tfs(spectr_freqs_files, freqs_df)
Example #7
def remove_bpms_from_file(path, bad_bpm_names):
    # copy and rename the original file
    src_dir = os.path.abspath(os.path.join(path, os.pardir))
    filename = os.path.basename(path)
    new_filename = os.path.join(src_dir, filename + ".notcleaned")
    os.rename(path, new_filename)
    # take the content of the renamed file, remove the bad BPMs and write a
    # new file under the original name
    original_file_tfs = tfs_pandas.read_tfs(new_filename).set_index("NAME",
                                                                    drop=False)
    original_file_tfs = original_file_tfs.loc[
        ~original_file_tfs.index.isin(bad_bpm_names)]
    tfs_pandas.write_tfs(path, original_file_tfs)
Example #8
def clean_files(list_of_files, replace=False):
    for filepath in list_of_files:
        try:
            df = tfs.read_tfs(filepath)
            LOG.info("Read file {:s}".format(filepath))
        except (IOError, tfs.TfsFormatError):
            LOG.info("Skipped file {:s}".format(filepath))
        else:
            df = df.dropna(axis='index')
            if not replace:
                filepath += ".dropna"
            tfs.write_tfs(filepath, df)
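An illustrative call (the paths are made up) that drops NaN rows from two measurement files and writes the cleaned copies with a ".dropna" suffix:

clean_files(["results/getphasex.out", "results/getbetay.out"], replace=False)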
Example #9
def _write_beta_diff_file(meas_path, meas, model, plane):
    LOG.debug("Calculating beta diff.")
    up = plane.upper()
    tw = pd.merge(meas.beta[plane], model, how='inner', on='NAME')
    tw['MEA'] = ((tw.loc[:, 'BET' + up] - tw.loc[:, 'BET' + up + 'MDL']) /
                 tw.loc[:, 'BET' + up + 'MDL'])
    tw['ERROR'] = tw.loc[:, 'ERRBET' + up] / tw.loc[:, 'BET' + up + 'MDL']
    tw['MODEL'] = (
        (tw.loc[:, 'BET' + up + '_c'] - tw.loc[:, 'BET' + up + '_n']) /
        tw.loc[:, 'BET' + up + '_n'])
    tw['EXPECT'] = tw['MEA'] - tw['MODEL']
    write_tfs(join(meas_path, 'bb' + plane + '.out'),
              tw.loc[:, ['NAME', 'S', 'MEA', 'ERROR', 'MODEL', 'EXPECT']])
Example #10
def _write_disp_diff_file(meas_path, meas, model, plane):
    LOG.debug("Calculating dispersion diff.")
    try:
        up = plane.upper()
        tw = pd.merge(meas.disp[plane], model, how='inner', on='NAME')
        tw['MEA'] = tw.loc[:, 'D' + up] - tw.loc[:, 'D' + up + 'MDL']
        tw['ERROR'] = tw.loc[:, 'STDD' + up]
        tw['MODEL'] = tw.loc[:, 'D' + up + '_c'] - tw.loc[:, 'D' + up + '_n']
        tw['EXPECT'] = tw['MEA'] - tw['MODEL']
        write_tfs(join(meas_path, 'd' + plane + '.out'),
                  tw.loc[:, ['NAME', 'S', 'MEA', 'ERROR', 'MODEL', 'EXPECT']])
    except IOError:
        LOG.debug("Dispersion measurements not found. Skipped.")
Example #11
def _write_norm_disp_diff_file(meas_path, meas, model):
    LOG.debug("Calculating normalized dispersion diff.")
    try:
        tw = pd.merge(meas.norm_disp, model, how='inner', on='NAME')
        tw['MEA'] = tw.loc[:, 'NDX'] - tw.loc[:, 'NDXMDL']
        tw['ERROR'] = tw.loc[:, 'STDNDX']
        tw['MODEL'] = (tw.loc[:, 'DX_c'] / np.sqrt(tw.loc[:, 'BETX_c']) -
                       tw.loc[:, 'DX_n'] / np.sqrt(tw.loc[:, 'BETX_n']))
        tw['EXPECT'] = tw['MEA'] - tw['MODEL']
        write_tfs(join(meas_path, 'ndx.out'),
                  tw.loc[:, ['NAME', 'S', 'MEA', 'ERROR', 'MODEL', 'EXPECT']])
    except IOError:
        LOG.debug("Normalized dispersion measurements not found. Skipped.")
Example #12
def _create_base_file(source_dir, source_file, meas, error, expect, outname):
    """ Copy Measurement into a base-file. """
    data = tfs_pandas.read_tfs(source_file)

    if error == "":
        new_data = data.loc[:, ["S", "NAME", meas]]
        new_data.columns = ["S", "NAME", expect]
    else:
        new_data = data.loc[:, ["S", "NAME", meas, error]]
        new_data.columns = ["S", "NAME", expect, error]

    path_out = os.path.join(source_dir, outname + BASE_ID)
    tfs_pandas.write_tfs(path_out, new_data)
    return path_out
Example #13
def _write_phase_diff_file(meas_path, meas, model, plane):
    LOG.debug("Calculating phase diff.")
    up = plane.upper()
    tw = pd.merge(meas.phase[plane], model, how='inner', on='NAME')
    tw['MEA'] = tw.loc[:, 'PHASE' + up]
    tw['ERROR'] = tw.loc[:, 'STDPH' + up]
    tw['MODEL'] = np.concatenate(
        (np.diff(tw.loc[:, 'MU' + up + '_c']), np.array([0.0])))
    tw['DIFF'] = tw.loc[:, 'PHASE' + up] - tw.loc[:, 'PH' + up + 'MDL']
    tw['DIFF_MDL'] = tw.loc[:, 'MODEL'] - tw.loc[:, 'PH' + up + 'MDL']
    tw['EXPECT'] = tw['DIFF'] - tw['DIFF_MDL']
    write_tfs(
        join(meas_path, 'phase' + plane + '.out'), tw.loc[tw.index[:-1], [
            'NAME', 'S', 'MEA', 'ERROR', 'MODEL', 'DIFF', 'DIFF_MDL', 'EXPECT'
        ]])
Example #14
def revert_forest_cleaning(files):
    """
    Reverts the cleaning: the backup files are renamed back to their original
    names (e.g. .linx.notcleaned --> .linx).
    :param files: comma-separated string of files from which the bad BPMs
        identified by iForest were removed
    """
    files_list = files.split(',')
    for path in files_list:
        src_dir = os.path.abspath(os.path.join(path, os.pardir))
        filename = os.path.basename(path)
        notcleaned_file = os.path.join(src_dir, filename + ".notcleaned")
        original_file_tfs = tfs_pandas.read_tfs(notcleaned_file).set_index(
            "NAME", drop=False)
        os.remove(path)
        lin_file = notcleaned_file.replace(".notcleaned", "")
        os.rename(notcleaned_file, lin_file)
        tfs_pandas.write_tfs(lin_file, original_file_tfs)
Example #15
def _write_chromatic_coupling_files(meas_path, cor_path):
    LOG.debug("Calculating chromatic coupling diff.")
    # TODO: Add Cf1010
    try:
        twiss_plus = read_tfs(join(split(cor_path)[0], TWISS_CORRECTED_PLUS),
                              index='NAME')
        twiss_min = read_tfs(join(split(cor_path)[0], TWISS_CORRECTED_MINUS),
                             index='NAME')
        deltap = np.abs(twiss_plus.DELTAP - twiss_min.DELTAP)
        plus = TwissOptics(twiss_plus,
                           quick_init=True).get_coupling(method='cmatrix')
        minus = TwissOptics(twiss_min,
                            quick_init=True).get_coupling(method='cmatrix')
        model = pd.merge(plus,
                         minus,
                         how='inner',
                         left_index=True,
                         right_index=True,
                         suffixes=('_p', '_m'))
        model['NAME'] = model.index.values
        if exists(join(meas_path, "chromcoupling_free.out")):
            meas = read_tfs(join(meas_path, "chromcoupling_free.out"))
        else:
            meas = read_tfs(join(meas_path, "chromcoupling.out"))
        tw = pd.merge(meas, model, how='inner', on='NAME')
        cf1001 = (tw.loc[:, 'F1001_p'] - tw.loc[:, 'F1001_m']) / deltap
        tw['Cf1001r_model'] = np.real(cf1001)
        tw['Cf1001i_model'] = np.imag(cf1001)
        tw['Cf1001r_prediction'] = tw.loc[:, 'Cf1001r'] - tw.loc[:, 'Cf1001r_model']
        tw['Cf1001i_prediction'] = tw.loc[:, 'Cf1001i'] - tw.loc[:, 'Cf1001i_model']
        write_tfs(
            join(meas_path, 'chromatic_coupling.out'), tw.loc[:, [
                'NAME', 'S', 'Cf1001r', 'Cf1001rERR', 'Cf1001i', 'Cf1001iERR',
                'Cf1001r_model', 'Cf1001i_model', 'Cf1001r_prediction',
                'Cf1001i_prediction'
            ]])
    except IOError:
        LOG.debug("Chromatic coupling measurements not found. Skipped.")
Example #16
def _write_beta_diff_file(meas_path, meas, model, plane, betafile):
    LOG.debug("Calculating beta diff.")
    if betafile == "getbeta":
        meas_beta = meas.beta[plane]
    elif betafile == "getampbeta":
        meas_beta = meas.amp_beta[plane]
    elif betafile == "getkmodbeta":
        meas_beta = meas.kmod_beta[plane]
    else:
        raise KeyError("Unknown beta file name '{}'.".format(betafile))

    up = plane.upper()
    tw = pd.merge(meas_beta, model, how='inner', on='NAME')
    tw['MEA'] = ((tw.loc[:, 'BET' + up] - tw.loc[:, 'BET' + up + 'MDL']) /
                 tw.loc[:, 'BET' + up + 'MDL'])
    tw['ERROR'] = tw.loc[:, 'ERRBET' + up] / tw.loc[:, 'BET' + up + 'MDL']
    tw['MODEL'] = (
        (tw.loc[:, 'BET' + up + '_c'] - tw.loc[:, 'BET' + up + '_n']) /
        tw.loc[:, 'BET' + up + '_n'])
    tw['EXPECT'] = tw['MEA'] - tw['MODEL']
    write_tfs(join(meas_path, 'bb' + plane + '.out'),
              tw.loc[:, ['NAME', 'S', 'MEA', 'ERROR', 'MODEL', 'EXPECT']])
Example #17
def _get_timber_data(beam, input, output):
    """ Return Timber data from input """
    LOG.debug("Getting timber data from '{}'".format(input))
    try:
        fill_number = int(input)
    except ValueError:
        fill = tfs.read_tfs(input, index=COL_TIME())
        fill = fill.drop(
            [COL_MAV(p) for p in PLANES if COL_MAV(p) in fill.columns],
            axis='columns')
    else:
        timber_keys = [TIMBER_KEY(plane, beam) for plane in PLANES]
        bbq_cols = [COL_BBQ(plane) for plane in PLANES]

        fill = timber_extract.lhc_fill_to_tfs(fill_number,
                                              keys=timber_keys,
                                              names=dict(
                                                  zip(timber_keys, bbq_cols)))

        if output:
            tfs.write_tfs(output, fill, save_index=COL_TIME())

    return fill
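The `input` argument is used in two ways: an integer is treated as an LHC fill number to extract from Timber, anything else as the path of a previously written TFS file. Illustrative calls (the argument values are hypothetical):

bbq_df = _get_timber_data(beam=1, input=6192, output="bbq_fill6192.tfs")   # query Timber, keep a copy
bbq_df = _get_timber_data(beam=1, input="bbq_fill6192.tfs", output=None)   # re-read the saved file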
Example #18
def remove_bpms_from_file(paths, bad_bpm_names, plane):
    """
    Writes a backup of the original .lin files (e.g. .linx --> .linx.notcleaned)
    and removes the BPMs identified by iForest as bad.
    :param paths: original lin files
    :param bad_bpm_names: list of the names of bad BPMs identified by iForest
    """
    for path in paths:
        src_dir = os.path.abspath(os.path.join(path, os.pardir))
        filename = os.path.basename(path)
        new_filename = os.path.join(src_dir, filename + ".notcleaned")
        os.rename(path, new_filename)
        original_file_tfs = tfs_pandas.read_tfs(new_filename).set_index(
            "NAME", drop=False)
        original_file_tfs = original_file_tfs.loc[
            ~original_file_tfs.index.isin(bad_bpm_names)]
        pln_num = "1" if plane == "x" else "2"
        original_file_tfs.headers["Q{}".format(pln_num)] =\
            original_file_tfs["TUNE" + plane.upper()].mean()
        original_file_tfs.headers["Q{}RMS".format(pln_num)] =\
            np.std(original_file_tfs["TUNE" + plane.upper()])
        tfs_pandas.write_tfs(path, original_file_tfs,
                             original_file_tfs.headers)
Example #19
def get_phases(files, model, output):
    for plane in ["X", "Y"]:
        file_list = [(file_name.strip() + ".lin" + plane.lower())
                     for file_name in files.strip("\"").split(",")]
        # merging the DataFrames
        model_panda = load_panda(model)
        tune = 0.0
        for i, file_name in enumerate(file_list):
            file_panda = tfs.read_tfs(file_name)
            if plane == "X":
                tune = tune + file_panda.headers['Q1']
            else:
                tune = tune + file_panda.headers['Q2']
            file_panda = pd.DataFrame(file_panda)
            model_panda = pd.merge(model_panda,
                                   file_panda,
                                   how='inner',
                                   on='NAME',
                                   suffixes=('', str(i + 1)))
        tune = tune / len(file_list)
        model_panda.rename(columns={'MU' + plane: 'MU' + plane + 'MDL'},
                           inplace=True)
        columns = ['NAME', 'S']
        for c in model_panda.columns.values:
            if c.startswith('MU' + plane):
                columns.append(c)

        all_data = model_panda.loc[:, columns]
        all_data.set_index("NAME", inplace=True, drop=False)
        # Here is what we need from the model and all the measured phases for the intersected BPMs
        bpms = all_data.loc[:, 'NAME'].values
        columns = ['NAME', 'S', 'MU' + plane + 'MDL']
        results = all_data.loc[bpms[:-1], columns]
        results['NAME2'] = bpms[1:]
        results['S2'] = all_data.loc[bpms[1:], 'S'].values
        cols = []

        for c in all_data.columns.values:
            if c.startswith('MU'):
                field = (all_data.loc[bpms[1:], c].values -
                         all_data.loc[bpms[:-1], c].values)
                results[c.replace('MU', 'PHASE')] = np.where(
                    np.abs(field) > 0.5, field - np.sign(field), field)
                d = c.replace('MU', 'PHASE')
                cols.append(d)
        cols.remove('PHASE' + plane + 'MDL')
        results.rename(columns={'PHASE' + plane + 'MDL': 'PH' + plane + 'MDL'},
                       inplace=True)

        results['PHASE' + plane] = np.angle(
            np.sum(np.exp(PI2I * results.loc[:, cols]), axis=1)) / (2 * np.pi)
        f = []
        for c in cols:
            field = results.loc[:, c] - results.loc[:, 'PHASE' + plane]
            results['d' + c] = np.where(
                np.abs(field) > 0.5, field - np.sign(field), field)
            f.append('d' + c)
        if len(f) > 1:
            results['STDPH' + plane] = np.std(
                results.loc[:, f], axis=1) * t_value_correction(len(f))
        else:
            results['STDPH' + plane] = 0.0
        if plane == "X":
            header = {'Q1': tune}
        else:
            header = {'Q2': tune}
        heads = [
            'NAME', 'NAME2', 'S', 'S2', 'PHASE' + plane, 'STDPH' + plane,
            'PH' + plane + 'MDL', 'MU' + plane + 'MDL'
        ]
        tfs.write_tfs(
            os.path.join(output, "getphase" + plane.lower() + ".out"),
            results.loc[:, heads], header)
    return
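Two numerical idioms recur in get_phases and again in getNDX below: phase differences in tune units are wrapped into [-0.5, 0.5] via np.where(np.abs(d) > 0.5, d - np.sign(d), d), and phases from several files are averaged on the unit circle rather than arithmetically. A small self-contained illustration (the function names are not from the original code):

import numpy as np

PI2I = 2 * np.pi * complex(0, 1)

def wrap_phase(diff):
    # Wrap phase differences (in units of 2*pi) into the interval [-0.5, 0.5].
    diff = np.asarray(diff, dtype=float)
    return np.where(np.abs(diff) > 0.5, diff - np.sign(diff), diff)

def circular_mean_phase(phases):
    # Average phases on the unit circle, avoiding the wrap-around bias of a
    # plain arithmetic mean near the 0/1 boundary.
    return np.angle(np.sum(np.exp(PI2I * np.asarray(phases)))) / (2 * np.pi)

# wrap_phase([0.7, -0.6, 0.2])      -> array([-0.3,  0.4,  0.2])
# circular_mean_phase([0.98, 0.02]) -> ~0.0 (not 0.5)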
Example #20
def analyse_with_bbq_corrections(opt):
    """ Create amplitude detuning analysis with BBQ correction from timber data. """
    LOG.info("Starting Amplitude Detuning Analysis")
    with logging_tools.DebugMode(active=opt.debug, log_file=opt.logfile):
        opt = _check_analyse_opt(opt)
        figs = {}

        # get data
        bbq_df = _get_timber_data(opt.beam, opt.timber_in, opt.timber_out)
        kickac_df = tfs.read_tfs(opt.kickac_path, index=COL_TIME())
        x_interval = _get_approx_bbq_interval(bbq_df, kickac_df.index,
                                              opt.window_length)

        # add moving average to kickac
        kickac_df, bbq_df = _add_moving_average(
            kickac_df, bbq_df,
            **opt.get_subdict([
                "window_length", "tune_x_min", "tune_x_max", "tune_y_min",
                "tune_y_max", "fine_cut", "fine_window"
            ]))

        # add corrected values to kickac
        kickac_df = _add_corrected_natural_tunes(kickac_df)

        # output kickac and bbq data
        if opt.kickac_out:
            tfs.write_tfs(opt.kickac_out, kickac_df, save_index=COL_TIME())

        if opt.bbq_out:
            tfs.write_tfs(opt.bbq_out,
                          bbq_df.loc[x_interval[0]:x_interval[1]],
                          save_index=COL_TIME())

        if opt.bbq_plot_out or opt.bbq_plot_show:
            if opt.bbq_plot_full:
                figs["bbq"] = bbq_tools.plot_bbq_data(
                    bbq_df,
                    output=opt.bbq_plot_out,
                    show=opt.bbq_plot_show,
                    two_plots=opt.bbq_plot_two,
                    interval=[
                        str(datetime.datetime.fromtimestamp(xint))
                        for xint in x_interval
                    ],
                )
            else:
                figs["bbq"] = bbq_tools.plot_bbq_data(
                    bbq_df.loc[x_interval[0]:x_interval[1]],
                    output=opt.bbq_plot_out,
                    show=opt.bbq_plot_show,
                    two_plots=opt.bbq_plot_two,
                )

        # amplitude detuning analysis
        plane = ta_const.get_plane_from_orientation(opt.orientation)
        for other_plane in PLANES:
            labels = ta_const.get_paired_lables(plane, other_plane)
            id_str = "J{:s}_Q{:s}".format(plane.upper(), other_plane.upper())

            # get proper data
            columns = ta_const.get_paired_columns(plane, other_plane)
            data = {
                key: kickac_df.loc[:, columns[key]]
                for key in columns.keys()
            }

            # plotting
            try:
                output = os.path.splitext(opt.ampdet_plot_out)
                output = "{:s}_{:s}{:s}".format(output[0], id_str, output[1])
            except AttributeError:
                output = None

            figs[id_str] = detuning_tools.plot_detuning(
                odr_plot=detuning_tools.linear_odr_plot,
                labels={
                    "x": labels[0],
                    "y": labels[1],
                    "line": opt.label
                },
                output=output,
                show=opt.ampdet_plot_show,
                x_min=opt.ampdet_plot_xmin,
                x_max=opt.ampdet_plot_xmax,
                y_min=opt.ampdet_plot_ymin,
                y_max=opt.ampdet_plot_ymax,
                **data)

    if opt.bbq_plot_show or opt.ampdet_plot_show:
        plt.show()

    return figs
Example #21
def generate_lin_files(infile, outfile='test', dpp=0.0):
    free = read_tfs(infile + '.dat')
    driven = read_tfs(infile + '_ac.dat')
    nattune = {
        "X": np.remainder(free.headers['Q1'], 1),
        "Y": np.remainder(free.headers['Q2'], 1)
    }
    tune = {
        "X": np.remainder(driven.headers['Q1'], 1),
        "Y": np.remainder(driven.headers['Q2'], 1)
    }
    model = pd.merge(free,
                     driven,
                     how='inner',
                     on='NAME',
                     suffixes=('_f', '_d'))
    model['S'] = model.loc[:, 'S_f']
    nbpms = len(model.index.values)
    coup = 0.01

    for plane in ['X', 'Y']:
        lin = model.loc[:, ['NAME', 'S']]
        lin['NOISE'] = np.abs(np.random.randn(nbpms) * 0.0002 + 0.0002)
        lin['AVG_NOISE'] = lin.loc[:, 'NOISE']
        lin['CO'] = dpp * 1000 * model.loc[:, 'D' + plane +
                                           '_d']  # meters to millimeters
        lin['CORMS'] = np.abs(np.random.randn(nbpms) * 0.003 + 0.003)
        lin['PK2PK'] = 0.07 * np.sqrt(model.loc[:, 'BET' + plane + '_d'])
        lin['TUNE' + plane] = tune[plane] + 1e-7 * np.random.randn(nbpms)
        lin['MU' + plane] = np.remainder(
            model.loc[:, 'MU' + plane + '_d'] +
            dpp * model.loc[:, 'DMU' + plane + '_d'] +
            0.0001 * np.random.randn(nbpms) + np.random.rand(), 1)
        lin['AMP' + plane] = (
            0.03 * np.sqrt(
                model.loc[:, 'BET' + plane + '_d'] *
                (1 + dpp * np.sin(2 * np.pi * model.loc[:, 'PHI' + plane + '_d']) *
                 model.loc[:, 'W' + plane + '_d'])) +
            0.001 * np.random.randn(nbpms))
        lin['NATTUNE' + plane] = nattune[plane] + 1e-6 * np.random.randn(nbpms)
        lin['NATMU' + plane] = np.remainder(
            model.loc[:, 'MU' + plane + '_f'] +
            0.001 * np.random.randn(nbpms) + np.random.rand(), 1)
        lin['NATAMP' + plane] = (
            0.0003 * np.sqrt(model.loc[:, 'BET' + plane + '_f']) +
            0.00001 * np.random.randn(nbpms))

        plane_number = {"X": "1", "Y": "2"}[plane]
        header = OrderedDict()
        header["Q" + plane_number] = tune[plane]
        header["Q" + plane_number + "RMS"] = 1e-7
        header["DPP"] = dpp
        header["NATQ" + plane_number] = nattune[plane]
        header["NATQ" + plane_number + "RMS"] = 1e-6
        if plane == 'X':
            lin['PHASE01'] = np.remainder(
                model.loc[:, 'MUY_d'] + dpp * model.loc[:, 'DMUY_d'] +
                0.0001 * np.random.randn(nbpms) + np.random.rand(), 1)
            lin['AMP01'] = (
                coup * np.sqrt(
                    model.loc[:, 'BETY_d'] *
                    (1 + dpp * np.sin(model.loc[:, 'PHIY_d']) * model.loc[:, 'WY_d'])) +
                0.0001 * np.random.randn(nbpms))
            write_tfs(outfile + '.linx', lin, header)
        else:
            lin['PHASE10'] = np.remainder(
                model.loc[:, 'MUX_d'] + 0.0001 * np.random.randn(nbpms) +
                np.random.rand(), 1)
            lin['AMP10'] = (
                coup * np.sqrt(
                    model.loc[:, 'BETX_d'] *
                    (1 + dpp * np.sin(model.loc[:, 'PHIX_d']) * model.loc[:, 'WX_d'])) +
                0.0001 * np.random.randn(nbpms))
            write_tfs(outfile + '.liny', lin, header)
Example #22
        ax1.text(1.0, 1.02, args.omctitle, verticalalignment='bottom',
                 horizontalalignment='right', transform=ax1.transAxes,
                 fontsize=14)
        tightlayout_height -= 0.05

    plt.tight_layout(rect=(0.00, 0.00, 1, tightlayout_height), pad=0.5)
    # separate arc and IR
#    ax2.hist(erry, label=x_label1, c="deepskyblue")
f.show()

if printplot:
    plt.savefig(args.printplt)
    commandfile = open(args.printplt + ".command", "w")
    commandfile.write("""#!/bin/bash\n\n""")
    commandfile.write(sys.executable + " " + cmdline)
    commandfile.close()
    os.chmod(args.printplt + ".command",
             stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE | stat.S_IWOTH |
             stat.S_IXOTH | stat.S_IROTH | stat.S_IWGRP | stat.S_IXGRP |
             stat.S_IRGRP)

if args.output_pickle:
    tfs.write_tfs(args.outputpath,
                  betabeating_[["S", "BETX1", "ERRBETX1", "BETY1",
                                "ERRBETY1", "BETXMDL1", "BETYMDL1"]])
raw_input()

Example #23
def getNDX(files, model, output):
    # getting the list of BPMs and arc BPMs
    file_list = [(file_name.strip() + ".linx")
                 for file_name in files.strip("\"").split(",")]
    file_dict = {}
    model_tfs = tfs.read_tfs(model)
    bpms = model_tfs.loc[:, "NAME"].values
    for file_name in file_list:
        filetfs = tfs.read_tfs(file_name)
        bpms = intersect(bpms, filetfs.loc[:, "NAME"].values)
    bpms = np.array(bpms)
    arc_bpms = get_arc_bpms(model_tfs, bpms)

    # merging the DataFrames
    model_panda = load_panda(model)
    for i, file_name in enumerate(file_list):
        file_panda = load_panda(file_name)
        model_panda = pd.merge(model_panda,
                               file_panda,
                               how='inner',
                               on='NAME',
                               suffixes=('', str(i + 1)))
    model_panda['NDXMDL'] = (model_panda.loc[:, 'DX'].values /
                             np.sqrt(model_panda.loc[:, 'BETX'].values))
    columns = ['NAME', 'S', 'NDXMDL']
    for c in model_panda.columns.values:
        if c.startswith('AMPZ') or c.startswith('MUZ'):
            columns.append(c)
    results = model_panda.loc[:, columns]
    results.set_index("NAME", inplace=True, drop=False)
    columns = ['S', 'NDXMDL']
    cols = []
    # scaling to the model, and getting the synchrotron phase in the arcs
    for c in results.columns.values:
        if c.startswith('MUZ'):
            amp_col = c.replace('MU', 'AMP')
            results['sc' + amp_col] = (results.loc[:, amp_col].values *
                                       np.sum(results.loc[arc_bpms, 'NDXMDL']) /
                                       np.sum(results.loc[arc_bpms, amp_col]))
            results['s' + c] = np.angle(np.exp(
                PI2I * results.loc[:, c].values)) / (2 * np.pi)
            field = results.loc[:, 's' + c].values - np.angle(
                np.sum(np.exp(
                    PI2I * results.loc[arc_bpms, 's' + c]))) / (2 * np.pi)
            results['sc' + c] = np.abs(
                np.where(np.abs(field) > 0.5, field - np.sign(field), field))
            d = 'sc' + c
            cols.append(d)
    # resolving the sign of dispersion
    for c in cols:
        ndx_col = c.replace('scMUZ', 'fNDX')
        results[ndx_col] = results.loc[:, c.replace('MU', 'AMP')] * np.sign(
            0.25 - np.abs(np.angle(np.sum(np.exp(PI2I * results.loc[:, cols]),
                                          axis=1))) / (2 * np.pi))
        columns.append(ndx_col)
    forfile = results.loc[:, columns]
    f = []
    # averaging over files and error calculation
    for c in forfile.columns.values:
        if c.startswith('fNDX'):
            f.append(c)
    if len(f) > 1:
        forfile['STDNDX'] = np.std(forfile.loc[:, f],
                                   axis=1) * t_value_correction(len(f))
    else:
        forfile['STDNDX'] = 0.0
    forfile['NDX'] = np.mean(forfile.loc[:, f], axis=1)
    forfile['DNDX'] = forfile.loc[:, 'NDX'] - forfile.loc[:, 'NDXMDL']
    forfile['NAME'] = results.index
    tfs.write_tfs(os.path.join(output, "getNDx.out"), forfile)
    return
Example #24
def write_knob(knob_path, delta):
    a = datetime.datetime.fromtimestamp(time.time())
    delta_out = - delta.loc[:, ["DELTA"]]
    delta_out.headers["PATH"] = os.path.dirname(knob_path)
    delta_out.headers["DATE"] = str(a.ctime())
    tfs.write_tfs(knob_path, delta_out, save_index="NAME")
Example #25
    def _write_tfs(self, filename, data_frame):
        if self.allow_write:
            tfs_pandas.write_tfs(os.path.join(self.directory, filename),
                                 data_frame)
        self._buffer[filename] = data_frame