def _check_opt(opt):
    """ Check on options and put in missing values """
    # get unset paths from other paths
    if opt.output_path is None:
        opt.output_path = opt.meas_dir

    iotools.create_dirs(opt.output_path)

    # some paths "hardcoded"
    opt.change_params_path = os.path.join(opt.output_path,
                                          "{:s}.madx".format(opt.output_filename))
    opt.change_params_correct_path = os.path.join(opt.output_path,
                                                  "{:s}_correct.madx".format(opt.output_filename))
    opt.knob_path = os.path.join(opt.output_path, "{:s}.tfs".format(opt.output_filename))

    # check cuts and weights:
    def_dict = _get_default_values()
    if opt.modelcut is None:
        opt.modelcut = [def_dict["modelcut"][p] for p in opt.optics_params]
    elif len(opt.optics_params) != len(opt.modelcut):
        raise ValueError("The length of modelcut is not the same as of the optical parameters!")

    if opt.errorcut is None:
        opt.errorcut = [def_dict["errorcut"][p] for p in opt.optics_params]
    elif len(opt.optics_params) != len(opt.errorcut):
        raise ValueError("The length of errorcut is not the same as of the optical parameters!")

    if opt.weights is None:
        opt.weights = [def_dict["weights"][p] for p in opt.optics_params]
    elif len(opt.optics_params) != len(opt.weights):
        raise ValueError("The length of the weights is not the same as of the optical parameters!")

    return opt
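A minimal, self-contained sketch of the default-filling and length-check pattern used for the cuts and weights above; the parameter names and default values here are purely illustrative.

def _fill_or_check(values, params, defaults, name):
    # one default per requested optics parameter, or validate the given list
    if values is None:
        return [defaults[p] for p in params]
    if len(values) != len(params):
        raise ValueError("The length of {:s} is not the same as of the optical parameters!".format(name))
    return values

params = ["MUX", "MUY", "BBX"]                      # hypothetical optics parameters
defaults = {"MUX": 0.05, "MUY": 0.05, "BBX": 0.2}   # hypothetical default cuts
print(_fill_or_check(None, params, defaults, "modelcut"))             # -> [0.05, 0.05, 0.2]
print(_fill_or_check([0.1, 0.1, 0.3], params, defaults, "modelcut"))  # passed through unchanged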
Example 2
def create_instance_and_model(opt, accel_opt):
    if sys.flags.debug:
        # Debug runs (python -d): log everything to stdout with a detailed format.
        numeric_level = getattr(logging, "DEBUG", None)
        ch = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(' %(asctime)s %(levelname)s | %(name)s : %(message)s')
        ch.setFormatter(formatter)
        logging.getLogger().addHandler(ch)
        logging.getLogger().setLevel(numeric_level)
    else:
        # Normal runs: only warnings and above, sent to stderr.
        numeric_level = getattr(logging, "WARNING", None)
        logging.basicConfig(level=numeric_level)

    create_dirs(opt.output)
    accel_inst = manager.get_accel_instance(accel_opt)
    create_model(
        accel_inst,
        opt.type,
        opt.output,
        writeto=opt.writeto,
        logfile=opt.logfile,
    )
Example 3
def run_all(main_input, clean_input, harpy_input, optics_input, to_log):
    with timeit(
            lambda spanned: LOGGER.info("Total time for file: %s", spanned)):
        if (not main_input.write_raw and clean_input is None
                and harpy_input is None):
            LOGGER.error("No file has been choosen to be writen!")
            return
        _setup_file_log_handler(main_input)
        LOGGER.debug(to_log)
        tbt_files = [
            turn_by_turn_reader.read_tbt_file(input_file.strip())
            for input_file in main_input.file.strip("\"").split(",")
        ]

        lins = []
        for tbt_file in tbt_files:
            lins.extend([
                run_all_for_file(bunchfile, this_main_input, clean_input,
                                 harpy_input) for this_main_input, bunchfile in
                output_handler.handle_multibunch(main_input, tbt_file)
            ])

        if optics_input is not None:
            inputs = measure_optics.InputFiles(lins)
            iotools.create_dirs(optics_input.outputdir)
            calibrations = measure_optics._copy_calibration_files(
                optics_input.outputdir, optics_input.calibrationdir)
            inputs.calibrate(calibrations)
            measure_optics.measure_optics(inputs, optics_input)
def model_creation():
    iotools.create_dirs(test_folder)
    iotools.copy_item(os.path.join(data_folder, "modifiers.madx"),
                      os.path.join(test_folder, "modifiers.madx"))

    LOG.info("Creating Model")
    create_instance_and_model(creator_args)
Example 5
def run_all(main_input, clean_input, harpy_input, optics_input, to_log):
    with timeit(lambda spanned: LOGGER.info("Total time for file: %s", spanned)):
        if (not main_input.write_raw and
                clean_input is None and harpy_input is None):
            LOGGER.error("No file has been choosen to be writen!")
            return
        _setup_file_log_handler(main_input)
        LOGGER.debug(to_log)
        tbt_files = [turn_by_turn_reader.read_tbt_file(input_file.strip())
                     for input_file in main_input.file.strip("\"").split(",")]
        
        lins = []
        for tbt_file in tbt_files:
            lins.extend(
                [run_all_for_file(bunchfile, this_main_input, clean_input, harpy_input)
                 for this_main_input, bunchfile in
                 output_handler.handle_multibunch(main_input, tbt_file)]
            )

        if optics_input is not None:
            inputs = measure_optics.InputFiles(lins)
            iotools.create_dirs(optics_input.outputdir)
            calibrations = measure_optics._copy_calibration_files(optics_input.outputdir, optics_input.calibrationdir)
            inputs.calibrate(calibrations)
            measure_optics.measure_optics(inputs, optics_input)
 def set_outputpath(self, outputpath):
     if outputpath is None or not isinstance(outputpath, str) or outputpath == "":
         outputpath = os.path.abspath("./")
     if iotools.not_exists_directory(outputpath):
         iotools.create_dirs(outputpath)
     self.__outputpath = outputpath
Example 7
def main2(knob_names=KNOB_NAMES, time=TIME, root_cwd=CWD):
    cwd = os.path.join(root_cwd, "newcode")
    iotools.create_dirs(cwd)
    try:
        extractor_wrapper.extract_knob_value_and_definition(
            knob_names, time, cwd, server="cs-ccr-dev3")
    except IOError:
        pass
def get_output_dir(seed_dir, xing, error_types, error_loc, optic_type):
    """ Return the output dir based on the input parameters """
    output_dir = os.path.join(
        seed_dir,
        "output_{:s}".format(".".join(get_nameparts_from_parameters(
            xing=xing, error_types=error_types, error_loc=error_loc, optic_type=optic_type
        )))
    )
    iotools.create_dirs(output_dir)
    return output_dir
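Illustrative stdlib-only sketch of the naming scheme used by get_output_dir above: the name-parts are joined with dots and prefixed with "output_". The example name-parts are hypothetical, since get_nameparts_from_parameters is not shown in this listing.

import os

def build_output_dir(seed_dir, nameparts):
    # join the name-parts with dots, as get_output_dir does above
    output_dir = os.path.join(seed_dir, "output_{:s}".format(".".join(nameparts)))
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)   # stands in for iotools.create_dirs
    return output_dir

print(build_output_dir("/tmp/seed1", ["xing", "MQX", "ip5"]))
# -> /tmp/seed1/output_xing.MQX.ip5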
Example 9
def create_instance_and_model(opt, accel_opt):
    create_dirs(opt.output)
    accel_inst = manager.get_accel_instance(accel_opt)
    create_model(
        accel_inst,
        opt.type,
        opt.output,
        writeto=opt.writeto,
        logfile=opt.logfile,
    )
Example 10
def main3(knob_names=KNOB_NAMES, time=TIME, root_cwd=CWD):
    cwd = os.path.join(root_cwd, "overview")
    iotools.create_dirs(cwd)
    try:
        extractor_wrapper.extract_overview(knob_names,
                                           time,
                                           cwd,
                                           server="cs-ccr-dev3",
                                           show_plot=True)
    except IOError:
        pass
Example 11
def _create_input():
    sourcespath = abspath(join(dirname(__file__), "..", "measurements"))
    iotools.create_dirs(sourcespath)
    accelerator = {"model_dir": join(dirname(__file__), "..", "inputs", "models", "25cm_beam1"),
                   "accel": "lhc", "lhc_mode": "lhc_runII_2018", "beam": 1}
    files_to_load = optics_measurement_test_files(accelerator["model_dir"], sourcespath)
    measure_input = optics_input.OpticsInput()
    measure_input.outputdir = abspath(join(dirname(__file__), "..", "results"))
    measure_input.accelerator = manager.get_accel_instance(accelerator)
    input_files = measure_optics.InputFiles(files_to_load)
    return input_files, measure_input
Example 12
 def _copy_measurement_files(label, measurement_path, match_math):
     iotools.create_dirs(match_math)
     iotools.create_dirs(os.path.join(match_math, "sbs"))
     # GetLLM output files:
     _copy_files_with_extension(measurement_path, match_math, ".out")
     _copy_files_with_extension(measurement_path, match_math, ".dat")
     # SbS output files for the given label:
     _copy_files_which_contains(os.path.join(measurement_path, "sbs"),
                                os.path.join(match_math, "sbs"), label)
     # SbS MAD-X files (not necessary but useful):
     _copy_files_with_extension(os.path.join(measurement_path, "sbs"),
                                os.path.join(match_math, "sbs"), ".madx")
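The helpers _copy_files_with_extension and _copy_files_which_contains are not part of this listing; a hypothetical stdlib-only sketch of what such helpers might look like:

import os
import shutil

def _copy_files_with_extension(src_dir, dst_dir, extension):
    # hypothetical helper: copy every regular file ending with `extension`
    for name in os.listdir(src_dir):
        src = os.path.join(src_dir, name)
        if os.path.isfile(src) and name.endswith(extension):
            shutil.copy(src, os.path.join(dst_dir, name))

def _copy_files_which_contains(src_dir, dst_dir, substring):
    # hypothetical helper: copy every regular file whose name contains `substring`
    for name in os.listdir(src_dir):
        src = os.path.join(src_dir, name)
        if os.path.isfile(src) and substring in name:
            shutil.copy(src, os.path.join(dst_dir, name))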
Example 13
def main(opt):
    """ Entrypoint for the online model extractor python wrapper.

    Creates either the overview or knob definitions, depending on the functionality chosen.

    Keyword Args:
        Required
        function (str): Which functionality to run.
                        **Flags**: ['-f', '--functionality']
                        **Choices**: ['overview', 'definition']

        Optional
        cwd (str): Path of the current working directory.
                   **Flags**: ['-c', '--cwd']
                   **Default**: ``./``
        knob_names (str): Names of the knobs to show.
                          **Flags**: ['-k', '--knobs', '--knobnames']
                          **Default**: __See Source__
        server (str): Server to use.
                      **Flags**: ['-s', '--server']
        show_plot: Whether to show plots or not. (Only for overview functionality.)
                   **Flags**: ['--showplots']
                   **Action**: ``store_true``
        time (str): -Help not available-
                    **Flags**: ['-t', '--time']

    """
    iotools.create_dirs(opt.cwd)
    if opt.function == FUNCTION_OVERVIEW:
        extractor_wrapper.extract_overview(opt.knob_names,
                                           opt.time,
                                           opt.cwd,
                                           server=opt.server,
                                           show_plot=opt.show_plot)
    elif opt.function == FUNCTION_DEFINITION:
        if not opt.knob_names:
            raise ArgumentError(
                "Argument 'knob_names' required for function '{:s}'.".format(
                    FUNCTION_DEFINITION))
        if opt.time is None:
            raise ArgumentError(
                "Argument 'time' required for function '{:s}'.".format(
                    FUNCTION_DEFINITION))
        if opt.show_plot:
            LOG.warn(
                "Argument 'show_plot' has no effect in function '{:s}'".format(
                    FUNCTION_DEFINITION))
        extractor_wrapper.extract_knob_value_and_definition(opt.knob_names,
                                                            opt.time,
                                                            opt.cwd,
                                                            server=opt.server)
Example 14
def evaluate_for_variables(accel_inst,
                           variable_categories,
                           order=4,
                           num_proc=multiprocessing.cpu_count(),
                           temp_dir=None):
    """ Generate a dictionary containing response matrices for
        beta, phase, dispersion, tune and coupling and save it to a file.

        Args:
            accel_inst : Accelerator Instance.
            variable_categories (list): Categories of the variables/knobs to use. (from .json)
            order (int or tuple): Max or [min, max] of K-value order to use.
            num_proc (int): Number of processes to use in parallel.
            temp_dir (str): temporary directory. If ``None``, uses model_dir.
    """
    LOG.debug("Generating Fullresponse via Mad-X.")
    with timeit(lambda t: LOG.debug(
            "  Total time generating fullresponse: {:f}s".format(t))):
        if not temp_dir:
            temp_dir = accel_inst.model_dir
        create_dirs(temp_dir)

        variables = accel_inst.get_variables(classes=variable_categories)
        if len(variables) == 0:
            raise ValueError(
                "No variables found! Make sure your categories are valid!")

        # try:
        #     variables = variables.tolist()
        # except AttributeError:
        #     pass

        num_proc = num_proc if len(variables) > num_proc else len(variables)
        process_pool = multiprocessing.Pool(processes=num_proc)

        k_values = _get_orders(order)

        try:
            _generate_madx_jobs(accel_inst, variables, k_values, num_proc,
                                temp_dir)
            _call_madx(process_pool, temp_dir, num_proc)
            mapping = _load_madx_results(variables, k_values, process_pool,
                                         temp_dir)
        except IOError:
            raise IOError("MADX was unable to compute the mapping.")
        finally:
            _clean_up(variables, temp_dir, num_proc)
    return mapping
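A minimal stdlib sketch of the parallelisation pattern above: cap the worker count by the number of tasks, run them in a multiprocessing.Pool, and always clean up in the finally block. The task here is a placeholder standing in for the MAD-X jobs.

import multiprocessing

def _run_jobs(jobs, num_proc=multiprocessing.cpu_count()):
    num_proc = num_proc if len(jobs) > num_proc else len(jobs)
    pool = multiprocessing.Pool(processes=num_proc)
    try:
        results = pool.map(abs, jobs)  # placeholder task in place of _call_madx
    finally:
        pool.close()                   # stands in for _clean_up(...)
        pool.join()
    return results

if __name__ == "__main__":
    print(_run_jobs([-1, 2, -3]))      # -> [1, 2, 3]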
Example 15
def main(opt):
    """ Entrypoint for the online model extractor python wrapper.

    Creates either the overview or knob definitions, depending on the functionality chosen.

    Keyword Args:
        Required
        function (str): Which functionality to run.
                        **Flags**: ['-f', '--functionality']
                        **Choices**: ['overview', 'definition']

        Optional
        cwd (str): Path of the current working directory.
                   **Flags**: ['-c', '--cwd']
                   **Default**: ``./``
        knob_names (str): Names of the knobs to show.
                          **Flags**: ['-k', '--knobs', '--knobnames']
                          **Default**: __See Source__
        server (str): Server to use.
                      **Flags**: ['-s', '--server']
        show_plot: Whether to show plots or not. (Only for overview functionality.)
                   **Flags**: ['--showplots']
                   **Action**: ``store_true``
        time (str): -Help not available-
                    **Flags**: ['-t', '--time']

    """
    iotools.create_dirs(opt.cwd)
    if opt.function == FUNCTION_OVERVIEW:
        extractor_wrapper.extract_overview(opt.knob_names, opt.time, opt.cwd,
                                           server=opt.server, show_plot=opt.show_plot)
    elif opt.function == FUNCTION_DEFINITION:
        if not opt.knob_names:
            raise ArgumentError(
                "Argument 'knob_names' required for function '{:s}'.".format(FUNCTION_DEFINITION)
            )
        if opt.time is None:
            raise ArgumentError(
                "Argument 'time' required for function '{:s}'.".format(FUNCTION_DEFINITION)
            )
        if opt.show_plot:
            LOG.warn(
                "Argument 'show_plot' has no effect in function '{:s}'".format(FUNCTION_DEFINITION)
            )
        extractor_wrapper.extract_knob_value_and_definition(opt.knob_names, opt.time, opt.cwd,
                                                            server=opt.server)
Example 16
def generate_fullresponse(accel_inst,
                          variable_categories,
                          delta_k=0.00002,
                          num_proc=multiprocessing.cpu_count(),
                          temp_dir=None):
    """ Generate a dictionary containing response matrices for
        beta, phase, dispersion, tune and coupling and save it to a file.

        Args:
            accel_inst : Accelerator Instance.
            variable_categories (list): Categories of the variables/knobs to use. (from .json)
            delta_k (float): delta K1L to be applied to quads for sensitivity matrix
            num_proc (int): Number of processes to use in parallel.
            temp_dir (str): temporary directory. If ``None``, uses folder of original_jobfile.
    """
    LOG.debug("Generating Fullresponse via Mad-X.")
    with timeit(lambda t: LOG.debug(
            "  Total time generating fullresponse: {:f}s".format(t))):
        if not temp_dir:
            temp_dir = accel_inst.model_dir
        create_dirs(temp_dir)

        variables = accel_inst.get_variables(classes=variable_categories)
        if len(variables) == 0:
            raise ValueError(
                "No variables found! Make sure your categories are valid!")

        # try:
        #     variables = variables.tolist()
        # except AttributeError:
        #     pass

        num_proc = num_proc if len(variables) > num_proc else len(variables)
        process_pool = multiprocessing.Pool(processes=num_proc)

        incr_dict = _generate_madx_jobs(accel_inst, variables, delta_k,
                                        num_proc, temp_dir)
        _call_madx(process_pool, temp_dir, num_proc)
        _clean_up(temp_dir, num_proc)

        var_to_twiss = _load_madx_results(variables, process_pool, incr_dict,
                                          temp_dir)
        fullresponse = _create_fullresponse_from_dict(var_to_twiss)

    return fullresponse
def _call_madx(accel_inst, corrections):
    """ Create and call the madx jobs to apply the corrections """
    original_content = _get_madx_job(accel_inst)
    for dir_correct in sorted(corrections):
        dir_out = os.path.join(dir_correct, RESULTS_DIR)
        iotools.create_dirs(dir_out)
        job_content = original_content
        job_content += "twiss, file='{:s}';\n".format(os.path.join(dir_out,
                                                                   getdiff.TWISS_NOT_CORRECTED))
        for file in sorted(corrections[dir_correct]):
            job_content += "call, file='{:s}';\n".format(file)
        job_content += "twiss, file='{:s}';\n".format(os.path.join(dir_out,
                                                                   getdiff.TWISS_CORRECTED))

        madx_wrapper.resolve_and_run_string(
            job_content,
            output_file=os.path.join(dir_out, MADX_FILE),
            log_file=os.path.join(dir_out, MADXLOG_FILE),
        )
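For illustration, the job string assembled above (omitting the accelerator setup prepended by _get_madx_job) ends up looking like this; the file names are hypothetical.

job = "twiss, file='{:s}';\n".format("results/twiss_no.dat")
for correction_file in ["corr_quads.madx", "corr_skew.madx"]:
    job += "call, file='{:s}';\n".format(correction_file)
job += "twiss, file='{:s}';\n".format("results/twiss_cor.dat")
print(job)
# twiss, file='results/twiss_no.dat';
# call, file='corr_quads.madx';
# call, file='corr_skew.madx';
# twiss, file='results/twiss_cor.dat';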
Example 18
def _call_madx(accel_inst, corrections):
    """ Create and call the madx jobs to apply the corrections """
    original_content = _get_madx_job(accel_inst)
    for dir_correct in sorted(corrections):
        dir_out = os.path.join(dir_correct, RESULTS_DIR)
        iotools.create_dirs(dir_out)
        job_content = original_content
        job_content += "twiss, file='{:s}';\n".format(os.path.join(dir_out,
                                                                   getdiff.TWISS_NOT_CORRECTED))
        for file in sorted(corrections[dir_correct]):
            job_content += "call, file='{:s}';\n".format(file)
        job_content += "twiss, file='{:s}';\n".format(os.path.join(dir_out,
                                                                   getdiff.TWISS_CORRECTED))

        madx_wrapper.resolve_and_run_string(
            job_content,
            output_file=os.path.join(dir_out, MADX_FILE),
            log_file=os.path.join(dir_out, MADXLOG_FILE),
        )
Example 19
 def _copy_measurement_files(label, measurement_path, match_math):
     iotools.create_dirs(match_math)
     iotools.create_dirs(os.path.join(match_math, "sbs"))
     # GetLLM output files:
     _copy_files_with_extension(measurement_path,
                                match_math, ".out")
     _copy_files_with_extension(measurement_path,
                                match_math, ".dat")
     # SbS output files for the given label:
     _copy_files_which_contains(
         os.path.join(measurement_path, "sbs"),
         os.path.join(match_math, "sbs"),
         label
     )
     # SbS MAD-X files (not necessary but useful):
     _copy_files_with_extension(
         os.path.join(measurement_path, "sbs"),
         os.path.join(match_math, "sbs"),
         ".madx"
     )
Example 20
def _create_input():
    sourcespath = abspath(join(dirname(__file__), "..", "measurements"))
    iotools.create_dirs(sourcespath)
    accelerator = {
        "model_dir": join(dirname(__file__), "..", "inputs", "models", "25cm_beam1"),
        "accel": "lhc",
        "lhc_mode": "lhc_runII_2018",
        "beam": 1,
    }
    files_to_load = optics_measurement_test_files(accelerator["model_dir"],
                                                  sourcespath)
    measure_input = optics_input.OpticsInput()
    measure_input.outputdir = abspath(join(dirname(__file__), "..", "results"))
    measure_input.accelerator = manager.get_accel_instance(accelerator)
    input_files = measure_optics.InputFiles(files_to_load)
    return input_files, measure_input
Example 21
def evaluate_for_variables(accel_inst, variable_categories, order=4,
                           num_proc=multiprocessing.cpu_count(),
                           temp_dir=None):
    """ Generate a dictionary containing response matrices for
        beta, phase, dispersion, tune and coupling and save it to a file.

        Args:
            accel_inst : Accelerator Instance.
            variable_categories (list): Categories of the variables/knobs to use. (from .json)
            order (int or tuple): Max or [min, max] of K-value order to use.
            num_proc (int): Number of processes to use in parallel.
            temp_dir (str): temporary directory. If ``None``, uses model_dir.
    """
    LOG.debug("Generating Fullresponse via Mad-X.")
    with timeit(lambda t: LOG.debug("  Total time generating fullresponse: {:f}s".format(t))):
        if not temp_dir:
            temp_dir = accel_inst.model_dir
        create_dirs(temp_dir)

        variables = accel_inst.get_variables(classes=variable_categories)
        if len(variables) == 0:
            raise ValueError("No variables found! Make sure your categories are valid!")

        # try:
        #     variables = variables.tolist()
        # except AttributeError:
        #     pass

        num_proc = num_proc if len(variables) > num_proc else len(variables)
        process_pool = multiprocessing.Pool(processes=num_proc)

        k_values = _get_orders(order)

        try:
            _generate_madx_jobs(accel_inst, variables, k_values, num_proc, temp_dir)
            _call_madx(process_pool, temp_dir, num_proc)
            mapping = _load_madx_results(variables, k_values, process_pool, temp_dir)
        finally:
            _clean_up(variables, temp_dir, num_proc)
    return mapping
Example 22
def generate_fullresponse(accel_inst, variable_categories,
                          delta_k=0.00002, num_proc=multiprocessing.cpu_count(),
                          temp_dir=None):
    """ Generate a dictionary containing response matrices for
        beta, phase, dispersion, tune and coupling and save it to a file.

        Args:
            accel_inst : Accelerator Instance.
            variable_categories (list): Categories of the variables/knobs to use. (from .json)
            delta_k (float): delta K1L to be applied to quads for sensitivity matrix
            num_proc (int): Number of processes to use in parallel.
            temp_dir (str): temporary directory. If ``None``, uses folder of original_jobfile.
    """
    LOG.debug("Generating Fullresponse via Mad-X.")
    with timeit(lambda t: LOG.debug("  Total time generating fullresponse: {:f}s".format(t))):
        if not temp_dir:
            temp_dir = accel_inst.model_dir
        create_dirs(temp_dir)

        variables = accel_inst.get_variables(classes=variable_categories)
        if len(variables) == 0:
            raise ValueError("No variables found! Make sure your categories are valid!")

        # try:
        #     variables = variables.tolist()
        # except AttributeError:
        #     pass

        num_proc = num_proc if len(variables) > num_proc else len(variables)
        process_pool = multiprocessing.Pool(processes=num_proc)

        incr_dict = _generate_madx_jobs(accel_inst, variables,
                                        delta_k, num_proc, temp_dir)
        _call_madx(process_pool, temp_dir, num_proc)
        _clean_up(temp_dir, num_proc)

        var_to_twiss = _load_madx_results(variables, process_pool, incr_dict, temp_dir)
        fullresponse = _create_fullresponse_from_dict(var_to_twiss)

    return fullresponse
Example 23
def measure_optics(input_files, measure_input):
    """
    Main function to compute various lattice optics parameters from frequency spectra
    Args:
        input_files: InputFiles object containing frequency spectra files (linx/y)
        measure_input: OpticsInput object containing analysis settings

    Returns:
    """
    LOGGER.info("Calculating optics parameters - code version " + VERSION)
    global __start_time
    __start_time = time()
    iotools.create_dirs(measure_input.outputdir)
    logging_tools.add_module_handler(logging_tools.file_handler(
            join(measure_input.outputdir, LOG_FILE)))
    common_header = _get_header(measure_input)
    if sys.flags.debug:
        LOGGER.info("     DEBUG ON")
    print_time()
    try:
        tune_dict = tune.calculate_tunes(measure_input, input_files)
        phase_dict = phase.calculate_phases(measure_input, input_files, tune_dict, common_header)
    except:
        raise ValueError("Phase advance or tune calculation failed: No other calculation will run")
    print_time()
    try:
        coupling.calculate_coupling(measure_input, input_files, phase_dict, tune_dict, common_header)
    except:
        _tb_()
    if measure_input.only_coupling:
        LOGGER.info("Finished as only coupling calculation was requested.")
        return
    try:
        beta_df_x, driven_beta_df_x, beta_df_y, driven_beta_df_y = beta.calculate_beta_from_phase(
            measure_input, tune_dict, phase_dict, common_header)
        if driven_beta_df_x is None:
            beta_df_dict = {"X": beta_df_x, "Y": beta_df_y}
        else:
            beta_df_dict = {"X": driven_beta_df_x, "Y": driven_beta_df_y}
    except:
        _tb_()
    try:
        ratio = beta_from_amplitude.calculate_beta_from_amplitude(measure_input, input_files,
                                                                  tune_dict, phase_dict,
                                                                  beta_df_dict, common_header)
    except:
        _tb_()
    # in the following functions, nothing should change, so we choose the models now
    mad_twiss = measure_input.accelerator.get_model_tfs()
    #  mad_elements = measure_input.accelerator.get_elements_tfs()
    if measure_input.accelerator.excitation != AccExcitationMode.FREE:
        mad_ac = measure_input.accelerator.get_driven_tfs()
    else:
        mad_ac = mad_twiss
    try:
        interaction_point.write_betastar_from_phase(
            interaction_point.betastar_from_phase(
                measure_input.accelerator, phase_dict, mad_twiss
            ), common_header, measure_input.outputdir)
    except:
        _tb_()
    try:
        dispersion.calculate_orbit_and_dispersion(measure_input, input_files, tune_dict, mad_twiss,
                                                  beta_df_dict, common_header)
    except:
        _tb_()
    try:
        inv_x, inv_y = kick.calculate_kick(measure_input, input_files, mad_twiss, mad_ac, ratio, common_header)
    except:
        _tb_()
    if measure_input.nonlinear:
        try:
            resonant_driving_terms.calculate_RDTs(measure_input, input_files, mad_twiss, phase_dict, common_header, inv_x, inv_y)
        except:
            _tb_()
    print_time()
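The helper _tb_ used in the except blocks above is not shown in this listing; a hypothetical sketch of such a log-and-continue helper:

import logging
import traceback

def _tb_():
    # hypothetical: log the current exception's traceback and let the caller carry on
    logging.getLogger(__name__).error(traceback.format_exc())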
Example 24
    def get_data(self, frame, column):
        """
        Returns data in columns of frame corresponding to column in original files
        Parameters:
            frame:  joined frame
            column: name of column in original files
        Returns:
            data in numpy array corresponding to column in original files
        """
        return frame.loc[:, self.get_columns(frame, column)].values


def _copy_calibration_files(outputdir, calibrationdir):
    if calibrationdir is None:
        return None
    calibs = {}
    for plane in PLANES:
        cal_file = "calibration_{}.out".format(plane.lower())
        iotools.copy_item(join(calibrationdir, cal_file), join(outputdir, cal_file))
        calibs[plane] = tfs_pandas.read_tfs(join(outputdir, cal_file)).set_index("NAME")
    return calibs


if __name__ == "__main__":
    arguments = optics_input.parse_args()
    inputs = InputFiles(arguments.files)
    iotools.create_dirs(arguments.outputdir)
    calibrations = _copy_calibration_files(arguments.outputdir, arguments.calibrationdir)
    inputs.calibrate(calibrations)
    measure_optics(inputs, arguments)
Example 25
BB_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir,
                 os.path.pardir))
if BB_ROOT not in sys.path:
    sys.path.append(BB_ROOT)

from hole_in_one.hole_in_one import run_all
from hole_in_one.io_handlers.input_handler import parse_args
from utils import iotools

REGR_DIR = join(BB_ROOT, "tests", "regression")
TBTS = join(BB_ROOT, "tests", "inputs", "tbt_files")
MODELS = join(BB_ROOT, "tests", "inputs", "models")

if __name__ == '__main__':
    output_dir = join(REGR_DIR, "_out_hole_in_one_test_flat_3dkick")
    arguments = ("--file={file} --model={model} --output={output} clean "
                 "harpy --tunex 0.27 --tuney 0.322 --tunez 4.5e-4 "
                 "--nattunex 0.28 --nattuney 0.31".format(
                     file=join(TBTS, "flat_beam1_3d.sdds"),
                     model=join(MODELS, "flat_beam1", "twiss.dat"),
                     output=output_dir))

    iotools.create_dirs(output_dir)

    try:
        parsed_args = parse_args(arguments.split())
        run_all(*parsed_args)
    finally:
        iotools.delete_item(output_dir)
def get_plot_output_folder(cwd):
    path = os.path.join(cwd, "results_plot")
    iotools.create_dirs(path)
    return path
def get_seed_dir(cwd, seed):
    """ Build the seed-dir-name and create it. """
    seed_dir = os.path.join(cwd, "results_per_seed", get_nameparts_from_parameters(seed=seed)[0])
    iotools.create_dirs(seed_dir)
    return seed_dir
Example 28
 def set_outputpath(self, outputpath):
     if outputpath is None or not isinstance(outputpath, str) or outputpath == "":
         outputpath = os.path.abspath("./")
     if iotools.not_exists_directory(outputpath):
         iotools.create_dirs(outputpath)
     self.__outputpath = outputpath
def get_data_output_folder(cwd):
    path = os.path.join(cwd, "results_gathered")
    iotools.create_dirs(path)
    return path
def _copy_corrections(src, dst, suffix):
    iotools.create_dirs(dst)
    filename = "changeparameters{:s}.madx".format(suffix)
    shutil.copy(os.path.join(src, filename),
                os.path.join(dst, filename))
def get_job_dir(cwd, id):
    """ Build the job-dir-name and create it. """
    job_dir = os.path.join(cwd, "jobs", id)
    iotools.create_dirs(job_dir)
    return job_dir
def get_cta_plot_output_folder(cwd):
    path = os.path.join(get_plot_output_folder(cwd), "cta_hist")
    iotools.create_dirs(path)
    return path
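The get_*_folder helpers above all follow the same build-create-return pattern; a self-contained stdlib sketch, with os.makedirs standing in for iotools.create_dirs:

import os

def get_named_output_folder(cwd, name):
    # hypothetical generic form of the helpers above
    path = os.path.join(cwd, name)
    if not os.path.isdir(path):
        os.makedirs(path)   # stands in for iotools.create_dirs(path)
    return path

print(get_named_output_folder("/tmp/demo", "results_plot"))  # -> /tmp/demo/results_plot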