Example #1
def findMatchingDirs(box_data_path):
    # pair each dpm merge_data directory with its matching box directory;
    # relies on the module-level dirs, box_dirs and box_acc_glob_pattern
    matching_dir_pairs = []
    if box_data_path == '':
        for dpm_dir in dirs:
            print(dpm_dir)
            match = re.search(
                r'^(.*/)dpm_.*?/(ip_offset_XYZDXDYDZ_.*)/.*/\d*/\d*-\d*_(.*cut)/.*/(binning_\d*)/merge_data$',
                dpm_dir)
            if not match:
                continue
            pattern = '^' + match.group(1) + 'box_.*?' + match.group(
                2) + '.*' + match.group(3) + '/.*' + match.group(
                    4) + '/merge_data$'
            # print(pattern)
            for box_dir in box_dirs:
                # print(box_dir)
                box_match = re.search(pattern, box_dir)
                if box_match:
                    matching_dir_pairs.append([dpm_dir, box_dir])
                    break
    else:
        for dpm_dir in dirs:
            # attempt to find directory with same binning
            print('checking for matching directory for ' + dpm_dir)
            match = re.search(r'^.*(binning_\d*)/.*$', dpm_dir)
            if match:
                dir_searcher = general.DirectorySearcher([match.group(1)])
                dir_searcher.searchListOfDirectories(box_data_path,
                                                     box_acc_glob_pattern)
                correct_dirs = dir_searcher.getListOfDirectories()

                if correct_dirs:
                    matching_dir_pairs.append([dpm_dir, correct_dirs[0]])
    return matching_dir_pairs
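The pairing above works by capturing pieces of a dpm path and splicing them into a new regex that the matching box path must satisfy. A minimal sketch of that splice with a hypothetical path layout (assumed for illustration only); note that the captured text is not re.escape()d, so dots inside it act as regex wildcards in the spliced pattern:

import re

# hypothetical dpm merge_data path, shaped the way the regex above expects
dpm_dir = ('/data/dpm_elastic/ip_offset_XYZDXDYDZ_0.0_0.0_0.0/1.5/100000/'
           '1-100_xycut/no_alignment/binning_300/merge_data')

match = re.search(
    r'^(.*/)dpm_.*?/(ip_offset_XYZDXDYDZ_.*)/.*/\d*/\d*-\d*_(.*cut)/.*/(binning_\d*)/merge_data$',
    dpm_dir)

# groups 1-4 are spliced into the pattern a box directory must match
pattern = ('^' + match.group(1) + 'box_.*?' + match.group(2) + '.*'
           + match.group(3) + '/.*' + match.group(4) + '/merge_data$')
print(pattern)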
Example #2
parser.add_argument(
    'dirname',
    metavar='dirname_to_scan',
    type=str,
    nargs=1,
    help='Name of directory to scan recursively for lmd data files and call merge!')
parser.add_argument('--dir_pattern',
                    metavar='path name pattern',
                    type=str,
                    default='.*',
                    help='pattern that matched directory paths must contain')

args = parser.parse_args()

patterns = []
patterns.append(args.dir_pattern)
dir_searcher = general.DirectorySearcher(patterns)

dir_searcher.searchListOfDirectories(args.dirname[0], glob_pattern)
dirs = dir_searcher.getListOfDirectories()

bashcommand = os.getenv('LMDFIT_BUILD_PATH') + '/bin/plotIPDistribution'
for dir in dirs:
    bashcommand += ' ' + dir

print(bashcommand)

returnvalue = subprocess.call(bashcommand.split())
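Splitting a single command string breaks as soon as a directory path contains a space. A safer equivalent sketch that builds the argument list directly (same binary and positional-argument interface as assumed above):

import os
import subprocess

command = [os.getenv('LMDFIT_BUILD_PATH') + '/bin/plotIPDistribution']
command.extend(dirs)  # the directories found by the DirectorySearcher above
returnvalue = subprocess.call(command)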
Example #3
parser.add_argument('--general_dimension_bins_high',
                    metavar='general_dimension_bins_high',
                    type=int,
                    default=300,
                    help='binning of data max')
parser.add_argument('--general_dimension_bins_step',
                    metavar='general_dimension_bins_step',
                    type=int,
                    default=100,
                    help='binning of data stepsize')

args = parser.parse_args()

failed_submit_commands = []

dir_searcher = general.DirectorySearcher([args.dir_pattern])

dir_searcher.searchListOfDirectories(args.dirname[0], ['filelist_', '.txt'])
dirs = dir_searcher.getListOfDirectories()

config_modifier = general.ConfigModifier()
config = config_modifier.loadConfig(args.config_url[0])

config_paths = []

for bins in range(args.general_dimension_bins_low,
                  args.general_dimension_bins_high + 1,
                  args.general_dimension_bins_step):
    if 'general_data' in config:
        subconfig = config['general_data']
        if 'primary_dimension' in subconfig:
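The binning sweep in the loop above relies on the + 1 to make the otherwise half-open range include the upper bound itself. A quick check with the defaults shown (high = 300, step = 100) and an assumed low of 100:

# range is half-open, so high + 1 makes the sweep inclusive
print(list(range(100, 300 + 1, 100)))  # -> [100, 200, 300]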
Example #4
def simulateDataOnHimster(scenario):
    # walk each simulation task of this scenario through its state machine:
    # 1. simulate data, 2. bunch and create data objects, 3. merge data
    tasks_to_remove = []

    lab_momentum = scenario.momentum
    for simulation_task in scenario.simulation_info_lists:
        dir_path = simulation_task[0]
        sim_type = simulation_task[1]
        state = simulation_task[2]
        last_state = simulation_task[3]

        print('running simulation of type ' + str(sim_type) + ' and path (' +
              dir_path + ') at state=' + str(state) + '/' + str(last_state))

        data_keywords = []
        data_pattern = ''

        cut_keyword = ''
        if scenario.use_xy_cut:
            cut_keyword += 'xy_'
        if scenario.use_m_cut:
            cut_keyword += 'm_'
        if cut_keyword == '':
            cut_keyword += 'un'
        cut_keyword += 'cut_real'

        merge_keywords = ['merge_data', 'binning_300']
        if 'v' in sim_type:
            data_keywords = ['uncut', 'bunches', 'binning_300']
            data_pattern = 'lmd_vertex_data_'
        elif 'a' in sim_type:
            data_keywords = [cut_keyword, 'bunches', 'binning_300']
            data_pattern = 'lmd_data_'
        else:
            data_keywords = [cut_keyword, 'bunches', 'binning_300']
            data_pattern = 'lmd_res_data_'

        # 1. simulate data
        if state == 1:
            os.chdir(lmd_fit_script_path)
            status_code = 1
            if 'er' in sim_type:
                found_dirs = []
                if dir_path != '':
                    temp_dir_searcher = general.DirectorySearcher(
                        ['box', data_keywords[0]])
                    temp_dir_searcher.searchListOfDirectories(
                        dir_path, 'Lumi_TrksQA_')
                    found_dirs = temp_dir_searcher.getListOfDirectories()

                if found_dirs:
                    status_code = wasSimulationSuccessful(
                        found_dirs[0], 'Lumi_TrksQA_*.root')
                elif last_state < 1:
                    # then lets simulate!
                    # this command runs the full sim software with box gen data
                    # to generate the acceptance and resolution information
                    # for this sample
                    # note: beam tilt and divergence are not necessary here,
                    # because that is handled completely by the model
                    ip_info_dict = scenario.rec_ip_info
                    max_xy_shift = math.sqrt(ip_info_dict['ip_offset_x'] ** 2 +
                                             ip_info_dict['ip_offset_y'] ** 2)
                    max_xy_shift = float('{0:.2f}'.format(
                        round(float(max_xy_shift), 2)))

                    gen_par = general.createGeneralRunParameters(
                        box_num_events_per_sample,
                        box_num_samples, lab_momentum)
                    sim_par = simulation.createSimulationParameters('box')
                    sim_par['theta_min_in_mrad'] -= max_xy_shift
                    sim_par['theta_max_in_mrad'] += max_xy_shift
                    sim_par.update(gen_par)
                    rec_par = reconstruction.createReconstructionParameters()
                    rec_par['use_xy_cut'] = scenario.use_xy_cut
                    rec_par['use_m_cut'] = scenario.use_m_cut
                    rec_par['reco_ip_offset'] = [ip_info_dict['ip_offset_x'],
                                                 ip_info_dict['ip_offset_y'],
                                                 ip_info_dict['ip_offset_z']]
                    rec_par.update(gen_par)

                    # alignment part
                    # if alignment matrices were specified, use them as both
                    # the misalignment and the alignment for the box simulations
                    align_par = alignment.createAlignmentParameters()
                    if 'alignment_matrices_path' in scenario.alignment_parameters:
                        align_par['misalignment_matrices_path'] = \
                            scenario.alignment_parameters['alignment_matrices_path']
                        align_par['alignment_matrices_path'] = \
                            scenario.alignment_parameters['alignment_matrices_path']
                    # update the sim and reco par dicts
                    sim_par.update(align_par)
                    rec_par.update(align_par)

                    (dir_path, is_finished) = simulation.startSimulationAndReconstruction(
                        sim_par, align_par, rec_par, use_devel_queue=args.use_devel_queue)
                    simulation_task[0] = dir_path
                    scenario.acc_and_res_dir_path = dir_path
                    if is_finished:
                        state += 1
                    last_state += 1

            elif 'a' in sim_type:
                found_dirs = []
                status_code = 1
                if dir_path != '':
                    temp_dir_searcher = general.DirectorySearcher(
                        ['dpm_elastic', data_keywords[0]])
                    temp_dir_searcher.searchListOfDirectories(
                        dir_path, 'Lumi_TrksQA_')
                    found_dirs = temp_dir_searcher.getListOfDirectories()
                if found_dirs:
                    status_code = wasSimulationSuccessful(
                        found_dirs[0], 'Lumi_TrksQA_*.root')

                elif last_state < state:
                    # then lets do reco
                    # this command runs the track reco software on the
                    # elastic scattering data with the estimated ip position
                    # note: beam tilt and divergence are not used here because
                    # only the last reco steps are rerun of the track reco
                    ip_info_dict = scenario.rec_ip_info

                    # TODO: save digi files instead of mc files!!
                    # we are either in the base dir or an "aligned" subdirectory,
                    # apply dirty hack here:
                    simParamFile = scenario.dir_path+'/../sim_params.config'
                    if not os.path.exists(simParamFile):
                        simParamFile = scenario.dir_path+'/../../sim_params.config'

                    with open(simParamFile, 'r') as json_file:
                        sim_par = json.load(json_file)
                    with open(scenario.dir_path + '/reco_params.config', 'r') as json_file:
                        rec_par = json.load(json_file)
                    rec_par['use_xy_cut'] = scenario.use_xy_cut
                    rec_par['use_m_cut'] = scenario.use_m_cut
                    rec_par['reco_ip_offset'] = [ip_info_dict['ip_offset_x'],
                                                 ip_info_dict['ip_offset_y'],
                                                 ip_info_dict['ip_offset_z']]
                    if (num_samples > 0 and
                            rec_par['num_samples'] > num_samples):
                        rec_par['num_samples'] = num_samples
                        sim_par['num_samples'] = num_samples
                    # dirname = os.path.dirname(scenario.dir_path)
                    (dir_path, is_finished) = simulation.startSimulationAndReconstruction(
                        sim_par, alignment.getAlignmentParameters(rec_par),
                        rec_par, use_devel_queue=args.use_devel_queue)
                    # (dir_path, is_finished) = reconstruction.startReconstruction(
                    #    rec_par, alignment.getAlignmentParameters(rec_par),
                    #    dirname, use_devel_queue=args.use_devel_queue)
                    simulation_task[0] = dir_path
                    scenario.filtered_dir_path = dir_path
                    if is_finished:
                        state += 1
                    last_state += 1
            else:
                # just skip simulation for vertex data... we always have that..
                print('skipping simulation step...')
                status_code = 0

            if status_code == 0:
                print('found simulation files, skipping')
                state = 2
                last_state = 1
            elif status_code > 0:
                print('still waiting for himster simulation jobs for ' +
                      sim_type + ' data to complete...')
            else:
                # ok something went wrong there, exit this scenario and
                # push on bad scenario stack
                last_state = -1

        # 2. create data (that means bunch data, create data objects)
        if state == 2:
            # check if data objects already exists and skip!
            temp_dir_searcher = general.DirectorySearcher(data_keywords)
            temp_dir_searcher.searchListOfDirectories(dir_path, data_pattern)
            found_dirs = temp_dir_searcher.getListOfDirectories()
            status_code = 1
            if found_dirs:
                status_code = wasSimulationSuccessful(
                    found_dirs[0], data_pattern + "*", True)
            elif last_state < state:
                os.chdir(lmd_fit_script_path)
                # bunch data
                bashcommand = 'python makeMultipleFileListBunches.py '\
                    '--files_per_bunch 10 --maximum_number_of_files ' + \
                    str(num_samples) + ' ' + dir_path
                returnvalue = subprocess.call(bashcommand.split())
                # create data
                if 'a' in sim_type:
                    el_cs = scenario.elastic_pbarp_integrated_cross_secion_in_mb
                    bashcommand = 'python createMultipleLmdData.py '\
                        + ' --dir_pattern ' + data_keywords[0] + ' ' + str(
                            lab_momentum) + ' ' + sim_type + ' ' + dir_path\
                        + ' ../dataconfig_xy.json'
                    if el_cs:
                        bashcommand += ' --elastic_cross_section ' + str(el_cs)
                else:
                    bashcommand = 'python createMultipleLmdData.py '\
                        + '--dir_pattern ' + data_keywords[0] + ' '\
                        + str(lab_momentum) + ' '\
                        + sim_type + ' ' + dir_path + ' ../dataconfig_xy.json'
                    print(bashcommand)
                returnvalue = subprocess.call(bashcommand.split())
                last_state = last_state + 1

            if status_code == 0:
                print('skipping bunching and data object creation...')
                state = 3
                last_state = 2
            elif status_code > 0:
                print('still waiting for himster simulation jobs for ' +
                      sim_type + ' data to complete...')
            else:
                # ok something went wrong there, exit this scenario and
                # push on bad scenario stack
                print("ERROR: Something went wrong with the cluster jobs! "
                      "This scenario will be pushed onto the dead stack, "
                      "and is no longer processed.")
                last_state = -1

        # 3. merge data
        if state == 3:
            # check first if merged data already exists and skip it!
            temp_dir_searcher = general.DirectorySearcher(merge_keywords)
            temp_dir_searcher.searchListOfDirectories(dir_path, data_pattern)
            found_dirs = temp_dir_searcher.getListOfDirectories()
            if not found_dirs:
                os.chdir(lmd_fit_script_path)
                # merge data
                if 'a' in sim_type:
                    bashcommand = 'python mergeMultipleLmdData.py'\
                        + ' --dir_pattern ' + data_keywords[0]\
                        + ' --num_samples ' + str(bootstrapped_num_samples)\
                        + ' ' + sim_type + ' ' + dir_path
                else:
                    bashcommand = 'python mergeMultipleLmdData.py'\
                        + ' --dir_pattern '+data_keywords[0]\
                        + ' ' + sim_type + ' ' + dir_path
                returnvalue = subprocess.call(bashcommand.split())
            state = 4

        simulation_task[2] = state
        simulation_task[3] = last_state

        if simulation_task[3] == -1:
            scenario.is_broken = True
            break
        if simulation_task[2] == 4:
            tasks_to_remove.append(simulation_task)
            print("Task is finished and will be removed from list!")

    for x in tasks_to_remove:
        scenario.simulation_info_lists.remove(x)
    return scenario
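Each entry of scenario.simulation_info_lists is a plain four-element list, which makes the index accesses above hard to follow. A sketch of the layout with an illustrative named wrapper (the wrapper is not part of the source, which keeps mutable lists because entries like simulation_task[0] are updated in place):

from collections import namedtuple

# layout used throughout simulateDataOnHimster:
#   [dir_path, sim_type, state, last_state]
# sim_type: 'v' = vertex data, 'a' = angular (dpm elastic) data,
#           'er' = box data for acceptance/resolution
SimulationTask = namedtuple('SimulationTask',
                            ['dir_path', 'sim_type', 'state', 'last_state'])

task = SimulationTask(dir_path='', sim_type='a', state=1, last_state=0)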
Example #5
def lumiDetermination(scen):
    # drive one scenario through the luminosity determination state machine
    dir_path = scen.dir_path

    state = scen.state
    last_state = scen.last_state

    # read the elastic cross section; without it the luminosity would be wrong
    if os.path.exists(dir_path + "/../../elastic_cross_section.txt"):
        print("Found an elastic cross section file!")
        with open(dir_path + "/../../elastic_cross_section.txt") as f:
            content = f.readlines()
            scen.elastic_pbarp_integrated_cross_secion_in_mb = float(
                content[0])
    else:
        print('ERROR! Cannot find elastic cross section file! '
              'The determined luminosity would be wrong!')
        sys.exit(1)

    print('processing scenario ' + dir_path + ' at step ' + str(state))

    # TODO: Not sure if later on the lab momentum has to be extracted from the data
    m = re.search(r'(\d*?\.\d*?)GeV', dir_path)
    momentum = float(m.group(1))
    scen.momentum = momentum

    with open(scen.dir_path + '/reco_params.config', 'r') as json_file:
        rec_par = json.load(json_file)
        scen.alignment_parameters = alignment.getAlignmentParameters(rec_par)

    finished = False
    # 1. create vertex data (that means bunch data, create data objects and merge)
    if state == 1:
        if len(scen.simulation_info_lists) == 0:
            scen.simulation_info_lists.append([dir_path, 'v', 1, 0])

        scen = simulateDataOnHimster(scen)
        if scen.is_broken:
            dead_scenario_stack.append(scen)
            return
        if len(scen.simulation_info_lists) == 0:
            state += 1
            last_state += 1

    if state == 2:
        # check if ip was already determined
        if scen.use_ip_determination:
            temp_dir_searcher = general.DirectorySearcher(
                ['merge_data', 'binning_300'])
            temp_dir_searcher.searchListOfDirectories(dir_path, 'reco_ip.json')
            found_dirs = temp_dir_searcher.getListOfDirectories()
            if not found_dirs:
                # 2. determine offset on the vertex data sample
                os.chdir(lmd_fit_bin_path)
                temp_dir_searcher = general.DirectorySearcher(
                    ['merge_data', 'binning_300'])
                temp_dir_searcher.searchListOfDirectories(
                    dir_path, ['lmd_vertex_data_', 'of1.root'])
                found_dirs = temp_dir_searcher.getListOfDirectories()
                bashcommand = './determineBeamOffset -p ' + \
                    found_dirs[0] + ' -c ' + '../../vertex_fitconfig.json'
                returnvalue = subprocess.call(bashcommand.split())
                ip_rec_file = found_dirs[0] + '/reco_ip.json'
            else:
                ip_rec_file = found_dirs[0] + '/reco_ip.json'

            with open(ip_rec_file) as json_file:
                ip_rec_data = json.load(json_file)

            scen.rec_ip_info['ip_offset_x'] = float('{0:.3f}'.format(
                round(float(ip_rec_data["ip_x"]), 3)))  # in cm
            scen.rec_ip_info['ip_offset_y'] = float(
                '{0:.3f}'.format(round(float(ip_rec_data["ip_y"]), 3)))
            scen.rec_ip_info['ip_offset_z'] = float(
                '{0:.3f}'.format(round(float(ip_rec_data["ip_z"]), 3)))

            print("Finished IP determination for this scenario!")
        else:
            scen.rec_ip_info['ip_offset_x'] = 0.0
            scen.rec_ip_info['ip_offset_y'] = 0.0
            scen.rec_ip_info['ip_offset_z'] = 0.0
            print("Skipped IP determination for this scenario!")

        state += 1
        last_state += 1

    if state == 3:
        # 3a. track filter the dpm data using the ip values and create ang
        # dist objects
        # (that again means bunch -> create -> merge)
        # 3b. generate acceptance and resolution with these reconstructed ip
        # values
        # (that means simulation + bunching + creating data objects + merging)
        if len(scen.simulation_info_lists) == 0:
            scen.simulation_info_lists.append(['', 'a', 1, 0])
            scen.simulation_info_lists.append(['', 'er', 1, 0])

        scen = simulateDataOnHimster(scen)
        if scen.is_broken:
            dead_scenario_stack.append(scen)
            return

        if len(scen.simulation_info_lists) == 0:
            state += 1
            last_state += 1

    if state == 4:
        # 4. runLmdFit!
        temp_dir_searcher = general.DirectorySearcher(
            ['merge_data', 'binning_300'])
        temp_dir_searcher.searchListOfDirectories(
            scen.filtered_dir_path, 'lmd_fitted_data')
        found_dirs = temp_dir_searcher.getListOfDirectories()
        if not found_dirs:
            os.chdir(lmd_fit_script_path)
            print('running lmdfit!')
            cut_keyword = ''
            if scen.use_xy_cut:
                cut_keyword += 'xy_'
            if scen.use_m_cut:
                cut_keyword += 'm_'
            if cut_keyword == '':
                cut_keyword += 'un'
            cut_keyword += 'cut_real '
            bashcommand = 'python doMultipleLuminosityFits.py '\
                '--forced_box_gen_data ' + scen.acc_and_res_dir_path + \
                ' ' + scen.filtered_dir_path + ' ' + cut_keyword + \
                lmd_fit_path + '/' + args.fit_config
            returnvalue = subprocess.call(bashcommand.split())

        print('this scenario is fully processed!!!')
        finished = True

    # if we are in an intermediate step then push on the waiting stack and
    # increase step state
    if not finished:
        scen.state = state
        scen.last_state = last_state
        waiting_scenario_stack.append(scen)
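lumiDetermination only advances a scenario a step or two per call and parks unfinished scenarios on waiting_scenario_stack, so a driver has to keep re-invoking it until every scenario finishes or lands on dead_scenario_stack. A minimal sketch of such a loop (the stack names follow the code above; the polling interval is an assumption):

import time

while active_scenario_stack or waiting_scenario_stack:
    for scen in active_scenario_stack:
        lumiDetermination(scen)
    # scenarios still waiting for cluster jobs get retried in the next pass
    active_scenario_stack = waiting_scenario_stack
    waiting_scenario_stack = []
    if active_scenario_stack:
        time.sleep(600)  # assumed polling interval while jobs run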
Example #6
parser.add_argument('--use_devel_queue', action='store_true',
                    help='If flag is set, the devel queue is used')

args = parser.parse_args()

lmd_fit_script_path = os.path.dirname(os.path.realpath(__file__))
lmd_fit_path = os.path.dirname(lmd_fit_script_path)
lmd_fit_bin_path = os.getenv('LMDFIT_BUILD_PATH') + '/bin'

num_samples = args.num_samples
bootstrapped_num_samples = args.bootstrapped_num_samples
box_num_samples = args.box_num_samples
box_num_events_per_sample = args.box_num_events_per_sample

# first lets try to find all directories and their status/step
dir_searcher = general.DirectorySearcher(['dpm_elastic', 'uncut'])

dir_searcher.searchListOfDirectories(
    args.base_output_data_dir, 'Lumi_TrksQA_')
dirs = dir_searcher.getListOfDirectories()

print(dirs)

# at first assign each scenario the first step and push on the active stack
for dir in dirs:
    scen = Scenario(dir)
    print("creating scenario:", dir)
    if args.disable_xy_cut or ("/no_alignment_correction" in dir and "no_geo_misalignment/" not in dir):
        print("Disabling xy cut!")
        scen.use_xy_cut = False  # for testing purposes
    if args.disable_m_cut or ("/no_alignment_correction" in dir and "no_geo_misalignment/" not in dir):
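The path-based part of the condition disables the cuts exactly when the geometry was misaligned but no alignment correction was applied. A small check with hypothetical paths:

# cuts are disabled only for the middle path: misaligned geometry,
# no alignment correction applied
for d in ['/data/no_geo_misalignment/no_alignment_correction/run1',
          '/data/misaligned_geo/no_alignment_correction/run1',
          '/data/misaligned_geo/aligned/run1']:
    disable = ('/no_alignment_correction' in d
               and 'no_geo_misalignment/' not in d)
    print(d, '->', disable)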
Example #7
if args.type[0].find('r') >= 0:
    data_type_list.append(
        DataTypeInfo('r', ' -f lmd_res_data_\\d*.root',
                     ['lmd_res_data_', '.root']))
if args.type[0].find('h') >= 0:
    data_type_list.append(
        DataTypeInfo('h', ' -f lmd_res_data_\\d*.root',
                     ['lmd_res_data_', '.root']))
if args.type[0].find('v') >= 0:
    data_type_list.append(
        DataTypeInfo('v', ' -f lmd_vertex_data_\\d*.root',
                     ['lmd_vertex_data_', '.root']))

patterns = []
patterns.append(args.dir_pattern)
# to avoid merge_data directories being picked up recursively,
# forbid the occurrence of merge_data in the path name
dir_searcher = general.DirectorySearcher(patterns, 'merge_data')

for data_type_info in data_type_list:
    dir_searcher.searchListOfDirectories(args.dirname[0],
                                         data_type_info.glob_pattern)
    dirs = dir_searcher.getListOfDirectories()
    for dir in dirs:
        print('starting merge for ' + dir)
        bashcommand = os.getenv('LMDFIT_BUILD_PATH') + '/bin/mergeLmdData -p ' + dir + ' -t ' + data_type_info.data_type + \
            ' -n ' + str(args.num_samples) + ' -s ' + \
            str(args.sample_size) + data_type_info.pattern
        print(bashcommand)
        returnvalue = subprocess.call(bashcommand.split())