Example #1
0
def scale(observations_hdlr_ref, observations_hdlr, observations_hdlr_set,
          iparams):
    """Scale two observations and return the merged observations and the
    updated observations set.

    Parameters:
      observations_hdlr_ref: reference observations handler to scale against.
      observations_hdlr: observations handler to be scaled.
      observations_hdlr_set: set of handlers kept in sync with the scaled one.
      iparams: input parameters object used to build the helper handlers.

    Returns a tuple (merged handler, updated handler set, log text). On
    scaling failure the merged handler falls back to the reference and the
    returned set is None.
    """
    txt_scale = ''
    postrefine_hdlr = postrefine_handler(iparams)
    observations_hdlr, txt_out = postrefine_hdlr.scale_with_reference(
        observations_hdlr_ref,
        observations_hdlr=observations_hdlr,
        use_binning=True)
    txt_scale += txt_out

    if observations_hdlr is not None:
        merge_hdlr = merge_handler(iparams)
        observations_hdlr_merge, txt_out = merge_hdlr.merge_observations_handler(
            observations_hdlr_ref, observations_hdlr)
        txt_scale += '\n' + txt_out

        # Propagate the refined post-refinement parameters to every handler
        # in the set so they stay consistent with the scaled handler.
        for observations_hdlr_in_set in observations_hdlr_set:
            observations_hdlr_in_set.set_params(
                postref_params=observations_hdlr.postref_params)
            observations_hdlr_in_set.refresh()
    else:
        # Scaling failed: fall back to the reference handler and signal the
        # caller by returning None for the set.
        observations_hdlr_merge = observations_hdlr_ref
        observations_hdlr_set = None

    # Single-argument print() is valid under both Python 2 and Python 3.
    print(txt_scale)
    return observations_hdlr_merge, observations_hdlr_set, txt_scale + '\n'
Example #2
0
def scale_cluster_mproc(cluster_no, cluster_paths, iparams):
    """Scale every frame in one cluster and merge the results.

    Parameters:
      cluster_no: zero-based index of the cluster to process.
      cluster_paths: list of cluster pickle-list paths, indexed by cluster_no.
      iparams: input parameters object used to build the helper handlers.

    Returns a tuple (reference handler, selected handler set, log text);
    the first two elements are None when merging fails.
    """
    frame_files = read_pickles([cluster_paths[cluster_no]])

    # Scale each frame individually; keep only handlers that scale cleanly.
    observations_hdlr_set = []
    txt_scale = ' SCALE CLUSTER %3.0f\n' % (cluster_no + 1)
    postrefine_hdlr = postrefine_handler(iparams)
    for frame_file in frame_files:
        observations_hdlr_0, txt_out = postrefine_hdlr.scale_0(frame_file)
        txt_scale += '\n' + txt_out
        if observations_hdlr_0 is not None:
            observations_hdlr_0.refresh()
            observations_hdlr_set.append(observations_hdlr_0)

    # Merge the successfully scaled handlers into a cluster reference.
    merge_hdlr = merge_handler(iparams)
    observations_hdlr_ref, observations_hdlr_selected_set, txt_out = merge_hdlr.merge_multi_observations_handler(
        observations_hdlr_set, flag_show_summary=False)
    txt_scale += '\n' + txt_out

    # Single-argument print() is valid under both Python 2 and Python 3.
    print(txt_scale)
    if observations_hdlr_ref is None:
        return None, None, txt_scale + '\n'
    return observations_hdlr_ref, observations_hdlr_selected_set, txt_scale + '\n'
Example #3
0
def postrefine_mproc(obs_hdlr_no, observations_hdlr_ref, observations_hdlr_set,
                     iparams):
    """Post-refine one observations handler against the reference handler.

    Parameters:
      obs_hdlr_no: index of the handler to refine within observations_hdlr_set.
      observations_hdlr_ref: reference observations handler.
      observations_hdlr_set: sequence of observations handlers.
      iparams: input parameters object used to build the post-refine handler.

    Returns a tuple (refined handler, log text).
    """
    observations_hdlr = observations_hdlr_set[obs_hdlr_no]
    postrefine_hdlr = postrefine_handler(iparams)
    observations_hdlr_out, txt_out = postrefine_hdlr.postrefine_with_reference(
        observations_hdlr_ref, observations_hdlr)
    # NOTE(review): sibling scale functions treat a None handler as failure;
    # guard refresh() so a failed post-refinement does not raise
    # AttributeError here — confirm postrefine_with_reference can return None.
    if observations_hdlr_out is not None:
        observations_hdlr_out.refresh()
    # Single-argument print() is valid under both Python 2 and Python 3.
    print(txt_out)
    return observations_hdlr_out, txt_out + '\n'
Example #4
0
def scale_observations_mproc(observations_no,
                             observations_hdlr_ref,
                             iparams,
                             observations_hdlr_set=None,
                             observations_file_set=None):
    """Scale one observation set to the reference set.

    Exactly one of observations_hdlr_set or observations_file_set must be
    given; the handler (or file) at index observations_no is scaled.

    Parameters:
      observations_no: index into whichever set is provided.
      observations_hdlr_ref: reference observations handler.
      iparams: input parameters object used to build the post-refine handler.
      observations_hdlr_set: optional sequence of observations handlers.
      observations_file_set: optional sequence of observation file names.

    Returns a tuple (scaled handler, log text).

    Raises:
      ValueError: if neither input set is provided (previously this fell
      through to a NameError on the unbound result variables).
    """
    txt_scale = ''
    postrefine_hdlr = postrefine_handler(iparams)
    if observations_hdlr_set is not None:
        observations_hdlr, txt_out = postrefine_hdlr.scale_with_reference(
            observations_hdlr_ref,
            observations_hdlr=observations_hdlr_set[observations_no],
            use_binning=True)
    elif observations_file_set is not None:
        observations_hdlr, txt_out = postrefine_hdlr.scale_with_reference(
            observations_hdlr_ref,
            filename=observations_file_set[observations_no],
            use_binning=True)
    else:
        raise ValueError(
            'Either observations_hdlr_set or observations_file_set '
            'must be provided.')

    txt_scale += txt_out
    # Single-argument print() is valid under both Python 2 and Python 3.
    print(txt_scale)
    return observations_hdlr, txt_scale + '\n'
Example #5
0
            n_conn_list.append(n_conn)
        else:
            conn_images_set.append([])
            n_conn_list.append(0)

    orgh = organize_handler(iparams)
    txt_cluster = orgh.generate_clusters(observations_handler_set,
                                         conn_images_set, n_conn_list)
    print txt_cluster
    txt_step1 += txt_cluster

    #2. scale images in each cluster
    txt_step2 = 'STEP 2: scale images in each cluster\n'
    print txt_step2
    path_to_cluster = iparams.run_no + '/clusters'
    postref_hdlr = postrefine_handler(iparams)
    cluster_paths = []
    for cluster_lst_file in os.listdir(path_to_cluster):
        observations_hdlr_ref = None
        if cluster_lst_file.endswith('.lst'):
            cluster_paths.append(path_to_cluster + '/' + cluster_lst_file)

    def scale_cluster_mproc_wrapper(arg):
        return scale_cluster_mproc(arg, cluster_paths, iparams)

    results = pool_map(iterable=range(len(cluster_paths)),
                       func=scale_cluster_mproc_wrapper,
                       processes=iparams.n_processors)

    observations_hdlr_scaled_set = []
    i_found_good_cluster = 0
Example #6
0
def get_ma_match_dict(image_seq, image_dict, iparams):
    """Scale the image registered under *image_seq* and return its full
    observations as a dictionary.

    The scale-0 log text is discarded; only the handler's dictionary view
    of the full observations is returned.
    """
    handler = postrefine_handler(iparams)
    scaled_hdlr, _txt_out = handler.scale_0(image_dict[image_seq], image_seq)
    return scaled_hdlr.get_full_observations_as_dict()