Example #1
def write_dsm():
    """
    Writes the DSM, from the ply files given by each tile.
    """
    dsm_pieces = os.path.join(cfg['out_dir'], 'dsm/dsm_*')
    final_dsm = os.path.join(cfg['out_dir'], 'dsm.vrt')
    common.run("gdalbuildvrt %s %s" % (final_dsm, dsm_pieces))
Example #3
def write_dsm(tiles_full_info, n=5):
    """
    Writes the DSM, from the ply files given by each tile.

    Args :
         tiles_full_info: a list of tile_info dictionaries
    """
    clouds_dir = os.path.join(cfg['out_dir'], 'clouds')
    if (os.path.exists(clouds_dir)):
        shutil.rmtree(clouds_dir)
    os.mkdir(clouds_dir)

    for tile_info in tiles_full_info:
        tile_dir = tile_info['directory']
        x, y, w, h = tile_info['coordinates']
        cloud = os.path.join(os.path.abspath(tile_dir), 'cloud.ply')
        cloud_link_name = os.path.join(
            clouds_dir, 'cloud_%d_%d_row_%d_col_%d.ply' % (w, h, y, x))
        if (os.path.exists(cloud)):
            common.run('ln -s %s %s' % (cloud, cloud_link_name))
    out_dsm = os.path.join(cfg['out_dir'], 'dsm.tif')

    common.run(
        "ls %s | plyflatten %f %s" %
        (os.path.join(clouds_dir, 'cloud*'), cfg['dsm_resolution'], out_dsm))
Example #4
File: s2p.py  Project: jmichel-otb/s2p
def generate_dsm(out, point_clouds_list, resolution):
    """
    Args:
        out: output geotiff file
        point_clouds_list: list of ply files
        resolution: in meters per pixel

    The point clouds are assumed to contain points in the same UTM zone.
    """
    if point_clouds_list:
        files = ' '.join(point_clouds_list)
        common.run("ls %s | plyflatten %f %s" % (files, resolution, out))
Example #5
def generate_cloud(img_name, exp_name, x, y, w, h, height_map,
    reference_image_id=1):
    """
    Args:
        img_name: name of the dataset, located in the 'pleiades_data/images'
            directory
        exp_name: string used to identify the experiment
        x, y, w, h: four integers defining the rectangular ROI in the original
            panchro image. (x, y) is the top-left corner, and (w, h) are the
            dimensions of the rectangle.
        height_map: path to the height map, produced by the process_pair or
            process_triplet function
        reference_image_id: id (1, 2 or 3) of the image used as the reference
            image. The height map has been resampled on its grid.
    """


    rpc = 'data/%s/%04d.png.P' % (img_name, reference_image_id)
    im = 'data/%s/%04d.png' % (img_name, reference_image_id)
    im_color = 'data/%s/%04d.png' % (img_name, reference_image_id)   
    crop   = '/tmp/%s_roi_ref%02d.tif' % (exp_name, reference_image_id)
    crop_color = '/tmp/%s_roi_color_ref%02d.tif' % (exp_name, reference_image_id)
    cloud   = '/tmp/%s_cloud.ply'  % (exp_name)



    # read the zoom value
    zoom = global_params.subsampling_factor

    # colorize, then generate point cloud
    tmp_crop = common.image_crop_TIFF(im, x, y, w, h)
    tmp_crop = common.image_safe_zoom_fft(tmp_crop, zoom)
    common.run('cp %s %s' % (tmp_crop, crop))
    A = common.matrix_translation(-x, -y)
    f = 1.0/zoom
    Z = np.diag([f, f, 1])
    A = np.dot(Z, A)
    trans = common.tmpfile('.txt')
    np.savetxt(trans, A)
    
    # previous version passed the full height map directly:
    # compute_point_cloud(common.image_qauto(crop), height_map, rpc, trans, cloud)
    sz = common.image_size(crop)
    compute_point_cloud(common.image_qauto(crop),
                        common.image_crop(height_map, 0, 0, sz[0], sz[1]),
                        rpc, trans, cloud)

    # cleanup
    while common.garbage:
        common.run('rm ' + common.garbage.pop())

    print "v %s %s %s" % (crop, crop_color, height_map)
    print "meshlab %s" % (cloud)
Example #6
File: s2p.py  Project: cpalmann/s2p
def compute_dsm(args):
    """
    Compute the DSMs

    Args:
        args: a list [config_file, number_of_tiles, current_tile]
    """
    list_of_tiles_dir = os.path.join(cfg['out_dir'],'list_of_tiles.txt')

    config_file,number_of_tiles,current_tile = args

    dsm_dir = os.path.join(cfg['out_dir'],'dsm')
    out_dsm = os.path.join(dsm_dir,'dsm_%d.tif' % (current_tile) )

    extremaxy = np.loadtxt(os.path.join(cfg['out_dir'], 'global_extent.txt'))

    global_xmin,global_xmax,global_ymin,global_ymax = extremaxy

    global_y_diff = global_ymax-global_ymin
    tile_y_size = (global_y_diff)/(number_of_tiles)

    # horizontal cuts
    ymin = global_ymin + current_tile*tile_y_size
    ymax = ymin + tile_y_size

    # cutting info
    x, y, w, h, z, ov, tw, th, nb_pairs = initialization.cutting(config_file)
    range_y = np.arange(y, y + h - ov, th - ov)
    range_x = np.arange(x, x + w - ov, tw - ov)
    colmin, rowmin, tw, th = common.round_roi_to_nearest_multiple(z, range_x[0], range_y[0], tw, th)
    colmax, rowmax, tw, th = common.round_roi_to_nearest_multiple(z, range_x[-1], range_y[-1], tw, th)
    cutsinf = '%d %d %d %d %d %d %d %d' % (rowmin, th - ov, rowmax, colmin, tw - ov, colmax, tw, th)

    flags = {}
    flags['average-orig'] = 0
    flags['average'] = 1
    flags['variance'] = 2
    flags['min'] = 3
    flags['max'] = 4
    flags['median'] = 5
    flag = "-flag %d" % (flags.get(cfg['dsm_option'], 0))

    if (ymax <= global_ymax):
        common.run("plytodsm %s %f %s %f %f %f %f %s %s" % (flag,
                                                            cfg['dsm_resolution'],
                                                            out_dsm,
                                                            global_xmin,
                                                            global_xmax, ymin,
                                                            ymax, cutsinf,
                                                            cfg['out_dir']))
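The strip bounds come from an equal split of the global y extent: tile_y_size = (global_ymax - global_ymin) / number_of_tiles and ymin = global_ymin + current_tile * tile_y_size. A worked sketch with illustrative values:

global_ymin, global_ymax = 4000000.0, 4000100.0  # illustrative UTM extent (meters)
number_of_tiles = 4
tile_y_size = (global_ymax - global_ymin) / number_of_tiles  # 25.0 m per strip
for current_tile in range(number_of_tiles):
    ymin = global_ymin + current_tile * tile_y_size
    ymax = ymin + tile_y_size
    print(current_tile, ymin, ymax)
# strip 0 covers [4000000, 4000025], ..., strip 3 covers [4000075, 4000100]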
Example #7
def compute_point_cloud(crop_colorized, heights, P, H, cloud):
    """
    Computes a color point cloud from a height map.

    Args:
        crop_colorized: path to the colorized rectified crop
        heights: height map. Its size is the same as the crop_colorized image
        P: path to the file containing the projection matrix
        H: path to the file containing the coefficients of the rectifying
            homography
        cloud: path to the output points cloud (ply format)
    """
    common.run("colormesh_projective %s %s %s %s %s" % (crop_colorized, heights, P, H,
        cloud))
    return
Example #8
def lidar_preprocessor(output, input_plys):
    """
    Compute a multi-scale representation of a large point cloud.

    The output file can be viewed with LidarPreprocessor. This is useful for
    huge point clouds. The input is a list of ply files.

    Args:
        output: path to the output folder
        input_plys: list of paths to ply files
    """
    tmp = cfg['temporary_dir']
    nthreads = multiprocessing.cpu_count()
    plys = ' '.join(input_plys)
    common.run("LidarPreprocessor -to %s/LidarO -tp %s/LidarP -nt %d %s -o %s" % (
        tmp, tmp, nthreads, plys, output))
Example #9
File: s2p.py  Project: cpalmann/s2p
def process_tile(tile_info):
    """
    Process a tile by merging the height maps computed for each image pair.

    Args:
        tile_info: a dictionary that provides all you need to process a tile
    """
    tile_dir = tile_info['directory']

    # redirect stdout and stderr to log file
    if not cfg['debug']:
        fout = open('%s/stdout.log' % tile_dir, 'a', 0)  # '0' for no buffering
        sys.stdout = fout
        sys.stderr = fout

    try:
        # check that the tile is not masked
        if os.path.isfile(os.path.join(tile_dir, 'this_tile_is_masked.txt')):
            print 'tile %s already masked, skip' % tile_dir
            return

        # process each pair to get a height map
        nb_pairs = tile_info['number_of_pairs']
        for pair_id in range(1, nb_pairs + 1):
            process_tile_pair(tile_info, pair_id)

        # finalization
        height_maps = []
        for i in xrange(nb_pairs):
            if not os.path.isfile(os.path.join(tile_dir, 'pair_%d' % (i+1), 'this_tile_is_masked.txt')):
                height_maps.append(os.path.join(tile_dir, 'pair_%d' % (i+1), 'height_map.tif'))
        process.finalize_tile(tile_info, height_maps, cfg['utm_zone'])

        # ply extrema
        common.run("plyextrema {} {}".format(tile_dir, os.path.join(tile_dir, 'plyextrema.txt')))

    except Exception:
        print("Exception in processing tile:")
        traceback.print_exc()
        raise

    # close logs
    common.garbage_cleanup()
    if not cfg['debug']:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        fout.close()
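For reference only (this is not the s2p implementation): the per-tile log redirection done above by reassigning sys.stdout and sys.stderr can be written with contextlib in Python 3, which restores the streams automatically even on exceptions:

import contextlib
import os

def run_with_tile_log(tile_dir, work):
    """Run work() with stdout and stderr appended to <tile_dir>/stdout.log."""
    with open(os.path.join(tile_dir, 'stdout.log'), 'a') as fout, \
         contextlib.redirect_stdout(fout), contextlib.redirect_stderr(fout):
        work()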
Example #10
File: process.py  Project: yidongVSI/s2p
def merge_height_maps(height_maps,
                      tile_dir,
                      thresh,
                      conservative,
                      k=1,
                      garbage=[]):
    """
    Merges a list of height maps recursively, computed for one tile from N image pairs.

    Args:
        height_maps: list of height map paths
        tile_dir: directory of the tile from which to get a merged height map
        thresh: threshold used for the fusion algorithm, in meters
        conservative: if True, keep only the pixels where the two height maps
            agree (fusion algorithm)
        k: identifies the current call of merge_height_maps (default 1, first call)
        garbage: a list used to remove temporary data (default [], first call)

    """

    # output file
    local_merged_height_map = tile_dir + '/local_merged_height_map.tif'

    if len(height_maps) == 0:
        return

    if os.path.isfile(local_merged_height_map) and cfg['skip_existing']:
        print 'final height map %s already done, skip' % local_merged_height_map
    else:
        list_height_maps = []
        for i in range(len(height_maps) - 1):
            height_map = tile_dir + '/height_map_' + \
                str(i) + '_' + str(i + 1) + '_' + str(k) + '.tif'
            fusion.merge(height_maps[i], height_maps[i + 1], thresh,
                         height_map, conservative)
            list_height_maps.append(height_map)
            garbage.append(height_map)

        if len(list_height_maps) > 1:
            merge_height_maps(list_height_maps, tile_dir, thresh, conservative,
                              k + 1, garbage)
        else:
            common.run('cp %s %s' %
                       (list_height_maps[0], local_merged_height_map))
            for imtemp in garbage:
                common.run('rm -f %s' % imtemp)
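Each pass fuses consecutive pairs, so N height maps become N-1 merged maps, and the recursion repeats until a single map remains. A stub sketch of the same reduction (merge_pair stands in for fusion.merge and is purely illustrative):

def merge_all(maps, merge_pair):
    """Fuse consecutive pairs until a single merged map remains."""
    while len(maps) > 1:
        maps = [merge_pair(maps[i], maps[i + 1]) for i in range(len(maps) - 1)]
    return maps[0]

print(merge_all(['h0', 'h1', 'h2'], lambda a, b: '(%s+%s)' % (a, b)))
# ((h0+h1)+(h1+h2)) : two passes for three input maps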
Example #11
def lidar_preprocessor(output, input_plys):
    """
    Compute a multi-scale representation of a large point cloud.

    The output file can be viewed with LidarPreprocessor. This is useful for
    huge point clouds. The input is a list of ply files.

    Args:
        output: path to the output folder
        input_plys: list of paths to ply files
    """
    tmp = cfg['temporary_dir']
    nthreads = multiprocessing.cpu_count()
    plys = ' '.join(input_plys)
    common.run(
        "LidarPreprocessor -to %s/LidarO -tp %s/LidarP -nt %d %s -o %s" %
        (tmp, tmp, nthreads, plys, output))
Example #12
def write_dsm(tiles_full_info, n=5):
    """
    Writes the DSM, from the ply files given by each tile.

    Args :
         tiles_full_info: a list of tile_info dictionaries
    """
    clouds_dir = os.path.join(cfg['out_dir'], 'clouds')
    if (os.path.exists(clouds_dir)):
        shutil.rmtree(clouds_dir)
    os.mkdir(clouds_dir)

    for tile_info in tiles_full_info:
        tile_dir = tile_info['directory']
        x, y, w, h = tile_info['coordinates']
        cloud = os.path.join(os.path.abspath(tile_dir), 'cloud.ply')
        cloud_link_name = os.path.join(clouds_dir,
                                       'cloud_%d_%d_row_%d_col_%d.ply' % (w, h,
                                                                          x, y))
        if (os.path.exists(cloud)):
            common.run('ln -s %s %s' % (cloud, cloud_link_name))
    out_dsm_dir = os.path.join(cfg['out_dir'], 'dsm')
    if (os.path.exists(out_dsm_dir)):
        shutil.rmtree(out_dsm_dir)
    os.mkdir(out_dsm_dir)

    common.run("ls %s | plyflatten %f %i %s" %
               (clouds_dir + '/cloud*', cfg['dsm_resolution'], n, out_dsm_dir))
    common.run("gdalbuildvrt %s %s" %
               (cfg['out_dir'] + '/dsm.vrt', out_dsm_dir + '/dsm*'))
Example #13
File: process.py  Project: jguinet/s2p
def merge_height_maps(height_maps, tile_dir, thresh, conservative, k=1, garbage=[]):
    """
    Merges a list of height maps recursively, computed for one tile from N image pairs.

    Args:
        height_maps: list of height map paths
        tile_dir: directory of the tile from which to get a merged height map
        thresh: threshold used for the fusion algorithm, in meters
        conservative: if True, keep only the pixels where the two height maps
            agree (fusion algorithm)
        k: identifies the current call of merge_height_maps (default 1, first call)
        garbage: a list used to remove temporary data (default [], first call)

    """

    # output file
    local_merged_height_map = tile_dir + '/local_merged_height_map.tif'

    if os.path.isfile(local_merged_height_map) and cfg['skip_existing']:
        print 'final height map %s already done, skip' % local_merged_height_map
    else:
        list_height_maps = []
        for i in range(len(height_maps) - 1):
            height_map = tile_dir + '/height_map_' + \
                str(i) + '_' + str(i + 1) + '_' + str(k) + '.tif'
            fusion.merge(height_maps[i], height_maps[i + 1], thresh, height_map,
                         conservative)
            list_height_maps.append(height_map)
            garbage.append(height_map)

        if len(list_height_maps) > 1:
            merge_height_maps(list_height_maps, tile_dir,
                              thresh, conservative, k + 1, garbage)
        else:
            common.run('cp %s %s' %
                       (list_height_maps[0], local_merged_height_map))
            for imtemp in garbage:
                common.run('rm -f %s' % imtemp)
Example #14
def main(img_name=None, exp_name=None, x=None, y=None, w=None, h=None,
         reference_image_id=1, secondary_image_id=2):
    """
    Launches the s2p stereo pipeline on a pair of Pleiades images.
    """

    # input files
    im1 = 'data/%s/%04d.png' % (img_name, reference_image_id)
    im2 = 'data/%s/%04d.png' % (img_name, secondary_image_id)
    rpc1 = 'data/%s/%04d.png.P' % (img_name, reference_image_id)
    rpc2 = 'data/%s/%04d.png.P' % (img_name, secondary_image_id)
    im1_color = 'data/%s/%04d.png' % (img_name, reference_image_id)   
    prev1 = 'data/%s/%04d.png' % (img_name, reference_image_id)  ### GF: all the same image 
    pointing = 'data/%s/pointing_correction_%02d_%02d.txt' % (img_name, reference_image_id, secondary_image_id)

    # output files
    rect1 = '/tmp/%s%d.tif' % (exp_name, reference_image_id)
    rect2 = '/tmp/%s%d.tif' % (exp_name, secondary_image_id)
    hom1  = '/tmp/%s_hom%d.txt' % (exp_name, reference_image_id)
    hom2  = '/tmp/%s_hom%d.txt' % (exp_name, secondary_image_id)
    outrpc1 = '/tmp/%s_rpc%d.xml' % (exp_name, reference_image_id)
    outrpc2 = '/tmp/%s_rpc%d.xml' % (exp_name, secondary_image_id)
    crop1_color = '/tmp/%s%d_color.tif' % (exp_name, reference_image_id)
    disp    = '/tmp/%s_disp.pgm'   % (exp_name)
    mask    = '/tmp/%s_mask.png'   % (exp_name)
    cloud   = '/tmp/%s_cloud.ply'  % (exp_name)
    height  = '/tmp/%s_height.tif' % (exp_name)
    rpc_err = '/tmp/%s_rpc_err.tif'% (exp_name)
    height_unrect  = '/tmp/%s_height_unrect.tif' % (exp_name)
    mask_unrect    = '/tmp/%s_mask_unrect.png'   % (exp_name)
    subsampling_file = '/tmp/%s_subsampling.txt' % (exp_name)


    """
    Launches the s2p stereo pipeline on a pair of Pleiades images
    """
    ## 0. select ROI
    try:
        print "ROI x, y, w, h = %d, %d, %d, %d" % (x, y, w, h)
    except (NameError,TypeError):
        x,y = 0,0
        w,h = common.image_size(im1)
    #    x, y, w, h = common.get_roi_coordinates(rpc1, prev1)    ### GF: ROI IS THE WHOLE IMAGE
        print "ROI x, y, w, h = %d, %d, %d, %d" % (x, y, w, h)

    ## 0.5 copy the rpcs to the output directory, and save the subsampling factor
    from shutil import copyfile
    copyfile(rpc1, outrpc1)
    copyfile(rpc2, outrpc2)
    np.savetxt(subsampling_file, np.array([global_params.subsampling_factor]))

    # ATTENTION: if subsampling_factor is set, the rectified images will be
    # smaller, and the homography matrices and disparity range will reflect
    # this fact

    ## 1. rectification
    # If the pointing correction matrix is available, then use it. If not
    # proceed without correction

    H1, H2, disp_min, disp_max = rectification.rectify_pair(im1, im2, 
        rpc1,rpc2, x, y, w, h, rect1, rect2)


    # save homographies to tmp files
    np.savetxt(hom1, H1)
    np.savetxt(hom2, H2)

    ## 2. block-matching
#    block_matching.compute_disparity_map(rect1, rect2, disp, mask,
#        'hirschmuller08', disp_min, disp_max, extra_params='3')
    block_matching.compute_disparity_map(rect1, rect2, disp, mask,
        global_params.matching_algorithm, disp_min, disp_max)


    ## 3. triangulation for projective matrices: DLT algorithm (Hartley, chapter 12.2 or 12.5)
#    from python import disp_to_h_projective as triangulate_proj
#    triangulate_proj.compute_height_map(rpc1,rpc2,hom1,hom2,disp,mask, height, rpc_err)
    common.run("disp_to_h_projective %s %s %s %s %s %s %s %s" % (rpc1, rpc2, hom1, hom2,
        disp, mask, height, rpc_err))

    try:
        zoom = global_params.subsampling_factor
    except NameError:
        zoom = 1
    ref_crop = common.image_crop_TIFF(im1, x, y, w, h)
    triangulation.transfer_map(height, ref_crop, H1, x, y, zoom, height_unrect)
    triangulation.transfer_map(mask, ref_crop, H1, x, y, zoom, mask_unrect)


    ## 4. colorize and generate point cloud
    print (img_name, exp_name, x, y, w, h, height_unrect)
    generate_cloud(img_name, exp_name, x, y, w, h, height_unrect, reference_image_id)



    ### cleanup
    while common.garbage:
        common.run('rm ' + common.garbage.pop())
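A minimal call sketch, assuming a hypothetical dataset laid out as data/<name>/0001.png, 0002.png with matching .png.P projection matrix files:

# hypothetical dataset name, experiment label and ROI in the reference image
main(img_name='toulouse', exp_name='toulouse_test',
     x=5000, y=5000, w=800, h=800,
     reference_image_id=1, secondary_image_id=2)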
Example #15
File: process.py  Project: cpalmann/s2p
def finalize_tile(tile_info, height_maps, utm_zone=None):
    """
    Finalize the processing of a tile.

    Merge the height maps from the N pairs, remove overlapping areas, get the
    colors from an XS image and use them to colorize and generate a ply file
    (colorization is not mandatory).

    Args:
        tile_info: a dictionary that provides all you need to process a tile
        height_maps: list of the height maps generated from N pairs
    """
    # get info
    tile_dir = tile_info["directory"]
    nb_pairs = tile_info["number_of_pairs"]
    x, y, w, h = tile_info["coordinates"]
    ov = tile_info["overlap"]
    pos = tile_info["position_type"]
    img1, rpc1 = cfg["images"][0]["img"], cfg["images"][0]["rpc"]

    # merge the n height maps
    local_merged_height_map = os.path.join(tile_dir, "local_merged_height_map.tif")
    if len(height_maps) > 1:
        merge_height_maps(height_maps, tile_dir, cfg["fusion_thresh"], cfg["fusion_conservative"], 1, [])
    elif len(height_maps) == 0:
        return
    else:
        common.run("cp %s %s" % (height_maps[0], local_merged_height_map))

    # remove overlapping areas
    # By tile
    local_merged_height_map = tile_dir + "/local_merged_height_map.tif"
    local_merged_height_map_crop = tile_dir + "/local_merged_height_map_crop.tif"
    crop_ref = tile_dir + "/roi_ref.tif"
    crop_ref_crop = tile_dir + "/roi_ref_crop.tif"

    dicoPos = {}
    dicoPos["M"] = [ov / 2, ov / 2, -ov, -ov]
    dicoPos["L"] = [0, ov / 2, -ov / 2, -ov]
    dicoPos["R"] = [ov / 2, ov / 2, -ov / 2, -ov]
    dicoPos["U"] = [ov / 2, 0, -ov, -ov / 2]
    dicoPos["B"] = [ov / 2, ov / 2, -ov, -ov / 2]
    dicoPos["UL"] = [0, 0, -ov / 2, -ov / 2]
    dicoPos["UR"] = [ov / 2, 0, -ov / 2, -ov / 2]
    dicoPos["BR"] = [ov / 2, ov / 2, -ov / 2, -ov / 2]
    dicoPos["BL"] = [0, ov / 2, -ov / 2, -ov / 2]
    dicoPos["Single"] = [0, 0, 0, 0]

    z = cfg["subsampling_factor"]
    newcol, newrow, difftw, diffth = np.array(dicoPos[pos]) / z
    x = x / z + newcol
    y = y / z + newrow
    w = w / z + difftw
    h = h / z + diffth
    tile_info["coordinates"] = (x, y, w, h)

    # z=1 because local_merged_height_map, crop_ref (and so forth) have
    # already been zoomed, so don't zoom again when cropping these images.
    if not (os.path.isfile(local_merged_height_map_crop) and cfg["skip_existing"]):
        common.cropImage(local_merged_height_map, local_merged_height_map_crop, newcol, newrow, w, h)
    if not (os.path.isfile(crop_ref_crop) and cfg["skip_existing"]):
        common.cropImage(crop_ref, crop_ref_crop, newcol, newrow, w, h)

    # by pair
    for i in range(1, nb_pairs + 1):
        single_height_map = os.path.join(tile_dir, "pair_%d/height_map.tif" % i)
        single_height_map_crop = os.path.join(tile_dir, "pair_%d/height_map_crop.tif" % i)
        single_rpc_err = os.path.join(tile_dir, "pair_%d/rpc_err.tif" % i)
        single_rpc_err_crop = os.path.join(tile_dir, "pair_%d/rpc_err_crop.tif" % i)
        if not (os.path.isfile(single_height_map_crop) and cfg["skip_existing"]):
            common.cropImage(single_height_map, single_height_map_crop, newcol, newrow, w, h)
        if not (os.path.isfile(single_rpc_err_crop) and cfg["skip_existing"]):
            common.cropImage(single_rpc_err, single_rpc_err_crop, newcol, newrow, w, h)
    # colors
    color_crop_ref(tile_info, cfg["images"][0]["clr"])

    # generate cloud
    generate_cloud(tile_info, cfg["offset_ply"], utm_zone)
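The dicoPos table shifts the tile origin and shrinks its size according to the tile's position in the grid: a middle tile ('M') gives up half the overlap on every side, while a corner tile ('UL') only trims its two inner sides. A worked sketch with illustrative numbers:

import numpy as np

ov, z = 100, 1                      # illustrative overlap (pixels) and subsampling factor
x, y, w, h = 1000, 2000, 500, 500   # illustrative tile ROI
offsets = {'M': [ov / 2, ov / 2, -ov, -ov],   # middle tile
           'UL': [0, 0, -ov / 2, -ov / 2]}    # upper-left corner tile

newcol, newrow, difftw, diffth = np.array(offsets['M']) / z
print(x / z + newcol, y / z + newrow, w / z + difftw, h / z + diffth)
# 1050, 2050, 400, 400 : only the non-overlapping core of the tile is kept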
Example #16
File: process.py  Project: tangwudu/s2p
def finalize_tile(tile_info, height_maps):
    """
    Finalize the processing of a tile.

    Merge the height maps from the N pairs, remove overlapping areas, get the
    colors from an XS image and use them to colorize and generate a ply file
    (colorization is not mandatory).

    Args:
        tile_info: a dictionary that provides all you need to process a tile
        height_maps: list of the height maps generated from N pairs
    """
    # get info
    tile_dir = tile_info['directory']
    nb_pairs = tile_info['number_of_pairs']
    x, y, w, h = tile_info['coordinates']
    ov = tile_info['overlap']
    pos = tile_info['position_type']
    img1, rpc1 = cfg['images'][0]['img'], cfg['images'][0]['rpc']

    # merge the n height maps
    local_merged_height_map = os.path.join(tile_dir,
                                           'local_merged_height_map.tif')
    if len(height_maps) > 1:
        merge_height_maps(height_maps, tile_dir, cfg['fusion_thresh'],
                          cfg['fusion_conservative'], 1, [])
    else:
        common.run('cp %s %s' % (height_maps[0], local_merged_height_map))

    # remove overlapping areas
    # By tile
    local_merged_height_map = tile_dir + '/local_merged_height_map.tif'
    local_merged_height_map_crop = tile_dir + '/local_merged_height_map_crop.tif'
    crop_ref = tile_dir + '/roi_ref.tif'
    crop_ref_crop = tile_dir + '/roi_ref_crop.tif'

    dicoPos = {}
    dicoPos['M'] = [ov / 2, ov / 2, -ov, -ov]
    dicoPos['L'] = [0, ov / 2, -ov / 2, -ov]
    dicoPos['R'] = [ov / 2, ov / 2, -ov / 2, -ov]
    dicoPos['U'] = [ov / 2, 0, -ov, -ov / 2]
    dicoPos['B'] = [ov / 2, ov / 2, -ov, -ov / 2]
    dicoPos['UL'] = [0, 0, -ov / 2, -ov / 2]
    dicoPos['UR'] = [ov / 2, 0, -ov / 2, -ov / 2]
    dicoPos['BR'] = [ov / 2, ov / 2, -ov / 2, -ov / 2]
    dicoPos['BL'] = [0, ov / 2, -ov / 2, -ov / 2]
    dicoPos['Single'] = [0, 0, 0, 0]

    z = cfg['subsampling_factor']
    newcol, newrow, difftw, diffth = np.array(dicoPos[pos]) / z
    x = x / z + newcol
    y = y / z + newrow
    w = w / z + difftw
    h = h / z + diffth

    # z=1 because local_merged_height_map, crop_ref (and so forth) have
    # already been zoomed, so don't zoom again when cropping these images.
    common.cropImage(local_merged_height_map, local_merged_height_map_crop,
                     newcol, newrow, w, h)
    common.cropImage(crop_ref, crop_ref_crop, newcol, newrow, w, h)

    # by pair
    for i in range(1, nb_pairs + 1):
        single_height_map = os.path.join(tile_dir,
                                         'pair_%d/height_map.tif' % i)
        single_height_map_crop = os.path.join(
            tile_dir, 'pair_%d/height_map_crop.tif' % i)
        single_rpc_err = os.path.join(tile_dir, 'pair_%d/rpc_err.tif' % i)
        single_rpc_err_crop = os.path.join(tile_dir,
                                           'pair_%d/rpc_err_crop.tif' % i)
        common.cropImage(single_height_map, single_height_map_crop, newcol,
                         newrow, w, h)
        common.cropImage(single_rpc_err, single_rpc_err_crop, newcol, newrow,
                         w, h)
    # colors
    color_crop_ref(tile_info, cfg['images'][0]['clr'])

    # generate cloud
    generate_cloud(tile_info, cfg['offset_ply'])
Example #17
File: process.py  Project: jguinet/s2p
def finalize_tile(tile_info, height_maps):
    """
    Finalize the processing of a tile.

    Merge the height maps from the N pairs, remove overlapping areas, get the
    colors from an XS image and use them to colorize and generate a ply file
    (colorization is not mandatory).

    Args:
        tile_info: a dictionary that provides all you need to process a tile
        height_maps: list of the height maps generated from N pairs
    """
    # get info
    tile_dir = tile_info['directory']
    nb_pairs = tile_info['number_of_pairs']
    x, y, w, h = tile_info['coordinates']
    ov = tile_info['overlap']
    pos = tile_info['position_type']
    img1, rpc1 = cfg['images'][0]['img'], cfg['images'][0]['rpc']

    # merge the n height maps
    local_merged_height_map = os.path.join(tile_dir,
                                           'local_merged_height_map.tif')
    if len(height_maps) > 1:
        merge_height_maps(height_maps, tile_dir, cfg['fusion_thresh'],
                          cfg['fusion_conservative'], 1, [])
    else:
        common.run('cp %s %s' % (height_maps[0], local_merged_height_map))

    # remove overlapping areas
    # By tile
    local_merged_height_map = tile_dir + '/local_merged_height_map.tif'
    local_merged_height_map_crop = tile_dir + '/local_merged_height_map_crop.tif'
    crop_ref = tile_dir + '/roi_ref.tif'
    crop_ref_crop = tile_dir + '/roi_ref_crop.tif'

    dicoPos = {}
    dicoPos['M'] = [ov / 2, ov / 2, -ov, -ov]
    dicoPos['L'] = [0, ov / 2, -ov / 2, -ov]
    dicoPos['R'] = [ov / 2, ov / 2, -ov / 2, -ov]
    dicoPos['U'] = [ov / 2, 0, -ov, -ov / 2]
    dicoPos['B'] = [ov / 2, ov / 2, -ov, -ov / 2]
    dicoPos['UL'] = [0, 0, -ov / 2, -ov / 2]
    dicoPos['UR'] = [ov / 2, 0, -ov / 2, -ov / 2]
    dicoPos['BR'] = [ov / 2, ov / 2, -ov / 2, -ov / 2]
    dicoPos['BL'] = [0, ov / 2, -ov / 2, -ov / 2]
    dicoPos['Single'] = [0, 0, 0, 0]

    z = cfg['subsampling_factor']
    newcol, newrow, difftw, diffth = np.array(dicoPos[pos]) / z
    x = x / z + newcol
    y = y / z + newrow
    w = w / z + difftw
    h = h / z + diffth
    tile_info['coordinates'] = (x, y, w, h)
    
    # z=1 because local_merged_height_map, crop_ref (and so forth) have
    # already been zoomed, so don't zoom again when cropping these images.
    common.cropImage(local_merged_height_map, local_merged_height_map_crop,
                     newcol, newrow, w, h)
    common.cropImage(crop_ref, crop_ref_crop, newcol, newrow, w, h)

    # by pair
    for i in range(1, nb_pairs + 1):
        single_height_map = os.path.join(tile_dir, 'pair_%d/height_map.tif' % i)
        single_height_map_crop = os.path.join(tile_dir, 'pair_%d/height_map_crop.tif' % i)
        single_rpc_err = os.path.join(tile_dir, 'pair_%d/rpc_err.tif' % i)
        single_rpc_err_crop = os.path.join(tile_dir, 'pair_%d/rpc_err_crop.tif' % i)
        common.cropImage(single_height_map, single_height_map_crop, newcol,
                         newrow, w, h)
        common.cropImage(single_rpc_err, single_rpc_err_crop, newcol, newrow, w, h)
    # colors
    color_crop_ref(tile_info, cfg['images'][0]['clr'])

    # generate cloud
    generate_cloud(tile_info, cfg['offset_ply'])