Example #1
def draw_two_attribute_scatter(shp_file, field1, field2, output, logfile):

    x_values = read_attribute(shp_file, field1)
    y_values = read_attribute(shp_file, field2)
    if field1 == 'INarea':  # m^2 to ha
        x_values = [item / 10000.0 for item in x_values]
    if field2 == 'INarea':  # m^2 to ha
        y_values = [item / 10000.0 for item in y_values]

    xlabel = 'IOU value'
    ylabel = 'null'        # placeholder; set below based on field2
    text_loc_detX = 0.02   # x offset (delta X) for the text annotation
    text_loc_Y = 20        # y position of the text annotation
    if field2 == 'INarea':
        text_loc_Y = 20
        ylabel = 'Area ($ha$)'
    elif field2 == 'adj_count':
        text_loc_Y = 5
        ylabel = 'Count'
    elif field2 == 'INperimete':
        text_loc_Y = 4000
        ylabel = 'Perimeter ($m$)'
    elif field2 == 'circularit':
        text_loc_Y = 0.4
        ylabel = 'Circularity'

    draw_two_list_scatter(x_values, y_values, output, xlabel, ylabel,
                          text_loc_detX, text_loc_Y)

    # note: out_dir is a module-level global
    io_function.move_file_to_dst('processLog.txt',
                                 os.path.join(out_dir, logfile),
                                 overwrite=True)
    io_function.move_file_to_dst(output,
                                 os.path.join(out_dir, output),
                                 overwrite=True)
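
The plotting helper draw_two_list_scatter is defined elsewhere in this module. A minimal sketch of what it might look like, assuming matplotlib, and assuming the two text_loc_* arguments position a correlation annotation (the annotation content and styling here are guesses, not the original implementation):

import matplotlib.pyplot as plt
import numpy as np

def draw_two_list_scatter(x_values, y_values, output, xlabel, ylabel,
                          text_loc_detX, text_loc_Y):
    # scatter plot of the two attribute lists
    fig, ax = plt.subplots(figsize=(6, 5))
    ax.scatter(x_values, y_values, s=10, c='black', alpha=0.6)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # annotate the correlation coefficient; text_loc_detX is treated as an
    # x offset and text_loc_Y as the y position (assumed behavior)
    corr = np.corrcoef(x_values, y_values)[0, 1]
    ax.text(min(x_values) + text_loc_detX, text_loc_Y, 'r = %.2f' % corr)
    fig.savefig(output, dpi=200, bbox_inches='tight')
    plt.close(fig)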
Example #2
def draw_two_values_hist(shp_file, field_name, raster_file, output, logfile, bin_min, bin_max, bin_width, labels, ylim):

    raster_values = read_oneband_image_to_1dArray(raster_file, nodata=0, ignore_small=bin_min)
    bins = np.arange(bin_min, bin_max, bin_width)

    # record the bin width in a module-level global used by the histogram helper
    global global_bin_size
    global_bin_size = bin_width
    # rescale the y-limits to match the bin-width normalization in the helper
    ylim = [item / (100.0 * bin_width) for item in ylim]

    draw_two_list_histogram(shp_file, field_name, raster_values, output, bins=bins, labels=labels,
                            color=['black', 'silver'], ylim=ylim)
    io_function.move_file_to_dst('processLog.txt', os.path.join(out_dir, logfile), overwrite=True)
    io_function.move_file_to_dst(output, os.path.join(out_dir, output), overwrite=True)
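
read_oneband_image_to_1dArray is assumed to flatten the first band and drop nodata and too-small values; a hypothetical sketch under those assumptions, using rasterio:

import numpy as np
import rasterio

def read_oneband_image_to_1dArray(image_path, nodata=None, ignore_small=None):
    # read band 1 and flatten to a 1-D array
    with rasterio.open(image_path) as src:
        data = src.read(1).flatten().astype(np.float64)
    # drop nodata pixels and values below the ignore threshold (assumed semantics)
    if nodata is not None:
        data = data[data != nodata]
    if ignore_small is not None:
        data = data[data >= ignore_small]
    return data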
Example #3
def draw_one_value_hist(shp_file, field_name, output, logfile, bin_min, bin_max, bin_width, ylim):

    values = read_attribute(shp_file, field_name)
    if field_name == 'INarea':                      # m^2 to ha
        values = [item/10000.0 for item in values]

    xlabelrotation = None
    if 'area' in field_name or 'INperimete' in field_name or 'circularit' in field_name or 'aspectLine' in field_name:
        xlabelrotation = 90

    bins = np.arange(bin_min, bin_max, bin_width)

    # plot a histogram of the attribute values
    # draw_one_list_histogram(value_list, output, bins=None, labels=None, color=None, hatch="")
    draw_one_list_histogram(values, output, bins=bins, color=['grey'], xlabelrotation=xlabelrotation, ylim=ylim)  # hatch='-'
    io_function.move_file_to_dst('processLog.txt', os.path.join(out_dir, logfile), overwrite=True)
    io_function.move_file_to_dst(output, os.path.join(out_dir, output), overwrite=True)
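
The commented-out signature above hints at the interface of draw_one_list_histogram; a minimal matplotlib sketch under that assumption (the styling and figure size are guesses):

import matplotlib.pyplot as plt

def draw_one_list_histogram(value_list, output, bins=None, labels=None,
                            color=None, hatch="", xlabelrotation=None, ylim=None):
    fig, ax = plt.subplots(figsize=(6, 4))
    # color is passed in as a list above, so take its first entry
    ax.hist(value_list, bins=bins, color=color[0] if color else 'grey',
            hatch=hatch, edgecolor='black', label=labels)
    if xlabelrotation is not None:
        ax.tick_params(axis='x', labelrotation=xlabelrotation)
    if ylim is not None:
        ax.set_ylim(ylim)
    fig.savefig(output, dpi=200, bbox_inches='tight')
    plt.close(fig)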
Example #4
def move_align_results(ref_dem, dem_tif, save_dir):

    coreg_save_dir = os.path.join(save_dir, 'dem_coreg')
    if os.path.isdir(coreg_save_dir) is False:
        io_function.mkdir(coreg_save_dir)

    align_outputs = check_align_folder(dem_tif)
    if len(align_outputs) < 9:
        raise ValueError('dem_align.py produced fewer than 9 output files')

    dem_align = os.path.join(
        coreg_save_dir,
        os.path.basename(io_function.get_name_by_adding_tail(dem_tif,
                                                             'coreg')))
    # dem_align.py outputs both an aligned DEM and a filtered ("filt") version.
    # Which one should we use, and what filter was applied? Visually checking
    # one result (Banks east): at the same location the two have identical
    # values, but the filtered version has more nodata, so use the filtered one.
    # The nodata pixels are usually water, but some are inside the thaw slumps.
    align_filt = [
        out for out in align_outputs if out.endswith('align_filt.tif')
    ][0]
    io_function.move_file_to_dst(align_filt, dem_align, overwrite=True)

    # copy reference dem if necessary
    ref_dem_copy = os.path.join(coreg_save_dir, os.path.basename(ref_dem))
    if os.path.isfile(ref_dem_copy) is False:
        io_function.copy_file_to_dst(ref_dem, ref_dem_copy)

    # move the elevation difference?
    ele_diff_folder = os.path.join(save_dir, 'dem_diff_from_demcoreg')
    if os.path.isdir(ele_diff_folder) is False:
        io_function.mkdir(ele_diff_folder)
    dem_diff_filt = [
        out for out in align_outputs if out.endswith('align_diff_filt.tif')
    ][0]
    io_function.movefiletodir(dem_diff_filt, ele_diff_folder, overwrite=True)

    coreg_png_plot_folder = os.path.join(save_dir, 'demcoreg_png_plot')
    if os.path.isdir(coreg_png_plot_folder) is False:
        io_function.mkdir(coreg_png_plot_folder)
    coreg_pngs = [out for out in align_outputs if out.endswith('.png')]
    for png in coreg_pngs:
        io_function.movefiletodir(png, coreg_png_plot_folder, overwrite=True)

    return True
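
check_align_folder collects the outputs of dem_align.py for a given DEM. A hypothetical sketch; the folder naming convention is an assumption, since the real dem_align.py output layout may differ:

import glob
import os

def check_align_folder(dem_tif):
    # dem_align.py is assumed to write its outputs (the aligned DEM,
    # *_align_filt.tif, *_align_diff_filt.tif, *.png plots, etc.) into a
    # folder named after the input DEM (the suffix here is a guess)
    align_folder = os.path.splitext(dem_tif)[0] + '_dem_align'
    if not os.path.isdir(align_folder):
        return []
    return glob.glob(os.path.join(align_folder, '*'))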
Example #5
def process_one_dem(idx, count, tif, product_list, arcticDEM_slope_dir,
                    arcticDEM_slope_8bit_dir, arcticDEM_hillshade_dir,
                    arcticDEM_tpi_8bit_dir):
    print('%d/%d convert %s to slope (8bit) and hillshade' %
          (idx + 1, count, tif))

    try:
        slope_file = os.path.basename(
            io_function.get_name_by_adding_tail(tif, 'slope'))
        slope_file_bak = os.path.join(arcticDEM_slope_dir,
                                      os.path.basename(slope_file))
        if 'slope' in product_list or 'slope_8bit' in product_list:
            slope_out = dem_to_slope(tif, slope_file, slope_file_bak)
            if slope_out is not False:
                if 'slope_8bit' in product_list:
                    slope_8bit = io_function.get_name_by_adding_tail(
                        tif, 'slope8bit')
                    slope_8bit = os.path.join(arcticDEM_slope_8bit_dir,
                                              os.path.basename(slope_8bit))
                    slope_to_8bit(slope_file, slope_8bit)

                # move the slope file to its backup location, or delete it
                if 'slope' in product_list:
                    io_function.move_file_to_dst(slope_file, slope_file_bak)
                else:
                    io_function.delete_file_or_dir(slope_file)

        if 'hillshade' in product_list:
            hillshade = io_function.get_name_by_adding_tail(tif, 'hillshade')
            hillshade = os.path.join(arcticDEM_hillshade_dir,
                                     os.path.basename(hillshade))
            dem_to_hillshade(tif, hillshade)

        if 'tpi' in product_list:
            tpi_8bit = io_function.get_name_by_adding_tail(tif, 'TPI8bit')
            tpi_8bit = os.path.join(arcticDEM_tpi_8bit_dir,
                                    os.path.basename(tpi_8bit))
            dem_to_tpi_save_8bit(tif, tpi_8bit)

        return True
    except Exception:
        print('failed to process %s' % tif)
        return tif
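
dem_to_slope, slope_to_8bit, dem_to_hillshade, and dem_to_tpi_save_8bit live elsewhere. Hypothetical sketches of the first two, assuming the GDAL command-line tools are on PATH and that slope values (0-90 degrees) are linearly rescaled to 1-255 with 0 kept as nodata:

import os
import subprocess

def dem_to_slope(dem_tif, slope_file, slope_file_bak):
    # skip if the slope was already produced in an earlier run (assumed behavior,
    # matching the "slope_out is False" branch in the caller)
    if os.path.isfile(slope_file_bak):
        return False
    subprocess.run(['gdaldem', 'slope', dem_tif, slope_file, '-compute_edges'],
                   check=True)
    return slope_file

def slope_to_8bit(slope_file, slope_8bit):
    # linearly rescale slope (0-90 degrees) to 1-255, keeping 0 as nodata
    subprocess.run(['gdal_translate', '-ot', 'Byte',
                    '-scale', '0', '90', '1', '255', '-a_nodata', '0',
                    slope_file, slope_8bit], check=True)
    return slope_8bit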
Example #6
def crop_resample_label_raster():
    img_path = os.path.join(
        data_dir,
        'rs_imagery/Planet/Brazil_area1_2019Feb07_psscene4band_analytic_sr_udm2/'
        'Brazil_area1_20190207_3B_AnalyticMS_SR_mosaic_8bit_rgb_sub.tif')

    label_path = os.path.join(
        data_dir,
        'LCLUC_MapBiomas_Gabriel/COLECAO_5_DOWNLOADS_COLECOES_ANUAL_2019_merge_prj.tif'
    )

    # crop and resample
    label_sub = resample_crop_raster(img_path, label_path)

    # rename the label raster
    new_label_img = io_function.get_name_by_adding_tail(
        os.path.basename(img_path), 'label')
    io_function.move_file_to_dst(label_sub, new_label_img)
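
resample_crop_raster is assumed to crop and resample the label raster onto the grid of the reference image; a sketch using rasterio to read the target grid and gdalwarp to do the work (the default output name is made up, and the caller renames the result anyway):

import subprocess
import rasterio

def resample_crop_raster(ref_img, in_raster, output='label_sub.tif'):
    # read the target extent and pixel size from the reference image
    with rasterio.open(ref_img) as src:
        left, bottom, right, top = src.bounds
        xres, yres = src.res
    # crop and resample with gdalwarp; nearest-neighbour keeps class codes intact
    cmd = ['gdalwarp', '-te', str(left), str(bottom), str(right), str(top),
           '-tr', str(xres), str(yres), '-r', 'near', in_raster, output]
    subprocess.run(cmd, check=True)
    return output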
Example #7
def get_sub_image(idx, selected_polygon, image_tile_list, image_tile_bounds, save_path, dstnodata, brectangle):
    '''
    get a sub-image based on a selected polygon; the polygon may cross two or more image tiles
    :param selected_polygon: the selected polygon
    :param image_tile_list: list of image tiles
    :param image_tile_bounds: the bounding boxes of the images in the list
    :param save_path: save path
    :param dstnodata: nodata value for the output image
    :param brectangle: if True, crop the raster using the polygon's bounding rectangle; otherwise use the polygon itself
    :return: True if successful, False otherwise
    '''
    img_resx, img_resy = raster_io.get_xres_yres_file(image_tile_list[0])
    # find the image tiles that the polygon overlaps (one or more images)
    img_index = get_overlap_image_index([selected_polygon], image_tile_bounds, min_overlap_area=abs(img_resx*img_resy))
    if len(img_index) < 1:
        basic.outputlogMessage(
            'Warning, the %dth polygon does not overlap any image tile, please check '
            '(1) that the shapefile and raster have the same projection'
            ' and (2) that this polygon is within the extent of the images' % idx)
        return False

    image_list = [image_tile_list[item] for item in img_index]

    # check whether the polygon crosses two or more images
    if len(image_list) == 1:
        # for the case that the polygon only overlap one raster
        with rasterio.open(image_list[0]) as src:
            polygon_json = mapping(selected_polygon)

            # not necessary
            # overlap_win = rasterio.features.geometry_window(src, [polygon_json], pad_x=0, pad_y=0, north_up=True, rotated=False,
            #                               pixel_precision=3)

            if brectangle:
                # polygon_box = selected_polygon.bounds
                polygon_json = mapping(selected_polygon.envelope) #shapely.geometry.Polygon([polygon_box])

            # crop the image and save it to disk
            out_image, out_transform = mask(src, [polygon_json], nodata=dstnodata, all_touched=True, crop=True)

            out_meta = src.meta.copy()
            out_meta.update({"driver": "GTiff",
                             "height": out_image.shape[1],
                             "width": out_image.shape[2],
                             "transform": out_transform,
                             "nodata": dstnodata})  # note: the saved image has a small offset compared to the original (~0.5 pixel)
            with rasterio.open(save_path, "w", **out_meta) as dest:
                dest.write(out_image)
    else:
        # for the case it overlap more than one raster, need to produce a mosaic
        tmp_saved_files = []

        for k_img,image_path in enumerate(image_list):
            with rasterio.open(image_path) as src:
                polygon_json = mapping(selected_polygon)
                if brectangle:
                    # polygon_box = selected_polygon.bounds
                    polygon_json = mapping(selected_polygon.envelope)  # shapely.geometry.Polygon([polygon_box])

                # crop the image and save it to disk
                out_image, out_transform = mask(src, [polygon_json], nodata=dstnodata, all_touched=True, crop=True)
                non_nodata_loc = np.where(out_image != dstnodata)
                if non_nodata_loc[0].size < 1 or np.std(out_image[non_nodata_loc]) < 0.0001:
                    basic.outputlogMessage('out_image is entirely black or white, ignore, %s: %d' % (save_path, k_img))
                    continue

                tmp_saved = os.path.splitext(save_path)[0] + '_%d' % k_img + os.path.splitext(save_path)[1]
                out_meta = src.meta.copy()
                out_meta.update({"driver": "GTiff",
                                 "height": out_image.shape[1],
                                 "width": out_image.shape[2],
                                 "transform": out_transform,
                                 "nodata": dstnodata})  # note: the saved image has a small offset compared to the original (~0.5 pixel)
                with rasterio.open(tmp_saved, "w", **out_meta) as dest:
                    dest.write(out_image)
                tmp_saved_files.append(tmp_saved)
        if len(tmp_saved_files) < 1:
            basic.outputlogMessage('Warning, the %dth polygon overlaps multiple image tiles, but all the cropped results are black or white, please check' % idx)
            return False
        elif len(tmp_saved_files) == 1:
            io_function.move_file_to_dst(tmp_saved_files[0],save_path)
            del tmp_saved_files[0]
        else:
            # mosaic files in tmp_saved_files
            mosaic_args_list = ['gdal_merge.py', '-o', save_path,'-n',str(dstnodata),'-a_nodata',str(dstnodata)]
            mosaic_args_list.extend(tmp_saved_files)
            if basic.exec_command_args_list_one_file(mosaic_args_list,save_path) is False:
                raise IOError('error, obtain a mosaic (%s) failed'%save_path)

        # # for test
        # if idx==13:
        #     raise ValueError('for test')

        # remove the tmp files
        for tmp_file in tmp_saved_files:
            io_function.delete_file_or_dir(tmp_file)

    # TODO: if the output would be a very large image (e.g., 10000 by 10000 pixels), raise an error

    return True
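
get_overlap_image_index is assumed to take shapely geometries for both the polygons and the tile bounds; a hypothetical sketch of that helper:

from shapely.ops import unary_union

def get_overlap_image_index(polygons, image_bound_list, min_overlap_area=1):
    # merge the input polygons, then keep the indices of the tiles whose
    # bounds overlap the merged polygon by more than min_overlap_area
    polygon_union = unary_union(polygons)
    img_index = []
    for idx, img_bound in enumerate(image_bound_list):
        if polygon_union.intersection(img_bound).area > min_overlap_area:
            img_index.append(idx)
    return img_index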
Example #8
def create_moasic_of_each_grid_polygon(id,
                                       polygon,
                                       polygon_latlon,
                                       out_res,
                                       cloud_cover_thr,
                                       geojson_list,
                                       save_dir,
                                       new_prj_wkt=None,
                                       new_prj_proj4=None,
                                       sr_min=0,
                                       sr_max=3000,
                                       to_rgb=True,
                                       nodata=0,
                                       save_org_dir=None,
                                       resampling_method='min'):
    '''
    create a mosaic of the Planet images within a grid polygon
    :param polygon: the grid polygon (in the projection of the images)
    :param polygon_latlon: the same polygon in lat/lon
    :param out_res: output resolution
    :param cloud_cover_thr: cloud cover threshold for selecting images
    :param geojson_list: list of geojson files describing the Planet images
    :param save_dir: output directory
    :param new_prj_wkt: target projection (WKT), if reprojection is needed
    :param new_prj_proj4: target projection (proj4), if reprojection is needed
    :param sr_min: minimum surface reflectance for the RGB conversion
    :param sr_max: maximum surface reflectance for the RGB conversion
    :param to_rgb: whether to convert the images to RGB first
    :param nodata: nodata value for the mosaic
    :param save_org_dir: if set, save a copy of the original images there
    :param resampling_method: resampling method for mosaicking
    :return: the path of the mosaic, or False on failure
    '''
    time0 = time.time()
    file_name = os.path.basename(save_dir)
    fin_out = os.path.join(save_dir, file_name + '_sub_%d.tif' % id)
    if os.path.isfile(fin_out):
        basic.outputlogMessage(
            'Warning, skip %s because it already exists, remove it if you want to regenerate it'
            % fin_out)
        return fin_out

    # get image list and cloud cover
    planet_img_list, cloud_covers = get_Planet_SR_image_list_overlap_a_polygon(
        polygon_latlon, geojson_list, cloud_cover_thr)
    if len(planet_img_list) < 1:
        basic.outputlogMessage('warning, no images within the %dth grid' % id)
        return False

    io_function.mkdir(save_dir)

    print('images and their cloud cover for the %dth grid:' % id)
    for img, cloud_cover in zip(planet_img_list, cloud_covers):
        print(img, cloud_cover)

    proc_id = multiprocessing.current_process().pid

    # convert to RGB images (for Planet)
    rgb_image_list = []
    rgb_dir = 'RGB_images_' + str(proc_id)
    if to_rgb:
        for tif_path in planet_img_list:
            rgb_img = convert_planet_to_rgb_images(tif_path,
                                                   save_dir=rgb_dir,
                                                   save_org_dir=save_org_dir,
                                                   sr_min=sr_min,
                                                   sr_max=sr_max)
            rgb_image_list.append(rgb_img)
    if len(rgb_image_list) > 0:
        planet_img_list = rgb_image_list

    reproj_img_list = []
    # reproject if necessary
    reproj_dir = 'planet_images_reproj_' + str(proc_id)
    if new_prj_wkt is not None and new_prj_proj4 is not None:
        for tif_path in planet_img_list:
            prj_out = reproject_planet_image(tif_path,
                                             new_prj_wkt,
                                             new_prj_proj4,
                                             save_dir=reproj_dir)
            # replace the image
            if prj_out is not False and os.path.isfile(prj_out):
                reproj_img_list.append(prj_out)
            else:
                # if reprojection was not performed, keep the original image
                reproj_img_list.append(tif_path)
    if len(reproj_img_list) > 0:
        planet_img_list = reproj_img_list

    # create mosaic using gdal_merge.py
    # in gdal_merge.py a later image overwrites earlier ones, so we put the
    # images with the largest cloud cover first (the clearest end up on top)

    out = os.path.join(save_dir, file_name + '_sub_%d_tmp.tif' % id)
    if os.path.isfile(out):
        io_function.delete_file_or_dir(out)

    # reverse=True to make it in descending order
    img_cloud_list = [
        (img_path, cloud)
        for cloud, img_path in sorted(zip(cloud_covers, planet_img_list),
                                      key=lambda pair: pair[0],
                                      reverse=True)
    ]
    # for checking
    print('Image and its cloud after sorting:')
    for (img_path, cloud) in img_cloud_list:
        print(img_path, cloud)
    tifs = [img_path for (img_path, cloud) in img_cloud_list]
    tifs_str = ' '.join(tifs)

    # cmd_str = 'gdal_merge.py -o %s -n %d -init %d -ps %d %d %s'%(out,nodata,nodata,out_res,out_res,tifs_str)
    cmd_str = 'gdalbuildvrt -resolution user -tr %d %d -srcnodata %d -vrtnodata %d  %s %s' % (
        out_res, out_res, nodata, nodata, out, tifs_str)
    status, result = basic.exec_command_string(cmd_str)
    if status != 0:
        print(result)
        sys.exit(status)

    # # #  polygon.exterior.coords
    # minx, miny, maxx, maxy =  polygon.bounds    # (minx, miny, maxx, maxy)
    # print(minx, miny, maxx, maxy)
    # results = RSImageProcess.subset_image_projwin(fin_out,out,minx, maxy, maxx, miny, xres=out_res,yres=out_res)
    # print(results)
    results = RSImageProcess.subset_image_by_polygon_box_image_min(
        fin_out,
        out,
        polygon,
        xres=out_res,
        yres=out_res,
        compress='lzw',
        tiled='yes',
        bigtiff='if_safer')

    if results is False:
        basic.outputlogMessage(
            'Warning, cropping %s failed, keeping the version without cropping' % out)
        io_function.move_file_to_dst(out, fin_out)
    else:
        io_function.delete_file_or_dir(out)

    # ## mosaic and crop at the same time together
    # minx, miny, maxx, maxy =  polygon.bounds    # (minx, miny, maxx, maxy)
    # print(minx, miny, maxx, maxy)
    # results = RSImageProcess.mosaic_crop_images_gdalwarp(tifs,fin_out,src_nodata=nodata,min_x=minx,min_y=miny,max_x=maxx,max_y=maxy,
    #                                                      xres=out_res,yres=out_res,resampling_method=resampling_method)
    #
    # if results is False:
    #     basic.outputlogMessage('Warning, create %s failed' % fin_out)
    #     return False

    # sys.exit(0)
    cost_time_sec = time.time() - time0
    basic.outputlogMessage(
        'finished creating %s cost %.2f seconds (%.2f minutes)' %
        (fin_out, cost_time_sec, cost_time_sec / 60))

    return fin_out
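
basic.exec_command_string runs a shell command and returns the status/output pair consumed above; a plausible sketch with subprocess (an assumption, not the original implementation):

import subprocess

def exec_command_string(cmd_str):
    # run a shell command, returning (exit status, combined stdout/stderr)
    proc = subprocess.run(cmd_str, shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return proc.returncode, proc.stdout.decode(errors='replace')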
Example #9
def run_evaluation_multi_trained_models(train_root_dir, train_dir_pattern, para_file, output_file, working_dir=None):

    curr_dir = os.getcwd()
    if working_dir is None:
        working_dir = curr_dir

    os.chdir(working_dir)

    if os.path.isdir(train_root_dir) is False:
        raise ValueError('%s does not exist' % train_root_dir)

    if train_dir_pattern is None:
        folder_list = [train_root_dir]
    else:
        folder_list = io_function.get_file_list_by_pattern(train_root_dir,train_dir_pattern)
        folder_list = [item for item in folder_list if os.path.isdir(item) ]
        folder_list.sort()

    eval_output = {}
    eval_output['class_1'] = []
    eval_output['overall'] = []
    eval_output['step'] = []
    eval_output['expr_name'] = []
    eval_output['train_dir'] = []
    eval_output['train_dir_path'] = []

    for train_folder in folder_list:
        exp_name = parameters.get_string_parameters(para_file, 'expr_name')
        eval_dir = os.path.join(working_dir, exp_name, 'eval')
        bak_miou_dir = os.path.join(working_dir, exp_name, 'eval_%s' % os.path.basename(train_folder))
        if os.path.isdir(bak_miou_dir):
            basic.outputlogMessage('Evaluation on test data using model %s already exists, skip' % os.path.basename(train_folder))
        else:
            # run evaluation
            basic.outputlogMessage('run evaluation using trained model in %s' % train_folder)
            TRAIN_LOGDIR = os.path.join(train_folder, exp_name, 'train')
            run_evaluation.run_evaluation_main(para_file, b_new_validation_data=True, train_dir=TRAIN_LOGDIR)

            # move eval dir for next run.
            io_function.move_file_to_dst(eval_dir, bak_miou_dir, overwrite=False)

        # read
        eval_output['train_dir'].append(os.path.basename(train_folder))
        eval_output['train_dir_path'].append(train_folder)
        eval_output['expr_name'].append(exp_name)

        # get miou
        get_miou_of_overall_and_class_1_step(working_dir, para_file, eval_output, eval_folder=os.path.basename(bak_miou_dir))

    # save to excel file
    train_out_table_pd = pd.DataFrame(eval_output)
    with pd.ExcelWriter(output_file, engine='xlsxwriter') as writer:  # add_format below requires the xlsxwriter engine
        train_out_table_pd.to_excel(writer, sheet_name='training parameter and results')
        # set format
        workbook = writer.book
        num_format = workbook.add_format({'num_format': '#0.000'})
        train_out_table_sheet = writer.sheets['training parameter and results']
        train_out_table_sheet.set_column('O:P', None, num_format)

    os.chdir(curr_dir)
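
The ExcelWriter block above depends on the xlsxwriter engine (workbook.add_format and set_column are xlsxwriter calls). A self-contained illustration of the same number-format pattern, with made-up data and a made-up column range:

import pandas as pd

df = pd.DataFrame({'class_1': [0.7123, 0.6548], 'overall': [0.8312, 0.7904]})
with pd.ExcelWriter('miou_demo.xlsx', engine='xlsxwriter') as writer:
    df.to_excel(writer, sheet_name='training parameter and results')
    workbook = writer.book
    num_format = workbook.add_format({'num_format': '#0.000'})
    sheet = writer.sheets['training parameter and results']
    sheet.set_column('B:C', None, num_format)  # format the two miou columns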
Example #10
def main(options, args):

    # get multi-temporal shapefile list
    para_file = options.para_file
    b_remove = parameters.get_bool_parameters_None_if_absence(
        para_file, 'b_remove_polygons_using_multitemporal_results')
    # exit
    if b_remove is None or b_remove is False:
        basic.outputlogMessage(
            'Warning, b_remove_polygons_using_multitemporal_results is not set or is set to NO'
        )
        return True

    shp_dir = args[0]
    file_pattern = args[1]
    polyon_shps_list = io_function.get_file_list_by_pattern(
        shp_dir, file_pattern)
    if len(polyon_shps_list) < 2:
        raise ValueError(
            'Error, fewer than two shapefiles, cannot conduct multi-temporal polygon analysis'
        )

    # sort polyon_shps_list in order: I0 to In
    polyon_shps_list.sort(
        key=lambda x: int(re.findall(r'I\d+', os.path.basename(x))[0][1:]))

    # print(polyon_shps_list)
    # sys.exit()

    # check the projections of the shapefiles; they should all be the same
    new_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(
        polyon_shps_list[0])
    for idx in range(len(polyon_shps_list) - 1):
        shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(
            polyon_shps_list[idx + 1])
        if shp_proj4 != new_shp_proj4:
            raise ValueError('error, projection inconsistency between %s and %s' %
                             (new_shp_proj4, shp_proj4))

    import remove_nonActive_thawSlumps
    import polygons_change_analyze

    # polygon change analysis
    polygons_change_analyze.cal_multi_temporal_iou_and_occurrence(
        polyon_shps_list, para_file)

    # remove non active polygons
    remove_nonActive_thawSlumps.remove_non_active_thaw_slumps(
        polyon_shps_list, para_file)

    # back up files and conduct evaluation
    for idx, shp_path in enumerate(polyon_shps_list):

        # evaluation files
        shp_rmTimeiou = io_function.get_name_by_adding_tail(
            shp_path, 'rmTimeiou')
        basic.outputlogMessage('(%d/%d) evaluation of %s' %
                               (idx + 1, len(polyon_shps_list), shp_rmTimeiou))

        # evaluation
        args_list = [
            os.path.join(deeplabRS, 'evaluation_result.py'), '-p', para_file,
            shp_rmTimeiou
        ]
        if basic.exec_command_args_list_one_file(
                args_list, 'evaluation_report.txt') is False:
            return False

        I_idx_str = re.findall(r'I\d+', os.path.basename(shp_rmTimeiou))

        old_eva_report = io_function.get_file_list_by_pattern(
            shp_dir, I_idx_str[0] + '*eva_report*' + '.txt')
        old_eva_report = [
            item for item in old_eva_report if 'rmTimeiou' not in item
        ]

        old_eva_report_name = old_eva_report[0]

        eva_report_name = io_function.get_name_by_adding_tail(
            old_eva_report_name, 'rmTimeiou')
        # io_function.move_file_to_dst(old_eva_report,backup_eva_report)
        # io_function.move_file_to_dst('evaluation_report.txt', old_eva_report)
        io_function.move_file_to_dst('evaluation_report.txt',
                                     eva_report_name,
                                     overwrite=True)

        # back up the shape files (no_need)

    basic.outputlogMessage(
        'Finish removing polygons using multi-temporal mapping results')
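
io_function.get_file_list_by_pattern is presumably a thin wrapper over glob; a sketch under that assumption, plus a quick check that the I0..In sort used above orders the indices numerically rather than lexically (the file names are made up):

import glob
import os
import re

def get_file_list_by_pattern(folder, pattern):
    # assumed implementation: match the pattern inside the folder
    return glob.glob(os.path.join(folder, pattern))

names = ['I2_polygons.shp', 'I0_polygons.shp', 'I10_polygons.shp']
names.sort(key=lambda x: int(re.findall(r'I\d+', os.path.basename(x))[0][1:]))
print(names)  # ['I0_polygons.shp', 'I2_polygons.shp', 'I10_polygons.shp']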