def hist_match(source_filename, template_filename, out_filename="radiation.jpg"):
    """
    Adjust the pixel values of an image so that its histogram matches the
    histogram of a template image (radiometric normalization), then save
    the result as a JPEG.

    Arguments:
    -----------
        source_filename: sequence
            Sequence whose first element is the path of the image to
            transform; the histogram is computed over the flattened array.
        template_filename: sequence
            Sequence whose first element is the path of the template image;
            it can have different dimensions to the source.
        out_filename: str
            Path the matched image is written to (default "radiation.jpg",
            preserving the previous hard-coded behavior).
    Returns:
    -----------
        matched: np.ndarray
            The transformed output image (uint8).
    """
    # Callers pass one-element sequences (e.g. argparse nargs lists).
    source_filename = source_filename[0]
    template_filename = template_filename[0]

    source_array = read_as_tuple_floats(read_as_dataset(source_filename))
    template_array = read_as_tuple_floats(read_as_dataset(template_filename))

    old_shape = source_array.shape
    source_array = source_array.ravel()
    template_array = template_array.ravel()

    # get the set of unique pixel values and their corresponding indices and
    # counts
    s_values, bin_idx, s_counts = np.unique(source_array,
                                            return_inverse=True,
                                            return_counts=True)

    t_values, t_counts = np.unique(template_array, return_counts=True)

    # take the cumsum of the counts and normalize by the number of pixels to
    # get the empirical cumulative distribution functions for the source and
    # template images (maps pixel value --> quantile)
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]

    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # interpolate linearly to find the pixel values in the template image
    # that correspond most closely to the quantiles in the source image
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
    result = interp_t_values[bin_idx].reshape(old_shape)

    # Cast to a dtype that image writers (cv2 / PIL) accept.
    result = result.astype('uint8')

    # Write the corrected image.
    print("Saving aligned image : ", out_filename)
    cv2.imwrite(out_filename, result)

    print('radiation correction done...')

    return result
def change_detect(img1_filename, img2_filename):
    """
    Per-band image-differencing change detection.

    Subtracts img2 from img1 band by band, zeroes every difference at or
    below a fixed threshold (200), and writes the result to 'change.tif'
    (GeoTIFF) using the georeferencing of img1.

    Arguments:
        img1_filename: path of the first (reference) raster.
        img2_filename: path of the second raster; assumed to have the
            same dimensions as the first.
    """
    read_dataset = read_as_dataset(img1_filename)

    img1 = read_as_array(read_as_dataset(img1_filename))
    img2 = read_as_array(read_as_dataset(img2_filename))

    # NOTE(review): if read_as_array yields unsigned integers this
    # subtraction wraps around instead of going negative — confirm dtype.
    change = img1 - img2

    change_dataset, x_size, y_size, band_count = create_rs_data(
        'change.tif', 'GTiff', read_dataset)

    change_threshold = 200

    for band in range(band_count):
        # Keep only differences strictly above the threshold.
        raster = np.where(change[:, :, band] > change_threshold,
                          change[:, :, band], 0)
        change_dataset.GetRasterBand(band + 1).WriteArray(raster)
        print("band " + str(band + 1) + " has been processed")

    print("change detect done...")
def classify_detect(img1_filename, img2_filename):
    """
    Post-classification change detection between two classified rasters.

    Every (class-in-img1, class-in-img2) pair whose labels differ is
    assigned a unique positive change tag; unchanged pixels keep tag 0.
    The tag map is rendered to 'class_change.jpg'.

    Arguments:
        img1_filename: path of the first classified raster.
        img2_filename: path of the second classified raster; assumed to
            have the same dimensions as the first.
    """
    read_dataset = read_as_dataset(img1_filename)

    img1 = read_as_array(read_as_dataset(img1_filename))
    img2 = read_as_array(read_as_dataset(img2_filename))

    x_size = read_dataset.RasterXSize
    y_size = read_dataset.RasterYSize

    change = np.zeros((x_size, y_size))

    # Class counts are derived from the label range, which assumes labels
    # are consecutive integers.
    min_img1 = np.min(img1)
    max_img1 = np.max(img1)
    min_img2 = np.min(img2)
    max_img2 = np.max(img2)

    img1_class_num = max_img1 - min_img1 + 1
    img2_class_num = max_img2 - min_img2 + 1

    # Build the tag table: each off-diagonal (i, j) pair — i.e. the class
    # changed — gets its own positive tag; the diagonal stays 0.
    tag = 1
    change_tags = np.zeros((img1_class_num, img2_class_num))
    for i in range(img1_class_num):
        for j in range(img2_class_num):
            if i != j:
                change_tags[i, j] = tag
                tag += 1

    # print(x_size, y_size)
    # NOTE(review): img1/img2 are indexed [y, x] while `change` is indexed
    # [x, y]; and the -1 offset assumes the minimum class label is 1
    # (labels below 1 or a different min would mis-index) — confirm both
    # against the data source.
    for x in range(x_size):
        for y in range(y_size):
            change[x, y] = change_tags[img1[y, x] - 1, img2[y, x] - 1]

    generate_classify_pic(
        change, 'class_change.jpg',
        img1_class_num * img2_class_num - min(img1_class_num, img2_class_num))
    print('detect change done...')
def k_means(k_num, filename, max_iter=20):
    """
    Unsupervised K-means classification of a raster image.

    Each iteration assigns every pixel to its nearest seed point (L1
    distance over all bands) and recomputes each seed as the mean of its
    member pixels.  The class map is written to 'classify_result.jpg' and
    the final seed points to 'classify.txt'.

    Arguments:
        k_num: number of clusters.
        filename: path of the raster to classify.
        max_iter: number of K-means iterations to run (default 20).
    """
    dataset = rs_data_pro.read_as_dataset(filename)
    rs_data_array = rs_data_pro.read_as_array(dataset)

    x_size = dataset.RasterXSize
    y_size = dataset.RasterYSize
    band_count = dataset.RasterCount

    kmeans_seed_points = create_seed(rs_data_array, x_size, y_size, k_num, band_count)

    classes_tag = np.zeros((x_size, y_size))

    for i_iter in range(max_iter):
        # L1 distance of every pixel to every seed point.
        distances = np.zeros((x_size, y_size, k_num))

        for i in range(k_num):
            for i_x in range(x_size):
                for i_y in range(y_size):
                    distances[i_x, i_y, i] = sum(abs(rs_data_array[i_x, i_y, ...] - kmeans_seed_points[i, ...]))

        # Assign each pixel to the nearest seed; argmin takes the first
        # index on ties, matching np.where(... == min)[0][0].
        classes_tag = np.argmin(distances, axis=2)

        # Recompute every seed as the mean of its member pixels.
        new_seed_points = np.zeros((k_num, band_count))
        # BUGFIX: counts previously started at 1, so every mean was
        # divided by (true count + 1) and biased toward zero.
        count_num = [0] * k_num

        for i in range(k_num):
            for i_x in range(x_size):
                for i_y in range(y_size):
                    if classes_tag[i_x, i_y] == i:
                        count_num[i] += 1
                        new_seed_points[i, ...] += rs_data_array[i_x, i_y, ...]

            if count_num[i] > 0:
                new_seed_points[i, ...] /= count_num[i]
            else:
                # Empty cluster: keep the previous seed instead of
                # collapsing it to the zero vector.
                new_seed_points[i, ...] = kmeans_seed_points[i, ...]

        kmeans_seed_points = new_seed_points

        # print(kmeans_seed_points)
        print(count_num)
        print('iter ' + str(i_iter) + " done...")

    generate_classify_pic(classes_tag, 'classify_result.jpg', k_num)
    # print(classes_tag)
    generate_txt('classify.txt', kmeans_seed_points)
def supervised_classify(classify_txt, classify_filename):
    """
    Minimum-distance supervised classification.

    Reads class seed vectors (one space-separated line per class) from a
    text file, assigns every pixel of the input raster to the seed with
    the smallest L1 distance over all bands, and renders the class map
    to 'classify_result2.jpg'.

    Arguments:
        classify_txt: path of the seed file (e.g. written by k_means).
        classify_filename: path of the raster to classify.
    """
    # BUGFIX: the file was previously opened without ever being closed;
    # a context manager guarantees the handle is released.
    # Seed-file layout: one line per class with a trailing separator, and
    # a final newline — hence the len-1 ranges below.
    with open(classify_txt, 'r') as seed_file:
        txt_lines = seed_file.read().split('\n')

    k_num = len(txt_lines) - 1
    band = len(txt_lines[0].split(" ")) - 1

    seeds = np.zeros((k_num, band))

    for i in range(k_num):
        numbers_txt = txt_lines[i].split(" ")
        for j in range(len(numbers_txt) - 1):
            seeds[i, j] = float(numbers_txt[j])

    rs_dataset = rs_data_pro.read_as_dataset(classify_filename)
    rs_data_array = rs_data_pro.read_as_array(rs_dataset)

    x_size = rs_dataset.RasterXSize
    y_size = rs_dataset.RasterYSize
    band_count = rs_dataset.RasterCount

    # L1 distance of every pixel to every seed vector.
    distances = np.zeros((x_size, y_size, k_num))

    for x in range(x_size):
        for y in range(y_size):
            for i in range(k_num):
                distances[x, y, i] = sum(abs(rs_data_array[x, y, ...] - seeds[i, ...]))

    # Nearest seed per pixel; argmin picks the first index on ties,
    # matching the original np.where(... == min)[0][0] behavior.
    classes_tag = np.argmin(distances, axis=2)

    generate_classify_pic(classes_tag, 'classify_result2.jpg', k_num)
    print("k near classify done...")
# Example #6 (예제 #6)
def registration(img_src, img_des):
    """
    Register a source image onto a destination image via SIFT + RANSAC.

    SIFT keypoints of both images are matched with a FLANN KD-tree
    matcher, filtered with Lowe's ratio test, and used to estimate a
    homography.  The source image is warped with that homography and the
    result is written to 'aligned.jpg'.

    Arguments:
        img_src: sequence whose first element is the source image path.
        img_des: sequence whose first element is the destination image path.

    Returns:
        The warped (registered) source image as an array, or None when
        too few good matches were found to estimate a homography.
    """
    src_filename = img_src[0]
    dst_filename = img_des[0]

    src_img = read_as_tuple_floats(read_as_dataset(src_filename))
    dst_img = read_as_tuple_floats(read_as_dataset(dst_filename))

    # Detect keypoints and compute their SIFT descriptors.
    sift = cv2.xfeatures2d.SIFT_create()
    src_kp, src_des = sift.detectAndCompute(src_img, None)
    dst_kp, dst_des = sift.detectAndCompute(dst_img, None)

    # Match descriptors with a FLANN KD-tree matcher (2 nearest each).
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(src_des, dst_des, k=2)
    print('matches finished!')

    # Lowe's ratio test: keep a match only when it is clearly better
    # than the second-best candidate.
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]

    # Minimum number of good matches required to estimate the homography.
    MIN_MATCH_COUNT = 6
    if len(good) <= MIN_MATCH_COUNT:
        # BUGFIX: the original printed this warning but then fell through
        # and crashed on the undefined src_pts; bail out explicitly.
        print("Not enough matches are found - %d/%d" %
              (len(good), MIN_MATCH_COUNT))
        return None

    src_pts = np.float32([src_kp[m.queryIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([dst_kp[m.trainIdx].pt
                          for m in good]).reshape(-1, 1, 2)
    print('good matches done!')

    # Estimate the homography with RANSAC outlier rejection.
    h, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)

    # Project image corners through the homography to size the canvas.
    # NOTE(review): corners come from dst_img here although src_img is
    # what gets warped — confirm the intended output framing.
    height, width, _ = dst_img.shape
    corners = np.array([[0, 0, 1], [0, height, 1], [width, 0, 1],
                        [width, height, 1]])
    trans_corners = np.dot(h, np.transpose(corners))
    # BUGFIX: normalize homogeneous coordinates; without dividing by the
    # third row, the canvas is mis-sized for truly projective homographies.
    trans_corners = trans_corners / trans_corners[2]

    # Output canvas size from the transformed corner extents.
    reg_width = int(max(trans_corners[0]))
    reg_height = int(max(trans_corners[1]))

    im_reg = cv2.warpPerspective(src_img, h, (reg_width, reg_height))

    # Write out the registered image.
    outFilename = "aligned.jpg"
    print("Saving aligned image : ", outFilename)
    cv2.imwrite(outFilename, im_reg)

    # Report the estimated homography.
    print("Estimated homography : \n", h)
    return im_reg