def ransac(points_1, points_2, transform_function, num_points, num_steps,
           inliners_margin):
    """Robustly select matching point pairs with RANSAC.

    Repeatedly fits a transform on `num_points` randomly sampled
    correspondences and keeps the model with the most inliers, i.e. pairs
    whose transformed point lands within `inliners_margin` (SSD distance)
    of its partner.

    Args:
        points_1, points_2: parallel sequences of corresponding points.
        transform_function: callable(pts_a, pts_b) -> homogeneous transform
            usable by transforms.applyHomogTransformation.
        num_points: sample size drawn per RANSAC iteration.
        num_steps: number of RANSAC iterations.
        inliners_margin: maximum SSD distance for a pair to count as inlier.

    Returns:
        (good_points_1, good_points_2): the inlier correspondences of the
        best model, with duplicate points removed from each side.
    """
    good_indexes = range(len(points_1))
    best_inliners = []
    # BUG FIX: `xrange` does not exist in Python 3; `range` is already lazy.
    for _ in range(num_steps):
        n_indexes = random.sample(good_indexes, num_points)
        points_1_transform = [points_1[i] for i in n_indexes]
        points_2_transform = [points_2[i] for i in n_indexes]
        T = transform_function(points_1_transform, points_2_transform)
        inliners = []
        for index in good_indexes:
            point_it_should_be = transforms.applyHomogTransformation(
                T, points_1[index])
            point_it_is = points_2[index]
            distance = ssdPoint(point_it_is, point_it_should_be)
            if distance < inliners_margin:
                inliners.append(index)
        if len(inliners) > len(best_inliners):
            best_inliners = inliners
    # Deduplicate: keep a pair only if neither point was already selected.
    good_points_1 = []
    good_points_2 = []
    for index in best_inliners:
        if (points_1[index] not in good_points_1
                and points_2[index] not in good_points_2):
            good_points_1.append(points_1[index])
            good_points_2.append(points_2[index])
    return good_points_1, good_points_2
def defineMergedImageDimensions(image_1, image_2, H):
    """Bounding box of image_2 plus image_1 warped by homography H.

    The four corners of image_1 are mapped through H into image_2's frame;
    together with image_2's own corners they define the merged canvas.

    Returns:
        (top_left, bottom_right): (x, y) tuples of the min and max corners.
    """
    h1, w1 = image_1.shape[0], image_1.shape[1]
    h2, w2 = image_2.shape[0], image_2.shape[1]
    corners_1 = [(0, 0), (w1 - 1, 0), (0, h1 - 1), (w1 - 1, h1 - 1)]
    corners_2 = [(0, 0), (w2 - 1, 0), (0, h2 - 1), (w2 - 1, h2 - 1)]
    warped = [transforms.applyHomogTransformation(H, c) for c in corners_1]
    xs, ys = zip(*(warped + corners_2))
    return (min(xs), min(ys)), (max(xs), max(ys))
def defineMergedImageDimensions(image_1, image_2, H):
    """Bounding box of image_2 plus image_1 warped by homography H.

    NOTE(review): this re-definition shadows an identical earlier
    `defineMergedImageDimensions` in this file — likely a merge artifact;
    consider removing one copy.

    Returns:
        (top_left, bottom_right): (x, y) tuples of the min and max corners.
    """
    def corners(img):
        # Four corner coordinates (x, y) of an image, inclusive bounds.
        h, w = img.shape[0], img.shape[1]
        return [(0, 0), (w - 1, 0), (0, h - 1), (w - 1, h - 1)]

    warped_corners = []
    for corner in corners(image_1):
        warped_corners.append(transforms.applyHomogTransformation(H, corner))
    all_points = warped_corners + corners(image_2)
    all_x = [p[0] for p in all_points]
    all_y = [p[1] for p in all_points]
    return (min(all_x), min(all_y)), (max(all_x), max(all_y))
def fill(source, target, T, current_source_mask=None):
    """Warp `source` into `target` under homography T, blending overlaps.

    Target pixels covered by the warped source quad are filled by inverse
    mapping through T; where `target` already has content, the new pixel is
    alpha-blended using a per-pixel blend mask.

    Args:
        source: source image array (height x width x channels).
        target: destination image array; mutated in place.
        T: invertible 3x3 homography mapping source coords -> target coords.
        current_source_mask: optional blend mask aligned with `source`;
            when None a distance-based mask is generated.

    Returns:
        The (mutated) target array.
    """
    source_height = source.shape[0]
    source_width = source.shape[1]
    target_height = target.shape[0]
    target_width = target.shape[1]

    # Project the source corners into target space; they bound the fill area.
    pts_source = [(0, 0), (source_width - 1, 0),
                  (source_width - 1, source_height - 1),
                  (0, source_height - 1)]
    pts_target = [transforms.applyHomogTransformation(T, pt)
                  for pt in pts_source]
    pts_target_x, pts_target_y = zip(*pts_target)

    # Rasterize the projected quad only if some corner lands inside the
    # target; otherwise fall back to scanning every target pixel.
    is_someone_in = any(
        0 <= pt[0] < target_width and 0 <= pt[1] < target_height
        for pt in pts_target)
    if is_someone_in:
        fill_mask = np.zeros((target_height, target_width))
        rr, cc = polygon(np.array(pts_target_y), np.array(pts_target_x),
                         fill_mask.shape)
        fill_mask[rr, cc] = 1
    else:
        fill_mask = np.ones((target_height, target_width))
    target_pixels = np.where(fill_mask == 1)

    T_inv = np.linalg.inv(T)
    transformed_mask = np.zeros((target_height, target_width))
    # BUG FIX: `== None` on a numpy array is an element-wise comparison and
    # raises "truth value is ambiguous" when a mask is passed; use identity.
    if current_source_mask is None:
        blend_mask = generateDistMask(source_width, source_height)
    else:
        blend_mask = current_source_mask
    mask_height = blend_mask.shape[0]
    mask_width = blend_mask.shape[1]

    for y, x in zip(target_pixels[0], target_pixels[1]):
        original_pixel_coords = transforms.applyHomogTransformation(
            T_inv, (x, y))
        original_pixel_x = int(original_pixel_coords[0])
        original_pixel_y = int(original_pixel_coords[1])
        if (0 <= original_pixel_x < source_width
                and 0 <= original_pixel_y < source_height):
            original_pixel = source[original_pixel_y, original_pixel_x]
        else:
            original_pixel = 0
        if (0 <= original_pixel_x < mask_width
                and 0 <= original_pixel_y < mask_height):
            mask_pixel = blend_mask[original_pixel_y, original_pixel_x]
        else:
            mask_pixel = 0
        transformed_mask[y, x] = mask_pixel
        # BUG FIX: the original tested channel 1 twice and never channel 2;
        # an "empty" target pixel should consider all three channels.
        if (target[y, x][0] == 0 or target[y, x][1] == 0
                or target[y, x][2] == 0):
            target[y, x] = original_pixel
        else:
            target[y, x] = (original_pixel * transformed_mask[y, x]
                            + (1 - transformed_mask[y, x]) * target[y, x])
    return target
def fill(source, target, T, current_source_mask=None):
    """Warp `source` into `target` under homography T, blending overlaps.

    NOTE(review): this re-definition shadows an identical earlier `fill` in
    this file — likely a merge artifact; consider removing one copy.

    Args:
        source: source image array (height x width x channels).
        target: destination image array; mutated in place.
        T: invertible 3x3 homography mapping source coords -> target coords.
        current_source_mask: optional blend mask aligned with `source`;
            when None a distance-based mask is generated.

    Returns:
        The (mutated) target array.
    """
    source_height = source.shape[0]
    source_width = source.shape[1]
    target_height = target.shape[0]
    target_width = target.shape[1]

    # Source corners projected into target space bound the region to fill.
    pts_source = [(0, 0), (source_width - 1, 0),
                  (source_width - 1, source_height - 1),
                  (0, source_height - 1)]
    pts_target = []
    for pt in pts_source:
        pts_target.append(transforms.applyHomogTransformation(T, pt))
    pts_target_x, pts_target_y = zip(*pts_target)

    # Rasterize the quad when some corner is inside the target; otherwise
    # fall back to scanning every target pixel.
    is_someone_in = False
    for pt in pts_target:
        if 0 <= pt[0] < target_width and 0 <= pt[1] < target_height:
            is_someone_in = True
            break
    if is_someone_in:
        fill_mask = np.zeros((target_height, target_width))
        rr, cc = polygon(np.array(pts_target_y), np.array(pts_target_x),
                         fill_mask.shape)
        fill_mask[rr, cc] = 1
    else:
        fill_mask = np.ones((target_height, target_width))
    target_pixels = np.where(fill_mask == 1)

    T_inv = np.linalg.inv(T)
    transformed_mask = np.zeros((target_height, target_width))
    # BUG FIX: `== None` on a numpy array is an element-wise comparison and
    # raises "truth value is ambiguous" when a mask is passed; use identity.
    if current_source_mask is None:
        blend_mask = generateDistMask(source_width, source_height)
    else:
        blend_mask = current_source_mask
    mask_height = blend_mask.shape[0]
    mask_width = blend_mask.shape[1]

    for y, x in zip(target_pixels[0], target_pixels[1]):
        original_pixel_coords = transforms.applyHomogTransformation(
            T_inv, (x, y))
        original_pixel_x = int(original_pixel_coords[0])
        original_pixel_y = int(original_pixel_coords[1])
        if (0 <= original_pixel_x < source_width
                and 0 <= original_pixel_y < source_height):
            original_pixel = source[original_pixel_y, original_pixel_x]
        else:
            original_pixel = 0
        if (0 <= original_pixel_x < mask_width
                and 0 <= original_pixel_y < mask_height):
            mask_pixel = blend_mask[original_pixel_y, original_pixel_x]
        else:
            mask_pixel = 0
        transformed_mask[y, x] = mask_pixel
        # BUG FIX: the original tested channel 1 twice and never channel 2;
        # an "empty" target pixel should consider all three channels.
        if (target[y, x][0] == 0 or target[y, x][1] == 0
                or target[y, x][2] == 0):
            target[y, x] = original_pixel
        else:
            target[y, x] = (original_pixel * transformed_mask[y, x]
                            + (1 - transformed_mask[y, x]) * target[y, x])
    return target