def test_transform(self):
    with self.subTest("Shape & Homogeneous"):
        # Just test identity
        # Single entry
        self.assertEqual(cvext.AffineTransform().forward([1, 1]).ndim, 1)
        self.assertEqual(len(cvext.AffineTransform().forward([1, 1])), 2)
        self.assertTrue(
            np.array_equal(cvext.AffineTransform().forward([1, 1]), [1, 1]))
        self.assertEqual(cvext.AffineTransform().forward([1, 1, 1]).ndim, 1)
        self.assertEqual(len(cvext.AffineTransform().forward([1, 1, 1])), 2)
        self.assertTrue(
            np.array_equal(cvext.AffineTransform().forward([1, 1, 1]), [1, 1]))

        # Multiple points, Cartesian and homogeneous
        pts = [[1, 1], [0, 5], [1, 3]]
        pts_h = [[1, 1, 1], [0, 2.5, 0.5], [2, 6, 2]]
        self.assertEqual(cvext.AffineTransform().forward(pts).ndim, 2)
        self.assertEqual(len(cvext.AffineTransform().forward(pts)), 3)
        self.assertEqual(cvext.AffineTransform().forward(pts).shape[1], 2)
        self.assertTrue(
            np.array_equal(cvext.AffineTransform().forward(pts), pts))
        self.assertEqual(cvext.AffineTransform().forward(pts_h).ndim, 2)
        self.assertEqual(len(cvext.AffineTransform().forward(pts_h)), 3)
        self.assertEqual(cvext.AffineTransform().forward(pts_h).shape[1], 2)
        self.assertTrue(
            np.array_equal(cvext.AffineTransform().forward(pts_h), pts))

    with self.subTest("Transform"):
        # Compare with SKAffine on random transform parameters.
        for _ in range(10):
            mdl = self.random_transform()
            affine = cvext.AffineTransform(scale=mdl["S"], rotation=mdl["R"],
                                           shear=mdl["M"], translation=mdl["T"])
            skaffine = SKAffine(matrix=np.append(
                affine.matrix_f, np.asarray([[0, 0, 1]]), axis=0))
            pts = np.random.randint(-10, 20, size=[10, 2])
            self.assertTrue(np.allclose(affine.forward(pts), skaffine(pts)))
            self.assertTrue(
                np.allclose(affine.inverse(pts), skaffine.inverse(pts)))
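# The test above depends on a `random_transform` helper whose body is not
# shown. Below is a hypothetical sketch of what such a helper might look
# like, assuming only what the test reads from it: a dict with scale "S",
# rotation "R", shear "M" and translation "T". The value ranges are
# illustrative assumptions, not taken from the source.
import numpy as np

def random_transform(self):
    """Hypothetical helper: draw random affine parameters for the test."""
    rng = np.random.default_rng()
    return {
        "S": rng.uniform(0.5, 2.0, size=2),   # anisotropic scale (sx, sy)
        "R": rng.uniform(-np.pi, np.pi),      # rotation angle in radians
        "M": rng.uniform(-0.5, 0.5),          # shear angle in radians
        "T": rng.uniform(-10.0, 10.0, size=2) # translation (tx, ty)
    }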
def plot_annotations(imgid, clustered_annotations):
    try:
        img = mpimg.imread(IMAGE_DIR + imgid + ".JPG")
    except IOError:
        print("WARNING: couldn't find image '%s'" % imgid)
        return False

    # Plot all annotations
    plt.figure()
    ax = plt.gca()
    plt.imshow(img)
    for annotation, cluster_id in clustered_annotations:
        color = COLOR_MAP[cluster_id]
        rect = Rectangle((annotation.left, annotation.top),
                         annotation.width, annotation.height,
                         fill=False, color=color)
        ax.add_patch(rect)
    plt.show()

    # Plot median human and computer annotations
    by_cluster = annotations_by_cluster(clustered_annotations)
    all_medians = {
        clusterid: (median_annotation(annotations),
                    median_annotation([annotation for annotation in annotations
                                       if annotation[0].is_human]),
                    median_annotation([annotation for annotation in annotations
                                       if not annotation[0].is_human]))
        for clusterid, annotations in by_cluster.items()
    }
    plt.figure()
    ax = plt.gca()
    plt.imshow(img)
    for clusterid, medians in all_medians.items():
        color = COLOR_MAP[clusterid]
        for median in medians:
            if median is None:
                continue
            rect = Rectangle((median.left, median.top),
                             median.width, median.height,
                             fill=False, color=color)
            ax.add_patch(rect)
    plt.show()

    # Affine transform the image to a consistent 400x400 shape and plot again.
    from skimage.transform import AffineTransform, warp

    # Calculate scale and corners.
    row_scale = float(img.shape[0]) / 400.0
    col_scale = float(img.shape[1]) / 400.0
    src_corners = np.array([[1, 1], [1, 400.0], [400.0, 400.0]]) - 1
    dst_corners = np.zeros(src_corners.shape, dtype=np.double)
    # Take into account that the 0th pixel is at position (0.5, 0.5).
    dst_corners[:, 0] = col_scale * (src_corners[:, 0] + 0.5) - 0.5
    dst_corners[:, 1] = row_scale * (src_corners[:, 1] + 0.5) - 0.5

    # Do the transformation; `warp` interprets `tform` as the
    # output-to-input (inverse) mapping.
    tform = AffineTransform()
    tform.estimate(src_corners, dst_corners)
    resized = warp(img, tform, output_shape=(400, 400), order=1,
                   mode='constant', cval=0)

    # Plot the transformed image.
    plt.figure()
    ax = plt.gca()
    plt.imshow(resized)
    for clusterid, medians in all_medians.items():
        color = COLOR_MAP[clusterid]
        for median in medians:
            if median is None:
                continue
            # Apply the transformation to each rectangle: since `tform` maps
            # output coords to input coords, `tform.inverse` maps the original
            # image coords into the resized image.
            corners = np.array([[median.left, median.top],
                                [median.left + median.width,
                                 median.top + median.height]])
            new_corners = tform.inverse(corners)
            rect = Rectangle(new_corners[0, :],
                             new_corners[1, 0] - new_corners[0, 0],
                             new_corners[1, 1] - new_corners[0, 1],
                             fill=False, color=color)
            ax.add_patch(rect)
    plt.show()
    return True
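# A minimal, self-contained sketch of the coordinate convention used above,
# with assumed image dimensions (800x1200) that are not from the source. It
# shows why annotations are mapped with `tform.inverse`: the estimated
# transform goes from resized (400x400) coordinates to original-image
# coordinates, so its inverse carries original coordinates into the output.
import numpy as np
from skimage.transform import AffineTransform

h, w = 800, 1200                      # assumed original image size
row_scale, col_scale = h / 400.0, w / 400.0

src = np.array([[0.0, 0.0], [0.0, 399.0], [399.0, 399.0]])  # resized coords
dst = np.empty_like(src)
dst[:, 0] = col_scale * (src[:, 0] + 0.5) - 0.5              # original x
dst[:, 1] = row_scale * (src[:, 1] + 0.5) - 0.5              # original y

tform = AffineTransform()
tform.estimate(src, dst)

# A point at (600, 400) in the original image lands near the centre
# (~200, ~200) of the 400x400 output.
print(tform.inverse(np.array([[600.0, 400.0]])))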
class DefaultRS(ReferenceSpace):
    """Default reference space.

    Attributes
    ----------
    tform : skimage.transform.GeometricTransform
        Affine transformation.

    keypoints : dict
        Defining landmarks used for estimating the parameters of the model.

    """

    def __init__(self):
        """Construct."""
        self.tform = AffineTransform()
        self.keypoints = {
            'CHIN': (0, 1),
            'UPPER_TEMPLE_L': (-1, -1),
            'UPPER_TEMPLE_R': (1, -1),
            'UPPERMOST_NOSE': (0, -1),
            'MIDDLE_NOSTRIL': (0, 0)
        }

    def estimate(self, lf):
        """Estimate parameters of the affine transformation.

        Parameters
        ----------
        lf : pychubby.detect.LandmarkFace
            Instance of the ``LandmarkFace``.

        """
        src = []
        dst = []

        for name, ref_coordinate in self.keypoints.items():
            dst.append(ref_coordinate)
            src.append(lf[name])

        src = np.array(src)
        dst = np.array(dst)

        self.tform.estimate(src, dst)

    def ref2inp(self, coords):
        """Transform from reference to input space.

        Parameters
        ----------
        coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y
            reference coordinates.

        Returns
        -------
        tformed_coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y
            coordinates in the input image corresponding row-wise to
            `coords`.

        """
        return self.tform.inverse(coords)

    def inp2ref(self, coords):
        """Transform from input to reference space.

        Parameters
        ----------
        coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y
            coordinates in the input space.

        Returns
        -------
        tformed_coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y
            coordinates in the reference space corresponding row-wise to
            `coords`.

        """
        return self.tform(coords)
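# A minimal usage sketch under assumptions: `DefaultRS.estimate` only needs
# an object indexable by landmark name, so a hypothetical stand-in (not the
# real pychubby.detect.LandmarkFace) is used here, with made-up pixel
# coordinates. It demonstrates the input->reference->input round trip.
import numpy as np

class FakeLandmarkFace:
    """Hypothetical stand-in exposing `lf[name] -> (x, y)`."""

    def __init__(self, points):
        self._points = points

    def __getitem__(self, name):
        return self._points[name]

lf = FakeLandmarkFace({
    'CHIN': (100, 180),
    'UPPER_TEMPLE_L': (60, 60),
    'UPPER_TEMPLE_R': (140, 60),
    'UPPERMOST_NOSE': (100, 60),
    'MIDDLE_NOSTRIL': (100, 120),
})

rs = DefaultRS()
rs.estimate(lf)

# Round trip: mapping into the reference space and back should recover the
# original input coordinates.
pts = np.array([[100.0, 120.0], [60.0, 60.0]])
print(np.allclose(rs.ref2inp(rs.inp2ref(pts)), pts))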