Code Example #1
File: morphing.py Project: samlaujw/image-morphing
    def _compute_frame(self, triangulation, t, shape, source_img, target_img, source_points, target_points):
        """
        Computes a frame of the image morph.

        Args:
          - triangulation: The scipy.spatial.Delaunay triangulation.
          - t: The time value in the range [0, 1].
          - shape: The shape of the frame, which should match the shape of
            the original source and target images.
          - source_img: The source image.
          - target_img: The target image.
          - source_points: The control points in the source image.
          - target_points: The corresponding control points in the target image.

        Returns:
          - The frame of the morph at time t.
        """
        frame = np.zeros(shape=shape, dtype='uint8')

        # The number of triangles is determined by the simplices attribute
        num_triangles = len(triangulation.simplices)
        average_triangles = np.zeros(shape=(num_triangles, 3, 2), dtype=np.float32)

        for triangle_index in range(num_triangles):
            simplices = triangulation.simplices[triangle_index]
            for v in range(3):
                simplex = simplices[v]
                P = source_points[simplex]
                Q = target_points[simplex]
                # Interpolate each vertex linearly between source and target
                average_triangles[triangle_index][v] = P + t * (Q - P)

            # Compute the affine projection to the source and target triangles
            source_triangle = np.float32([
                source_points[simplices[0]],
                source_points[simplices[1]],
                source_points[simplices[2]]
            ])
            target_triangle = np.float32([
                target_points[simplices[0]],
                target_points[simplices[1]],
                target_points[simplices[2]]
            ])
            average_triangle = np.float32(average_triangles[triangle_index])
            source_transform = cv2.getAffineTransform(average_triangle, source_triangle)
            target_transform = cv2.getAffineTransform(average_triangle, target_triangle)

            average_triangulation = Delaunay(average_triangle)

            # For each point in the average triangle, find the corresponding points
            # in the source and target triangle, and find the weighted average.
            average_points = util.get_points_in_triangulation(average_triangle, average_triangulation)
            for point in average_points:
                # Map the point from the average triangle into the source and
                # target images using homogeneous coordinates
                homogeneous = np.array([point[0], point[1], 1])
                source_point = source_transform @ homogeneous
                target_point = target_transform @ homogeneous

                # Perform a weighted average per-channel
                for c in range(0, shape[2]):
                    source_val = source_img[int(source_point[1]), int(source_point[0]), c]
                    target_val = target_img[int(target_point[1]), int(target_point[0]), c]
                    frame[point[1], point[0], c] = round((1 - t) * source_val + t * target_val)
        return frame
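A minimal driver sketch for this method, assuming the surrounding class stores the images and control points under the attribute names used below (those names are assumptions, not taken from the project):

    def compute_morph(self, triangulation, frame_count=30):
        # Hypothetical driver: sample t evenly in [0, 1] to build the sequence.
        frames = []
        for i in range(frame_count):
            t = i / (frame_count - 1)
            frames.append(self._compute_frame(
                triangulation, t, self.source_img.shape,
                self.source_img, self.target_img,
                self.source_points, self.target_points))
        return frames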
Code Example #2
    def __init__(self, ngsim_origin: np.ndarray, carla_origin: np.ndarray):
        assert ngsim_origin.shape == (2,), "Expected 2d point"
        assert carla_origin.shape == (2,), "Expected 2d point"

        # One NGSIM unit (a pixel) maps to PIXELS_TO_METERS CARLA units (meters)
        ngsim_base_vectors = np.array([[1, 0], [0, 1]])
        scale = PIXELS_TO_METERS
        carla_base_vectors = np.array([[1, 0], [0, 1]]) * scale

        self._transformation_matrix = cv2.getAffineTransform(
            src=np.float32([ngsim_origin, *(ngsim_origin + ngsim_base_vectors)]),
            dst=np.float32([carla_origin, *(carla_origin + carla_base_vectors)]),
        )
        # A homogeneous coordinate of 0 drops the translation column, leaving
        # only the rotation/scale part of the affine matrix.
        NO_TRANSLATION = 0
        self._rotation = Vector2.from_numpy(
            self._transformation_matrix @ np.array([1, 0, NO_TRANSLATION])
        ).yaw_radians
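A hypothetical usage sketch; the class name NgsimToCarlaTransform and the origin values below are assumptions, not taken from the project:

# Hypothetical usage; the class name and origin values are assumptions.
transform = NgsimToCarlaTransform(
    ngsim_origin=np.array([0.0, 0.0]),
    carla_origin=np.array([100.0, 200.0]),
)
# Apply the 2x3 affine matrix to an NGSIM point in homogeneous form.
carla_xy = transform._transformation_matrix @ np.array([12.0, 34.0, 1.0])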
Code Example #3
def calc_matrix(reference_points_target):

    global img
    cam = cv2.VideoCapture(0)  # 0 -> index of camera
    s, img = cam.read()

    cv2.namedWindow("image", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("image", 1280, 720)
    cv2.setMouseCallback("image", draw_circle)

    while True:
        cv2.imshow('image', img)
        k = cv2.waitKey(20) & 0xFF
        if k == 27:  # Esc
            break
        elif k == ord("p"):
            print(mouseX, mouseY)
        elif k == ord("b"):
            pos_blue = [mouseX, mouseY]
            print('Blue Marked at ' + str(pos_blue))
        elif k == ord("r"):
            pos_red = [mouseX, mouseY]
            print('Red Marked at ' + str(pos_red))
        elif k == ord("y"):
            pos_yellow = [mouseX, mouseY]
            print('Yellow Marked at ' + str(pos_yellow))
        elif k == ord("q"):
            cv2.destroyAllWindows()
            break

    cam.release()

    try:
        pos_blue, pos_red, pos_yellow
    except NameError:
        raise Exception("Not all three reference points were assigned")

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert from OpenCV's BGR order to RGB
    rows, cols, ch = img.shape

    pts1 = np.float32([pos_blue, pos_red, pos_yellow])
    pts2 = np.float32(reference_points_target)

    M = cv2.getAffineTransform(pts1, pts2)

    return M
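A hypothetical usage sketch for the returned matrix; the target coordinates below are made-up values:

# Hypothetical usage; the target reference coordinates are made-up values.
M = calc_matrix([[0, 0], [640, 0], [0, 480]])
# Map a clicked point (x, y) into the target coordinate system.
mapped = M @ np.array([100, 200, 1], dtype=np.float32)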
Code Example #4
File: data_enhance.py Project: h123c/img_enhance
def transform_img(img, plane1, plane2):
    # Affine transform
    # Transform the image: three point pairs determine the transform matrix.
    # Just as three points determine a plane, fixing where three points land
    # is enough to recover the affine matrix, which is then applied
    # with warpAffine.
    rows, cols = img.shape[:2]

    point1 = np.float32(plane1)
    point2 = np.float32(plane2)
    # e.g. point2 = np.float32([[10, 100], [300, 50], [100, 250]])

    M = cv.getAffineTransform(point1, point2)
    dst = cv.warpAffine(img, M, (cols, rows), borderValue=(255, 255, 255))

    # cv.imshow("1",dst)
    # cv.waitKey(0)
    # cv.destroyAllWindows()
    return dst
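A hypothetical call, assuming cv is the OpenCV module and 'input.jpg' exists; the two triangles below are made-up values:

img = cv.imread('input.jpg')
plane1 = [[50, 50], [200, 50], [50, 200]]    # source triangle
plane2 = [[10, 100], [200, 50], [100, 250]]  # where those points should land
warped = transform_img(img, plane1, plane2)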
Code Example #5
def cropBox(img, ul, br, resH, resW):
    ul = ul.int()
    br = (br - 1).int()
    # br = br.int()
    lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        img = img[np.newaxis, :]

    box_shape = [(br[1] - ul[1]).item(), (br[0] - ul[0]).item()]
    pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
    # Zero out everything outside the bounding box
    if ul[1] > 0:
        img[:, :ul[1], :] = 0
    if ul[0] > 0:
        img[:, :, :ul[0]] = 0
    if br[1] < img.shape[1] - 1:
        img[:, br[1] + 1:, :] = 0
    if br[0] < img.shape[2] - 1:
        img[:, :, br[0] + 1:] = 0

    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)

    src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]],
                         np.float32)
    src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]],
                         np.float32)
    dst[0, :] = 0
    dst[1, :] = np.array([resW - 1, resH - 1], np.float32)

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    trans = cv.getAffineTransform(np.float32(src), np.float32(dst))

    dst_img = cv.warpAffine(torch_to_im(img),
                            trans, (resW, resH),
                            flags=cv.INTER_LINEAR)

    return im_to_torch(torch.Tensor(dst_img))
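The helper get_3rd_point is not shown here; a common implementation in pose-estimation codebases (an assumption, not taken from this project) rotates the first-to-second vector by 90 degrees to get a third, non-collinear point:

def get_3rd_point(a, b):
    # Assumption: rotate the vector from b to a by 90 degrees around b,
    # yielding a third point so the three points define an affine transform.
    direction = a - b
    return b + np.array([-direction[1], direction[0]], dtype=np.float32)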
Code Example #6
    def transform(self, image):
        rows, cols = image.shape[:2]
        scale1 = (random.randrange(30, 45) / 100)
        scale2 = (random.randrange(30, 45) / 100)
        scale3 = (random.randrange(1, 5) / 10)
        scale4 = (random.randrange(1, 5) / 10)
        point1 = numpy.float32([[50, 50], [rows - 50, 50], [50, cols - 50],
                                [rows - 50, cols - 50]])
        point2 = numpy.float32([[(100 * scale1), (100 * scale2)],
                                [(rows * (1 - scale1)), (cols * scale3)],
                                [(rows * scale4), (cols * (1 - scale2))],
                                [(rows * (1 - scale3)),
                                 (cols * (1 - scale4))]])
        # The affine transform only needs three of the four point pairs
        point1_ = point1[0:3]
        point2_ = point2[0:3]
        matrix_aff = cv2.getAffineTransform(point1_, point2_)
        img_aff = cv2.warpAffine(image,
                                 matrix_aff, (cols, rows),
                                 borderValue=(255, 255, 255))
        # The perspective transform uses all four point pairs
        matrix_per = cv2.getPerspectiveTransform(point1, point2)
        img_per = cv2.warpPerspective(image,
                                      matrix_per, (cols, rows),
                                      borderValue=(255, 255, 255))
        return img_aff, img_per
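A hypothetical usage sketch; the enclosing class is not shown, so the name Augmenter is an assumption:

# Hypothetical usage; the class name Augmenter is an assumption.
augmenter = Augmenter()
image = cv2.imread('sample.jpg')
img_aff, img_per = augmenter.transform(image)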
Code Example #7
import cv2
import numpy as np

# img, rows and cols were undefined in the original snippet; this setup is
# an assumption (the file name is borrowed from the affine step below).
img = cv2.imread('resource/drawing.png')
rows, cols = img.shape[:2]

# translation
tx = 100
ty = 50
m = np.float32([[1, 0, tx], [0, 1, ty]])
res = cv2.warpAffine(img, m, (cols, rows))
# The third argument of cv2.warpAffine() is the output image size as
# (width, height): width = number of columns, height = number of rows.
cv2.imshow('trans', res)

# rotation
m = cv2.getRotationMatrix2D((cols / 2, rows / 2), 45, 1)
res = cv2.warpAffine(img, m, (cols, rows))
cv2.imshow('rotat', res)

# affine trans
ps1 = np.float32([[50, 100], [200, 100], [50, 200]])
ps2 = np.float32([[10, 10], [280, 20], [150, 150]])
m = cv2.getAffineTransform(ps1, ps2)
res = cv2.warpAffine(cv2.imread('resource/drawing.png'), m, (300, 400))
cv2.imshow('affine', res)

# Perspective
ps1 = np.float32([[53, 58], [355, 51], [26, 371], [379, 379]])
ps2 = np.float32([[0, 0], [400, 0], [0, 400], [400, 400]])
m = cv2.getPerspectiveTransform(ps1, ps2)
res = cv2.warpPerspective(cv2.imread('resource/sudo.png'), m, (400, 400))
cv2.imshow('perspective', res)

# hold window
cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #8
File: day01.py Project: wenatie/study
# In[48]:
'''getAffineTransform(InputArray src, InputArray dst)
    src: triangle vertex coordinates in the input image.
    dst: triangle vertex coordinates in the output image.
    Three point pairs are required.
warpAffine(img, M, (cols, rows))
'''

img = cv2.imread('D:/my_code_text/lena.jpg', 1)
rows, cols = img.shape[:2]
pts1 = np.float32([[0, 0], [cols - 1, 0], [0, rows - 1]])
pts2 = np.float32([[cols * 0.3, rows * 0.1], [cols * 0.9, rows * 0.2],
                   [cols * 0.1, rows * 0.9]])

M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('dst', dst)
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()

# In[ ]:

# Perspective transform of an image

# In[ ]:
'''getPerspectiveTransform(src, dst)
    src: quadrilateral vertex coordinates in the input image.
    dst: quadrilateral vertex coordinates in the output image.
warpPerspective(img, M, (cols, rows))
'''
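The snippet ends with this docstring and no perspective example; a minimal sketch of the call it describes, reusing img, rows and cols from above (the corner coordinates are made-up values):

# Minimal sketch of the call described above; the corner coordinates
# are made-up values, not from the original file.
pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (300, 300))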
Code Example #9
File: transform.py Project: roc-n/Opencv_learning
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Translation (warpAffine)
img = cv2.imread('tree.jpg')
# translation = np.float32([[1, 0, 20], [0, 1, 50]])
# dst = cv2.warpAffine(img, translation, (cols, rows))

# Rotation
rows, cols = img.shape[:2]
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -35, 1)
# Double the output size so the rotated image is not clipped
dst = cv2.warpAffine(img, M, (2 * cols, 2 * rows))
cv2.imshow('img', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Affine transform
img = cv2.imread('image/transform.jpg')
rows, cols, ch = img.shape

pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
pts2 = np.float32([[10, 100], [200, 50], [100, 250]])

M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(img, M, (cols, rows))

plt.subplot(121), plt.imshow(img), plt.title('Input')
plt.subplot(122), plt.imshow(dst), plt.title('Output')
plt.show()

# Perspective transform
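The file ends at this section header; a minimal sketch of the perspective step that would follow, reusing img from above (the corner coordinates are made-up values):

# Minimal sketch; the corner coordinates are made-up values.
pts1 = np.float32([[60, 80], [420, 60], [30, 400], [450, 420]])
pts2 = np.float32([[0, 0], [400, 0], [0, 400], [400, 400]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (400, 400))
plt.subplot(121), plt.imshow(img), plt.title('Input')
plt.subplot(122), plt.imshow(dst), plt.title('Output')
plt.show()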