import cv2

img_star = cv2.imread('resource/star.png')
img = img_star.copy()
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]

hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)

for i in range(defects.shape[0]):
    s, e, f, d = defects[i, 0]
    start = tuple(cnt[s][0])
    end = tuple(cnt[e][0])
    far = tuple(cnt[f][0])
    cv2.line(img, start, end, [0, 255, 0], 2)
    cv2.circle(img, far, 5, [0, 0, 255], -1)

cv2.imshow('img', img)
cv2.waitKey(0)

# Point Polygon Test

# This function finds the shortest distance between a point in the image and a contour. It returns a signed distance: negative when the point is outside the contour, positive when it is inside, and zero when it lies on the contour.

# The third argument is measureDist. If it is True, the function returns the signed distance; if False, it only reports whether the point is inside, outside, or on the contour (returning +1, -1, or 0 respectively).

# If you don't need the actual distance, make sure the third argument is False, because computing the distance is time-consuming; passing False gives roughly a 2-3x speedup.

dist = cv2.pointPolygonTest(cnt, (50, 50), True)
inside = cv2.pointPolygonTest(cnt, (50, 50), False)
print('dist =', dist)
print('inside =', inside)
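# A quick check of that speedup claim (the numbers vary by machine and
# contour size, so treat this as a sketch, not a benchmark):
import timeit
t_dist = timeit.timeit(lambda: cv2.pointPolygonTest(cnt, (50, 50), True),
                       number=10000)
t_flag = timeit.timeit(lambda: cv2.pointPolygonTest(cnt, (50, 50), False),
                       number=10000)
print('signed distance: %.3fs  inside/outside only: %.3fs' % (t_dist, t_flag))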
    def draw_pose_rgb(npimg, humans, imgcopy=False):
        if imgcopy:
            npimg = np.copy(npimg)
        image_h, image_w = npimg.shape[:2]
        joints, bboxes, xcenter = [], [], []

        # for record and get dataset
        record_joints_norm = []

        for human in humans:
            xs, ys, centers = [], [], {}
            # draw every joint onto the image
            for i in range(CocoPart.Background.value):
                if i not in human.body_parts.keys():

                    # pad missing joints with zeros
                    record_joints_norm += [0.0, 0.0]
                    continue

                body_part = human.body_parts[i]
                center_x = body_part.x * image_w + 0.5
                center_y = body_part.y * image_h + 0.5
                center = (int(center_x), int(center_y))
                centers[i] = center

                # normalize to an assumed 1280x720 frame size
                record_joints_norm += [
                    round(center_x / 1280, 2),
                    round(center_y / 720, 2)
                ]

                xs.append(center[0])
                ys.append(center[1])
                # draw the joint
                cv.circle(npimg,
                          center,
                          3,
                          CocoColors[i],
                          thickness=TfPoseVisualizer.Thickness_ratio * 2,
                          lineType=8,
                          shift=0)
            # connect the joints that belong to the same person, pair by pair
            for pair_order, pair in enumerate(CocoPairsRender):
                if pair[0] not in human.body_parts.keys(
                ) or pair[1] not in human.body_parts.keys():
                    continue
                cv.line(npimg,
                        centers[pair[0]],
                        centers[pair[1]],
                        CocoColors[pair_order],
                        thickness=TfPoseVisualizer.Thickness_ratio,
                        lineType=8,
                        shift=0)

            # build an ROI region from each person's joint coordinates
            tl_x = min(xs)
            tl_y = min(ys)
            width = max(xs) - min(xs)
            height = max(ys) - min(ys)
            bboxes.append([tl_x, tl_y, width, height])

            # record all joints of the current frame
            joints.append(centers)

            # record COCO keypoint 1 (the neck) as xcenter
            if 1 in centers:
                xcenter.append(centers[1][0])
        return npimg, joints, bboxes, xcenter, record_joints_norm
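    # A sketch of consuming the return value (assuming the usual tf-pose
    # setup): each human contributes CocoPart.Background.value * 2 normalized
    # values, with zeros standing in for missing joints, so the flat record
    # reshapes into one fixed-length feature row per person:
    #
    #     _, joints, bboxes, xcenter, record = draw_pose_rgb(frame, humans)
    #     features = np.array(record, np.float32).reshape(len(humans), -1)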
Example #3
    # Resetting display
    display = np.ones((720, 1280, 3), dtype=np.uint8) * 255

    # Transforming points into image-centered coordinates, in meters
    points_list = []
    for p in refPt:
        point = np.array([(p[0] - center[0]) / mtp_ratio,
                          (center[1] - p[1]) / mtp_ratio],
                         dtype=np.float64)
        points_list.append(point)

    # Redrawing the points for confirmation
    for i in range(1, len(points_list)):
        cv2.line(
            display,
            tuple((points_list[i - 1] * mtp_ratio + center).astype(np.int32)),
            tuple((points_list[i] * mtp_ratio + center).astype(np.int32)),
            (255, 0, 0), 1)

    for point in points_list:
        cv2.circle(display, tuple(
            (point * mtp_ratio + center).astype(np.int32)), 3, (0, 255, 0), -1)

    # Time calculations
    delay_milliseconds = 20
    dt = delay_milliseconds / 1000

    init_position = points_list[0].copy()
    sec_point = points_list[1].copy()
    diff = sec_point - init_position
    init_heading = diff / np.linalg.norm(diff)
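    # If a scalar bearing is needed later, the unit heading converts directly
    # (assuming x grows to the right and y upward, per the transform above):
    init_heading_deg = np.degrees(np.arctan2(init_heading[1], init_heading[0]))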
Example #4
    if len(corners) > 0:
        frame = cv2.flip(frame, 1)
        ids = ids.flatten()

        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners

            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))

            # draw the bounding box of the ArUco detection
            cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)

            # compute and draw the center (x, y)-coordinates of the ArUco marker
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            centroid = [cX, cY]
            cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)

            # Flip back before text is written so the label isn't mirrored
            frame = cv2.flip(frame, 1)

            # draw the ArUco marker ID on the frame (the original anchored it
            # at topLeft[0], topLeft[1] - 15; font settings below complete the
            # truncated call)
            cv2.putText(frame, str(markerID), (10, 75),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
Example #5
def get_contours(raw_img, img, img_contour, frame_w, frame_h, dead_zone,
                 area_min, green_trackbars):
    direction = 0
    area = 0
    crop_xywh = None

    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    dead_zone_w = dead_zone
    dead_zone_h = dead_zone - 15

    for cnt in contours:

        area = cv2.contourArea(cnt)
        if area > area_min:

            cv2.drawContours(img_contour, [cnt], -1, (255, 0, 255), 3)
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)

            x, y, w, h = cv2.boundingRect(approx)
            crop_xywh = x, y, w, h

            if crop_xywh is not None:
                x, y, w, h = crop_xywh
                candidate_img = raw_img[y:y + h, x:x + w]
                candidate_img = cv2.resize(candidate_img, (320, 240))

                mask = np.ones(raw_img.shape[:2])
                mask = cv2.drawContours(mask, [cnt], -1, 0, cv2.FILLED)
                candidate_img2 = raw_img.copy()
                candidate_img2[mask.astype(bool), :] = 0

                candidate_img2 = candidate_img2[y:y + h, x:x + w]
                candidate_img2 = cv2.resize(candidate_img2, (320, 240))
            else:
                candidate_img = np.zeros((240, 320, 3), np.uint8)
                candidate_img2 = np.zeros((240, 320, 3), np.uint8)

            cand_contour = candidate_img.copy()
            data_green = green_trackbars.get_trackbar_values()
            area_min_green = data_green[-1]

            cand_dil, cand_res = prepare_img(data_green, candidate_img)
            correct_candidate = get_candidate_contours(cand_dil, cand_contour,
                                                       area_min_green)

            if not correct_candidate:
                no_candidate = np.zeros((240, 320, 3), np.uint8)
                candidate_img2 = np.zeros((240, 320, 3), np.uint8)
                stack_cand = stack_images(
                    1, ([no_candidate, cand_res, img_contour], [
                        cand_dil, candidate_img2,
                        np.zeros((240, 320, 3), np.uint8)
                    ]))
                cv2.imshow('candidate', stack_cand)

            else:
                cv2.rectangle(img_contour, (x, y), (x + w, y + h), (0, 255, 0),
                              5)

                stack_cand = stack_images(
                    1, ([candidate_img, cand_res, img_contour], [
                        cand_dil, candidate_img2,
                        np.zeros((240, 320, 3), np.uint8)
                    ]))
                cv2.imshow('candidate', stack_cand)

                # cv2.putText(img_contour, "Points: " + str(len(approx)), (x + w + 20, y + 20), cv2.FONT_HERSHEY_COMPLEX, .7,
                #             (0, 255, 0), 2)
                # cv2.putText(img_contour, "Area: " + str(int(area)), (x + w + 20, y + 45), cv2.FONT_HERSHEY_COMPLEX, 0.7,
                #             (0, 255, 0), 2)
                # print(area)
                # cv2.putText(img_contour, " " + str(int(x)) + " " + str(int(y)), (x - 20, y - 45), cv2.FONT_HERSHEY_COMPLEX,
                #             0.7,
                #             (0, 255, 0), 2)

                cx = int(x + (w / 2))
                cy = int(y + (h / 2))
                # print(cx, cy)

                # if cx < int(frame_w / 2) - dead_zone_w:
                #     cv2.putText(img_contour, " GO LEFT ", (20, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                #     cv2.rectangle(img_contour, (0, int(frame_h / 2 - dead_zone_h)),
                #                   (int(frame_w / 2) - dead_zone_w, int(frame_h / 2) + dead_zone_h), (0, 0, 255),
                #                   cv2.FILLED)
                #     direction = 1
                # elif cx > int(frame_w / 2) + dead_zone_w:
                #     cv2.putText(img_contour, " GO RIGHT ", (20, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                #     cv2.rectangle(img_contour, (int(frame_w / 2 + dead_zone_w), int(frame_h / 2 - dead_zone_h)),
                #                   (frame_w, int(frame_h / 2) + dead_zone_h), (0, 0, 255), cv2.FILLED)
                #     direction = 2
                # elif cy < int(frame_h / 2) - dead_zone_h:
                #     cv2.putText(img_contour, " GO UP ", (20, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                #     cv2.rectangle(img_contour, (int(frame_w / 2 - dead_zone_w), 0),
                #                   (int(frame_w / 2 + dead_zone_w), int(frame_h / 2) - dead_zone_h), (0, 0, 255),
                #                   cv2.FILLED)
                #     direction = 3
                # elif cy > int(frame_h / 2) + dead_zone_h:
                #     cv2.putText(img_contour, " GO DOWN ", (20, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
                #     cv2.rectangle(img_contour, (int(frame_w / 2 - dead_zone_w), int(frame_h / 2) + dead_zone_h),
                #                   (int(frame_w / 2 + dead_zone_w), frame_h), (0, 0, 255), cv2.FILLED)
                #     direction = 4
                # else:
                #     direction = 0

                cv2.line(img_contour, (int(frame_w / 2), int(frame_h / 2)),
                         (cx, cy), (0, 0, 255), 3)

                return direction, area, crop_xywh
    return direction, area, crop_xywh
#
# This method performs basic image analysis and is useful for finding primitives in an image
#   (  o [graphics]  a `primitive` as a term of graphic representation:
#      - the basic elements a picture is built from, i.e. the most fundamental descriptive elements of a graphic
#         . the term also refers to the related functions inside a graphics package used to describe such elements
#      - e.g. `a point specifies a coordinate position`, `a line segment specifies two coordinates`, and so on)
#
# Reference: https://m.blog.naver.com/samsjang/220588392347
#
# cv2.HoughLinesP(input image, distance resolution of the vote accumulator, angle resolution of the vote accumulator, minimum vote threshold, minimum line length, maximum gap between points on the same line)
# cv2.HoughCircles(input image, detection method, inverse ratio of the accumulator resolution, minimum distance between detected circle centers, upper threshold for the internal Canny edge detector, vote-count threshold)

# %%
img = np.zeros((500, 500), np.uint8)
cv2.circle(img, (200, 200), 50, 255, 3)
cv2.line(img, (100, 400), (400, 350), 255, 3)

# img = cv2.imread('../data/minguri.jpg', 0)
# thr, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)
# Using a real photo directly produces a much more cluttered result

lines = cv2.HoughLinesP(img, 1, np.pi / 180, 100, minLineLength=100,
                        maxLineGap=10)[:, 0]  # probabilistic Hough transform

circles = cv2.HoughCircles(img,
                           cv2.HOUGH_GRADIENT,
                           1,
                           15,
                           param1=200,
                           param2=30)[0]

dbg_img = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
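# The snippet stops after allocating dbg_img; drawing both detections onto it
# is the natural continuation (the colors here are arbitrary choices):
for x1, y1, x2, y2 in lines:
    cv2.line(dbg_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
for x, y, r in circles:
    cv2.circle(dbg_img, (int(x), int(y)), int(r), (0, 0, 255), 2)

cv2.imshow('primitives', dbg_img)
cv2.waitKey(0)
cv2.destroyAllWindows()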
Example #7
import serial
import time
import cv2

# Setup the communication path for the Arduino (in place of 'COM5', put the port your Arduino is connected to)
arduino = serial.Serial('COM5', 9600)
time.sleep(2)
print("Connected to arduino...")

#importing the Haarcascade for face detection
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# To capture the video stream from the webcam.
cap = cv2.VideoCapture(0)
cv2.namedWindow('img')  # create the window up front so resizeWindow works inside the loop

# Read each captured frame, convert it to grayscale, and find faces
while True:
    ret, img = cap.read()
    cv2.resizeWindow('img', 500, 500)
    cv2.line(img, (500, 250), (0, 250), (0, 255, 0), 1)
    cv2.line(img, (250, 0), (250, 500), (0, 255, 0), 1)
    cv2.circle(img, (250, 250), 5, (255, 255, 255), -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3)

    #detect the face and make a rectangle around it.
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 5)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]

        arr = {y: y + h, x: x + w}
        print(arr)

        print('X :' + str(x))
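        # The example is cut off here; a plausible continuation (the serial
        # message format below is an assumption, not from the source) sends
        # the face center to the Arduino and shows the annotated frame:
        face_center = (x + w // 2, y + h // 2)
        arduino.write('X{0}Y{1}\n'.format(*face_center).encode())

    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()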
# coding: utf-8
import cv2
import numpy as np

white_img = np.ones((512, 512, 3), np.uint8)  # init to [1, 1, 1]
white_img = 255 * white_img  # [255, 255, 255]
cv2.imshow('white_img', white_img)
cv2.waitKey(1000)
cv2.destroyAllWindows()

# img: the ndarray image object to draw on
# color: the color of the shape, given in BGR order as (B, G, R)
# thickness: the thickness of the shape's lines, default 1; for closed shapes such as circles and ellipses, -1 fills the interior
# lineType: the line type; the default 8-connected line looks jagged, while cv2.LINE_AA draws an anti-aliased (smooth) line
img = np.ones((512, 512, 3), np.uint8)
img = 255 * img
img = cv2.line(img, (100, 100), (400, 400), (255, 0, 0), 5)
img = cv2.rectangle(img, (200, 20), (400, 120), (0, 255, 0), 3)
img = cv2.circle(img, (100, 400), 50, (0, 0, 255), 2)
img = cv2.circle(img, (250, 400), 50, (0, 0, 255), 1, cv2.LINE_AA)
img = cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, (0, 255, 255), -1)
pts = np.array([[100, 50], [200, 300], [70, 200], [50, 100]], np.int32)
img = cv2.polylines(img, [pts], True, (0, 0, 0), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 250), font, 4, (255, 255, 0), 2, cv2.LINE_AA)

cv2.imshow('img', img)
if cv2.waitKey(0) == 27:
    cv2.destroyAllWindows()
def detectPlatesInScene(imgOriginalScene):
    listOfPossiblePlates = []

    height, width, numChannels = imgOriginalScene.shape

    imgGrayscaleScene = np.zeros((height, width, 1), np.uint8)
    imgThreshScene = np.zeros((height, width, 1), np.uint8)
    imgContours = np.zeros((height, width, 3), np.uint8)

    cv2.destroyAllWindows()

    if LicensePlateRecognition.showSteps:
        cv2.imshow("0", imgOriginalScene)

    imgGrayscaleScene, imgThreshScene = Preprocess.preprocess(imgOriginalScene)

    if LicensePlateRecognition.showSteps:
        cv2.imshow("1a", imgGrayscaleScene)
        cv2.imshow("1b", imgThreshScene)

    listOfPossibleCharsInScene = findPossibleCharsInScene(imgThreshScene)

    if LicensePlateRecognition.showSteps:
        print("step 2 - possible chars in scene = " +
              str(len(listOfPossibleCharsInScene)))
        imgContours = np.zeros((height, width, 3), np.uint8)
        contours = []

        for possibleChar in listOfPossibleCharsInScene:
            contours.append(possibleChar.contour)

        cv2.drawContours(imgContours, contours, -1,
                         LicensePlateRecognition.SCALAR_WHITE)
        cv2.imshow("2b", imgContours)

    listOfListsOfMatchingCharsInScene = DetectChars.findListOfListsOfMatchingChars(
        listOfPossibleCharsInScene)

    if LicensePlateRecognition.showSteps:
        print("step 3 - ", end="")
        print("listOfListsOfMatchingCharsInScene.Count = ", end="")
        print(str(len(listOfListsOfMatchingCharsInScene)))

        imgContours = np.zeros((height, width, 3), np.uint8)

        for listOfMatchingChars in listOfListsOfMatchingCharsInScene:
            intRandomBlue = random.randint(0, 255)
            intRandomGreen = random.randint(0, 255)
            intRandomRed = random.randint(0, 255)
            contours = []

            for matchingChar in listOfMatchingChars:
                contours.append(matchingChar.contour)

            cv2.drawContours(imgContours, contours, -1,
                             (intRandomBlue, intRandomGreen, intRandomRed))

        cv2.imshow("3", imgContours)

    for listOfMatchingChars in listOfListsOfMatchingCharsInScene:
        possiblePlate = extractPlate(imgOriginalScene, listOfMatchingChars)

        if possiblePlate.imgPlate is not None:
            listOfPossiblePlates.append(possiblePlate)

    print(str(len(listOfPossiblePlates)), end="")
    print(" possible plates found")

    if LicensePlateRecognition.showSteps:
        print()
        cv2.imshow("4a", imgContours)

        for i in range(0, len(listOfPossiblePlates)):
            # boxPoints returns float32 corners; cast to int for cv2.line
            p2fRectPoints = cv2.boxPoints(
                listOfPossiblePlates[i].rrLocationOfPlateInScene).astype(int)
            cv2.line(imgContours, tuple(p2fRectPoints[0]),
                     tuple(p2fRectPoints[1]),
                     LicensePlateRecognition.SCALAR_RED, 2)
            cv2.line(imgContours, tuple(p2fRectPoints[1]),
                     tuple(p2fRectPoints[2]),
                     LicensePlateRecognition.SCALAR_RED, 2)
            cv2.line(imgContours, tuple(p2fRectPoints[2]),
                     tuple(p2fRectPoints[3]),
                     LicensePlateRecognition.SCALAR_RED, 2)
            cv2.line(imgContours, tuple(p2fRectPoints[3]),
                     tuple(p2fRectPoints[0]),
                     LicensePlateRecognition.SCALAR_RED, 2)
            cv2.imshow("4a", imgContours)
            print("possible plate " + str(i) +
                  ", click on any image and press a key to continue . . .")
            cv2.imshow("4b", listOfPossiblePlates[i].imgPlate)
            cv2.waitKey(0)

        print(
            "plate detection complete, click on any image and press a key to begin char recognition . . ."
        )
        cv2.waitKey(0)

    return listOfPossiblePlates
Example #10
import numpy as np
import cv2

# img = np.zeros((512, 512))  # a 512x512 single-channel (grayscale) image
# print(img.shape)

img = np.zeros(
    (512, 512, 3), np.uint8
)  # a 512x512 matrix of pixels with 3 channels, each value from 0 to 255

###### Coloring the image
# print(img)
# img[:] = 255, 0, 0   # color the whole image blue

###### Drawing a line over the image
# cv2.line(img, (0, 0), (300, 300), (0, 255, 0), 3)  # (0,0) is the start and (300,300) the end point, color (0,255,0), thickness 3
cv2.line(
    img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), 3
)  # from (0,0) to (width, height): shape[1] is the width, shape[0] the height; color (0,255,0), thickness 3

###### Rectangle
# cv2.rectangle(img,(0,0),(250,350),(0,0,255),cv2.FILLED)
cv2.rectangle(img, (0, 0), (250, 350), (0, 0, 255), 2)

###### Circle
cv2.circle(
    img, (400, 50), 30, (255, 255, 0),
    5)  # (400,50) is the center, 30 the radius, (255,255,0) the color, and 5 the thickness

###### Putting text on the image
cv2.putText(
    img, " OpenCV ", (300, 200), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 150, 0), 2
)  # origin is (300,200), fontFace is one of cv2's built-in fonts, fontScale controls the size, (0,150,0) is the color, thickness=2
Example #11
# Fragment: assumes image = cv2.imread(filename), gray_image = its grayscale
# version, height, width = gray_image.shape, and row = 1 (each row is
# compared against the one above it).
marks = []  # mark the rows we need
while row < height:
    column = 0
    all_num = 0
    while column < width:
        if abs(
                int(gray_image[row, column]) -
                int(gray_image[row - 1, column])) > 25:
            all_num += 1
        column += 1
    if all_num / width * 100 > 50:
        marks.append(row)
    row += 1
showimage = image.copy()
for mark in marks:
    cv2.line(showimage, (0, mark), (width, mark), (0, 255, 0), 5)
cv2.namedWindow('split')
cv2.imshow('split', showimage)
cv2.waitKey(0)
marks.append(height)

# Using the recorded row numbers, split the image; segments shorter than 10 px are treated as divider lines and filtered out
for inx, mark in enumerate(marks):
    if inx == 0:
        height_start = 0
    else:
        height_start = marks[inx - 1]
    height_end = mark
    if height_end - height_start > 10:
        output = image[height_start:height_end, 0:width]
        cv2.imwrite(filename[:-4] + '_' + str(inx) + filename[-4:], output)
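# The row scan above can also be vectorized with NumPy (same logic, assuming
# numpy is imported as np): diff the grayscale image against itself shifted
# one row, then mark rows where more than half the pixels change by more
# than 25.
row_diff = np.abs(gray_image[1:].astype(np.int16) -
                  gray_image[:-1].astype(np.int16))
marks_vec = (np.where((row_diff > 25).mean(axis=1) > 0.5)[0] + 1).tolist()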
import cv2
import numpy as np

img = cv2.imread("sudoku-original.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)

for line in lines:
    rho, theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)

cv2.imshow('image', img)
cv2.imshow('edges', edges)
k = cv2.waitKey(0)
cv2.destroyAllWindows()
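# The endpoint arithmetic above reads as follows: a Hough line satisfies
# x*cos(theta) + y*sin(theta) = rho, so (a*rho, b*rho) is the foot of the
# perpendicular from the origin and (-b, a) is the line's direction; stepping
# 1000 px both ways yields endpoints that safely span the image. As a
# reusable sketch:
def hough_line_endpoints(rho, theta, length=1000):
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho
    pt1 = (int(x0 - length * b), int(y0 + length * a))
    pt2 = (int(x0 + length * b), int(y0 - length * a))
    return pt1, pt2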
Example #13
def tf_main(ns, event):
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:

            frame_skip = 300

            speed = 100

            ##START DRONE CONNECTION
            drone = tellopy.Tello()
            drone.connect()
            #drone.start_video()
            drone.wait_for_connection(60.0)
            #drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
            #container = av.open(ns.drone.get_video_stream())
            container = av.open(drone.get_video_stream())

            while True:
                for frame in container.decode(video=0):
                    ##Control Drone With Controller
                    if ns.type == 10:  # button pressed
                        if ns.button == 4:
                            drone.takeoff()
                        elif ns.button == 5:
                            drone.land()
                        elif ns.button == 3:
                            drone.forward(speed)
                        elif ns.button == 1:
                            drone.forward(speed)
                        elif ns.button == 0:
                            drone.left(speed)
                        elif ns.button == 2:
                            drone.right(speed)
                    elif ns.type == 11:  # button released
                        if ns.button == 3:
                            drone.forward(0)
                        elif ns.button == 1:
                            drone.forward(0)
                        elif ns.button == 0:
                            drone.left(0)
                        elif ns.button == 2:
                            drone.right(0)
                    elif ns.type == 9:  # hat motion
                        if ns.value[0] < 0:
                            drone.counter_clockwise(speed)
                        if ns.value[0] == 0:
                            drone.clockwise(0)
                        if ns.value[0] > 0:
                            drone.clockwise(speed)
                        if ns.value[1] < 0:
                            drone.down(speed)
                        if ns.value[1] == 0:
                            drone.up(0)
                        if ns.value[1] > 0:
                            drone.up(speed)



                    if 0 < frame_skip:
                        frame_skip = frame_skip - 1
                        continue
                    start_time = time.time()

                    image_np = cv2.cvtColor(np.array(frame.to_image()),
                                            cv2.COLOR_RGB2BGR)

                    height = image_np.shape[0]
                    width = image_np.shape[1]
                    obstacle_list = []
                    stop_list = []
                    wall_list = []
                    counter = 0

                    image_np_expanded = np.expand_dims(image_np, axis=0)
                    image_tensor = detection_graph.get_tensor_by_name(
                        'image_tensor:0')
                    boxes = detection_graph.get_tensor_by_name(
                        'detection_boxes:0')
                    scores = detection_graph.get_tensor_by_name(
                        'detection_scores:0')
                    classes = detection_graph.get_tensor_by_name(
                        'detection_classes:0')
                    num_detections = detection_graph.get_tensor_by_name(
                        'num_detections:0')

                    # Actual detection.
                    (boxes, scores, classes, num_detections) = sess.run(
                        [boxes, scores, classes, num_detections],
                        feed_dict={image_tensor: image_np_expanded})

                    #DRAW BOXES
                    for box in boxes[0]:
                        if scores[0][counter] > min_score:
                            bx = ObjectDetection(box, scores[0][counter],
                                                 classes[0][counter])
                            bx_obj = bx.getObj()
                            if bx_obj['cat'] == 3:  #WALL 2D LIST
                                cat_list = CatList(16, 16, 0, bx_obj['ctr'])
                                wall_list = mergeList(wall_list,
                                                      cat_list.fillCat())
                            elif bx_obj['cat'] == 2 and bx_obj[
                                    'pyt']:  #OBSTACLE 2D LIST
                                #Ternary
                                cat_list = CatList(
                                    16, 16, 3 if bx_obj['pyt'] > 200 else
                                    2 if bx_obj['pyt'] > 100 else 1,
                                    bx_obj['ctr'])
                                obstacle_list = mergeList(
                                    obstacle_list, cat_list.fillCat())
                                #obstacle_list = cat_list.fillCat()
                            elif bx_obj['cat'] == 1:  #STOP 2D LIST
                                cat_list = CatList(16, 16, 0, bx_obj['ctr'])
                                stop_list = mergeList(stop_list,
                                                      cat_list.fillCat())

                            #Draw Box
                            cv2.rectangle(image_np, bx_obj['tl'], bx_obj['br'],
                                          cat_colors[bx_obj['cat']], 2)
                            #Draw Box Center Point
                            cv2.circle(image_np, bx_obj['ctr'], 2,
                                       (89, 255, 249), -1)
                            #Draw Box Text Detail
                            bx_txt = bx.getTxt()
                            cv2.putText(image_np, bx_txt['txt'], bx_txt['pos'],
                                        font, 1, (255, 255, 255))

                        counter += 1

                    if False:  # disabled: debug line-guide overlay
                        #DRAW LINE GUIDES
                        x_lines = 16
                        y_lines = 16
                        lines_color = (112, 255, 183)

                        def retLineList(value, lines):
                            min_cal = int(value / lines)
                            res = []
                            x = 0
                            while x < lines:
                                res.append(min_cal * (x + 1))
                                x += 1
                            return res

                        for x_line in retLineList(height, x_lines):
                            cv2.line(image_np, (0, x_line), (width, x_line),
                                     lines_color, 1)
                        for y_line in retLineList(width, y_lines):
                            cv2.line(image_np, (y_line, 0), (y_line, height),
                                     lines_color, 1)
                        #DRAW LINE GUIDES

                    #VIDEO
                    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
                    cv2.imshow('image', image_np)
                    frame_skip = int(
                        (time.time() - start_time) / frame.time_base)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        drone.quit()
                        cv2.destroyAllWindows()
                        break
Example #14
import numpy as np
import cv2

img = cv2.imread("images/image_2.jpg", cv2.IMREAD_COLOR)

# drawing a line
cv2.line(img, (100, 150), (150, 350), (255, 255, 255), 30)

# drawing a rectangle
cv2.rectangle(img, (500, 250), (1000, 500), (0, 155, 255), 15)

# drawing a circle
cv2.circle(img, (447, 63), 63, (0, 255, 0), -1)

# drawing a polygon
pts = np.array([[100, 50], [200, 300], [700, 200], [500, 100]], np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(img, [pts], True, (0, 255, 255), 3)

# writing some text on image
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, "OpenCV Hack", (10, 500), font, 2, (100, 155, 155), 3, cv2.LINE_AA)

# displaying the image
cv2.imshow("image", img)

cv2.waitKey(0)
cv2.destroyAllWindows()
# Sobel gradient magnitude and direction (sobel, nms, connected,
# paste_signature, blur_img, img and signature are defined earlier)
g, angle = sobel(blur_img)
# non-maximum suppression
border = nms(g, angle)
# keep weak pixels that connect to strong pixels (hysteresis)
canvas = connected(border, 100, 200)
# paste the signature
paste_signature(canvas, signature)
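# For reference, a minimal sketch of what a hysteresis step like
# connected(border, low, high) can look like (the helper above is
# user-defined; this version assumes `border` is the NMS magnitude image):
def connected_sketch(border, low, high):
    strong = (border >= high).astype(np.uint8)
    weak = ((border >= low) & (border < high)).astype(np.uint8)
    kernel = np.ones((3, 3), np.uint8)
    prev = np.zeros_like(strong)
    # grow strong edges into touching weak pixels until nothing changes
    while not np.array_equal(prev, strong):
        prev = strong.copy()
        strong = strong | (cv2.dilate(strong, kernel) & weak)
    return strong * 255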

# Hough transform
lines = cv2.HoughLines(canvas, 1, np.pi / 180, 80)
lines = np.squeeze(lines)
black = np.zeros_like(img)
for rho, theta in lines:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(black, (x1, y1), (x2, y2), (255, 0, 0), 1)
paste_signature(black, signature)

# cv2.imwrite('myCanny.jpg',canvas)

cv2.imshow('canny',canvas)
cv2.imshow('line',black)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #16
import cv2 as cv
import numpy as np

# Create a black image
img = np.zeros((512, 512, 3), np.uint8)

# Draw a diagonal blue line with a thickness of 6 px
# The four parameters are the start point, end point, color, and line
# thickness; -1 completely fills closed shapes
cv.line(img, (0, 0), (511, 511), (255, 0, 0), 6)
cv.imshow('line', img)

key = cv.waitKey(0)
if key == ord('q'):
    cv.destroyAllWindows()
def main():
    drone = tellopy.Tello()
    try:
        drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)
        drone.connect()
        drone.wait_for_connection(30.0)
        # set the connection wait time; raise an error if it times out

        drone.takeoff()
        time.sleep(3)

        container = av.open(drone.get_video_stream())
        # skip first 300 frames
        frame_skip = 300
        while True:

            for frame in container.decode(video=0):

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                start_time = time.time()
                # decode the av frame into a BGR ndarray before using OpenCV on it
                img = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
                hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, redLower, redUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

                if len(cnts) > 0:
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    if radius > 10:
                        cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                        cv2.circle(img, center, 5, (0, 0, 255), -1)
                        pts.appendleft(center)
                else:
                    pts.clear()

                length = len(pts)
                for i in range(1, length):
                    if pts[i - 1] is None or pts[i] is None:
                        continue
                    thickness = int(np.sqrt(mybuffer / float(i + 1)) * 2.5)
                    cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), thickness)
                cv2.imshow('Frame', img)
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                cv2.imshow('Original', gray)
                # cv2.imshow('Canny', cv2.Canny(gray, 100, 200))

                interrupt = cv2.waitKey(10)
                if interrupt & 0xFF == ord('q'):
                    drone.down(30)
                    time.sleep(3)
                    drone.land()
                    time.sleep(3)
                    print('successfully land!')
                    break
                frame_skip = int((time.time() - start_time)/frame.time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #18
    img = cv2.imread('/home/wangsen/ws/video2frame/' + city + '/' + video +
                     '/' + frame_num + '.png')

    cv2.circle(img, (int(mid_x), int(mid_y)), 5, (0, 0, 255), -1)
    pre_gt_x, pre_gt_y = int(mid_x), int(mid_y)
    pre_pred_x, pre_pred_y = int(mid_x), int(mid_y)

    for i in range(len(cv_x)):
        tmpx = float(cv_x[i])  # the coordinates are stored as numeric strings
        tmpy = float(cv_y[i])
        gt_x = int(tmpx + gt[0][i][0])
        gt_y = int(tmpy + gt[0][i][1])
        pred_x = int(tmpx + pred[0][i][0])
        pred_y = int(tmpy + pred[0][i][1])
        cv2.line(img, (pre_gt_x, pre_gt_y), (gt_x, gt_y), (0, 255, 0), 3)
        cv2.line(img, (pre_pred_x, pre_pred_y), (pred_x, pred_y), (255, 0, 0),
                 3)
        pre_gt_x, pre_gt_y = gt_x, gt_y
        pre_pred_x, pre_pred_y = pred_x, pred_y
        # cv2.circle(img, (gt_x, gt_y), 3, (0, 255, 0), -1)  # green: ground truth
        # cv2.circle(img, (pred_x, pred_y), 3, (255, 0, 0), -1)  # blue: prediction
        if i == len(cv_x) - 1:
            # ground truth: mark the final point with a cross
            cv2.line(img, (gt_x, gt_y), (gt_x - 15, gt_y - 15), (0, 255, 0), 3)
            cv2.line(img, (gt_x, gt_y), (gt_x - 15, gt_y + 15), (0, 255, 0), 3)
            cv2.line(img, (gt_x, gt_y), (gt_x + 15, gt_y - 15), (0, 255, 0), 3)
            cv2.line(img, (gt_x, gt_y), (gt_x + 15, gt_y + 15), (0, 255, 0), 3)
def gameControllerFunction(topLeft, centerLeft, bottomLeft, topCenter, middle, bottomCenter, topRight, centerRight, bottomRight):

    print("Booting the Video stream,")
    vs = cv2.VideoCapture(0)  # start the video stream.
    time.sleep(2.0)  # set sleep time to 2.0 seconds

    while True:
        # Read the next frame off the stream (works the same if you load in a video file)
        ret, frame = vs.read()
        output = "None"
        key = None

        if frame is None:  # If there is no frame, save my pc from going through any stress at all
            break
        # otherwise, if we have a frame, we proceed with the following code
        # so much easier than open cv, keeping aspect ratio intact
        frame = imutils.resize(frame, width=700)
        # i want the mirror view, it's very helpful especially if i'm streaming
        frame = cv2.flip(frame, 1)

        # use the frame's own dimensions; cv2.getWindowImageRect('frame')
        # would fail here on the first pass, before the window exists
        totalHeight, totalWidth = frame.shape[:2]
        verLine1 = {
            'start': (totalWidth//3, 0),
            'end': (totalWidth//3, totalHeight)
        }
        verLine2 = {
            'start': (totalWidth//3 * 2, 0),
            'end': (totalWidth//3 * 2, totalHeight)
        }
        horLine1 = {
            'start': (0, totalHeight//3),
            'end': (totalWidth, totalHeight//3)
        }
        horLine2 = {
            'start': (0, totalHeight//3 * 2),
            'end': (totalWidth, totalHeight//3 * 2)
        }

        # processing the frame
        # blur helps to reduce high frequency noise, definitely helps the model
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        # convert my color to the HSV format
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

        # Create a mask
        # mask other regions except colors in range of upper to lower (thresholding)
        mask = cv2.inRange(hsv, lower_color_boundary, upper_color_boundary)
        # Reduce noise caused by thresholding
        mask = cv2.erode(mask, None, iterations=2)
        # foreground the found object i.e futher reduce noise.
        mask = cv2.dilate(mask, None, iterations=2)

        contours = cv2.findContours(
            mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # find contours
        # Grab the contours using imutils
        contours = imutils.grab_contours(contours)
        center = None  # center is initially set to none
        if len(contours) > 0:  # if the contours list is not empty proceed
            # select contour with maximum Area, most likely our object
            contour = max(contours, key=cv2.contourArea)
            # pick up co-ordinates for drawing a circle around the object
            ((x, y), radius) = cv2.minEnclosingCircle(contour)
            M = cv2.moments(contour)  # Extract moments from the contour.
            # Obtain the centre of mass of the object.
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            if radius > 10:  # if we have a reasonable radius for the proposed object detected
                # Draw a circle to bound the Object
                cv2.circle(frame, (int(x), int(y)),
                           int(radius), (0, 255, 255), 2)
                # Draw a filled in dot at the centre of the circle
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

        if center:
            if center[0] <= totalWidth//3:
                if center[1] <= totalHeight//3:
                    output = "Top Left"
                    key = topLeft
                elif center[1] >= totalHeight//3*2:
                    output = "Bottom Left"
                    key = bottomLeft
                else:
                    output = "Center Left"
                    key = centerLeft
            elif center[0] >= totalWidth//3*2:
                if center[1] <= totalHeight//3:
                    output = "Top Right"
                    key = topRight
                elif center[1] >= totalHeight//3*2:
                    output = "Bottom Right"
                    key = bottomRight
                else:
                    output = "Center Right"
                    key = centerRight
            else:
                if center[1] <= totalHeight//3:
                    output = "Top Center"
                    key = topCenter
                elif center[1] >= totalHeight//3*2:
                    output = "Bottom Center"
                    key = bottomCenter
                else:
                    output = "Center"
                    key = middle

        if key:
            key_arr = key.split('+')

            # Key Presses
            for k in key_arr:
                pyautogui.keyDown(k.strip())

            time.sleep(0.08)

            for k in key_arr:
                pyautogui.keyUp(k.strip())

        # Drawing the zone label and the 3x3 grid
        cv2.putText(frame,  output,  (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1,  (0, 0, 0),  2,  cv2.LINE_4)

        cv2.line(frame, verLine1['start'], verLine1['end'], (255, 255, 255), 5)
        cv2.line(frame, verLine2['start'], verLine2['end'], (255, 255, 255), 5)
        cv2.line(frame, horLine1['start'], horLine1['end'], (255, 255, 255), 5)
        cv2.line(frame, horLine2['start'], horLine2['end'], (255, 255, 255), 5)
        cv2.imshow("frame", frame)  # let's see the frame X frame

        # Closing a video frame
        key = cv2.waitKey(1)  # wait for a key event
        if key == ord("q"):  # if the q key is pressed
            break  # Break from the loop

    vs.release()  # Let opencv release the video loader
    cv2.destroyAllWindows()  # Destroy all windows to close it
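# One possible binding (the key names below are assumptions): map the nine
# screen zones to arrow-key combos; combos are '+'-separated, matching the
# split() in the loop above.
if __name__ == '__main__':
    gameControllerFunction(
        topLeft='up+left', centerLeft='left', bottomLeft='down+left',
        topCenter='up', middle=None, bottomCenter='down',
        topRight='up+right', centerRight='right', bottomRight='down+right')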
Example #20
import numpy as np
import cv2 as cv

# Create a black image
img = np.zeros((512, 512, 3), np.uint8)

# Draw a diagonal blue line with thickness of 5 px
img = cv.line(img, (0, 0), (511, 511), (255, 0, 0), 5)

img = cv.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)

img = cv.circle(img, (447, 63), 63, (0, 0, 255), -1)

img = cv.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)

pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
img = cv.polylines(img, [pts], True, (0, 255, 255))

font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255), 2, cv.LINE_AA)

cv.imshow('image', img)
k = cv.waitKey(0) & 0xFF
cv.destroyAllWindows()
Example #21
    # (fragment: inside a loop over lines from cv2.HoughLines, using
    # math.cos/math.sin)
    a = cos(theta)
    b = sin(theta)
    print("a, b: ", a, b)
    x0 = a * rho
    y0 = b * rho
    print("xo, yo: ", x0, y0)
    # 1200 is the length of the line
    x1 = int(x0 + 1200 * (-b))
    y1 = int(y0 + 1200 * (a))

    x2 = int(x0 - 1200 * (-b))
    y2 = int(y0 - 1200 * (a))

    cv2.line(img=img,
             pt1=(x1, y1),
             pt2=(x2, y2),
             color=(255, 255, 255),
             thickness=15)

cv2.imshow('Edges', edges)
cv2.imshow('Image', img)
cv2.waitKey()
cv2.destroyAllWindows()

# Method 2: HoughLinesP(), the progressive probabilistic Hough transform
img_orig = cv2.imread('6_2.png')
img_gray = cv2.cvtColor(img_orig, cv2.COLOR_BGR2GRAY)
img_result = img_orig.copy()

_, thresh = cv2.threshold(img_gray, 125, 255, cv2.THRESH_BINARY_INV)
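# The snippet ends before HoughLinesP is actually called on thresh; a
# plausible continuation (the threshold and length values are assumptions,
# and numpy is assumed imported as np):
lines_p = cv2.HoughLinesP(thresh, 1, np.pi / 180, 50,
                          minLineLength=50, maxLineGap=10)
if lines_p is not None:
    for x1, y1, x2, y2 in lines_p[:, 0]:
        cv2.line(img_result, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imshow('HoughLinesP', img_result)
cv2.waitKey()
cv2.destroyAllWindows()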
Example #22
    def paintLines(self, image):
        cv2.line(image, self.topLeft, self.bottomLeft, self.color, 1)
        cv2.line(image, self.bottomLeft, self.bottomRight, self.color, 1)
        cv2.line(image, self.bottomRight, self.topRight, self.color, 1)
        cv2.line(image, self.topRight, self.topLeft, self.color, 1)
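    # The same closed outline can be drawn in one call with cv2.polylines
    # (a sketch, assuming numpy is imported as np and the corner attributes
    # are integer (x, y) tuples as above):
    def paintLinesPolyline(self, image):
        pts = np.array([self.topLeft, self.bottomLeft, self.bottomRight,
                        self.topRight], np.int32).reshape((-1, 1, 2))
        cv2.polylines(image, [pts], True, self.color, 1)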
Example #23
def laneDetection(img):

    # resize the image to (450, 325)
    img = cv2.resize(img, (450, 325))

    # show the original image
    print(img.shape)
    cv2.imshow('OG', img)
    cv2.waitKey(0)

    # turn the image into gray scale
    gr_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # show the gray img
    print(gr_img.shape)
    cv2.imshow('GRAY', gr_img)
    cv2.waitKey(0)

    # blur the img
    blr_img = cv2.GaussianBlur(gr_img, (7, 7), 0)
    # show blur img
    print(blr_img.shape)
    cv2.imshow('BLUR', blr_img)
    cv2.waitKey(0)

    # canny the img for edge detection
    thr_low = 10
    thr_high = 200
    can_img = cv2.Canny(blr_img, thr_low, thr_high)
    # show canny img
    cv2.imshow('CANNY', can_img)
    cv2.waitKey(0)

    # region of interest
    vertices = np.array([[(0, 275), (200, 115), (275, 115), (315, 186),
                          (449, 324)]],
                        dtype=np.int32)
    mask = np.zeros_like(gr_img)
    cv2.fillPoly(mask, vertices, 255)
    masked_img = cv2.bitwise_and(can_img, mask)
    # show
    cv2.imshow("MASKED", masked_img)
    cv2.waitKey(0)

    # hough lines detection
    rho = 2
    theta = np.pi / 180
    thres = 40
    min_line_len = 100
    max_line_gap = 50
    lines = cv2.HoughLinesP(masked_img, rho, theta, thres, np.array([]),
                            min_line_len, max_line_gap)

    # create an empty black img
    line_image = np.zeros((masked_img.shape[0], masked_img.shape[1], 3),
                          dtype=np.uint8)

    cv2.imshow("LINES", line_image)
    cv2.waitKey(0)

    for line in lines:
        # print(line)
        for x1, y1, x2, y2 in line:
            cv2.line(line_image, (x1, y1), (x2, y2), [255, 0, 0], 20)

    cv2.imshow("LINES1", line_image)
    cv2.waitKey(0)

    # print(lines)

    # putting it together
    alpha = 1
    beta = 1
    gamma = 0

    print(can_img.shape)
    print(line_image.shape)

    image_with_lines = cv2.addWeighted(img, alpha, line_image, beta, gamma)

    return image_with_lines
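# A minimal driver for the function above (the image path is hypothetical):
if __name__ == '__main__':
    test_img = cv2.imread('test_images/road.jpg')
    result = laneDetection(test_img)
    cv2.imshow('RESULT', result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()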