def get_corners(painting_roi, draw=False):
    gray = cv2.cvtColor(painting_roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray, 9, 75, 75)
    # morphological opening (erode, then dilate) to suppress small artifacts
    erosion = cv2.erode(blur, np.ones((9, 9), np.uint8), iterations=2)
    dilation = cv2.dilate(erosion, np.ones((9, 9), np.uint8), iterations=2)
    # threshold the cleaned image (the original thresholded `blur`, leaving
    # the morphology result unused; `dilation` appears to be the intent)
    _, thresh = cv2.threshold(dilation, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # edges = auto_canny(thresh)

    # fill interior holes: flood-fill the background from the top-left
    # corner, invert it, and OR it with the original threshold image
    h, w = thresh.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    flood = thresh.copy()
    cv2.floodFill(flood, mask, (0, 0), 255)
    im_floodfill_inv = cv2.bitwise_not(flood)
    im_out = thresh | im_floodfill_inv

    contours, _ = cv2.findContours(im_out, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)

    # find the biggest contour (c) by area
    c = max(contours, key=cv2.contourArea)

    x, y, w, h = cv2.boundingRect(c)
    bbox = x, y, w, h

    # detect up to 4 strong corners inside the padded bounding-box crop;
    # the coordinates are relative to the crop, not the full ROI, and
    # y - 10 can go negative, which would slice from the image end
    corners = cv2.goodFeaturesToTrack(im_out[y - 10:y + h + 10, x:x + w + 10],
                                      4,
                                      0.01,
                                      painting_roi.shape[0] / 3,
                                      useHarrisDetector=True)
    corners = np.intp(corners)  # np.int0 is a deprecated alias for np.intp
    corners = np.squeeze(corners)
    corners_img = painting_roi.copy()

    # draw the padded bounding box (not the contour itself) in green
    cv2.rectangle(corners_img, (x, y), (x + w + 10, y + h + 10), (0, 255, 0),
                  2)

    if draw:
        for i, corner in enumerate(corners):
            x_corner, y_corner = corner.ravel()
            cv2.circle(corners_img, (x_corner, y_corner), 3, 255, -1)
            cv2.putText(corners_img,
                        f'{i}', (x_corner + 3, y_corner + 3),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.75,
                        color=(255, 0, 0))
        cv2.imshow("corners", corners_img)

    # for corner in corners:
    #     corner[0] += x
    #     corner[1] += y

    return corners, bbox
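
# A minimal usage sketch for get_corners (the file name is hypothetical).
# The returned corners are relative to the cropped search window, so they are
# shifted back by the crop origin (x, y - 10) here, which is what the
# commented-out block above was getting at.
roi = cv2.imread('painting_roi.png')  # hypothetical input path
if roi is not None:
    corners, (x, y, w, h) = get_corners(roi, draw=True)
    corners[:, 0] += x        # crop columns started at x
    corners[:, 1] += y - 10   # crop rows started at y - 10
    print(corners)
    cv2.waitKey(0)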
Example #2
def optical_flow(imgs, dst='./capture_folder'):
    # copy the inputs into dst as a numbered sequence that VideoCapture can read
    for idx, file in enumerate(imgs):
        copyfile(file, dst + '/' + str(idx) + '.bmp')
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    cap = cv2.VideoCapture(dst + "/%01d.bmp")
    color = np.random.randint(0, 255, (100, 3))
    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    while True:
        ret, frame = cap.read()
        if frame is None:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **lk_params)
        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel().astype(int)  # cv2 drawing needs integer coords
            c, d = old.ravel().astype(int)
            mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        # a short waitKey is required for imshow to actually refresh
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
    cv2.destroyAllWindows()
    cap.release()
    return mask
Example #3
def calc_features_by_lk(base_out, input_video_filename, base_output_filename):
    capture = cv2.VideoCapture(input_video_filename)
    while True:
        ret1, m1 = capture.read()
        ret2, m2 = capture.read()
        if not ret1 or not ret2:
            break

        frame1 = m1.copy()
        frame2 = m2.copy()

        frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        # skip almost completely black frames
        sc = frame1.mean()
        if sc < 1.6:
            continue

        frame1_features = cv2.goodFeaturesToTrack(frame1, FEATURE_MAX_NUM,
                                                  0.01, 0.01)

        frame2_features, found_features, found_err = cv2.calcOpticalFlowPyrLK(
            frame1,
            frame2,
            frame1_features,
            None,
            winSize=(5, 5),
            maxLevel=5,
            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                      0.03))

        draw_path_on_baseimage(m1, frame1_features, frame2_features,
                               found_features, found_err)
        draw_features_in_base_image(base_out, frame1_features, frame2_features,
                                    found_features, found_err)

        cv2.imshow("output_window", m1)
        cv2.imshow("superposition_window", base_out)

    capture.release()
    pass
Example #4
def apply_shi_tomasi_corners(image):
    """Apply the Shi-Tomasi corner detection algorithm.

    :param image: the loaded image
    :type image: cv2 image
    :return: copy of the image with detected corners marked
    :rtype: cv2 image
    """
    # copy the image
    img = np.copy(image)

    # convert to gray before corner detection
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # employ the shi-tomasi corner algorithm to detect corners
    corners = cv2.goodFeaturesToTrack(gray, 25, 0.05, 20)

    # draw each detected corner on the copied image
    for i in corners:
        x, y = i.ravel().astype(int)  # cv2.circle needs integer coords
        cv2.circle(img, (x, y), 5, [0, 0, 255], -1)

    return img
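
# A minimal usage sketch for apply_shi_tomasi_corners (the file name below is
# hypothetical; any BGR image works):
sample = cv2.imread('sample.png')  # hypothetical input path
if sample is not None:
    cv2.imshow('Shi-Tomasi corners', apply_shi_tomasi_corners(sample))
    cv2.waitKey(0)
    cv2.destroyAllWindows()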
def main():

    lk_params = dict(winSize=(5, 5),
                     maxLevel=2,
                     criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT,
                               10, 0.03))

    feature_params = dict(maxCorners=200,
                          qualityLevel=0.03,
                          minDistance=30,
                          blockSize=7)

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        pygame.init()
        pygame.joystick.init()

        track_len = 10
        detect_interval = 5
        tracks = []
        frame_idx = 0
        VIDEO_SCALE = 0.35

        #drone.set_video_encoder_rate(2)
        container = av.open(drone.get_video_stream())

        js = pygame.joystick.Joystick(0)
        js.init()
        js_name = js.get_name()
        print('Joystick name: ' + js_name)

        while True:
            time.sleep(0.01)
            for frameRaw in container.decode(video=0):
                checkController()
                frame1 = cv.cvtColor(np.array(frameRaw.to_image()),
                                     cv.COLOR_RGB2BGR)
                frame = cv.resize(frame1, (0, 0),
                                  fx=VIDEO_SCALE,
                                  fy=VIDEO_SCALE)
                frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                vis = frame.copy()

                if len(tracks) > 0:
                    img0, img1 = prev_gray, frame_gray
                    p0 = np.float32([tr[-1]
                                     for tr in tracks]).reshape(-1, 1, 2)
                    # forward flow, then backward flow; points whose round
                    # trip lands within 1 px of the start are kept as good
                    p1, _st, _err = cv.calcOpticalFlowPyrLK(
                        img0, img1, p0, None, **lk_params)
                    p0r, _st, _err = cv.calcOpticalFlowPyrLK(
                        img1, img0, p1, None, **lk_params)
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 1
                    new_tracks = []

                    for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2),
                                                     good):
                        if not good_flag:
                            continue
                        tr.append((x, y))
                        if len(tr) > track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
                    tracks = new_tracks
                    cv.polylines(vis, [np.int32(tr) for tr in tracks], False,
                                 (0, 255, 0))
                    draw_str(vis, (20, 20), 'track count: %d' % len(tracks))

                if frame_idx % detect_interval == 0:
                    mask = np.zeros_like(frame_gray)
                    mask[:] = 255
                    for x, y in [np.int32(tr[-1]) for tr in tracks]:
                        cv.circle(mask, (x, y), 5, 0, -1)
                    p = cv.goodFeaturesToTrack(frame_gray,
                                               mask=mask,
                                               **feature_params)
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            tracks.append([(x, y)])

                frame_idx += 1
                prev_gray = frame_gray
                cv.imshow('Tello Dense Optical - Middlebury Research', vis)

                ch = cv.waitKey(1)
                if ch == 27:
                    break

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        drone.quit()
        cv.destroyAllWindows()
def OpenCV():

    tello.connect()

    tello.streamon()
    frame_read = tello.get_frame_read()
            
    frame_skip = 300                               # frames to drop before the video link settles
        
    while True:
        if 0 < frame_skip:  # frame-skip handling
          frame_skip = frame_skip - 1
          continue
            
        start_time = time.time()                   # seconds since the UNIX epoch

        img = frame_read.frame
          
        image_origin = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)     # convert RGB to BGR
        
        r = image_origin[:,:,2]
        g = image_origin[:,:,1]
        b = image_origin[:,:,0]

        R = np.array(r).flatten()
        G = np.array(g).flatten()
        B = np.array(b).flatten()

        R = [x for x in R if x > 15]
        G = [x for x in G if x > 15]
        B = [x for x in B if x > 15]

        V1 = np.std(R)
        V2 = np.std(G)
        V3 = np.std(B)

        mode = sstats.mode(R)[0]
        mode1 = sstats.mode(G)[0]
        mode2 = sstats.mode(B)[0]

        h, w, c = image_origin.shape
        for y in range(0, h - 1, 2):      # stop one short so y+1 / x+1 stay in bounds
            for x in range(0, w - 1, 2):
                if  mode - 4.7*V1 < image_origin[y,x,2] < mode + 4.8*V1:
                    if  mode1 - 4.8*V2 < image_origin[y,x,1] < mode1 + 4.8*V2 and mode2 - 4.8*V3 < image_origin[y,x,0] < mode2 + 4.8*V3:
                        image_origin[y,x] = 0
                        image_origin[y,x+1] = 0
                        image_origin[y+1,x+1] = 0
                    else:
                        image_origin[y,x] = 255
                else:
                    image_origin[y,x+1] = 0
                    image_origin[y,x] = 0

        image_origin = cv2.blur(image_origin, (3, 3))

        A = np.uint8(image_origin[:,:,2])

        feature_params = {"maxCorners": 4,  "qualityLevel": 0.5,  "minDistance": 30, "blockSize": 5}#10 }  #特徴点検出
        #  特徴点の上限数 # 閾値 (高いほど特徴点数は減る) # 特徴点間の距離 (近すぎる点は除外) 30
        p0 = cv2.goodFeaturesToTrack(A, mask=None, **feature_params)
        if p0 is None:  # no corners found in this frame
            continue
        p0 = np.intp(p0)  # np.int0 is a deprecated alias for np.intp

        # plot the feature points for visualization
        if len(p0) == 4:
          for p in p0:               # p0 holds the x, y coordinates
            x,y = p.ravel()                                             # unpack the point
            cv2.circle(image_origin, (x, y), 5, (0, 255, 255) , -1)
                    
            x0 = p0[:,:,0].ravel()                                             # x coordinates
            y0 = p0[:,:,1].ravel()                                             # y coordinates
            l1 = np.sqrt((x0[0])**2+(y0[0])**2)
            l2 = np.sqrt((x0[1])**2+(y0[1])**2)
            l3 = np.sqrt((x0[2])**2+(y0[2])**2)
            l4 = np.sqrt((x0[3])**2+(y0[3])**2)
                    
            l = [l1, l2, l3, l4]

            # order the four corners: a[0]/b[0] is the one nearest the image
            # origin; the rest are assigned by which side of the line through
            # a[0] and each candidate the remaining two points fall on
            a = [0]*4
            b = [0]*4
            nn = [0, 1, 2, 3]
            for i in range(len(l)):
              if l[i] == min(l):
                a[0] = x0[i]
                b[0] = y0[i]
                s = i
            nn.remove(s)
            j=0
            for j in nn:
              n=nn.copy()
              A = (b[0]-y0[j])/(a[0]-x0[j])
              B = b[0] - A*a[0]
              n.remove(j)
              C = A*x0[n[0]] + B
              D = A*x0[n[1]] + B
              if C - y0[n[0]] > 0 and D - y0[n[1]] < 0:
                a[1] =  x0[n[0]]
                b[1] =  y0[n[0]]
                a[3] =  x0[n[1]]
                b[3] =  y0[n[1]]
                a[2] =  x0[j]
                b[2] =  y0[j]
                break
              elif C - y0[n[0]] < 0 and D - y0[n[1]] > 0:
                a[3] =  x0[n[0]]
                b[3] =  y0[n[0]]
                a[1] =  x0[n[1]]
                b[1] =  y0[n[1]]
                a[2] =  x0[j]
                b[2] =  y0[j]
                break

          d1 = np.sqrt((a[0]-a[1])**2+(b[0]-b[1])**2)
          d2 = np.sqrt((a[1]-a[2])**2+(b[1]-b[2])**2)
          d3 = np.sqrt((a[2]-a[3])**2+(b[2]-b[3])**2)
          d4 = np.sqrt((a[3]-a[0])**2+(b[3]-b[0])**2)
          line1 = cv2.line(image_origin,(a[0],b[0]),(a[1],b[1]),1000)
          line2 = cv2.line(image_origin,(a[1],b[1]),(a[2],b[2]),1000)
          line3 = cv2.line(image_origin,(a[2],b[2]),(a[3],b[3]),1000)
          line4 = cv2.line(image_origin,(a[3],b[3]),(a[0],b[0]),1000)
        
          # midpoint of the diagonal
          c1 = (a[0]+a[2]) / 2
          c2 = (b[0]+b[2]) / 2
          c11 = int(c1)
          c21 = int(c2)
          cv2.circle(image_origin, (c11, c21), 5, (0, 255, 255) , -1)

          filename = 'telloimage' + str(int(start_time)) + '.jpg'  # the original used an undefined `frame` here; a timestamp is substituted
          cv2.imwrite(filename,image_origin)
          
          #S = cv2.countNonZero(G1)
          # quadrilateral area as the sum of two triangle areas (cross products)
          S = abs((1/2)*((a[3]-a[0])*(b[1]-b[0])-(a[1]-a[0])*(b[3]-b[0])))+abs((1/2)*((a[1]-a[2])*(b[3]-b[2])-(a[3]-a[2])*(b[1]-b[2])))

          with open("S1 2021.3.10 8:19.txt", "a") as f:
            result = "{:.7f}\n".format(S)
            f.write(result)

          with open("d1 2021.3.10 8:19..txt", "a") as f:
            result = "{:.7f}\n".format(S)
            f.write(result)

          with open("d2 2021.3.10 8:19..txt", "a") as f:
            result = "{:.7f}\n".format(d2)
            f.write(result)


          print(S)
          print(d1)
          print(d2)


          cy = h / 2
          cx = w / 2

          data = [S,c1,c2,p0,cx,cy]
          return data

        # NOTE: `frame` (an av frame with time_base) is never defined in this
        # function, which reads via frame_read; this block is leftover from
        # the av-based variants below and would raise NameError if reached
        if frame.time_base < 1.0/60:
          time_base = 1.0/60                 # floor, to catch decoder timing errors
        else:
          time_base = frame.time_base
          # compute the frame-skip value
          frame_skip = int((time.time() - start_time) / time_base)
def of_demo():
    pixels_cut = 50
    pixels_cut_left = 100

    cap = cv2.VideoCapture('rally.avi')
    # cap = cv2.VideoCapture('input.mp4')

    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()
    old_frame = old_frame[:-pixels_cut, pixels_cut_left:, :]
    out = cv2.VideoWriter('output2.avi', fourcc, 30.0,
                          (old_frame.shape[1], old_frame.shape[0]))

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)
    frno = 0
    restart = False
    while True:
        frno += 1
        ret, frame = cap.read()
        if ret and frno < 70:
            # crop only after checking ret, so a None frame is never sliced
            frame = frame[:-pixels_cut, pixels_cut_left:, :]

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                p0 = cv2.goodFeaturesToTrack(old_gray,
                                             mask=None,
                                             **feature_params)
                restart = False
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
                                                   None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = p0[successful]

            # draw the tracks
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    pt_new, pt_old = (int(a), int(b)), (int(c), int(d))  # integer coords for drawing
                    mask = cv2.line(mask, pt_new, pt_old, color[i].tolist(), 2)
                    frame = cv2.circle(frame, pt_new, 4, color[i].tolist(), -1)
                    count_of_moved += 1

            # mask_of_mask = cv2.inRange(mask, (0, 0, 0), (3, 3, 3))/255
            # frame = frame*(np.expand_dims(mask_of_mask.astype(np.uint8),axis=2))
            img = cv2.add(frame, mask)

            mask = np.round(mask.astype(float) / 1.1).astype(np.uint8)  # fade old tracks; np.float is removed in modern NumPy

            cv2.imshow('frame', img)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
            out.write(img)
        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
Example #8
def start_tracking():
    # Initialize the video capture object
    cap = cv.VideoCapture(0)

    # Define the scaling factor for the frames
    scaling_factor = 0.5

    # Number of frames to track
    num_frames_to_track = 5

    # Skipping factor
    num_frames_jump = 2

    # Initialize variables
    tracking_paths = []
    frame_index = 0

    # Define tracking parameters
    tracking_params = dict(winSize=(11, 11),
                           maxLevel=2,
                           criteria=(cv.TERM_CRITERIA_EPS
                                     | cv.TERM_CRITERIA_COUNT, 10, 0.03))

    # Iterate until the user hits the 'Esc' key
    while True:
        # Capture the current frame
        _, frame = cap.read()

        # Resize the frame
        frame = cv.resize(frame,
                          None,
                          fx=scaling_factor,
                          fy=scaling_factor,
                          interpolation=cv.INTER_AREA)

        # Convert to grayscale
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

        # Create a copy of the frame
        output_img = frame.copy()

        if len(tracking_paths) > 0:
            # Get images
            prev_img, current_img = prev_gray, frame_gray

            # Organize the feature points
            feature_points_0 = np.float32([tp[-1] for tp in \
                    tracking_paths]).reshape(-1, 1, 2)

            # Compute optical flow
            feature_points_1, _, _ = cv.calcOpticalFlowPyrLK(
                prev_img, current_img, feature_points_0, None,
                **tracking_params)

            # Compute reverse optical flow
            feature_points_0_rev, _, _ = cv.calcOpticalFlowPyrLK(
                current_img, prev_img, feature_points_1, None,
                **tracking_params)

            # Compute the difference between forward and
            # reverse optical flow
            diff_feature_points = abs(feature_points_0 - \
                    feature_points_0_rev).reshape(-1, 2).max(-1)

            # Extract the good points
            good_points = diff_feature_points < 1

            # Initialize variable
            new_tracking_paths = []

            # Iterate through all the good feature points
            for tp, (x, y), good_points_flag in zip(
                    tracking_paths, feature_points_1.reshape(-1, 2),
                    good_points):
                # If the flag is not true, then continue
                if not good_points_flag:
                    continue

                # Append the X and Y coordinates and check if
                # its length greater than the threshold
                tp.append((x, y))
                if len(tp) > num_frames_to_track:
                    del tp[0]

                new_tracking_paths.append(tp)

                # Draw a circle around the feature points
                cv.circle(output_img, (int(x), int(y)), 3, (0, 255, 0), -1)

            # Update the tracking paths
            tracking_paths = new_tracking_paths

            # Draw lines
            cv.polylines(output_img, [np.int32(tp) for tp in \
                    tracking_paths], False, (0, 150, 0))

        # Go into this 'if' condition after skipping the
        # right number of frames
        if not frame_index % num_frames_jump:
            # Create a mask and draw the circles
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tp[-1]) for tp in tracking_paths]:
                cv.circle(mask, (x, y), 6, 0, -1)

            # Compute good features to track
            feature_points = cv.goodFeaturesToTrack(frame_gray,
                                                    mask=mask,
                                                    maxCorners=500,
                                                    qualityLevel=0.3,
                                                    minDistance=7,
                                                    blockSize=7)

            # Check if feature points exist. If so, append them
            # to the tracking paths
            if feature_points is not None:
                for x, y in np.float32(feature_points).reshape(-1, 2):
                    tracking_paths.append([(x, y)])

        # Update variables
        frame_index += 1
        prev_gray = frame_gray

        # Display output
        cv.imshow('Optical Flow', output_img)

        # Check if the user hit the 'Esc' key
        c = cv.waitKey(1)
        if c == 27:
            break
# termination criteria for calcOpticalFlowPyrLK: stop after 10 iterations or once movement falls below 0.03
termcriteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    img_draw = frame.copy()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # first frame: initialize the tracker state
    if prevImg is None:
        prevImg = gray
        # image for drawing the track lines, same size as the frame
        lines = np.zeros_like(frame)
        # detect corners to start tracking  ---①
        prevPt = cv2.goodFeaturesToTrack(prevImg, 200, 0.01, 10)
    else:
        nextImg = gray
        # find the corner points in the next frame via optical flow ---②
        nextPt, status, err = cv2.calcOpticalFlowPyrLK(prevImg, nextImg,
                                                       prevPt, None, criteria=termcriteria)
        # keep only the corners that were successfully tracked ---③
        prevMv = prevPt[status == 1]
        nextMv = nextPt[status == 1]
        for i, (p, n) in enumerate(zip(prevMv, nextMv)):
            px, py = p.ravel().astype(int)  # integer coords for drawing
            nx, ny = n.ravel().astype(int)
            # draw a line from the previous corner to the new corner ---④
            cv2.line(lines, (px, py), (nx, ny), color[i].tolist(), 2)
            # mark the new corner with a dot
            cv2.circle(img_draw, (nx, ny), 2, color[i].tolist(), -1)
Example #10
plt.show()

# show the pixels that are most likely corners
plt.imshow(detector_responses, cmap='gray'), plt.axis("off")
plt.show()

# corner detection
image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)

# number of corners to detect
corners_to_detect = 10
minimum_quality_score = 0.05
minimum_distance = 25

corners = cv2.goodFeaturesToTrack(image_gray, corners_to_detect,
                                  minimum_quality_score,
                                  minimum_distance)  # detect the corners
corners = np.intp(corners)  # integer coords; np.float32 points would break cv2.circle

for corner in corners:
    x, y = corner[0]
    cv2.circle(image_bgr, (x, y), 10, (255, 255, 255), -1)  # draw a white circle at each corner

image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)  # convert to grayscale
plt.imshow(image_rgb, cmap='gray'), plt.axis("off")  # show the image
plt.show()

## Generating machine-learning features
# To turn an image into a sample for machine learning, use NumPy's flatten, which converts the multi-dimensional array of pixel data into a vector of sample values.
# In a grayscale image each pixel is represented by one value;
# in a color image each pixel is represented by several values.
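
# A short illustrative sketch of the idea above (the array shapes are assumed,
# not taken from the book's example):
import numpy as np

gray_img = np.zeros((10, 10), dtype=np.uint8)      # grayscale: one value per pixel
color_img = np.zeros((10, 10, 3), dtype=np.uint8)  # color: three values per pixel

gray_features = gray_img.flatten()    # shape (100,): one feature per pixel
color_features = color_img.flatten()  # shape (300,): three features per pixel
print(gray_features.shape, color_features.shape)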
# params for Shi-Tomasi corner detection (the opening line of this dict was
# lost in extraction; maxCorners = 100 is assumed here to match the
# 100-entry color array below)
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 5 )

# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Create some random colors
color = np.random.randint(0,255,(100,3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while True:
    ret,frame = cap.read()
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    # Parameters for calculating optical flow.
    # See https://docs.opencv.org/3.0-beta/modules/video/doc/motion_analysis_and_object_tracking.html
    lk_params = {  # TODO: Tweak these values for performance and accuracy
        'winSize': (64, 64),
        'maxLevel': 2,
        'criteria': (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
    }

    # Pre-allocate two frames of the image, so I don't have to allocate an entire new array for every frame
    new_frame = np.zeros(CAMERA_RESOLUTION, dtype=np.uint8)
    old_frame = np.zeros_like(new_frame)

    camera.copy_last_frame(new_frame)
    new_points = cv2.goodFeaturesToTrack(new_frame.transpose(),
                                         mask=None,
                                         **feature_params)

    # Represents the total motion in the frame since the program started running.
    # Only useful for debugging, since in real use, we'll use the relative motion over the past few frames
    total_diff = np.zeros([2])

    total_time = 0
    num_iterations = 0

    while True:
        event.wait()
        event.clear()
        num_iterations += 1

        # Used only for timing, to see how processor intensive this stuff is.
Example #13
def getVideo():
    global tracks
    global track_len
    global detect_interval
    global frame_idx
    global VIDEO_SCALE
    global videoLabel
    global typeOfVideo
    global connectingToDrone
    global takePicture
    frameCount = 0  # Stores the current frame being processed
    frame1Optical = None  # Store variables for first frame
    frame2Optical = None  # Store variables for second frame
    prvs = None
    hsv = None

    try:
        while connectingToDrone:
            #time.sleep(0.03)
            for frameRaw in container.decode(video=0):
                checkController()
                if takePicture:
                    frame1 = np.array(frameRaw.to_image())
                    #im = Image.fromarray(frame1, 'RGB')
                    cv.imwrite(
                        "pics/" + datetime.datetime.now().isoformat() + ".jpg",
                        frame1)
                    #imageTk = ImageTk.PhotoImage(image=im)
                    #videoLabel.configure(image=imageTk)
                    #videoLabel.image = imageTk
                    #videoLabel.update()
                    takePicture = False
                if typeOfVideo.get() == "Canny Edge Detection":
                    frame1 = np.array(frameRaw.to_image())
                    frame1 = cv.resize(frame1, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    frameCanny = cv.Canny(frame1, 50, 100)
                    im = Image.fromarray(frameCanny)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "LK Optical Flow":
                    frame1 = np.array(frameRaw.to_image())
                    frame = frame1
                    frame = cv.resize(frame1, (0, 0),
                                      fx=VIDEO_SCALE,
                                      fy=VIDEO_SCALE)
                    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                    vis = frame.copy()
                    if len(tracks) > 0:
                        img0, img1 = prev_gray, frame_gray
                        p0 = np.float32([tr[-1]
                                         for tr in tracks]).reshape(-1, 1, 2)
                        p1, _st, _err = cv.calcOpticalFlowPyrLK(
                            img0, img1, p0, None, **lk_params)
                        p0r, _st, _err = cv.calcOpticalFlowPyrLK(
                            img1, img0, p1, None, **lk_params)
                        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                        good = d < 1
                        new_tracks = []

                        for tr, (x,
                                 y), good_flag in zip(tracks,
                                                      p1.reshape(-1, 2), good):
                            if not good_flag:
                                continue
                            tr.append((x, y))
                            if len(tr) > track_len:
                                del tr[0]
                            new_tracks.append(tr)
                            cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
                        tracks = new_tracks
                        cv.polylines(vis, [np.int32(tr) for tr in tracks],
                                     False, (0, 255, 0))
                        draw_str(vis, (20, 20),
                                 'track count: %d' % len(tracks))

                    if frame_idx % detect_interval == 0:
                        mask = np.zeros_like(frame_gray)
                        mask[:] = 255
                        for x, y in [np.int32(tr[-1]) for tr in tracks]:
                            cv.circle(mask, (x, y), 5, 0, -1)
                        p = cv.goodFeaturesToTrack(frame_gray,
                                                   mask=mask,
                                                   **feature_params)
                        if p is not None:
                            for x, y in np.float32(p).reshape(-1, 2):
                                tracks.append([(x, y)])

                    frame_idx += 1
                    prev_gray = frame_gray
                    #cv.imshow('Tello Dense Optical - Middlebury Research', vis)
                    im = Image.fromarray(vis, 'RGB')
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "Optical Flow":
                    frameCount += 1
                    if frameCount == 1:  # If first frame
                        frame1Optical = cv.cvtColor(
                            np.array(frameRaw.to_image()), cv.COLOR_RGB2BGR)
                        prvs = cv.cvtColor(frame1Optical, cv.COLOR_BGR2GRAY)
                        hsv = np.zeros_like(frame1Optical)
                        hsv[..., 1] = 255
                    else:  # If not first frame
                        frame2Optical = cv.cvtColor(
                            np.array(frameRaw.to_image()), cv.COLOR_RGB2BGR)
                        next_gray = cv.cvtColor(frame2Optical, cv.COLOR_BGR2GRAY)  # renamed from `next`, which shadows the builtin
                        flow = cv.calcOpticalFlowFarneback(
                            prvs, next_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
                        mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
                        hsv[..., 0] = ang * 180 / np.pi / 2
                        hsv[..., 2] = cv.normalize(mag, None, 0, 255,
                                                   cv.NORM_MINMAX)
                        bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
                        im = Image.fromarray(
                            cv.resize(frame2Optical, (0, 0),
                                      fx=VIDEO_SCALE,
                                      fy=VIDEO_SCALE))
                        imageTk = ImageTk.PhotoImage(image=im)
                        videoLabel.configure(image=imageTk)
                        videoLabel.image = imageTk
                        videoLabel.update()
                        k = cv.waitKey(30) & 0xff
                        if k == 27:
                            break
                        prvs = next_gray
                elif typeOfVideo.get() == "Grayscale":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)

                    im = Image.fromarray(frame1, 'RGB')
                    gray = im.convert('L')

                    # copy the grayscale pixels into a writable numpy array
                    bw = np.asarray(gray).copy()

                    im = Image.fromarray(bw)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "BGR":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    im = Image.fromarray(frame1)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "Black & White":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)

                    im = Image.fromarray(frame1, 'RGB')
                    gray = im.convert('L')

                    # Using numpy, convert pixels to pure black or white
                    bw = np.asarray(gray).copy()

                    # Pixel range is 0...255, 256/2 = 128
                    bw[bw < 128] = 0  # Black
                    bw[bw >= 128] = 255  # White
                    im = Image.fromarray(bw)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                else:  # typeOfVideo.get() == "Normal":
                    frame1 = np.array(frameRaw.to_image())
                    frame1 = cv.resize(frame1, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    im = Image.fromarray(frame1, 'RGB')
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()

            ch = cv.waitKey(1)
            if ch == 27:
                break
    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                # start receiving the video stream
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        frame_skip = 300
        while True:
            for frame in container.decode(video=0):
                if 0 < frame_skip:  #フレームスキップ処理
                    frame_skip = frame_skip - 1
                    continue
                start_time = time.time()

                image_origin = cv2.cvtColor(np.array(frame.to_image()),
                                            cv2.COLOR_RGB2HSV_FULL)

                # 150フレームから210フレームまで5フレームごとに切り出す
                #start_frame = 150
                #end_frame = 9000
                #interval_frames = 5
                #i = start_frame + interval_frames

                # 最初のフレームに移動して取得
                #container.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
                #ret, prev_frame = container.read()

                img_mask = cv2.GaussianBlur(image_origin, (15, 15),
                                            0)  # kernel size and sigma
                canny = cv2.Canny(img_mask, 100, 150)

                # color extraction

                def red_detect(canny):
                    # NOTE: the canny argument is ignored; the mask is taken
                    # from image_origin, already converted to HSV_FULL above
                    HSVLower1 = np.array([0, 50, 50])  # lower bound of the color to extract (HSV)
                    HSVUpper1 = np.array([20, 255, 255])  # upper bound of the color to extract (HSV)
                    mask1 = cv2.inRange(image_origin, HSVLower1, HSVUpper1)

                    #HSVLower2 = np.array([202, 185, 115])
                    #HSVUpper2 = np.array([255, 255, 148])
                    #mask2 = cv2.inRange(image_origin,HSVLower2, HSVUpper2)

                    return mask1

                mask = red_detect(canny)

                feature_params = {
                    "maxCorners": 200,
                    "qualityLevel": 0.2,
                    "minDistance": 12,
                    "blockSize": 12
                }
                # max corners / quality threshold (higher -> fewer corners) / minimum distance between corners (too-close points are dropped)
                p0 = cv2.goodFeaturesToTrack(mask, mask=None, **feature_params)

                # plot the feature points for visualization
                if p0 is not None:  # goodFeaturesToTrack returns None when nothing is found
                    for p in p0:
                        x, y = p.ravel().astype(int)  # integer coords for drawing
                        cv2.circle(image_origin, (x, y), 5, (0, 255, 255), -1)

                cv2.imshow('mask', mask)
                cv2.imshow('image_origin', image_origin)
                cv2.waitKey(1)

                if frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                # compute the frame-skip value
                frame_skip = int((time.time() - start_time) / time_base)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
def OpenCV():

    retry = 3
    container = None
    while container is None and 0 < retry:
        retry -= 1
        try:
            # start receiving the video stream
            container = av.open(drone.get_video_stream()
                                )  # container unpacks the compressed tello video stream
        except av.AVError as ave:
            print(ave)
            print('retry...')

    frame_skip = 300  # frames to drop before the video link settles

    while True:
        for frame in container.decode(video=0):  # decode the byte stream into frames
            if 0 < frame_skip:  # frame-skip handling
                frame_skip = frame_skip - 1
                continue

            start_time = time.time()

            image_origin = cv2.cvtColor(np.array(frame.to_image()),
                                        cv2.COLOR_RGB2BGR)  # convert RGB to BGR

            h, w, c = image_origin.shape

            image = cv2.cvtColor(image_origin, cv2.COLOR_BGR2RGB)  # was cvtColor(image, ...): `image` was undefined at this point

            r = image[:, :, 2]
            g = image[:, :, 1]
            b = image[:, :, 0]

            R = np.array(r).flatten()
            G = np.array(g).flatten()
            B = np.array(b).flatten()

            #R = [x for x in R if x > 15]
            #G = [x for x in G if x > 15]
            #B = [x for x in B if x > 15]

            V1 = np.std(R)
            V2 = np.std(G)
            V3 = np.std(B)

            mode = sstats.mode(R)[0]
            mode1 = sstats.mode(G)[0]
            mode2 = sstats.mode(B)[0]

            threshold_img = image.copy()

            # NOTE: mode1/mode2 are the modes of G and B, yet they are paired
            # with the r/g channels and V1/V2 below; this looks like a mix-up
            # in the original and is kept as-is
            threshold_img[r < mode1 - 3.7 * V1] = 0

            threshold_img[r >= mode1 - 3.7 * V1] = 255

            threshold_img[r > mode1 + 3.7 * V1] = 0

            threshold_img[g > mode2 - 3.7 * V2] = 0

            #feature_params = {"maxCorners": 4,  "qualityLevel": 0.3,  "minDistance": 30,  "blockSize": 12}

            #feature_params = {"maxCorners": 8,  "qualityLevel": 0.3,  "minDistance": 10,  "blockSize": 12}

            feature_params = {
                "maxCorners": 12,
                "qualityLevel": 0.3,
                "minDistance": 5,
                "blockSize": 9
            }

            #feature_params = {"maxCorners": 4,  "qualityLevel": 0.3,  "minDistance": 5,  "blockSize": 9}
            # max corners / quality threshold (higher -> fewer corners) / minimum distance between corners (too-close points are dropped)

            # this first blur/A pair is immediately overwritten below and has no effect
            image2 = cv2.blur(image, (3, 3))
            A = np.uint8(image[:, :, 2])

            image2 = cv2.blur(threshold_img, (3, 3))
            A = np.uint8(image2[:, :, 2])

            p0 = cv2.goodFeaturesToTrack(A, mask=None, **feature_params)

            for p in p0:  # p0 holds the x, y coordinates
                x, y = p.ravel().astype(int)  # unpack the point; integer coords for drawing
                cv2.circle(image, (x, y), 5, (0, 255, 255), -1)
            """
        cv2.imshow("image", image)
        cv2.imshow("image1", image1)
        cv2.imshow("image2", image2)
        cv2.imshow("image_thresh", threshold_img)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
        """

            x0 = p0[:, :, 0].ravel()  # x coordinates
            y0 = p0[:, :, 1].ravel()  # y coordinates

            MX = max(x0)
            mx = min(x0)

            MY = max(y0)
            my = min(y0)

            avex = (MX + mx) / 2
            avey = (MY + my) / 2
            """
        print(x0)
        print(y0)

        print(MX)
        print(mx)

        print(MY)
        print(my)

        print(avex)
        print(avey)

        print(type(avex))
        print(type(x0[0]))
        """

            MX = int(MX)
            mx = int(mx)

            MY = int(MY)
            my = int(my)

            avex = int(avex)
            avey = int(avey)

            a = [0] * 12
            b = [0] * 12

            x1 = []
            y1 = []
            for i in range(len(x0)):
                if y0[i] < avey:
                    x1.append(x0[i])
                    y1.append(y0[i])

            l11 = np.sqrt((x1[0])**2 + (y1[0])**2)
            l21 = np.sqrt((x1[1])**2 + (y1[1])**2)
            l31 = np.sqrt((x1[2])**2 + (y1[2])**2)
            l41 = np.sqrt((x1[3])**2 + (y1[3])**2)

            l1 = [l11, l21, l31, l41]

            print(l1)

            c = [0, 1, 2, 3]

            for i in range(len(l1)):
                if l1[i] == min(l1):
                    a[0] = x1[i]
                    b[0] = y1[i]
                    s = i
            c.remove(s)
            j = 0
            for j in c:
                n = c.copy()
                A = (b[0] - y1[j]) / (a[0] - x1[j])
                B = b[0] - A * a[0]
                n.remove(j)
                C = A * x1[n[0]] + B

                D = A * x1[n[1]] + B
                if C - y1[n[0]] > 0 and D - y1[n[1]] < 0:
                    a[1] = x1[n[0]]
                    b[1] = y1[n[0]]
                    a[3] = x1[n[1]]
                    b[3] = y1[n[1]]
                    a[2] = x1[j]
                    b[2] = y1[j]
                    break
                elif C - y1[n[0]] < 0 and D - y1[n[1]] > 0:
                    a[3] = x1[n[0]]
                    b[3] = y1[n[0]]
                    a[1] = x1[n[1]]
                    b[1] = y1[n[1]]
                    a[2] = x1[j]
                    b[2] = y1[j]
                    break

            d1 = np.sqrt((a[0] - a[1])**2 + (b[0] - b[1])**2)
            d2 = np.sqrt((a[1] - a[2])**2 + (b[1] - b[2])**2)
            d3 = np.sqrt((a[2] - a[3])**2 + (b[2] - b[3])**2)
            d4 = np.sqrt((a[3] - a[0])**2 + (b[3] - b[0])**2)

            line1 = cv2.line(image, (a[0], b[0]), (a[1], b[1]), 100)
            line2 = cv2.line(image, (a[1], b[1]), (a[2], b[2]), 100)
            line3 = cv2.line(image, (a[2], b[2]), (a[3], b[3]), 100)
            line4 = cv2.line(image, (a[3], b[3]), (a[0], b[0]), 100)

            x2 = []
            y2 = []
            for i in range(len(x0)):
                if y0[i] > avey and x0[i] < avex:
                    x2.append(x0[i])
                    y2.append(y0[i])

            l12 = np.sqrt((x2[0])**2 + (y2[0])**2)
            l22 = np.sqrt((x2[1])**2 + (y2[1])**2)
            l32 = np.sqrt((x2[2])**2 + (y2[2])**2)
            l42 = np.sqrt((x2[3])**2 + (y2[3])**2)

            l2 = [l12, l22, l32, l42]

            print(l2)

            d = [0, 1, 2, 3]

            for i in range(len(l2)):
                if l2[i] == min(l2):
                    a[4] = x2[i]
                    b[4] = y2[i]
                    s = i
            d.remove(s)
            k = 0
            for k in d:
                n = d.copy()
                A = (b[4] - y2[k]) / (a[4] - x2[k])
                B = b[4] - A * a[4]
                n.remove(k)
                C = A * x2[n[0]] + B

                D = A * x2[n[1]] + B
                if C - y2[n[0]] > 0 and D - y2[n[1]] < 0:
                    a[5] = x2[n[0]]
                    b[5] = y2[n[0]]
                    a[7] = x2[n[1]]
                    b[7] = y2[n[1]]
                    a[6] = x2[k]
                    b[6] = y2[k]
                    break
                elif C - y2[n[0]] < 0 and D - y2[n[1]] > 0:
                    # the original indexed x1/y1 here (a copy-paste slip from
                    # the first quadrant); x2/y2 mirror the branch above
                    a[7] = x2[n[0]]
                    b[7] = y2[n[0]]
                    a[5] = x2[n[1]]
                    b[5] = y2[n[1]]
                    a[6] = x2[k]
                    b[6] = y2[k]
                    break

            d4 = np.sqrt((a[4] - a[5])**2 + (b[4] - b[5])**2)
            d5 = np.sqrt((a[5] - a[6])**2 + (b[5] - b[6])**2)
            d6 = np.sqrt((a[6] - a[7])**2 + (b[6] - b[7])**2)
            d7 = np.sqrt((a[7] - a[4])**2 + (b[7] - b[4])**2)

            line4 = cv2.line(image, (a[4], b[4]), (a[5], b[5]), 100)
            line5 = cv2.line(image, (a[5], b[5]), (a[6], b[6]), 100)
            line6 = cv2.line(image, (a[6], b[6]), (a[7], b[7]), 100)
            line7 = cv2.line(image, (a[7], b[7]), (a[4], b[4]), 100)

            x3 = []
            y3 = []
            for i in range(len(x0)):
                if y0[i] > avey and x0[i] > avex:
                    x3.append(x0[i])
                    y3.append(y0[i])

            l13 = np.sqrt((x3[0])**2 + (y3[0])**2)
            l23 = np.sqrt((x3[1])**2 + (y3[1])**2)
            l33 = np.sqrt((x3[2])**2 + (y3[2])**2)
            l43 = np.sqrt((x3[3])**2 + (y3[3])**2)

            l3 = [l13, l23, l33, l43]

            print(l3)

            e = [0, 1, 2, 3]

            for i in range(len(l3)):
                if l3[i] == min(l3):
                    a[8] = x3[i]
                    b[8] = y3[i]
                    s = i
            e.remove(s)
            z = 0
            for z in e:
                n = e.copy()
                A = (b[8] - y3[z]) / (a[8] - x3[z])
                B = b[8] - A * a[8]
                n.remove(z)
                C = A * x3[n[0]] + B

                D = A * x3[n[1]] + B

                #if C - y3[n[0]] > 0 and D - y3[n[1]] < 0:
                if C - y3[n[0]] < 0 and D - y3[n[1]] > 0:
                    a[9] = x3[n[0]]
                    b[9] = y3[n[0]]
                    a[11] = x3[n[1]]
                    b[11] = y3[n[1]]
                    a[10] = x3[z]
                    b[10] = y3[z]
                    break

                #elif C - y3[n[0]] < 0 and D - y3[n[1]] > 0:
                elif C - y3[n[0]] > 0 and D - y3[n[1]] < 0:
                    a[9] = x3[n[0]]
                    b[9] = y3[n[0]]
                    a[10] = x3[n[1]]
                    b[10] = y3[n[1]]
                    a[11] = x3[z]
                    b[11] = y3[z]
                    break

            d8 = np.sqrt((a[8] - a[9])**2 + (b[8] - b[9])**2)
            d9 = np.sqrt((a[9] - a[10])**2 + (b[9] - b[10])**2)
            d10 = np.sqrt((a[10] - a[11])**2 + (b[10] - b[11])**2)
            d11 = np.sqrt((a[11] - a[8])**2 + (b[11] - b[8])**2)

            line8 = cv2.line(image, (a[8], b[8]), (a[9], b[9]), 100)
            line9 = cv2.line(image, (a[9], b[9]), (a[10], b[10]), 100)
            line10 = cv2.line(image, (a[10], b[10]), (a[11], b[11]), 100)
            line11 = cv2.line(image, (a[11], b[11]), (a[8], b[8]), 100)
            """
        print(avex)
        print(avey)

        print(x0)
        print(y0)
        print('x1[0]:%d' % x1[0])
        print('y1[0]:%d' % y1[0])
        print('x1[1]:%d' % x1[1])
        print('y1[1]:%d' % y1[1])
        print('x1[2]:%d' % x1[2])
        print('y1[2]:%d' % y1[2])
        print('x1[3]:%d' % x1[3])
        print('y1[3]:%d' % y1[3])

        print('a[0]:%d' % a[0])
        print('b[0]:%d' % b[0])
        print('a[1]:%d' % a[1])
        print('b[1]:%d' % b[1])
        print('a[2]:%d' % a[2])
        print('b[2]:%d' % b[2])
        print('a[3]:%d' % a[3])
        print('b[3]:%d' % b[3])

        print('a[4]:%d' % a[4])
        print('b[4]:%d' % b[4])
        print('a[5]:%d' % a[5])
        print('b[5]:%d' % b[5])
        print('a[6]:%d' % a[6])
        print('b[6]:%d' % b[6])
        print('a[7]:%d' % a[7])
        print('b[7]:%d' % b[7])


        print('a[8]:%d' % a[8])
        print('b[8]:%d' % b[8])
        print('a[9]:%d' % a[9])
        print('b[9]:%d' % b[9])
        print('a[10]:%d' % a[10])
        print('b[10]:%d' % b[10])
        print('a[11]:%d' % a[11])
        print('b[11]:%d' % b[11])

        cv2.imshow("image", image)
        cv2.imshow("thresh", threshold_img)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
        """

            c1 = (a[0] + a[2]) / 2
            c2 = (b[0] + b[2]) / 2
            c3 = (a[4] + a[6]) / 2
            c4 = (b[4] + b[6]) / 2
            c5 = (a[8] + a[10]) / 2
            c6 = (b[8] + b[10]) / 2

            print(x0)
            print(y0)

            c11 = int(c1)
            c12 = int(c2)
            c13 = int(c3)
            c14 = int(c4)
            c15 = int(c5)
            c16 = int(c6)

            #line1 = cv2.line(image,(c11,c12),(c13,c14),100)
            line = cv2.line(image, (c13, c14), (c15, c16), 100)
            #line3 = cv2.line(image,(c15,c16),(c11,c12),100)

            D = np.sqrt((c3 - c5)**2 + (c4 - c6)**2)

            Tx = (c3 + c5) / 2
            Ty = (c4 + c6) / 2

            Da = (c4 - c6) / (c3 - c5)
            DDa = -1 / Da

            Dx = (c6 - Da * c4 + DDa * c1 - c2) / (DDa - Da)
            Dy = Da * Dx + c6 - Da * c4

            #lineT = cv2.line(image,(int(Tx),int(Ty)),(c11,c12),100)

            lineDT = cv2.line(image, (int(Dx), int(Dy)), (c11, c12), 100)

            T = np.sqrt((Tx - c1)**2 + (Ty - c2)**2)
            DD = np.sqrt((Dx - c1)**2 + (Dy - c2)**2)

            X = (T * 0.6) / D

            XD = (DD * 0.6) / D

            print(DD)
            print(XD)

            print(T)
            print(D)
            print(X)
            """
        l1 = np.sqrt((c11-c13)**2 + (c12-c14)**2)
        l2 = np.sqrt((c13-c14)**2 + (c15-c16)**2)
        l3 = np.sqrt((c15-c16)**2 + (c11-c13)**2)

        s = (l1 + l2 + l3) / 2
        Sh = np.sqrt(s*(s-l1)*(s-l2)*(s-l3))

        sin1 = Sh / (l1*l3) 
        theta1 = math.degrees(math.asin(sin1))

        sin2 = Sh / (l2*l3)
        theta2 = math.degrees(math.asin(sin2))

        sin3 = Sh / (l2*l1)
        theta3 = math.degrees(math.asin(sin3))

        print(Sh)
        print(l1)
        print(l2)
        print(l3)
        print(theta1)
        print(theta2)
        print(theta3)
        """

            # quadrilateral area as the sum of two triangle areas (cross products)
            S = abs((1 / 2) * ((a[3] - a[0]) * (b[1] - b[0]) - (a[1] - a[0]) *
                               (b[3] - b[0]))) + abs(
                                   (1 / 2) * ((a[1] - a[2]) * (b[3] - b[2]) -
                                              (a[3] - a[2]) * (b[1] - b[2])))

            filename = 'telloimage' + str(frame.index) + '.jpg'  # frame.index: str(frame) would put spaces and brackets in the file name
            cv2.imwrite(filename, image_origin)

            with open("S1 2021.3.10 8:19.txt", "a") as f:
                result = "{:.7f}\n".format(S)
                f.write(result)

            with open("d1 2021.3.10 8:19..txt", "a") as f:
                result = "{:.7f}\n".format(S)
                f.write(result)

            with open("d2 2021.3.10 8:19..txt", "a") as f:
                result = "{:.7f}\n".format(d2)
                f.write(result)

            print(S)
            print(d1)
            print(d2)

            cy = h / 2
            cx = w / 2

            data = [S, c1, c2, p0, cx, cy]
            return data

            # NOTE: unreachable after the return above; kept from the original
            if frame.time_base < 1.0 / 60:
                time_base = 1.0 / 60  # floor, to catch decoder timing errors
            else:
                time_base = frame.time_base
                # compute the frame-skip value
                frame_skip = int((time.time() - start_time) / time_base)
Example #16
import cv2
import numpy as np

img = cv2.imread("./sources/contour.png")
img2 = cv2.imread("./sources/text.png")

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray1 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
gray1 = np.float32(gray1)

corners = cv2.goodFeaturesToTrack(gray, 50, 0.01, 10)
corners1 = cv2.goodFeaturesToTrack(gray1, 50, 0.01, 10)
# arguments: image to process, number of corners to find, quality level (an experimental value),
# minimum distance between points

corners = np.intp(corners)   # np.int0 is a deprecated alias for np.intp
corners1 = np.intp(corners1)

for corner in corners:
    x, y = corner.ravel()
    cv2.circle(img, (x, y), 3, (0, 0, 255), -1)
for corner in corners1:
    x, y = corner.ravel()
    cv2.circle(img2, (x, y), 3, (0, 0, 255), -1)

cv2.imshow("img", img)
cv2.imshow("img2", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #17
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        cap = drone.get_video_stream()  # unused; the stream is already opened via av above

        # params for ShiTomasi corner detection
        feature_params = dict(maxCorners=100,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        # Parameters for lucas kanade optical flow
        lk_params = dict(winSize=(15, 15),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS
                                   | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # Create some random colors
        color = np.random.randint(0, 255, (100, 3))

        prev_gray = None
        while True:
            for frame in container.decode(video=0):
                frame = cv2.cvtColor(np.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # initialize tracking on the first frame only; the original
                # re-detected corners on every frame and then computed the
                # flow between a frame and itself, so nothing ever moved
                if prev_gray is None:
                    prev_gray = frame_gray
                    p0 = cv2.goodFeaturesToTrack(frame_gray,
                                                 mask=None,
                                                 **feature_params)
                    mask = np.zeros_like(frame)
                    continue
                # calculate optical flow
                p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray,
                                                       p0, None, **lk_params)
                # Select good points
                good_new = p1[st == 1]
                good_old = p0[st == 1]
                # draw the tracks
                for i, (new, old) in enumerate(zip(good_new, good_old)):
                    a, b = new.ravel()
                    c, d = old.ravel()
                    mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
                    frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
                img = cv2.add(frame, mask)
                cv2.imshow('frame', img)
                k = cv2.waitKey(30) & 0xff
                if k == 27:
                    break
                # Now update the previous frame and previous points
                old_gray = frame_gray.copy()
                p0 = good_new.reshape(-1, 1, 2)

                #image = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
                cv2.imshow('Original', img)
                print("ImgShow")
                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                cv2.waitKey(1)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
# OpenCV has a function, cv2.goodFeaturesToTrack(). It finds the N strongest corners in the image by the Shi-Tomasi method (or Harris corner detection, if you specify it). As usual, the image should be grayscale. You specify the number of corners you want, then the quality level, a value between 0 and 1 giving the minimum corner quality below which candidates are rejected, and finally the minimum Euclidean distance between detected corners.

# With all this information, the function finds corners in the image. All corners below the quality level are rejected, and the remaining ones are sorted by quality in descending order. The function takes the strongest corner, throws away all nearby corners within the minimum distance, and repeats until it has the N strongest corners.

# In the example below, we will try to find the 25 best corners:

import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('resource/block_test.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
corners = np.int0(corners)  # int0, not uint8: uint8 would wrap coordinates above 255

for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)

cv2.imshow('shi tomasi', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
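# The greedy selection described above can be mimicked in plain NumPy: sort
# candidates by quality, keep the strongest, and suppress anything within
# minDistance of an already-kept corner. A rough sketch of the idea, not
# OpenCV's actual implementation:

import numpy as np

def greedy_min_distance(points, scores, min_distance, n):
    # points: (M, 2) candidate corners; scores: (M,) quality measures
    order = np.argsort(scores)[::-1]  # strongest candidates first
    kept = []
    for idx in order:
        p = points[idx]
        if all(np.hypot(*(p - q)) >= min_distance for q in kept):
            kept.append(p)
        if len(kept) == n:
            break
    return np.array(kept)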
Beispiel #19
0
import time

import av
import cv2
import numpy as np


def OpenCV():
    # assumes a module-level `drone` (a connected tellopy.Tello instance)
    retry = 3
    container = None
    while container is None and 0 < retry:
        retry -= 1
        try:
            # start receiving the video stream
            container = av.open(drone.get_video_stream())
            # container unpacks the compressed Tello video data
        except av.AVError as ave:
            print(ave)
            print('retry...')

    frame_skip = 300  # skip frames buffered while connecting

    while True:
        for frame in container.decode(video=0):
            # decode raw bytes (the video payload) into frames
            if 0 < frame_skip:  # frame-skip handling
                frame_skip = frame_skip - 1
                continue

            start_time = time.time()  # seconds since the UNIX epoch

            image_origin = cv2.cvtColor(np.array(frame.to_image()),
                                        cv2.COLOR_RGB2BGR)  # RGB -> BGR
            gray = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2GRAY)
            #th, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)

            # image_origin is BGR, so channel 0 is blue and channel 2 is red
            b, g, r = (image_origin[:, :, 0], image_origin[:, :, 1],
                       image_origin[:, :, 2])

            h, w, c = image_origin.shape

            # ratio of the green channel to overall brightness; gray may
            # contain zeros, so this can emit divide-by-zero warnings
            G = g / gray

            #shape = image_origin.shape
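            # binarise: pixels whose green ratio is below 0.85 become 255
            # (slow per-pixel loop; a vectorised sketch follows this example)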
            for y in range(0, h):
                for x in range(0, w):
                    if G[y, x] < 0.85:
                        G[y, x] = 255
                    else:
                        G[y, x] = 0

            G1 = np.uint8(G)

            feature_params = {
                "maxCorners": 4,
                "qualityLevel": 0.5,
                "minDistance": 30,
                "blockSize": 5
            }  # feature-point detection parameters
            # max number of feature points / quality threshold (higher means
            # fewer points) / minimum distance (points too close are rejected)
            p0 = cv2.goodFeaturesToTrack(G1, mask=None, **feature_params)
            if p0 is None:  # no corners found in this frame
                continue
            p0 = np.int0(p0)

            # plot the feature points for visualisation
            if len(p0) == 4:
                for p in p0:  # each p holds one (x, y) corner of image_origin
                    x, y = p.ravel()  # unpack the element of p0
                    cv2.circle(image_origin, (x, y), 5, (0, 255, 255), -1)

                    x0 = p0[:, :, 0].ravel()  # x coordinates
                    y0 = p0[:, :, 1].ravel()  # y coordinates
                    l1 = np.sqrt((x0[0])**2 + (y0[0])**2)
                    l2 = np.sqrt((x0[1])**2 + (y0[1])**2)
                    l3 = np.sqrt((x0[2])**2 + (y0[2])**2)
                    l4 = np.sqrt((x0[3])**2 + (y0[3])**2)

                    l = [l1, l2, l3, l4]

                    a = [0] * 4
                    b = [0] * 4
                    nn = [0, 1, 2, 3]
                    for i in range(len(l)):
                        if l[i] == min(l):
                            a[0] = x0[i]
                            b[0] = y0[i]
                            s = i
                    nn.remove(s)
                    j = 0
                    for j in nn:
                        n = nn.copy()
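                        # slope of the line through the anchor corner and
                        # corner j (undefined if their x coordinates match)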
                        A = (b[0] - y0[j]) / (a[0] - x0[j])
                        B = b[0] - A * a[0]
                        n.remove(j)
                        C = A * x0[n[0]] + B
                        D = A * x0[n[1]] + B
                        if C - y0[n[0]] > 0 and D - y0[n[1]] < 0:
                            a[1] = x0[n[0]]
                            b[1] = y0[n[0]]
                            a[3] = x0[n[1]]
                            b[3] = y0[n[1]]
                            a[2] = x0[j]
                            b[2] = y0[j]
                            break
                        elif C - y0[n[0]] < 0 and D - y0[n[1]] > 0:
                            a[3] = x0[n[0]]
                            b[3] = y0[n[0]]
                            a[1] = x0[n[1]]
                            b[1] = y0[n[1]]
                            a[2] = x0[j]
                            b[2] = y0[j]
                            break

                d1 = np.sqrt((a[0] - a[1])**2 + (b[0] - b[1])**2)
                d2 = np.sqrt((a[1] - a[2])**2 + (b[1] - b[2])**2)
                d3 = np.sqrt((a[2] - a[3])**2 + (b[2] - b[3])**2)
                d4 = np.sqrt((a[3] - a[0])**2 + (b[3] - b[0])**2)
                line1 = cv2.line(image_origin, (a[0], b[0]), (a[1], b[1]),
                                 1000)
                line2 = cv2.line(image_origin, (a[1], b[1]), (a[2], b[2]),
                                 1000)
                line3 = cv2.line(image_origin, (a[2], b[2]), (a[3], b[3]),
                                 1000)
                line4 = cv2.line(image_origin, (a[3], b[3]), (a[0], b[0]),
                                 1000)

                # midpoint of the quadrilateral's diagonal
                c1 = (a[0] + a[2]) / 2
                c2 = (b[0] + b[2]) / 2
                c11 = int(c1)
                c21 = int(c2)
                cv2.circle(image_origin, (c11, c21), 5, (0, 255, 255), -1)

                # use the frame's presentation timestamp for a filesystem-safe
                # name; str(frame) would contain '<' and other odd characters
                filename = 'telloimage' + str(frame.pts) + '.jpg'
                cv2.imwrite(filename, image_origin)

                #S = cv2.countNonZero(G1)

                #s1 = (d1 + d4 + d5) / 2
                #Sh1 = np.sqrt(s1*(s1-d1)*(s1-d4)*(s1-d5))

                #s2 = (d2 + d3 + d5) / 2
                #Sh2 = np.sqrt(s2*(s2-d2)*(s2-d3)*(s2-d5))

                #S = Sh1 + Sh2

                # quadrilateral area: split along the 0-2 diagonal and sum
                # half the absolute cross product of each triangle's edges
                S = abs((1 / 2) * ((a[3] - a[0]) * (b[1] - b[0]) -
                                   (a[1] - a[0]) * (b[3] - b[0]))) + \
                    abs((1 / 2) * ((a[1] - a[2]) * (b[3] - b[2]) -
                                   (a[3] - a[2]) * (b[1] - b[2])))

                with open("S.txt", "a") as f:
                    result = "{:.7f}\n".format(S)
                    f.write(result)

                cy = h / 2
                cx = w / 2

                data = [S, c1, c2, p0, cx, cy]
                return data

            if frame.time_base < 1.0 / 60:
                time_base = 1.0 / 60  # floor used to detect decoder timing errors
            else:
                time_base = frame.time_base
                # compute how many frames to skip
                frame_skip = int((time.time() - start_time) / time_base)
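# The double loop over y and x above binarises the green-ratio image one
# pixel at a time, which is very slow in Python. A vectorised NumPy sketch
# of the same thresholding step (same 0.85 cutoff; g and gray are the green
# channel and grayscale arrays from the example; the helper name is ours):

import numpy as np

def green_ratio_mask(g, gray, cutoff=0.85):
    # guard against division by zero where the grayscale image is 0
    ratio = g.astype(np.float32) / np.maximum(gray.astype(np.float32), 1.0)
    # pixels below the cutoff become 255 (foreground), everything else 0
    return np.where(ratio < cutoff, 255, 0).astype(np.uint8)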
Beispiel #20
0
    #cv2.destroyAllWindows()

    # Copies the original image for Contour detection
    imgcontour1 = img.copy()
    contours1, hierarchy1 = cv2.findContours(ImageCanny, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
    big, maxarea = getBiggestcontour(contours1)
    cv2.drawContours(imgcontour1, big, -1, (0, 0, 255), 15)
    cv2.imshow("Contours on Original Image found on Canny Image1", imgcontour1)
    img = drawRectangle(imgcontour1, big, 15)
    cv2.imshow("Contours on Original Image found on Canny Image2", img)
    cv2.waitKey(0)

    # Copies the Original image for corner Detection
    img_with_corners = img.copy()
    corners = cv2.goodFeaturesToTrack(imgBlur, 4, 0.4, 50)
    corners = np.int0(corners)
    for corner in corners:
        x, y = corner.ravel()
        cv2.circle(img_with_corners, (x, y), 4, (200, 0, 255), -1)
    cv2.imshow("Image With corners", img_with_corners)

    pts1 = np.float32(big)  # PREPARE POINTS FOR WARP
    pts2 = np.float32([[widthImg, 0], [0, 0], [0, heightImg],
                       [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

    # REMOVE 20 PIXELS FROM EACH SIDE
    imgWarpColored = imgWarpColored[20:imgWarpColored.shape[0] - 20,
                                    20:imgWarpColored.shape[1] - 20]


def main():
    drone = tellopy.Tello()  # Tello controller

    try:
        drone.connect()  # connect to the Tello
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                # start receiving the video stream
                container = av.open(drone.get_video_stream())
                # container unpacks the compressed Tello video data
            except av.AVError as ave:
                print(ave)
                print('retry...')

        frame_skip = 300  # skip frames buffered while connecting
        while True:
            for frame in container.decode(video=0):
                # decode raw bytes (the video payload) into frames
                if 0 < frame_skip:  # frame-skip handling
                    frame_skip = frame_skip - 1
                    continue

                start_time = time.time()  # seconds since the UNIX epoch

                image_origin = cv2.cvtColor(np.array(frame.to_image()),
                                            cv2.COLOR_RGB2BGR)  # RGB -> BGR
                # frame.to_image() yields RGB, so use RGB2GRAY here
                # (BGR2GRAY would swap the red and blue channel weights)
                gray = cv2.cvtColor(np.array(frame.to_image()),
                                    cv2.COLOR_RGB2GRAY)

                imagesplit1 = cv2.split(image_origin)

                #Gg = imagesplit1[1] / gray
                Gg = image_origin[:, :, 1] / gray

                shape = image_origin.shape
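                # binarise: pixels whose green ratio is below 0.89 become 1
                # (same slow per-pixel pattern as in the previous example)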
                for x in range(0, shape[0]):
                    for y in range(0, shape[1]):
                        if Gg[x, y] < 0.89:
                            Gg[x, y] = 1
                        else:
                            Gg[x, y] = 0

                G1 = np.uint8(Gg)

                feature_params = {
                    "maxCorners": 4,
                    "qualityLevel": 0.5,
                    "minDistance": 30,
                    "blockSize": 5
                }  # feature-point detection parameters
                # max number of feature points / quality threshold (higher
                # means fewer points) / minimum distance between points
                p0 = cv2.goodFeaturesToTrack(G1, mask=None, **feature_params)
                #p0 = cv2.goodFeaturesToTrack(mask, 4, 0.5, 30)
                if p0 is None:  # no corners found in this frame
                    continue
                p0 = np.int0(p0)
                #print(p0)

                if len(p0) >= 4:
                    # plot the feature points for visualisation
                    for p in p0:  # each p holds one (x, y) corner of image_origin
                        x, y = p.ravel()  # unpack the element of p0
                        cv2.circle(image_origin, (x, y), 5, (0, 255, 255), -1)

                        x0 = p0[:, :, 0].ravel()  # x coordinates
                        y0 = p0[:, :, 1].ravel()  # y coordinates
                        l1 = np.sqrt((x0[0])**2 + (y0[0])**2)
                        l2 = np.sqrt((x0[1])**2 + (y0[1])**2)
                        l3 = np.sqrt((x0[2])**2 + (y0[2])**2)
                        l4 = np.sqrt((x0[3])**2 + (y0[3])**2)

                        l = [l1, l2, l3, l4]

                        a = [0] * 4
                        b = [0] * 4
                        nn = [0, 1, 2, 3]
                        for i in range(len(l)):
                            if l[i] == min(l):
                                a[0] = x0[i]
                                b[0] = y0[i]
                                s = i
                        nn.remove(s)
                        j = 0
                        for j in nn:
                            n = nn.copy()
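                            # slope of the line through the anchor corner
                            # and corner j (undefined for equal x values)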
                            A = (b[0] - y0[j]) / (a[0] - x0[j])
                            B = b[0] - A * a[0]
                            n.remove(j)
                            C = A * x0[n[0]] + B
                            D = A * x0[n[1]] + B
                            if C - y0[n[0]] > 0 and D - y0[n[1]] < 0:
                                a[1] = x0[n[0]]
                                b[1] = y0[n[0]]
                                a[3] = x0[n[1]]
                                b[3] = y0[n[1]]
                                a[2] = x0[j]
                                b[2] = y0[j]
                                break
                            elif C - y0[n[0]] < 0 and D - y0[n[1]] > 0:
                                a[3] = x0[n[0]]
                                b[3] = y0[n[0]]
                                a[1] = x0[n[1]]
                                b[1] = y0[n[1]]
                                a[2] = x0[j]
                                b[2] = y0[j]
                                break

                    d1 = np.sqrt((a[0] - a[1])**2 + (b[0] - b[1])**2)
                    d2 = np.sqrt((a[1] - a[2])**2 + (b[1] - b[2])**2)
                    d3 = np.sqrt((a[2] - a[3])**2 + (b[2] - b[3])**2)
                    d4 = np.sqrt((a[3] - a[0])**2 + (b[3] - b[0])**2)

                    #s = (d1 + d2 + d3 + d4) / 2
                    #Sh = np.sqrt((s-d1)*(s-d2)*(s-d3)*(s-d4))

                    #s1 = (d1 + d4 + d5) / 2
                    #Sh1 = np.sqrt(s1*(s1-d1)*(s1-d4)*(s1-d5))

                    #s2 = (d2 + d3 + d5) / 2
                    #Sh2 = np.sqrt(s2*(s2-d2)*(s2-d3)*(s2-d5))

                    #SH = Sh1 + Sh2

                    #Sg = abs((1/2)*((a[3]-a[0])*(b[1]-b[0])-(a[1]-a[0])*(b[3]-b[0])))+abs((1/2)*((a[1]-a[2])*(b[3]-b[2])-(a[3]-a[2])*(b[1]-b[2])))
                    #Sw = cv2.countNonZero(G1)

                    S1 = d1 * d2
                    S2 = d1 * d4
                    S3 = d3 * d2
                    S4 = d3 * d4

                    c1 = (a[0] + a[2]) / 2
                    c2 = (b[0] + b[2]) / 2
                    c11 = int(c1)
                    c21 = int(c2)
                    cv2.circle(image_origin, (c11, c21), 5, (0, 255, 255), -1)

                    #line1 = cv2.line(image_origin,(c11+100,c21-100),(c11+100,c21+100),1000)
                    #line2 = cv2.line(image_origin,(c11+100,c21+100),(c11-100,c21+100),1000)
                    #line3 = cv2.line(image_origin,(c11-100,c21+100),(c11-100,c21-100),1000)
                    #line4 = cv2.line(image_origin,(c11+100,c21-100),(c11-100,c21-100),1000)

                    cy = shape[0] / 2
                    cy1 = shape[0] / 3
                    cx = shape[1] / 2

                    cv2.circle(image_origin, (int(cx), int(cy)), 5,
                               (0, 255, 255), -1)
                    #with open("0.3m_S1_2020_10_17.txt", "a") as f:
                    #  result = "{:.7f}\n".format(S1)
                    #  f.write(result)

                    #cv2.imshow('img_mask1', Gg)
                    cv2.imshow('image_origin', image_origin)
                    cv2.waitKey(1)

                    if frame.time_base < 1.0 / 60:
                        time_base = 1.0 / 60
                        #print("T:",time_base)
                        #print("frame",frame_skip)
                    else:
                        time_base = frame.time_base
                        #print("T:",time_base)
                        # compute how many frames to skip
                        frame_skip = int((time.time() - start_time) /
                                         time_base)
                        #print("frame",frame_skip)

    except Exception as ex:
        # fetch details of the exception currently being handled
        exc_type, exc_value, exc_traceback = sys.exc_info()
        # print the stack frames recorded up to the failure
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
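# Both Tello examples order the four detected corners with an ad-hoc rule:
# the corner nearest the image origin becomes corner 0, and the remaining
# corners are classified by which side of the line through corner 0 and a
# candidate they fall on. A simpler, commonly used alternative sorts the
# corners by angle around their centroid; a sketch of that idea (not the
# author's exact method):

import numpy as np

def order_corners(pts):
    # pts: (4, 2) array of (x, y) corner coordinates
    center = pts.mean(axis=0)
    angles = np.arctan2(pts[:, 1] - center[1], pts[:, 0] - center[0])
    # ascending angle; with y growing downward this is clockwise on screen
    return pts[np.argsort(angles)]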
Beispiel #22
0
import cv2


def Feature(image, mask):
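    # detect up to 50 Shi-Tomasi corners in the (grayscale) image,
    # restricted to the region where the mask is non-zero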
    feature = cv2.goodFeaturesToTrack(image, 50, 0.1, 5, mask=mask)
    return feature
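# A possible use of Feature: restrict detection to a rectangular region of
# interest by passing a binary mask (the file name and ROI coordinates below
# are hypothetical):

import numpy as np

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)
mask = np.zeros_like(gray)
mask[100:300, 100:300] = 255  # detect corners only inside this rectangle
corners = Feature(gray, mask)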