Example #1
def LK_Optical_Flow(image, p0, mask=None):
    lk_params = dict(winSize=(50, 50),
                     maxLevel=5,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    image_old = image
    image_old = cv2.add(image_old,
                        np.zeros(np.shape(image_old), dtype=np.uint8),
                        mask=mask)
    linemask = np.zeros_like(image)
    while 1:
        #image=cv2.add(image,np.zeros(np.shape(image),dtype=np.uint8),mask=mask)
        p1, st, err = cv2.calcOpticalFlowPyrLK(image_old, image, p0, None,
                                               **lk_params)
        try:
            good_new = p1[st == 1]
            good_old = p0[st == 1]
        except TypeError:
            warn("Lose track")
            return
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            # OpenCV drawing functions require integer pixel coordinates
            linemask = cv2.line(linemask, (int(a), int(b)), (int(c), int(d)),
                                color[i].tolist(), 2)
            image = cv2.circle(image, (int(a), int(b)), 5, color[i].tolist(),
                               -1)
        img = cv2.add(image, linemask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        image_old = image.copy()
        p0 = good_new.reshape(-1, 1, 2)
        image = (yield img)
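Example #1 is a generator: it yields the composited frame and receives the next frame via send(). It also assumes a color array and warn are defined elsewhere. A minimal driving sketch, assuming a webcam source and features from cv2.goodFeaturesToTrack:

import cv2
import numpy as np
from warnings import warn

color = np.random.randint(0, 255, (100, 3))  # assumed by the snippet above

cap = cv2.VideoCapture(0)
_, first = cap.read()
gray = cv2.cvtColor(first, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(gray, maxCorners=100, qualityLevel=0.3,
                             minDistance=7)

flow = LK_Optical_Flow(gray, p0)
img = next(flow)  # run up to the first yield
while True:
    ret, frame = cap.read()
    if not ret:
        break
    img = flow.send(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    cv2.imshow('tracks', img)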
Example #2
def streamVisionSensor():

    # Mouse callback: remember the clicked point and start tracking.
    # Note: the click coordinates are in display-window space; scale them if
    # the sensor resolution differs from the 480x480 display size.
    def select_point(event, x, y, flags, params):
        global point, point_selected, old_points
        if event == cv2.EVENT_LBUTTONDOWN:
            point = (x, y)
            point_selected = True
            old_points = np.array([[x, y]], dtype=np.float32)

    cv2.namedWindow('frame')

    point_selected = False
    point = ()
    old_points = np.array([[]])
    #Get the handle of vision sensor
    errorCode, visionSensorHandle = sim.simxGetObjectHandle(
        clientID, 'Vision_sensor', sim.simx_opmode_oneshot_wait)
    #Get the image
    errorCode, resolution, image = sim.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, sim.simx_opmode_streaming)
    time.sleep(0.5)

    errorCode, resolution, image = sim.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, sim.simx_opmode_buffer)
    sensorImage = np.array(image, dtype=np.uint8)
    sensorImage.resize([resolution[1], resolution[0], 3])
    old_image = sensorImage.copy()

    cv2.setMouseCallback('frame', select_point)

    while (sim.simxGetConnectionId(clientID) != -1):

        #Get the image of the vision sensor
        errorCode, resolution, image = sim.simxGetVisionSensorImage(
            clientID, visionSensorHandle, 0, sim.simx_opmode_buffer)
        #Transform the image so it can be displayed using pyplot
        sensorImage = np.array(image, dtype=np.uint8)
        sensorImage.resize([resolution[1], resolution[0], 3])
        if point_selected is True:
            new_points, status, error = cv2.calcOpticalFlowPyrLK(
                old_image, sensorImage, old_points, None, **lk_params)
            old_image = sensorImage.copy()  #current frame becomes previous
            old_points = new_points  #current x,y points become previous
            x, y = new_points.ravel()
            # Drawing functions need integer coordinates
            cv2.circle(sensorImage, (int(x), int(y)), 8, (0, 0, 0), -1)

        # Resize after drawing so the tracked point actually appears
        displayedImage = cv2.resize(sensorImage, (480, 480))
        cv2.imshow('frame', displayedImage)
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    print('End of Simulation')
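Example #2 drives the CoppeliaSim (V-REP) legacy remote API and references sim, clientID, and lk_params without defining them. A plausible preamble, with typical Lucas-Kanade parameters (host and port are assumptions):

import time
import cv2
import numpy as np
import sim  # CoppeliaSim legacy remote-API bindings

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))

sim.simxFinish(-1)  # close any stale connections
clientID = sim.simxStart('127.0.0.1', 19999, True, True, 5000, 5)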
Example #3
def opticalFlow(black, frame, squares):
    global previous_black, previous_pos, mask
    pos = []
    for sq in squares:
        pos.append([(sq.x, sq.y)])

    pos = np.float32([tr[-1] for tr in pos]).reshape(-1, 1, 2)

    if len(pos) > 0:
        # init or regenerate points
        if (previous_black is None or previous_pos is None
                or len(previous_pos) == 0):
            next_points, status, _ = cv2.calcOpticalFlowPyrLK(
                black, black, pos, None, **lk_params)
            previous_pos = next_points
            mask = np.zeros_like(frame)
        else:
            next_points, status, _ = cv2.calcOpticalFlowPyrLK(
                previous_black, black, previous_pos, None, **lk_params)

        if next_points is not None:
            good_new = next_points[status == 1]
            good_old = previous_pos[status == 1]

            previous_pos = good_new.reshape(-1, 1, 2)

            for new, old in zip(good_new, good_old):
                # ravel() flattens each point to (x, y)
                a, b = new.ravel()
                c, d = old.ravel()
                # Draw the track line (drawing functions need integer coords)
                mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                (255), 4)
                # Draw a filled circle (thickness -1) at the new position
                frame = cv2.circle(frame, (int(a), int(b)), 3, (255), -1)

    previous_black = black
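opticalFlow() keeps its state in module-level globals and also assumes an lk_params dict. A minimal initialization sketch:

import cv2
import numpy as np

previous_black = None  # previous (grayscale) frame
previous_pos = None    # previously tracked points, shape (n, 1, 2)
mask = None            # drawing canvas, allocated on first use

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))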
Example #4
def optical_flow(imgs, dst='./capture_folder'):
    for idx, file in enumerate(imgs):
        copyfile(file, dst + '/' + str(idx) + '.bmp')
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    cap = cv2.VideoCapture(dst + "/%01d.bmp")
    color = np.random.randint(0, 255, (100, 3))
    # Take first frame and find corners in it
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)

    while (1):
        ret, frame = cap.read()
        if frame is None:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **lk_params)
        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]
        # draw the tracks
        for i, (new, old) in enumerate(zip(good_new, good_old)):
            a, b = new.ravel()
            c, d = old.ravel()
            # Drawing functions require integer coordinates
            mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                            color[i].tolist(), 2)
            frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(),
                               -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        # waitKey is required for imshow to actually refresh the window
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1, 1, 2)
    cv2.destroyAllWindows()
    cap.release()
    return mask
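optical_flow() copies the input images into dst as numbered .bmp files so that cv2.VideoCapture can read them back as an image sequence. It needs import cv2, import numpy as np, and from shutil import copyfile. A usage sketch (the glob pattern is an assumption):

import glob
import cv2

frames = sorted(glob.glob('./frames/*.bmp'))
trail_mask = optical_flow(frames, dst='./capture_folder')
cv2.imwrite('trails.png', trail_mask)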
Example #5
def calc_features_by_lk(base_out, input_video_filename, base_output_filename):
    capture = cv2.VideoCapture(input_video_filename)
    while (True):
        ret1, m1 = capture.read()
        ret2, m2 = capture.read()
        if (not ret1 or not ret2):
            break

        frame1 = m1.copy()
        frame2 = m2.copy()

        frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

        sc = frame1.mean()

        if (sc < 1.6):
            continue

        frame1_features = cv2.goodFeaturesToTrack(frame1, FEATURE_MAX_NUM,
                                                  0.01, 0.01)

        frame2_features, found_futures, found_err = cv2.calcOpticalFlowPyrLK(
            frame1,
            frame2,
            frame1_features,
            None,
            winSize=(5, 5),
            maxLevel=5,
            criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                      0.03))

        draw_path_on_baseimage(m1, frame1_features, frame2_features,
                               found_futures, found_err)
        draw_features_in_base_image(base_out, frame1_features, frame2_features,
                                    found_futures, found_err)

        cv2.imshow("output_window", m1)
        cv2.imshow("superposition_window", base_out)

    capture.release()
    pass
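calc_features_by_lk references FEATURE_MAX_NUM and two drawing helpers that are defined elsewhere in its project. Hypothetical stand-ins so the snippet runs:

import cv2

FEATURE_MAX_NUM = 500  # assumed corner cap for goodFeaturesToTrack

def draw_path_on_baseimage(img, pts1, pts2, status, err):
    # Hypothetical stub: draw a line for each successfully tracked point
    for p, q, st in zip(pts1, pts2, status):
        if st[0] == 1:
            x1, y1 = p.ravel()
            x2, y2 = q.ravel()
            cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)),
                     (0, 255, 0), 1)

def draw_features_in_base_image(base, pts1, pts2, status, err):
    # Hypothetical stub: mark the tracked end-points on the base image
    for q, st in zip(pts2, status):
        if st[0] == 1:
            x, y = q.ravel()
            cv2.circle(base, (int(x), int(y)), 2, (0, 0, 255), -1)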
Example #6
def main():

    lk_params = dict(winSize=(5, 5),
                     maxLevel=2,
                     criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT,
                               10, 0.03))

    feature_params = dict(maxCorners=200,
                          qualityLevel=0.03,
                          minDistance=30,
                          blockSize=7)

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        pygame.init()
        pygame.joystick.init()

        track_len = 10
        detect_interval = 5
        tracks = []
        frame_idx = 0
        VIDEO_SCALE = 0.35

        #drone.set_video_encoder_rate(2)
        container = av.open(drone.get_video_stream())

        js = pygame.joystick.Joystick(0)
        js.init()
        js_name = js.get_name()
        print('Joystick name: ' + js_name)

        while True:
            time.sleep(0.01)
            for frameRaw in container.decode(video=0):
                checkController()
                frame1 = cv.cvtColor(np.array(frameRaw.to_image()),
                                     cv.COLOR_RGB2BGR)
                frame = cv.resize(frame1, (0, 0),
                                  fx=VIDEO_SCALE,
                                  fy=VIDEO_SCALE)
                frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                vis = frame.copy()

                if len(tracks) > 0:
                    img0, img1 = prev_gray, frame_gray
                    p0 = np.float32([tr[-1]
                                     for tr in tracks]).reshape(-1, 1, 2)
                    p1, _st, _err = cv.calcOpticalFlowPyrLK(
                        img0, img1, p0, None, **lk_params)
                    p0r, _st, _err = cv.calcOpticalFlowPyrLK(
                        img1, img0, p1, None, **lk_params)
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 1
                    new_tracks = []

                    for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2),
                                                     good):
                        if not good_flag:
                            continue
                        tr.append((x, y))
                        if len(tr) > track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0), -1)
                    tracks = new_tracks
                    cv.polylines(vis, [np.int32(tr) for tr in tracks], False,
                                 (0, 255, 0))
                    draw_str(vis, (20, 20), 'track count: %d' % len(tracks))

                if frame_idx % detect_interval == 0:
                    mask = np.zeros_like(frame_gray)
                    mask[:] = 255
                    for x, y in [np.int32(tr[-1]) for tr in tracks]:
                        cv.circle(mask, (x, y), 5, 0, -1)
                    p = cv.goodFeaturesToTrack(frame_gray,
                                               mask=mask,
                                               **feature_params)
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            tracks.append([(x, y)])

                frame_idx += 1
                prev_gray = frame_gray
                cv.imshow('Tello Dense Optical - Middlebury Research', vis)

                ch = cv.waitKey(1)
                if ch == 27:
                    break

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        drone.quit()
        cv.destroyAllWindows()
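main() relies on names defined elsewhere: drone (a tellopy.Tello), checkController, and draw_str. A sketch of the assumed pieces; draw_str follows the overlay helper from the OpenCV samples' common.py:

import sys
import time
import traceback

import av
import cv2 as cv
import numpy as np
import pygame
import tellopy

drone = tellopy.Tello()

def checkController():
    # Hypothetical stub: poll joystick events and forward commands to the drone
    pygame.event.pump()

def draw_str(dst, target, s):
    # Shadowed text overlay, as in the OpenCV samples
    x, y = target
    cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0,
               (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
    cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0,
               (255, 255, 255), lineType=cv.LINE_AA)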
Example #7
def of_demo():
    pixels_cut = 50
    pixels_cut_left = 100

    cap = cv2.VideoCapture('rally.avi')
    # cap = cv2.VideoCapture('input.mp4')

    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()
    old_frame = old_frame[:-pixels_cut, pixels_cut_left:, :]
    out = cv2.VideoWriter('output2.avi', fourcc, 30.0,
                          (old_frame.shape[1], old_frame.shape[0]))

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)
    frno = 0
    restart = False
    while (1):
        frno += 1
        ret, frame = cap.read()
        if ret and frno < 70:
            # Crop only after confirming the read succeeded
            frame = frame[:-pixels_cut, pixels_cut_left:, :]
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                p0 = cv2.goodFeaturesToTrack(old_gray,
                                             mask=None,
                                             **feature_params)
                restart = False
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0,
                                                   None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = p0[successful]

            # draw the tracks
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    # Drawing functions require integer coordinates
                    mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                    color[i].tolist(), 2)
                    frame = cv2.circle(frame, (int(a), int(b)), 4,
                                       color[i].tolist(), -1)
                    count_of_moved += 1

            # mask_of_mask = cv2.inRange(mask, (0, 0, 0), (3, 3, 3))/255
            # frame = frame*(np.expand_dims(mask_of_mask.astype(np.uint8),axis=2))
            img = cv2.add(frame, mask)

            # Fade the trail overlay (np.float was removed in NumPy 1.24)
            mask = np.round(mask.astype(np.float32) / 1.1).astype(np.uint8)

            cv2.imshow('frame', img)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
            out.write(img)
        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
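The division by 1.1 near the end of the loop fades the trail overlay a little on every frame, so old tracks decay exponentially instead of accumulating forever. The same idea as a reusable helper:

def fade(overlay, factor=1.1):
    # After k frames a pixel of value v has decayed to roughly v / factor**k
    return np.round(overlay.astype(np.float32) / factor).astype(np.uint8)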
Example #8
def start_tracking():
    # Initialize the video capture object
    cap = cv.VideoCapture(0)

    # Define the scaling factor for the frames
    scaling_factor = 0.5

    # Number of frames to track
    num_frames_to_track = 5

    # Skipping factor
    num_frames_jump = 2

    # Initialize variables
    tracking_paths = []
    frame_index = 0

    # Define tracking parameters
    tracking_params = dict(winSize=(11, 11),
                           maxLevel=2,
                           criteria=(cv.TERM_CRITERIA_EPS
                                     | cv.TERM_CRITERIA_COUNT, 10, 0.03))

    # Iterate until the user hits the 'Esc' key
    while True:
        # Capture the current frame
        _, frame = cap.read()

        # Resize the frame
        frame = cv.resize(frame,
                          None,
                          fx=scaling_factor,
                          fy=scaling_factor,
                          interpolation=cv.INTER_AREA)

        # Convert to grayscale
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

        # Create a copy of the frame
        output_img = frame.copy()

        if len(tracking_paths) > 0:
            # Get images
            prev_img, current_img = prev_gray, frame_gray

            # Organize the feature points
            feature_points_0 = np.float32([tp[-1] for tp in \
                    tracking_paths]).reshape(-1, 1, 2)

            # Compute optical flow
            feature_points_1, _, _ = cv.calcOpticalFlowPyrLK(
                prev_img, current_img, feature_points_0, None,
                **tracking_params)

            # Compute reverse optical flow
            feature_points_0_rev, _, _ = cv.calcOpticalFlowPyrLK(
                current_img, prev_img, feature_points_1, None,
                **tracking_params)

            # Compute the difference between forward and
            # reverse optical flow
            diff_feature_points = abs(feature_points_0 - \
                    feature_points_0_rev).reshape(-1, 2).max(-1)

            # Extract the good points
            good_points = diff_feature_points < 1

            # Initialize variable
            new_tracking_paths = []

            # Iterate through all the good feature points
            for tp, (x, y), good_points_flag in zip(
                    tracking_paths, feature_points_1.reshape(-1, 2),
                    good_points):
                # If the flag is not true, then continue
                if not good_points_flag:
                    continue

                # Append the X and Y coordinates and check if
                # its length greater than the threshold
                tp.append((x, y))
                if len(tp) > num_frames_to_track:
                    del tp[0]

                new_tracking_paths.append(tp)

                # Draw a circle around the feature points
                cv.circle(output_img, (int(x), int(y)), 3, (0, 255, 0), -1)

            # Update the tracking paths
            tracking_paths = new_tracking_paths

            # Draw lines
            cv.polylines(output_img, [np.int32(tp) for tp in \
                    tracking_paths], False, (0, 150, 0))

        # Go into this 'if' condition after skipping the
        # right number of frames
        if not frame_index % num_frames_jump:
            # Create a mask and draw the circles
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tp[-1]) for tp in tracking_paths]:
                cv.circle(mask, (x, y), 6, 0, -1)

            # Compute good features to track
            feature_points = cv.goodFeaturesToTrack(frame_gray,
                                                    mask=mask,
                                                    maxCorners=500,
                                                    qualityLevel=0.3,
                                                    minDistance=7,
                                                    blockSize=7)

            # Check if feature points exist. If so, append them
            # to the tracking paths
            if feature_points is not None:
                for x, y in np.float32(feature_points).reshape(-1, 2):
                    tracking_paths.append([(x, y)])

        # Update variables
        frame_index += 1
        prev_gray = frame_gray

        # Display output
        cv.imshow('Optical Flow', output_img)

        # Check if the user hit the 'Esc' key
        c = cv.waitKey(1)
        if c == 27:
            break

    # Release the capture object and close the window
    cap.release()
    cv.destroyAllWindows()
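start_tracking filters tracks with a forward-backward consistency check: points are tracked from the previous frame to the current one and back again, and kept only if the round trip lands within one pixel of where it started. The test in isolation:

def fb_consistent(img0, img1, p0, lk_params, max_error=1.0):
    # Forward flow, then backward flow, then compare round-trip positions
    p1, _, _ = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
    p0r, _, _ = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
    fb_err = abs(p0 - p0r).reshape(-1, 2).max(-1)
    return p1, fb_err < max_error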
Example #9
# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while(1):
    ret,frame = cap.read()
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]

    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel()
        c,d = old.ravel()
        # Drawing functions require integer coordinates
        mask = cv2.line(mask, (int(a),int(b)), (int(c),int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a),int(b)), 5, color[i].tolist(), -1)
    img = cv2.add(frame,mask)

    cv2.imshow('frame',img)
    k = cv2.waitKey(60) & 0xff
    if k == 27:
        break

    # Update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
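This fragment matches the core loop of the classic OpenCV Lucas-Kanade tutorial; the missing cap, feature_params, lk_params, and color come from a preamble along these lines:

import cv2
import numpy as np

cap = cv2.VideoCapture('video.mp4')  # any video source
feature_params = dict(maxCorners=100, qualityLevel=0.3,
                      minDistance=7, blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))
color = np.random.randint(0, 255, (100, 3))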
Example #10
def tracking_lucas_kanade():

    cap = cv2.VideoCapture('find_chocolate.mp4')

    img_chocolate = cv2.imread('marker.jpg')
    gray_chocolate = cv2.cvtColor(img_chocolate, cv2.COLOR_BGR2GRAY)

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.2,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(35, 35),
                     maxLevel=4,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (1000, 3))

    # Take first frame and find corners in it

    ret, old_frame = cap.read()

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

    # Canvas for the track lines; the original accidentally drew into the
    # RANSAC inlier mask returned by cv2.findHomography below
    mask = np.zeros_like(old_frame)

    orb = cv2.ORB_create(1000, 1.1, 13)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    kpts1, descs1 = orb.detectAndCompute(gray_chocolate, None)

    # Setting video format. Google for "fourcc"
    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    # Setting up new video writer
    image_size = (old_frame.shape[1], old_frame.shape[0])
    # writer = cv2.VideoWriter('sample_tracking_orb.avi', fourcc, frames_per_second, image_size)
    out = cv2.VideoWriter('sample_tracking_lucas_kanade.avi', fourcc, 30.0,
                          image_size)

    frno = 0
    restart = False
    while (1):
        frno += 1
        ret, frame = cap.read()
        if ret:

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if restart:
                orb = cv2.ORB_create(1000, 1.1, 13)
                kpts2, descs2 = orb.detectAndCompute(frame_gray, None)
                restart = False

            kpts2, descs2 = orb.detectAndCompute(frame_gray, None)

            matches = bf.match(descs1, descs2)
            # Sort them in the order of their distance.
            dmatches = sorted(matches, key=lambda x: x.distance)

            ## extract the matched keypoints
            src_pts = np.float32([kpts1[m.queryIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kpts2[m.trainIdx].pt
                                  for m in dmatches]).reshape(-1, 1, 2)

            ## find homography matrix and do perspective transform
            M, inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
                                                5.0)
            h, w = img_chocolate.shape[:2]
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)

            ## draw found regions
            frm = cv2.polylines(frame, [np.int32(dst)], True, (0, 0, 255), 1,
                                cv2.LINE_AA)

            # ## draw match lines
            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches[:8], None, flags=2)

            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                   dst_pts, None, **lk_params)
            successful = (st == 1)
            if np.sum(successful) == 0:
                restart = True
            # Select good points
            good_new = p1[successful]
            good_old = dst_pts[successful]

            # draw the tracks
            count_of_moved = 0
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                velocity = np.sqrt((a - c)**2 + (b - d)**2)
                if velocity > 1:
                    # Drawing functions require integer coordinates
                    mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                    color[i].tolist(), 2)
                    frame = cv2.circle(frame, (int(a), int(b)), 4,
                                       color[i].tolist(), -1)
                    count_of_moved += 1

            # res = cv2.drawMatches(img_chocolate, kpts1, frm, kpts2, dmatches, None, flags=2) #[:8]
            # Composite the accumulated track lines before writing the frame
            frame = cv2.add(frame, mask)
            out.write(frame)

            cv2.namedWindow('orb_match', cv2.WINDOW_NORMAL)

            cv2.imshow('orb_match', frame)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

            # Now update the previous frame and previous points
            old_gray = frame_gray.copy()
            p0 = good_new.reshape(-1, 1, 2)

        else:
            break

    cv2.destroyAllWindows()
    cap.release()
    out.release()
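cv2.findHomography with cv2.RANSAC also returns a per-match inlier mask (renamed inlier_mask above so it does not clobber the drawing canvas). If desired, it can restrict tracking inside the loop to geometrically consistent matches:

inliers = inlier_mask.ravel().astype(bool)
consistent_dst_pts = dst_pts[inliers]  # feed only these to calcOpticalFlowPyrLK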
Example #11
def getVideo():
    global tracks
    global track_len
    global detect_interval
    global frame_idx
    global VIDEO_SCALE
    global videoLabel
    global typeOfVideo
    global connectingToDrone
    global takePicture
    frameCount = 0  # Stores the current frame being processed
    frame1Optical = None  # Store variables for first frame
    frame2Optical = None  # Store variables for second frame
    prvs = None
    hsv = None

    try:
        while connectingToDrone:
            #time.sleep(0.03)
            for frameRaw in container.decode(video=0):
                checkController()
                if takePicture:
                    frame1 = np.array(frameRaw.to_image())
                    #im = Image.fromarray(frame1, 'RGB')
                    cv.imwrite(
                        "pics/" + datetime.datetime.now().isoformat() + ".jpg",
                        frame1)
                    #imageTk = ImageTk.PhotoImage(image=im)
                    #videoLabel.configure(image=imageTk)
                    #videoLabel.image = imageTk
                    #videoLabel.update()
                    takePicture = False
                if typeOfVideo.get() == "Canny Edge Detection":
                    frame1 = np.array(frameRaw.to_image())
                    frame1 = cv.resize(frame1, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    frameCanny = cv.Canny(frame1, 50, 100)
                    im = Image.fromarray(frameCanny)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "LK Optical Flow":
                    frame1 = np.array(frameRaw.to_image())
                    frame = frame1
                    frame = cv.resize(frame1, (0, 0),
                                      fx=VIDEO_SCALE,
                                      fy=VIDEO_SCALE)
                    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                    vis = frame.copy()
                    if len(tracks) > 0:
                        img0, img1 = prev_gray, frame_gray
                        p0 = np.float32([tr[-1]
                                         for tr in tracks]).reshape(-1, 1, 2)
                        p1, _st, _err = cv.calcOpticalFlowPyrLK(
                            img0, img1, p0, None, **lk_params)
                        p0r, _st, _err = cv.calcOpticalFlowPyrLK(
                            img1, img0, p1, None, **lk_params)
                        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                        good = d < 1
                        new_tracks = []

                        # Keep only tracks that pass the forward-backward check
                        for tr, (x, y), good_flag in zip(
                                tracks, p1.reshape(-1, 2), good):
                            if not good_flag:
                                continue
                            tr.append((x, y))
                            if len(tr) > track_len:
                                del tr[0]
                            new_tracks.append(tr)
                            cv.circle(vis, (int(x), int(y)), 2, (0, 255, 0),
                                      -1)
                        tracks = new_tracks
                        cv.polylines(vis, [np.int32(tr) for tr in tracks],
                                     False, (0, 255, 0))
                        draw_str(vis, (20, 20),
                                 'track count: %d' % len(tracks))

                    if frame_idx % detect_interval == 0:
                        mask = np.zeros_like(frame_gray)
                        mask[:] = 255
                        for x, y in [np.int32(tr[-1]) for tr in tracks]:
                            cv.circle(mask, (x, y), 5, 0, -1)
                        p = cv.goodFeaturesToTrack(frame_gray,
                                                   mask=mask,
                                                   **feature_params)
                        if p is not None:
                            for x, y in np.float32(p).reshape(-1, 2):
                                tracks.append([(x, y)])

                    frame_idx += 1
                    prev_gray = frame_gray
                    #cv.imshow('Tello Dense Optical - Middlebury Research', vis)
                    im = Image.fromarray(vis, 'RGB')
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "Optical Flow":
                    frameCount += 1
                    if frameCount == 1:  # If first frame
                        frame1Optical = cv.cvtColor(
                            np.array(frameRaw.to_image()), cv.COLOR_RGB2BGR)
                        prvs = cv.cvtColor(frame1Optical, cv.COLOR_BGR2GRAY)
                        hsv = np.zeros_like(frame1Optical)
                        hsv[..., 1] = 255
                    else:  # If not first frame
                        frame2Optical = cv.cvtColor(
                            np.array(frameRaw.to_image()), cv.COLOR_RGB2BGR)
                        next = cv.cvtColor(frame2Optical, cv.COLOR_BGR2GRAY)
                        flow = cv.calcOpticalFlowFarneback(
                            prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
                        mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
                        hsv[..., 0] = ang * 180 / np.pi / 2
                        hsv[..., 2] = cv.normalize(mag, None, 0, 255,
                                                   cv.NORM_MINMAX)
                        bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
                        # Show the flow visualization (bgr) rather than the
                        # raw frame, which left bgr unused
                        im = Image.fromarray(
                            cv.resize(bgr, (0, 0),
                                      fx=VIDEO_SCALE,
                                      fy=VIDEO_SCALE))
                        imageTk = ImageTk.PhotoImage(image=im)
                        videoLabel.configure(image=imageTk)
                        videoLabel.image = imageTk
                        videoLabel.update()
                        k = cv.waitKey(30) & 0xff
                        if k == 27:
                            break
                        prvs = next
                elif typeOfVideo.get() == "Grayscale":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)

                    im = Image.fromarray(frame1, 'RGB')
                    gray = im.convert('L')

                    # Convert to a grayscale numpy array
                    bw = np.asarray(gray).copy()

                    im = Image.fromarray(bw)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "BGR":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    im = Image.fromarray(frame1)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "Black & White":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)

                    im = Image.fromarray(frame1, 'RGB')
                    gray = im.convert('L')

                    # Using numpy, convert pixels to pure black or white
                    bw = np.asarray(gray).copy()

                    # Pixel range is 0...255, 256/2 = 128
                    bw[bw < 128] = 0  # Black
                    bw[bw >= 128] = 255  # White
                    im = Image.fromarray(bw)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                else:  # typeOfVideo.get() == "Normal":
                    frame1 = np.array(frameRaw.to_image())
                    frame1 = cv.resize(frame1, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    im = Image.fromarray(frame1, 'RGB')
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()

            ch = cv.waitKey(1)
            if ch == 27:
                break
    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
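getVideo() reads a long list of globals: the decoded container, the Tk videoLabel, and the tracking state shared with the LK branch. A sketch of the tracking-related globals, mirroring the values used in Example #6:

import av

tracks = []
track_len = 10
detect_interval = 5
frame_idx = 0
VIDEO_SCALE = 0.35

lk_params = dict(winSize=(5, 5), maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT,
                           10, 0.03))
feature_params = dict(maxCorners=200, qualityLevel=0.03,
                      minDistance=30, blockSize=7)
container = av.open(drone.get_video_stream())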
Example #12
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        container = av.open(drone.get_video_stream())
        cap = drone.get_video_stream()

        # params for ShiTomasi corner detection
        feature_params = dict(maxCorners=100,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        # Parameters for lucas kanade optical flow
        lk_params = dict(winSize=(15, 15),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS
                                   | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # Create some random colors
        color = np.random.randint(0, 255, (100, 3))

        old_gray = None
        while True:
            for frame in container.decode(video=0):
                img_bgr = cv2.cvtColor(np.array(frame.to_image()),
                                       cv2.COLOR_RGB2BGR)
                frame_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)

                # First frame: detect features and move on. The original
                # re-detected features every frame and computed flow from a
                # frame to itself, which yields no motion.
                if old_gray is None:
                    old_gray = frame_gray
                    p0 = cv2.goodFeaturesToTrack(old_gray,
                                                 mask=None,
                                                 **feature_params)
                    mask = np.zeros_like(img_bgr)
                    continue

                # calculate optical flow
                p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                       p0, None, **lk_params)
                # Select good points
                good_new = p1[st == 1]
                good_old = p0[st == 1]
                # draw the tracks (drawing functions need integer coordinates)
                for i, (new, old) in enumerate(zip(good_new, good_old)):
                    a, b = new.ravel()
                    c, d = old.ravel()
                    mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)),
                                    color[i].tolist(), 2)
                    img_bgr = cv2.circle(img_bgr, (int(a), int(b)), 5,
                                         color[i].tolist(), -1)
                img = cv2.add(img_bgr, mask)
                cv2.imshow('frame', img)
                k = cv2.waitKey(30) & 0xff
                if k == 27:
                    break
                # Now update the previous frame and previous points
                old_gray = frame_gray.copy()
                p0 = good_new.reshape(-1, 1, 2)

                #image = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
                cv2.imshow('Original', img)
                print("ImgShow")
                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                cv2.waitKey(1)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #13
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    img_draw = frame.copy()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # First-frame case
    if prevImg is None:
        prevImg = gray
        # Create an image matching the frame size for drawing the track lines
        lines = np.zeros_like(frame)
        # Detect corners to start tracking  ---①
        prevPt = cv2.goodFeaturesToTrack(prevImg, 200, 0.01, 10)
    else:
        nextImg = gray
        # Find the corner points of the next frame via optical flow ---②
        nextPt, status, err = cv2.calcOpticalFlowPyrLK(prevImg, nextImg,
                                                       prevPt, None, criteria=termcriteria)
        # Keep only the corners found in both frames ---③
        prevMv = prevPt[status == 1]
        nextMv = nextPt[status == 1]
        for i, (p, n) in enumerate(zip(prevMv, nextMv)):
            px, py = p.ravel()
            nx, ny = n.ravel()
            # Draw a line from the previous corner to the new corner ---④
            # (drawing functions require integer coordinates)
            cv2.line(lines, (int(px), int(py)), (int(nx), int(ny)),
                     color[i].tolist(), 2)
            # Draw a dot at the new corner
            cv2.circle(img_draw, (int(nx), int(ny)), 2, color[i].tolist(), -1)
        # Composite the accumulated track lines onto the output image ---⑤
        img_draw = cv2.add(img_draw, lines)
        # Carry the frame and corner points over for the next iteration
        prevImg = nextImg
        prevPt = nextMv.reshape(-1, 1, 2)
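The loop assumes cap, color, termcriteria, and prevImg were prepared beforehand; a plausible preamble:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
color = np.random.randint(0, 255, (200, 3))
termcriteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
prevImg = None  # None marks the first frame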
Example #14
        # This function finds good "tracking points" in the image, such as sharp
        # corners. It returns a numpy array of shape (n, 1, 2), where n is the
        # number of points found.
        old_points = cv2.goodFeaturesToTrack(old_frame.transpose(),
                                             mask=None,
                                             **feature_params)
        camera.copy_last_frame(new_frame)

        # This function looks at the tracking points from the previous frame
        # and tries to find the same points in the new frame. new_points has
        # the same shape as old_points; status has shape (n, 1), with 1 where
        # the corresponding point was found in the new frame and 0 where the
        # track was lost. error holds a per-point tracking-error estimate,
        # which is unused here.
        # TODO: Check to make sure there are more than 0 points in old_points, or else bad error happens.
        new_points, status, error = cv2.calcOpticalFlowPyrLK(
            old_frame.transpose(), new_frame.transpose(), old_points, None,
            **lk_params)

        # This filters out the points so that we are left with only points that were found in both the old and new frame
        old_points = old_points[status == 1]
        new_points = new_points[status == 1]

        # This code draws the frames with tracking points overlaid.
        frame_drawing = np.copy(new_frame)
        for old_point, new_point in zip(old_points, new_points):
            frame_drawing[int(old_point[0]), int(old_point[1])] = 0
            frame_drawing[int(new_point[0]), int(new_point[1])] = 255
        cv2.imshow('frame', frame_drawing.transpose())
        if (cv2.waitKey(2) & 0xFF) == 27:
            break
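This last fragment tracks points on transposed frames (the source evidently stores images column-major) and talks to a camera object with a copy_last_frame method. Hypothetical definitions for the names it assumes:

import cv2
import numpy as np

feature_params = dict(maxCorners=100, qualityLevel=0.3,
                      minDistance=7, blockSize=7)
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))

class Camera:
    # Hypothetical wrapper matching the calls in the fragment
    def __init__(self, index=0):
        self._cap = cv2.VideoCapture(index)

    def copy_last_frame(self, buf):
        ret, frame = self._cap.read()
        if ret:
            np.copyto(buf, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).T)

camera = Camera()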