Example #1
def main():
    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        pygame.init()
        pygame.joystick.init()

        container = av.open(drone.get_video_stream())
        frameCount = 0  # Stores the current frame being processed
        frame1 = None  # Store variables for first frame
        frame2 = None  # Store variables for second frame
        prvs = None
        hsv = None

        while True:
            for frame in container.decode(video=0):
                checkController()
                frameCount += 1
                if frameCount == 1:  # If first frame
                    frame1 = cv2.cvtColor(np.array(frame.to_image()),
                                          cv2.COLOR_RGB2BGR)
                    prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
                    hsv = np.zeros_like(frame1)
                    hsv[..., 1] = 255
                else:  # If not first frame
                    frame2 = cv2.cvtColor(np.array(frame.to_image()),
                                          cv2.COLOR_RGB2BGR)
                    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
                    flow = cv2.calcOpticalFlowFarneback(
                        prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
                    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
                    hsv[..., 0] = ang * 180 / np.pi / 2
                    hsv[..., 2] = cv2.normalize(mag, None, 0, 255,
                                                cv2.NORM_MINMAX)
                    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
                    cv2.imshow('frame2', bgr)
                    k = cv2.waitKey(30) & 0xff
                    if k == 27:
                        break
                    elif k == ord('s'):
                        cv2.imwrite('opticalfb.png', frame2)
                        cv2.imwrite('opticalhsv.png', bgr)
                    prvs = next
                print(frameCount)

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.quit()
        cv2.destroyAllWindows()
Example #2
def optical_flow(img: np.ndarray, previous: np.ndarray) -> np.ndarray:
    mask = np.zeros(shape=img.shape, dtype=np.uint8)
    mask[..., 1] = 255

    gray = to_gray(img)
    previous = to_gray(previous)

    flow = cv2.calcOpticalFlowFarneback(previous, gray, None, 0.5, 3, 15, 3, 5,
                                        1.2, 0)

    magnitude, polar_angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    mask[..., 0] = polar_angle * 180 / np.pi / 2
    mask[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)
    return rgb
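
A minimal usage sketch for the helper above, assuming `to_gray` is a thin wrapper around cv2.cvtColor(..., cv2.COLOR_BGR2GRAY) and the video path is only a placeholder:

import cv2
import numpy as np

def to_gray(frame: np.ndarray) -> np.ndarray:
    # Assumed helper: BGR frame to single-channel grayscale.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

cap = cv2.VideoCapture("some_video.mp4")  # hypothetical input
ok, previous = cap.read()
while ok:
    ok, frame = cap.read()
    if not ok:
        break
    vis = optical_flow(frame, previous)  # BGR visualization of the dense flow
    cv2.imshow("flow", vis)
    previous = frame
    if cv2.waitKey(30) & 0xFF == 27:  # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()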
Example #3
def optical_flow(previous_, current_):
    """
    Dense (Farneback) optical flow between two frames.
    :param previous_: previous frame (BGR)
    :param current_: current frame (BGR)
    :return: flow field rendered as a BGR image (hue = direction, value = magnitude)
    """
    previous = cv2.cvtColor(previous_, cv2.COLOR_BGR2GRAY)
    current = cv2.cvtColor(current_, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(previous, current, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    hsv = np.zeros_like(previous_)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    flow = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return flow
Example #4
def Dense_Optical_Flow(image):
    # Generator: prime it with next(), then feed each new grayscale frame via
    # .send(frame) and receive the HSV-encoded flow visualization back.
    image_old = image
    frame1 = image
    next = image
    hsv = np.zeros([frame1.shape[0], frame1.shape[1], 3], dtype=np.uint8)
    hsv[..., 1] = 255
    while True:
        flow = cv2.calcOpticalFlowFarneback(image_old, next, None, 0.5, 3, 50,
                                            3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        image_old = next
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        next = (yield bgr)
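
A minimal sketch of driving the generator above (the video path and window name are placeholders); frames fed to it must be single-channel grayscale, since that is what calcOpticalFlowFarneback expects:

import cv2

cap = cv2.VideoCapture("some_video.mp4")  # hypothetical input
ok, first = cap.read()
if ok:
    flow_gen = Dense_Optical_Flow(cv2.cvtColor(first, cv2.COLOR_BGR2GRAY))
    next(flow_gen)  # prime the generator; the first flow is the frame against itself
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        try:
            bgr = flow_gen.send(gray)  # feed a frame, get the flow image back
        except StopIteration:          # the generator exits when Esc is pressed inside it
            break
        cv2.imshow("dense flow", bgr)
cap.release()
cv2.destroyAllWindows()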
Example #5
def flowFarnebackWithFrames(frame1, frame2, fps=30):
    prevF = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    nextF = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    _, cols, _ = frame1.shape
    somesize = int(cols / 30)
    # flow holds the per-pixel (dx, dy) displacement of every pixel
    flow = cv2.calcOpticalFlowFarneback(prev=prevF,
                                        next=nextF,
                                        flow=None,
                                        pyr_scale=0.5,
                                        levels=10,
                                        winsize=somesize,
                                        iterations=10,
                                        poly_n=5,
                                        poly_sigma=1.2,
                                        flags=0)
    res = frame1.copy()
    res = roughlyBlur(src=frame1, dsc=res, allflow=flow, fps=fps)
    return res
Example #6
    def next_frame(self):
        """
        This function advances to the next frame of video. This entails:
         - Clearing the canvas and displaying the next frame of video
         - If the checkbox is checked, copy the mask over from the last frame to the next
         - Save the annotations to a file, if needed
        :return: None
        """
        if self.current_frame < len(self.image_seq) - 1:
            self.current_frame += 1
            self.show_image(self.current_frame)
            if self.mask_modified:
                np.save(self.annotation_file, self.get_final_mask())
                self.mask_modified = False
                print("Saved annotations to {}".format(self.annotation_file))
            if self.copy_mask_var.get() == 1 and np.all(
                    self.image_mask[self.current_frame, :, :, 3] == 0):
                self.image_mask[self.current_frame, :, :, 3] = \
                    self.image_mask[self.current_frame - 1, :, :, 3]
                self.mask_modified = True
                if self.optical_flow_var.get() == 1:
                    flow = cv2.calcOpticalFlowFarneback(
                        cv2.cvtColor(self.image_seq[self.current_frame - 1],
                                     cv2.COLOR_RGB2GRAY),
                        cv2.cvtColor(self.image_seq[self.current_frame],
                                     cv2.COLOR_RGB2GRAY), None, 0.5, 3, 15, 3,
                        5, 1.2, 0)
                    flow = np.mean(flow,
                                   axis=(0, 1)) * self.flow_scale_slider.get()
                    dx = round(flow[1])
                    dy = round(flow[0])
                    self.image_mask[self.current_frame, :, :, :] = np.roll(
                        self.image_mask[self.current_frame, :, :, :],
                        (int(dx), int(dy)),
                        axis=(0, 1))

            self.display_mask()

            print("Now on frame {} out of {}.".format(self.current_frame,
                                                      len(self.image_seq) - 1))
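
The mask propagation above boils down to: compute the dense flow between consecutive frames, average it into a single displacement, and shift the annotation mask by that amount. A small self-contained sketch of the same idea (the function and argument names here are illustrative, not from the original class):

import cv2
import numpy as np

def shift_mask_by_mean_flow(prev_rgb, curr_rgb, mask, scale=1.0):
    # Dense Farneback flow between the two frames (grayscale inputs required).
    flow = cv2.calcOpticalFlowFarneback(
        cv2.cvtColor(prev_rgb, cv2.COLOR_RGB2GRAY),
        cv2.cvtColor(curr_rgb, cv2.COLOR_RGB2GRAY),
        None, 0.5, 3, 15, 3, 5, 1.2, 0)
    # Collapse the flow field to one average (dx, dy) displacement.
    mean_flow = np.mean(flow, axis=(0, 1)) * scale
    rows = int(round(mean_flow[1]))  # vertical shift
    cols = int(round(mean_flow[0]))  # horizontal shift
    # Translate the mask by the average motion; np.roll wraps at the borders.
    return np.roll(mask, (rows, cols), axis=(0, 1))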
Example #7
    def read_data(self):
        print("Hello")
        prvs = None
        hsv = None
        num_img = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if frame is not None:
                if (num_img == 0):
                    num_img += 1
                    prvs = frame.copy()
                    prvs_gray = cv2.cvtColor(prvs, cv2.COLOR_RGB2GRAY)
                    continue

                curr = frame.copy()
                curr_gray = cv2.cvtColor(curr, cv2.COLOR_RGB2GRAY)
                flow = cv2.calcOpticalFlowFarneback(prvs_gray, curr_gray, None,
                                                    0.5, 3, 15, 3, 5, 1.2, 0)
                hsv = np.zeros_like(prvs)
                mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
                hsv[..., 0] = ang * 180 / np.pi / 2
                hsv[..., 1] = 255
                hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
                rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                number_str = str(num_img)
                print("Saving file number: {}".format(number_str), end='\r')
                zero_filled_number = number_str.zfill(5)
                cv2.imwrite(
                    PATH_TO_SAVE_DIRECTORY + zero_filled_number + '.jpg', rgb)
                num_img += 1

                prvs = curr
                prvs_gray = curr_gray
                cv2.imshow('frame2', rgb)
                k = cv2.waitKey(30) & 0xff
                if k == 27:
                    break
            else:
                break
Example #8
def main():
    drone = tellopy.Tello()
    VIDEO_SCALE = 0.25

    try:
        drone.connect()
        drone.wait_for_connection(60.0)
        frameCount = 0
        prev = None
        prevgray = None
        show_hsv = False
        show_glitch = False
        cur_glitch = None

        container = av.open(drone.get_video_stream())

        while True:
            for frameRaw in container.decode(video=0):
                frameCount += 1
                if frameCount == 1:
                    prev1 = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    prev = cv.resize(prev1, (0, 0),
                                     fx=VIDEO_SCALE,
                                     fy=VIDEO_SCALE)
                    prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
                    cur_glitch = prev.copy()
                else:
                    img1 = cv.cvtColor(np.array(frameRaw.to_image()),
                                       cv.COLOR_RGB2BGR)
                    img = cv.resize(img1, (0, 0),
                                    fx=VIDEO_SCALE,
                                    fy=VIDEO_SCALE)
                    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
                    flow = cv.calcOpticalFlowFarneback(prevgray, gray, None,
                                                       0.5, 3, 15, 3, 5, 1.2,
                                                       0)
                    prevgray = gray

                    cv.imshow('flow', draw_flow(gray, flow))
                    if show_hsv:
                        cv.imshow('flow HSV', draw_hsv(flow))
                    if show_glitch:
                        cur_glitch = warp_flow(cur_glitch, flow)
                        cv.imshow('glitch', cur_glitch)

                    ch = cv.waitKey(5)
                    if ch == 27:
                        break
                    if ch == ord('1'):
                        show_hsv = not show_hsv
                        print('HSV flow visualization is', ['off',
                                                            'on'][show_hsv])
                    if ch == ord('2'):
                        show_glitch = not show_glitch
                        if show_glitch:
                            cur_glitch = img.copy()
                        print('glitch is', ['off', 'on'][show_glitch])

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)

    finally:
        drone.quit()
        cv.destroyAllWindows()
Example #9
def dense_of_demo():
    def draw_flow(img, flow, step=16):
        h, w = img.shape[:2]
        y, x = np.mgrid[step / 2:h:step,
                        step / 2:w:step].reshape(2, -1).astype(np.int32)
        fx, fy = flow[y, x].T
        lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
        lines = np.int32(lines + 0.5)
        vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        cv2.polylines(vis, lines, 0, (0, 255, 0))
        for (x1, y1), (x2, y2) in lines:
            cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
        return vis

    def draw_hsv(flow):
        h, w = flow.shape[:2]
        fx, fy = flow[:, :, 0], flow[:, :, 1]
        ang = np.arctan2(fy, fx) + np.pi
        v = np.sqrt(fx * fx + fy * fy)
        hsv = np.zeros((h, w, 3), np.uint8)
        hsv[..., 0] = ang * (180 / np.pi / 2)
        hsv[..., 1] = 255
        hsv[..., 2] = np.minimum(v * 15, 255)
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return bgr

    def warp_flow(img, flow):
        h, w = flow.shape[:2]
        flow = -flow
        flow[:, :, 0] += np.arange(w)
        flow[:, :, 1] += np.arange(h)[:, np.newaxis]
        res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
        return res

    cam = cv2.VideoCapture('rally.avi')

    fourcc = cv2.VideoWriter_fourcc(*'XVID')

    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    out = cv2.VideoWriter('farn.avi', fourcc, 30.0,
                          (prevgray.shape[1], prevgray.shape[0]))

    show_hsv = False
    show_glitch = False
    cur_glitch = prev.copy()

    while True:
        ret, img = cam.read()
        if not ret:  # end of the video file
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        out.write(img)
        # No initial flow is supplied, so flow is None and
        # OPTFLOW_USE_INITIAL_FLOW is not set.
        flow = cv2.calcOpticalFlowFarneback(prevgray,
                                            gray,
                                            flow=None,
                                            pyr_scale=0.5,
                                            levels=5,
                                            winsize=15,
                                            iterations=3,
                                            poly_n=1,
                                            poly_sigma=1.2,
                                            flags=0)
        prevgray = gray

        cv2.imshow('flow', draw_flow(gray, flow))
        if show_hsv:
            hsv_img = draw_hsv(flow)
            cv2.imshow('flow HSV', hsv_img)
            out.write(hsv_img)
        if show_glitch:
            cur_glitch = warp_flow(cur_glitch, flow)
            cv2.imshow('glitch', cur_glitch)
            out.write(cur_glitch)

        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('1'):
            show_hsv = not show_hsv

        if ch == ord('2'):
            show_glitch = not show_glitch
            if show_glitch:
                cur_glitch = img.copy()
    out.release()
    cv2.destroyAllWindows()
Example #10
import cv2
import numpy as np
cap = cv2.VideoCapture("video_test_p.mp4")

ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros(frame1.shape, np.uint8)  # similar to np.zeros_like(frame1)
hsv[..., 1] = 255

while True:
    ret, frame2 = cap.read()
    if not ret:
        break
    next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5,
                                        1.2, 0)

    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    cv2.imshow('frame2', rgb)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        cv2.imwrite('opticalfb.png', frame2)
        cv2.imwrite('opticalhsv.png', rgb)
    prvs = next
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
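
Nearly all of these examples share the same flow-to-color mapping: flow direction becomes hue (halved, since OpenCV's hue channel spans 0-179) and flow magnitude becomes brightness. A hedged, self-contained restatement of that shared pattern (the function name is only illustrative):

import cv2
import numpy as np

def flow_to_bgr(flow: np.ndarray) -> np.ndarray:
    # flow has shape (H, W, 2): per-pixel (dx, dy) displacement.
    h, w = flow.shape[:2]
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros((h, w, 3), dtype=np.uint8)
    hsv[..., 0] = ang * 180 / np.pi / 2  # direction -> hue
    hsv[..., 1] = 255                    # full saturation
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)  # speed -> value
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)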
Example #11
def getVideo():
    global tracks
    global track_len
    global detect_interval
    global frame_idx
    global VIDEO_SCALE
    global videoLabel
    global typeOfVideo
    global connectingToDrone
    global takePicture
    frameCount = 0  # Stores the current frame being processed
    frame1Optical = None  # Store variables for first frame
    frame2Optical = None  # Store variables for second frame
    prvs = None
    hsv = None

    try:
        while connectingToDrone:
            #time.sleep(0.03)
            for frameRaw in container.decode(video=0):
                checkController()
                if takePicture:
                    frame1 = np.array(frameRaw.to_image())
                    #im = Image.fromarray(frame1, 'RGB')
                    cv.imwrite(
                        "pics/" + datetime.datetime.now().isoformat() + ".jpg",
                        frame1)
                    #imageTk = ImageTk.PhotoImage(image=im)
                    #videoLabel.configure(image=imageTk)
                    #videoLabel.image = imageTk
                    #videoLabel.update()
                    takePicture = False
                if typeOfVideo.get() == "Canny Edge Detection":
                    frame1 = np.array(frameRaw.to_image())
                    frame1 = cv.resize(frame1, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    frameCanny = cv.Canny(frame1, 50, 100)
                    im = Image.fromarray(frameCanny)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "LK Optical Flow":
                    frame1 = np.array(frameRaw.to_image())
                    frame = frame1
                    frame = cv.resize(frame1, (0, 0),
                                      fx=VIDEO_SCALE,
                                      fy=VIDEO_SCALE)
                    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
                    vis = frame.copy()
                    if len(tracks) > 0:
                        img0, img1 = prev_gray, frame_gray
                        p0 = np.float32([tr[-1]
                                         for tr in tracks]).reshape(-1, 1, 2)
                        p1, _st, _err = cv.calcOpticalFlowPyrLK(
                            img0, img1, p0, None, **lk_params)
                        p0r, _st, _err = cv.calcOpticalFlowPyrLK(
                            img1, img0, p1, None, **lk_params)
                        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                        good = d < 1
                        new_tracks = []

                        for tr, (x, y), good_flag in zip(
                                tracks, p1.reshape(-1, 2), good):
                            if not good_flag:
                                continue
                            tr.append((x, y))
                            if len(tr) > track_len:
                                del tr[0]
                            new_tracks.append(tr)
                            cv.circle(vis, (x, y), 2, (0, 255, 0), -1)
                        tracks = new_tracks
                        cv.polylines(vis, [np.int32(tr) for tr in tracks],
                                     False, (0, 255, 0))
                        draw_str(vis, (20, 20),
                                 'track count: %d' % len(tracks))

                    if frame_idx % detect_interval == 0:
                        mask = np.zeros_like(frame_gray)
                        mask[:] = 255
                        for x, y in [np.int32(tr[-1]) for tr in tracks]:
                            cv.circle(mask, (x, y), 5, 0, -1)
                        p = cv.goodFeaturesToTrack(frame_gray,
                                                   mask=mask,
                                                   **feature_params)
                        if p is not None:
                            for x, y in np.float32(p).reshape(-1, 2):
                                tracks.append([(x, y)])

                    frame_idx += 1
                    prev_gray = frame_gray
                    #cv.imshow('Tello Dense Optical - Middlebury Research', vis)
                    im = Image.fromarray(vis, 'RGB')
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "Optical Flow":
                    frameCount += 1
                    if frameCount == 1:  # If first frame
                        frame1Optical = cv.cvtColor(
                            np.array(frameRaw.to_image()), cv.COLOR_RGB2BGR)
                        prvs = cv.cvtColor(frame1Optical, cv.COLOR_BGR2GRAY)
                        hsv = np.zeros_like(frame1Optical)
                        hsv[..., 1] = 255
                    else:  # If not first frame
                        frame2Optical = cv.cvtColor(
                            np.array(frameRaw.to_image()), cv.COLOR_RGB2BGR)
                        next = cv.cvtColor(frame2Optical, cv.COLOR_BGR2GRAY)
                        flow = cv.calcOpticalFlowFarneback(
                            prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
                        mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])
                        hsv[..., 0] = ang * 180 / np.pi / 2
                        hsv[..., 2] = cv.normalize(mag, None, 0, 255,
                                                   cv.NORM_MINMAX)
                        bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
                        im = Image.fromarray(
                            cv.resize(frame2Optical, (0, 0),
                                      fx=VIDEO_SCALE,
                                      fy=VIDEO_SCALE))
                        imageTk = ImageTk.PhotoImage(image=im)
                        videoLabel.configure(image=imageTk)
                        videoLabel.image = imageTk
                        videoLabel.update()
                        k = cv.waitKey(30) & 0xff
                        if k == 27:
                            break
                        prvs = next
                elif typeOfVideo.get() == "Grayscale":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)

                    im = Image.fromarray(frame1, 'RGB')
                    gray = im.convert('L')

                    # Convert the PIL grayscale image to a writable numpy array
                    bw = np.asarray(gray).copy()

                    im = Image.fromarray(bw)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "BGR":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    im = Image.fromarray(frame1)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                elif typeOfVideo.get() == "Black & White":
                    frame = cv.cvtColor(np.array(frameRaw.to_image()),
                                        cv.COLOR_RGB2BGR)
                    frame1 = cv.resize(frame, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)

                    im = Image.fromarray(frame1, 'RGB')
                    gray = im.convert('L')

                    # Using numpy, convert pixels to pure black or white
                    bw = np.asarray(gray).copy()

                    # Pixel range is 0...255, 256/2 = 128
                    bw[bw < 128] = 0  # Black
                    bw[bw >= 128] = 255  # White
                    im = Image.fromarray(bw)
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()
                else:  # typeOfVideo.get() == "Normal":
                    frame1 = np.array(frameRaw.to_image())
                    frame1 = cv.resize(frame1, (0, 0),
                                       fx=VIDEO_SCALE,
                                       fy=VIDEO_SCALE)
                    im = Image.fromarray(frame1, 'RGB')
                    imageTk = ImageTk.PhotoImage(image=im)
                    videoLabel.configure(image=imageTk)
                    videoLabel.image = imageTk
                    videoLabel.update()

            ch = cv.waitKey(1)
            if ch == 27:
                break
    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)