Example #1

import cv2
import numpy as np
from tqdm import tqdm

# motion_propagate, generate_vertex_profiles and the mesh cell size PIXELS
# are assumed to be defined elsewhere in this project.

    def stabilize(self, new_frame: np.ndarray) -> np.ndarray:
        self.frame_n += 1
        frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY)

        # dense optical flow from the previous grayscale frame to the current
        # one; self.retval is assumed to hold a cv2.DenseOpticalFlow instance
        # created in __init__, whose calc() expects a float32 flow buffer
        flow = np.zeros((self.frame_height, self.frame_width, 2),
                        dtype=np.float32)
        flow = self.retval.calc(self.old_gray, frame_gray, flow)

        # displace every pixel coordinate of the previous frame by its dense
        # flow vector to obtain the matched coordinates in the current frame
        good_new = np.copy(self.good_old)
        good_new += flow.reshape(-1, 2)

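        # estimate the per-vertex mesh motion from the matched point sets and
        # append it to the running vertex profiles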
        x_motion_mesh, y_motion_mesh = motion_propagate(
            self.good_old, good_new, new_frame, self.good_old_matrix)
        self.x_paths, self.y_paths = generate_vertex_profiles(
            self.x_paths, self.y_paths, x_motion_mesh, y_motion_mesh)

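        # keep the vertex profiles to a sliding window of BUFFER_SZ frames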
        if self.frame_n > self.BUFFER_SZ:
            self.x_paths = self.x_paths[:, :, 1:]
            self.y_paths = self.y_paths[:, :, 1:]

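        # the first frame could not be warped when it arrived, so on the
        # second frame run two passes: tmp == 1 re-emits the buffered first
        # frame, tmp == 2 handles the current one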
        if self.frame_n == 2:
            for tmp in (1, 2):
                self.sx_paths, self.sy_paths, self.x_paths_pre, self.y_paths_pre = self.__stabilize(
                    self.x_paths, self.y_paths, self.x_paths_pre,
                    self.y_paths_pre, self.sx_paths, self.sy_paths, tmp)
                new_x_motion_meshes, new_y_motion_meshes = self.__get_frame_warp(
                    self.x_paths, self.y_paths, self.sx_paths, self.sy_paths)
                if tmp == 1:
                    new_frame = self.__generate_stabilized_video(
                        self.old_frame, tmp, new_x_motion_meshes,
                        new_y_motion_meshes)
                else:
                    new_frame = self.__generate_stabilized_video(
                        new_frame, tmp, new_x_motion_meshes,
                        new_y_motion_meshes)
        else:
            self.sx_paths, self.sy_paths, self.x_paths_pre, self.y_paths_pre = self.__stabilize(
                self.x_paths, self.y_paths, self.x_paths_pre, self.y_paths_pre,
                self.sx_paths, self.sy_paths, self.frame_n)
            new_x_motion_meshes, new_y_motion_meshes = self.__get_frame_warp(
                self.x_paths, self.y_paths, self.sx_paths, self.sy_paths)
            new_frame = self.__generate_stabilized_video(
                new_frame, self.frame_n, new_x_motion_meshes,
                new_y_motion_meshes)

        self.old_frame = new_frame.copy()
        self.old_gray = frame_gray.copy()
        return new_frame
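
# A minimal usage sketch (an assumption, not part of the original file): it
# supposes the stabilize() method above belongs to a stabilizer class, here
# hypothetically named MeshFlowStabilizer, whose constructor takes the first
# frame and initializes old_gray, good_old and the path buffers.
#
#     cap = cv2.VideoCapture("shaky.mp4")            # illustrative input path
#     ret, first_frame = cap.read()
#     stabilizer = MeshFlowStabilizer(first_frame)   # hypothetical name
#     while True:
#         ret, frame = cap.read()
#         if not ret:
#             break
#         stable = stabilizer.stabilize(frame)       # method defined above
#     cap.release()
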
def read_video(cap):
    """
    @param: cap is the cv2.VideoCapture object that is
            instantiated with given video

    Returns:
            returns mesh vertex motion vectors & 
            mesh vertex profiles 
    """

    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               20, 0.03))

    # Take first frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # preserve aspect ratio
    global HORIZONTAL_BORDER
    HORIZONTAL_BORDER = 30

    global VERTICAL_BORDER
    VERTICAL_BORDER = (HORIZONTAL_BORDER *
                       old_gray.shape[1]) // old_gray.shape[0]

    # motion meshes in x-direction and y-direction
    x_motion_meshes = []
    y_motion_meshes = []

    # path parameters
    x_paths = np.zeros(
        (old_frame.shape[0] // PIXELS, old_frame.shape[1] // PIXELS, 1))
    y_paths = np.zeros(
        (old_frame.shape[0] // PIXELS, old_frame.shape[1] // PIXELS, 1))

    frame_num = 1
    bar = tqdm(total=frame_count)
    while frame_num < frame_count:

        # processing frames
        ret, frame = cap.read()
        if not ret:
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # find corners in it
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                               **lk_params)

        # Select good points
        good_new = p1[st == 1]
        good_old = p0[st == 1]

        # estimate motion mesh for old_frame
        x_motion_mesh, y_motion_mesh = motion_propagate(
            good_old, good_new, frame)
        try:
            x_motion_meshes = np.concatenate(
                (x_motion_meshes, np.expand_dims(x_motion_mesh, axis=2)),
                axis=2)
            y_motion_meshes = np.concatenate(
                (y_motion_meshes, np.expand_dims(y_motion_mesh, axis=2)),
                axis=2)
        except ValueError:
            # first iteration: the mesh stacks are still empty lists, so seed
            # them with the first motion mesh
            x_motion_meshes = np.expand_dims(x_motion_mesh, axis=2)
            y_motion_meshes = np.expand_dims(y_motion_mesh, axis=2)

        # generate vertex profiles
        x_paths, y_paths = generate_vertex_profiles(x_paths, y_paths,
                                                    x_motion_mesh,
                                                    y_motion_mesh)

        # updates frames
        bar.update(1)
        frame_num += 1
        old_frame = frame.copy()
        old_gray = frame_gray.copy()

    bar.close()
    return [x_motion_meshes, y_motion_meshes, x_paths, y_paths]
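
# A minimal sketch of calling read_video() (assumptions for illustration: the
# video path is a placeholder, and motion_propagate, generate_vertex_profiles
# and PIXELS come from elsewhere in this project):
#
#     cap = cv2.VideoCapture("input.mp4")
#     x_motion_meshes, y_motion_meshes, x_paths, y_paths = read_video(cap)
#     # x_paths has shape (mesh_rows, mesh_cols, frame_count): one accumulated
#     # path sample per mesh vertex per frame
#     cap.release()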