Example #1
import sys
import os
sys.path.append('../mgmodule')
sys.path.append('..')
import mgmodule
import cv2

# Import any non-module functions you want to use (see the documentation) like this:
from _average import average_image
from _history import history

# CREATE MODULE OBJECT: an example call that creates an MgObject with several parameters
mg = mgmodule.MgObject('pianist.avi', color=False, crop='auto', skip=3)
# USE MODULE METHOD: run the motion video analysis on the object
mg.mg_motionvideo(inverted_motionvideo=True,
                  inverted_motiongram=True,
                  thresh=0.1,
                  blur='Average',
                  normalize=False)
# This runs the motion history analysis on the video
mg.mg_motionhistory(history_length=25,
                    thresh=0.1,
                    inverted_motionhistory=True,
                    blur='Average')

# USE NON-MODULE FUNCTION: this one finds an average image of any video, here using the mg object's filename

# Average image of original video
#average_image(mg.filename)

# Average image of pre-processed video
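
The example breaks off here. For reference, a minimal sketch of what an average-image computation does, reading every frame and taking the per-pixel mean (an illustration only, not the module's actual _average implementation):

import cv2
import numpy as np

def sketch_average_image(filename):
    # Accumulate frames in float precision and divide by the frame count
    vidcap = cv2.VideoCapture(filename)
    ret, frame = vidcap.read()
    acc = np.zeros_like(frame, dtype=np.float64)
    count = 0
    while ret:
        acc += frame
        count += 1
        ret, frame = vidcap.read()
    vidcap.release()
    return (acc / count).astype(np.uint8)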
Example #2
import sys
import os
sys.path.append('../mgmodule')
sys.path.append('..')
import mgmodule
import cv2

# CREATE MODULE OBJECT: an example call that creates an MgObject with several parameters
mg = mgmodule.MgObject('../dance.avi',
                       starttime=2,
                       endtime=20,
                       contrast=100,
                       brightness=50)

# USE MODULE METHOD: run the motion video analysis on the object
mg.motion(inverted_motionvideo=False,
          inverted_motiongram=False,
          thresh=0.05,
          unit='seconds')

# History video
mg.history(history_length=25)
# Motion history video
mg.history(mg.of + '_motion.avi', history_length=25)

# Average image of original video
# mg.average('../dance.avi')

# Average image of pre-processed video
mg.average()
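
Each analysis method documents that it returns a new MgObject pointing at its output file, so calls can plausibly be chained. A hedged sketch, assuming the returned object exposes the same methods:

# Motion video of the clip, then a history video rendered from that motion video
mg = mgmodule.MgObject('../dance.avi', starttime=2, endtime=20)
mg.motion(thresh=0.05).history(history_length=25)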
Example #3
def mg_motionvideo(self,
                   filtertype='Regular',
                   thresh=0.05,
                   blur='None',
                   kernel_size=5,
                   inverted_motionvideo=False,
                   inverted_motiongram=False,
                   unit='seconds',
                   equalize_motiongram=True,
                   save_plot=True,
                   save_data=True,
                   data_format="csv",
                   save_motiongrams=True,
                   save_video=True):
    """
    Finds the difference in pixel values between consecutive frames of an input video 
    and saves these difference frames into a new video that describes the motion in the recording.

    Parameters
    ----------
    - filtertype : {'Regular', 'Binary', 'Blob'}, optional

        `Regular` turns all values below `thresh` to 0.
        `Binary` turns all values below `thresh` to 0, above `thresh` to 1.
        `Blob` removes individual pixels with erosion method.
    - thresh : float, optional

        A number in the range of 0 to 1. Default is 0.05.
        Eliminates pixel values less than given threshold.
    - blur : {'None', 'Average'}, optional

        `Average` applies a 10px * 10px blurring filter; `None` applies no blur.
    - kernel_size : int, optional

        Default is 5. Size of structuring element.
    - inverted_motionvideo : bool, optional

        Default is `False`. If `True`, inverts colors of the motion video.
    - inverted_motiongram : bool, optional

        Default is `False`. If `True`, inverts colors of the motiongrams.
    - unit : {'seconds', 'samples'}, optional

        Unit in QoM plot.
    - equalize_motiongram : bool, optional

        Default is `True`. If `True`, converts the motiongrams to the HSV color 
        space and equalizes the histogram of the value channel (V).
    - save_plot : bool, optional

        Default is `True`. If `True`, outputs motion-plot.
    - save_data : bool, optional

        Default is `True`. If `True`, outputs motion-data.
    - data_format : {'csv', 'tsv', 'txt'}, optional

        Specifies format of motion-data.
    - save_motiongrams : bool, optional

        Default is `True`. If `True`, outputs motiongrams.
    - save_video : bool, optional

        Default is `True`. If `True`, outputs the motion video.

    Outputs
    -------
    - `filename`_motion.avi

        A video of the absolute difference between consecutive frames in the source video. 
    - `filename`_motion_com_qom.png

        A plot describing the centroid of motion and the quantity of motion in the source video.
    - `filename`_mgx.png

        A horizontal motiongram of the source video.
    - `filename`_mgy.png

        A vertical motiongram of the source video.
    - `filename`_motion.csv

        A text file containing the quantity of motion and the centroid of motion for each frame 
        in the source video with timecodes in milliseconds. Available formats: csv, tsv, txt.

    Returns
    -------
    - MgObject 

        A new MgObject pointing to the output '_motion' video file. If `save_video=False`, it 
        returns an MgObject pointing to the input video file.
    """

    if save_plot or save_data or save_motiongrams or save_video:

        self.blur = blur
        self.thresh = thresh
        self.filtertype = filtertype

        vidcap = cv2.VideoCapture(self.of + self.fex)
        ret, frame = vidcap.read()

        if save_video:
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            out = cv2.VideoWriter(self.of + '_motion' + self.fex, fourcc,
                                  self.fps, (self.width, self.height))

        if save_motiongrams:
            gramx = np.zeros([1, self.width, 3])
            gramy = np.zeros([self.height, 1, 3])
        if save_data or save_plot:
            time = np.array([])  # time in ms
            qom = np.array([])  # quantity of motion
            com = np.array([])  # centroid of motion

        ii = 0
        # Build the progress bar label once, before the loop, so it is
        # defined even if the first frame read fails
        pgbar_text = 'Rendering motion' + ", ".join(
            np.array(["-video", "-grams", "-plots", "-data"])[np.array([
                save_video, save_motiongrams, save_plot, save_data
            ])]) + ":"

        if not self.color:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if save_motiongrams:
                gramx = np.zeros([1, self.width])
                gramy = np.zeros([self.height, 1])

        while vidcap.isOpened():
            if self.blur.lower() == 'average':
                prev_frame = cv2.blur(frame, (10, 10))
            elif self.blur.lower() == 'none':
                prev_frame = frame

            ret, frame = vidcap.read()
            if ret:
                if self.blur.lower() == 'average':
                    # The higher these numbers, the more blur you get
                    frame = cv2.blur(frame, (10, 10))

                if not self.color:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                frame = np.array(frame)
                frame = frame.astype(np.int32)

                if self.color:
                    motion_frame_rgb = np.zeros([self.height, self.width, 3])

                    for i in range(frame.shape[2]):
                        motion_frame = (np.abs(frame[:, :, i] -
                                               prev_frame[:, :, i])).astype(
                                                   np.uint8)
                        motion_frame = filter_frame(motion_frame,
                                                    self.filtertype,
                                                    self.thresh, kernel_size)
                        motion_frame_rgb[:, :, i] = motion_frame

                    if save_motiongrams:
                        movement_y = np.mean(motion_frame_rgb, axis=1).reshape(
                            self.height, 1, 3)
                        movement_x = np.mean(motion_frame_rgb,
                                             axis=0).reshape(1, self.width, 3)
                        gramy = np.append(gramy, movement_y, axis=1)
                        gramx = np.append(gramx, movement_x, axis=0)

                else:
                    motion_frame = (np.abs(frame - prev_frame)).astype(
                        np.uint8)
                    motion_frame = filter_frame(motion_frame, self.filtertype,
                                                self.thresh, kernel_size)

                    if save_motiongrams:
                        movement_y = np.mean(motion_frame,
                                             axis=1).reshape(self.height, 1)
                        movement_x = np.mean(motion_frame,
                                             axis=0).reshape(1, self.width)
                        gramy = np.append(gramy, movement_y, axis=1)
                        gramx = np.append(gramx, movement_x, axis=0)

                if not self.color:
                    motion_frame = cv2.cvtColor(motion_frame,
                                                cv2.COLOR_GRAY2BGR)
                    motion_frame_rgb = motion_frame

                if save_video:
                    if inverted_motionvideo:
                        out.write(
                            cv2.bitwise_not(motion_frame_rgb.astype(np.uint8)))
                    else:
                        out.write(motion_frame_rgb.astype(np.uint8))

                if save_plot or save_data:
                    combite, qombite = centroid(
                        motion_frame_rgb.astype(np.uint8), self.width,
                        self.height)
                    if ii == 0:
                        time = frame2ms(ii, self.fps)
                        com = combite.reshape(1, 2)
                        qom = qombite
                    else:
                        time = np.append(time, frame2ms(ii, self.fps))
                        com = np.append(com, combite.reshape(1, 2), axis=0)
                        qom = np.append(qom, qombite)
            else:
                mg_progressbar(self.length, self.length, pgbar_text,
                               'Complete')
                break
            ii += 1
            mg_progressbar(ii, self.length, pgbar_text, 'Complete')

        if save_motiongrams:
            if not self.color:
                # Normalize before converting to uint8 to keep precision
                gramx = gramx / gramx.max() * 255
                gramy = gramy / gramy.max() * 255
                gramx = cv2.cvtColor(gramx.astype(np.uint8),
                                     cv2.COLOR_GRAY2BGR)
                gramy = cv2.cvtColor(gramy.astype(np.uint8),
                                     cv2.COLOR_GRAY2BGR)

            gramx = (gramx - gramx.min()) / (gramx.max() - gramx.min()) * 255.0
            gramy = (gramy - gramy.min()) / (gramy.max() - gramy.min()) * 255.0

            if equalize_motiongram:
                gramx = gramx.astype(np.uint8)
                gramx_hsv = cv2.cvtColor(gramx, cv2.COLOR_BGR2HSV)
                gramx_hsv[:, :, 2] = cv2.equalizeHist(gramx_hsv[:, :, 2])
                gramx = cv2.cvtColor(gramx_hsv, cv2.COLOR_HSV2BGR)

                gramy = gramy.astype(np.uint8)
                gramy_hsv = cv2.cvtColor(gramy, cv2.COLOR_BGR2HSV)
                gramy_hsv[:, :, 2] = cv2.equalizeHist(gramy_hsv[:, :, 2])
                gramy = cv2.cvtColor(gramy_hsv, cv2.COLOR_HSV2BGR)

            if inverted_motiongram:
                cv2.imwrite(self.of + '_mgx.png',
                            cv2.bitwise_not(gramx.astype(np.uint8)))
                cv2.imwrite(self.of + '_mgy.png',
                            cv2.bitwise_not(gramy.astype(np.uint8)))
            else:
                cv2.imwrite(self.of + '_mgx.png', gramx.astype(np.uint8))
                cv2.imwrite(self.of + '_mgy.png', gramy.astype(np.uint8))

        if save_data:
            save_txt(self.of, time, com, qom, self.width, self.height,
                     data_format)

        if save_plot:
            plot_motion_metrics(self.of, self.fps, com, qom, self.width,
                                self.height, unit)

        if save_video:
            out.release()
            source_audio = extract_wav(self.of + self.fex)
            destination_video = self.of + '_motion' + self.fex
            embed_audio_in_video(source_audio, destination_video)
            os.remove(source_audio)
            return mgmodule.MgObject(destination_video,
                                     color=self.color,
                                     returned_by_process=True)
        else:
            return mgmodule.MgObject(self.of + self.fex,
                                     color=self.color,
                                     returned_by_process=True)

    else:
        print("Nothing to render. Exiting...")
        return mgmodule.MgObject(self.of + self.fex, returned_by_process=True)
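
filter_frame and frame2ms are helpers not shown on this page. A hypothetical reconstruction, consistent with the docstring's description of the three filter types (a sketch, not the module's actual code):

import cv2
import numpy as np

def filter_frame(motion_frame, filtertype, thresh, kernel_size):
    # Hypothetical sketch; thresh is in 0..1, pixel values in 0..255
    threshold = thresh * 255
    if filtertype.lower() == 'regular':
        # Zero out everything below the threshold, keep the rest
        motion_frame = np.where(motion_frame < threshold, 0, motion_frame)
    elif filtertype.lower() == 'binary':
        # Below threshold -> black, at or above -> white
        motion_frame = np.where(motion_frame < threshold, 0, 255)
    elif filtertype.lower() == 'blob':
        # Erode isolated pixels away with a square structuring element
        kernel = np.ones((kernel_size, kernel_size), np.uint8)
        motion_frame = cv2.erode(motion_frame, kernel)
    return motion_frame.astype(np.uint8)

def frame2ms(frame_number, fps):
    # Convert a frame index to a timecode in milliseconds
    return round(frame_number / fps * 1000)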
Example #4
def history(self, filename='', history_length=10):
    """
    Creates a video where each frame is the average of the n previous frames, 
    where n is determined by `history_length`. The history frames are summed 
    and normalized, then added to the current frame to show the history.

    Parameters
    ----------
    - filename : str, optional

        Path to the input video file. If not specified, the video file pointed to by the MgObject is used.
    - history_length : int, optional

        Default is 10. Number of frames to be saved in the history tail.

    Outputs
    -------
    - `filename`_history.avi

    Returns
    -------
    - MgObject 
        A new MgObject pointing to the output '_history' video file.
    """

    if filename == '':
        filename = self.filename

    of = os.path.splitext(filename)[0]
    fex = os.path.splitext(filename)[1]
    video = cv2.VideoCapture(filename)
    ret, frame = video.read()
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')

    fps = int(video.get(cv2.CAP_PROP_FPS))
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    out = cv2.VideoWriter(of + '_history' + fex, fourcc, fps, (width, height))

    ii = 0
    history = []

    while video.isOpened():
        ret, frame = video.read()
        if ret:
            if not self.color:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            frame = (np.array(frame)).astype(np.float64)

            if len(history) > 0:
                history_total = frame/(len(history)+1)
            else:
                history_total = frame
            for newframe in history:
                history_total += newframe/(len(history)+1)
            # Keep at most history_length frames in the tail
            if len(history) >= history_length:
                history.pop(0)  # drop the oldest frame
            history.append(frame)
            # Cast to an integer type before writing
            total = history_total.astype(np.uint64)

            if not self.color:
                total = cv2.cvtColor(total.astype(
                    np.uint8), cv2.COLOR_GRAY2BGR)
                out.write(total)
            else:
                out.write(total.astype(np.uint8))

        else:
            mg_progressbar(
                length, length, 'Rendering history video:', 'Complete')
            break
        ii += 1
        mg_progressbar(ii, length+1, 'Rendering history video:', 'Complete')

    out.release()
    source_audio = extract_wav(of + fex)
    destination_video = of + '_history' + fex
    embed_audio_in_video(source_audio, destination_video)
    os.remove(source_audio)

    return mgmodule.MgObject(destination_video, color=self.color, returned_by_process=True)
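
The loop above weights the current frame and each stored history frame equally, by 1/(len(history)+1). A compact near-equivalent sketch using collections.deque, which discards old frames automatically (illustration only):

from collections import deque
import numpy as np

tail = deque(maxlen=10)  # roughly history_length

def sketch_history_frame(frame, tail):
    # Uniform average over the current frame and the stored tail
    tail.append(frame.astype(np.float64))
    return (sum(tail) / len(tail)).astype(np.uint8)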
Example #5
def mg_motionhistory(self,
                     history_length=10,
                     kernel_size=5,
                     filtertype='Regular',
                     thresh=0.05,
                     blur='None',
                     inverted_motionhistory=False):
    """
    Finds the difference in pixel values between consecutive frames of an input video 
    and saves each difference frame to a history tail. The history frames are summed 
    and normalized, then added to the current difference frame to show the history of 
    motion.

    Parameters
    ----------
    - history_length : int, optional

        Default is 10. Number of frames to be saved in the history tail.
    - kernel_size : int, optional

        Default is 5. Size of structuring element.
    - filtertype : {'Regular', 'Binary', 'Blob'}, optional

        `Regular` turns all values below `thresh` to 0.
        `Binary` turns all values below `thresh` to 0, above `thresh` to 1.
        `Blob` removes individual pixels with erosion method.
    - thresh : float, optional

        A number in the range of 0 to 1. Default is 0.05.
        Eliminates pixel values less than given threshold.
    - blur : {'None', 'Average'}, optional

        `Average` applies a 10px * 10px blurring filter; `None` applies no blur.
    - inverted_motionhistory : bool, optional

        Default is `False`. If `True`, inverts colors of the motionhistory video.

    Outputs
    -------
    - `filename`_motionhistory.avi

    Returns
    -------
    - MgObject

        A new MgObject pointing to the output '_motionhistory' video file.
    """
    enhancement = 1  # Increase to make motion more visible, but beware: values above 1 can overflow uint8
    self.filtertype = filtertype
    self.thresh = thresh
    self.blur = blur

    vidcap = cv2.VideoCapture(self.of + self.fex)
    ret, frame = vidcap.read()
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(self.of + '_motionhistory' + self.fex, fourcc,
                          self.fps, (self.width, self.height))

    ii = 0
    history = []

    if not self.color:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    while vidcap.isOpened():
        if self.blur.lower() == 'average':
            prev_frame = cv2.blur(frame, (10, 10))
        elif self.blur.lower() == 'none':
            prev_frame = frame

        ret, frame = vidcap.read()

        if ret:
            if self.blur.lower() == 'average':
            # The higher these numbers, the more blur you get
                frame = cv2.blur(frame, (10, 10))

            if not self.color:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            frame = (np.array(frame)).astype(np.float64)

            if self.color:
                motion_frame_rgb = np.zeros([self.height, self.width, 3])
                for i in range(frame.shape[2]):
                    motion_frame = (np.abs(frame[:, :, i] -
                                           prev_frame[:, :, i])).astype(
                                               np.float64)
                    motion_frame = filter_frame(motion_frame, self.filtertype,
                                                self.thresh, kernel_size)
                    motion_frame_rgb[:, :, i] = motion_frame

                if len(history) > 0:
                    motion_history = motion_frame_rgb / (len(history) + 1)
                else:
                    motion_history = motion_frame_rgb

                for newframe in history:
                    motion_history += newframe / (len(history) + 1)
                # Keep at most history_length frames in the tail
                if len(history) >= history_length:
                    history.pop(0)  # drop the oldest frame
                history.append(motion_frame_rgb)
                # Cast to an integer type before writing
                motion_history = motion_history.astype(np.uint64)

            else:  # grayscale (self.color == False)
                motion_frame = (np.abs(frame - prev_frame)).astype(np.float64)
                motion_frame = filter_frame(motion_frame, self.filtertype,
                                            self.thresh, kernel_size)
                if len(history) > 0:
                    motion_history = motion_frame / (len(history) + 1)
                else:
                    motion_history = motion_frame

                for newframe in history:
                    motion_history += newframe / (len(history) + 1)

                # Keep at most history_length frames in the tail
                if len(history) >= history_length:
                    history.pop(0)  # drop the oldest frame

                history.append(motion_frame)
                motion_history = motion_history.astype(np.uint64)

            if not self.color:
                motion_history_rgb = cv2.cvtColor(
                    motion_history.astype(np.uint8), cv2.COLOR_GRAY2BGR)
            else:
                motion_history_rgb = motion_history
            if inverted_motionhistory:
                out.write(
                    cv2.bitwise_not(enhancement *
                                    motion_history_rgb.astype(np.uint8)))
            else:
                out.write(enhancement * motion_history_rgb.astype(np.uint8))
        else:
            mg_progressbar(self.length, self.length,
                           'Rendering motion history video:', 'Complete')
            break
        ii += 1
        mg_progressbar(ii, self.length, 'Rendering motion history video:',
                       'Complete')

    out.release()
    source_audio = extract_wav(self.of + self.fex)
    destination_video = self.of + '_motionhistory' + self.fex
    embed_audio_in_video(source_audio, destination_video)
    os.remove(source_audio)

    return mgmodule.MgObject(destination_video,
                             color=self.color,
                             returned_by_process=True)
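
Note that with enhancement above 1, the multiplication in out.write(enhancement * motion_history_rgb.astype(np.uint8)) happens after the cast to uint8 and can wrap around. A safer pattern (a sketch, standing in for the out.write() calls inside the loop) scales in float and clips first:

import numpy as np

# Hypothetical overflow-safe enhancement: scale, clip, then cast
enhanced = np.clip(motion_history_rgb * enhancement, 0, 255).astype(np.uint8)
out.write(enhanced)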
Example #6
    def dense(self,
              filename='',
              pyr_scale=0.5,
              levels=3,
              winsize=15,
              iterations=3,
              poly_n=5,
              poly_sigma=1.2,
              flags=0,
              skip_empty=False):
        """
        Renders a dense optical flow video of the input video file using `cv2.calcOpticalFlowFarneback()`.
        For more details about the parameters consult the cv2 documentation.

        Parameters
        ----------
        - filename : str, optional

            Path to the input video file. If not specified, the video file pointed to by the MgObject is used.
        - pyr_scale : float, optional

            Default is 0.5.
        - levels : int, optional

            Default is 3.
        - winsize : int, optional

            Default is 15.
        - iterations : int, optional

            Default is 3.
        - poly_n : int, optional

            Default is 5.
        - poly_sigma : float, optional

            Default is 1.2.
        - flags : int, optional

            Default is 0.
        - skip_empty : bool, optional

            Default is `False`. If `True`, repeats the previous frame in the output when an empty frame is encountered.

        Outputs
        -------
        - `filename`_flow_dense.avi

        Returns
        -------
        - MgObject

            A new MgObject pointing to the output '_flow_dense' video file.
        """

        if filename == '':
            filename = self.filename

        of = os.path.splitext(filename)[0]
        fex = os.path.splitext(filename)[1]
        vidcap = cv2.VideoCapture(filename)
        ret, frame = vidcap.read()
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')

        fps = int(vidcap.get(cv2.CAP_PROP_FPS))
        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        out = cv2.VideoWriter(of + '_flow_dense' + fex, fourcc, fps,
                              (width, height))

        ret, frame1 = vidcap.read()
        prev_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        hsv = np.zeros_like(frame1)
        hsv[..., 1] = 255
        # Initialize prev_rgb so skip_empty can repeat a frame even if
        # the very first flow frame is empty
        prev_rgb = np.zeros_like(frame1)

        ii = 0

        while vidcap.isOpened():
            ret, frame2 = vidcap.read()
            if ret:
                next_frame = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

                flow = cv2.calcOpticalFlowFarneback(prev_frame, next_frame,
                                                    None, pyr_scale, levels,
                                                    winsize, iterations,
                                                    poly_n, poly_sigma, flags)

                mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
                hsv[..., 0] = ang * 180 / np.pi / 2
                hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
                rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

                if skip_empty:
                    if np.sum(rgb) > 0:
                        out.write(rgb.astype(np.uint8))
                    else:
                        out.write(prev_rgb.astype(np.uint8))
                else:
                    out.write(rgb.astype(np.uint8))

                prev_frame = next_frame
                prev_rgb = rgb

            else:
                mg_progressbar(length, length,
                               'Rendering dense optical flow video:',
                               'Complete')
                break

            ii += 1

            mg_progressbar(ii, length + 1,
                           'Rendering dense optical flow video:', 'Complete')

        out.release()
        source_audio = extract_wav(of + fex)
        destination_video = of + '_flow_dense' + fex
        embed_audio_in_video(source_audio, destination_video)
        os.remove(source_audio)

        return mgmodule.MgObject(destination_video,
                                 color=self.color,
                                 returned_by_process=True)
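
The method's parameters map one-to-one onto cv2.calcOpticalFlowFarneback(). A minimal two-frame sketch with the same defaults (the frame paths are hypothetical):

import cv2
import numpy as np

prev_gray = cv2.cvtColor(cv2.imread('frame1.png'), cv2.COLOR_BGR2GRAY)
next_gray = cv2.cvtColor(cv2.imread('frame2.png'), cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)
# Per-pixel motion: angle maps to hue, magnitude to brightness above
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])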
Example #7
    def sparse(self,
               filename='',
               corner_max_corners=100,
               corner_quality_level=0.3,
               corner_min_distance=7,
               corner_block_size=7,
               of_win_size=(15, 15),
               of_max_level=2,
               of_criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                            10, 0.03)):
        """
        Renders a sparse optical flow video of the input video file using `cv2.calcOpticalFlowPyrLK()`.
        `cv2.goodFeaturesToTrack()` is used for the corner estimation.
        For more details about the parameters consult the cv2 documentation.

        Parameters
        ----------
        - filename : str, optional

            Path to the input video file. If not specified, the video file pointed to by the MgObject is used.
        - corner_max_corners : int, optional

            Default is 100.
        - corner_quality_level : float, optional

            Default is 0.3.
        - corner_min_distance : int, optional

            Default is 7.
        - corner_block_size : int, optional

            Default is 7.
        - of_win_size : tuple (int, int), optional

            Default is (15, 15).
        - of_max_level : int, optional

            Default is 2.
        - of_criteria : optional

            Default is `(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)`.

        Outputs
        -------
        - `filename`_flow_sparse.avi

        Returns
        -------
        - MgObject

            A new MgObject pointing to the output '_flow_sparse' video file.
        """

        if filename == '':
            filename = self.filename

        of = os.path.splitext(filename)[0]
        fex = os.path.splitext(filename)[1]
        vidcap = cv2.VideoCapture(filename)
        ret, frame = vidcap.read()
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')

        fps = int(vidcap.get(cv2.CAP_PROP_FPS))
        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

        out = cv2.VideoWriter(of + '_flow_sparse' + fex, fourcc, fps,
                              (width, height))

        # params for ShiTomasi corner detection
        feature_params = dict(maxCorners=corner_max_corners,
                              qualityLevel=corner_quality_level,
                              minDistance=corner_min_distance,
                              blockSize=corner_block_size)

        # Parameters for lucas kanade optical flow
        lk_params = dict(winSize=of_win_size,
                         maxLevel=of_max_level,
                         criteria=of_criteria)

        # Create a random color for each tracked corner
        color = np.random.randint(0, 255, (corner_max_corners, 3))

        # Take first frame and find corners in it
        ret, old_frame = vidcap.read()
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

        # Create a mask image for drawing purposes
        mask = np.zeros_like(old_frame)

        ii = 0

        while vidcap.isOpened():
            ret, frame = vidcap.read()
            if ret:
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # calculate optical flow
                p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray,
                                                       p0, None, **lk_params)

                # Select good points
                good_new = p1[st == 1]
                good_old = p0[st == 1]

                # draw the tracks
                for i, (new, old) in enumerate(zip(good_new, good_old)):
                    # cv2.line and cv2.circle require integer coordinates
                    a, b = new.ravel().astype(int)
                    c, d = old.ravel().astype(int)
                    mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)

                    if not self.color:
                        frame = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)

                    frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)

                img = cv2.add(frame, mask)

                out.write(img.astype(np.uint8))

                # Now update the previous frame and previous points
                old_gray = frame_gray.copy()
                p0 = good_new.reshape(-1, 1, 2)

            else:
                mg_progressbar(length, length,
                               'Rendering sparse optical flow video:',
                               'Complete')
                break

            ii += 1

            mg_progressbar(ii, length + 1,
                           'Rendering sparse optical flow video:', 'Complete')

        out.release()
        source_audio = extract_wav(of + fex)
        destination_video = of + '_flow_sparse' + fex
        embed_audio_in_video(source_audio, destination_video)
        os.remove(source_audio)

        return mgmodule.MgObject(destination_video,
                                 color=self.color,
                                 returned_by_process=True)
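
A hedged usage sketch for both optical flow methods, assuming an MgObject as in the earlier examples:

mg = mgmodule.MgObject('../dance.avi')
mg.dense(skip_empty=True)           # -> *_flow_dense.avi
mg.sparse(corner_max_corners=50,    # -> *_flow_sparse.avi
          of_win_size=(21, 21))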