Example #1
def dense_flow(augs):
    '''
    Extract dense optical-flow images.
    :param augs: the argument tuple:
        video_name: the video name, e.g. 'v_xxxxxxx'; adjust if your naming differs.
        save_dir: name of the final destination directory.
        step: number of frames between each pair of extracted frames.
        bound: bi-bound parameter used to clip the flow values.
    :return: None
    '''
    video_name, save_dir, step, bound = augs
    video_path = str(video_name)
    print(video_name)

    cap_iter = cap_vid(video_path, save_dir, skip=6, init_frame=3)

    dtvl1 = cv2.createOptFlow_DualTVL1()

    for prev_image, next_frame, frame_num in cap_iter:
        if not dtvl1.getUseInitialFlow():
            flowDTVL1 = dtvl1.calc(prev_image, next_frame, None)
            dtvl1.setUseInitialFlow(True)
        else:
            flowDTVL1 = dtvl1.calc(prev_image, next_frame, flowDTVL1)

        # save the flow fields and the RGB frame
        save_flows(flowDTVL1.copy(), next_frame, save_dir, frame_num, bound)

    #cv2.destroyAllWindows()
    print(f'{video_path} captured')
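
save_flows is a project helper that is never shown in these examples; a minimal sketch consistent with the (flow, image, save_dir, frame_num, bound) calls used throughout (filenames and directory layout are assumptions) could be:

import os
import cv2
import numpy as np

def save_flows(flows, image, save_dir, num, bound):
    # map each flow channel from [-bound, bound] onto [0, 255] (cf. Example #11)
    flow = np.clip(flows, -bound, bound)
    flow = np.round((flow + bound) * (255.0 / (2 * bound))).astype(np.uint8)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # write the RGB frame alongside the two flow channels (filenames assumed)
    cv2.imwrite(os.path.join(save_dir, 'img_{:05d}.jpg'.format(num)), image)
    cv2.imwrite(os.path.join(save_dir, 'flow_x_{:05d}.jpg'.format(num)), flow[..., 0])
    cv2.imwrite(os.path.join(save_dir, 'flow_y_{:05d}.jpg'.format(num)), flow[..., 1])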
Example #2
def cal_optical_cv2(augs):
    dir_name, bound = augs
    dir_name = dir_name.split(' ')[0]
    parent, dirnames, imagenames = next(os.walk(dir_name))
    imagenames = sorted(imagenames)
    for i in range(len(imagenames)-1): # there is no flow for the last image
        s = time.time()
        image_path = os.path.join(parent, imagenames[i])
        frame_0 = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        image_path = os.path.join(parent, imagenames[i+1])
        frame_1 = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        dtvl1 = cv2.createOptFlow_DualTVL1()  # note: could be created once, outside the loop
        flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)
        e = time.time()
        print(e - s)
        u = flowDTVL1[:, :, 0]
        v = flowDTVL1[:, :, 1]
        mag = np.sqrt(u**2 + v**2)
        u, v = [ToImg(c, bound) for c in (u, v)]
        mag = ToImg(mag, (0, bound*1.414))  # magnitude can reach bound*sqrt(2)
        flow = np.stack((u, v, mag), axis=2)
        # flow = v
        save_path = dir_name.replace('UCF-101', 'UCF-101-optical-flow')
        save_name = os.path.join(save_path, '{:04d}.jpg'.format(i))
        print(save_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        cv2.imwrite(save_name, flow)
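
ToImg is another project helper that never appears in these examples. Judging from the calls above and from the explicit normalization in Example #11, a plausible sketch is the following (the tuple form of bound, used for mag, is an assumption):

import numpy as np

def ToImg(raw_flow, bound):
    # assumed behaviour: clip to a range, then rescale linearly to [0, 255]
    if isinstance(bound, tuple):  # e.g. (0, bound * 1.414) for magnitudes
        lo, hi = bound
    else:
        lo, hi = -bound, bound
    flow = np.clip(raw_flow, lo, hi)
    return np.round((flow - lo) * (255.0 / (hi - lo))).astype(np.uint8)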
Example #3
def DenseOpticalFlows():
    print("Dense Optical Flow Demo ")

    img1 = cv2.imread("D:/OpenCv4Programmers/datas/frm1.png")
    img2 = cv2.imread("D:/OpenCv4Programmers/datas/frm2.png")

    cv2.imshow("Img1", img1)
    cv2.imshow("Img2", img2)
    cv2.waitKey(10)

    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    #  Farneback
    start = cv2.getTickCount()
    flowFarneback = cv2.calcOpticalFlowFarneback(img1, img2, None, 0.25, 3, 15,
                                                 5, 5, 1.2, 0)
    timeSec = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print(" Farnback Optical flow time ", timeSec, " sec")

    # DualTVL1
    start = cv2.getTickCount()
    tvl1 = cv2.createOptFlow_DualTVL1()
    flowDualTVL1 = tvl1.calc(img1, img2, None)
    timeSec = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print(" DualTVL1  Optical flow time ", timeSec, " sec")

    outImgFarneback = drawFlow(flowFarneback, img2)
    outImgDualTVL1 = drawFlow(flowDualTVL1, img2)

    cv2.imshow("Farneback Optical Flow ", outImgFarneback)
    cv2.imshow("DualTVL1 Optical Flow ", outImgDualTVL1)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #4
def test_cv2_flann():
    """
    Ignore:
        [name for name in dir(cv2) if 'create' in name.lower()]
        [name for name in dir(cv2) if 'stereo' in name.lower()]

        ut.grab_zipped_url('https://priithon.googlecode.com/archive/a6117f5e81ec00abcfb037f0f9da2937bb2ea47f.tar.gz', download_dir='.')
    """
    import cv2
    from vtool.tests import dummy
    import plottool as pt
    import vtool as vt
    img1 = vt.imread(ut.grab_test_imgpath('easy1.png'))
    img2 = vt.imread(ut.grab_test_imgpath('easy2.png'))

    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1, img2)
    pt.imshow(disparity)
    pt.show()

    #cv2.estima

    flow = cv2.createOptFlow_DualTVL1()
    img1, img2 = vt.convert_image_list_colorspace([img1, img2], 'gray', src_colorspace='bgr')
    img2 = vt.resize(img2, img1.shape[0:2][::-1])
    out = img1.copy()
    flow.calc(img1, img2, out)

    orb = cv2.ORB_create()
    kp1, vecs1 = orb.detectAndCompute(img1, None)
    kp2, vecs2 = orb.detectAndCompute(img2, None)

    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")

    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)

    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)

    out = img1.copy()
    cv2.drawKeypoints(img1, kp1, outImage=out)
    pt.imshow(out)

    vecs1 = dummy.testdata_dummy_sift(10)
    vecs2 = dummy.testdata_dummy_sift(10)  # NOQA

    FLANN_INDEX_KDTREE = 0  # bug: flann enums are missing
    #flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)   # or pass empty dictionary
    flann = cv2.FlannBasedMatcher(index_params, search_params)  # NOQA

    cv2.flann.Index(vecs1, index_params)

    #cv2.FlannBasedMatcher(flann_params)

    cv2.flann.Index(vecs1, flann_params)  # NOQA
Example #5
def main(args=None, parser=None):

    data_dir = '/home/ye/Works/diving/frames'
    flow_dir = '/home/ye/Works/diving/flow'

    folder = flow_dir
    file_dir = data_dir

    video_files = os.listdir(file_dir)
    # print video_files
    nVideos = len(video_files)

    start_time = time.time()
    for i in range(0, nVideos):
        print(i, '/', nVideos)

        vid_file = video_files[i]
        bn = os.path.basename(vid_file)
        prefix = os.path.splitext(bn)[0]
        print(prefix)
        image_folder = join(folder, prefix)
        vid_file = join(data_dir, vid_file)
        print(vid_file)

        if bn not in ('.DS_Store', '._.DS_Store'):
            img_list = collect_files(vid_file,
                                     file_ext=".jpg",
                                     sort_files=True)

            for j in range(len(img_list) - 1):
                img_0 = cv2.imread(join(vid_file, img_list[j]))
                img_1 = cv2.imread(join(vid_file, img_list[j + 1]))

                frame_0 = cv2.cvtColor(img_0, cv2.COLOR_RGB2GRAY)
                frame_1 = cv2.cvtColor(img_1, cv2.COLOR_RGB2GRAY)

                dtvl1 = cv2.createOptFlow_DualTVL1()
                flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)

                flow_x = ToImg(flowDTVL1[..., 0], 20)
                flow_y = ToImg(flowDTVL1[..., 1], 20)

                if not os.path.exists(image_folder):
                    os.makedirs(image_folder)

                save_x = os.path.join(image_folder,
                                      'flow_x_{:05d}.jpg'.format(j))
                save_y = os.path.join(image_folder,
                                      'flow_y_{:05d}.jpg'.format(j))

                cv2.imwrite(save_x, flow_x)
                cv2.imwrite(save_y, flow_y)

    print('\nDONE\n')
    elapsed_time = time.time() - start_time
    print('time: ', elapsed_time)

    return 0
Example #6
def find_differences_cc(healthy, impaired, enhanced, Th=0.25, scale=20):
    optical_flow = cv2.createOptFlow_DualTVL1()
    scale1 = 0.5
    # RGB DIFFERENCE
    t = time()
    difference = compute_difference(impaired, enhanced, kernel=5)
    #print('Difference in %.2f'%(time()-t))
    difference[0:difference.shape[0] // 6] = 0
    difference[-difference.shape[0] // 8:] = 0
    difference[:, 0:difference.shape[1] // 6] = 0
    difference[:, -difference.shape[1] // 6:] = 0
    t = time()
    diff_image = difference2color(difference, impaired,
                                  Th).astype('float32') / 255
    #print('DiffImage in %.2f'%(time()-t))
    # FLOW
    t = time()
    impaired, enhanced = resize(impaired, scale1), resize(enhanced, scale1)
    impaired_gray = cv2.cvtColor(impaired, cv2.COLOR_RGB2GRAY)
    enhanced_gray = cv2.cvtColor(enhanced, cv2.COLOR_RGB2GRAY)
    flow = optical_flow.calc(impaired_gray, enhanced_gray,
                             None)  # compute flow
    #print('Flow in %.2f'%(time()-t))
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    flow = flow / np.repeat(mag[:, :, np.newaxis], 2, axis=2)
    # MASK
    t = time()
    mask = difference >= Th
    #mask = np.logical_and(mask,mag>1.0)
    mask = dilation(mask, disk(6))
    mask = erosion(mask, disk(3))
    #print('Mask in %.2f'%(time()-t))
    t = time()
    mask_labels = measure.label(mask, background=0)
    labels = np.unique(mask_labels)
    flow_sparse, areas = [], []
    for l in range(1, labels.max() + 1):
        m = np.where(mask_labels == l)
        #print(len(m[0]))
        if len(m[0]) < 150: continue

        m = [(m[0] * scale1).astype(int), (m[1] * scale1).astype(int)]
        fx, fy = flow[m[0], m[1], 0].max(), flow[m[0], m[1], 1].max()
        x, y = int(np.median(m[0]) / scale1), int(np.median(m[1]) / scale1)
        flow_sparse.append([x, y, fx, fy])
        areas.append(len(m[0]))

    #print('CC in %.2f'%(time()-t))
    #diff_image = mask_labels
    diff_image = resize(diff_image, 1 / scale1)
    #print diff_image.dtype
    if len(flow_sparse) == 0:
        return diff_image, np.zeros([1, 2]), [0], [0], np.zeros(1)

    flow_sparse = np.array(flow_sparse)
    return diff_image, flow_sparse[:, 2:], flow_sparse[:, 1].astype(
        int), flow_sparse[:, 0].astype(int), np.array(areas)
Example #7
def get_optical_flow_function(high_quality=False):
    opt_flow = cv2.createOptFlow_DualTVL1()
    opt_flow.setUseInitialFlow(True)
    if not high_quality:
        # see https://stackoverflow.com/questions/19309567/speeding-up-optical-flow-createoptflow-dualtvl1
        opt_flow.setTau(1 / 4)
        opt_flow.setScalesNumber(3)
        opt_flow.setWarpingsNumber(3)
        opt_flow.setScaleStep(0.5)
    return opt_flow
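
Because setUseInitialFlow(True) is enabled, calc() should be handed a preallocated float32 flow field to warm-start from (a zero field for the first pair, the previous result afterwards), as the snippet in Example #20 also does. A sketch of typical use, assuming prev_gray and gray are consecutive single-channel frames:

import numpy as np

opt_flow = get_optical_flow_function()
# warm-start buffer: zeros for the first pair, the previous result afterwards
flow = np.zeros(prev_gray.shape + (2,), dtype=np.float32)
flow = opt_flow.calc(prev_gray, gray, flow)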
Example #8
    def set_optical_flow_function(self):
        if self.opt_flow:
            return

        self.opt_flow = cv2.createOptFlow_DualTVL1()
        self.opt_flow.setUseInitialFlow(True)
        if not self.config.high_quality_optical_flow:
            # see https://stackoverflow.com/questions/19309567/speeding-up-optical-flow-createoptflow-dualtvl1
            self.opt_flow.setTau(1 / 4)
            self.opt_flow.setScalesNumber(3)
            self.opt_flow.setWarpingsNumber(3)
            self.opt_flow.setScaleStep(0.5)
Example #9
def python_extractor(op_method, videocapture, name, params):
    if videocapture.sum() == 0:
        print('could not initialize capturing {}'.format(name))
        exit()
    if op_method == 'tvl1':
        calc_method = cv.createOptFlow_DualTVL1()
    else:  #op_method=='fb'
        calc_method = cv.calcOpticalFlowFarneback

    out_frames_path = os.path.join(out_path, name)
    len_frame = len(videocapture)
    frame_num = 0
    image, prev_image, gray, prev_gray = None, None, None, None
    num0 = 0
    while True:
        if num0 >= len_frame:
            break
        frame = videocapture[num0]
        num0 += 1
        if frame_num == 0:
            image = np.zeros_like(frame)
            gray = np.zeros_like(frame)
            prev_gray = np.zeros_like(frame)
            prev_image = frame
            prev_gray = cv.cvtColor(prev_image, cv.COLOR_BGR2GRAY)
            frame_num += 1

            step_t = params['step']
            while step_t > 1:
                num0 += 1
                step_t -= 1
            continue
        image = frame
        gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

        frame_0 = prev_gray
        frame_1 = gray

        if op_method == 'tvl1':
            OutFlow = calc_method.calc(frame_0, frame_1, None)
        else:
            OutFlow = calc_method(frame_0, frame_1, None, 0.6, 3, 25, 7, 5,
                                  1.2, cv.OPTFLOW_FARNEBACK_GAUSSIAN)
        save_flows(OutFlow, image, out_frames_path, frame_num, params['bound'])
        prev_gray = gray
        prev_image = image
        frame_num += 1

        step_t = params['step']
        while step_t > 1:
            num0 += 1
            step_t -= 1
Example #10
def find_differences(healthy, impaired, enhanced, Th=0.25, scale=20):
    scale1, scale2 = 1.0, scale
    optical_flow = cv2.createOptFlow_DualTVL1()
    #optical_flow = cv2.DualTVL1OpticalFlow_create()
    # RGB DIFFERENCE
    difference = compute_difference(impaired, enhanced, kernel=5)
    #difference[0:difference.shape[0]/5] = 0
    #difference[difference.shape[0]/8:]  = 0
    #difference[:,0:difference.shape[1]/5] = 0
    #difference[:,difference.shape[1]/5:]  = 0
    diff_image = difference2color(difference, impaired, Th)
    # FLOW
    impaired_small = (resize(impaired, scale1) * 255).astype(
        'uint8')  # resize for reducing flow resolution
    enhanced_small = (resize(enhanced, scale1) * 255).astype('uint8')
    rows = np.linspace(0, difference.shape[0] - 1,
                       int(scale1 * difference.shape[0])).astype(int)
    cols = np.linspace(0, difference.shape[1] - 1,
                       int(scale1 * difference.shape[1])).astype(int)
    difference_small = difference[np.ix_(rows, cols)]  # nearest-neighbour resize
    impaired_gray = cv2.cvtColor(impaired_small, cv2.COLOR_RGB2GRAY)
    enhanced_gray = cv2.cvtColor(enhanced_small, cv2.COLOR_RGB2GRAY)
    flow = optical_flow.calc(enhanced_gray, impaired_gray,
                             None)  # compute flow
    # FLOW VECTOR TO IMAGE
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    flow = flow / np.repeat(mag[:, :, np.newaxis], 2, axis=2)
    mask = np.logical_and(difference_small >= Th, mag > 2)
    flow_filter = np.stack([flow[:, :, 0] * mask, flow[:, :, 1] * mask],
                           axis=2)
    flow_filter_sparse = np.zeros([
        int(flow_filter.shape[0] / scale1),
        int(flow_filter.shape[1] / scale1), flow_filter.shape[2]
    ], flow_filter.dtype)
    for x in range(0, flow_filter.shape[0] - scale2 // 2, scale2):
        for y in range(0, flow_filter.shape[1] - scale2 // 2, scale2):
            X, Y = int((x + scale2 / 2) / scale1), int(
                (y + scale2 / 2) / scale1)
            m = flow_filter[x:x + scale2, y:y + scale2].reshape([-1, 2])
            mx, my = m[:, 0], m[:, 1]
            #m = np.stack([mx[mx!=0].mean(0),my[my!=0].mean(0)])
            m = np.stack([mx.max(), my.max()])
            flow_filter_sparse[X, Y] = m if not np.isnan(m).any() else 0

    mag, ang = cv2.cartToPolar(flow_filter_sparse[..., 0],
                               flow_filter_sparse[..., 1])
    flow_filter_sparse = flow_filter_sparse / (
        np.repeat(mag[:, :, np.newaxis], 2, axis=2) + 0.01)
    #flow_filter_sparse = np.stack([resize(flow_filter[:,:,0],1/scale1),resize(flow_filter[:,:,1],1/scale1)],2)
    X, Y = np.where(
        np.logical_or(flow_filter_sparse[:, :, 0] != 0,
                      flow_filter_sparse[:, :, 1] != 0))
    return diff_image, flow_filter_sparse, X, Y
Example #11
def compute_TVL1(prev, curr, bound=15):
    """Compute the TV-L1 optical flow."""
    # TVL1 = cv2.DualTVL1OpticalFlow_create()
    TVL1 = cv2.createOptFlow_DualTVL1()
    flow = TVL1.calc(prev, curr, None)

    assert flow.dtype == np.float32

    flow = (flow + bound) * (255.0 / (2 * bound))
    flow = np.round(flow).astype(int)
    flow[flow >= 255] = 255
    flow[flow <= 0] = 0

    return flow
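
Since the encoding maps flow linearly from [-bound, bound] onto [0, 255], a stored flow image can be decoded back to approximate flow values (up to clipping and rounding error) with the inverse map; a small sketch:

import numpy as np

def from_TVL1_image(flow_img, bound=15):
    # invert (flow + bound) * (255 / (2 * bound)): recover flow in [-bound, bound]
    return flow_img.astype(np.float32) * (2 * bound / 255.0) - bound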
Example #12
def get_optflow_retval(algorithm):
  if algorithm.lower() == 'deepflow':
    retval = cv2.optflow.createOptFlow_DeepFlow()
  elif algorithm.lower() == 'farneback':
    retval = cv2.optflow.createOptFlow_Farneback()
  elif algorithm.lower() == 'tvl1':
    retval = cv2.createOptFlow_DualTVL1()
  elif algorithm.lower() == 'sparse2dense':
    retval = cv2.optflow.createOptFlow_SparseToDense()
  elif algorithm == 'DISflow_ultrafast':
    retval = cv2.optflow.createOptFlow_DIS(0)
  elif algorithm == 'DISflow_fast':
    retval = cv2.optflow.createOptFlow_DIS(1)
  elif algorithm == 'DISflow_medium':
    retval = cv2.optflow.createOptFlow_DIS(2)
  else: raise ValueError('unknown optical flow algorithm: {:}'.format(algorithm))
  return retval
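
One portability note: cv2.createOptFlow_DualTVL1() is the OpenCV 3.x spelling; in OpenCV 4 the Dual TV-L1 implementation moved to the opencv-contrib optflow module. A small guard keeps dispatchers like the one above working across versions:

import cv2

def create_tvl1():
    # OpenCV 3.x exposes the factory at top level; OpenCV 4 moved it to cv2.optflow
    if hasattr(cv2, 'createOptFlow_DualTVL1'):
        return cv2.createOptFlow_DualTVL1()
    return cv2.optflow.createOptFlow_DualTVL1()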
Example #13
    def __init__(self, model):

        self.model = model

        # create a reduced quality optical flow.
        self.opt_flow = cv2.createOptFlow_DualTVL1()
        self.opt_flow.setTau(1 / 4)
        self.opt_flow.setScalesNumber(3)
        self.opt_flow.setWarpingsNumber(3)
        self.opt_flow.setScaleStep(0.5)
        self.opt_flow.setUseInitialFlow(True)

        self.prev = None
        self.thermal = None
        self.filtered = None
        self.flow = None
        self.mask = None
Example #14
def tvl1_simple(img1, img2, imtype="none", Lambda=0.9, **kwargs):
    #    gray1 = get_gray(img1,imtype)
    #    gray2 = get_gray(img2,imtype)
    tvl1 = cv2.createOptFlow_DualTVL1()

    #    opflow = np.zeros_like(gray1)
    tvl1.setLambda(Lambda)

    print("Running...", img1.shape)
    img1 = np.array(img1, dtype=np.float32)
    img2 = np.array(img2, dtype=np.float32)
    opflow = tvl1.calc(img1, img2, None)

    #    rgb = of2rgb(opflow[:,:,0],opflow[:,:,1])
    #    imgi = warp_flow(img1, opflow, 2.0)

    #    return imgi, rgb
    return opflow[:, :, 0], opflow[:, :, 1], np.nan
Example #15
def dense_flow(augs):
    '''
    Extract dense optical-flow images.
    :param augs: the argument tuple:
        video_name: the video name, e.g. 'v_xxxxxxx'; adjust if your naming differs.
        save_dir: name of the final destination directory.
        step: number of frames between each pair of extracted frames.
        bound: bi-bound parameter used to clip the flow values.
    :return: None
    '''
    video_name, save_dir, step, bound = augs
    #video_name=video_name.split('/')[-1]

    if not os.path.exists(video_name):
        print('Could not find image folder!', video_name)
        exit()

    image, prev_image, gray, prev_gray = None, None, None, None
    flow_num = 0
    imagelist = os.listdir(video_name)
    for element in range(1, len(imagelist) + 1, step):
        if flow_num == 0:
            image_path = os.path.join(video_name, "%03d.jpg" % (element))
            prev_image = cv2.imread(image_path)
            prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
            flow_num += 1
            continue
        image_path = os.path.join(video_name, "%03d.jpg" % (element))
        current_image = cv2.imread(image_path)
        current_gray = cv2.cvtColor(current_image, cv2.COLOR_RGB2GRAY)
        frame_0 = prev_gray
        frame_1 = current_gray
        dtvl1 = cv2.createOptFlow_DualTVL1()
        flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)
        save_flows(flowDTVL1, save_dir, flow_num,
                   bound)  # save the flow images
        prev_gray = current_gray
        prev_image = current_image
        flow_num += 1
Example #16
def dense_flow(augs):
    '''
    Extract dense optical-flow images.
    :param augs: the argument tuple:
        video_name: the video name, e.g. 'v_xxxxxxx'; adjust if your naming differs.
        save_dir: name of the final destination directory.
        step: number of frames between each pair of extracted frames.
        bound: bi-bound parameter used to clip the flow values.
    :return: None
    '''
    video_name, save_dir, step, bound = augs
    video_path = os.path.join(videos_root,
                              video_name.split('_')[1], video_name)

    # two video-read methods are available: cv2.VideoCapture() and skvideo.io.vread(); both need ffmpeg support

    # videocapture=cv2.VideoCapture(video_path)
    # if not videocapture.isOpened():
    #     print 'Could not initialize capturing! ', video_name
    #     exit()
    try:
        videocapture = skvideo.io.vread(video_path)
    except:
        print('{} read error!'.format(video_name))
        return 0
    print(video_name)
    # if nothing was read, exit!
    if videocapture.sum() == 0:
        print('Could not initialize capturing', video_name)
        exit()
    len_frame = len(videocapture)
    frame_num = 0
    image, prev_image, gray, prev_gray = None, None, None, None
    num0 = 0
    while True:
        #frame=videocapture.read()
        if num0 >= len_frame:
            break
        frame = videocapture[num0]
        num0 += 1
        if frame_num == 0:
            image = np.zeros_like(frame)
            gray = np.zeros_like(frame)
            prev_gray = np.zeros_like(frame)
            prev_image = frame
            prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_BGR2GRAY)
            frame_num += 1
            # skip the in-between frames dictated by step
            step_t = step
            while step_t > 1:
                #frame=videocapture.read()
                num0 += 1
                step_t -= 1
            continue

        image = frame
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        frame_0 = prev_gray
        frame_1 = gray
        # default: use the TV-L1 algorithm
        dtvl1 = cv2.createOptFlow_DualTVL1()
        flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)
        save_flows(flowDTVL1, image, save_dir, frame_num,
                   bound)  # save the flow images and the RGB frame
        prev_gray = gray
        prev_image = image
        frame_num += 1
        # skip the in-between frames dictated by step
        step_t = step
        while step_t > 1:
            #frame=videocapture.read()
            num0 += 1
            step_t -= 1
Example #17
    def extract_tracks(self):
        """
        Extracts tracks from given source.  Setting self.tracks to a list of good tracks within the clip
        :param source_file: filename of cptv file to process
        :returns: True if clip was successfully processed, false otherwise
        """

        assert self.frame_buffer.thermal, "Must call load before extracting tracks"

        frames = self.frame_buffer.thermal
        self.reject_reason = None

        # for now just always calculate as we are using the stats...
        background, background_stats = self.process_background(frames)

        if self.config.background_calc == self.PREVIEW:
            if self.preview_secs > 0:
                self.background_is_preview = True
                background = self.calculate_preview(frames)
            else:
                logging.info("No preview secs defined for CPTV file - using statistical background measurement")

        if len(frames) <= 9:
            self.reject_reason = "Clip too short {} frames".format(len(frames))
            return False

        if self.reject_non_static_clips and not self.stats['is_static']:
            self.reject_reason = "Non static background deviation={:.1f}".format(
                background_stats.background_deviation)
            return False

        # don't process clips that are too hot.
        if self.config.max_mean_temperature_threshold and background_stats.mean_temp > self.config.max_mean_temperature_threshold:
            self.reject_reason = "Mean temp too high {}".format(
                background_stats.mean_temp)
            return False

        # don't process clips with too large of a temperature difference
        if self.config.max_temperature_range_threshold and (background_stats.max_temp - background_stats.min_temp > self.config.max_temperature_range_threshold):
            self.reject_reason = "Temp delta too high {}".format(
                background_stats.max_temp - background_stats.min_temp)
            return False

        # reset the track ID so we start at 1
        Track._track_id = 1
        self.tracks = []
        self.active_tracks = []
        self.region_history = []

        # create optical flow
        self.opt_flow = cv2.createOptFlow_DualTVL1()
        self.opt_flow.setUseInitialFlow(True)
        if not self.config.high_quality_optical_flow:
            # see https://stackoverflow.com/questions/19309567/speeding-up-optical-flow-createoptflow-dualtvl1
            self.opt_flow.setTau(1 / 4)
            self.opt_flow.setScalesNumber(3)
            self.opt_flow.setWarpingsNumber(3)
            self.opt_flow.setScaleStep(0.5)

        # process each frame
        self.frame_on = 0
        for frame in frames:
            self.track_next_frame(frame, background)
            self.frame_on += 1

        # filter out tracks that do not move, or look like noise
        self.filter_tracks()

        # apply smoothing if required
        if self.config.track_smoothing and len(frames) > 0:
            frame_height, frame_width = frames[0].shape
            for track in self.tracks:
                track.smooth(Rectangle(0, 0, frame_width, frame_height))

        return True
Example #18
def flow(prev_frame, cur_frame, tvl1=cv2.createOptFlow_DualTVL1()):
    prev_frame = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    cur_frame = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)
    return tvl1.calc(prev_frame, cur_frame, None)
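
Because the tvl1 default argument is evaluated once, at function definition time, every call shares a single solver instance; that avoids per-call construction cost but is not thread-safe. A short usage sketch (the frame paths are hypothetical):

import cv2

prev = cv2.imread('frames/0001.jpg')  # hypothetical frame paths
cur = cv2.imread('frames/0002.jpg')
f = flow(prev, cur)  # shape (H, W, 2), dtype float32: per-pixel (dx, dy)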
Example #20
frames = []

for frame, _ in video:
    frames.append(frame.copy())

first = np.float32(frames[0])
second = np.float32(frames[1])

print(np.mean(first))
print(np.mean(second))

first = preprocess(first)
second = preprocess(second)

opt_flow = cv2.createOptFlow_DualTVL1()

opt_flow.setTau(1 / 4)
opt_flow.setScalesNumber(3)
opt_flow.setWarpingsNumber(3)
opt_flow.setScaleStep(0.5)

height = 120
width = 160

flow = np.zeros([height, width, 2], dtype=np.float32)
cv2.setNumThreads(0)
flow = opt_flow.calc(first, second, flow)

t0 = time.time()
cv2.setNumThreads(0)
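
The snippet ends mid-measurement; a plausible continuation, assuming the second setNumThreads(0) precedes a timed re-run of calc() on the same pair:

# re-run the solver on the same pair and report the elapsed time
flow = opt_flow.calc(first, second, flow)
print('TV-L1 calc took %.3f s' % (time.time() - t0))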
Example #21
def new_dense_flow(augs, enableSecond):
    '''
    use cv2.VideoCapture()
    '''
    video_name, save_dir, step, bound = augs
    video_path = os.path.join(videos_root,
                              video_name.split('_')[1], video_name)
    print(video_path)
    videocapture = cv2.VideoCapture(video_path)
    fps = videocapture.get(
        cv2.CAP_PROP_FPS)  # get fps and set gap frame for seconds
    print(fps)

    if enableSecond:
        step = step * fps
    if not videocapture.isOpened():
        print('Could not initialize capturing!', video_name)
        exit()

    print(video_name)
    frame_num = 0
    image, prev_image, gray, prev_gray = None, None, None, None
    num0 = 0
    while True:
        ret, frame = videocapture.read()
        if not ret:
            break
        num0 += 1
        if frame_num == 0:
            image = np.zeros_like(frame)
            gray = np.zeros_like(frame)
            prev_gray = np.zeros_like(frame)
            prev_image = frame
            prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
            frame_num += 1

            step_t = step
            while step_t > 1:
                videocapture.read()  # discard the skipped frame
                num0 += 1
                step_t -= 1
            continue

        image = frame
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        frame_0 = prev_gray
        frame_1 = gray

        dtvl1 = cv2.createOptFlow_DualTVL1()
        flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)

        save_flows(flowDTVL1, image, save_dir, frame_num,
                   bound)  # save the flow images and the RGB frame
        prev_gray = gray
        prev_image = image
        frame_num += 1

        # skip the in-between frames dictated by step
        step_t = step
        while step_t > 1:
            videocapture.read()  # discard the skipped frame
            num0 += 1
            step_t -= 1
Example #23
def compute_TVL1flow(args):
    '''
    Parameters (args)
    ----------
    in_path : The path to every video's rgb directory.
    flow_x_path : The path to the flow_x directory for each video.
    flow_y_path : The path to the flow_y directory for each video.

    Returns
    -------
    None
    '''
    bound = 20
    in_path, flow_x_path, flow_y_path = args
    rgb_frames = os.listdir(in_path)
    rgb_frames.sort()
    flow = []
    tv_l1 = cv2.createOptFlow_DualTVL1()

    def obtain_flows(in_path, rgb_frames):
        prev_frame = cv2.imread(os.path.join(in_path, rgb_frames[0]))
        prev_frame = cv2.UMat(
            prev_frame
        )  # Convert to UMat to speed up computation by small factor
        prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_RGB2GRAY)
        prev_gray = cv2.UMat(prev_gray)

        for i, frame_name in enumerate(rgb_frames):
            # note: the first iteration pairs frame 0 with itself, yielding zero flow
            curr_frame = cv2.imread(os.path.join(in_path, frame_name))
            curr_frame = cv2.UMat(curr_frame)
            curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_RGB2GRAY)
            curr_gray = cv2.UMat(curr_gray)

            tvl1_flow = tv_l1.calc(prev_gray, curr_gray, None)
            tvl1_flow = cv2.UMat.get(tvl1_flow)
            assert (tvl1_flow.dtype == np.float32)
            tvl1_flow = (tvl1_flow + bound) * (255.0 / (2 * bound))
            tvl1_flow = np.round(tvl1_flow).astype(int)
            tvl1_flow[tvl1_flow >= 255] = 255
            tvl1_flow[tvl1_flow <= 0] = 0
            prev_gray = curr_gray
            flow.append(tvl1_flow)

        for i, flow_val in enumerate(flow):
            cv2.imwrite(
                os.path.join(flow_x_path.format('u'),
                             "flow_x{:07d}.jpg".format(i)), flow_val[:, :, 0])
            cv2.imwrite(
                os.path.join(flow_y_path.format('v'),
                             "flow_y{:07d}.jpg".format(i)), flow_val[:, :, 1])

    max_frame_per_dir = 15000  # Smaller groups
    # If total number of frames higher, incrementally calculate flows
    if len(rgb_frames) > max_frame_per_dir:
        for groups in range(int(np.ceil(len(rgb_frames) / max_frame_per_dir))):
            rgb_frames_tmp = rgb_frames[groups *
                                        max_frame_per_dir:(groups + 1) *
                                        max_frame_per_dir]
            obtain_flows(in_path, rgb_frames_tmp)
        print('Obtained flows for {}'.format(in_path))

    # Else, directly compute flows
    else:
        obtain_flows(in_path, rgb_frames)
        print('Obtained flows for {}'.format(in_path))
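
flow_x_path and flow_y_path are evidently templates containing a '{}' placeholder, since they are .format()-ed with 'u' and 'v' before writing; a hypothetical invocation with made-up paths:

args = ('dataset/video_001/rgb',        # hypothetical directory layout
        'dataset/video_001/{}_flow/x',  # '{}' is filled with 'u'
        'dataset/video_001/{}_flow/y')  # '{}' is filled with 'v'
compute_TVL1flow(args)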
Example #24
    def __init__(self) -> None:
        super().__init__()
        self.processor = cv2.createOptFlow_DualTVL1()
Example #25
def dense_flow(augs):
    '''
    Extract dense optical-flow images.
    :param augs: the argument tuple:
        video_name: the video name, e.g. 'v_xxxxxxx'; adjust if your naming differs.
        save_dir: name of the final destination directory.
        step: number of frames between each pair of extracted frames.
        bound: bi-bound parameter used to clip the flow values.
    :return: None
    '''
    video_name, save_dir, step, bound = augs
    video_path = os.path.join(videos_root,
                              video_name.split('_')[1], video_name)
    print(video_path)
    # two video-read methods are available: cv2.VideoCapture() and skvideo.io.vread(); both need ffmpeg support

    videocapture = cv2.VideoCapture(video_path)
    # if not videocapture.isOpened():
    #    print 'Could not initialize capturing! ', video_name
    #    exit()
    # try:
    #     videocapture = skvideo.io.vread(video_path)
    # except:
    #     print '{} read error! '.format(video_name)
    #     return 0
    print(video_name)
    # if nothing was read, exit!
    # if videocapture.sum() == 0:
    #    print 'Could not initialize capturing', video_name
    #    exit()
    # len_frame = len(videocapture)
    len_frame = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_num = 0
    image, prev_image, gray, prev_gray = None, None, None, None
    num0 = 0
    while True:
        success, frame = videocapture.read()
        # read() returns True while a frame was read, and False once the end of the file is reached
        if not success:
            print('finished reading frame of video_{}!'.format(video_name))
            break
        if num0 >= len_frame:
            break

        # frame = videocapture[num0]
        cv2.imshow("img", frame)
        cv2.waitKey(20)
        # if image is None:
        #     print("image is None")
        #     continue;
        # print ("image is ok ")
        # print (num0)
        # print ("\n")
        num0 += 1
        if frame_num == 0:
            image = np.zeros_like(frame)
            gray = np.zeros_like(frame)
            prev_gray = np.zeros_like(frame)
            prev_image = frame
            prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
            frame_num += 1
            # skip the in-between frames dictated by step
            step_t = step
            while step_t > 1:
                # frame=videocapture.read()
                num0 += 1
                step_t -= 1
            continue

        image = frame
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        frame_0 = prev_gray
        frame_1 = gray
        # default: use the TV-L1 algorithm
        dtvl1 = cv2.createOptFlow_DualTVL1()
        flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)
        save_flows(flowDTVL1, image, save_dir, frame_num,
                   bound)  # save the flow images and the RGB frame
        prev_gray = gray
        prev_image = image
        frame_num += 1
        # skip the in-between frames dictated by step
        step_t = step
        while step_t > 1:
            # frame=videocapture.read()
            num0 += 1
            step_t -= 1
Example #26
    def extract_tracks(self):
        """
        Extracts tracks from given source.  Setting self.tracks to a list of good tracks within the clip
        :param source_file: filename of cptv file to process
        :returns: True if clip was successfully processed, false otherwise
        """

        assert self.reader, "Must call load before extracting tracks."

        self.reject_reason = None

        # we need to load the entire video so we can analyse the background.
        frames = [frame for frame, offset in self.reader]
        self.frame_buffer.thermal = frames

        # first we get the background.  This requires reading the entire source into memory.
        background, background_stats = self.analyse_background(frames)
        is_static_background = background_stats.background_deviation < self.STATIC_BACKGROUND_THRESHOLD

        self.stats['threshold'] = background_stats.threshold
        self.stats['average_background_delta'] = background_stats.background_deviation
        self.stats['average_delta'] = background_stats.average_delta
        self.stats['mean_temp'] = background_stats.mean_temp
        self.stats['max_temp'] = background_stats.max_temp
        self.stats['min_temp'] = background_stats.min_temp
        self.stats['is_static'] = is_static_background

        self.threshold = background_stats.threshold

        # if the clip is moving then remove the estimated background and just use a threshold.
        if not is_static_background or self.disable_background_subtraction:
            background = None

        if len(frames) <= 9:
            self.reject_reason = "Clip too short {} frames".format(len(frames))
            return False

        if self.reject_non_static_clips and not is_static_background:
            self.reject_reason = "Non static background deviation={:.1f}".format(background_stats.background_deviation)
            return False

        # don't process clips that are too hot.
        if self.MAX_MEAN_TEMPERATURE_THRESHOLD and background_stats.mean_temp > self.MAX_MEAN_TEMPERATURE_THRESHOLD:
            self.reject_reason = "Mean temp too high {}".format(background_stats.mean_temp)
            return False

        # don't process clips with too large of a temperature difference
        if self.MAX_TEMPERATURE_RANGE_THRESHOLD and (background_stats.max_temp - background_stats.min_temp > self.MAX_TEMPERATURE_RANGE_THRESHOLD):
            self.reject_reason = "Temp delta too high {}".format(background_stats.max_temp - background_stats.min_temp)
            return False

        # reset the track ID so we start at 1
        Track._track_id = 1
        self.tracks = []
        self.active_tracks = []
        self.region_history = []

        # create optical flow
        self.opt_flow = cv2.createOptFlow_DualTVL1()
        self.opt_flow.setUseInitialFlow(True)
        if not self.high_quality_optical_flow:
            # see https://stackoverflow.com/questions/19309567/speeding-up-optical-flow-createoptflow-dualtvl1
            self.opt_flow.setTau(1 / 4)
            self.opt_flow.setScalesNumber(3)
            self.opt_flow.setWarpingsNumber(3)
            self.opt_flow.setScaleStep(0.5)

        # process each frame
        self.frame_on = 0
        for frame in frames:
            self.track_next_frame(frame, background)

        # filter out tracks that do not move, or look like noise
        self.filter_tracks()

        # apply smoothing if required
        if self.TRACK_SMOOTHING and len(frames) > 0:
            frame_height, frame_width = frames[0].shape
            for track in self.tracks:
                track.smooth(Rectangle(0,0,frame_width, frame_height))

        return True
Example #27
    def track(self, original_img, filtered_img, prev_data):
        n_objects = self.nObjectsSpinBox.value()
        n_k_means = self.nKmeansSpinBox.value()

        if self.k_means is None:
            self.k_means = cluster.KMeans(n_clusters=n_objects)
        elif n_k_means != self.k_means.n_clusters:
            self.k_means = cluster.KMeans(n_clusters=n_k_means)

        non_zero_pos = np.transpose(np.nonzero(filtered_img.T))
        center_pos = self.k_means.fit(non_zero_pos).cluster_centers_

        windows = np.zeros(center_pos.shape)
        windows[:] = self.windowHeightSpinBox.value()
        windows[:,0] = self.windowWidthSpinBox.value()

        gray_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
        shape = gray_img.shape
        if self.opt_flow is None:
            self.opt_flow = cv2.createOptFlow_DualTVL1()
            self.flow = np.zeros(shape + (2,), dtype=np.float32)  # calc() expects a float32 flow field
            self.prev_pos = center_pos
        else:
            self.flow = self.opt_flow.calc(self.prev_img, gray_img, self.flow)
            for p,w in zip(self.prev_pos, windows):
                region_min_x, region_min_y = (p-w/2).astype(int)
                region_max_x, region_max_y = (p+w/2).astype(int)

                region_min_x = max(0, region_min_x)
                region_min_y = max(0, region_min_y)

                roi = self.flow[region_min_y:region_max_y, region_min_x:region_max_x]
                roi_shape = roi.shape
                print(region_min_x, region_max_x, roi_shape)

                vecs = roi.reshape((np.prod(roi_shape[:2]), 2))
                dists = np.linalg.norm(vecs, axis=1)
                print(np.max(dists))
                vec = np.mean(vecs[dists>np.mean(dists)], axis=0)
                print(vec)
                next_p = p + vec
                p[:] = next_p

                # lb = (0,0)<=next_p
                # ub = next_p<=shape
                #
                # if lb[0] and ub[0]:
                #     p[0] = next_p[0]
                #
                # if lb[1] and ub[1]:
                #     p[1] = next_p[1]

        self.prev_img = gray_img
        res = self.prev_pos

        out = {
                'position': res,
                'rect': [
                    [
                        p-w/2.,
                        p+w/2.
                        ]
                    for p, w in zip(res, windows)
                    ]
                }

        return out
Example #28
def generate_flow(img1, img2):
    gray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
    flowGenerator = cv2.createOptFlow_DualTVL1()
    flow = flowGenerator.calc(gray1, gray2, None)
    return flow
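
A short usage sketch (the frame paths are placeholders) showing what comes back:

import cv2

img1 = cv2.imread('frame_0001.png')  # placeholder paths
img2 = cv2.imread('frame_0002.png')
flow = generate_flow(img1, img2)
print(flow.shape, flow.dtype)  # (H, W, 2) float32: per-pixel (dx, dy)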
Example #29
def dense_flow(augs):
    '''
    Extract dense optical-flow images.
    :param augs: the argument tuple:
        video_name: the video name, e.g. 'v_xxxxxxx'; adjust if your naming differs.
        save_dir: name of the final destination directory.
        step: number of frames between each pair of extracted frames.
        bound: bi-bound parameter used to clip the flow values.
        reduce_n: factor by which each frame is downscaled before computing flow.
    :return: None
    '''
    print(augs)
    video_name, save_dir, step, bound, reduce_n = augs
    save_dir = '/nfs/syzhou/github/two-stream-action-recognition/tvl1_flow_own/'

    video_path = os.path.join(videos_root, video_name.split('_')[0],
                              video_name)  # index 0 keeps the part before '_'
    # e.g. /nfs/syzhou/github/two-stream-action-recognition/UCF-101_own/dancing/dancing_1.avi
    # two video-read methods are available: cv2.VideoCapture() and skvideo.io.vread(); both need ffmpeg support

    # videocapture=cv2.VideoCapture(video_path)
    # if not videocapture.isOpened():
    #     print 'Could not initialize capturing! ', video_name
    #     exit()
    try:
        print(video_path)
        videocapture = skvideo.io.vread(video_path)
    except:
        print('{} read error!'.format(video_name))
        return 0
    print(video_name)
    # if nothing was read, exit!
    if videocapture.sum() == 0:
        print('Could not initialize capturing', video_name)
        exit()
    len_frame = len(videocapture)
    print(len_frame)
    frame_num = 0
    image, prev_image, gray, prev_gray = None, None, None, None
    num0 = 0
    while True:
        # frame = videocapture.read()
        if num0 >= len_frame:
            break
        frame = videocapture[num0]
        # print(frame.size)
        num0 += 1
        if frame_num == 0:
            image = np.zeros_like(frame)
            gray = np.zeros_like(frame)
            prev_gray = np.zeros_like(frame)
            prev_image = frame
            prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
            frame_num += 1
            # skip the in-between frames dictated by step
            step_t = step
            while step_t > 1:
                # frame = videocapture.read()
                num0 += 1
                step_t -= 1
            continue

        image = frame
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        frame_0 = prev_gray
        frame_1 = gray
        # shrink the frames by a factor of reduce_n; for reference:
        '''
        size = (int(width * 0.3), int(height * 0.5))
        shrink = cv2.resize(img, size, interpolation=cv2.INTER_AREA)
        print('width: %d, height: %d' % (im.size[0], im.size[1]))
        '''
        # print(frame_0.shape)
        # print(image.size[0])
        height_0, width_0 = frame_0.shape[:2]
        height_1, width_1 = frame_1.shape[:2]
        # e.g. frames are 2160 x 4096 (rows x cols)

        size_0 = (int(width_0 / reduce_n), int(height_0 / reduce_n))
        frame_0 = cv2.resize(frame_0, size_0, interpolation=cv2.INTER_AREA)
        size_1 = (int(width_1 / reduce_n), int(height_1 / reduce_n))
        frame_1 = cv2.resize(frame_1, size_1, interpolation=cv2.INTER_AREA)

        # default: use the TV-L1 algorithm
        dtvl1 = cv2.createOptFlow_DualTVL1()
        # dtvl1 = cv2.createOptFlow_PCAFlow()
        # dtvl1 = cv2.optflow.createOptFlow_DualTVL1()

        flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)
        print("complete", num0, video_name)
        save_flows(flowDTVL1, image, save_dir, frame_num, bound, video_name,
                   reduce_n, height_1, width_1)  # save the flow images and the RGB frame
        prev_gray = gray
        prev_image = image
        frame_num += 1
        # skip the in-between frames dictated by step
        step_t = step
        while step_t > 1:
            # frame = videocapture.read()
            num0 += 1
            step_t -= 1
Example #30
os.mkdir(maindir + '/Data/Flow')
output = maindir + '/Data/Flow'
dirrr = maindir + '/Data/Resize'

framename = os.listdir(dirrr)
framename.sort()

prvs = cv2.imread(os.path.join(dirrr, framename[0]), cv2.IMREAD_GRAYSCALE)
hsv = np.zeros_like(
    cv2.imread(os.path.join(dirrr, framename[0]), cv2.IMREAD_UNCHANGED))
hsv[..., 1] = 255

framename = framename[1:]

for frame in tqdm(framename):
    inimg = os.path.join(dirrr, frame)
    outimg = os.path.join(output, frame)

    next_frame = cv2.imread(inimg, cv2.IMREAD_GRAYSCALE)
    dualTV = cv2.createOptFlow_DualTVL1()  # note: could be created once, before the loop
    flow = dualTV.calc(prvs, next_frame, None)

    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    cv2.imwrite(outimg, rgb)
    prvs = next_frame
Example #31
def dense_flow(video_list, activity):

    base_path = "D:/HMDB51/videos/" + activity + "/"

    for i in range(0, len(video_list)):

        #capture video information
        videocapture = cv2.VideoCapture(base_path + video_list[i])
        print(base_path + video_list[i])
        frame_width = int(videocapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(videocapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        len_frame = int(videocapture.get(cv2.CAP_PROP_FRAME_COUNT))

        #instantiate a video writer
        out = cv2.VideoWriter(base_path + "/flow/flow_" + video_list[i],
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                              (frame_width, frame_height))

        #iterate over frames
        image, prev_image, gray, prev_gray = None, None, None, None
        frame_num = 0
        for ziv in range(0, len_frame):
            ret, frame = videocapture.read()  # read the next frame

            #getting first frame
            if frame_num == 0:
                prev_image = frame
                #cv2.imshow('image', frame)
                prev_gray = cv2.cvtColor(prev_image, cv2.COLOR_RGB2GRAY)
                frame_num += 1
                continue

            image = frame

            blank_image = np.zeros((frame_height, frame_width, 3), np.uint8)

            try:
                gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                frame_0 = prev_gray
                frame_1 = gray

                # default: use the TV-L1 algorithm
                dtvl1 = cv2.createOptFlow_DualTVL1()
                flowDTVL1 = dtvl1.calc(frame_0, frame_1, None)

                flow_x = ToImg(flowDTVL1[..., 0], bound)
                flow_y = ToImg(flowDTVL1[..., 1], bound)
                #flow_x_img=Image.fromarray(flow_x)
                #flow_y_img=Image.fromarray(flow_y)

                #flow_x_img = rescale_flow(flow_x_img)
                #flow_y_img = rescale_flow(flow_y_img)

                #print(np.shape(flowDTVL1[...,0]))
                blank_image[:, :, 0] = flow_x
                blank_image[:, :, 1] = np.zeros_like(flow_x)
                blank_image[:, :, 2] = flow_y

                out.write(blank_image)
                #save_flows(flowDTVL1,image,save_dir,frame_num,bound) #this is to save flows and img.
                prev_gray = gray
                prev_image = image
                frame_num += 1
            except cv2.error:
                print("frame is already in grayscale")

        out.release()
        videocapture.release()
Example #32
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset",
                        type=str,
                        default="youtube",
                        help="dataset to parse flow")

    args = parser.parse_args()
    data_root = Path("data/{}_tempered".format(args.dataset))

    im_root = data_root / "vid"
    flow_root = data_root / "flow"

    fn_read = lambda x: cv2.cvtColor(cv2.imread(str(x)), cv2.COLOR_BGR2GRAY)

    dtvl1 = cv2.createOptFlow_DualTVL1()

    for cnt, vid in enumerate(tqdm(im_root.iterdir())):
        print(cnt, " : ", vid)
        vid_name = vid.name
        this_flow_dir = flow_root / vid_name
        this_flow_dir.mkdir(exist_ok=True, parents=True)

        files = sorted(vid.iterdir(), key=lambda x: int(x.stem))

        first_file = files[0]
        prev_im = fn_read(first_file)

        for i in range(1, len(files)):
            fp = files[i]
            img = fn_read(fp)
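            # hypothetical continuation (the loop body is truncated in the source):
            # compute TV-L1 flow for each consecutive pair and save it; the
            # output format is not shown, so a NumPy file is assumed here
            flow = dtvl1.calc(prev_im, img, None)
            np.save(str(this_flow_dir / fp.stem), flow)  # assumed .npy output
            prev_im = img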
Example #33
def findOpticalFlow(inputImage1, 
    inputImage2,
    outputWarpedImage,
    outputFlowImage,
    verbose,
    opticalFlowImplementation="simpleflow"):

    oiioImageBuffer1 = ImageBuf( inputImage1 )
    ImageBufReorient(oiioImageBuffer1, oiioImageBuffer1.orientation)

    oiioImageBuffer2 = ImageBuf( inputImage2 )
    ImageBufReorient(oiioImageBuffer2, oiioImageBuffer2.orientation)

    if verbose:
        print( "load and convert 1 - %s" % inputImage1 )
    openCVImageBuffer1 = OpenCVImageBufferFromOIIOImageBuffer(oiioImageBuffer1)
    if verbose:
        print( "load and convert 2 - %s" % inputImage2 )
    openCVImageBuffer2 = OpenCVImageBufferFromOIIOImageBuffer(oiioImageBuffer2)

    if verbose:
        print( "resolution : %s" % str(openCVImageBuffer1.shape) )
        print( "calculate optical flow 1 -> 2")

    if opticalFlowImplementation == "old_farneback":
        if verbose:
            print( "older farneback implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        previous_flow = None
        pyramid_scale = 0.5
        pyramid_levels = 5
        window_size = 50
        iterations_per_pyramid_level = 20
        pixel_neighborhood_size = 3
        neighborhood_match_smoothing_factor = 1.0
        flags = cv2.OPTFLOW_FARNEBACK_GAUSSIAN

        if verbose:
            print( "calculate")
        opencvFlow = cv2.calcOpticalFlowFarneback(gray1, gray2, 
            previous_flow, pyramid_scale, pyramid_levels, window_size, iterations_per_pyramid_level, 
            pixel_neighborhood_size, neighborhood_match_smoothing_factor, flags)

    elif opticalFlowImplementation == "farneback":
        if verbose:
            print( "farneback implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_Farneback()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "dualtvl1":
        if verbose:
            print( "dualtvl1 implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.createOptFlow_DualTVL1()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "sparsetodense":
        if verbose:
            print( "sparse to dense implementation" )
        # Current set of constants... Ranges and good values should be documented
        if verbose:
            print( "calculate")
        opencvFlow = cv2.optflow.calcOpticalFlowSparseToDense(openCVImageBuffer1, openCVImageBuffer2, None,
            8, 128, 0.05, True, 500.0, 1.5)

    elif opticalFlowImplementation == "deepflow":
        if verbose:
            print( "deep flow implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_DeepFlow()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "dis":
        if verbose:
            print( "dis implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_DIS()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "pcaflow":
        if verbose:
            print( "pca flow implementation" )

        if verbose:
            print( "to grey 1")
        gray1 = cv2.cvtColor(openCVImageBuffer1, cv2.COLOR_BGR2GRAY)

        if verbose:
            print( "to grey 2")
        gray2 = cv2.cvtColor(openCVImageBuffer2, cv2.COLOR_BGR2GRAY)

        # Set of constants should be added
        implementation = cv2.optflow.createOptFlow_PCAFlow()
        if verbose:
            print( "calculate")
        opencvFlow = implementation.calc(gray1, gray2, None)

    elif opticalFlowImplementation == "simpleflow":
        if verbose:
            print( "simple flow implementation" )
        # Current set of constants... Ranges and good values should be documented
        opencvFlow = cv2.optflow.calcOpticalFlowSF(openCVImageBuffer1, openCVImageBuffer2, 
            3, 2, 4, 4.1, 25.5, 18, 55.0, 25.5, 0.35, 18, 55.0, 25.5, 10)

    else:
        print( "Unknown optical flow implementation : %s" % opticalFlowImplementation )
        opencvFlow = None

    if outputWarpedImage and (opencvFlow is not None):
        if verbose:
            print( "warping 1 -> 2")
        opencvWarped = applyOpticalFlow(openCVImageBuffer1, opencvFlow)

        if verbose:
            print( "converting and writing warped image - %s" % outputWarpedImage )
        oiioWarped = OIIOImageBufferFromOpenCVImageBuffer( opencvWarped )
        oiioWarped.write( outputWarpedImage )
    else:
        opencvWarped = None

    if outputFlowImage and (opencvFlow is not None):
        if verbose:
            print( "converting and writing flow image - %s" % outputFlowImage )

        oiioFlowBuffer = OIIOImageBufferFromOpenCVImageBuffer( opencvFlow )
        oiioFlowBuffer.write( outputFlowImage )

    return (opencvWarped, opencvFlow)