Example 1
    def preprocess(self, img1, img2, boxes1, boxes2):
            # Read image and compute difference
            h, w, _ = img1.shape
            img1 = cv2.resize(img1, self.img_size)
            img2 = cv2.resize(img2, self.img_size)
            img1 = img1.astype(float) / 255.
            img2 = img2.astype(float) / 255.
            diff = img2 - img1

            # Compute difference of gradients
            sobelx1 = cv2.Sobel(img1, cv2.CV_64F, 1, 0, ksize=5)
            sobelx2 = cv2.Sobel(img2, cv2.CV_64F, 1, 0, ksize=5)
            sobely1 = cv2.Sobel(img1, cv2.CV_64F, 0, 1, ksize=5)
            sobely2 = cv2.Sobel(img2, cv2.CV_64F, 0, 1, ksize=5)
            gradx_diff = sobelx2 - sobelx1
            grady_diff = sobely2 - sobely1

            # Compute optical flow
            u, v, _ = pyflow.coarse2fine_flow(img1, img2, 0.012, 0.75, 20, 7, 1, 30, 0)
            u = np.expand_dims(u, 2)
            v = np.expand_dims(v, 2)

            # bbox mask
            mask = np.zeros([self.img_size[0], self.img_size[1], 1])
            for box in boxes1:
                x1, y1, x2, y2 = rescale_bbox(box, (h,w), self.img_size)
                mask[y1:y2, x1:x2, :] = 1

            inputs = np.concatenate([mask, diff, gradx_diff, grady_diff, u, v], axis=2).transpose(2, 0, 1)
            label = 0 if len(boxes1) == len(boxes2) else 1
            return torch.FloatTensor(inputs), torch.FloatTensor([label])
Example 2
def optical_flow(img1, img2):
    im1 = img1.astype(float) / 255.
    im2 = img2.astype(float) / 255.

    # Flow Options:
    alpha = 0.012
    ratio = 0.75
    min_width = 20
    n_outer_fp_iterations = 7
    n_inner_fp_iterations = 1
    n_sor_iterations = 30
    col_type = 0  # 0 or default:RGB, 1:GRAY (but pass gray image with shape (h,w,1))

    u, v, im2W = pyflow.coarse2fine_flow(im1, im2, alpha, ratio, min_width,
                                         n_outer_fp_iterations,
                                         n_inner_fp_iterations,
                                         n_sor_iterations, col_type)
    flow = np.concatenate((u[..., None], v[..., None]), axis=2)

    hsv = np.zeros(im1.shape, dtype=np.uint8)
    hsv[:, :, 0] = 255
    hsv[:, :, 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return rgb
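A minimal driver for the helper above; a sketch assuming cv2, numpy as np and pyflow are imported, and that 'frame_0.png' / 'frame_1.png' are hypothetical paths to two consecutive frames:

import cv2

# Hypothetical input paths; any aligned, same-size frame pair works.
img1 = cv2.imread('frame_0.png')  # uint8 BGR, shape (h, w, 3)
img2 = cv2.imread('frame_1.png')

vis = optical_flow(img1, img2)    # the helper normalizes to [0, 1] internally
cv2.imwrite('flow_vis.png', vis)  # cvtColor(..., COLOR_HSV2BGR) output is BGR, as imwrite expects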
Example 3
def run_flow(dir_path):
    '''
    Compute dense optical flow for every consecutive image pair in a sequence.

    Args:
        dir_path: sequence directory, e.g. '/dataset/smoke_dataset/wildfire_smoke_1'

    Returns:
        None. Flow fields are saved as .npy files under dir_path/Flow.
    '''
    print("run dense flow for images in {}".format(dir_path))
    # get the images
    images_dir = os.path.join(dir_path, 'Image')
    assert os.path.exists(images_dir), "the image folder {} does not exist.".format(images_dir)
    output_dir = os.path.join(dir_path, 'Flow')
    os.makedirs(output_dir, exist_ok=True)  # np.save below fails if the folder is missing
    image_paths = glob_files(images_dir)
    image_paths.sort(key=natural_keys)
    num_images = len(image_paths)
    # pad first and last images by copying the data
    padded_image_paths = [image_paths[0]] + image_paths + [image_paths[-1]]
    for i in range(num_images):
        prev_image_path, next_image_path = padded_image_paths[i], padded_image_paths[i+1]
        pre_image = np.array(Image.open(prev_image_path), dtype=np.double) / 255.
        next_image = np.array(Image.open(next_image_path), dtype=np.double) / 255.
        # alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,
        # nSORIterations and colType are module-level flow constants in the source file
        u, v, im2W = pyflow.coarse2fine_flow(
            pre_image, next_image, alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,
            nSORIterations, colType)
        flow = np.concatenate((u[..., None], v[..., None]), axis=2)
        np.save(os.path.join(output_dir, next_image_path.split('/')[-1].split('.')[0] + '.npy'), flow)
Example 4
    def preprocess(self, img1, img2, boxes1, boxes2):
            # Read image
            h, w, _ = img1.shape
            img1 = cv2.resize(img1, (520, 520))
            img2 = cv2.resize(img2, (520, 520))
            img1 = img1.astype(float) / 255.
            img2 = img2.astype(float) / 255.

            # Extract deep feature
            ts1 = torch.from_numpy(img1.transpose(2,0,1)).unsqueeze(0).to(self.device).float()
            ts2 = torch.from_numpy(img2.transpose(2,0,1)).unsqueeze(0).to(self.device).float()
            feat1 = self.feature(ts1)
            feat2 = self.feature(ts2)
            diff = (feat2 - feat1)**2
            diff = diff.squeeze().data.cpu()

            # Compute optical flow
            img1 = cv2.resize(img1, self.img_size)
            img2 = cv2.resize(img2, self.img_size)
            u, v, _ = pyflow.coarse2fine_flow(img1, img2, 0.012, 0.75, 20, 7, 1, 30, 0)
            u = np.expand_dims(u, 2)
            v = np.expand_dims(v, 2)

            # bbox mask
            mask = np.zeros([self.img_size[0], self.img_size[1], 1])
            for box in boxes1:
                x1, y1, x2, y2 = rescale_bbox(box, (h,w), self.img_size)
                mask[y1:y2, x1:x2, :] = 1

            inputs = np.concatenate([mask, u, v], axis=2).transpose(2, 0, 1)
            inputs = torch.cat((torch.FloatTensor(inputs), diff), 0)
            label = torch.FloatTensor([0 if len(boxes1) == len(boxes2) else 1])
            return inputs, label
Example 5
    def calculate_optical_flow(self, prev_frame, next_frame):
        # prev_frame_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
        # next_frame_gray = cv2.cvtColor(next_frame, cv2.COLOR_BGR2GRAY)

        alpha = 0.012
        ratio = 1  # 0.75
        minWidth = int(180 / 4)
        nOuterFPIterations = 1
        nInnerFPIterations = 1
        nSORIterations = 30
        colType = 0  # 0 or default:RGB, 1:GRAY (but pass gray image with shape (h,w,1))
        img_shape = (int(256 / 4), int(128 / 4))

        prev_img = Image.fromarray(prev_frame)
        prev_img = prev_img.resize(img_shape, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10

        next_img = Image.fromarray(next_frame)
        next_img = next_img.resize(img_shape, Image.LANCZOS)

        u, v, im2W = pyflow.coarse2fine_flow(
            np.array(prev_img).astype(float) / 255.0, np.array(next_img).astype(float) / 255.0, alpha, ratio, minWidth,
            nOuterFPIterations, nInnerFPIterations,
            nSORIterations, colType)

        flow = np.concatenate((u[..., None], v[..., None]), axis=2)

        # flow = cv2.calcOpticalFlowFarneback(prev_frame_gray, next_frame_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        # flow[prev_frame_gray < 15] = 0
        flow = flow * 4.0  # rescale displacements to match the 4x spatial upsampling below

        flow = zoom(flow, (4, 4, 1))  # zoom: scipy.ndimage.zoom
        return flow.astype(np.float32)
Example 6
    def oflow(self, im1, im2):
        alpha = 0.012
        ratio = 0.75
        minWidth = 20
        nOuterFPIterations = 7
        nInnerFPIterations = 1
        nSORIterations = 30
        colType = 0  # 0 or default: RGB, 1: GRAY (but pass a gray image with shape (h, w, 1))
        u, v, im2W = pyflow.coarse2fine_flow(
            im1, im2, alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,
            nSORIterations, colType)
        flow = np.concatenate((u[..., None], v[..., None]), axis=2)

        return flow
Example 7
def get_flow(im1, im2):
    im1 = np.array(im1)
    im2 = np.array(im2)
    im1 = im1.astype(float) / 255.
    im2 = im2.astype(float) / 255.
    u, v, im2W = pyflow.coarse2fine_flow(im1,
                                         im2,
                                         alpha=0.012,
                                         ratio=0.75,
                                         minWidth=20,
                                         nOuterFPIterations=7,
                                         nInnerFPIterations=1,
                                         nSORIterations=30,
                                         colType=0)
    flow = np.concatenate((u[..., None], v[..., None]),
                          axis=2).transpose(2, 0, 1)
    return flow
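Because get_flow returns a channel-first (2, h, w) array, wrapping it for a PyTorch model is a one-liner. A sketch assuming torch is installed and im1 / im2 are two already-loaded frames:

import torch

flow = get_flow(im1, im2)                             # np.float64, shape (2, h, w)
flow_t = torch.from_numpy(flow).float().unsqueeze(0)  # (1, 2, h, w) float32 batch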
Example 8
def calculate_flow(im1,
                   im2,
                   alpha=0.012,
                   ratio=0.75,
                   minWidth=20,
                   nOuterFPIterations=7,
                   nInnerFPIterations=1,
                   nSORIterations=30,
                   colType=0):
    im1 = im1.astype(float) / 255.
    im2 = im2.astype(float) / 255.
    u, v, im2W = pyflow.coarse2fine_flow(im1, im2, alpha, ratio, minWidth,
                                         nOuterFPIterations,
                                         nInnerFPIterations, nSORIterations,
                                         colType)
    flow = numpy.concatenate((u[..., None], v[..., None]), axis=2)
    return flow
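A usage sketch for calculate_flow, assuming cv2 is available and 'prev.png' / 'next.png' are hypothetical frame paths; the per-pixel displacement magnitude is a common follow-up:

import cv2
import numpy

# Hypothetical inputs; substitute any aligned uint8 frame pair.
prev_frame = cv2.imread('prev.png')
next_frame = cv2.imread('next.png')

flow = calculate_flow(prev_frame, next_frame)              # shape (h, w, 2): (u, v) per pixel
magnitude = numpy.sqrt(flow[..., 0]**2 + flow[..., 1]**2)  # displacement in pixels
print('mean displacement: {:.3f} px'.format(magnitude.mean()))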
Example 9
def run_flow(dir_path):
    '''
    Compute dense optical flow for every consecutive image pair in a sequence
    and save an HSV-based color visualization of each flow field.

    Args:
        dir_path: sequence directory, e.g. '/dataset/smoke_dataset/wildfire_smoke_1'

    Returns:
        None. Flow visualizations are saved as .png files under dir_path/Flow_rgb.
    '''
    print("run dense flow for images in {}".format(dir_path))
    # get the images
    images_dir = os.path.join(dir_path, 'Image')
    assert os.path.exists(images_dir), "the image folder {} does not exist.".format(images_dir)
    output_dir = os.path.join(dir_path, 'Flow_rgb')
    os.makedirs(output_dir, exist_ok=True)  # cv2.imwrite below fails silently if the folder is missing
    image_paths = glob_files(images_dir)
    image_paths.sort(key=natural_keys)
    num_images = len(image_paths)
    # pad first and last images by copying the data
    padded_image_paths = [image_paths[0]] + image_paths + [image_paths[-1]]
    for i in range(num_images):
        prev_image_path, next_image_path = padded_image_paths[i], padded_image_paths[i+1]
        pre_image = np.array(Image.open(prev_image_path), dtype=np.double) / 255.
        next_image = np.array(Image.open(next_image_path), dtype=np.double) / 255.
        u, v, im2W = pyflow.coarse2fine_flow(
            pre_image, next_image, alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,
            nSORIterations, colType)
        flow = np.concatenate((u[..., None], v[..., None]), axis=2)
        # np.save(os.path.join(output_dir, next_image_path.split('/')[-1].split('.')[0] + '.npy'), flow)
        import cv2
        hsv = np.zeros(pre_image.shape, dtype=np.uint8)
        hsv[:, :, 0] = 255
        hsv[:, :, 1] = 255
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        cv2.imwrite(os.path.join(output_dir, next_image_path.split('/')[-1].split('.')[0] + '.png'), rgb)
Example 10
def flow_pyflow(img_prev, img_next):
    img_prev = img_prev.astype(float) / 255.
    img_next = img_next.astype(float) / 255.

    # Flow Options:
    alpha = 0.012
    ratio = 0.75
    minWidth = 20
    nOuterFPIterations = 7
    nInnerFPIterations = 1
    nSORIterations = 30
    colType = 0  # 0 or default:RGB, 1:GRAY (but pass gray image with shape (h,w,1))

    start = time.time()
    u, v, _ = pyflow.coarse2fine_flow(img_prev, img_next, alpha, ratio,
                                      minWidth, nOuterFPIterations,
                                      nInnerFPIterations, nSORIterations,
                                      colType)
    end = time.time()
    flow = np.concatenate((u[..., None], v[..., None]), axis=2)

    return flow, end - start
Example 11
    def preprocess(self, img1, img2, boxes1, boxes2):
            # Read image
            h, w, _ = img1.shape
            img1 = cv2.resize(img1, self.img_size)
            img2 = cv2.resize(img2, self.img_size)
            img1 = img1.astype(float) / 255.
            img2 = img2.astype(float) / 255.
            diff = np.square(img1 - img2)

            # Compute optical flow
            u, v, img2_warp = pyflow.coarse2fine_flow(img2, img1, 0.012, 0.75, 20, 7, 1, 30, 0)
            u = np.expand_dims(u, 2)
            v = np.expand_dims(v, 2)
            diff_warp = np.square(img2 - img2_warp)

            # bbox mask
            mask = np.zeros([self.img_size[0], self.img_size[1], 1])
            for box in boxes2:
                x1, y1, x2, y2 = rescale_bbox(box, (h,w), self.img_size)
                mask[y1:y2, x1:x2, :] = 1

            inputs = np.concatenate([mask, u, v, img2, diff, diff_warp], axis=2).transpose(2, 0, 1)
            label = 1 if len(boxes1) < len(boxes2) else 0
            return torch.FloatTensor(inputs), torch.FloatTensor([label])
Example 12
def main():
    """
    Add documentation.

    :return: Nothing
    """
    DATA_ROOT = os.path.join(ROOT_DIR, 'data', 'AICity_data')
    # Set useful directories
    frames_dir = os.path.join(DATA_ROOT, 'train', SEQ, CAM, 'frames')
    results_dir = os.path.join(OUTPUT_DIR, WEEK, TASK, EXP_NAME)

    # Create folders if they don't exist
    if not os.path.isdir(results_dir):
        os.makedirs(results_dir)  # makedirs also creates missing intermediate directories
    # Ground truth file path
    gt_file = os.path.join(DATA_ROOT, 'train', SEQ, CAM, 'gt', 'gt.txt')

    #gt_file = os.path.join(ROOT_DIR,'data', 'm6-full_annotation.xml')
    #gt_file = os.path.join(ROOT_DIR,'data', 'm6-full_annotation2.pkl')
    # get camera offsets
    time_offset, fps = ut.obtain_timeoff_fps(DATA_ROOT, SEQ, CAM)
    print('Seq: {}, Camera: {} has an offset of {} sec, and {} fps'.format(
        SEQ, CAM, time_offset, fps))
    #df = ut.getBBox_from_gt(gt_file,save_in = out_file)
    df = ut.getBBox_from_gt(gt_file)
    #print(gt_file)

    det_file = os.path.join(ROOT_DIR, 'data', 'AICity_data', 'train', SEQ, CAM,
                            'det', 'det_yolo3.txt')
    # Get BBox detection from list
    #det_file = gt_file
    df = ut.getBBox_from_gt(det_file)
    #print(df.dtypes)
    #df = ut.get_bboxes_from_MOTChallenge(gt_file)
    # Get BBox from xml gt_file
    #df = get_bboxes_from_aicity_file(fname, save_in=None)
    df = df.sort_values(by=['frame'])  # sort_values returns a new DataFrame

    # New columns for tracking
    df.loc[:, 'track_id'] = -1
    df.loc[:, 'track_iou'] = -1.0
    # Motion
    df.loc[:, 'Dx'] = -300.0
    df.loc[:, 'Dy'] = -300.0
    df.loc[:, 'rot'] = -1.0
    df.loc[:, 'zoom'] = -1.0
    df.loc[:, 'Mx'] = -1.0
    df.loc[:, 'My'] = -1.0
    df.loc[:, 'area'] = -1.0
    df.loc[:, 'ratio'] = -1.0
    df.loc[:, 'ofDx'] = 0.0
    df.loc[:, 'ofDy'] = 0.0
    df.loc[:, 'time_stamp'] = 0.0
    # Group bbox by frame
    df_grouped = df.groupby('frame')

    vals = list()

    # Get first bbox

    frame_p = 0
    df_gt_p = []

    # iterate over each group
    #df_track = pd.DataFrame({'frame':[], 'ymin':[], 'xmin':[], 'ymax':[], 'xmax':[]})
    headers = list(df.head(0))
    print(headers)
    print('---------')
    df_track = pd.DataFrame(columns=headers)

    #Initialize Track ID - unique ascending numbers
    Track_id = 8

    for f, df_group in df_grouped:
        df_group = df_group.reset_index(drop=True)
        if f % 50 == 0:
            print(f)
        if f > 3000:
            break

        im_path = os.path.join(frames_dir,
                               'frame_' + str(int(f)).zfill(3) + '.jpg')

        # 1st frame -
        if frame_p == 0:

            frame_p = df_group['frame'].values[0]
            print('First detected object at frame {}'.format(frame_p))

            # Assign new tracks
            for t in range(len(df_group)):
                bAc, bAa, bAr = bb.getBboxDescriptor(df_group.loc[[t]])
                df_group.at[t, 'track_id'] = Track_id
                df_group.at[t, 'Mx'] = bAc[0]
                df_group.at[t, 'My'] = bAc[1]
                df_group.at[t, 'area'] = bAa
                df_group.at[t, 'ratio'] = bAr
                df_group.at[t, 'time_stamp'] = ut.timestamp_calc(
                    f, time_offset, fps)
                Track_id += 1

            df_p_group = pd.DataFrame(columns=headers)
            df_p_group = pd.concat([df_p_group, df_group], ignore_index=True)
            df_p_group = df_p_group.dropna()

            df_track = pd.concat([df_track, df_p_group], ignore_index=True)

            #plot 1st frame
            if PLOT_FLAG:

                plt.ion()
                plt.show()
                fig = plt.figure()
                ax = fig.add_subplot(111)

                bbox = bb.bbox_list_from_pandas(df_p_group)
                ut.plot_bboxes(cv.imread(im_path, cv.IMREAD_GRAYSCALE),
                               bbox,
                               l=df_p_group['track_id'].tolist(),
                               ax=ax,
                               title=str(f))

                if SAVE_FLAG:
                    img = np.frombuffer(fig.canvas.tostring_rgb(),
                                        dtype=np.uint8)
                    img = img.reshape(fig.canvas.get_width_height()[::-1] +
                                      (3, ))
                    img = cv.cvtColor(img, cv.COLOR_RGB2BGR)

                    #cv.imshow(img)
                    if VID_FLAG:
                        fourcc = cv.VideoWriter_fourcc(*'MJPG')

                        s = np.shape(img)  # (height, width, channels)
                        out = cv.VideoWriter(
                            os.path.join(results_dir, "IOU.avi"), fourcc, 30.0,
                            (s[1], s[0]))  # VideoWriter expects (width, height)
                        out.write(img)
            continue

        frame_p = df_group['frame'].values[0]

        # If there are more than N frames between detections, it is a new track,
        # even if the bboxes overlap.

        if df_p_group['frame'].values[0] + DET_GAP > df_group['frame'].values[0]:

            iou_mat = bb.bbox_lists_iou(df_p_group, df_group)
            #print('first',iou_mat)
            matchlist, iou_score = bb.match_iou(iou_mat, iou_th=0)
            #print('second',iou_mat)
            #print(iou_score)
            # sort it according to the new frame

            offset = np.min(df_group.index.values.tolist())
            #print(offset)
            if OF_FLAG:
                bbox = bb.bbox_list_from_pandas(df_group)
                im_path_curr = os.path.join(
                    frames_dir, 'frame_' + str(int(f)).zfill(3) + '.jpg')
                im_path_prev = os.path.join(
                    frames_dir, 'frame_' + str(int(f) - 1).zfill(3) + '.jpg')
                print(im_path_prev)
                print(im_path_curr)
                im1 = np.array(Image.open(im_path_curr))
                im2 = np.array(Image.open(im_path_prev))
                im1 = im1.astype(float) / 255.
                im2 = im2.astype(float) / 255.
                u, v, im2W = pyflow.coarse2fine_flow(im1, im2, alpha, ratio,
                                                     minWidth,
                                                     nOuterFPIterations,
                                                     nInnerFPIterations,
                                                     nSORIterations, colType)

            for t, iou_s in zip(matchlist, iou_score):
                df_group.at[offset + t[1], 'track_id'] = df_p_group.at[
                    t[0], 'track_id']
                df_group.at[offset + t[1], 'track_iou'] = iou_s
                # Motion parameters
                #------------------
                # Dx ,Dv - of bbox center
                # Zoom - Ratio of areas
                # Rot - Ratio of Ratio(h/w)'' -describes rotation
                if OF_FLAG:
                    c_bbox = bbox[t[1]]
                    xv, yv = np.meshgrid(range(int(c_bbox[1]), int(c_bbox[3])),
                                         range(int(c_bbox[0]), int(c_bbox[2])))

                    c_u = u[yv, xv]
                    c_v = v[yv, xv]
                else:
                    c_u = 0.0
                    c_v = 0.0

                box_motion = bb.getMotionBbox(df_p_group.loc[[t[0]]],
                                              df_group.loc[[offset + t[1]]])

                # .columns = ['Dy', 'Dx', 'zoom', 'rot', 'Mx', 'My']
                df_group.loc[[offset + t[1]], 'rot'] = box_motion[3]
                df_group.loc[[offset + t[1]], 'zoom'] = box_motion[2]
                df_group.loc[[offset + t[1]], 'Dx'] = box_motion[0]
                df_group.loc[[offset + t[1]], 'Dy'] = box_motion[1]
                df_group.loc[[offset + t[1]], 'Mx'] = box_motion[4]
                df_group.loc[[offset + t[1]], 'My'] = box_motion[5]
                df_group.loc[[offset + t[1]], 'area'] = box_motion[6]
                df_group.loc[[offset + t[1]], 'ratio'] = box_motion[7]
                df_group.loc[[offset + t[1]], 'ofDx'] = np.mean(c_u)
                df_group.loc[[offset + t[1]], 'ofDy'] = np.mean(c_v)
                df_group.loc[[offset + t[1]],
                             'time_stamp'] = ut.timestamp_calc(
                                 f, time_offset, fps)
                #print(df_group)

                # Setting the confidence as the iou score
                # TODO
        else:
            print(
                'All tracks were initialized because there was no detection for {} frames'
                .format(DET_GAP))

            #print(df_group)
        # Assign new tracks
        for t in df_group.index[df_group['track_id'] == -1].tolist():
            bAc, bAa, bAr = bb.getBboxDescriptor(df_group.loc[[t]])
            df_group.at[t, 'track_id'] = Track_id
            df_group.at[t, 'Mx'] = bAc[0]
            df_group.at[t, 'My'] = bAc[1]
            df_group.at[t, 'area'] = bAa
            df_group.at[t, 'ratio'] = bAr
            df_group.at[t, 'time_stamp'] = ut.timestamp_calc(f, time_offset, fps)
            Track_id += 1

        #print(df_group)
        df_p_group = pd.DataFrame(columns=headers)
        df_p_group = pd.concat([df_p_group, df_group], ignore_index=True)
        df_p_group = df_p_group.dropna()
        #print(df_p_group)
        df_track = pd.concat([df_track, df_p_group], ignore_index=True)
        #print(df_track)
        if PLOT_FLAG:
            bbox = bb.bbox_list_from_pandas(df_p_group)
            #print(df_p_group['track_id'].tolist())
            ut.plot_bboxes(cv.imread(im_path, cv.IMREAD_GRAYSCALE),
                           bbox,
                           l=[
                               df_p_group['track_id'].tolist(),
                               df_p_group['track_iou'].tolist()
                           ],
                           ax=ax,
                           title="frame: " + str(f))

            if SAVE_FLAG:
                img = np.frombuffer(fig.canvas.tostring_rgb(),
                                    dtype=np.uint8)
                img = img.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
                img = cv.cvtColor(img, cv.COLOR_RGB2BGR)
                #print(np.shape(img))
                cv.imwrite(os.path.join(results_dir, "{}.png".format(f)), img)
                #cv.imshow('test',img)
                if VID_FLAG:

                    out.write(img)
        #bbox_iou(bboxA, bboxB)

    #    END OF PROCESS

    if REFINE:
        #df_det = ut.track_cleanup(df_det,MIN_TRACK_LENGTH=10)
        #df_track = ut.track_cleanup(df_track,MIN_TRACK_LENGTH=10,MOTION_MERGE=5)
        save_in = os.path.join(results_dir, "pred_tracks0.pkl")
        df_track.to_pickle(save_in)
        df_track = ut.track_cleanup(
            df_track, MIN_TRACK_LENGTH=10,
            STATIC_OBJECT=15)  # object moved less than 15 pix

    if VID_FLAG:
        out.release()

    print('Number of Detections:')
    print(np.shape(df_track))

    if SAVE_FLAG:
        save_in = os.path.join(results_dir, "pred_tracks.pkl")
        df_track.to_pickle(save_in)
        csv_file = os.path.splitext(save_in)[0] + "1.csv"
        df_track.to_csv(csv_file, sep='\t', encoding='utf-8')  # to_csv returns None when given a path
Example 13
pyflow_alpha = 1        # Regularization weight
pyflow_ratio = 0.75     # Downsampling ratio
pyflow_minWidth = 3    # Width of the coarsest level
pyflow_nOuterFPIterations = 5   # Number of outer fixed point iterations
pyflow_nInnerFPIterations = 1   # Number of inner fixed point iterations
pyflow_nSORIterations = 30  # Number of SOR iterations
pyflow_colType = 0      # 0: RGB; 1: Grayscale

image1 = skimage.io.imread(argv[1])
image2 = skimage.io.imread(argv[2])

h, w = image1.shape[:2]

u, v, im2W = pyflow.coarse2fine_flow(
    skimage.util.img_as_float(image1),
    skimage.util.img_as_float(image2),
    pyflow_alpha,
    pyflow_ratio,
    pyflow_minWidth,
    pyflow_nOuterFPIterations,
    pyflow_nInnerFPIterations,
    pyflow_nSORIterations,
    pyflow_colType
)

mag = np.sum(np.sqrt(u * u + v * v))
meanMag = mag / (h * w)

print('Mag: {} ; MeanMag: {}'.format(mag, meanMag))
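The mean magnitude printed above can double as a cheap motion gate, e.g. to skip near-static frame pairs; a sketch where the 0.05 px threshold is purely illustrative and should be tuned per dataset:

MOTION_THRESHOLD = 0.05  # illustrative mean-displacement threshold, in pixels

if meanMag < MOTION_THRESHOLD:
    print('near-static frame pair; skipping downstream processing')
else:
    print('significant motion detected')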
Example 14
    for i in range(25, 100, 1):
        sample = torch.FloatTensor(2, N_FRAME - 1, 128 * 160)
        model, dictionary, Drr, Dtheta = loadModel(ckpt_file)
        print(i)
        for ii in range(i, FRA + i):  # for INCR WNDW range(25,FRA+i)
            imgname = os.path.join(rootDir, folder, frames[ii])
            img = Image.open(imgname)
            img1 = process_im(np.array(img)) / 255.

            imgname = os.path.join(rootDir, folder, frames[ii + 1])
            print(imgname)
            img = Image.open(imgname)
            img2 = process_im(np.array(img)) / 255.

            u, v, _ = pyflow.coarse2fine_flow(img2, img1, alpha, ratio,
                                              minWidth, nOuterFPIterations,
                                              nInnerFPIterations,
                                              nSORIterations, colType)

            flow = np.concatenate((u[..., None], v[..., None]), axis=2)

            flow = np.transpose(flow, (2, 0, 1))

            # for INCR WNDW: sample[:, ii - 25, :]
            sample[:, ii - i, :] = torch.from_numpy(
                flow.reshape(2, 128 * 160)).type(torch.FloatTensor)

        alSample = alignOFtest(sample.view(2, FRA, 128, 160).unsqueeze(0), FRA)
        showOFs(alSample, sample.view(2, FRA, 128, 160).unsqueeze(0), FRA)
        # alSample = sample
        imgname = os.path.join(rootDir, folder, frames[ii + 2])