Example no. 1
        def onmouse(event, x, y, flags, param):
            crt_vis = vis
            if flags & cv2.EVENT_FLAG_LBUTTON:
                crt_vis = vis0.copy()
                r = 3

                m = (common_cv.anorm(p1_local - (x, y)) < r) | \
                    (common_cv.anorm(p2_local - (x, y)) < r)
                idxs = np.where(m)[0]
                kp1s, kp2s = [], []

                assert len(p1_local) == len(p2_local)

                for i in idxs:
                    (x1, y1), (x2, y2) = p1_local[i], p2_local[i]

                    try:
                        col = (red, green)[status_local[i]]
                    except:
                        common.DebugPrintErrorTrace()
                        common.DebugPrint("onmouse() exception: i = %d, "
                                          "len(status_local) = %d" %
                                          (i, len(status_local)))
                        col = red

                    cv2.line(crt_vis, (x1, y1), (x2, y2), col)

                    try:
                        kp1, kp2 = kp_pairs[i]
                        kp1s.append(kp1)
                        kp2s.append(kp2)
                    except:
                        common.DebugPrintErrorTrace()
                        common.DebugPrint(
                            "onmouse() exception2: i = %d, "
                            "len(kp_pairs)=%d, len(idxs)=%d, idxs=%s" %
                            (i, len(kp_pairs), len(idxs), str(idxs)))

                crt_vis = cv2.drawKeypoints(crt_vis,
                                            kp1s,
                                            flags=4,  # = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
                                            color=kp_color)
                crt_vis[:, w_q:] = cv2.drawKeypoints(crt_vis[:, w_q:],
                                                     kp2s,
                                                     flags=4,  # = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
                                                     color=kp_color)

            cv2.imshow(win, crt_vis)
Example no. 2
def multiscale_quad_tree(r_path, threshold, scale_index):
    common.DebugPrint("Entered multiscale_quad_tree(scale_index=%d)" %
                      scale_index)

    t1 = float(cv2.getTickCount())

    found_files = False

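    # Try to reuse results cached for this scale by a previous run (the
    # np.savez_compressed() calls further below produce these NPZ files).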
    try:
        all_quads = np.load("all_quads%d.npz" % scale_index)['arr_0']
        all_id = np.load("all_id%d.npz" % scale_index)['arr_0']
        all_cen = np.load("all_cen%d.npz" % scale_index)['arr_0']
        all_max = np.load("all_max%d.npz" % scale_index)['arr_0']
        all_ori = np.load("all_ori%d.npz" % scale_index)['arr_0']
        n_d = np.load("n_d%d.npz" % scale_index)['arr_0']
        found_files = True

        common.DebugPrint("multiscale_quad_tree(): loaded the NPZ files for "
                          "this scale.")
        print("multiscale_quad_tree(scale_index=%d): time before starting the "
              "processing for the given query video = %s" %
              (scale_index,
               common.GetCurrentDateTimeStringWithMilliseconds()))
    except:
        common.DebugPrintErrorTrace()

    if not found_files:
        rd = r_path

        # OpenCV's KD-tree implementation does NOT accept float64
        all_quads = np.array([]).astype(np.float32)
        all_cen = np.array([]).astype(np.float32)
        all_id = np.array([]).astype(np.float32)
        all_max = np.array([]).astype(np.float32)
        all_ori = np.array([]).astype(np.float32)

        n_d = np.zeros((len(rd), 1))

        if common.MY_DEBUG_STDOUT:
            common.DebugPrint("multiscale_quad_tree(): n_d.shape = "
                              "%s" % str(n_d.shape))
            common.DebugPrint("multiscale_quad_tree(): n_d = %s" % str(n_d))

        # Alex: for each reference video frame we compute the quads
        for iFor in range(len(rd)):
            # Alex: IMPORTANT: pp references the multiscale Harris features of
            # reference frame iFor (in the original Matlab code these were
            # loaded from the file r_path+rd(i).name).
            # load harris locations of image (already computed)
            pp = r_path[iFor]

            if iFor % 10 == 0:
                common.DebugPrint("multiscale_quad_tree(): iFor = %d" % iFor)
                common.DebugPrint("multiscale_quad_tree():   scale_index = "
                                  "%s" % str(scale_index))
                common.DebugPrint("multiscale_quad_tree():   threshold = "
                                  "%s" % str(threshold))

            # Alex: We put in points the rows of pp having the
            # 3rd element == scale_index, and we take out the 3rd column of pp
            # [out,cen,maxdis,ori]=findquads(pp(pp(:,3)==scale_index,1:2),threshold,1)
            points = pp[pp[:, 2] == scale_index, 0:2]
            out, cen, maxdis, ori = findquads.findquads(points, threshold, 1)

            n_d[iFor] = out.shape[0]

            temp = np.zeros((out.shape[0], 1)) + iFor

            if common.MY_DEBUG_STDOUT:
                common.DebugPrint("Initially:")
                common.DebugPrint(
                    "  multiscale_quad_tree(): all_quads.shape = %s" %
                    str(all_quads.shape))
                common.DebugPrint("  multiscale_quad_tree(): all_quads = %s" %
                                  str(all_quads))

                common.DebugPrint(
                    "  multiscale_quad_tree(): all_max.shape = %s" %
                    str(all_max.shape))
                common.DebugPrint("  multiscale_quad_tree(): all_max = %s" %
                                  str(all_max))

                common.DebugPrint(
                    "  multiscale_quad_tree(): maxdis.shape = %s" %
                    str(maxdis.shape))
                common.DebugPrint("  multiscale_quad_tree(): maxdis = %s" %
                                  str(maxdis))

                common.DebugPrint(
                    "  multiscale_quad_tree(): all_ori.shape = %s" %
                    str(all_ori.shape))
                common.DebugPrint("  multiscale_quad_tree(): ori.shape = %s" %
                                  str(ori.shape))
                common.DebugPrint("  multiscale_quad_tree(): ori = %s" %
                                  str(ori))

                common.DebugPrint(
                    "  multiscale_quad_tree(): all_cen.shape = %s" %
                    str(all_cen.shape))
                common.DebugPrint("  multiscale_quad_tree(): all_cen = %s" %
                                  str(all_cen))

                common.DebugPrint("  multiscale_quad_tree(): cen.shape = %s" %
                                  str(cen.shape))
                common.DebugPrint("  multiscale_quad_tree(): cen = %s" %
                                  str(cen))

                common.DebugPrint("  multiscale_quad_tree(): out.shape = %s" %
                                  str(out.shape))
                common.DebugPrint("  multiscale_quad_tree(): out = %s" %
                                  str(out))

            if out.size == 0:
                assert (cen.size == 0)
                assert (maxdis.size == 0)
                assert (ori.size == 0)
                continue
            """
            It crashes at
                all_quads = np.r_[all_quads, out]
            with "ValueError: array dimensions must agree except for d_0"
            because:
                multiscale_quad_tree(): out = []
                multiscale_quad_tree(): out.shape = (2, 0)
            """

            if all_quads.size == 0:
                all_quads = out.copy()
            else:
                all_quads = np.r_[all_quads, out]

            if all_cen.size == 0:
                all_cen = cen.copy()
            else:
                all_cen = np.r_[all_cen, cen]

            if all_id.size == 0:
                all_id = temp.copy()
            else:
                all_id = np.r_[all_id, temp]

            if all_max.size == 0:
                all_max = maxdis.copy()
            else:
                all_max = np.r_[all_max, maxdis]

            if all_ori.size == 0:
                all_ori = ori.copy()
            else:
                all_ori = np.r_[all_ori, ori]

        try:
            np.savez_compressed("all_quads%d" % scale_index, all_quads)
            np.savez_compressed("all_id%d" % scale_index, all_id)
            np.savez_compressed("all_cen%d" % scale_index, all_cen)
            np.savez_compressed("all_max%d" % scale_index, all_max)
            np.savez_compressed("all_ori%d" % scale_index, all_ori)
            np.savez_compressed("n_d%d" % scale_index, n_d)
        except:
            common.DebugPrintErrorTrace()

        if all_quads.size == 0:
            t2 = float(cv2.getTickCount())
            my_time = (t2 - t1) / cv2.getTickFrequency()
            common.DebugPrint("multiscale_quad_tree() "
                              "took %.6f [sec]" % my_time)

            return None, all_id, all_cen, all_max, all_ori, n_d, all_quads

    t_build1 = float(cv2.getTickCount())

    if config.KDTREE_IMPLEMENTATION == 0:
        tree = spatial.KDTree(all_quads)
    elif config.KDTREE_IMPLEMENTATION == 1:
        # TODO: try to use exact NN-search for the kd-tree -
        #  see http://docs.opencv.org/trunk/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html
        all_quads = all_quads.astype(np.float32)
        tree = cv2.flann_Index(features=all_quads, params=config.FLANN_PARAMS)

    t_build2 = float(cv2.getTickCount())
    my_time_build = (t_build2 - t_build1) / cv2.getTickFrequency()
    print("multiscale_quad_tree(): KD-tree build "
          "took %.6f [sec]" % my_time_build)

    if common.MY_DEBUG_STDOUT:
        common.DebugPrint("At the end:")
        common.DebugPrint("  multiscale_quad_tree(): all_id.shape = %s" %
                          str(all_id.shape))
        common.DebugPrint("  multiscale_quad_tree(): all_id = %s" %
                          str(all_id))
        common.DebugPrint("  multiscale_quad_tree(): all_cen.shape = %s" %
                          str(all_cen.shape))
        common.DebugPrint("  multiscale_quad_tree(): all_cen = %s" %
                          str(all_cen))
        common.DebugPrint("  multiscale_quad_tree(): all_max.shape = %s" %
                          str(all_max.shape))
        common.DebugPrint("  multiscale_quad_tree(): all_max = %s" %
                          str(all_max))
        common.DebugPrint("  multiscale_quad_tree(): all_ori.shape = %s" %
                          str(all_ori.shape))
        common.DebugPrint("  multiscale_quad_tree(): all_ori = %s" %
                          str(all_ori))
        common.DebugPrint("  multiscale_quad_tree(): n_d.shape = %s" %
                          str(n_d.shape))
        common.DebugPrint("  multiscale_quad_tree(): n_d = %s" % str(n_d))
        common.DebugPrint("  multiscale_quad_tree(): all_quads.shape = %s" %
                          str(all_quads.shape))
        common.DebugPrint("  multiscale_quad_tree(): all_quads = %s" %
                          str(all_quads))
        common.DebugPrint("  multiscale_quad_tree(): all_quads.shape before "
                          "kd-tree = %s" % str(all_quads.shape))

        try:
            common.DebugPrint(
                "multiscale_quad_tree(): sys.getsizeof(tree) = %s" %
                str(sys.getsizeof(tree)))
        except:
            common.DebugPrintErrorTrace()

    t2 = float(cv2.getTickCount())
    my_time = (t2 - t1) / cv2.getTickFrequency()
    print("multiscale_quad_tree() took %.6f [sec]" % my_time)

    return tree, all_id, all_cen, all_max, all_ori, n_d, all_quads
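

# A minimal, hypothetical usage sketch (not part of the original code). It
# assumes harlocs_r is a list of per-frame Harris-location arrays with columns
# [x, y, scale] and that config.KDTREE_IMPLEMENTATION == 0, so the returned
# tree is a scipy.spatial.KDTree.
def _example_multiscale_quad_tree_usage(harlocs_r):
    scale_index = 1
    md_threshold = 200  # hypothetical threshold value
    tree, all_id, all_cen, all_max, all_ori, n_d, all_quads = \
        multiscale_quad_tree(harlocs_r, md_threshold, scale_index)
    if tree is None:
        return None  # no quads were found at this scale
    # Query the kd-tree with the first stored quad descriptor (4 floats);
    # query_ball_point() returns the indices of all stored quads within the
    # given (hypothetical) radius.
    neighbor_idx = tree.query_ball_point(all_quads[0], 0.1)
    return neighbor_idx
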
Example no. 3
def QuadTreeDecision():
    """
    global r_path, q_path
    common.DebugPrint("QuadTreeDecision(): r_path = %s" % r_path);
    common.DebugPrint("QuadTreeDecision(): q_path = %s" % q_path);
    """
    #global harlocsQ, harlocsR

    common.DebugPrint("Entered QuadTreeDecision().")

    totalT1 = float(cv2.getTickCount())

    r_quadsTree = None
    """
    Matlab code:
    www=whos('tree');
    if size(www,1)>0
        kdtree_delete(tree);
        clear tree;
    end
    """

    #clear Votes_space H;
    Votes_space = None
    H = None

    #['arr_0']

    if r_quadsTree is not None:
        # TODO: clear tree
        pass

    if method == 1:
        #%% search among all reference quads
        common.DebugPrint(
            "\nQuadTreeDecision(): Search among all reference quads...(Tree method)"
        )

        try:
            crossref = np.load("crossref.npz")['arr_0']
            common.DebugPrint(
                "\nQuadTreeDecision(): Found already precomputed crossref.npz - returning it)"
            )
            return crossref
        except:
            common.DebugPrintErrorTrace()

        #BOV_flag=0;
        BOV_flag = 0

        foundFiles = False
        try:
            Votes_space = np.load("Votes_space.npz")['arr_0']
            H = np.load("H.npz")['arr_0']
            foundFiles = True
        except:
            common.DebugPrintErrorTrace()

        if not foundFiles:
            """
            Alex: scale s is 1 for the original frame resolution; the higher
              we go, the lower the image resolution (we go higher in the
              Gaussian pyramid, I think).
            """
            #for s=1:nos
            for s in range(1, nos + 1):
                common.DebugPrint("QuadTreeDecision(): Scale %d" % s)

                #md_threshold=round(s*100+100^(log(s)));
                md_threshold = round(s * 100 + pow(100, math.log(s)))
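                # For example (roughly): s=1 -> round(100 + 100**0) = 101,
                # s=2 -> round(200 + 100**log(2)) ~= 224,
                # s=3 -> round(300 + 100**log(3)) ~= 457, i.e. the quad-size
                # threshold grows with the scale index.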

                #[tree,all_id,all_cen,all_max,all_ori,n_d,all_quads]=multiscale_quad_tree(r_path, md_threshold,s);
                #tree, all_id, all_cen, all_max, all_ori, n_d, all_quads = multiscale_quad_tree.multiscale_quad_tree(r_path, md_threshold, s)
                r_quadsTree, all_id, all_cen, all_max, all_ori, n_d, all_quads = \
                        multiscale_quad_tree.multiscale_quad_tree(harlocsR, \
                                                                md_threshold, s)

                if config.PREPROCESS_REFERENCE_VIDEO_ONLY:
                    continue

                if r_quadsTree is None:
                    continue

                common.DebugPrint("QuadTreeDecision(): md_threshold = %s" %
                                  str(md_threshold))

                #[Votes_space(:,:,s),H(:,:,s)]=multiscale_quad_retrieval(tree, r_path, q_path, md_threshold, st_threshold, all_ori, all_id, all_max, all_cen,nos, s, cropflag, sequence);
                # Votes_space(:,:,s),H(:,:s)  =multiscale_quad_retrieval(tree, r_path, q_path, md_threshold, st_threshold, all_ori, all_id, all_max, all_cen, nos, s, cropflag, sequence)
                #Votes_space[:, :, s - 1], H[:, :,s - 1] = multiscale_quad_retrieval.multiscale_quad_retrieval(r_quadsTree, harlocsR, harlocsQ, md_threshold, st_threshold, all_ori, all_id, all_max, all_cen, nos, s, cropflag, sequence)
                Votes_space_res, H_res = multiscale_quad_retrieval.multiscale_quad_retrieval(r_quadsTree, \
                                            harlocsR, harlocsQ, md_threshold, \
                                            st_threshold, all_ori, all_id, \
                                            all_max, all_cen, nos, s, cropflag, \
                                            sequence)

                if Votes_space is None:
                    Votes_space = np.zeros((Votes_space_res.shape[0],
                                            Votes_space_res.shape[1], nos))
                    """
                    Inspired from https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
                        BUT doesn't help in this case:
                    Votes_space = np.asfortranarray(np.zeros( (Votes_space_res.shape[0], Votes_space_res.shape[1], nos) ));
                    """
                if H is None:
                    H = np.zeros((H_res.shape[0], H_res.shape[1], nos),
                                 dtype=np.int8)
                    """
                    Inspired from https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
                        BUT doesn't help in this case:
                    H = np.asfortranarray(np.zeros( (H_res.shape[0], H_res.shape[1], nos) ));
                    """
                Votes_space[:, :, s - 1] = Votes_space_res
                H[:, :, s - 1] = H_res
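                # At this point Votes_space has shape
                # (numFramesR, numFramesQ, nos): for each scale s it stores the
                # votes each reference frame receives from each query frame,
                # and H stores the corresponding spatial-consistency flags.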

                if common.MY_DEBUG_STDOUT:
                    common.DebugPrint("QuadTreeDecision(): For scale %d: " \
                        "Votes_space_res = %s,\n      H_res = %s" % \
                        (s, str(Votes_space_res), str(H_res)))

                    common.DebugPrint("QuadTreeDecision(): For scale %d: " \
                        "Votes_space_res.shape = %s,\n      H_res.shape = %s" % \
                        (s, str(Votes_space_res.shape), str(H_res.shape)))
                #quit();
                #kdtree_delete(tree); # TODO: think if want to delete kdtree
                if config.KDTREE_IMPLEMENTATION == 1:
                    r_quadsTree.release()

        if config.PREPROCESS_REFERENCE_VIDEO_ONLY:
            common.DebugPrint("QuadTreeDecision(): Exiting program " \
                              "since we finished preprocessing the reference video")
            common.DebugPrint("QuadTreeDecision(): time before exit = %s" % \
                    common.GetCurrentDateTimeStringWithMilliseconds())
            return None
            #quit();

        if common.MY_DEBUG_STDOUT:
            common.DebugPrint("QuadTreeDecision(): Before multiscale_synchro_decision(): " \
                "Votes_space = %s,\n      H = %s" % (str(Votes_space), str(H)))

        try:
            # See http://docs.scipy.org/doc/numpy/reference/generated/numpy.savez.html
            np.savez_compressed("Votes_space", Votes_space)
            np.savez_compressed("H", H)
        except:
            common.DebugPrintErrorTrace()

        #q_path = [None] * len(harlocsQ);
        numFramesQ = len(harlocsQ)
        #r_path = [None] * len(harlocsR);
        numFramesR = len(harlocsR)

        if config.temporalDecisionType == 1:
            # causal solution - "local"

            #cross=multiscale_synchro_decision(Votes_space, H, q_path, r_path, BOV_flag, cropflag, const_type);
            crossref = multiscale_synchro_decision.causal( \
                        Votes_space, H, numFramesQ, numFramesR, BOV_flag, cropflag, \
                        const_type)

            # str=['save ' q_path 'cross_baseline cross'];
            # eval(str)
        elif config.temporalDecisionType == 0:
            # decision (non-causal solution)

            #[y,x,D,Tback,cross] = dp3(Votes_space, r_path, q_path, BOV_flag);
            y, x, D, Tback, crossref = multiscale_synchro_decision.dp3( \
                                        Votes_space, numFramesR, numFramesQ, BOV_flag)
            #     str=['save ' q_path 'cross_baseline_dp cross'];
            #     eval(str)
    else:
        """
        !!!!TODO: implement if useful VD (or BoW)
          NOTE: see config.py for Evangelidis' comments from email of Apr 14, 2014:
            Basically he argues that:
            - the VD method is similar in quality with the full-search VS
            - BoW is not great.
        """
        assert False
        # not implemented

    crossref[:, 1] += config.initFrame[1]

    #myText = "crossref = \n%s" % crossref;
    myText = ""
    for r in range(crossref.shape[0]):
        myText += "  %d  %d\n" % (crossref[r][0], crossref[r][1])
    fOutput = open("crossref.txt", "wt")
    fOutput.write(myText)
    fOutput.close()

    try:
        # See http://docs.scipy.org/doc/numpy/reference/generated/numpy.savez.html
        np.savez_compressed("crossref", crossref)
    except:
        common.DebugPrintErrorTrace()

    totalT2 = float(cv2.getTickCount())
    myTime = (totalT2 - totalT1) / cv2.getTickFrequency()
    print("QuadTreeDecision() took %.6f [sec]" % (myTime))

    return crossref
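

# Hypothetical helper (not part of the original code) showing how the
# crossref.txt file written above could be read back. It assumes, as the
# writing loop above suggests, that each row pairs a (query) frame index with
# its matched (reference) frame index.
def _read_crossref_txt_example(path="crossref.txt"):
    pairs = []
    with open(path, "rt") as f:
        for line in f:
            parts = line.split()
            if len(parts) == 2:
                pairs.append((int(parts[0]), int(parts[1])))
    return pairs
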
Example no. 4
def annotate_vis(win, vis, input_frame, ref_frame, status_local):
    global vis_orig
    """
    global vis, vis0, winGlobal, p1, p2, statusGlobal
    winGlobal = win
    """

    vis_orig = vis.copy()
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    h_q, w_q = input_frame.shape[:2]
    h_r, w_r = ref_frame.shape[:2]

    blue = (255, 0, 0)
    pink = (255, 128, 255)
    white = (255, 255, 255)

    kp_color = (51, 103, 236)
    red = (0, 0, 255)
    green = (0, 255, 0)
    green_gray = (51, 173, 136)

    if config.USE_GUI or config.SAVE_FRAMES:
        # DRAW THE KEYPOINTS

        # Alex: we represent also ALL THE features of the 2 images:
        for e in kp1[counter_q]:
            cv2.circle(vis, (int(e.pt[0]), int(e.pt[1])), 10, blue, -1)

        # We draw for image2, hence + wQ translation on horizontal
        for e in kp2[counter_r]:
            cv2.circle(vis, (int(e.pt[0] + w_q), int(e.pt[1])), 10, pink, -1)
        """
        # Alex: we represent also all the matched features of the 2 images:
        for e in ftImg1:
            cv2.circle(vis, (e[0], e[1]), 4, blue, -1)
        # We draw for image2, hence + wQ translation on horizontal
        for e in ftImg2:
            #print "e =", e
            cv2.circle(vis, (int(e[0]) + int(wQ), int(e[1])), 5, pink, -1)
            #cv2.circle(vis, (e[0], e[1]), 2, pink, -1)
        """
        """
        Note: nonp1 is updated by Clustering.HierarchicalClustering() - it
            represents the non-matched features that form a dense
            cluster of more than THRESHOLD_NUM_NONMATCHED_ELEMENTS_IN_CLUSTER
            elements.

        We draw the NON-matched features for frame of video A:
        """
        for e in nonp1:
            cv2.circle(vis, (int(e[0]), int(e[1])), 7, green_gray, -1)

        # We draw the NON-matched features for frame of video B:
        for e in nonp2:
            cv2.circle(vis, (int(e[0] + w_q), int(e[1])), 7, green_gray, -1)

        # END DRAW THE KEYPOINTS

    if config.USE_GUI or config.SAVE_FRAMES:
        """
        We draw the homography transformation estimated from the identified
            features - H is basically a rotation plus zoom (a transformation
            in homogeneous coordinates).
        """
        if H is not None:
            corners = [[0, 0], [w_q, 0], [w_q, h_q], [0, h_q]]
            print("corners = %s" % str(corners))

            corners = np.float32(corners)

            # See http://stackoverflow.com/questions/6627647/reshaping-a-numpy-array-in-python for description of numpy.reshape()
            try:
                cornersT = cv2.perspectiveTransform(src=corners.reshape(
                    1, -1, 2),
                                                    m=H)
                print("cornersT = %s" % str(cornersT))
                corners = np.int32(cornersT.reshape(-1, 2) + (w_q, 0))
                print("corners = %s" % str(corners))
            except:
                common.DebugPrintErrorTrace()
                common.DebugPrint("Exception at perspectiveTransform(): H=%s" %
                                  str(H))
                corners = np.int32(corners.reshape(-1, 2) + (w_q, 0))
            """
            print "cornersT = %s" % str(cornersT)
            corners = np.int32(cornersT.reshape(-1, 2) + (wQ, 0))
            print "corners = %s" % str(corners)
            """
            cv2.polylines(img=vis,
                          pts=[corners],
                          isClosed=True,
                          color=(255, 255, 255))

    if config.USE_GUI:
        if status_local is None:
            status_local = np.ones(len(kp_pairs), np.bool_)

        if not kp_pairs:
            p1_local = []
            p2_local = []
        else:
            p1_local = np.int32([kpp[0].pt for kpp in kp_pairs])
            p2_local = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w_q, 0)

        common.DebugPrint("len(p1_local) = %d" % len(p1_local))
        common.DebugPrint("len(kp_pairs) = %d" % len(kp_pairs))

        for (x1, y1), (x2, y2), inlier in zip(p1_local, p2_local,
                                              status_local):
            if inlier:
                col = green
                cv2.circle(vis, (x1, y1), 5, col, -1)
                cv2.circle(vis, (x2, y2), 8, col, -1)
            else:
                col = red
                r = 2
                thickness = 4
                # We draw small X marks for the outlier keypoints
                cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col,
                         thickness)
                cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col,
                         thickness)
                cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col,
                         thickness)
                cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col,
                         thickness)
        """
          vis0 is used when clicking the mouse, to make the corresponding
            lines between the 2 images disappear.
        """
        vis0 = vis_orig.copy()

        # This is where we draw the green lines between the 2 images
        for (x1, y1), (x2, y2), inlier in zip(p1_local, p2_local,
                                              status_local):
            if inlier:
                cv2.line(vis, (x1, y1), (x2, y2), green, 2)

        cv2.imshow(win, vis)
        """
        # Used if we make onmouse() a global function
        statusGlobal = statusLocal
        """
        """
        IMPORTANT: a limitation of Python 2.x is that inner functions
            cannot rebind (assign to) variables from the immediately outer
            function scope - here the scope of annotate_vis() - because
            there is no `nonlocal` keyword.
          This is why we differentiate variables with the same name:
            status_local and statusGlobal,
            p1_local and p1.
        """
        def onmouse(event, x, y, flags, param):
            crt_vis = vis
            if flags & cv2.EVENT_FLAG_LBUTTON:
                crt_vis = vis0.copy()
                r = 3

                m = (common_cv.anorm(p1_local - (x, y)) < r) | \
                    (common_cv.anorm(p2_local - (x, y)) < r)
                idxs = np.where(m)[0]
                kp1s, kp2s = [], []

                assert len(p1_local) == len(p2_local)

                for i in idxs:
                    (x1, y1), (x2, y2) = p1_local[i], p2_local[i]

                    try:
                        col = (red, green)[status_local[i]]
                    except:
                        common.DebugPrintErrorTrace()
                        common.DebugPrint("onmouse() exception: i = %d, "
                                          "len(status_local) = %d" %
                                          (i, len(status_local)))
                        col = red

                    cv2.line(crt_vis, (x1, y1), (x2, y2), col)

                    try:
                        kp1, kp2 = kp_pairs[i]
                        kp1s.append(kp1)
                        kp2s.append(kp2)
                    except:
                        common.DebugPrintErrorTrace()
                        common.DebugPrint(
                            "onmouse() exception2: i = %d, "
                            "len(kp_pairs)=%d, len(idxs)=%d, idxs=%s" %
                            (i, len(kp_pairs), len(idxs), str(idxs)))

                crt_vis = cv2.drawKeypoints(crt_vis,
                                            kp1s,
                                            flags=4,  # = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
                                            color=kp_color)
                crt_vis[:, w_q:] = cv2.drawKeypoints(crt_vis[:, w_q:],
                                                     kp2s,
                                                     flags=4,  # = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
                                                     color=kp_color)

            cv2.imshow(win, crt_vis)

        cv2.setMouseCallback(win, onmouse)

    return vis
Example no. 5
def multiscale_quad_retrieval(r_quads_tree, r_harlocs, q_harlocs, md_threshold,
                              st_threshold, all_ori, all_id, all_max, all_cen,
                              nos, scale_index, crop_flag, sequence):
    common.DebugPrint("Entered multiscale_quad_retrieval(): "
                      "md_threshold = %s, st_threshold = %s." %
                      (str(md_threshold), str(st_threshold)))

    assert len(r_harlocs) != 0
    assert len(q_harlocs) != 0

    try:
        votes_space = np.load("votes_space%d.npz" % scale_index)['arr_0']
        HH = np.load("HH%d.npz" % scale_index)['arr_0']
        return votes_space, HH
    except:
        common.DebugPrintErrorTrace()

    if common.MY_DEBUG_STDOUT:
        common.DebugPrint("multiscale_quad_retrieval(): r_quads_tree = %s" %
                          str(r_quads_tree))
        common.DebugPrint(
            "multiscale_quad_retrieval(): len(r_harlocs) = %d" % len(r_harlocs))
        common.DebugPrint(
            "multiscale_quad_retrieval(): r_harlocs = %s" % str(r_harlocs))
        common.DebugPrint(
            "multiscale_quad_retrieval(): q_harlocs = %s" % str(q_harlocs))
        common.DebugPrint(
            "multiscale_quad_retrieval(): md_threshold = %s" % str(
                md_threshold))
        print("multiscale_quad_retrieval(): st_threshold = %s" % str(
            st_threshold))
        common.DebugPrint(
            "multiscale_quad_retrieval(): all_id = %s" % str(all_id))
        common.DebugPrint("multiscale_quad_retrieval(): all_id.shape = %s" % (
            str(all_id.shape)))
        common.DebugPrint(
            "multiscale_quad_retrieval(): sequence = %s" % str(sequence))
        print("multiscale_quad_retrieval(): crop_flag = %s" % str(crop_flag))

    t1 = float(cv2.getTickCount())

    assert scale_index <= nos, "Wrong scale index or number-of-scales"

    # TODO: take out rd_start
    rd_start = 0
    rd_end = len(r_harlocs) - 1

    j = 1

    """
    Inspired from
      https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
        BUT doesn't help in this case:
    votes_space = np.asfortranarray(np.zeros( (len(RD), len(QD)) ))
    """
    votes_space = np.zeros((len(r_harlocs), len(q_harlocs)))

    # HH is allocated separately from votes_space (not a copy/view of it).
    # TODO: maybe use np.bool instead of np.int8 - or remove HH entirely
    HH = np.zeros((len(r_harlocs), len(q_harlocs)), dtype=np.int8)

    # it helps to tighten the threshold as the scale goes up
    tolers = 0.1 - float(scale_index) / 100.0
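    # e.g. scale_index=1 -> tolers=0.09, scale_index=2 -> 0.08,
    # scale_index=3 -> 0.07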

    maxdis = 3 + scale_index
    maxori = 0.25

    # TODO: When using multiprocessing.Pool, each worker returns its votes and
    #  the dispatcher assembles the results, but the results are NOT the same
    #  as in the serial case - they look pretty decent, yet seem suboptimal:
    #  dp_alex returns a suboptimal cost path for USE_MULTITHREADING == True
    #  instead of False.
    #  (Note: running multiscale_quad_retrieval under the same preconditions,
    #   I got the same results in dp_alex().)
    """
    if False: #config.USE_MULTITHREADING == True:
        global g
        g.r_quads_tree = r_quads_tree
        g.r_harlocs = r_harlocs
        g.q_harlocs = q_harlocs
        g.md_threshold = md_threshold
        g.st_threshold = st_threshold
        g.all_ori = all_ori
        g.all_id = all_id
        g.all_max = all_max
        g.all_cen = all_cen
        g.nos = nos
        g.scale_index = scale_index
        g.crop_flag = crop_flag
        g.sequence = sequence
        g.RD_start = RD_start
        g.RD_end = RD_end
        g.maxdis = maxdis
        g.maxori = maxori
        g.tolers = tolers

        #Start worker processes to use on multi-core processor (able to run
        #   in parallel - no GIL issue if each core has its own VM)
    
        pool = multiprocessing.Pool(processes=config.numProcesses)
        print("multiscale_quad_retrieval(): Spawned a pool of %d workers" %
                                config.numProcesses)

        listParams = range(0, len(q_harlocs)) #!!!!TODO: use counterStep, config.initFrame[indexVideo]

        #res = pool.map(iteration_standalone_mqr, listParams)
        # See https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool
        res = pool.map(func=iteration_standalone_mqr, iterable=listParams,
                       chunksize=1)

        print("Pool.map returns %s" % str(res)) #x0.size + 1

        # From https://medium.com/building-things-on-the-internet/40e9b2b36148
        #    close the pool and wait for the work to finish
        pool.close()
        pool.join()

        # Doing the "reduce" phase after the workers have finished :)
        assert len(res) == len(q_harlocs)
        for query_frame, resE in enumerate(res):
            resEIndex = resE[0]
            resE = resE[1]
            assert resEIndex == query_frame
            # Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
            #votes_space[:, query_frame - 1] = votes
            votes_space[:, query_frame] = resE

        for query_frame in range(len(q_harlocs)):
            if crop_flag == 0:
                HH[:, query_frame] = 1
            else:
                HH[:, query_frame] = spatial_consistency.spatial_consistency(space_xy,
                                            qcen, len(r_harlocs), st_threshold, crop_flag)

        try:
            np.savez_compressed("votes_space%d" % scale_index, votes_space)
            np.savez_compressed("HH%d" % scale_index, HH)
        except:
            common.DebugPrintErrorTrace()

        return votes_space, HH
        """

    """
    We substitute q - 1 with q, since we want
      to number arrays from 0 (not from 1 like in Matlab).
    """
    for query_frame in range(len(q_harlocs)):
        common.DebugPrint("multiscale_quad_retrieval(): Starting iteration "
                          "query_frame = %d" % query_frame)

        """
        We make pp reference the desired multiharloc list for the query video
           frame query_frame
        """
        pp = q_harlocs[query_frame]

        points = pp[pp[:, 2] == scale_index, 0:2]
        qout, qcen, qmaxdis, qori = findquads.findquads(points, md_threshold, 0)

        if common.MY_DEBUG_STDOUT:
            print("multiscale_quad_retrieval(): query_frame = %d, "
                  "qout.shape (number of quads for query frame query_frame) = "
                  "%s" % (query_frame, str(qout.shape)))

        space_xy = np.zeros((qcen.shape[0], 2 * len(r_harlocs))) + np.nan
        votes = np.zeros((len(r_harlocs), 1))

        assert isinstance(tolers, float)

        if common.MY_DEBUG_STDOUT:
            common.DebugPrint("multiscale_quad_retrieval(): quads of query "
                              "frame %d are: " % query_frame)
            common.DebugPrint("  qout = %s" % str(qout))

        """
        Alex: for each quad (4 floats) of the query frame from Harris feature of
        scale scale_index
          Note: all_id stores the reference frame id for each quad descriptor.
        """
        """
        We substitute queryFrameQuad - 1 with queryFrameQuad, since we want
            to number arrays from 0 (not from 1 like in Matlab).
        """
        for queryFrameQuad in range(qout.shape[0]):
            common.DebugPrint("multiscale_quad_retrieval(): Starting iteration "
                              "queryFrameQuad = %d" % queryFrameQuad)
            """
            Matlab's polymorphism is really bugging here: although it's
                normally a float, tolers is considered to be a size 1 vector...
                so len(tolers) == 1
            """
            """
            We substitute tol_i - 1 with tol, since we want
                to number arrays from 0 (not from 1 like in Matlab).
            """
            for tol_i in range(1):
                tol = tolers

                # default for first PAMI with tol= 0.1 approximately
                # NOTE: SciPy's KDTree finds a few more results, in some cases,
                #    than the Matlab code from Evangelidis.
                # tol is a scalar representing the radius of the ball
                if config.KDTREE_IMPLEMENTATION == 0:
                    idx = r_quads_tree.query_ball_point(qout[queryFrameQuad, :],
                                                        tol)
                elif config.KDTREE_IMPLEMENTATION == 1:
                    pt = qout[queryFrameQuad, :]
                    pt = np.array([[pt[0], pt[1], pt[2], pt[3]]],
                                  dtype=np.float32)
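                    # Note: with an L2 FLANN index the search radius is
                    # expressed in squared-distance units (like the returned
                    # dists), which is presumably why tol ** 2 is passed here.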
                    retval, idx, dists = r_quads_tree.radiusSearch(
                        query=pt,
                        radius=(tol ** 2),
                        maxResults=NUM_MAX_ELEMS,
                        params=search_params)
                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "radiusSearch's retval (at "
                                          "query_frame=%d, queryFrameQuad=%d) "
                                          "is %d" %
                                          (query_frame, queryFrameQuad, retval))
                    idx = idx[0]
                    dists = dists[0]
                    """
                    Note: retval is the number of neighbors returned from the 
                    radiusSearch().
                    But the idx and the dists can have more elements than the
                    returned retval.
                    """
                    idx = idx[: retval]
                    dists = dists[: retval]

                if common.MY_DEBUG_STDOUT:
                    print("multiscale_quad_retrieval(): "
                          "qout[queryFrameQuad, :] = %s" %
                          str(qout[queryFrameQuad, :]))
                    print("multiscale_quad_retrieval(): "
                          "idx = %s" % str(idx))
                    print("multiscale_quad_retrieval(): "
                          "dists = %s" % str(dists))
                    print("multiscale_quad_retrieval(): "
                          "tol = %s" % str(tol))
                    if config.KDTREE_IMPLEMENTATION == 0:
                        print("multiscale_quad_retrieval(): "
                              "r_quads_tree.data[idx] = %s" %
                              str(r_quads_tree.data[idx]))

                if common.MY_DEBUG_STDOUT:
                    a = qout[queryFrameQuad, :]
                    if config.KDTREE_IMPLEMENTATION == 0:
                        for myI, index in enumerate(idx):
                            b = r_quads_tree.data[index]
                    else:
                        pass
                idx = np.array(idx)

                if common.MY_DEBUG_STDOUT:
                    common.DebugPrint("multiscale_quad_retrieval(): "
                                      "all_max.shape = %s" % str(all_max.shape))
                    common.DebugPrint("multiscale_quad_retrieval(): "
                                      "qmaxdis.shape = %s" % str(qmaxdis.shape))
                    common.DebugPrint("multiscale_quad_retrieval(): "
                                      "qmaxdis = %s" % str(qmaxdis))
                    common.DebugPrint("multiscale_quad_retrieval(): "
                                      "qori.shape = %s" % str(qori.shape))
                    common.DebugPrint("multiscale_quad_retrieval(): "
                                      "qori = %s" % str(qori))

                if len(idx) == 0:
                    # NOT A GOOD IDEA: continue
                    dis_idx = np.array([])
                    ori_idx = np.array([])
                else:
                    if common.MY_DEBUG_STDOUT:
                        print("multiscale_quad_retrieval(): "
                              "queryFrameQuad = %s" % str(queryFrameQuad))
                        print("multiscale_quad_retrieval(): "
                              "all_max[idx] = %s" % str(all_max[idx]))
                        print("multiscale_quad_retrieval(): "
                              "qmaxdis[queryFrameQuad] = %s" %
                              str(qmaxdis[queryFrameQuad]))

                    if USE_GPS_COORDINATES:
                        # We look only at a part of the reference video
                        """
                        Since in some cases the video temporal alignment is
                            difficult to do due to similar portions in the
                            trajectory (see the drone videos, clip 3_some_lake)
                            we "guide" the temporal alignment by restricting
                            the reference frame search space - this is useful
                            when we have the geolocation (GPS) coordinate for
                            each frame.
                        """
                        if common.MY_DEBUG_STDOUT:
                            print("multiscale_quad_retrieval(): "
                                  "all_id = %s" % str(all_id))

                        if all_id.ndim == 2:
                            # TODO: put this at the beginning of the
                            #  function
                            assert all_id.shape[1] == 1
                            """
                            We flatten the array all_id
                              Note: We don't use order="F" since it's
                                    basically 1-D array
                            """
                            all_id = np.ravel(all_id)

                        # TODO: put start and end frame in config - or compute
                        #  it from geolocation
                        sub_idx = np.logical_and((all_id[idx] >= 2030 - 928),
                                                 (all_id[idx] <= 2400 - 928))
                        idx = idx[sub_idx]

                        if common.MY_DEBUG_STDOUT:
                            print("multiscale_quad_retrieval(): "
                                  "all_id = %s" % str(all_id))
                            print("multiscale_quad_retrieval(): "
                                  "sub_idx = %s" % str(sub_idx))
                            print("multiscale_quad_retrieval(): "
                                  "idx = %s" % str(idx))

                    if FILTER:
                        dis_idx = np.abs(
                            qmaxdis[queryFrameQuad] - all_max[idx]) < maxdis

                        if common.MY_DEBUG_STDOUT:
                            common.DebugPrint("multiscale_quad_retrieval(): "
                                              "dis_idx = %s" % str(dis_idx))

                        idx = idx[dis_idx]

                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "idx (after idx = idx[dis_idx]) = "
                                          "%s" % str(idx))

                    if FILTER:
                        ori_idx = np.abs(
                            qori[queryFrameQuad] - all_ori[idx]) < maxori

                        if common.MY_DEBUG_STDOUT:
                            common.DebugPrint("multiscale_quad_retrieval(): "
                                              "ori_idx = %s" % str(ori_idx))

                        idx = idx[ori_idx]

                # IMPORTANT ###################################################
                # IMPORTANT ###################################################
                # IMPORTANT ###################################################
                # spatio-temporal consistency
                # IMPORTANT ###################################################
                # IMPORTANT ###################################################
                # IMPORTANT ###################################################

                if idx.size > 0:
                    if crop_flag == 0:
                        if FILTER:
                            """
                            Alex: this is a simple procedure of eliminating 
                            False Positive (FP) matches, as presented in 
                            Section 4.2 of TPAMI 2013 paper.
                            Basically it filters out quad matches that have
                            centroids st_threshold away from the query quad.
                            Note: all_cen are the centroids of all reference
                                quads.
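
                            Worked example with made-up numbers: for a query
                            quad centroid (120, 80) and a candidate reference
                            centroid (118, 84), D = 2**2 + 4**2 = 20, so the
                            match is kept only if st_threshold**2 > 20, i.e.
                            st_threshold > ~4.47.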
                            """
                            dy = qcen[queryFrameQuad, 0] - all_cen[idx, 0]
                            dx = qcen[queryFrameQuad, 1] - all_cen[idx, 1]
                            D = dy ** 2 + dx ** 2
                            co_idx = D < pow(st_threshold, 2)
                            idx = idx[co_idx]
                    else:
                        """
                        We substitute iii - 1 with iii, since we want
                            to number arrays from 0 (not from 1 like in Matlab).
                        """
                        for iii in range(len(idx)):
                            # Write the 2-D centroid of this matched reference
                            # quad into the two columns reserved for its
                            # reference frame in space_xy.
                            ref_frame = int(all_id[idx[iii]]) - rd_start
                            space_xy[queryFrameQuad,
                                     ref_frame * 2: ref_frame * 2 + 2] = \
                                all_cen[idx[iii], :]

                    # It has to be an np.array because we multiply it with a
                    # scalar
                    histo_range = np.array(range(rd_start, rd_end + 1))
                    hh = Matlab.hist(x=all_id[idx], binCenters=histo_range)

                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "hh = %s" % (str(hh)))
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "hh.shape = %s" % (str(hh.shape)))
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "all_id.shape = %s" % (
                                              str(all_id.shape)))
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "idx = %s" % (str(idx)))
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "idx.shape = %s" % (str(idx.shape)))

                    # nz can be computed more optimally
                    nz = np.nonzero(hh != 0)[0]
                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "nz = %s" % (str(nz)))
                        common.DebugPrint("multiscale_quad_retrieval(): "
                                          "nz.shape = %s" % (str(nz.shape)))

                    if nz.size > 0:
                        my_val = pow(
                            math.log10(float(len(r_harlocs)) / len(nz)), 2)

                        if common.MY_DEBUG_STDOUT:
                            common.DebugPrint("multiscale_quad_retrieval(): "
                                              "len(r_harlocs) = %d" % len(
                                               r_harlocs))
                            common.DebugPrint("multiscale_quad_retrieval(): "
                                              "len(nz) = %d" % len(nz))
                            common.DebugPrint("multiscale_quad_retrieval(): "
                                              "my_val = %.5f" % my_val)
                        # PREVIOUSLY
                        votes[nz, tol_i] = votes[nz, tol_i] + my_val
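                        # Worked example with made-up numbers: with
                        # len(r_harlocs) = 500 reference frames and len(nz) = 5
                        # distinct reference frames hit by this quad,
                        # my_val = log10(500 / 5)**2 = 4.0, so rarer (more
                        # discriminative) quads contribute larger votes.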

        if common.MY_DEBUG_STDOUT:
            print("multiscale_quad_retrieval(): "
                  "votes.shape = %s" % (str(votes.shape)))
            if (np.abs(votes) < 1.0e-10).all():
                print("multiscale_quad_retrieval(): votes = 0 (all zeros)")
            else:
                print("multiscale_quad_retrieval(): votes = %s" % (str(votes)))

        # Note: since votes is basically a 1-D vector, we don't use the
        # Fortran order
        votes_space[:, query_frame] = np.ravel(votes)

        if crop_flag == 0:
            HH[:, query_frame] = 1
        else:
            HH[:, query_frame] = spatial_consistency.spatial_consistency(
                space_xy,
                qcen, len(r_harlocs), st_threshold, crop_flag)

    if common.MY_DEBUG_STDOUT:
        print("multiscale_quad_retrieval(scale_index=%d): "
              "votes_space =\n%s" % (scale_index, str(votes_space)))

    try:
        np.savez_compressed("votes_space%d" % scale_index, votes_space)
        np.savez_compressed("HH%d" % scale_index, HH)
    except:
        common.DebugPrintErrorTrace()

    t2 = float(cv2.getTickCount())
    my_time = (t2 - t1) / cv2.getTickFrequency()
    print("multiscale_quad_retrieval() took %.6f [sec]" % my_time)

    return votes_space, HH
Example no. 6
def multiscale_quad_retrieval(r_quadsTree, r_harlocs, q_harlocs, md_threshold, st_threshold, \
            all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag, \
            sequence):
    common.DebugPrint("Entered multiscale_quad_retrieval(): " \
                        "md_threshold = %s, st_threshold = %s." % \
                        (str(md_threshold), \
                        str(st_threshold)))

    assert len(r_harlocs) != 0
    assert len(q_harlocs) != 0

    try:
        Votes_space = np.load("Votes_space%d.npz" % scale_index)['arr_0']
        HH = np.load("HH%d.npz" % scale_index)['arr_0']
        return Votes_space, HH
    except:
        common.DebugPrintErrorTrace()

    if common.MY_DEBUG_STDOUT and DBGPRINT:
        common.DebugPrint("multiscale_quad_retrieval(): r_quadsTree = %s" % \
                            str(r_quadsTree))

        common.DebugPrint("multiscale_quad_retrieval(): len(r_harlocs) = %d" %
                          len(r_harlocs))
        common.DebugPrint("multiscale_quad_retrieval(): r_harlocs = %s" %
                          str(r_harlocs))

        common.DebugPrint("multiscale_quad_retrieval(): q_harlocs = %s" %
                          str(q_harlocs))
        common.DebugPrint("multiscale_quad_retrieval(): md_threshold = %s" %
                          str(md_threshold))
        print("multiscale_quad_retrieval(): st_threshold = %s" %
              str(st_threshold))
        #common.DebugPrint("multiscale_quad_retrieval(): all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag = %s" % str(all_ori, all_id, all_max, all_cen, nos, scale_index, cropflag));
        common.DebugPrint("multiscale_quad_retrieval(): all_id = %s" %
                          str(all_id))
        common.DebugPrint("multiscale_quad_retrieval(): all_id.shape = %s" %
                          (str(all_id.shape)))
        #common.DebugPrint("multiscale_quad_retrieval(): all_max, all_cen, nos, scale_index, cropflag = %s" % str(all_max, all_cen, nos, scale_index, cropflag));
        #common.DebugPrint("multiscale_quad_retrieval(): all_max = %s" % str(all_max));
        #common.DebugPrint("multiscale_quad_retrieval(): all_cen, nos, scale_index, cropflag = %s" % str(all_cen, nos, scale_index, cropflag));
        common.DebugPrint("multiscale_quad_retrieval(): sequence = %s" %
                          str(sequence))
        print("multiscale_quad_retrieval(): cropflag = %s" % str(cropflag))

    t1 = float(cv2.getTickCount())

    #error('Wrong scale index or number-of-scales');
    assert scale_index <= nos, "Wrong scale index or number-of-scales"

    #QD = dir([q_path "multiharlocs*.mat"])
    #QD = [q_path + "multiharlocs*.mat"]
    #QD = q_harlocs;

    #RD = dir([r_path "multiharlocs*.mat"])
    #RD = [r_path + "multiharlocs*.mat"]
    #RD = r_harlocs;

    #TODO: take out RD_start
    #RD_start = str2num(RD(1).name(end - 9 : end - 4))
    #RD_start = int(RD[0][-9 : -4])
    RD_start = 0

    #RD_end = str2num(RD(end).name(end - 9 : end - 4))
    #RD_end = int(RD[-1][-9 : -4])
    #RD_end = len(RD) - 1;
    RD_end = len(r_harlocs) - 1

    if False:  # n_d not used anywhere
        #n_d = hist(all_id, RD_start : RD_end)
        #n_d = hist[all_id, RD_start : RD_end]
        n_d = Matlab.hist(x=all_id, \
                      binCenters=np.array(range(RD_start, RD_end + 1)) )

        #cross_indices = np.zeros( (len(QD), 2) );
        cross_indices = np.zeros((len(q_harlocs), 2))

    j = 1

    #tic
    #ORI = np.array([]); # ORI NOT used anywhere
    """
    Inspired from
      https://stackoverflow.com/questions/17559140/matlab-twice-as-fast-as-numpy
        BUT doesn't help in this case:
    Votes_space = np.asfortranarray(np.zeros( (len(RD), len(QD)) ));
    """
    #Votes_space = np.zeros( (len(RD), len(QD)) );
    Votes_space = np.zeros((len(r_harlocs), len(q_harlocs)))

    # Make a distinct copy of HH from Votes_space...
    #HH = Votes_space.copy().astype(np.int16); #Votes_space + 0;
    #HH = np.zeros((len(RD), len(QD)), dtype=np.int8);
    HH = np.zeros((len(r_harlocs), len(q_harlocs)), dtype=np.int8)
    #!!!!TODO use MAYBE even np.bool - OR take it out

    #common.DebugPrint("multiscale_quad_retrieval(): Votes_space = %s,\n       HH = %s" % (str(Votes_space), str(HH)))

    tolers = 0.1 - float(scale_index) / 100.0
    # it helps to tighten the threshold as the scale goes up
    # tolers = 0.15 - float(scale_index) / 100.0;

    MAXDIS = 3 + scale_index
    MAXORI = 0.25
    """
    !!!!TODO TODO: I am using multiprocessing.Pool and return votes;
      the dispatcher assembles the results,
        but the results are NOT the same with the serial case - although they
           look pretty decent, but they seem to be suboptimal - dp_Alex returns
             suboptimal cost path for USE_MULTITHREADING == True instead of
             False.
             (Note: running under the same preconditions
                 multiscale_quad_retrieval I got the same results in dp_Alex().
    """
    if False:  #config.USE_MULTITHREADING == True:
        global g
        g.r_quadsTree = r_quadsTree
        g.r_harlocs = r_harlocs
        g.q_harlocs = q_harlocs
        g.md_threshold = md_threshold
        g.st_threshold = st_threshold
        g.all_ori = all_ori
        g.all_id = all_id
        g.all_max = all_max
        g.all_cen = all_cen
        g.nos = nos
        g.scale_index = scale_index
        g.cropflag = cropflag
        g.sequence = sequence
        g.RD_start = RD_start
        g.RD_end = RD_end
        g.MAXDIS = MAXDIS
        g.MAXORI = MAXORI
        g.tolers = tolers
        """
        Start worker processes to use on multi-core processor (able to run
           in parallel - no GIL issue if each core has its own VM)
        """
        pool = multiprocessing.Pool(processes=config.numProcesses)
        print("multiscale_quad_retrieval(): Spawned a pool of %d workers" % \
                                config.numProcesses)

        listParams = range(0, len(q_harlocs))
        #!!!!TODO: use counterStep, config.initFrame[indexVideo]

        #res = pool.map(IterationStandaloneMQR, listParams);
        # See https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool
        res = pool.map(func=IterationStandaloneMQR, iterable=listParams, \
                        chunksize=1)

        print("Pool.map returns %s" % str(res))
        #x0.size + 1
        """
        From https://medium.com/building-things-on-the-internet/40e9b2b36148
            close the pool and wait for the work to finish
        """
        pool.close()
        pool.join()

        # Doing the "reduce" phase after the workers have finished :)
        assert len(res) == len(q_harlocs)
        for queryFrame, resE in enumerate(res):
            resEIndex = resE[0]
            resE = resE[1]
            assert resEIndex == queryFrame
            # Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
            #Votes_space[:, queryFrame - 1] = votes;
            Votes_space[:, queryFrame] = resE

        for queryFrame in range(len(q_harlocs)):
            if cropflag == 0:
                HH[:, queryFrame] = 1
            else:
                """
                HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
                                            qcen, len(RD), st_threshold, cropflag);
                """
                HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
                                            qcen, len(r_harlocs), st_threshold, cropflag)

        try:
            np.savez_compressed("Votes_space%d" % scale_index, Votes_space)
            np.savez_compressed("HH%d" % scale_index, HH)
        except:
            common.DebugPrintErrorTrace()

        return Votes_space, HH
    """
    We substitute q - 1 with q, since we want
      to number arrays from 0 (not from 1 like in Matlab).
    """
    #for q=1:length(QD)
    #for q in range(1, len(QD) + 1):
    #for queryFrame in range(len(QD)):
    for queryFrame in range(len(q_harlocs)):
        common.DebugPrint(
            "multiscale_quad_retrieval(): Starting iteration queryFrame = %d" %
            queryFrame)
        # tic
        """
        str1=['load ' q_path QD(q).name]
        eval(str1)
        """
        """
        We make pp reference the desired multiharloc list for the query video
           frame queryFrame
        """
        pp = q_harlocs[queryFrame]
        #pp = np.array(pp);

        #common.DebugPrint("multiscale_quad_retrieval(): pp = %s" % str(pp));

        #[qout,qcen,qmaxdis,qori]=findquads(pp(pp(:,3)==scale_index,1:2),md_threshold,0);
        points = pp[pp[:, 2] == scale_index, 0:2]
        qout, qcen, qmaxdis, qori = findquads.findquads(
            points, md_threshold, 0)

        if common.MY_DEBUG_STDOUT and DBGPRINT:
            print("multiscale_quad_retrieval(): queryFrame = %d, " \
                          "qout.shape (number of quads for query frame queryFrame) = %s" % \
                                                 (queryFrame, str(qout.shape)))

        # disp([num2str(q) ' of ' num2str(length(QD)) ' -> ' num2str(size(qout,1)) ' quads'])

        #space_xy=zeros(size(qcen,1),2*length(RD))+nan;
        #space_xy = np.zeros( (qcen.shape[0], 2 * len(RD)) ) + np.nan;
        space_xy = np.zeros((qcen.shape[0], 2 * len(r_harlocs))) + np.nan

        #     votes=zeros(length(RD),1)
        #votes=zeros(length(RD),length(tolers));
        #votes = np.zeros( (len(RD), 1) );
        votes = np.zeros((len(r_harlocs), 1))

        #nep = np.array([]);
        #m_points = np.array([]);

        assert isinstance(tolers, float)

        if common.MY_DEBUG_STDOUT:
            common.DebugPrint(
                "multiscale_quad_retrieval(): quads of query frame %d are: " %
                queryFrame)
            common.DebugPrint("  qout = %s" % str(qout))
        """
        Alex: iterate over each quad (4 floats) of the query frame, built from
          Harris features at scale scale_index.
          Note: all_id stores the reference frame id for each quad descriptor.
        """
        """
        We substitute queryFrameQuad - 1 with queryFrameQuad, since we want
            to number arrays from 0 (not from 1 like in Matlab).
        """
        #for queryFrameQuad in range(1, qout.shape[0] + 1):
        for queryFrameQuad in range(qout.shape[0]):
            common.DebugPrint(
                "multiscale_quad_retrieval(): Starting iteration queryFrameQuad = %d"
                % queryFrameQuad)
            """
            Matlab's polymorphism is confusing here: although tolers is
                normally a float, Matlab treats it as a length-1 vector,
                so len(tolers) == 1.
            """
            #for tol_i in range(1, len(tolers) + 1):
            #    tol = tolers[tol_i - 1]
            """
            We substitute tol_i - 1 with tol, since we want
                to number arrays from 0 (not from 1 like in Matlab).
            """
            #for tol_i in range(1, 1 + 1):
            for tol_i in range(1):
                tol = tolers
                """
                # TODO: done below - take out this dbg print
                if DBGPRINT:
                    common.DebugPrint("multiscale_quad_retrieval(): " \
                                        "qout[queryFrameQuad, :] = %s" % \
                                        str(qout[queryFrameQuad, :]))
                """

                #% default for first PAMI with tol= 0.1 approximately

                # NOTE: SciPy's KDTree finds a few more results, in some cases,
                #    than the Matlab code from Evangelidis.

                #idx, di = kdtree_ball_query(tree, qout(i, :), tol)
                #idx, distKD = kdtree_ball_query(tree, qout[i - 1, :], tol)
                #idx, di = tree.query(x=xQuery, k=4)
                #resPoints = [data[i] for i in resBallIndices]
                # tol is a scalar representing the radius of the ball
                if config.KDTREE_IMPLEMENTATION == 0:
                    idx = r_quadsTree.query_ball_point(qout[queryFrameQuad, :],
                                                       tol)
                elif config.KDTREE_IMPLEMENTATION == 1:
                    #pt = qout[queryFrameQuad - 1, :].astype(np.float32);
                    pt = qout[queryFrameQuad, :]
                    pt = np.array([[pt[0], pt[1], pt[2], pt[3]]],
                                  dtype=np.float32)
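                    # FLANN's radiusSearch reports squared L2 distances, so the
                    # ball of radius tol is requested as tol ** 2 below.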
                    retval, idx, dists = r_quadsTree.radiusSearch( \
                                                query=pt, \
                                                radius=(tol**2), \
                                                maxResults=NUM_MAX_ELEMS, \
                                                params=search_params)
                    if common.MY_DEBUG_STDOUT and DBGPRINT:
                        """
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                        "retval (number NNs) = %s" % str(retval));
                        """
                        common.DebugPrint( \
                            "multiscale_quad_retrieval(): radiusSearch's retval " \
                            "(at queryFrame=%d, queryFrameQuad=%d) is %d" % (queryFrame, queryFrameQuad, retval))

                    idx = idx[0]
                    dists = dists[0]
                    """
                    Note: retval is the number of neighbors returned from the radiusSearch().
                      But the idx and the dists can have more elements than the returned retval.
                    """
                    idx = idx[:retval]
                    dists = dists[:retval]

                if common.MY_DEBUG_STDOUT and DBGPRINT:
                    print("multiscale_quad_retrieval(): " \
                            "qout[queryFrameQuad, :] = %s" % str(qout[queryFrameQuad, :]))
                    print("multiscale_quad_retrieval(): " \
                                      "idx = %s" % str(idx))
                    print("multiscale_quad_retrieval(): " \
                                      "dists = %s" % str(dists))
                    print("multiscale_quad_retrieval(): " \
                                      "tol = %s" % str(tol))
                    if config.KDTREE_IMPLEMENTATION == 0:
                        print("multiscale_quad_retrieval(): " \
                                "r_quadsTree.data[idx] = %s" % \
                                str(r_quadsTree.data[idx]))

                # We print the distances to the points returned in idx
                if common.MY_DEBUG_STDOUT and DBGPRINT:  # This is just for debugging purposes
                    a = qout[queryFrameQuad, :]
                    if config.KDTREE_IMPLEMENTATION == 0:
                        for myI, index in enumerate(idx):
                            b = r_quadsTree.data[index]
                            """
                            if False:
                                common.DebugPrint("multiscale_quad_retrieval(): distance to " \
                                    "%d point (%s) inside ball = %.4f" % \
                                    (myI, str(b), npla.norm(a - b)));
                            """
                    else:
                        pass
                idx = np.array(idx)

                #if False:
                if common.MY_DEBUG_STDOUT:
                    common.DebugPrint("multiscale_quad_retrieval(): " \
                                "all_max.shape = %s" % str(all_max.shape))
                    common.DebugPrint("multiscale_quad_retrieval(): " \
                                "qmaxdis.shape = %s" % str(qmaxdis.shape))
                    common.DebugPrint("multiscale_quad_retrieval(): " \
                                      "qmaxdis = %s" % str(qmaxdis))
                    common.DebugPrint("multiscale_quad_retrieval(): " \
                                      "qori.shape = %s" % str(qori.shape))
                    common.DebugPrint("multiscale_quad_retrieval(): " \
                                      "qori = %s" % str(qori))

                #dis_idx=abs(qmaxdis(i)-all_max(idx))<MAXDIS;
                if len(idx) == 0:
                    # NOT A GOOD IDEA: continue;
                    #idx = np.array([]);
                    dis_idx = np.array([])
                    ori_idx = np.array([])
                else:
                    if common.MY_DEBUG_STDOUT and DBGPRINT:
                        print("multiscale_quad_retrieval(): " \
                                            "queryFrameQuad = %s" % str(queryFrameQuad))
                        print("multiscale_quad_retrieval(): " \
                            "all_max[idx] = %s" % str(all_max[idx]))
                        print("multiscale_quad_retrieval(): " \
                            "qmaxdis[queryFrameQuad] = %s" % str(qmaxdis[queryFrameQuad]))

                    if USE_GPS_COORDINATES:
                        # We look only at a part of the reference video
                        """
                        Since in some cases the video temporal alignment is
                            difficult to do due to similar portions in the
                            trajectory (see the drone videos, clip 3_some_lake)
                            we "guide" the temporal alignment by restricting
                            the reference frame search space - this is useful
                            when we have the geolocation (GPS) coordinate for
                            each frame.
                        """
                        if common.MY_DEBUG_STDOUT and DBGPRINT:
                            print("multiscale_quad_retrieval(): " \
                                "all_id = %s" % str(all_id))

                        if True:
                            #assert (all_id.ndim == 2) and (all_id.shape[1] == 1);
                            if all_id.ndim == 2:
                                #!!!!TODO TODO: put this at the beginning of the function
                                assert all_id.shape[1] == 1
                                """
                                We flatten the array all_id.
                                  Note: We don't use order="F" since it's
                                        basically a 1-D array.
                                """
                                all_id = np.ravel(all_id)

                        #!!!!TODO: put start and end frame in config - or compute it from geolocation
                        sub_idx = np.logical_and( (all_id[idx] >= 2030 - 928), \
                                                    (all_id[idx] <= 2400 - 928) )
                        idx = idx[sub_idx]

                        if common.MY_DEBUG_STDOUT and DBGPRINT:
                            print("multiscale_quad_retrieval(): " \
                                "all_id = %s" % str(all_id))
                            print("multiscale_quad_retrieval(): " \
                                "sub_idx = %s" % str(sub_idx))
                            print("multiscale_quad_retrieval(): " \
                                "idx = %s" % str(idx))

                    if FILTER:
                        dis_idx = np.abs(qmaxdis[queryFrameQuad] -
                                         all_max[idx]) < MAXDIS

                        #if False:
                        if common.MY_DEBUG_STDOUT:
                            """
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                                "idx = %s" % str(idx));
                            """
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                            "dis_idx = %s" % str(dis_idx))

                        #idx=idx(dis_idx)
                        idx = idx[dis_idx]

                    #if False:
                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                            "idx (after idx = idx[dis_idx]) = %s" % str(idx))

                    if FILTER:
                        #ori_idx=abs(qori(i)-all_ori(idx))<MAXORI;
                        ori_idx = np.abs(qori[queryFrameQuad] -
                                         all_ori[idx]) < MAXORI

                        #if False:
                        if common.MY_DEBUG_STDOUT:
                            """
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                                    "all_ori = %s" % str(all_ori));
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                    "qori[queryFrameQuad] = %s" % str(qori[queryFrameQuad]));

                            """
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                            "ori_idx = %s" % str(ori_idx))

                        #idx=idx(ori_idx);
                        idx = idx[ori_idx]

                # IMPORTANT ###################################################
                # IMPORTANT ###################################################
                # IMPORTANT ###################################################
                #% spatio-temporal consistency
                # IMPORTANT ###################################################
                # IMPORTANT ###################################################
                # IMPORTANT ###################################################

                #if numel(idx) > 0:
                if idx.size > 0:
                    if cropflag == 0:
                        if FILTER:
                            """
                            Alex: this is a simple procedure for eliminating
                            False Positive (FP) matches, as presented in
                            Section 4.2 of the TPAMI 2013 paper.
                            Basically it filters out quad matches whose
                            centroids lie more than st_threshold away from the
                            query quad centroid.
                            Note: all_cen holds the centroids of all reference
                                quads.
                            """
                            dy = qcen[queryFrameQuad, 0] - all_cen[idx, 0]
                            dx = qcen[queryFrameQuad, 1] - all_cen[idx, 1]

                            #D=dy.^2+dx.^2;
                            D = dy**2 + dx**2

                            co_idx = D < pow(st_threshold, 2)

                            idx = idx[co_idx]
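                            # For illustration: with st_threshold = 50, a
                            # reference quad whose centroid lies 60 px from the
                            # query centroid (D = 3600 > 2500) is dropped, while
                            # one 30 px away (D = 900) is kept.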
                    else:
                        """
                        We substitute iii - 1 with iii, since we want
                            to number arrays from 0 (not from 1 like in Matlab).
                        """
                        #for iii in range(1, len(idx) + 1):
                        for iii in range(len(idx)):
                            #space_xy(i,(all_id(idx(iii))-RD_start)*2+1:(all_id(idx(iii))-RD_start)*2+2) = all_cen(idx(iii),:)
                            ref_frame_offset = int(all_id[idx[iii]]) - RD_start
                            # Columns [2*k, 2*k + 1] hold the matched centroid
                            # for reference frame k = ref_frame_offset.
                            space_xy[queryFrameQuad, ref_frame_offset * 2:
                                     ref_frame_offset * 2 + 2] = \
                                all_cen[idx[iii], :]

                    #hh=hist(all_id(idx),RD_start:RD_end);
                    # It has to be an np.array because we multiply it with a scalar
                    histoRange = np.array(range(RD_start, RD_end + 1))
                    hh = Matlab.hist(x=all_id[idx], binCenters=histoRange)
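                    # hh[k] counts how many of the surviving matched quads come
                    # from reference frame RD_start + k.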

                    #if False:
                    #if True:
                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                            "hh = %s" % (str(hh)))
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                            "hh.shape = %s" % (str(hh.shape)))
                        """
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                            "all_id = %s" % (str(all_id)));
                        """
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                    "all_id.shape = %s" % (str(all_id.shape)))
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                    "idx = %s" % (str(idx)))
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                    "idx.shape = %s" % (str(idx.shape)))

                    # % nz can be computed more optimally
                    #nz=find(hh~=0); # nz can be computed more optimally
                    # np.nonzero() always returns a tuple; since hh is 1-D, we take its single element
                    nz = np.nonzero(hh != 0)[0]
                    #if False:
                    if common.MY_DEBUG_STDOUT:
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                          "nz = %s" % (str(nz)))
                        common.DebugPrint("multiscale_quad_retrieval(): " \
                                          "nz.shape = %s" % (str(nz.shape)))

                    #if numel(nz) > 0
                    if nz.size > 0:
                        #%%----text-retrieval-like
                        #votes(nz, tol_i) = votes(nz, tol_i) + log10(length(RD) / (length(nz)))^2 #PREVIOUSLY
                        #myVal = pow(math.log10(float(len(RD)) / len(nz)), 2);
                        myVal = pow(
                            math.log10(float(len(r_harlocs)) / len(nz)), 2)
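                        # Inverse-document-frequency-style weight: reference
                        # frames act as documents and quads as terms, so the
                        # fewer frames (len(nz)) this quad votes for, the larger
                        # its squared log10 contribution.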
                        """
                        try:
                            myVal = pow(math.log10(float(len(r_harlocs)) / len(nz)), 2);
                        except:
                            print("Error: len=%d len(nz)=%d nz.size=%d" % \
                                            (len(r_harlocs), len(nz), nz.size));
                            common.DebugPrintErrorTrace();
                        """

                        #if False:
                        if common.MY_DEBUG_STDOUT:
                            """
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                              "len(RD) = %d" % len(RD));
                            """
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                              "len(r_harlocs) = %d" % len(r_harlocs))
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                              "len(nz) = %d" % len(nz))
                            common.DebugPrint("multiscale_quad_retrieval(): " \
                                               "myVal = %.5f" % myVal)

                        # PREVIOUSLY
                        votes[nz, tol_i] = votes[nz, tol_i] + myVal
                        #   votes(nz)=votes(nz)+log10(length(RD)/(length(nz)));
                        #   votes(nz)=votes(nz)+1;

        if common.MY_DEBUG_STDOUT and DBGPRINT:
            """
            common.DebugPrint("multiscale_quad_retrieval(): " \
                    "Votes_space.shape = %s" % (str(Votes_space.shape)));
            common.DebugPrint("multiscale_quad_retrieval(): " \
                    "votes.shape = %s" % (str(votes.shape)));
            """

            print("multiscale_quad_retrieval(): " \
                              "votes.shape = %s" % (str(votes.shape)))
            if (np.abs(votes) < 1.0e-10).all():
                print( \
                      "multiscale_quad_retrieval(): votes = 0 (all zeros)")
            else:
                print("multiscale_quad_retrieval(): " \
                              "votes = %s" % (str(votes)))

        #Votes_space(:,q)=votes;
        # Gives: "ValueError: output operand requires a reduction, but reduction is not enabled"
        #Votes_space[:, queryFrame - 1] = votes;
        # Note: since votes is basically a 1-D vector, we don't use the Fortran order
        Votes_space[:, queryFrame] = np.ravel(votes)
        # order="F");

        if cropflag == 0:
            HH[:, queryFrame] = 1
        else:
            """
            HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
                                        qcen, len(RD), st_threshold, cropflag);
            """
            HH[:, queryFrame] = spatial_consistency.spatial_consistency(space_xy, \
                                        qcen, len(r_harlocs), st_threshold, cropflag)

    if common.MY_DEBUG_STDOUT and DBGPRINT:
        print("multiscale_quad_retrieval(scale_index=%d): " \
                            "Votes_space =\n%s" % (scale_index, str(Votes_space)))

    try:
        np.savez_compressed("Votes_space%d" % scale_index, Votes_space)
        np.savez_compressed("HH%d" % scale_index, HH)
    except:
        common.DebugPrintErrorTrace()

    t2 = float(cv2.getTickCount())
    myTime = (t2 - t1) / cv2.getTickFrequency()
    print("multiscale_quad_retrieval() took %.6f [sec]" % myTime)
    """
    common.DebugPrint("multiscale_quad_retrieval(): " \
                        "%d corresponding frames retrieved in %.6f secs" % \
                        (len(q_harlocs), myTime));
    """

    return Votes_space, HH
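

# A minimal, self-contained sketch of the retrieval idea implemented above: a
# radius search over reference quad descriptors followed by a tf-idf-like vote
# per reference frame. It uses SciPy's cKDTree and made-up inputs (ref_quads,
# ref_frame_ids, query_quads and the function name are illustrative, not
# objects from this module), and it drops the MAXDIS/MAXORI, GPS and spatial
# consistency filters - it only shows the voting scheme.
def quad_vote_sketch(ref_quads, ref_frame_ids, query_quads, num_ref_frames,
                     tol=0.1):
    import math

    import numpy as np
    from scipy.spatial import cKDTree

    # ref_quads: (N, 4) quad descriptors pooled over all reference frames.
    # ref_frame_ids: (N,) integer reference frame id of each descriptor.
    # query_quads: (M, 4) quad descriptors of a single query frame.
    tree = cKDTree(ref_quads)
    ref_frame_ids = np.asarray(ref_frame_ids)
    votes = np.zeros(num_ref_frames)
    for quad in query_quads:
        neighbors = tree.query_ball_point(quad, tol)
        if not neighbors:
            continue
        # Histogram of the reference frames hit by this quad's neighbors.
        hh = np.bincount(ref_frame_ids[neighbors], minlength=num_ref_frames)
        nz = np.nonzero(hh)[0]
        # Rarer matches (fewer distinct frames hit) get a larger weight,
        # squared, like the log10(len(r_harlocs) / len(nz)) ** 2 term above.
        votes[nz] += math.log10(float(num_ref_frames) / len(nz)) ** 2
    return votes
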
Example no. 7
0
def IterationStandalone(iWhile):
    crossref = g.crossref
    captureQ = g.captureQ
    captureR = g.captureR

    if common.MY_DEBUG_STDOUT:
        common.DebugPrint("Entered IterationStandalone(): crossref=%s, captureQ=%s, "\
                            "captureR=%s, refined_crossref=%s, warp_p=%s, "
                            "x0=%s, y0=%s, start=%s, t=%d, iWhile=%d." % \
                        (str(crossref), str(captureQ), str(captureR), \
                         str(g.refined_crossref), str(g.warp_p), \
                         str(g.x0), str(g.y0), str(g.start), g.t, iWhile))
        common.DebugPrint("IterationStandalone(): id(g)=%s" % str(id(g)))

    q_path = captureQ
    r_path = captureR

    x0 = g.x0
    y0 = g.y0
    """
    x0 = crossref[5: -5, 0].T;
    y0 = crossref[5: -5, 1].T;
    """
    """
    x0 = np.array(range(10));
    y0 = np.array(range(10));
    """

    start = g.start
    #start = y0.T; #% Alex: So start is related to crossref(:,2) from the end of dp3.m
    #x_init = 0;

    refined_crossref = g.refined_crossref
    #refined_crossref = crossref.copy();
    ##refined_crossref = np.array([]);

    #% fprintf('\nRetrieval using visual dictionary and inverted index list...(VD method)\n');
    """
    if config.USE_ECC_FROM_OPENCV:
        H = np.eye(3, dtype=np.float32); #% use feature matching if you need a good initialization
    else:
        H = np.eye(3); #% use feature matching if you need a good initialization
    warp_p = H; #%initial warp
    """
    warp_p = g.warp_p
    #%initial warp

    t = g.t
    #t = 0; #%initial subframe correction

    if not DONT_USE_MATLAB_FIT_ECC_RECORD_SYNTACTIC_SUGAR:
        fit = []

    #if config.pixel_select == 1:
    #    config.weighted_flag = 0;

    try:
        common.DebugPrint("SpatialAlignment.IterationStandalone(iWhile=%d)\n" % \
                                                                    iWhile)
        """
        if i < 5:
            tic
        """

        if config.affine_time == 0:
            if config.seq2seq == 0:
                common.DebugPrint(
                    "Alex: NOT affine temporal model, NOT seq2seq")

                #% frame-to-subframe scheme (one query frame against a reference subsequence)
                if DONT_USE_MATLAB_FIT_ECC_RECORD_SYNTACTIC_SUGAR:
                    fitecc = ecc_homo_spacetime.ecc_homo_spacetime( \
                                                img_index=start[iWhile],
                                                tmplt_index=x0[iWhile],
                                                p_init=warp_p,
                                                t0=t,
                                                n_iters=config.iterECC,
                                                levels=config.levelsECC,
                                                r_capture=r_path,
                                                q_capture=q_path,
                                                nof=config.nof,
                                                time_flag=config.time_flag,
                                                weighted_flag=config.weighted_flag,
                                                pixel_select=config.pixel_select,
                                                mode="ecc",
                                                imformat=config.imformat,
                                                save_image=config.verboseECC)
                else:
                    fit[iWhile].ecc = ecc_homo_spacetime.ecc_homo_spacetime( \
                                                img_index=start[iWhile],
                                                tmplt_index=x0[iWhile],
                                                p_init=warp_p,
                                                t0=t,
                                                n_iters=config.iterECC,
                                                levels=config.levelsECC,
                                                r_capture=r_path,
                                                q_capture=q_path,
                                                nof=config.nof,
                                                time_flag=config.time_flag,
                                                weighted_flag=config.weighted_flag,
                                                pixel_select=config.pixel_select,
                                                mode="ecc",
                                                imformat=config.imformat,
                                                save_image=config.verboseECC)
            else:  # seq2seq == 1, affine_time == 0
                common.DebugPrint(
                    "Alex: NOT affine temporal model, seq2seq == 1")

                #% sequence-to-sequence alignment (one temporal parameter)
                fit[iWhile].ecc = ecc_homo_spacetime_seq(
                    start[iWhile], x0[iWhile], warp_p, t, iterECC, levels,
                    r_path, q_path, nof, time_flag, weighted_flag,
                    pixel_select, config.imformat, verbose)
        else:  #% affine temporal model (two parameters) for the case frame-to-subframe
            common.DebugPrint("Alex: Affine temporal model\n")

            fit[iWhile].ecc = ecc_homo_spacetime.ecc_homo_affine_spacetime( \
                                                img_index=start[iWhile],
                                                tmplt_index=x0[iWhile],
                                                p_init=warp_p,
                                                t0=t,
                                                n_iters=config.iterECC,
                                                levels=config.levelsECC,
                                                r_capture=r_path,
                                                q_capture=q_path,
                                                nof=config.nof,
                                                time_flag=config.time_flag,
                                                weighted_flag=config.weighted_flag,
                                                pixel_select=config.pixel_select,
                                                mode="ecc",
                                                imformat=config.imformat,
                                                save_image=config.verboseECC)
        """
        if i < 5:
            toc;
        """

        iterECC0 = config.iterECC - 1
        if fitecc[0][iterECC0].t is None:
            while iterECC0 > 0:
                iterECC0 -= 1
                if fitecc[0][iterECC0].t is not None:
                    break

        #!!!!TODO: understand ecc_homo_spacetime - should we use fitecc[config.levelsECC - 1][iterECC0].t instead of fitecc[0][iterECC0].t? (when levelsECC != 1)?

        #% synchronization correction
        #%synchro with subframe correction
        #refined_crossref(i,2)=refined_crossref(i,2)+fit(i).ecc(1,iter).t;
        if config.USE_ECC_FROM_OPENCV:
            pass
        else:
            if DONT_USE_MATLAB_FIT_ECC_RECORD_SYNTACTIC_SUGAR:
                refined_crossref[iWhile, 1] = refined_crossref[iWhile, 1] + \
                                            fitecc[0][iterECC0].t
                #fitecc[0][config.iterECC - 1].t;
            else:
                refined_crossref[iWhile, 1] = refined_crossref[iWhile, 1] + \
                                            fit[iWhile].ecc[iterECC0].t
                #fit[iWhile].ecc[0, config.iterECC - 1].t;
        if common.MY_DEBUG_STDOUT:
            common.DebugPrint("Exiting IterationStandalone(iWhile=%d)" %
                              iWhile)
    except:
        common.DebugPrintErrorTrace()
Example no. 8
0
def SpatialAlignmentEvangelidis(crossref, captureQ, captureR):
    """
    % Alex:
    % We enter a bogus crossref, since for the 720x480 MIT video Matlab
    %    crashed with out of memory - it ended up using 1GB of data
    %crossref=zeros(1594,2);
    %for i=1:size(crossref,1)
    %    crossref(i,1)=1000+i;
    %    crossref(i,2)=2000+i;
    %end
    %crossref
    """

    #fprintf(1, 'Alex: Entered alignment_script\n');

    #% Alex: tic starts a stopwatch timer to measure performance. The function records the internal time at execution of the tic command. Display the elapsed time with the toc function.
    #tic

    #% alignment parameters
    """
    % Alex: From [TPAMI_2013] paper:
    % "Given a pair (m; n), we consider the temporally local
    %subsequence In-mu;...,In+mu where mu is a small integer. After
    %defining Phi(), we look for the image warped in space and time
    %from the above subsequence that aligns with Im. To this
    %end, we extend the ECC alignment algorithm [3] to the
    %space-time dimensions, i.e., the extended scheme estimates
    %the spatiotemporal parameterspthat maximize the correla-
    %tion coefficient between the input frame Iq(x) and the
    %warped reference subframeIr(Phi(x_hat; p)."
    """
    #nof = 5; #%number of frames for sub-sequences
    #cropflag = 0; #% flag for cropped images (use 0)
    #iterECC = 15; #%iterations of ECC
    #levelsECC = 1; #%levels of multi-resolution ECC (use 1)

    #verboseECC = 1; #% save/see the spatial alignment (and if want, uncomment to get a pause per frame)

    if config.TESTING_IDENTICAL_MATLAB:
        #q_path = "video_data/input/";
        #r_path = "video_data/reference/";
        q_path = "Videos/input/"
        r_path = "Videos/reference/"

        if not os.path.exists(q_path):
            os.makedirs(q_path)

        if not os.path.exists(r_path):
            os.makedirs(r_path)
    else:
        q_path = captureQ
        r_path = captureR

    #% reject a few frames in the beginning and at the end, because of the sub-sequence
    #% Alex: crossref comes from the synchro_script (via the global scope)
    #x0=crossref(6:end-5,1)';
    x0 = crossref[5:-5, 0].T
    #y0=crossref(6:end-5,2)';
    y0 = crossref[5:-5, 1].T

    #imformat = "png"; #"jpeg"; #%image format

    start = y0.T
    #% Alex: So start is related to crossref(:,2) from the end of dp3.m
    x_init = 0

    refined_crossref = crossref.copy()

    #% fprintf('\nRetrieval using visual dictionary and inverted index list...(VD method)\n');

    if config.USE_ECC_FROM_OPENCV:
        H = np.eye(3, dtype=np.float32)
        #% use feature matching if you need a good initialization
    else:
        H = np.eye(3)
        #% use feature matching if you need a good initialization
    warp_p = H
    #%initial warp
    t = 0
    #%initial subframe correction

    if not DONT_USE_MATLAB_FIT_ECC_RECORD_SYNTACTIC_SUGAR:
        fit = []

    #pixel_select=0; #% when 1, it considers only pixels around salient points
    #time_flag=0; #% when 0, it does only spatial alignment even in seq2seq
    #weighted_flag=1; #% when 1, it considers a self-weighted version of ECC, not explained in PAMI paper

    #%fprintf(1, 'Am here before\n');

    if config.pixel_select == 1:
        config.weighted_flag = 0
    """
    % !!!!NEEDS NORMALLY MEX: concat_3d, etc (UNLESS on Win64, for which we have binaries for the MEXes)
    % when affine_time is 1, an affine temporal model is considered with the frame-to-subframe scheme
    % Alex: affine temporal model means two parameters, for the case frame-to-subframe
    %    Alex: if affine_time == 0 --> it has one temporal parameter. We can also use here sequence-to-sequence alignment
    % Alex: From [TPAMI_2013] paper: "there will be a global affine temporal transformation
    %       t=alpha * t_hat + tau t_hat determines corre-spondences between the
    %       indices t and t_hat, regardless of scene content or camera motion."
    """
    #config.affine_time = 0;
    """
    % Alex: From [TPAMI_2013] paper:
    %"Note, however, that if the input frames are weakly textured,
    %sequence-to-sequence schemes may be preferable to image-to-sequence counterparts."
    % when seq2seq is 1, it considers a sequence-to-sequence alignment (seq2seq in PAMI paper)
    % Alex: the PAMI paper seems to be [1] Y. Caspi and M. Irani,
                "Spatio-Temporal Alignment of Sequences,"
                IEEE Trans. Pattern Analysis and Machine Intelligence,
                vol. 24, no. 11, pp. 1409-1424, Nov. 2002.
    """
    #config.seq2seq = 0;

    #% if seq2seq==1:
    #%     pixel_select = 0;

    if common.MY_DEBUG_STDOUT:
        common.DebugPrint("SpatialAlignmentEvangelidis(): crossref.shape = %s" % \
                                                        str(crossref.shape))
        common.DebugPrint("SpatialAlignmentEvangelidis(): crossref = %s" %
                          str(crossref))

        common.DebugPrint("SpatialAlignmentEvangelidis(): x0.shape = %s" %
                          str(x0.shape))
        common.DebugPrint("SpatialAlignmentEvangelidis(): x0 = %s" % str(x0))

        common.DebugPrint("SpatialAlignmentEvangelidis(): y0.shape = %s" %
                          str(y0.shape))
        common.DebugPrint("SpatialAlignmentEvangelidis(): y0 = %s" % str(y0))

        common.DebugPrint("SpatialAlignmentEvangelidis(): start.shape = %s" % \
                                                        str(start.shape))
        common.DebugPrint("SpatialAlignmentEvangelidis(): (used to generate reference " \
                                "frame to read) start = %s" % str(start))

    if config.USE_MULTITHREADING == True:
        global g
        g.crossref = crossref
        g.captureQ = captureQ
        g.captureR = captureR
        g.x0 = x0
        g.y0 = y0
        g.start = start
        g.refined_crossref = refined_crossref
        g.warp_p = warp_p
        g.t = t

        # We consider only this case:
        assert config.affine_time == 0
        assert config.seq2seq == 0

        if common.MY_DEBUG_STDOUT:
            common.DebugPrint("SpatialAlignmentEvangelidis(): id(g)=%s" %
                              str(id(g)))
            common.DebugPrint("SpatialAlignmentEvangelidis(): g.crossref=%s, " \
                                    "g.captureQ=%s, g.captureR=%s." % \
                                    (g.crossref, g.captureQ, g.captureR))
        """
        Start worker processes to use on multi-core processor (able to run
           in parallel - no GIL issue if each core has it's own VM)
        """
        pool = multiprocessing.Pool(processes=config.numProcesses)
        """
        common.DebugPrint("SpatialAlignment(): Spawning a new " \
                            "IterationStandalone(iWhile=%d) thread" % iWhile);
        """
        print("SpatialAlignment(): Spawned a pool of %d workers" % \
                                config.numProcesses)
        if False:
            # The multithreaded solution is not great, since it encounters the GIL limitation
            #NOT_TODO: this is not working here, outside the while loop
            t = Thread(target=IterationStandalone, args=(iWhile, ))
            t.start()
        common.DebugPrint("SpatialAlignment(): __name__ = %s" % str(__name__))

        if True:
            #listParams = [(crossref, captureQ, captureR) for x in range(1, 8 + 1)];
            #listParams = range(1, x0.size + 1);
            listParams = range(0, x0.size)
            #!!!!TODO: use counterStep

            #% listParams is the vector with query frame IDs. y0 with the corresponding ref frames.
            # See https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool
            res = pool.map(func=IterationStandalone, iterable=listParams, \
                            chunksize=1)
            print("Pool.map returns %s" % str(res))
            #x0.size + 1
            """
            From https://medium.com/building-things-on-the-internet/40e9b2b36148
              close the pool and wait for the work to finish
            """
            pool.close()
            pool.join()
            """
            We passed refined_crossref to the workers, so it should be
                updated here. !!!!TODO: check if so
            """
            """
            !!!!TODO: check to see if refined_crossref was updated properly by the workers

            !!!!TODO: seems you need to use muliprocessing.Value - see http://stackoverflow.com/questions/14124588/python-multiprocessing-shared-memory:
                <<Python's multithreading is not suitable for CPU-bound tasks
                   (because of the GIL), so the usual solution in that case is
                    to go on multiprocessing.
                  However, with this solution you need to explicitly share
                    the data, using multiprocessing.Value and
                     multiprocessing.Array.>>

            From http://stackoverflow.com/questions/10721915/shared-memory-objects-in-python-multiprocessing :
                <<If you use an operating system that uses copy-on-write fork() semantics (like any common unix), then as long as you never alter your data structure it will be available to all child processes without taking up additional memory. You will not have to do anything special (except make absolutely sure you don't alter the object).
                The most efficient thing you can do for your problem would be to pack your array into an efficient array structure (using numpy or array), place that in shared memory, wrap it with multiprocessing.Array, and pass that to your functions. This answer shows how to do that.
                If you want a writeable shared object, then you will need to wrap it with some kind of synchronization or locking. multiprocessing provides two methods of doing this: one using shared memory (suitable for simple values, arrays, or ctypes) or a Manager proxy, where one process holds the memory and a manager arbitrates access to it from other processes (even over a network).
                The Manager approach can be used with arbitrary Python objects, but will be slower than the equivalent using shared memory because the objects need to be serialized/deserialized and sent between processes.
                There are a wealth of parallel processing libraries and approaches available in Python. multiprocessing is an excellent and well rounded library, but if you have special needs perhaps one of the other approaches may be better.>>

            From http://en.wikipedia.org/wiki/Fork_%28system_call%29 :
                <<In Unix systems equipped with virtual memory support
                    (practically all modern variants), the fork operation
                    creates a separate address space for the child.
                  The child process has an exact copy of all the memory
                    segments of the parent process, though if copy-on-write
                    semantics are implemented, the physical memory need
                    not be actually copied.
                  Instead, virtual memory pages in both processes may refer to
                    the same pages of physical memory until one of them writes
                    to such a page: then it is copied.>>

             From http://en.wikipedia.org/wiki/Copy-on-write
                <<Copy-on-write finds its main use in virtual memory operating systems; when a process creates a copy of itself, the pages in memory that might be modified by either the process or its copy are marked copy-on-write. When one process modifies the memory, the operating system's kernel intercepts the operation and copies the memory thus a change in the memory of one process is not visible in another's.>>
            """

            return refined_crossref

    #i=1;
    # IMPORTANT NOTE: We substitute i - 1 --> iWhile (since array numbering
    #     starts with 0, not like in Matlab from 1)
    iWhile = 0

    #% x0 is the vector with query frame IDs. y0 with the corresponding ref frames.
    #while(i<(numel(x0)+1))
    while iWhile < x0.size:
        try:
            common.DebugPrint(
                "SpatialAlignmentEvangelidis(): Iteration(iWhile=%d)\n" %
                iWhile)
            """
            if i < 5:
                tic
            """

            #if ~affine_time
            if config.affine_time == 0:
                if config.seq2seq == 0:
                    common.DebugPrint(
                        "SpatialAlignmentEvangelidis(): NOT affine temporal model, NOT seq2seq"
                    )

                    #% We do spatial alignment for query frame x0(i) and ref frame start(i) = y0(i)
                    #% frame-to-subframe scheme (one query frame against a reference subsequence)
                    if DONT_USE_MATLAB_FIT_ECC_RECORD_SYNTACTIC_SUGAR:
                        fitecc = ecc_homo_spacetime.ecc_homo_spacetime( \
                                                    img_index=start[iWhile],
                                                    tmplt_index=x0[iWhile],
                                                    p_init=warp_p,
                                                    t0=t,
                                                    n_iters=config.iterECC,
                                                    levels=config.levelsECC,
                                                    r_capture=r_path,
                                                    q_capture=q_path,
                                                    nof=config.nof,
                                                    time_flag=config.time_flag,
                                                    weighted_flag=config.weighted_flag,
                                                    pixel_select=config.pixel_select,
                                                    mode="ecc",
                                                    imformat=config.imformat,
                                                    save_image=config.verboseECC)
                    else:
                        fit[iWhile].ecc = ecc_homo_spacetime.ecc_homo_spacetime( \
                                                    img_index=start[iWhile],
                                                    tmplt_index=x0[iWhile],
                                                    p_init=warp_p,
                                                    t0=t,
                                                    n_iters=config.iterECC,
                                                    levels=config.levelsECC,
                                                    r_capture=r_path,
                                                    q_capture=q_path,
                                                    nof=config.nof,
                                                    time_flag=config.time_flag,
                                                    weighted_flag=config.weighted_flag,
                                                    pixel_select=config.pixel_select,
                                                    mode="ecc",
                                                    imformat=config.imformat,
                                                    save_image=config.verboseECC)
                else:
                    common.DebugPrint(
                        "SpatialAlignmentEvangelidis(): NOT affine temporal model, seq2seq == 1"
                    )

                    #% sequence-to-sequence alignment (one temporal parameter)
                    fit[iWhile].ecc = ecc_homo_spacetime_seq(
                        start[iWhile], x0[iWhile], warp_p, t, iterECC, levels,
                        r_path, q_path, nof, time_flag, weighted_flag,
                        pixel_select, config.imformat, verbose)
            else:  #% affine temporal model (two parameters) for the case frame-to-subframe
                common.DebugPrint(
                    "SpatialAlignmentEvangelidis(): Affine temporal model\n")

                fit[iWhile].ecc = ecc_homo_spacetime.ecc_homo_affine_spacetime( \
                                                    img_index=start[iWhile],
                                                    tmplt_index=x0[iWhile],
                                                    p_init=warp_p,
                                                    t0=t,
                                                    n_iters=config.iterECC,
                                                    levels=config.levelsECC,
                                                    r_capture=r_path,
                                                    q_capture=q_path,
                                                    nof=config.nof,
                                                    time_flag=config.time_flag,
                                                    weighted_flag=config.weighted_flag,
                                                    pixel_select=config.pixel_select,
                                                    mode="ecc",
                                                    imformat=config.imformat,
                                                    save_image=config.verboseECC)
            """
            if i < 5:
                toc;
            """

            #% synchronization correction
            #%synchro with subframe correction
            #refined_crossref(i,2)=refined_crossref(i,2)+fit(i).ecc(1,iter).t;
            if config.USE_ECC_FROM_OPENCV:
                pass
            else:
                iterECC0 = config.iterECC - 1
                if fitecc[0][iterECC0].t is None:
                    while iterECC0 > 0:
                        iterECC0 -= 1
                        if fitecc[0][iterECC0].t is not None:
                            break
                if fitecc[0][iterECC0].t is None:
                    # No usable ECC result for this frame: advance to the next
                    # query frame before continuing, otherwise the while loop
                    # would never terminate.
                    iWhile += 1
                    continue

                #!!!!TODO: understand ecc_homo_spacetime - should we use fitecc[config.levelsECC - 1][iterECC0].t instead of fitecc[0][iterECC0].t? (when levelsECC != 1)?

                if DONT_USE_MATLAB_FIT_ECC_RECORD_SYNTACTIC_SUGAR:
                    refined_crossref[iWhile, 1] = refined_crossref[iWhile, 1] + \
                                                fitecc[0][iterECC0].t
                else:
                    refined_crossref[iWhile, 1] = refined_crossref[iWhile, 1] + \
                                                fit[iWhile].ecc[0, iterECC0].t
                common.DebugPrint(
                    "SpatialAlignment(): Finished iteration iWhile=%d" % \
                                                                    iWhile)

            common.DebugPrint("SpatialAlignmentEvangelidis(): warp_p " \
                                  "(at end of iteration) = %s" % \
                                                            str(warp_p))
        # Inspired from http://effbot.org/zone/stupid-exceptions-keyboardinterrupt.htm
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            common.DebugPrintErrorTrace()
            quit()

        iWhile += 1
        """
        We stop the spatial alignment after generating the
            first "pair" of visuals.
        """
        #break;

        #print("SpatialAlignment(): while loop: iWhile = %d" % iWhile);

    #% fit struct contains the results of the spatial alignment.

    #toc
    return refined_crossref
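

# For intuition only: the purely spatial part of the ECC refinement, sketched
# with OpenCV's findTransformECC. This is a hypothetical illustration (the
# function name and inputs are not part of this project, and it is not
# necessarily how config.USE_ECC_FROM_OPENCV is wired up); it ignores the
# temporal / subframe parameter t that ecc_homo_spacetime also estimates.
def ecc_spatial_alignment_sketch(reference_gray, query_gray, n_iters=15):
    import cv2
    import numpy as np

    # Both inputs are single-channel (grayscale) images of the same size.
    warp = np.eye(3, 3, dtype=np.float32)  # initial homography = identity
    criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, n_iters, 1e-6)
    # Maximizes the correlation coefficient between query_gray and the warped
    # reference_gray, refining `warp`. Note: some OpenCV 4.1.x builds also
    # require explicit inputMask and gaussFiltSize arguments here.
    cc, warp = cv2.findTransformECC(query_gray, reference_gray, warp,
                                    cv2.MOTION_HOMOGRAPHY, criteria)
    aligned = cv2.warpPerspective(
        reference_gray, warp, (query_gray.shape[1], query_gray.shape[0]),
        flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)
    return cc, warp, aligned
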
def ComputeCost(crossref, V, fileName="crossref.txt"):
    # V[r][q] = votes of ref frame r for query frame q

    print("ComputeCost(): V.shape = %s" % str(V.shape))
    print("ComputeCost(): crossref.shape = %s" % str(crossref.shape))

    if False:
        for q in range(67, 71):
            for r in range(V.shape[0]):
                print("  V[%d, %d] = %.7f" %
                      (r + config.initFrame[1], q, V[r, q]))
            print("")
    """
    print("ComputeCost(): crossref and V =");
    cost = 0.0;
    myText2 = "";
    for i in range(crossref.shape[0]):
        assert crossref[i][0] == i;
        cost += V[crossref[i][1]][i];
        print("[%d %d] %.7f" % (i, crossref[i][1], V[crossref[i][1]][i]));

    print("ComputeCost(): cost computed is %.7f" % cost);
    """

    #!!!!TODO TODO: print also a synchronization error (look at TPAMI 2013 Evangelidis)

    #crossref2 = crossref.copy();
    #crossref2[:, 1] += config.initFrame[1];

    numBack = 0
    totalStep = 0
    penaltyCost = 0
    myMin = crossref[0][1]
    #1000000;
    myMax = crossref[0][1]
    #-1;
    for i in range(1, crossref.shape[0]):
        if myMin > crossref[i][1]:
            myMin = crossref[i][1]
        if myMax < crossref[i][1]:
            myMax = crossref[i][1]

        totalStep += abs(crossref[i][1] - crossref[i - 1][1])
        penaltyCost += abs(crossref[i][1] - crossref[i - 1][1])
        #!!!!TODO: also check whether we stay too long in the same ref frame and penalize runs of more than 10-20 identical values

        if crossref[i][1] < crossref[i - 1][1]:
            numBack += 1
    absAvgStep = totalStep / (crossref.shape[0] - 1)
    avgStep = (crossref[crossref.shape[0] - 1][1] -
               crossref[0][1]) / (crossref.shape[0] - 1)
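    # absAvgStep is the mean |delta ref frame| per query step, while avgStep is
    # the net drift (last - first) / (n - 1): the two coincide for a monotonic
    # alignment, and back-and-forth jitter shows up as absAvgStep > avgStep.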

    cost = 0.0
    myText2 = "ComputeCost(): crossref and V =\n"
    for q in range(crossref.shape[0]):
        assert crossref[q][0] == q
        try:
            cost += V[crossref[q][1]][q]
            myText2 += "[%d %d] %.7f; " % \
                        (q, crossref[q][1] + config.initFrame[1], \
                        V[crossref[q][1]][q])
            #for r in range(int(crossref[q][1]) + config.initFrame[1] - 5, \
            #               int(crossref[q][1]) + config.initFrame[1] + 5):
            for r in range(int(crossref[q][1]) - 5, int(crossref[q][1]) + 5):
                if r < 0:
                    continue
                if r >= V.shape[0]:
                    break
                myText2 += "%.7f " % V[r, q]
        except:
            common.DebugPrintErrorTrace()
        """
        We print the first to nth order statistics - e.g., the first 5 biggest
          vote values.
        I got inspired from
          https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
         (see also
          https://stackoverflow.com/questions/10337533/a-fast-way-to-find-the-largest-n-elements-in-an-numpy-array)
        """
        myArr = V[:, q].copy()
        myArrIndices = myArr.argsort()[-5:][::-1]
        myText2 += "; max ind = %s" % str(myArrIndices + config.initFrame[1])
        myText2 += "; max vals = %s" % str(myArr[myArrIndices])
        myText2 += "\n"

    myText2 += "\n\ncost computed is %.7f\n" % cost
    myText2 += "penalty is %.7f\n" % penaltyCost
    myText2 += "reference frames are in the interval [%d, %d]\n" % \
                (myMin + config.initFrame[1], myMax + config.initFrame[1])
    myText2 += "absolute avg step computed is %.7f\n" % absAvgStep
    myText2 += "  avg step computed is %.7f\n" % avgStep
    myText2 += "Number of times going back (numBack) is %d" % numBack

    #!!!!TODO TODO: print also a synchronization error (look at TPAMI 2013 Evangelidis)

    #myText = "crossref = %s" % crossref2;
    #fOutput = open("crossref.txt", "wt");
    fOutput = open(fileName, "wt")
    fOutput.write(myText2)
    fOutput.close()