def match_edge_end_frames_with_frame(
        possible_edge,
        i: int,
        query_video_ith_frame: vo2.ImgObj,
        no_of_edge_end_frames_to_consider: int = 2):
    edge: Edge = possible_edge["edge"]
    j_max = edge.distinct_frames.no_of_frames()
    # Only the last no_of_edge_end_frames_to_consider frames of the edge are matched,
    # clamped so short edges do not produce a negative start index
    j = max(j_max - no_of_edge_end_frames_to_consider, 0)
    match, maxmatch = None, 0
    while j < j_max:
        img_obj_from_edge: vo2.ImgObj = edge.distinct_frames.get_object(j)
        image_fraction_matched, min_good_matches = mt.SURF_returns(
            img_obj_from_edge.get_elements(),
            query_video_ith_frame.get_elements(), 2500, 0.7)
        if image_fraction_matched != -1 and image_fraction_matched > 0.09:
            if image_fraction_matched > maxmatch:
                match, maxmatch = j, image_fraction_matched
        j = j + 1
    if match is not None:  # "is not None", so a match at index 0 also counts
        print("edge end has matched")
        possible_edge["edge_ended_probability"] = possible_edge[
            "edge_ended_probability"] + 0.5
        return True
    else:
        return False
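# Self-contained sketch of the end-frame window used above: with the default
# no_of_edge_end_frames_to_consider = 2, only the last two frame indices of an
# edge are compared, clamped so that short edges do not yield negative indices.
def end_frame_indices(no_of_frames: int, no_of_edge_end_frames_to_consider: int = 2):
    start = max(no_of_frames - no_of_edge_end_frames_to_consider, 0)
    return list(range(start, no_of_frames))

print(end_frame_indices(7))  # [5, 6]
print(end_frame_indices(1))  # [0]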
    def match_edges(self, query_index):
        """
        Finds matches of query frame with frames in possible edges and updates last 5 matches
        :param:
        query_index: current index (to be queried) of query frames
        :return:
        progress : bool -> if a match has been found or not
        """
        # Assume all possible edge objects are there in possible_edges
        progress = False
        match, maxmatch, maxedge = None, 0, None
        # These 3 variables correspond to the best match for the given query_index frame
        # match : edge_index (int), maxmatch: fraction_matched(float), maxedge: edge_name(str)
        for i, possible_edge in enumerate(self.possible_edges):
            for j in range(possible_edge.to_match_params[0],
                           possible_edge.to_match_params[1]):
                fraction_matched, features_matched = mt.SURF_returns(
                    possible_edge.get_frame_params(j),
                    self.get_query_params(query_index))
                if fraction_matched > 0.09 or features_matched > 200:
                    progress = True

                    if fraction_matched > maxmatch:
                        match, maxmatch, maxedge = j, fraction_matched, possible_edge.name

            # First check for the best match among the max-confidence edges.
            # If one is found, there is no need to check the others.
            if i == self.max_confidence_edges - 1 and match is not None:
                print("---Max match for " + str(query_index) + ": ", end="")
                print((match, maxedge))
                # match is not None here, so there is no (None, None) case to handle
                self.current_location_str = "---Max match for " + str(
                    query_index) + ": (" + str(match) + ", " + str(
                        maxedge) + ")"
                self.graph_obj.display_path(0, self.current_location_str)
                # Update last_5_matches, keeping only the 5 most recent
                self.last_5_matches.append((match, maxedge))
                if len(self.last_5_matches) > 5:
                    self.last_5_matches.pop(0)
                return progress

        print("---Max match for " + str(query_index) + ": ", end="")
        print((match, maxedge))
        if match is None:
            self.current_location_str = "---Max match for " + str(
                query_index) + ": (None, None)"
        else:
            self.current_location_str = "---Max match for " + str(
                query_index) + ": (" + str(match) + " ," + str(maxedge) + " )"
        self.graph_obj.display_path(0, self.current_location_str)
        # Update last_5_matches
        self.last_5_matches.append((match, maxedge))
        if len(self.last_5_matches) > 5:
            self.last_5_matches.remove(self.last_5_matches[0])
        return progress
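# A minimal alternative sketch: the append-then-trim idiom above can be replaced
# with collections.deque(maxlen=5), which discards the oldest entry automatically.
# The sample (match, edge_name) tuples below are illustrative.
from collections import deque

last_5_matches = deque(maxlen=5)
for item in [(0, "0_1"), (1, "0_1"), (2, "1_2"), (3, "1_2"), (4, "1_2"), (5, "2_3")]:
    last_5_matches.append(item)
print(list(last_5_matches))  # only the 5 most recent pairs are kept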
def match_node_with_frames(some_query_img_objects: list, graph_obj: Graph):
    search_list = graph_obj.Nodes
    node_confidence = []
    # node_confidence is a list of (node.identity: int, confidence: int, total_fraction_matched: float)
    for node in search_list:
        for img_obj in some_query_img_objects:
            node_images: vo2.DistinctFrames = node.node_images
            if node_images is not None:
                for data_obj in node_images.get_objects():
                    image_fraction_matched, min_good_matches = mt.SURF_returns(
                        img_obj.get_elements(), data_obj.get_elements(),
                        2500, 0.7)
                    if min_good_matches > 100 and image_fraction_matched != -1:
                        if image_fraction_matched > 0.05 or min_good_matches > 225:
                            print("Match found between " +
                                  str(img_obj.get_time()) +
                                  " of query video and " +
                                  str(data_obj.get_time()) +
                                  " of node data")
                            if len(node_confidence) > 0 and node_confidence[-1][0] == node.identity:
                                # Same node matched again: bump its count and accumulated fraction
                                entry = node_confidence[-1]
                                node_confidence[-1] = (
                                    node.identity, entry[1] + 1,
                                    entry[2] + image_fraction_matched)
                            else:
                                node_confidence.append(
                                    (node.identity, 1, image_fraction_matched))
    # Sort by (confidence, total fraction matched), best first
    node_confidence = sorted(node_confidence,
                             key=lambda x: (x[1], x[2]),
                             reverse=True)
    print(node_confidence)
    final_node_list = []
    for entry in node_confidence:
        final_node_list.append(graph_obj.get_node(entry[0]))
    return final_node_list
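# Self-contained illustration of the node_confidence ordering used above: entries
# sort by match count first, then by total fraction matched, both descending.
node_confidence = [(2, 3, 0.21), (5, 3, 0.35), (1, 1, 0.90)]
node_confidence = sorted(node_confidence, key=lambda x: (x[1], x[2]), reverse=True)
print(node_confidence)  # [(5, 3, 0.35), (2, 3, 0.21), (1, 1, 0.90)] -> node 5 ranks first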
def save_distinct_ImgObj(video_str, folder, frames_skipped: int = 0, check_blurry: bool = True,
                         hessian_threshold: int = 2500, ensure_min=True):
    """Saves non redundent and distinct frames of a video in folder
    Parameters
    ----------
    video_str : is video_str = "webcam" then loads webcam. O.W. loads video at video_str location,
    folder : folder where non redundant images are to be saved,
    frames_skipped: Number of frames to skip and just not consider,
    check_blurry: If True then only considers non blurry frames but is slow
    hessian_threshold
    ensure_min: whether a minimum no of frames (at least one per 50) is to be kept irrespective of
        whether they are distinct or not

    Returns
    -------
    array,
        returns array contaning non redundant frames(mat format)
    """

    ensure_path(folder + "/jpg")

    frames_skipped += 1  # stride: with n frames skipped, every (n + 1)-th frame is considered

    if video_str == "webcam":
        video_str = 0
    cap = cv2.VideoCapture(video_str)
    # cap= cv2.VideoCapture(0)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 200)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 200)

    distinct_frames = DistinctFrames()
    i = 0
    a = None
    b = None
    check_next_frame = False
    i_prev = 0  # the last i which was stored

    detector = cv2.xfeatures2d_SURF.create(hessian_threshold)

    ret, frame = cap.read()
    if not ret:
        raise IOError("Could not read the first frame from " + str(video_str))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    keypoints, descriptors = detector.detectAndCompute(gray, None)

    a = (len(keypoints), descriptors, serialize_keypoints(keypoints), gray.shape)
    img_obj = ImgObj(a[0], a[1], i, a[2], a[3])
    save_to_memory(img_obj, 'image' + str(i) + '.pkl', folder)
    cv2.imwrite(folder + '/jpg/image' + str(i) + '.jpg', gray)
    distinct_frames.add_img_obj(img_obj)
    i_of_a = 0  # index of the frame currently stored in a
    while True:
        ret, frame = cap.read()
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if i % frames_skipped != 0 and not check_next_frame:
                i = i + 1
                continue

            cv2.imshow('frame', gray)
            # print(i)

            if check_blurry:
                if is_blurry_grayscale(gray):
                    check_next_frame = True
                    print("frame " + str(i) + " skipped as blurry")
                    i = i + 1
                    continue
                check_next_frame = False

            keypoints, descriptors = detector.detectAndCompute(gray, None)
            b = (len(keypoints), descriptors, serialize_keypoints(keypoints), gray.shape)
            if len(keypoints) < 100:
                print("frame " + str(i) + " skipped as " + str(len(keypoints)) + " < 100")
                i = i + 1
                continue
            import matcher as mt  # local import kept as in the original; ideally hoisted to module level
            image_fraction_matched, min_good_matches = mt.SURF_returns(a, b, 2500, 0.7, True)
            if image_fraction_matched == -1:
                check_next_frame = True
                i = i + 1
                continue
            check_next_frame = False
            if 0 < image_fraction_matched < 0.1 or min_good_matches < 50 or (ensure_min and i - i_prev > 50):
                img_obj2 = ImgObj(b[0], b[1], i, b[2], b[3])
                print(str(image_fraction_matched) + " fraction match between " + str(i_of_a) + " and " + str(i))
                save_to_memory(img_obj2, 'image' + str(i) + '.pkl', folder)
                cv2.imwrite(folder + '/jpg/image' + str(i) + '.jpg', gray)
                distinct_frames.add_img_obj(img_obj2)
                a = b
                i_of_a=i
                i_prev = i

            i = i + 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    print("Created distinct frames object")
    cap.release()
    cv2.destroyAllWindows()
    distinct_frames.calculate_time()
    return distinct_frames
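# Usage sketch: the video path and output folder below are illustrative, and a
# SURF-enabled OpenCV build (opencv-contrib-python with xfeatures2d) is assumed.
distinct = save_distinct_ImgObj("sample.mp4", "edge_data/distinct_frames",
                                frames_skipped=3, check_blurry=True)
print(distinct.no_of_frames())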
def save_distinct_realtime_modified_ImgObj(video_str: str,
                                           folder: str,
                                           frames_skipped: int = 0,
                                           check_blurry: bool = True,
                                           hessian_threshold: int = 2500,
                                           ensure_min=True,
                                           livestream=False):
    """Realtime variant of save_distinct_ImgObj: saves distinct query frames to
    folder and runs node/edge matching after every accepted frame."""
    ensure_path(folder + "/jpg")

    frames_skipped += 1

    if video_str == "webcam":
        video_str = 0
    cap = cv2.VideoCapture(video_str)
    # cap= cv2.VideoCapture(0)
    # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 200)
    # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 200)

    i = 0
    a = None
    b = None
    check_next_frame = False
    i_prev = 0  # the last i which was stored

    detector = cv2.xfeatures2d_SURF.create(hessian_threshold)

    ret, frame = cap.read()
    if not ret:
        raise IOError("Could not read the first frame from " + str(video_str))
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    keypoints, descriptors = detector.detectAndCompute(gray, None)

    a = (len(keypoints), descriptors, vo2.serialize_keypoints(keypoints),
         gray.shape)
    img_obj = ImgObj(a[0], a[1], i, a[2], a[3])
    save_to_memory(img_obj, 'image' + str(i) + '.pkl', folder)
    cv2.imwrite(folder + '/jpg/image' + str(i) + '.jpg', gray)
    query_video_distinct_frames.add_img_obj(img_obj)
    node_and_edge_real_time_matching.find_edge_with_nodes()
    while True:
        if livestream:
            # Re-open the capture each iteration so reads keep up with a live stream
            cap = cv2.VideoCapture(video_str)
        ret, frame = cap.read()
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if i % frames_skipped != 0 and not check_next_frame:
                i = i + 1
                continue

            cv2.imshow('frame', gray)
            # print(i)
            if check_blurry:
                if is_blurry_grayscale(gray):
                    check_next_frame = True
                    # print("frame " + str(i) + " skipped as blurry")
                    i = i + 1
                    continue
                check_next_frame = False
            keypoints, descriptors = detector.detectAndCompute(gray, None)
            if len(keypoints) < 50:
                print("frame " + str(i) + " skipped as " +
                      str(len(keypoints)) + " <50")
                i = i + 1
                continue
            b = (len(keypoints), descriptors,
                 vo2.serialize_keypoints(keypoints), gray.shape)
            image_fraction_matched, min_good_matches = mt.SURF_returns(
                a, b, 2500, 0.7, True)
            if image_fraction_matched == -1:
                check_next_frame = True
                i = i + 1
                continue
            check_next_frame = False
            if 0 < image_fraction_matched < 0.10 or min_good_matches < 50 or (
                    ensure_min and i - i_prev > 50):
                img_obj2 = ImgObj(b[0], b[1], i, b[2], b[3])
                save_to_memory(img_obj2, 'image' + str(i) + '.pkl', folder)
                cv2.imwrite(folder + '/jpg/image' + str(i) + '.jpg', gray)
                query_video_distinct_frames.add_img_obj(img_obj2)
                node_and_edge_real_time_matching.find_edge_with_nodes()
                a = b
                i_prev = i

            i = i + 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    print("released")
    cap.release()
    cv2.destroyAllWindows()
    global query_video_ended
    query_video_ended = True
    query_video_distinct_frames.calculate_time()
    return query_video_distinct_frames
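# Usage sketch: the stream URL and folder are illustrative; with livestream=True
# the capture is re-opened on every iteration of the read loop above.
frames = save_distinct_realtime_modified_ImgObj(
    "http://192.0.2.1:8080/video", "query_distinct_frame",
    frames_skipped=2, livestream=True)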
    def match_edge_with_frame(possible_edge, i: int,
                              query_video_ith_frame: vo2.ImgObj):
        # Match a possible edge object with query_video_ith_frame
        # possible edge here is passed as reference.

        j = possible_edge["last_matched_j"]
        # matching resumes from last_matched_j (the last matched edge frame);
        # this gave better and more real-time results

        # max value up to which j should be iterated
        jmax = possible_edge["last_matched_j"] + possible_edge[
            "no_of_frames_to_match"]

        # let pe = possible_edge["edge"]
        # query_video_ith_frame will be matched against pe[j ... min(jmax, number of frames in pe)]
        # on multiple matches, the frame with the highest fraction matched (maxmatch) is kept
        match, maxmatch = None, 0
        while j < jmax and j < possible_edge[
                "edge"].distinct_frames.no_of_frames():
            # print(j)
            edge = possible_edge["edge"]
            img_obj_from_edge: vo2.ImgObj = edge.distinct_frames.get_object(j)
            image_fraction_matched, min_good_matches = mt.SURF_returns(
                img_obj_from_edge.get_elements(),
                query_video_ith_frame.get_elements(), 2500, 0.7)
            # print("query i: ", i, ", jth frame of " + str(possible_edge["edge"].src) + "to" +
            # str(possible_edge["edge"].dest) + " :", j, image_fraction_matched)
            if image_fraction_matched != -1:
                if image_fraction_matched > 0.09:
                    # print("query i: ", i, ", jth frame of " + str(possible_edge["edge"].src) + "to" +
                    #   str(possible_edge["edge"].dest) + " :", j, image_fraction_matched)
                    if image_fraction_matched > maxmatch:
                        match, maxmatch = j, image_fraction_matched
            j = j + 1
        if match is None:
            # no match is found in the j to jmax interval
            possible_edge["last_matched_i_with_j"] = i
            possible_edge["confidence"] = possible_edge[
                "confidence"] - 0.5  # decreasing confidence
            possible_edge["no_of_continuous_no_match"] = possible_edge[
                "no_of_continuous_no_match"] + 1
            # if possible_edge["no_of_frames_to_match"] < 5:
            possible_edge["no_of_frames_to_match"] = possible_edge[
                "no_of_frames_to_match"] + 1
            # else:
            #     possible_edge["last_matched_j"] = possible_edge["last_matched_j"] + 1
            #     possible_edge["no_of_frames_to_match"] = 3
            if possible_edge["no_of_continuous_no_match"] >= 3:
                # handling the case if the query frame is just not matching
                # possible_edge["last_matched_i_with_j"] = possible_edge["last_matched_i_with_j"] + 1
                # restoring some confidence
                possible_edge["confidence"] = possible_edge["confidence"] + 1
                # also little restoration in no_of_frames_to_match
                # possible_edge["no_of_frames_to_match"] = possible_edge["no_of_frames_to_match"] - 1
                possible_edge["no_of_continuous_no_match"] = 1
        else:
            # match is found in the j to jmax interval
            img_obj_from_edge: vo2.ImgObj = edge.distinct_frames.get_object(
                match)
            print("popo query i: ", i, ", jth frame", match,
                  img_obj_from_edge.time_stamp, maxmatch)
            possible_edge["last_matched_j"] = match
            possible_edge["last_matched_i_with_j"] = i
            possible_edge["confidence"] = possible_edge["confidence"] + 1
            if possible_edge["no_of_frames_to_match"] > 3:
                possible_edge["no_of_frames_to_match"] = possible_edge[
                    "no_of_frames_to_match"] - 1
            possible_edge["no_of_continuous_no_match"] = 0

        if j == possible_edge["edge"].distinct_frames.no_of_frames():
            # in this case the edge is ending
            # ---- improvement required here: for a possible_edge with a low number of distinct
            # frames, j reaches the end even without any match, causing an increase in
            # edge_ended_probability ----
            possible_edge["edge_ended_probability"] = possible_edge[
                "edge_ended_probability"] + 0.4
    def handle_edges(self):
        """
        Updates possible_edges, next_possible_edges and
        decides most_occuring_edge and cur_edge_index ( which give the current location )
        based on last_5_matches
        :return: None
        """
        # if self.confirmed_path is empty, the starting point is not defined yet
        if len(self.confirmed_path) == 0:

            # Append all edges to self.possible_edges, with to_match_params covering only the first frame of each edge
            for nd in self.graph_obj.Nodes[0]:
                for edge in nd.links:
                    possible_edge_node = PossibleEdge(edge)
                    # <- Change (0, 1) to include more frames of each edge in
                    # the determination of the initial node
                    possible_edge_node.to_match_params = (0, 1)
                    self.possible_edges.append(possible_edge_node)

            # Pick up the last query index

            query_index = self.query_objects.no_of_frames() - 1
            progress = self.match_edges(query_index)

            # We need at least 2 matches to consider the first node
            # (<- change this threshold to set the number of matches required)
            if not progress or len(self.last_5_matches) < 2:
                return

            # Find the most occurring edge in last_5_matches
            last_5_edges_matched = []
            for i in range(len(self.last_5_matches)):
                if self.last_5_matches[i][1] is not None:
                    last_5_edges_matched.append(self.last_5_matches[i][1])
            maxCount, most_occuring_edge, most_occuring_second = 0, None, None
            for edge in last_5_edges_matched:
                coun = last_5_edges_matched.count(edge)
                if coun > maxCount:
                    most_occuring_edge = edge
                    most_occuring_second = None
                    maxCount = coun  # bug fix: maxCount was never updated here
                elif coun == maxCount and edge != most_occuring_edge:
                    most_occuring_second = edge

            # If most_occuring_second is not None, two edges share the max count
            if most_occuring_edge is None or most_occuring_second is not None:
                return

            # At this point we have the most occurring edge
            for possible_edge in self.possible_edges:
                if possible_edge.name == most_occuring_edge:

                    # Setting self.probable_path, self.confirmed_path
                    self.probable_path = possible_edge
                    self.probable_path.to_match_params = (
                        0, possible_edge.no_of_frames)
                    self.max_confidence_edges = 1
                    src, dest = most_occuring_edge.split("_")
                    self.confirmed_path = [int(src)]

            # Setting self.next_possible_edges in this order:
            # 1. current edge
            # 2. nearby edges
            self.next_possible_edges = [self.probable_path]
            nd = self.graph_obj.get_node(self.probable_path.edge.dest)
            for edge in nd.links:
                present = False
                for possible_edg in self.next_possible_edges:
                    if possible_edg.name == edge.name:
                        present = True
                        break
                if present:
                    continue
                possibleEdge = PossibleEdge(edge)
                self.next_possible_edges.append(possibleEdge)
            nd = self.graph_obj.get_node(self.probable_path.edge.src)
            for edge in nd.links:
                if edge.dest == self.probable_path.edge.dest:
                    continue
                possibleEdge = PossibleEdge(edge)
                self.next_possible_edges.append(possibleEdge)

        # If something is already there in self.next_possible_edges, use that
        elif len(self.next_possible_edges) != 0:
            self.possible_edges = self.next_possible_edges

        # Else use the node identity stored in self.confirmed_path
        # This should probably be deprecated
        elif len(self.possible_edges) == 0:
            if type(self.confirmed_path[-1]) == int:
                identity = self.confirmed_path[-1]
                nd = self.graph_obj.get_node(identity)
                if nd is not None:
                    for edge in nd.links:
                        possible_edge = PossibleEdge(edge)
                        self.possible_edges.append(possible_edge)

        query_index = self.query_objects.no_of_frames() - 1
        progress = self.match_edges(query_index)

        if not progress:
            # print("err 0")
            return

        if len(self.last_5_matches) < 5:
            self.next_possible_edges = self.possible_edges
            # print("err 1")
            return

        # Find the most occurring edge in last_5_matches
        last_5_edges_matched = []
        for i in range(len(self.last_5_matches)):
            if self.last_5_matches[i][1] is not None:
                last_5_edges_matched.append(self.last_5_matches[i][1])
        maxCount, most_occuring_edge, most_occuring_second = 0, None, None
        for edge in last_5_edges_matched:
            coun = last_5_edges_matched.count(edge)
            if coun > maxCount:
                most_occuring_edge = edge
                most_occuring_second = None
                maxCount = coun
            elif coun == maxCount and edge != most_occuring_edge:
                most_occuring_second = edge

        # If most_occuring_second is not None, two edges share the max count
        if most_occuring_edge is None or most_occuring_second is not None:
            # print("err 2")
            return

        if (None, None) in self.last_5_matches and maxCount < 3:
            # print("err 3")
            return

        # At this point we have the most occurring edge
        for possible_edge in self.possible_edges:
            if possible_edge.name == most_occuring_edge:
                # Setting self.probable_path
                self.probable_path = possible_edge
                self.max_confidence_edges = 1

        # Find the most occurring edge index (in the last 5 matches) on the current edge
        edge_indexes = []
        for matches in self.last_5_matches:
            if matches[1] == most_occuring_edge:
                edge_indexes.append(matches[0])
        cur_edge_index = -1
        maxCount = 0
        for index in edge_indexes:
            coun = edge_indexes.count(index)
            if coun > maxCount or (coun == maxCount
                                   and index > cur_edge_index):
                cur_edge_index = index
                maxCount = coun

        # cur_edge_index now holds the most occurring edge index (in the last 5 matches) on the current edge

        # Setting self.next_possible_edges in this order:
        # 1. current edge
        # 2. edges whose src is the dest of the current edge and whose angle deviates
        #    less than 20 deg from the current edge
        #    (added only if cur_edge_index is the last index of the current edge)
        # 3. other nearby edges
        self.next_possible_edges = [self.probable_path]
        nd = self.graph_obj.get_node(self.probable_path.edge.dest)
        if cur_edge_index > self.probable_path.no_of_frames - 2:
            count_of_straight_edges, straightPossibleEdge = 0, None
            for tup in self.probable_path.edge.angles:
                if abs(tup[1]) < 20:
                    count_of_straight_edges += 1
                    src, dest = tup[0].split('_')
                    edg = self.graph_obj.get_edge(int(src), int(dest))
                    possible_edge = PossibleEdge(edg)
                    straightPossibleEdge = possible_edge
                    self.next_possible_edges.append(possible_edge)
                    self.max_confidence_edges += 1
            if count_of_straight_edges == 1:  # exactly one straight edge ahead
                # If cur_edge_index is the last index of the current edge, and only one
                # edge is straight ahead (angle < 20 deg) and its first frame matches,
                # then that edge becomes self.probable_path (i.e., the current edge)
                fraction_matched, features_matched = mt.SURF_returns(
                    straightPossibleEdge.get_frame_params(0),
                    self.get_query_params(query_index))
                if fraction_matched >= 0.1:  # this threshold could instead be something like
                    # 0.7 * self.probable_path.matches_found[-1].fraction_matched
                    self.probable_path = straightPossibleEdge
                    cur_edge_index = 0
                    self.next_possible_edges = [self.probable_path]
                    nd = self.graph_obj.get_node(self.probable_path.edge.dest)
        for edge in nd.links:
            present = False
            for possible_edg in self.next_possible_edges:
                if possible_edg.name == edge.name:
                    present = True
                    break
            if present:
                continue
            possibleEdge = PossibleEdge(edge)
            self.next_possible_edges.append(possibleEdge)
        nd = self.graph_obj.get_node(self.probable_path.edge.src)
        for edge in nd.links:
            if edge.dest == self.probable_path.edge.dest:
                continue
            possibleEdge = PossibleEdge(edge)
            self.next_possible_edges.append(possibleEdge)

        # Displaying current location on graph
        # print(str(most_occuring_edge)+", "+str(cur_edge_index))
        edgeObj, allow = None, True
        for nd in self.graph_obj.Nodes[0]:
            if not allow: break
            for edge in nd.links:
                if edge.name == most_occuring_edge:
                    edgeObj = edge
                    allow = False
                    break
        last_jth_matched_img_obj = edgeObj.distinct_frames.get_object(
            cur_edge_index)
        time_stamp = last_jth_matched_img_obj.get_time()
        total_time = edgeObj.distinct_frames.get_time()
        fraction = time_stamp / total_time if total_time != 0 else 0
        self.graph_obj.on_edge(edgeObj.src, edgeObj.dest, fraction)
        # print("graph called")
        self.graph_obj.display_path(0, self.current_location_str)
        return
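# Self-contained sketch: the "most occurring edge with tie detection" scans in
# handle_edges could also be written with collections.Counter, e.g.:
from collections import Counter

last_5_edges_matched = ["0_1", "0_1", "1_2", "0_1", "1_2"]
counts = Counter(last_5_edges_matched).most_common()
most_occuring_edge = None
if counts and (len(counts) == 1 or counts[0][1] > counts[1][1]):
    most_occuring_edge = counts[0][0]  # unique maximum -> no tie
print(most_occuring_edge)  # "0_1"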