Code Example #1
def __init__(self, do_tf_logging=False):
    print('Using L2NetKerasFeature2D')

    # one of "L2Net-HP", "L2Net-HP+", "L2Net-LIB", "L2Net-LIB+", "L2Net-ND", "L2Net-ND+", "L2Net-YOS", "L2Net-YOS+"
    self.net_name = 'L2Net-HP+'

    # mag_factor is how many times the original keypoint scale
    # is enlarged to generate a patch from a keypoint
    self.mag_factor = 3

    # inference batch size
    self.batch_size = 512
    self.process_all = True  # process all the patches at once

    print('==> Loading pre-trained network.')
    self.l2net = L2Net(self.net_name, do_tf_logging=do_tf_logging)
    print('==> Successfully loaded pre-trained network.')
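For context, mag_factor scales a keypoint's detected size to the side of the square image region that is warped into the fixed 32x32 single-channel patch L2Net consumes (see Code Example #4). A minimal sketch of that extraction, assuming OpenCV keypoints and a grayscale image; extract_patch is a hypothetical helper, not part of this listing:

import cv2
import numpy as np

def extract_patch(img_gray, kp, mag_factor=3, patch_size=32):
    # side of the square region around the keypoint, enlarged by mag_factor
    side = int(round(kp.size * mag_factor))
    # crop the region centered on the keypoint (sub-pixel accurate)
    patch = cv2.getRectSubPix(img_gray, (side, side), kp.pt)
    # resize to the fixed input size and add the trailing channel axis
    patch = cv2.resize(patch, (patch_size, patch_size))
    return patch[..., np.newaxis]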
Code Example #2
def build_graph():

    print("Starting...")

    # initialize SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    # initialize L2Net
    l2_net = L2Net("L2Net-HP+", True)

    # memory graph
    memory_graph = MemoryGraph()
    memory_graph_walker = MemoryGraphWalker(memory_graph)

    total_frame_count = 0

    # for each run through the video
    for r in range(runs):

        print("Run", r)

        # open the video file for this run
        cap = cv2.VideoCapture(video_file)

        # starting position of each walker; filled lazily with a random point
        pos = [None for _ in range(walker_count)]

        done = False

        # for each frame
        for t in range(max_frames):
            if done:
                break

            ret, frame = cap.read()

            if not ret:
                done = True
                break

            frame = resize_frame(frame)

            for i in range(walker_count):
                if pos[i] is None:
                    pos[i] = (frame.shape[0] * random.random(),
                              frame.shape[1] * random.random())

            key_points = [(kp.pt[1], kp.pt[0])
                          for kp in sift.detect(frame, None)]

            for i in range(walker_count):
                pos[i] = next_pos(key_points, pos[i], frame.shape)

            windows = extract_windows(frame, pos)

            # extract cnn features from windows
            feats = l2_net.calc_descriptors(windows)

            ids = memory_graph_walker.add_parrelell_observations(t, pos, feats)

            if save_windows:
                for i in range(walker_count):
                    cv2.imwrite('./patches/patch' + str(ids[i]) + '.png',
                                windows[i])

            total_frame_count += 1

        cap.release()
        cv2.destroyAllWindows()

    memory_graph.save_graph(graph_file)
    memory_graph.save_index(index_file)

    print("Done")
Code Example #3
def play_video():
    def on_click(event, x, y, flags, param):
        if event != cv2.EVENT_LBUTTONUP:
            return

        kp = clostest_key_points(key_points, (x, y), 1)[0]
        windows = np.expand_dims(extract_window(resize_frame(frame),
                                                (kp[1], kp[0])),
                                 axis=0)
        feats = l2_net.calc_descriptors(windows)

        print("windows.shape, feats.shape", windows.shape, feats.shape)
        show_patches(windows[0], feats, kp, frame.shape, memory_graph)

    memory_graph = MemoryGraph(graph_path=graph_file, index_path=index_file)

    l2_net = L2Net("L2Net-HP+", True)

    cap = cv2.VideoCapture(video_file)

    # check that the video file opened successfully
    if not cap.isOpened():
        print("Error opening video file")

    sift = cv2.xfeatures2d.SIFT_create()

    cv2.namedWindow("preview")
    cv2.setMouseCallback("preview", on_click)

    # Read until video is completed
    while cap.isOpened():

        # Capture frame-by-frame
        ret, frame = cap.read()

        if ret:

            key_points = [(kp.pt[0], kp.pt[1])
                          for kp in sift.detect(frame, None)]

            for kp in key_points:
                cv2.circle(frame, (int(round(kp[0])), int(round(kp[1]))), 1,
                           colors[2], cv2.FILLED)

            # Display the resulting frame
            cv2.imshow('preview', frame)

            # wait indefinitely for a key press; ESC exits
            key = cv2.waitKey(0)

            if key == 27:  # exit on ESC
                break

        # Break the loop
        else:
            break

    # when everything is done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()
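clostest_key_points (the misspelling is the listing's own) is also not shown. A minimal sketch, assuming it returns the n keypoints nearest to the query point by Euclidean distance:

import math

def clostest_key_points(key_points, point, n):
    # sort keypoints by distance to the query point, closest first
    return sorted(key_points,
                  key=lambda kp: math.hypot(kp[0] - point[0],
                                            kp[1] - point[1]))[:n]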
Code Example #4
File: test_l2net_keras.py  Project: jsBrique/pyslam-1
import sys 
sys.path.append("../../")
import config
config.cfg.set_lib('l2net_keras') 

import cv2 
import numpy as np 

from L2_Net import L2Net 


# one of "L2Net-HP", "L2Net-HP+", "L2Net-LIB", "L2Net-LIB+", "L2Net-ND", "L2Net-ND+", "L2Net-YOS", "L2Net-YOS+"
net_name = 'L2Net-HP'
l2net = L2Net(net_name, do_tf_logging=False)

if False:
    # patches already include the trailing channel axis
    patches = np.random.rand(100, 32, 32, 1)
else:
    # add the trailing channel axis explicitly
    patches = np.random.rand(100, 32, 32)
    patches = np.expand_dims(patches, -1)
descrs = l2net.calc_descriptors(patches)

print('done!')
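Both branches yield patches of shape (100, 32, 32, 1): the network expects batches of 32x32 single-channel patches with an explicit channel axis. calc_descriptors returns one descriptor row per patch; the descriptor width depends on the loaded model (the hnswlib index in Code Example #8 is built with dim=256).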
Code Example #5
def play_annotated_video():

    # Video
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture('./media/cows.mp4')

    # CNN
    l2_net = L2Net("L2Net-HP+", True)

    # initialize SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    memory_graph = MemoryGraph(index_path="./data/index.bin")

    nx = networkx_graph()

    # group_id = 0
    # group_id_dict = {}

    while vc.isOpened():

        rval, frame = vc.read()
        if not rval:
            break

        frame = resize_frame(frame)

        key_points = [(kp.pt[1], kp.pt[0]) for kp in sift.detect(frame, None)]
        random.shuffle(key_points)

        windows = np.empty((len(key_points), window_size, window_size, 1))

        for i in range(len(key_points)):
            windows[i] = extract_window(frame, key_points[i])

        print("windows.shape", windows.shape)

        # extract cnn features from windows
        feats = l2_net.calc_descriptors(windows)
        print("feats.shape", windows.shape)

        ids, distances = memory_graph.knn_query(feats, k=1)

        observation_id = None

        print(distances.shape)

        for i in range(distances.shape[0]):
            # print(distances[i][0])
            if distances[i][0] < 0.95:
                random_keypoint = key_points[i]
                observation_id = ids[i][0]
                print("distances[i][0]", distances[i][0])
                break

        if observation_id is None:
            print("no close observation found")
            continue

        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)

        counts, node_ids = random_walk(nx, observation_id, 100, 1000)

        # count how many leading nodes were visited at least 200 times
        # (assuming counts come back sorted in descending order)
        n = 0
        for count in counts:
            if count < 200:
                break
            n += 1

        nodes = memory_graph.get_nodes(list(node_ids[:n]))

        for i in range(n):
            node = nodes[i]
            x = node["x"]
            y = node["y"]
            t = node["t"]
            cv2.circle(frame, (int(round(x)), int(round(y))), 3, colors[0],
                       cv2.FILLED)

        cv2.circle(
            frame,
            (int(round(random_keypoint[1])), int(round(random_keypoint[0]))),
            7, colors[2], cv2.FILLED)

        cv2.imshow("preview", frame)

        key = cv2.waitKey(0)

        if key == 27:  # exit on ESC
            break

    vc.release()
    cv2.destroyWindow("preview")
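random_walk is not defined in the listing. A minimal sketch under the assumptions the caller implies: it performs repeated random walks over the networkx graph from the start node and returns visit counts with their node ids, most-visited first (the argument order, num_walks then walk_length, is a guess):

import random
from collections import Counter

def random_walk(graph, start_node, num_walks, walk_length):
    # count node visits over many fixed-length random walks
    visits = Counter()
    for _ in range(num_walks):
        node = start_node
        for _ in range(walk_length):
            neighbors = list(graph.neighbors(node))
            if not neighbors:
                break
            node = random.choice(neighbors)
            visits[node] += 1
    # most_common() sorts by count in descending order,
    # matching the `count < 200` early break above
    ranked = visits.most_common()
    counts = [c for _, c in ranked]
    node_ids = [n for n, _ in ranked]
    return counts, node_ids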
Code Example #6
def build_graph():

    print("Starting...")

    # initialize SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    # initialize L2Net
    l2_net = L2Net("L2Net-HP+", True)

    memory_graph = MemoryGraph()

    total_frame_count = 0

    # for each run through the video
    for r in range(runs):

        print("Run", r)

        # open the video file for this run
        cap = cv2.VideoCapture('./media/cows.mp4')

        # select a random starting position
        pos = None

        done = False

        # for each frame
        for t in range(max_frames):
            if done:
                break

            ret, frame = cap.read()

            if not ret:
                done = True
                break

            frame = resize_frame(frame)

            if pos is None:
                pos = (frame.shape[0] * random.random(),
                       frame.shape[1] * random.random())

            key_points = [(kp.pt[1], kp.pt[0])
                          for kp in sift.detect(frame, None)]
            key_points.extend(random_keypoints(frame))

            pos, adjacency_broken = next_pos(key_points, pos, frame.shape)

            window = extract_window(frame, pos)

            windows = window[np.newaxis, ...]

            # extract cnn features from windows
            feats = l2_net.calc_descriptors(windows)

            obs_id = memory_graph.add_observation(t, pos, feats, adjacency_broken)
            cv2.imwrite('./output/testing' + str(obs_id) + '.jpg', window)

            print("frame", t, obs_id, pos, adjacency_broken)

            total_frame_count += 1

        cap.release()
        cv2.destroyAllWindows()

    memory_graph.save_index("./data/index.bin")
    memory_graph.close()

    print("Done")
Code Example #7
def play_annotated_video():

    # Video
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture('./media/cows.mp4')

    # CNN
    l2_net = L2Net("L2Net-HP+", True)

    # initialize SIFT
    sift = cv2.xfeatures2d.SIFT_create()

    memory_graph = MemoryGraph(index_path="./data/index.bin")

    group_id = 0
    group_id_dict = {}

    while vc.isOpened():

        rval, frame = vc.read()
        if not rval:
            break

        frame = resize_frame(frame)

        key_points = [(kp.pt[1], kp.pt[0]) for kp in sift.detect(frame, None)]
        key_points.extend(random_keypoints(frame))

        windows = np.empty((len(key_points), window_size, window_size, 1))

        for i in range(len(key_points)):
            windows[i] = extract_window(frame, key_points[i])

        # extract cnn features from windows
        feats = l2_net.calc_descriptors(windows)

        ids, _ = memory_graph.knn_query(feats, k=1)

        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)

        for i in range(len(key_points)):
            observation_id = ids[i][0]

            # get category of observation with observation_id
            g = memory_graph.get_observation_group(observation_id)
            if g not in group_id_dict:
                group_id_dict[g] = group_id
                group_id += 1
            g = group_id_dict[g]

            print("observation_group", g)

            c = colors[g % len(colors)]

            cv2.circle(
                frame,
                (int(round(key_points[i][1])), int(round(key_points[i][0]))),
                3, c, cv2.FILLED)

        cv2.imshow("preview", frame)

        key = cv2.waitKey(0)

        if key == 27:  # exit on ESC
            break

    vc.release()
    cv2.destroyWindow("preview")
Code Example #8
def build_graph():

    print("Starting...")

    # initialize CNN
    l2_net = L2Net("L2Net-HP+", True)

    # initialize KNN index: an HNSW graph over 256-D descriptors;
    # ef_construction and M trade build time for recall,
    # ef sets the search breadth at query time
    p = hnswlib.Index(space='l2', dim=256)
    p.init_index(max_elements=50000, ef_construction=100, M=16)
    p.set_ef(10)

    # initialize graph database
    uri = "neo4j://localhost:7687"
    driver = GraphDatabase.driver(uri, auth=("neo4j", "password"))

    total_frame_count = 0

    # for each run through the video
    for r in range(runs):

        print("Run", r)

        # open the video file for this run
        cap = cv2.VideoCapture('./media/cows.mp4')

        # select a random starting position
        pos = (random.randint(0,
                              steps[0] - 1), random.randint(0, steps[1] - 1))

        done = False

        last_label = None
        last_labels = None
        last_distances = None

        run_frame_count = 0

        # for each batch
        for t in range(max_batches):
            if done:
                break

            print("Batch", t)

            windows = np.empty((frame_batch_size, window_size, window_size, 1))
            positions = []
            ids = []
            batch_frame_count = 0

            # read frames from video and walk window
            for b in range(frame_batch_size):
                ret, frame = cap.read()

                if not ret:
                    done = True
                    break

                print("pos", pos)

                print("frame.shape", frame.shape)
                frame = resize_frame(frame, window_size, stride, steps)
                print("frame.shape", frame.shape)

                windows[b] = frame[(stride * pos[0]):(stride * pos[0] +
                                                      window_size),
                                   (stride * pos[1]):(stride * pos[1] +
                                                      window_size)]

                cv2.imwrite(
                    './output/testing' + str(total_frame_count) + '.jpg',
                    windows[b])

                positions.append(pos)

                # absolute frame index within this run (distinct from the
                # outer batch index t)
                frame_t = run_frame_count - batch_frame_count + b
                ids.append(window_id(frame_t, pos[0], pos[1]))

                total_frame_count += 1
                batch_frame_count += 1
                run_frame_count += 1

                pos = move(pos)

            # if no frames were read, stop this run
            if batch_frame_count == 0:
                break

            # if the batch is short, trim the windows array to match
            if batch_frame_count != frame_batch_size:
                windows = windows[0:batch_frame_count]

            # extract cnn features from windows
            feats = l2_net.calc_descriptors(windows)
            print("feats.shape", feats.shape)

            for b in range(batch_frame_count):

                obs_id = ids[b]

                frame_t = run_frame_count - batch_frame_count + b
                y = positions[b][0]
                x = positions[b][1]

                # print(frame_t, y, x, obs_id)

                with driver.session() as session:
                    session.write_transaction(insert_observation, obs_id,
                                              frame_t, y, x,
                                              feats_to_json(feats[b]))

            if p.get_current_count() >= knn:

                labels, distances = p.knn_query(feats, k=knn)

                for b in range(batch_frame_count):

                    current_label = ids[b]

                    if b == 0:
                        if last_labels is None or last_distances is None:
                            last_label = current_label
                            continue
                        # neighbors of the final frame of the previous batch
                        nbr_labels = last_labels[-1]
                        nbr_distances = last_distances[-1]
                    else:
                        nbr_labels = labels[b - 1]
                        nbr_distances = distances[b - 1]

                    print("--", last_label, current_label)

                    with driver.session() as session:
                        session.write_transaction(insert_adjacency, last_label,
                                                  current_label, 0.0)

                    for n in range(knn):
                        label = nbr_labels[n]
                        distance = nbr_distances[n]

                        if distance <= distance_threshold:

                            print("distance", distance)

                            with driver.session() as session:
                                session.write_transaction(
                                    insert_adjacency, label, current_label,
                                    distance)

                    last_label = current_label

                last_labels = labels
                last_distances = distances

            p.add_items(feats, ids)

        cap.release()
        cv2.destroyAllWindows()

    p.save_index("./data/index.bin")

    driver.close()
    print("Done")
Code Example #9
def play_annotated_video():

    # Video
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture('./media/cows.mp4')

    # CNN
    l2_net = L2Net("L2Net-HP+", True)

    # KNN
    p = hnswlib.Index(space='l2', dim=256)
    p.load_index("./data/index.bin")

    # DB
    uri = "neo4j://localhost:7687"
    driver = GraphDatabase.driver(uri, auth=("neo4j", "password"))

    group_id = 0
    group_id_dict = {}

    while vc.isOpened():

        rval, frame = vc.read()
        if not rval:
            break

        frame = resize_frame(frame, window_size, stride, steps)

        # print(frame.shape)

        windows = np.empty((steps[0] * steps[1], window_size, window_size, 1))

        for x in range(steps[0]):
            for y in range(steps[1]):
                w = x * steps[1] + y  # row-major index into the windows array
                #print((stride*y), (stride*y+window_size), (stride*x), (stride*x+window_size))
                windows[w] = frame[(stride * x):(stride * x + window_size),
                                   (stride * y):(stride * y + window_size)]
                #cv2.imwrite('./output/testing'+str(w)+'.jpg', windows[w])

        # extract cnn features from windows
        feats = l2_net.calc_descriptors(windows)

        labels, distances = p.knn_query(feats, k=1)

        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)

        for x in range(steps[0]):
            for y in range(steps[1]):
                label = labels[x * steps[1] + y][0]

                # get category of observation with label
                with driver.session() as session:
                    g = session.read_transaction(get_observation_group, label)
                    if g not in group_id_dict:
                        group_id_dict[g] = group_id
                        group_id += 1
                    g = group_id_dict[g]

                print("observation_group", g)

                # clamp the group id to the last available color
                c = colors[min(g, len(colors) - 1)]

                cv2.circle(frame, (stride * y + round(window_size / 2),
                                   stride * x + round(window_size / 2)), 3, c,
                           cv2.FILLED)

        cv2.imshow("preview", frame)

        key = cv2.waitKey(0)

        if key == 27:  # exit on ESC
            break

    vc.release()
    cv2.destroyWindow("preview")
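resize_frame(frame, window_size, stride, steps) is not shown either. Two constraints follow from the code above: the resized frame must tile exactly into the steps[0] x steps[1] grid of stride-offset windows, and it must be single-channel with a trailing axis, since each (window_size, window_size, 1) slice is assigned directly into windows and the frame is later converted back with COLOR_GRAY2BGR. A minimal sketch under those assumptions:

import cv2
import numpy as np

def resize_frame(frame, window_size, stride, steps):
    # size at which `steps` windows, offset by `stride`, tile the frame exactly
    height = (steps[0] - 1) * stride + window_size
    width = (steps[1] - 1) * stride + window_size
    frame = cv2.resize(frame, (width, height))
    # grayscale with an explicit channel axis -> shape (height, width, 1)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray[..., np.newaxis]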