Example #1

import cv2
import numpy as np
import detect  # project-local helpers: segment_morph(), label_img()


def simple_multitracker(img_list):
    n_frames = len(img_list)
    cur_frame = cv2.imread(img_list[0])
    cur_labels = detect.label_img(detect.segment_morph(cur_frame, False),
                                  centroids=False)
    trackers = cv2.MultiTracker_create()  # OpenCV 3.4.x contrib API
    ret_images = []
    colors = []
    ret_frame = cur_frame.copy()
    # initialize a tracker for each detected object
    for obj_label in np.unique(cur_labels):
        if obj_label == 0:
            continue
        bbox = cv2.boundingRect(
            np.array(cur_labels == obj_label, dtype='uint8'))
        trackers.add(cv2.TrackerCSRT_create(), cur_frame, bbox)
        cv2.rectangle(ret_frame, (bbox[0], bbox[1]),
                      (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), 2)
        colors.append((np.random.randint(0, 255), np.random.randint(0, 255),
                       np.random.randint(0, 255)))
    ret_images.append(ret_frame)

    for frame_idx in range(1, n_frames):
        cur_frame = cv2.imread(img_list[frame_idx])
        #cur_labels = detect.label_img(detect.segment_morph(cur_frame, False), centroids=False)
        ret_frame = cur_frame.copy()
        success, boxes = trackers.update(cur_frame)
        for i, newbox in enumerate(boxes):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
        ret_images.append(ret_frame)

    return ret_images
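
Note: in OpenCV 4.5.1+ the contrib MultiTracker moved to the cv2.legacy namespace. A minimal sketch of the same initialization on that API (same variable names as above):

import cv2

# legacy namespace required for MultiTracker in OpenCV >= 4.5.1
trackers = cv2.legacy.MultiTracker_create()
trackers.add(cv2.legacy.TrackerCSRT_create(), cur_frame, tuple(bbox))
ok, boxes = trackers.update(cur_frame)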
Example #2

import sys
import threading

import cv2


def StartTracking(RectsQueue, TrackQueue):
    """
	The co-ords of the object to be tracked is received through the Rects Queue. MIL MultiTracking is used through OpenCV Contrib
	Drawn rectangle is passed over the TrackQueue
    """
    # re-run detection/tracking every 60 s; pass the queues through to the new call
    threading.Timer(60, StartTracking, args=(RectsQueue, TrackQueue)).start()
    tracker = cv2.MultiTracker("MIL")
    video = cv2.VideoCapture(0)
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    x_pixel = 800
    y_pixel = 470
    ok, frame = video.read()
    if not ok:
        sys.exit()
    image = cv2.resize(frame, (x_pixel, y_pixel))
    while True:
        print("Detection again")
        boxes = []
        for i in range(5):
            rects = RectsQueue.get()  # drain the queue, keeping only the newest detections
        #fourcc = cv2.VideoWriter_fourcc(*'XVID')
        #outVideo = cv2.VideoWriter('trackingWITHDLIB.mp4',fourcc, 20.0, (800,470))
        for a, rect in enumerate(rects):
            #print ("Rect is:",rect)

            # dlib-style rect -> (x, y, w, h)
            trackpoint = (rect.left(), rect.top(),
                          rect.right() - rect.left(),
                          rect.bottom() - rect.top())
            #print (trackpoint)
            tracker.add(image, trackpoint)
        counter = 0

        while counter < 100:
            counter += 1
            ok, frame = video.read()
            if not ok:
                break
            img = cv2.resize(frame, (x_pixel, y_pixel))
            found, boxes = tracker.update(img)
            undrawn = img.copy()
            #TrackQueue.put(img)
            print(found)
            if not found:
                TrackQueue.put(undrawn)
            else:
                for newbox in boxes:
                    #print (boxes)
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]),
                          int(newbox[1] + newbox[3]))
                    #print ("p1: ",newbox[2],"p2:",newbox[3])
                    cv2.rectangle(img, p1, p2, (0, 0, 255, 10))
                    TrackQueue.put(img)
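
The docstring implies a producer/consumer setup around the two queues. A minimal sketch of how they might be wired up; the detector thread that fills RectsQueue is assumed and not shown in the original:

import queue
import threading

import cv2

RectsQueue = queue.Queue()  # a detector thread puts lists of dlib-style rects here
TrackQueue = queue.Queue()  # StartTracking puts frames with drawn boxes here

threading.Thread(target=StartTracking,
                 args=(RectsQueue, TrackQueue),
                 daemon=True).start()

while True:
    frame = TrackQueue.get()  # consume the drawn frames, e.g. for display
    cv2.imshow("tracking", frame)
    if cv2.waitKey(1) & 0xff == 27:  # Esc to quit
        break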
Example #3

    def __init__(self, tracker_model):
        print("setting up {} Tracker".format(tracker_model))
        self.tracker_model = tracker_model
        # https://www.pyimagesearch.com/2018/07/30/opencv-object-tracking/
        # recommends CSRT for slower fps / higher accuracy,
        # KCF for higher fps with slightly lower accuracy,
        # and MOSSE for pure speed
        # initialize a dictionary that maps strings to their corresponding
        # OpenCV object tracker implementations
        self.OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }
        assert self.tracker_model in self.OPENCV_OBJECT_TRACKERS, \
            "tracker_model input must be in {}".format(
                self.OPENCV_OBJECT_TRACKERS.keys())
        self.multi_tracker = cv2.MultiTracker_create()
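
For illustration, the factory dictionary can be used like this to seed the multi-tracker; the wrapper class name, frame, and box below are placeholders, assuming the OpenCV 3.4.x contrib API:

import cv2

wrapper = TrackerWrapper("csrt")  # hypothetical name for the class above
frame = cv2.imread("frame.png")   # placeholder frame
bbox = (50, 50, 100, 100)         # placeholder (x, y, w, h)
tracker = wrapper.OPENCV_OBJECT_TRACKERS[wrapper.tracker_model]()
wrapper.multi_tracker.add(tracker, frame, bbox)
ok, boxes = wrapper.multi_tracker.update(frame)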
Example #4

def main():
    count = 0
    repo = ImageRepository()
    multi_tracker = cv2.MultiTracker(TRACKER_TYPE)
    cap = cv2.VideoCapture(CAM_ID)  # IndustriPi's camera ID is 1
    visualizer = Visualizer(th_init=8)
    is_first = True
    start_ssd(SSD_TH)

    while is_first:
        # read frames
        success, frame = cap.read()
        if not success:
            print('Cannot read frame!')
            break
        frame = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))
        count += 1
        if count == 1:
            save_image(frame)
        repo.add_image(frame)
        if os.path.exists(OUT_FILE):
            time.sleep(0.015)  # TODO: is this delay really needed?
            boxes, ids = detection(OUT_FILE)
            os.remove(OUT_FILE)
            is_first = False
            repo.update()

    count = 0
    max_id = 0 if ids is None else max(ids)
    pre_boxes, pre_ids = None, None
    while cap.isOpened():
        t1 = time.time()
        # read frames
        success, frame = cap.read()
        if not success:
            print('Camera is closed!')
            break
        frame = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))

        count += 1
        repo.add_image(frame)
        pre_img = repo.get_image()
        #print('Start update')
        if count == 1:
            #multi_tracker = cv2.MultiTracker(TRACKER_TYPE)
            save_image(frame)
            ids, is_new, new_boxes, max_id = compare(boxes,
                                                     pre_boxes,
                                                     pre_ids,
                                                     max_id=max_id,
                                                     th=IOU_TH)
            if is_new:
                if not init_tracker(multi_tracker, pre_img, new_boxes):
                    print('Initialize multi_tracker failed!')
        else:
            if count % 5 == 0:
                ok, boxes_array = multi_tracker.update(pre_img)
                # if ok and boxes_array != ():
                #     boxes = boxes_array.tolist()
                #     boxes = [tuple(box) for box in boxes]
                #     ids, _, _, max_id = compare(boxes, pre_boxes, pre_ids, max_id=max_id, th=IOU_TH)
                # else:
                #     boxes = None
                #     ids = None
            else:
                boxes = pre_boxes
                ids = pre_ids
        #print('Start show image')
        visualizer.update_info(pre_img)
        visualizer.draw(boxes, ids)
        visualizer.run()

        pre_boxes = boxes
        pre_ids = ids

        if os.path.exists(OUT_FILE):  #and count>=len(repo.pre_images):
            #print('Detected out file!')
            time.sleep(0.015)
            boxes, ids = detection(OUT_FILE)
            os.remove(OUT_FILE)
            count = 0
            repo.update()

        fps = 1. / (time.time() - t1)
        print("fps= %f" % (fps))

    free_ssd()
    cap.release()
    cv2.destroyAllWindows()
Example #5

import cv2
import numpy as np
import detect  # project-local helpers, as in Example #1


def simple_multitracker_greedy(img_list):
    # nearest-centroid helper (not used by the overlap-based matching below)
    def dist(point, point_dict):
        min_dist = float('inf')
        min_dist_idx = -1
        for key in point_dict.keys():
            distance = np.sqrt(
                np.sum((np.array(point) - np.array(point_dict[key]))**2))
            if distance < min_dist:
                min_dist = distance
                min_dist_idx = key
        return min_dist, min_dist_idx

    # find the labeled object that best overlaps the given bbox
    def best_overlap(bbox, labels):
        max_overlap = 0
        max_overlap_idx = -1
        bbox_arr = [int(i) for i in bbox]
        for obj_idx in np.unique(labels)[1:]:
            mask = labels == obj_idx
            # fraction of the bbox area covered by this object's mask
            overlap = np.sum(
                mask[bbox_arr[1]:bbox_arr[1] + bbox_arr[3],
                     bbox_arr[0]:bbox_arr[0] + bbox_arr[2]]) / (
                         bbox_arr[2] * bbox_arr[3])  # alternatively: / np.sum(mask)
            if overlap > max_overlap:
                max_overlap = overlap
                max_overlap_idx = obj_idx
        return max_overlap, max_overlap_idx

    n_frames = len(img_list)
    cur_frame = cv2.imread(img_list[0])
    cur_labels = detect.label_img(detect.segment_morph(cur_frame, False),
                                  centroids=False)
    trackers = cv2.MultiTracker_create()  # OpenCV 3.4.x contrib API
    ret_images = []
    colors = []
    ret_frame = cur_frame.copy()
    # initialize a tracker for each detected object
    for obj_label in np.unique(cur_labels):
        if obj_label == 0:
            continue
        bbox = cv2.boundingRect(
            np.array(cur_labels == obj_label, dtype='uint8'))
        trackers.add(cv2.TrackerCSRT_create(), cur_frame, bbox)
        cv2.rectangle(ret_frame, (bbox[0], bbox[1]),
                      (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), 2)
        colors.append((np.random.randint(0, 255), np.random.randint(0, 255),
                       np.random.randint(0, 255)))
    ret_images.append(ret_frame)
    min_overlap = 0.3

    for frame_idx in range(1, n_frames):
        cur_frame = cv2.imread(img_list[frame_idx])
        cur_labels = detect.label_img(detect.segment_morph(cur_frame, False),
                                      centroids=False)
        ref_labels = cur_labels.copy()
        _, cur_centroids = detect.mark_object_centroids(cur_labels)
        ret_frame = cur_frame.copy()
        used_box_idx = []
        success, boxes = trackers.update(cur_frame)
        found_objects = cur_centroids.copy()
        for i, newbox in enumerate(boxes):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            # skip 'dead' tracks whose box no longer contains any labeled pixels
            if np.sum(cur_labels[int(newbox[1]):int(newbox[1] + newbox[3]),
                                 int(newbox[0]):int(newbox[0] + newbox[2])]) > 0:
                overlap, best_matching_obj_idx = best_overlap(
                    newbox, ref_labels)
                if overlap > min_overlap:  # assign the box to the best-overlapping object
                    #cx, cy = cur_centroids[best_matching_obj_idx]
                    # p1 = (int(newbox[0]), int(newbox[1]))
                    # p2 =  (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))

                    cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
                    found_objects.pop(best_matching_obj_idx)  # remove this object from the list
                    ref_labels[ref_labels == best_matching_obj_idx] = 0
                    used_box_idx.append(i)
                    #ref_labels[int(newbox[1]): int(newbox[1] + newbox[3]), int(newbox[0]):int(newbox[0] + newbox[2])] = 0
                # else: #trying to find the occluding objects
                #     overlap, best_matching_obj_idx = best_overlap(newbox, cur_labels)
                #     if distance < max_dist//4:
                #         cx, cy = cur_centroids[best_matching_obj_idx]
                #         p1 = (int(newbox[0]), int(newbox[1]))
                #         p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                #
                #         cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
                #         #found_objects.pop(best_matching_obj_idx)  # remove this object from the list

        if len(found_objects) > 0:
            for obj in found_objects.keys():
                bbox = cv2.boundingRect(
                    np.array(cur_labels == obj, dtype='uint8'))
                mask = ref_labels == obj
                good_bbox = False
                for i, newbox in enumerate(boxes):
                    overlap = np.mean(
                        mask[int(newbox[1]):int(newbox[1] + newbox[3]),
                             int(newbox[0]):int(newbox[0] + newbox[2])])

                    if overlap > min_overlap / 5:
                        if (i in used_box_idx):
                            good_bbox = True
                            break
                        else:
                            p1 = (int(newbox[0]), int(newbox[1]))
                            p2 = (int(newbox[0] + newbox[2]),
                                  int(newbox[1] + newbox[3]))
                            cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
                            ref_labels[ref_labels == obj] = 0
                            used_box_idx.append(i)
                            good_bbox = True
                            break

                if not good_bbox:
                    trackers.add(cv2.TrackerCSRT_create(), cur_frame, bbox)
                    colors.append(
                        (np.random.randint(0, 255), np.random.randint(0, 255),
                         np.random.randint(0, 255)))
                    cv2.rectangle(ret_frame, (bbox[0], bbox[1]),
                                  (bbox[0] + bbox[2], bbox[1] + bbox[3]),
                                  colors[-1], 2)
        ret_images.append(ret_frame)

    return ret_images
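
The matching criterion in best_overlap is simply the fraction of the tracker box covered by each labeled object. A toy check of that metric on a synthetic label image (all values here are illustrative):

import numpy as np

labels = np.zeros((10, 10), dtype=int)
labels[2:6, 2:6] = 1           # a 4x4 object with label 1

x, y, w, h = (2, 2, 4, 4)      # a box exactly covering the object
mask = labels == 1
overlap = np.sum(mask[y:y + h, x:x + w]) / (w * h)
print(overlap)                 # 1.0 -> label 1 wins the greedy assignment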
Example #6

# this will ensure that new ants are tracked


# TO DO:
#   - the tracker now RESETS when two bounding boxes are too close to each
#     other; I also need to check whether a box has been left behind
#   - this will not work for a Social Carry

import numpy as np
import cv2
from scipy.spatial import distance


camera = cv2.VideoCapture("/home/seth/Host_AntVideos/Examples/edited_video/00001a.mp4")
mask = cv2.imread('mask.png')
algorithm = "KCF"
tracker = cv2.MultiTracker(algorithm)
file = open("tracks.txt","w+")

# Write the header of the file
file.write("FRAME   ID  X   Y\r\n")

init_once = False

meas = []
mp = np.array((2, 1), np.float32)  # Kalman measurement vector

##################################################################################################
# MASK STUFF 

x_offset = y_offset = 0

# ... (the frame-capture / tracker-update loop is truncated in this excerpt;
# the fragment below draws the results for one frame) ...

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
 
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
     
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);
 
        # Display result
        cv2.imshow("Tracking", frame)
 
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # Esc
            break
cv2.destroyAllWindows()
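
The reset condition from the TO DO above (two bounding boxes too close together) could be checked with the scipy.spatial.distance import that is already present; a minimal sketch, with min_dist as an assumed threshold in pixels:

def boxes_too_close(boxes, min_dist=20):
    # boxes are (x, y, w, h) tuples as returned by tracker.update()
    centers = [(x + w / 2.0, y + h / 2.0) for x, y, w, h in boxes]
    for i in range(len(centers)):
        for j in range(i + 1, len(centers)):
            if distance.euclidean(centers[i], centers[j]) < min_dist:
                return True
    return False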
Example #7

cv2.MultiTracker()
Example #8

    def clear_tracker(self):
        self.multi_tracker = cv2.MultiTracker_create()
Example #9

import numpy as np
import cv2

cv2.namedWindow("tracking")
camera = cv2.VideoCapture("E:/code/opencv/samples/data/768x576.avi")
tracker = cv2.MultiTracker("MIL")
bbox1 = (638.0,230.0,56.0,101.0)
bbox2 = (240.0,210.0,60.0,104.0)
bbox3 = (486.0,149.0,54.0,83.0)
init_once = False

while camera.isOpened():
    ok, image=camera.read()
    if not ok:
        print('no image read')
        break

    if not init_once:
        # add a list of boxes:
        ok = tracker.add(image, (bbox1,bbox2))
        # or add single box:
        ok = tracker.add(image, bbox3)
        init_once = True

    ok, boxes = tracker.update(image)
    print(ok, boxes)

    for newbox in boxes:
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(image, p1, p2, (200, 0, 0))

    cv2.imshow("tracking", image)
    if cv2.waitKey(1) & 0xff == 27:  # Esc to quit
        break
Example #10

def track():
    print("tracker initialization")
    return cv2.MultiTracker("MIL")
Example #11

    def __init__(self, algorithm='KCF'):
        self.tracker = cv2.MultiTracker(algorithm)

Example #12

    def __init__(self, video_path, fps=None, fourcc=None, window_name=None,
                 track_alg=None, object_name=None):
        
        self._video = video_path
        video = cv2.VideoCapture(find_data_file(self._video))
        self.fourcc = fourcc if fourcc else FOURCC
        self.fps = fps if fps else FPS
        self.width = int(video.get(3))   # CAP_PROP_FRAME_WIDTH
        self.height = int(video.get(4))  # CAP_PROP_FRAME_HEIGHT
        self.fps = int(video.get(5))     # CAP_PROP_FPS; overrides the fps set above
        self.resolution = (self.width, self.height)
        self.file_name = self._video.split('/')[-1]
        self.video_name = self.file_name.split('.')[0]

        self.window_name = window_name if window_name else WINDOW_NAME
        self.track_alg = track_alg if track_alg else TRACK_ALGORITHM
        self.color = COLOR
        self.font = FONT
        self.count = args['frame_ind']  # index of frame
        self.orig_gray = None # original grayscale frame
        self.orig_col = None # original BGR frame
        self.frame = None # current frame
        self.object_name = []
        self.deleted_name = []
        
        self._start = None
        self._n_pass_frame = 0
        
        # setup tracker
        self.tracker = cv2.MultiTracker(self.track_alg)
        
        # different mode while tracking
        self._add_box = True # flag of add bounding box mode
        self._retargeting = False # flag of retargeting bounding box mode
        self._delete_box = False # flag of delete bounding box mode
        self._pause = False # flag of pause mode
        
        # initialize tkinter GUI variable
        self._askname  = None
        self.root = None
        self.cb = None

        # mouse coordinate
        self._roi_pts = []
        self._mv_pt = None
        
        # initiate bounding box
        self._bboxes = []
        self._roi = []
        self._init_bbox = []
        self._len_bbox = 0
        
        # index of beetle
        self._n = 0
        self._fix_target = False
        
        # variables for detector model
        # flag
        self._run_model = args['run_model']
        self._update = False
        # load model data or weight
        self._model = load_model('model/%s.h5' % args['model_name'])
        
        self._stop_obj = None
        self._is_stop = None

        # background subtractor model, for adding potential bounding box
        self._run_motion = True
        self._bs = cv2.createBackgroundSubtractorMOG2()
        self._pot_rect = []

        # variables for online update model
        self._record = {}
        self._n_angle = N_ANGLE
        self._ratio = RATIO
        self._itv_f = INTERVAL_FRAME

        # initiate rat contour
        self._show_rat = False
        self.on_rat = []
        self.rat_cnt = []
Example #13
import cv2

win = "C:/Users/jc306494/Documents/PythonAnalysis/SampleVid/GP010016_fast.mp4"
mac = "/Users/Cesar/PyCode_MacOSv1/GP010016_fast.mp4"

print(
    'Select 4 crabs using the bounding box, and press Enter or Space after each selection'
)

cv2.namedWindow("tracking")
vid = cv2.VideoCapture(mac)
tracker = cv2.MultiTracker('MIL')
init_once = False

ok, image = vid.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv2.selectROI('tracking', image, fromCenter=False)
bbox2 = cv2.selectROI('tracking', image, fromCenter=False)
bbox3 = cv2.selectROI('tracking', image, fromCenter=False)
bbox4 = cv2.selectROI('tracking', image, fromCenter=False)
while vid.isOpened():
    ok, image = vid.read()
    if not ok:
        print('no image to read')
        break

    if not init_once:
        ok = tracker.add(image, bbox1)
        ok = tracker.add(image, bbox2)
        ok = tracker.add(image, bbox3)
        ok = tracker.add(image, bbox4)
        init_once = True

    ok, boxes = tracker.update(image)

    for newbox in boxes:
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(image, p1, p2, (200, 0, 0))

    cv2.imshow('tracking', image)
    if cv2.waitKey(1) & 0xff == 27:  # Esc to quit
        break
Example #14

    def __init__(self, algorithm):
        self.algorithm = algorithm
        self.__tracker = cv2.MultiTracker(algorithm)
        self.trackResult = []
Example #15

    def reset(self):
        self.__tracker = cv2.MultiTracker(self.algorithm)
        self.trackResult = []
Example #16
                        print("NORMAL")
                        # Need to print something out here in the file, because the reset is back to how it was

                    # else, update the next boxes
                    else:
                        # keyPointListTemp[i] is the position of blob i
                        x = keyPointListTemp[0][0] - 5
                        y = keyPointListTemp[0][1] - 5
                        bbox1 = (x, y, length, length)
                        x1 = keyPointListTemp[1][0] - 5
                        y1 = keyPointListTemp[1][1] - 5
                        bbox2 = (x1, y1, length, length)
                        print("The Other")

                    tracker = cv2.MultiTracker("KCF")
                    ok = tracker.add(image, (bbox1, bbox2))
                    ok, boxes = tracker.update(image)

##################################################################################################

    # Show blobs
    im_with_keypoints = cv2.drawKeypoints(
        image, keypoints, np.array([]), (0, 0, 255),
        cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    key = cv2.waitKey(1) & 0xff

    if not ok:
        break

    if key == ord('p'):
Example #17

                default=(1270, 720),
                type=tuple,
                help="frame width and height")
ap.add_argument("-w",
                "--wait",
                default=1,
                type=int,
                help="delay between frames")

args = vars(ap.parse_args())

W, H = args["frame_parameters"]

WAIT = args["wait"]

trackers = cv2.MultiTracker()
cam = cv2.VideoCapture(args["video"])

i = 0

#F = 28.0 * 102 / 1.7
#F = 1680.0
F = 5.5 * 300 / 1.75  # Tur
alpha = math.radians(args["angle"])  # camera tilt angle in radians
Hc = args["hight"]  # camera height (argument name kept as in the original)
Hch = args["object_hight"]  # object height

# D(x): ground distance to a point from its pixel row, via the pin-hole model
D = lambda x: -math.tan(alpha - math.atan((H / 2 - x) / F)) * Hc
# Dx(x, d, y): lateral offset of the point, given its ground distance d
Dx = lambda x, d, y: -(W / 2 - x) * ((Hc**2 + d**2) /
                                     ((H / 2 - y)**2 + F**2))**0.5
Example #18

    def _initialize_tracker(self):
        self.tracker = cv2.MultiTracker(self.track_alg)
        if len(self._bboxes) > 0:
            self.tracker.add(self.frame, tuple(self._bboxes))
Example #19

people_count = 0

options = {
    "model": "cfg/yolo.cfg",
    "load": "yolov2.weights",
    "threshold": 0.25
}
tfnet = TFNet(options)

cv.namedWindow("tracking")
camera = cv.VideoCapture("sample_img/pedestrians.mp4")

fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (1920 // 3, 1080 // 3))

tracker = cv.MultiTracker()
trackerObjects = []
newTrackerObjects = []
boxesHistory = []
boxesStopped = []

ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()
image = cv.resize(image, (1920 // 3, 1080 // 3))
count = 0

while camera.isOpened():
    count += 1
    print(count)
Example #20

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = '/home/georges/models-master/object_detection/output_inference_graph_mobilenet.pb/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/home/georges/models-master/object_detection/data/mscoco_label_map.pbtxt'

NUM_CLASSES = 30

trackerTypes = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']

trackerType = trackerTypes[int(args["type"])]

# initialize a MultiTracker object with the chosen tracking algorithm
multipleTrackerOpenCV = cv2.MultiTracker(trackerType)
# opener = urllib.request.URLopener()
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
#   file_name = os.path.basename(file.name)
#   if 'frozen_inference_graph.pb' in file_name:
#     tar_file.extract(file, os.getcwd())

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
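
For completeness, a minimal sketch of running the loaded graph to get detections that could seed multipleTrackerOpenCV above, assuming the standard tensor names of the TF1 object-detection API ('image_tensor:0', 'detection_boxes:0', 'detection_scores:0'):

with detection_graph.as_default():
    with tf.Session() as sess:
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # frame is a placeholder BGR image
        det_boxes, det_scores = sess.run(
            ['detection_boxes:0', 'detection_scores:0'],
            feed_dict={'image_tensor:0': rgb[None, ...]})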