Example #1
X = []
Y = []

finger_center = (int(finger_box[0] + finger_box[2] / 2),
                 int(finger_box[1] + finger_box[3] / 2))
phone_center = (int(phone_box[0] + phone_box[2] / 2),
                int(phone_box[1] + phone_box[3] / 2))
X.append(finger_center[0] - phone_center[0])
Y.append(finger_center[1] - phone_center[1])

# Set up tracker.
finger_tracker = cv2.TrackerCSRT_create()
phone_tracker = cv2.TrackerCSRT_create()

finger_ok = finger_tracker.init(frame, finger_box)
phone_ok = phone_tracker.init(frame, phone_box)

while True:
    # Read a new frame
    ok, frame = video.read()
    if not ok:
        break

    # Start timer
    timer = cv2.getTickCount()

    # Update tracker
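    # assumed continuation (the snippet is truncated here): update both
    # trackers and, on success, log the new finger-to-phone offsets as above
    finger_ok, finger_box = finger_tracker.update(frame)
    phone_ok, phone_box = phone_tracker.update(frame)
    if finger_ok and phone_ok:
        finger_center = (int(finger_box[0] + finger_box[2] / 2),
                         int(finger_box[1] + finger_box[3] / 2))
        phone_center = (int(phone_box[0] + phone_box[2] / 2),
                        int(phone_box[1] + phone_box[3] / 2))
        X.append(finger_center[0] - phone_center[0])
        Y.append(finger_center[1] - phone_center[1])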
Example #2
#else, explicitly call the appropriate object tracker constructor
#else:
#initialise a dict that maps strings to their corresponding OpenCV object tracker constructors
#OPENCV_OBJECT_TRACKERS = {
#"csrt": cv2.TrackerCSRT_create,
#"kcf": cv2.TrackerKCF_create,
#"boosting": cv2.TrackerBoosting_create,
#"mil": cv2.TrackerMIL_create,
#"tld": cv2.TrackerTLD_create,
#"medianflow": cv2.TrackerMedianFlow_create,
#"mosse": cv2.TrackerMOSSE_create
#}

#grab the appropriate object tracker from the dict of OpenCV tracker constructors
else:
    tracker = cv2.TrackerCSRT_create(
    )  #OPENCV_OBJECT_TRACKERS[args["tracker"]]()

#init bounding box coords of obj to track
initBB = None

print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(1.0)

#else, grab ref to vid file
#else:
#    vs = cv2.VideoCapture(args["video"])

#init fps throughput estimator
fps = None
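
#NOTE: a minimal, assumed sketch of the read/update loop this snippet leads
#into (initBB would be set via cv2.selectROI on a key press before tracking):
while True:
    frame = vs.read()
    if frame is None:
        break
    if initBB is not None:
        (success, box) = tracker.update(frame)
        if success:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break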
Example #3
def get_axis(path, bbox):
    # Set up tracker.

    sentinel = True
    initial_threshold = 0
    displacement_nodes = []

    # Check if the initial threshold was exceeded by the projectile
    threshold_crossed = False

    tracker = cv2.TrackerCSRT_create()

    # Read video
    video = cv2.VideoCapture(path)

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Optionally select a bounding box interactively instead
    if args.select:
        bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while sentinel:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            displacement_nodes.append(
                (int(bbox[0] + bbox[2] // 2), int(bbox[1] + bbox[3] // 2)))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            if len(displacement_nodes) > 1:
                initial_threshold = displacement_nodes[0][1] - 15
                if displacement_nodes[-1][0] <= 2:
                    sentinel = False
                if (displacement_nodes[-2][1] < displacement_nodes[-1][1]
                        and not threshold_crossed
                        and len(displacement_nodes) > 30):
                    sentinel = False
                for i in range(len(displacement_nodes) - 1):
                    cv2.line(frame, displacement_nodes[i],
                             displacement_nodes[i + 1], (0, 255, 0), 3)
            if int(bbox[1] + bbox[3] // 2) < initial_threshold:
                cv2.putText(frame, "Crossed Threshold Upwards", (100, 110),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                threshold_crossed = True
            else:
                threshold_crossed = False
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, "CSRT Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        if args.show:
            # Display result
            cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    df = pd.DataFrame(displacement_nodes)
    return df
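
# hypothetical usage (file name and box assumed): track a projectile and get
# its per-frame centre positions as a DataFrame
# df = get_axis('throw.mp4', (287, 23, 86, 320))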
Example #4
 def __init__(self, box, frame):
     bbox = (box[0], box[1], box[2] - box[0], box[3] - box[1])
     self.tracker = cv2.TrackerCSRT_create()
     self.tracker.init(frame, bbox)
 def __init__(self):
     self.tracker = cv2.TrackerCSRT_create()
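
 # hypothetical companion method for either wrapper above (not in the source)
 def update(self, frame):
     # returns (ok, bbox) straight from the underlying CSRT tracker
     return self.tracker.update(frame)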
Example #6
        multiTracker = cv2.MultiTracker_create()
        cv2.putText(frame,
                    "please hold the marker still and close to the camera",
                    (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        if object != (0, 0, 0, 0):
            if np.all(np.abs(np.subtract(posrange, object)) < 8):
                counter += 1
                cv2.rectangle(frame, (object[0], object[1]),
                              (object[0] + object[2], object[1] + object[3]),
                              (0, 0, 255), 2, 1)
                print(counter)
            else:
                counter = 0
            posrange = object
            if counter > 100:
                multiTracker.add(cv2.TrackerCSRT_create(), frame, object)
                counter = 0
        else:
            counter = 0

    success, track = multiTracker.update(frame)
    if not success:
        track = []
        multiTracker.clear()
    for i, newbox in enumerate(track):
        cv2.putText(frame, "tracking", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(frame, p1, p2, (0, 255, 0), 2, 1)
def tracking_video(root, name, point, edge = 20, save_img = True):
    # Set up tracker.
    # Instead of MIL, you can also use
    tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'CSRT', 'MOSSE']
    tracker_type = tracker_types[5]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()

    # Read video
    video = cv2.VideoCapture(join(root, name))

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    cv2.imwrite("result/%05d.jpg"%(0),frame)
    # Define an initial bounding box
    bbox = (point[0] - edge, point[1] - edge, edge*2 , edge*2)

    # Uncomment the line below to select a different bounding box
    # bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    
    ans = []
    index_ = 0
    while True:
        # Read a new frame
        index_ += 1
        ok, frame = video.read()
        if not ok:
            break
         
        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            p3 = (int(bbox[0] + bbox[2]/2), int(bbox[1] + bbox[3]/2))
            ans.append(p3)
            if (save_img):
                cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
        else :
            # Tracking failure
            if (save_img):
                cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)

        if (save_img):
            # Display tracker type on frame
            cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)

            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);

            # Display result
            cv2.imwrite("result/%05d.jpg"%(index_) , frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
    return np.array(ans)
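
# hypothetical usage (file names assumed): track a 20 px half-width box around
# a point through clip.avi and collect the per-frame centres
# centres = tracking_video('videos', 'clip.avi', (320, 240))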
Example #8
def simple_multitracker_greedy(img_list):
    def dist(point, point_dict):
        min_dist = 1000000
        min_dist_idx = -1
        for key in point_dict.keys():
            distance = np.sqrt(
                np.sum((np.array(point) - np.array(point_dict[key]))**2))
            if distance < min_dist:
                min_dist = distance
                min_dist_idx = key
        return min_dist, min_dist_idx

    def best_overlap(bbox,
                     labels):  #finding the object that overlaps with the bbox
        max_overlap = 0
        max_overlap_idx = -1
        bbox_arr = [int(i) for i in bbox]
        for obj_idx in np.unique(labels)[1:]:
            mask = labels == obj_idx
            overlap = np.sum(
                mask[bbox_arr[1]:bbox_arr[1] + bbox_arr[3],
                     bbox_arr[0]:bbox_arr[0] + bbox_arr[2]]) / (
                         bbox_arr[2] * bbox_arr[3])  #/np.sum(mask)
            if overlap > max_overlap:
                max_overlap = overlap
                max_overlap_idx = obj_idx
        return max_overlap, max_overlap_idx

    n_frames = len(img_list)
    cur_frame = cv2.imread(img_list[0])
    cur_labels = detect.label_img(detect.segment_morph(cur_frame, False),
                                  centroids=False)
    trackers = cv2.MultiTracker_create()
    ret_images = []
    colors = []
    ret_frame = cur_frame.copy()
    for obj_label in np.unique(
            cur_labels):  #initializing trackers for each detected object
        if obj_label == 0:
            continue
        bbox = cv2.boundingRect(
            np.array(cur_labels == obj_label, dtype='uint8'))
        trackers.add(cv2.TrackerCSRT_create(), cur_frame, bbox)
        cv2.rectangle(ret_frame, (bbox[0], bbox[1]),
                      (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 0), 2)
        colors.append((np.random.randint(0, 255), np.random.randint(0, 255),
                       np.random.randint(0, 255)))
    ret_images.append(ret_frame)
    min_overlap = 0.3

    for frame_idx in range(1, n_frames):
        cur_frame = cv2.imread(img_list[frame_idx])
        cur_labels = detect.label_img(detect.segment_morph(cur_frame, False),
                                      centroids=False)
        ref_labels = cur_labels.copy()
        _, cur_centroids = detect.mark_object_centroids(cur_labels)
        ret_frame = cur_frame.copy()
        used_box_idx = []
        success, boxes = trackers.update(cur_frame)
        found_objects = cur_centroids.copy()
        for i, newbox in enumerate(boxes):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            if np.sum(cur_labels[int(newbox[1]):int(newbox[1] + newbox[3]),
                                 int(newbox[0]):int(newbox[0] + newbox[2])]
                      ) > 0:  #not showing the 'dead' tracks
                overlap, best_matching_obj_idx = best_overlap(
                    newbox, ref_labels)
                if overlap > min_overlap:  #assign the object with the closest centroid
                    #cx, cy = cur_centroids[best_matching_obj_idx]
                    # p1 = (int(newbox[0]), int(newbox[1]))
                    # p2 =  (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))

                    cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
                    found_objects.pop(best_matching_obj_idx
                                      )  #remove this object from the list
                    ref_labels[ref_labels == best_matching_obj_idx] = 0
                    used_box_idx.append(i)
                    #ref_labels[int(newbox[1]): int(newbox[1] + newbox[3]), int(newbox[0]):int(newbox[0] + newbox[2])] = 0
                # else: #trying to find the occluding objects
                #     overlap, best_matching_obj_idx = best_overlap(newbox, cur_labels)
                #     if distance < max_dist//4:
                #         cx, cy = cur_centroids[best_matching_obj_idx]
                #         p1 = (int(newbox[0]), int(newbox[1]))
                #         p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                #
                #         cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
                #         #found_objects.pop(best_matching_obj_idx)  # remove this object from the list

        if len(found_objects) > 0:
            for obj in found_objects.keys():
                bbox = cv2.boundingRect(
                    np.array(cur_labels == obj, dtype='uint8'))
                mask = ref_labels == obj
                good_bbox = False
                for i, newbox in enumerate(boxes):
                    overlap = np.mean(
                        mask[int(newbox[1]):int(newbox[1] + newbox[3]),
                             int(newbox[0]):int(newbox[0] + newbox[2])])

                    if overlap > min_overlap / 5:
                        if (i in used_box_idx):
                            good_bbox = True
                            break
                        else:
                            p1 = (int(newbox[0]), int(newbox[1]))
                            p2 = (int(newbox[0] + newbox[2]),
                                  int(newbox[1] + newbox[3]))
                            cv2.rectangle(ret_frame, p1, p2, colors[i], 2, 1)
                            ref_labels[ref_labels == obj] = 0
                            used_box_idx.append(i)
                            good_bbox = True
                            break

                if not good_bbox:
                    trackers.add(cv2.TrackerCSRT_create(), cur_frame, bbox)
                    colors.append(
                        (np.random.randint(0, 255), np.random.randint(0, 255),
                         np.random.randint(0, 255)))
                    cv2.rectangle(ret_frame, (bbox[0], bbox[1]),
                                  (bbox[0] + bbox[2], bbox[1] + bbox[3]),
                                  colors[-1], 2)
        ret_images.append(ret_frame)

    return ret_images
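
# hypothetical usage (paths assumed): run the greedy multi-tracker over a
# sorted list of frame files and write out the annotated frames
# import glob
# annotated = simple_multitracker_greedy(sorted(glob.glob('frames/*.png')))
# for idx, img in enumerate(annotated):
#     cv2.imwrite('tracked_%05d.png' % idx, img)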
Example #9
def main():

    # capture video from webcam
    cap = cv2.VideoCapture(0)

    # set video size
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 480)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 800)

    # read first frame
    success, frame = cap.read()
    
    if not success:
        print('Failed to read video.')
        sys.exit(1)

    # create multi_tracker object
    multi_tracker = cv2.MultiTracker_create()

    # load mask image
    mask_img = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)
    # width, height, channels = frame.shape
    # mask_img = cv2.resize(mask_img, (height, width))

    print('press SPACE to register cargo')

    colors = []
    labels = []

    first_frame = None
    
    while cap.isOpened():
        success, original_frame = cap.read()
        if not success:
            print('Failed to read video')
            sys.exit(1)
        frame = original_frame.copy()

        # get updated location of objects in subsequent frames
        success, boxes = multi_tracker.update(frame)

        # draw tracked objects
        for i, newbox in enumerate(boxes):
            x, y = newbox[0], newbox[1]
            w, h = newbox[2], newbox[3]
            p1 = (int(x), int(y))
            p2 = (int(x + w), int(y + h))
            cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
            cv2.putText(frame, labels[i], (int(x),int(y)), cv2.FONT_HERSHEY_SIMPLEX, int(w)*int(h)/100000+0.5,(0,0,255),2)

        # draw geofence
        frame = geofence(frame, mask_img)

        # show frame
        #cv2.namedWindow('Cargo Mana', cv2.WINDOW_NORMAL)
        cv2.imshow('Cargo Mana', frame)
        k = cv2.waitKey(1) & 0xFF

        # select object to track on SPACE button
        if k == ord(' '):
            bbox = cv2.selectROI('multi_tracker', frame)
            colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
            labels.append(f'{randint(0, 1000):03}-{randint(0, 1000):02} {randint(0, 1000):04}')
            # create CSRT algorithm tracker and add to multi_tracker
            multi_tracker.add(cv2.TrackerCSRT_create(), frame, bbox)
            success, boxes = multi_tracker.update(frame)

        # send data on S button
        if k == ord('s'):
            payload = {label: dict(zip(('x', 'y', 'w', 'h'), box))
                       for label, box in zip(labels, boxes)}
            ok, original_image = cv2.imencode('.jpg', original_frame)
            content_settings = ContentSettings('image/jpeg')
            service.create_blob_from_bytes('box', 'frame.jpg', original_image.tobytes(), content_settings=content_settings)
            content_settings = ContentSettings('application/json')
            service.create_blob_from_text('box', 'box.json', json.dumps(payload), content_settings=content_settings)
            print(payload)

        # reset geofence on R button
        if k == ord('r'):
            first_frame = None

        # quit on ESC button
        if k == 27: # Esc pressed
            break
Example #10
 def __init__(self, frame: np.ndarray, bbox: np.ndarray, tid: int):
     self.id_ = tid
     self.tracker = cv2.TrackerCSRT_create()
     self.tracker.init(frame, tuple(bbox))
     self.age = 0
     self.misses = 0
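
 # hypothetical companion method (assumed: `age` counts frames tracked and
 # `misses` counts consecutive failed updates; not in the source)
 def update(self, frame: np.ndarray):
     ok, bbox = self.tracker.update(frame)
     self.age += 1
     self.misses = 0 if ok else self.misses + 1
     return ok, bbox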
Example #11
def Passing_Counter_Zone(Vehicle_x, Vehicle_y, Vehicle_w, Vehicle_h, initBB,
                         frame, tracker, Substracted, RED_cnt, BLUE_cnt,
                         vertices, inout):
    # Detecting Zone
    pts = detecting_zone(vertices)
    global car_cnt

    # For each detected vehicle
    for d_num in range(0, len(Vehicle_x)):
        # (p_x, p_y) is the detection point: the bottom-centre of the vehicle box
        p_x = Vehicle_x[d_num] + int(Vehicle_w[d_num] / 2)
        p_y = Vehicle_y[d_num] + int(Vehicle_h[d_num])

        crosses = 0  # number of ray crossings (even: outside the zone, odd: inside)
        for p in range(0, 4):  # the zone is always a quadrilateral, so 4 edges
            next_p = (p + 1) % 4
            if (pts[p][1] > p_y) != (pts[next_p][1] > p_y):  # p_y lies between this edge's y endpoints

                # if atX lies on the rightward ray from the point, count the crossing
                atX = int((pts[next_p][0] - pts[p][0]) * (p_y - pts[p][1]) /
                          (pts[next_p][1] - pts[p][1]) + pts[p][0])
                if p_x < atX:
                    crosses = crosses + 1
                    # (debug) draw the crossing count as text
                    #cv2.putText(frame, str(crosses), (atX, p_y), cv2.FONT_HERSHEY_SIMPLEX, 0.7,COLOR_GREEN, 3)

        if crosses % 2 == 0:  # point is outside the zone
            pass
        elif crosses % 2 == 1:  # point is inside the zone
            if initBB is None:
                initBB = (Vehicle_x[d_num], Vehicle_y[d_num], Vehicle_w[d_num],
                          Vehicle_h[d_num])
                # activate the tracker
                tracker = cv2.TrackerCSRT_create()
                tracker.init(
                    Substracted,
                    initBB)  # track on the background-subtracted image, not the original frame

    # Runs while a tracker is active
    if initBB is not None:
        # grab the new bounding box coordinates of the object
        (success, box) = tracker.update(Substracted)

        # check to see if the tracking was a success
        # on tracking success
        if success:
            (x, y, w, h) = [int(v) for v in box]

            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255),
                          2)  #yellow bounding box
            cv2.rectangle(Substracted, (x, y), (x + w, y + h), (0, 255, 255),
                          2)

            cv2.putText(frame, str(car_cnt), (x + int(w / 2) - 10, y + h - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

            Tracking_Xp = x + int(w / 2)
            Tracking_Yp = y + h

            # If the tracking point and a detected point are within the distance
            # threshold (200 px below), match them and re-seed the tracker box
            Matched = False
            Matched_Xp = 0
            Matched_Yp = 0

            for i in range(0, len(Vehicle_x), 1):

                Vehicle_Xp = Vehicle_x[i] + int(Vehicle_w[i] / 2)
                Vehicle_Yp = Vehicle_y[i] + Vehicle_h[i]

                # if the distance between tracker point and detected point is under 200 px
                if int(
                        math.sqrt(
                            pow(abs(Tracking_Xp - Vehicle_Xp), 2) +
                            pow(abs(Tracking_Yp - Vehicle_Yp), 2))) < 200:
                    cv2.line(frame, (Tracking_Xp, Tracking_Yp),
                             (Vehicle_Xp, Vehicle_Yp), (125, 255, 125), 2)

                    Matched = True
                    Matched_Xp = Vehicle_Xp
                    Matched_Yp = Vehicle_Yp

                    # bounding box used to re-seed the tracker
                    tempBB = (Vehicle_x[i], Vehicle_y[i], Vehicle_w[i],
                              Vehicle_h[i])

                    # discard and re-create the tracker
                    tracker = cv2.TrackerCSRT_create()
                    tracker.init(
                        Substracted, tempBB
                    )  # track on the background-subtracted image, not the original frame
                    break

            # If the tracker itself leaves the zone, delete it (counting is keyed
            # to the matched detected point, not to the tracker)
            #if (Tracking_Xp < DetectingZone[0]) or Tracking_Xp > (DetectingZone[2]) or Tracking_Yp < (DetectingZone[1]-10) or Tracking_Yp > (DetectingZone[3]+10)or \
            #(Matched is True and (Matched_Xp < DetectingZone[0]) or Matched_Xp > (DetectingZone[2]) or Matched_Yp < (DetectingZone[1]-10) or Matched_Yp > (DetectingZone[3]+10)):

            # If matched and the matched detected point crosses a zone boundary:
            # count and delete
            if Matched:

                initBB_xy = (initBB[0] + int(initBB[2] / 2),
                             initBB[1] + initBB[3])

                Matched_xy = (Matched_Xp, Matched_Yp)

                RED_line_start_xy = (vertices[0][0][0], vertices[0][0][1])
                RED_line_end_xy = (vertices[0][3][0], vertices[0][3][1])

                BLUE_line_start_xy = (vertices[0][1][0], vertices[0][1][1])
                BLUE_line_end_xy = (vertices[0][2][0], vertices[0][2][1])

                #switched from the tracker point to the matched detected point (old code commented out)

                global count
                if intersect(initBB_xy, Matched_xy, RED_line_start_xy,
                             RED_line_end_xy):
                    RED_cnt = RED_cnt + 1
                    if inout == 1:  # adjust the global count by in/out direction
                        count = count + 1
                    else:
                        count = count - 1
                    car_cnt += 1
                    # reset initBB, lastBB and the tracker
                    cv2.line(frame, (initBB[0] + int(initBB[2] / 2),
                                     initBB[1] + initBB[3]),
                             (Matched_Xp, Matched_Yp), COLOR_RED, 2)
                    initBB = None
                    tracker = cv2.TrackerCSRT_create()

                if intersect(initBB_xy, Matched_xy, BLUE_line_start_xy,
                             BLUE_line_end_xy):
                    BLUE_cnt = BLUE_cnt + 1
                    car_cnt += 1
                    cv2.line(frame, (initBB[0] + int(initBB[2] / 2),
                                     initBB[1] + initBB[3]),
                             (Matched_Xp, Matched_Yp), COLOR_RED, 2)
                    # reset initBB, lastBB and the tracker
                    initBB = None
                    tracker = cv2.TrackerCSRT_create()

    return tracker, initBB, RED_cnt, BLUE_cnt
Example #12
cent2 = get_centroid(bbox2)

draw_circle(frame, cent)
draw_circle(frame, cent2)  # circle; movements outside it will be registered

BB = bbox
BB2 = bbox2  # saving it for later

cv2.destroyAllWindows()

# fvs = FileVideoStream(path_vid).start() #vs Video Stream
bbox = BB
bbox2 = BB2

# Creating the CSRT Tracker
tracker = cv2.TrackerCSRT_create()
tracker2 = cv2.TrackerCSRT_create()  # left hand for steering

# Initialize tracker with first frame and bounding box
tracker.init(FRAME, bbox)
tracker2.init(FRAME, bbox2)

cv2.putText(frame, 'Put both your hands in position', (100, 70), \
    cv2.FONT_HERSHEY_SIMPLEX, 0.75, COLOR_BLACK, 2)

TIMER_SETUP = 8
t = time.time()

while True:
    frame = get_frame()
    curr = (time.time() - t)
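    # assumed continuation (the snippet is truncated here): show a countdown
    # until TIMER_SETUP seconds elapse, then update both hand trackers
    if curr < TIMER_SETUP:
        cv2.putText(frame, str(int(TIMER_SETUP - curr)), (100, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, COLOR_BLACK, 2)
    else:
        ok, bbox = tracker.update(frame)
        ok2, bbox2 = tracker2.update(frame)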
Example #13
import os
import cv2
from vidstab import VidStab, layer_overlay, download_ostrich_video

# Download test video to stabilize
if not os.path.isfile("ostrich.mp4"):
    download_ostrich_video("ostrich.mp4")

# Initialize object tracker, stabilizer, and video reader
object_tracker = cv2.TrackerCSRT_create()
stabilizer = VidStab()
vidcap = cv2.VideoCapture("ostrich.mp4")

# Initialize bounding box for drawing rectangle around tracked object
object_bounding_box = None
cap = cv2.VideoCapture(0)

while True:
    # grabbed_frame, frame = vidcap.read()
    grabbed_frame, frame = cap.read()

    # Pass frame to stabilizer even if frame is None
    stabilized_frame = stabilizer.stabilize_frame(input_frame=frame,
                                                  border_size=50)

    # If stabilized_frame is None then there are no frames left to process
    if stabilized_frame is None:
        break

    # Draw rectangle around tracked object if tracking has started
    if object_bounding_box is not None:
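        # Assumed continuation (the snippet is truncated here), following the
        # vidstab README-style example: update the tracker on the stabilized
        # frame and draw the tracked box
        success, object_bounding_box = object_tracker.update(stabilized_frame)
        if success:
            (x, y, w, h) = [int(v) for v in object_bounding_box]
            cv2.rectangle(stabilized_frame, (x, y),
                          (x + w, y + h), (0, 255, 0), 4)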
Example #14
def object_tracker(tracker_algo="MDF", video_path="road.mp4"):
    if tracker_algo == "boosting":
        tracker = cv2.TrackerBoosting_create()
    if tracker_algo == "CSRT":
        tracker = cv2.TrackerCSRT_create()
    if tracker_algo == "TLD":
        tracker = cv2.TrackerTLD_create()
    if tracker_algo == "MIL":
        tracker = cv2.TrackerMIL_create()
    if tracker_algo == "KCF":
        tracker = cv2.TrackerKCF_create()
    if tracker_algo == "MDF":
        tracker = cv2.TrackerMedianFlow_create()

    time.sleep(1)
    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
    bbox = cv2.selectROI("Tracking", frame, False)
    tracker.init(frame, bbox)

    def drawBox(img, bbox):
        x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
        cv2.rectangle(img, (x, y), ((x + w), (y + h)), (0, 255, 0), 3, 3)
        cv2.line(img, (0, int(height) - 5), (int(width), int(height) - 5),
                 (255, 0, 0), 3)
        print(((x + x + w) / 2), ((y + y + h) / 2))
        cv2.putText(img, "Tracking Started", (100, 75),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    while True:

        timer = cv2.getTickCount()
        success, img = cap.read()
        if not success:
            break
        success, bbox = tracker.update(img)

        if success:
            drawBox(img, bbox)
        else:
            cv2.putText(img, "Lost", (100, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

        cv2.rectangle(img, (15, 15), (200, 90), (255, 0, 255), 2)
        cv2.putText(img, "Fps:", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (255, 0, 255), 2)
        cv2.putText(img, "Status:", (20, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (255, 0, 255), 2)

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        if fps > 60: myColor = (20, 230, 20)
        elif fps > 20: myColor = (230, 20, 20)
        else: myColor = (20, 20, 230)
        cv2.putText(img, str(int(fps)), (75, 40), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, myColor, 2)

        cv2.imshow("Tracking", img)

        if cv2.waitKey(1) & 0xff == ord('q'):
            break

    cv2.destroyAllWindows()
    cap.release()
Example #15
                height = endY - startY
                initBB = (startX, startY, width, height)
                #print(initBB)
                bboxes.append(initBB)
                found = 1
                totalFrames += 1

                #tracker.init(frame, initBB)
                #(x, y, w, h) = [int(v) for v in box]
                #cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 255, 0), 2)
                #fps = FPS().start()
                #totalFrames+=1
                #found = 1

for bbox in bboxes:
    multiTracker.add(cv2.TrackerCSRT_create(), frame, bbox)
    retval = multiTracker.getObjects()
    #print(type(bbox))
    #print(retval)
    #(x, y, w, h) = [int(v) for v in bbox]
    #cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 255, 0), 2)

cv2.imshow("MultiTracker", frame)
fps.update()
anobox = []
dellist = []
while True:

    frame = vs.read()
    frame = frame[1] if args.get("input", False) else frame
Example #16
        print('finding ROI: ', not tracking)


cv2.namedWindow('image')
cv2.setMouseCallback('image', onMouse)

cv2.imshow('image', img)

# detector = cv2.SimpleBlobDetector_create()

#tracker = cv2.TrackerBoosting_create()
#tracker = cv2.TrackerMIL_create()
#tracker = cv2.TrackerKCF_create()
#tracker = cv2.TrackerTLD_create()
#tracker = cv2.TrackerMedianFlow_create()
# CSRT (discriminative correlation filter with channel and spatial reliability):
# better localization of the selected region and improved tracking of
# non-rectangular regions or objects; uses two standard features (HoG and Colornames)
tracker = cv2.TrackerCSRT_create()
#tracker = cv2.TrackerMOSSE_create()  # good accuracy; loses the target on quick movements
tracker.save('tracker_params.json')

#fs = cv2.FileStorage("tracker_params.json", cv2.FileStorage_READ)
#tracker.read(fs.getFirstTopLevelNode())

display_boxes = []
display_scores = []
counter = 0
display_box = None

while True:

    success, img = cap.read()
    #print('{} tracking: {}'.format(counter, tracking))
Example #17
    def __createTracker(self):
        tracker = None
        if self.methodName == '' or self.methodName == 'CASCADE' or self.methodName is None:
            pass
        elif self.methodName == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
            ''' PyImgSearch
            Based on the same algorithm used to power the machine learning behind Haar cascades (AdaBoost),
            but like Haar cascades, is over a decade old. This tracker is slow and doesn’t work very well.
            Interesting only for legacy reasons and comparing other algorithms. (minimum OpenCV 3.0.0)
            '''
            ''' Paulo
            Extremely slow
            '''
        elif self.methodName == 'MIL':
            tracker = cv2.TrackerMIL_create()
            ''' PyImgSearch
            Better accuracy than BOOSTING tracker but does a poor job of reporting failure. (minimum OpenCV 3.0.0)
            '''
        elif self.methodName == 'KCF':
            tracker = cv2.TrackerKCF_create()
            ''' PyImgSearch
            Kernelized Correlation Filters. Faster than BOOSTING and MIL.
            Similar to MIL and KCF, does not handle full occlusion well. (minimum OpenCV 3.1.0)
            '''
        elif self.methodName == 'TLD':
            tracker = cv2.TrackerTLD_create()
            ''' PyImgSearch
            I’m not sure if there is a problem with the OpenCV implementation of the TLD tracker or the actual algorithm itself,
            but the TLD tracker was incredibly prone to false-positives. I do not recommend using this OpenCV object tracker. (minimum OpenCV 3.0.0)
            '''
        elif self.methodName == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
            ''' PyImgSearch
            Does a nice job reporting failures; however, if there is too large of a jump in motion,
            such as fast moving objects, or objects that change quickly in their appearance, the model will fail. (minimum OpenCV 3.0.0)
            '''
            ''' Paulo
            Quite fast, responds to scale changes, and reports occlusion.
            In principle the best algorithm we have.
            '''
        elif self.methodName == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
            ''' PyImgSearch
            The only deep learning-based object detector included in OpenCV.
            It requires additional model files to run (will not be covered in this post). 
            My initial experiments showed it was a bit of a pain to use even though it reportedly handles viewing changes well
            (my initial experiments didn’t confirm this though).
            I’ll try to cover it in a future post, but in the meantime, take a look at Satya’s writeup. (minimum OpenCV 3.2.0)
            '''
            ''' Paulo
            Tested with face recognition.
            Did not perform very well, due to the tracker constantly re-initializing.
            Also did not correctly report occlusion of the object.
            Quite poor performance.

            It would be interesting to test it with the cone.
            If you want to do that, download the files from this link: https://www.dropbox.com/sh/77frbrkmf9ojfm6/AACgY7-wSfj-LIyYcOgUSZ0Ua?dl=0
            and extract them into the main.py directory.
            '''
        elif self.methodName == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
            ''' PyImgSearch
            Very, very fast. Not as accurate as CSRT or KCF but a good choice if you need pure speed. (minimum OpenCV 3.4.1)
            '''
            ''' Paulo
            Really very fast and extremely stable in detection,
            but does not respond to changes in the size of the objects it is tracking.
            In other words, we would not be able to detect the object approaching.
            '''
        elif self.methodName == 'CSRT':
            tracker = cv2.TrackerCSRT_create()
            ''' PyImgSearch
            Discriminative Correlation Filter (with Channel and Spatial Reliability). 
            Tends to be more accurate than KCF but slightly slower. (minimum OpenCV 3.4.2)
            '''
            ''' Paulo:
            Responds very well to scale, is relatively stable, and has very good accuracy.
            Poor performance on the Pi, and does not report occlusion.
            '''
        else:
            print("Algortimo '" + self.methodName + "' não reconhecido")
            self.methodName = ""

        return tracker
Example #18
def tracking_face(img_path="./data_try/0__0", t_type=0):
    # Set up tracker.
    # Instead of MIL, you can also use
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[t_type]
    global tracker

    # if int(minor_ver) < 3:
    if int(major_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    images_list = [img for img in os.listdir(img_path) if img.endswith(".png")]
    images_list = sort_img(images_list)
    img_txt_path = img_path.replace(img_path.split("/")[0], "./txt")
    # txt_path = img_txt_path+"_txt"
    txt_path = img_txt_path

    if not os.path.isdir(txt_path):
        os.makedirs(txt_path)
    frame = cv2.imread(os.path.join(img_path, images_list[0]))
    # Define an initial bounding box
    height, width, layers = frame.shape
    for i in range(len(images_list)):

        frame = cv2.imread(os.path.join(img_path, images_list[i]))

        bbox, score = bbox_score(frame)
        bbox = np.squeeze(bbox)[0]
        score = np.squeeze(score)[0]

        # write_txt(txt_path,images_list[i])

        if score <= 0.7:
            bbox = bbox_transfer(bbox, height, width)
            write_txt(txt_path, images_list[i], bbox, "bad")
        else:
            bbox = bbox_transfer(bbox, height, width)  #(269,47,62,80)
            # bbox = (269,47,62,80)
            tracker.init(frame, bbox)

            while i < len(images_list):
                frame = cv2.imread(os.path.join(img_path, images_list[i]))

                timer = cv2.getTickCount()

                # Update tracker
                ok, bbox = tracker.update(frame)
                bbox_w = bbox_int(bbox)
                write_txt(txt_path, images_list[i], bbox_w)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Draw bounding box
                if ok:
                    # Tracking success
                    p1 = (int(bbox[0]), int(bbox[1]))
                    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                    i = i + 1
                else:
                    # Tracking failure
                    cv2.putText(frame, "Tracking failure, re-initialize ...",
                                (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                (0, 0, 255), 2)

                # Display tracker type on frame
                cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

                # Display FPS on frame
                cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

                # Display result
                cv2.imshow("Tracking", frame)

                # Exit if ESC pressed
                k = cv2.waitKey(1) & 0xff
                if k == 27: break

            break
def tracking_video_rectangle_tovideo(root, name, ad_name, point,
                                     result='cool_project.avi', edge=20,
                                     save_img=False, save_result=True,
                                     method_num=5, save_img2=True,
                                     middle_halt=-1):
    # Set up tracker.
    # Instead of MIL, you can also use
    tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'CSRT', 'MOSSE']
    tracker_type = tracker_types[method_num]
    
    ad = cv2.imread(join(root,ad_name))
    
    if int(minor_ver) < 3:
        tracker = [cv2.Tracker_create(tracker_type) for _ in range(4)]
    else:
        if tracker_type == 'BOOSTING':
            tracker = [cv2.TrackerBoosting_create() for _ in range(4)]
        if tracker_type == 'MIL':
            tracker = [cv2.TrackerMIL_create() for _ in range(4)]
        if tracker_type == 'KCF':
            tracker = [cv2.TrackerKCF_create() for _ in range(4)]
        if tracker_type == 'TLD':
            tracker = [cv2.TrackerTLD_create() for _ in range(4)]
        if tracker_type == 'MEDIANFLOW':
            tracker = [cv2.TrackerMedianFlow_create() for _ in range(4)]
        if tracker_type == 'CSRT':
            tracker = [cv2.TrackerCSRT_create() for _ in range(4)]
        if tracker_type == 'MOSSE':
            tracker = [cv2.TrackerMOSSE_create() for _ in range(4)]

    # Read video
    video = cv2.VideoCapture(join(root, name))

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    cv2.imwrite("result/%05d.jpg"%(0),frame)
    out = cv2.VideoWriter(result , cv2.VideoWriter_fourcc(*'DIVX'), 10, (frame.shape[1],frame.shape[0]))
    
    # Define an initial bounding box
    bbox = [(point[_][0] - edge, point[_][1] - edge, edge*2 , edge*2) for _ in range(4)]
    
    # Uncomment the line below to select a different bounding box
    # bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    for _ in range(4):
        ok = tracker[_].init(frame, bbox[_])
    
    
    index_ = 0
    while True:
        # Read a new frame
        index_ += 1
        ok, frame = video.read()
        if not ok:
            break
         
        # Start timer
        timer = cv2.getTickCount()

        
        ans = []
        for _ in range(4):
            # Update tracker
            ok, bbox = tracker[_].update(frame)
            # Calculate Frames per second (FPS)
            # fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                p3 = (int(bbox[0] + bbox[2]/2), int(bbox[1] + bbox[3]/2))
                ans.append(p3)
                if (save_img or save_img2):
                    cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
            else :
                # Tracking failure
                if (save_img or save_img2):
                    cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)

            if (save_img):
                # Display tracker type on frame
                cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)

                # Display FPS on frame
                # cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);

                # Display result
                cv2.imwrite("result/%05d_%d.jpg"%(index_, _) , frame)
        if (save_img2):
            cv2.imwrite("result/%05d_.jpg"%(index_), frame)
        if (save_result):
            frame = Trans_forward(frame, np.array(ans , dtype = np.float32), ad)
            cv2.imwrite("result2/%05d_.jpg"%(index_), frame)
            out.write(frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
        if index_ == middle_halt: break
    out.release()
    return np.array(ans)
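
# hypothetical usage (file names assumed): track the four corners given in
# `point` through clip.avi and warp ad.jpg onto the tracked quadrilateral
# corners = [(100, 100), (300, 100), (300, 250), (100, 250)]
# tracking_video_rectangle_tovideo('videos', 'clip.avi', 'ad.jpg', corners)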
Example #20
def detect_video(yolo: YOLO, video_path: str, output_path: str = ""):
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = True if output_path != "" else False
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC),
              type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    detected = False
    trackers = []
    font = ImageFont.truetype(font='font/FiraMono-Medium.otf', size=30)
    thickness = 1
    frame_count = 0
    while True:
        return_value, frame = vid.read()
        if not return_value:
            break
        image = Image.fromarray(frame)
        image_data = np.array(image) / 255.
        draw = ImageDraw.Draw(image)
        if detected:
            for tracker, predicted_class, c in trackers:
                success, box = tracker.update(frame)
                left, top, width, height = box
                right = left + width
                bottom = top + height

                label = '{}'.format(predicted_class)

                label_size = draw.textsize(label, font)
                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle([left + i, top + i, right - i, bottom - i],
                                   outline=yolo.colors[c])
                draw.rectangle(
                    [tuple(text_origin),
                     tuple(text_origin + label_size)],
                    fill=yolo.colors[c])
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                frame_count += 1
                if frame_count == 100:
                    # drop all trackers and force a fresh detection pass
                    trackers = []
                    frame_count = 0
                    detected = False
        else:
            if tf.executing_eagerly():
                boxes, scores, classes = yolo.detect_image(image_data, False)
            else:
                boxes, scores, classes = yolo.detect_image(image, False)
            for i, c in enumerate(classes):
                predicted_class = yolo.class_names[c]
                top, left, bottom, right = boxes[i]
                height = abs(bottom - top)
                width = abs(right - left)
                tracker = cv2.TrackerCSRT_create()
                #tracker = cv2.TrackerKCF_create()
                #tracker = cv2.TrackerMOSSE_create()
                tracker.init(frame, (left, top, width, height))
                trackers.append([tracker, predicted_class, c])

                label = '{}'.format(predicted_class)
                label_size = draw.textsize(label, font)
                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - label_size[1]])
                else:
                    text_origin = np.array([left, top + 1])

                # My kingdom for a good redistributable image drawing library.
                for i in range(thickness):
                    draw.rectangle([left + i, top + i, right - i, bottom - i],
                                   outline=yolo.colors[c])
                draw.rectangle(
                    [tuple(text_origin),
                     tuple(text_origin + label_size)],
                    fill=yolo.colors[c])
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            detected = True
        del draw
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result,
                    text=fps,
                    org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50,
                    color=(255, 0, 0),
                    thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)

        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #21
import cv2

# cap = cv2.VideoCapture(0) # for cam
cap = cv2.VideoCapture('vtest.avi')  # for video
while True:
    success, img = cap.read()
    cv2.putText(img, "When You Ready Press 's' ", (100, 100),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.7, (255, 23, 100), 2)
    cv2.imshow('Tracking', img)
    if cv2.waitKey(80) & 0xFF == ord('s'):
        break
# install opencv-contrib-python
# Trackers
# tracker = cv2.TrackerMOSSE_create()  # high speed & low accuracy
tracker = cv2.TrackerCSRT_create()  # low speed & high accuracy
# tracker = cv2.TrackerGOTURN_create() # needs extra GOTURN model files, errors without them
# tracker = cv2.TrackerMedianFlow_create()  # working accurate
# tracker = cv2.TrackerTLD_create() #working
# tracker = cv2.TrackerKCF_create() #working
# tracker = cv2.TrackerMIL_create()
# tracker = cv2.TrackerBoosting_create()
success, img = cap.read()
bbox = cv2.selectROI('Tracking', img, True)
tracker.init(img, bbox)


def drawBox(img, bbox):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 3, 1)
    cv2.putText(img, 'Tracking', (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
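                (0, 255, 0), 2)  # assumed completion; the snippet is truncated here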
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--classification_model',
        help='Path of classification model.',
        required=False,
        default='all_models/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite'
    )
    parser.add_argument(
        '--detection_model',
        help='Path of detection model.',
        required=False,
        default=
        'all_models/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite')
    parser.add_argument('--image', help='Path of the image.', required=False)
    parser.add_argument('--classification_labels',
                        required=False,
                        default='all_models/inat_bird_labels.txt')
    parser.add_argument('--detection_labels',
                        required=False,
                        default='all_models/coco_labels.txt')
    args = parser.parse_args()

    # initialize the video stream and allow the camera sensor to warmup
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0, resolution=(2048, 1536)).start()
    #vs = VideoStream(usePiCamera=False).start()
    time.sleep(2.0)

    detection_model = DetectionEngine(args.detection_model)
    classification_model = ClassificationEngine(args.classification_model)

    detection_labels = load_labels(args.detection_labels)
    print("detection_labels : {}".format(len(detection_labels)))
    classification_labels = load_labels(args.classification_labels)

    multiTracker = cv2.MultiTracker_create()
    tracking_mode = False
    tracking_expire = None

    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 500 pixels
        frame = vs.read()
        #resized_frame = imutils.resize(frame, width=500)
        resized_frame = frame
        orig = resized_frame.copy()
        # prepare the frame for classification by converting (1) it from
        # BGR to RGB channel ordering and then (2) from a NumPy array to
        # PIL image format
        resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)
        resized_frame = Image.fromarray(resized_frame)

        # make predictions on the input frame
        start = time.time()

        success, boxes = multiTracker.update(orig)
        if tracking_expire and time.time() > tracking_expire:
            tracking_mode = False
            # getObjects() returns bounding boxes, not tracker objects, so the
            # simplest way to drop every tracker is to recreate the MultiTracker
            multiTracker = cv2.MultiTracker_create()

        print('success {}'.format(success))
        print('boxes {}'.format(boxes))
        if success:
            for box in boxes:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
                text = "{}: {:.2f}% ({:.4f} sec)".format(
                    "bird", score * 100, end - start)
                cv2.putText(orig, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 0, 255), 2)

        objs = detection_model.detect_with_image(resized_frame, top_k=1)
        end = time.time()
        for obj in objs:

            # draw the predicted class label, probability, and inference
            # time on the output frame
            score = obj.score
            box = obj.bounding_box
            height, width, channels = orig.shape
            label = detection_labels[obj.label_id]

            if label == "bird":

                p0, p1 = list(box)
                x0, y0 = list(p0)
                x1, y1 = list(p1)
                x0, y0, x1, y1 = int(x0 * width), int(y0 * height), int(
                    x1 * width), int(y1 * height)
                cv2.rectangle(orig, (x0, y0), (x1, y1), (0, 255, 0), 2)
                text = "{}: {:.2f}% ({:.4f} sec)".format(
                    "bird", score * 100, end - start)
                cv2.putText(orig, text, (x0, y0), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0), 2)

                if score > 0.2:
                    #im = Image.new('RGB', (x1-x0, y1-y0))
                    #im.putdata(frame[y0:y1,x0:x1])
                    #print("raw {}".format(frame[y0:y1,x0:x1]))
                    #classification_thread = threading.Thread(target=classification_job,args=(classification_model, frame[y0:y1,x0:x1], 1))
                    #classification_thread.start()
                    #classification_thread.join()

                    is_intersection = False
                    for box in boxes:
                        (x, y, w, h) = [int(v) for v in box]
                        if bb_intersection_over_union(
                            [x0, y0, x1, y1], [x, y, x + w, y + h]) > 0:
                            is_intersection = True
                            print("intersect.. already tracking")

                    if not is_intersection:
                        tracking_expire = time.time() + 60
                        tracker = cv2.TrackerCSRT_create()
                        # seed the tracker with the detected box, not the frame size
                        print("add tracker {} {} {} {}".format(
                            x0, y0, x1 - x0, y1 - y0))
                        multiTracker.add(tracker, orig,
                                         (x0, y0, x1 - x0, y1 - y0))

        # show the output frame and wait for a key press
        cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Frame", 800, 600)
        cv2.imshow("Frame", orig)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #23
def get_points(filename, tracker):

    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[tracker]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    # Read video
    path = '/home/ilkka/Python/trajectory/videos/' + filename
    video = cv2.VideoCapture(path)

    # Exit if video not opened.
    if not video.isOpened():
        print('Could not open video')
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Select a bounding box interactively (overriding the default above)
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    # The list of points
    points = np.array([[0, 0]])

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (0,0,255), 2, 1)

            # Center of bbox
            x = (2*bbox[0] + bbox[2])/2
            y = (2*bbox[1] + bbox[3])/2

            # Add (x, y) to the list of points
            points = np.append(points, [[x, y]], axis=0)

            # Add a mark in the center of bbox
            q1 = (int(x)-1, int(y)-1)
            q2 = (int(x)+1, int(y)+1)
            cv2.rectangle(frame, q1, q2, (0,0,255), 2, 1)

        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100,80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100,20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100,50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    # Remove the initial zero row from points
    points = points[1:]

    return points
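
A minimal way to call get_points and inspect the trajectory (matplotlib and the filename are assumptions; index 7 selects CSRT from the list above):

import matplotlib.pyplot as plt

points = get_points('ball.mp4', 7)  # hypothetical file under the videos/ path
plt.plot(points[:, 0], points[:, 1])
plt.gca().invert_yaxis()  # image y grows downwards
plt.show()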
Example #24
    def __init__(self):
        self.position_arr = []
        self.position_dq = deque(maxlen=16)  # keeps only the 16 newest positions
        self.roi_tracker = cv2.TrackerCSRT_create()
        self.init_bb = None
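
The deque with maxlen=16 is the usual ingredient of a fading motion trail: appending the newest centre silently drops anything older than 16 frames. A hedged sketch of how such a trail is typically drawn per frame (the method is an assumption, not part of the snippet):

    def draw_trail(self, frame, center):
        self.position_dq.appendleft(center)
        for i in range(1, len(self.position_dq)):
            thickness = max(1, (16 - i) // 2)  # older segments get thinner
            cv2.line(frame, self.position_dq[i - 1], self.position_dq[i],
                     (0, 0, 255), thickness)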
Example #25
File: goturn.py  Project: vhvkhoa/sort
# Imports assumed by this excerpt (aliases follow the code below):
import pickle as pkl
from glob import glob
from os import path

import cv2
import numpy as np
from pycocotools import mask as mask_util
from tqdm import tqdm


def main(args):
    input_video_paths = glob(
        path.join(args.input_video_dir, '*.' + args.video_extension))

    for input_video_path in input_video_paths:
        print(input_video_path)
        cam_name = '_'.join(
            path.basename(input_video_path).split('.')[0].split('_')[:2])
        with open(
                path.join(args.input_bbox_dir,
                          path.basename(input_video_path) + '.pkl'),
                'rb') as f:
            bboxes = pkl.load(f)
        with open(path.join(args.input_roi_dir, cam_name + '.txt')) as f:
            roi_coords = [[int(coord) for coord in line.split(',')]
                          for line in f.read().split('\n')[:-1]]

        roi = np.load(path.join(args.input_roi_dir, cam_name + '.npy'))

        input_video = cv2.VideoCapture(input_video_path)
        print(input_video.get(cv2.CAP_PROP_FPS))
        width = int(input_video.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(input_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        output_video = cv2.VideoWriter(filename=path.join(
            args.output_video_dir, path.basename(input_video_path)),
                                       fourcc=cv2.VideoWriter_fourcc(*'mp4v'),
                                       fps=30.,
                                       frameSize=(width, height),
                                       isColor=True)
        num_frames = int(input_video.get(cv2.CAP_PROP_FRAME_COUNT))

        trackers = []
        tracked_bboxes = []
        bbox_ids = []
        current_bbox_id = 0

        for frame_idx in tqdm(range(num_frames)):
            success, frame = input_video.read()

            # Keep cars and trucks
            frame_bboxes = np.concatenate(
                [bboxes[frame_idx][1], bboxes[frame_idx][2]], axis=0)

            # Keep bboxes with confidence score more than threshold
            frame_bboxes = [
                bbox[:4].astype(np.int32) for bbox in frame_bboxes
                if bbox[4] > args.confidence_thresh
            ]

            # Remove bboxes that cannot be tracked or exists over a threshold
            untracked_ids = []
            for i, bbox_id in enumerate(bbox_ids):
                success, bbox = trackers[i].update(frame)

                bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
                if success and verify_bbox(roi, bbox, tracked_bboxes[i],
                                           args.dist_thresh, args.time_thresh,
                                           bbox_id):
                    tracked_bboxes[i].append(np.array(bbox))
                else:
                    untracked_ids.append(i)
            if len(untracked_ids) > 0:
                for index in untracked_ids[::-1]:
                    del tracked_bboxes[index]
                    del trackers[index]
                    del bbox_ids[index]

            if len(frame_bboxes) > 0 and len(tracked_bboxes) > 0:
                latest_bboxes = [
                    tracked_car[-1] for tracked_car in tracked_bboxes
                ]
                ious = mask_util.iou(
                    np.array(frame_bboxes), np.array(latest_bboxes),
                    np.zeros((len(latest_bboxes), ), dtype=bool))
            elif len(frame_bboxes) > 0:
                ious = np.zeros((len(frame_bboxes), 1))
            else:
                ious = np.zeros((0, 1))  # no detections this frame

            max_iou_per_new = np.asarray(ious).max(axis=1).tolist()
            arg_max_iou_per_new = np.asarray(ious).argmax(axis=1).tolist()
            for iou, arg, xyxy in zip(max_iou_per_new, arg_max_iou_per_new,
                                      frame_bboxes):
                if iou <= args.iou_lower_thresh:
                    if verify_bbox(roi, xyxy):
                        tracked_bboxes.append([xyxy])
                        bbox_ids.append(current_bbox_id)
                        trackers.append(cv2.TrackerCSRT_create())
                        xywh = (xyxy[0], xyxy[1], xyxy[2] - xyxy[0],
                                xyxy[3] - xyxy[1])
                        trackers[-1].init(frame, xywh)
                        current_bbox_id += 1

                elif iou >= args.iou_upper_thresh:
                    tracked_bboxes[arg][-1] = xyxy
                    trackers[arg] = cv2.TrackerCSRT_create()
                    xywh = (xyxy[0], xyxy[1], xyxy[2] - xyxy[0],
                            xyxy[3] - xyxy[1])
                    trackers[arg].init(frame, xywh)

            for tracked_seq, bbox_id in zip(tracked_bboxes, bbox_ids):
                frame = draw_bbox(frame, tracked_seq[-1], bbox_id)
            frame = draw_roi(frame, roi_coords)
            output_video.write(frame)
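
The two thresholds above implement a simple IoU-gated association rule: a detection that overlaps no live track (iou <= iou_lower_thresh) spawns a new CSRT tracker, one that clearly matches a track (iou >= iou_upper_thresh) re-seeds that tracker on the fresh detection, and anything in between is left alone as ambiguous. A standalone sketch of the same rule (plain-Python IoU; names are illustrative):

def corner_iou(a, b):
    # a, b are (x0, y0, x1, y1) corner boxes
    iw = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    ih = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = iw * ih
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union else 0.0

def associate(detections, tracks, lo=0.1, hi=0.5):
    """Split detections into (new, refresh) against the latest track boxes."""
    new, refresh = [], []
    for det in detections:
        ious = [corner_iou(det, trk) for trk in tracks]
        best = max(ious) if ious else 0.0
        if best <= lo:
            new.append(det)
        elif best >= hi:
            refresh.append((ious.index(best), det))
    return new, refresh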
Example #26
    def StartTracking(self, frame, object_box):
        self.__tracker = cv2.TrackerCSRT_create()
        self.__tracker.init(frame, object_box)

    def __init__(self):
        # Options: CSRT, KCF, Boosting, MIL, TLD, MedianFlow, MOSSE
        self.mtracker = cv2.TrackerCSRT_create()
        self.trackingBoundingBox = (0, 0, 0, 0)
Example #28
def tracking_face(vi_path="try.avi", t_type=7):
    # Set up tracker.
    # Instead of MIL, you can also use any of the tracker types below.
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[t_type]

    # Checks the major version (presumably run on OpenCV 4.x, where the
    # usual minor-version check would misfire)
    if int(major_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    # Read video
    video = cv2.VideoCapture(vi_path)

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    height, width, layers = frame.shape

    # box
    # bbox_transfer()
    bbox, score = bbox_score(frame)
    bbox = np.squeeze(bbox)[0]
    score = np.squeeze(score)[0]

    # Skip frames until the detector is confident
    while score <= 0.7:
        ok, frame = video.read()
        if not ok:
            print('No frame with a confident detection')
            sys.exit()
        bbox, score = bbox_score(frame)
        bbox = np.squeeze(bbox)[0]
        score = np.squeeze(score)[0]

    bbox = bbox_transfer(bbox, height, width)  #(269,47,62,80)
    # bbox = (bbox[1]*width,bbox[0]*height,bbox[2]-bbox[0],bbox[3]-bbox[1])
    # bbox = tuple(bbox)

    # bbox = (265, 48, 70, 82)# x,y, w, h
    # bbox = (48,151,128,186) # *H ymin, xmin, ymax, xmax
    # bbox = (85,270,228,332) # *W
    # bbox = (85,151,228,186) A -  151 85 (228-85) 186-151
    # bbox = (48,270,128,332) B

    # print(bbox.shape,score.shape)
    # Uncomment the line below to select a different bounding box
    # bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    # bbox = bbox_transfer(bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)  # bbox --> [x1,y1,width,height]

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))  #(x1,y1)
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))  #(x2,y2)
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
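
A hedged usage note: with the defaults, tracking starts at the first frame whose detection score exceeds 0.7, and t_type=7 selects CSRT from the list above:

tracking_face("try.avi", t_type=7)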
Example #29
import sys

import cv2

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

tracker_types = ['MIL', 'KCF', 'GOTURN', 'CSRT']
print(f"Select tracker type by index: {tracker_types}")
i = int(input(">> "))
tracker_type = tracker_types[i]

if int(minor_ver) < 3:
    tracker = cv2.Tracker_create(tracker_type)
else:
    if tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()
    if tracker_type == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()
    if tracker_type == "CSRT":
        tracker = cv2.TrackerCSRT_create()

# Read video
video = cv2.VideoCapture("vid.avi")

# Exit if video not opened.
if not video.isOpened():
    print("Could not open video")
    sys.exit()

# Read first frame.
ok, frame = video.read()
if not ok:
    print('Cannot read video file')
    sys.exit()
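
The excerpt stops after the first frame is read; the minimal continuation (assumed here, and identical in shape to Example #28 above) is to select a ROI, initialise the tracker, and loop:

bbox = cv2.selectROI(frame, False)
ok = tracker.init(frame, bbox)
# ...then call tracker.update(frame) and draw per frame, as in Example #28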
Example #30
# Imports assumed by this excerpt (aliases follow the code below):
import math as m
import sys
import time
import traceback

import av
import cv2
import numpy
import tellopy


def main():

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[2]

    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    x0 = 200
    y0 = 200
    w0 = 224
    h0 = 224
    track_window = (x0, y0, w0, h0)
    # Reference Distance
    L0 = 100
    S0 = 50176  # reference area: 224 x 224
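    # Pinhole-camera heuristic used in the loop below: apparent area scales
    # as 1/d^2, so with reference area S0 seen at distance L0 the current
    # distance is d ≈ L0 * sqrt(S0 / S). E.g. a box at a quarter of S0
    # gives d ≈ 2 * L0.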

    # Base Distance
    LB = 100
    # Define an initial bounding box
    bbox = (x0, y0, w0, h0)  #(287, 23, 86, 320)
    # Centre of the reference box; re-computed when 'r' re-selects the ROI
    CX0 = int(bbox[0] + 0.5 * bbox[2])
    CY0 = int(bbox[1] + 0.5 * bbox[3])

    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        drone.takeoff()

        # skip first 300 frames
        frame_skip = 300
        while True:
            #------------------------------------------for start
            for frame in container.decode(video=0):

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                start_time = time.time()

                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)

                # Start timer
                timer = cv2.getTickCount()

                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                #cv2.waitKey(1)

                # Update tracker ('r' (re)initialises it on a selected ROI)
                ok, bbox = tracker.update(image)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Draw bounding box
                if ok:
                    #print('Tracking ok')
                    (x, y, w, h) = (int(bbox[0]), int(bbox[1]), int(bbox[2]),
                                    int(bbox[3]))
                    CX = int(bbox[0] + 0.5 * bbox[2])  # center of X
                    CY = int(bbox[1] + 0.5 * bbox[3])  # center of Y
                    S = bbox[2] * bbox[3]  # current apparent area; keep S0 as the reference
                    print("CX,CY,S,x,y=", CX, CY, S, x, y)
                    # Tracking success
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)
                    p10 = (x0, y0)
                    p20 = (x0 + w0, y0 + h0)
                    cv2.rectangle(image, p10, p20, (0, 255, 0), 2, 1)

                    d = round(L0 * m.sqrt(S0 / S))  # estimated distance (see note above)
                    dx = x + w / 2 - CX0  # horizontal offset from the reference centre
                    dy = y + h / 2 - CY0  # vertical offset from the reference centre
                    print(d, dx, dy)

                    tracking(drone, d, dx, dy, LB)

                else:
                    # Tracking failure
                    #print('Tracking failure')
                    cv2.putText(image, "Tracking failure detected", (100, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                cv2.imshow('Original', image)

                key = cv2.waitKey(1) & 0xff
                if key == ord('q'):
                    print('Q!')
                    break

                if key == ord('r'):
                    roi_time = time.time()
                    bbox = cv2.selectROI(image, False)
                    print(bbox)
                    (x0, y0, w0, h0) = (int(bbox[0]), int(bbox[1]),
                                        int(bbox[2]), int(bbox[3]))

                    CX0 = int(x0 + 0.5 * w0)  #Center of X
                    CY0 = int(y0 + 0.5 * h0)

                    # Initialize tracker with first frame and bounding box
                    ok = tracker.init(image, bbox)
                    '''
		    if frame.time_base < 1.0/60:
                        time_base = 1.0/60
                    else:
                        time_base = frame.time_base
                    frame_skip2 = int((time.time() - roi_time)/time_base)

		    if 0 < frame_skip2:
                        frame_skip2 = frame_skip2 - 1
                        continue
		    '''

                if frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)


            # ------------------------------------------- for end
            break
        print('stop fly')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.land()  # land first; quit() tears down the connection
        drone.quit()
        cv2.destroyAllWindows()