Code Example #1
File: object_tracking.py Project: paul-freeman/big_g
def tracking():
    """Perform object tracking"""
    print(LICENSE_MSG)
    args = parse_args()
    try:
        write_mode = 'wb' if args.force else 'xb'
        with open(args.output_file, write_mode) as output_file:
            video = open_video(args.video_file)
            read_frame(video)
            print(SET_SCALE_MSG)
            press_enter()
            print('<scale window displayed>')
            distance = get_scale_distance()
            print('\nThe line drawn has a distance of {:.1f} pixels.'.format(
                distance))
            measure = float(
                input('Tell me how many metres this should represent. > '))
            scale = distance / measure
            print(ROI_BOX_MSG)
            press_enter()
            print('<object tracking window displayed>')
            bbox = select_bounding_box()
            if args.algorithm == 'KCF':
                tracker = cv2.TrackerKCF_create()
            elif args.algorithm == 'MIL':
                tracker = cv2.TrackerMIL_create()
            elif args.algorithm == 'Median-Flow':
                tracker = cv2.TrackerMedianFlow_create()
            else:
                raise ValueError('Unknown algorithm type')
            if args.suppress_live_plot:
                print(TRACKING_MSG)
            else:
                print(TRACKING_MSG_W_PLOT)
            press_enter()
            print('<object tracking window displayed>')
            speed_up = 1
            if args.speed_up:
                speed_up = int(args.speed_up[:-1])
            points = track_video(video, tracker, bbox, scale,
                                 args.suppress_live_plot, args.algorithm,
                                 speed_up)
            np.save(output_file, np.asarray(points))
            print(LAST_PLOT_MSG)
            press_enter()
            fig = plt.figure()
            axes = fig.gca(projection='3d')
            axes.plot(points.T[1], points.T[2], zs=points.T[0])
            axes.set_title('Tracked object motion')
            axes.set_aspect('equal')
            xmin, xmax = axes.get_xlim()
            x_range = abs(xmin - xmax)
            ymin, ymax = axes.get_ylim()
            y_range = abs(ymin - ymax)
            half_diff = abs(x_range - y_range) / 2
            if x_range > y_range:
                axes.set_ylim(ymin - half_diff, ymax + half_diff)
            if y_range > x_range:
                axes.set_xlim(xmin - half_diff, xmax + half_diff)
            plt.show()
    except FileExistsError:
        print('This directory already contains a file named: {}'.format(
            args.output_file))
        print('Please move, rename, or delete this file and try again.')
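The 'xb' mode in this example is what produces the FileExistsError handled at the end: 'x' refuses to overwrite an existing file, while --force swaps in 'wb'. A minimal standalone sketch of the same overwrite guard (the file name and data here are placeholders, not part of the original script):

import numpy as np

points = np.zeros((10, 3))  # stand-in for the array returned by track_video
try:
    with open('points.npy', 'xb') as output_file:  # hypothetical file name
        np.save(output_file, points)
except FileExistsError:
    print('points.npy already exists; move, rename, or delete it and retry.')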
Code Example #2
#Initialize the looping variable to one.
loop = 1

#Socket setup - a socket is used because the client script needs OpenCV libraries that are not installed there.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 9547
s.bind(('', port))
s.listen(5)

#Wait until a connection request is received, then proceed.
clientSocket, addr = s.accept()
print("got a connection from %s" % str(addr))

tracker = cv2.TrackerKCF_create()  #create tracker object
cap = cv2.VideoCapture(1)  #load video (0 or 1 for camera device number)
ok, frame = cap.read()  #read one frame of the stream
listofbbox = []  #initialize the list of bounding boxes

#Setup code so that the output can be saved to file.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output12_10.avi', fourcc, 20.0, (640, 480))

#Select the bbox for each character in order. Press enter after each one.
for i in range(0, int(chars)):
    listofbbox.append(cv2.selectROI(
        frame, False))  #select location of ROI (region of interest)

#Set bbox equal to the first bbox specified.
bbox = listofbbox[0]
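The snippet above selects one ROI per character but only ever tracks listofbbox[0]. If each box should be followed independently, a sketch continuing from the variables above (my addition, not part of the original script) could create one KCF tracker per box:

trackers = []
for box in listofbbox:
    t = cv2.TrackerKCF_create()
    t.init(frame, box)
    trackers.append(t)

ok, frame = cap.read()
if ok:
    for t in trackers:
        found, box = t.update(frame)
        if found:
            x, y, w, h = (int(v) for v in box)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    out.write(frame)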
Code Example #3
tracker_types = [
    'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'
]
tracker_type = tracker_types[7]

if int(minor_ver) < 3:
    tracker = cv2.TrackerMOSSE_create()
    #tracker = cv2.Tracker_create(tracker_type)
else:
    if tracker_type == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    elif tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    elif tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()
    elif tracker_type == 'TLD':
        tracker = cv2.TrackerTLD_create()
    elif tracker_type == 'MEDIANFLOW':
        tracker = cv2.TrackerMedianFlow_create()
    elif tracker_type == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()
    elif tracker_type == 'MOSSE':
        tracker = cv2.TrackerMOSSE_create()
    elif tracker_type == 'CSRT':
        tracker = cv2.TrackerCSRT_create()
print("type is ", tracker_type)
video = cv2.VideoCapture(1)
cv2.namedWindow('result')
cv2.setMouseCallback('result', draw_rectangle)
if not video.isOpened():
    print("Could not open video")
    sys.exit()
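Chains like the one above assume the OpenCV 3.x constructor names; from opencv-contrib-python 4.5.1 on, several of them (Boosting, MedianFlow, TLD, MOSSE, and MultiTracker) live in the cv2.legacy namespace instead. A version-tolerant factory that probes both namespaces, as a sketch:

import cv2

def make_tracker(name):
    """Create a tracker by name, checking cv2 and cv2.legacy (OpenCV >= 4.5.1)."""
    factory_name = 'Tracker' + name + '_create'  # e.g. 'TrackerKCF_create'
    for module in (cv2, getattr(cv2, 'legacy', None)):
        factory = getattr(module, factory_name, None) if module else None
        if factory is not None:
            return factory()
    raise ValueError('Unknown or unavailable tracker: ' + name)

tracker = make_tracker('CSRT')  # 'KCF', 'MedianFlow', 'MOSSE', ... work the same way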
Code Example #4
    def run(self):
        # Capture, process, display loop
        while True:
            # Read a new frame
            ok, self.frame = self.video.read()
            if not ok:
                break
            display = self.frame.copy()  # Frame we'll do all the graphical drawing to
            data_display = np.zeros_like(display, dtype=np.uint8)  # Black screen to display data

            # Start FPS timer
            timer = cv2.getTickCount()

            if self.bg is None:
                cv2.putText(display,
                            "Press 'r' to reset the foreground extraction.",
                            self.positions['hand_pose'],
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 127, 64), 2)
                cv2.imshow("display", display)

                k = cv2.waitKey(1) & 0xff
                if k == 27: break  # ESC pressed
                elif k == 114 or k == 108:
                    # 'r' or 'l' pressed
                    self.bg = self.frame.copy()
                    self.hand_bbox = (116, 116, 170, 170)
                    self.is_tracking = False
            else:
                # Extract the foreground
                foreground, mask = self.extract_foreground()
                foreground_display = foreground.copy()

                # Get hand from mask using the bounding box
                hand_crop = mask[int(self.hand_bbox[1]):int(self.hand_bbox[1] +
                                                            self.hand_bbox[3]),
                                 int(self.hand_bbox[0]):int(self.hand_bbox[0] +
                                                            self.hand_bbox[2])]

                # Update tracker
                if self.is_tracking:
                    tracking, self.hand_bbox = self.tracker.update(foreground)

                try:
                    # Resize cropped hand and make prediction on gesture
                    hand_crop_resized = cv2.resize(hand_crop,
                                                   (54, 54)).reshape((1, 54, 54, 1))
                    prediction = self.recognizer.predict(hand_crop_resized)
                    predi = prediction[0].argmax()  # Index of the highest confidence
                    gesture = self.CLASSES[predi]

                    for i, pred in enumerate(prediction[0]):
                        # Draw confidence bar for each gesture
                        barx = self.positions['hand_pose'][0]
                        bary = 60 + i * 60
                        bar_height = 20
                        bar_length = int(400 * pred) + barx  # length of the confidence bar

                        # Make the most confident prediction green
                        if i == predi:
                            colour = (0, 255, 0)
                        else:
                            colour = (0, 0, 255)

                        cv2.putText(
                            data_display,
                            "{}: {}".format(self.CLASSES[i], pred),
                            (self.positions['hand_pose'][0], 30 + i * 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
                        cv2.rectangle(data_display, (barx, bary),
                                      (bar_length, bary - bar_height), colour,
                                      -1, 1)

                    cv2.putText(display, "hand pose: {}".format(gesture),
                                self.positions['hand_pose'],
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                    cv2.putText(foreground_display,
                                "hand pose: {}".format(gesture),
                                self.positions['hand_pose'],
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                except Exception as ex:
                    cv2.putText(display, "hand pose: error",
                                self.positions['hand_pose'],
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                    cv2.putText(foreground_display, "hand pose: error",
                                self.positions['hand_pose'],
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                # Draw bounding box
                p1 = (int(self.hand_bbox[0]), int(self.hand_bbox[1]))
                p2 = (int(self.hand_bbox[0] + self.hand_bbox[2]),
                      int(self.hand_bbox[1] + self.hand_bbox[3]))
                cv2.rectangle(foreground_display, p1, p2, (255, 0, 0), 2, 1)
                cv2.rectangle(display, p1, p2, (255, 0, 0), 2, 1)

                # Calculate difference in hand position
                hand_pos = ((p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2)
                mouse_change = ((p1[0] + p2[0]) // 2 -
                                self.positions['null_pos'][0],
                                self.positions['null_pos'][0] -
                                (p1[1] + p2[1]) // 2)

                # Draw hand moved difference
                cv2.circle(display, self.positions['null_pos'], 5, (0, 0, 255),
                           -1)
                cv2.circle(display, hand_pos, 5, (0, 255, 0), -1)
                cv2.line(display, self.positions['null_pos'], hand_pos,
                         (255, 0, 0), 5)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Display FPS on frame
                cv2.putText(foreground_display, "FPS : " + str(int(fps)),
                            self.positions['fps'], cv2.FONT_HERSHEY_SIMPLEX,
                            0.65, (50, 170, 50), 2)
                cv2.putText(display, "FPS : " + str(int(fps)),
                            self.positions['fps'], cv2.FONT_HERSHEY_SIMPLEX,
                            0.65, (50, 170, 50), 2)

                # Display pause command text
                cv2.putText(
                    foreground_display,
                    "hold 'r' to recalibrate until the screen is black",
                    (15, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(foreground_display, "to recalibrate", (15, 420),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(foreground_display,
                            "press 'p' to return to paused state", (15, 450),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)

                # Display foreground_display
                cv2.imshow("foreground_display", foreground_display)
                cv2.imshow("display", display)
                # Display result
                cv2.imshow("data", data_display)

                k = cv2.waitKey(1) & 0xff
                if k == 27: break  # ESC pressed
                elif k == 114 or k == 108:  # 'r' or 'l' pressed
                    self.bg = self.frame.copy()
                    self.hand_bbox = (116, 116, 170, 170)
                    self.is_tracking = False
                elif k == 116:  # t pressed
                    # Initialize tracker with first frame and bounding box
                    self.tracker = cv2.TrackerKCF_create()
                    self.tracker.init(self.frame, self.hand_bbox)
                elif k == 112:  # p pressed
                    # Reset to paused state
                    self.is_tracking = False
                    self.bg = None
                    cv2.destroyAllWindows()
                elif k != 255:
                    print(k)
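The raw key codes in the dispatch above (27, 114, 108, 116, 112) are easy to misread; one readability option is to name them with ord(). This is only a sketch of that idea, with hypothetical action names:

KEY_ACTIONS = {
    27: 'quit',                    # ESC
    ord('r'): 'reset_background',  # 114
    ord('l'): 'reset_background',  # 108
    ord('t'): 'start_tracking',    # 116
    ord('p'): 'pause',             # 112
}

def decode_key(k):
    """Map a cv2.waitKey() result to an action name (None if unbound)."""
    return KEY_ACTIONS.get(k & 0xff)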
Code Example #5
def Analise(result, ui=False, vid_folder="/content/TRAIN_0"):
    data = {
        'ATA': 0.0,
        'F': 0.0,
        'F1': 0.0,
        'OTP': 0.0,
        'OTA': 0.0,
        'Deviation': 0.0,
        'PBM': 0.0,
        'FPS': 0.0,
        'Ms': 0,
        'fp': 0,
        'tp': 0,
        'fn': 0,
        'g': 0
    }
    result[1] = data
    tracker_type = result[0]
    result = result[1:]
    seq_ID = result[1]["my_object"]["video"]
    del result[1]["my_object"]["video"]
    frames_folder = os.path.join(vid_folder, "frames", seq_ID)
    anno_file = os.path.join(vid_folder, "anno", seq_ID + ".txt")
    anno = np.loadtxt(anno_file, delimiter=",")
    frames_list = [
        frame for frame in os.listdir(frames_folder) if frame.endswith(".jpg")
    ]
    if len(anno) != len(frames_list):
        print("Not the same number of frames and annotations!")
        return
    # Define an initial bounding box
    bbox = (anno[0][0], anno[0][1], anno[0][2], anno[0][3])
    frame = cv2.imread(os.path.join(frames_folder, "0.jpg"))
    if tracker_type == "MOSSE":
        tracker = cv2.TrackerMOSSE_create()
    elif tracker_type == "TLD":
        tracker = cv2.TrackerTLD_create()
    elif tracker_type == "GOTURN":
        tracker = cv2.TrackerGOTURN_create()
    elif tracker_type == "BOOSTING":
        tracker = cv2.TrackerBoosting_create()
    elif tracker_type == "MIL":
        tracker = cv2.TrackerMIL_create()
    elif tracker_type == "KCF":
        tracker = cv2.TrackerKCF_create()
    elif tracker_type == "MEDIANFLOW":
        tracker = cv2.TrackerMedianFlow_create()
    elif tracker_type == "CSRT":
        tracker = cv2.TrackerCSRT_create()
    file_path = rndStr() + ".json"
    file1 = open(file_path, 'w')
    file1.writelines(json.dumps(result[1]))
    file1.close()
    fs = cv2.FileStorage(file_path, cv2.FILE_STORAGE_READ)
    tracker.read(fs.getFirstTopLevelNode())
    os.remove(file_path)
    ok = tracker.init(frame, bbox)
    if not ok:
        print("Initialisation error")
        return
    data["Ms"] += 1
    data["tp"] += 1
    data["ATA"] += IOU(bbox, bbox)
    data["Deviation"] += NCDist(bbox, bbox)
    data["F1"] += F1(bbox, bbox)
    data["PBM"] += 1 - L1(bbox, bbox)
    for i in range(1, len(frames_list)):
        frame_file = str(i) + ".jpg"
        imgs_file = os.path.join(frames_folder, frame_file)

        frame = cv2.imread(imgs_file)
        anno_bbox = (anno[i][0], anno[i][1], anno[i][2], anno[i][3])
        x_min = int(anno[i][0])
        x_max = int(anno[i][2] + anno[i][0])
        y_min = int(anno[i][1])
        y_max = int(anno[i][3] + anno[i][1])

        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 0, 255), 2, 1)

        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fpS = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            if ui:
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            # print("Tracking failure detected")
            data["fn"] += 1
            if ui:
                cv2.putText(frame, "Tracking failure detected", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        if ui:
            cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fpS)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display result
            cv2.imshow("Tracking", frame)
        iou = IOU(bbox, anno_bbox)
        data["ATA"] += iou
        data["FPS"] += fpS
        data["F1"] += F1(bbox, anno_bbox)
        data["PBM"] += 1 - (L1(bbox, anno_bbox) if iou > 0 else Th(
            bbox, anno_bbox)) / Th(bbox, anno_bbox)
        data["g"] += 1
        if iou >= 0.5:
            data["tp"] += 1
        elif iou > 0:
            data["OTP"] += iou
            data["Deviation"] += NCDist(bbox, anno_bbox)
            data["Ms"] += 1
        else:
            data["fp"] += 1

        # Exit if ESC pressed
        if ui:
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                sys.exit()
        # print(tracker_type, i)
    data["F"] = F(data["tp"], data["fp"], data["fn"])
    data["F1"] /= len(frames_list)
    data["OTA"] = 1 - (data["fp"] + data["fn"]) / data["g"]
    data["OTP"] /= data["Ms"]
    data["ATA"] /= len(frames_list)
    data["FPS"] /= len(frames_list)
    data["Deviation"] = 1 - data["Deviation"] / data["Ms"]
    if data["Deviation"] < -1:
        data["Deviation"] = -1
    data["PBM"] /= len(frames_list)
    result[0].update(result[1]["my_object"])
    result = result[0]
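IOU, F1, L1, NCDist, Th and F are the project's own metric helpers and are not shown here. For orientation, a plausible IOU over the (x, y, w, h) boxes this example uses (my sketch, not the project's implementation):

def IOU(a, b):
    """Intersection over union of two (x, y, w, h) boxes."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2 = min(a[0] + a[2], b[0] + b[2])
    iy2 = min(a[1] + a[3], b[1] + b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union if union > 0 else 0.0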
Code Example #6
File: objDetect.py Project: DMANSS/AuroraCV
    def start(self):
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

        # Choose the tracker.
        tracker_types = [
            'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN'
        ]
        tracker_type = tracker_types[4]

        if int(minor_ver) < 3:
            tracker = cv2.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                tracker = cv2.TrackerBoosting_create()
            if tracker_type == 'MIL':
                tracker = cv2.TrackerMIL_create()
            if tracker_type == 'KCF':
                tracker = cv2.TrackerKCF_create()
            if tracker_type == 'TLD':
                tracker = cv2.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                tracker = cv2.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                tracker = cv2.TrackerGOTURN_create()

        # Open the camera
        video = cv2.VideoCapture(0)

        if not video.isOpened():
            print("The camera is not on port 0")
            sys.exit()

        # Read the first frame
        ok, frame = video.read()
        if not ok:
            print('Error reading the video')
            sys.exit()

        # Select the object with a bounding box
        bbox = (287, 23, 86, 320)

        bbox = cv2.selectROI(frame, False)

        # Initialize the tracker with the first frame and the bounding box
        ok = tracker.init(frame, bbox)

        while True:
            # Read a new frame
            ok, frame = video.read()
            if not ok:
                break

            # Start the timer
            timer = cv2.getTickCount()

            # Update the tracker
            ok, bbox = tracker.update(frame)

            # Calculate FPS
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            # Draw the bounding box
            if ok:
                # Object found
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            else:
                # Object not found
                cv2.putText(frame, "Can't find any", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

            # Tracker HUD (which tracker is in use)
            cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # FPS HUD
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display the result
            cv2.imshow("Tracking", frame)

            # Exit on the ESC key
            k = cv2.waitKey(1) & 0xff
            if k == 27: break
Code Example #7
File: tracker.py Project: tsaxena/UltimateLabeling
    def init(self, image_path, bbox):
        img = cv2.imread(image_path)
        self.tracker = cv2.TrackerKCF_create()
        self.tracker.init(img, tuple(bbox.to_json()))
        self.tracker.update(img)
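The bbox argument here is an UltimateLabeling object whose to_json() is assumed to return the box as [x, y, w, h]; the project's class is not shown. A minimal stand-in for experimenting with the snippet (hypothetical, not the project's code):

class Bbox:
    def __init__(self, x, y, w, h):
        self.x, self.y, self.w, self.h = x, y, w, h

    def to_json(self):
        # The init() call above only relies on this [x, y, w, h] shape.
        return [self.x, self.y, self.w, self.h]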
Code Example #8
ap.add_argument("--tracker",
                required=False,
                help="OpenCV object tracker type (e.g. kcf, csrt)")
args = vars(ap.parse_args())

video = args["image"]
tracker_type = args["tracker"]

cap = cv2.VideoCapture(video)
ret, frame = cap.read()
initBB = cv2.selectROI("Object to track", frame)
cv2.destroyWindow("Object to track")

# Factory functions, so only the requested tracker is instantiated
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create,
    "tld": cv2.TrackerTLD_create,
    "medianflow": cv2.TrackerMedianFlow_create,
    "mosse": cv2.TrackerMOSSE_create
}

if tracker_type:
    tracker = OPENCV_OBJECT_TRACKERS[tracker_type]()
else:
    tracker = cv2.TrackerKCF_create()
tracker.init(frame, initBB)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # The source snippet is truncated here; a typical update/display loop follows.
    ok, box = tracker.update(frame)
    if ok:
        x, y, w, h = (int(v) for v in box)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow("Tracking", frame)
    if cv2.waitKey(1) & 0xff == 27:  # ESC
        break
Code Example #9
    def __init__(self,
                 x_center,
                 half_width,
                 y_center,
                 half_height,
                 y_bar_start,
                 half_bar_width,
                 half_bar_height,
                 tracker_type='BOOSTING'):
        """
        Parameters
        ----------
        x_center : int
            Horizontal position of the center of the main area.
        half_width : int
            Half the width of the main area.
        y_center : int
            Vertical position of the center of the main area.
        half_height : int
            Half the height of the main area.
        y_bar_start : int
            Vertical position of the inner area. This value is relative to the
            main area.
        half_bar_width : int
            Half the width of the inner area.
        half_bar_height : int
            Half the height of the inner area.
        tracker_type : str
            Name of one of the built-in trackers in OpenCV.
        """
        SectionProcessor.__init__(self)
        # Top-left corner of the main area.
        self.x_start = int(x_center - half_width)
        self.y_start = int(y_center - half_height)

        # Bottom-right corner of the main area.
        self.x_end = int(x_center + half_width)
        self.y_end = int(y_center + half_height)

        # Top-left corner of the inner area. This position is relative to the
        # main area.
        self.y_bar_start = y_bar_start
        self.x_bar_start = int(half_width - half_bar_width)

        # Inner area
        self.bbox = (self.x_bar_start, self.y_bar_start, half_bar_width * 2,
                     half_bar_height * 2)

        # Vertical positions of the top-left corner of the inner area
        self.y_pos_history = None

        # Indices of the frames where the inner area is in the bottom position.
        self.peaks = None

        self.events = None

        # Tracker
        (major_ver, minor_ver, subminor_ver) = (cv.__version__).split('.')
        if int(minor_ver) < 3:
            self.tracker = cv.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                self.tracker = cv.TrackerBoosting_create()
            if tracker_type == 'MIL':
                self.tracker = cv.TrackerMIL_create()
            if tracker_type == 'KCF':
                self.tracker = cv.TrackerKCF_create()
            if tracker_type == 'TLD':
                self.tracker = cv.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                self.tracker = cv.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                self.tracker = cv.TrackerGOTURN_create()
            if tracker_type == 'MOSSE':
                self.tracker = cv.TrackerMOSSE_create()
            if tracker_type == "CSRT":
                self.tracker = cv.TrackerCSRT_create()
Code Example #10
    def trackAllJetsonCam(self,
                          inputVideo,
                          save_tracking=False,
                          show_tracking=True,
                          Frame_int_Goturn=0,
                          TimeToRun=30,
                          camera=False):
        """Track the objects in the video.
        TimeToRun is in seconds!
        """
        objRegressor = self.regressor
        objTracker = self.tracker

        if camera:
            vid = cv2.VideoCapture(
                "nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720,format=(string)I420, "
                "framerate=(fraction)20/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! "
                "video/x-raw, format=(string)BGR ! appsink")
            num_frames = TimeToRun * 20
        else:
            vid = cv2.VideoCapture(inputVideo)
            num_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        ok, frame_0 = vid.read()
        if not ok:
            print("Couldn't read first frame. Exit.")
            exit()
        FPS = vid.get(cv2.CAP_PROP_FPS)

        if save_tracking:
            movie_number = 903
            tracker_type = 'GoturnGPU'
            fourcc = cv2.VideoWriter_fourcc(*'DIVX')
            width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
            output_video_name = 'output_videos/movie_' + str(
                movie_number) + time.strftime(
                    "_%d%m%Y_%H%M%S") + '_' + tracker_type + '.avi'
            outVideo = cv2.VideoWriter(output_video_name, fourcc, int(60),
                                       (width, height))

        box_0 = cv2.selectROI(frame_0)
        cv2.destroyAllWindows()

        bbox_0 = BoundingBox(x1=box_0[0],
                             y1=box_0[1],
                             x2=box_0[0] + box_0[2],
                             y2=box_0[1] + box_0[3])
        # Init Goturn
        objTracker.init(frame_0, bbox_0, objRegressor)

        # Init KCF
        multiTracker = cv2.MultiTracker_create()
        multiTracker.add(cv2.TrackerKCF_create(), frame_0,
                         (bbox_0.x1, bbox_0.y1, (bbox_0.x2 - bbox_0.x1),
                          (bbox_0.y2 - bbox_0.y1)))
        num_KCF = 0
        bbox = bbox_0
        timerI = cv2.getTickCount()
        for i in range(2, num_frames):
            print('Tracking on frame:' + str(i) + '/' + str(num_frames))
            ok, frame = vid.read()
            if not ok:
                break
            ### Start timer
            timer = cv2.getTickCount()

            ### Update Tracker
            if num_KCF < Frame_int_Goturn:
                ok_KCF, box = multiTracker.update(frame)
                if ok_KCF:
                    bbox = BoundingBox(x1=box[0][0],
                                       y1=box[0][1],
                                       x2=box[0][0] + box[0][2],
                                       y2=box[0][1] + box[0][3])
                    num_KCF += 1
                    color = (50, 255, 50)
                if num_KCF == Frame_int_Goturn or not ok_KCF:
                    objTracker.update__prev_Jo(frame, bbox)
            if not num_KCF < Frame_int_Goturn or not ok_KCF:
                num_KCF = 0
                bbox = objTracker.track(frame, objRegressor)

                multiTracker = cv2.MultiTracker_create()
                multiTracker.add(
                    cv2.TrackerKCF_create(), frame,
                    (int(bbox.x1), int(bbox.y1), int(bbox.x2 - bbox.x1),
                     int(bbox.y2 - bbox.y1)))
                color = (255, 0, 0)

            ### Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            ### Draw bounding box
            ImageDraw = frame.copy()
            ImageDraw = cv2.rectangle(ImageDraw, (int(bbox.x1), int(bbox.y1)),
                                      (int(bbox.x2), int(bbox.y2)), color, 2)

            # Display FPS on frame
            cv2.putText(ImageDraw, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)

            if show_tracking:
                cv2.imshow('Results', ImageDraw)
                cv2.waitKey(1)
            if save_tracking:
                outVideo.write(ImageDraw)
        timerF = cv2.getTickCount()
        Time_proc = (timerF - timerI) / cv2.getTickFrequency()
        print('Total processing time =' + str(int(Time_proc)) + 'sec')
        print('Average FPS =' + str(i / Time_proc))
        cv2.imshow('Results', ImageDraw)
        cv2.waitKey(5000)
        vid.release()
        if save_tracking:
            outVideo.release()
Code Example #11
    def trackAllRealTimeSimu(self,
                             input_folder,
                             inputVideo,
                             save_tracking=False,
                             show_tracking=True,
                             Frame_int_Goturn=0):
        """Track the objects in the video.
        Simulates real time by selecting the next frame according to the
        computation time of each iteration.
        """
        objRegressor = self.regressor
        objTracker = self.tracker

        vid = cv2.VideoCapture(input_folder + inputVideo)
        FPS = vid.get(cv2.CAP_PROP_FPS)
        ok, frame_0 = vid.read()
        if not ok:
            print("Couldn't read first frame. Exit.")
            exit()

        if save_tracking:

            movie_number = 0
            tracker_type = 'GoturnGPU'
            fourcc = cv2.VideoWriter_fourcc(*'DIVX')
            width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
            output_video_name = 'output_videos/movie_' + inputVideo + str(
                movie_number) + time.strftime(
                    "_%d%m%Y_%H%M%S") + '_' + tracker_type + '.avi'
            outVideo = cv2.VideoWriter(output_video_name, fourcc, int(FPS),
                                       (width, height))

        box_0 = cv2.selectROI(frame_0)
        cv2.destroyAllWindows()
        num_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))

        bbox = BoundingBox(x1=box_0[0],
                           y1=box_0[1],
                           x2=box_0[0] + box_0[2],
                           y2=box_0[1] + box_0[3])
        # Init Goturn
        objTracker.init(frame_0, bbox, objRegressor)

        # Init KCF
        multiTracker = cv2.MultiTracker_create()
        multiTracker.add(cv2.TrackerKCF_create(), frame_0,
                         (bbox.x1, bbox.y1, (bbox.x2 - bbox.x1),
                          (bbox.y2 - bbox.y1)))
        num_KCF = 0
        TimeSimulation = 0
        i = 0
        color = (50, 255, 50)
        while True:
            i += 1
            # Simulate real time by taking the next frame
            TimeSimuFrame = TimeSimulation * FPS
            NumframeSimu = int(TimeSimuFrame)
            print('Tracking on frame:' + str(NumframeSimu) + '/' +
                  str(num_frames))
            vid.set(1, NumframeSimu)
            ok, frame = vid.read()
            if not ok:
                break

            ### Start timer
            timeA = cv2.getTickCount()

            ### Update Tracker
            if num_KCF < Frame_int_Goturn:
                ok_KCF, box = multiTracker.update(frame)
                if ok_KCF:
                    bbox = BoundingBox(x1=box[0][0],
                                       y1=box[0][1],
                                       x2=box[0][0] + box[0][2],
                                       y2=box[0][1] + box[0][3])
                    num_KCF += 1
                if num_KCF == Frame_int_Goturn or not ok_KCF:
                    objTracker.update__prev_Jo(frame, bbox)
                    color = (255, 0, 0)
            if not num_KCF < Frame_int_Goturn or not ok_KCF:
                num_KCF = 0
                bbox = objTracker.track(frame, objRegressor)

                multiTracker = cv2.MultiTracker_create()
                multiTracker.add(
                    cv2.TrackerKCF_create(), frame,
                    (int(bbox.x1), int(bbox.y1), int(bbox.x2 - bbox.x1),
                     int(bbox.y2 - bbox.y1)))

            ### Calculate Frames per second (FPS)
            timeB = cv2.getTickCount()
            fps = cv2.getTickFrequency() / (timeB - timeA)
            TimeSimulation += 1 / fps

            ### Draw bounding box
            ImageDraw = frame.copy()
            ImageDraw = cv2.rectangle(ImageDraw, (int(bbox.x1), int(bbox.y1)),
                                      (int(bbox.x2), int(bbox.y2)), color, 2)
            color = (50, 255, 50)

            # Display FPS on frame
            cv2.putText(ImageDraw, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)

            if show_tracking:
                cv2.imshow('Results', ImageDraw)
                cv2.waitKey(1)
            if save_tracking:
                outVideo.write(ImageDraw)
        timerF = cv2.getTickCount()
        print('Total processing time =' + str(TimeSimulation))
        print('Average FPS =' + str(i / TimeSimulation))
        if save_tracking:
            outVideo.release()
Code Example #12
    def trackAllGoturnKcf(self,
                          inputVideo,
                          save_tracking=False,
                          show_tracking=True,
                          Frame_int_Goturn=0):
        """Track the objects in the video.
        Combines OpenCV's KCF tracking with GOTURN tracking.
        """
        objRegressor = self.regressor
        objTracker = self.tracker

        vid = cv2.VideoCapture(inputVideo)
        ok, frame_0 = vid.read()
        if not ok:
            print("Couldn't read first frame. Exit.")
            exit()

        if save_tracking:
            movie_number = 0
            tracker_type = 'GoturnGPU'
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
            FPS = vid.get(cv2.CAP_PROP_FPS)
            output_video_name = 'output_videos/movie_' + str(
                movie_number) + time.strftime(
                    "_%d%m%Y_%H%M%S") + '_' + tracker_type + '.mp4'
            outVideo = cv2.VideoWriter(output_video_name, fourcc, int(FPS),
                                       (width, height))

        box_0 = cv2.selectROI(frame_0)
        cv2.destroyAllWindows()
        num_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))

        bbox_0 = BoundingBox(x1=box_0[0],
                             y1=box_0[1],
                             x2=box_0[0] + box_0[2],
                             y2=box_0[1] + box_0[3])
        # Init Goturn
        objTracker.init(frame_0, bbox_0, objRegressor)

        # Init KCF
        multiTracker = cv2.MultiTracker_create()
        multiTracker.add(cv2.TrackerKCF_create(), frame_0,
                         (bbox_0.x1, bbox_0.y1, (bbox_0.x2 - bbox_0.x1),
                          (bbox_0.y2 - bbox_0.y1)))
        num_KCF = 0
        bbox = bbox_0
        timerI = cv2.getTickCount()
        for i in range(2, num_frames):
            print('Tracking on frame:' + str(i) + '/' + str(num_frames))
            ok, frame = vid.read()
            if not ok:
                break
            ### Start timer
            timer = cv2.getTickCount()

            ### Update Tracker
            if num_KCF < Frame_int_Goturn:
                ok_KCF, box = multiTracker.update(frame)
                if ok_KCF:
                    bbox = BoundingBox(x1=box[0][0],
                                       y1=box[0][1],
                                       x2=box[0][0] + box[0][2],
                                       y2=box[0][1] + box[0][3])
                    num_KCF += 1
                    color = (50, 255, 50)
                if num_KCF == Frame_int_Goturn or not ok_KCF:
                    objTracker.update__prev_Jo(frame, bbox)
            if not num_KCF < Frame_int_Goturn or not ok_KCF:
                num_KCF = 0
                bbox = objTracker.track(frame, objRegressor)

                multiTracker = cv2.MultiTracker_create()
                multiTracker.add(
                    cv2.TrackerKCF_create(), frame,
                    (int(bbox.x1), int(bbox.y1), int(bbox.x2 - bbox.x1),
                     int(bbox.y2 - bbox.y1)))
                color = (255, 0, 0)

            ### Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            ### Draw bounding box
            ImageDraw = frame.copy()
            ImageDraw = cv2.rectangle(ImageDraw, (int(bbox.x1), int(bbox.y1)),
                                      (int(bbox.x2), int(bbox.y2)), color, 2)

            # Display FPS on frame
            cv2.putText(ImageDraw, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), 2)

            if show_tracking:
                cv2.imshow('Results', ImageDraw)
                cv2.waitKey(1)
            if save_tracking:
                outVideo.write(ImageDraw)
        timerF = cv2.getTickCount()
        Time_proc = (timerF - timerI) / cv2.getTickFrequency()
        print('Total processing time =' + str(int(Time_proc)) + 'sec')
        print('Average FPS =' + str(i / Time_proc))
        cv2.imshow('Results', ImageDraw)
        cv2.waitKey(5000)
        vid.release()
        if save_tracking:
            outVideo.release()
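The three methods above share one hybrid pattern: run the cheap KCF tracker for Frame_int_Goturn frames, then let GOTURN re-anchor the track. Reduced to the hand-off decision alone (kcf_update and goturn_track are hypothetical callables standing in for multiTracker.update and objTracker.track), the per-frame logic is roughly:

def hybrid_step(frame, state, goturn_interval, kcf_update, goturn_track):
    """One frame of the KCF/GOTURN hand-off; a sketch, not the original code."""
    ok_kcf = False
    if state['num_kcf'] < goturn_interval:
        ok_kcf, bbox = kcf_update(frame)
        if ok_kcf:
            state['bbox'] = bbox
            state['num_kcf'] += 1
    if state['num_kcf'] >= goturn_interval or not ok_kcf:
        state['num_kcf'] = 0
        # GOTURN re-anchors the track (the originals also rebuild the KCF
        # MultiTracker from this new box)
        state['bbox'] = goturn_track(frame)
    return state['bbox']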
Code Example #13
    def __init__(self, frame, face):
        (x, y, w, h) = face
        self.face = (x, y, w, h)
        # Arbitrarily picked KCF tracking
        self.tracker = cv2.TrackerKCF_create()
        self.tracker.init(frame, self.face)
Code Example #14
def main():
    """Control function that reads webcam, and tracks a marked object."""
    global x0, y0, x1, y1, drawing, mode, frame, bbox, tracker, tracker_initialized
    global MODE_MARK

    #
    # initialization
    #
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    
    MODE_TRACK = 0          # track an object
    MODE_SHOW = 1           # only show tracking markers on video
    MODE_MARK = 2           # mark region color to track
    MODE_TRACK_HOLD = 3     # temporarily suspend tracking until object is recaptured

    tracker_initialized = False
    bbox = (0, 0, 0, 0)
    last_good_bbox = bbox
    
    mode = MODE_SHOW
    mode_text = 'Show'
    fps_text = '?? Fps'
    batt_level = 0.0
    batt_level_text = '?? [v]'
    cvs_title_printed = False
    
    drawing = False         # true if mouse is pressed
    x0, y0 = -1, -1
    x1, y1 = -1, -1
    
    #
    # PID constants for pan and tilt PID controller.
    # These constants are tuned for the worm-gear mechanical setup
    #
    pan_P = 0.8
    pan_I = 0.05
    pan_D = 0.1
    
    tilt_P = 0.6
    tilt_I = 0.05
    tilt_D = 0.1

    print(' m - mark color region to track\n t - track\n s - display tracking marker only\n ESC - quit')

    #
    # connect to NXT device
    # define motor references
    #
    devNXT = nxt.locator.find_one_brick()
    tilt_motor = nxt.motor.Motor(devNXT, nxt.motor.PORT_A)
    pan_motor = nxt.motor.Motor(devNXT, nxt.motor.PORT_B)

    #
    # Initialize start time and frame count.
    # Initialize a reference start time for the CSV output trace
    #
    frame_count = 0
    start = time.time()
    ref_time = time.time()

    #
    # link event callback function
    #
    cv2.namedWindow('image', cv2.WINDOW_GUI_NORMAL+cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback('image', mark_rect)

    #
    # setup font for overlay text 
    #
    font = cv2.FONT_HERSHEY_SIMPLEX

    #
    # Set up tracker.
    # Tracker algorithm is hard coded here to default tracker KCF.
    # 
    tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]
 
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    #
    # Open the capture device and print some
    # useful properties.
    # This tracker program will leave the default webcam frame size
    # that is 640x480 for the webcam I am using.
    #
    cap = cv2.VideoCapture(0)
    
    if cap.isOpened():
        #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
        #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

        frameWidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frameHeight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

        print('frame: width {}, height {}'.format(frameWidth, frameHeight))
        
        frameCenterX = int(frameWidth/2)
        frameCenterY = int(frameHeight/2)
    else:
        sys.exit()

    #
    # frame capture and processing loop
    #
    while(True):
        #
        # capture a frame;
        # convert to an appropriate color space to improve detection
        # in different lighting conditions
        #
        cap_ok, frame = cap.read()
        if not cap_ok:
            break

        #
        # Operations on the captured image done here.
        # If marking a section on the frame for tracking
        # then only display the ROI selection
        #
        if mode == MODE_MARK:
            cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 255, 0), 1)

        #
        # If tracking or only displaying object tracking information
        # then draw the tracking markers on the frame before it is displayed.
        # Only do this if the tracker was initialized
        #
        elif tracker_initialized:
            #
            # Update the tracker with the newly acquired frame.
            #
            track_ok, bbox = tracker.update(frame)

            #
            # If the tracker update was successful, object still being tracked, then
            # update the prev bounding box position and proceed to:
            # - display the tracker bounding box
            # - an arrow line from frame center to the object center
            # - calculate pan and tilt error from frame center
            #
            if track_ok:
                last_good_bbox = bbox
                
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2)

                object_x = int(bbox[0] + bbox[2]/2)
                object_y = int(bbox[1] + bbox[3]/2)

                err_pan_i = frameCenterX - object_x
                err_tilt_i = frameCenterY - object_y

                cv2.arrowedLine(frame, (frameCenterX, frameCenterY), (object_x, object_y), (255, 0, 0), 2)

                if mode == MODE_TRACK_HOLD:
                    mode = MODE_TRACK
            
            #
            # If tracking is lost for some reason then use the last location
            # of the bounding box to mark that last location with a red box
            #
            else:
                p1 = (int(last_good_bbox[0]), int(last_good_bbox[1]))
                p2 = (int(last_good_bbox[0] + last_good_bbox[2]), int(last_good_bbox[1] + last_good_bbox[3]))
                cv2.rectangle(frame, p1, p2, (0, 0, 255), 1)

                if mode == MODE_TRACK:
                    mode = MODE_TRACK_HOLD

        #
        # Only when in tracking mode activate the motors,
        # and use PID calculations to control the pan-tilt device
        #
        if mode == MODE_TRACK and tracker_initialized:
            #
            # First apply an exponential filter to the tracker position error.
            # info: https://en.wikipedia.org/wiki/Exponential_smoothing
            # Then do PID tracking for NXT motors' pan-tilt control.
            #
            err_pan = exp_filter_pan(err_pan_i)
            err_tilt = exp_filter_tilt(err_tilt_i)
            control_pan = pid_pan(err_pan, pan_P, pan_I, pan_D)
            control_tilt = -1.0 * pid_tilt(err_tilt, tilt_P, tilt_I, tilt_D)
            
            #
            # Print out some data in a CSV compatible format for graphing
            #
            if not cvs_title_printed:
                print('rel_time,err_tilt_i,err_tilt,control_tilt,err_pan_i,err_pan,control_pan')
                cvs_title_printed = True
               
            now = time.time() - ref_time
            print('{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f}'.format(
                now, err_tilt_i, err_tilt, control_tilt, err_pan_i, err_pan, control_pan))
            
            #
            # activate NXT motors
            #
            control_pan = power_limit(control_pan, 90.0)
            control_tilt = power_limit(control_tilt, 90.0)
            
            # uncomment one of the following lines
            # in order to isolate pan or tilt for PID tuning/testing
            #control_pan = 0.0
            #control_tilt = 0.0
            
            pan_motor.run(int(control_pan), True)
            tilt_motor.run(int(control_tilt), True)

        #
        # This section will turn motors off
        # when not in tracking mode. Note above code lines
        # force MODE_TRACK_HOLD if no objects exist or if more than one
        # object is detected. This state will shut motors off.
        #
        else:
            pan_motor.idle()
            tilt_motor.idle()

        #
        # Calculate and display FPS.
        # Use the 10sec interval to also poll the NXT for battery level.
        #
        frame_count = frame_count + 1
        end = time.time()
        measure_interval = end - start
        if measure_interval > 10:
            fps = frame_count / measure_interval
            fps_text = '{:.2f} Fps'.format(fps)
            frame_count = 0
            start = time.time()
            batt_level = devNXT.get_battery_level() / 1000.0
            batt_level_text = '{:.2f} [v]'.format(batt_level)

        #
        # Add text and other markers to the image
        #
        if mode == MODE_TRACK_HOLD:
            cv2.putText(frame, mode_text, (1, 20), font, 0.4, (0, 0, 255), 1, cv2.LINE_AA)
        else:
            cv2.putText(frame, mode_text, (1, 20), font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)

        cv2.putText(frame, tracker_type, (1, 40), font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
        cv2.putText(frame, fps_text, (1, 60), font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
        
        if batt_level > 6.50:
            cv2.putText(frame, batt_level_text, (1, 80), font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
        else:
            cv2.putText(frame, batt_level_text, (1, 80), font, 0.4, (0, 0, 255), 1, cv2.LINE_AA)

        #
        # Display the resulting frame
        #
        cv2.imshow('image', frame)
              
        #
        #   key input mode/command
        #
        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break
        elif key == ord('m'):
            x0,y0  = -1,-1
            x1,y1  = -1,-1
            mode_text = 'Mark'
            mode = MODE_MARK
        elif key == ord('t'):
            mode_text = 'Track'
            if tracker_initialized:
                mode = MODE_TRACK
            else:
                mode = MODE_TRACK_HOLD
        elif key == ord('s'):
            mode_text = 'Show'
            mode = MODE_SHOW
        else:
            pass

    #
    # When done, stop motors and release the capture.
    #
    pan_motor.idle()
    tilt_motor.idle()
    cap.release()
    cv2.destroyAllWindows()
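pid_pan, pid_tilt, exp_filter_pan/exp_filter_tilt and power_limit are defined elsewhere in this script. For reference, a minimal PID with the same call shape as pid_pan(err, P, I, D) (my sketch, not the original helpers; a production controller would also scale by the time step):

def make_pid():
    """Return a PID function with its own accumulated state."""
    state = {'integral': 0.0, 'prev_err': 0.0}

    def pid(err, kp, ki, kd):
        state['integral'] += err
        derivative = err - state['prev_err']
        state['prev_err'] = err
        return kp * err + ki * state['integral'] + kd * derivative

    return pid

pid_pan = make_pid()
pid_tilt = make_pid()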
Code Example #15
def object_track():
    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[0]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    # Read video
    video = cv2.VideoCapture(0)

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Select a different bounding box interactively (overrides the default above)
    bbox = cv2.selectROI(frame, False)
    cv2.destroyAllWindows()
    # Preview loop: show the box until SPACE (key 32) is pressed, then initialize the tracker
    while True:
        ret, frame = video.read()
        if ret:
            k = cv2.waitKey(30)
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            frame = cv2.flip(frame, 1)
            cv2.imshow("Initialize Tracking", frame)
            if k == 32:
                break
    cv2.destroyAllWindows()
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        frame = cv2.flip(frame, 1)
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(30) & 0xff
        if k == 27: break
Code Example #16
tracker_types = [
    'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'
]
tracker_type = tracker_types[7]

if int(major_ver) < 3:
    finger_tracker = cv2.Tracker_create(tracker_type)
    phone_tracker = cv2.Tracker_create(tracker_type)
else:
    if tracker_type == 'BOOSTING':
        finger_tracker = cv2.TrackerBoosting_create()
        phone_tracker = cv2.TrackerBoosting_create()
    if tracker_type == 'MIL':
        finger_tracker = cv2.TrackerMIL_create()
        phone_tracker = cv2.TrackerMIL_create()
    if tracker_type == 'KCF':
        finger_tracker = cv2.TrackerKCF_create()
        phone_tracker = cv2.TrackerKCF_create()
    if tracker_type == 'TLD':
        finger_tracker = cv2.TrackerTLD_create()
        phone_tracker = cv2.TrackerTLD_create()
    if tracker_type == 'MEDIANFLOW':
        finger_tracker = cv2.TrackerMedianFlow_create()
        phone_tracker = cv2.TrackerMedianFlow_create()
    if tracker_type == 'GOTURN':
        finger_tracker = cv2.TrackerGOTURN_create()
        phone_tracker = cv2.TrackerGOTURN_create()
    if tracker_type == 'MOSSE':
        finger_tracker = cv2.TrackerMOSSE_create()
        phone_tracker = cv2.TrackerMOSSE_create()
    if tracker_type == "CSRT":
        finger_tracker = cv2.TrackerCSRT_create()
        phone_tracker = cv2.TrackerCSRT_create()
Code Example #17
    def __init__(self, flag):
        self.batch_size = 32  #How many experiences to use for each training step.
        self.update_freq = 4  #How often to perform a training step.
        self.y = .99  #Discount factor on the target Q-values
        self.startE = 1  #Starting chance of random action
        self.endE = 0.1  #Final chance of random action
        self.annealing_steps = 10000.  #How many steps of training to reduce startE to endE.
        self.num_episodes = 10000  #How many episodes of game environment to train network with.
        self.pre_train_steps = 10000  #How many steps of random actions before training begins.
        self.max_epLength = 500  #The max allowed length of our episode.
        self.load_model = False  #Whether to load a saved model.
        self.path = "./dqn"  #The path to save our model to.
        self.h_size = 512  #The size of the final convolutional layer before splitting it into Advantage and Value streams.
        self.tau = 0.001  #Rate to update target network toward primary network
        self.action_num = 5

        tf.reset_default_graph()
        self.mainQN = DQN(self.h_size, self.action_num)
        self.targetQN = DQN(self.h_size, self.action_num)

        self.init = tf.global_variables_initializer()

        self.saver = tf.train.Saver()

        self.trainables = tf.trainable_variables()

        self.targetOps = self.updateTargetGraph(self.trainables, self.tau)

        self.myBuffer = experience_buffer()

        # Set the rate of random action decrease.
        self.e = self.startE
        self.stepDrop = (self.startE - self.endE) / self.annealing_steps

        # create lists to contain total rewards and steps per episode
        self.jList = []
        self.rList = []
        self.total_steps = 0

        self.game = sim(200, True)

        self.is_Train = flag

        # for Tracking
        self.cap = None
        self.col = -1
        self.width = -1
        self.row = -1
        self.height = -1
        self.frame = None
        self.frame2 = None
        self.inputmode = False
        self.rectangle = False
        self.trackWindow = None
        self.roi_hist = None
        self.roi = None
        self.caffe_model_path = './MobileNetSSD_deploy.caffemodel'
        self.prorotxt_path = './MobileNetSSD_deploy.prototxt.txt'
        self.net = None
        self.obstacle_points = []
        self.target_point = None
        self.obstacle_box_color = (0, 0, 255)
        self.tracker_types = [
            'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN'
        ]
        self.tracker_type = self.tracker_types[2]

        # Use elif so the trailing else cannot override an already chosen tracker
        if self.tracker_type == 'BOOSTING':
            self.tracker = cv2.TrackerBoosting_create()
        elif self.tracker_type == 'MIL':
            self.tracker = cv2.TrackerMIL_create()
        elif self.tracker_type == 'KCF':
            self.tracker = cv2.TrackerKCF_create()
        elif self.tracker_type == 'TLD':
            self.tracker = cv2.TrackerTLD_create()
        elif self.tracker_type == 'GOTURN':
            self.tracker = cv2.TrackerGOTURN_create()
        else:
            self.tracker = cv2.TrackerMedianFlow_create()
Code Example #18
def cv_tracking():
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[0]

    if tracker_type == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    if tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()
    if tracker_type == 'TLD':
        tracker = cv2.TrackerTLD_create()
    if tracker_type == 'MEDIANFLOW':
        tracker = cv2.TrackerMedianFlow_create()
    if tracker_type == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()

    # Open the video and make sure it is readable
    video = cv2.VideoCapture("videos/car-without_obstacle_300.avi")
    if not video.isOpened():
        print("video not opened")
        sys.exit()
    ok, frame = video.read()

    # Skip ahead by reading and discarding the first 290 frames
    count = 0
    while count < 290:
        ok, frame = video.read()
        if not ok:
            print('Cannot read video file')
            sys.exit()

        count += 1

    # Define an initial bounding box, e.g.:
    # bbox = (287, 23, 86, 320)

    # ...or select one interactively:
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)
        print(bbox)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
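Reading and discarding 290 frames in a loop works, but VideoCapture can also seek directly. A shorter variant of the skip above, reusing the snippet's cv2 and sys imports (a sketch; frame-accurate seeking depends on the codec):

video = cv2.VideoCapture("videos/car-without_obstacle_300.avi")
video.set(cv2.CAP_PROP_POS_FRAMES, 290)  # jump straight to frame 290
ok, frame = video.read()
if not ok:
    print('Cannot read video file')
    sys.exit()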
コード例 #19
0
def trackFaces(frame, faces, exitFlag, facesNumber, boxes, receiveQueue,
               sendQueue):

    # `video` is assumed to be a global capture object opened by the caller.
    while facesNumber != 0 and exitFlag == 0:

        # Create tracker
        tracker = cv2.MultiTracker_create()

        for face in faces:
            ok = tracker.add(
                cv2.TrackerKCF_create(), frame,
                (int(face[0]), int(face[1]), int(face[2]), int(face[3])))

        #time = cv2.getTickCount()
        while exitFlag == 0:

            #print("Tracking...")

            # Start timer
            timer = cv2.getTickCount()

            # Update tracker
            ret, boxes = tracker.update(frame)

            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            if ret:  # Tracking success
                for box in boxes:
                    topLeft = (int(box[0]), int(box[1]))
                    bottomDown = (int(box[0] + box[2]), int(box[1] + box[3]))
                    cv2.rectangle(frame, topLeft, bottomDown, (255, 0, 0), 2,
                                  1)  # Draw box
            else:
                break

            # Display tracker type on frame
            cv2.putText(frame, "KCF Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display result
            cv2.imshow("Tracking", frame)

            # Read frame
            ok, frame = video.read()
            frame = cv2.flip(frame, 1)  # mirror the image horizontally
            # Hand the frame to the detector only if it consumed the last one
            if sendQueue.empty():
                sendQueue.put([frame, faces, exitFlag, facesNumber])

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                exitFlag = 1
                sendQueue.put([frame, faces, exitFlag, facesNumber])
                break

            if not receiveQueue.empty():  # detector reported a new face
                break

        print("Exited Tracking...")

        # Read frame
        ok, frame = video.read()
        frame = cv2.flip(frame, 1)  # mirror the image horizontally
        if sendQueue.empty():
            sendQueue.put([frame, faces, exitFlag, facesNumber])

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            exitFlag = 1
            sendQueue.put([frame, faces, exitFlag, facesNumber])
            break

        for box in boxes:
            topLeft = (int(box[0]), int(box[1]))
            bottomDown = (int(box[0] + box[2]), int(box[1] + box[3]))
            cv2.rectangle(frame, topLeft, bottomDown, (255, 0, 0), 2,
                          1)  # Draw box

        # Display result
        cv2.imshow("Tracking", frame)

    if not receiveQueue.empty():  # detector reported a new face
            break

    # Read frame
    ok, frame = video.read()
    frame = cv2.flip(frame, 1)  # mirror the image horizontally
    if sendQueue.empty():
        sendQueue.put([frame, faces, exitFlag, facesNumber])

    # Exit if ESC pressed
    k = cv2.waitKey(1) & 0xff
    if k == 27:
        exitFlag = 1
        sendQueue.put([frame, faces, exitFlag, facesNumber])

    for box in boxes:
        topLeft = (int(box[0]), int(box[1]))
        bottomDown = (int(box[0] + box[2]), int(box[1] + box[3]))
        cv2.rectangle(frame, topLeft, bottomDown, (255, 0, 0), 2,
                      1)  # Draw box

    # Display result
    cv2.imshow("Tracking", frame)

    return frame, faces, exitFlag, facesNumber, boxes
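The outer loop of trackFaces rebuilds the MultiTracker from scratch whenever the detector reports a new set of faces. Factored into a helper, that step looks like this (the helper name is ours):

import cv2

def rebuild_multitracker(frame, faces):
    """Create a fresh MultiTracker with one KCF tracker per face box."""
    tracker = cv2.MultiTracker_create()
    for (x, y, w, h) in faces:
        tracker.add(cv2.TrackerKCF_create(), frame,
                    (int(x), int(y), int(w), int(h)))
    return tracker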
コード例 #20
0
ファイル: process.py プロジェクト: Thanasispapam/miniproject2
def background_subtract(video):
    frameCounter = 0
    trackerArray = []
    #kalmanArray = []
    duplicate = False
    fgbg = cv2.createBackgroundSubtractorMOG2(
        detectShadows=False, history=200,
        varThreshold=200)  # Create the background subtractor object
    while True:
        #Loading video, one frame at a time
        ret, frame = video.read()
        #transform the picture
        #frame = transform_perspective(frame)
        #Blurring and applying erosion and dilation to the element used for blob detection
        fgmask = fgbg.apply(frame)
        blur_size = 3
        cv2.blur(frame, (blur_size, blur_size), frame)
        elip_val = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        fgmask = cv2.erode(fgmask, elip_val)
        fgmask = cv2.dilate(fgmask, elip_val, iterations=5)

        # Find new blobs every 200 frames (and once near the start)
        if frameCounter % 200 == 0 or frameCounter == 2:
            print("200 frames passed, updating blobs")

            centroids = blob_detection('test', fgmask, fgmask)
            # Compare each centroid with the tracked blobs; skip new ones
            # that are too close to an existing tracked object
            for i in centroids:
                one_bbox = i
                bbox = (one_bbox[0] - 20, one_bbox[1] - 20, 40, 40)
                duplicate = False
                for j in range(len(trackerArray)):
                    ok, t = trackerArray[j].update(frame)
                    if sqrt((bbox[0] - t[0])**2 +
                            (bbox[1] - t[1])**2) < 50 and ok:
                        duplicate = True
                if duplicate == False:
                    #Creating a new tracker and a new kalman filter for the blob
                    tracker = cv2.TrackerKCF_create()
                    ok = tracker.init(frame, bbox)
                    #kFilter = Kalman(bbox[0], bbox[1])
                    trackerArray.append(tracker)
                    #kalmanArray.append(kFilter)
        '''
        # Update tracker & Kalman filter
        cnt = 0
        for i in trackerArray:
            ok, bbox = i.update(frame)
            kMan = kalmanArray[cnt]
            #Calculating velocity from old positional values and time since last measurements
            deltaX =  bbox[0] - kMan.getX()
            deltaY =  bbox[1] - kMan.getY()
            xVel = deltaX / 0.04 # 1/25 which is the time in seconds from last frame
            yVel = deltaY / 0.04
            kMan.update(bbox[0], bbox[1], xVel, yVel)
            estimate = kMan.estimate()
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
                cv2.rectangle(frame, (estimate[0], estimate[1]), (estimate[0]+10, estimate[1]+10), (0,255,0), 2, 1)
                distance = sqrt(estimate[2]**2+estimate[3]**2)
                #Going from pixels to km/h
                distance = (distance/10.1159156)*3.6
                cv2.putText(frame, str(float(("%.2f" % distance))) + "km/h", (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)
            else:
                del trackerArray[cnt]
                del kalmanArray[cnt]
            cnt = cnt + 1
        '''
        frameCounter = frameCounter + 1

        # Display result
        cv2.imshow("Tracking", frame)
        cv2.imshow("blobs", fgmask)
        # Exit if q pressed
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()
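The commented-out Kalman block converts a velocity estimate in pixels per second into km/h using a fixed scale of 10.1159156 pixels per metre and the factor 3.6 (m/s to km/h). Pulled out as a helper, the arithmetic is (constants taken from the block above; the helper name is ours):

from math import sqrt

PIXELS_PER_METRE = 10.1159156  # scale used in the commented-out block above
FRAME_INTERVAL = 0.04          # seconds between frames at 25 fps

def speed_kmh(dx_px, dy_px, dt=FRAME_INTERVAL):
    """Convert a per-frame pixel displacement into km/h."""
    pixels_per_second = sqrt(dx_px ** 2 + dy_px ** 2) / dt
    return (pixels_per_second / PIXELS_PER_METRE) * 3.6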
コード例 #21
0
import cv2
import sys
import time

tracker = cv2.TrackerKCF_create() # KCF fast and accurate, CSRT high accurate but slower, MOSSE faster but low accuracy

# For video file
video = cv2.VideoCapture("./chaplin.mp4")

# for a video stream / webcam (note: VideoStream is from imutils.video, not cv2)
# video = VideoStream(src=0).start()
# time.sleep(2.0)

ok, frame = video.read()

bbox = cv2.selectROI(frame, False)
print(bbox)
#cv2.destroyWindow(frame)
tracker.init(frame, bbox)

while True:
    ok, frame = video.read()
    if not ok:
        break

    timer = cv2.getTickCount()
    ok, bbox = tracker.update(frame)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

    p1 = (int(bbox[0]), int(bbox[1]))
    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

    cv2.imshow("Tracking", frame)
    if cv2.waitKey(1) & 0xff == 27:  # Esc to quit
        break

video.release()
cv2.destroyAllWindows()
コード例 #22
0
ファイル: ROItool.py プロジェクト: funkman56/ROItool
}

number = input(
    "1. CSRT       2. KCF\n3. TLD        4. MIL\n5. MOSSE      6. Boosting\n7. MedianFlow\n>>  "
)

try:
    print("\nTracking algorithm :", method[number])
    name = input("Video name : ")
    filetype = input("Video format : ")
    '''
    Single-line if/elif/else form:
    True1 if Condition1 else True2 if Condition2 else ... if Condition(n-1) else True(n)
    '''
    tracker = cv2.TrackerCSRT_create() if int(
        number) == 1 else cv2.TrackerKCF_create() if int(
            number) == 2 else cv2.TrackerTLD_create() if int(
                number) == 3 else cv2.TrackerMIL_create(
                ) if int(number) == 4 else cv2.TrackerMOSSE_create(
                ) if int(number) == 5 else cv2.TrackerBoosting_create(
                ) if int(number) == 6 else cv2.TrackerMedianFlow_create()

    #print(type(tracker))
    #tracker = cv2.TrackerCSRT_create()
    #tracker = cv2.TrackerKCF_create()
    #tracker = cv2.TrackerTLD_create()
    #tracker = cv2.TrackerMIL_create()
    #tracker = cv2.TrackerMOSSE_create()
    #tracker = cv2.TrackerBoosting_create()
    #tracker = cv2.TrackerMedianFlow_create()
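The snippet stops after choosing the tracker; for completeness, a sketch of how the prompts above could feed into initialization (variable names follow the snippet; the window title and this block are our additions):

cap = cv2.VideoCapture(name + '.' + filetype)
ok, frame = cap.read()
if ok:
    bbox = cv2.selectROI('ROI', frame, False)
    tracker.init(frame, bbox)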
コード例 #23
0
ファイル: provatrack.py プロジェクト: paddoum/tracking
        else:
            return  # print("the target is in front of the robot")
    else:
        return print("the target is close")


def tracking_Qr(frame):
    # Once a QR code is recognized, start tracking it
    barcode = decode(frame)
    if len(barcode) == 1:
        for obj in barcode:
            box = [obj.rect[0], obj.rect[1], obj.rect[2], obj.rect[3]]
            return box  # return the tracker box dimensions


tracker = cv2.TrackerKCF_create()  # initialize a KCF tracker
bbox = (20, 30, 50, 60)
cap = cv2.VideoCapture(0)  # open the camera video stream

if not cap.isOpened():  # cap.isOpened() returns True when the camera is active
    print("Error opening video stream or file")

lista = get_size_cam(cap)  # list holding the frame height and width
print("FRAME SIZE height= ", lista[0], " width= ", lista[1])

while cap.isOpened():
    ret, frame = cap.read()  # ret is True when a frame was read; frame is the image matrix
    gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    info = get_barcode_info(
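The example breaks off mid-call above. As a complement, a minimal sketch of how its tracking_Qr helper could seed the KCF tracker once a QR code appears (the function name below is ours; decode is assumed to come from pyzbar, matching the usage above):

def init_tracker_from_qr(tracker, frame):
    """Start tracking as soon as exactly one QR code is visible."""
    box = tracking_Qr(frame)
    if box is not None:
        tracker.init(frame, tuple(box))
        return True
    return False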
コード例 #24
0
def main():

    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    x = 200
    y = 200
    w = 224
    h = 224
    track_window=(x,y,w,h)
    # Reference Distance
    L0 = 100
    S0 = 50176  # 224 * 224, reference bounding-box area

    # Base Distance
    LB = 100
    # Define an initial bounding box
    bbox = (x, y, w, h)   #(287, 23, 86, 320)
    #CX=int(bbox[0]+0.5*bbox[2]+3) #adding
    #CY=int(bbox[1]+0.5*bbox[3]+3) #adding





    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        #drone.takeoff()
        #sleep(5)
        #drone.land()




        # skip first 300 frames
        frame_skip = 300
        while True:
#------------------------------------------for start
            for frame in container.decode(video=0):
                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)

                start_time = time.time()

                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                #cv2.waitKey(1)

                if frame.time_base < 1.0/60:
                    time_base = 1.0/60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time)/time_base)



                # Update tracker
                ok, bbox = tracker.update(image)


                # Draw bounding box
                if ok:
                    print('Tracking ok')
                    #(x,y,w,h) = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))
                    CX=int(bbox[0]+0.5*bbox[2]) #Center of X
                    CY=int(bbox[1]+0.5*bbox[3])
                    S0=bbox[2]*bbox[3]
                    print("CX,CY,S0,x,y=",CX,CY,S0,x,y)
                    # Tracking success
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(image, p1, p2, (255,0,0), 2, 1)

                else:
                    # Tracking failure
                    print('Tracking failure')
                    cv2.putText(image, "Tracking failure detected", (100, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                cv2.imshow('Original', image)

                key = cv2.waitKey(1) & 0xff
                if key == ord('q'):
                    print('Q!')
                    break

                if key == ord('r'):
                    bbox = cv2.selectROI(image, False)
                    print(bbox)
                    (x,y,w,h) = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))
                    # Initialize tracker with first frame and bounding box
                    ok = tracker.init(image, bbox)



#-------------------------------------------------for end
            break
        print('stop fly')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.land()  # land before closing the connection
        drone.quit()
        cv2.destroyAllWindows()
コード例 #25
0
    true_w, true_h, true_coords = extractor.get_coords(
        extractor.text_list[index][0])
    # videos_per_person = extractor.video_per_name[index]
    print(extractor.text_list[index][0])
    print(len(true_coords))

    multiTracker = cv2.MultiTracker_create()
    multiTracker_body = cv2.MultiTracker_create()
    face_boxes, names = recognize_faces(frame, frameWidth, frameHeight,
                                        true_w[frame_number],
                                        true_h[frame_number])
    body_boxes, openpose_faces = detect_bodies(frame, pose_pairs, map_Idx)

    # Initialize MultiTracker
    for bbox in face_boxes:
        multiTracker.add(cv2.TrackerKCF_create(), frame, tuple(bbox))

    # Initialize MultiTracker
    for body_box in body_boxes:
        multiTracker_body.add(cv2.TrackerKCF_create(), frame, body_box)

    compare_res = []

    while True:

        print("Writing frame {} / {}".format(frame_number + 1, length))

        if input_type == "video":
            # grab the next frame
            (grabbed, frame) = stream.read()
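The snippet ends before the per-frame update. A sketch of how the two MultiTrackers above would typically be advanced each iteration (the drawing colour is our choice):

ok_faces, face_boxes = multiTracker.update(frame)
ok_bodies, body_boxes = multiTracker_body.update(frame)
for (x, y, w, h) in list(face_boxes) + list(body_boxes):
    cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)),
                  (255, 0, 0), 2)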
コード例 #26
0
ファイル: tracking.py プロジェクト: jsuana/tracking
    def create_tracker(self):
        self.tracker_type = "KCF"
        # Create a KCF tracker
        tracker = cv2.TrackerKCF_create()
        return tracker


def tracking():
    arduino.write(struct.pack('>B', 0))
    global inFrame
    global personDetected
    global lost_frames
    global trackFail
    trackFail = False
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    # Define an initial bounding box
    # bbox = (287, 23, 86, 320)

    # Initialize tracker with first frame and bounding box
    # ok = tracker.init(frame, bbox)

    # Why do we have this twice?
    # ok, frame = video.read()
    while True:
        ok, frame = video.read()
        while inFrame and personDetected and not SLEEP:
            #last_coord = 300, 300
            tracker.clear()
            tracker = cv2.TrackerKCF_create()
            bbox, name = find_face()
            #print name + " was detected"
            if name != followName:
                #lost_frames = lost_frames + 1
                #if not lost_frames<3:
                if trackFail:
                    inFrame = False
                else:
                    personDetected = False
                continue
            trackFail = False

            #ask if you are okay because the face cannot be found
            #print bbox[2]
            ok = tracker.init(frame, bbox)
            n = 0
            #lost_frames = 0;

            while n < 100 and not SLEEP:

                # Read a new frame

                ok, frame = video.read()

                #if not ok:
                #    break

                # Start timer
                timer = cv2.getTickCount()
                # Update tracker
                ok, bbox = tracker.update(frame)
                x = bbox[0] + bbox[2] / 2
                y = bbox[1] + bbox[3] / 2

                #print x, y

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Draw bounding box
                if ok:
                    #last_coord = x, y
                    # Tracking success
                    #found = True
                    inFrame = True
                    #lost_frames = 0
                    p1 = (int(bbox[0]), int(bbox[1]))
                    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                    x_center = 320
                    y_center = 160
                    x_window = 65
                    y_window = 20

                    #print x, y

                    if not (x_center - x_window) <= x <= (
                            x_center + x_window
                    ):  #Make horizontal displacement prioritized

                        if x < (x_center - x_window):  # face is left
                            arduino.write(struct.pack('>B', 2))

                        elif x > (x_center + x_window):  # face is right
                            arduino.write(struct.pack('>B', 3))

                        else:  # face is straight
                            arduino.write(struct.pack('>B', 0))

                    else:

                        if y < (y_center - y_window):  # face is too close
                            arduino.write(struct.pack('>B', 4))

                        elif y > (y_center + y_window):  # face is too far
                            arduino.write(struct.pack('>B', 1))

                        else:  # face is centered
                            arduino.write(struct.pack('>B', 0))

                else:
                    # Tracking failure
                    #lost_frames = lost_frames + 1
                    cv2.putText(frame, "Tracking failure detected", (100, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                    arduino.write(struct.pack('>B', 0))
                    n = 100
                    trackFail = True

                # Display result
                cv2.imshow("Tracking", frame)

                # Exit if ESC pressed
                k = cv2.waitKey(1) & 0xff
                if k == 27: break
                n = n + 1
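The single bytes written to the Arduino in tracking() form a small steering protocol. Collecting them from the branches above gives roughly this map (the direction labels are inferred from the comments, so treat them as assumptions):

import struct

# Command bytes as written in tracking() above; labels are our inference.
COMMANDS = {
    'stop': 0,      # face centered, or tracking lost
    'forward': 1,   # face too far away
    'left': 2,      # face left of center
    'right': 3,     # face right of center
    'backward': 4,  # face too close
}

def send_command(arduino, name):
    arduino.write(struct.pack('>B', COMMANDS[name]))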
コード例 #28
0
    def __init__(self):
        self.confThreshold = 0.5  # Confidence threshold (0..1), higher for stricter detection confidence.
        self.nmsThreshold = 0.4  # Non-maximum suppression threshold (0..1), higher to remove more duplicate boxes.
        self.inpWidth = 160  # Resized image width passed to network
        self.inpHeight = 120  # Resized image height passed to network
        self.scale = 2 / 255  # Value scaling factor applied to input pixels
        self.mean = [127.5, 127.5,
                     127.5]  # Mean BGR value subtracted from input image
        self.rgb = True  # True if model expects RGB inputs, otherwise it expects BGR
        self.bbox = None
        self.tracker = cv.TrackerKCF_create()

        # Select one of the models:
        #model = 'Face'              # OpenCV Face Detector, Caffe model
        #model = 'MobileNetV2SSD'   # MobileNet v2 + SSD trained on Coco (80 object classes), TensorFlow model
        #model = 'MobileNetSSD'     # MobileNet + SSD trained on Pascal VOC (20 object classes), Caffe model
        model = 'MobileNetSSDcoco'  # MobileNet + SSD trained on Coco (80 object classes), TensorFlow model
        #model = 'YOLOv3'           # Darknet Tiny YOLO v3 trained on Coco (80 object classes), Darknet model
        #model = 'YOLOv2'           # Darknet Tiny YOLO v2 trained on Pascal VOC (20 object classes), Darknet model

        # You should not have to edit anything beyond this point.
        backend = cv.dnn.DNN_BACKEND_DEFAULT
        target = cv.dnn.DNN_TARGET_CPU
        self.classes = None
        classnames = None
        if (model == 'MobileNetSSD'):
            classnames = '/jevois/share/darknet/yolo/data/voc.names'
            modelname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.caffemodel'
            configname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.prototxt'
            self.rgb = False
        elif (model == 'MobileNetV2SSD'):
            classnames = '/jevois/share/darknet/yolo/data/coco.names'
            modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pb'
            configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'
        elif (model == 'MobileNetSSDcoco'):
            classnames = '/jevois/share/darknet/yolo/data/coconew.names'
            modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pb'
            configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pbtxt'
            self.rgb = False
            self.nmsThreshold = 0.1
        elif (model == 'YOLOv3'):
            classnames = '/jevois/share/darknet/yolo/data/coco.names'
            modelname = '/jevois/share/darknet/yolo/weights/yolov3-tiny.weights'
            configname = '/jevois/share/darknet/yolo/cfg/yolov3-tiny.cfg'
        elif (model == 'YOLOv2'):
            classnames = '/jevois/share/darknet/yolo/data/voc.names'
            modelname = '/jevois/share/darknet/yolo/weights/yolov2-tiny-voc.weights'
            configname = '/jevois/share/darknet/yolo/cfg/yolov2-tiny-voc.cfg'
            self.inpWidth = 320
            self.inpHeight = 240
        else:
            classnames = '/jevois/share/opencv-dnn/detection/opencv_face_detector.classes'
            modelname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.caffemodel'
            configname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.prototxt'
            self.scale = 1.0
            self.mean = [104.0, 177.0, 123.0]
            self.rgb = False

        # Load names of classes
        if classnames:
            with open(classnames, 'rt') as f:
                self.classes = f.read().rstrip('\n').split('\n')

        # Load a network
        self.net = cv.dnn.readNet(modelname, configname)
        self.net.setPreferableBackend(backend)
        self.net.setPreferableTarget(target)
        self.timer = jevois.Timer('Neural detection', 10, jevois.LOG_DEBUG)
        self.model = model
        garbageclasses = [
            "shoe", "hat", "eye glasses", "frisbee", "bottle", "plate",
            "wine glass", "cup", "fork", "spoon", "bowl", "banana", "apple",
            "sandwich", "orange", "broccoli", "carrot", "fruit", "hotdog",
            "pizza", "donut", "cake", "vase", "scissors", "toothbrush",
            "cardboard", "napkin", "net", "paper", "plastic", "straw"
        ]
        self.garbageclasses = garbageclasses
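The preprocessing fields set in __init__ (scale, mean, rgb, inpWidth, inpHeight) are the usual arguments to cv.dnn.blobFromImage. The snippet does not show the processing method itself, so the sketch below is an assumption about how those fields would be consumed:

import cv2 as cv

def detect(self, frame):
    """Build the network input from the fields set in __init__ and run it."""
    blob = cv.dnn.blobFromImage(frame, self.scale,
                                (self.inpWidth, self.inpHeight),
                                self.mean, swapRB=self.rgb, crop=False)
    self.net.setInput(blob)
    return self.net.forward()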
コード例 #29
0
import cv2
import sys
 
[major_ver, minor_ver, subminor_ver] = cv2.__version__.split(".")

if int(major_ver) < 3 and int(minor_ver) < 1:
    tracker = cv2.TrackerMIL_create()
else:
    tracker = cv2.TrackerKCF_create()
 
# Read video
video = cv2.VideoCapture("Video/CarGame.avi")
#video = cv2.VideoCapture(0)
 
# Exit if video not opened.
if not video.isOpened():
    print("Could not open video")
    sys.exit()
 
# Read first frame.
ok, frame = video.read()
if not ok:
    print("Cannot read video file")
    sys.exit()
    
# Replace the single tracker with a MultiTracker so several ROIs can be followed
tracker = cv2.MultiTracker_create()

tracker.add(cv2.TrackerKCF_create(), frame, (230, 200, 170, 130))
cv2.imshow('tracking', frame)

# Press any key to add another ROI, Esc to quit
while cv2.waitKey() != 27:
    ok = tracker.add(cv2.TrackerKCF_create(), frame,
                     cv2.selectROI('tracking', frame))
コード例 #30
0
ファイル: object_tracking.py プロジェクト: paul-freeman/big_g
def track_video(video, tracker, bbox, scale, suppress_live_plot, algorithm,
                speed):
    """Track a video"""
    fps = video.get(cv2.CAP_PROP_FPS)
    height, width, _ = FRAME.shape
    if not tracker.init(FRAME, bbox):
        raise RuntimeError('Could not initialize video file')
    frame_number = 0
    scaled_bbox = [n / scale for n in bbox]
    x_origin = (2.0 * scaled_bbox[0] + scaled_bbox[2]) / 2.0
    y_origin = (2.0 * scaled_bbox[1] + scaled_bbox[3]) / 2.0
    time_points = [frame_number / fps]
    x_points = [x_origin]
    y_points = [y_origin]
    if not suppress_live_plot:
        plt.ion()
        fig, axes = plt.subplots(1, 1)
        axes.imshow(FRAME)
        axes.plot(x_points, y_points)
        axes.set_title('Elapsed time: {:d} seconds'.format(int(
            time_points[-1])))
        axes.set_xlabel(PLOT_XLABEL)
        axes.set_ylabel(PLOT_YLABEL)
        plt.show()
    while True:
        try:
            for _ in range(speed):
                read_frame(video)
                frame_number += 1
        except RuntimeError:
            break
        tracking_success, bbox = tracker.update(FRAME)
        if not tracking_success:
            print(TRACKING_FAIL_MSG)
            if algorithm == 'KCF':
                tracker = cv2.TrackerKCF_create()
            elif algorithm == 'MIL':
                tracker = cv2.TrackerMIL_create()
            elif algorithm == 'Median-Flow':
                tracker = cv2.TrackerMedianFlow_create()
            else:
                raise ValueError('Unknown algorithm type')
            bbox = select_bounding_box()
            tracker.init(FRAME, bbox)
        scaled_bbox = [n / scale for n in bbox]
        #distance = sqrt((((2.0 * scaled_bbox[0] + scaled_bbox[2]) / 2.0) - x_origin)**2
        #                + (((2.0 * scaled_bbox[1] + scaled_bbox[3]) / 2.0) - y_origin)**2)
        time_points.append(frame_number / fps)
        x_points.append((2.0 * scaled_bbox[0] + scaled_bbox[2]) / 2.0)
        y_points.append((2.0 * scaled_bbox[1] + scaled_bbox[3]) / 2.0)
        if not suppress_live_plot:
            axes.clear()
            axes.imshow(cv2.cvtColor(FRAME, cv2.COLOR_BGR2RGB),
                        extent=[0, width / scale, height / scale, 0])
            axes.plot([
                x * (time_points[i] / time_points[-1])
                for i, x in enumerate(x_points)
            ],
                      y_points,
                      color='red',
                      alpha=0.3)
            axes.plot(x_points, [
                y * (time_points[i] / time_points[-1])
                for i, y in enumerate(y_points)
            ],
                      color='yellow',
                      alpha=0.3)
            axes.plot([
                x * (time_points[i] / time_points[-1])
                for i, x in enumerate(x_points)
            ], [
                y * (time_points[i] / time_points[-1])
                for i, y in enumerate(y_points)
            ],
                      color='green',
                      alpha=0.5)
            axes.plot([
                scaled_bbox[0], scaled_bbox[0] + scaled_bbox[2],
                scaled_bbox[0] + scaled_bbox[2], scaled_bbox[0], scaled_bbox[0]
            ], [
                scaled_bbox[1], scaled_bbox[1], scaled_bbox[1] +
                scaled_bbox[3], scaled_bbox[1] + scaled_bbox[3], scaled_bbox[1]
            ],
                      color='white',
                      alpha=0.3)
            axes.set_title('Elapsed time: {:d} seconds'.format(
                int(time_points[-1])))
            axes.set_xlabel(PLOT_XLABEL)
            axes.set_ylabel(PLOT_YLABEL)
            axes.set_ylim(height / scale, 0)
            plt.pause(0.001)
    if not suppress_live_plot:
        plt.close()
        plt.ioff()
    return np.array([time_points, x_points, y_points]).T
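track_video returns an (N, 3) array of (time, x, y) rows, with x and y already in metres thanks to the scale factor. A short post-processing sketch (our addition; the .npy file name is whatever was passed as the output file):

import numpy as np

points = np.load('points.npy')  # the array saved by the tracking entry point
t, x, y = points.T
vx = np.gradient(x, t)          # metres per second
vy = np.gradient(y, t)
speed = np.hypot(vx, vy)
print('mean speed: {:.2f} m/s'.format(speed.mean()))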