Example #1
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        # ser.close()
        cap.release()
        cv2.destroyAllWindows()
        sys.exit()

print("Obj Detected")

# End of Obj Detection
#-------------------------------------------------------------------------------------------
# Start of Trackers KCF and MEDIANFLOW

ser = serial.Serial("COM3", 9600)
tracker_KCF = cv2.TrackerKCF_create()
tracker_MEDIANFLOW = cv2.TrackerMedianFlow_create()
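# dst appears to hold the projected corners from the detection step above;
# the lines below convert its top-left (dst[0]) and bottom-right (dst[2])
# corners into an (x, y, w, h) bounding box.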
bbox = np.array(bbox)
bbox[0] = dst[0, 0, 0]
bbox[1] = dst[0, 0, 1]
bbox[2] = dst[2, 0, 0] - bbox[0]
bbox[3] = dst[2, 0, 1] - bbox[1]
bbox = tuple(bbox)

# Initialize tracker with first frame and bounding box
ok = tracker_KCF.init(frame_copy, bbox) and tracker_MEDIANFLOW.init(
    frame_copy, bbox)
print("Start Tracking")

while True:
    ok, frame = cap.read()
    if not ok:
Example #2
        out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (width, height))

    except Exception as e:
        print(e)

    return cap, out


if __name__ == '__main__':

    tracker_switcher = {
        'BOOSTING': cv2.TrackerBoosting_create(),
        'MIL': cv2.TrackerMIL_create(),
        'KCF': cv2.TrackerKCF_create(),
        'TLD': cv2.TrackerTLD_create(),
        'MEDIANFLOW': cv2.TrackerMedianFlow_create(),
        'CSRT': cv2.TrackerCSRT_create(),
        'MOSSE': cv2.TrackerMOSSE_create()
    }

    # Initialize the tracker with the configured type
    tracker = tracker_switcher[settings.TRACKER_TYPE]

    # Capture frame-by-frame
    cap, out = get_cam_frames()
    ok, frame = cap.read()

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Uncomment the line below to select a different bounding box
Example #3
 def create_tracker(self, img, bbox):
     self.tracker = cv2.TrackerMedianFlow_create()
     self.tracker.init(img, bbox)
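
These examples use the OpenCV 3.x contrib API, where cv2.TrackerMedianFlow_create() is a top-level function. In OpenCV 4.5 and later the legacy trackers live in the cv2.legacy module instead, so a small factory like this sketch (an addition, not part of any example) keeps the snippets working across versions:

import cv2

def create_medianflow_tracker():
    # OpenCV 3.x / early 4.x expose the constructor at the top level
    if hasattr(cv2, 'TrackerMedianFlow_create'):
        return cv2.TrackerMedianFlow_create()
    # OpenCV >= 4.5 (opencv-contrib-python) moved it to cv2.legacy
    return cv2.legacy.TrackerMedianFlow_create()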
Example #4
def tracking_MEDIANFLOW(videofile, fps, save=False):
    # Choose the tracker type
    tracker_type = 'MEDIANFLOW'
    tracker = cv2.TrackerMedianFlow_create()

    # Read video (a 512x512 frame size is recommended)
    video = cv2.VideoCapture(videofile)
    
    now = datetime.datetime.now()
    timestamp = '{0:%Y%m%d_%H%M%S}'.format(now)
    
    # Output setting
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    if save:
        out = cv2.VideoWriter('tracking_output_%s.mp4' % timestamp, fourcc, fps, (512, 512))
    
    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()
        
    # Read first frame
    ok, frame = video.read()
    if not ok:
        print("Cannot read the video file")
        sys.exit()
    
    # Define an initial bounding box
    bbox = (144, 295, 60, 40)
        
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    

    while True:
        # read a new frame
        ok, frame = video.read()
        if not ok:
            time.sleep(2)
            break

        # start timer
        timer = cv2.getTickCount()

        # update tracker
        ok, bbox = tracker.update(frame)

        #calculate frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Save result as movie file
        if save:
            out.write(frame)

        #Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
    
    # When everything done, release the capture
    video.release()
    if save:
        out.release()
    cv2.destroyAllWindows()
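
The FPS figure drawn on each frame comes from OpenCV's tick counter: cv2.getTickCount() returns elapsed clock ticks and cv2.getTickFrequency() returns ticks per second, so dividing the frequency by the ticks spent on one iteration gives frames per second. A minimal sketch of the pattern used throughout these examples:

import cv2

timer = cv2.getTickCount()                     # ticks at the start of the iteration
# ... per-frame work: read, track, draw ...
ticks_elapsed = cv2.getTickCount() - timer
fps = cv2.getTickFrequency() / ticks_elapsed   # (ticks/second) / ticks = 1/seconds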
Example #5
    def detect(self):
        cap = cv2.VideoCapture(0)

        if not cap.isOpened():
            print("Камера не в 0 порту")
            sys.exit()

        tracker = cv2.TrackerMedianFlow_create()

        ok, frame = cap.read()
        if not ok:
            print('Error reading video')
            sys.exit()

        bbox = (287, 23, 86, 320)
        bbox = cv2.selectROI(frame, False)
        ok = tracker.init(frame, bbox)
        while True:
            # Read a new frame
            ok, frame = cap.read()
            if not ok:
                break

            # Start the timer
            timer = cv2.getTickCount()

            # Update the tracker
            ok, bbox = tracker.update(frame)

            # Calculate FPS
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            # Draw the bounding box
            if ok:
                # Object found
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
            else:
                # Object not found
                cv2.putText(frame, "Cant find any", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

            # Tracker HUD (which tracker is in use)
            cv2.putText(frame, "MedianFlowTracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # FPS HUD
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            face_cascade = cv2.CascadeClassifier(
                'haarcascades/haarcascade_frontalface_default.xml')
            eye_cascade = cv2.CascadeClassifier(
                'haarcascades/haarcascade_eye.xml')
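            # Note (not in the original): both cascades are re-loaded from
            # disk on every loop iteration here; creating them once before
            # the while loop is the usual optimization.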

            image = frame.copy()
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
                # roi_gray = gray[y:y + h, x:x + w]
                # roi_color = img[y:y + h, x:x + w]
                for_eyes = gray[y:y + h, x:x + w]
                eyes = eye_cascade.detectMultiScale(for_eyes)
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(image, (ex + x, ey + y),
                                  (x + ex + ew, y + ey + eh), (0, 255, 0), 2)

            # Output
            #cv2.imshow("Tracking", frame)
            cv2.imshow("Faces", image)

            # Exit on ESC key
            k = cv2.waitKey(1) & 0xff
            if k == 27: break
Example #6
def main():
    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    # tracker_type = tracker_types[2]
    tracker_type = 'MOSSE'

    if tracker_type == 'BOOSTING':
        tracker = cv.TrackerBoosting_create()
    if tracker_type == 'MIL':
        tracker = cv.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv.TrackerKCF_create()
    if tracker_type == 'TLD':
        tracker = cv.TrackerTLD_create()
    if tracker_type == 'MEDIANFLOW':
        tracker = cv.TrackerMedianFlow_create()
    if tracker_type == 'GOTURN':
        tracker = cv.TrackerGOTURN_create()
    if tracker_type == 'MOSSE':
        tracker = cv.TrackerMOSSE_create()
    if tracker_type == "CSRT":
        tracker = cv.TrackerCSRT_create()

    # Read video
    video = cv.VideoCapture(0)

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Uncomment the line below to select a different bounding box
    bbox = cv.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv.getTickFrequency() / (cv.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv.putText(frame, "Tracking failure detected", (100, 80),
                       cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv.putText(frame, tracker_type + " Tracker", (100, 20),
                   cv.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                   cv.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv.imshow("Tracking", frame)

        # Exit if 'q' pressed
        if cv.waitKey(1) == ord('q'):
            print('release')
            break
Example #7
    def _display_camera(self):
        failed_frames_counter = 0
        while self.playing:
            if not self.paused:
                frame_start = time.time()
                ret, raw_frame = self.camera.read()
                if ret:
                    failed_frames_counter = 0

                    raw_frame = cv2.flip(raw_frame, 1)
                    frame = np.copy(raw_frame)

                    if len(frame.shape) != 3:
                        frame = cv2.merge((frame, frame, frame))

                    if self.measure_pulse:
                        if self.tracker is None:
                            print('Tracking initialized')
                            #self.tracker = cv2.TrackerCSRT_create()
                            self.tracker = cv2.TrackerMedianFlow_create()
                            self.tracker.init(frame, self.roi.to_tuple())
                            # analyze max 5 seconds
                            self.pulse_processor.buffer_size = math.ceil(
                                MAX_FPS *
                                5)  # round(1 / average(self.frame_times))*5

                            # open output files
                            means_file = open('mean.csv', 'w')
                            means_file.write('frame,x,y,w,h,g_mean\n')

                            fft_file = open('fft.csv', 'w')
                            fft_file.write(
                                'start_frame,end_frame,amp_bmp_48,amp_bmp_60,amp_bmp_72,amp_bmp_84,amp_bmp_96,amp_bmp_108,amp_bmp_120,amp_bmp_132,amp_bmp_144\n'
                            )

                            bpm_file = open('bpm.csv', 'w')
                            bpm_file.write('frame,bpm\n')

                        else:
                            ok, tracked = self.tracker.update(frame)
                            self.roi.set_roi(*tracked)
                            if ok:
                                x, y, w, h = self.roi.to_tuple()
                                pulse_visualized = self.pulse_processor.run(
                                    frame[y:y + h, x:x + w, :])
                                frame[y:y + h, x:x + w, :] = pulse_visualized

                                means_file.write('{},{},{},{},{},{}\n'.format(
                                    self.camera.get_frame_position(), x, y, w,
                                    h, self.pulse_processor.data_buffer[-1]))
                                means_file.flush()

                                if (len(self.pulse_processor.data_buffer) ==
                                        self.pulse_processor.buffer_size):
                                    fft_file.write(
                                        '{},{},{},{},{},{},{},{},{},{},{}\n'.
                                        format(
                                            self.camera.get_frame_position() -
                                            MAX_FPS * 5 - 1,
                                            self.camera.get_frame_position(),
                                            *self.pulse_processor.fft))
                                    fft_file.flush()
                                    bpm_file.write('{},{}\n'.format(
                                        self.camera.get_frame_position(),
                                        self.pulse_processor.bpm))
                                    bpm_file.flush()

                            else:
                                print('Tracking failed...')

                    if self.recording:
                        self._record_frame(frame)

                    if self.overlay_active:
                        self.add_overlay_to_frame(frame)

                    # frame[:, :, 0] = 0#blue
                    # frame[:, :, 1] = 0#green
                    # frame[:, :, 2] = 0#red
                    cv2.imshow(self.window_name, frame)
                    # cv2.waitKey(1)

                    # cap fps to MAX_FPS
                    diff = 1 / MAX_FPS - (time.time() - frame_start)
                    time.sleep(0 if diff < 0 else diff)

                    self.frame_times.append((time.time() - frame_start))

                else:
                    failed_frames_counter += 1
                    if failed_frames_counter > 100:
                        self.camera_defect = True
                        break
            #playback is paused
            else:
                frame = np.copy(raw_frame)
                if self.overlay_active:
                    self.add_overlay_to_frame(frame)

                cv2.imshow(self.window_name, frame)
Example #8
    def start_tracking(self):
        tracker_types = [
            'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'CSRT', 'MOSSE'
        ]
        tracker_type = tracker_types[6]

        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()

        # Initialize tracker with first frame and bounding box
        while self.frame is None:
            try:
                print("Waiting for image ...")
                rospy.spin()
            except Exception:
                break
        print("Got image...")
        cv2.imshow("Tracking", self.frame)

        print("Got bottle...")
        self.update_bbox()
        ok = tracker.init(self.frame, self.bbox)

        while True:
            # Start timer
            timer = cv2.getTickCount()

            # Update tracker

            ok, self.bbox = tracker.update(self.frame)

            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(self.bbox[0]), int(self.bbox[1]))
                p2 = (int(self.bbox[0] + self.bbox[2]),
                      int(self.bbox[1] + self.bbox[3]))
                cv2.rectangle(self.frame, p1, p2, (255, 0, 0), 2, 1)
            else:
                # Tracking failure
                cv2.putText(self.frame, "Tracking failure detected", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

            # Display tracker type on frame
            cv2.putText(self.frame, tracker_type + " Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display FPS on frame
            cv2.putText(self.frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display result
            cv2.imshow("Tracking", self.frame)

            rospy.spin()
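            # Note (not in the original): rospy.spin() blocks until the node
            # shuts down, so control only returns here on shutdown; a
            # rospy.Rate.sleep() is the usual way to yield inside a loop.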
Example #9
def detect_moving_target(old_gray, new_gray):
    # Body missing in the original; a minimal frame-differencing sketch
    # (an assumption) that returns a binary motion mask.
    diff = cv2.absdiff(old_gray, new_gray)
    _, motion_mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    return motion_mask

# open the video source
cap = cv2.VideoCapture('slow.MOV')
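
# get_frame_from is referenced below but not defined in this excerpt; a
# minimal sketch under that assumption (relies on the sys import used
# elsewhere in this example):
def get_frame_from(capture):
    ok, frame = capture.read()
    if not ok:
        sys.exit("Could not read frame")
    return frame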

# grab the first frame
old_frame = get_frame_from(cap)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

# initiate KCF tracker
tracker = cv2.TrackerKCF_create()

# possible movements ROI


# start the tracking
while True:
    # Record FPS
    timer = cv2.getTickCount()

    # read the current frame
    cur_frame = get_frame_from(cap)

    # convert to grayscale
    cur_gray = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)

    # Calculate Frames per second (FPS)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
    # Display FPS on frame
    cv2.putText(cur_frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
    # display cur frame
    cv2.imshow('video', cur_frame)
    # waitkey
    if cv2.waitKey(1) == ord('q'):
        break


(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    # Read video
    video = cv2.VideoCapture('slow.MOV')

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    frame = cv2.resize(frame, dsize=(0, 0), fx=0.5, fy=0.5)

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        frame = cv2.resize(frame, dsize=(0, 0), fx=0.5, fy=0.5)

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
Example #10
def main():
    """
    Loads Multi-Level Otsu masks and tracks the face.
    Returns a set of ROIs containing only the face.
    :return:
    """
    tag = 'MLO_'
    filepath = 'E:\\GitHub\\CovPySourceFile\\MultiLevelOtsu\\'
    otsu_masks = load_images_from_folder(
        folder=filepath,
        name_tag=tag,
    )

    # region MIL Tracking
    # use binary otsu mask to detect the face
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    # Set up tracker
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[4]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    initial_frame = otsu_masks[0]
    # Define initial bounding box from roi
    bbox = cv2.selectROI(initial_frame, showCrosshair=True, fromCenter=False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(initial_frame, bbox)

    # roi points
    points = []
    failed_idx = []
    for n, mask in enumerate(otsu_masks):
        # Update tracker
        ok, bbox = tracker.update(mask)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = [int(bbox[0]), int(bbox[1])]
            p2 = [int(bbox[0] + bbox[2]), int(bbox[1])]
            p3 = [int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])]
            p4 = [int(bbox[0]), int(bbox[1] + bbox[3])]
            points.append([p1, p2, p3, p4])

            cv2.rectangle(mask, tuple(p1), tuple(p3), (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(mask, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            failed_idx.append(n)

        # Display result
        cv2.imshow("Tracking", mask)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cv2.destroyAllWindows()
    # endregion

    # get rois
    rois = []
    for n, rp in enumerate(points):
        img = Image.open(
            r"E:\GitHub\CovPySourceFile\Normalized\NF_{}.png".format(n))
        left = rp[0][0]
        top = rp[0][1]
        right = rp[2][0]
        bottom = rp[2][1]
        cropped = img.crop((left, top, right, bottom))
        rois.append(cropped)

        # plt.imshow(cropped)
        # plt.show()

    destination_dir = 'E:\\GitHub\\CovPySourceFile\\FaceROI\\'

    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)

    for n, r in enumerate(rois):
        r.save(destination_dir + 'FR_{}.png'.format(n))
Example #11
def main():

    dataset, timestamps = load_thermal_file(
        _filename='ThermalData_18_06_2020_13_19_36.h5',
        _folder='E:\\GitHub\\CovPySourceFile')

    # region Control Variables
    is_writing = False
    is_drawing = False
    # endregion

    # region Data Pre-Processing

    # region Timestamps to Sampling Rate

    # # convert timestamps into datetime objects
    # dt_obj = [datetime.fromtimestamp(ts / 1000).time() for ts in timestamps]
    # # convert datetime objects into time strings
    # time_strings = [dt.strftime("%M:%S:%f") for dt in dt_obj]
    # # finally convert time strings into seconds
    # timestamp_in_seconds = []
    # for s in time_strings:
    #     date_time = datetime.strptime(s, "%M:%S:%f")
    #     a_timedelta = date_time - datetime(1900, 1, 1)
    #     in_seconds = a_timedelta.total_seconds()
    #     timestamp_in_seconds.append(in_seconds)
    #
    # # calculate the mean interval between samples from seconds
    # ts_mean = np.mean(np.diff(timestamp_in_seconds))
    # # finally calculate the mean sampling rate of the signal
    # fs = int(1 / ts_mean)
    # endregion

    # region Get Raw Thermal Data

    # get data set attributes
    n_frames, height, width, total_time_ms = [
        dataset.attrs[i] for i in list(dataset.attrs)
    ]
    # extract thermal frames from the hdf5 dataset
    thermal_frames = []
    # convert raw data into temperature values [deg Celsius]
    # temp_frames = []
    # normalize raw data for further processing steps [0 - 255]
    norm_frames = []
    for n in range(0, n_frames):
        raw_frame = load_frame_from_dataset(dataset, height, n)
        thermal_frames.append(raw_frame)
        # temp_frames.append(raw_frame * 0.1 - 273.15)
        norm_frames.append(
            cv2.normalize(raw_frame,
                          None,
                          alpha=0,
                          beta=255,
                          norm_type=cv2.NORM_MINMAX,
                          dtype=cv2.CV_8U))

    # get unsharpened img for edge detection later on
    unsharp_frames = []
    # for n, n_frame in enumerate(norm_frames):
    #     u_frame = unsharp_mask(image=n_frame, radius=3, amount=2)
    #     unsharp_frames.append(u_frame)
    #
    #     if is_writing:
    #         cv2.imwrite('E:\\GitHub\\CovPySourceFile\\UnsharpenedMask\\UM_{}.png'.format(n), u_frame)
    #
    #     if is_drawing:
    #         fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 3.5))
    #
    #         # Plotting the original image.
    #         ax[0].imshow(norm_frames[n])
    #         ax[0].set_title('Thermal Data - Normalized')
    #
    #         # ax[1].imshow(temp_frames[n])
    #         # ax[1].set_title('Temp Frame [C]')
    #
    #         ax[1].imshow(unsharp_frames[n])
    #         ax[1].set_title('Unsharpened Image')
    #
    #         # ax[1].imshow(norm_frames[n])
    #         # ax[1].set_title('Thermal Data - Normalized [0-255]')
    #
    #         plt.subplots_adjust()
    #         plt.show()
    #
    # if is_drawing:
    #     plt.close('all')

    # endregion

    # endregion

    # region Feature Extraction Algorithm

    # region Automatic ROI Detection

    # face segmentation using multi-level Otsu
    otsu_masks = multi_level_otsu(images=norm_frames,
                                  n_regions=4,
                                  target_region=3,
                                  method=OtsuMethods.BINARY,
                                  write=is_writing,
                                  draw=is_drawing)

    # to proceed the masks need to be converted into 3d array
    empty_array = np.zeros((height, width))
    _3d_otsu_masks = [
        np.dstack((mask, empty_array, empty_array)) for mask in otsu_masks
    ]

    # use binary otsu mask to detect the face
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    # Set up tracker
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[4]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    # video = cv2.VideoCapture('E:\\GitHub\\CovPySourceFile\\Video\\OtsuMask.avi')
    #
    # # Exit if video not opened.
    # if not video.isOpened():
    #     print("Could not open video file!")
    #     sys.exit()
    #
    # # Read first frame
    # ok, frame = video.read()
    # if not ok:
    #     print("Could not read video file!")
    #     sys.exit()

    tracked_frame = _3d_otsu_masks[0]
    # Define initial bounding box from roi
    bbox = cv2.selectROI(tracked_frame, showCrosshair=True, fromCenter=False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(tracked_frame, bbox)

    # roi points
    roi_points = []
    tracked_frames = []
    # while True:
    # # Read a new frame
    # ok, frame = video.read()
    # if not ok:
    #     break
    for mask in _3d_otsu_masks:
        tracked_frame = mask
        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(tracked_frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1]))
            p3 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            p4 = (int(bbox[0]), int(bbox[1] + bbox[3]))
            cv2.rectangle(tracked_frame, p1, p3, (255, 0, 0), 2, 1)
            points = [p1, p2, p3, p4]
            # roi_values = get_values_from_roi(points, t_frame)
            roi_points.append(points)
        else:
            # Tracking failure
            cv2.putText(tracked_frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            roi_points.append([])

        # Display tracker type on frame
        cv2.putText(tracked_frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(tracked_frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        tracked_frames.append(tracked_frame)
        # Display result
        cv2.imshow("Tracking", tracked_frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    is_writing = True
    if is_writing:
        for n, img in enumerate(tracked_frames):
            cv2.imwrite(
                'E:\\GitHub\\CovPySourceFile\\TrackedFrames\\TF_{}.png'.format(
                    n), img)

    norm_face_rois = []
    for n in range(0, len(roi_points)):
        # get values inside of roi
        norm_roi_values = get_values_from_roi(roi_points[n], norm_frames[n])
        # my_roi = np.zeros((roi_shapes[n][2], roi_shapes[n][3]))
        x1 = roi_points[n][0][0]
        x2 = roi_points[n][2][0]
        y1 = roi_points[n][0][1]
        y2 = roi_points[n][2][1]

        norm_face_roi = norm_roi_values[y1:y2, x1:x2]

        if is_drawing:
            cv2.imshow("ROI", norm_face_roi)

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

        norm_face_rois.append(norm_face_roi)

    if is_writing:
        for n, img in enumerate(tracked_frames):
            cv2.imwrite(
                'E:\\GitHub\\CovPySourceFile\\FaceROI\\TF_{}.png'.format(n),
                img)
    # endregion

    # endregion

    print('Bye Bye')
Example #12
import cv2

tracker = None
CORRELATION_TRACKER = 'csrt'  # an OPENCV_OBJECT_TRACKERS key, or 'dlib'
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create(),
    "kcf": cv2.TrackerKCF_create(),
    "boosting": cv2.TrackerBoosting_create(),
    "mil": cv2.TrackerMIL_create(),
    "tld": cv2.TrackerTLD_create(),
    "medianflow": cv2.TrackerMedianFlow_create(),
    "mosse": cv2.TrackerMOSSE_create()
}
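# Note (not in the original): this dict eagerly creates one tracker instance
# of each type; a common variant stores the constructors instead (e.g.
# "csrt": cv2.TrackerCSRT_create) and calls the selected one inside
# init_tracker() so every call gets a fresh tracker.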


def init_tracker(frame):
    global tracker
    if CORRELATION_TRACKER == 'dlib':
        tracker = dlib.correlation_tracker()
        roi = cv2.selectROI("Frame",
                            frame,
                            fromCenter=False,
                            showCrosshair=False)
        roi = dlib.rectangle(roi[0], roi[1], roi[0] + roi[2], roi[1] + roi[3])
        tracker.start_track(frame, roi)

    else:
        tracker = OPENCV_OBJECT_TRACKERS[CORRELATION_TRACKER]
        roi = cv2.selectROI("Frame",
                            frame,
                            fromCenter=False,
Example #13
    def run_concurrent(self, args, sign_progress):
        annotation_id = args[0]
        bbox = tuple(args[1])
        movie_path = args[2]
        fps = args[5]
        start_frame = ms_to_frames(args[3], fps)
        end_frame = ms_to_frames(args[4], fps)
        method = args[6]
        resolution = args[7]

        keys = []

        # TRACKING
        if method == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif method == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif method == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif method == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif method == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif method == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        else:
            raise Exception("Tracking Method not identifiable. " + str(method))

        # Read video
        capture = cv2.VideoCapture(movie_path)

        # Exit if video not opened.
        if not capture.isOpened():
            raise RuntimeError("Tracking: Could not open video.")

        # Read first frame.
        capture.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        ok, frame = capture.read()
        if not ok:
            raise RuntimeError("Tracking: Could not read Frame.")

        # Initialize tracker with first frame and bounding box
        ok = tracker.init(frame, bbox)

        for i in range(start_frame, end_frame, 1):
            sign_progress(
                round(float(i - start_frame) / (end_frame - start_frame), 2))
            # Read a new frame
            ok, frame = capture.read()
            if not ok:
                break

            # Update tracker
            ok, bbox = tracker.update(frame)

            # Draw bounding box
            if ok:
                # Tracking success
                if i % resolution == 0:
                    time = frame2ms(i, fps)
                    pos = [bbox[0], bbox[1]]
                    keys.append([time, pos])

                    p1 = (int(bbox[0]), int(bbox[1]))
                    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                    # cv2.imshow("Returned", frame)
                    # cv2.waitKey(30)

        return [annotation_id, keys]
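
ms_to_frames and frame2ms are project helpers not shown in this excerpt; plausible sketches, assuming a plain millisecond/frame-index conversion at a fixed fps:

def ms_to_frames(ms, fps):
    # Convert a timestamp in milliseconds to a frame index (assumed helper).
    return int(round(ms / 1000.0 * fps))

def frame2ms(frame_idx, fps):
    # Convert a frame index back to milliseconds (assumed helper).
    return int(round(frame_idx * 1000.0 / fps))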
Example #14
 def initialize_track(self, image, boxes):
     # return boxes
     self.tracker = cv2.MultiTracker_create()
     for box in boxes:
         self.tracker.add(cv2.TrackerMedianFlow_create(), image, box)
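
For context (an addition, not part of the example): once boxes are added, a single update() call advances every attached tracker and returns the boxes in the order they were added. A rough usage sketch with the same OpenCV 3.x API, where next_image stands in for the following frame:

ok, boxes = mtracker.update(next_image)  # mtracker: the MultiTracker built above
for (x, y, w, h) in boxes:
    cv2.rectangle(next_image, (int(x), int(y)), (int(x + w), int(y + h)), (255, 0, 0), 2)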
Example #15
def main():
    args = build_argparser().parse_args()

    log.basicConfig(
        format="[ %(levelname)s ] %(asctime)-15s %(message)s",
        level=log.INFO,
        stream=sys.stdout,
    )
    log.debug(str(args))

    log.info("Reading input data from '%s'" % (args.input))
    stream = args.input
    try:
        stream = int(args.input)
    except ValueError:
        pass
    cap = cv2.VideoCapture(stream)
    frame_size = (
        int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )

    input_crop = None
    if args.crop_width and args.crop_height:
        input_crop = np.array((args.crop_width, args.crop_height))
        crop_size = (args.crop_width, args.crop_height)
        frame_size = tuple(np.minimum(frame_size, crop_size))

    frame_timeout = 0 if args.timelapse else 1

    if args.tracker == "BOOSTING":
        tracker = cv2.TrackerBoosting_create()
    elif args.tracker == "MIL":
        tracker = cv2.TrackerMIL_create()
    elif args.tracker == "KCF":
        tracker = cv2.TrackerKCF_create()
    elif args.tracker == "TLD":
        tracker = cv2.TrackerTLD_create()
    elif args.tracker == "MEDIANFLOW":
        tracker = cv2.TrackerMedianFlow_create()
    elif args.tracker == "GOTURN":
        tracker = cv2.TrackerGOTURN_create()
    elif args.tracker == "CSRT":
        tracker = cv2.TrackerCSRT_create()

    tracker = tracker_initial(args.tracker)
    traceStart = 0
    traceWarning = True

    while True:
        (grabbed, frame) = cap.read()
        if not grabbed:
            log.error("no inputs")
            break

        if args.log:
            start = time.time()

        if input_crop is not None:
            frame = center_crop(frame, input_crop)

        if traceStart:
            ok, bbox = tracker.update(frame)

            if ok:
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (0, 255, 0), 3)
                if not traceWarning:
                    traceWarning = True
            else:
                if traceWarning:
                    log.warning("Tracking lost")
                    traceWarning = False

            if args.log:
                end = time.time()
                log.info("Tracking took {:.6f} seconds".format(end - start))

        cv2.imshow("Tracking", frame)

        getKey = cv2.waitKey(frame_timeout) & 0xFF
        if getKey in BREAK_KEYS:
            break
        elif getKey in TRACKING_KEYS:
            bbox = cv2.selectROI(frame, False)
            ok = tracker.init(frame, bbox)
            traceStart = 1
            cv2.destroyAllWindows()
        elif getKey in CAPTURE_KEYS:
            log.info("Screen captured")
            save_result(frame, "tracking")
        elif getKey in TRACKING_STOP_KEYS:
            tracker.clear()
            tracker = tracker_initial(args.tracker)
            traceStart = 0

    cap.release()
    cv2.destroyAllWindows()
Example #16
            # Pass blob to model
            model.setInput(blob)
            # Execute forward pass
            outputs = model.forward(outputNames)
            bboxes, probs, class_ids = where_is_it(frame, outputs)

            if len(bboxes) > 0:
                # Init multitracker
                mtracker = cv2.MultiTracker_create()
                # Apply non-max suppression and pass boxes to the multitracker
                idxs = cv2.dnn.NMSBoxes(bboxes, probs, P_THRESH, NMS_THRESH)
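                # NMSBoxes returns an Nx1 array in older OpenCV builds, hence
                # the i[0] indexing below; recent versions return a flat array.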
                for i in idxs:
                    bbox = [int(v) for v in bboxes[i[0]]]
                    x, y, w, h = bbox
                    # Use median flow
                    mtracker.add(cv2.TrackerMedianFlow_create(), frame,
                                 (x, y, w, h))
                # Increase counter
                count += 1
            else:  # declare failure
                cv2.putText(frame, 'Detection failed', (20, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        else:  # perform tracking
            is_tracking, bboxes = mtracker.update(frame)
            if is_tracking:
                for i, bbox in enumerate(bboxes):
                    x, y, w, h = [int(val) for val in bbox]
                    class_id = classes[class_ids[idxs[i][0]]]
                    col = [int(c) for c in colors[class_ids[idxs[i][0]], :]]
                    # Mark tracking frame with corresponding color, write class name on top
                    cv2.rectangle(frame, (x, y), (x + w, y + h), col, 2)
Example #17
    def main_thread(self):
        self.cap = cv2.VideoCapture(self.data_bridge.selected_video_file_path)
        num = 0
        number = 0
        while self.data_bridge.start_process_manager:
            if self.data_bridge.start_process_manager and (number == 0 or number > 99):
                if number > 99:
                    number = 0
                ret, frame = self.cap.read()
                frame = cv2.resize(frame, (720, 480))
                if num < self.skip_frames:

                    num += 1
                    cv2.imshow('Camera', frame)
                    cv2.waitKey(10)
                    continue
                result = self.detect(frame)
                # print(np.shape(result), "result shape", result[:][0])
                self.draw_result(frame, result)
                number += 1
                cv2.imshow('Camera', frame)
                cv2.waitKey(1)
                num += 1
                self.gui_root.update()

            if self.data_bridge.start_process_manager and number < 100:
                a = len(result)
                tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW']
                tracker_type = self.data_bridge.methode_chosen_for_tracking
                print(tracker_type)
                if True:
                    if tracker_type == 'BOOSTING':
                        tracker = []
                        i = 0
                        while i < a:
                            tracker.append(cv2.TrackerBoosting_create())
                            i = i + 1
                    if tracker_type == 'MIL':
                        tracker = []
                        i = 0
                        while i < a:
                            tracker.append(cv2.TrackerMIL_create())
                            i = i + 1
                    if tracker_type == 'KCF':
                        tracker = []
                        i = 0
                        while i < a:
                            tracker.append(cv2.TrackerKCF_create())
                            i = i + 1
                    if tracker_type == 'TLD':
                        tracker = []
                        i = 0
                        while i < a:
                            tracker.append(cv2.TrackerTLD_create())
                            i = i + 1
                    if tracker_type == 'MEDIANFLOW':
                        tracker = []
                        i = 0
                        while i < a:
                            tracker.append(cv2.TrackerMedianFlow_create())
                            i = i + 1
                bbox = []
                i = 0
                while i < a:
                    x = int(result[i][1])
                    y = int(result[i][2])
                    w = int(result[i][3] / 2)
                    h = int(result[i][4] / 2)
                    res = (x - w, y - h, 2 * w, 2 * h)

                    bbox.append(res)
                    # print(bbox[i])
                    tracker[i].init(frame, bbox[i])
                    i = i + 1

                while number < 100 and self.data_bridge.start_process_manager:
                    ret, frame = self.cap.read()
                    frame = cv2.resize(frame, (720, 480))
                    timer = cv2.getTickCount()
                    i = 0
                    while i < a:
                        ret, bbox[i] = tracker[i].update(frame)
                        i = i + 1
                    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                    if ret:
                        i = 0
                        while i < a:
                            p1 = (int(bbox[i][0]), int(bbox[i][1]))
                            p2 = (int(bbox[i][0] + bbox[i][2]), int(bbox[i][1] + bbox[i][3]))
                            cv2.rectangle(frame, p1, p2, (255, 255, 255), 2, 1)
                            i = i + 1
                    else:
                        cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0, 0, 255),2)

                    cv2.putText(frame, str(i) + " Objects detected", (400, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                (50, 170, 50), 2)
                    cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
                    cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
                    cv2.imshow("Camera", frame)
                    k = cv2.waitKey(1)
                    self.gui_root.update()
                    number += 1
                    print("number", number)

        cv2.destroyAllWindows()
        self.cap.release()
Example #18
import cv2
import sys
import find as f
import numpy as np
import neural_network as nn
import os.path
import matplotlib.pyplot as plt

tracker = cv2.TrackerMedianFlow_create()
counter = 0
colors = []
limit = 4
kernel = np.ones((3, 3), np.uint8)
rectangle_distance_limit = 2
suma = 0
last_blue_roi_shape = (0, 0)
last_blue_predicted_number = 0
last_green_roi_shape = (0, 0)
last_green_predicted_number = 0

DIR = 'D:\\Boban\\Fakultet\\Soft\\Projekti\\softPredef'
file = open(DIR + '\\out.txt', 'w')
file.write('RA 89/2014 Boban Poznanovic\nfile\tsum\n')

DIR = DIR + '\\videos'

video_names = []
for name in os.listdir(DIR):
    if os.path.isfile(os.path.join(DIR, name)):
        video_names.append(os.path.join(DIR, name))
Example #19
        IMG_NAME = "img_%d.jpeg" % index
        PATH_TO_IMAGE = os.path.join(CWD_PATH, FILE_NAME, 'Figures/', IMG_NAME)
        image = cv2.imread(PATH_TO_IMAGE)
        tstart = time.time()
        if not trackEnable or not TRACK_FLAG:
            count += 1
            image_expanded = np.expand_dims(image, axis=0)
            (boxes_ml, scores_ml, classes_ml, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_expanded})
            boxes_ml = boxes_ml.reshape(-1, 4)
            if threshold is not False:
                boxes_ml = boxes_ml[(scores_ml > threshold).reshape(-1), ]
            boxes_ml = pdf.tfcv_convertor(boxes_ml, image.shape[0:2], source='tf')
            for bbox in boxes_ml:
                multitracker.add(cv2.TrackerMedianFlow_create(), image, bbox)
            boxes_ml = pdf.tfcv_convertor(boxes_ml, image.shape[0:2], source='cv')
            if boxes_ml.shape[0] <= 1:
                TRACK_FLAG = False
                multitracker = cv2.MultiTracker_create()
            else:
                TRACK_FLAG = True

        else:
            success, boxes_ml = multitracker.update(image)
            boxes_ml = pdf.tfcv_convertor(boxes_ml, image.shape[0:2], source='cv')
            if boxes_ml.shape[0] <= 1:
                TRACK_FLAG = False
                multitracker = cv2.MultiTracker_create()
                count += 1
            else:
Example #20
def main(_argv):
    th = threading.Thread(target=arduino_read)
    th.start()
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        tf.config.experimental.set_memory_growth(physical_device, True)

    if FLAGS.tiny:
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        yolo = YoloV3(classes=FLAGS.num_classes)

    yolo.load_weights(FLAGS.weights)
    logging.info('weights loaded')

    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')
    CUDA = torch.cuda.is_available()
    times = []

    inp_dim = int('416')
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    try:
        vid = cv2.VideoCapture(1)  # camera index: 1 = USB camera
    except:
        vid = cv2.VideoCapture(FLAGS.video)
    # vid = cv2.VideoCapture(0)

    out = None

    if FLAGS.output:
        # by default VideoCapture returns float instead of int
        width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(vid.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    tracker = cv2.TrackerKCF_create()
    fps = None
    initBB = None
    # initBB = True
    redetect = False
    failCnt = 0
    global start
    check_start = None

    while True:
        ret, frame = vid.read() #img = frame
        # if frame is None:
        #     logging.warning("Empty Frame")
        #     time.sleep(0.1)
        #     continue
        # frame = imutils.resize(frame, width = 500)
        (H, W) = frame.shape[:2]

        img_in = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img_in = tf.expand_dims(img_in, 0)
        img_in = transform_images(img_in, FLAGS.size)
        boxes, scores, classes, nums = yolo.predict(img_in)

        # t1 = time.time()
        # boxes, scores, classes, nums = yolo.predict(img_in)
        # t2 = time.time()
        # times.append(t2-t1)
        # times = times[]




        # print('initBB = ', initBB)
        if initBB is not None:
            (success, box) = tracker.update(frame)
            if start == 'q':
                initBB = None
                tracker = cv2.TrackerMedianFlow_create()
                ardu_stop()
            elif success:
                failCnt = 0
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(frame, (x+30, y), (x + w-30, y + h), (0, 255, 0), 2)
                ardu(box)
                # frame = draw_outputs(frame, (boxes, scores, classes, nums), class_names)
            else:
                failCnt += 1
                ardu_detect()
                if failCnt > 50:
                    redetect = True
                    initBB = None
                    tracker = cv2.TrackerKCF_create()
            # fps.update()
            # fps.stop()
            # info = [
            #     ('Tracker', 'kcf'),
            #     ('Success', 'yes' if success else 'No'),
            #     ('FPS', '{:.2f}'.format(fps.fps())),
            # ]
            #
            # for (i, (k, v)) in enumerate(info):
            #     text = '{}:{}'.format(k, v)
            #     cv2.putText(frame, text, (10, H-((i*20)+20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255, 2))

        # if redetect:
        #     ret, frame = vid.read()
        #     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #     template = cv2.imread('./user_faces/user.jpg', 0)
        #     w, h = template.shape[::-1]
        #
        #     res = cv2.matchTemplate(gray, template, cv2.TM_SQDIFF)
        #     min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        #     top_left = min_loc
        #     bottom_right = (top_left[0] + w, top_left[1] + h)
        #     cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 1)

            """
            imgray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            w, h = imgray.shape[::-1]
            templ = cv2.imread('./user.jpg', cv2.IMREAD_GRAYSCALE)
            templ_h, templ_w = templ.shape[::-1]
            res = cv2.matchTemplate(imgray, templ, cv2.TM_CCOEFF_NORMED)
            loc = np.where(res >= 0.6)
            for pt in zip(*loc[::-1]):
                cv2.rectangle(frame, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 1)
            """
        key = cv2.waitKey(1) & 0xFF
        if key == ord('s') or start == 's':
            check_start = start_tracker(frame, (boxes, scores, classes, nums), class_names)
            if check_start is not None:
                start = 'a'
                initBB = tuple(check_start)
                tracker.init(frame, initBB)
                x, y, w, h = check_start
                frame_user = frame[y:y+h,x+20:x+w-20]
                cv2.imwrite('./user_faces/user.jpg', frame_user)
                # fps = FPS().start()

            # img, orig_im, dim = prep_image(frame, inp_dim)
            # img = prep_image(frame, inp_dim)

            # cv2.imshow('img', frame)

            # if CUDA:
            #     # im_dim = im_dim.cuda()
            #     img = img.cuda()
            #
            # with torch.no_grad():
            #     output = model(Variable(img),CUDA)
            # output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
            # try:
            #     if output[0][0].tolist() == 0:
            #         #tensor[0][0] mean the category of predicted class and 0 is person in coco.names
            #         initBB = np.array([int(i) for i in output[0][1:5].tolist])
            #         x, y, w, h = initBB
            #         initBB = (x, y, w, h)
            #         frame_user = frame[x-10:w+10, y-10:h+10]
            #         cv2.imwrite('./user_faces/user.jpg', frame_user)
            #         tracker.init(frame, initBB)
            #         fps = FPS().start()
            # except:
            #     print("다시해주세요")
        #print(initBB)
        cv2.imshow('img', frame)

        if key == ord('q'):
            ardu_stop()
            break

    vid.release()

    cv2.destroyAllWindows()
Example #21
 ntrackers = 2
 if int(minor_ver) < 3:
     for i in range(ntrackers):
         trackers.append(cv2.Tracker_create(tracker_type))
 else:
     for i in range(ntrackers):
         if tracker_type == 'BOOSTING':
             trackers.append(cv2.TrackerBoosting_create())
         if tracker_type == 'MIL':
             trackers.append(cv2.TrackerMIL_create())
         if tracker_type == 'KCF':
             trackers.append(cv2.TrackerKCF_create())
         if tracker_type == 'TLD':
             trackers.append(cv2.TrackerTLD_create())
         if tracker_type == 'MEDIANFLOW':
             trackers.append(cv2.TrackerMedianFlow_create())
         if tracker_type == 'GOTURN':
             trackers.append(cv2.TrackerGOTURN_create())
 
 go_live = True
 if go_live:
     # start video stream thread, allow buffer to fill
     print("[INFO] starting threaded video stream...")
     stream = WebcamVideoStream(src=0).start()  # default camera
     time.sleep(1.0)
     # first frame
     frame = stream.read()
 else:
     # Read video
     video = cv2.VideoCapture("chaplin.mp4")
     # Exit if video not opened.
Example #22
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    '''
    # populates trackers dict with all types of trackers
    trackers = {}
    trackers["BOOSTING"] = cv2.TrackerBoosting_create()
    trackers["MIL"] = cv2.TrackerMIL_create()
    trackers["KCF"] = cv2.TrackerKCF_create()
    trackers["TLD"] = cv2.TrackerTLD_create()
    trackers["MEDIANFLOW"] = cv2.TrackerMedianFlow_create()
    trackers["MOSSE"] = cv2.TrackerMOSSE_create()
    trackers["CSRT"] = cv2.TrackerCSRT_create()
    #trackers = cv2.TrackerGOTURN_create()

    tracker = cv2.TrackerBoosting_create()

    # Read video
    video = cv2.VideoCapture(infilename)

    # Write video
    fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
    out = cv2.VideoWriter(outfilename, fourcc, video_fps,
                          (int(video.get(3)), int(video.get(4))), True)

    # Exit if video not opened.
Example #23
def tracker():
    red = (0, 0, 255)
    blue = (255, 0, 0)
    green = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0

    # dictionary for locations
    previous_location = {}
    current_location = {}

    # dictionary for trackers
    carTracker = {}

    # dictionary for corners
    corners1 = {}
    corners2 = {}
    corners_update = {}
    corners_center = {}
    old_corners_center = {}
    width1 = {}
    width2 = {}
    speed = {}
    time1 = {}
    time2 = {}
    while True:
        # read a frame; break if no frame is returned

        rc, image = video.read()
        if image is None:
            break
        # start time of iteration
        # crop frame
        # copy cropped frame
        # add 1 to frame counter
        # start = time.time()
        image = image[150:600, 150:950]
        resultImage = image.copy()
        frameCounter = frameCounter + 1

        # create empty list for trackers you need to delete
        carIDtoDelete = []

        # iterate through the tracker dictionary; delete trackers whose objects left the frame
        for carID in carTracker.keys():
            # get position of bounding box
            trackedPosition = carTracker[carID].update(image)
            t_x, t_y, t_w, t_h = trackedPosition[1]
            t_x = int(t_x)
            t_y = int(t_y)
            t_w = int(t_w)
            t_h = int(t_h)

            # x_center = t_x + 0.5 * t_w
            # y_center = t_y + 0.5 * t_h

            # if the object leaves the frame, add its tracker to the delete list
            if t_x + t_w >= 750:
                carIDtoDelete.append(carID)
            elif t_y >= 570 or t_y <= 0:
                carIDtoDelete.append(carID)
            elif t_x <= 0:
                carIDtoDelete.append(carID)

        # delete all trackers in delete list
        for carID in carIDtoDelete:
            # print 'Tracker deleted: ' + str(carID) + '.'
            # print 'Current location deleted: ' + str(carID) + '.'
            # print 'Previous location deleted: ' + str(carID) + '.'
            # print 'Corners 1 deleted: ' + str(carID) + '.'
            # print 'Corners 2 deleted: ' + str(carID) + '.'
            # print 'Width 1 deleted: ' + str(carID) + '.'
            # print 'Width 2 deleted: ' + str(carID) + '.'
            # print '\n'
            carTracker.pop(carID, None)
            current_location.pop(carID, None)
            previous_location.pop(carID, None)
            corners1.pop(carID, None)
            corners2.pop(carID, None)
            width1.pop(carID, None)
            width2.pop(carID, None)
            time1.pop(carID, None)
            time2.pop(carID, None)
            corners_center.pop(carID, None)
            old_corners_center.pop(carID, None)

        # try to detect new objects every 10 frames
        if not (frameCounter % 10):
            # convert frame to grayscale
            # try to detect new object in frame
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cars = carCascade.detectMultiScale(gray, 1.15, 15)
            # for each detection, save its location and compute the center of its bounding box
            for (_x, _y, _w, _h) in cars:
                x = int(_x) + 7
                y = int(_y) + 7
                w = int(_w) - 5
                h = int(_h) - 5

                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h

                matchCarID = None

                # iterate through tracked objects
                for carID in carTracker.keys():
                    # get object location and calculate center point of tracked object
                    trackedPosition = carTracker[carID].update(image)
                    t_x, t_y, t_w, t_h = trackedPosition[1]
                    t_x = int(t_x)
                    t_y = int(t_y)
                    t_w = int(t_w)
                    t_h = int(t_h)

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h

                    # if this holds, the detected object already has a tracker
                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <=
                                                           (t_y + t_h))
                            and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <=
                                                               (y + h))):
                        matchCarID = carID

                # if the detected object doesn't have a tracker yet, create one and add it to the dictionary
                # save the object's location for speed estimation
                if matchCarID is None:
                    bbox = (x, y, w, h)
                    if bbox[0] + bbox[2] < 400 and bbox[1] < 100 and bbox[
                            0] > 70:
                        tracker = cv2.TrackerMedianFlow_create()
                        tracker.init(image, bbox)
                        carTracker[currentCarID] = tracker
                        previous_location[currentCarID] = bbox
                        ROI = gray[y:y + h, x:x + w]
                        corners1[currentCarID] = cv2.goodFeaturesToTrack(
                            ROI, 10, 0.25, 5)
                        corners1[currentCarID][:, 0, 0] += x
                        corners1[currentCarID][:, 0, 1] += y
                        for corner in corners1[currentCarID]:
                            cx, cy = corner.ravel()
                            cv2.circle(resultImage, (int(cx), int(cy)),
                                       5,
                                       green,
                                       thickness=-1)
                        #--
                        width1[currentCarID] = bbox[2]
                        old_corners_center[currentCarID] = find_center(
                            corners1[currentCarID])
                        #--
                        time1[currentCarID] = time.time()
                        currentCarID = currentCarID + 1

        # every frame, iterate through the trackers
        for carID in carTracker.keys():
            # get position of object
            trackedPosition = carTracker[carID].update(image)
            t_x, t_y, t_w, t_h = trackedPosition[1]
            t_x = int(t_x)
            t_y = int(t_y)
            t_w = int(t_w)
            t_h = int(t_h)

            t_x_bar = t_x + 0.5 * t_w
            t_y_bar = t_y + 0.5 * t_h
            bbox = (t_x, t_y, t_w, t_h)
            width2[carID] = bbox[2]

            if len(corners1[carID]):
                ret, frame = video.read()
                if frame is None:
                    break
                frame = frame[150:600, 150:950]
                gray2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                corners2[carID], st, err = cv2.calcOpticalFlowPyrLK(
                    gray, gray2, corners1[carID], None, **lk_params)
                corners_center[carID] = find_center(corners2[carID])
                # print(corners_center[carID])
                cv2.circle(resultImage,
                           corners_center[carID],
                           5,
                           blue,
                           thickness=-1)
                for corner in corners2[carID]:
                    cv2.circle(resultImage,
                               (int(corner[0][0]), int(corner[0][1])), 5,
                               green, -1)
                corners1[carID] = corners2[carID].copy()
                time2[carID] = time.time()
                gray = gray2.copy()

            # save location for speed estimation
            # draw new rectangle in frame
            current_location[carID] = bbox
            cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h), red,
                          2)

        #--
        # iterate through the tracked center points
        for i in corners_center.keys():
            sec = time2[i] - time1[i]
            if (old_corners_center[i] != corners_center[i]
                    and 0.01 <= sec < 0.1):
                v = estimate_speed(old_corners_center[i], corners_center[i],
                                   sec, width1[i], width2[i], i)
                print(v)
                d = distance(width1[i], width2[i])
                cv2.putText(resultImage, 'Distance: ' + str(int(d)) + ' m.',
                            (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                            (0, 0, 255), 3)
                cv2.putText(resultImage,
                            'TTC: ' + str(int(timeToCollision(v, d))) + ' s.',
                            (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                            (0, 0, 255), 3)
            old_corners_center[i] = corners_center[i]
            time1[i] = time2[i]
            # if len(width1):
            width1[i] = width2[i]
        #--
        # show results
        cv2.imshow('image', resultImage)
        # wait for Esc to terminate
        if cv2.waitKey(33) == 27:
            break

    # close all open
    cv2.destroyAllWindows()
def medianFlowTracker(video):
    tracker = cv2.TrackerMedianFlow_create()
    excute(video, tracker)
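
The helpers estimate_speed, distance and timeToCollision are not part of this excerpt. Under a pinhole-camera assumption, the range to a car is inversely proportional to its apparent width, which suggests plausible forms like the sketch below; known_width and focal are assumed calibration constants, not values from the original code:

def distance(w1, w2, known_width=1.8, focal=700.0):
    # pinhole model: d = W * f / w, using the latest apparent width in pixels
    return known_width * focal / w2

def timeToCollision(v, d):
    # seconds until impact at closing speed v over distance d
    return d / v if v > 0 else float('inf')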
Ejemplo n.º 25
0
def process_video(groundtruth_path, image_path, out_video):
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    print('processing sequence', out_video)
    with open(groundtruth_path) as f:
        groundtruth = f.readlines()

    groundtruth = [x.rstrip() for x in groundtruth]

    image_filenames = [y for x in walk(image_path) for y in glob(join(x[0], '*.jpg'))]
    image_filenames.sort()

    assert len(image_filenames) == len(groundtruth)

    image = cv2.imread(image_filenames[0])
    height, width = image.shape[:2]
    writer = cv2.VideoWriter(out_video, cv2.VideoWriter_fourcc('X','V','I','D'), 15, (width , height))

    if not writer.isOpened():
        print('Failed to open video')
        return

    # VOT sequence
    # polygon_ = parse_region(groundtruth[0])
    # cx, cy, w, h = get_axis_aligned_bbox(polygon_)
    # target_pos, target_sz = np.array([cx, cy]), np.array([w, h])

    polygon = [float(x) for x in groundtruth[0].split(',')]
    ok = tracker.init(image, (polygon[0], polygon[1], polygon[2], polygon[3]))

    for i in range(len(image_filenames)):
        image = cv2.imread(image_filenames[i])
        polygon = [float(x) for x in groundtruth[i].split(',')]
        polygon = [int(x) for x in polygon]
        
        # VOT sequence
        # cv2.line(image, (polygon[0], polygon[1]), (polygon[2], polygon[3]), (0, 0, 255), 2)
        # cv2.line(image, (polygon[2], polygon[3]), (polygon[4], polygon[5]), (0, 0, 255), 2)
        # cv2.line(image, (polygon[4], polygon[5]), (polygon[6], polygon[7]), (0, 0, 255), 2)
        # cv2.line(image, (polygon[6], polygon[7]), (polygon[0], polygon[1]), (0, 0, 255), 2)

        cv2.rectangle(image, (polygon[0], polygon[1]), (polygon[0]+polygon[2], polygon[1]+polygon[3]), (0, 0, 255), 2)

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(image)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(image, p1, p2, (255,0,0), 2, 1)
        else :
            # Tracking failure
            cv2.putText(image, "Tracking failure detected", (50,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,255,255),2)

        # Display tracker type on frame
        cv2.putText(image, tracker_type + " Tracker", (50,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (230,170,50),2)
        
        # Display FPS on frame
        cv2.putText(image, "FPS : " + str(int(fps)), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (230,170,50), 2)

        writer.write(image)

    writer.release()
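
process_video draws the ground-truth box in red and the tracker output in blue but never scores them against each other. The standard per-frame metric is intersection-over-union; a small helper that could be called inside the loop (an addition, not part of the original script):

def iou(a, b):
    # intersection-over-union of two (x, y, w, h) boxes
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2 = min(a[0] + a[2], b[0] + b[2])
    y2 = min(a[1] + a[3], b[1] + b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union if union else 0.0

# e.g. score = iou(tuple(polygon), bbox) after each tracker.update()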
Ejemplo n.º 26
0
    def objTrack(self):
        self.makeDirs()
        videoLocation = self.tcVars.filePaths
        cap = cv2.VideoCapture(videoLocation[0])
        vidNum = 0
        init = 1
        initScale = 1
        posCount = 0
        negCount = 0
        posList = []
        negList = []
        self.tcVars.statusBox.AppendText('Space -- to Pause/Play video\n')
        self.tcVars.statusBox.AppendText('w -- to select object to train on\n')
        self.tcVars.statusBox.AppendText(
            's -- to stop obj tracking (if it starts tracking something weird)\n'
        )
        self.tcVars.statusBox.AppendText('a -- rewind 3 seconds\n')
        self.tcVars.statusBox.AppendText('d -- fast forward 3 seconds\n')
        self.tcVars.statusBox.AppendText(
            'n -- to move on to next video in list\n')
        self.tcVars.statusBox.AppendText('q or Esc -- to quit\n\n')

        class Found(Exception):
            pass

        try:
            while True:
                success, frame = cap.read()
                if not success:
                    self.tcVars.statusBox.AppendText("didn't find video\n")
                    break
                refFrame = frame.copy()
                refFrame2 = frame.copy()
                key = cv2.waitKey(50) & 0xFF

                # Pause the Video
                if key == 32:
                    while True:
                        key2 = cv2.waitKey(1) & 0xFF
                        # Start obj tracking
                        if key2 == ord('w'):
                            if initScale:
                                yRatio = 1
                                box = areaSelector(frame, yRatio, initScale)
                                ratioInit = mask2Rect(box)
                                h = ratioInit[1][1] - ratioInit[0][1]
                                w = ratioInit[1][0] - ratioInit[0][0]
                                yRatio = round(h / w, 2)
                                initScale = 0
                                objFrame = refFrame[box]
                                w = objFrame.shape[1]
                                h = objFrame.shape[0]
                            else:
                                box = areaSelector(frame, yRatio, initScale)
                                objFrame = refFrame[box]
                                objFrame = cv2.resize(objFrame, (w, h))

                            init = 2
                            tracker = cv2.TrackerMedianFlow_create()
                            tracker.init(frame, mask2Box(box))
                            posCount += 1
                            posPath = os.path.abspath('data/pos')
                            cv2.imwrite(f'{posPath}\\pos{posCount}.jpg',
                                        objFrame)
                            posList.append(
                                f'pos/pos{posCount}.jpg  1  0 0 {w} {h}\n')
                            temp = mask2Rect(box)
                            refFrame = cv2.rectangle(refFrame, temp[0],
                                                     temp[1], (0, 0, 0),
                                                     cv2.FILLED)
                            negCount += 1
                            negPath = os.path.abspath('data/neg')
                            cv2.imwrite(f'{negPath}\\neg{negCount}.jpg',
                                        refFrame)
                            negList.append(f'neg/neg{negCount}.jpg\n')
                            break

                        cv2.imshow('Video', frame)
                        # Play the Video
                        if key2 == 32:
                            break
                        if key2 == ord('s'):
                            init = 1
                            break
                        if key2 == ord('n'):
                            vidNum += 1
                            if len(videoLocation) > 1 and vidNum < len(
                                    videoLocation):
                                cap.release()
                                cap = cv2.VideoCapture(videoLocation[vidNum])
                                break
                            else:
                                if len(videoLocation) == 1:
                                    self.tcVars.statusBox.AppendText(
                                        'Only one video loaded\n')
                                else:
                                    self.tcVars.statusBox.AppendText(
                                        'Last video in list\n')

                        if key2 == 27 or key2 == 113:
                            raise Found
                if success and init == 2:
                    trackSuccess, box = tracker.update(frame)
                    if trackSuccess:
                        rectPts = box2Rect(box)
                        objFrame = refFrame2[box2Mask(box)]
                        cv2.rectangle(frame, rectPts[0], rectPts[1],
                                      (255, 0, 0), 2, 1)
                        posCount += 1
                        resizedModImg = cv2.resize(objFrame, (w, h))
                        cv2.imwrite(f'{posPath}\\pos{posCount}.jpg',
                                    resizedModImg)
                        posList.append(
                            f'pos/pos{posCount}.jpg  1  0 0 {w} {h}\n')
                        refFrame = cv2.rectangle(refFrame, rectPts[0],
                                                 rectPts[1], (0, 0, 0),
                                                 cv2.FILLED)
                        negCount += 1
                        cv2.imwrite(f'{negPath}\\neg{negCount}.jpg', refFrame)
                        negList.append(f'neg/neg{negCount}.jpg\n')
                    else:
                        init = 1

                if key == ord('s'):
                    init = 1

                # Skip forward 3 seconds
                if key == ord('d'):
                    skip = cap.get(cv2.CAP_PROP_POS_MSEC) + 3000
                    cap.set(cv2.CAP_PROP_POS_MSEC, skip)
                    success, frame = cap.read()

                # Skip Back 3 seconds
                if key == ord('a'):
                    skip = cap.get(cv2.CAP_PROP_POS_MSEC) - 3000
                    cap.set(cv2.CAP_PROP_POS_MSEC, skip)
                    success, frame = cap.read()

                if key == ord('n'):
                    vidNum += 1
                    if len(videoLocation) > 1 and vidNum < len(videoLocation):
                        cap.release()
                        cap = cv2.VideoCapture(videoLocation[vidNum])
                    else:
                        if len(videoLocation) == 1:
                            self.tcVars.statusBox.AppendText(
                                'Only one video loaded\n')
                        else:
                            self.tcVars.statusBox.AppendText(
                                'Last video in list\n')

                # Quit Video Playback by pressing 'q' or ESC
                if key == 113 or key == 27:
                    break

                if success:
                    cv2.imshow('Video', frame)

        except Found:
            self.tcVars.statusBox.AppendText(
                'Finished making Pictures. Now review positives and delete false positives\n'
            )

        def reviewPics(posCount):
            self.tcVars.statusBox.AppendText(
                'a -- to go to previous picture\n')
            self.tcVars.statusBox.AppendText('d -- to go to next picture\n')
            self.tcVars.statusBox.AppendText(
                'x -- to delete picture from training arena\n')
            self.tcVars.statusBox.AppendText('q or Esc -- to skip review\n\n')
            count = posCount
            i = 1
            self.tcVars.statusBox.AppendText('image: ' + str(i) + '/' +
                                             str(count) + '\n')
            while i <= count:
                frame = cv2.imread(f'data/pos/pos{i}.jpg')
                while True:
                    key = cv2.waitKey(1) & 0xFF
                    cv2.imshow('Video', frame)
                    if key == ord('a'):
                        try:
                            if i > 1:
                                i -= 1
                                frame = cv2.imread(f'data/pos/pos{i}.jpg')
                                self.tcVars.statusBox.AppendText('image: ' +
                                                                 str(i) + '/' +
                                                                 str(count) +
                                                                 '\n')
                        except:
                            break

                    if key == ord('d'):
                        if i <= count:
                            i += 1
                            frame = cv2.imread(f'data/pos/pos{i}.jpg')
                            self.tcVars.statusBox.AppendText('image: ' +
                                                             str(i) + '/' +
                                                             str(count) + '\n')
                            break

                    if key == ord('x'):
                        if i <= count:
                            os.remove(f'data/pos/pos{i}.jpg')
                            posList.remove(
                                f'pos/pos{i}.jpg  1  0 0 {w} {h}\n')
                            posCount -= 1
                            i += 1
                            if i <= count:
                                frame = cv2.imread(f'data/pos/pos{i}.jpg')
                            self.tcVars.statusBox.AppendText('image: ' +
                                                             str(i) + '/' +
                                                             str(count) + '\n')
                            break

                    if key == 113 or key == 27:
                        i = count + 9000
                        break

            cv2.destroyAllWindows()
            retPath = os.getcwd()
            os.chdir('data/pos')
            renameList = glob.glob('*.jpg')

            def posSort(name):
                match = re.search(r'pos(\d+)\.jpg', name)
                return int(match.group(1))

            renameList.sort(key=posSort)

            renameNum = 1
            for j in renameList:
                os.rename(j, f'pos{renameNum}.jpg')
                renameNum += 1
            os.chdir(retPath)
            return posCount

        posCount = reviewPics(posCount)

        # write out background file
        with open('data/bg.txt', 'w+') as bgFile:
            bgFile.writelines(negList)
        # write out dat file
        with open('data/info.dat', 'w+') as datFile:
            datFile.writelines(posList)
        self.tcVars.statusBox.AppendText("Positive Count: " + str(posCount) +
                                         '\n')
        self.tcVars.statusBox.AppendText("Negative Count: " + str(negCount) +
                                         '\n')
        cap.release()
        cv2.destroyAllWindows()
        try:
            self.tcVars.w = w
            self.tcVars.h = h
        except:
            pass
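
objTrack leans on four converters (mask2Rect, mask2Box, box2Rect, box2Mask) that are not included in this excerpt. From their usage -- refFrame[box] slices a frame with the "mask", tracker.init wants (x, y, w, h), and cv2.rectangle wants corner points -- plausible implementations look like the sketch below; these are reconstructions under those assumptions, not the original helpers:

def mask2Rect(mask):
    # (row_slice, col_slice) -> ((x1, y1), (x2, y2)) corner points
    ys, xs = mask
    return (xs.start, ys.start), (xs.stop, ys.stop)

def mask2Box(mask):
    # (row_slice, col_slice) -> (x, y, w, h) for tracker.init
    ys, xs = mask
    return (xs.start, ys.start, xs.stop - xs.start, ys.stop - ys.start)

def box2Rect(box):
    # (x, y, w, h) -> ((x1, y1), (x2, y2))
    x, y, w, h = [int(v) for v in box]
    return (x, y), (x + w, y + h)

def box2Mask(box):
    # (x, y, w, h) -> numpy slices for cropping a frame
    x, y, w, h = [int(v) for v in box]
    return slice(y, y + h), slice(x, x + w)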
tracker_types = [
    'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'
]
tracker_type = tracker_types[2]

if tracker_type == 'BOOSTING':
    tracker = cv2.TrackerBoosting_create()
if tracker_type == 'MIL':
    tracker = cv2.TrackerMIL_create()
if tracker_type == 'KCF':
    tracker = cv2.TrackerKCF_create()
if tracker_type == 'TLD':
    tracker = cv2.TrackerTLD_create()
if tracker_type == 'MEDIANFLOW':
    tracker = cv2.TrackerMedianFlow_create()
if tracker_type == 'GOTURN':
    tracker = cv2.TrackerGOTURN_create()
if tracker_type == 'MOSSE':
    tracker = cv2.TrackerMOSSE_create()
if tracker_type == "CSRT":
    tracker = cv2.TrackerCSRT_create()

if flag != 1:

    ok, f = video.read()
    frame = cv2.resize(f, (0, 0), fx=0.5, fy=0.5)

    ok = tracker.init(frame, bbox)

while video.isOpened():
Ejemplo n.º 28
0
def main():

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    x0 = 200
    y0 = 200
    w0 = 224
    h0 = 224
    track_window = (x0, y0, w0, h0)
    # Reference Distance
    L0 = 100
    S0 = 50176  # 224x224, reference apparent area at distance L0

    # Base Distance
    LB = 100
    # Define an initial bounding box
    bbox = (x0, y0, w0, h0)  #(287, 23, 86, 320)
    CX0 = int(bbox[0] + 0.5 * bbox[2])  # reference center x (updated on ROI reselect)
    CY0 = int(bbox[1] + 0.5 * bbox[3])  # reference center y

    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        drone.takeoff()

        # skip first 300 frames
        frame_skip = 300
        while True:
            #------------------------------------------for start

            for frame in container.decode(video=0):

                speed = 100

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                start_time = time.time()

                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)

                # Start timer
                timer = cv2.getTickCount()

                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                #cv2.waitKey(1)

                # Update tracker
                ok, bbox = tracker.update(image)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Draw bounding box
                if ok:
                    #print('Tracking ok')
                    (x, y, w, h) = (int(bbox[0]), int(bbox[1]), int(bbox[2]),
                                    int(bbox[3]))
                    CX = int(bbox[0] + 0.5 * bbox[2])  # current center x
                    CY = int(bbox[1] + 0.5 * bbox[3])  # current center y
                    S = bbox[2] * bbox[3]  # current apparent area
                    print("CX,CY,S,x,y=", CX, CY, S, x, y)
                    # Tracking success
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)
                    p10 = (x0, y0)
                    p20 = (x0 + w0, y0 + h0)
                    cv2.rectangle(image, p10, p20, (0, 255, 0), 2, 1)

                    d = round(L0 * m.sqrt(S0 / S))
                    dx = x + w / 2 - CX0  # horizontal offset from the reference center
                    dy = y + h / 2 - CY0  # vertical offset from the reference center
                    print(d, dx, dy)

                    tracking(drone, d, dx, dy, LB)

                else:
                    # Tracking failure
                    #print('Tracking failure')
                    cv2.putText(image, "Tracking failure detected", (100, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                cv2.imshow('Original', image)

                key = cv2.waitKey(1) & 0xff
                if key == ord('q'):
                    print('Q!')
                    break

                if key == ord('r'):
                    roi_time = time.time()
                    bbox = cv2.selectROI(image, False)
                    print(bbox)
                    (x0, y0, w0, h0) = (int(bbox[0]), int(bbox[1]),
                                        int(bbox[2]), int(bbox[3]))
                    S0 = w0 * h0  # reference apparent area for the new ROI

                    CX0 = int(x0 + 0.5 * w0)  # reference center x
                    CY0 = int(y0 + 0.5 * h0)  # reference center y

                    # Initialize tracker with first frame and bounding box
                    ok = tracker.init(image, bbox)
                    '''
		    if frame.time_base < 1.0/60:
                        time_base = 1.0/60
                    else:
                        time_base = frame.time_base
                    frame_skip2 = int((time.time() - roi_time)/time_base)

		    if 0 < frame_skip2:
                        frame_skip2 = frame_skip2 - 1
                        continue
		    '''

                if frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)


#-------------------------------------------------for end
            break
        print('stop fly')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.land()
        drone.quit()
        cv2.destroyAllWindows()
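
The drone controller infers range from apparent size: with a pinhole camera, the width of a target scales as 1/d, so its apparent area S scales as 1/d^2, which rearranges to the d = L0 * sqrt(S0 / S) used above, with S0 the area measured at the reference distance L0. A quick numeric check of that relation:

import math

L0 = 100          # reference distance
S0 = 224 * 224    # apparent area at the reference distance, in pixels^2
S = 112 * 112     # the target now looks half as wide, so a quarter the area

d = round(L0 * math.sqrt(S0 / S))
print(d)  # 200 -- half the apparent width means twice the distance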
def multi_tracking(yolo, video_path):
    global percentResize
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    global multi_tracking_enable
    global arima_predict
    multi_tracking_enable = True
    tracker = cv2.MultiTracker_create()
    camera = cv2.VideoCapture(video_path)
    ok, image = camera.read()
    if not ok:
        print('Failed to read video')
        exit()
    boxes = []

    frameCount = 1
    startFrame = 1
    skipFrames = 25
    consecutiveframes = 1

    initialHistoryCount = 11
    skipHistory = 5

    extrapolate = 3
    dof = 0

    yoloCount = 0
    countCond = 0
    xhistory = []
    yhistory = []
    depth_history = []
    while(True):
        if(frameCount == startFrame):
            frame = Image.fromarray(image)
            frame, boxes = yolo.detect_image(frame)
            #yolo.close_session()
            break
        ok, image = camera.read()
        frameCount += 1
    #np.set_printoptions(suppress = True)
    boxes = np.asarray(boxes)
    boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
    boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
    boxes = np.ndarray.tolist(boxes)
    prevBoxes = len(boxes)
    curBoxes = prevBoxes
    #print(boxes)
    #np.savetxt('boxes.txt', boxes, fmt = "%i")
    #return boxes
    #boxes = []
    manualBox = 0
    for i in range(manualBox):
        box = cv2.selectROI('tracking', image)
        boxes.append(box)

    for eachBox in boxes:
        eachBox = tuple(eachBox)
        xhistory.append([int(eachBox[0] + eachBox[2] / 2)])
        yhistory.append([int(eachBox[1] + eachBox[3] / 2)])
        ok = tracker.add(cv2.TrackerMedianFlow_create(), image, eachBox)

    while(True):
        ok, image=camera.read()
        if not ok:
            break
        orig_image = image.copy()

        if(prevBoxes != curBoxes):
            countCond += 1
        if(frameCount % skipFrames == 0):
            #print(consecutiveframes)
            consecutiveframes = 1
            tracker = cv2.MultiTracker_create()
            frame = Image.fromarray(image)
            boxes = []
            frame, boxes = yolo.detect_image(frame)
            yoloCount += 1
            boxes = np.asarray(boxes)
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
            boxes = np.ndarray.tolist(boxes)
            prevBoxes = len(boxes)
            curBoxes = None
            xhistory = []
            yhistory = []
            depth_history = []
            for eachBox in boxes:
                eachBox = tuple(eachBox)
                xhistory.append([int(eachBox[0] + eachBox[2] / 2)])
                yhistory.append([int(eachBox[1] + eachBox[3] / 2)])
                ok = tracker.add(cv2.TrackerMedianFlow_create(), image, eachBox)
            #frameCount += 1
            #continue

        ok, boxes = tracker.update(image)
        for i in range(len(boxes)):
            xhistory[i].append(int(boxes[i][0] + boxes[i][2] / 2))
            yhistory[i].append(int(boxes[i][1] + boxes[i][3] / 2))
        if(arima_predict and len(xhistory) > 0 and len(xhistory[0]) > initialHistoryCount):

            #if(len(xhistory[i]) > 27): dof = 5
            #print(xhistory[0])

            for i in range(len(boxes)):
                history = [xhistory[i][t] for t in range(0, len(xhistory[i]), skipHistory)]
                xmin = min(history)
                history[:] = [x - xmin for x in history]
                xmax = max(history)
                if(xmax == 0): xmax = 1
                history[:] = [x / xmax for x in history]
                #print('xh', len(history))
                for j in range(extrapolate):
                    xmodel = ARIMA(history, order = (dof, 1, 0))
                    xmodel_fit = xmodel.fit(disp = 0, maxiter=200)
                    xoutput = xmodel_fit.forecast()
                    history.append(xoutput[0])
                xhat = int((xoutput[0] * xmax) + xmin)
                #xhat = xoutput[0]
                history = [yhistory[i][t] for t in range(0, len(yhistory[i]), skipHistory)]
                ymin = min(history)
                history[:] = [y - ymin for y in history]
                #history = [yhistory[i][0], yhistory[i][int(len(yhistory[i]) / 2)], yhistory[i][len(yhistory[i]) - 1]]
                ymax = max(history)
                if(ymax == 0): ymax = 1
                history[:] = [y / ymax for y in history]
                #print('yh', len(history))
                for j in range(extrapolate):
                    ymodel = ARIMA(history, order = (dof, 1, 0))
                    ymodel_fit = ymodel.fit(disp = 0, maxiter=200)
                    youtput = ymodel_fit.forecast()
                    history.append(youtput[0])
                yhat = int((youtput[0] * ymax) + ymin)
                #yhat = youtput[0]
                cp1 = int(boxes[i][0] + boxes[i][2] / 2)
                cp2 = int(boxes[i][1] + boxes[i][3] / 2)
                cv2.arrowedLine(image, (int(xhistory[i][0]),int(yhistory[i][0])), (cp1, cp2), (0, 255, 0), 2)
                cv2.arrowedLine(image, (cp1, cp2), (xhat, yhat), (0, 0, 255), 2)
                #slope = math.abs(math.atan((yhat - cp2) / (xhat - cp1)))
                #speed = math.sqrt((yhat - cp2) * (yhat - cp2) + (xhat - cp1) * (xhat - cp1))
                #percentChange = 0.0
                #if(yhat >= cp2):

                p1 = (int(xhat - boxes[i][2] / 2), int(yhat - boxes[i][3] / 2))
                p2 = (int(xhat + boxes[i][2] / 2), int(yhat + boxes[i][3] / 2))
                cv2.rectangle(image, p1, p2, (255, 255, 255), 1)
        for newbox in boxes:
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            cv2.rectangle(image, p1, p2, (200,0,0), 2)

        if(depthMapEstimation):
            depth_est = image_depth(orig_image)
            dof = 0
            current_depth_est = depth_est.copy()
            pred_depth_est = depth_est.copy()
            pd = 'OFF'
            for i in range(len(boxes)):
                p1 = (int(boxes[i][0]), int(boxes[i][1]))
                p2 = (int(boxes[i][0] + boxes[i][2]), int(boxes[i][1] + boxes[i][3]))
                current_depth = cal_depth_box(depth_est, p1, p2)
                if(len(depth_history) < len(boxes)):
                    depth_history.append([current_depth])
                else:
                    depth_history[i].append(current_depth)
                if(math.isnan(current_depth)):
                    continue
                if(len(depth_history[i]) > initialHistoryCount):
                    pd = 'ON'
                    history = np.nan_to_num(depth_history[i])
                    history = [history[t] for t in range(0, len(history), skipHistory)]
                    dmin = min(history)
                    history[:] = [d - dmin for d in history]
                    dmax = max(history)
                    if(dmax == 0): dmax = 1
                    history[:] = [d / dmax for d in history]
                    for j in range(extrapolate):
                        dmodel = ARIMA(history, order = (0, 1, 0))
                        dmodel_fit = dmodel.fit(disp = 0, maxiter=200)
                        doutput = dmodel_fit.forecast()
                        history.append(doutput[0])
                    #print(doutput[0])
                    if(not math.isnan(doutput[0])):
                        dhat = int((doutput[0] * dmax) + dmin)
                    else:
                        dhat = current_depth

                    current_depth_est = set_depth(current_depth_est, p1, p2, current_depth)
                    if(math.isnan(current_depth)):
                        print("wtf just happened")
                    cv2.putText(current_depth_est,text=str(int(current_depth)), org=(p1[0], p1[1]), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=0.50, color=(0, 0, 255), thickness=1)
                    pred_depth_est = set_depth(pred_depth_est, p1, p2, dhat)
                    cv2.putText(pred_depth_est,text=str(int(dhat)), org=(p1[0], p1[1]), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=0.50, color=(0, 0, 255), thickness=1)

            cv2.putText(pred_depth_est, text=pd, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50, color=(0, 0, 255), thickness=2)
            #cv2.namedWindow("curdepth", cv2.WINDOW_NORMAL)
            current_depth_est = cv2.resize(current_depth_est, (0,0), fx=percentResize, fy=percentResize)
            cv2.imshow('curdepth', current_depth_est)
            #cv2.namedWindow("predepth", cv2.WINDOW_NORMAL)
            pred_depth_est = cv2.resize(pred_depth_est, (0,0), fx=percentResize, fy=percentResize)
            cv2.imshow('predepth', pred_depth_est)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(image, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        #cv2.namedWindow("tracking", cv2.WINDOW_NORMAL)
        image = cv2.resize(image, (0,0), fx=percentResize, fy=percentResize)
        cv2.imshow('tracking', image)
        frameCount += 1
        consecutiveframes += 1
        k = cv2.waitKey(1)
        if k == 27: break  # Esc pressed
    print(yoloCount)
    print(countCond)
    yolo.close_session()
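
The x, y and depth branches above repeat the same dance: subsample the history, min-max normalise, fit ARIMA(dof, 1, 0), roll the one-step forecast forward, de-normalise. A helper that consolidates the pattern (a sketch assuming the same older statsmodels ARIMA API the function above uses):

def forecast_series(series, skip, steps, dof=0):
    # subsample and min-max normalise the history
    history = [series[t] for t in range(0, len(series), skip)]
    lo = min(history)
    history = [v - lo for v in history]
    hi = max(history) or 1
    history = [v / hi for v in history]
    # fit and roll the one-step forecast forward `steps` times
    output = None
    for _ in range(steps):
        model_fit = ARIMA(history, order=(dof, 1, 0)).fit(disp=0, maxiter=200)
        output = model_fit.forecast()
        history.append(output[0])
    return int(output[0] * hi + lo)

# e.g. xhat = forecast_series(xhistory[i], skipHistory, extrapolate)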
Ejemplo n.º 30
0
    # if no video frame exists, break
    if not ret: break
    #when tracking is not on
    if initBB is None:
        facerect = cascade.detectMultiScale(frame,
                                            scaleFactor=1.3,
                                            minNeighbors=1,
                                            minSize=(80, 80))
        # when face detected
        if len(facerect) > 0:
            # the cascade returns (x, y, w, h), which is what tracker.init expects
            initBB = tuple(int(v) for v in facerect[0])
            print("Detected!:")
            print(initBB)
            tracker = cv2.TrackerMedianFlow_create()  # MedianFlow is also pretty good
            #tracker = cv2.TrackerTLD_create()
            tracker.init(frame, initBB)
            for rect in facerect:
                cv2.rectangle(frame,
                              tuple(rect[0:2]),
                              tuple(rect[0:2] + rect[2:4]),
                              rectangle_color,
                              thickness=2)

    #when tracking is on
    else:
        (success, box) = tracker.update(frame)
        # check to see if the tracking was a success
        if success:
            (x, y, w, h) = [int(v) for v in box]