Example #1
import cv2
import sys


def get_frame_from(cap):
    # read a single frame; abort cleanly when the stream ends
    ok, frame = cap.read()
    if not ok:
        sys.exit('Could not read frame')
    return frame


def detect_moving_target(old_gray, new_gray):
    # minimal body (assumed): difference two grayscale frames and
    # threshold the result to highlight moving regions
    diff = cv2.absdiff(old_gray, new_gray)
    _, motion_mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    return motion_mask


# open the input video
cap = cv2.VideoCapture('slow.MOV')

# grab the first frame
old_frame = get_frame_from(cap)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

# initiate KCF tracker
tracker = cv2.TrackerKCF_create()

# possible movements ROI

# start the tracking
while True:
    # Record FPS
    timer = cv2.getTickCount()

    # read the current frame
    cur_frame = get_frame_from(cap)

    # convert to grayscale
    cur_gray = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)

    # Calculate Frames per second (FPS)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
    # Display FPS on frame
    cv2.putText(cur_frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
    # display cur frame
    cv2.imshow('video', cur_frame)
    # waitkey
    if cv2.waitKey(1) == ord('q'):
        break


(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use any of the trackers listed below.

    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    # Read video
    video = cv2.VideoCapture('slow.MOV')

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    frame = cv2.resize(frame, dsize=(0, 0), fx=0.5, fy=0.5)

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Select a bounding box interactively (this replaces the default above)
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        frame = cv2.resize(frame, dsize=(0, 0), fx=0.5, fy=0.5)

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2);

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
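

# The version-dispatch chain above recurs in nearly every example on this
# page. A consolidated helper, sketched under the same OpenCV 3.x contrib
# API assumed above (in OpenCV 4.5+ these constructors moved to cv2.legacy):
def create_tracker(name):
    (major_ver, minor_ver, _) = cv2.__version__.split('.')
    if int(major_ver) == 3 and int(minor_ver) < 3:
        # OpenCV 3.2 and earlier expose a single factory function
        return cv2.Tracker_create(name)
    constructors = {
        'BOOSTING': cv2.TrackerBoosting_create,
        'MIL': cv2.TrackerMIL_create,
        'KCF': cv2.TrackerKCF_create,
        'TLD': cv2.TrackerTLD_create,
        'MEDIANFLOW': cv2.TrackerMedianFlow_create,
        'GOTURN': cv2.TrackerGOTURN_create,
    }
    return constructors[name]()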
Example #2
# Track FPS
fps_global = FPS().start()

# Multitracker
all_trackers = cv2.MultiTracker_create()


# Set up tracker.
# Instead of MIL, you can also use any of the trackers listed below.
tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
tracker_type = tracker_types[3]
trackers = []
ntrackers = 2
# parse the OpenCV version so minor_ver is defined
(major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
if int(minor_ver) < 3:
    for i in range(ntrackers):
        trackers.append(cv2.Tracker_create(tracker_type))
else:
    for i in range(ntrackers):
        if tracker_type == 'BOOSTING':
            trackers.append(cv2.TrackerBoosting_create())
        if tracker_type == 'MIL':
            trackers.append(cv2.TrackerMIL_create())
        if tracker_type == 'KCF':
            trackers.append(cv2.TrackerKCF_create())
        if tracker_type == 'TLD':
            trackers.append(cv2.TrackerTLD_create())
        if tracker_type == 'MEDIANFLOW':
            trackers.append(cv2.TrackerMedianFlow_create())
        if tracker_type == 'GOTURN':
            trackers.append(cv2.TrackerGOTURN_create())
 
def spider(video, algorithm, outSize):
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split(".")
    if int(major_ver) < 3:
        tracker = cv2.Tracker_create(algorithm)
    else:
        if algorithm == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()

        if algorithm == 'MIL':
            tracker = cv2.TrackerMIL_create()

        if algorithm == 'KCF':
            tracker = cv2.TrackerKCF_create()

        if algorithm == 'TLD':
            tracker = cv2.TrackerTLD_create()

        if algorithm == 'CSRT':
            tracker = cv2.TrackerCSRT_create()

    # open the video and check that it is valid
    video_cap = cv2.VideoCapture(video)
    if not video_cap.isOpened():
        print("Video is not here :(")
        sys.exit()
    ok, frame = video_cap.read()
    if not ok:
        print("Something goes wrong with the video :(")
        sys.exit()

    # select the initial bounding box
    bbox = cv2.selectROI(frame, False)
    ok = tracker.init(frame, bbox)
    # record the initial box
    prev = bbox
    current = list(prev)
    dimensions = frame.shape

    # get the source frame size
    size = (int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    print(size)
    print(type(outSize))
    outSize_write = (int(outSize[0]), int(outSize[1]))

    # call the VideoWriter() constructor
    fps = 30
    output = cv2.VideoWriter('MySaveVideo.mp4',
                             cv2.VideoWriter_fourcc('M', 'P', '4', 'V'), fps,
                             outSize_write)
    # initialize the cropped region
    #chopped = frame[current[0]:(current[0]+int(outSize[0])),current[1]:(current[1]+int(outSize[1]))]
    chopped = frame[current[1]:current[1] + outSize[1],
                    current[0]:current[0] + outSize[0]]  # rows, then columns

    while True:
        ok, frame = video_cap.read()
        if not ok:
            print("All frames are shown :)")
            break

        timer = cv2.getTickCount()
        ok, bbox = tracker.update(frame)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        if ok:
            # raw tracker result
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 1, 1)

            # anti-jitter: freeze the box when the tracker jumps too far
            for i in range(0, 3):
                if abs(bbox[i] - prev[i]) > 5 or abs(current[i] - bbox[i]) > 6:
                    current[i] = int(prev[i])
            p1 = (int(current[0]), int(current[1]))
            p2 = (int(current[0] + current[2]), int(current[1] + current[3]))
            cv2.rectangle(frame, p1, p2, (0, 255, 255), 4, 1)

            # output video crop
            left = current[0]
            up = current[1]
            right = current[0] + current[2]
            down = current[1] + current[3]
            # chopped = frame[current[0]:(current[0]+int(outSize[0])),current[1]:(current[1]+int(outSize[1]))]

            #print("left:",left,"   right:", right, "   up:",up,"   down:",down)

            if (current[1] < 0) or (
                    current[1] + outSize[1] > dimensions[0]
            ) or (current[0] < 0) or (current[0] + outSize[0] > dimensions[1]):
                print("ERROR:超出范围,无法录制 :(")
            else:
                chopped = frame[current[1]:current[1] + outSize[1],
                                current[0]:current[0] + outSize[0]]  # rows, then columns
                output.write(chopped)

        # else:
        #     cv2.putText(frame,"failure detected")
        prev = bbox

        cv2.imshow("Tracking", chopped)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_cap.release()
    output.release()
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
	help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
	help="OpenCV object tracker type")
args = vars(ap.parse_args())

# extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2]

# if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
# function to create our object tracker
if int(major) == 3 and int(minor) < 3:
	tracker = cv2.Tracker_create(args["tracker"].upper())

# otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
# appropriate object tracker constructor:
else:
	# initialize a dictionary that maps strings to their corresponding
	# OpenCV object tracker implementations
	OPENCV_OBJECT_TRACKERS = {
		"csrt": cv2.TrackerCSRT_create,
		"kcf": cv2.TrackerKCF_create,
		"boosting": cv2.TrackerBoosting_create,
		"mil": cv2.TrackerMIL_create,
		"tld": cv2.TrackerTLD_create,
		"medianflow": cv2.TrackerMedianFlow_create,
		"mosse": cv2.TrackerMOSSE_create
	}
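
	# the dict maps names to constructor functions; calling the looked-up
	# entry instantiates the tracker (as Examples #8 and #23 below do)
	tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()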
# Build box around both panels
print(boatD._bluePanelRect, boatD._redPanelRect)
panels = np.vstack((boatD._redPanelRect, boatD._bluePanelRect))

box = cv2.minAreaRect(panels)
box = cv2.boxPoints(box)
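
# boxPoints() returns the four corners of the rotated rect from minAreaRect();
# the per-axis min/max below converts them to an axis-aligned (x, y, w, h) box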

x, y = np.min(box, axis=0)
w, h = np.max(box, axis=0)
w -= x
h -= y

box = (x, y, w, h)

boatTracker = cv2.Tracker_create("MIL")

boatTracker.init(img, box)
print("Tracker initialized")

while data.more():
    img = data.read()
    boatDetected, box = boatTracker.update(img)

    if boatDetected:
        p1 = (int(box[0]), int(box[1]))
        p2 = (int(box[0] + box[2]), int(box[1] + box[3]))
        cv2.rectangle(img, p1, p2, (0, 0, 255), 2)

    cv2.imshow('img', img)
    k = cv2.waitKey(1) & 0xff
Example #6
def main():

    # parse the OpenCV version so minor_ver is defined below
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    x0 = 200
    y0 = 200
    w0 = 224
    h0 = 224
    track_window = (x0, y0, w0, h0)
    # Reference Distance
    L0 = 100
    S0 = 50176  # 224 * 224 (reference area)

    # Base Distance
    LB = 100
    # Define an initial bounding box
    bbox = (x0, y0, w0, h0)  #(287, 23, 86, 320)
    # center of the initial box; needed by the dx/dy computation below,
    # which would otherwise hit undefined CX0/CY0 until 'r' re-selects a ROI
    CX0 = int(x0 + 0.5 * w0)
    CY0 = int(y0 + 0.5 * h0)

    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        drone.takeoff()

        # skip first 300 frames
        frame_skip = 300
        while True:
            #------------------------------------------for start

            for frame in container.decode(video=0):

                speed = 100

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                start_time = time.time()

                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)

                # Start timer
                timer = cv2.getTickCount()

                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                #cv2.waitKey(1)

                # Update tracker
                ok, bbox = tracker.update(image)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Draw bounding box
                if ok:
                    #print('Tracking ok')
                    (x, y, w, h) = (int(bbox[0]), int(bbox[1]), int(bbox[2]),
                                    int(bbox[3]))
                    CX = int(bbox[0] + 0.5 * bbox[2])  #Center of X
                    CY = int(bbox[1] + 0.5 * bbox[3])
                    S0 = bbox[2] * bbox[3]
                    print("CX,CY,S0,x,y=", CX, CY, S0, x, y)
                    # Tracking success
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)
                    p10 = (x0, y0)
                    p20 = (x0 + w0, y0 + h0)
                    cv2.rectangle(image, p10, p20, (0, 255, 0), 2, 1)

                    # pinhole-model distance estimate: apparent area scales as
                    # 1/d^2, so d = L0 * sqrt(reference area / current area)
                    d = round(L0 * m.sqrt(S0 / (w * h)))
                    dx = x + w / 2 - CX0  #no change dx
                    dy = y + h / 2 - CY0
                    print(d, dx, dy)

                    tracking(drone, d, dx, dy, LB)

                else:
                    # Tracking failure
                    #print('Tracking failure')
                    cv2.putText(image, "Tracking failure detected", (100, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                cv2.imshow('Original', image)

                key = cv2.waitKey(1) & 0xff
                if key == ord('q'):
                    print('Q!')
                    break

                if key == ord('r'):
                    roi_time = time.time()
                    bbox = cv2.selectROI(image, False)
                    print(bbox)
                    (x0, y0, w0, h0) = (int(bbox[0]), int(bbox[1]),
                                        int(bbox[2]), int(bbox[3]))

                    CX0 = int(x0 + 0.5 * w0)  #Center of X
                    CY0 = int(y0 + 0.5 * h0)

                    # Initialize tracker with first frame and bounding box
                    ok = tracker.init(image, bbox)
                    '''
		    if frame.time_base < 1.0/60:
                        time_base = 1.0/60
                    else:
                        time_base = frame.time_base
                    frame_skip2 = int((time.time() - roi_time)/time_base)

		    if 0 < frame_skip2:
                        frame_skip2 = frame_skip2 - 1
                        continue
		    '''

                if frame.time_base < 1.0 / 60:
                    time_base = 1.0 / 60
                else:
                    time_base = frame.time_base
                frame_skip = int((time.time() - start_time) / time_base)


#-------------------------------------------------for end
            break
        print('stop fly')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.land()
        drone.quit()
        cv2.destroyAllWindows()
# cvdrone.de
#
# evaluating different opencv trackers
#
# [email protected]

from pydrone import *

import cv2
import imutils
import detectors
import hud

tracker = cv2.Tracker_create("KCF")
video = cv2.VideoCapture("/home/user/opencv/videos/correlation-tracking.mp4")
#video = cv2.VideoCapture(0)

cv2.namedWindow("video")

person_found = False
frame_idx = 0
pad = 5


def prepare_frame(frame):
    frame = imutils.resize(frame, width=1024)
    return frame


def get_position(newbox):
    x1 = int(newbox[0])
Example #8
    def init(self):

        # construct the argument parser and parse the arguments
        ap = argparse.ArgumentParser()

        # use for video file
        ap.add_argument("-v",
                        "--video",
                        type=str,
                        help="path to input video file")
        ap.add_argument("-t",
                        "--tracker",
                        type=str,
                        default="kcf",
                        help="OpenCV object tracker type")
        args = vars(ap.parse_args())

        # extract the OpenCV version info
        (major, minor) = cv2.__version__.split(".")[:2]

        # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
        # function to create our object tracker
        if int(major) == 3 and int(minor) < 3:
            self.tracker = cv2.Tracker_create(args["tracker"].upper())

        # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
        # appropriate object tracker constructor:
        else:
            # initialize a dictionary that maps strings to their corresponding
            # OpenCV object tracker implementations
            OPENCV_OBJECT_TRACKERS = {
                "csrt": cv2.TrackerCSRT_create,
                "kcf": cv2.TrackerKCF_create,
                "boosting": cv2.TrackerBoosting_create,
                "mil": cv2.TrackerMIL_create,
                "tld": cv2.TrackerTLD_create,
                "medianflow": cv2.TrackerMedianFlow_create,
                "mosse": cv2.TrackerMOSSE_create
            }

            # grab the appropriate object tracker using our dictionary of
            # OpenCV object tracker objects
            self.tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()

        # initialize the bounding box coordinates of the object we are going
        # to track, and the object scale (set regardless of OpenCV version)
        self.initBB = None
        self.initScale = None

        # if a video path was not supplied, grab the reference to the web cam
        if not args.get("video", False):
            print('\n' + "[INFO] starting video stream...")
            self.vs = VideoStream(src=0).start()
            time.sleep(1.0)

        # otherwise, grab a reference to the video file
        else:
            self.vs = cv2.VideoCapture(args["video"])

        # initialize the FPS throughput estimator
        self.fps = None
        self.com_output = input('Output COM info? <Y/N>')
Example #9
        speed_y_inner = 0
        open_1_inner = 0
        open_2_inner = 0
        open_3_inner = 0
        return point_x_inner, point_y_inner, speed_x_inner, speed_y_inner, open_1_inner, open_2_inner, open_3_inner


if __name__ == "__main__":

    # global variables
    fps = 29  # parameter 1: frame rate (currently unused)
    pre_frame = None  # parameter 1: previous (grayscale) frame, used to detect moving objects
    entropy_last = 0  # parameter 2: previous frame's jitter value
    flag = 0  # whether a tracking target has been selected
    point_x, point_y = 0, 0  # parameter 3: initial motion point
    tracker = cv2.Tracker_create(
        "KCF")  # BOOSTING, KCF, TLD, MEDIANFLOW or GOTURN

    # video input: file or camera
    # camera = cv2.VideoCapture("video/sample2_1.mp4")
    camera = cv2.VideoCapture(0)
    if not camera.isOpened():
        print('Connect a camera or provide a video file first')
        exit()

    while True:
        res, cur_frame = camera.read()
        if not res:
            break

        # frame-skip settings, used when reading from a video file
        '''
Example #10
def main():
    # parse the OpenCV version so minor_ver is defined below
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'] 
    tracker_type = tracker_types[2]
     
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    
    # coordinates and size of the box to track
    x = 200
    y = 200
    w = 224
    h = 224
    track_window=(x,y,w,h)
    # Reference Distance
    L0 = 100
    #S0 = 50176 #224x224

    # Base Distance
    LB = 100        
    # Define an initial bounding box
    bbox = (x, y, w, h)   #(287, 23, 86, 320) 
    
    drone = tellopy.Tello()
    drone.connect()

    container = av.open(drone.get_video_stream()) 
    drone.takeoff()
    #drone.is_autopilot="True"
    drone.is_autopilot="False"    
    
    while True:
        for frame in container.decode(video=0):
        
            image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR)
               
            # Start timer
            timer = cv2.getTickCount()
 
            # Update tracker
            ok, bbox = tracker.update(image)
            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            # Draw bounding box
            if ok:
                (x,y,w,h) = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))
                CX=int(bbox[0]+0.5*bbox[2])
                CY=int(bbox[1]+0.5*bbox[3])
                S0=bbox[2]*bbox[3]
                print("CX,CY,S0=",CX,CY,S0)
                # Tracking success
                p1 = (x, y)
                p2 = (x + w, y + h)
                cv2.rectangle(image, p1, p2, (255,0,0), 2, 1)
            else:
                # Tracking failure
                cv2.putText(image, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
            # Display tracker type on frame
            cv2.putText(image, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2)
     
            # Display FPS on frame
            cv2.putText(image, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)
            cv2.imshow('test',image)

            key = cv2.waitKey(1)&0xff
            if key == ord('a'):
                drone.is_autopilot="True"
            elif key == ord('s'):
                drone.is_autopilot="False"            
            
            if drone.is_autopilot=="True":
                d = round(L0 * m.sqrt(S0 / (w * h)))
                dx = x + w/2 - CX
                dy = y + h/2 - CY
                print(d,dx,dy,drone.is_autopilot,w,h)
                tracking(drone,d,dx,dy,LB)
            elif drone.is_autopilot=="False":
                key_Operation(drone,key)
                print("key=",key,ord('q'))
                
            #key = cv2.waitKey(1)&0xff
            print("key=",key,ord('q'))
            if key == ord('q'):
                cv2.destroyAllWindows()
                break
            elif key == ord('r'):
                bbox = cv2.selectROI(image, False)
                print(bbox)
                (x,y,w,h) = (int(bbox[0]),int(bbox[1]),int(bbox[2]),int(bbox[3]))
                # Initialize tracker with first frame and bounding box
                ok = tracker.init(image, bbox)
                
        break
    drone.down(50)
    sleep(5)
    drone.land()    
    drone.subscribe(drone.EVENT_FLIGHT_DATA, handler)    
    drone.quit()   
def track_ecoli(regions):
    # parse the OpenCV version so minor_ver is defined below
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    # MIL appears to work best...
    tracker_type = tracker_types[1]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
    root_path = 'C:\\dev\\courses\\2.131 - Advanced Instrumentation\\'
    img_path = root_path + 'E.coli.tif'
    pil_img = Image.open(img_path)
    frame = np.array(pil_img)
    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    # out = cv2.VideoWriter(root_path + 'ecoli_track1.avi',
    #                       fourcc, 5.0,(640,480) )

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Select a bounding box interactively (this replaces the default above)
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    csvfile = open(root_path + 'E.coli1.csv', 'w', newline='')
    bac_writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)

    bac_writer.writerow(['BacteriaId', 'Time', 'X', 'Y'])
    for i in range(149):
        # Read a new frame
        pil_img.seek(i)
        frame = np.array(pil_img)

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

            bac_writer.writerow(
                [1, i,
                 int(bbox[0] + bbox[2] / 2),
                 int(bbox[1] + bbox[3] / 2)])
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, "2.131 E.Coli Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        frame = frame[:480, :640, ...]

        array_alpha = np.array([1.45])
        array_beta = np.array([-150.0])

        # add beta (a brightness offset) to every pixel value
        cv2.add(frame, array_beta, frame)
        # multiply every pixel value by alpha
        cv2.multiply(frame, array_alpha, frame)

        # out.write(frame)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
    csvfile.close()
Example #12
import cv2
import sys
import time

if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use
    # BOOSTING, KCF, TLD, MEDIANFLOW or GOTURN (GOTURN doesn't work)

    tracker = cv2.Tracker_create("BOOSTING")

    # Read video
    video = cv2.VideoCapture("test_video_trimmed.mp4")

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    bbox = (50, 250, 1150, 100)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
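
    # The snippet stops after init; a minimal continuation sketch (assumed,
    # following the update-loop pattern of Example #1):
    while True:
        ok, frame = video.read()
        if not ok:
            break
        ok, bbox = tracker.update(frame)
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        cv2.imshow("Tracking", frame)
        if cv2.waitKey(1) & 0xff == 27:  # ESC quits
            break
    video.release()
    cv2.destroyAllWindows()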
Example #13
                frame = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
                frame_resized = cv2.resize(frame,
                                           (screen_width, screen_height))
                cv2.imshow('frame', frame_resized)

                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
                elif key == ord('p'):
                    print("Frame: " +
                          str(video_capture.get(cv2.CAP_PROP_POS_FRAMES)))
                    plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    plt.show()
        elif algorithm == 3 or algorithm == 4:
            # create tracker
            if algorithm == 3:
                tracker = cv2.Tracker_create("MIL")
            elif algorithm == 4:
                tracker = cv2.Tracker_create("KCF")
            # todo second time around it doesn't recreate tracker with new algorithm. I think it needs to be destroyed

            # initialize tracker bounding box
            bbox = (int(x), int(y), int(w), int(h))

            ok = tracker.init(frame, bbox)

            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (0, 0, 255))

            while video_capture.isOpened():
                ret, frame = video_capture.read()
def extractFrame(videoPath):

    videoFrames = []
    playerBoxes = []

    # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
    # function to create our object tracker
    if args.detector == "tracker":
        if int(major) == 3 and int(minor) < 3:
            if args.singleTracker:
                tracker = cv2.Tracker_create(args.tracker.upper())

        # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
        # appropriate object tracker constructor:
        else:
            # initialize a dictionary that maps strings to their corresponding
            # OpenCV object tracker implementations

            if args.singleTracker:
                OPENCV_OBJECT_TRACKERS = {
                    "csrt": cv2.legacy.TrackerCSRT_create(),
                    "kcf": cv2.legacy.TrackerKCF_create(),
                    "mil": cv2.legacy.TrackerMIL_create()
                }

                tracker = OPENCV_OBJECT_TRACKERS[args.tracker]()

    # initialize the bounding box coordinates of the object we are going
    # to track
    initBB = None
    # initialize the FPS throughput estimator
    fps = None

    # Set up Neural Net
    net = cv2.dnn.readNet(args.weights, args.config)

    cap = cv2.VideoCapture(videoPath)

    player_threshold = 99999

    if not args.singleTracker:
        # Read first frame
        success, frame = cap.read()
        # quit if unable to read the video file
        if not success:
            print('Failed to read video')
            sys.exit(1)

        ## Select boxes
        bboxes = []
        colors = []

        # OpenCV's selectROI function doesn't work for selecting multiple objects in Python
        # So we will call this function in a loop till we are done selecting all objects
        while True:
            # draw bounding boxes over objects
            # selectROI's default behaviour is to draw box starting from the center
            # when fromCenter is set to false, you can draw box starting from top left corner
            bbox = cv2.selectROI('MultiTracker', frame)
            bboxes.append(bbox)
            colors.append((randint(0, 255), randint(0, 255), randint(0, 255)))
            print("Press q to quit selecting boxes and start tracking")
            print("Press any other key to select next object")
            k = cv2.waitKey(0) & 0xFF
            print(k)
            if (k == 113):  # q is pressed
                break

        print('Selected bounding boxes {}'.format(bboxes))


        # Create MultiTracker object
        trackers = cv2.legacy.MultiTracker_create()

        # Initialize MultiTracker
        for bbox in bboxes:
            trackers.add(createTrackerByName(args.tracker), frame, bbox)

    frameCount = 0
    while (cap.isOpened()):
        print(frameCount)
        # Take each frame
        ret, frame = cap.read()

        if not ret:
            print("Video Ended")
            break

        Width = frame.shape[1]
        Height = frame.shape[0]

        # Convert BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Hard-Coded Color
        # court_color = np.uint8([[[188, 218, 236]]])
        court_color = np.uint8([[[189, 204, 233]]])

        hsv_court_color = cv2.cvtColor(court_color, cv2.COLOR_BGR2HSV)
        hue = hsv_court_color[0][0][0]

        # define range of blue color in HSV - Again HARD CODED! :(
        lower_color = np.array([hue - 5, 10, 10])
        upper_color = np.array([hue + 5, 225, 225])

        # Threshold the HSV image
        mask = cv2.inRange(hsv, lower_color, upper_color)

        # Opening
        kernel = np.ones((2, 2), np.uint8)
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=opening)
        cv2.imshow('res', res)

        if args.draw_line:
            # Canny Edge Detector
            gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

            high_thresh, thresh_im = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            low_thresh = 0.5 * high_thresh
            edges = cv2.Canny(gray, low_thresh, high_thresh, apertureSize=3)
            cv2.imshow('Canny Edge Detector', edges)

            # # Hough Lines
            minLineLength = 200
            maxLineGap = 500
            lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, minLineLength=minLineLength, maxLineGap=maxLineGap)

            # Green color in BGR
            LINE_COLOR = (255, 0, 0)

            if lines is None:
                continue
            else:
                a, b, c = lines.shape
                for i in range(2):
                    for x1, y1, x2, y2 in lines[i]:
                        # cv2.line(image, start_point, end_point, color, thickness)
                        if args.draw_line:
                            cv2.line(frame, (x1, y1), (x2, y2), LINE_COLOR, 3)
                        # only compare the lower corner of y value
                        player_threshold = min(player_threshold, y1, y2)

        # Detect People
        if args.detector == "HOG":
            # initialize the HOG descriptor/person detector
            hog = cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

            orig = frame.copy()

            # detect people in the image
            (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),
                                                    padding=(8, 8), scale=1.05)
            # draw the original bounding boxes
            for (x, y, w, h) in rects:
                cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # apply non-maxima suppression to the bounding boxes using a
            # fairly large overlap threshold to try to maintain overlapping
            # boxes that are still people
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.1)
            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

        elif args.detector == "yolov3":
            scale = 0.00392
            blob = cv2.dnn.blobFromImage(frame, scale, (416, 416), (0, 0, 0), True, crop=False)
            net.setInput(blob)
            outs = net.forward(get_output_layers(net))

            class_ids = []
            confidences = []
            boxes = []
            conf_threshold = 0.5
            nms_threshold = 0.4

            for out in outs:
                for detection in out:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.5:
                        center_x = int(detection[0] * Width)
                        center_y = int(detection[1] * Height)
                        w = int(detection[2] * Width)
                        h = int(detection[3] * Height)
                        x = center_x - w / 2
                        y = center_y - h / 2
                        class_ids.append(class_id)
                        confidences.append(float(confidence))
                        boxes.append([x, y, w, h])

            indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)

            k = 0
            for i in indices:
                i = i[0]
                box = boxes[i]
                x = box[0]
                y = box[1]
                w = box[2]
                h = box[3]
                pad = 5
                # print(player_threshold)
                if (round(y + h) < player_threshold):
                    k += 1
                    continue
                else:
                    draw_prediction(frame, class_ids[i], round(x - pad), round(y - pad), round(x + w + pad),
                                    round(y + h + pad))

        elif args.detector == "tracker":

            # check to see if we are currently tracking an object
            if args.singleTracker:
                if initBB is not None:
                    # grab the new bounding box coordinates of the object
                    (success, box) = tracker.update(frame)
                    # check to see if the tracking was a success
                    if success:
                        (x, y, w, h) = [int(v) for v in box]
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                    # update the FPS counter
                    fps.update()
                    fps.stop()
                    # initialize the set of information we'll be displaying on
                    # the frame
                    info = [
                        ("Tracker", tracker),
                        ("Success", "Yes" if success else "No"),
                        ("FPS", "{:.2f}".format(fps.fps())),
                    ]
                    # loop over the info tuples and draw them on our frame
                    for (i, (k, v)) in enumerate(info):
                        text = "{}: {}".format(k, v)
                        cv2.putText(frame, text, (10, Height - ((i * 20) + 20)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            else:
                videoFrames.append(frame)
                # get updated location of objects in subsequent frames
                success, boxes = trackers.update(frame)
                playerBoxes.append(boxes)

                # draw tracked objects
                for i, newbox in enumerate(boxes):
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                    cv2.rectangle(frame, p1, p2, colors[i], 2, 1)

        else:
            continue

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            if args.singleTracker:
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                                       showCrosshair=True)
                # start OpenCV object tracker using the supplied bounding box
                # coordinates, then start the FPS throughput estimator as well
                tracker.init(frame, initBB)
                fps = FPS().start()
                # if the `q` key was pressed, break from the loop

        elif key == ord("q"):
            break

        frameCount += 1

    cap.release()
    cv2.destroyAllWindows()

    return videoFrames, playerBoxes, Width, Height, colors
Example #15
win = "C:/Users/jc306494/Documents/PythonAnalysis/SampleVid/GP010016_fast.mp4"
win = "C:/Users/jc306494/Documents/PythonAnalysis/SampleVid/VIRB0009.mp4"
mac = "/Users/Cesar/PyCode_MacOSv1/GP010016_fast.mp4"
mac2 = '/Users/Cesar/PyCode_MacOSv1/VIRB0006.MP4'

resultFile = open("Tracking.csv", "w", newline='\n')
wr = csv.writer(resultFile, delimiter=",")
wr.writerow(['Coord x', 'Coord y'])
position = (0, 0)

# Set up tracker.
# Instead of MIL, you can also use
# BOOSTING, MIL, KCF, TLD, MEDIANFLOW or GOTURN

tracker = cv2.Tracker_create(args['method'])

# Read video
vid = cv2.VideoCapture(args['video'])

# Exit if video not opened
if not vid.isOpened():
    print("Could not open video")
    sys.exit()

# Read first frame
ok, frame = vid.read()
if not ok:
    print('Cannot read video file')
    sys.exit()
Example #16
def create_trackers(new_item, trackers, frame):
    trackers.append(cv2.Tracker_create("MIL"))
    ok = trackers[-1].init(frame, new_item)
    ok, bb = trackers[-1].update(frame)
    return trackers, ok
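
# A hypothetical usage sketch (names assumed): register a tracker for each
# new detection with create_trackers(), then poll all of them per frame.
def update_trackers(trackers, frame):
    boxes = []
    for t in trackers:
        ok, bb = t.update(frame)
        boxes.append(bb if ok else None)
    return boxes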
Example #17
        frame_tracker, best_box = 0, None
        tracker_start, lost_frames = True, 0
        while True:
            ret, frame = cap.read()

            if frame_tracker < DETECTION_FRAMES:
                put_text(frame, 'DETECTING', frame_tracker)
                boxes, scores = detect_hands(frame, sess, d_boxes, d_scores, d_classes, n_detections, i_tensor)
                best_box = track_boxes(best_box, frame, boxes, scores, width, height)
                tracker_start, lost_frames = True, 0
            else:
                if best_box is not None and lost_frames < 10:
                    put_text(frame, 'TRACKING', frame_tracker, color=(255, 255, 0))

                    if tracker_start:
                        tracker = cv.Tracker_create('MEDIANFLOW')
                        ok = tracker.init(frame, best_box)
                        tracker_start = False

                    ok, new_box = tracker.update(frame)
                    best_box = new_box if new_box is not None else best_box
                    if ok:
                        p1, p2 = (int(best_box[0]), int(best_box[1])), (int(best_box[0] + best_box[2]),
                                                                        int(best_box[1] + best_box[3]))
                        cv.rectangle(frame, p1, p2, (200, 0, 0), 2, 1)
                        lost_frames = 0
                    else:
                        cv.putText(frame, 'Hand Lost', (10, 100), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 200), 3,
                                   cv.LINE_AA)
                        lost_frames += 1
                else:
Example #18
    def _init_tracker(self, windows, frame):
        x, y, w, h = windows  # KCF
        tracker = cv2.Tracker_create('KCF')
        # initialize the tracker with one frame and the target's coordinates
        tracker.init(frame, (x, y, w, h))
        return tracker
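
    # A hypothetical sibling method (assumed) showing how the helper above is
    # typically driven: build the tracker from the first frame, then poll it.
    def _track_windows(self, windows, frames):
        tracker = self._init_tracker(windows, frames[0])
        results = []
        for frame in frames[1:]:
            ok, bbox = tracker.update(frame)
            results.append(bbox if ok else None)
        return results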
Example #19
    for i in range(0, n_trackers):
        all_centroids.append([])

    rois = []
    roi_vertices = {k: () for k in range(0, n_rois)}

    roi_counts_per_tracker = {k: [] for k in range(0, n_trackers)}
    for i in range(0, n_trackers):
        for j in range(0, n_rois):
            roi_counts_per_tracker[i].append(0)
    total_count = 0

    #set up tracker(s)
    #tracking algorithms include: BOOSTING, MIL, KCF, TLD, MEDIANFLOW
    for i in range(0, n_trackers):
        trackers.append(cv2.Tracker_create("MIL"))

    #find video
    for file in os.listdir(path):
        if file.endswith(".avi"):
            video = file
            break

    #read video
    cap = cv2.VideoCapture(video)

    #exit if video didn't open
    if not cap.isOpened():
        print "Video did not open"
        sys.exit()
Example #20

def jaccard(box1, box2):
    # Python 3 removed tuple parameters; unpack the boxes explicitly
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2
    ix = max(x1, x2)
    iy = max(y1, y2)
    iw = max(min(x1 + w1, x2 + w2) - ix, 0)
    ih = max(min(y1 + h1, y2 + h2) - iy, 0)
    intersect = iw * ih
    union = w1 * h1 + w2 * h2 - intersect
    return intersect / union
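

# A quick worked check of the IoU above (toy values, for illustration):
# (0, 0, 10, 10) and (5, 5, 10, 10) overlap in a 5x5 patch, so
# intersect = 25, union = 100 + 100 - 25 = 175, IoU = 25/175 ≈ 0.143.
assert abs(jaccard((0, 0, 10, 10), (5, 5, 10, 10)) - 25 / 175) < 1e-9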


jaccards = list()

# Initialize tracker
tracker = cv2.Tracker_create(args.tracker)
tracker.init(firstImage, firstROI)
score = 0
time = 0
cv2.namedWindow('Tracker')

minlen = min(len(images), len(rects))
exframes = args.eframes
if args.examples:
    os.mkdir('./examples/' + args.tracker)
    if exframes is None:
        exframes = list(np.linspace(0, minlen, 5))

for i in range(minlen):
    image = cv2.imread(args.dataset + '/img/' + images[i])
    tic = clock()
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use any of the trackers listed below.

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[7]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()
Example #22
def main():
    """
    Loads Multi-Level Otsu masks and tracks the face.
    Returns a set of ROIs containing only the face.
    :return:
    """
    tag = 'MLO_'
    filepath = 'E:\\GitHub\\CovPySourceFile\\MultiLevelOtsu\\'
    otsu_masks = load_images_from_folder(
        folder=filepath,
        name_tag=tag,
    )

    # region MIL Tracking
    # use binary otsu mask to detect the face
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    # Set up tracker
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[4]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    initial_frame = otsu_masks[0]
    # Define initial bounding box from roi
    bbox = cv2.selectROI(initial_frame, showCrosshair=True, fromCenter=False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(initial_frame, bbox)

    # roi points
    points = []
    failed_idx = []
    for n, mask in enumerate(otsu_masks):
        # Update tracker
        ok, bbox = tracker.update(mask)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = [int(bbox[0]), int(bbox[1])]
            p2 = [int(bbox[0] + bbox[2]), int(bbox[1])]
            p3 = [int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])]
            p4 = [int(bbox[0]), int(bbox[1] + bbox[3])]
            points.append([p1, p2, p3, p4])

            cv2.rectangle(mask, tuple(p1), tuple(p3), (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(mask, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            failed_idx.append(n)

        # Display result
        cv2.imshow("Tracking", mask)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cv2.destroyAllWindows()
    # endregion

    # get rois
    rois = []
    for n, rp in enumerate(points):
        img = Image.open(
            r"E:\GitHub\CovPySourceFile\Normalized\NF_{}.png".format(n))
        left = rp[0][0]
        top = rp[0][1]
        right = rp[2][0]
        bottom = rp[2][1]
        cropped = img.crop((left, top, right, bottom))
        rois.append(cropped)

        # plt.imshow(cropped)
        # plt.show()

    destination_dir = 'E:\\GitHub\\CovPySourceFile\\FaceROI\\'

    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)

    for n, r in enumerate(rois):
        r.save(destination_dir + 'FR_{}.png'.format(n))
Example #23
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-p", "--prototxt", required=True,
        help="path to Caffe 'deploy' prototxt file")
    ap.add_argument("-m", "--model", required=True,
        help="path to Caffe pre-trained model")
    ap.add_argument("-c", "--confidence", type=float, default=0.5,
        help="minimum probability to filter weak detections")
    ap.add_argument("--fps",type=int, default=15,
        help="frame rate")
    ap.add_argument("-a", "--min-area", type=int, default=500,
                    help="minimum area size")
    ap.add_argument("-i","--motion-tracking",type=bool,default=True,
                    help="Enable motion tracking")
    ap.add_argument("-o", "--object-tracking", type=bool, default=True,
                    help="Enable selectable object tracking")
    ap.add_argument("-t","--tracker",type=str,default="kcf",
                    help="Tracker type for object tracking")
    ap.add_argument("-f", "--facial_recog", type=bool, default=True,
                    help="Facial Recognition Tracking")
    args = vars(ap.parse_args())
    
    
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

    if args['facial_recog']:
        (H, W) = (None, None)
        # initialize our centroid tracker and frame dimensions
        ct = CentroidTracker()

        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
        # initialize the video stream and allow the camera sensor to warmup
        
    if args['motion_tracking']:
        firstFrame=None
        
    if args["object_tracking"]:
        initBB = None
        # extract the OpenCV version info
        (major, minor) = cv2.__version__.split(".")[:2]
    
        # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
        # function to create our object tracker
        if int(major) == 3 and int(minor) < 3:
            tracker = cv2.Tracker_create(args["tracker"].upper())
    
        # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
        # appropriate object tracker constructor:
        else:
            OPENCV_OBJECT_TRACKERS = {
                "csrt": cv2.TrackerCSRT_create,
                "kcf": cv2.TrackerKCF_create,
                "boosting": cv2.TrackerBoosting_create,
                "mil": cv2.TrackerMIL_create,
                "tld": cv2.TrackerTLD_create,
                "medianflow": cv2.TrackerMedianFlow_create,
                "mosse": cv2.TrackerMOSSE_create
            }
            tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()

    while True:
        st=time.time()
        #read the next frame from the video stream and resize it
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        
        if args['motion_tracking']:
            
            firstFrame=motionTracker(frame.copy(),firstFrame,args["min_area"])
            
        if args['object_tracking']:
            initBB=objectTracker(frame.copy(),initBB,tracker)
        
        if args['facial_recog']:
            W,H = facialRecogTracker(frame.copy(),net,ct,W,H,args["confidence"])

        sleepTime = (1 / args['fps'])-(time.time()-st)
        if sleepTime > 0:
            time.sleep(sleepTime)

        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #24
def main():

    dataset, timestamps = load_thermal_file(
        _filename='ThermalData_18_06_2020_13_19_36.h5',
        _folder='E:\\GitHub\\CovPySourceFile')

    # region Control Variables
    is_writing = False
    is_drawing = False
    # endregion

    # region Data Pre-Processing

    # region Timestamps to Sampling Rate

    # # convert timestamps into datetime objects
    # dt_obj = [datetime.fromtimestamp(ts / 1000).time() for ts in timestamps]
    # # convert datetime objects into time strings
    # time_strings = [dt.strftime("%M:%S:%f") for dt in dt_obj]
    # # finally convert time strings into seconds
    # timestamp_in_seconds = []
    # for s in time_strings:
    #     date_time = datetime.strptime(s, "%M:%S:%f")
    #     a_timedelta = date_time - datetime(1900, 1, 1)
    #     in_seconds = a_timedelta.total_seconds()
    #     timestamp_in_seconds.append(in_seconds)
    #
    # # calculate the mean interval between samples from seconds
    # ts_mean = np.mean(np.diff(timestamp_in_seconds))
    # # finally calculate the mean sampling rate of the signal
    # fs = int(1 / ts_mean)
    # endregion

    # region Get Raw Thermal Data

    # get data set attributes
    n_frames, height, width, total_time_ms = [
        dataset.attrs[i] for i in list(dataset.attrs)
    ]
    # extract thermal frames from the hdf5 dataset
    thermal_frames = []
    # convert raw data into temperature values [deg Celsius]
    # temp_frames = []
    # normalize raw data for further processing steps [0 - 255]
    norm_frames = []
    for n in range(0, n_frames):
        raw_frame = load_frame_from_dataset(dataset, height, n)
        thermal_frames.append(raw_frame)
        # temp_frames.append(raw_frame * 0.1 - 273.15)
        norm_frames.append(
            cv2.normalize(raw_frame,
                          None,
                          alpha=0,
                          beta=255,
                          norm_type=cv2.NORM_MINMAX,
                          dtype=cv2.CV_8U))

    # get unsharpened img for edge detection later on
    unsharp_frames = []
    # for n, n_frame in enumerate(norm_frames):
    #     u_frame = unsharp_mask(image=n_frame, radius=3, amount=2)
    #     unsharp_frames.append(u_frame)
    #
    #     if is_writing:
    #         cv2.imwrite('E:\\GitHub\\CovPySourceFile\\UnsharpenedMask\\UM_{}.png'.format(n), u_frame)
    #
    #     if is_drawing:
    #         fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 3.5))
    #
    #         # Plotting the original image.
    #         ax[0].imshow(norm_frames[n])
    #         ax[0].set_title('Thermal Data - Normalized')
    #
    #         # ax[1].imshow(temp_frames[n])
    #         # ax[1].set_title('Temp Frame [C]')
    #
    #         ax[1].imshow(unsharp_frames[n])
    #         ax[1].set_title('Unsharpened Image')
    #
    #         # ax[1].imshow(norm_frames[n])
    #         # ax[1].set_title('Thermal Data - Normalized [0-255]')
    #
    #         plt.subplots_adjust()
    #         plt.show()
    #
    # if is_drawing:
    #     plt.close('all')

    # endregion

    # endregion

    # region Feature Extraction Algorithm

    # region Automatic ROI Detection

    # face segmentation using multi-level Otsu
    otsu_masks = multi_level_otsu(images=norm_frames,
                                  n_regions=4,
                                  target_region=3,
                                  method=OtsuMethods.BINARY,
                                  write=is_writing,
                                  draw=is_drawing)

    # to proceed the masks need to be converted into 3d array
    empty_array = np.zeros((height, width))
    _3d_otsu_masks = [
        np.dstack((mask, empty_array, empty_array)) for mask in otsu_masks
    ]

    # use binary otsu mask to detect the face
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    # Set up tracker
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[4]

    # the string-based factory only exists before OpenCV 3.3
    if int(major_ver) < 3 or (int(major_ver) == 3 and int(minor_ver) < 3):
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        elif tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()
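    # note: from OpenCV 4.5.1 on, several of these constructors (BOOSTING,
    # TLD, MEDIANFLOW, MOSSE) live in the cv2.legacy module instead, e.g.
    # cv2.legacy.TrackerMOSSE_create()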

    # video = cv2.VideoCapture('E:\\GitHub\\CovPySourceFile\\Video\\OtsuMask.avi')
    #
    # # Exit if video not opened.
    # if not video.isOpened():
    #     print("Could not open video file!")
    #     sys.exit()
    #
    # # Read first frame
    # ok, frame = video.read()
    # if not ok:
    #     print("Could not read video file!")
    #     sys.exit()

    tracked_frame = _3d_otsu_masks[0]
    # Define initial bounding box from roi
    bbox = cv2.selectROI(tracked_frame, showCrosshair=True, fromCenter=False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(tracked_frame, bbox)
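    # note: in recent OpenCV releases tracker.init() returns None, so `ok` is
    # only meaningful on older versions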

    # roi points
    roi_points = []
    tracked_frames = []
    # while True:
    # # Read a new frame
    # ok, frame = video.read()
    # if not ok:
    #     break
    for mask in _3d_otsu_masks:
        tracked_frame = mask
        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(tracked_frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1]))
            p3 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            p4 = (int(bbox[0]), int(bbox[1] + bbox[3]))
            cv2.rectangle(tracked_frame, p1, p3, (255, 0, 0), 2, 1)
            points = [p1, p2, p3, p4]
            # roi_values = get_values_from_roi(points, t_frame)
            roi_points.append(points)
        else:
            # Tracking failure
            cv2.putText(tracked_frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            roi_points.append([])

        # Display tracker type on frame
        cv2.putText(tracked_frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(tracked_frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        tracked_frames.append(tracked_frame)
        # Display result
        cv2.imshow("Tracking", tracked_frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    is_writing = True  # note: this overrides the is_writing flag set earlier
    if is_writing:
        for n, img in enumerate(tracked_frames):
            cv2.imwrite(
                'E:\\GitHub\\CovPySourceFile\\TrackedFrames\\TF_{}.png'.format(
                    n), img)

    norm_face_rois = []
    for n in range(len(roi_points)):
        # skip frames where tracking failed and no ROI is available
        if not roi_points[n]:
            norm_face_rois.append(None)
            continue
        # get values inside of roi
        norm_roi_values = get_values_from_roi(roi_points[n], norm_frames[n])
        # my_roi = np.zeros((roi_shapes[n][2], roi_shapes[n][3]))
        # crop the bounding box (p1 = top left, p3 = bottom right)
        x1 = roi_points[n][0][0]
        x2 = roi_points[n][2][0]
        y1 = roi_points[n][0][1]
        y2 = roi_points[n][2][1]

        norm_face_roi = norm_roi_values[y1:y2, x1:x2]

        if is_drawing:
            cv2.imshow("ROI", norm_face_roi)

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

        norm_face_rois.append(norm_face_roi)

    if is_writing:
        for n, img in enumerate(norm_face_rois):
            # skip frames where no ROI could be extracted
            if img is None:
                continue
            cv2.imwrite(
                'E:\\GitHub\\CovPySourceFile\\FaceROI\\TF_{}.png'.format(n),
                img)
    # endregion

    # endregion

    print('Bye Bye')
Example #25
0
    def start(self, bbox, frame):
        # reset the tracker
        self.cnt_invis = 0
        self.tracker = cv2.Tracker_create('KCF')
        ret = self.tracker.init(frame, bbox)
        return ret
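# Hypothetical usage, assuming `start` belongs to a wrapper class that also
# defines the cnt_invis and tracker attributes (not shown in this snippet):
# ok = my_track.start((x, y, w, h), first_frame)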
Example #26
0
# (truncated: a `states = [...]` list is defined above this point in the
# original source)
data = {
    "angle": 0,
    "found": False,
    "is_ball": False,
    "num_rot": 0,
    "ball_center": (-1, -1),
    "ball_radius": -1,
    "curr_marker": -1,
    "count": 0
}

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
tracker = None
# the string-based factory only exists before OpenCV 3.3
if int(major_ver) < 3 or (int(major_ver) == 3 and int(minor_ver) < 3):
    tracker = cv2.Tracker_create('KCF')
else:
    tracker = cv2.TrackerKCF_create()
state_changed = False
curr_state = states[0]

car.initialize()
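
# camera and rawCapture are created earlier in the original source; a hedged
# reconstruction using the standard picamera idiom (the resolution below is a
# placeholder, not taken from the original):
from picamera import PiCamera
from picamera.array import PiRGBArray

camera = PiCamera()
camera.resolution = (640, 480)
rawCapture = PiRGBArray(camera, size=camera.resolution)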

# keep looping
for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array
    print(curr_state)
Example #27
0
def process_video(groundtruth_path, image_path, out_video):
    # tracker_type, major_ver and minor_ver are module-level globals in the
    # original source; the string-based factory only exists before OpenCV 3.3
    if int(major_ver) < 3 or (int(major_ver) == 3 and int(minor_ver) < 3):
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        elif tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()

    print('processing sequence', out_video)
    with open(groundtruth_path) as f:
        groundtruth = f.readlines()

    groundtruth = [x.rstrip() for x in groundtruth]

    image_filenames = [y for x in walk(image_path) for y in glob(join(x[0], '*.jpg'))]
    image_filenames.sort()

    assert len(image_filenames) == len(groundtruth)

    image = cv2.imread(image_filenames[0])
    height, width = image.shape[:2]
    writer = cv2.VideoWriter(out_video,
                             cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 15,
                             (width, height))

    if not writer.isOpened():
        print('Failed to open video')
        return

    # VOT sequence
    # polygon_ = parse_region(groundtruth[0])
    # cx, cy, w, h = get_axis_aligned_bbox(polygon_)
    # target_pos, target_sz = np.array([cx, cy]), np.array([w, h])

    polygon = [float(x) for x in groundtruth[0].split(',')]
    ok = tracker.init(image, (polygon[0], polygon[1], polygon[2], polygon[3]))

    for i in range(len(image_filenames)):
        image = cv2.imread(image_filenames[i])
        polygon = [float(x) for x in groundtruth[i].split(',')]
        polygon = [int(x) for x in polygon]
        
        # VOT sequence
        # cv2.line(image, (polygon[0], polygon[1]), (polygon[2], polygon[3]), (0, 0, 255), 2)
        # cv2.line(image, (polygon[2], polygon[3]), (polygon[4], polygon[5]), (0, 0, 255), 2)
        # cv2.line(image, (polygon[4], polygon[5]), (polygon[6], polygon[7]), (0, 0, 255), 2)
        # cv2.line(image, (polygon[6], polygon[7]), (polygon[0], polygon[1]), (0, 0, 255), 2)

        cv2.rectangle(image, (polygon[0], polygon[1]),
                      (polygon[0] + polygon[2], polygon[1] + polygon[3]),
                      (0, 0, 255), 2)

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(image)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(image, "Tracking failure detected", (50, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 255), 2)

        # Display tracker type on frame
        cv2.putText(image, tracker_type + " Tracker", (50, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (230, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(image, "FPS : " + str(int(fps)), (50, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (230, 170, 50), 2)

        writer.write(image)

    writer.release()
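
# A hypothetical invocation (the paths are placeholders; the original script
# sets tracker_type and the version globals elsewhere):
if __name__ == '__main__':
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
    tracker_type = 'CSRT'
    process_video('sequences/ball/groundtruth.txt', 'sequences/ball',
                  'ball_out.avi')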
Example #28
0
def tracking_face(vi_path="try.avi", t_type=7):
    # Set up tracker.
    # Instead of MIL, you can also use
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[t_type]

    # the string-based factory only exists before OpenCV 3.3
    if int(major_ver) < 3 or (int(major_ver) == 3 and int(minor_ver) < 3):
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        elif tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()

    # Read video
    video = cv2.VideoCapture(vi_path)

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()

    # Define an initial bounding box
    height, width, layers = frame.shape

    # box
    # bbox_transfer()
    bbox, score = bbox_score(frame)
    bbox = np.squeeze(bbox)[0]
    score = np.squeeze(score)[0]

    # keep reading frames until the detector is confident enough
    while score <= 0.7:
        ok, frame = video.read()
        if not ok:
            print('No confident detection found in the video')
            sys.exit()
        bbox, score = bbox_score(frame)
        bbox = np.squeeze(bbox)[0]
        score = np.squeeze(score)[0]

    bbox = bbox_transfer(bbox, height, width)
    # bbox = (bbox[1]*width,bbox[0]*height,bbox[2]-bbox[0],bbox[3]-bbox[1])
    # bbox = tuple(bbox)

    # bbox = (265, 48, 70, 82)# x,y, w, h
    # bbox = (48,151,128,186) # *H ymin, xmin, ymax, xmax
    # bbox = (85,270,228,332) # *W
    # bbox = (85,151,228,186) A -  151 85 (228-85) 186-151
    # bbox = (48,270,128,332) B
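
    # bbox_transfer is not shown in this snippet; judging from the commented
    # lines above, it maps a normalized (ymin, xmin, ymax, xmax) detection box
    # to the (x, y, w, h) pixel tuple that the OpenCV trackers expect. A
    # hedged, hypothetical sketch:
    def bbox_transfer_sketch(box, height, width):
        ymin, xmin, ymax, xmax = box
        return (int(xmin * width), int(ymin * height),
                int((xmax - xmin) * width), int((ymax - ymin) * height))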

    # print(bbox.shape,score.shape)
    # Uncomment the line below to select a different bounding box
    # bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    # bbox = bbox_transfer(bbox)

    while True:
        # Read a new frame
        # (the original author notes this read might belong after tracker.update)
        ok, frame = video.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break