# Assumed imports for this snippet; Tracker is each project's own
# Kalman-filter tracker class (the module name here is illustrative).
import cv2
import imutils
import numpy as np
from tracker import Tracker

def main():
    cap = cv2.VideoCapture('goose.mp4')
    # Positional args: dist_thresh, max_frames_to_skip, max_trace_length,
    # trackIdCount (Example #6 below spells out the keyword names)
    tracker = Tracker(160, 30, 5, 1)

    pause = False
    firstFrame = None
    counted = []
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame = imutils.resize(frame, width=800)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (3, 3), 0)

            if firstFrame is None:
                firstFrame = gray
                continue

            frameDelta = cv2.absdiff(firstFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            thresh = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            centers = []
            for c in cnts:
                # Keep only contours large enough to be a moving bird
                if cv2.contourArea(c) < 500:
                    continue
                (x, y, w, h) = cv2.boundingRect(c)

                centers.append(np.array([[x + w / 2], [y + h / 2]]))
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

            if len(centers) > 0:
                tracker.Update(centers)
                for track in tracker.tracks:
                    x = int(track.prediction[0])
                    y = int(track.prediction[1])
                    track_id = track.track_id
                    # Count each track once after it crosses x = 400
                    if x > 400 and track_id not in counted:
                        counted.append(track_id)
                    cv2.putText(frame, str(track_id), (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1)

                # Draw the running count once per frame
                cv2.putText(frame, str(len(counted)), (20, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)

            cv2.imshow('Tracking', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
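All of these snippets drive the same `Tracker` interface with positional arguments; Example #6 below spells out their names (`dist_thresh`, `max_frames_to_skip`, `max_trace_length`, `trackIdCount`). A minimal sketch of the API the examples assume follows; each project ships its own `tracker.py`, so the internals here are illustrative only:

import numpy as np

class Track:
    # One tracked object: a 2x1 predicted position plus its history
    def __init__(self, prediction, track_id):
        self.prediction = prediction  # np.array([[x], [y]])
        self.track_id = track_id
        self.trace = []               # recent predictions, newest last
        self.skipped_frames = 0

class Tracker:
    # Real implementations pair a Kalman filter per track with
    # Hungarian-style assignment of detections to tracks.
    def __init__(self, dist_thresh, max_frames_to_skip, max_trace_length,
                 trackIdCount):
        self.dist_thresh = dist_thresh
        self.max_frames_to_skip = max_frames_to_skip
        self.max_trace_length = max_trace_length
        self.trackIdCount = trackIdCount
        self.tracks = []

    def Update(self, detections):
        # Assign detections to tracks, spawn tracks for unmatched
        # detections, predict each track's next position, drop tracks
        # older than max_frames_to_skip, trim traces to max_trace_length.
        raise NotImplementedError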
Example #2
def main():

    # Create opencv video capture object
    cap = cv2.VideoCapture('project2.avi')

    # Create Object Detector
    detector = Detectors()
    # Create Object Tracker
    tracker = Tracker(40, 8, 5, 100)

    # Variables initialization

    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]

    out = cv2.VideoWriter('Tracking2_wait8.avi',
                          cv2.VideoWriter_fourcc(*'DIVX'), 5, (200, 200))

    while True:
        ret, frame = cap.read()
        if ret:
            centers = detector.Detect(frame)

            # If centroids are detected then track them
            if len(centers) > 0:
                # Track object using Kalman Filter
                tracker.Update(centers)
                # For identified object tracks draw tracking line
                # Use various colors to indicate different track_id
                for i in range(len(tracker.tracks)):
                    if (len(tracker.tracks[i].trace) > 1):
                        for j in range(len(tracker.tracks[i].trace) - 1):
                            # Draw trace line
                            x1 = tracker.tracks[i].trace[j][0][0]
                            y1 = tracker.tracks[i].trace[j][1][0]

                            x2 = tracker.tracks[i].trace[j + 1][0][0]
                            y2 = tracker.tracks[i].trace[j + 1][1][0]

                            clr = tracker.tracks[i].track_id % 9
                            cv2.line(frame, (int(x1), int(y1)),
                                     (int(x2), int(y2)), track_colors[clr], 2)

                # Display the resulting tracking frame
                cv2.imshow('Tracking', frame)
                cv2.waitKey(100)
                out.write(frame)

        else:
            break

    cap.release()
    cv2.destroyAllWindows()

    out.release()
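One caveat on the writer above: cv2.VideoWriter silently produces an unplayable file when the frames passed to write() don't match the frameSize given to the constructor (a fixed (200, 200) here). A safer pattern, which Examples #5 and #10 below already use, is to derive the size from the capture:

cap = cv2.VideoCapture('project2.avi')
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
out = cv2.VideoWriter('Tracking2_wait8.avi',
                      cv2.VideoWriter_fourcc(*'DIVX'), 5, size)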
Example #3
def main():

    # Create opencv video capture object
    cap = cv2.VideoCapture('G:/cmu/colonoscopy/New folder/Cold.mp4')
    #cap = cv2.VideoCapture('G:/cmu/colonoscopy/imagemark/Color-Tracker-master/Retroflect-at-end.mp4')

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 1000, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    frame_num = 0

    # Infinite loop to process video frames
    while (True):
        frame_num += 1
        print(frame_num)
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        # Crop to the region of interest
        frame = frame[30:550, 400:930]
        #frame = frame[40:400,130:450]

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if (skip_frame_count < 15):
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect1(orig_frame)

        # If centroids are detected then track them
        if (len(centers) > 0):
            text = 'Biopsy'
            cv2.putText(orig_frame,
                        text, (100, 100),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (0, 0, 255),
                        2,
                        lineType=cv2.LINE_AA)
            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)
            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Slow playback down and check for key strokes
        k = cv2.waitKey(70) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #4
        corners[3][1] = min(corners[3][1], corners1[3][1])

        print(corners1)
        continue

    corners = np.float32(corners)

    # Perspective transform on frame to get table only with right measurements:
    frame = getTableFromFrame(corners, frame)

    # Detect box centers and angles:
    (centers, angles) = detector.Detect(frame)

    # Track box centers:
    if (len(centers) > 0):
        tracker.Update(centers)

    for i in range(len(tracker.tracks)):
        if (len(tracker.tracks[i].trace) > 1):
            for j in range(len(tracker.tracks[i].trace) - 1):
                # Draw trace line
                x1 = tracker.tracks[i].trace[j][0][0]
                y1 = tracker.tracks[i].trace[j][1][0]
                x2 = tracker.tracks[i].trace[j + 1][0][0]
                y2 = tracker.tracks[i].trace[j + 1][1][0]
                clr = tracker.tracks[i].track_id % 9
                cv.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                        track_colors[clr], 2)

    # Display the resulting tracking frame
    cv.imshow('Tracking', frame)
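getTableFromFrame is not shown in this fragment. Assuming it maps the four detected table corners onto an axis-aligned output image of known size, a standard OpenCV perspective warp would do the job; the body and the output dimensions below are an illustrative sketch, not the original code:

import cv2 as cv
import numpy as np

def getTableFromFrame(corners, frame, out_w=800, out_h=400):
    # corners: np.float32 array of the 4 table corners, ordered to
    # correspond with dst (top-left, top-right, bottom-left, bottom-right)
    dst = np.float32([[0, 0], [out_w, 0], [0, out_h], [out_w, out_h]])
    M = cv.getPerspectiveTransform(corners, dst)
    return cv.warpPerspective(frame, M, (out_w, out_h))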
Example #5
def detect_video(yolo, video_path, output_path=""):

    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        #print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    tracker = Tracker(160, 30, 6, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    while True:
        return_value, frame = vid.read()
        if not return_value:
            break
        image = Image.fromarray(frame)
        image, centers, number = yolo.detect_image(image)
        result = np.asarray(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        font = cv2.FONT_HERSHEY_SIMPLEX
        #cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.putText(result, str(number), (20,  40), font, 1, (0, 0, 255), 5)
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]

                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(result, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 4)
                        #x3 = tracker.tracks[i].track_id
                        #cv2.putText(result,str(tracker.tracks[j].track_id),(int(x1),int(y1)),font,track_colors[j],3)
                        #cv2.circle(result,(int(x1),int(y1)),3,track_colors[j],3)
            # Display the resulting tracking frame
            cv2.imshow('Tracking', result)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break
    yolo.close_session()
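The accum_time/curr_fps bookkeeping above is a once-per-second FPS counter; the same boilerplate reappears in Examples #10 and #13. Pulled into a small helper for clarity (a sketch, not part of any of these projects):

from timeit import default_timer as timer

class FpsCounter:
    def __init__(self):
        self.prev = timer()
        self.accum = 0.0
        self.frames = 0
        self.text = "FPS: ??"

    def tick(self):
        # Call once per processed frame; returns the current FPS label
        now = timer()
        self.accum += now - self.prev
        self.prev = now
        self.frames += 1
        if self.accum > 1:  # a full second has elapsed
            self.text = "FPS: " + str(self.frames)
            self.accum -= 1
            self.frames = 0
        return self.text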
Example #6
def main():
    command_time = False
    # Get a list of ALL the sticks that are plugged in
    # we need at least one
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.OpenDevice()

    # The graph file that was created with the ncsdk compiler
    # graph_file_name = 'graph_heli'
    graph_file_name = 'graph_all'

    # read in the graph file to memory buffer
    with open(graph_file_name, mode='rb') as f:
        graph_in_memory = f.read()

    # create the NCAPI graph instance from the memory buffer containing the graph file.
    graph = device.AllocateGraph(graph_in_memory)

    drone = libardrone.ARDrone()
    # drone = False

    # cap = cv2.VideoCapture('unobstructed.m4v')
    cap = cv2.VideoCapture('tcp://192.168.1.1:5555')

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    center = [width / 2, height / 2]

    print ("width " , width)
    print ("height " , height)

    fps = 0.0
    frame_count = 0

    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                (0, 255, 255), (255, 0, 255), (255, 127, 255),
                (127, 0, 255), (127, 0, 127)]

    tracker = Tracker(dist_thresh=50, max_frames_to_skip=5, max_trace_length=10, trackIdCount=100)

    detect = True
    AUTO = False

    while cap.isOpened():
        start = time.time()

        # Grab and discard buffered frames so we always process a fresh one
        for j in range(10):
            ret = cap.grab()

        ret, img = cap.retrieve()
        ts = datetime.datetime.utcnow().isoformat()

        if ret and detect:
            frame_count += 1
            # fileName = datetime.datetime.utcnow().isoformat().replace(":","").replace("-","").replace(".","");
            # cv2.imwrite('images/' + fileName + '.jpg', img) 

            if (SPLIT):
                split_width = 340
                split_height = 300

                #we do not need to copy the image, as we can pass it by reference to the NCS
                #we should make copies if we debug / want to separately redraw detections
                img00 = img[0:split_height, 0:split_width]#.copy()
                img01 = img[0:split_height, (width-split_width):width]#.copy()
                img10 = img[(height-split_height):height, 0:split_width]#.copy()
                img11 = img[(height-split_height):height, (width-split_width):width]#.copy()

                cv2.imshow('img00', img00)
                cv2.imshow('img01', img01)
                cv2.imshow('img10', img10)
                cv2.imshow('img11', img11)

                detectedBoxes = run_inference(img00, graph, split_width, split_height, 0, 0, box_color=box_color00) + \
                    run_inference(img01, graph, split_width, split_height, width - split_width, 0, box_color=box_color01) + \
                    run_inference(img10, graph, split_width, split_height, 0, height-split_height, box_color=box_color10) + \
                    run_inference(img11, graph, split_width, split_height, width - split_width, height-split_height, box_color=box_color11)
            else:
                detectedBoxes = run_inference(img, graph, width, height, 0, 0, global_box_color)

            # Cluster radius in pixels; also used as the marker radius below
            distance = 35
            if (CLUSTERING):
                detectedBoxes = cluster(detectedBoxes, distance)

            tracker.Update(detectedBoxes)

            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace)-1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0]
                        y1 = tracker.tracks[i].trace[j][1]
                        x2 = tracker.tracks[i].trace[j+1][0]
                        y2 = tracker.tracks[i].trace[j+1][1]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)

            for box in detectedBoxes:
                cv2.circle(img, (int(box[1]), int(box[2])), distance, (255, 255, 255), thickness=3, lineType=8, shift=0)

            if (len(tracker.tracks) > 0):
                tracker_width = False
                tracker_height = False

                # if (tracker.tracks[0].last_detection_assigment is not None and tracker.tracks[0].skipped_frames == 0 and tracker.tracks[0].age > 1):
                if (tracker.tracks[0].last_detection_assigment is not None):
                    tracker_width = detectedBoxes[tracker.tracks[0].last_detection_assigment][3]
                    tracker_height = detectedBoxes[tracker.tracks[0].last_detection_assigment][4]

                if (AUTO):
                    updatePID(center, tracker.tracks[0].prediction, tracker_width, tracker_height, drone)
            elif (AUTO):
                drone.turn_left()

        end = time.time()
        seconds = end - start
        fps = 1 / seconds

        fpsImg = cv2.putText(img, "%.2f fps" % (fps), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 2)
        fpsImg = cv2.putText(img, "AUTO MODE: ", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 2)

        auto_mode_color = (0, 255, 0)
        if (AUTO):
            auto_mode_color = (0, 0, 255)
        fpsImg = cv2.putText(img, "%s" % (str(AUTO)), (110, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, auto_mode_color, 2)

        battery_text_color = (0, 255, 0)
        battery_text_font = 0.4
        if (drone):
            if (drone.navdata[0]['battery'] < 20):
                battery_text_color = (0, 0, 255)
                battery_text_font = 0.7
            fpsImg = cv2.putText(img, "%s battery" % (str(drone.navdata[0]['battery'])), (20, 310), cv2.FONT_HERSHEY_SIMPLEX, battery_text_font, battery_text_color, 2)

        cv2.imshow("detected", fpsImg)

        key = cv2.waitKey(33)

        if key == ord('t'):
            command_time = time.time()
            drone.takeoff()
        if key == ord('l'):
            command_time = time.time()
            drone.land()
        if key == ord('h'):
            command_time = time.time()
            drone.hover()

        if key == ord('r'):
            tracker = Tracker(dist_thresh=50, max_frames_to_skip=5, max_trace_length=10, trackIdCount=100)
            print ('RESETTING TRACKER')

        # left joystick
        if key == ord('a'):
            command_time = time.time()
            drone.move_left()
        if key == ord('d'):
            command_time = time.time()
            drone.move_right()
        if key == ord('w'):
            command_time = time.time()
            drone.move_forward()
        if key == ord('s'):
            command_time = time.time()
            drone.move_backward()


        # right joystick
        if key == ord(';'):
            command_time = time.time()
            drone.turn_left()
        if key == ord('\\'):
            command_time = time.time()
            drone.turn_right()
        if key == ord('['):
            command_time = time.time()
            drone.move_up()
        if key == ord('\''):
            command_time = time.time()
            drone.move_down()
        if key == ord('z'):
            AUTO = not AUTO
            print ("AUTO MODE ", AUTO)

        if key == ord('q'):
            break

        if (command_time):
            command_age = time.time() - command_time
            print (command_age)
            if (command_age > 0.7):
                drone.hover()
                command_time = False
                print ('hovering again')
        else:
            if (drone):
                drone.hover()


    # Clean up the graph and the device
    graph.DeallocateGraph()
    device.CloseDevice()
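updatePID is referenced above but not shown. Assuming it nudges the drone so the first track's predicted center stays in the middle of the frame, a proportional-only sketch might look like the following; the deadband value and the specific libardrone calls are guesses, and the real controller may also use the detection's width/height as a distance proxy:

def updatePID(center, prediction, box_w, box_h, drone, deadband=40):
    # center: (frame_w/2, frame_h/2); prediction: the track's (x, y)
    dx = prediction[0] - center[0]
    dy = prediction[1] - center[1]
    if dx > deadband:
        drone.turn_right()
    elif dx < -deadband:
        drone.turn_left()
    if dy > deadband:
        drone.move_down()
    elif dy < -deadband:
        drone.move_up()
    # box_w/box_h (False when no detection was assigned) could drive
    # move_forward()/move_backward() to hold distance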
Example #7
class DataWriter:
    def __init__(self,
                 save_video=False,
                 savepath='examples/res/1.avi',
                 fourcc=cv2.VideoWriter_fourcc(*'XVID'),
                 fps=25,
                 frameSize=(640, 480),
                 queueSize=1024,
                 bg_color=None):
        """
        提取骨架信息,合成最后视频
        :param save_video: 是否保存合成骨架信息的视频
        :param savepath: 保存视频的路径
        :param fourcc: 保存视频的格式
        :param fps: 保存视频的帧率
        :param frameSize: 保存视频的尺寸
        :param queueSize:
        :param bg_color:保存视频的背景,默认None使用原视频作为背景,white用纯白作为背景,black用黑色作为背景
        """
        if save_video:
            # initialize the file video stream along with the boolean
            # used to indicate if the thread should be stopped or not
            self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
            assert self.stream.isOpened(), 'Cannot open video for writing'
        self.save_video = save_video
        self.stopped = False
        self.final_result = []
        # initialize the queue used to store frames read from
        # the video file
        self.Q = Queue(maxsize=queueSize)
        if opt.save_img:
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')
        self.bg_color = bg_color
        self.current_frame = 0
        self.current_sec = 0
        self.out_img_dir = '0_3'
        self.tracker = Tracker(160, 30, 50, 100)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name, shotname,
                 fps) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    print('boxes is None')
                    img = orig_img
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                else:
                    centers = []
                    img = orig_img
                    pedestrians = {}
                    counter = 0
                    for i, box in enumerate(boxes):
                        # Box coordinates arrive as tensors; cast for OpenCV
                        cv2.putText(img, "i:%s" % i,
                                    (int(box[0]), int(box[1]) + 20),
                                    font, 0.6, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.rectangle(img, (int(box[0]), int(box[1])),
                                      (int(box[2]), int(box[3])),
                                      (0, 255, 0), 1)
                        # center_point = self.center(box)
                        min_x = min(box[0].int(), box[2].int())
                        max_x = max(box[0].int(), box[2].int())
                        min_y = min(box[1].int(), box[3].int())
                        max_y = max(box[1].int(), box[3].int())

                        x = (min_x + max_x) / 2
                        y = (min_y + max_y) / 2
                        w = max_x - min_x
                        h = max_y - min_y

                        # cv2.circle(img, (x, y), 4, (0, 0, 255), -1)
                        b = np.array([[x], [y]])
                        centers.append(np.round(b))
                        counter += 1

                    # Track objects using the Kalman filter
                    self.tracker.Update(centers)

                    # For identified object tracks, draw the tracking line;
                    # use different colors to indicate different track_ids
                    for i in range(len(self.tracker.tracks)):
                        if (len(self.tracker.tracks[i].trace) > 1):
                            for j in range(
                                    len(self.tracker.tracks[i].trace) - 1):
                                # Draw trace line
                                x1 = self.tracker.tracks[i].trace[j][0][0]
                                y1 = self.tracker.tracks[i].trace[j][1][0]
                                x2 = self.tracker.tracks[i].trace[j + 1][0][0]
                                y2 = self.tracker.tracks[i].trace[j + 1][1][0]
                                clr = self.tracker.tracks[i].track_id % 9
                                cv2.line(img, (int(x1), int(y1)),
                                         (int(x2), int(y2)), track_colors[clr],
                                         2)

                    # Display the resulting tracking frame
                    cv2.imshow('Tracking', img)

                    # Slow playback down
                    cv2.waitKey(50)
            else:
                time.sleep(0.1)

    def running(self):
        # indicate that the thread is still running
        time.sleep(0.2)
        return not self.Q.empty()

    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name,
             shotname, fps):
        # save next frame in the queue
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name,
                    shotname, fps))

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        time.sleep(0.2)

    def results(self):
        # return final result
        return self.final_result

    def len(self):
        # return queue len
        return self.Q.qsize()
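A minimal usage sketch for DataWriter; the class appears to come from an AlphaPose-style pose-estimation pipeline (note opt.save_img and hm_data), and the argument values below are placeholders:

writer = DataWriter(save_video=True, savepath='examples/res/1.avi').start()
# Inside the inference loop, enqueue each frame's results:
#   writer.save(boxes, scores, hm_data, pt1, pt2, orig_img,
#               im_name, shotname, fps)
while writer.running():
    pass  # wait for the queue to drain
writer.stop()
print(len(writer.results()), 'frames processed')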
Example #8
def main():
    fig = plt.figure()
    args = config_parse()
    imgs = []
    graphs = []
    if not (args.traj or args.plot):
        print("Please choose plot method")
        return
    if args.traj:
        ax = plt.axes(projection='3d')
    else:
        fig, axs = plt.subplots()
    dataset = ['dic', 'fluo', 'phc']
    if args.type in dataset:
        if args.type == 'dic':
            # Association range used when tracing backward to detect mitosis:
            # a new trace must link to an old trace in a previous frame
            minD = 30
            maxD = 70
            # Dissociation range of the Kalman tracker
            dissassociateRange = 30
        if args.type == 'fluo':
            raise NotImplementedError
        if args.type == 'phc':
            raise NotImplementedError
    else:
        print(f"Please choose from {dataset}")
        return

    img = plt.imread("mask_DIC/mask000.tif")

    size = img.shape
    width = size[0]
    height = size[1]
    #we cant delete
    tracker = Tracker(dissassociateRange, 100, 0)
    detector = Detector()

    l = []
    for root, dirs, files in os.walk("mask_DIC"):
        for file in files:
            l.append(file)

    l = sorted(l)

    #original file
    ori = []
    for root, dirs, files in os.walk("Sequence 1"):
        for file in files:
            ori.append(file)
    ori = sorted(ori)

    first = -1
    second = 0
    #center list
    Clist = []
    plt_trace = []

    for name in l:

        imgN = os.path.join("mask_DIC", name)
        print(imgN)
        img = plt.imread(imgN)
        graph = Graph(img, height, width)

        centers = detector.Detect(graph)
        Clist.append(centers)
        tracker.Update(centers, second)

        if args.traj:
            # TODO: trajectory repair hasn't been done yet
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    # for j in range(len(tracker.tracks[i].trace)-1):

                    x1 = tracker.tracks[i].trace[-2][0]
                    y1 = tracker.tracks[i].trace[-2][1]

                    x2 = tracker.tracks[i].trace[-1][0]
                    y2 = tracker.tracks[i].trace[-1][1]

                    if x1 == x2 and y1 == y2:
                        continue

                    ax.plot([x1, x2], [y1, y2], [first, second])
                    plt.draw()
        if args.plot:
            oriN = os.path.join("Sequence 1", ori[second])
            originImg = plt.imread(oriN)
            imgs.append(originImg)
            plt_trace.append(copy.deepcopy(tracker.tracks))
            graphs.append(copy.deepcopy(graph))

        first += 1
        second += 1

        if second == args.num:
            break

    plt_trace = MitosisRecovery(tracker, plt_trace, minD, maxD)

    if args.plot:
        ani = FuncAnimation(fig,
                            update,
                            fargs=(imgs, axs, Clist, plt_trace, graphs),
                            interval=args.interval,
                            frames=second)
    plt.show()
    if args.plot and args.save:
        ani.save('myAnimation.gif', writer='imagemagick', fps=15)

    done = args.search
    while done:
        choice = input("> ")
        qlist = ['speed', 'total', 'net']
        if choice in qlist:
            cell_id = int(input("ID: "))
            frame = int(input("which frame are you at: "))
            if frame < 1:
                print("the frame has to be greater than 0")
                continue

            t = tracker.findTrack(cell_id)
            if t is None:
                print("Don't have this cell")
                continue
            if len(t.frame) == 1:
                print("Sorry, this cell only appears once")

            absolute_val_array = np.abs(np.array(t.frame) - frame)
            smallest_difference_inx = absolute_val_array.argmin()
            closest_frame = t.frame[smallest_difference_inx]
            if closest_frame != frame:
                print(
                    f"Sorry, we can't find {cell_id} in frame {frame}; the closest frame is {closest_frame}"
                )
            if choice == "speed":
                pre_frame = t.frame[smallest_difference_inx - 1]
                pre_loc = np.array(t.trace[smallest_difference_inx - 1])
                cur_loc = np.array(t.trace[smallest_difference_inx])
                dist = np.linalg.norm(cur_loc - pre_loc)
                speed = dist / (frame - pre_frame)
                print(
                    f"The {cell_id} at frame {frame} has a speed of {speed} pixels/frame"
                )
            if choice == "total":
                t.printTrace(smallest_difference_inx)
                dist = t.totalDistance(smallest_difference_inx)
                print(f"It has travelled {dist} in total")
            if choice == "net":
                loc = np.array(t.trace[smallest_difference_inx])
                start = np.array(t.trace[0])

                dist = np.linalg.norm(loc - start)
                print(f"The net distance is {dist}")

        if choice == "q":
            done = False
Example #9
            for iii in range(0, len(df)):
                if df.iat[iii, 3] == (frameaa + 1):
                    if df.iat[iii, 2] in [3, 6, 8]:
                        bb = np.array([[df.iat[iii, 0]], [df.iat[iii, 1]]])
                        centers.append(np.round(bb))

        # If centroids are detected then track them
        if (len(centers) > 0):
            print('frame number=', framea)
            # Track objects using the Kalman filter
            centers.sort(key=lambda x: x[1])
            tracker.Update(centers, frameaa + 1)

            cv.putText(frame, str(frameaa + 1), (20, 20),
                       cv.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255))

            for icc in range(0, len(centers)):
                cv.circle(frame, (int(centers[icc][0]), int(centers[icc][1])),
                          5, (0, 255, 0), 2)


            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                print('trackid=', tracker.tracks[i].track_id)
                trackidcheck = tracker.tracks[i].track_id
                if (len(tracker.tracks[i].trace) > 10):
    #------------------------------------------------------------------------------------------------                
                    tracks_dataframe = pd.DataFrame(columns = ['trackID','x','y','tcolor','framenum','detectedx','detectedy'])
Example #10
def detect_video(yolo, video_path, output_path=""):

    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")

    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    isOutput = output_path != ""
    if isOutput:
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)

    accum_time = 0
    curr_fps = 0
    prev_time = timer()

    # Variables initialization
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]

    # Initialize a tracker to manage the Tracks
    tracker = Tracker(160, 30, 6, 100)

    while True:
        # Read the video frame by frame
        return_value, frame = vid.read()
        if not return_value:
            break
        image = Image.fromarray(frame)

        # Run YOLO detection
        image, centers, number = yolo.detect_image(image)

        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0

        cv2.putText(result, str(number), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 5)

        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # Draw the tracking traces
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]

                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(result, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 4)

            cv2.imshow('Tracking', result)

        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break

    yolo.close_session()
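The double indexing trace[j][0][0] that recurs in most of these drawing loops reflects that each trace entry is a 2x1 numpy column vector (np.array([[x], [y]]), as built in Examples #1 and #7). A tiny illustrative helper makes the loops easier to read:

def trace_point(track, j):
    # Each trace entry is a 2x1 column vector [[x], [y]]
    return (int(track.trace[j][0][0]), int(track.trace[j][1][0]))

# e.g. cv2.line(result, trace_point(t, j), trace_point(t, j + 1),
#               track_colors[t.track_id % 9], 2)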
Example #11
def main():
    # Load the Inception-ResNet v1 graph (see resnetv1_inception.py in models)
    tf.reset_default_graph()
    sess = tf.Session()
    images_pl = tf.placeholder(tf.float32, shape=[None, 160, 160, 3], name='input_image')
    images_norm = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), images_pl)
    train_mode = tf.placeholder(tf.bool)
    age_logits, gender_logits, _ = inception_resnet_v1.inference(images_norm, keep_probability=0.8,
                                                                 phase_train=train_mode,
                                                                 weight_decay=1e-5)

    gender = tf.argmax(tf.nn.softmax(gender_logits), 1)
    age_ = tf.cast(tf.constant([i for i in range(0, 101)]), tf.float32)
    age = tf.reduce_sum(tf.multiply(tf.nn.softmax(age_logits), age_), axis=1)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state("/home/neosai/Documents/projects/deep_face_recognition/weights/models_gender_and_age/")
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("restore model!")
    else:
        print("error: no checkpoint found")

    # faces = np.empty((1,160, 160, 3))
    aligned_images = []

    # upscale_factor = 4
    # model_name = "/home/neosai/Documents/projects/deep_face_recognition/weights/netG_epoch_4_100.pth"
    # # limit_mem()
    # model = Generator(upscale_factor).eval()
    # # model.cuda()
    # model.load_state_dict(torch.load(model_name, map_location=lambda storage, loc: storage))
    # Choose area of interest
    get_crop_size(path)
    # get_crop_size1(path)
    print('Your area of interest: ', ix, ' ', iy, ' ', ex, ' ', ey)
    area = (ix, iy, ex, ey)
    # mid_y = (iy + ey) / 2
    # print('point line: ', ix1, ' ', iy1, ' ', ix2, ' ', iy2)
    # point1 = (ix, ey)
    # point2 = (ex, iy)

    # y = ax + b
    # a = float((iy2 - iy1) / (ix2 - ix1))
    # b = iy2 - a * ix2
    # print("a,, b: ", a, b)

    # Create opencv video capture object
    cap = cv2.VideoCapture(path)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if cap_from_stream:
        w = 1280
        h = 720
    # fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('../video/1_0222.avi', fourcc, 15, (w, h))

    # Create Object Detector
    detector = YOLO()

    # Create Object Tracker
    # tracker = Tracker(iou_thresh=0.3, max_frames_to_skip=5, max_trace_length=20, trackIdCount=0)
    tracker = Tracker(iou_thresh=0.1, max_frames_to_skip=10, max_trace_length=20, trackIdCount=0)

    # Variables initialization
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    # count_vehicle = {'person': 0, 'motorbike': 0, 'car': 0, 'truck': 0, 'bicycle': 0, 'bus': 0}
    # count_people = {'female_0_10':0, 'female_10_20': 0, 'female_20_30': 0,
    #         'female_30_40': 0, , 'female_40_50': 0, 'female_50_100': 0
    #         'male_0_10': 0, 'male_10_20': 0, 'male_20_30': 0, 'male_30_40': 0, 'male_40_50': 0, 'male_50_100': 0}
    count_people_come_in_out = {'female_come_in': 0, 'female_come_out': 0, 'male_come_in': 0, 'male_come_out': 0}

    count_people = {'people_come_in': 0, 'people_come_out': 0}
    id_number = 0
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        if cap_from_stream:
            frame = cv2.resize(frame, (1280, 720))
        frame = Image.fromarray(frame)

        # Detect and return centroids of the objects in the frame
        result, centers, box_detected, obj_type = detector.detect_image(frame, area)
        result = np.asarray(result)
        frame = np.asarray(frame)

        #####

        # for bbox in box_detected:
        #     a0, a1, a2, a3 = bbox[0], bbox[1], bbox[2], bbox[3]
        #     cv2.rectangle(result, (a0 - 10, a1 - 10), (a2 + 10, a3 + 10), (0, 255, 0), 3)
        #     if a1 < 480 + 50 and a1 > 480 -50:
        #         print(a1)
        #         image_crop = frame[a1:a3, a0:a2]
        #         # cv2.imwrite("image.jpg", image_crop)
        #         image_crop = Image.fromarray(image_crop, 'RGB')
        #         # image_crop = super_resolution_image(image_crop, model)
        #         image_crop_array = np.asarray(image_crop)

        #         face_male_resize = image_crop.resize((160, 160), Image.ANTIALIAS)
        #         face = np.array(face_male_resize)
        #         aligned_images.append(face)
        #         # faces[0, :, :, :] = face
        #         age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})
        #         aligned_images = []
        #         # print(gender_predict)
        #         # print(type(gender_predict))
        #         label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
        #         id_number += 1
        #         print(label)
        #         name = "../image/102/id_{}, {}".format(id_number, label)
        #         cv2.imwrite(name + ".jpg", image_crop_array)
        #         cv2.rectangle(result, (a0 - 5, a1 - 5), (a2 + 5, a3 + 5), color=(0, 0, 255),
        #                       thickness=3)
        #         cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (0, 255, 0), 3, cv2.LINE_AA)

        #####

        # print('Number of detections: ', len(centers))
        # a = 0
        # If centroids are detected then track them
        print("len box_detected: ", len(box_detected))
        if len(box_detected) > 0:

            # Track object using Kalman Filter
            tracker.Update(box_detected, obj_type)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                # print("trace of track i: ",len(tracker.tracks[i].trace))
                # print("len tracker: ", len(tracker.tracks[i].trace))
                # if len(tracker.tracks[i].trace) == 0:
                #     bbox = tracker.tracks[i].ground_truth_box.reshape((4, 1))
                #     a0, a1, a2, a3 = convert_bbox(bbox)
                #     image_crop = frame[a1:a3, a0:a2]
                #     cv2.imwrite("image.jpg", image_crop)
                #     image_crop = Image.fromarray(image_crop, 'RGB')
                #     image_crop = super_resolution_image(image_crop, model)
                #     image_crop_array = np.asarray(image_crop)

                #     face_male_resize = image_crop.resize((160, 160), Image.ANTIALIAS)
                #     face = np.array(face_male_resize)
                #     aligned_images.append(face)
                #     # faces[0, :, :, :] = face
                #     age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})
                #     aligned_images = []
                #     # print(gender_predict)
                #     # print(type(gender_predict))
                #     label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
                #     id_number += 1
                #     print(label)
                #     name = "../image/103/id_{}, {}".format(id_number, label)
                #     cv2.imwrite(name + ".jpg", image_crop_array)
                #     cv2.rectangle(result, (a0 - 5, a1 - 5), (a2 + 5, a3 + 5), color=(0, 0, 255),
                #                   thickness=3)
                #     cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (0, 255, 0), 3, cv2.LINE_AA)
                # if len(tracker.tracks[i].trace) >= 9:
                if len(tracker.tracks[i].trace) > 0:
                    if len(tracker.tracks[i].trace) == 1:
                        print("a")
                        bbox = tracker.tracks[i].ground_truth_box.reshape((4, 1))
                        a0, a1, a2, a3 = convert_bbox(bbox)
                        image_crop = frame[a1:a3, a0:a2]
                        cv2.imwrite("image.jpg", image_crop)
                        image_crop = Image.fromarray(image_crop, 'RGB')
                        # image_crop = super_resolution_image(image_crop, model)
                        image_crop_array = np.asarray(image_crop)

                        face_male_resize = image_crop.resize((160, 160), Image.ANTIALIAS)
                        face = np.array(face_male_resize)
                        aligned_images.append(face)
                        # faces[0, :, :, :] = face
                        age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})
                        aligned_images = []
                        # print(gender_predict)
                        # print(type(gender_predict))
                        label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
                        tracker.tracks[i].age = str(int(age_predict[0]))
                        tracker.tracks[i].gender = "Female" if gender_predict[0] == 0 else "Male"

                        id_number += 1
                        print(label)
                        name = "../image/102/id_{}, {}".format(id_number, label)
                        cv2.imwrite(name + ".jpg", image_crop_array)
                        # (a0, a1)/(a2, a3) are the box's (x, y) corners
                        cv2.rectangle(result, (a0, a1), (a2, a3), color=(255, 0, 0),
                                      thickness=1)
                        cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (255, 0, 0), 1, cv2.LINE_AA)
                    # x_center_first = tracker.tracks[i].trace[0][0][0]
                    # y_center_first = tracker.tracks[i].trace[0][1][0]

                    if (len(tracker.tracks[i].trace) > 1):
                        bbox = tracker.tracks[i].ground_truth_box.reshape((4, 1))
                        a0, a1, a2, a3 = convert_bbox(bbox)
                        label = "{}, {}".format(tracker.tracks[i].age, tracker.tracks[i].gender)
                        cv2.rectangle(result, (a0, a1), (a2, a3), color=(0, 0, 255),
                                      thickness=1)
                        cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (0, 0, 255), 1, cv2.LINE_AA)

                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(result, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)
                        
                        # cv2.putText(result, label, ())
                    # classes = tracker.tracks[i].get_obj()
                    if (len(tracker.tracks[i].trace) >= 9) and (not tracker.tracks[i].counted):
                        bbox = tracker.tracks[i].ground_truth_box.reshape((4, 1))
                        tracker.tracks[i].counted = True
                        x_center_first = tracker.tracks[i].trace[0][0][0]
                        y_center_first = tracker.tracks[i].trace[0][1][0]
                        x_center_second = tracker.tracks[i].trace[7][0][0]
                        y_center_second = tracker.tracks[i].trace[7][1][0]
                        # if y_center_first > (a * x_center_first + b):
                        #     count_people["people_come_out"] += 1
                        # if y_center_first < (a * x_center_first + b):
                        #     count_people["people_come_in"] += 1
                        if y_center_second > y_center_first and x_center_second > x_center_first:
                            count_people["people_come_in"] += 1
                        if y_center_second < y_center_first and x_center_second < x_center_first:
                            count_people["people_come_out"] += 1
                        # a0, a1, a2, a3 = convert_bbox(bbox)


                        # image_crop = frame[a1:a3, a0:a2]
                        # # cv2.imwrite("image.jpg", image_crop)
                        # image_crop = Image.fromarray(image_crop, 'RGB')
                        # image_crop = super_resolution_image(image_crop, model)
                        # image_crop_array = np.asarray(image_crop)

                        # face_male_resize = image_crop.resize((160, 160), Image.ANTIALIAS)
                        # face = np.array(face_male_resize)
                        # aligned_images.append(face)
                        # # faces[0, :, :, :] = face
                        # age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})
                        # aligned_images = []
                        # # print(gender_predict)
                        # # print(type(gender_predict))
                        # label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
                        # id_number += 1
                        # print(label)
                        # name = "../image/102/id_{}, {}".format(id_number, label)
                        # cv2.imwrite(name + ".jpg", image_crop_array)
                        # cv2.rectangle(result, (a0, a1), (a2, a3), color=(255, 0, 0),
                        #               thickness=3)
                        # cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (0, 255, 0), 3, cv2.LINE_AA)
                        

        # Overlay the people counts
        x = 30
        y = 30
        dy = 20
        i = 0

        for key, value in count_people.items():
            text = key + ':' + str(value)
            cv2.putText(result, text, (x, y + dy * i), font, 1, (255, 0, 255), 2, cv2.LINE_AA)
            i += 1
        # cv2.line(result, (ix1, iy1), (ix2, iy2), (0, 0, 255), 2)
        cv2.circle(result, (ix1, iy1), 5, (0, 0, 255), 4)
        cv2.rectangle(result, (ix, iy), (ex, ey), (0, 255, 0), 0)
        cv2.imshow('Tracking', result)
        out.write(result)

        # Check for key strokes
        k = cv2.waitKey(1) & 0xff
        if k == ord('n'):
            continue
        elif k == 27:  # 'esc' key has been pressed, exit program.
            break

    # When everything done, release the capture
    out.release()
    cap.release()
    cv2.destroyAllWindows()
Example #12
def main():
    # Create opencv video capture object
    cap = cv2.VideoCapture('/home/deepak/innovation_lab_files/vid1_new.mp4')

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    # Infinite loop to process video frames
    mainlist = [[None, None] for _ in range(1000)]  # one [x, y] slot per track id
    CarCount = 0
    NoneCarCount = 0
    NoneVehicle = 0
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        # Make a copy of the original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if (skip_frame_count < 15):
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)
        newcenter = []
        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)
            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            # print(len(tracker.tracks))
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 4):
                    # print(tracker.tracks[i].trace)
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)
                    cv2.putText(frame, str(tracker.tracks[i].track_id),
                                (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX,
                                0.75, (0, 0, 255), 2)
                    newcenter.append(
                        [int(x1), int(y1), tracker.tracks[i].track_id])

            # Display the resulting tracking frame
            # cv2.line(frame,(0,0),(100,100),(22,122,222),8)
            cv2.line(frame, (200, 600), (960, 600), (139, 0, 0), 8)
            cv2.putText(frame, 'Car Count =' + str(CarCount), (30, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (92, 142, 215), 3)
            cv2.putText(frame, 'Non Car Count =' + str(NoneCarCount),
                        (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                        (92, 142, 215), 3)
            print(CarCount + NoneCarCount)
            # cv2.line(frame,(150,450),(280,370),(139,0,0),8)
            cv2.imshow('Tracking', frame)
        for j in range(len(centers)):
            for i in range(len(newcenter)):
                a = newcenter[i][0]
                b = newcenter[i][1]
                e = newcenter[i][2]
                c = centers[j][0][0]
                d = centers[j][1][0]
                temp_len = np.sqrt((a - c) * (a - c) + (b - d) * (b - d))

                if (temp_len < 7):
                    if (mainlist[e][0] != None):
                        c = mainlist[e][0]
                        d = mainlist[e][1]
                        if ((d <= 600) and (c >= 200) and (c <= 960)
                                and (a >= 200) and (a <= 960) and b >= 600):
                            CarCount += 1
                            s1 = orig_frame.shape[0]
                            s2 = orig_frame.shape[1]
                            # print('this')
                            # print(s1)
                            # print(s2)
                            # print(a)
                            # print(b)
                            # print('this')
                            # if((a-120>=0) and (a+120<=s2) and (b-120>=0) and (b+120<=s1)):
                            try:
                                # numpy slicing is [y, x]: b is the row, a the column
                                img = orig_frame[b - 80:b + 80, a - 80:a + 80]
                                # cv2.imshow("cropped", img)
                                img = cv2.resize(img, (img_width, img_height))
                                # HWC -> CHW for the channels-first model
                                arr = np.transpose(img, (2, 0, 1))
                                arr = np.expand_dims(arr, axis=0)
                                prediction = model.predict(arr)[0]
                                # print(prediction)
                                bestclass = ''
                                bestconf = -1
                                best = [
                                    'non-vehicle', 'vehicle', 'non-vehicle',
                                    'non-vehicle', 'non-vehicle',
                                    'non-vehicle', 'non-vehicle',
                                    'non-vehicle', 'non-vehicle', 'vehicle'
                                ]
                                # Scan all classes; indices 1 and 9 are 'vehicle'
                                for n in range(len(prediction)):
                                    if (prediction[n] > bestconf):
                                        bestclass = n
                                        bestconf = prediction[n]
                                if (bestclass != 1 and bestclass != 9):
                                    NoneVehicle += 1
                                    if (NoneVehicle % 10 == 2):
                                        CarCount -= 1
                                        NoneCarCount += 1

                            except Exception:
                                # Crop/resize failed (e.g. box near the frame
                                # border); skip this detection
                                print('crop outside frame, skipping')

                        mainlist[e][0] = a
                        mainlist[e][1] = b
                    else:
                        mainlist[e][0] = a
                        mainlist[e][1] = b
                    newcenter.pop(i)
                    break
        # for i in range(len(newcenter)):
        #     mainlist[newcenter[i][2]][0]=newcenter[i][0]
        #     mainlist[newcenter[i][2]][1]=newcenter[i][1]
        # Display the original frame
        # cv2.imshow('Original', orig_frame)

        # Slow down the FPS
        cv2.waitKey(50)

        # Check for key strokes
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    print("this is final car count ")
    print(CarCount)
    cap.release()
    cv2.destroyAllWindows()
Example #13
def detect_video(yolo, video_path, output_path=""):

    # NOTE: the capture is hard-coded to webcam index 1; the video_path
    # argument is ignored here
    vid = cv2.VideoCapture(1)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC    = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps       = vid.get(cv2.CAP_PROP_FPS)
    video_size      = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                        int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        #print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
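    # Tracker positional args (documented in a later example):
    #     dist_thresh, max_frames_to_skip, max_trace_length, trackIdCount
    # so Tracker(160, 1, 9, 100) pairs detections within 160 px, keeps a
    # track through 1 skipped frame, stores 9 trace points, and starts
    # track ids at 100.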
    tracker = Tracker(160, 1, 9, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    select_area = SelectArea()
    area_list = []

    while True:
        return_value, matrix = vid.read()
        if not return_value:
            break

        # # Select the region of interest
        # if len(area_list) == 0:
        #     select_area.run(matrix)
        #     if len(select_area.area_list) <= 0:
        #         if select_area.select_times < 5:
        #             continue
        #         else:
        #             exit(0)
        #         # end if
        #     # end if
        #     area_list = select_area.area_list
        #     temp_height = select_area.area_list[0]["height"]
        #     temp_width = select_area.area_list[0]["width"]
        #     # # Re-initialize the font
        #     # draw.set_font(temp_height//9, temp_height//27, (temp_width//20, temp_height//20), temp_height)
        #     # # Re-initialize the output video stream
        #     # camera.width = temp_width
        #     # camera.height = temp_height
        # select_area.clear()

        # x1, y1, x2, y2 = area_list[0]["box"]
        # frame = matrix[y1:y2, x1:x2]
        frame = matrix

        print(frame.shape)
        image = Image.fromarray(frame)
        image, centers, number = yolo.detect_image(image)
        print(image.size)
        result = np.asarray(image)
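        # rolling FPS estimate: count frames until one second of wall-clock
        # time accumulates, then publish the count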
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        font = cv2.FONT_HERSHEY_SIMPLEX
        #cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,fontScale=0.50, color=(255, 0, 0), thickness=2)
        # cv2.putText(result, str(number), (20,  40), font, 1, (0, 0, 255), 5)

        # Track object using Kalman Filter
        tracker.Update(centers)

        # For identified object tracks draw tracking line
        # Use various colors to indicate different track_id
        for i in range(len(tracker.tracks)):
            if (len(tracker.tracks[i].trace) > 1):
                for j in range(len(tracker.tracks[i].trace) - 1):
                    # Draw trace line
                    x1 = tracker.tracks[i].trace[j][0][0]
                    y1 = tracker.tracks[i].trace[j][1][0]
                    x2 = tracker.tracks[i].trace[j + 1][0][0]
                    y2 = tracker.tracks[i].trace[j + 1][1][0]

                    clr = tracker.tracks[i].track_id % 9
                    cv2.line(result, (int(x1), int(y1)), (int(x2), int(y2)),
                                track_colors[clr], 4)
                    #x3 = tracker.tracks[i].track_id
                    #cv2.putText(result,str(tracker.tracks[j].track_id),(int(x1),int(y1)),font,track_colors[j],3)
                    #cv2.circle(result,(int(x1),int(y1)),3,track_colors[j],3)
        # Display the resulting tracking frame
        cv2.imshow('Tracking', result)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #14
def main():
    """Main function for multi object tracking
    Usage:
        $ python2.7 objectTracking.py
    Pre-requisite:
        - Python2.7
        - Numpy
        - SciPy
        - Opencv 3.0 for Python
    Args:
        None
    Return:
        None
    """

    # Create opencv video capture object
    # raw string so the backslashes in the Windows path are not treated as
    # escape sequences
    cap = cv2.VideoCapture(
        r'C:\Users\user\Documents\Iot-Tracking\CV2\kalman_filter_multi_object_tracking-master\data\RAW_ Moment van mows down pedestrians in Barcelona caught on camera (DISTURBING FOOTAGE).mp4'
    )
    #cap = cv2.VideoCapture(0)

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    # Infinite loop to process video frames
    while (True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if (skip_frame_count < 15):
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)

        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        #cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), track_colors[clr], 2)

            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Slow down the FPS
        cv2.waitKey(50)

        # Check for key strokes
        k = cv2.waitKey(1) & 0xff
        if k == ord("q"):  # 'esc' key has been pressed, exit program.
            break
        if k == ord("p"):  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(1) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #15
def main():
    # Create opencv video capture object
    # cap = cv2.VideoCapture('data/TrackingBugs.mp4')
    cap = cv2.VideoCapture('data/video_3_bin.mp4')

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    frame_count = 1
    # Infinite loop to process video frames
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # guard against the end of the stream before touching the frame
        if not ret:
            print "All frames were processed"
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Binarize: force every non-zero pixel to white
        frame[frame != 0] = 255

        # Skip initial frames that display logo
        if skip_frame_count < 1:
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        if ret:
            print "Processing frame " + format(frame_count)
            frame_count += 1
            centers = detector.Detect(frame)

            # If centroids are detected then track them
            if len(centers) > 0:

                # Track object using Kalman Filter
                tracker.Update(centers)

                # For identified object tracks draw tracking line
                # Use various colors to indicate different track_id
                for i in range(len(tracker.tracks)):
                    if len(tracker.tracks[i].trace) > 1:
                        for j in range(len(tracker.tracks[i].trace) - 1):
                            # Draw trace line
                            x1 = tracker.tracks[i].trace[j][0][0]
                            y1 = tracker.tracks[i].trace[j][1][0]
                            x2 = tracker.tracks[i].trace[j + 1][0][0]
                            y2 = tracker.tracks[i].trace[j + 1][1][0]
                            clr = tracker.tracks[i].track_id % 9
                            cv2.line(frame, (int(x1), int(y1)),
                                     (int(x2), int(y2)), track_colors[clr], 2)

                # Display the resulting tracking frame
                cv2.imshow('Tracking', frame)

            # Display the original frame
            cv2.imshow('Original', orig_frame)

            # Slow down the FPS
            cv2.waitKey(50)

            # # # Check for key strokes
            # k = cv2.waitKey(50) & 0xff
            # if k == 27:  # 'esc' key has been pressed, exit program.
            #     break
            # if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            #     pause = not pause
            #     if pause is True:
            #         print "Code is paused. Press 'p' to resume.."
            #         while pause is True:
            #             # stay in this loop until
            #             key = cv2.waitKey(30) & 0xff
            #             if key == 112:
            #                 pause = False
            #                 print "Resume code..!!"
            #                 break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #16
def main():
    tf.reset_default_graph()
    # a single Session is enough; the original created an InteractiveSession
    # and immediately overwrote it
    sess = tf.Session()
    aligned_images = []
    upscale_factor = 4
    img_size = 64
    depth = 16
    k = 8
    weight_file = "/home/neosai/Documents/github/age-gender-estimation/utkface/weights.29-3.76_utk.hdf5"
    model_predict_age_and_gender = WideResNet(img_size, depth=depth, k=k)()
    model_predict_age_and_gender.load_weights(weight_file)
    model_name = "/home/neosai/Documents/projects/deep_face_recognition/weights/netG_epoch_4_100.pth"
    model = Generator(upscale_factor).eval()
    model.load_state_dict(
        torch.load(model_name, map_location=lambda storage, loc: storage))
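    # get_crop_size()/get_crop_size1() appear to set the module-level
    # globals (ix, iy, ex, ey, ix1, iy1) that are read below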
    get_crop_size(path)
    get_crop_size1(path)
    print('Your area of interest: ', ix, ' ', iy, ' ', ex, ' ', ey)
    area = (ix, iy, ex, ey)
    print("iy1", iy1)
    # Create opencv video capture object
    cap = cv2.VideoCapture(path)
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if cap_from_stream:
        w = 1280
        h = 720
    # fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        '/home/neosai/Documents/projects/deep_face_recognition/video/1_02222.avi',
        fourcc, 15, (w, h))

    # Create Object Detector
    detector = YOLO()
    tracker = Tracker(iou_thresh=0.3,
                      max_frames_to_skip=5,
                      max_trace_length=40,
                      trackIdCount=0)
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    count_people_come_in_out = {
        'female_come_in': 0,
        'female_come_out': 0,
        'male_come_in': 0,
        'male_come_out': 0
    }

    count_people = {'people_come_in': 0, 'people_come_out': 0}
    id_number = 0
    img_size = 64
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    # depth = 16
    # k = 8
    # weight_file = "/home/neosai/Documents/github/age-gender-estimation/utkface/weights.29-3.76_utk.hdf5"
    # model_predict_age_and_gender = WideResNet(img_size, depth=depth, k=k)()
    # model_predict_age_and_gender.load_weights(weight_file)
    faces = np.empty((1, 64, 64, 3))
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        if cap_from_stream:
            frame = cv2.resize(frame, (1280, 720))
        frame = Image.fromarray(frame)
        # Detect and return centroids of the objects in the frame
        result, centers, box_detected, obj_type = detector.detect_image(
            frame, area)
        result = np.asarray(result)
        frame = np.asarray(frame)

        #####
        for bbox in box_detected:
            a0, a1, a2, a3 = bbox[0], bbox[1], bbox[2], bbox[3]
            cv2.rectangle(result, (a0 - 10, a1 - 10), (a2 + 10, a3 + 10),
                          (0, 255, 0), 3)
            print(a1)
            print(iy1)
            if a1 < iy1 + 50 and a1 > iy1 - 50:
                image_crop = frame[a1:a3, a0:a2]
                cv2.imwrite("image.jpg", image_crop)
                image_crop = Image.fromarray(image_crop, 'RGB')
                image_crop = super_resolution_image(image_crop, model)
                image_crop_array = np.asarray(image_crop)

                face_male_resize = image_crop.resize((img_size, img_size),
                                                     Image.ANTIALIAS)
                face = np.array(face_male_resize)
                aligned_images.append(face)
                faces[0, :, :, :] = face
                # age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})

                # predict ages and genders of the detected faces
                results = model_predict_age_and_gender.predict(faces)
                predicted_genders = results[0]
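                # the age head outputs a probability over ages 0..100; the
                # dot product below takes its expected value as the estimate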
                ages = np.arange(0, 101).reshape(101, 1)
                predicted_ages = results[1].dot(ages).flatten()
                aligned_images = []
                label = "{}, {}".format(
                    int(predicted_ages[0]),
                    "F" if predicted_genders[0][0] > 0.5 else "M")
                # print(gender_predict)
                # print(type(gender_predict))
                # label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
                id_number += 1
                print(label)
                name = "../image/102/id_{}, {}".format(id_number, label)
                cv2.imwrite(name + ".jpg", image_crop_array)
                cv2.rectangle(result, (a0 - 5, a1 - 5), (a2 + 5, a3 + 5),
                              color=(0, 0, 255),
                              thickness=3)
                cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2,
                            (0, 255, 0), 3, cv2.LINE_AA)

        #####

        # print('Number of detections: ', len(centers))
        # a = 0
        # If centroids are detected then track them
        if len(box_detected) > 0:

            # Track object using Kalman Filter
            tracker.Update(box_detected, obj_type)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                # print("trace of track i: ",len(tracker.tracks[i].trace))
                # print("len tracker: ", len(tracker.tracks[i].trace))
                # if len(tracker.tracks[i].trace) == 0:
                #	 bbox = tracker.tracks[i].ground_truth_box.reshape((4, 1))
                #	 a0, a1, a2, a3 = convert_bbox(bbox)
                #	 image_crop = frame[a1:a3, a0:a2]
                #	 cv2.imwrite("image.jpg", image_crop)
                #	 image_crop = Image.fromarray(image_crop, 'RGB')
                #	 # image_crop = super_resolution_image(image_crop, model)
                #	 image_crop_array = np.asarray(image_crop)

                #	 face_male_resize = image_crop.resize((160, 160), Image.ANTIALIAS)
                #	 face = np.array(face_male_resize)
                #	 aligned_images.append(face)
                #	 # faces[0, :, :, :] = face
                #	 age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})
                #	 aligned_images = []
                #	 # print(gender_predict)
                #	 # print(type(gender_predict))
                #	 label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
                #	 id_number += 1
                #	 print(label)
                #	 name = "../image/id_{}, {}".format(id_number, label)
                #	 cv2.imwrite(name + ".jpg", image_crop_array)
                #	 cv2.rectangle(result, (a0 - 5, a1 - 5), (a2 + 5, a3 + 5), color=(0, 0, 255),
                #				   thickness=3)
                #	 cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (0, 255, 0), 3, cv2.LINE_AA)
                # (the original tested >= 0, which is always true)
                if len(tracker.tracks[i].trace) > 0:
                    print(len(tracker.tracks[i].trace))
                    # cv2.circle(result, (int(x_center_first), int(y_center_first)), 5, (0, 0, 255), -1)
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(result, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
                    classes = tracker.tracks[i].get_obj()
                    if (len(tracker.tracks[i].trace) >=
                            9) and (not tracker.tracks[i].counted):
                        bbox = tracker.tracks[i].ground_truth_box.reshape(
                            (4, 1))
                        tracker.tracks[i].counted = True
                        x_center_first = tracker.tracks[i].trace[0][0][0]
                        y_center_first = tracker.tracks[i].trace[0][1][0]
                        x_center_second = tracker.tracks[i].trace[8][0][0]
                        y_center_second = tracker.tracks[i].trace[8][1][0]
                        # if y_center_first > (a * x_center_first + b):
                        #	 count_people["people_come_out"] += 1
                        # if y_center_first < (a * x_center_first + b):
                        #	 count_people["people_come_in"] += 1
                        if y_center_second > y_center_first and x_center_second > x_center_first:
                            count_people["people_come_in"] += 1
                        if y_center_second < y_center_first and x_center_second < x_center_first:
                            count_people["people_come_out"] += 1
                        # a0, a1, a2, a3 = convert_bbox(bbox)

                        # image_crop = frame[a1:a3, a0:a2]
                        # cv2.imwrite("image.jpg", image_crop)
                        # image_crop = Image.fromarray(image_crop, 'RGB')
                        # image_crop = super_resolution_image(image_crop, model)
                        # image_crop_array = np.asarray(image_crop)

                        # face_male_resize = image_crop.resize((160, 160), Image.ANTIALIAS)
                        # face = np.array(face_male_resize)
                        # aligned_images.append(face)
                        # # faces[0, :, :, :] = face
                        # age_predict, gender_predict = sess.run([age, gender], feed_dict={images_pl: aligned_images, train_mode: False})
                        # aligned_images = []
                        # # print(gender_predict)
                        # # print(type(gender_predict))
                        # label = "{}, {}".format(int(age_predict[0]), "Female" if gender_predict[0] == 0 else "Male")
                        # id_number += 1
                        # print(label)
                        # name = "../image/id_{}, {}".format(id_number, label)
                        # cv2.imwrite(name + ".jpg", image_crop_array)
                        # cv2.rectangle(result, (a0, a1), (a2, a3), color=(255, 0, 0),
                        #			   thickness=3)
                        # cv2.putText(result, label, (a0 + 6, a1 - 6), font, 2, (0, 255, 0), 3, cv2.LINE_AA)

        # Display the resulting tracking frame
        x = 30
        y = 30
        dy = 20
        i = 0
        font = cv2.FONT_HERSHEY_COMPLEX_SMALL

        for key, value in count_people.items():
            text = key + ':' + str(value)
            cv2.putText(result, text, (x, y + dy * i), font, 1, (255, 0, 255),
                        2, cv2.LINE_AA)
            i += 1
        # cv2.line(result, (ix1, iy1), (ix2, iy2), (0, 0, 255), 2)
        cv2.circle(result, (ix1, iy1), 5, (0, 0, 255), 4)
        cv2.rectangle(result, (ix, iy), (ex, ey), (0, 255, 0), 0)
        cv2.imshow('Tracking', result)
        out.write(result)

        # Check for key strokes
        k = cv2.waitKey(1) & 0xff
        if k == ord('n'):
            continue
        elif k == 27:  # 'esc' key has been pressed, exit program.
            break

    # When everything done, release the capture
    out.release()
    cap.release()
    cv2.destroyAllWindows()
Example #17
        centroids = []
        for contour in contours:
            moment = cv2.moments(contour)
            # keep only objects whose area exceeds the contour threshold
            if moment["m00"] > args.contour:
                if args.debug:
                    # draw the detected contour in the frame
                    cv2.drawContours(frame, contours, index, GREEN, 2)
                x = int(moment["m10"] / moment["m00"])
                y = int(moment["m01"] / moment["m00"])
                center = (x, y)  # calculate center of contour
                centroids.append((contour, center, moment["m00"], 0))
            index += 1

        tracker.Update(centroids, (count / fps))

        fishes = tracker.fishes

        # exit program if there are no fishes detected
        if len(fishes) == 0:
            print("No fish detected")
            sys.exit()

        total_avg_speed = 0
        total_area = 0

        for i in range(len(fishes)):
            fish = tracker.fishes[i]
            center = fish.centroid
            if args.debug:
Example #18
def detect_img_series(yolo, img_path, output_path=""):
    imgs = os.listdir(img_path)
    imgs.sort(key=lambda x: int(x[:-4]))  # sort numerically by file name

    accum_time = 0
    curr_fps = 0
    prev_time = timer()

    # Variables initialization
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]

    # Initialize a tracker to manage the Tracks
    tracker = Tracker(160, 30, 6, 100)

    for img in imgs:
        image = cv2.imread(img_path + '/' + img)
        image = Image.fromarray(image)

        # Run YOLO detection
        image, centers, number = yolo.detect_image(image)
        result = np.asarray(image)

        curr_time = timer()  # get the current time
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0

        cv2.putText(result, str(number), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 5)

        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # Draw the tracking traces
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]

                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(result, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 4)

            cv2.imshow('Tracking', result)

        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        #if isOutput:
        #    out.write(result)
        if cv2.waitKey(100) & 0xFF == ord('q'):
            break

    yolo.close_session()
Example #19
def task1_1(mogthr, inputpath, dataset):
    # Create opencv video capture object
    path = inputpath + 'in%06d.jpg'
    cap = cv2.VideoCapture(path)

    # Create Object Detector
    detector = Detectors(thr=mogthr, dataset=dataset)

    # Create Object Tracker
    if dataset == 'highway':
        tracker = Tracker(200, 0, 60, 100)  # Tracker(200, 0, 200, 100)
    elif dataset == 'traffic':
        tracker = Tracker(200, 0, 60, 100)  # Tracker(50, 0, 90, 100)
    elif dataset == 'ownhighway':
        tracker = Tracker(45, 0, 60, 100)  # Tracker(50, 0, 90, 100)
    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    if dataset == 'highway':
        pts1 = np.float32([[120, 100], [257, 100], [25, 200], [250, 200]])
    elif dataset == 'traffic':
        pts1 = np.float32([[0, 50], [160, 15], [110, 190], [320, 110]])
    elif dataset == 'ownhighway':
        pts1 = np.float32([[190, 100], [290, 100], [60, 200], [250, 200]])
    pts2 = np.float32([[0, 0], [320, 0], [0, 240], [320, 240]])

    M = cv2.getPerspectiveTransform(pts1, pts2)
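    # M maps the road-plane trapezoid pts1 onto the rectangular bird's-eye
    # view pts2, so pixel displacements measured after warping are roughly
    # proportional to real distances on the road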


    print M
    counter = 0
    # Infinite loop to process video frames
    while True:
        counter += 1
        # Capture frame-by-frame
        ret, frame = cap.read()

        # Stop when no frame
        if frame is None:
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        #if (skip_frame_count < 200):
        #    skip_frame_count += 1
        #    continue

        # Detect and return centroids of the objects in the frame
        centers, xd, yd, wd, hd = detector.Detect(frame, counter)
        #print xd

        vel = []
        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers, dataset)



            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    vel = []
                    a=0
                    for j in range(5, len(tracker.tracks[i].trace) - 1):
                        a=a+1
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), track_colors[clr], 2)
                        if dataset == 'highway':
                            if y1 > 100 and y2 < 200:
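                                # apply the homography in homogeneous
                                # coordinates, then divide by z to get the
                                # rectified point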
                                x1r, y1r, z1r = np.dot(M,[x1, y1, 1])
                                x2r, y2r, z2r = np.dot(M, [x2, y2, 1])
                                x1r, y1r = x1r/z1r, y1r/z1r
                                x2r, y2r = x2r / z2r, y2r / z2r
                                # Euclidean distance between consecutive
                                # rectified points, scaled to a speed value
                                # (np.float is deprecated; plain float works)
                                dist = float(
                                    np.sqrt(((int(x2r) - int(x1r)) ** 2) +
                                            ((int(y2r) - int(y1r)) ** 2))
                                ) * 30.0 / 20 * 24.0 / 5
                                vel.append(dist)
                        if dataset == 'ownhighway':
                            if y1 > 100 and y2 < 200:
                                x1r, y1r, z1r = np.dot(M,[x1, y1, 1])
                                x2r, y2r, z2r = np.dot(M, [x2, y2, 1])
                                x1r, y1r = x1r/z1r, y1r/z1r
                                x2r, y2r = x2r / z2r, y2r / z2r


                                # same speed estimate with this dataset's
                                # scale factor (18 instead of 30)
                                dist = float(
                                    np.sqrt(((int(x2r) - int(x1r)) ** 2) +
                                            ((int(y2r) - int(y1r)) ** 2))
                                ) * 18.0 / 20 * 24.0 / 5
                                vel.append(dist)

                    if vel:
                        #if i==1:
                        #print xd[i]#'value ' + xd[i] + ' value ' + yd[i]#+ ' frame '+frame+ ' vel ' +vel
                        # if dataset == 'ownhighway':
                        #     #if i==0:
                        #     print counter,i, xd,np.mean(vel)
                        #     if counter>0:
                        #         a=0
                        #
                        #
                        #     #if xd==[]
                        #     # if len(vel)<4: #and int(np.mean(vel))>100:
                        #     #     cv2.putText(frame, '  vel ' + str(int(np.mean(vel))), (int(xd[a]), int(yd[a] - 4)),
                        #     #             cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                        #     if len(vel)>3:# and int(np.mean(vel))>100:
                        #         cv2.putText(frame, '  vel ' + str(int(np.mean(vel[-3:-1]))), (int(xd[0]), int(yd[0])),
                        #                 cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                        #     #cv2.putText(frame, '  vel ' + str(int(np.mean(vel))), (int(xd[0]), int(yd[0] - 4)),
                        #     #    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                        #     #print int(np.mean(vel)),i,j
                        if dataset == 'ownhighway':
                            #print i, xd
                            if len(vel)<10:
                                cv2.putText(frame, '  vel ' + str(int(np.mean(vel))), (int(xd[0]), int(yd[0] - 4)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                            else:
                                cv2.putText(frame, '  vel ' + str(int(np.mean(vel[-10:-1]))), (int(xd[0]), int(yd[0] - 4)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                        if dataset == 'highway':
                            #print i, xd
                            if len(vel)<20:
                                cv2.putText(frame, '  vel ' + str(int(np.mean(vel))), (int(xd[i]), int(yd[i] - 4)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                            else:
                                cv2.putText(frame, '  vel ' + str(int(np.mean(vel[-20:-1]))), (int(xd[i]), int(yd[i] - 4)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
                            #print int(np.mean(vel)), i, j
                    # x1 = tracker.tracks[i].trace[-2][0][0]
                    # y1 = tracker.tracks[i].trace[-2][1][0]
                    # x2 = tracker.tracks[i].trace[-1][0][0]
                    # y2 = tracker.tracks[i].trace[-1][1][0]
                    # if dataset == 'highway':
                    #     if y1 > 100 and y2 < 200:
                    #         x1r, y1r, z1r = np.dot(M,[x1, y1, 1])
                    #         x2r, y2r, z2r = np.dot(M, [x2, y2, 1])
                    #         x1r, y1r = x1r/z1r, y1r/z1r
                    #         x2r, y2r = x2r / z2r, y2r / z2r
                    #
                    #
                    #
                    #
                    #         dist = np.float(np.sqrt(((int(x2r) - int(x1r))**2) + ((int(y2r) - int(y1r))**2))) * np.float(30)/20 * np.float(24)/5#euclidean distance between two points
                    #         vel.append(dist)
                    #         cv2.putText(frame, '  vel '+str(int(dist)), (int(xd[i]), int(yd[i]-4)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),1,cv2.LINE_AA)
                                #print (x1, x2, y1, y2, dist, i)

                                # x1r,y1r = M * [x1,y1,1]            #(x,y,1) = M * (xold,yold,1)
                                # x2r, y2r = M * [x2,y2,1]
                                #
                                # vel.append[j] = int(np.sqrt(((int(x2r) - int(x1r)) ** 2) + (
                                #     (int(y2r) - int(y1r)) ** 2))) * int(3/50) * int(24/5) #     * (m/pixel) * (frame/sec)    # euclidean distance between two points
                                #
                                # if len(vel[j] > 10):
                                #     return#velocity = np.mean(vel[j](i:i-10))                 #     * (m/pixel) * (frame/sec)

                                #print 'car '+ str(i) +' velocity '+ str(dist) #(x pixels every frame) -> * (m/pixel) * (frame/sec) = (m/sec)

        # Display homography
        dst = cv2.warpPerspective(frame, M, (320, 240))
        cv2.imshow('Homography', dst)
        cv2.imwrite('../week5/results/hom' + str(counter) + '.png', dst)

        # Display the resulting tracking frame


        cv2.imshow('Tracking', frame)
        cv2.imwrite('../week5/results/out' + str(counter) + '.png', frame)
        cv2.imwrite('out' + str(counter) + '.jpg', frame)  # use the counter, not the frame array, in the file name
        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Slow down the FPS
        cv2.waitKey(1)

        # Check for key strokes
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break


    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #20
def Trace_tracking(images):
    """tracking mutiple traces"""

    # extract the first file name
    first_filename = os.path.splitext(images[0])[0]

    #extract first file index
    #first_number = int(filter(str.isdigit, first_filename))

    first_number = int(re.search(r'\d+', first_filename).group(0))

    #define trace result path
    outfile = save_path_track + '{:04d}'.format(first_number) + '.txt'

    print(outfile)

    image_path = os.path.join(dir_path, images[0])

    #load image from image path
    frame = cv2.imread(image_path)

    #Determine the width and height from the first image
    height, width, channels = frame.shape
    length = len(images)

    print("Image sequence size: {0} {1} {2}\n".format(width, height, length))

    #Create Object Detector
    detector = Detectors()

    #detector = Root_Detectors(pattern_id)

    # Create Object Tracker, arguments:
    # dist_thresh, max_frames_to_skip, max_trace_length, trackIdCount
    # dist_thresh: when the distance exceeds this threshold, the track is
    # deleted and a new track is created
    tracker = Tracker(dist_thresh, max_frames_to_skip, max_trace_length,
                      trackIdCount)

    # Variables initialization
    skip_frame_count = 0

    #frame ID
    ID = 0

    #stem_track = np.zeros(3)

    # initialize parameters for recording radius and center locations
    radius_track = []

    centers_track = []

    #Begin of process each image for tracking
    ###################################################################################
    # loop to process video frames
    for frame_ID, image in enumerate(images):

        # Capture frame-by-frame
        image_path = os.path.join(dir_path, image)

        #load image frame
        frame = cv2.imread(image_path)

        # keep ID in sync with the frame index; the original never
        # incremented ID, so Detect() always received ID == 0
        ID = frame_ID

        # exit the loop if we run past the end frame
        if ID == len(images):
            print("End of frame sequence!")
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        print("Processing frame {}...".format(frame_ID))

        # Detect and return centroids of the objects in the frame
        (centers, radius_rec) = detector.Detect(frame, ID, radius_min,
                                                radius_max)

        # record radius and center locations
        radius_track.append(radius_rec)
        centers_track.append(centers)

        #centers, stem_center = detector.Detect_root(frame, ID, pattern_id, stem_track)

        #centers = detector.Detect_root_blob(frame, ID, pattern_id, stem_track)

        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            print("Tracker size: {}...".format(len(tracker.tracks)))

    #End of process each image for tracking
    ###################################################################################

    radius_track = np.hstack(radius_track)

    coord_radius = []

    # combine x, y coordinates
    for i in range(0, len(centers_track)):
        for j in range(0, len(centers_track[i])):
            coord_radius.append(np.array(centers_track[i][j]))

    coord_radius = np.array(coord_radius)

    #start index value along Z axis
    offset = first_number

    # write output as txt file
    with open(outfile, 'w') as f:

        #loop all tracked objects
        for i in range(len(tracker.tracks)):

            if (len(tracker.tracks[i].trace) > 2):

                #acquire dimension of current tracker
                dim = len(tracker.tracks[i].trace)

                #extract point data from current tracker
                point = np.asarray(tracker.tracks[i].trace)

                #print(type(tracker.tracks[i].trace))

                # acquire shape of points
                nsamples, nx, ny = point.shape

                #reshape points
                point = point.reshape((nsamples, nx * ny))

                #extract x,y,z coordinates
                x = np.asarray(point[:, 0]).flatten()
                y = np.asarray(point[:, 1]).flatten()
                z = np.asarray(range(offset, dim + offset)).flatten()

                #curve fitting of xy trace in 2D space
                #popt, pcov = curve_fit(func, x, y)
                #y = func(x, *popt)

                #compute average radius
                avg_radius = center_radius(x, y, radius_track, coord_radius)

                #reshape radius array
                r = np.asarray(avg_radius * np.ones((len(x), 1))).flatten()

                #print("Average radius: {0} \n".format(avg_radius))

                # write out tracing trace result
                #if ( (len(x) == len(y) == len(z)) and (np.count_nonzero(x) == dim) and (np.count_nonzero(y) == dim) and sum(x) !=0 ):
                if ((len(x) == len(y) == len(z)) and sum(x) != 0):

                    # save trace points as txt file
                    f.write("#Trace {0} \n".format(i))
                    np.savetxt(f, np.c_[x, y, z, r], fmt='%-7.2f')

                #else:
                #print("Inconsistant length pf 3D array!")
                #ax.scatter(x, y, z, c = 'b', marker = 'o')
                #ax.plot(x, y, z, label='Tracking root trace')
                #f.write("#End\n")

        # write end mark; the with-block closes the file automatically
        f.write("#End\n")
Example #21
def detect_video(yolo, video_path, output_path=""):
    vid = cv2.VideoCapture(video_path)
    tracker = Tracker(30, 0, 6, 0)
    width = 400
    height = 300

    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127), (50, 50, 98), (37, 37, 47)]
    pause = False
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC),
              type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps,
                              (int(0.78 * width) - int(0.109 * width),
                               height - int(height * 0.0608)))

    fps = "FPS: ??"
    accum_time = 0
    curr_fps = 0
    clr = 0

    frame_counter = 0
    direction = ""
    saving_name = ""
    (dX, dY) = (0, 0)

    lane_number = 0
    centers = []

    prev_time = timer()
    while True:
        return_value, frame = vid.read()

        if not return_value:
            print('frame is empty: break')
            break

        orig_frame = frame.copy()
        frame = cv2.resize(frame, (width, height))
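        # crop a fixed fraction off the top and both sides of the resized
        # frame before running detection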
        frame = frame[int(0.0608 * height):height,
                      int(0.109 * width):int(0.78 * width)]
        h, w, d = frame.shape

        frame_counter += 1

        image = Image.fromarray(frame)
        new_result, centers, scores, out_classes, bbox = yolo.detect_image(
            image)

        result = np.array(new_result)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1

        if len(centers) > 0 and len(out_classes) > 0:
            tracker.Update(centers)
            for i in range(len(tracker.tracks)):
                x1 = 0
                y1 = 0
                x2 = 0
                y2 = 0
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
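                        # NOTE: unlike the other examples, this one stores
                        # trace points as (y, x): index [0][0] is read as y
                        # and [1][0] as x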
                        y1 = tracker.tracks[i].trace[j][0][0]
                        x1 = tracker.tracks[i].trace[j][1][0]
                        y2 = tracker.tracks[i].trace[j + 1][0][0]
                        x2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id
                        try:
                            some_position = tracker.tracks[i].trace[2][1][0]
                            some_position_y = tracker.tracks[i].trace[2][0][0]
                        except IndexError:
                            # fewer than three trace points collected yet
                            some_position = tracker.tracks[i].trace[1][1][0]
                            some_position_y = tracker.tracks[i].trace[1][0][0]

                        #cv2.circle(result, (int(x2), int(y2)), 2, track_colors[clr%3], -1)
                        cv2.arrowedLine(
                            result, (int(some_position), int(some_position_y)),
                            (int(x2), int(y2)),
                            track_colors[clr % 3],
                            line_type=cv2.LINE_AA,
                            thickness=1)

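                        # horizontal displacement of the newest point from an
                        # early trace point; its sign gives the travel direction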
                        dX = x2 - some_position
                        #dXCheck = x2-x1
                        dY = y2 - y1
                    (dirX, dirY) = ("", "")
                    #print(dX)
                    if np.abs(dX) >= 12:
                        dirX = "East" if np.sign(dX) == 1 else "West"
                    #if np.abs(dY)>=4:
                    #    dirY = "North" if np.sign(dY) == 1 else "South"

                    if dirX != "" and dirY != "":
                        direction = "{}-{}".format(dirY, dirX)
                    else:
                        direction = dirX if dirX != "" else dirY

                    if direction == "East":
                        if (y2 > 0 and y2 <= h / 2.28):
                            lane_number = 1
                        if (y2 > h / 3.68 and y2 <= h / 1.35):
                            lane_number = 2
                        if (y2 > h / 1.35 and y2 <= h):
                            lane_number = 3

                        cv2.putText(result, 'WRONG DIRECTION',
                                    (int(x1) - 25, int(y1) - 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.30,
                                    (0, 0, 255), 1)
                        now = dt.datetime.now()
                        raw_dir = './result/wrong_direction_'
                        lane_ = str(lane_number) + '_{0:02d}'.format(i)
                        seconds = now.second
                        right_now = now.strftime("%Y%m%d_%H%M%S_")
                        saving_name = "%s%s%s.jpg" % (raw_dir, right_now,
                                                      lane_)

                        if not os.path.exists(saving_name):
                            if not os.path.exists("./result"):
                                os.mkdir("./result")
                            print(saving_name)
                            resized = resize(result)
                            cv2.imwrite(saving_name, resized)

                    else:
                        cv2.putText(result, direction,
                                    (int(x1) - 25, int(y1) - 25),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.30,
                                    (0, 255, 0), 1)

        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0

        cv2.putText(result, "Count: " + str(len(centers)), (3, 35),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1)
        cv2.putText(result,
                    text=fps,
                    org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.40,
                    color=(0, 255, 0),
                    thickness=1)

        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        k = cv2.waitKey(1) & 0xff
        if isOutput:
            try:
                out.write(result)
            except Exception:
                continue
        if k == ord('q'):
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break
    yolo.close_session()
Example #22
            th1 = cv2.dilate(th1, kernel, iterations=4)
            th1 = cv2.erode(th1, kernel, iterations=2)
            cv2.imshow('diff', th1)

            t1 = time.time()

            find_frame, our_team_point, enemy_team_point, other_point = gop.main(
                frame)

            # If centroids are detected then track them
            if (len(our_team_point) > 0):

                KF_our_team = []

                # Track object using Kalman Filter
                tracker.Update(our_team_point)

                for i in range(len(tracker.tracks)):
                    if (len(tracker.tracks[i].trace) > 1):
                        for j in range(len(tracker.tracks[i].trace) - 1):
                            x2 = tracker.tracks[i].trace[j + 1][0][0]
                            y2 = tracker.tracks[i].trace[j + 1][1][0]
                            KF_our_team.append([x2, y2])

            # If centroids are detected then track them
            if (len(enemy_team_point) > 0):

                KF_enemy_team = []

                # Track object using Kalman Filter
                tracker.Update(enemy_team_point)
Example #23
def main():
    # Create opencv video capture object
    cap = cv2.VideoCapture('output.avi')

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(160, 30, 5, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if skip_frame_count < 15:
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)

        # If centroids are detected then track them
        if len(centers) > 0:

            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if len(tracker.tracks[i].trace) > 1:
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)

            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Slow down the FPS
        cv2.waitKey(50)

        # Check for key
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if pause is True:
                print("Code is paused. Press 'p' to resume..")
                while pause is True:
                    # stay in this loop until 'p' is pressed again
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #24
def main():
    """Main function for multi object tracking
    Usage:
        $ python2.7 objectTracking.py
    Pre-requisite:
        - Python2.7
        - Numpy
        - SciPy
        - Opencv 3.0 for Python
    Args:
        None
    Return:
        None
    """
    '''
    options = {
            'model': 'cfg/tiny-yolo-voc-1c.cfg',
            'load': 4000,
            'threshold': 0.15,
            'gpu': 1.0
    }

    tfnet = TFNet(options)
    '''
    net = Detector(bytes("cfg/yolov3.cfg", encoding="utf-8"),
                   bytes("weights/yolov3.weights", encoding="utf-8"), 0,
                   bytes("cfg/coco.data", encoding="utf-8"))

    # Create opencv video capture object
    cap = cv2.VideoCapture('mansiroad_trimmed.mp4')
    #length = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    '''
    # Create opencv video capture object
    cap = cv2.VideoCapture('data/TrackingBugs.mp4')

    # Create Object Detector
    detector = Detectors()
    '''
    # Create Object Tracker
    tracker = Tracker(25, 60, 1000, 10)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False
    frame_array = []
    first = 0
    # Infinite loop to process video frames
    size = (1920, 1080)
    c = 0
    #print(length)
    #while(c<=length-1):
    try:
        while True:
            # Capture frame-by-frame
            ret, frame = cap.read()
            #print('ret',ret)
            # Make copy of original frame
            orig_frame = copy.copy(frame)

            # Skip initial frames that display logo
            '''
            if (skip_frame_count < 15):
                skip_frame_count += 1
                continue
            '''
            #print('ssss')
            # Detect and return centroids of the objects in the frame
            centers = vid.detect(ret, frame, net)  #,tfnet)
            print("centers :", centers)
            # If centroids are detected then track them
            if (len(centers) > 0):
                first = 1
                # Track object using Kalman Filter
                tracker.Update(centers, first)

                # For identified object tracks draw tracking line
                # Use various colors to indicate different track_id
                print('NUM OF OBJECTS : ', len(tracker.tracks))
                for i in range(len(tracker.tracks)):
                    if (len(tracker.tracks[i].trace) > 1):
                        #print('NUM OF OBJECTS : ',tracker.tracks[i].trace)
                        for j in range(len(tracker.tracks[i].trace) - 1):
                            # Draw trace line
                            x1 = tracker.tracks[i].trace[j][0][0]
                            y1 = tracker.tracks[i].trace[j][1][0]
                            x2 = tracker.tracks[i].trace[j + 1][0][0]
                            y2 = tracker.tracks[i].trace[j + 1][1][0]
                            clr = tracker.tracks[i].track_id % 9
                            cv2.line(frame, (int(x1), int(y1)),
                                     (int(x2), int(y2)), track_colors[clr], 2)

                # Display the resulting tracking frame
                #cv2.imshow('Tracking', frame)
            elif first == 1:
                tracker.Update(centers, 0)
                print('NUM OF OBJECTS (no new detections): ',
                      len(tracker.tracks))
                for i in range(len(tracker.tracks)):
                    if (len(tracker.tracks[i].trace) > 1):
                        print('trace length: ', len(tracker.tracks[i].trace))
                        print('last trace point: ',
                              tracker.tracks[i].trace[-1])

                        for j in range(len(tracker.tracks[i].trace) - 1):
                            # Draw trace line
                            x1 = tracker.tracks[i].trace[j][0][0]
                            y1 = tracker.tracks[i].trace[j][1][0]
                            x2 = tracker.tracks[i].trace[j + 1][0][0]
                            y2 = tracker.tracks[i].trace[j + 1][1][0]
                            clr = tracker.tracks[i].track_id % 9

                            cv2.line(frame, (int(x1), int(y1)),
                                     (int(x2), int(y2)), track_colors[clr], 2)

            height, width, layers = frame.shape
            size = (width, height)
            frame_array.append(frame)
            cv2.imshow('Tracking', frame)
            '''
            # Display the original frame
            #cv2.imshow('Original', orig_frame)
            if keyboard.is_pressed('q'):# 'q' key has been pressed, exit program.
                break
            # Slower the FPS
            cv2.waitKey(50)

            # Check for key strokes
            k = cv2.waitKey(50) & 0xff
            if k == 27:  # 'esc' key has been pressed, exit program.
                break
            if k == 112:  # 'p' has been pressed. this will pause/resume the code.
                pause = not pause
                if (pause is True):
                    print("Code is paused. Press 'p' to resume..")
                    while (pause is True):
                        # stay in this loop until
                        key = cv2.waitKey(30) & 0xff
                        if key == 112:
                            pause = False
                            print("Resume code..!!")
                            break
            '''
            key = cv2.waitKey(1) & 0xFF

            # Exit
            if key == ord('q'):
                break

            # Take screenshot
            if key == ord('s'):
                cv2.imwrite('frame_{}.jpg'.format(time.time()), frame)

            c += 1
    except Exception as e:
        print('Video ended or processing error:', e)
    # When everything done, release the capture
    finally:
        # Optional: write the buffered frames out as a video
        #out = cv2.VideoWriter('result2.mp4', cv2.VideoWriter_fourcc(*'MP4V'),
        #                      int(cap.get(cv2.CAP_PROP_FPS)), size)
        #for i in range(len(frame_array)):
        #    out.write(frame_array[i])
        #out.release()
        cap.release()
        cv2.destroyAllWindows()
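The vid.detect helper that Example #24 calls is not shown in the snippet. A
minimal sketch of what it plausibly looks like, assuming the pydarknet
(yolo34py) bindings that the Detector(...) constructor above comes from; the
confidence threshold and the centroid format (2x1 column vectors, as the
trackers here expect) are illustrative assumptions:

import numpy as np
from pydarknet import Image

def detect(ret, frame, net, score_thresh=0.5):
    """Hypothetical sketch: run YOLO on one frame, return object centroids."""
    centers = []
    if not ret or frame is None:
        return centers
    # pydarknet returns (category, score, (x, y, w, h)) tuples,
    # where x and y are already the box center
    for cat, score, bounds in net.detect(Image(frame)):
        if score < score_thresh:
            continue
        x, y, w, h = bounds
        centers.append(np.array([[x], [y]]))
    return centers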
Example #25
def main(tiffStep):
    """Main function for multi object tracking
    """
    #tiffStep = 512

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(200, 50, 25, 100)

    # Variables initialization
    pause = False
    track_colors = [random_color() for x in range(256)]
    # Infinite loop to process video frames
    stTmAv = time.time()
    outFName = imFolder + '_traced_0-' + str(tiffStep) + '.tiff'
    #memmap_image = tifffile.memmap(outFName, shape=(tiffStep, newx, newy, 3), dtype='uint8')
    imgs = np.zeros((tiffStep, newy, newx, 3), dtype=np.uint8)
    tTm = 0
    stTm = time.time()
    for fr in range(len(flyContours[0])):
        # Capture frame-by-frame
        frame = getBgSubIm((flyContours[0][fr], flyContours[1]))
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        outFrame = cv2.cvtColor(flyContours[0][fr], cv2.COLOR_GRAY2BGR)

        # Detect and return centroids of the objects in the frame
        centers = detector.DetectCM(frame)
        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % len(track_colors)

                        cv2.line(outFrame, (int(x1), int(y1)),
                                 (int(x2), int(y2)), track_colors[clr], 2)
                        cv2.circle(outFrame, (int(x2), int(y2)), 2,
                                   track_colors[clr], 2)
                        #cv2.circle(outFrame, (int(x1), int(y1)), 2, (255,25,255), 2)
                    cv2.circle(outFrame, (int(x2), int(y2)), 2,
                               track_colors[clr], 1)

            ## Display the resulting tracking frame
            #cv2.imshow('Tracking', outFrame)
            #cv2.waitKey(1)
            # outFName = imFolder+'_traced/'+flist[fr].split('/')[-1]
            # cv2.imwrite(outFName, outFrame)
            img = cv2.resize(outFrame, (newx, newy))
            imN = (fr % tiffStep)
            if (imN == 0 and fr > 0):
                outFName = imFolder + '_traced_' + str(
                    fr - tiffStep) + '-' + str(fr) + '.tiff'
                startNT(imageio.mimwrite, (outFName, imgs))
                imgs = np.zeros((tiffStep, newy, newx, 3), dtype=np.uint8)
                #memmap_image = tifffile.memmap(outFName, shape=(tiffStep, newx, newy, 3), dtype='uint8')
                #memmap_image[imN] = img
                tm = time.time()
                fps = (tiffStep / (tm - stTm))
                tTm += tm - stTm
                print('FPS: %0.3f (frame# %d)' % (fps, fr))
                stTm = tm
            #else:
            #    #print fr, imN
            imgs[imN] = img
    imageio.mimwrite(
        imFolder + '_traced_' + str(
            (fr // tiffStep) * tiffStep) + '-' + str(fr) + '.tiff',
        imgs[:imN + 1])
    print('Tracking average FPS: %0.3f' %
          (float(fr) / (time.time() - stTmAv)))
    cv2.destroyAllWindows()
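Example #25 batches tiffStep frames into a numpy array and hands each full
block to startNT together with imageio.mimwrite, so the TIFF write presumably
runs off the main thread and does not stall tracking. startNT is not shown; a
minimal sketch under that assumption (the name and signature are taken from
the call sites above, the threading choice is a guess):

import threading

def startNT(target, args):
    # Fire-and-forget: run target(*args) on a daemon thread so disk I/O
    # overlaps with tracking. Safe here because the loop immediately
    # rebinds imgs to a fresh array, leaving the old one to the writer.
    t = threading.Thread(target=target, args=args)
    t.daemon = True
    t.start()
    return t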
Example #26
def main():
    parser = argparse.ArgumentParser(
        description='Multi object tracking with ROI selection')
    parser.add_argument('-vid',
                        '--video',
                        required=False,
                        default="Input/project.avi",
                        help="Video File Path")
    parser.add_argument('-roi',
                        '--roi_creation_mode',
                        required=False,
                        default="manually",
                        help="Create region of interest 'manually', " +
                        "or use the 'pre-tested' one which gives good results")
    args = vars(parser.parse_args())

    video = args['video']
    roi_mode = args['roi_creation_mode']

    videopath, __ = video.rsplit(".", 1)
    videoname = videopath.rsplit('/', 1)[-1]
    print("video", video)
    print("videopath", videopath)
    print("videoname", videoname)

    camera = cv2.VideoCapture(video)
    ret, frame = camera.read()

    human_detect = Human_Detectors()

    # Initialise Tracker
    tracker = Tracker(160, 30, 5, 100)

    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    if roi_mode == 'manually':
        roi = cv2.selectROI(frame, showCrosshair=False)
    elif roi_mode == 'pre-tested':
        try:
            with open(videopath + '_pre-testedROI.txt', 'r') as roi_file:
                rois = roi_file.read()
            rois = rois[1:-1]
            roi = rois.split(", ")
            for i in range(len(roi)):
                roi[i] = int(roi[i])
        except (IOError, ValueError):
            print(
                "The pre-tested Region of Interest file does not exist yet. "
                "Please create it manually.")
            roi = cv2.selectROI(frame, showCrosshair=False)
    else:
        # Fall back to manual selection so roi is always defined
        roi = cv2.selectROI(frame, showCrosshair=False)

    cv2.destroyWindow('ROI selector')
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    vid_out = cv2.VideoWriter('Output_Video/' + videoname + '.avi',
                              fourcc,
                              20.0, (int(camera.get(3)), int(camera.get(4))),
                              isColor=True)
    # Run through video frames
    while (camera.isOpened()):
        ret, frame = camera.read()
        if not ret:
            break
        # Detect and return centroids of the objects in the frame
        centers = human_detect.Detect(frame)
        total_centers = len(centers)

        # Track centroids, if found
        if (total_centers > 0):
            # Track object using Kalman Filter
            tracker.Update(centers)
            count = len(tracker.tracks)

            # drawing tracks with different colors
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Coordinates of the predicted trace line
                        x_1 = int(tracker.tracks[i].trace[j][0][0])
                        y_1 = int(tracker.tracks[i].trace[j][1][0])
                        x_2 = int(tracker.tracks[i].trace[j + 1][0][0])
                        y_2 = int(tracker.tracks[i].trace[j + 1][1][0])
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (x_1, y_1), (x_2, y_2),
                                 track_colors[clr], 2)

            # Draw the ROI and the people count, then write and show the frame
            cv2.rectangle(frame, (roi[0], roi[1]),
                          (roi[0] + roi[2], roi[1] + roi[3]), (0, 255, 0), 1)
            cv2.putText(frame, 'people detected: ' + str(count), (10, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1,
                        cv2.LINE_AA)
            vid_out.write(frame)
            cv2.imshow('Tracking', frame)
            print("total number of people in the frame: ", count)

        key = cv2.waitKey(50) & 0xff
        # Escape key to exit
        if key == 27:
            break

    camera.release()
    vid_out.release()
    cv2.destroyAllWindows()
    if roi_mode == 'manually':
        print(
            "Do you wish to save the created ROI as the pre-tested file for "
            "this video's next runs (if it gave good results)?")
        wr = create_roi(videoname, roi)
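The create_roi helper called at the end of Example #26 is also not shown.
Since the 'pre-tested' branch above parses the file with rois[1:-1] and
split(", "), the file presumably contains str(tuple(roi)). A hypothetical
sketch that writes that format (the confirmation prompt and the exact save
path are assumptions; note the reader opens videopath + '_pre-testedROI.txt'):

def create_roi(videoname, roi):
    # Persist the ROI as "(x, y, w, h)" so the reader can strip the
    # parentheses and split on ", "
    answer = input("Save this ROI as the pre-tested one? (y/n): ")
    if answer.strip().lower() != 'y':
        return False
    with open(videoname + '_pre-testedROI.txt', 'w') as f:
        f.write(str(tuple(int(v) for v in roi)))
    return True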
Example #27
def main():
    """Main function for multi object tracking
    Usage:
        $ python2.7 objectTracking.py
    Pre-requisite:
        - Python2.7
        - Numpy
        - SciPy
        - Opencv 3.0 for Python
    Args:
        None
    Return:
        None
    """

    # Create opencv video capture object
    cap = cv2.VideoCapture('data/TrackingBugs.mp4')
    """
    Resolution: 596 x 336
    Frame rate: 30 fps
    """

    # Create Object Detector
    detector = Detectors()

    # Create Object Tracker
    tracker = Tracker(30, 30, 10, 100)

    # Variables initialization
    skip_frame_count = 0
    track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                    (0, 255, 255), (255, 0, 255), (255, 127, 255),
                    (127, 0, 255), (127, 0, 127)]
    pause = False

    # Infinite loop to process video frames
    while (True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        # Make copy of original frame
        orig_frame = copy.copy(frame)

        # Skip initial frames that display logo
        if (skip_frame_count < 15):
            skip_frame_count += 1
            continue

        # Detect and return centroids of the objects in the frame
        centers = detector.Detect(frame)

        # If centroids are detected then track them
        if (len(centers) > 0):

            # Track object using Kalman Filter
            tracker.Update(centers)

            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                position = tracker.tracks[i].position()
                error = tracker.tracks[i].position_error()
                # cv2.circle needs integer center coordinates and radius
                cv2.circle(frame, (int(position[0]), int(position[1])),
                           int(error), (200, 200, 200), 2)

                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].track_id % 9
                        cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                                 track_colors[clr], 2)

            # Display the resulting tracking frame
            cv2.imshow('Tracking', frame)

        # Display the original frame
        cv2.imshow('Original', orig_frame)

        # Check for key strokes; the 50 ms wait also slows the playback FPS
        k = cv2.waitKey(50) & 0xff
        if k == 27:  # 'esc' key has been pressed, exit program.
            break
        if k == 112:  # 'p' has been pressed. this will pause/resume the code.
            pause = not pause
            if (pause is True):
                print("Code is paused. Press 'p' to resume..")
                while (pause is True):
                    # stay in this loop until
                    key = cv2.waitKey(30) & 0xff
                    if key == 112:
                        pause = False
                        print("Resume code..!!")
                        break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
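Example #27 calls position() and position_error() on each track, accessors
the Track classes in the other examples do not expose. One plausible reading
is that they return the Kalman filter's state estimate and a scalar radius
derived from the estimate covariance; a hedged sketch (the KF attribute names
x and P are assumptions):

import numpy as np

class TrackWithUncertainty(object):
    # Hypothetical accessors; assumes the track owns a Kalman filter KF
    # whose state vector x holds (x, y) in its first two components and
    # whose covariance matrix is P.
    def position(self):
        return (int(self.KF.x[0]), int(self.KF.x[1]))

    def position_error(self):
        # One-sigma position uncertainty, usable as a circle radius
        return int(np.sqrt(self.KF.P[0, 0] + self.KF.P[1, 1]))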
Example #28
    # If centroids are detected then track them
    if (len(rectList) > 0):
        centers = [None] * len(rectList)
        for i in range(len(rectList)):
            rect = rectList[i]
            x = int(rect[0] + rect[2] / 2)
            y = int(rect[1] + rect[3] / 2)
            b = np.array([[x], [y]])
            centers[i] = np.round(b)
            cv2.rectangle(frame, rectList[i], (0, 0, 255), 5)
            cv2.circle(frame, (x, y), 4, (0, 0, 255), 3)

        # Track object using Kalman Filter
        if skip_frame_count > 10:
            tracker.Update(centers, False)

        skip_frame_count = skip_frame_count + 1

        if debug:
            # For identified object tracks draw tracking line
            # Use various colors to indicate different track_id
            for i in range(len(tracker.tracks)):
                if (len(tracker.tracks[i].trace) > 1):
                    for j in range(len(tracker.tracks[i].trace) - 1):
                        # Draw trace line
                        x1 = tracker.tracks[i].trace[j][0][0]
                        y1 = tracker.tracks[i].trace[j][1][0]
                        x2 = tracker.tracks[i].trace[j + 1][0][0]
                        y2 = tracker.tracks[i].trace[j + 1][1][0]
                        clr = tracker.tracks[i].id % 9