def main():
    """Track ground-truth detections through a video with a Kalman-filter
    SORT tracker, drawing labelled boxes on each frame with OpenCV.

    Relies on module-level helpers defined elsewhere in this project:
    GroundTruthDetections, Sort, random_color, plus cv2/np/time imports.
    """
    # Max number of frames to detect and track
    max_frames = 800
    # input video file name
    video_path = 'TownCentreXVID.avi'
    # label strings; random_color() picks one per track id
    labelstring = ['good person', 'bad person', 'Good person']
    vidcap = cv2.VideoCapture(video_path)
    display = True   # show annotated frames in a cv2 window
    saver = False    # additionally write annotated frames to frameout/

    total_time = 0.0
    total_frames = 0

    # init detector (pre-recorded ground-truth boxes, not a live detector)
    detector = GroundTruthDetections()

    # init tracker: Kalman-filter based SORT instance
    tracker = Sort(use_dlib=False)

    frames = detector.get_total_frames()
    for frame in range(frames):  # frame numbers begin at 0!
        # get detections for this frame index
        detections = detector.get_detected_items(frame)
        total_frames += 1
        if total_frames > max_frames:
            break
        # grab one frame from the video; FIX: stop cleanly if the stream
        # ends early instead of handing an undefined image to the tracker
        success, img = vidcap.read()
        if not success:
            break

        start_time = time.time()
        # update tracker and time the update
        trackers = tracker.update(detections, img)
        cycle_time = time.time() - start_time
        total_time += cycle_time

        print('frame: %d...took: %3fs' % (frame, cycle_time))

        # draw each confirmed track: d = [x1, y1, x2, y2, track_id]
        for d in trackers:
            if display:
                d = d.astype(np.int32)
                color, label_string = random_color(d[4], labelstring)
                # rectangle around the tracked object
                cv2.rectangle(img, (d[0], d[1]), (d[2], d[3]), color, 3)
                # label text at the box's top-left corner
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(img, label_string, (d[0], d[1]), font, 1, color, 2, cv2.LINE_4)

        if display:
            cv2.imshow("frame", img)
            # NOTE(review): waitKey(0) blocks until a key is pressed on
            # every frame — appears intentional for stepping through frames
            cv2.waitKey(0)
            # save the frame with tracking boxes
            if saver:
                cv2.imwrite("frameout/f" + str(frame) + ".jpg", img)

    # FIX: release the capture and guard against ZeroDivisionError when
    # no frames were processed
    vidcap.release()
    if total_time > 0:
        print("Total Tracking took: %.3f for %d frames or %.1f FPS"
              % (total_time, total_frames, total_frames / total_time))
    else:
        print("No frames processed.")
Beispiel #2
0
def main():
    """Run the SORT tracker over MOT16-style image frames read from disk,
    writing per-frame track boxes to output/townCentreOut.top and
    optionally drawing/saving each annotated frame with matplotlib.

    FIX(review): the original mixed tabs and spaces (IndentationError in
    Python 3), used Python-2 `print` statements, and left `jpg_name`
    undefined for total_frames >= 1000 (no `else` branch). The three
    zero-padding branches are equivalent to one "%06d" format, which
    also handles >= 1000 correctly.
    """
    args = parse_args()
    display = args.display
    use_dlibTracker = args.use_dlibTracker
    saver = args.saver  # NOTE(review): parsed but never used in this variant

    total_time = 0.0
    total_frames = 0

    # for display
    if display:
        colours = np.random.rand(32, 3)  # used only for display
        plt.ion()
        fig = plt.figure()

    if not os.path.exists('output'):
        os.makedirs('output')
    out_file = 'output/townCentreOut.top'

    # init detector
    detector = GroundTruthDetections()

    # init tracker
    tracker = Sort(use_dlib=use_dlibTracker)  # create instance of the SORT tracker

    if use_dlibTracker:
        print("Dlib Correlation tracker activated!")
    else:
        print("Kalman tracker activated!")

    with open(out_file, 'w') as f_out:
        frames = detector.get_total_frames()

        for frame in range(frames):  # frame numbers begin at 0!
            # get detections (this variant indexes detections by frame+1)
            detections = detector.get_detected_items(frame + 1)
            total_frames += 1

            # zero-padded 6-digit MOT-style file name
            jpg_name = "%06d.jpg" % (frame + 1)
            fn = '/home/coco/dataset/MOT16/MOT15-01/img1/' + jpg_name
            img = io.imread(fn)
            if display:
                ax1 = fig.add_subplot(111, aspect='equal')
                ax1.imshow(img)
                if use_dlibTracker:
                    plt.title('Dlib Correlation Tracker')
                else:
                    plt.title('Kalman Tracker')

            start_time = time.time()
            trackers = tracker.update(detections, img)
            cycle_time = time.time() - start_time
            total_time += cycle_time

            print('frame: %d...took: %3fs' % (frame, cycle_time))
            print("valid trackers")
            print(len(trackers))

            # write and draw each track: d = [x1, y1, x2, y2, track_id]
            for d in trackers:
                f_out.write('%d,%d,%d,%d,x,x,x,x,%.3f,%.3f,%.3f,%.3f\n' %
                            (d[4], frame, 1, 1, d[0], d[1], d[2], d[3]))
                if display:
                    d = d.astype(np.int32)
                    ax1.add_patch(patches.Rectangle(
                        (d[0], d[1]), d[2] - d[0], d[3] - d[1],
                        fill=False, lw=3, ec=colours[d[4] % 32, :]))
                    ax1.set_adjustable('box-forced')
                    # label
                    ax1.annotate('id = %d' % d[4], xy=(d[0], d[1]),
                                 xytext=(d[0], d[1]))
                    if detections != []:  # detector is active in this frame
                        ax1.annotate(" DETECTOR", xy=(5, 45), xytext=(5, 45))

            if display:
                plt.axis('off')
                fig.canvas.flush_events()
                plt.draw()
                fig.tight_layout()
                # save every annotated frame (unconditional in this variant)
                fig.savefig("/home/coco/dataset/results/frameOut/%s" % jpg_name,
                            dpi=200)
                ax1.cla()
Beispiel #3
0
def main():
    """Track ground-truth detections over frames pre-extracted to
    test/frame%d.jpg, writing MOT-style rows to output/townCentreOut.top
    and optionally displaying/saving annotated frames via matplotlib.

    FIX(review): the original used Python-2 `print "..."` statements
    (SyntaxError in Python 3) while the rest of this function already
    used print() calls; the final FPS print also divided by total_time
    with no zero guard.
    """
    args = parse_args()
    display = args.display
    use_dlibTracker = args.use_dlibTracker
    saver = args.saver

    total_time = 0.0
    total_frames = 0

    # for display
    if display:
        colours = np.random.rand(32, 3)  # used only for display
        plt.ion()
        fig = plt.figure()

    if not os.path.exists('output'):
        os.makedirs('output')
    out_file = 'output/townCentreOut.top'

    # init detector
    detector = GroundTruthDetections()

    # init tracker
    tracker = Sort(use_dlib=use_dlibTracker)  # create instance of the SORT tracker

    if use_dlibTracker:
        print("Dlib Correlation tracker activated!")
    else:
        print("Kalman tracker activated!")

    with open(out_file, 'w') as f_out:
        frames = detector.get_total_frames()
        for frame in range(frames):  # frame numbers begin at 0!
            # get detections
            detections = detector.get_detected_items(frame)
            total_frames += 1

            # video frames are extracted to 'test/frame%d.jpg' with ffmpeg
            fn = 'test/frame%d.jpg' % (frame + 1)
            img = io.imread(fn)
            if display:
                ax1 = fig.add_subplot(111, aspect='equal')
                ax1.imshow(img)
                if use_dlibTracker:
                    plt.title('Dlib Correlation Tracker')
                else:
                    plt.title('Kalman Tracker')

            start_time = time.time()
            # update tracker
            trackers = tracker.update(detections, img)
            cycle_time = time.time() - start_time
            total_time += cycle_time

            print('frame: %d...took: %3fs' % (frame, cycle_time))

            # write and draw each track: d = [x1, y1, x2, y2, track_id]
            for d in trackers:
                f_out.write('%d,%d,%d,%d,x,x,x,x,%.3f,%.3f,%.3f,%.3f\n' %
                            (d[4], frame, 1, 1, d[0], d[1], d[2], d[3]))
                if display:
                    d = d.astype(np.int32)
                    ax1.add_patch(patches.Rectangle(
                        (d[0], d[1]), d[2] - d[0], d[3] - d[1],
                        fill=False, lw=3, ec=colours[d[4] % 32, :]))
                    ax1.set_adjustable('box-forced')
                    # label
                    ax1.annotate('id = %d' % d[4], xy=(d[0], d[1]),
                                 xytext=(d[0], d[1]))
                    if detections != []:  # detector is active in this frame
                        ax1.annotate(" DETECTOR", xy=(5, 45), xytext=(5, 45))

            if display:
                plt.axis('off')
                fig.canvas.flush_events()
                plt.draw()
                fig.tight_layout()
                # save the frame with tracking boxes
                if saver:
                    fig.savefig("./frameOut/f%d.jpg" % (frame + 1), dpi=200)
                ax1.cla()

    # FIX: guard against ZeroDivisionError when no frames were processed
    if total_time > 0:
        print("Total Tracking took: %.3f for %d frames or %.1f FPS" %
              (total_time, total_frames, total_frames / total_time))
    else:
        print("No frames processed.")
Beispiel #4
0
def main():
    """Extract every frame of ppp.avi to test/Pictures%d.jpg, then run the
    SORT tracker over ground-truth detections on those extracted frames.

    FIX(review): removed the dead `fn = 'test/Pictures%d.jpg' % (count+1)`
    assignment in the extraction loop (immediately overwritten later, and
    off-by-one versus the file just written); replaced `== False` /
    `== True` comparisons with idiomatic truth tests; guarded the final
    FPS print against division by zero.
    """
    args = parse_args()
    display = args.display
    use_dlibTracker = args.use_dlibTracker
    saver = args.saver

    total_time = 0.0
    total_frames = 0
    count = 0

    cap = cv2.VideoCapture('ppp.avi')

    # Check if the video opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Read until the video is completed, dumping every frame as a JPEG
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break

        # Display the resulting frame and save it as a JPEG file
        cv2.imshow('Frame', frame)
        cv2.imwrite("test/Pictures%d.jpg" % count, frame)
        count += 1

        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture and close the windows
    cap.release()
    cv2.destroyAllWindows()

    # for display
    if display:
        colours = np.random.rand(32, 3)  # used only for display
        plt.ion()
        fig = plt.figure()

    if not os.path.exists('output'):
        os.makedirs('output')
    out_file = 'output/townCentreOut.top'

    # init detector
    detector = GroundTruthDetections()

    # init tracker
    tracker = Sort(use_dlib=use_dlibTracker)  # create instance of the SORT tracker

    if use_dlibTracker:
        print("Dlib Correlation tracker activated!")
    else:
        print("Kalman tracker activated!")

    with open(out_file, 'w') as f_out:
        print(detector.get_total_frames())

        frames = detector.get_total_frames()
        for frame in range(frames):  # frame numbers begin at 0!
            # get detections
            detections = detector.get_detected_items(frame)
            total_frames += 1

            # video frames were extracted to 'test/Pictures%d.jpg' above
            fn = 'test/Pictures%d.jpg' % (frame + 1)
            img = io.imread(fn)
            if display:
                ax1 = fig.add_subplot(111, aspect='equal')
                ax1.imshow(img)
                if use_dlibTracker:
                    plt.title('Dlib Correlation Tracker')
                else:
                    plt.title('Kalman Tracker')

            start_time = time.time()
            # update tracker
            trackers = tracker.update(detections, img)
            cycle_time = time.time() - start_time
            total_time += cycle_time

            print('frame: %d...took: %3fs' % (frame, cycle_time))

            # write and draw each track: d = [x1, y1, x2, y2, track_id]
            for d in trackers:
                f_out.write('%d,%d,%d,%d,x,x,x,x,%.3f,%.3f,%.3f,%.3f\n' %
                            (d[4], frame, 1, 1, d[0], d[1], d[2], d[3]))
                if display:
                    d = d.astype(np.int32)
                    ax1.add_patch(patches.Rectangle(
                        (d[0], d[1]), d[2] - d[0], d[3] - d[1],
                        fill=False, lw=3, ec=colours[d[4] % 32, :]))
                    ax1.set_adjustable('box-forced')
                    # label
                    ax1.annotate('id = %d' % d[4], xy=(d[0], d[1]),
                                 xytext=(d[0], d[1]))
                    if detections != []:  # detector is active in this frame
                        ax1.annotate(" DETECTOR", xy=(5, 45), xytext=(5, 45))

            if display:
                plt.axis('off')
                fig.canvas.flush_events()
                plt.draw()
                fig.tight_layout()
                # save the frame with tracking boxes
                if saver:
                    fig.savefig("./frameOut/f%d.jpg" % (frame + 1), dpi=200)
                ax1.cla()

    # FIX: guard against ZeroDivisionError when no frames were processed
    if total_time > 0:
        print("Total Tracking took: %.3f for %d frames or %.1f FPS" %
              (total_time, total_frames, total_frames / total_time))
    else:
        print("No frames processed.")
Beispiel #5
0
def main():
    """Run the SORT tracker on frames read live from a video file, draw
    boxes/ids with OpenCV, and write the annotated stream to test.mp4.

    FIX(review): the original never released `cap`/`out` (an mp4 written
    by cv2.VideoWriter may not be finalized without release()), called
    cv2.imshow without any waitKey (the window never renders), and did
    not check cap.read() success before passing `img` to the tracker.
    """
    args = parse_args()
    display = args.display          # NOTE(review): unused in this cv2-only variant
    use_dlibTracker = args.use_dlibTracker
    saver = args.saver              # NOTE(review): unused in this cv2-only variant

    total_time = 0.0
    total_frames = 0

    if not os.path.exists('output'):
        os.makedirs('output')
    out_file = 'output/townCentreOut.top'

    # init detector
    detector = GroundTruthDetections()

    # init tracker
    tracker = Sort(use_dlib=use_dlibTracker)  # create instance of the SORT tracker

    if use_dlibTracker:
        print("Dlib Correlation tracker activated!")
    else:
        print("Kalman tracker activated!")

    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter('test.mp4', fourcc, 30.0, (960, 480))
    cap = cv2.VideoCapture(
        '/home/vdo-ubuntu/Videos/DVR_ch12_main_20180716130000_20180716140000_1.avi'
    )

    try:
        with open(out_file, 'w') as f_out:
            frames = detector.get_total_frames()  # total number of frames
            for frame in range(frames):  # frame numbers begin at 0!
                # get detections
                detections = detector.get_detected_items(frame)
                total_frames += 1

                success, img = cap.read()
                # FIX: stop when the stream runs out instead of passing
                # img=None into the tracker
                if not success:
                    break
                cv2.imshow('d', img)
                cv2.waitKey(1)  # FIX: imshow never updates without waitKey

                start_time = time.time()
                # update tracker
                trackers = tracker.update(detections, img)
                cycle_time = time.time() - start_time
                total_time += cycle_time

                print('frame: %d...took: %3fs' % (frame, cycle_time))

                # write and draw each track: d = [x1, y1, x2, y2, track_id]
                for d in trackers:
                    f_out.write('%d,%d,%d,%d,x,x,x,x,%.3f,%.3f,%.3f,%.3f\n' %
                                (d[4], frame, 1, 1, d[0], d[1], d[2], d[3]))
                    cv2.rectangle(img, (int(d[0]), int(d[1])),
                                  (int(d[2]), int(d[3])), (0, 255, 0), 2)
                    cv2.putText(img, 'id = %d' % (d[4]), (int(d[0]), int(d[3])),
                                cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255))
                out.write(img)
    finally:
        # FIX: release capture/writer so the output mp4 is finalized
        cap.release()
        out.release()
        cv2.destroyAllWindows()

    # FIX: guard against ZeroDivisionError when no frames were processed
    if total_time > 0:
        print("Total Tracking took: %.3f for %d frames or %.1f FPS" %
              (total_time, total_frames, total_frames / total_time))
    else:
        print("No frames processed.")
Beispiel #6
0
# NOTE(review): top-level script fragment (truncated at the end of this
# chunk) — same setup sequence as the main() variants above. Requires
# `display`, `use_dlibTracker`, np, plt, os, GroundTruthDetections and
# Sort to be defined/imported earlier in the full file.
total_time = 0.0   # cumulative tracker-update wall time (seconds)
total_frames = 0   # number of frames processed so far

# for disp
if display:
    colours = np.random.rand(32, 3)  # used only for display
    plt.ion()
    fig = plt.figure()

# ensure the output directory exists before opening the results file
if not os.path.exists('output'):
    os.makedirs('output')
out_file = 'output/townCentreOut.top'

#init detector
detector = GroundTruthDetections()

#init tracker
tracker = Sort(use_dlib=use_dlibTracker)  #create instance of the SORT tracker

if use_dlibTracker:
    print("Dlib Correlation tracker activated!")
else:
    print("Kalman tracker activated!")

with open(out_file, 'w') as f_out:

    frames = detector.get_total_frames()
    for frame in range(0, frames):  #frame numbers begin at 0!
        # get detections
        detections = detector.get_detected_items(frame)