Exemple #1
0
def initialize_tracker(i):
    """Create an OpenCV tracker selected by a numeric index.

    Parameters
    ----------
    i : int
        Index into the supported tracker list:
        0=BOOSTING, 1=MIL, 2=KCF, 3=TLD, 4=MEDIANFLOW, 5=MOSSE, 6=CSRT.

    Returns
    -------
    A freshly constructed cv2 tracker instance.

    Raises
    ------
    IndexError
        If ``i`` is outside the supported range (same as the original
        list-index behaviour).
    """
    # Name -> constructor, in the same order as the original list so the
    # index semantics are unchanged.  Lambdas keep construction lazy: only
    # the selected tracker is ever built (the original re-tested every name
    # with a chain of independent ifs).
    factories = {
        'BOOSTING': lambda: cv2.TrackerBoosting_create(),
        'MIL': lambda: cv2.TrackerMIL_create(),
        'KCF': lambda: cv2.TrackerKCF_create(),
        'TLD': lambda: cv2.TrackerTLD_create(),
        'MEDIANFLOW': lambda: cv2.TrackerMedianFlow_create(),
        'MOSSE': lambda: cv2.TrackerMOSSE_create(),
        'CSRT': lambda: cv2.TrackerCSRT_create(),
    }
    tracker_type = list(factories)[i]
    return factories[tracker_type]()
    def initTracker(self, firstFrame, mode=Mode.NO_DETECTION):
        """Build the tracker chosen in ``self.selectedTracker`` and obtain
        the initial region image, either by detection or by manual
        selection, depending on *mode*."""
        chosen = self.selectedTracker
        kinds = self.trackerTypes
        if chosen == kinds[0]:
            self.tracker = cv2.TrackerBoosting_create()
        elif chosen == kinds[1]:
            self.tracker = cv2.TrackerMIL_create()
        elif chosen == kinds[2]:
            self.tracker = cv2.TrackerKCF_create()
        elif chosen == kinds[3]:
            self.tracker = cv2.TrackerTLD_create()
        elif chosen == kinds[4]:
            self.tracker = cv2.TrackerMedianFlow_create()
        elif chosen == kinds[5]:
            self.tracker = cv2.TrackerGOTURN_create()

        # Hand back the image produced by the requested initialisation mode;
        # any other mode value yields None, as before.
        if mode == Mode.DETECTION:
            return self.detect(firstFrame)
        if mode == Mode.NO_DETECTION:
            return self.selectBB(firstFrame)
        return None
Exemple #3
0
 def call_tracker_constructor(self, tracker_type):
     """Instantiate an OpenCV tracker named by *tracker_type*.

     Uses the legacy ``cv2.Tracker_create`` factory on OpenCV < 3.3 and
     the per-type ``Tracker*_create`` factories otherwise.

     NOTE(review): if *tracker_type* matches none of the known names on a
     modern OpenCV, ``tracker`` is never bound and UnboundLocalError is
     raised at the return — behaviour kept from the original.
     """
     # -- TODO: remove this if I assume OpenCV version > 3.4.0
     # BUG FIX: the old test was `int(self.major_ver == 3)`, which converts
     # the *comparison result* (a bool) to int instead of comparing the
     # integer major version — the parenthesis was misplaced.
     if int(self.major_ver) == 3 and int(self.minor_ver) < 3:
         tracker = cv2.Tracker_create(tracker_type)
     # --
     else:
         if tracker_type == 'CSRT':
             tracker = cv2.TrackerCSRT_create()
         elif tracker_type == 'KCF':
             tracker = cv2.TrackerKCF_create()
         elif tracker_type == 'MOSSE':
             tracker = cv2.TrackerMOSSE_create()
         elif tracker_type == 'MIL':
             tracker = cv2.TrackerMIL_create()
         elif tracker_type == 'BOOSTING':
             tracker = cv2.TrackerBoosting_create()
         elif tracker_type == 'MEDIANFLOW':
             tracker = cv2.TrackerMedianFlow_create()
         elif tracker_type == 'TLD':
             tracker = cv2.TrackerTLD_create()
         elif tracker_type == 'GOTURN':
             tracker = cv2.TrackerGOTURN_create()
     return tracker
Exemple #4
0
def createTrackerByName(trackerType):
    """Create a tracker matching an entry of the module-level
    ``trackerTypes`` list.

    Returns the new tracker, or None (silently) when *trackerType*
    matches no known entry.
    """
    # The original declared `global trackerTypes` and `global multiTracker`:
    # reading a module-level name never needs `global`, and multiTracker was
    # not referenced in this function at all, so both statements are gone.
    if trackerType == trackerTypes[0]:
        tracker = cv2.TrackerBoosting_create()
    elif trackerType == trackerTypes[1]:
        tracker = cv2.TrackerMIL_create()
    elif trackerType == trackerTypes[2]:
        tracker = cv2.TrackerKCF_create()
    elif trackerType == trackerTypes[3]:
        tracker = cv2.TrackerTLD_create()
    elif trackerType == trackerTypes[4]:
        tracker = cv2.TrackerMedianFlow_create()
    elif trackerType == trackerTypes[5]:
        tracker = cv2.TrackerGOTURN_create()
    elif trackerType == trackerTypes[6]:
        tracker = cv2.TrackerMOSSE_create()
    elif trackerType == trackerTypes[7]:
        tracker = cv2.TrackerCSRT_create()
    else:
        tracker = None
    return tracker
def tracker_creater(api_name):
    """Create an OpenCV tracker for *api_name*.

    Returns the tracker object, or None (after printing the supported
    names) when *api_name* is not recognised.
    """
    # Supported names, kept local: the original error path iterated a
    # `tracker_types` name that is not defined in this scope and would
    # raise NameError instead of printing the help text.
    supported = ['BOOSTING', 'MIL', 'KCF', 'MEDIANFLOW', 'CSRT', 'MOSSE']

    # Then create a tracker object for whichever tracker is chosen
    if api_name == 'BOOSTING':
        api_tracker = cv2.TrackerBoosting_create()
    elif api_name == 'MIL':
        api_tracker = cv2.TrackerMIL_create()
    elif api_name == 'KCF':
        api_tracker = cv2.TrackerKCF_create()
    elif api_name == 'MEDIANFLOW':
        api_tracker = cv2.TrackerMedianFlow_create()
    elif api_name == "CSRT":
        api_tracker = cv2.TrackerCSRT_create()
    elif api_name == "MOSSE":
        api_tracker = cv2.TrackerMOSSE_create()
    else:
        api_tracker = None
        print('Incorrect tracker name')
        print('Available trackers are:')
        for t in supported:
            print(t)

    return api_tracker
def createOneTracker(type):
    """Create a single OpenCV tracker of the requested *type*.

    NOTE: the parameter shadows the builtin ``type``; the name is kept so
    existing keyword callers are not broken.
    """
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    tracker_type = type
    # BUG FIX: the legacy cv2.Tracker_create factory only exists on
    # OpenCV 3.0-3.2; checking the minor version alone mis-routed
    # OpenCV 4.x builds with minor < 3 into the legacy branch.
    if int(major_ver) == 3 and int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        elif tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    return tracker
Exemple #7
0
def main():
    """Run every configured OpenCV tracker over each dataset under
    ``data/`` and append accuracy/robustness results to ``results_70``.

    A dataset is any walked directory that contains a ``groundtruth.txt``.
    """
    output_folder = 'results_70'
    data_folder = "data"
    for path, subfolders, files in os.walk(data_folder):
        print(path)
        if "groundtruth.txt" not in files:
            continue
        # BUG FIX: the dataset name was extracted with path.split("\\"),
        # which only works on Windows; os.path.basename is portable.
        dataset = os.path.basename(path)
        path_gt = f'{path}/groundtruth.txt'
        trackers = {
            "CSRT": cv2.TrackerCSRT_create(),
            "KCF": cv2.TrackerKCF_create(),
            "Boosting": cv2.TrackerBoosting_create(),
            "MIL": cv2.TrackerMIL_create(),
            "TLD": cv2.TrackerTLD_create(),
            "MedianFlow": cv2.TrackerMedianFlow_create(),
            "MOSSE": cv2.TrackerMOSSE_create(),
        }
        gt = read_ground_truth(path_gt)
        # visualize_bounding_boxes(path, gt)
        # continue
        for tracker_name, tracker in trackers.items():
            print("Computing tracking with", tracker_name)
            start = time.time()
            predictions = track(path, init_tracker(gt), tracker)
            accuracy, robustness = evaluate(predictions, gt, iou_cutoff=0.7)
            duration = time.time() - start
            print(f'accuracy = {accuracy}, robustness = {robustness}')
            # `with` guarantees the result files are closed even if a
            # write fails (the original leaked handles on exceptions).
            with open(f'{output_folder}/result.txt', 'a+') as f:
                f.write('{};{};{:.2f};{:.2f};{:.3f}\n'.format(
                    dataset, tracker_name, accuracy * 100.0,
                    robustness * 100.0, duration))
            with open(f'{output_folder}/{dataset}_{tracker_name}.txt',
                      'a+') as f:
                for bb in predictions:
                    f.write(f'{bb}\n')
Exemple #8
0
def cam_personTracking(image, tracker, flag):
    """Update person tracks for one camera frame.

    Runs the existing multi-tracker, drops tracks near the frame border,
    and (when *flag* is truthy) re-runs person detection on the interior
    of the frame and adds a MIL tracker for each newly found person.

    Parameters:
        image: H x W x C frame (shape is unpacked below).
        tracker: current multi-tracker handed to the helper functions.
        flag: truthy on frames where re-detection should run — the inline
            comments suggest every tenth frame.

    Returns (personsMod, new, personRemovedIndexList, new_tracker):
        personsMod: updated person boxes (list once detection ran).
        new: number of newly added persons.
        personRemovedIndexList: indices removed by the boundary check.
        new_tracker: the rebuilt multi-tracker.
    """
    #print('\tcam_personTracking')
    # NOTE(review): presumably the max centroid distance used by
    # modifyTracker to match detections to tracks — confirm in its source.
    threshold_centroid = 60
    th = 40  #boundaryThreshold_centroid
    H, W, C = image.shape
    new = 0  #number of new persons added
    #Do tracking of currently tracked persons
    #personsTracked, personsTracked_ID = personTracking(image, tracker) #Main Tracker Update
    personsTracked = personTracking(image, tracker)  #Main Tracker Update #LIST
    #Check for boundary persons and deltracker and create new tracker with valid persons
    personsTracked, personRemovedIndexList, new_tracker = cam_boundaryCheck(
        image, personsTracked, tracker, th)

    #print('\t\tpersonsTracked: %d'%len(personsTracked))
    #print(personsTracked)
    #If flag is True, detect if new persons are present and add a new tracker for those persons
    personsMod = personsTracked  #Tuple now but later becomes List
    if (flag):  #Every tenth frame
        # Detection runs only on the interior, th pixels away from each edge.
        personsDetected = personDetection(
            image[th:H - th, th:W - th,
                  0:C], th)  #Every 10th image detect all persons
        print('personsDetected')
        print(personsDetected)
        if personsDetected:  #if frame not empty
            personsMod, new = modifyTracker(personsTracked, personsDetected,
                                            threshold_centroid, new)
            #print('\tAm I a List?: ')
            #print(isinstance(personsMod, list))
            #print('\t\tpersonsDetected: %d'%len(personsDetected));
            #print(personsDetected);
            #print('\t\tpersonsMod After: %d'%len(personsMod));
            #print(personsMod);
            # Entries appended beyond the original count are the new persons;
            # give each one its own MIL tracker.
            for i in range(len(personsTracked), len(personsMod)):
                ok = new_tracker.add(cv2.TrackerMIL_create(), image,
                                     tuple(personsMod[i]))

    return personsMod, new, personRemovedIndexList, new_tracker
Exemple #9
0
 def createOpencvTracker(self, type, detection):
     """Configure one OpenCV tracker per detection index.

     Builds ``self.trackers_opencv`` as ``{0: tracker, 1: tracker, ...}``
     with ``len(detection)`` independent trackers of the requested kind.
     If *type* matches no known tracker name, the attribute is left
     untouched — same as the original elif chain.

     NOTE: the parameter shadows the builtin ``type``; the name is kept
     so keyword callers are not broken.
     """
     # Factory attribute names, resolved lazily with getattr so only the
     # selected constructor is looked up on cv2 (the original repeated the
     # same dict comprehension seven times).
     factory_names = {
         'kcf': 'TrackerKCF_create',
         'boosting': 'TrackerBoosting_create',
         'mil': 'TrackerMIL_create',
         'tld': 'TrackerTLD_create',
         'medianflow': 'TrackerMedianFlow_create',
         'csrt': 'TrackerCSRT_create',
         'mosse': 'TrackerMOSSE_create',
     }
     attr = factory_names.get(type)
     if attr is not None:
         make = getattr(cv2, attr)
         # One independent tracker instance per detection.
         self.trackers_opencv = {
             key: make()
             for key in range(len(detection))
         }
Exemple #10
0
    def __init__(self, windowName = 'default window', videoName = "default video"):
        """Set up the display window, open the video, and create the tracker
        selected by ``self.tracker_type`` (index 2 of tracker_types, KCF)."""
        self.selection = None                           # state of the drag-selected target box
        self.track_window = None                        # current tracking window
        self.drag_start = None                          # mouse-drag state
        self.speed = 50                                 # video playback speed
        self.video_size = (960,540)                     # display size of the video
        self.box_color = (255,255,255)                  # colour of the tracker's bounding box
        self.path_color = (0,0,255)                     # colour of the drawn trajectory
        # Choose the tracker type
        #                          0        1     2      3         4           5            6              7              8       
        self.tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'Dlib_Tracker', 'CamShift','Template_Matching']
        self.tracker_type = self.tracker_types[2] 
        # Create the video window and register the mouse callback
        cv2.namedWindow(windowName,cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback(windowName,self.onmouse)
        self.windowName = windowName
        # Open the video
        self.cap = cv2.VideoCapture(videoName)
        if not self.cap.isOpened():
            print("Video doesn't exit!", videoName)
        # Video properties
        self.frames_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) # total number of frames
        self.points = deque(maxlen = self.frames_count) # centre point of the tracked target in every frame

        # Instantiate the tracker matching the selected type.
        # NOTE(review): only the first six entries have an OpenCV factory
        # here; the remaining types are presumably handled elsewhere —
        # confirm before selecting them.
        if self.tracker_type == 'BOOSTING':
            self.tracker = cv2.TrackerBoosting_create()
        elif self.tracker_type == 'MIL':
            self.tracker = cv2.TrackerMIL_create() 
        elif self.tracker_type == 'KCF':
            self.tracker = cv2.TrackerKCF_create() 
        elif self.tracker_type == 'TLD':
            self.tracker = cv2.TrackerTLD_create()  
        elif self.tracker_type == 'MEDIANFLOW':
            self.tracker = cv2.TrackerMedianFlow_create()   
        elif self.tracker_type == 'GOTURN':
            self.tracker = cv2.TrackerGOTURN_create()  
def create_tracker(tracker: str):
    """Create an OpenCV tracker of the requested type.

    Exits the process (status -1) when *tracker* is not a supported name.
    On OpenCV 3.0-3.2 the legacy ``cv2.Tracker_create`` factory is used.
    """
    # Supported tracker types, trading accuracy against FPS.
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]

    if tracker not in tracker_types:
        print(
            f'error: not supported tracker \'{tracker}\', available trackers are {tracker_types}.'
        )
        sys.exit(-1)

    # BUG FIX: the requested type was previously ignored — tracker_type was
    # hard-coded to tracker_types[0] ('BOOSTING') right after validation.
    tracker_type = tracker

    if int(major_ver) == 3 and int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        elif tracker_type == 'CSRT':
            tracker = cv2.TrackerCSRT_create()

    return tracker
Exemple #12
0
def reset(frame):
    """Drop the current trackers and rebuild them from freshly selected
    regions of interest in *frame*."""
    global tracker
    global multiTracker
    multiTracker = None
    tracker = None
    # Name -> cv2 factory attribute; resolved lazily so only the selected
    # constructor is ever touched, just like the original if chain.
    factory_attrs = {
        'BOOSTING': 'TrackerBoosting_create',
        'MIL': 'TrackerMIL_create',
        'KCF': 'TrackerKCF_create',
        'TLD': 'TrackerTLD_create',
        'MEDIANFLOW': 'TrackerMedianFlow_create',
        'GOTURN': 'TrackerGOTURN_create',
        'CSRT': 'TrackerCSRT_create',
        'MOSSE': 'TrackerMOSSE_create',
    }
    if tracker_type in factory_attrs:
        tracker = getattr(cv2, factory_attrs[tracker_type])()

    roi_boxes = select2(frame)
    multiTracker = createMultitracker(roi_boxes, frame)
def initTracker():
    """Instantiate the globally selected tracker type.

    Returns the new tracker, or None after printing the available names
    when ``tracker_type`` is unknown."""
    creator_attrs = {
        'BOOSTING': 'TrackerBoosting_create',
        'MIL': 'TrackerMIL_create',
        'KCF': 'TrackerKCF_create',
        'TLD': 'TrackerTLD_create',
        'MEDIANFLOW': 'TrackerMedianFlow_create',
        'GOTURN': 'TrackerGOTURN_create',
        'CSRT': 'TrackerCSRT_create',
        'MOSSE': 'TrackerMOSSE_create',
    }
    attr = creator_attrs.get(tracker_type)
    if attr is not None:
        # Lazy attribute lookup: cv2 is only consulted for the chosen type.
        return getattr(cv2, attr)()

    print('Incorrect tracker name')
    print('Available trackers are:')
    for t in tracker_types:
        print(t)
    return None
Exemple #14
0
def getTracker(tracker_type=''):
    """Create a tracker for *tracker_type*, falling back to the legacy
    ``cv2.Tracker_create`` factory on old OpenCV builds or when no type
    is given.

    NOTE(review): with a non-empty but unknown *tracker_type* on a modern
    OpenCV, ``tracker`` is never bound and UnboundLocalError is raised —
    behaviour kept from the original.
    """
    # tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE']
    # tracker_type = tracker_types[2]
    # (Removed a leftover debug dump of dir(cv2) via pprint.)
    if int(minor_ver) < 3 or not tracker_type:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        elif tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        elif tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        elif tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()

    return tracker
 def __init__(self, tracker_type="BOOSTING", draw_coord=True):
     """Create a tracker wrapper.

     Parameters:
         tracker_type: one of self.tracker_types (default "BOOSTING").
         draw_coord: whether coordinates should be drawn by the consumer.
     """
     # version comparision, some API changed.
     (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
     self.tracker_types = [
         'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN'
     ]
     self.tracker_type = tracker_type
     self.isWorking = False
     self.draw_coord = draw_coord
     # BUG FIX: the old check looked only at the minor version, so an
     # OpenCV 4.x build with minor < 3 wrongly took the legacy branch
     # (cv2.Tracker_create only exists on OpenCV 3.0-3.2).
     if int(major_ver) == 3 and int(minor_ver) < 3:
         self.tracker = cv2.Tracker_create(tracker_type)
     else:
         if tracker_type == 'BOOSTING':
             self.tracker = cv2.TrackerBoosting_create()
         elif tracker_type == 'MIL':
             self.tracker = cv2.TrackerMIL_create()
         elif tracker_type == 'KCF':
             self.tracker = cv2.TrackerKCF_create()
         elif tracker_type == 'TLD':
             self.tracker = cv2.TrackerTLD_create()
         elif tracker_type == 'MEDIANFLOW':
             self.tracker = cv2.TrackerMedianFlow_create()
         elif tracker_type == 'GOTURN':
             self.tracker = cv2.TrackerGOTURN_create()
Exemple #16
0
    def __init__(self, Frame, Bbox, Tracker):
        """ Initialise the tracker using the bounding box of the object in the frame """

        # create tracker
        # KCF is the default
        tracker = cv2.TrackerKCF_create()

        # BUG FIX: these comparisons used `is`, which tests object
        # *identity*, not string equality — matching relied on CPython
        # string interning and could silently fall through to KCF.
        if Tracker == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        elif Tracker == 'MIL':
            tracker = cv2.TrackerMIL_create()
        elif Tracker == 'TLD':
            tracker = cv2.TrackerTLD_create()
        elif Tracker == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()

        # initialise the tracker with the source frame
        ok = tracker.init(Frame, Bbox)

        if not ok:
            print("INFO: Object: Failed to initialise object tracker")

        self._bbox = Bbox
        self._tracker = tracker
Exemple #17
0
    def createTrackerByName(self, trackerType):
        """Return a new OpenCV tracker matching *trackerType*, or None
        (after printing the available names) when it matches no entry of
        ``self.trackerTypes``."""
        # Factory attribute names ordered exactly like self.trackerTypes.
        creator_attrs = (
            'TrackerBoosting_create',
            'TrackerMIL_create',
            'TrackerKCF_create',
            'TrackerTLD_create',
            'TrackerMedianFlow_create',
            'TrackerMOSSE_create',
            'TrackerCSRT_create',
        )
        for known, attr in zip(self.trackerTypes, creator_attrs):
            if trackerType == known:
                return getattr(cv2, attr)()

        print('Incorrect tracker name')
        print('Available trackers are:')
        for t in self.trackerTypes:
            print(t)
        return None
def create_tracker():
    """Set up a tracker (hard-wired to tracker_types[2], i.e. KCF) and
    return the pair (tracker, tracker_type)."""
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        # Pre-3.3 OpenCV exposed a single generic factory.
        tracker = cv2.Tracker_create(tracker_type)
    elif tracker_type == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    elif tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    elif tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()
    elif tracker_type == 'TLD':
        tracker = cv2.TrackerTLD_create()
    elif tracker_type == 'MEDIANFLOW':
        tracker = cv2.TrackerMedianFlow_create()
    elif tracker_type == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()

    return (tracker, tracker_type)
def get_tracker(tracker_type):
    """Create the requested tracker, enforcing a minimum OpenCV minor
    version per tracker kind.

    Returns None for an unknown *tracker_type*; exits the process when
    the installed OpenCV minor version (global ``MINOR``) is too old.
    """
    # (factory, minimum minor version).  The factories are lambdas so that
    # nothing is constructed until one is actually selected — the original
    # eagerly built every tracker on each call, which is wasteful and fails
    # outright if any single factory is missing from the cv2 build.
    trackers = {
        'BOOSTING': (lambda: cv2.TrackerBoosting_create(), 2),
        'MIL': (lambda: cv2.TrackerMIL_create(), 2),
        'KCF': (lambda: cv2.TrackerKCF_create(), 2),
        'TLD': (lambda: cv2.TrackerTLD_create(), 2),
        'MEDIANFLOW': (lambda: cv2.TrackerMedianFlow_create(), 2),
        'GOTURN': (lambda: cv2.TrackerGOTURN_create(), 2),
        'MOSSE': (lambda: cv2.TrackerMOSSE_create(), 4),
        'CSRT': (lambda: cv2.TrackerCSRT_create(), 4)
    }

    if tracker_type not in trackers:
        return None

    factory, tracker_min_minor = trackers[tracker_type]
    if int(MINOR) < tracker_min_minor:
        print("OpenCV version don't support")
        sys.exit()

    return factory()
    def init_tracker(self, frame, bbox):
        """Initialise the OpenCV tracker and the particle filter on the
        first frame.

        Also precomputes an HSV hue/saturation histogram of the initial
        bounding-box patch and records the first particle-centre point.

        Parameters:
            frame: first video frame (BGR, indexable as rows x cols).
            bbox: (x, y, w, h) initial bounding box.

        Returns self (fluent style).
        """
        tracker_types = [
            'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN'
        ]
        # NOTE(review): tracker_type is fixed to index 0 (BOOSTING), so all
        # branches below other than the first are dead code — confirm
        # whether the selection was meant to be configurable.
        tracker_type = tracker_types[0]

        if tracker_type == tracker_types[0]:
            self.tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            self.tracker = cv2.TrackerMIL_create()

    #   if tracker_type == 'KCF':
    #       self.tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            self.tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            self.tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            self.tracker = cv2.TrackerGOTURN_create()

        # Initialize tracker with first frame and bounding box

        ok = self.tracker.init(frame, bbox)
        self.particle_filter = ParticleFilter()
        self.particle_filter = ParticleFilter.init_particles(
            self.particle_filter, region=bbox, particlesPerObject=100)
        # Crop the initial target patch: rows y..y+h, cols x..x+w.
        img = frame[int(bbox[1]):int(bbox[1] + bbox[3]),
                    int(bbox[0]):int(bbox[0] + bbox[2])]
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # 2-D hue (0-180) / saturation (0-256) histogram of the target.
        self.histo = cv2.calcHist([hsv], [0, 1], None, [180, 256],
                                  [0, 180, 0, 256])
        self.points.append(
            ParticleFilter.get_particle_center(self.particle_filter))
        self.frame_number += 1

        return self
Exemple #21
0
def object_tracker(frame_video, x_b, y_b, w_b, h_b, tracker_algo="MDF"):
    """Initialise a tracker on *frame_video* over box (x_b, y_b, w_b, h_b),
    draw the box plus a "Tracking Started" label on the frame, and return
    the annotated frame together with the vertical centre of the box.

    Raises:
        ValueError: if *tracker_algo* is not a recognised name.
    """
    if tracker_algo == "boosting":
        tracker = cv2.TrackerBoosting_create()
    elif tracker_algo == "CSRT":
        tracker = cv2.TrackerCSRT_create()
    elif tracker_algo == "TLD":
        tracker = cv2.TrackerTLD_create()
    elif tracker_algo == "MIL":
        tracker = cv2.TrackerMIL_create()
    elif tracker_algo == "KCF":
        tracker = cv2.TrackerKCF_create()
    elif tracker_algo == "MDF":
        tracker = cv2.TrackerMedianFlow_create()
    else:
        # ROBUSTNESS FIX: an unknown name used to crash later with
        # UnboundLocalError at tracker.init(); fail fast and clearly.
        raise ValueError(f"unknown tracker_algo: {tracker_algo!r}")

    bbox = (x_b, y_b, w_b, h_b)
    tracker.init(frame_video, bbox)

    x, y, w, h = x_b, y_b, w_b, h_b
    cv2.rectangle(frame_video, (x, y), ((x + w), (y + h)), (0, 255, 0), 3, 3)
    cv2.putText(frame_video, "Tracking Started", (100, 75),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    y_cen = (y + y + h) / 2

    return frame_video, y_cen
Exemple #22
0
def create_tracker(frame, bbox):
    """Create the tracker named by the module-level ``tracker_type``,
    initialise it on *frame* with *bbox*, register it in ``updates``,
    and return it."""
    # require use of OpenCV 3.3 or above
    #if int(minor_ver) < 3:
    #    tracker = cv2.Tracker_create(tracker_type)
    #else:

    # bleh, write better code: tracker_type global
    if tracker_type == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    elif tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    elif tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()
    elif tracker_type == 'TLD':
        tracker = cv2.TrackerTLD_create()
    elif tracker_type == 'MEDIANFLOW':
        tracker = cv2.TrackerMedianFlow_create()
    elif tracker_type == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()

    ok = tracker.init(frame, bbox)
    updates[tracker] = 0
    return tracker
Exemple #23
0
if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[1]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    # Read video
    # video = cv2.VideoCapture("/Users/administrator/Downloads/fhd-video.mp4")
    video = cv2.VideoCapture(0)

    # Exit if video not opened.
    if not video.isOpened():
 def reset_tracker(self):
     """Replace the current tracker with a fresh MIL tracker and clear
     the tracked bounding box and iteration counter."""
     self.cv2_tracker = cv2.TrackerMIL_create()
     self.bbox = None       # no target selected yet
     self.track_itr = 0     # iteration counter restarts with the new tracker
    # (major, minor) = cv2.__version__.split(".")[:2]
    # print(major, minor)
    if not os.path.exists(args.vid_path):
        sys.exit("[!] WARNING !! Data(input) path does NOT exist!!")
    if (args.vid_path.endswith("mp4") or args.vid_path.endswith("avi")) and args.input_type == 0:
        sys.exit("[!] WARNING !! Video file provided but processing mode is IMAGES")
    if args.vid_path.endswith("jpg")  and args.input_type == 1:
        sys.exit("[!] WARNING !! Image file provided but processing mode is VIDEO")


    # Map CLI tracker names to constructed OpenCV trackers.
    # BUG FIX: "csrt" previously created a KCF tracker (copy/paste error).
    OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create(),
            "kcf": cv2.TrackerKCF_create(),
            "boosting": cv2.TrackerBoosting_create(),
            "mil" : cv2.TrackerMIL_create(),
            "tld" : cv2.TrackerTLD_create(),
            "medianflow" : cv2.TrackerMedianFlow_create(),
            "mosse": cv2.TrackerMOSSE_create()
            }

    if args.multi == False:
        if args.model == 0:
            tracker = OPENCV_OBJECT_TRACKERS[args.tracker]
        elif args.model == 1:
            tracker = SiamRPNvot()
            tracker.load_state_dict(torch.load(join(realpath(dirname(__file__)), './siam_tracker/SiamRPNVOT.model')))
            tracker.eval()
        else:
            sys.exit("[!] Incorrect Model argument: Choose 0 for CV2 tracker and 1 for DaSiamRPN tracker")
    else:
Exemple #26
0
def tracker_MIL():
    """Return a newly created OpenCV MIL tracker."""
    return cv2.TrackerMIL_create()
    new_box.append(0)
    tracker_ok.append(0)
    p1.append(0)
    p2.append(0)
    centre.append(0)
    tracker.append(0)
    i += 1

while cap.isOpened():
    ok, image = cap.read()
    #image=cv2.flip(image,1)

    k = cv2.waitKey(1) & 0xff
    if k == ord('p'):
        if flag < max_trackers:
            tracker[flag] = cv2.TrackerMIL_create()
            b_box[flag] = cv2.selectROI("Select ROI", image)
            tracker_ok[flag] = tracker[flag].init(image, b_box[flag])
            flag += 1
            #print flag

    i = 0
    while (i < flag):
        tracker_ok[i], new_box[i] = tracker[i].update(image)

        if tracker_ok[i]:
            p1[i] = (int(new_box[i][0]), int(new_box[i][1]))
            p2[i] = (int(new_box[i][0] + new_box[i][2]),
                     int(new_box[i][1] + new_box[i][3]))
            #cv2.rectangle(image, p1, p2, (0,0,200),2)
            centre[i] = (int(
         print "c1 : ", str(c1)
         print "c2 : ", str(c2)
         people_count = people_count + nc.crossed(c1, c2, linep1, linep2)
 text = "people count: " + str(people_count)
 print text
 cv.putText(image,
            text, (0, 15),
            cv.FONT_HERSHEY_SIMPLEX,
            0.5, (255, 0, 0),
            lineType=cv.LINE_AA)
 # #___________________________________________________________________________
 boxesHistory = copy.deepcopy(trackerObjects)
 #___________________________________________________________________________
 [
     tracker.add(
         cv.TrackerMIL_create(), image,
         tuple(
             nc.convert2ROI(
                 [item['topleft']['x'], item['topleft']['y']],
                 [item['bottomright']['x'], item['bottomright']['y']])))
     for item in newTrackerObjects
 ]
 ok, boxes = tracker.update(image)
 for iter in range(len(boxes)):
     newlocation = nc.convert2Rect(boxes[iter])
     trackerObjects[iter]['topleft']['x'] = int(newlocation['topleft']['x'])
     trackerObjects[iter]['topleft']['y'] = int(newlocation['topleft']['y'])
     trackerObjects[iter]['bottomright']['x'] = int(
         newlocation['bottomright']['x'])
     trackerObjects[iter]['bottomright']['y'] = int(
         newlocation['bottomright']['y'])
Exemple #29
0
    def frameProcessor(self, frame, gaussianBlur, useBlank, pixelThreshold,
                       dilation, averageDifferenceFrames, trackerBool):
        """
        Process one grabbed frame: update the object trackers, build a
        thresholded difference image against the previous frame, and either
        preview the per-ROI result or record its activity.

        Parameters:
            frame: current BGR frame.
            gaussianBlur: (enabled, kernel_size) pair.
            useBlank: not referenced in this method — kept for interface
                compatibility with callers.
            pixelThreshold: threshold applied to the difference image.
            dilation: number of dilation iterations.
            averageDifferenceFrames: when True, average the current and the
                previous difference frames (sliding window of two).
            trackerBool: on the very first frame only, interactively select
                and initialise one MIL tracker per ROI when True.
        """

        # A frame will be converted to a grayscale image
        grayImage = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # First frame: nothing to diff against yet
        if self.referenceFrame is None:

            # Initialize initial position of the objects to be tracked
            if trackerBool:

                # BUG FIX: the loop variable was named `object`, shadowing
                # the builtin; renamed without any behaviour change.
                for chamber in range(0, len(self.allROI)):

                    cutter = VideoCutter(frame)
                    cutter.windowName = "Select object in chamber " + str(
                        chamber + 1)
                    rect = cutter.selectROI()

                    self.trackedBoxes.append(rect)

                    tracker = cv2.TrackerMIL_create()
                    tracker.init(frame, rect)
                    self.trackers.append(tracker)

            self.referenceFrame = grayImage

        updates = []
        counter = 0

        # Advance every tracker; keep the previous box when an update fails.
        for position in self.trackers:

            success, updatedBox = position.update(frame)

            if success:
                updates.append(updatedBox)
            else:
                updates.append(self.trackedBoxes[counter])

            counter += 1

        self.trackedBoxes = updates

        # Max a black and white view of the absolute difference between the
        # image and the reference frame
        difference = cv2.absdiff(self.referenceFrame, grayImage)

        # If a user has chosen the option of 'averaging between difference frames'
        if averageDifferenceFrames:

            if self.lastDifferenceFrame is None:
                self.lastDifferenceFrame = difference

            # The average is half the value of every pixel of a difference frame +
            # the other half from the next difference frame (sliding window)
            image = 0.5 * self.lastDifferenceFrame + 0.5 * difference

        else:

            image = difference

        if gaussianBlur[0]:

            # A Gaussian blur will be applied to that frame to cancel out noise
            image = cv2.GaussianBlur(image, (gaussianBlur[1], gaussianBlur[1]),
                                     0)

        # Threshold the data using the data the user has entered
        image = cv2.threshold(image, pixelThreshold, 255, cv2.THRESH_TOZERO)[1]

        # Dilate the thresholded image to fill in holes
        image = cv2.dilate(image, None, iterations=dilation)
        # NOTE(review): on the non-averaged path `image` is the uint8
        # absdiff result, so this in-place doubling wraps on overflow —
        # confirm the brightness boost is intended to behave that way.
        image *= 2

        self.lastDifferenceFrame = difference
        self.referenceFrame = grayImage

        i = 0

        # Every ROI is cut out of the processed frame
        for ROI in self.allROI:

            r = ROI
            processedFrame = image[int(r[1]):int(r[1] + r[3]),
                                   int(r[0]):int(r[0] + r[2])]

            # The preview is showed if desired
            if self.preview:

                for box in self.trackedBoxes:

                    # NOTE(review): these midpoints are computed but never
                    # used or drawn — dead code kept for reference.
                    xmid = box[0] + (box[2] / 2)
                    ymid = box[1] + (box[3] / 2)

                cv2.imshow('Preview', processedFrame)
                cv2.waitKey(1)
                time.sleep(1 / self.fps)

            else:

                # x is the timestamp/frametime
                timestamp = len(self.processedFrames[i][1]) * (1 / self.fps)
                self.processedFrames[i][0].append(round(timestamp, 4))

                # y is the amount of white pixels in the processed frame
                self.processedFrames[i][1].append(np.sum(processedFrame > 0))

            i += 1

        return
Exemple #30
0
def main():
    """Detect and track hands from the webcam and classify each hand with a CNN.

    Pipeline per frame: HSV skin mask -> contours -> MIL multi-tracker ->
    square crop per tracked box (boxes containing a face are discarded) ->
    28x28 normalized crop fed to the Keras model; the predicted letter is
    drawn on the live video. Runs until ESC is pressed, then dumps the
    (currently empty) coordinate log to disk.

    Relies on names defined elsewhere in this file/imports:
    ``model_from_json``, ``exposure`` (skimage), ``sc`` (fitted scaler) and
    ``leer_letra`` (index -> letter lookup).
    """
    # ---- Load the CNN and the face cascade (platform-specific paths) ----
    sistema = platform.system()
    if sistema == 'Linux':
        path_signs = "/home/pzampella/NimSet/CNN/Hands/Keras/CNN_87_1534855126/"
        with open(path_signs + 'model.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        classifier_hands = model_from_json(loaded_model_json)
        classifier_hands.load_weights(path_signs + '/model.h5')
        # Face classifier, used later to reject tracked boxes that contain a face
        clasificador_rostro = cv2.CascadeClassifier("/home/pzampella/opencv-3.4.1/data/lbpcascades/lbpcascade_frontalface_improved.xml")
    if sistema == 'Windows':
        path_signs = r'CNN'
        with open(path_signs + r'\model.json', 'r') as json_file:
            loaded_model_json = json_file.read()
        classifier_hands = model_from_json(loaded_model_json)
        # raw string: the original '\model.h5' relied on '\m' not being an escape
        classifier_hands.load_weights(path_signs + r'\model.h5')
        clasificador_rostro = cv2.CascadeClassifier("lbpcascade_frontalface_improved.xml")
    # NOTE(review): on any other OS classifier_hands is never bound and the
    # compile() below raises NameError -- consider failing fast with a message.

    classifier_hands.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    print("Loaded model hands from disk")

    # Fresh scratch directory for debug images / the coordinate dump
    Path_imagenes = "/home/pzampella/Imagenes/"
    if os.path.exists(Path_imagenes):
        shutil.rmtree(Path_imagenes)
    if not os.path.exists(Path_imagenes):
        os.makedirs(Path_imagenes)

    min_area = 3000                                 # minimum detectable blob area (pixels)
    piel_min = 0.5                                  # minimum skin ratio inside a tracked box
    bajo = np.array([0, 48, 80], dtype="uint8")     # lower HSV bound for skin tone
    alto = np.array([20, 255, 255], dtype="uint8")  # upper HSV bound for skin tone
    trackers = cv2.MultiTracker_create()            # multi-object tracker

    bboxs = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]  # initial bounding boxes

    print("\n Set the maximum number of hands to be detected. The greater the number, the lower the performance.\n")
    max_hands = int(input("Maximum number of hands: "))

    Webcam = cv2.VideoCapture(0)

    if not Webcam.isOpened():                       # no webcam access: report and quit
        print("Error: Camara no disponible\n\n")
        os.system("pause")
        return

    FrameLeido, ImgOriginal = Webcam.read()

    if not FrameLeido or ImgOriginal is None:       # first frame could not be read
        print("Error: no se ha podido leer la imagen de la camara\n")
        os.system("pause")
        return

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))  # morphology kernel

    # Build a binary skin mask for the first frame
    ImgHSV = cv2.cvtColor(ImgOriginal, cv2.COLOR_BGR2HSV)
    Mascara = cv2.inRange(ImgHSV, bajo, alto)
    Mascara = cv2.dilate(Mascara, kernel, iterations=3)
    Borroso = cv2.GaussianBlur(Mascara, (3, 3), 0)
    Imagen = cv2.threshold(Borroso, 60, 255, cv2.THRESH_BINARY)[1]
    # NOTE(review): 2-value unpacking assumes OpenCV 4.x; 3.x returns 3 values
    contours, hierarchy = cv2.findContours(Imagen, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours]
    contador = sum(1 for a in areas if a > min_area)  # blobs larger than the minimum
    maximo = min(contador, max_hands)                 # number of objects to track

    # Seed one bounding box per largest blob, removing each blob once taken
    for i in range(0, maximo):
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        idx = contours[max_index]
        x, y, w, h = cv2.boundingRect(idx)
        bboxs[i] = (x, y, w, h)
        contours.pop(max_index)

    for i in range(0, maximo):
        new_tracker = cv2.TrackerMIL_create()
        _ = trackers.add(new_tracker, Imagen, bboxs[i])  # initialize trackers

    contador_viejo = maximo
    reset_tracker = 0
    cuenta = 0
    texto = ""
    while cv2.waitKey(1) != 27 and Webcam.isOpened():  # loop until ESC is pressed
        cuenta += 1
        FrameLeido, ImgOriginal = Webcam.read()
        if not FrameLeido or ImgOriginal is None:
            print("Error: no se ha podido leer la imagen de la camara\n")
            os.system("pause")
            break

        # Rebuild the binary skin mask for this frame
        ImgHSV = cv2.cvtColor(ImgOriginal, cv2.COLOR_BGR2HSV)
        Mascara = cv2.inRange(ImgHSV, bajo, alto)
        Mascara = cv2.dilate(Mascara, kernel, iterations=3)
        Borroso = cv2.GaussianBlur(Mascara, (3, 3), 0)
        Imagen = cv2.threshold(Borroso, 60, 255, cv2.THRESH_BINARY)[1]
        contours, _ = cv2.findContours(Imagen, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        areas = [cv2.contourArea(c) for c in contours]
        contador = sum(1 for a in areas if a > min_area)
        maximo = min(contador, max_hands)

        # Re-seed the trackers when the blob count changes or tracking was lost
        if contador_viejo != maximo or reset_tracker == 1:
            trackers.clear()
            trackers = cv2.MultiTracker_create()
            for i in range(0, maximo):
                areas = [cv2.contourArea(c) for c in contours]
                max_index = np.argmax(areas)
                idx = contours[max_index]
                x, y, w, h = cv2.boundingRect(idx)
                bbox = (x, y, w, h)
                contours.pop(max_index)
                new_tracker = cv2.TrackerMIL_create()
                _ = trackers.add(new_tracker, Imagen, bbox)
            reset_tracker = 0

        tracking, bboxs = trackers.update(Imagen)
        contours, _ = cv2.findContours(Imagen, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        puntos = []
        puntos_medios = ""
        for bbox in bboxs:
            if tracking:
                # Tracking succeeded: refresh the box size from the contour
                # that contains its center point
                p = (int(bbox[0] + bbox[2] / 2), int(bbox[1] + bbox[3] / 2))
                puntos_medios = puntos_medios + str(p[0]) + "," + str(p[1]) + " "
                # BUGFIX: iterate over the freshly extracted contours; the
                # original used len(areas) from the previous extraction,
                # which can exceed len(contours) and raise IndexError
                for i in range(0, len(contours)):
                    if cv2.pointPolygonTest(contours[i], p, False) == 1.0:
                        _, _, bbox[2], bbox[3] = cv2.boundingRect(contours[i])
                x_ = int(bbox[0])
                y_ = int(bbox[1])
                w_ = int(bbox[2])
                h_ = int(bbox[3])
                # Expand the box into a square around the hand.
                # BUGFIX: compare the current box (h_, w_); the original
                # tested the stale h/w left over from the seeding loop
                if h_ > w_:
                    alpha = int((h_ - w_) / 2)
                    p1 = (x_ - alpha, y_)
                    p2 = (x_ + w_ + alpha, y_ + h_)
                else:
                    alpha = int((w_ - h_) / 2)
                    p1 = (x_, y_ - alpha)
                    p2 = (x_ + w_, y_ + h_ + alpha)
                alto_img = len(ImgOriginal)      # image height (rows)
                ancho_img = len(ImgOriginal[0])  # image width (columns)
                # Clamp the square to the image.
                # BUGFIX: x must be bounded by the width and y by the height;
                # the original swapped the two dimensions
                if p1[0] < 0:
                    p1 = (0, p1[1])
                if p1[1] < 0:
                    p1 = (p1[0], 0)
                if p2[0] > ancho_img - 1:
                    p2 = (ancho_img - 1, p2[1])
                if p2[1] > alto_img - 1:
                    p2 = (p2[0], alto_img - 1)
                revisar = ImgOriginal[p1[1]:p2[1], p1[0]:p2[0]]
                # Guard both dimensions: revisar[0] on a 0-row crop would raise
                if len(revisar) != 0 and len(revisar[0]) != 0:
                    faces = len(clasificador_rostro.detectMultiScale(cv2.cvtColor(revisar, cv2.COLOR_BGR2GRAY), scaleFactor=1.1, minNeighbors=5))
                else:
                    faces = -1
                if faces == 0:  # keep only boxes that contain no face
                    puntos.append([p1[1], p2[1], p1[0], p2[0]])
                    cv2.rectangle(ImgOriginal, p1, p2, (200, 0, 0), 2, 1)
            else:
                # Tracking failed: warn on screen and on the console
                cv2.putText(ImgOriginal, "Fallo durante rastreo", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                            (0, 0, 255), 2)
                print("Fallo durante rastreo")

        contador_viejo = maximo
        for i in range(0, len(puntos)):
            piel = 0
            if puntos[i][0] != 0 or puntos[i][1] != 0 or puntos[i][2] != 0 or puntos[i][3] != 0:
                Cuadro = Imagen[puntos[i][0]:puntos[i][1], puntos[i][2]:puntos[i][3]]
                # BUGFIX: the mask is binary 0/255 and the whole crop matters;
                # the original counted the value 1 in the first row only, so
                # the skin ratio was always 0
                unos = np.count_nonzero(Cuadro)
                total = Cuadro.size
                piel = unos / total if total else 0  # fraction of skin pixels
            if piel < piel_min:
                reset_tracker = 1  # too little skin: re-seed trackers next frame
            if (puntos[i][1] - puntos[i][0] > 5) and (puntos[i][3] - puntos[i][2] > 5):
                # Mask out the background, equalize and rescale to the CNN input
                Aux = ImgOriginal[puntos[i][0]:puntos[i][1], puntos[i][2]:puntos[i][3]]
                Mask = Imagen[puntos[i][0]:puntos[i][1], puntos[i][2]:puntos[i][3]]
                Mask2 = cv2.morphologyEx(Mask, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
                Aux = cv2.bitwise_and(Aux, Aux, mask=Mask2)
                Aux2 = cv2.cvtColor(Aux, cv2.COLOR_RGB2GRAY)
                Aux2[Aux2 == 0] = 128  # neutral gray background for the CNN
                contrast = exposure.equalize_hist(Aux2) * 255
                Aux3 = sc.fit_transform(cv2.resize(contrast, (28, 28)))
                Aux4 = Aux3.reshape(1, 28, 28, 1)
                letra = np.argmax(classifier_hands.predict(Aux4))
                cv2.putText(ImgOriginal, leer_letra(letra), (puntos[i][2] + 5, puntos[i][0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)

        cv2.namedWindow("Video original", cv2.WINDOW_NORMAL)
        cv2.imshow("Video original", ImgOriginal)

    # BUGFIX: build the dump path portably; the original appended
    # "\Coordenadas.txt" (invalid '\C' escape) to a POSIX-style directory
    with open(os.path.join(Path_imagenes, "Coordenadas.txt"), "w") as archivo:
        archivo.write(texto)
    return