Example #1
    def detect_and_track(self, frames):
        boxes, names = self.get_boxes(frames[0])

        if boxes is None:
            return [], []

        tracker = Sort(max_age=1, min_hits=1, iou_threshold=0.3)

        # Initialize the tracker with detections from the first frame
        initial_detections = []

        for i in range(len(boxes)):
            initial_detections.append([boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3], 1])

        initial_detections = np.asarray(initial_detections)
        tracker.update(initial_detections)

        # Apply tracking on future frames
        detections = [initial_detections]
        for i in range(1, len(frames)):
            boxes, score = self.detect_model.detect(frames[i])

            dets = []
            if boxes is not None:
                for j in range(len(boxes)):  # use a new index so the frame index i is not shadowed
                    dets.append([boxes[j][0], boxes[j][1], boxes[j][2], boxes[j][3], score])
            else:
                dets = np.empty((0, 5))

            dets = np.asarray(dets)
            detections.append(tracker.update(dets))

        return detections, names
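
Note on the snippet above: it assumes a SORT variant whose update() takes an (N, 5) array of [x1, y1, x2, y2, score] detections per frame and returns an (M, 5) array of [x1, y1, x2, y2, track_id], as in the reference abewley/sort implementation. A minimal usage sketch under that assumption:

import numpy as np
from sort import Sort  # assumes the reference SORT implementation is on the path

tracker = Sort(max_age=1, min_hits=1, iou_threshold=0.3)

# One (N, 5) array of [x1, y1, x2, y2, score] detections per frame.
per_frame_dets = [
    np.array([[10, 20, 50, 80, 0.9]]),
    np.array([[12, 22, 52, 82, 0.8]]),
]
for dets in per_frame_dets:
    tracks = tracker.update(dets)  # (M, 5) array: [x1, y1, x2, y2, track_id]
    print(tracks)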
Example #2
 def detectStart(self):
     print(SystemInfo.detect_area, SystemInfo.detect_scale_label)
     if SystemInfo.video_opened_url is None:
         self.showMessage("提示", "还没选择视频")
         return
     dir_name, file_name = os.path.split(SystemInfo.video_opened_url)
     info_dir = os.path.join(dir_name, os.path.splitext(file_name)[0])
     if not os.path.exists(info_dir):
         os.makedirs(info_dir)
     self.getDetectSet()
     SystemInfo.detect_step = SystemInfo.detect_set_step
     config_file = os.path.join(info_dir, 'detect_config.ini')
     config = configparser.ConfigParser()
     if not os.path.exists(config_file):
         open(config_file, 'w').close()
         # os.mknod(config_file)
     config.read(config_file)
     section = str(SystemInfo.detect_set_start_time) + '-' + str(SystemInfo.detect_set_end_time) + '__' + \
               str(SystemInfo.detect_step)
     if section in config.sections():
         self.showMessage('Notice', 'These settings have already been detected')
         return
     info_file = os.path.join(info_dir, section + '.pkl')
     total_step = (SystemInfo.detect_set_end_time * SystemInfo.video_fps -
                   SystemInfo.detect_set_start_time *
                   SystemInfo.video_fps) / SystemInfo.detect_set_step
     self.ProgressBar = ProgressBar("self.FileIndex", "self.VideoNum",
                                    SystemInfo.video_total_fps)
     mul_trackers = Sort(max_age=SystemInfo.video_fps,
                         step=SystemInfo.detect_step,
                         area=np.int32(SystemInfo.detect_area))
     for i in range(
             int(SystemInfo.detect_set_start_time * SystemInfo.video_fps),
             int(SystemInfo.detect_set_end_time * SystemInfo.video_fps) + 1,
             int(SystemInfo.detect_set_step)):
         SystemInfo.detect_info['detect_frame'].append(i)
         SystemInfo.video.set(cv2.CAP_PROP_POS_FRAMES, i)
         success, frame = SystemInfo.video.read()
         # tag_list = []
         # number, orientation, _, tag_center, _ = locate_code(frame, threshMode=0, bradleyFilterSize=15,
         #                                                     bradleyThresh=3, tagList=tag_list)
         # SystemInfo.detect_info['tag_label'].append(number)
         # SystemInfo.detect_all_number.extend(number)
         # SystemInfo.detect_info['orientation'].append(orientation)
         # SystemInfo.detect_info['tag_center'].append(tag_center)
         if success:
             mul_trackers.update(frame)
         self.ProgressBar.setTipLable("总帧数:{}帧,当前帧数:{},步长:{}".format(
             int(SystemInfo.video_total_fps), i,
             SystemInfo.detect_set_step))
         self.ProgressBar.setValue(i)  # 更新进度条的值
         QApplication.processEvents()  # 实时显示
     with open(info_file, 'wb') as f:
         pickle.dump(mul_trackers, f)
     get_detect_info(info_file, SystemInfo)
     SystemInfo.write(config, section, config_file)
     self.ProgressBar.close()  # remember to close the progress bar
     self.showMessage("Notice", "Detection finished!")
     SystemInfo.video_is_detect = True
     self.projectWidget.addDetect()
Example #3
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.timer_camera = QtCore.QTimer()
        self.width = 720
        self.height = 480
        self.set_ui()
        self.slot_init()
        # openpose params

        print(params)
        # Starting OpenPose
        self.opWrapper = opWrapper
        self.opWrapper.start()

        # Process Image
        self.datum = op.Datum()

        self.SEQ_LEN = 10
        self.isStarted = False
        self.UserFrames = {}

        self.make_pause = False
        self.saved_count = 0
        self.background_image = {}
        self.users_complete = {}
        self.tracker = Sort(20, 3)
        self.currentExamination = 0

        # initiate the video
        self.cap = cv2.VideoCapture("/home/prince/Desktop/destination.mp4")
Example #4
    def __init__(self, max_age=30, min_hits=5, use_dlib=False):
        self.tracker = Sort(max_age, min_hits, use_dlib)

        if use_dlib:
            print("TRACKER: Dlib Correlation tracker activated!")
        else:
            print("TRACKER: Kalman SORT tracker activated!")
Example #5
    def __init__(self, video_path, output_path="", score=0.1, nms_threshold=0.45):
        self.score = score
        self.nms_threshold = nms_threshold

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / 1000, 1., 1.)
                      for x in range(1000)]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

        self.vid = cv2.VideoCapture(video_path)
        if not self.vid.isOpened():
            raise IOError("Couldn't open webcam or video")
        video_FourCC    = int(self.vid.get(cv2.CAP_PROP_FOURCC))
        video_fps       = self.vid.get(cv2.CAP_PROP_FPS)
        video_size      = (int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                            int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        isOutput = output_path != ""
        if isOutput:
            print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
            self.out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
        else:
            self.out = None
        
        bbox_path = '.'.join(video_path.split('.')[:-1]) + '.pkl.gz'
        with gzip.open(bbox_path, "rb") as f:
            self.bbox_history = pickle.load(f)
        
        self.mot_tracker = Sort(max_age=12, min_hits=3)
Example #6
    def bounds(self, batteries, houses):
        ''' Upper and lower bounds on costs for the given houses & batteries '''

        # solution class + update house.priority_list
        initial_solution = Solution(houses, batteries)
        Sort.priority_value(Sort, houses, batteries)

        # get furthest and closest battery for each house
        lower = 0
        upper = 0
        for house in houses:
            b_close = house.priority_list[0]
            lower += initial_solution.distance_calc(house, b_close)

            b_far = house.priority_list[4]
            upper += initial_solution.distance_calc(house, b_far)

        battery_costs = 0
        for battery in batteries:
            battery_costs += battery.cost

        # calculate upper and lower bounds for the total costs
        lowerbound = lower * 9 + battery_costs
        upperbound = upper * 9 + battery_costs

        print("LOWERBOUND: " , lowerbound)
        print("UPPERBOUND: " , upperbound)
        print()
        print()
Example #7
    def __init__(self):
        self.tracker = Sort()

        label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(
            label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
        self.category_index = label_map_util.create_category_index(categories)

        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            with tf.Session(graph=detection_graph) as self.sess:
                # Define input and output tensors for detection_graph
                self.image_tensor = detection_graph.get_tensor_by_name(
                    'image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                self.detection_boxes = detection_graph.get_tensor_by_name(
                    'detection_boxes:0')
                # Each score represents the confidence level for each of the objects.
                # The score is shown on the result image, together with the class label.
                self.detection_scores = detection_graph.get_tensor_by_name(
                    'detection_scores:0')
                self.detection_classes = detection_graph.get_tensor_by_name(
                    'detection_classes:0')
                self.num_detections = detection_graph.get_tensor_by_name(
                    'num_detections:0')
Example #8
def main():
    a01 = [9, 8, 7, 6, 1, 5, 10, 3]
    a02 = [1, 2, 3, 4, 5, 6, 7, 8]
    a03 = [10, 23, 643, 12, 34, 1, 60]
    a04 = [-1, 5, 1, 654, 12, 4, 1]
    a05 = [1, 1, 1, 1, 2, 2, 1, 2, 4, 6]

    print("Before sorting: %s" % a01)
    Sort.quick_sort(a01, 0, len(a01) - 1)
    # Sort.quick_sort_with_hoare_partition(a, 0, len(a) - 1)
    print("After sorting: %s" % a01)
    print('--------------------------')

    print("Before sorting: %s" % a02)
    Sort.quick_sort(a02, 0, len(a02) - 1)
    print("After sorting: %s" % a02)
    print('--------------------------')

    print("Before sorting: %s" % a03)
    Sort.quick_sort(a03, 0, len(a03) - 1)
    print("After sorting: %s" % a03)
    print('--------------------------')

    print("Before sorting: %s" % a04)
    Sort.quick_sort(a04, 0, len(a04) - 1)
    print("After sorting: %s" % a04)
    print('--------------------------')

    print("Before sorting: %s" % a05)
    Sort.quick_sort(a05, 0, len(a05) - 1)
    print("After sorting: %s" % a05)
    print('--------------------------')
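
Sort.quick_sort itself is not shown in this listing; a minimal sketch matching the quick_sort(arr, lo, hi) call signature above, using a Lomuto partition (a hypothetical reconstruction, not the original class):

class Sort:
    @staticmethod
    def quick_sort(arr, lo, hi):
        # Sort arr[lo..hi] in place with a Lomuto-partition quicksort.
        if lo >= hi:
            return
        pivot = arr[hi]
        i = lo
        for j in range(lo, hi):
            if arr[j] <= pivot:
                arr[i], arr[j] = arr[j], arr[i]
                i += 1
        arr[i], arr[hi] = arr[hi], arr[i]
        Sort.quick_sort(arr, lo, i - 1)
        Sort.quick_sort(arr, i + 1, hi)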
Example #9
    def __init__(self, max_age=100, min_hits=10, iou_threshold=0.3):
        """
        ROS IoU Tracker
        :param max_age: Maximum number of frames to keep alive a track without associated detections.
        :param min_hits: Minimum number of associated detections before track is initialised.
        :param iou_threshold: Minimum IOU for match.
        """
        self.iou_threshold = iou_threshold
        self.bridge = CvBridge()
        self.tracked_img_pub = rospy.Publisher("/iou_tracker/detection_image",
                                               Image,
                                               queue_size=1)
        self.new_bboxes = []
        self.bboxes = []
        self.bboxes_msg = BoundingBoxes()
        self.traces = dict()
        self.mot_tracker = Sort(
            max_age=max_age, min_hits=min_hits,
            iou_threshold=iou_threshold)  # create instance of the SORT tracker
        self.image = np.zeros(1)
        self.raw_image_sub = rospy.Subscriber('/darknet_ros/detection_image',
                                              Image,
                                              self.__raw_image_callback,
                                              queue_size=1)
        #self.raw_image_sub = rospy.Subscriber('/r200/depth/image_raw', Image, self.__raw_image_callback, queue_size=1)

        self.bbox_pub = rospy.Publisher("/iou_tracker/bounding_boxes",
                                        BoundingBoxes,
                                        queue_size=1)
        self.bbox_nn_sub = rospy.Subscriber('/darknet_ros/bounding_boxes',
                                            BoundingBoxes,
                                            self.__bbox_nn_callback,
                                            queue_size=1)
        rospy.loginfo("iou_tracker has been initialized!")
Example #10
 def bg_move(self):
     r_sort = Sort()
     r_sort.ckfile()
     result = r_sort.move()
     return bool(result)
Example #11
def Main():
    # Load weights
    model = Balloon.modellib.MaskRCNN(mode="inference", config=config1, model_dir=logpath)
    print("Loading weights ", weights_path)
    model.load_weights(weights_path, by_name=True)
    mot_tracker = Sort(sort_max_age, sort_min_hit)
    # evaluate
    vcapture = cv2.VideoCapture(videopath)
    width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = vcapture.get(cv2.CAP_PROP_FPS)
    # Define codec and create video writer
    file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(Balloon.datetime.datetime.now())
    vwriter = cv2.VideoWriter(file_name,
                              cv2.VideoWriter_fourcc(*'MJPG'),
                              fps, (width, height))
    colours = np.random.rand(32, 3) * 255
    count = 0
    success = True
    while success:
        print("frame: ", count)
        # Read next image
        success, image = vcapture.read()
        if success:
            # Detect objects
            r = model.detect([image], verbose=0)[0]
            result = r['rois']
            result = np.array(result)  # convert to a NumPy array
            print('Detected positions:', '\n', result)
            det = result[:, 0:5]
            '''
            #print(det)
            det[:, 0] = det[:, 0] * width
            det[:, 1] = det[:, 1] * height
            det[:, 2] = det[:, 2] * width
            det[:, 3] = det[:, 3] * height
            print(det)
            Hand the box positions to SORT; det format is [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
            '''
            trackers = mot_tracker.update(det)
            print('SORT tracker positions:', '\n', trackers)
            for d in trackers:
                xmin = int(d[1])
                ymin = int(d[0])
                xmax = int(d[3])
                ymax = int(d[2])
                label = int(d[4])
                cv2.rectangle(image, (xmin, ymin), (xmax, ymax),
                              (int(colours[label % 32, 0]), int(colours[label % 32, 1]), int(colours[label % 32, 2])),
                              2)
                cv2.imshow("小猫追气球", image)
                cv2.waitKey(50)
            vwriter.write(image)
            count += 1
    vwriter.release()
    print("Saved to ", file_name)
Example #12
 def __init__(self):
     super().__init__()
     self.initUI()
     self.thread = Thread()
     self.thread.sinout.connect(self.slotDisplayImage)
     self.thread.start()
     self.time = 0
     self.timeCount = 0
     self.frame = 0
     self.mot_tracker = Sort(max_age=5)
Example #13
File: testSort.py, Project: ms214/File-Sort
class TestSort(unittest.TestCase):
    def setUp(self):
        self.s1 = Sort()

    def testOpenFile(self):
        self.assertTrue(self.s1.openFile())

    def testMove(self):
        self.s1.ckfile()
        self.assertTrue(self.s1.move())
Example #14
File: test_sort.py, Project: bluerwf/bear
class TestSort(unittest.TestCase):
    def setUp(self):
        self.sorter = Sort()

    def test_bubble(self):
        a = [9, 1, 7, 6, 5, 4, 3, 2]
        self.assertListEqual(self.sorter.bubble(a), [1, 2, 3, 4, 5, 6, 7, 9])

    def test_qsort(self):
        a = [6, 5, 7, 9, 8]
        self.sorter.qsort(0, len(a) - 1, a)
        self.assertListEqual(a, [5, 6, 7, 8, 9])
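
The Sort class under test is not included in this listing; a minimal sketch that would satisfy both assertions (bubble() returns a sorted copy, qsort(lo, hi, a) sorts in place) might look like the following. This is an assumed reconstruction, not the project's actual code:

class Sort:
    def bubble(self, a):
        # Bubble sort; returns a sorted copy of the input list.
        a = list(a)
        for end in range(len(a) - 1, 0, -1):
            for i in range(end):
                if a[i] > a[i + 1]:
                    a[i], a[i + 1] = a[i + 1], a[i]
        return a

    def qsort(self, lo, hi, a):
        # In-place quicksort over a[lo..hi] (note the (lo, hi, list) argument order).
        if lo >= hi:
            return
        pivot = a[hi]
        i = lo
        for j in range(lo, hi):
            if a[j] <= pivot:
                a[i], a[j] = a[j], a[i]
                i += 1
        a[i], a[hi] = a[hi], a[i]
        self.qsort(lo, i - 1, a)
        self.qsort(i + 1, hi, a)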
Example #15
def make_trackvideo(configpath, weightpath, imagefolder, videofolder):
    os.makedirs(videofolder, exist_ok=True)

    imagenames = sorted(os.listdir(imagefolder))

    # find video names
    videonames = set()
    for img_nm in imagenames:
        video = img_nm.rsplit('_', 1)[0]
        videonames.add(video)
    print(f"video files: {videonames}\n")

    detector = init_detector(configpath, weightpath)
    # make video files
    for vdo_id, vdo_nm in enumerate(videonames):
        fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        fps = 12
        shape = (800, 800)
        vdo_pth = os.path.join(videofolder, f"{vdo_nm}.avi")
        result_video = cv2.VideoWriter(vdo_pth, fourcc, fps, shape)
        
        image_paths = glob.glob(os.path.join(imagefolder, f"{vdo_nm}*"))
        image_paths = sorted(image_paths, key=lambda x: int(os.path.splitext(x)[0].rsplit('_', 1)[1]))

        tracker = Sort()
        for img_id, img_pth in enumerate(image_paths):
            image = cv2.imread(img_pth)
            result, inf_time = inference_detector(detector, image)
            detected_boxes = result[0] 
            tracked_boxes = tracker.update(detected_boxes)

            # draw the result on image
            bboxes, track_ids = tracked_boxes[:, :-1], tracked_boxes[:, -1]
            
            for box, trk_id in zip(bboxes, track_ids):
                x1, y1, x2, y2 = box.astype("int")
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 2)

                cx, cy = (x1+x2)//2, (y1+y2)//2
                text = f"{trk_id.astype(int)}"
                cv2.putText(image, text, (cx-10, cy-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                cv2.circle(image, (cx, cy), 4, (0, 0, 255), -1)
            
            image = cv2.resize(image, shape) 
            result_video.write(image)
            print(f"video {vdo_id} frame {img_id} drawing done.")
        
        result_video.release()
        print(f"\n{vdo_nm}.avi created!!\n")
    
    return None
Example #16
def main():
    a = [9, 8, 7, 6, 1, 5, 10, 3]
    b = [9, 8, 7, 6, 1, 5, 10, 3]

    print("Before sorting: %s" % a)
    Sort.selection_sort(a)
    print("After sorting: %s" % a)

    print("--------------------------")

    print("Before sorting: %s" % b)
    Sort.selection_sort_recursive(b, 0)
    print("After sorting: %s" % b)
Example #17
def main():
    a = [9, 8, 7, 6, 1, 5, 10, 3]
    b = [9, 8, 7, 6, 1, 5, 10, 3]

    print("Before sorting: %s" % a)
    Sort.bubble_sort(a)
    print("After sorting: %s" % a)

    print("------------------------")

    print("Before sorting: %s" % b)
    Sort.bubble_sort(b)
    print("After sorting: %s" % b)
Example #18
 def __init__(self, recognition_threshold=0.35):
     self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
     self.detect_model = MTCNN(keep_all=True, device=self.device, post_process=False, min_face_size=20)
     self.face_labels = {}
     self.frames_per_track = {}
     self.prev_dets = None
     self.rec_model = InceptionResnetV1(pretrained='vggface2').eval().to(self.device)
     self.rec_thresh = recognition_threshold
     self.stride = 0
     self.track_embeddings = {}
     self.tracker = Sort(max_age=1, min_hits=1, iou_threshold=0.3)
     self.users_embeddings = []
     self.unknown_idx = 0
Example #19
def performActions(player, cardsToBeat, cardsToPlay):
	# reset cardsToPlay from previous player
	for card in cardsToPlay:
		player.addCardToHand(card)
	cardsToPlay[:] = []
	Sort.qsort(player.hand, compareCard)

	print "To start putting cards down, type 'choose' and 'complete' to finish your turn"
	print "Or if you can't beat the current cards, just enter 'pass'\n"
	printAllRelevantHands(player, cardsToPlay, cardsToBeat)
	action = raw_input("Enter what you want to do (type 'options' for list of actions available): ")

	# let user decide all the things to do this turn
	while doAction(player, action, cardsToPlay, cardsToBeat):
		action = input("Enter what you want to do (type 'options' for a list of available actions): ")

	printAllRelevantHands(player, cardsToPlay, cardsToBeat)
	validHand = isAValidHand(cardsToPlay)

	# no cards currently on the table
	if len(cardsToBeat) == 0:
		# note that the player is participating in the round
		player.passed = False
		if len(cardsToPlay) == 0 or not validHand:
			printPause("You are the current leader!")
			printPause("You must play something valid!\n")
			return performActions(player, cardsToBeat, cardsToPlay)
		else:
			# set current hand on table
			return list(cardsToPlay)

	else:
		# have to pass or beat the leader
		typeInPlay = typeOfPlay(cardsToBeat)
		if len(cardsToPlay) == 0:
			printPause("You are choosing to pass\n")
			player.passed = True
			return cardsToBeat

		elif not validHand or not handCanBePlayed(cardsToPlay, typeInPlay, cardsToBeat):
			printPause("The hand you want to play is not valid!")
			print "You need to have a straight of", typeInPlay["straight"]
			print "and a \"of a kind\" of", typeInPlay["ofAKind"]
			print "to match the leader"
			printPause("")
			return performActions(player, cardsToBeat, cardsToPlay)

		else:
			# set current hand on table
			player.passed = False
			return list(cardsToPlay)
Example #20
File: runner.py, Project: whtngus/algorithm
class Runner():
    def __init__(self):
        self.sort_collection = Sort()

    def sort(self, runner, sort_history, top):
        for i, (memo, history) in enumerate(sort_history(runner)):
            print("{} {}".format(memo, i + 1), end=" : ")
            for runner_info in history:
                print("{}\t".format(runner_info[1]), end="\t")
            print()
        self._result_print(runner, top)

    def run(self, sort_name, runner, top=3):
        # start history print
        runner_len = len(runner)
        self._start_history(runner)
        # sort setting
        print("=========================================")
        sort_history = self.sort_collection.get_sort(sort_name, runner_len)
        print("{} sort start".format(sort_name))
        if sort_name == "merge":
            result = self.sort_collection._merge_sort(runner)
            self._result_print(result, top)
            return
        elif sort_name == "quick":
            self.sort_collection._quick_sort(runner, 0, runner_len - 1)
            self._result_print(runner, top)
            return
        elif sort_name is None:
            return
        self.sort(runner, sort_history, top)
        print("=========================================")

    def _result_print(self, result, top):
        for i in range(top):
            print("{} 등 : {}번 {}".format(i + 1, result[i][0],
                                         self._s_to_time(result[i][1])))

    def _start_history(self, runner):
        # sort history print
        print("정렬 전", end=" : ")
        for runner_info in runner:
            print("{}".format(runner_info[1]), end="\t")
        print()

    def _s_to_time(self, second):
        h = second // 3600
        second -= h * 3600
        m = second // 60
        s = second % 60
        return "{}시간 {}분 {}초".format(h, m, s)
Example #21
    def __init__(self, object_coord_file, model, load_model=True):

        if object_coord_file is not None:
            self.object_coord = pickle.load(open(object_coord_file, 'rb'))
        if load_model:
            self.e = TfPoseEstimator(get_graph_path(model),
                                     target_size=(368, 368))
        self.resize_out_ratio = 4
        self.id_human = 0

        #self._realsense = RealSense()
        self.tracktype = None

        self.tracker = Sort()
Example #22
def main(args):
    # Read video
    frames, fps = read_video(args.video_path)
    print(f"Read {len(frames)} frames (fps: {fps})")

    # Read bboxes of each frame
    json_files = sorted(os.listdir(args.bbox_path),
                        key=lambda x: int(x.split(".")[0]))
    object_boxes_per_frame = []

    for file in json_files:
        with open(os.path.join(args.bbox_path, file)) as f:
            data = json.load(f)
            bboxes = data['children'].copy()
            object_boxes_per_frame.append(bboxes)
    print(f"Read {len(object_boxes_per_frame)} bbox files")

    # Run object tracking
    centroid_ids_per_frame = []

    if args.method == "centroid":
        ct = CentroidTracker(maxDisappeared=50)

        for ind in range(len(frames)):
            rects = [[obj['x1'], obj['y1'], obj['x2'], obj['y2']]
                     for obj in object_boxes_per_frame[ind]]
            centroid_ids = ct.update(rects)
            centroid_ids_per_frame.append(centroid_ids.copy())

    elif args.method == "kalman":
        tracker = Sort(max_age=50, min_hits=3)

        for ind in range(len(frames)):
            detections = np.array([[
                obj['x1'], obj['y1'], obj['x2'], obj['y2'], obj['confidence']
            ] for obj in object_boxes_per_frame[ind]])
            trackers = tracker.update(detections, None)
            centroid_ids = [[((track[0] + track[2]) / 2,
                              (track[1] + track[3]) / 2),
                             int(track[4])] for track in trackers]
            centroid_ids_per_frame.append(centroid_ids)
    else:
        raise NotImplementedError
    print(f"Processed {len(centroid_ids_per_frame)} frames")

    # Create output video
    annotated_frames = annotate_frames(frames, object_boxes_per_frame,
                                       centroid_ids_per_frame)
    frames2video(annotated_frames, fps=28, filepath=args.save_path)
    print("Created output video")
Example #23
def demo_sort(array, sort):
    print("=======", sort, "sort")
    print("Before sorting:")
    print(array)
    if (sort == "selection"):
        Sort.selection_sort(array)
    elif (sort == "bubble"):
        Sort.bubble_sort(array)
    else:
        print("Invalid argument")

    print("After sorting:")
    print(array)

    print("\n")
Example #24
	def __init__(self, *, mode, model_path, model_dir, config, max_age, min_hits, draw_mode='all'):
		MaskRCNN.__init__(self, mode=mode, model_dir=model_dir, config=config)
		Sort.__init__(self, max_age=max_age, min_hits=min_hits)
		self.num_classes = config.NUM_CLASSES
		self.load_weights(model_path, by_name=True)
		self.video_stream = None
		self.writer = None
		self.stream_is_open = True
		self.mask = None
		self.frame = None
		self.mask_frame = None
		self.frame_id = 0
		self.data = {}
		print('!!!!!!!!!!', type(self.data))
		self.track_per_class = {'classes': {}, 'scores': {}}
Example #25
 def __init__(self):
     self.tracker = Sort()  # create instance of the SORT tracker
     self.__dict__.update(self._defaults)  # set up default values
     self.class_names = self._get_class()
     self.anchors = self._get_anchors()
     self.sess = K.get_session()
     self.boxes, self.scores, self.classes = self.generate()
Example #26
class SORTTracker():
    def __init__(self, filter_class=None, model='yolox-s', ckpt='yolox_s.pth'):
        self.detector = Predictor(model, ckpt)
        self.filter_class = filter_class
        self.sort = Sort()

    def update(self, image, visual=True, logger_=True):
        _, info = self.detector.inference(image, visual=True, logger_=logger_)
        outputs = []
        if info['box_nums'] > 0:
            bbox_xywh = []
            scores = []
            objectids = []
            #bbox_xywh = torch.zeros((info['box_nums'], 4))
            for [x1, y1, x2,
                 y2], class_id, score in zip(info['boxes'], info['class_ids'],
                                             info['scores']):
                if self.filter_class and class_names[int(
                        class_id)] not in self.filter_class:
                    continue
                # color = compute_color_for_labels(int(class_id))
                bbox_xywh.append(
                    [int((x1 + x2) / 2),
                     int((y1 + y2) / 2), x2 - x1, y2 - y1])
                objectids.append(class_id)  # append this box's class, not the whole list
                scores.append(score)

            bbox_xywh = torch.Tensor(bbox_xywh)
            outputs = self.sort.update(bbox_xywh)
            if len(outputs) > 0 and visual:
                image = vis_track8(image, outputs)

        # return unconditionally so frames with no detections yield (image, []) as well
        return image, outputs
Example #27
def sort_image(sort_class: Sort, out_boxes, out_scores, out_classes):
    dets = []

    for i in range(0, len(out_boxes)):
        dets.append([
            out_boxes[i][1], out_boxes[i][0], out_boxes[i][3], out_boxes[i][2],
            out_scores[i], out_classes[i]
        ])

    dets = np.array(dets)
    # update
    trackers = sort_class.update(dets)

    out_boxes = []
    out_scores = []
    out_classes = []
    object_id = []
    # d [x1,y1,x2,y2,object_id,score,type]
    for d in trackers:
        out_boxes.append(list([d[1], d[0], d[3], d[2]]))
        object_id.append(int(d[4]))
        out_scores.append(float(d[5]))
        out_classes.append(int(d[6]))

    return np.array(out_boxes), np.array(out_scores), np.array(
        out_classes), np.array(object_id)
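
Note the index swap in this example: the incoming boxes appear to use [y1, x1, y2, x2] ordering (common in Keras YOLO ports), so elements 1/0/3/2 are reordered into the [x1, y1, x2, y2] layout SORT expects, then swapped back on output. A tiny illustration of that round trip:

box_yx = [20, 10, 80, 50]  # [y1, x1, y2, x2]
box_xy = [box_yx[1], box_yx[0], box_yx[3], box_yx[2]]  # [x1, y1, x2, y2]
assert box_xy == [10, 20, 50, 80]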
Example #28
class DeepSort():
    """
    wrapper class for Sort using deep appearance above
    """
    def __init__(self, max_age=30, min_hits=3):
        self.deep_appearance = DeepAppearance()
        self.sort = Sort(max_age, min_hits)

    def update(self, frame, dets):
        frame = np.flip(frame, axis=2)  # flip channel order (BGR -> RGB)
        H, W, C = frame.shape
        images = []

        for detection in dets:
            x1 = max(int(detection[0]), 0)
            y1 = max(int(detection[1]), 0)
            x2 = min(int(detection[2]), W)
            y2 = min(int(detection[3]), H)
            image = frame[y1:y2, x1:x2, :]
            image = Image.fromarray(image)
            images.append(image)

        if len(images) > 0:
            embs = self.deep_appearance.predict_embeddings(images)
        else:
            embs = []

        return self.sort.update(dets, embs)
Example #29
File: search.py, Project: sxwgit/spider
def Search(key):
    p = Pool(2)
    results = []
    for i in [search_jd, search_tb]:
        result = p.apply_async(i, args=(key, ))
        results.append(result)
    p.close()
    p.join()
    jd_list, pic_url = results[0].get()
    tb_list = results[1].get()
    '''
    jd_list,pic_url=search_jd(key)
    tt_list=search_tt(key)
    '''
    data = [Sort(jd_list), Sort(tb_list)]
    price_picurl = draw_picture(jd_list, tb_list, key)
    return data, pic_url, price_picurl
Example #30
def detect_video(yolo, video_path, output_path=""):

    import cv2

    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps = vid.get(cv2.CAP_PROP_FPS)
    video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC),
              type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    avg_time = 0
    prev_time = timer()
    ppl_tracker = Sort()
    while True:
        return_value, frame = vid.read()
        if not return_value:  # stop cleanly when the stream ends
            break
        t1 = timer()
        image = Image.fromarray(frame)
        image, pred_time = yolo.detect_image(image, frame, ppl_tracker)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result,
                    text=fps,
                    org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50,
                    color=(255, 0, 0),
                    thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        t2 = timer() - t1
        print(t2)
        if avg_time == 0:
            avg_time = t2
        else:
            avg_time = (avg_time + t2) / 2
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    yolo.close_session()
Example #31
def main():
    video_capture = cv2.VideoCapture(0)
    mot_tracker = Sort(max_age=10)  # create instance of the SORT tracker

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Find all the faces and face encodings in the frame of video
        face_locations = face_recognition.face_locations(small_frame)

        if len(face_locations) != 0:
            # top right bottom left => left top right bottom
            dets_list = [[l, t, r, b, 1] for (t, r, b, l) in face_locations]
            dets = np.array(dets_list)

            trackers = mot_tracker.update(dets)
            ids = trackers[:, 4].flatten()

            for (top, right, bottom, left), id in zip(face_locations, ids):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, str(id), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
Example #32
File: tracker.py, Project: MiG-Ui/LUKE
    def trackAll(self, detection_frames):
        """
        Tracks all detections in the given frames. Updates tracks_by_frame and
        signals when the computation has finished.
        """
        self.tracking = True
        self.stop_tracking = False
        self.state_changed_signal.emit()
        self.init_signal.emit()

        if self.detector.allCalculationAvailable():
            self.detector.computeAll()
            if self.detector.allCalculationAvailable():
                LogObject().print("Stopped before tracking.")
                self.abortComputing(True)
                return

        count = len(detection_frames)
        self.tracks_by_frame = {}
        self.mot_tracker = Sort(max_age=self.parameters.max_age,
                                min_hits=self.parameters.min_hits,
                                search_radius=self.parameters.search_radius)
        KalmanBoxTracker.count = 0
        ten_perc = 0.1 * count
        print_limit = 0
        for i, dets in enumerate(detection_frames):
            if i > print_limit:
                LogObject().print("Tracking:", int(float(i) / count * 100),
                                  "%")
                print_limit += ten_perc
            if self.stop_tracking:
                LogObject().print("Stopped tracking at", i)
                self.abortComputing(False)
                return

            self.tracks_by_frame[i] = self.trackBase(dets, i)

        LogObject().print("Tracking: 100 %")
        self.tracking = False
        self.applied_parameters = self.parameters.copy()
        self.applied_detector_parameters = self.detector.parameters.copy()

        self.state_changed_signal.emit()
        self.all_computed_signal.emit()
Example #34
#!/usr/bin/python
import timeit
import random
from sort import Sort
from maxPQ import MaxPQ
from minPQ import MinPQ
from exercise import CubeSum

if __name__ == '__main__':
	sortExample = Sort(100)
	sortExample.show()
	sortExample.quick_3_way()
	sortExample.show()
	# test_input =  map(lambda x: random.randint(0, 100), list(range(10)))
	# print test_input
	# myPQ = MaxPQ(0)
	# myPQ.show()
	# for item in test_input:
	# 	myPQ.insert(item)
	# myPQ.show()
	# while not myPQ.is_empty():
	# 	print myPQ.delete_max()
	#length = raw_input('Type in the length of list>')
	# sortExample = Sort(10)
	# sortExample.show()
	# #print '================='
	# #sortExample.selection()
	# #sortExample.shell()
	# #sortExample.quick()
	# sortExample.quick_2()
	# boolVal = sortExample.is_sorted()
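
Here Sort(100) appears to wrap a random list of the given length and sort it in place via quick_3_way(), whose name suggests Dijkstra's 3-way-partition quicksort. A hypothetical reconstruction (the real class body isn't shown in this listing):

import random

class Sort:
    def __init__(self, n):
        self.items = [random.randint(0, 100) for _ in range(n)]

    def show(self):
        print(self.items)

    def quick_3_way(self, lo=0, hi=None):
        # Dijkstra 3-way partition: < pivot | == pivot | > pivot.
        a = self.items
        if hi is None:
            hi = len(a) - 1
        if lo >= hi:
            return
        lt, i, gt = lo, lo + 1, hi
        pivot = a[lo]
        while i <= gt:
            if a[i] < pivot:
                a[lt], a[i] = a[i], a[lt]
                lt += 1
                i += 1
            elif a[i] > pivot:
                a[i], a[gt] = a[gt], a[i]
                gt -= 1
            else:
                i += 1
        self.quick_3_way(lo, lt - 1)
        self.quick_3_way(gt + 1, hi)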
Example #35
File: tracker.py, Project: Bruslan/MV3D-1
class Tracker:

    def __init__(self, bag_file, tracklets):
        self.timestamp_map = extract_bag_timestamps(bag_file)
        self.frame_map = generate_frame_map(tracklets)
        
        #TODO : detector should publish BBoxArray:
        # below I just read bboxes from tracklet (see handle_img_msg function)
        self.detect_pub = rospy.Publisher("bbox/detections", BboxArray, queue_size=1)
        self.n_skip = 25 # to simulate delay by MV3D, skip detections (about 1 sec)
        self.skip_count = 0 # keep count of skips
 
        self.predict_pub = rospy.Publisher("bbox/predictions", BboxArray, queue_size=1)
        self.detected_bboxes = None
        self.latest_detection_time = None

        self.mot_tracker = Sort(max_age=3, 
                                min_hits=6, 
                                iou_threshold=0.1, 
                                max_time_elapsed=2)
        self.min_detections = 7
        self.track_count = {}
        self.is_busy = False
   
 
    def startlistening(self):
        rospy.init_node('tracker', anonymous=True)
        rospy.Subscriber('/image_raw', Image, self.handle_image_msg) #TODO : just for time keeping (to be removed) 
        rospy.Subscriber("bbox/detections", BboxArray, self.handle_bbox_msg)
        print('tracker node initialized')
        rospy.Timer(rospy.Duration(0.1), self.publish_predictions)
        rospy.spin()


    def handle_bbox_msg(self, bbox_msg):
        """ saves the latest bbox detections and latest detection time
        """
        self.latest_detection_time = rospy.get_rostime()
        print("Bboxes detected at", self.latest_detection_time.to_sec())
        self.detected_bboxes = bbox_msg 


    def publish_predictions(self, event):
        # wait until first detection
        # print('enter here? ', time.time())

        if (not self.latest_detection_time) or (not self.detected_bboxes):
            return

        # if no new detections since last call :
        if self.latest_detection_time < event.last_real:
            # predict tracks without update 
            for track in self.mot_tracker.trackers:
                t = rospy.get_rostime().to_nsec()
                track.predict(t, is_update=False)
            tracks = self.mot_tracker.good_tracks()
        # if new detections since last call :
        else:
            detections = self.detected_bboxes.bboxes
            t = self.latest_detection_time.to_nsec()
            d = []
            for bbox in detections:
                d.append([bbox.x, bbox.y, bbox.z, bbox.h, bbox.w, bbox.l])
            tracks = self.mot_tracker.update(np.array(d),t)

        # publish all tracks
        print(tracks)

        bboxArray = BboxArray()
        bboxArray.header.stamp = rospy.get_rostime()
        for track in tracks:
            trk_id = int(track[5])
            if trk_id not in self.track_count:
                self.track_count[trk_id] = 0
            else:
                self.track_count[trk_id] += 1
            if self.track_count[trk_id] < self.min_detections:
                continue
            bbox = Bbox()
            bbox.x, bbox.y, bbox.z = track[0:3]
            bbox.h = track[4]
            bbox.w = bbox.l = 2 * track[3]
            bbox.yaw = 0  # TODO: needs to be changed for cars
            bboxArray.bboxes.append(bbox)
        # rospy.logerr('here: ', bboxArray)
        rospy.logerr('bboxArray={} '.format(bboxArray))
        self.predict_pub.publish(bboxArray)


    #------------------------------------------------------------
    #TODO : used here for publishing bbox (to be removed)
    #------------------------------------------------------------
    def handle_image_msg(self, img_msg):
        if self.is_busy:
            return
        self.is_busy = True
        now = rospy.get_rostime()
        bboxArray = BboxArray()
        bboxArray.header.stamp = now
        timestamp = img_msg.header.stamp.to_nsec()
        self.frame_index = self.timestamp_map[timestamp]
        for i, f in enumerate(self.frame_map[self.frame_index]):
            bbox = Bbox()
            bbox.x, bbox.y, bbox.z = f.trans
            bbox.h, bbox.w, bbox.l = f.size
            bbox.score = 1.0
            bboxArray.bboxes.append(bbox)
        time.sleep(0.3)  # simulate MV3D delay
        self.detect_pub.publish(bboxArray)
        rospy.logerr('detect_pub bboxArray={} '.format(bboxArray))
        self.is_busy = False
Example #36
def typeOfPlay(cardsPlayed):
	Sort.qsort(cardsPlayed, compareCard)
	return {"straight" : numOfStraight(cardsPlayed), "ofAKind" : numOfAKind(cardsPlayed)}
Example #37
File: filter.py, Project: Bruslan/MV3D-1
    tracklets = parse_xml(tracklet_file)
    if 0:
        bag = rosbag.Bag(bag_file)
        print('Reading timestamps from bag ', bag_file)
        n_stamps = bag.get_message_count(topic_filters=['/image_raw'])
        timestamps = [t.to_sec() for _, _, t in bag.read_messages(topics=['/image_raw'])]
    else:
        timestamps = pickle.load(open('./timestamps' + '_' + bag_name, 'rb'))
    pickle.dump(timestamps, open('./timestamps' + '_' + bag_name, 'wb'))
    detections = [[] for i in range(len(timestamps))]
    for track in tracklets:
        detections[track.first_frame].append(
            np.concatenate((track.trans[0], track.size)))  # (x,y,z,h,w,l)

    if config.cfg.OBJ_TYPE == 'ped':
        mot_tracker = Sort(max_age=3, min_hits=5, iou_threshold=0.1, max_time_elapsed=2)
    else:
        mot_tracker = Sort(max_age=3, min_hits=5, iou_threshold=0.01, max_time_elapsed=2)
    collection = TrackletCollection()


    tracklets = {}
    frame_count = 0
    fix_size = False
    for t, d in zip(timestamps, detections):
        #print("detections : ", d)
        tracks = mot_tracker.update(np.array(d), t)
        print(tracks)
        for track in tracks:
            trk_id = int(track[5])
            if trk_id not in tracklets:
Example #38
	def sortHand(self, cmpfn):
		Sort.qsort(self.hand, cmpfn)
Example #39
File: test_sort.py, Project: bluerwf/bear
 def setUp(self):
     self.sorter = Sort()
Example #40
	def sort(self, cmpfn):
		Sort.qsort(self.cards, cmpfn)