Code example #1
File: main.py  Project: Tron-G/AlgorithmVisual
def quick_sort_method():
    data = request.get_json()

    if data["input_tpye"] == 1 and data["operate_type"] == 0:  # 生成随机数组
        quick_sort = sort.Sort()
        data["array_data"] = quick_sort.get_data()

    elif data["input_tpye"] == 0 and data["operate_type"] == 0:
        data["array_data"] = list(map(int, data["array_data"]))  # 字符格式转数字

    if data["operate_type"] == 1:  # 排序
        quick_sort = sort.Sort(data["array_data"])
        data["sort_process"] = quick_sort.get_quick_sort_data()

    return jsonify(data)
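
For context, a minimal sketch of how this handler might be wired up and exercised, assuming a standard Flask app; the '/quick_sort' route path and the test payload are illustrative, not taken from the project:

from flask import Flask
import sort  # the project's sort module used above

app = Flask(__name__)
# hypothetical URL rule; the real registration is not shown in the snippet
app.add_url_rule('/quick_sort', view_func=quick_sort_method, methods=['POST'])

with app.test_client() as client:
    # operate_type 1 asks the handler to sort the supplied array;
    # "input_tpye" (sic) matches the key name the handler reads
    resp = client.post('/quick_sort', json={
        'input_tpye': 0, 'operate_type': 1, 'array_data': [5, 3, 8, 1]})
    print(resp.get_json()['sort_process'])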
Code example #2
File: run.py  Project: jodyanna/algorithms
def main():
    numbers = maker.Maker.create_array_random_int(1000, 0, 100)

    print(numbers, "\n")
    leet = sort.Sort()
    numbers = leet.merge_sort_top_down(numbers)
    print(numbers)
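
The call above assumes Sort.merge_sort_top_down returns a new sorted list; the project's implementation is not shown, but a minimal top-down merge sort with that contract could look like this:

class Sort:
    def merge_sort_top_down(self, items):
        # split recursively, then merge the two sorted halves
        if len(items) <= 1:
            return list(items)
        mid = len(items) // 2
        left = self.merge_sort_top_down(items[:mid])
        right = self.merge_sort_top_down(items[mid:])
        merged, i, j = [], 0, 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i])
                i += 1
            else:
                merged.append(right[j])
                j += 1
        return merged + left[i:] + right[j:]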
Code example #3
def face_detect_frame():
    method_type = 'hog'
    video_root = '/Users/alexwang/data'
    video_list = ['14456458.mp4', '32974696.mp4', '16815015.mp4', '10616634.mp4']
    colours = np.random.rand(32, 3) * 256  # used only for display

    print('start process videos...')
    for video_name in video_list:
        video_tracker = sort.Sort(max_age=5)
        # fork-specific helper: reset KalmanBoxTracker's shared ID counter
        # so track IDs restart for each video
        kal = sort.KalmanBoxTracker([0, 0, 1, 1, 0])
        kal.clear_count()
        cap = cv2.VideoCapture(os.path.join(video_root, video_name))
        frame_width = int(cap.get(3))   # 3 == cv2.CAP_PROP_FRAME_WIDTH
        frame_height = int(cap.get(4))  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
        output_path = os.path.join(video_root, 'notexpand_{}_{}'.format(method_type, video_name))
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'MP4V'), 15, (frame_width, frame_height))

        while cap.isOpened():
            try:
                ret, frame = cap.read()
                if not ret:
                    break

                image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if method_type == 'cnn':
                    face_img_list = face_detect.cnn_face_detect(image_rgb, expand=False)
                else:
                    face_img_list = face_detect.hog_face_detect(image_rgb, expand=False)
                detections = []
                for (face, rect, score) in face_img_list:
                    if score < 0.4:
                        continue
                    x_min, y_min, x_max, y_max = rect
                    detections.append([x_min, y_min, x_max, y_max, 10 * score])

                print('detections:', detections)
                track_bbs_ids = video_tracker.update(np.asarray(detections))
                for d in track_bbs_ids:
                    print('d:', d)

                    d = d.astype(np.int32)
                    # project-local drawing helper (x, y, width, height signature)
                    rectangle(frame, d[0], d[1], d[2] - d[0],
                              d[3] - d[1], colours[d[4] % 32, :],
                              thickness=2, label=str(d[4]))

                cv2.imshow('image', frame)
                out.write(frame)
                # Exit if ESC pressed
                key = cv2.waitKey(1) & 0xff
                if key == 27:
                    sys.exit(0)
                elif key == ord('q'):
                    break
            except Exception as e:
                traceback.print_exc()
        cap.release()
        out.release()
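
The tracker calls above follow the usual SORT contract: update() takes an (N, 5) array of [x1, y1, x2, y2, score] rows and returns (M, 5) rows whose last column is the track ID. A standalone sketch of that round trip, assuming a stock abewley-style sort module (the snippet uses a fork with extra helpers such as clear_count); the boxes are made up:

import numpy as np
import sort

tracker = sort.Sort(max_age=5)
dets = np.array([[10., 20., 110., 220., 0.9],    # [x1, y1, x2, y2, score]
                 [300., 40., 380., 160., 0.8]])
for _ in range(3):        # feed the same boxes for a few frames
    tracks = tracker.update(dets)
print(tracks)             # rows of [x1, y1, x2, y2, track_id]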
Code example #4
File: preProc.py  Project: yusuke0128/Kaggle
 def dataToRGB(self, URL):
     s = sort.Sort()
     dataSet = sorted(glob.glob(URL), key=s.numericalSort)
     RGBSet = [0] * len(dataSet)
     for i in range(len(dataSet)):
         dataSet[i] = cv2.imread(dataSet[i])
         dataSet[i] = cv2.resize(dataSet[i], (230, 174))
         RGBSet[i] = cv2.split(dataSet[i])
     RGBSet = np.array(RGBSet)
     print(RGBSet.shape)
     return RGBSet
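
The sorted(..., key=s.numericalSort) call assumes a key that orders filenames by the numbers embedded in them. A common implementation of such a key, shown here as a sketch rather than the project's actual code:

import re

_numbers = re.compile(r'(\d+)')

def numericalSort(value):
    # split 'img12.png' into ['img', 12, '.png'] so embedded numbers
    # compare as integers rather than character by character
    # (assumes uniformly patterned names; otherwise the mixed-type
    # comparison raises TypeError)
    parts = _numbers.split(value)
    parts[1::2] = map(int, parts[1::2])
    return parts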
Code example #5
def detect_video(yolo, video_path, output_path=""):
    import cv2
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    video_FourCC    = int(vid.get(cv2.CAP_PROP_FOURCC))
    video_fps       = vid.get(cv2.CAP_PROP_FPS)
    video_size      = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
                        int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()

    mot_tracker = sort.Sort()
    yolo.mot_tracker = mot_tracker

    if yolo.write_to_file:
        emptyFile = open(yolo.output_path + 'result.dat', 'w')
    else:
        emptyFile = None

    while True:
        return_value, frame = vid.read()
        try:
            image = Image.fromarray(frame)
        except AttributeError:
            break
        image = yolo.detect_image(image, emptyFile)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:  # refresh the FPS readout about once per second
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_NORMAL)
        cv2.imshow("result", result)
        if isOutput:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    if yolo.write_to_file:
        emptyFile.close()
    yolo.close_session()
Code example #6
File: GUI.py  Project: Team4618/Scouting
    def __init__(self, parent, *args):
        self.parent = parent

        # main notebook
        self.root = Notebook(parent)
        self.root.pack(expand=True, fill=BOTH)

        self.scoutingPage = scouting.ScoutingUI(self.root)
        self.pickListPage = pickList.PickList(self.root)
        self.sortPage = sort.Sort(self.root)

        self.root.select(0)  # select the scouting page

        chooseFileBtn = Button(parent,
                               text="Choose data folder",
                               command=self.getFileDir)
        chooseFileBtn.pack(side=LEFT)

        # set our working directory
        chdir(path.dirname(__file__))

        # set a reference to the current directory where we're looking for data
        self.fileStrVar = StringVar()

        global filedir
        filedir = getcwd() + "\\files"

        if len(filedir) > 20:
            # note: this truncation rewrites the global path itself,
            # not just the label displayed below
            filedir = "..." + filedir[-17:]

        self.fileStrVar.set(filedir)
        Label(parent, textvariable=self.fileStrVar).pack(side=LEFT)

        # selector for event
        self.eventSV = StringVar()
        self.eventSV.set(event)

        self.eventSV.trace('w', self.onEventChange)

        eventList = ["No internet connection"]

        if isOnline():
            eventList = list(events.keys())

        eventChooser = OptionMenu(parent, self.eventSV, *eventList)
        eventChooser.pack(side=RIGHT)

        Label(parent, text="Event:").pack(side=RIGHT)
Code example #7
    def __init__(self, path):
        global to_file
        to_file = path
        try:
            os.mkdir(os.path.join(os.getcwd(), "Files"))
            os.mkdir(os.path.join(os.getcwd(), "CSV_Files"))
        except OSError:  # the directories may already exist
            pass
        process = CrawlerProcess()
        process.crawl(FkscrapeSpider)
        process.start()

        obj = sentiment.SentimentAnalysis(
            os.path.join(os.getcwd(), "Files"),
            os.path.join(os.getcwd(), "CSV_Files"))

        obj1 = sort.Sort()
Code example #8
File: yolo.py  Project: oomq/yolo3_sort_deepsort
 def _initialize_tracker(self):
     if not self.image:
         if self.tracker == 'sort':
             tracker = sort.Sort()
             return tracker, None
         elif self.tracker == 'deepsort':
             # initialize deep sort
             model_filename = self.deepsort_model
             encoder = gdet.create_box_encoder(model_filename, batch_size=1)
             metric = nn_matching.NearestNeighborDistanceMetric(
                 "cosine", matching_threshold=0.5, budget=None)
             tracker = deepsort_Tracker(metric)
             return tracker, encoder
         else:
             raise ValueError(
                 'The variable \"tracker\" must be \"sort\" or \"deepsort\".'
             )
     else:
         return None, None
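
A sketch of how the returned (tracker, encoder) pair might be consumed, assuming the widely used deep_sort package (Detection, Tracker.predict()/update()) and a stock SORT; yolo, frame, and the boxes are illustrative placeholders:

import numpy as np
from deep_sort.detection import Detection

tracker, encoder = yolo._initialize_tracker()
if encoder is None:
    # plain SORT consumes [x1, y1, x2, y2, score] rows directly
    tracks = tracker.update(np.array([[10., 20., 110., 220., 0.9]]))
else:
    # Deep SORT wants a per-box appearance feature from the encoder
    boxes_tlwh = [[10, 20, 100, 200]]         # [x, y, w, h]
    features = encoder(frame, boxes_tlwh)     # frame: current BGR image
    detections = [Detection(b, 0.9, f) for b, f in zip(boxes_tlwh, features)]
    tracker.predict()
    tracker.update(detections)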
Code example #9
def detect_img(yolo):
    while True:

        img = input('Input image filename:')
        try:
            image = Image.open(img)
        except OSError:  # missing file or unreadable image
            print('Open Error! Try again!')
            continue
        else:
            # Initialization
            mot_tracker = sort.Sort()
            yolo.mot_tracker = mot_tracker
            yolo.frame = 1

            if yolo.write_to_file:
                emptyFile = open(yolo.output_path + 'result.dat', 'w')
            else:
                emptyFile = None
            r_image = yolo.detect_image(image, emptyFile)
            if yolo.write_to_file:
                emptyFile.close()
            r_image.save(yolo.output_path + 'output.png', 'png')
    yolo.close_session()  # unreachable: the loop above never breaks
Code example #10
 def __init__(self, name):
     self.max_iou_distance = rospy.get_param('max_iou_distance', 0.7)
     self.max_age = rospy.get_param('max_age', 30)
     self.n_init = rospy.get_param('n_init', 3)
     self.metric = nn_matching.NearestNeighborDistanceMetric(
         "cosine", 0.2, None)
     self.tracker = Tracker(self.metric, self.max_iou_distance,
                            self.max_age, self.n_init)
     rospy.Subscriber("image_object", ObjectArray, self.__callback1)
     self.bridge = CvBridge()
     self.image_sub = rospy.Subscriber("image_raw", Image,
                                       self.__image_callback)
     self.pub = rospy.Publisher('object_tracked',
                                ObjectArray,
                                queue_size=10)
     self.image_pub = rospy.Publisher("object_tracked_image",
                                      Image,
                                      queue_size=10)
     self.cv_image = None
     self.visualizer = visualization.Visualization((960, 540),
                                                   update_ms=100)
     self.mot_tracker = sort.Sort(iou_th=0.1)
     self.seedling_id_list = []
     self.seedling_lifetime = dict()
Code example #11
def main():
  # detection result folder
  detection_folder = '/media/yujiang/Data/Seedling/Experiments/counting_results_rgb/Final_model'
  # video folder
  video_folder = '/media/yujiang/Data/Seedling/Datasets/TestVideos'
  # folder for saving counted videos
  saving_folder = '/media/yujiang/Data/Seedling/Experiments/counting_videos_rgb/Final_model'
  # label map
  PATH_TO_LABELS = '/media/yujiang/Data/Seedling/Datasets/TFrecords/pascal_label_map.pbtxt'
  NUM_CLASSES = 2  # DO NOT change this; it is the number of classes the detection model identifies
  # Create label map
  label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
  categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
  category_index = label_map_util.create_category_index(categories)

  video_prefix_config_dict = {
    'TAMU_2015': {
      'iou_th':0.1, 'tracking_lifetime_th':9, 'is_transpose': False,
      'resize_fx':0.5, 'resize_fy':0.5, 'output_fps':5.0, 'is_display':True},
    'UGA_2015': {
      'iou_th':0.1, 'tracking_lifetime_th':15, 'is_transpose': False,
      'resize_fx':0.5, 'resize_fy':0.5, 'output_fps':5.0, 'is_display':True},
    'UGA_2018': {
      'iou_th':0.1, 'tracking_lifetime_th':9, 'is_transpose': True,
      'resize_fx':0.5, 'resize_fy':0.5, 'output_fps':5.0, 'is_display':True}}

  # Output video codec
  fourcc_codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')

  for video_prefix, counting_config in video_prefix_config_dict.items():
    
    # tracking and counting parameters
    is_transpose = counting_config['is_transpose']
    iou_th = counting_config['iou_th']
    tracking_lifetime_th = counting_config['tracking_lifetime_th']
    resize_fx = counting_config['resize_fx']
    resize_fy = counting_config['resize_fy']
    output_fps = counting_config['output_fps']
    is_display = counting_config['is_display']
    # get testing videos with the video prefix
    video_list = os.listdir(video_folder)
    video_list = [f for f in video_list if f.startswith(video_prefix) and f.endswith('.mp4')]

    for video_file in video_list:
      video_name = video_file.split('.')[0]
      detection_file_path = osp.join(detection_folder, '{0}_detection.json'.format(video_name))
      video_file_path = osp.join(video_folder, video_file)
      output_video_filepath = osp.join(saving_folder, '{0}.avi'.format(video_file[:-4]))

      # tracking list
      seedling_id_list = list()
      seedling_lifetime = dict()
      # load detection results
      with open(detection_file_path,'r') as fid:
        jstr = json.load(fid)
        res_list = json.loads(jstr)

        # load video for rendering tracking result
        cap = cv2.VideoCapture(video_file_path)
        frame_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # generate output video writer
        fr_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        fr_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        if is_transpose:
          vd_writer = cv2.VideoWriter(output_video_filepath,
            fourcc_codec, output_fps,
            (int(fr_height*resize_fy), int(fr_width*resize_fx)))
        else:
          vd_writer = cv2.VideoWriter(output_video_filepath,
            fourcc_codec, output_fps,
            (int(fr_width*resize_fx), int(fr_height*resize_fy)))

        # initialize the Kalman filter based tracking algorithm
        mot_tracker = sort.Sort(iou_th=iou_th)
        # process individual frames
        for i in range(frame_num-1):
          ret, frame = cap.read()
          frame = cv2.resize(frame, None, fx=resize_fx, fy=resize_fy, interpolation=cv2.INTER_CUBIC)
          image_np = frame
          if is_transpose:
            image_np = image_np.transpose((1,0,2))
          # load detection result for the current frame
          detection_res = res_list[i]
          frame_bbox = detection_res['bbox']
          frame_bbox_array = np.array(frame_bbox, dtype=np.float32)
          frame_cls = detection_res['box_cls']
          frame_cls = np.array([int(i) for i in frame_cls])
          frame_scores = np.asarray(detection_res['box_scores']).reshape((-1,1))

          if len(frame_bbox_array) != 0:
            dets = np.concatenate((frame_bbox_array, frame_scores), axis=1)
          else:
            dets = np.array([])
          # update trackers
          trackers = mot_tracker.update(dets)
          # update tracker names
          frame_bbox_array_new = trackers[:,0:4]
          frame_cls_new = list()
          category_index_new = dict()
          for d_index, d in enumerate(trackers[:, 4]):
            if d not in seedling_id_list:
              seedling_id_list.append(d)
              # max_cls_id = len(seedling_id_list)
            cur_d = seedling_id_list.index(d)+1

            if cur_d not in list(seedling_lifetime.keys()):
              seedling_lifetime[cur_d] = list()
            seedling_lifetime[cur_d].append(i)
            frame_cls_new.append(cur_d) 
            category_index_new[cur_d] = {'id':d_index, 'name':'Plant'+str(cur_d)}

          # Visualization of the results of a detection.
          vis_util.visualize_boxes_and_labels_on_image_array(
              image_np,
              frame_bbox_array_new,
              frame_cls_new,
              frame_scores,
              category_index_new, # previously use 'category_index'
              use_normalized_coordinates=True,
              line_thickness=2)
          # write to the output video
          vd_writer.write(image_np)
          if is_display:
            cv2.imshow('video_window',image_np)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        vd_writer.release()
        cv2.destroyAllWindows()
        print('Processed the video {0} and saved as {1}.\n'.format(
          video_file_path, output_video_filepath))
Code example #12
File: demo.py  Project: versey-sherry/sort
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Current device is', device)
    track_data = []
    images = glob(os.path.join(args.video_name, '*.jp*'))
    images = sorted([item.split('/')[-1] for item in images])
    threshold = 0.5

    if args.gt:
        print('ground truth file is', args.gt)
        #ground truth represented as dictionary list by the first frame
        gt = defaultdict(list)
        with open(args.gt) as file:
            for line in file:
                #print(line)
                #<frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <class>, <visibility>
                #print(eval(line)[0])
                # eval() parses the comma-separated GT line into a tuple
                gt[images[eval(line)[0] - 1]].append(list(eval(line)[1:]))
        #print(gt)

    # get faster RCNN
    #https://github.com/mlvlab/COSE474/blob/master/3_Object_Detection_and_MOT_tutorial.ipynb
    #load faster R-CNN model
    model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    model.eval()
    model = model.to(device)

    #print(model)
    odata = OrderedDict()
    i = 0
    for frame in get_frames(args.video_name):
        print(images[i])
        start = time.time()
        # change the selection threshold for the R-CNN results here
        odata[images[i]] = get_prediction(model, frame, device, 0.8)
        i += 1
        print('process time is', time.time() - start)

    #tracker starts here
    save_path = './results/'
    mot_tracker = SORT.Sort()

    img_array = []
    a = 0
    iou_array = []
    #keep track of id
    gt_track_id = {}
    gt_blist = []

    for key in odata.keys():
        a += 1
        arrlist = []
        det_img = cv2.imread(os.path.join(args.video_name, key))
        det_result = odata[key]
        img_iou = []
        if args.gt:
            gt_result = gt[key]
            #print(gt_result)
            #print(type(gt_result))

        #read in the detection results
        for info in det_result:
            bbox = info['bbox']
            labels = info['labels']
            scores = info['scores']
            templist = bbox + [scores]

            if labels == 'person':  # label 1 is a person in MS COCO Dataset
                arrlist.append(templist)

        start = time.time()
        track_bbs_ids = mot_tracker.update(np.array(arrlist))
        #print(track_bbs_ids.shape)
        print(
            'frame', a, 'Association update time is {} second'.format(
                round(time.time() - start, 4)))
        #print(track_bbs_ids)

        newname = save_path + key
        #print(newname)

        if args.gt:
            #relate the gt_id with track id
            gt_track = defaultdict(list)
            #print(gt_track)
            for gt_box in gt_result:
                gt_label = gt_box[0]
                gt_box = [
                    gt_box[1], gt_box[2], gt_box[1] + gt_box[3],
                    gt_box[2] + gt_box[4]
                ]
                for i in range(track_bbs_ids.shape[0]):
                    ele = track_bbs_ids[i, :]
                    bbox_track = [
                        int(ele[0]),
                        int(ele[1]),
                        int(ele[2]),
                        int(ele[3])
                    ]
                    temp_iou = compute_iou(bbox_track, gt_box)
                    if temp_iou > 0:
                        if gt_label in gt_track.keys():
                            if gt_track[gt_label][1] < temp_iou:
                                gt_track[gt_label] = [
                                    int(ele[4]), temp_iou, bbox_track
                                ]
                        else:
                            gt_track[gt_label] = [
                                int(ele[4]), temp_iou, bbox_track
                            ]
            #print(gt_track)

        # plotting ground truth
        if args.gt:
            overlay = det_img.copy()
            for gt_box in gt_result:
                #print('gtbox type', type(gt_box))
                #plot semi transparent box https://gist.github.com/IAmSuyogJadhav/305bfd9a0605a4c096383408bee7fd5c
                gt_label = gt_box[0]
                gt_box = [
                    gt_box[1], gt_box[2], gt_box[1] + gt_box[3],
                    gt_box[2] + gt_box[4]
                ]
                if gt_label not in gt_track.keys():
                    #plot 0 iou with red shade
                    cv2.rectangle(overlay, (gt_box[0], gt_box[1]),
                                  (gt_box[2], gt_box[3]), (0, 0, 255), -1)
                else:
                    if gt_track[gt_label][1] > threshold:
                        if gt_label in gt_track_id.keys():
                            if int(gt_track_id[gt_label]) != int(
                                    gt_track[gt_label][0]):
                                print('match id', gt_track_id[gt_label],
                                      int(gt_track[gt_label][0]))
                                gt_blist.append(gt_label)
                                del gt_track_id[gt_label]
                        elif gt_label not in gt_track_id.keys(
                        ) and gt_label not in gt_blist:
                            gt_track_id[gt_label] = gt_track[gt_label][0]
                        else:
                            pass

                        if gt_label not in gt_blist:
                            #plot over threshold with green shade
                            cv2.rectangle(overlay, (gt_box[0], gt_box[1]),
                                          (gt_box[2], gt_box[3]), (0, 255, 0),
                                          -1)
                            img_iou.append(gt_track[gt_label][1])
                        else:
                            cv2.rectangle(overlay, (gt_box[0], gt_box[1]),
                                          (gt_box[2], gt_box[3]), (0, 0, 255),
                                          -1)
                        #print('gt_label is', gt_label)
                        #print('gt_id key', gt_track_id.keys())
                    else:
                        #plot at or below threshold with red shade
                        cv2.rectangle(overlay, (gt_box[0], gt_box[1]),
                                      (gt_box[2], gt_box[3]), (0, 0, 255), -1)
            alpha = 0.3
            det_img = cv2.addWeighted(overlay, alpha, det_img, 1 - alpha, 0)
            #print(gt_track_id)
            print(gt_blist)

        for j in range(track_bbs_ids.shape[0]):
            ele = track_bbs_ids[j, :]
            print(ele)
            bbox_track = [int(ele[0]), int(ele[1]), int(ele[2]), int(ele[3])]
            track_label = str(int(ele[4]))
            track_data.append([
                a,
                int(ele[4]),
                int(ele[0]),
                int(ele[1]),
                int(ele[2]) - int(ele[0]),
                int(ele[3]) - int(ele[1])
            ])
            #print([a, int(ele[4]), int(ele[0]), int(ele[1]), int(ele[2])-int(ele[0]), int(ele[3]) - int(ele[1])])
            #cv2.rectangle(det_img, (bbox_track[0], bbox_track[1]), (bbox_track[2], bbox_track[3]), get_color(int(ele[4])), 3)
            #cv2.putText(det_img, track_label, (bbox_track[0]+5, bbox_track[1]+20), 0,0.6, (255,255,255),thickness=2)
            cv2.rectangle(det_img, (bbox_track[0], bbox_track[1]),
                          (bbox_track[2], bbox_track[3]), (0, 255, 0), 3)
            cv2.putText(det_img,
                        'person', (bbox_track[0] + 5, bbox_track[1] + 20),
                        0,
                        0.6, (0, 255, 0),
                        thickness=2)
        img_array.append(det_img)
        print('append')

    file_name = '_'.join([args.video_name.split('/')[-2], '.txt'])
    with open(file_name, 'w') as file:
        for det in track_data:
            print(det)
            det = '{}, {}, {}, {}, {}, {}, 1, -1, -1, -1'.format(
                det[0], det[1], det[2], det[3], det[4], det[5])
            file.write('%s\n' % str(det))

        #only compute tracked threshold
        if len(img_iou) > 0:
            img_iou_mean = sum(img_iou) / len(img_iou)
            iou_array.append(img_iou_mean)
            text = 'Frame {}: Mean IoU is {}%, {} tracked'.format(
                a, round((img_iou_mean * 100), 2), len(gt_track_id))
        else:
            text = 'Frame {}: Mean IoU is {}%, {} tracked'.format(
                a, 0, len(gt_track_id))
        print(text)
        cv2.putText(det_img, text, (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                    (0, 255, 0), 2, cv2.LINE_AA)

        img_array.append(det_img)
        print('appending img')
Code example #13
def main():

    PATH_TO_LABELS = '/media/yujiang/Data/Seedling/Datasets/TFrecords/pascal_label_map.pbtxt'
    NUM_CLASSES = 2  # DO NOT change this

    detection_folder = '/media/yujiang/Data/Seedling/Experiments/counting_results_rgb/Final_model'
    video_folder = '/media/yujiang/Data/Seedling/Datasets/TestVideos'

    video_prefix_config_dict = {
        'TAMU_2015': {
            'iou_th': 0.1,
            'tracking_lifetime_th': 9
        },
        'UGA_2015': {
            'iou_th': 0.1,
            'tracking_lifetime_th': 15
        },
        'UGA_2018': {
            'iou_th': 0.1,
            'tracking_lifetime_th': 9
        }
    }

    # video_prefix_config_dict = {'UGA_2015': {'iou_th':0.1, 'tracking_lifetime_th':15}}

    # Create label map
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    for video_prefix, counting_config in video_prefix_config_dict.items():
        # tracking and counting parameters
        iou_th = counting_config['iou_th']
        tracking_lifetime_th = counting_config['tracking_lifetime_th']
        # video list
        video_list = os.listdir(video_folder)
        video_list = [
            f for f in video_list
            if f.startswith(video_prefix) and f.endswith('.mp4')
        ]
        for video_file in video_list:
            video_name = video_file.split('.')[0]
            detection_file_path = osp.join(
                detection_folder, '{0}_detection.json'.format(video_name))
            video_file_path = osp.join(video_folder, video_file)
            # tracking list
            seedling_id_list = list()
            seedling_lifetime = dict()
            # load detection results
            with open(detection_file_path, 'r') as fid:
                jstr = json.load(fid)
                res_list = json.loads(jstr)
                cap = cv2.VideoCapture(video_file_path)
                frame_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                cap.release()
                # initialize the Kalman filter based tracking algorithm
                mot_tracker = sort.Sort(iou_th=iou_th)
                total_time = 0
                for i in range(frame_num - 1):
                    # detection result for the current frame
                    detection_res = res_list[i]

                    frame_bbox = detection_res['bbox']
                    frame_bbox_array = np.array(frame_bbox, dtype=np.float32)
                    frame_cls = detection_res['box_cls']
                    frame_cls = np.array([int(i) for i in frame_cls])
                    frame_scores = np.asarray(
                        detection_res['box_scores']).reshape((-1, 1))

                    if len(frame_bbox_array) != 0:
                        dets = np.concatenate((frame_bbox_array, frame_scores),
                                              axis=1)
                    else:
                        dets = np.array([])

                    start_time = time.time()
                    # update trackers based on the current detection result
                    trackers = mot_tracker.update(dets)
                    cycle_time = time.time() - start_time
                    # update the total time used
                    total_time += cycle_time

                    # update the tracker list
                    for d_index, d in enumerate(trackers[:, 4]):
                        if d not in seedling_id_list:
                            seedling_id_list.append(d)
                            # max_cls_id = len(seedling_id_list)
                        cur_d = seedling_id_list.index(d) + 1

                        if cur_d not in list(seedling_lifetime.keys()):
                            seedling_lifetime[cur_d] = list()
                        seedling_lifetime[cur_d].append(i)
                # identify valid seedling trackers
                valid_seedling_tracker_list = list()
                for k, v in seedling_lifetime.items():
                    if len(v) >= tracking_lifetime_th:
                        valid_seedling_tracker_list.append(k)

            print('Video {0} has {1} seedlings using {2} seconds.\n'.format(
                video_file, len(valid_seedling_tracker_list), total_time))

Code example #14
    def SORT(self, predictions):
        qtt_frames = len(predictions)
        tracker = sort.Sort()

        tracked_vehicles_trajectory = {}  # trajectory of each identified ID
        vehicles_on_the_frame = {}  # vehicles present in frame X

        mot_labels = [[0, 0, 0, 0, 0, 0, 0] for _ in range(qtt_frames + 1)]

        for frame_number in range(1, qtt_frames+1):

            bboxes_atual = predictions[frame_number][:]

            # Format the list to feed SORT:
            # np.array([[x1,y1,x2,y2,score1], [x3,y3,x4,y4,score2], ...])

            if len(bboxes_atual) == 0:
                bboxes_atual = np.zeros((0, 5))  # empty shape required by the SORT algorithm
            else:
                for idx in range(len(bboxes_atual)):
                    x1, y1, w, h, classe = bboxes_atual[idx][1:]
                    x2 = x1 + w
                    y2 = y1 + h
                    score = np.random.randint(50, 100)/100  # temporary: use a random score
                    bboxes_atual[idx] = [x1, y1, x2, y2, score, classe]
                
                # NumPy array required by SORT
                bboxes_atual = np.array(bboxes_atual)
                
                #last_col = bboxes_atual[:, -1]
                #find_rows = lambda classe: bboxes_atual[last_col == classe].copy()
                #bboxes_cars = find_rows(1)
                #bboxes_truck = find_rows(2)
                #bboxes_bus = find_rows(3)
            
                # Analyze the current frame and assign bounding-box IDs (SORT update)
                track_bbs_ids = tracker.update(bboxes_atual[:,:-1])
                this_frame_ids = track_bbs_ids[:,-1]
                #track_bbs_ids_cars = tracker_car.update(bboxes_cars)
                #track_bbs_ids_truck = tracker_truck(bboxes_truck)
                #track_bbs_ids_bus = tracker_bus(bboxes_bus)
                
                # Convert the coordinates to the format [frame, x, y, w, h, idx]
                newboxes_list = [[0,0,0,0,0,0,0] for _ in range(len(track_bbs_ids))]
                for i, newbox in enumerate(track_bbs_ids):
                    x1,y1,x2,y2,idx = newbox
                    x, y, w, h = x1, y1, abs(x2-x1), abs(y2-y1)
                    x,y,w,h,idx = int(x), int(y), int(w), int(h), int(idx)
                    newboxes_list[i] = [frame_number, x, y, w, h, classe, idx]  # classe keeps its last value from the loop above

                    # Store the trajectory of the center of vehicle idx
                    xc, yc = int(x + w/2) , int(y + h/2)
                    if idx in tracked_vehicles_trajectory:
                        tracked_vehicles_trajectory[idx].append((frame_number,xc,yc))
                    else:
                        tracked_vehicles_trajectory[idx] =  [(frame_number,xc,yc)]
                
                # Update the global variables
                vehicles_on_the_frame[frame_number] = this_frame_ids
                mot_labels[frame_number] = newboxes_list[:]

        return mot_labels, tracked_vehicles_trajectory, vehicles_on_the_frame 
Code example #15
total_time = 0.0
total_frames = 0
# Read the detections file
with open('detection-3.json') as json_file:
    json_data = json.load(json_file)

detections = []
for key in json_data:
    for det in json_data[key]:

        detection = [int(key), -1, *(det['bbox']), det['score'], -1, -1, -1]
        detections.append(detection)
# Convert to a NumPy array
detections = np.asarray(detections)

mot_tracker = sort.Sort()
result_tracking = []
print("Processing ")
for frame in range(int(detections[:, 0].max())):
    frame += 1
    #dets = seq_dets[seq_dets[:,0]==frame,2:7]
    dets = detections[detections[:, 0] == frame, 2:7]
    dets[:, 2:4] += dets[:, 0:2]  # convert [x1,y1,w,h] to [x1,y1,x2,y2]
    total_frames += 1

    start_time = time.time()
    trackers = mot_tracker.update(dets)
    cycle_time = time.time() - start_time
    total_time += cycle_time

    for d in trackers:
Code example #16

# -----------------------------------------------------------------------------|
# -----------------------------------------------------------------------------|
def list_is_same(some_list, other_list):
    """
    Checks if a list is same as other
    """
    return some_list == other_list


# -----------------------------------------------------------------------------|

# Create random list
some_random_list = []
my_sort = sort.Sort()

for iteration in range(20000):
    random_number = random.randint(0, 2000)
    some_random_list.append(random_number)

my_random_list = some_random_list[:]
print("List was", my_random_list)

# ---------------------------------------------------------------------------- #
# Insertion sort test
# ---------------------------------------------------------------------------- #
start = time.time()
my_sorted_list = my_sort.insertion_sort(my_random_list)
end = time.time()
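
The timing harness assigns the return value, so it assumes Sort.insertion_sort hands back a sorted list; a copy-returning sketch of such a method (an assumption, the project's implementation is not shown):

class Sort:
    def insertion_sort(self, items):
        result = list(items)    # sort a copy, leave the input untouched
        for i in range(1, len(result)):
            key = result[i]
            j = i - 1
            # shift larger elements right until key's slot is found
            while j >= 0 and result[j] > key:
                result[j + 1] = result[j]
                j -= 1
            result[j + 1] = key
        return result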
Code example #17
File: sort_test.py  Project: Jyssssss/jys-sort
import sort

jysSort = sort.Sort()
collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.bubble_sort(collection)
print('Bubble Sort:   ', collection)

collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.insertion_sort(collection)
print('Insertion Sort:', collection)

collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.selection_sort(collection)
print('Selection Sort:', collection)

collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.heap_sort(collection)
print('Heap Sort:     ', collection)

collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.quick_sort(collection)
print('Quick Sort:    ', collection)

collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.merge_sort(collection)
print('Merge Sort:    ', collection)
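
Every print above inspects collection itself rather than a return value, so the test relies on each method sorting in place; a quick assertion of that contract (a hypothetical check against the same module):

import sort

jysSort = sort.Sort()
collection = [9, 2, 5, 7, 1, 3, 4, 8, 6, 10]
jysSort.quick_sort(collection)
assert collection == list(range(1, 11))  # mutated in place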