Example #1
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
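Most of the eval_seq variants on this page are driven by a small demo script; below is a minimal hedged sketch of such a caller. The demo() name and the 'mot' data_type are assumptions; opts().init(), opt.input_video, opt.img_size, opt.output_root, and datasets.LoadVideo all appear in other examples on this page.

import os.path as osp

def demo(opt):
    # hypothetical driver: build a video dataloader and hand it to eval_seq
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)
    # LoadVideo yields (path, preprocessed img, original img0) per frame
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = osp.join(result_root, 'results.txt')
    frame_dir = osp.join(result_root, 'frame')
    eval_seq(opt, dataloader, 'mot', result_filename,
             save_dir=frame_dir, show_image=False,
             frame_rate=dataloader.frame_rate)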
Example #2
    def __init__(self, args):
        self.args = args
        self.tracker = JDETracker(max_age=args.max_age, buffer_size=args.buffer_size, det_thresh=args.conf_thres,
                                  thresh1=args.tracker_thresh1, thresh2=args.tracker_thresh2, thresh3=args.tracker_thresh3)

        self.track_outputs = args.track_outputs
        self.track_results = []
Example #3
def batch_size_effect_measure(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=None,
                              show_image=False,
                              frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = {'0': 0}
    gpu_used = 0
    itr_id = 0
    self_dict = copy.deepcopy(tracker.__dict__)
    del self_dict['model']
    del self_dict['opt']

    pqueue = Queue()  # writer() writes to pqueue from _this_ process
    reader_p = Process(target=post_proc,
                       args=((pqueue, self_dict, tracker, results, save_dir,
                              show_image, opt, result_filename, data_type,
                              frame_id)))
    reader_p.daemon = True
    reader_p.start()

    for path, img, img0 in dataloader:
        # cv2.imwrite("test.jpeg", img0[0])
        if itr_id % 2 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                itr_id * opt.batch_size,
                float(opt.batch_size) / max(1e-5, timer.average_time)))

        if itr_id == 0:
            gpu_used = int(get_gpu_memory_map()[0]) - 129
        if img == 0:
            break
        timer.tic()
        img = np.array(img)
        blob = torch.from_numpy(img).cuda()  # .unsqueeze(0)        # ChangeHere
        preds = tracker.getDetections(blob)
        # preds = torch.tensor(np.random.randn(10, 500, 518)).half()
        write_preds(preds, img0, frame_id, pqueue)
        # preds = preds.cpu()
        # preds_arr = preds.numpy()
        timer.toc()

        itr_id += 1

    fps = float(opt.batch_size) / max(1e-5, timer.average_time)
    write_preds(0, 0, 0, pqueue)

    # for proc in jobs:
    #     proc.join()
    # save results
    return fps, gpu_used
Example #4
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets, scores = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        frame_time = []
        timestamp = time.time()
        localTime = time.localtime(timestamp)
        strTime = time.strftime("%Y-%m-%d %H:%M:%S", localTime)

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                frame_time.append(strTime)
        timer.toc()
        # save results
        results.append(
            (frame_id + 1, online_tlwhs, online_ids, frame_time, scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=scores,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #5
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    if opt.save_var:
        import pickle
        filename = os.path.join(opt.save_var, 'MOT_result.pickle')  # a leading '/' would make os.path.join discard save_var
        with open(filename, 'wb') as f:
            pickle.dump(results, f)
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
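Example #5 optionally pickles the raw results list; a minimal sketch of reading it back (assuming opt.save_var is set and the corrected path above):

import os
import pickle

with open(os.path.join(opt.save_var, 'MOT_result.pickle'), 'rb') as f:
    results = pickle.load(f)
# each entry is (frame_id, online_tlwhs, online_ids)
for frame_id, tlwhs, ids in results[:5]:
    print(frame_id, len(ids))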
Example #6
def eval_det(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id < 302:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            # if frame_id>20:
            #     break

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        dets = tracker.detect(blob, img0)
        # print(path, dets)
        tlbrs = []
        scores = []
        class_ids = []
        for det in dets:
            tlbrs.append(det[:4])
            scores.append(det[4])
            class_ids.append(int(det[5] - 1))
        # print(class_ids)
        if show_image or save_dir is not None:
            online_im = vis.plot_detections(img0,
                                            tlbrs,
                                            scores=None,
                                            ids=class_ids)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
Example #7
def eval_seq(opt,
             data_path,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    tracker = JDETracker(opt)
    # timer = Timer()
    results = []
    frame_id = 0
    frame_nums = 60  #len(os.listdir(data_path))//2
    #np_res = []
    for _ in range(frame_nums):
        frame_id += 1
        dets = np.loadtxt(os.path.join(data_path,
                                       str(frame_id) + '.txt'),
                          dtype=np.float32,
                          delimiter=',')

        online_targets = tracker.update(dets)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            if tlwh[2] * tlwh[3] > opt.min_box_area and tlwh[2] / tlwh[3] < 1.6:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # np_res.append([frame_id,tid,tlwh[0],tlwh[1],tlwh[2],tlwh[3],1,0])

        ## save results
        results.append((frame_id, online_tlwhs, online_ids))

        if show_image or save_dir is not None:
            if save_dir:
                mkdir_if_missing(save_dir)
            img = cv2.imread(os.path.join(data_path, str(frame_id) + '.jpg'))
            online_im = vis.plot_tracking(img,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id)
        # if show_image:
        #     cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    # save results
    write_results(result_filename, results, data_type)
Example #8
    def eval(self, skip_frame=1, show_image=False):
        tracker = JDETracker(self.opt, frame_rate=self.frame_rate)
        timer = Timer()
        frame_id = 0

        for i, (path, img, img0) in enumerate(self.dataloader):
            if i % skip_frame != 0:
                continue
            if frame_id % 20 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            # run tracking
            timer.tic()
            if self.use_cuda:
                blob = torch.from_numpy(img).cuda().unsqueeze(0)
            else:
                blob = torch.from_numpy(img).unsqueeze(0)
            online_targets = tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.opt.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            timer.toc()
            tmp_result = {
                "frame_id": frame_id + 1,
                "bounding_box": online_tlwhs,
                "ids": online_ids,
                "scores": online_scores
            }
            self.send_result(tmp_result, raw_img=img0)

            frame_id += 1
            if show_image:
                online_im = self.send_image(img0, online_tlwhs, online_ids,
                                            frame_id,
                                            1. / max(1e-5, timer.average_time))
                cv2.imshow('Result', online_im)
        if self.video_saver is not None:
            self.video_saver.release()
        return frame_id, timer.average_time, timer.calls
Example #9
def eval_seq(opt,
             dataloader,
             data_type,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    writer = VideoWriter(save_dir, dataloader)
    for path, img, img0 in dataloader:

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            # cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
            writer.write(online_im)
        frame_id += 1
    print("***************************** DONE *****************************")
Example #10
def eval_seq(opt, save_dir='', frame_rate=30):

    dataloader = LoadImages(opt.multi_test_images_dataset)
    opt = opt.update_dataset_info_and_set_heads(opt, dataloader)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    fd = os.open(os.path.join(save_dir, 'args.txt'), os.O_RDWR | os.O_CREAT)  # O_CREAT so the file need not already exist
    for i, (path, img, img0) in enumerate(dataloader):

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
            centre = tlwh[:2] + tlwh[2:] / 2
            text = str(frame_id) + ' ' + str(tid) + ' ' + str(
                centre[1]) + ' ' + str(centre[0]) + '\n'
            os.write(fd, str.encode(text))
        timer.toc()
        # save results

        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    os.close(fd)

    return frame_id, timer.average_time, timer.calls
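The args.txt file written above holds one "frame_id tid centre_y centre_x" line per reported track (note y before x, matching the os.write call). A hedged parsing sketch; read_centres is a hypothetical helper name:

def read_centres(path):
    # parse lines of the form "frame_id tid cy cx" written by eval_seq above
    tracks = []
    with open(path) as f:
        for line in f:
            frame_id, tid, cy, cx = line.split()
            tracks.append((int(frame_id), int(tid), float(cy), float(cx)))
    return tracks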
Example #11
    def __init__(self, args, video_path):
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.rtsp != "":
            print("Using rtsp " + str(args.rtsp))
            self.vdo = cv2.VideoCapture(args.rtsp)
        elif args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()

        frame_rate = 30
        cfg_dict = parse_model_cfg(args.cfg)
        self.width = int(cfg_dict[0]['width'])
        self.height = int(cfg_dict[0]['height'])
        args.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]
        self.tracker = JDETracker(args, frame_rate=frame_rate)

        self.known_faces = {}

        self.faceUtils = FaceUtils()
        self.camUtils = CamUtils(args)
        self.tmp_imgs = []
        self.tmp_moves = []
        self.result_imgs = queue.Queue()
        self.is_running = True
        print("Loading Control Done.")
Example #12
    def __init__(self, args):

        self.args = args
        self.num_classes = 4

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        assert self.device == 'cuda', 'CUDA is not available'
        images_folder = os.path.join(args.images_root, args.videoname)
        self.dataset = Vehicle(images_folder,
                               model=args.model_type,
                               image_size_effdet=args.image_size_effdet)
        self.loader = DataLoader(self.dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 sampler=SequentialSampler(self.dataset),
                                 num_workers=args.num_workers,
                                 pin_memory=False,
                                 drop_last=False)

        self.detector = Detector(args.weight, args.conf_thres, args.iou_thres,
                                 args.model_type, args.image_size_effdet)

        self.tracker = JDETracker(max_age=args.max_age,
                                  buffer_size=args.buffer_size,
                                  det_thresh=args.conf_thres,
                                  thresh1=args.tracker_thresh1,
                                  thresh2=args.tracker_thresh2,
                                  thresh3=args.tracker_thresh3)
        self.track_features_type = args.track_features_type

        self.save_detects = args.save_detects
        self.detect_outputs = args.detect_outputs
        self.detect_results = []

        self.track_outputs = args.track_outputs
        self.track_results = []
Example #13
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    # check that the save directory exists, creating it if missing
    if save_dir:
        mkdir_if_missing(save_dir)

    # create the JDETracker, which performs the following steps (found in the init function of src/lib/tracker/multitracker.py):
    # 1) checks whether the program should run on CPU or GPU
    # 2) builds the model and puts it in evaluation mode
    # 3) applies a Kalman filter: a tool for estimating the state of a linear dynamic system perturbed by noise, from measurements (observations) that depend linearly on the state
    tracker = JDETracker(opt, frame_rate=frame_rate)
    # initialize the timer used to monitor processing time
    timer = Timer()
    # initialize the results array
    results = []
    # frame identifier
    frame_id = 0
    # iterate over the elements of the dataloader, i.e. the dataset images loaded in main
    for path, img, img0 in dataloader:
        # log the frame rate every 20 processed frames
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()

        # this call is specific to NVIDIA GPUs (it uses CUDA); replace it with a CPU tensor to run the program on CPU
        # it builds a multidimensional tensor (at least 3 dimensions) representing the image img

        blob = torch.from_numpy(img).cuda().unsqueeze(0)

        # the update function of the JDETracker performs the following steps (src/lib/tracker/multitracker.py):
        # 1) blob and img0 are passed in as parameters
        # 2) height and width are extracted from both and stored in variables
        # 3) a variable c stores a float array whose two elements are half the height and half the width of img0
        # 4) a variable s stores max(float(inp_width) (blob) / float(inp_height) (blob) * height (img0), width (img0)) * 1.0
        # 5) network forward, get detections & embeddings: detects objects and adds them to a pool of tracklets
        # 6) first association, with embedding: per the paper, the embedding's role is to distinguish different kinds of objects
        # 7) second association, with IoU
        # 8) initializes new STracks
        # 9) updates the state
        # 10) returns the active STracks
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # iterate over the STracks
        for t in online_targets:

            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
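Nearly every eval_seq on this page repeats the same box filter (minimum area plus an aspect-ratio test) inside the per-track loop; a small hedged helper that captures it, with keep_box as a hypothetical name:

def keep_box(tlwh, min_box_area, max_ratio=1.6):
    # tlwh = (top-left x, top-left y, width, height)
    w, h = tlwh[2], tlwh[3]
    vertical = w / h > max_ratio  # same 'vertical' test as the examples above
    return w * h > min_box_area and not vertical

With it, the loop body reduces to: if keep_box(t.tlwh, opt.min_box_area): online_tlwhs.append(t.tlwh); online_ids.append(t.track_id).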
Example #14
def eval_seq_realtime(opt, save_dir=None, show_image=True, frame_rate=30):
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    frame_id = 0

    cv2.namedWindow('online_im', cv2.WINDOW_FREERATIO)
    cv2.setWindowProperty('online_im', cv2.WND_PROP_AUTOSIZE,
                          cv2.WND_PROP_AUTOSIZE)
    cap = cv2.VideoCapture(0)
    ret, im = cap.read()

    height = im.shape[0]
    width = im.shape[1]

    while True:
        ret, img0 = cap.read()

        # Padded resize
        img, _, _, _ = letterbox(img0, height=height, width=width)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        text_scale = max(1, online_im.shape[1] / 1600.)
        text_thickness = 1
        line_thickness = max(1, int(online_im.shape[1] / 500.))
        cv2.putText(online_im,
                    'Press ESC to STOP', (300, int(15 * text_scale)),
                    cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 0, 255),
                    thickness=2)
        cv2.imshow('online_im', online_im)
        key = cv2.waitKey(1)
        if key == 27:
            break
        frame_id += 1
    cv2.destroyAllWindows()
Example #15
    cfg_dict = parse_model_cfg(path=command_args.cfg)
    command_args.img_size = [int(cfg_dict[0]["width"]), int(cfg_dict[0]["height"])]
    timer = Timer()

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(command_args.input_video, command_args.img_size)
    frame_rate = dataloader.frame_rate
    orig_width = dataloader.vw
    orig_height = dataloader.vh
    width = dataloader.w
    height = dataloader.h

    video_writer = cv2.VideoWriter(os.path.join(command_args.output_dir, os.path.splitext(os.path.basename(command_args.input_video))[0] + ".mp4"),
                                   cv2.VideoWriter_fourcc(*"mp4v"), float(frame_rate), (orig_width, orig_height))

    tracker = JDETracker(opt=command_args, frame_rate=frame_rate)

    video_info = {}
    frame_id = 0
    for path, img, img0, orig_img in dataloader:
        frame_info = {}
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
Example #16
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the video sequence given and provides the output of tracking result (write the results in video file)

       It uses JDE model for getting information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding(similar) to the given video.

       result_filename : String
                         The name(path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the frames containing bounding box information (Result frames).

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last sequence
       '''

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #17
        #print(ROS_Img)
        #self.get_logger().info('I heard: "%s"' % msg.data)
        demo(opt, msg, self.frame_id)
        self.frame_id = self.frame_id + 1
        #rospy.Timer(rospy.Duration(10000), stop_callback)

    def get_ROS_Img(self):
        return self.ROS_Img


if __name__ == '__main__':
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    print("============================HEREEEE1=============================")
    opt = opts().init()
    print("============================HEREEEE2=============================")
    tracker = JDETracker(opt, 30)
    #ROS STUFF
    rclpy.init()
    minimal_subscriber = MinimalSubscriber()

    print(
        "============================HEREEEE2.25=============================")
    try:
        rclpy.spin(minimal_subscriber)
        # for i in range(150):
        #     rclpy.spin_once(minimal_subscriber)
        #preprocessing(init)
    except Exception as e:
        print("SUB NO WORK", e)
    ROS_Img = minimal_subscriber.get_ROS_Img()
    # Destroy the node explicitly
Example #18
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    '''
    img:  Normalized RGB image
    img0: BGR image
    '''
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # print("\n==> blob.size", blob.size()) 1, 3, 608, 1088
        '''
        tracker update
        '''
        online_targets = tracker.update(blob, img0)

        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        ''' 
        print("==> [track.eval_seq] tracker's output-> online_targets:", online_targets)
        try:
            print("==> [track.eval_seq] len(online_tlwhs):", len(online_tlwhs))
            print("==> [track.eval_seq] online_tlwhs[0]:", online_tlwhs[0])
            print("==> [track.eval_seq] online_ids[0]:", online_ids[0])
        except:
            pass
        
        partial output:
        ==> [multi-tracker.update] len(output_stracks): 5
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-13), OT_2_(1-13), OT_3_(1-13), OT_20_(10-13), OT_7_(2-13)]
        ==> [track.eval_seq] len(online_tlwhs): 5
        ==> [track.eval_seq] online_tlwhs[0]: [     802.38      163.64      24.074      57.376]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-14), OT_2_(1-14), OT_3_(1-14), OT_20_(10-14), OT_7_(2-14), OT_23_(13-14), OT_13_(4-14)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     809.96      163.69      25.305      60.319]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-15), OT_2_(1-15), OT_3_(1-15), OT_20_(10-15), OT_7_(2-15), OT_23_(13-15), OT_19_(10-15)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     818.46       164.4      26.832      63.971]
        ==> [track.eval_seq] online_ids[0]: 1
        '''

        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #19
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the video sequence given and provides the output of tracking result (write the results in video file)

       It uses JDE model for getting information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding(similar) to the given video.

       result_filename : String
                         The name(path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the frames containing bounding box information (Result frames).

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last sequence
       '''
    width, height = dataloader.w, dataloader.h
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    # for selected object tracking
    global click_pos
    global is_selected
    selected_id = None

    # set video output writer
    counter = 0
    encode = 0x00000021
    output_video = cv2.VideoWriter(
        os.path.join(save_dir, f'result_{counter}.mp4'), encode, 5,
        (width, height), True)

    # start tracking
    for path, img, img0 in dataloader:
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            output_video.release()
            # Call MP4Box to divide new mp4 file
            output_video = cv2.VideoWriter(
                os.path.join(save_dir, f'result_{counter}.mp4'), encode, 5,
                (width, height), True)
            counter += 1

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # get visualization result and some control flags for selected object tracking
            online_im, click_pos, selected_id, is_selected = vis.plot_tracking(
                img0,
                online_tlwhs,
                online_ids,
                frame_id=frame_id,
                fps=1. / timer.average_time,
                selected_id=selected_id,
                click_pos=click_pos,
                is_selected=is_selected)
        if show_image:
            # bind mouse event listener
            cv2.setMouseCallback("online_im", on_click)
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(
                os.path.join(save_dir, 'frame', '{:05d}.jpg'.format(frame_id)),
                online_im)
            output_video.write(online_im)
        frame_id += 1
    output_video.release()
    # save results
    write_results(result_filename, results, data_type)

    return frame_id, timer.average_time, timer.calls
Example #20
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             bbox_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    for path, img0, detection in dataloader:
        if frame_id % 1 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        online_targets, detection_boxes = tracker.update(detection, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        #bbox detection plot
        box_tlbrs = []
        box_scores = []
        img_bbox = img0.copy()
        for box in detection_boxes:
            tlbr = box.tlbr
            tlwh = box.tlwh
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                box_tlbrs.append(tlbr)
                box_scores.append(box.score)

        timer.toc()
        # save results
        results.append(
            ([frame_id + 1] * len(online_tlwhs), online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            bbox_im = vis.plot_detections(img_bbox,
                                          box_tlbrs,
                                          scores=box_scores)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.imshow('bbox_im', bbox_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
            cv2.imwrite(os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)),
                        bbox_im)

        frame_id += 1
    # save results
    track_pools = []
    id_pools = []
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #21
    parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    parser.add_argument('--input-video', type=str, help='path to the input video')
    parser.add_argument('--output-format', type=str, default='text', choices=['video', 'text'], help='Expected output format. Video or text.')
    parser.add_argument('--output-root', type=str, default='results', help='expected output root path')
    parser.add_argument("--count", action="store_true", default=False, help='Enable the counting output')
    opt = parser.parse_args()


    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    tracker = JDETracker(opt)

    tracker.model.eval()
    device = 'cuda'
    # ------------------------ export -----------------------------
    output_onnx = 'trmot.onnx'
    print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
    input_names = ['images']
    output_names = ['scores']
    #inputs = torch.randn(1, 3, detector.net.width, detector.net.height).to(device)
    inputs = torch.randn(1, 3, 480, 864).to(device)



    torch_out = torch.onnx._export(tracker.model, inputs, output_onnx, export_params=True, verbose=False, opset_version=11, enable_onnx_checker=False,
                                   input_names=input_names, output_names=output_names)
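A quick hedged sanity check of the exported file with onnxruntime (assuming onnxruntime is installed; the input name 'images' and the 1x3x480x864 shape match the export call above):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('trmot.onnx')
dummy = np.random.randn(1, 3, 480, 864).astype(np.float32)
outputs = sess.run(None, {'images': dummy})
print([o.shape for o in outputs])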
Example #22
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        if frame_id < 300:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        filename = path.split('/')[-1]
        if '0000001' in path:
            tid_max = tid_temp + 1
            print(path, tid_max)
            tracker = JDETracker(opt, frame_rate=frame_rate)

        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id >20:
        #     break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update_sep3(
            blob,
            img0,
            conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])
        # print(online_targets)
        online_tlwhs = []
        online_ids = []
        online_cids = []  #class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max
            tcid = t.class_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:  # and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
            tid_temp = max(tid, tid_temp)
        timer.toc()
        # save results
        results.append((filename, online_tlwhs, online_ids, online_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #23
def run(opt):
    """
    :param opt:
    :return:
    """

    # Set dataset and device
    dataset = LoadImages(opt.source, img_size=opt.img_size)
    device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
    opt.device = device

    # Set result output
    frame_dir = opt.save_dir + '/frame'
    if not os.path.isdir(frame_dir):
        os.makedirs(frame_dir)
    else:
        shutil.rmtree(frame_dir)
        os.makedirs(frame_dir)

    # class name to class id and class id to class name
    names = load_classes(opt.names)
    id2cls = defaultdict(str)
    cls2id = defaultdict(int)
    for cls_id, cls_name in enumerate(names):
        id2cls[cls_id] = cls_name
        cls2id[cls_name] = cls_id

    # Set tracker
    tracker = JDETracker(opt)  # Joint detection and embedding

    for fr_id, (path, img, img0, vid_cap) in enumerate(dataset):
        img = torch.from_numpy(img).to(opt.device)
        img = img.float()  # uint8 to fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # update tracking result of this frame
        online_targets_dict = tracker.update_tracking(img, img0)
        # print(online_targets_dict)

        # aggregate frame's results
        online_tlwhs_dict = defaultdict(list)
        online_ids_dict = defaultdict(list)
        for cls_id in range(opt.num_classes):
            # process each object class
            online_targets = online_targets_dict[cls_id]
            for track in online_targets:
                tlwh = track.tlwh
                t_id = track.track_id
                # vertical = tlwh[2] / tlwh[3] > 1.6  # aspect-ratio check: w/h must not exceed 1.6?
                # if tlwh[2] * tlwh[3] > opt.min_box_area:  # and not vertical:
                online_tlwhs_dict[cls_id].append(tlwh)
                online_ids_dict[cls_id].append(t_id)

        if opt.show_image:
            if tracker.frame_id > 0:
                online_im = vis.plot_tracks(image=img0,
                                            tlwhs_dict=online_tlwhs_dict,
                                            obj_ids_dict=online_ids_dict,
                                            num_classes=opt.num_classes,
                                            frame_id=fr_id,
                                            id2cls=id2cls)

        if opt.save_dir is not None:
            save_path = os.path.join(frame_dir, '{:05d}.jpg'.format(fr_id))
            cv2.imwrite(save_path, online_im)

    # output tracking result as video
    src_name = os.path.split(opt.source)[-1]
    name, suffix = src_name.split('.')
    result_video_path = opt.save_dir + '/' + name + '_track' + '.' + suffix

    cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}' \
        .format(frame_dir, result_video_path)
    os.system(cmd_str)
Example #24
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir='.',
             show_image=True,
             frame_rate=25):

    tracker = JDETracker(opt, frame_rate=frame_rate)
    p = path_root_index[5]
    if save_dir:
        save_dir = osp.join(save_dir, p)
        mkdir_if_missing(save_dir)
    image_path = getimage_path(path_root + p)
    timer = Timer()
    results = []
    frame_id = -1
    result_array_list = []
    result = []
    for path in image_path:

        # img=cv2.imread(path)

        img0 = cv2.imread(path)  # BGR
        # assert img0 is not None, 'Failed to load ' + img_path
        img_height = img0.shape[0]
        img_width = img0.shape[1]
        # print(img_height,img_width)
        # print(img0.shape)
        # Padded resize
        img, _, _, _ = letterbox(img0, height=608, width=1088)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        frame_id += 1

        # if frame_id % 20 == 0:
        #     logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id==2:
        #   break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        online_cref = []
        # result_array_list=[]
        for t in online_targets:

            tlwh = t.tlwh

            tid = t.track_id
            confidence = t.score

            vertical = tlwh[2] / tlwh[3] > 1.6
            if confidence < 0.3:
                if 2700 < tlwh[2] * tlwh[3] < 100000 and not vertical:
                    res = [frame_id, tid]
                    res += list(tlwh)
                    res += [1, 0]
                    online_tlwhs.append(tlwh)
                    result_array_list.append(res)
                    online_cref.append(confidence)
                    # print(confidence)
                    # result_array_list.append(tlwh)
                    online_ids.append(tid)

            elif confidence >= 0.3:
                if 1000 < tlwh[2] * tlwh[3] < 100000 and not vertical:
                    # print(tlwh[2] * tlwh[3])
                    res = [frame_id, tid]
                    res += list(tlwh)
                    res += [1, 0]
                    online_tlwhs.append(tlwh)
                    result_array_list.append(res)
                    online_cref.append(confidence)
                    # print(confidence)
                    # result_array_list.append(tlwh)
                    online_ids.append(tid)
        # if frame_id==2:
        #   break
        timer.toc()
        # save results
        print(frame_id)
        # if result_array_list:

        online_tlwhs = np.array(online_tlwhs)
        online_ids = np.array(online_ids)
        online_cref = np.array(online_cref)
        # print(online_tlwhs)
        # print(online_tlwhs.shape)
        # print(online_ids.shape)
        # pick=non_max_suppression(online_tlwhs,0.7,online_cref)

        # online_tlwhsnms=online_tlwhs[pick]
        # online_idsnms=online_ids[pick]
        # online_crefnms=online_cref[pick]
        # result_array_list2=np.array(result_array_list).copy()[pick]
        # result+=list(result_array_list2)
        # print(result)

        # print(frame_id,online_idsnms)
        # result.append(online_tlwhsnms)
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_cref,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    res_array = np.array(result_array_list)
    # print(res_array)
    # print(res_array.shape)
    tmp_data1 = np.where(res_array[:, [2]] < 0,
                         res_array[:, [2]] + res_array[:, [4]],
                         res_array[:, [4]])
    res_array[:, [4]] = tmp_data1
    tmp_data = np.where(res_array[:, [3]] < 0,
                        res_array[:, [3]] + res_array[:, [5]],
                        res_array[:, [5]])
    res_array[:, [5]] = tmp_data
    res_array[:, [2, 3]] = np.maximum(res_array[:, [2, 3]], 0)
    # print(res_array)
    res_array = np.round(res_array, 0)
    # res_array=cutmorecord(res_array,img_width,img_height)
    # print(res_array)
    np.savetxt("{}.txt".format(p),
               res_array,
               fmt='%d,%d,%d,%d,%d,%d,%d,%d',
               delimiter=',')

    # save results
    # write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #25
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the video sequence given and provides the output of tracking result (write the results in video file)

       It uses JDE model for getting information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding(similar) to the given video.

       result_filename : String
                         The name(path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the frames containing bounding box information (Result frames).

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last sequence
       '''
    '''
    width = dataloader.vw
    height = dataloader.vh
    '''
    width = 640
    height = 480
    # Optional ffmpeg HLS streaming pipeline (disabled):
    '''
    process = (
        ffmpeg
        # 're=None' paces input reads at the native frame rate;
        # 'preset ultrafast' minimizes encoding latency (try other presets if needed).
        .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height), re=None)
        #.setpts('1.7*PTS')
        .output('../try.m3u8', format='hls', pix_fmt='yuv420p', vcodec='libx264', preset='ultrafast', hls_time=10, hls_list_size=2, start_number=0, hls_flags='delete_segments+append_list', hls_segment_filename='../try_%05d.ts')
        .overwrite_output()
        .run_async(pipe_stdin=True)
    )
    '''
    track_id = 0
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    cv2.namedWindow('online_im')
    cv2.setMouseCallback('online_im', mouse_click)

    # main tracking loop (the ffmpeg streaming process above is disabled)
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im, track_id = vis.plot_tracking(img0,
                                                    online_tlwhs,
                                                    online_ids,
                                                    frame_id=frame_id,
                                                    fps=1. / timer.average_time,
                                                    single=single,
                                                    mouse_x=mouse_x,
                                                    mouse_y=mouse_y,
                                                    track_id=track_id)
        if show_image:
            # local display is disabled; frames are pushed to the stream below
            pass
            #cv2.imshow('online_im', online_im)
            #cv2.waitKey(1)
            #plt.imshow(online_im)
            #plt.show()
        #online_im_rgb = cv2.cvtColor(online_im, cv2.COLOR_BGR2RGB)
        #write_frame(process, online_im_rgb)
        stream(online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # close process
    #close_process(process)
    terminate_stream()
    return frame_id, timer.average_time, timer.calls
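Example #25 leans on module-level state (mouse_x, mouse_y, single) and a mouse_click callback that are defined elsewhere in the source. A minimal sketch of what such a handler could look like, with names inferred from the call sites rather than taken from the original module:

import cv2

# Hypothetical module-level state consumed by vis.plot_tracking above.
mouse_x, mouse_y = -1, -1
single = False  # when True, follow only the clicked track

def mouse_click(event, x, y, flags, param):
    # Standard OpenCV mouse-callback signature: record the click position
    # and toggle single-target mode on a left click.
    global mouse_x, mouse_y, single
    if event == cv2.EVENT_LBUTTONDOWN:
        mouse_x, mouse_y = x, y
        single = not single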
Example #26
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True,
             gt_path=None):

    gt_results_dict = dict()
    if gt_path:
        if os.path.isfile(gt_path):
            with open(gt_path, 'r') as f:
                for line in f.readlines():
                    linelist = line.split(',')
                    if len(linelist) < 7:
                        continue
                    fid = int(linelist[0])
                    if fid < 1:
                        continue
                    gt_results_dict.setdefault(fid, list())

                    # box_size feeds the disabled size filters below
                    box_size = float(linelist[4]) * float(linelist[5])

                    score = 1

                    # if box_size > 7000:
                    # if box_size <= 7000 or box_size >= 15000:
                    # if box_size < 15000:
                    #     continue

                    tlwh = tuple(map(float, linelist[2:6]))
                    target_id = linelist[1]

                    gt_results_dict[fid].append((tlwh, target_id, score))

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:

    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))

        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time,
                                          gt_box=gt_results_dict.get(
                                              frame_id + 1, []))  # tolerate frames missing from gt
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
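The ground-truth parsing at the top of Example #26 follows the usual MOT layout (frame, id, x, y, w, h, conf, ...). Pulled out as a standalone helper under the same assumptions, it reads:

def load_gt(gt_path):
    # Parse a MOT-style gt.txt into {frame_id: [(tlwh, target_id, score), ...]}.
    gt = {}
    with open(gt_path, 'r') as f:
        for line in f:
            fields = line.split(',')
            if len(fields) < 7:
                continue
            fid = int(fields[0])
            if fid < 1:
                continue
            tlwh = tuple(map(float, fields[2:6]))
            gt.setdefault(fid, []).append((tlwh, fields[1], 1))
    return gt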
Example #27
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             countFileName,
             save_dir=None,
             show_image=True,
             frame_rate=30):

    device = torch.device("cpu" if opt.cpu else "cuda")

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    ids = set()
    if opt.le:
        skip_frames = 30

    for path, img, img0 in tqdm(dataloader):

        if opt.le and frame_id % skip_frames != 0:
            frame_id += 1
            continue

        if frame_id % opt.skip != 0:
            frame_id += 1
            continue

        #logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        start = time.time()
        blob = torch.from_numpy(img).to(device).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                ids.add(tid)

        end = time.time()
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)

        ratio_x = float(dataloader.width) / img0.shape[1]
        ratio_y = float(dataloader.height) / img0.shape[0]
        if opt.AVA:
            ratio_y = 1080. / img0.shape[0]
            countFile = open(countFileName, 'a')
            txt = '{:.6f}'.format(end - start)

            for tid, box in zip(online_ids, online_tlwhs):
                txt += ',{:.1f},{:.1f},{:.1f},{:.1f},-2,-2,-2,-2,{:d},-2,-2'.format(
                    box[0] * ratio_x, box[1] * ratio_y,
                    (box[0] + box[2]) * ratio_x, (box[1] + box[3]) * ratio_y,
                    tid)
            txt += '\n'
            countFile.write(txt)
            countFile.close()

        if opt.video_path == 0:
            for box in online_tlwhs:  # ids unused in the live preview
                cv2.rectangle(img0,
                              (int(box[0] * ratio_x), int(box[1] * ratio_y)),
                              (int((box[0] + box[2]) * ratio_x),
                               int((box[1] + box[3]) * ratio_y)), (0, 255, 0),
                              2)
            cv2.imshow('Live', img0)
            cv2.waitKey(1)

        frame_id += 1

    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
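The AVA branch in Example #27 appends one line per frame: the elapsed time, then an 11-field group per box in scaled image coordinates. The same formatting, isolated into a helper that stays faithful to the format string above:

def format_count_line(elapsed, ids, tlwhs, ratio_x, ratio_y):
    # elapsed seconds, then x1,y1,x2,y2 plus -2 placeholders and the track
    # id for every box, rescaled to the output resolution.
    txt = '{:.6f}'.format(elapsed)
    for tid, box in zip(ids, tlwhs):
        txt += ',{:.1f},{:.1f},{:.1f},{:.1f},-2,-2,-2,-2,{:d},-2,-2'.format(
            box[0] * ratio_x, box[1] * ratio_y,
            (box[0] + box[2]) * ratio_x, (box[1] + box[3]) * ratio_y, tid)
    return txt + '\n'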
Example #28
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename=None,
             save_dir=None,
             show_image=False,
             frame_rate=10,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    person_mot = PersonMOT()

    results = []
    frame_id = 0
    rflag = False

    # for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
        if i % 12 != 0:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # online_scores.append(t.score)
        timer.toc()

        # send the ATC frame info
        rflag = atcs.send_info(opt.device_id, opt.url, online_ids,
                               online_tlwhs, opt.input_stream)

        if rflag and i % opt.check_setp == 0:
            person_mot.handle_crime(opt.device_id, opt.url, opt.trigger,
                                    online_ids, online_tlwhs, img0)

        # whether to render the image
        if show_image or save_dir:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            results.append((frame_id + 1, online_tlwhs, online_ids))
            # with scores:
            # results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))

        # display the image
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)

        # save the image
        if save_dir:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1

    # save results
    if opt.save:
        write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
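atcs.send_info and PersonMOT.handle_crime in Example #28 belong to an external alerting backend that is not part of the snippet. For offline runs, a no-op stub with the same call shape (argument names inferred from the call sites, not from the real module) is enough to keep the loop running:

class AtcsStub:
    # Stand-in matching the send_info call shape used above; always
    # reports False, so handle_crime is never triggered.
    def send_info(self, device_id, url, online_ids, online_tlwhs, input_stream):
        return False

atcs = AtcsStub()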
Example #29
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    len_all = len(dataloader)
    start_frame = int(len_all / 2)
    frame_id = int(len_all / 2)
    for i, (path, img, img0) in enumerate(dataloader):
        if i < start_frame:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        img_pil = Image.open(path).convert('RGB')
        normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        img_norm = transforms.Compose(
            [T.RandomResize([800], max_size=1333), normalize])
        img_norm = img_norm(img_pil)
        # run tracking
        timer.tic()
        # the dataloader's img blob is unused here; the tracker consumes the
        # torchvision-normalized tensor built above
        online_targets = tracker.update(img_norm.cuda().unsqueeze(0), img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
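Example #29 bypasses the dataloader blob and feeds the tracker a torchvision-normalized tensor instead. The preprocessing pipeline in isolation, with torchvision's Resize standing in for the DETR-style T.RandomResize([800], max_size=1333) used above:

from PIL import Image
import torchvision.transforms as transforms

# ImageNet normalization as in the example; Resize keeps the aspect ratio,
# scaling the short side to 800 and capping the long side at 1333.
preprocess = transforms.Compose([
    transforms.Resize(800, max_size=1333),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

img_pil = Image.open('frame.jpg').convert('RGB')  # hypothetical frame path
blob = preprocess(img_pil).unsqueeze(0)           # (1, 3, H, W) batch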
Example #30
def eval_seq(opt, dataloader, data_type, result_filename, gt_filename, save_dir=None, show_image=True, frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    gts = []
    frame_id = 0
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
            # if frame_id>400:
            #     break
        if '0000001' in path:  # first frame of a new sequence: reset the tracker
            tid_max = tid_temp
            tracker = JDETracker(opt, frame_rate=frame_rate)
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # online_targets = tracker.update(blob, img0)
        online_targets = tracker.update_sep3(blob, img0, conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])  ## use class-separated tracker
        # print(online_targets)
        online_tlwhs = []
        online_ids = []
        online_cids = [] #class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max
            tcid = t.class_id
            # vertical = tlwh[2] / tlwh[3] > 1.6
            # if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
            tid_temp = max(tid, tid_temp)
        timer.toc()
        gt_tlwhs = []
        gt_ids = []
        gt_cids = []
        gt_path = path.replace('images', 'labels_with_ids').replace('jpg', 'txt')
        gt_targets = read_gt_txts(gt_path)
        for (tlwh, tid, tcid) in gt_targets:
            gt_tlwhs.append(tlwh)
            gt_ids.append(tid)
            gt_cids.append(tcid)
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_cids))
        # save gts
        gts.append((frame_id + 1, gt_tlwhs, gt_ids, gt_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # save gts
    write_results(gt_filename, gts, data_type)
    return frame_id, timer.average_time, timer.calls
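The tid_max / tid_temp bookkeeping in Example #30 keeps track IDs globally unique even though the tracker is re-created at every sequence boundary. The same scheme in isolation, as a small sketch:

def offset_ids(per_sequence_ids):
    # per_sequence_ids: one list of raw track IDs per independent sequence.
    # Each sequence's IDs are shifted by the largest ID seen so far, so a
    # fresh tracker restarting from ID 1 never collides with earlier IDs.
    out, tid_max = [], 0
    for seq_ids in per_sequence_ids:
        shifted = [tid + tid_max for tid in seq_ids]
        out.append(shifted)
        tid_max = max(shifted, default=tid_max)
    return out

print(offset_ids([[1, 2, 3], [1, 2]]))  # [[1, 2, 3], [4, 5]]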