Example #1
    def __init__(self, model, device, max_dist=0.2, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100):
        # models trained on: market1501, dukemtmcreid and msmt17
        if is_model_in_factory(model):
            # download the model
            model_path = join('deep_sort/deep/checkpoint', model + '.pth')
            if not file_exists(model_path):
                gdown.download(get_model_link(model), model_path, quiet=False)

            self.extractor = FeatureExtractor(
                # strip the dataset suffix from the DeepSort model name
                model_name=model.rsplit('_', 1)[0],
                model_path=model_path,
                device=str(device)
            )
        else:
            if is_model_type_in_model_path(model):
                model_name = get_model_type(model)
                self.extractor = FeatureExtractor(
                    model_name=model_name,
                    model_path=model,
                    device=str(device)
                )
            else:
                print('Cannot infer model name from the provided DeepSort path; it should be one of the following:')
                show_supported_models()
                exit()

        self.max_dist = max_dist
        metric = NearestNeighborDistanceMetric(
            "cosine", self.max_dist, nn_budget)
        self.tracker = Tracker(
            metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
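
For reference, a minimal standalone sketch of how this extractor is typically invoked on person crops (an assumption about usage, not part of the snippet above; torchreid's FeatureExtractor accepts a list of numpy images and returns a feature tensor):

import numpy as np
from torchreid.utils import FeatureExtractor

# A minimal sketch; the random crops below are hypothetical stand-ins for real
# person detections. With no model_path, torchreid falls back to ImageNet weights.
extractor = FeatureExtractor(model_name='osnet_x1_0', device='cpu')
crops = [np.random.randint(0, 255, (256, 128, 3), dtype=np.uint8) for _ in range(4)]
features = extractor(crops)
print(features.shape)  # torch.Size([4, 512]) for osnet_x1_0
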
Example #2
def main():
    args = parse_args()

    pwd = 'sshpass -p sihan123 '
    scp = 'scp [email protected]:/home/sihan/Documents/motoolkit/reid/build/install/bin/'
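    # Pull the reference input/output blobs (in.bin / out.bin) from the remote build over scp.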
    os.system(pwd + scp + 'in.bin {}'.format(args.files_path))
    os.system(pwd + scp + 'out.bin {}'.format(args.files_path))

    extractor = FeatureExtractor(model_name='osnet_x1_0',
                                 model_path=args.model_path,
                                 device='cuda')

    with open(osp.join(args.files_path, 'in.bin'), 'rb') as fd:
        inputs = np.fromfile(fd, dtype=np.float32)  # avoid shadowing the built-in input()
        inputs = torch.from_numpy(inputs).view(args.batch_size, 3, 256,
                                               128).cuda()

    baseline_output = extractor(inputs).view(-1)
    print('baseline_output:\n{}'.format(baseline_output))
    with open(osp.join(args.files_path, 'out.bin'), 'rb') as fd:
        test_output = np.fromfile(fd, dtype=np.float32)
        test_output = torch.from_numpy(test_output).cuda()
        print('test_output:\n{}'.format(test_output))

    error = torch.abs(baseline_output - test_output)
    print('error: min {}, max {}, mean {}'.format(error.min(), error.max(),
                                                  error.mean()))
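
parse_args is not shown in this example; a plausible sketch follows, with flag names inferred only from the attributes used above (files_path, model_path, batch_size), so treat them as assumptions:

import argparse

def parse_args():
    # Hypothetical reconstruction of the missing parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--files-path', required=True)  # -> args.files_path
    parser.add_argument('--model-path', required=True)  # -> args.model_path
    parser.add_argument('--batch-size', type=int, default=1)  # -> args.batch_size
    return parser.parse_args()
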
Example #3
    def __init__(self, model_name: str, checkpoint_path: str,
                 img_size: Tuple[int, int], **kwargs):
        super().__init__(**kwargs)
        self.extractor = FeatureExtractor(model_name=model_name,
                                          model_path=checkpoint_path,
                                          device='cuda',
                                          image_size=(img_size[1],
                                                      img_size[0]))
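
Note that torchreid's FeatureExtractor expects image_size as (height, width) (its default is (256, 128)), so the reversed tuple implies img_size arrives as (width, height).
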
Example #4
def init_models(video_size=(704, 576)):
    yolo = YoloV5()
    reid = FeatureExtractor(
        model_name='osnet_x1_0',
        model_path=
        'pid/deep_person_reid/checkpoints/osnet_x1_0_market_256x128_amsgrad_ep150_stp60_lr0.0015_b64_fb10_softmax_labelsmooth_flip.pth',
        verbose=False)
    face_detector = RetinaFace(image_size=(video_size[1], video_size[0]))
    # detector = MTCNN()
    faceNet = FaceNet()
    return yolo, reid, face_detector, faceNet
Example #5
    def __init__(self, proxy_map, startup_check=False):
        super(SpecificWorker, self).__init__(proxy_map)
        self.Period = 2000
        if startup_check:
            self.startup_check()
        else:
            self.timer.timeout.connect(self.compute)
            self.timer.start(self.Period)

        # Load CenterTrack, our tracking module
        opt = opts().init()
        self.detector = Detector(opt)

        # ReID feature extractor
        self.reid_extractor = FeatureExtractor(
            model_name='shufflenet',
            model_path='/home/shubh/Downloads/shufflenet-bee1b265.pth.tar',
            device='cuda')
Example #6
    def __init__(self,
                 model_type,
                 device,
                 max_dist=0.2,
                 max_iou_distance=0.7,
                 max_age=70,
                 n_init=3,
                 nn_budget=100):

        self.extractor = FeatureExtractor(model_name=model_type,
                                          device=str(device))

        max_cosine_distance = max_dist
        metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance,
                                               nn_budget)
        self.tracker = Tracker(metric,
                               max_iou_distance=max_iou_distance,
                               max_age=max_age,
                               n_init=n_init)
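
Unlike Example #1, no model_path is passed here, so torchreid builds the network with its ImageNet-pretrained weights rather than a re-ID checkpoint; this runs, but ImageNet-only embeddings are generally weaker for re-identification.
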
Example #7
def make_model():
    yolo = yolo_model()
    reid = FeatureExtractor(
        model_name='osnet_x1_0',
        model_path=
        'pid/deep_person_reid/checkpoints/osnet_x1_0_market_256x128_amsgrad_ep150_stp60_lr0.0015_b64_fb10_softmax_labelsmooth_flip.pth',
        verbose=False)
    pnet, rnet, onet = create_mtcnn_net(
        p_model_path="fid/mtcnn/mtcnn_checkpoints/pnet_epoch.pt",
        r_model_path="fid/mtcnn/mtcnn_checkpoints/rnet_epoch.pt",
        o_model_path="fid/mtcnn/mtcnn_checkpoints/onet_epoch.pt",
        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24)
    mobileFace = mobile_face_model(
        "fid/InsightFace_Pytorch/facenet_checkpoints/model_ir_se50.pth")
    return yolo, reid, mtcnn_detector, mobileFace
Example #8
# -*- coding: utf-8 -*-
# !@time: 2020/6/9 5:42 AM
# !@author: superMC @email: [email protected]
# !@fileName: pid_demo.py

import cv2
from torchreid.utils import FeatureExtractor

from pid.yolov4.yolov4 import YoloV4 as Yolo

#from pid.yolov5.yolov5 import YoloV5 as Yolo

if __name__ == '__main__':
    import time

    yolo = Yolo()
    reid = FeatureExtractor(
        model_name='osnet_x1_0',
        model_path=
        'pid/deep_person_reid/checkpoints/osnet_x1_0_market_256x128_amsgrad_ep150_stp60_lr0.0015_b64_fb10_softmax_labelsmooth_flip.pth',
        verbose=False)

    image = cv2.imread("data/office1.jpg")
    # person_images, _ = detect_person(model, image)
    person_images, person_boxes = yolo(image)
    for box in person_boxes:
        image = cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]),
                              (255, 0, 0), 2)
    cv2.imshow('demo', image)
    cv2.waitKey(0)
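
The reid extractor above is built but never called in this demo. A hypothetical continuation (assuming person_images are the BGR numpy crops returned by yolo, which torchreid accepts directly):

    # Hypothetical follow-on: embed the detected person crops for re-ID matching.
    features = reid(person_images)
    print(features.shape)  # (num_persons, 512) for osnet_x1_0
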
Example #9
    def _set_extractor(self):
        self.extractor = FeatureExtractor(model_name=self.model_name,
                                          model_path=self.model_path,
                                          device=self.device)
Example #10
    # Arguments Parser
    parser = argparse.ArgumentParser()
    parser.add_argument("-v",
                        "--video",
                        type=str,
                        help="Surviellance video file path")
    parser.add_argument("-s",
                        "--save",
                        type=bool,
                        default=True,
                        help="Save cropped Images of Pedestrain")
    args = parser.parse_args()

    # OSNet Feature Extractor
    extractor = FeatureExtractor(
        model_name='osnet_x1_0',
        model_path='model_weights/osnet_weights.tar-250',
        device='cuda')

    model_path = 'model_weights/ssd_weights.pb'
    odapi = PersonDetector(path_to_ckpt=model_path)
    threshold = 0.7

    cap = cv2.VideoCapture(args.video)
    frame_counter = 0

    # Make a directory for saving cropped images of pedestrian
    gallery_path = "Gallery"
    os.makedirs(gallery_path, exist_ok=True)
    os.chdir(gallery_path)

    Features = []
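
The snippet ends before the frame loop; a hypothetical continuation consistent with the setup above (the detection call is an assumed placeholder, since PersonDetector's interface is not shown):

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_counter += 1
        # `detect_people` is an assumed helper wrapping odapi; it is not in the original.
        for i, (box, score) in enumerate(detect_people(odapi, frame)):
            if score < threshold:
                continue
            x1, y1, x2, y2 = box
            crop = frame[y1:y2, x1:x2]
            if args.save:
                cv2.imwrite('frame{}_person{}.jpg'.format(frame_counter, i), crop)
            Features.append(extractor(crop))  # one (1, feat_dim) tensor per crop
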
Example #11
def main():
    args = parse_args()
    if osp.isfile(args.config):
        config.merge_from_file(args.config)
    config.freeze()

    calibs = [parse_calibration_data(f) for f in args.calibration]
    print('calibration:\n{}'.format(calibs))

    # Create input and output queue for each tracker.
    ncamera = len(args.inputs)
    images = []  # image queues
    tracklets = []  # tracklet queues
    for i in range(ncamera):
        images.append(Queue(maxsize=0))
        tracklets.append(Queue(maxsize=0))
    trajectories = []  # global trajectory list

    # Feature extractor.
    extractor = FeatureExtractor(model_name='osnet_x1_0',
                                 model_path=args.reid,
                                 device='cuda')

    # Create working threads.
    tid = 0
    threads = []
    exit_flag = Value('i', 0)  # shared thread exit switch
    for i in range(ncamera):
        # Datastore thread.
        tid += 1
        threads.append(Datastore(tid, args.inputs[i], images[i]))
        # MTSCT thread.
        tid += 1
        model = build_tracker(config.MODEL)
        model.load_state_dict(torch.load(args.tracker, map_location='cpu'))
        locator = ImageToWorldTsai(calibs[i])
        threads.append(
            MTSCT(tid, images[i], tracklets[i], exit_flag, model, locator))
    # MTMCT thread.
    tid += 1
    threads.append(MTMCT(tid, tracklets, trajectories, exit_flag, extractor))

    # Start all threads.
    for thread in threads:
        thread.start()

    # Wait for the Datastore threads to finish.
    ndead = 0
    while ndead != ncamera:
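        # threads alternate Datastore/MTSCT per camera; [:-1] drops the MTMCT thread and [0::2] keeps the Datastore ones.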
        ndead = sum([int(not t.is_alive()) for t in threads[:-1][0::2]])
        time.sleep(1)
    print('Datastore done.')

    # Wait for the MTSCT threads to finish.
    nempty = 0
    while nempty != ncamera:
        nempty = sum([int(q.empty()) for q in images])
        time.sleep(1)
    print('MTSCT done.')

    # Wait for the MTMCT thread to finish.
    nempty = 0
    while nempty != ncamera:
        nempty = sum([int(q.empty()) for q in tracklets])
        time.sleep(1)
    print('MTMCT done.')

    exit_flag.value = 1
    for thread in threads:
        thread.join()
    print('All work done.')
Example #12
from torchreid.utils import FeatureExtractor
import time

extractor = FeatureExtractor(
    model_name='osnet_x0_25',
    model_path='torchreid/models/osnet_x0_25_imagenet.pth',
    device='cpu')

image_list = ['torchreid/data/img.png']

total_time = 0
for i in range(100):

    time1 = time.time()
    features = extractor(image_list)
    time2 = time.time()
    total_time += (time2 - time1)

    #print(features.shape) # output (N, 512)

avg_time = total_time / 100
print(avg_time, "Avg time per loop")
print(1 / avg_time, "Avg FPS")
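
One measurement caveat (a general note, not from the original): the first call pays one-time setup costs, so warming up before the timed loop gives a steadier average, e.g.:

_ = extractor(image_list)  # warm-up call, excluded from the timed loop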