Example #1
File: trackernew.py  Project: Jay-Liu/JDE
def main(args):
    # Select the GPU when available, otherwise fall back to the CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.freeze()

    # Build the tracking model and load the trained weights.
    model = build_tracker(config.MODEL)
    if os.path.isfile(args.model):
        model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.to(device).eval()

    # Parse the network input size, e.g. '320x576' -> h=320, w=576.
    h, w = [int(s) for s in args.insize.split('x')]
    tracker = JDETracker()
    if os.path.isfile(args.img_path):
        dataloader = dataset.VideoLoader(args.img_path, (h, w, 3))
    else:
        dataloader = dataset.ImagesLoader(args.img_path, (h, w, 3), formats=['*.jpg', '*.png'])
    
    # Derive the output paths from the input path; the sequence name is
    # taken from the third-to-last path segment.
    strs = re.split(r'[\\, /]', args.img_path)
    imgpath = os.path.join(args.workspace, 'result', strs[-3], 'img')
    mkdir(imgpath)
    traj_path = os.path.join(args.workspace, 'result', '{}.txt'.format(strs[-3]))

    # Clear stale frames from a previous run.
    os.system('rm -f {}'.format(os.path.join(imgpath, '*')))
    for i, (path, im, lb_im) in enumerate(dataloader):
        # 'lb_im' is the letterboxed network input; 'im' is the original frame.
        inp = torch.from_numpy(lb_im).unsqueeze(0).to(device)
        with torch.no_grad():
            outputs = model(inp)
        print('{} {} {} {}'.format(path, im.shape, lb_im.shape, outputs.size()), end=' ')
        outputs = nonmax_suppression(outputs, args.score_thresh, args.iou_thresh)[0]
        
        if outputs is None:
            print('no object detected!')
            segments = re.split(r'[\\, /]', path)
            cv2.imwrite(os.path.join(imgpath, segments[-1]), im)
            continue
        print('{}'.format(outputs.size()), end=' ')
        # Map boxes from network input coordinates back to image coordinates.
        outputs[:, :4] = ltrb_net2img(outputs[:, :4], (h, w), im.shape[:2])
        if not args.only_detect:
            trajectories = tracker.update(outputs.cpu().numpy())
            print('{}'.format(len(trajectories)))
            result = overlap_trajectory(trajectories, im)
            save_trajectories(traj_path, trajectories, i + 1)
        else:
            print('')
            result = overlap(outputs, im)
        segments = re.split(r'[\\, /]', path)
        cv2.imwrite(os.path.join(imgpath, segments[-1]), result)

    # Stitch the saved frames into a video with ffmpeg.
    os.system('ffmpeg -f image2 -i {} {}.mp4 -y'.format(os.path.join(imgpath, '%06d.jpg'),
        os.path.join(args.workspace, 'result', strs[-3])))
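For reference, main(args) reads args.config, args.model, args.insize, args.img_path, args.workspace, args.score_thresh, args.iou_thresh and args.only_detect. A minimal argparse setup that would satisfy it might look like the sketch below; the flag names are inferred from those attribute accesses and the defaults are placeholders, not the project's actual values.

import argparse

def parse_args():
    # Hypothetical parser inferred from the attributes main() accesses;
    # the defaults here are illustrative placeholders only.
    parser = argparse.ArgumentParser(description='Run JDE tracking on a video or an image folder')
    parser.add_argument('--config', type=str, default='', help='path to a YAML config file')
    parser.add_argument('--model', type=str, default='', help='path to the trained weights')
    parser.add_argument('--insize', type=str, default='320x576', help='network input size as HxW')
    parser.add_argument('--img-path', type=str, required=True, help='video file or image directory')
    parser.add_argument('--workspace', type=str, default='.', help='output root directory')
    parser.add_argument('--score-thresh', type=float, default=0.5, help='detection score threshold')
    parser.add_argument('--iou-thresh', type=float, default=0.4, help='NMS IoU threshold')
    parser.add_argument('--only-detect', action='store_true', help='draw detections without tracking')
    return parser.parse_args()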
Example #2
def main():
    args = parse_args()
    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.merge_from_list(args.opts)
    config.freeze()
    print(config)

    model = build_tracker(config.MODEL)
    # Layer types whose qualified names will be dumped, one per line.
    classes = (nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.ReLU, nn.MaxPool2d)
    with open(args.save_name, 'w') as fd:
        for name, module in model.named_modules():
            if isinstance(module, classes):
                fd.write('{}\n'.format(name))
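As a quick illustration of the named_modules filtering above, run against a toy model rather than the JDE tracker:

import torch.nn as nn

# Toy model, purely illustrative: only layers whose type is in 'classes'
# are listed, one qualified name per line.
toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
classes = (nn.Conv2d, nn.BatchNorm2d, nn.Linear, nn.ReLU, nn.MaxPool2d)
for name, module in toy.named_modules():
    if isinstance(module, classes):
        print(name)  # prints '0', '1', '2'; the Sequential root is filtered out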
Example #3
def main():
    args = parse_args()
    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.merge_from_list(args.opts)
    config.freeze()
    print(config)
    
    model = build_tracker(config.MODEL)
    if os.path.isfile(args.weight):
        model.load_state_dict(torch.load(args.weight, map_location='cpu'))
    model.cuda().eval()
    print(model)
    
    # Smoke test: push one random batch through the model.
    inp = torch.rand(64, 3, 320, 576)
    with torch.no_grad():
        output = model(inp.cuda())
    print('output size: {}'.format(output.size()))
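If a rough throughput number is wanted as well, the smoke test can be extended with CUDA-synchronized timing. This is a sketch, reusing the batch and input size from the example above:

import time
import torch

def benchmark(model, batch=64, size=(320, 576), iters=10):
    # Rough GPU throughput; torch.cuda.synchronize() is required because
    # CUDA kernel launches are asynchronous.
    inp = torch.rand(batch, 3, *size).cuda()
    with torch.no_grad():
        model(inp)  # warm-up pass
        torch.cuda.synchronize()
        start = time.time()
        for _ in range(iters):
            model(inp)
        torch.cuda.synchronize()
    print('{:.1f} images/s'.format(batch * iters / (time.time() - start)))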
Example #4
def main():
    # Parse configurations.
    args = parse_args()
    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.merge_from_list(args.opts)

    torch.backends.cudnn.benchmark = True
    mkdirs(config.SYSTEM.TASK_DIR)

    # Build dataset.
    dataset = build_dataset(config.DATASET)

    # Build model.
    num_ide = int(dataset.max_id + 1)
    config.MODEL.ARGS.HEAD.ARGS[1]['num_ide'] = num_ide
    model = build_tracker(config.MODEL)

    # Train tracker now.
    train_tracker(model, dataset, config)
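The config object used throughout these examples follows the yacs CfgNode API (merge_from_file, merge_from_list, freeze). Note that this example never calls freeze(), which is what allows the in-place assignment of num_ide after the merges. Below is a minimal sketch of such a config module; the node names beyond MODEL, DATASET and SYSTEM.TASK_DIR are illustrative, not the project's actual layout.

from yacs.config import CfgNode as CN

# Minimal yacs-style config sketch; the real node layout in the JDE
# project may differ.
config = CN()
config.SYSTEM = CN()
config.SYSTEM.TASK_DIR = './tasks'
config.DATASET = CN()
config.DATASET.NAME = ''
config.MODEL = CN()
config.MODEL.NAME = 'jde'

# Typical usage, mirroring the examples above:
# config.merge_from_file('config.yaml')          # override from a YAML file
# config.merge_from_list(['MODEL.NAME', 'jde'])  # override from CLI key/value pairs
# config.freeze()                                # make the config read-only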
Example #5
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # if args.backbone == 'darknet':
    #     model = darknet.DarkNet(np.random.randint(0, 100, (12, 2))).to(device)
    # elif args.backbone == 'shufflenetv2':
    #     model = shufflenetv2.ShuffleNetV2(np.random.randint(0, 100, (12, 2)),
    #         model_size=args.thin).to(device)
    # else:
    #     print('unknown backbone architecture!')
    #     sys.exit(0)
    #
    # # load state dict except the classifier layer
    # model_dict = model.state_dict()
    # trained_model_dict = torch.load(os.path.join(args.model), map_location='cpu')
    # trained_model_dict = {k : v for (k, v) in trained_model_dict.items() if k in model_dict}
    # trained_model_dict = collections.OrderedDict(trained_model_dict)
    # model_dict.update(trained_model_dict)
    # model.load_state_dict(model_dict)
    #
    # model.eval()

    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.freeze()

    model = build_tracker(config.MODEL)
    if os.path.isfile(args.model):
        model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.to(device).eval()

    # if '320x576' in args.insize:
    #     anchors = ((6,16),   (8,23),    (11,32),   (16,45),
    #                (21,64),  (30,90),   (43,128),  (60,180),
    #                (85,255), (120,360), (170,420), (340,320))
    # elif '480x864' in args.insize:
    #     anchors = ((6,19),    (9,27),    (13,38),   (18,54),
    #                (25,76),   (36,107),  (51,152),  (71,215),
    #                (102,305), (143,429), (203,508), (407,508))
    # elif '608x1088' in args.insize:
    #     anchors = ((8,24),    (11,34),   (16,48),   (23,68),
    #                (32,96),   (45,135),  (64,192),  (90,271),
    #                (128,384), (180,540), (256,640), (512,640))

    h, w = [int(s) for s in args.insize.split('x')]
    # decoder = jde.JDEcoder((h, w), embd_dim=args.embedding)
    tracker = JDETracker()
    if os.path.isfile(args.img_path):
        dataloader = dataset.VideoLoader(args.img_path, (h, w, 3))
    else:
        dataloader = dataset.ImagesLoader(args.img_path, (h, w, 3),
                                          formats=['*.jpg', '*.png'])

    strs = re.split(r'[\\, /]', args.img_path)
    imgpath = os.path.join(args.workspace, 'result', strs[-3], 'img')
    mkdir(imgpath)
    traj_path = os.path.join(args.workspace, 'result',
                             '{}.txt'.format(strs[-3]))

    os.system('rm -f {}'.format(os.path.join(imgpath, '*')))
    for i, (path, im, lb_im) in enumerate(dataloader):
        inp = torch.from_numpy(lb_im).unsqueeze(0).to(device)
        with torch.no_grad():
            outputs = model(inp)
        # outputs = decoder(outputs)
        print('{} {} {} {}'.format(path, im.shape, lb_im.shape,
                                   outputs.size()),
              end=' ')
        outputs = nonmax_suppression(outputs, args.score_thresh,
                                     args.iou_thresh)[0]

        if outputs is None:
            print('no object detected!')
            segments = re.split(r'[\\, /]', path)
            cv2.imwrite(os.path.join(imgpath, segments[-1]), im)
            continue
        print('{}'.format(outputs.size()), end=' ')
        outputs[:, :4] = ltrb_net2img(outputs[:, :4], (h, w), im.shape[:2])
        if not args.only_detect:
            trajectories = tracker.update(outputs.cpu().numpy())
            print('{}'.format(len(trajectories)))
            result = overlap_trajectory(trajectories, im)
            save_trajectories(traj_path, trajectories, i + 1)
        else:
            print('')
            result = overlap(outputs, im)
        segments = re.split(r'[\\, /]', path)
        cv2.imwrite(os.path.join(imgpath, segments[-1]), result)

    os.system('ffmpeg -f image2 -i {} {}.mp4 -y'.format(
        os.path.join(imgpath, '%06d.jpg'),
        os.path.join(args.workspace, 'result', strs[-3])))
Example #6
def main():
    args = parse_args()
    if osp.isfile(args.config):
        config.merge_from_file(args.config)
    config.freeze()

    calibs = [parse_calibration_data(f) for f in args.calibration]
    print('calibration:\n{}'.format(calibs))

    # Create an input and an output queue for each camera's tracker.
    ncamera = len(args.inputs)
    images = []  # image queues
    tracklets = []  # tracklet queues
    for i in range(ncamera):
        images.append(Queue(maxsize=0))
        tracklets.append(Queue(maxsize=0))
    trajectories = []  # global trajectory list

    # Feature extractor.
    extractor = FeatureExtractor(model_name='osnet_x1_0',
                                 model_path=args.reid,
                                 device='cuda')

    # Create working threads.
    tid = 0
    threads = []
    exit_flag = Value('i', 0)  # shared thread-exit switch
    for i in range(ncamera):
        # Datastore thread.
        tid += 1
        threads.append(Datastore(tid, args.inputs[i], images[i]))
        # MTSCT thread.
        tid += 1
        model = build_tracker(config.MODEL)
        model.load_state_dict(torch.load(args.tracker, map_location='cpu'))
        locator = ImageToWorldTsai(calibs[i])
        threads.append(
            MTSCT(tid, images[i], tracklets[i], exit_flag, model, locator))
    # MTMCT thread.
    tid += 1
    threads.append(MTMCT(tid, tracklets, trajectories, exit_flag, extractor))

    # Start all threads.
    for thread in threads:
        thread.start()

    # Wait for the Datastore threads to finish reading their inputs.
    ndead = 0
    while ndead != ncamera:
        # Datastore threads sit at the even indices of the per-camera threads.
        ndead = sum([int(not t.is_alive()) for t in threads[:-1][0::2]])
        time.sleep(1)
    print('Datastore done.')

    # Wait for the MTSCT input queues to drain.
    nempty = 0
    while nempty != ncamera:
        nempty = sum([int(q.empty()) for q in images])
        time.sleep(1)
    print('MTSCT done.')

    # Wait for the MTMCT input queues to drain.
    nempty = 0
    while nempty != ncamera:
        nempty = sum([int(q.empty()) for q in tracklets])
        time.sleep(1)
    print('MTMCT done.')

    exit_flag.value = 1
    for thread in threads:
        thread.join()
    print('All work done.')
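The shutdown pattern above (wait for producers, poll the queues until they drain, then flip a shared exit flag so every consumer can finish and return) can be distilled into a standalone producer/consumer sketch. The worker bodies are placeholders, not the actual MTSCT/MTMCT logic:

import time
from queue import Queue
from threading import Thread
from multiprocessing import Value

def producer(q, n):
    for i in range(n):
        q.put(i)  # enqueue work items, then exit

def consumer(q, flag):
    # Serve until the shared flag flips, then drain whatever is left.
    while not flag.value:
        if not q.empty():
            q.get()  # placeholder for the per-item tracking work
    while not q.empty():
        q.get()

q = Queue(maxsize=0)
flag = Value('i', 0)  # shared exit switch, as in the example above
prod = Thread(target=producer, args=(q, 100))
cons = Thread(target=consumer, args=(q, flag))
prod.start()
cons.start()

prod.join()           # like waiting on the Datastore threads above
while not q.empty():  # like waiting for the image queues to drain
    time.sleep(0.1)
flag.value = 1        # signal the consumer to exit
cons.join()
print('done')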