Example #1
def __init__(self, tid, data_path, images, insize=(320, 576, 3)):
    # Forward the worker id as the parent's name.
    super(Datastore, self).__init__(name=tid)
    self.tid = tid
    self.data_path = data_path
    self.images = images
    self.insize = insize
    # Project-local loader over *.jpg/*.png files in data_path,
    # resized to the network input size.
    self.dataset = dataset.ImagesLoader(data_path,
                                        insize,
                                        formats=['*.jpg', '*.png'])
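
The name=tid keyword forwarded to the superclass suggests that Datastore subclasses threading.Thread. A minimal usage sketch under that assumption; the queue, the path, and the start() call are hypothetical, not from the project:

import queue

images = queue.Queue()                        # hypothetical shared buffer
store = Datastore(tid='loader-0',             # becomes the thread name
                  data_path='/data/MOT16/img1',
                  images=images)
store.start()  # only valid if Datastore really subclasses threading.Thread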
Example #2
File: trackernew.py  Project: Jay-Liu/JDE
import os
import re

import cv2
import torch

# Project-local imports assumed by this snippet: config, dataset, mkdir,
# build_tracker, JDETracker, nonmax_suppression, ltrb_net2img,
# overlap, overlap_trajectory, save_trajectories.

def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.freeze()

    model = build_tracker(config.MODEL)
    if os.path.isfile(args.model):
        model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.to(device).eval()  # use the computed device instead of hard-coding CUDA

    h, w = [int(s) for s in args.insize.split('x')]  # input size given as 'HxW'
    tracker = JDETracker()
    # A file path is treated as a video, a directory as an image folder.
    if os.path.isfile(args.img_path):
        dataloader = dataset.VideoLoader(args.img_path, (h, w, 3))
    else:
        dataloader = dataset.ImagesLoader(args.img_path, (h, w, 3),
                                          formats=['*.jpg', '*.png'])

    # Take a path component three segments from the end as the sequence
    # name (assumes a layout like .../<sequence>/img1/).
    strs = re.split(r'[\\, /]', args.img_path)
    imgpath = os.path.join(args.workspace, 'result', strs[-3], 'img')
    mkdir(imgpath)
    traj_path = os.path.join(args.workspace, 'result', '{}.txt'.format(strs[-3]))

    os.system('rm -f {}'.format(os.path.join(imgpath, '*')))  # clear stale frames
    for i, (path, im, lb_im) in enumerate(dataloader):
        inputs = torch.from_numpy(lb_im).unsqueeze(0).to(device)  # avoid shadowing built-in input
        with torch.no_grad():
            outputs = model(inputs)
        print('{} {} {} {}'.format(path, im.shape, lb_im.shape, outputs.size()), end=' ')
        outputs = nonmax_suppression(outputs, args.score_thresh, args.iou_thresh)[0]

        if outputs is None:
            print('no object detected!')
            segments = re.split(r'[\\, /]', path)
            cv2.imwrite(os.path.join(imgpath, segments[-1]), im)
            continue
        print('{}'.format(outputs.size()), end=' ')
        outputs[:, :4] = ltrb_net2img(outputs[:, :4], (h, w), im.shape[:2])
        if not args.only_detect:
            trajectories = tracker.update(outputs.cpu().numpy())
            print('{}'.format(len(trajectories)))
            result = overlap_trajectory(trajectories, im)
            save_trajectories(traj_path, trajectories, i + 1)
        else:
            print('')
            result = overlap(outputs, im)
        segments = re.split(r'[\\, /]', path)
        cv2.imwrite(os.path.join(imgpath, segments[-1]), result)

    # Stitch saved frames into a video; assumes frames are named 000001.jpg, ...
    # (-y must precede the output file to take effect as the overwrite flag).
    os.system('ffmpeg -y -f image2 -i {} {}.mp4'.format(os.path.join(imgpath, '%06d.jpg'),
        os.path.join(args.workspace, 'result', strs[-3])))
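
ltrb_net2img maps (left, top, right, bottom) boxes from network-input coordinates back to the original image. The project's implementation is not shown on this page; below is a sketch of the usual letterbox inverse (aspect-preserving resize plus symmetric padding), with the function's exact behavior assumed:

import numpy as np

def ltrb_net2img_sketch(boxes, net_size, img_size):
    # boxes: (N, 4) array of (left, top, right, bottom) in network-input
    # coordinates; net_size / img_size are (height, width) tuples.
    nh, nw = net_size
    ih, iw = img_size
    scale = min(nh / ih, nw / iw)             # letterbox resize factor
    pad_x = (nw - iw * scale) / 2             # horizontal padding added
    pad_y = (nh - ih * scale) / 2             # vertical padding added
    boxes = np.asarray(boxes, dtype=np.float64).copy()
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_x) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_y) / scale
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, iw)   # clamp to image bounds
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, ih)
    return boxes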
Example #3
# Same standard imports and project-local helpers as Example #2.
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # if args.backbone == 'darknet':
    #     model = darknet.DarkNet(np.random.randint(0, 100, (12, 2))).to(device)
    # elif args.backbone == 'shufflenetv2':
    #     model = shufflenetv2.ShuffleNetV2(np.random.randint(0, 100, (12, 2)),
    #         model_size=args.thin).to(device)
    # else:
    #     print('unknown backbone architecture!')
    #     sys.exit(0)
    #
    # # load state dict except the classifier layer
    # model_dict = model.state_dict()
    # trained_model_dict = torch.load(os.path.join(args.model), map_location='cpu')
    # trained_model_dict = {k : v for (k, v) in trained_model_dict.items() if k in model_dict}
    # trained_model_dict = collections.OrderedDict(trained_model_dict)
    # model_dict.update(trained_model_dict)
    # model.load_state_dict(model_dict)
    #
    # model.eval()

    if os.path.isfile(args.config):
        config.merge_from_file(args.config)
    config.freeze()

    model = build_tracker(config.MODEL)
    if os.path.isfile(args.model):
        model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.to(device).eval()  # use the computed device instead of hard-coding CUDA

    # if '320x576' in args.insize:
    #     anchors = ((6,16),   (8,23),    (11,32),   (16,45),
    #                (21,64),  (30,90),   (43,128),  (60,180),
    #                (85,255), (120,360), (170,420), (340,320))
    # elif '480x864' in args.insize:
    #     anchors = ((6,19),    (9,27),    (13,38),   (18,54),
    #                (25,76),   (36,107),  (51,152),  (71,215),
    #                (102,305), (143,429), (203,508), (407,508))
    # elif '608x1088' in args.insize:
    #     anchors = ((8,24),    (11,34),   (16,48),   (23,68),
    #                (32,96),   (45,135),  (64,192),  (90,271),
    #                (128,384), (180,540), (256,640), (512,640))

    h, w = [int(s) for s in args.insize.split('x')]
    # decoder = jde.JDEcoder((h, w), embd_dim=args.embedding)
    tracker = JDETracker()
    if os.path.isfile(args.img_path):
        dataloader = dataset.VideoLoader(args.img_path, (h, w, 3))
    else:
        dataloader = dataset.ImagesLoader(args.img_path, (h, w, 3),
                                          formats=['*.jpg', '*.png'])

    strs = re.split(r'[\\, /]', args.img_path)
    imgpath = os.path.join(args.workspace, 'result', strs[-3], 'img')
    mkdir(imgpath)
    traj_path = os.path.join(args.workspace, 'result',
                             '{}.txt'.format(strs[-3]))

    os.system('rm -f {}'.format(os.path.join(imgpath, '*')))
    for i, (path, im, lb_im) in enumerate(dataloader):
        inputs = torch.from_numpy(lb_im).unsqueeze(0).to(device)  # avoid shadowing built-in input
        with torch.no_grad():
            outputs = model(inputs)
        # outputs = decoder(outputs)
        print('{} {} {} {}'.format(path, im.shape, lb_im.shape,
                                   outputs.size()),
              end=' ')
        outputs = nonmax_suppression(outputs, args.score_thresh,
                                     args.iou_thresh)[0]

        if outputs is None:
            print('no object detected!')
            segments = re.split(r'[\\, /]', path)
            cv2.imwrite(os.path.join(imgpath, segments[-1]), im)
            continue
        print('{}'.format(outputs.size()), end=' ')
        outputs[:, :4] = ltrb_net2img(outputs[:, :4], (h, w), im.shape[:2])
        if not args.only_detect:
            trajectories = tracker.update(outputs.cpu().numpy())
            print('{}'.format(len(trajectories)))
            result = overlap_trajectory(trajectories, im)
            save_trajectories(traj_path, trajectories, i + 1)
        else:
            print('')
            result = overlap(outputs, im)
        segments = re.split(r'[\\, /]', path)
        cv2.imwrite(os.path.join(imgpath, segments[-1]), result)

    # As in Example #2: -y must precede the output file to act as the
    # overwrite flag, and the %06d.jpg pattern assumes zero-padded frame names.
    os.system('ffmpeg -y -f image2 -i {} {}.mp4'.format(
        os.path.join(imgpath, '%06d.jpg'),
        os.path.join(args.workspace, 'result', strs[-3])))
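
Both main variants read the same fields from args. A plausible argparse front end reconstructed from those accesses; the attribute names match the snippets, but the defaults and help strings are guesses, not the project's actual values:

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='JDE tracking demo')
    # Attribute names mirror the args.* accesses in the snippets above.
    parser.add_argument('--config', default='', help='YAML file merged into config')
    parser.add_argument('--model', default='', help='path to trained weights (.pth)')
    parser.add_argument('--insize', default='320x576', help="network input as 'HxW'")
    parser.add_argument('--img-path', dest='img_path', required=True,
                        help='video file or directory of images')
    parser.add_argument('--workspace', default='.', help='output root directory')
    parser.add_argument('--score-thresh', dest='score_thresh', type=float, default=0.5)
    parser.add_argument('--iou-thresh', dest='iou_thresh', type=float, default=0.4)
    parser.add_argument('--only-detect', dest='only_detect', action='store_true',
                        help='skip tracking and only draw detections')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())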