Code example #1
def __init__(self, dataset, detector='yolov3', experiment_name='default', overwrite=False):
    self.detector = detector
    self.dataset = dataset
    self.overwrite = overwrite
    self.experiment_name = experiment_name
    self.checkdir()
    # build the detector configuration and a validation-mode augmentation pipeline
    self.config = Config(detector)
    self.DA = DA('validation', self.config)
Code example #2
File: app.py  Project: RuaHU/TLfPS
def __init__(self, detector, det=None, name='SelectFrame'):
    wx.Frame.__init__(self, None, title=name)
    self.sizer = wx.BoxSizer(wx.VERTICAL)
    self.SetSizer(self.sizer)
    self.config = Config(detector)
    self.DA = DA('validation', self.config)
    self.fmts = ['*.jpg', '*.png', '*.bmp', '*.jpeg']
    self.menuBar()
    self.Center()
    self.query = {}
    self.gallery = {}
    pub.subscribe(self.queryReid, 'query')
Code example #3
File: prw_evaluation.py  Project: RuaHU/TLfPS
def __init__(self,
             dataset,
             detector='yolov3',
             experiment_name='default',
             overwrite=False):
    if dataset is None:
        return
    self.detector = detector
    self.dataset = dataset
    self.overwrite = overwrite
    self.experiment_name = experiment_name
    self.checkdir()
    self.config = Config(detector)
    self.DA = DA('validation', self.config)
    self.load_gallery()
    self.load_query()
Code example #4
File: trainshell.py  Project: RuaHU/TLfPS
def thread_da(self, thread_name, delay):
    self.logger.info("start new thread: %s" % thread_name)
    da = DA('training', self.config)
    while True:
        cnt = 0
        # wait until the main thread hands this worker an (image, boxes) task
        while len(self.thread_task[thread_name]) == 0:
            cnt += 1
            if cnt > 200:
                self.logger.warning("thread %s stalled" % thread_name)
                cnt = 0
            time.sleep(delay)
        img1, box1 = self.thread_task[thread_name].pop()

        # run training-mode augmentation and push the result back for the trainer
        image_1, gt_bbox1, gt_id, gt_loc = da(img1, box1)
        self.thread_task_results[thread_name].append(
            [image_1, gt_bbox1, gt_id, gt_loc])
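Code example #4 polls a shared list and sleeps between checks. Purely as an illustration of the same producer/consumer hand-off, here is a minimal sketch using Python's standard queue.Queue, which blocks instead of polling; the augment callable is a stand-in for the project's DA object, and nothing below comes from TLfPS itself.

import queue
import threading

def augmentation_worker(tasks, results, augment):
    # Block until an (image, boxes) task arrives, augment it, and hand the result back.
    while True:
        item = tasks.get()
        if item is None:  # sentinel value: shut the worker down
            break
        img, boxes = item
        results.put(augment(img, boxes))

# Usage with a dummy augment callable standing in for DA('training', config):
tasks, results = queue.Queue(), queue.Queue()
worker = threading.Thread(target=augmentation_worker,
                          args=(tasks, results, lambda img, boxes: (img, boxes)))
worker.start()
tasks.put(('image placeholder', [[0, 0, 10, 10]]))
print(results.get())
tasks.put(None)
worker.join()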
Code example #5
File: testmodel.py  Project: RuaHU/TLfPS
def main(argv):
    M = 'yolov3'
    gpu = '0'
    image_path = None
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:g:p:',
                                   ['m=', 'gpu=', 'path='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -g <gpu> -p <image path>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -g <gpu> -p <image path>')
        elif opt in ['-m', '--m']:
            M = arg
        elif opt in ['-p', '--path']:
            image_path = arg
        elif opt in ['-g', '--gpu']:
            gpu = arg

    print('model: [%s] gpu: [%s]' % (M, gpu))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

    # the image path must be provided and point to an existing file
    if image_path is None or not os.path.exists(image_path):
        raise ValueError(
            'you must specify an image for testing' if image_path is None
            else 'bad image path: [%s]' % image_path)

    config = Config(M)
    model_path = os.path.join(parpath, 'pretrained_weights/%s.h5' % config.M)
    model = MODELS(config, model_type='detection').load_model(model_name='')
    load_weights_by_name(model, model_path)

    # run a single test image through the pipeline
    da = DA('validation', config)
    img = cv2.imread(image_path)
    input_img, input_box, input_ids, meta = da(img, [])
    cv2.imwrite('input_img.jpg', (input_img * 255).astype(np.uint8))
    if config.M == 'mrcnn':
        anchors = ANCHORS(config)
        detection, detection_score = model.predict([
            np.stack([input_img]),
            np.stack([input_box]),
            np.stack(meta),
            np.stack([anchors.get_anchors(input_img.shape)])
        ])
    else:
        detection, detection_score = model.predict(
            [np.stack([input_img]),
             np.stack([input_box])])
    detection = da.unmold(detection[0], meta)
    image = draw(img, detection, detection_score)
    print(detection_score)
    print(detection)
    cv2.imwrite('%s_test.jpg' % config.M, image)
    print('detection results saved as: %s_test.jpg' % config.M)
Code example #6
File: elapsedtime.py  Project: RuaHU/TLfPS
def __init__(self, dataset, reid, detector='yolov3'):
    self.detector = detector
    self.reid = reid
    self.dataset = dataset
    self.config = Config(detector)
    self.DA = DA('validation', self.config)
Code example #7

import cv2
import time
import os, sys
import numpy as np
parpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
curpath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(parpath)
from tools.load_weights import load_weights_by_name
from tools.config import Config
from tools.DataAugmentation import DataAugmentation as DA
sys.path.remove(parpath)
# note: DLASeg (used below) is defined elsewhere in the project; its import is not shown in this snippet
if __name__ == '__main__':
    config = Config('dla_34')
    model = DLASeg(config).model(model_type='detection')
    load_weights_by_name(model, 'dla_34.h5')
    da = DA('validation', config)
    while True:
        start = time.time()
        img0 = cv2.imread('test.jpg')
        img, _, _, meta = da(img0, [])
        dets, scores = model.predict([[img], np.zeros([1, 1, 4])])
        boxes, scores = da.unmold(dets[0], meta), scores[0]
        for i in range(0, boxes.shape[0]):
            # boxes are [x, y, w, h] in original-image coordinates; cv2.rectangle needs integer corners
            x, y, w, h = boxes[i][:4].astype(int)
            cv2.rectangle(img0, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imwrite('dets.jpg', img0)
        break
Code example #8
def main(argv):
    M = 'yolov3'
    gpu = '0'
    CUHK_SYSU = "/home/ronghua/Projects/data/dataset-v2/"
    PRW = '/home/ronghua/Projects/data/PRW-v16.04.20/'
    try:
        opts, args = getopt.getopt(argv[1:], 'hm:g:p:c:',
                                   ['m=', 'gpu=', 'prw=', 'cuhk='])
    except getopt.GetoptError:
        print(argv[0] + ' -m <M> -g <gpu> -p <PRW path> -c <CUHK_SYSU path>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(argv[0] + ' -m <M> -g <gpu> -p <PRW path> -c <CUHK_SYSU path>')
        elif opt in ['-m', '--m']:
            M = arg
        elif opt in ['-p', '--prw']:
            PRW = arg
        elif opt in ['-c', '--cuhk']:
            CUHK_SYSU = arg
        elif opt in ['-g', '--gpu']:
            gpu = arg

    print('model: [%s] gpu: [%s], CUHK_SYSU: [%s], PRW: [%s]' %
          (M, gpu, CUHK_SYSU, PRW))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    # at least one of the two dataset roots must exist
    has_dataset = os.path.exists(CUHK_SYSU) or os.path.exists(PRW)
    if not has_dataset:
        raise ValueError(
            'you should specify at least one dataset [CUHK_SYSU or PRW]')

    config = Config(M)
    model_path = os.path.join(parpath, 'pretrained_weights/%s.h5' % config.M)
    model = MODELS(config, model_type='detection').load_model(model_name='')
    if config.M == 'mrcnn':
        anchors = ANCHORS(config)

    load_weights_by_name(model, model_path)
    da = DA('validation', config)

    if os.path.exists(CUHK_SYSU):
        filepath = os.path.join(parpath, 'experiment_results',
                                'cuhk_%s_gallery.pkl' % config.M)
        if os.path.exists(filepath):
            print('cuhk_%s_gallery.pkl exists.' % config.M)
        else:
            print('creating cuhk-sysu gallery for %s' % config.M)
            gallery = []
            pool_path = os.path.join(CUHK_SYSU, 'dataset/annotation/pool.mat')
            if not os.path.exists(pool_path):
                raise ValueError('cannot find %s' % pool_path)
            pool = loadmat(pool_path)['pool'].squeeze()
            imnames = [imname[0] for imname in pool]
            for imname in imnames:
                img = cv2.imread(
                    os.path.join(CUHK_SYSU, 'dataset/Image/SSM/', imname))
                input_img, input_box, input_ids, meta = da(img, [])
                if config.M == 'mrcnn':
                    detection, scores = model.predict([
                        np.stack([input_img]),
                        np.stack([input_box]),
                        np.stack(meta),
                        np.stack([anchors.get_anchors(input_img.shape)])
                    ])
                else:
                    detection, scores = model.predict(
                        [np.stack([input_img]),
                         np.stack([input_box])])
                detection = da.unmold(detection[0], meta)
                detection[:, 2:] += detection[:, :2]
                features = np.zeros([len(detection), 0])
                gallery.append([imname, features, detection])
                print("\r%d|%d" % (len(gallery), len(imnames)), end='')
            print('')
            with open(filepath, 'wb') as f:
                pickle.dump(gallery, f)

    if os.path.exists(PRW):
        filepath = os.path.join(parpath, 'experiment_results',
                                'prw_%s_gallery.pkl' % config.M)
        if os.path.exists(filepath):
            print('prw_%s_gallery.pkl exists.' % config.M)
        else:
            print('creating prw gallery for %s' % config.M)
            gallery = []
            frame_test_path = os.path.join(PRW, 'frame_test.mat')
            if not os.path.exists(frame_test_path):
                raise ValueError('cannot find %s' % frame_test_path)
            frame_indices = loadmat(
                frame_test_path)['img_index_test'].squeeze()
            imnames = [imname[0] + '.jpg' for imname in frame_indices]
            for imname in imnames:
                img = cv2.imread(os.path.join(PRW, 'frames', imname))
                input_img, input_box, input_ids, meta = da(img, [])
                if config.M == 'mrcnn':
                    detection, scores = model.predict([
                        np.stack([input_img]),
                        np.stack([input_box]),
                        np.stack(meta),
                        np.stack([anchors.get_anchors(input_img.shape)])
                    ])
                else:
                    detection, scores = model.predict(
                        [np.stack([input_img]),
                         np.stack([input_box])])
                detection = da.unmold(detection[0], meta)
                detection[:, 2:] += detection[:, :2]
                features = np.zeros([len(detection), 0])
                gallery.append([imname, features, detection])
                print("\r%d|%d" % (len(gallery), len(imnames)), end='')
            print('')
            with open(filepath, 'wb') as f:
                pickle.dump(gallery, f)
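Taken together, the examples use DataAugmentation through three entry points: the constructor DA(mode, Config(detector)) with mode 'training' or 'validation', the call da(img, boxes) which returns (input_img, input_box, input_ids, meta), and da.unmold(detections, meta) which maps network output back to original-image coordinates. The sketch below restates that flow in one place. It assumes the tools.* import paths shown in code example #7, and detect_fn is a hypothetical placeholder for any loaded detection model (for instance a wrapper around model.predict as in code examples #5 and #8); it is a summary of the usage pattern, not part of the project.

import cv2
import numpy as np

from tools.config import Config
from tools.DataAugmentation import DataAugmentation as DA

def detect_single_image(image_path, detect_fn, detector='yolov3'):
    # Build the detector configuration and a deterministic (validation-mode) pipeline.
    config = Config(detector)
    da = DA('validation', config)

    # Preprocess one image; no ground-truth boxes are available at test time, hence the empty list.
    img = cv2.imread(image_path)
    input_img, input_box, input_ids, meta = da(img, [])

    # detect_fn (placeholder) takes batched inputs and returns (detections, scores).
    detections, scores = detect_fn(np.stack([input_img]), np.stack([input_box]))

    # Undo the resizing/padding so the boxes refer to the original image.
    boxes = da.unmold(detections[0], meta)
    return boxes, scores[0]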