Code Example #1
def __init__(self, args):
    super(YOLODetector, self).__init__()
    self.model = trainer.YOLOTrainer(boxes=[],
                                     images=[],
                                     args=args,
                                     test_mode=True)
    self.session = None
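The snippet above is only the constructor of a wrapper class. A minimal sketch of how it might sit in context, assuming trainer is dvalib.yolo.trainer as in the other examples on this page (the base class and import are assumptions, not project code):

# A minimal sketch of the surrounding class; trainer is assumed to be
# dvalib.yolo.trainer as elsewhere on this page.
from dvalib.yolo import trainer

class YOLODetector(object):
    def __init__(self, args):
        super(YOLODetector, self).__init__()
        # test_mode=True builds the trainer for inference only,
        # so no training boxes or images are supplied here.
        self.model = trainer.YOLOTrainer(boxes=[],
                                         images=[],
                                         args=args,
                                         test_mode=True)
        self.session = None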
Code Example #2
File: tasks.py Project: dmellop/DeepVideoAnalytics
def train_yolo_detector(task_id):
    """
    :param task_id:
    :return:
    """
    start = TEvent.objects.get(pk=task_id)
    if celery_40_bug_hack(start):
        return 0
    start.task_id = train_yolo_detector.request.id
    start.started = True
    start.operation = train_yolo_detector.name
    start.save()
    start_time = time.time()
    args = json.loads(start.arguments_json)
    labels = set(args['labels']) if 'labels' in args else set()
    object_names = set(args['object_names']) if 'object_names' in args else set()
    detector = CustomDetector.objects.get(pk=args['detector_pk'])
    create_detector_folders(detector)
    args['root_dir'] = "{}/models/{}/".format(settings.MEDIA_ROOT,detector.pk)
    class_distribution, class_names, rboxes, rboxes_set, frames, i_class_names = create_detector_dataset(object_names,labels)
    images, boxes = [], []
    path_to_f = {}
    for k,f in frames.iteritems():
        path = "{}/{}/frames/{}.jpg".format(settings.MEDIA_ROOT,f.video_id,f.frame_index)
        path_to_f[path] = f
        images.append(path)
        boxes.append(rboxes[k])
        # print k,rboxes[k]
    with open("{}/input.json".format(args['root_dir']),'w') as input_data:
        json.dump({'boxes':boxes,'images':images,'args':args,'class_names':class_names.items()},input_data)
    detector.boxes_count = sum([len(k) for k in boxes])
    detector.frames_count = len(images)
    detector.classes_count = len(class_names)
    detector.save()
    train_task = trainer.YOLOTrainer(boxes=boxes,images=images,class_names=i_class_names,args=args)
    train_task.train()
    # read the log contents (assigning the bare file object would not store the text)
    detector.phase_1_log = file("{}/phase_1.log".format(args['root_dir'])).read()
    detector.phase_2_log = file("{}/phase_2.log".format(args['root_dir'])).read()
    detector.class_distribution = json.dumps(class_distribution.items())
    detector.class_names = json.dumps(class_names.items())
    detector.save()
    results = train_task.predict()
    for path, box_class, score, top, left, bottom, right in results:
        r = Region()
        r.region_type = r.DETECTION
        r.confidence = int(100.0 * score)
        r.object_name = "YOLO_{}_{}".format(detector.pk,box_class)
        r.y = top
        r.x = left
        r.w = right - left
        r.h = bottom - top
        r.frame_id = path_to_f[path].pk
        r.video_id = path_to_f[path].video_id
        r.save()
    start.completed = True
    start.seconds = time.time() - start_time
    start.save()
    return 0
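The input.json written above captures everything the trainer was given. A minimal sketch, under the assumption that the file is read back with the same keys used in the json.dump call, of reconstructing a YOLOTrainer from it (the helper name is hypothetical):

import json
from dvalib.yolo import trainer

def trainer_from_input_json(root_dir):
    # Hypothetical helper (not in the project): rebuild a YOLOTrainer from the
    # input.json file dumped by train_yolo_detector above.
    with open("{}/input.json".format(root_dir)) as fh:
        data = json.load(fh)
    # 'class_names' was serialized as (name, index) pairs; the examples on this
    # page pass YOLOTrainer the inverse index -> name mapping.
    i_class_names = {index: name for name, index in data['class_names']}
    return trainer.YOLOTrainer(boxes=data['boxes'],
                               images=data['images'],
                               class_names=i_class_names,
                               args=data['args'])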
Code Example #3
def detect_custom_objects(detector_pk, video_pk):
    """
    Detection using customized trained YOLO detectors
    :param detector_pk:
    :param video_pk:
    :return:
    """
    setup_django()
    from dvaapp.models import Region, Frame, CustomDetector
    from django.conf import settings
    from dvalib.yolo import trainer
    from PIL import Image
    args = {'detector_pk': int(detector_pk)}
    video_pk = int(video_pk)
    detector = CustomDetector.objects.get(pk=args['detector_pk'])
    args['root_dir'] = "{}/detectors/{}/".format(settings.MEDIA_ROOT,
                                                 detector.pk)
    class_names = {k: v for k, v in json.loads(detector.class_names)}
    i_class_names = {i: k for k, i in class_names.items()}
    frames = {}
    for f in Frame.objects.all().filter(video_id=video_pk):
        frames[f.pk] = f
    images = []
    path_to_f = {}
    for k, f in frames.iteritems():
        path = "{}/{}/frames/{}.jpg".format(settings.MEDIA_ROOT, f.video_id,
                                            f.frame_index)
        path_to_f[path] = f
        images.append(path)
    train_task = trainer.YOLOTrainer(boxes=[],
                                     images=images,
                                     class_names=i_class_names,
                                     args=args,
                                     test_mode=True)
    results = train_task.predict()
    for path, box_class, score, top, left, bottom, right in results:
        r = Region()
        r.region_type = r.DETECTION
        r.confidence = int(100.0 * score)
        r.object_name = "YOLO_{}_{}".format(detector.pk, box_class)
        r.y = top
        r.x = left
        r.w = right - left
        r.h = bottom - top
        r.frame_id = path_to_f[path].pk
        r.video_id = path_to_f[path].video_id
        r.save()
        right = r.w + r.x
        bottom = r.h + r.y
        img = Image.open(path)
        img2 = img.crop((r.x, r.y, right, bottom))
        img2.save("{}/{}/detections/{}.jpg".format(settings.MEDIA_ROOT,
                                                   video_pk, r.pk))
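Because detect_custom_objects calls setup_django() itself, it is meant to run as a standalone entry point with the detector and video primary keys passed in (both are cast to int inside). A minimal invocation sketch, with a hypothetical script wrapper that is not part of the project:

if __name__ == '__main__':
    import sys
    # Hypothetical invocation: python detect_custom_objects.py <detector_pk> <video_pk>
    detect_custom_objects(sys.argv[1], sys.argv[2])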
Code Example #4
def train_yolo(start_pk):
    """
    Train a YOLO model specified by a TEvent.
    Running this in a separate process is necessary to ensure that the TensorFlow process exits and releases the allocated GPU memory.
    :param start_pk: TEvent PK with information about launching the training task
    :return:
    """
    setup_django()
    from django.conf import settings
    from dvaapp.models import Region, Frame, CustomDetector, TEvent
    from dvaapp.shared import create_detector_folders, create_detector_dataset
    from dvalib.yolo import trainer
    start = TEvent.objects.get(pk=start_pk)
    args = json.loads(start.arguments_json)
    labels = set(args['labels']) if 'labels' in args else set()
    object_names = set(args['object_names']) if 'object_names' in args else set()
    detector = CustomDetector.objects.get(pk=args['detector_pk'])
    create_detector_folders(detector)
    args['root_dir'] = "{}/detectors/{}/".format(settings.MEDIA_ROOT,detector.pk)
    class_distribution, class_names, rboxes, rboxes_set, frames, i_class_names = create_detector_dataset(object_names,labels)
    images, boxes = [], []
    path_to_f = {}
    for k,f in frames.iteritems():
        path = "{}/{}/frames/{}.jpg".format(settings.MEDIA_ROOT,f.video_id,f.frame_index)
        path_to_f[path] = f
        images.append(path)
        boxes.append(rboxes[k])
        # print k,rboxes[k]
    with open("{}/input.json".format(args['root_dir']),'w') as input_data:
        json.dump({'boxes':boxes,
                   'images':images,
                   'args':args,
                   'class_names':class_names.items(),
                   'class_distribution':class_distribution.items()},
                  input_data)
    detector.boxes_count = sum([len(k) for k in boxes])
    detector.frames_count = len(images)
    detector.classes_count = len(class_names)
    detector.save()
    train_task = trainer.YOLOTrainer(boxes=boxes,images=images,class_names=i_class_names,args=args)
    train_task.train()
    detector.phase_1_log = file("{}/phase_1.log".format(args['root_dir'])).read()
    detector.phase_2_log = file("{}/phase_2.log".format(args['root_dir'])).read()
    detector.class_distribution = json.dumps(class_distribution.items())
    detector.class_names = json.dumps(class_names.items())
    detector.trained = True
    detector.save()
    results = train_task.predict()
    bulk_regions = []
    for path, box_class, score, top, left, bottom, right in results:
        r = Region()
        r.region_type = r.ANNOTATION
        r.confidence = int(100.0 * score)
        r.object_name = "YOLO_{}_{}".format(detector.pk,box_class)
        r.y = top
        r.x = left
        r.w = right - left
        r.h = bottom - top
        r.frame_id = path_to_f[path].pk
        r.video_id = path_to_f[path].video_id
        bulk_regions.append(r)
    Region.objects.bulk_create(bulk_regions,batch_size=1000)
    folder_name = "{}/detectors/{}".format(settings.MEDIA_ROOT,detector.pk)
    file_name = '{}/exports/{}.dva_detector.zip'.format(settings.MEDIA_ROOT,detector.pk)
    zipper = subprocess.Popen(['zip', file_name, '-r', '.'],cwd=folder_name)
    zipper.wait()
    return 0
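As the docstring notes, training runs in its own process so that TensorFlow exits and releases the GPU memory it allocated. A minimal sketch of how such a launch might look; the wrapper function and script path are assumptions, not project code:

import subprocess
import sys

def launch_train_yolo(start_pk):
    # Hypothetical wrapper: run train_yolo in a child interpreter so that the
    # TensorFlow process exits afterwards and frees the GPU memory it allocated.
    # The script path 'scripts/train_yolo.py' is an assumption, not project code.
    child = subprocess.Popen([sys.executable, 'scripts/train_yolo.py', str(start_pk)])
    child.wait()
    return child.returncode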
Code Example #5
    # print k,rboxes[k]
with open("{}/input.json".format(args['root_dir']), 'w') as input_data:
    json.dump(
        {
            'boxes': boxes,
            'images': images,
            'args': args,
            'class_names': class_names.items(),
            'class_distribution': class_distribution.items()
        }, input_data)
detector.boxes_count = sum([len(k) for k in boxes])
detector.frames_count = len(images)
detector.classes_count = len(class_names)
detector.save()
args['class_names'] = i_class_names
train_task = trainer.YOLOTrainer(boxes=boxes, images=images, args=args)
train_task.train()
detector.phase_1_log = file("{}/phase_1.log".format(
    args['root_dir'])).read()
detector.phase_2_log = file("{}/phase_2.log".format(
    args['root_dir'])).read()
detector.class_distribution = json.dumps(class_distribution.items())
detector.class_names = json.dumps(class_names.items())
detector.trained = True
detector.save()
results = train_task.predict()
bulk_regions = []
for path, box_class, score, top, left, bottom, right in results:
    r = Region()
    r.region_type = r.ANNOTATION
    r.confidence = int(100.0 * score)
Code Example #6
File: tasks.py Project: sundoze/DeepVideoAnalytics
def train_yolo_detector(task_id):
    """
    :param task_id:
    :return:
    """
    start = TEvent.objects.get(pk=task_id)
    if celery_40_bug_hack(start):
        return 0
    start.task_id = train_yolo_detector.request.id
    start.started = True
    start.operation = train_yolo_detector.name
    start.save()
    start_time = time.time()
    args = json.loads(start.arguments_json)
    labels = set(args['labels']) if 'labels' in args else set()
    object_names = set(
        args['object_names']) if 'object_names' in args else set()
    detector = CustomDetector.objects.get(pk=args['detector_pk'])
    create_detector_folders(detector)
    class_names = {k: i for i, k in enumerate(labels.union(object_names))}
    i_class_names = {i: k for k, i in class_names.items()}
    rboxes = defaultdict(list)
    frames = {}
    for r in Region.objects.all().filter(object_name__in=object_names):
        frames[r.frame_id] = r.frame
        rboxes[r.frame_id].append(
            (class_names[r.object_name], r.x, r.y, r.x + r.w, r.y + r.h))
    for l in AppliedLabel.objects.all().filter(label_name__in=labels):
        frames[l.frame_id] = l.frame
        if l.region:
            r = l.region
            rboxes[l.frame_id].append(
                (class_names[l.label_name], r.x, r.y, r.x + r.w, r.y + r.h))
    images, boxes = [], []
    path_to_f = {}
    for k, f in frames.iteritems():
        path = "{}/{}/frames/{}.jpg".format(settings.MEDIA_ROOT, f.video_id,
                                            f.frame_index)
        path_to_f[path] = f
        images.append(path)
        boxes.append(rboxes[k])
        # print k,rboxes[k]
    with open("{}/input.json".format(args['root_dir']), 'w') as input_data:
        json.dump(
            {
                'boxes': boxes,
                'images': images,
                'args': args,
                'class_names': class_names.items()
            }, input_data)
    train_task = trainer.YOLOTrainer(boxes=boxes,
                                     images=images,
                                     class_names=i_class_names,
                                     args=args)
    train_task.train()
    results = train_task.predict()
    for path, box_class, score, top, left, bottom, right in results:
        r = Region()
        r.region_type = r.DETECTION
        r.confidence = int(100.0 * score)
        r.object_name = "YOLO_{}_{}".format(detector.pk, box_class)
        r.y = top
        r.x = left
        r.w = right - left
        r.h = bottom - top
        r.frame_id = path_to_f[path].pk
        r.video_id = path_to_f[path].video_id
        r.save()
    start.completed = True
    start.seconds = time.time() - start_time
    start.save()
    return 0