Example #1
def create_annotation(form, object_name, labels, frame):
    annotation = Region()
    annotation.object_name = object_name
    if form.cleaned_data['high_level']:
        annotation.full_frame = True
        annotation.x = 0
        annotation.y = 0
        annotation.h = 0
        annotation.w = 0
    else:
        annotation.full_frame = False
        annotation.x = form.cleaned_data['x']
        annotation.y = form.cleaned_data['y']
        annotation.h = form.cleaned_data['h']
        annotation.w = form.cleaned_data['w']
    annotation.text = form.cleaned_data['text']
    annotation.metadata = form.cleaned_data['metadata']
    annotation.frame = frame
    annotation.video = frame.video
    annotation.region_type = Region.ANNOTATION
    annotation.save()
    for lname in labels:
        if lname.strip():
            # get_or_create prevents duplicate Label rows for repeated tag names
            dl, _ = Label.objects.get_or_create(name=lname.strip(), set="UI")
            rl = RegionLabel()
            rl.video = annotation.video
            rl.frame = annotation.frame
            rl.region = annotation
            rl.label = dl
            rl.save()
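A minimal sketch (not part of the original) of how create_annotation might be wired into a view. RegionForm, the 'tags' and 'object_name' form fields, and the URL name are hypothetical, inferred from the cleaned_data keys used above.

def annotate_region(request, frame_pk):
    # Hypothetical caller: validate the form, then delegate to create_annotation
    frame = Frame.objects.get(pk=frame_pk)
    form = RegionForm(request.POST)
    if form.is_valid():
        labels = form.cleaned_data['tags'].split(',')
        create_annotation(form, form.cleaned_data['object_name'], labels, frame)
    return redirect('frame_detail', pk=frame.pk)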
Example #2
def perform_face_indexing(video_id):
    import logging
    from dvaapp.models import Region, Frame, Video, IndexEntries
    from dvalib import indexer, detector
    from dvaapp.operations.video_processing import WFrame, WVideo
    from django.conf import settings
    from scipy import misc
    face_indexer = indexer.FacenetIndexer()
    dv = Video.objects.get(id=video_id)
    video = WVideo(dv, settings.MEDIA_ROOT)
    frames = Frame.objects.filter(video=dv)
    wframes = [WFrame(video=video, frame_index=df.frame_index, primary_key=df.pk) for df in frames]
    input_paths = {f.local_path(): f.primary_key for f in wframes}
    faces_dir = '{}/{}/detections'.format(settings.MEDIA_ROOT, video_id)
    indexes_dir = '{}/{}/indexes'.format(settings.MEDIA_ROOT, video_id)
    face_detector = detector.FaceDetector()
    aligned_paths = face_detector.detect(wframes)
    logging.info("face detector returned aligned paths for %d frames", len(aligned_paths))
    faces = []
    faces_to_pk = {}
    count = 0
    for path, v in aligned_paths.items():
        for scaled_img, bb in v:
            d = Region()
            d.region_type = Region.DETECTION
            d.video = dv
            d.confidence = 100.0
            d.frame_id = input_paths[path]
            d.object_name = "mtcnn_face"
            left, top, right, bottom = bb[0], bb[1], bb[2], bb[3]
            d.y = top
            d.x = left
            d.w = right - left
            d.h = bottom - top
            d.save()
            # face_path already includes faces_dir, so save to it directly
            # (the original joined faces_dir onto the path a second time)
            face_path = '{}/{}.jpg'.format(faces_dir, d.pk)
            misc.imsave(face_path, scaled_img)
            faces.append(face_path)
            faces_to_pk[face_path] = d.pk
            count += 1
    dv.refresh_from_db()
    dv.detections = dv.detections + count
    dv.save()
    path_count, emb_array, entries, feat_fname, entries_fname = face_indexer.index_faces(faces, faces_to_pk,
                                                                                         indexes_dir, video_id)
    i = IndexEntries()
    i.video = dv
    i.count = len(entries)
    i.contains_frames = False
    i.contains_detections = True
    i.detection_name = "Face"
    i.algorithm = 'facenet'
    i.entries_file_name = entries_fname.split('/')[-1]
    i.features_file_name = feat_fname.split('/')[-1]
    i.save()
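The four assignments to d.x / d.y / d.w / d.h above encode the Region convention used throughout these examples: x/y is the top-left corner, w/h are width and height. A small helper (a sketch, not part of the original code) makes the conversion from a (left, top, right, bottom) box explicit:

def ltrb_to_xywh(bb):
    """Convert a (left, top, right, bottom) box into the (x, y, w, h)
    top-left/width/height convention Region uses."""
    left, top, right, bottom = bb[0], bb[1], bb[2], bb[3]
    return left, top, right - left, bottom - top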
Example #3
def ssd_detect(video_id):
    """
    This is a HACK since Tensorflow is absolutely atrocious in allocating and freeing up memory.
    Once a process / session is allocated a memory it cannot be forced to clear it up.
    As a result this code gets called via a subprocess which clears memory when it exits.

    :param video_id:
    :return:
    """
    import logging
    import os
    import sys
    import django
    from PIL import Image
    sys.path.append(os.path.dirname(__file__))
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
    django.setup()
    from django.conf import settings
    from dvaapp.models import Video, Region, Frame
    from dvalib import entity, detector
    dv = Video.objects.get(id=video_id)
    frames = Frame.objects.filter(video=dv)
    v = entity.WVideo(dvideo=dv, media_dir=settings.MEDIA_ROOT)
    wframes = {
        df.pk: entity.WFrame(video=v,
                             frame_index=df.frame_index,
                             primary_key=df.pk)
        for df in frames
    }
    detection_count = 0
    algorithm = detector.SSDetector()
    logging.info("starting detection {}".format(algorithm.name))
    frame_detections = algorithm.detect(list(wframes.values()))
    for frame_pk, detections in frame_detections.items():
        for d in detections:
            dd = Region()
            dd.region_type = Region.DETECTION
            dd.video = dv
            dd.frame_id = frame_pk
            dd.object_name = d['name']
            dd.confidence = d['confidence']
            dd.x = d['left']
            dd.y = d['top']
            dd.w = d['right'] - d['left']
            dd.h = d['bot'] - d['top']
            dd.save()
            img = Image.open(wframes[frame_pk].local_path())
            img2 = img.crop((d['left'], d['top'], d['right'], d['bot']))
            img2.save("{}/{}/detections/{}.jpg".format(settings.MEDIA_ROOT,
                                                       video_id, dd.pk))
            detection_count += 1
    dv.refresh_from_db()
    dv.detections = dv.detections + detection_count
    dv.save()
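Per the docstring, ssd_detect is meant to run in a throwaway subprocess so TensorFlow's memory is reclaimed when the process exits. A minimal sketch of such a caller; 'detect_ssd.py' is a hypothetical entry point, since the original only states that the function "gets called via a subprocess" without naming it.

import subprocess
import sys

# Spawn a fresh interpreter; all TensorFlow memory is freed when it exits.
subprocess.check_call([sys.executable, 'detect_ssd.py', str(video_id)])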
Example #4
def annotate_entire_frame(request, frame_pk):
    frame = Frame.objects.get(pk=frame_pk)
    annotation = None
    if request.method == 'POST':
        # Default missing POST keys to '' so .strip() cannot raise AttributeError
        if request.POST.get('text', '').strip() \
                or request.POST.get('metadata', '').strip() \
                or request.POST.get('object_name', None):
            annotation = Region()
            annotation.region_type = Region.ANNOTATION
            annotation.x = 0
            annotation.y = 0
            annotation.h = 0
            annotation.w = 0
            annotation.full_frame = True
            annotation.text = request.POST.get('text')
            annotation.metadata = request.POST.get('metadata')
            annotation.object_name = request.POST.get('object_name',
                                                      'frame_metadata')
            annotation.frame = frame
            annotation.video = frame.video
            annotation.save()
        for label_name in request.POST.get('tags', '').split(','):
            if label_name.strip():
                if annotation:
                    dl = RegionLabel()
                    dl.video = frame.video
                    dl.frame = frame
                    dl.label = Label.objects.get_or_create(name=label_name,
                                                           set="UI")[0]
                    dl.region = annotation
                    dl.save()
                else:
                    dl = FrameLabel()
                    dl.video = frame.video
                    dl.frame = frame
                    dl.label = Label.objects.get_or_create(name=label_name,
                                                           set="UI")[0]
                    dl.save()
    return redirect("frame_detail", pk=frame.pk)
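A quick way to exercise the view above is Django's test client; the URL below is an assumption, since the original snippet shows only the view, not the urlconf.

from django.test import Client

client = Client()
# Hypothetical URL; adjust to the project's actual URL pattern.
client.post('/annotate_entire_frame/1/', {
    'text': 'two divers near the gate',
    'metadata': '',
    'tags': 'diver,gate',
})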
Example #5
def create_yolo_test_data():
    import json
    import shutil
    import numpy as np
    import os
    from PIL import Image
    setup_django()
    from dvaapp.shared import handle_uploaded_file
    from django.core.files.uploadedfile import SimpleUploadedFile
    from dvaapp.models import Region, TEvent, Frame, AppliedLabel
    from dvaapp.tasks import extract_frames, export_video_by_id
    # Replaces the original bare try/except: pass blocks, which would also
    # swallow KeyboardInterrupt and similar non-OS errors
    shutil.rmtree('tests/yolo_test', ignore_errors=True)
    os.makedirs('tests/yolo_test', exist_ok=True)
    data = np.load('shared/underwater_data.npz')
    json_test = {}
    json_test['anchors'] = [(0.57273, 0.677385), (1.87446, 2.06253),
                            (3.33843, 5.47434), (7.88282, 3.52778),
                            (9.77052, 9.16828)]
    id_2_boxes = {}
    class_names = {
        0: "red_buoy",
        1: "green_buoy",
        2: "yellow_buoy",
        3: "path_marker",
        4: "start_gate",
        5: "channel"
    }
    for i, image in enumerate(data['images'][:500]):
        path = "tests/yolo_test/{}.jpg".format(i)
        Image.fromarray(image).save(path)
        id_2_boxes[path.split('/')[-1]] = data['boxes'][i].tolist()
    local('zip tests/yolo_test.zip -r tests/yolo_test/* ')
    fname = "tests/yolo_test.zip"
    name = "yolo_test"
    # open() replaces the Python 2-only file() builtin; read as bytes for a zip
    with open(fname, 'rb') as zipped:
        f = SimpleUploadedFile(fname, zipped.read(),
                               content_type="application/zip")
    dv = handle_uploaded_file(f, name)
    extract_frames(TEvent.objects.create(video=dv).pk)
    for df in Frame.objects.filter(video=dv):
        for box in id_2_boxes[df.name]:
            r = Region()
            r.video = dv
            r.frame = df
            c, top_x, top_y, bottom_x, bottom_y = box
            r.object_name = class_names[c]
            r.region_type = Region.ANNOTATION
            r.x = top_x
            r.y = top_y
            r.w = bottom_x - top_x
            r.h = bottom_y - top_y
            r.save()
            l = AppliedLabel()
            l.frame = df
            l.video = dv
            l.label_name = class_names[c]
            l.region = r
            l.save()
    export_video_by_id(TEvent.objects.create(video=dv).pk)
    shutil.rmtree('tests/yolo_test', ignore_errors=True)
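Each entry in data['boxes'] is unpacked above as (class_index, top_x, top_y, bottom_x, bottom_y), keyed by the frame's file name. A small sanity-check sketch, assuming the same dv video object, showing that the stored regions round-trip back to those corners:

# Recover the (top_x, top_y, bottom_x, bottom_y) corners from a stored
# Region, which keeps the top-left corner plus width/height.
for r in Region.objects.filter(video=dv, region_type=Region.ANNOTATION):
    corners = (r.x, r.y, r.x + r.w, r.y + r.h)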
Example #6
class_names = {
    0: "red_buoy",
    1: "green_buoy",
    2: "yellow_buoy",
    3: "path_marker",
    4: "start_gate",
    5: "channel"
}
labels = {k: Label.objects.create(name=v, set="test") for k, v in class_names.items()}
for i, image in enumerate(data['images'][:500]):
    path = "/Users/aub3/tests/yolo_test/{}.jpg".format(i)
    Image.fromarray(image).save(path)
    id_2_boxes[path.split('/')[-1]] = data['boxes'][i].tolist()
local('zip /Users/aub3/tests/yolo_test.zip -r /Users/aub3/tests/yolo_test/* ')
fname = "/Users/aub3/tests/yolo_test.zip"
name = "yolo_test"
f = SimpleUploadedFile(fname, open(fname, 'rb').read(), content_type="application/zip")
dv = handle_uploaded_file(f, name)
perform_dataset_extraction(TEvent.objects.create(video=dv).pk)
for df in Frame.objects.filter(video=dv):
    for box in id_2_boxes[df.name]:
        r = Region()
        r.video = dv
        r.frame = df
        c, top_x, top_y, bottom_x, bottom_y = box
        r.object_name = class_names[c]
        r.region_type = Region.ANNOTATION
        r.x = top_x
        r.y = top_y
        r.w = bottom_x - top_x
        r.h = bottom_y - top_y
        r.save()
        l = RegionLabel()
        l.frame = df
        l.video = dv
        l.label = labels[c]
        l.region = r
        l.save()