def precision_recall(data,
                     cls,
                     prob_threshold,
                     iou_treshold,
                     pred_field='predictions',
                     gt_field='ground_truth'):
    """Compute (precision, recall) for one object class over a dataset.

    Args:
        data: iterable of per-image dicts carrying box lists.
        cls: class tag to evaluate.
        prob_threshold: minimum confidence for a prediction to count.
        iou_treshold: minimum IoU for a prediction to match a ground truth
            box (name kept misspelled for interface compatibility).
        pred_field: dict key holding predicted boxes.
        gt_field: dict key holding ground-truth boxes.

    Returns:
        (precision, recall) tuple of floats; degenerate 0-denominator
        cases fall back to float(TP > 0).
    """
    TP = 0.0    # ground-truth boxes matched by at least one prediction
    TPFP = 0.0  # total positive results / pred (all confident predictions)
    TPFN = 0.0  # total existing cases / rel (all ground-truth boxes)
    for obj in data:
        ground_truth = (obj[gt_field]
                        | mp.where(lambda x: x['tag'] == cls)
                        | mp.as_list)
        TPFN += len(ground_truth)
        predictions = (
            obj[pred_field]
            |
            mp.where(lambda x: x['tag'] == cls and x['prob'] > prob_threshold)
            | mp.as_list)
        # BUG FIX: the precision denominator must count *every* confident
        # prediction (TP + FP), not just those that matched a ground-truth
        # box — the original only incremented TPFP inside the match branch.
        TPFP += len(predictions)
        for gt_box in ground_truth:
            pred_boxes = (
                predictions
                | mp.apply(
                    ['x1', 'y1', 'width', 'height'], 'iou',
                    lambda x: intersection_over_union(x, (gt_box['x1'], gt_box[
                        'y1'], gt_box['width'], gt_box['height'])))
                # BUG FIX: a prediction matches when IoU meets or exceeds
                # the threshold; the original kept boxes with iou < threshold,
                # i.e. counted the *non*-overlapping predictions as hits.
                | mp.filter('iou', lambda x: x >= iou_treshold)
                | mp.as_list)
            if len(pred_boxes) > 0:
                TP += 1
    return (float(TP > 0) if TPFP == 0 else TP / TPFP,
            float(TP > 0) if TPFN == 0 else TP / TPFN)
예제 #2
0
def coco_to_custom_vision(stream, project_id, trainer, data_dir, batch_size=50):
    """Upload a stream of annotated images to an Azure Custom Vision project.

    Boxes smaller than 10% of the image in either dimension are dropped,
    images left with no boxes are skipped, and the remaining images are
    uploaded with normalized bounding-box regions in batches.

    Args:
        stream: mPyPl stream of dicts with 'filename', 'width', 'height',
            'class_id' and 'ground_truth' (list of box dicts) fields.
        project_id: Custom Vision project id.
        trainer: Custom Vision training client.
        data_dir: directory containing the image files.
        batch_size: images per upload request (default 50 preserves the
            original behavior; the service caps batches, so keep it small).
    """

    def _read_bytes(path):
        # Read via a context manager so each file handle is closed promptly
        # (the original `open(...).read()` leaked one handle per image).
        with open(path, mode="rb") as f:
            return f.read()

    stream = stream | mp.as_list
    # Create one Custom Vision tag per distinct class id.
    tags = stream | mp.select_field('class_id') | mp.dedup() | mp.as_list
    cv_tags = {tag: trainer.create_tag(project_id, tag) for tag in tags}

    stream = (
        stream
        # Drop boxes narrower/shorter than 10% of the image dimensions.
        | mp.apply(['width', 'height', 'ground_truth'], 'ground_truth',
                   lambda x: x[2]
                   | mp.where(lambda box: (box['width'] >= x[0] * 0.1) and
                              (box['height'] >= x[1] * 0.1))
                   | mp.as_list)
        # Skip images that have no boxes left after filtering.
        | mp.filter('ground_truth', lambda x: len(x) > 0)
        # Convert pixel boxes to Custom Vision regions normalized to [0, 1].
        | mp.apply(
            ['width', 'height', 'ground_truth'], 'regions', lambda x: x[2]
            | mp.select(lambda box: Region(tag_id=cv_tags[box['tag']].id,
                                           left=box['x1'] / x[0],
                                           top=box['y1'] / x[1],
                                           width=box['width'] / x[0],
                                           height=box['height'] / x[1]))
            | mp.as_list)
        | mp.apply(['filename', 'regions'], 'tagged_img',
                   lambda x: ImageFileCreateEntry(
                       name=x[0],
                       contents=_read_bytes(join(data_dir, x[0])),
                       regions=x[1]))
        | mp.as_list)
    tagged_images_with_regions = stream | mp.select_field(
        'tagged_img') | mp.as_list
    # Upload in batches; the service limits images per request.
    for i in range(0, len(tagged_images_with_regions), batch_size):
        trainer.create_images_from_files(
            project_id,
            images=tagged_images_with_regions[i:i + batch_size])
예제 #3
0
from pipe import Pipe
from moviepy.editor import *
import numpy as np
import itertools
import cv2
import math
import matplotlib.pyplot as plt

import keras
from keras.models import Sequential
from keras.layers import *
from keras.regularizers import l2

# Get list of test videos from matches.json
test_names = (from_json(os.path.join(source_dir, 'matches.json'))
              | mp.where(lambda x: 'Test' in x.keys() and int(x['Test']) > 0)
              | mp.apply(['Id', 'Half'], 'pattern',
                         lambda x: "{}_{}_".format(x[0], x[1]))
              | mp.select_field('pattern')
              | mp.as_list)

# Build the training datastream: resized match videos split into train/test
# by filename prefix, limited to 1000 items, each paired with its
# pre-computed audio feature array.
# NOTE(review): this statement continues beyond this chunk — the closing
# parenthesis of the pipeline is not visible here.
data = (mp.get_datastream(data_dir, ext=".resized.mp4")
        # Items whose filename matches a test prefix go to the test split.
        | datasplit_by_pattern(test_pattern=test_names)
        | stratify_sample_tt(shuffle=True)
        | summary()
        | mp.take(1000)
        | mp.iter('filename', lambda x: print("Processing {}".format(x)))
        # Load the .audiofeatures.npy file stored next to each video.
        | mp.apply(
            'filename', 'aud',
            lambda x: np.load(x.replace('.resized.mp4', '.audiofeatures.npy')))
        # Rescale audio features into [-1, 1].
        | normalize_npy_value('aud', interval=(-1, 1))
예제 #4
0
# CLI arguments: the directory of images to process and an optional face
# group name.  NOTE(review): `parser`, `conf` and `face` are defined earlier
# in the file, outside this chunk.
parser.add_argument("dir",
                    help="Directory of pictures to be processed",
                    default=".")
parser.add_argument("--facegroup",
                    help="Name of face group to use for face recognition",
                    default="")

args = parser.parse_args()

# Point the Face API client at the configured endpoint and key.
face.BaseUrl.set(conf['FaceApi']['Endpoint'])
face.Key.set(conf['FaceApi']['Key'])

print("Face API Calling Utility")

# Every file in the target directory except previously written .json results.
files = mp.get_files(
    args.dir) | mp.where(lambda x: not x.endswith('.json')) | mp.as_list
# Run face detection on each image, skipping ones whose .json result
# already exists on disk.
for x in files:
    print(" + {}".format(x), end='')
    jsfn = os.path.splitext(x)[0] + '.json'
    if os.path.isfile(jsfn):
        # Result already cached — do not re-call the paid API.
        print(" -> skip")
        continue
    res = []
    try:
        res = face.face.detect(
            x, True, True,
            'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur'
        )
    except Exception:
        # Best-effort: a failed API call simply leaves res == [].
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt / SystemExit, making the loop uninterruptible.
        pass