def precision_recall(data,
                     cls,
                     prob_threshold,
                     iou_threshold,
                     pred_field='predictions',
                     gt_field='ground_truth'):
    TP = 0.0
    TPFP = 0.0  # total positive results / pred
    TPFN = 0.0  # total existing cases / rel
    for obj in data:
        ground_truth = (obj[gt_field]
                        | mp.where(lambda x: x['tag'] == cls)
                        | mp.as_list)
        TPFN += len(ground_truth)
        predictions = (
            obj[pred_field]
            | mp.where(lambda x: x['tag'] == cls and x['prob'] > prob_threshold)
            | mp.as_list)
        for gt_box in ground_truth:
            pred_boxes = (
                predictions
                | mp.apply(
                    ['x1', 'y1', 'width', 'height'], 'iou',
                    lambda x: intersection_over_union(
                        x, (gt_box['x1'], gt_box['y1'],
                            gt_box['width'], gt_box['height'])))
                # keep only predictions overlapping this ground-truth box enough
                | mp.filter('iou', lambda x: x > iou_threshold)
                | mp.as_list)
            if len(pred_boxes) > 0:
                TP += 1
                TPFP += len(pred_boxes)
    return (float(TP > 0) if TPFP == 0 else TP / TPFP,
            float(TP > 0) if TPFN == 0 else TP / TPFN)
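# The intersection_over_union helper used above is not part of this excerpt; the
# sketch below is an assumption, treating boxes as (x1, y1, width, height) tuples
# exactly as precision_recall passes them.
def intersection_over_union(box_a, box_b):
    ax1, ay1, aw, ah = box_a
    bx1, by1, bw, bh = box_b
    # overlap rectangle
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax1 + aw, bx1 + bw), min(ay1 + ah, by1 + bh)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = aw * ah + bw * bh - inter
    return inter / union if union > 0 else 0.0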
Example #2
def coco_to_custom_vision(stream, project_id, trainer, data_dir):
    stream = stream | mp.as_list
    tags = stream | mp.select_field('class_id') | mp.dedup() | mp.as_list
    cv_tags = {tag: trainer.create_tag(project_id, tag) for tag in tags}

    stream = (
        stream
        | mp.apply(['width', 'height', 'ground_truth'], 'ground_truth',
                   lambda x: x[2]
                   | mp.where(lambda box: (box['width'] >= x[0] * 0.1) and
                              (box['height'] >= x[1] * 0.1))
                   | mp.as_list)
        | mp.filter('ground_truth', lambda x: len(x) > 0)
        | mp.apply(
            ['width', 'height', 'ground_truth'], 'regions', lambda x: x[2]
            | mp.select(lambda box: Region(tag_id=cv_tags[box['tag']].id,
                                           left=box['x1'] / x[0],
                                           top=box['y1'] / x[1],
                                           width=box['width'] / x[0],
                                           height=box['height'] / x[1]))
            | mp.as_list)
        | mp.apply(['filename', 'regions'], 'tagged_img',
                   lambda x: ImageFileCreateEntry(
                       name=x[0],
                       contents=open(join(data_dir, x[0]), mode="rb").read(),
                       regions=x[1]))
        | mp.as_list)
    tagged_images_with_regions = stream | mp.select_field(
        'tagged_img') | mp.as_list
    for i in range(0, len(tagged_images_with_regions), 50):
        trainer.create_images_from_files(
            project_id, images=tagged_images_with_regions[i:i + 50])
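# For reference, coco_to_custom_vision expects each stream item to be an mPyPl dict
# carrying the fields it reads above; an illustrative item (all values invented):
sample_item = {
    'filename': 'IMG_0001.jpg',        # image file relative to data_dir
    'width': 1920, 'height': 1080,     # used to normalise region coordinates
    'class_id': 'person',              # used to create Custom Vision tags
    'ground_truth': [
        {'tag': 'person', 'x1': 420, 'y1': 310, 'width': 260, 'height': 540},
    ],
}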
Example #3
    # NOTE: this excerpt begins mid-script; the guard below is an assumed
    # reconstruction around the orphaned "else" branch (batch index k of n
    # taken from the command line).
    if len(sys.argv) > 2:
        k = int(sys.argv[1])
        n = int(sys.argv[2])
        config.base_dir = config.base_dir_batch
        config.data_dir = config.data_dir_batch
    else:
        k, n = 0, 1

    (mp.get_datastream(data_dir, ext=".full.mp4")
     | batch(k, n)
     | mp.fapply('video', resize_video.load_resize)
     | execute)

    resized_file_names = (mp.get_datastream(data_dir, ext=".resized.mp4")
                          | mp.select_field("filename")
                          | mp.as_list)

    # use only the first threshold
    scene_changes = get_scene_changes(resized_file_names, data_dir)[40]

    (mp.get_datastream(data_dir, ext=".resized.mp4")
     | mp.filter("filename", lambda f: os.path.abspath(f) not in scene_changes)
     | cachecomputex(".resized.mp4", ".optflow.npy", create_denseflow.calcflow,
                     functools.partial(skip, s="creating dense flow"))
     | cachecomputex(".resized.mp4", ".vgg.npy", create_vgg.calcvgg,
                     functools.partial(skip, s="creating VGG"))
     | cachecomputex(".resized.mp4", ".audiofeatures.npy",
                     create_audio_features.calcaudio,
                     functools.partial(skip, s="creating audio features"))
     | close_moviepy_video()
     | execute)
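# The batch(k, n) pipe used at the start of this pipeline is defined elsewhere in
# the script; a plausible sketch (an assumption, not the original definition) that
# lets worker k of n process a disjoint slice of the videos:
@mp.Pipe
def batch(datastream, k, n):
    # pass through only the items whose position falls into batch k of n
    return (x for i, x in enumerate(datastream) if i % n == k)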
Example #4
face.BaseUrl.set(conf['FaceApi']['Endpoint'])
face.Key.set(conf['FaceApi']['Key'])

classes = mp.get_classes(args.dir)
data = mp.get_datastream(args.dir, classes=classes) | mp.as_list

print("Person Group Trainer Utility")
print(" + found {} people".format(len(classes)))
print(" + Creating face group {}".format(args.facegroup))
face.person_group.create(args.facegroup, name=args.facegroup)

people = {}

for p in classes.keys():
    photos = data | mp.filter('class_name', lambda x: x == p) | mp.as_list
    print("Adding person {} - {} pics".format(p, len(photos)))
    pers = face.person.create(args.facegroup, p)
    people[pers['personId']] = p
    for x in photos:
        print(" + Adding photo {}".format(x['filename']), end='')
        try:
            face.person.add_face(x['filename'], args.facegroup,
                                 pers['personId'])
            print("-> ok")
        except Exception:
            print("-> error")

print("Training...")
face.person_group.train(args.facegroup)
print("All done")
Example #5
    print(metrics.classification_report(yte, y_pred))
    print(metrics.confusion_matrix(yte, y_pred))
    # plotConfusion(yte, y_pred, title=title)
    return y_pred, clf


cls = SVM(X_tr, X_te, Y_tr, Y_te, C=25)

print(cls)

exit(0)
trainstream, valstream = data | mp.make_train_test_split

no_intervals = 200
no_features = 34
no_train = data | mp.filter('split',
                            lambda x: x == mp.SplitType.Train) | mp.count
no_test = data | mp.filter('split',
                           lambda x: x == mp.SplitType.Test) | mp.count
print("Training samples = {}\nTesting samples = {}".format(no_train, no_test))
batchsize = 32

model = Sequential()
#model.add(Conv1D(10,5,input_shape=(no_features,no_intervals),data_format='channels_first'))
#model.add(MaxPooling1D(data_format='channels_first'))
#model.add(Conv1D(20,5,input_shape=(no_features,no_intervals),data_format='channels_first'))
#model.add(MaxPooling1D(data_format='channels_first'))
model.add(Flatten(input_shape=(no_features, no_intervals)))
model.add(Dropout(0.3))
model.add(
    Dense(3, activation='softmax',
          kernel_initializer='glorot_uniform'))  #kernel_regularizer=l2(0.01)))
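# The excerpt stops before the model is compiled and trained; a typical continuation
# (a sketch only - the feature arrays below are assumptions, not part of the excerpt):
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(X_train, Y_train, batch_size=batchsize, epochs=10,
#           validation_data=(X_val, Y_val))   # hypothetical arrays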
Example #6
print(" + Loading descriptions from {}".format(dir))


def loadjs(fn):
    with open(fn) as f:
        return json.load(f)


min_size = size / 3 if args.ignore_small else 0
max_faces_no = 2 if args.ignore_multiface else 99999

data = (mp.get_files(dir, ext='.json')
        | mp.as_field('filename')
        | mp.apply('filename', 'descr', loadjs)
        | mp.filter('descr', lambda x: len(x) > 0 and len(x) < max_faces_no)
        | mp.unroll('descr')
        | mp.filter(
            'descr', lambda x: abs(x['faceAttributes']['headPose']['yaw']) < 15
            and abs(x['faceAttributes']['headPose']['pitch']) < 15)
        | mp.filter(
            'descr', lambda x: x['faceLandmarks']['pupilRight']['x'] - x[
                'faceLandmarks']['pupilLeft']['x'] > min_size)
        | mp.as_list)
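# For reference, the fields the two filters above read from each Face API face
# description (structure per the Face API detect response; values illustrative):
sample_descr = {
    'faceAttributes': {'headPose': {'yaw': 3.4, 'pitch': -1.2, 'roll': 0.5}},
    'faceLandmarks': {'pupilLeft': {'x': 412.0, 'y': 278.3},
                      'pupilRight': {'x': 471.9, 'y': 276.1}},
}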

print(" + Found {} faces".format(len(data)))

print(" + Storing dataset...")


@mp.Pipe
Example #7
        res = face.face.detect(fn, True, True, '')
        if not args.nocache:
            cache[h] = res
        res = res[0]['faceLandmarks']
        #print(res[0]['faceLandmarks'])
    except Exception:
        pass
    return res

print(" + Loading images from {}".format(dir))

data = (
    mp.get_files(dir)
    | mp.as_field('filename')
    | mp.apply_nx('filename', 'image', lambda x: cv2.imread(x),
                  print_exceptions=False)
    | mp.filter('image', lambda x: x is not None)
    | mp.as_list)

print(" + Found {} images".format(len(data)))

print(" + Extracting facial landmarks...")

data = (
    data
    | mp.apply(['filename', 'image'], 'landmarks', detect)
    | mp.filter('landmarks', lambda x: x != [])
    | mp.as_list)

if not args.nocache:
    print(" + Saving cache...")
Example #8
def merge(images, wts=None):
    # Weighted average of a list of equally-sized images; weights default to
    # uniform and are normalised to sum to 1 before blending.
    res = np.zeros_like(images[0], dtype=np.float32)
    if wts is None:
        wts = np.ones(len(images))
    wts = wts / np.sum(wts)  # out-of-place so an integer weight array also works
    for n, i in enumerate(images):
        res += wts[n] * i.astype(np.float32)
    return res.astype(np.int32)
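# A short usage sketch for merge (file names invented; cv2 is used for loading
# here, as in the other examples):
imgs = [cv2.imread(f) for f in ('face1.jpg', 'face2.jpg', 'face3.jpg')]
avg = merge(imgs, wts=np.array([2.0, 1.0, 1.0]))  # weight the first image more heavily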


data = (mp.get_files(dir, ext='.json')
        | mp.as_field('filename')
        | mp.apply('filename', 'descr', loadjs)
        | mp.unroll('descr')
        | mp.filter(
            'descr', lambda x: person_id == "" or
            ('candidates' in x and person_id in
             [z['personId'] for z in x['candidates']]))
        | mp.filter(
            'descr', lambda x: abs(x['faceAttributes']['headPose']['yaw']) < 15
            and abs(x['faceAttributes']['headPose']['pitch']) < 15)
        | mp.filter(
            'descr', lambda x: x['faceLandmarks']['pupilRight']['x'] - x[
                'faceLandmarks']['pupilLeft']['x'] > 50)
        | mp.as_list)

print("Found {} faces".format(len(data)))


def get_transform(descr):
    f = descr['faceLandmarks']
    mc_x = (f['mouthLeft']['x'] + f['mouthRight']['x']) / 2.0
Example #9
    season = match["Season"].replace("/", "")

    h = max(0, int(match["Half"]))

    print(f"Processing {match['Video']} ({match['Id']})\n")
    videoFilePath = os.path.join(source_dir,match["Video"])
    marksFilePath = os.path.join(source_dir,"Marks.jsonl")
    video = VideoFileClip(videoFilePath)

    data = []
    with open(marksFilePath,'r') as f:
        for cnt, line in enumerate(f):
            data.append(json.loads(line))

    # Get list of times when match halves start
    mt = data | mp.filter('eventType', lambda x: "start" in x) | mp.select_field('matchTime') | mp.as_list
    mt = convtime(mt[h])

    # Shots are fine, added also Goal which are not marked as shots
    cuts = data | mp.filter('eventType', shotFilter) | mp.apply('matchTime', 'start', convtime)

    # Consider, for negative examples, also Attacks, which are Shots nearby the goal area (or in other places of the field)
    # They are also filtered (later), removing those overlapping with shots/goals
    attacks = data | mp.filter('eventType', attackFilter) | mp.apply('matchTime', 'start', convtime)

    clipTrimTime = float(args.clipTrimTime)                       # +/- X seconds centered on event time

    clipTrimAttackTimeRange = float(args.clipTrimAttackTimeRange) # +/- X seconds centered on event time
    clipTrimAttackStart = float(args.clipTrimAttackStart)         # when attack clip starts (before event time)
    clipTrimAttackEnd = float(args.clipTrimAttackEnd)             # when attack clip ends (after event time)
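    # The trim windows above are presumably used to cut short clips around each
    # event; a sketch using moviepy's subclip (the output name and the half-start
    # offset handling are assumptions, not part of the excerpt):
    t = 75.0  # illustrative event time in seconds
    clip = video.subclip(max(0, t - clipTrimTime),
                         min(video.duration, t + clipTrimTime))
    # clip.write_videofile('shot_example.mp4')  # hypothetical output name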