# Example 1
# 0
def lda(directories):
    """Rank Haar features by importance and write the ranking to a file.

    Loads training images from *directories*, extracts Haar features from
    each image, fits an ExtraTreesClassifier on the feature matrix, and
    writes every (size, x, y, type) feature whose importance exceeds a
    small threshold to ``haarImportance.txt``, sorted by importance
    (descending).

    Args:
        directories: passed straight through to ``load()`` — presumably a
            list of image directories; confirm against load()'s signature.
    """
    images = load(directories, True, permute=False)

    f = HaarFeature()
    # Feature matrix: one row of Haar responses per image.
    # (Renamed from `x`, which the original shadowed with a loop index below.)
    features = []

    for idx, im in enumerate(images):
        print("%d/%d" % (idx, len(images)))
        features.append(np.array(f.process(im)))

    # Map string labels to contiguous integer class indices.
    y_train = [im.label for im in images]
    classes = list(set(y_train))
    class_to_index = {key: index for index, key in enumerate(classes)}
    labels = np.array([class_to_index[name] for name in y_train])

    clf = ExtraTreesClassifier()
    clf = clf.fit(features, labels)
    w, h = f.size, f.size

    # Walk the feature grid in the exact order HaarFeature.process emits
    # values, so index i lines up with clf.feature_importances_.
    i = 0
    filtered = []
    for size in f.haarSizes:
        for x in range(w - size):
            for y in range(h - size):
                for haar_type in range(len(f.haars)):
                    score = clf.feature_importances_[i]
                    if score > 0.000001:
                        filtered.append((size, x, y, haar_type, score))
                    i += 1

    sorted_filtered = sorted(filtered, key=lambda tup: tup[4], reverse=True)

    # Context manager guarantees the file is closed even on error.
    with open("haarImportance.txt", "w") as text_file:
        for k in sorted_filtered:
            text_file.write("[size=%d][x=%d][y=%d][type=%d] \t=> %f\n" % k)
# Example 2
# 0
def load_images(directories, is_train=False, permute=True):
    """Load images from *directories*; thin pass-through wrapper around load()."""
    loaded = load(directories, is_train, permute)
    return loaded
# Example 3
# 0
def test_feature(directories, trainers):
    """Run ColorFeature on every loaded image whose filename contains
    '01005_05517' (smoke test for the feature extractor).

    Note: *trainers* is accepted but unused, matching the original signature.
    """
    for img in load(directories, True, permute=False):
        if '01005_05517' not in img.filename:
            continue
        ColorFeature().process(img)
# Example 4
# 0
def test_feature(directories, trainers):
    """Apply ColorFeature to images matching a hard-coded filename fragment.

    NOTE(review): this is a byte-for-byte duplicate of an earlier
    test_feature definition; in a single module the later one wins.
    *trainers* is unused (kept for signature compatibility).
    """
    target = '01005_05517'
    images = load(directories, True, permute=False)
    for im in (i for i in images if target in i.filename):
        feature = ColorFeature()
        feature.process(im)
# Example 5
# 0
def load_images(directories, is_train=False, permute=True):
    """Fetch images via load(); see load() for the argument semantics."""
    args = (directories, is_train, permute)
    return load(*args)
# Example 6
# 0
    def Task(progress):
        """Compute and store one face encoding per URL.

        Closure over *urls*, *memo*, and *accurate* from the enclosing
        scope. URLs with exactly one detected face get their encoding
        saved via memo.upd_urls; URLs with zero or multiple faces are
        removed. URLs already encoded, or whose image file is missing,
        are skipped.
        """
        for index, url in enumerate(urls):
            progress.update(index + 1)
            path = 'images/' + convertUrlToName(url)

            # Skip if an encoding already exists or the image was never
            # downloaded.
            already_encoded = memo.getUrlData(url)['encoding'] != []
            if already_encoded or not os.path.isfile(path):
                continue

            image = face_recognition.load_image_file(path)
            face_locations = face_recognition.face_locations(image)

            if len(face_locations) != 1:
                # Ambiguous (or no) face: drop the URL entirely.
                memo.remove_urls([url])
                continue

            vector = face_recognition.face_encodings(
                image,
                known_face_locations=face_locations,
                num_jitters=accurate)[0]
            memo.upd_urls({url: {'encoding': list(vector)}})

    execTask(name='calculate encodings:', size=len(urls), task=Task)


if __name__ == '__main__':
    # Script entry point: first populate the local image store
    # (image_loader.load() — presumably downloads/indexes images; confirm
    # against the image_loader module), then compute any missing face
    # encodings for them.
    image_loader.load()
    fillEncodings()