Example #1
File: views.py  Project: rhina/web_obret
# Django view excerpt; get_twimg_urls and render_json_response are
# project-local helpers, and io is presumably skimage.io (its imread
# accepts URLs).
def search_ajax(request):
    screen_name = request.POST.get("keyword")
    urls = get_twimg_urls(screen_name)
    pred_model = 'openface/models/dlib/shape_predictor_68_face_landmarks.dat'
    align = NaiveDlib(pred_model)
    heights = []
    boxes = {}
    for i, url in enumerate(urls):
        bgrImg = io.imread(url)
        # skimage's imread returns RGB, so this swap actually produces BGR;
        # the variable names are kept as in the source
        rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
        heights.append(rgbImg.shape[0])
        # bb = align.getLargestFaceBoundingBox(rgbImg)
        bbs = align.getAllFaceBoundingBoxes(rgbImg)
        if i not in boxes:  # dict.has_key() no longer exists in Python 3
            boxes[i] = []
        for bb in bbs:
            boxes[i].append([bb.left(), bb.top(), bb.width(), bb.height()])
    print(boxes)
    print(request.POST)
    # images = request.POST["images[]"]
    # print images
    params = {'images': urls, 'boxes': boxes, 'heights': heights}
    # params = {'test':'test'}
    # params['keyword'] = ""
    # params = test_search(keyword)
    # params['images'] = images
    # response = HttpResponse(json.dumps(params, ensure_ascii=False, indent=2), \
    #         content_type='application/json; charset=UTF-8', status=j
    return render_json_response(params)
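render_json_response is a project helper that isn't shown here; judging from the commented-out HttpResponse above, a minimal Django equivalent might look like this (a sketch, not the project's actual implementation):

import json
from django.http import HttpResponse

def render_json_response(params, status=200):
    # Serialize the dict and return it as a JSON HTTP response,
    # mirroring the commented-out HttpResponse call in the view.
    return HttpResponse(json.dumps(params, ensure_ascii=False, indent=2),
                        content_type='application/json; charset=UTF-8',
                        status=status)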
Example #2
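The Face container used below is not shown in this excerpt; a minimal sketch, inferred from how .rep and .identity are accessed in the methods that follow:

class Face(object):
    def __init__(self, rep, identity):
        self.rep = rep            # embedding vector from net.forwardImage
        self.identity = identity  # label this face was trained as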
class FaceService(object):
    def __init__(self):
        self.align = NaiveDlib(os.path.join(dlibModelDir, "mean.csv"),
                               os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
        self.net = openface.TorchWrap(os.path.join(openfaceModelDir, 'nn4.v1.t7'),
                                      imgDim=96, cuda=False)

        self.identities = []

        if os.path.exists("trained_images.pickle"):
            self.trained_images = pickle.load(open('trained_images.pickle', 'rb'))
            identities_set = set()
            for trained_image in self.trained_images.values():
                identities_set.add(trained_image.identity)

            self.identities = list(identities_set)
            self.identities.sort()
        else:
            self.trained_images = {}

        if os.path.exists("svm.pickle"):
            self.svm = pickle.load(open('svm.pickle', 'rb'))
        else:
            self.svm = None

    def training(self, identity, images):
        X = []
        y = []

        training_result = []
        identities_set = set()

        for img in self.trained_images.values():
            X.append(img.rep)
            y.append(img.identity)
            identities_set.add(img.identity)

        identities_set.add(identity)
        self.identities = list(identities_set)
        self.identities.sort()

        for image in images:
            bbs = self.align.getAllFaceBoundingBoxes(image)

            if len(bbs) != 1:  # 'is not' compared identity, not value
                training_result.append('0 or many people in image')
                continue
                # raise Exception('0 or many people in image')

            bb = bbs[0]
            alignedFace = self.align.alignImg("affine", 96, image, bb)
            if alignedFace is None:
                training_result.append('no face found in image')
                continue

            # save_array(alignedFace, "train.png")

            phash = str(imagehash.phash(Image.fromarray(alignedFace)))
            if phash in self.trained_images:
                rep = self.trained_images[phash].rep
                training_result.append('already trained')
            else:
                rep = self.net.forwardImage(alignedFace)
                self.trained_images[phash] = Face(rep, identity)

                X.append(rep)
                y.append(identity)

                training_result.append(0)

        # Map each identity label to its index in the sorted identities list
        # (renamed loop variable to avoid shadowing the identity parameter).
        for index, known_identity in enumerate(self.identities):
            for i in range(len(y)):
                if y[i] == known_identity:
                    y[i] = index

        if len(self.identities) > 1:
            X = np.vstack(X)
            y = np.array(y)

            param_grid = [
                {'C': [1, 10, 100, 1000],
                 'kernel': ['linear']},
                {'C': [1, 10, 100, 1000],
                 'gamma': [0.001, 0.0001],
                 'kernel': ['rbf']}
            ]
            print "*" * 60
            for x in X:
                print x[:4]
            print y
            self.svm = GridSearchCV(SVC(C=0.5, probability=True), param_grid, cv=5).fit(X, y)

        return training_result
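
    # Not part of the excerpt: __init__ restores trained_images.pickle and
    # svm.pickle, but nothing shown here ever writes them. A minimal
    # persistence sketch (assumption):
    def save(self):
        with open('trained_images.pickle', 'wb') as f:
            pickle.dump(self.trained_images, f)
        if self.svm is not None:
            with open('svm.pickle', 'wb') as f:
                pickle.dump(self.svm, f)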

    def remove_face(self, identity):
        X = []
        y = []

        remove_faces = []
        identities_set = set()

        for key, value in self.trained_images.items():
            if value.identity == identity:
                remove_faces.append(key)
            else:
                X.append(value.rep)
                y.append(value.identity)
                identities_set.add(value.identity)

        self.identities = list(identities_set)
        self.identities.sort()

        for key in remove_faces:
            del self.trained_images[key]

        # Re-map labels to indices, as in training (loop variable renamed to
        # avoid shadowing the identity parameter being removed).
        for index, known_identity in enumerate(self.identities):
            for i in range(len(y)):
                if y[i] == known_identity:
                    y[i] = index

        if len(self.identities) > 1:
            X = np.vstack(X)
            y = np.array(y)

            param_grid = [
                {'C': [1, 10, 100, 1000],
                 'kernel': ['linear']},
                {'C': [1, 10, 100, 1000],
                 'gamma': [0.001, 0.0001],
                 'kernel': ['rbf']}
            ]
            print "*" * 60
            for x in X:
                print x[:4]
            print y
            self.svm = GridSearchCV(SVC(C=0.5, probability=True), param_grid, cv=5).fit(X, y)
        else:
            self.svm = None

    def predict(self, image):
        result_priority_queue = PriorityQueue()
        results = []

        bbs = self.align.getAllFaceBoundingBoxes(image)

        for bb_index, bb in enumerate(bbs):
            alignedFace = self.align.alignImg("affine", 96, image, bb)
            if alignedFace is None:
                continue

            phash = str(imagehash.phash(Image.fromarray(alignedFace)))
            if phash in self.trained_images:
                identity = self.trained_images[phash].identity
                result_priority_queue.put_nowait((-1.0, identity, bb_index))
            else:
                rep = self.net.forwardImage(alignedFace)
                if self.svm is not None:
                    result_proba_list = self.svm.predict_proba(rep)
                    identity = np.argmax(result_proba_list[0])
                    print(str(result_proba_list[0]) + " " + str(bb))
                    for index, prob in enumerate(result_proba_list[0]):
                        result_priority_queue.put_nowait((prob * -1.0, self.identities[index], bb_index))
                else:
                    result_priority_queue.put_nowait((0.0, -1, bb_index))

        matched_identities = []
        matched_bb_indices = []
        threshold = 0.6

        while len(matched_identities) != len(bbs) and not result_priority_queue.empty():
            detectedFaceInfo = result_priority_queue.get_nowait()

            identity = detectedFaceInfo[1]
            probability = detectedFaceInfo[0] * -1.0
            bb_index = detectedFaceInfo[2]
            # print detectedFaceInfo

            if identity in matched_identities:
                # print "matched_bbs : " + str(matched_identities)
                continue

            matched_bb_indices.append(bb_index)
            matched_identities.append(identity)

            if probability < threshold:
                results.append((-1, bbs[bb_index], 0.0))
            else:
                results.append((identity, bbs[bb_index], probability))

                # print '+' + str(results[len(results) - 1])

        for bb_index, bb in enumerate(bbs):
            if bb_index in matched_bb_indices:
                continue

            results.append((-1, bb, 0.0))

        return results
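predict pushes every candidate (negated probability, identity, box index) onto a priority queue, pops best-first, and assigns each identity to at most one bounding box; anything scoring below the 0.6 threshold, or left unmatched by the end, comes back labeled -1. A hypothetical usage sketch (image is an in-memory numpy array):

service = FaceService()
for identity, bb, probability in service.predict(image):
    # bb is a dlib rectangle; identity -1 means "unknown face"
    print(identity, (bb.left(), bb.top(), bb.right(), bb.bottom()), probability)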
Example #3
# Assumes the same imports and model paths as Example #4 below
# (os, cv2, numpy as np, PIL's Image, NaiveDlib, dlibModelDir).
align = NaiveDlib(os.path.join(dlibModelDir, "mean.csv"),
                  os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))

#img = Image.open('../data/1.png')
#img = Image.open('../data/alumnus.jpg')
img = Image.open('../data/celebrities.jpg')
buf = np.fliplr(np.asarray(img))  # mirror the image horizontally

rgbFrame = np.zeros((img.height, img.width, 3), dtype=np.uint8)

# PIL's array is RGB; swapping channels 0 and 2 gives the BGR layout
# that cv2.imshow expects (despite the rgbFrame name).
rgbFrame[:, :, 0] = buf[:, :, 2]
rgbFrame[:, :, 1] = buf[:, :, 1]
rgbFrame[:, :, 2] = buf[:, :, 0]

cv2.imshow('frame', rgbFrame)

annotatedFrame = np.copy(rgbFrame)

bbs = align.getAllFaceBoundingBoxes(rgbFrame)
for bb in bbs:
    bl = (bb.left(), bb.bottom())
    tr = (bb.right(), bb.top())
    cv2.rectangle(annotatedFrame, bl, tr, color=(153, 255, 204),
                  thickness=2)

cv2.imshow('frame2', annotatedFrame)

# The imshow windows only repaint inside waitKey; a bare busy loop
# leaves them blank.
cv2.waitKey(0)

print('end')
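The per-channel copy above is equivalent to a single OpenCV conversion; an alternative one-liner (same result, not what this example uses):

bgrFrame = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)  # buf is the flipped RGB array from above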
Example #4
# Imports the excerpt relies on but doesn't show:
import os
import sys
from time import sleep

import cv2
import numpy as np
from PIL import Image

# cv2.imwrite options: write PNGs at maximum compression (9).
params = [cv2.IMWRITE_PNG_COMPRESSION, 9]

fileDir = os.path.dirname(os.path.realpath(__file__))  # assumed; fileDir isn't defined in the excerpt
modelDir = os.path.join(fileDir, "../project", 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
sys.path.append(openfaceModelDir)

from openface.alignment import NaiveDlib  # old (pre-0.2.0) openface API; import path assumed

align = NaiveDlib(os.path.join(dlibModelDir, "mean.csv"),
                  os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat"))

video_capture = cv2.VideoCapture(0)

# Grab a throwaway frame, wait for the camera to warm up, then read the
# frame actually used (the first capture is often dark or empty).
ret, frame = video_capture.read()
sleep(1)
ret, frame = video_capture.read()

image = frame

cv2.imwrite('img.png', frame, params)
bbs = align.getAllFaceBoundingBoxes(image)

print(len(bbs))

# Guard against images with no detected face (bbs may be empty).
if bbs:
    bb = bbs[0]
    alignedFace = align.alignImg("affine", 96, image, bb)
    cv2.imwrite('img2.png', alignedFace, params)
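Example #2 shows where such an aligned 96x96 crop goes next: through the TorchWrap network to get an embedding. A sketch reusing that setup (openface import and model path assumed from Example #2):

import openface  # as in Example #2

net = openface.TorchWrap(os.path.join(openfaceModelDir, 'nn4.v1.t7'),
                         imgDim=96, cuda=False)
rep = net.forwardImage(alignedFace)  # embedding vector used for SVM training/prediction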