Example 1
def outfacing(lock, receiver, ttsque=None, finder=finder):
    # Outward-facing recognition loop: grab frames, extract face features,
    # classify them, and react to control messages arriving on `receiver`.
    cv2.namedWindow('visuals')
    processor = faceprocessor()
    extractor = modelInferance()
    processor.setSource(cv2.VideoCapture(0))
    processor.setFinder(finder().nextFaces)
    faceDict = FaceDictionary()
    datawindow = window()
    dnn = model()
    dnn.load_previous()                  # restore previously saved model state
    generator = datagenerator()
    clf, revmap = load_classifier(lock)  # classifier plus index-to-name map
    cacher = feature_cacher()
    kill = False
    cacheFlag = False
    label = None
    cv2.namedWindow('visual')
    ix = 0
    # Run until a key is pressed in an OpenCV window or a KILL message arrives.
    while cv2.waitKey(10) < 0 and not kill:
        temp = processor.getFaces()
        # Drain a pending control message, if any (receiver exposes poll/recv,
        # e.g. one end of a multiprocessing Pipe).
        if receiver and receiver.poll(0.001):
            signal = receiver.recv()
            print(signal)
            if signal.find('label:') >= 0:
                # 'label:<name>' turns on feature caching under that name.
                label = signal[signal.find(':') + 1:]
                cacheFlag = True
            if signal == 'STOPCACHE':
                cacheFlag = False
            if signal == 'KILL':
                kill = True
            if signal == 'Done Thinking':
                # Training finished elsewhere: hot-swap the classifier in place.
                clf, revmap = load_classifier(lock)
                print('HOT SWAPPED CLASSIFIER')
        if temp is None:
            continue
        frame, fbbs, wfaces = temp           # full frame, face bounding boxes, face crops
        feats = extractor.getFeatures(wfaces)
        #faceDict.update(fbbs,feats)
        #index,fbbs,feats = faceDict.getobjs()
        index = np.ones(len(fbbs))
        clss, score, tmap = handledata(dnn, cacher, generator, cacheFlag,
                                       label, feats, wfaces, datawindow)
        if tmap is not None:
            revmap = tmap                    # handledata may return an updated label map
        # Treat low-scoring predictions as an unknown person.
        if score is not None:
            isUnknown = score < 8.0
        else:
            isUnknown = False

        sendmesg(isUnknown, 'What are you doing here? Relinquish thine name. ',
                 ttsque)

        ix += 1

        visualize(frame, fbbs, wfaces, clss, revmap, isUnknown, index)
        # print(clss)
    extractor.kill()
    return 1
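A minimal driver sketch for this loop, assuming `outfacing` is importable from the project and that `receiver` only needs the `poll`/`recv` interface (one end of a `multiprocessing.Pipe` satisfies it); the function name `run_demo` and the label `alice` are made up for illustration:

import multiprocessing as mp

def run_demo():
    lock = mp.Lock()                     # shared lock handed to load_classifier
    ttsque = mp.Queue()                  # queue consumed by sendmesg (presumably text-to-speech)
    parent_conn, child_conn = mp.Pipe()  # child_conn plays the role of `receiver`
    worker = mp.Process(target=outfacing, args=(lock, child_conn, ttsque))
    worker.start()
    parent_conn.send('label:alice')      # start caching features under this label
    parent_conn.send('STOPCACHE')        # stop caching
    parent_conn.send('Done Thinking')    # reload ("hot swap") the classifier from disk
    parent_conn.send('KILL')             # ask the loop to exit
    worker.join()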
Example 2
def outfacing(lock, receiver, finder=finder):
    # Simpler recognition loop: classify extracted features with the loaded
    # classifier and draw the top-2 predictions on each face crop.
    cv2.namedWindow('visuals')
    processor = faceprocessor()
    extractor = modelInferance()
    processor.setSource(cv2.VideoCapture(0))
    processor.setFinder(finder().nextFaces)
    #processor.setTracker(tracker())
    clf, revmap = load_classifier(lock)
    cacher = feature_cacher()
    kill = False
    cacheFlag = False
    label = None
    cv2.namedWindow('visual')
    try:
        while cv2.waitKey(10) < 0 and not kill:
            temp = processor.getFaces()
            if receiver and receiver.poll(0.001):
                signal = receiver.recv()
                print(signal)
                if signal.find('label:') >= 0:
                    label = signal[signal.find(':') + 1:]
                    cacheFlag = True
                if signal == 'STOPCACHE':
                    cacheFlag = False
                if signal == 'KILL':
                    kill = True
                if signal == 'Done Thinking':
                    clf, revmap = load_classifier(lock)
                    print('HOT SWAPPED CLASSIFIER')
            if temp is None:
                continue
            frame, fbbs, wfaces = temp   # full frame, face bounding boxes, face crops
            feats = extractor.getFeatures(wfaces)
            # Build a strip wide enough to show every face crop side by side.
            pair = wfaces[0].shape
            vizframe = np.zeros((pair[0], pair[1] * len(wfaces), 3),
                                dtype=np.uint8)
            cv2.imshow('visual', frame)
            clss = clf.predict_proba(feats)
            if cacheFlag:
                # While caching is on, store (feature, face) pairs under the current label.
                pairs = zip(feats, wfaces)
                cacher.cachePairs(pairs, label)
            i = 0
            for face, probs in zip(wfaces, clss):
                # Retrieve the top 2 classes by probability and draw them on the crop.
                top2 = probs.argsort()[-2:]
                for ix, cls in enumerate(top2):
                    cv2.putText(face,
                                revmap[round(cls)] + ':' + str(probs[cls]),
                                (0, 45 + ix * 50), cv2.FONT_HERSHEY_SIMPLEX,
                                0.5, (0, 255, 0))
                # Copy the annotated crop into its slot of the visualization strip.
                vizframe[:, i * pair[1]:(i + 1) * pair[1]] = face
                i += 1
            cv2.imshow('visuals', vizframe)
    finally:
        extractor.kill()
    return 1
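For context, a hedged sketch of the shape that `load_classifier` presumably returns: a probabilistic classifier plus a reverse map from class index to person name, which is what makes `revmap[round(cls)]` work above. The scikit-learn classifier and the dict layout below are assumptions, not the project's actual code:

import numpy as np
from sklearn.svm import SVC

def build_classifier(feats, names):
    # names: one string label per feature vector in feats (assumed layout).
    uniq = sorted(set(names))
    name_to_idx = {n: i for i, n in enumerate(uniq)}
    y = np.array([name_to_idx[n] for n in names])
    clf = SVC(probability=True).fit(np.asarray(feats), y)
    revmap = {i: n for n, i in name_to_idx.items()}  # class index -> person name
    return clf, revmap

# predict_proba columns follow the 0..K-1 class indices, so the lookup in the
# loop above (probs.argsort()[-2:] then revmap[cls]) resolves the top-2 names.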
Example 3
def __init__(self, finder=None,
             processor=faceprocessor,
             extractor=modelInferance,
             cachePath="/home/avail/data/facerecognition/cache"):
    # Build the face processor/extractor only when a face finder is supplied;
    # otherwise the instance only knows about the cache directory.
    self.finder = finder
    if self.finder is not None:
        self.processor = processor()
        self.processor.setFinder(finder().nextFaces)
        self.extractor = extractor()
    self.saveDir = cachePath
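A hypothetical instantiation of the constructor above; the enclosing class name (`FacePipeline` here) is not shown in the snippet and is assumed, and note that `__init__` does not set a video source:

import cv2

pipeline = FacePipeline(finder=finder)               # FacePipeline is a stand-in name
pipeline.processor.setSource(cv2.VideoCapture(0))    # source must be set separately
result = pipeline.processor.getFaces()               # (frame, fbbs, wfaces) as in the loops above
if result is not None:
    frame, fbbs, wfaces = result
    feats = pipeline.extractor.getFeatures(wfaces)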