Example #1
def stream_capture(q, fr_q, fr_q_results):
    #start camera capture
    vcap = VideoStream(src=url).start()

    # initialize FR class
    fr = FaceRecognizer(ctx='gpu',
                        fd_model_path='./fd_model',
                        gpu=0,
                        license=license)
    fr_results = None

    # main loop: read frames, overlay the latest recognition results,
    # and feed the streaming and recognition queues
    while True:
        try:
            frame = vcap.read()

            if not fr_q_results.empty():
                fr_results = fr_q_results.get_nowait()
            if fr_results:
                for i, box in enumerate(fr_results['boxes']):
                    fr.draw_box(frame, box)
                    if fr_results['labels'][i]['predicted_label'] is not None:
                        fr.draw_label(
                            frame, (int(box[0]), int(box[1])),
                            fr_results['labels'][i]['predicted_label'], 2, 2)

            # pass the frame to the streaming and recognition queues,
            # dropping the oldest frame first if either queue is full
            if q.full():
                q.get()
            if fr_q.full():
                fr_q.get()
            q.put(frame)
            fr_q.put(frame)
            # time.sleep(0.01)
        except Exception:
            # requires `import traceback` at module scope
            print(traceback.format_exc())
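
The loop above only consumes recognition results; below is a minimal sketch of the worker assumed on the other end of fr_q and fr_q_results (the payload shape {'boxes': ..., 'labels': ...} is inferred from the drawing code above; the threshold and collection path are hypothetical):

def recognition_worker(fr_q, fr_q_results):
    fr = FaceRecognizer(ctx='gpu',
                        fd_model_path='./fd_model',
                        gpu=0,
                        license=license)
    while True:
        # block until the capture loop supplies a frame
        frame = fr_q.get()
        boxes, points, chips = fr.find_faces(frame,
                                             return_chips=True,
                                             return_binary=True)
        if boxes is None:
            continue
        labels = [fr.identify(chip, threshold=0.3,
                              collection='./collection.npz')
                  for chip in chips]
        # keep only the freshest result for the capture loop to draw
        if fr_q_results.full():
            fr_q_results.get()
        fr_q_results.put({'boxes': boxes, 'labels': labels})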
Example #2
def live_search(request):
    if 'username' not in request.session:
        request.session["username"] = None
    print("in function live ==============")
    contact = None

    # initialize FR class (fr is used below, so this must not stay commented out)
    fr = FaceRecognizer(ctx='cpu',
                        fd_model_path='./fd_model',
                        fr_model_path='./model-tfv2/model.trueface',
                        params_path='./model-tfv2/model.params',
                        license='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbW90aW9uIjpudWxsLCJmciI6dHJ1ZSwicGFja2FnZV9pZCI6bnVsbCwiZXhwaXJ5X2RhdGUiOiIyMDE5LTA5LTI3IiwidGhyZWF0X2RldGVjdGlvbiI6bnVsbCwibWFjaGluZXMiOiI1IiwiYWxwciI6bnVsbCwibmFtZSI6IkpvaG4gQnJpZGdld2F0ZXIiLCJ0a2V5IjoibmV3IiwiZXhwaXJ5X3RpbWVfc3RhbXAiOjE1Njk1NDI0MDAuMCwiYXR0cmlidXRlcyI6dHJ1ZSwidHlwZSI6Im9mZmxpbmUiLCJlbWFpbCI6ImpvaG5iQGJsdWVzdG9uZS5uZXR3b3JrIn0._B9h-H4sZ5tQBslIVZtM1b2Y4_-TSN1e4dAo6KAp0nU')

    # run once to build the collection queried below
    # fr.create_collection('collection', 'collection.npz', return_features=False)

    vcap = VideoStream(src=0).start()
    t_end = time.time() + 60 * 0.5  # scan for 30 seconds
    while time.time() < t_end:
    #while(True):
        frame = vcap.read()
        frame = cv2.resize(frame, (640, 480))
        bounding_boxes, points, chips = fr.find_faces(frame,
                                                      return_chips=True,
                                                      return_binary=True)
        if bounding_boxes is None:
            continue
        for i, chip in enumerate(chips):
            identity = fr.identify(chip,
                                   threshold=0.3,
                                   collection='./collection.npz')
            print("========================")
            print(identity)
            if identity['predicted_label'] is not None:
                contact = Contact.objects.filter(lost_one__folder_name__contains=identity['predicted_label'])
                print(contact)
                break
        if contact:
            print('match found; stopping capture')
            break
        # no match yet; keep scanning until t_end

    vcap.stopped = True
    vcap.stream.release()
    return render(request, 'index.html', {"contacts": contact, "user": request.session["username"]})
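
A hedged note on exposing this Django view (the module and route names are hypothetical; path() is the standard Django 2+ URL API):

# urls.py
from django.urls import path
from . import views

urlpatterns = [
    path('live/', views.live_search, name='live_search'),
]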
Example #3
from multiprocessing import Pool
import time
import signal
import base64
import json
import os
import traceback

from trueface.utils import RedisQueue
# VideoStream's import path matches Example #5; the FaceRecognizer path is an assumption
from trueface.recognition import FaceRecognizer
from trueface.video import VideoStream

face_detector = FaceRecognizer(ctx='gpu',
                               fd_model_path='./fd_model',
                               license=os.environ['TF_TOKEN'])

q = RedisQueue('office_camera')

vcap = VideoStream(src="rtsp://192.168.1.177:554/stream2").start()


def init_worker():
    # workers ignore SIGINT so Ctrl+C is handled by the parent process
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def detect(frame):
    # runs inside the worker process; face_detector was created before the
    # Pool forked, so the child inherits the loaded model
    bounding_boxes, points, chips = face_detector.find_faces(
        frame, return_chips=True, return_binary=True)
    return bounding_boxes, points, chips


p = Pool(1, init_worker)

frames = []
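
The snippet cuts off after frames = []; a hedged sketch of a main loop that could follow (the use of apply_async, the base64-JPEG payload, and RedisQueue.put accepting bytes are assumptions; the frames list from above is left unused here):

import cv2  # needed for imencode below

try:
    while True:
        frame = vcap.read()
        if frame is None:
            continue
        # detection runs in the single worker; the model was loaded before
        # the Pool forked, so the child inherits it
        boxes, points, chips = p.apply_async(detect, (frame,)).get()
        # publish the frame to the Redis-backed queue as base64 JPEG
        ok, jpeg = cv2.imencode('.jpg', frame)
        if ok:
            q.put(base64.b64encode(jpeg.tobytes()))
except KeyboardInterrupt:
    p.terminate()
    p.join()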
Example #4
# truncated snippet: assumes requests, strftime/gmtime (from time), FaceRecognizer,
# VideoStream, and a `config` dict loaded by earlier code
url = int(url) if len(url) == 1 else url  # a single-digit source is a webcam index
threshold = config['config']['threshold']
webhook_url = config['config']['webhook_url']
webhook_holdout = config['config']['webhook_holdout']
license = config['config']['license']

#initialize FR class
fr = FaceRecognizer(ctx='gpu',
                    fd_model_path='./fd_model',
                    fr_model_path='./model-tfv2/model.trueface',
                    params_path='./model-tfv2/model.params',
                    gpu=0,
                    license=license)

#start camera capture
vcap = VideoStream(src=url).start()


def webhook(identity):
    """POST the identity, its match probability, and a timestamp to webhook_url."""
    identity['probability'] = fr.cosine_sim_to_prob(identity['confidence'])
    data = {
        "identity": identity,
        "timestamp": strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    }
    r = requests.post(webhook_url, json=data)
    print(r.text)


#simple streaming server
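
A hedged sketch of the "simple streaming server" the comment introduces: a Flask endpoint that serves frames as an MJPEG stream (Flask and the multipart framing are standard; reading frames from a queue named q, as in Example #1, is an assumption):

import cv2
from flask import Flask, Response

app = Flask(__name__)

def mjpeg(frame_q):
    """Yield JPEG-encoded frames as a multipart MJPEG stream."""
    while True:
        frame = frame_q.get()
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

@app.route('/stream')
def stream():
    return Response(mjpeg(q),
                    mimetype='multipart/x-mixed-replace; boundary=frame')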
Example #5
from trueface.threat_detection import ThreatDetection
from trueface.video import VideoStream
import tensorflow as tf
import cv2
import os

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    threat_detection = ThreatDetection(
        "./threat-model/model.trueface", 
        "./threat-model/model.params", 
        "./threat-model/trueface-classes.csv", 
        os.environ['TF_TOKEN'])

    cap = VideoStream(src=0).start()

    while True:
        frame = cap.read()
        if frame is None:
            print('no frame received; stopping')
            break

        prediction = threat_detection.predict(frame)
        print(prediction)
        if prediction:
            for pred in prediction:
                # ignore low-confidence detections
                if pred['score'] < 0.5:
                    continue
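                # The original cuts off here. A hedged continuation that
                # displays surviving detections; only 'score' appears above,
                # so the 'box' (x1, y1, x2, y2) and 'label' keys are assumptions.
                x1, y1, x2, y2 = [int(v) for v in pred['box']]
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
                cv2.putText(frame, pred.get('label', 'threat'), (x1, y1 - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        cv2.imshow('threat detection', frame)
        if cv2.waitKey(1) == ord('q'):
            break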
Example #6
from trueface.motion import MotionDetector
from trueface.video import VideoStream
import cv2

# written against trueface 0.6.2; pin that version if you need this exact behavior

cap = VideoStream(src=0).start()

# MotionDetector applies a binary threshold to frame differences: pixels that
# change by more than `threshold` are written into the heatmap with value
# `max_value`. Increase max_value to make motion register more strongly, or
# set max_value=1 to pick up the least motion over time. `frames` sets how
# many frames the heatmap tracks.
motion = MotionDetector(cap.read(), threshold=1, max_value=3, frames=100)

while True:
    frame = cap.read()
    frame = motion.detect(frame)
    # motion.fade() decays the heatmap over time; use it instead of the
    # frames= parameter
    #motion.fade(ratio=0.75)

    cv2.imshow("image", frame)
    if cv2.waitKey(33) == ord('q'):
        break
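
Example #2 stops its stream explicitly once done; the same teardown applies here after the loop exits (cap.stopped and cap.stream.release() follow the pattern shown in Example #2):

cap.stopped = True
cap.stream.release()
cv2.destroyAllWindows()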