def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Input stream is of resolution: {} x {}".format(
        camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service("face_recognition", version=1)

    ### Choose here the appropriate pictures.
    ### Pictures given as samples for the album should only contain 1 visible face.
    ### You can provide the API with more than 1 photo for a given person.
    w1_s1 = conn.blobs.create(open("./images/gwenn.jpg", 'rb'))
    w2_s1 = conn.blobs.create(open("./images/aurelien.jpg", 'rb'))
    w3_s1 = conn.blobs.create(open("./images/sylvain.jpg", 'rb'))

    album = {'gwenn': [w1_s1], 'aurelien': [w2_s1], 'sylvain': [w3_s1]}

    service.enable_session({"album": album})

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break

        ret, buff = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        job = service.process({"image": buff})
        res = job.result

        for face in res['faces']:
            x, y, dx, dy = face['roi']
            cv2.rectangle(frame, (x, y), (x + dx, y + dy), (0, 255, 0))

            if len(face['names']) > 0:
                name = face['names'][0]['key']
                cv2.putText(frame, "Name = {}".format(name), (x, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))

        cv2.imshow('original', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
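These samples target Python 2 with OpenCV 2.x (cv2.cv.CV_CAP_PROP_* constants, StringIO buffers). A minimal sketch of the same capture-and-encode step under Python 3 and OpenCV 3+, assuming the opencv-python package (where the capture constants moved to the cv2 namespace) and io.BytesIO in place of StringIO:

import io
import cv2

camera = cv2.VideoCapture(0)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

ret, frame = camera.read()
if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ok, jpg = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
    buff = io.BytesIO(jpg.tobytes())  # file-like object, usable as {"image": buff}
camera.release()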
Example #2
    def AngusGender(self, imageString, image):

        path = imageORimageString(imageString, image)  # path of the saved image
        conn = angus.connect()  # connection
        service = conn.services.get_service(
            "age_and_gender_estimation",
            version=1)  # choose which service to call
        job = service.process({'image': open(path, 'rb')})  # call the service
        response = job.result
        return response
Example #3
def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Input stream is of resolution: {} x {}".format(
        camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service('face_expression_estimation', 1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break

        ### angus.ai computer vision services require gray images right now.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        job = service.process({"image": buff})
        res = job.result

        for face in res['faces']:
            x, y, dx, dy = face['roi']
            cv2.rectangle(frame, (x, y), (x + dx, y + dy), (0, 255, 0))

            ### Sorting of the 5 expressions measures
            ### to display the most likely on the screen
            exps = [(face[exp], exp) for exp in
                    ['sadness', 'happiness', 'neutral', 'surprise', 'anger']]
            exps.sort()
            max_exp = exps[-1]

            cv2.putText(frame, str(max_exp[1]), (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))

        cv2.imshow('original', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    ### Disabling session on the server
    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
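The sort-and-take-last idiom above just selects the most likely expression; an equivalent argmax with max(), assuming the same five score keys on each face dict:

expressions = ['sadness', 'happiness', 'neutral', 'surprise', 'anger']
best = max(expressions, key=lambda exp: face[exp])  # name of the top-scored expression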
def main():
    os.environ[
        'LD_LIBRARY_PATH'] = "../ARSDKBuildUtils/Targets/Unix/Install/lib"
    sub = subprocess.Popen(["./JumpingSumoInterface"],
                           stdin=subprocess.PIPE,
                           stdout=None,
                           stderr=subprocess.STDOUT)
    time.sleep(2)
    conn = angus.connect()
    service = conn.services.get_service('face_detection', 1)
    launch("./video_fifo", sub, service)
Example #6
    def AngusText2Sound(self, text, lang):
        def decode_output(sound, filename):
            sound = base64.b64decode(sound)
            sound = zlib.decompress(sound)
            with open(filename, "wb") as f:
                f.write(sound)

        conn = angus.connect()
        service = conn.services.get_service('text_to_speech', version=1)
        job = service.process({'text': text, 'lang': lang})
        decode_output(job.result["sound"], "angusSound.mp3")
        f1 = open("angusSound.mp3", "rb")
        return f1
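A short usage sketch for the text_to_speech flow above; the 'text' and 'lang' keys come from the snippet, while the language code and output filename here are only illustrative:

conn = angus.connect()
service = conn.services.get_service('text_to_speech', version=1)
job = service.process({'text': "Hello world", 'lang': "en-US"})
# job.result["sound"] is base64-encoded, zlib-compressed audio (see decode_output above)
decode_output(job.result["sound"], "hello.mp3")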
Example #7
def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Video stream is of resolution {} x {}".format(
        camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service("age_and_gender_estimation", version=1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()

        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        job = service.process({"image": buff})
        res = job.result

        for face in res['faces']:
            print('in face')
            x, y, dx, dy = face['roi']
            age = face['age']
            gender = face['gender']

            cv2.rectangle(frame, (x, y), (x + dx, y + dy), (0, 255, 0))
            cv2.putText(frame,
                        "(age, gender) = ({:.1f}, {})".format(age, gender),
                        (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255))

        cv2.imshow('original', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
Example #8
def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Input stream is of resolution: {} x {}".format(
        camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service("upper_body_detection", version=1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])

        buff = StringIO.StringIO(np.array(buff).tostring())

        job = service.process({"image": buff})
        res = job.result
        pprint(res)

        for body in res['upper_bodies']:
            x, y, dx, dy = body['upper_body_roi']
            cv2.rectangle(frame, (x, y), (x + dx, y + dy), (0, 255, 0))

        cv2.imshow('original', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
Example #9
    def init_angus(self):
        # Try the angus-server connection at most 5 times
        conn = angus.connect()
        service = None
        for count in range(5):
            try:
                service = conn.services.get_service("scene_analysis",
                                                    version=1)
                service.enable_session()
                break
            except Exception as e:
                LOGGER.error(e)
                LOGGER.warning(
                    "Scene Analysis not ready (attempt #%s/5), wait 1s and try again",
                    count + 1)
                time.sleep(2)
                continue
        if service is None:
            LOGGER.error("Scene analysis is not available, shutdown")
            return False

        LOGGER.info("Scene Analysis service connected")
        self.service = service
        return True
from math import sin, cos
import StringIO

import cv2
import numpy as np
import angus

LINETH = 3

if __name__ == '__main__':

    ### To grab the host computer web cam instead of a given file, try:
    cap = cv2.VideoCapture(0)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    print "Input stream is of resolution: " + str(cap.get(3)) + " x " + str(cap.get(4))
    conn = angus.connect()
    service = conn.services.get_service('gaze_analysis', 1)
    service.enable_session()

    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:
            ### angus.ai computer vision services require gray images right now.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, buff = cv2.imencode(".png", gray)
            buff = StringIO.StringIO(np.array(buff).tostring())

            job = service.process({"image": buff})
            res = job.result

            print "---------- Raw answer from Angus.ai -----------"

def choose_io(inputs=False, outputs=False):
    # (device enumeration elided in the original snippet)
    if inputs and outputs:
        msg = "i/o"
    elif inputs:
        msg = "input"
    elif outputs:
        msg = "output"

    d = int(raw_input("Select your %s: " % msg))

    return d

if __name__ == "__main__":
    ########
    # Angus
    ########
    root = angus.connect()

    ################
    # Input / Output
    ################
    if len(sys.argv) == 3:
        audio_in = int(sys.argv[1])
        audio_out = int(sys.argv[2])
    else:
        audio_in = choose_io(inputs=True)
        audio_out = choose_io(outputs=True)

    ###########
    # Directory
    ###########
    directory = dict()
Example #12
import StringIO

import cv2
import numpy as np
import angus

if __name__ == '__main__':

    ### To grab the host computer web cam instead of a given file, try:
    ### cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    print "Input stream is of resolution: " + str(cap.get(3)) + " x " + str(
        cap.get(4))

    conn = angus.connect()
    service = conn.services.get_service('gaze_analysis', 1)
    service.enable_session()

    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:
            ### angus.ai computer vision services require gray images right now.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, buff = cv2.imencode(".png", gray)
            buff = StringIO.StringIO(np.array(buff).tostring())

            job = service.process({"image": buff})
            res = job.result

            print "---------- Raw answer from Angus.ai -----------"
Example #13
    def get_frame_proxy(self, test):
        conn = angus.connect("https://gate.angus.ai")
        service = conn.services.get_service("scene_analysis", version=1)
        service.enable_session()

        return self.get_frame(service)
Example #14
def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Input stream is of resolution: {} x {}".format(
        camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service('gaze_analysis', 1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break

        ### angus.ai computer vision services require gray images right now.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        job = service.process({"image": buff})
        res = job.result

        for face in res['faces']:
            x, y, dx, dy = map(int, face['roi'])

            nose = face['nose']
            nose = (int(nose[0]), int(nose[1]))

            eyel = face['eye_left']
            eyel = (int(eyel[0]), int(eyel[1]))
            eyer = face['eye_right']
            eyer = (int(eyer[0]), int(eyer[1]))

            psi = face['head_roll']
            theta = -face['head_yaw']
            phi = face['head_pitch']

            ### head orientation
            length = 150
            xvec = int(
                length *
                (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
            yvec = int(
                -length *
                (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
            cv2.line(frame, nose, (nose[0] + xvec, nose[1] + yvec),
                     (0, 140, 255), 3)

            psi = 0
            theta = -face['gaze_yaw']
            phi = face['gaze_pitch']

            ### gaze orientation
            length = 150
            xvec = int(
                length *
                (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
            yvec = int(
                -length *
                (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
            cv2.line(frame, eyel, (eyel[0] + xvec, eyel[1] + yvec),
                     (0, 140, 0), 3)

            xvec = int(
                length *
                (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
            yvec = int(
                -length *
                (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
            cv2.line(frame, eyer, (eyer[0] + xvec, eyer[1] + yvec),
                     (0, 140, 0), 3)

        cv2.imshow('original', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    ### Disabling session on the server
    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
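The head and gaze lines above repeat the same angles-to-screen projection; a small helper, assuming roll/yaw/pitch in radians as used throughout these examples, keeps the drawing loop shorter:

from math import sin, cos

def projection(psi, theta, phi, length=150):
    # Project a (roll=psi, yaw=theta, pitch=phi) orientation onto the image
    # plane as a 2D displacement, mirroring the xvec/yvec math above.
    xvec = int(length * (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
    yvec = int(-length * (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
    return xvec, yvec

# e.g. for the head vector:
#   xvec, yvec = projection(face['head_roll'], -face['head_yaw'], face['head_pitch'])
#   cv2.line(frame, nose, (nose[0] + xvec, nose[1] + yvec), (0, 140, 255), 3)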
Example #15
def main(stream_index):
    p = pyaudio.PyAudio()

    # Device configuration
    conf = p.get_device_info_by_index(stream_index)
    channels = int(conf['maxInputChannels'])
    if channels < TARGET_CHANNELS:
        raise RuntimeError("Bad device, no input channel")

    rate = int(conf['defaultSampleRate'])

    # Angus
    conn = angus.connect()
    service = conn.services.get_service('sound_localization', version=1)
    service.enable_session()

    # Record Process
    stream_queue = Queue.Queue()

    def chunk_callback(in_data, frame_count, time_info, status):
        in_data = prepare(in_data, channels, rate)
        stream_queue.put(in_data)
        return (in_data, pyaudio.paContinue)

    stream = p.open(format=PYAUDIO_FORMAT,
                    channels=channels,
                    rate=rate,
                    input=True,
                    frames_per_buffer=CHUNK,
                    input_device_index=stream_index,
                    stream_callback=chunk_callback)
    stream.start_stream()

    while True:
        nb_buffer_available = stream_queue.qsize()
        if nb_buffer_available > 0:
            print("nb buffer available = {}".format(nb_buffer_available))

        if nb_buffer_available == 0:
            time.sleep(0.01)
            continue

        data = stream_queue.get()

        buff = StringIO.StringIO()

        wf = wave.open(buff, 'wb')
        wf.setnchannels(TARGET_CHANNELS)
        wf.setsampwidth(p.get_sample_size(PYAUDIO_FORMAT))
        wf.setframerate(TARGET_RATE)
        wf.writeframes(data)
        wf.close()

        job = service.process({
            'sound': StringIO.StringIO(buff.getvalue()),
            'baseline': 0.14,
            'sensitivity': 0.7
        })
        pprint(job.result['sources'])

    stream.stop_stream()
    stream.close()
    p.terminate()
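Both audio examples package raw PCM frames into an in-memory WAV container before calling the service; a sketch of that step using Python 3's io.BytesIO in place of StringIO, with the target rate and channel count the snippets use:

import io
import wave

def to_wav_buffer(frames, sample_width, rate=16000, channels=1):
    # Wrap raw PCM frames in an in-memory WAV file, ready to be sent
    # as the 'sound' input of the service.
    buff = io.BytesIO()
    wf = wave.open(buff, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sample_width)
    wf.setframerate(rate)
    wf.writeframes(frames)
    wf.close()
    buff.seek(0)
    return buff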
Example #16
def main():
    conn = angus.connect("https://gate.angus.ai")
    service = conn.services.get_service("scene_analysis", version=1)
    service.enable_session()
    try:
        camera = cv2.VideoCapture("/Users/rafiahmed/Downloads/GOPR1379.MP4")
        print("Video stream is of resolution {} x {}".format(camera.get(3), camera.get(4)))
        #camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
        #camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
        #camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)
    except Exception:
        print("CV2 Bombed")
        return
    #video = skvideo.io.VideoCapture("/Users/rafiahmed/Downloads/GOPR1379.mp4")
    ret, frame = camera.read()
    i = 0
    while ret:
        ret, frame = camera.read()
        if not ret:
            break
        cv2.imshow("Frame", frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())
        t = datetime.datetime.now(pytz.utc)
        #print("Calling service")
        #job = service.process({"image": buff,
        #                       "timestamp": t.isoformat()
        #                       })

        job = service.process({"image": buff,
                               "timestamp": t.isoformat(),
                               "camera_position": "ceiling",
                               "sensitivity": {
                                   "appearance": 0.7,
                                   "disappearance": 0.7,
                                   "age_estimated": 0.4,
                                   "gender_estimated": 0.5,
                                   "focus_locked": 0.9,
                                   "emotion_detected": 0.4,
                                   "direction_estimated": 0.8
                               }
                               })

        #print("Called service")
        res = job.result
        pprint(res)
        if "error" in res:
            print("Bomb")
            print(res["error"])
        else:
            # This parses the entities data
            print("No Bomb")
            for key, val in res["entities"].iteritems():
                print("Iterating")
                # display only gaze vectors
                # retrieving eyes points
                eyel, eyer = val["face_eye"]
                eyel = tuple(eyel)
                eyer = tuple(eyer)

                # retrieving gaze vectors
                psi = 0
                g_yaw, g_pitch = val["gaze"]
                theta = - g_yaw
                phi = g_pitch

                # Computing projection on screen
                # and drawing vectors on current frame
                length = 150
                xvec = int(length * (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
                yvec = int(- length * (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
                cv2.line(frame, eyel, (eyel[0] + xvec, eyel[1] + yvec), (0, 140, 0), 3)

                xvec = int(length * (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
                yvec = int(- length * (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
                cv2.line(frame, eyer, (eyer[0] + xvec, eyer[1] + yvec), (0, 140, 0), 3)
                i = i+1
                print (str("Frame: ") + str(i))
        cv2.imshow('original', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    service.disable_session()
Example #17
RATE = 48000
RECORD_SECONDS = 2

def decode_output(sound, filename):
    sound = base64.b64decode(sound)
    sound = zlib.decompress(sound)
    with open(filename, "wb") as f:
        f.write(sound)

### Index will differ depending on your system
INDEX = 4  # USB Cam


p = pyaudio.PyAudio()

conn = angus.connect()
service1 = conn.services.get_service('sound_detection', version=1)
service1.enable_session()
stream_queue = Queue.Queue()

service2 = conn.services.get_service('text_to_speech', version=1)



def callback(in_data, frame_count, time_info, status):
    stream_queue.put(in_data)
    return (in_data, pyaudio.paContinue)

stream = p.open(format=FORMAT,
                channels=CHANNELS,
def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Input stream is of resolution: {} x {}".format(camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service("scene_analysis", version=1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        t = datetime.datetime.now(pytz.utc)
        job = service.process({"image": buff,
                               "timestamp" : t.isoformat(),
                               "camera_position": "facing",
                               "sensitivity": {
                                   "appearance": 0.7,
                                   "disappearance": 0.7,
                                   "age_estimated": 0.4,
                                   "gender_estimated": 0.5,
                                   "focus_locked": 0.9,
                                   "emotion_detected": 0.4,
                                   "direction_estimated": 0.8
                               }
        })
        res = job.result

        if "error" in res:
            print(res["error"])
        else:
            # This parses the events
            if "events" in res:
                for event in res["events"]:
                    value = res["entities"][event["entity_id"]][event["key"]]
                    print("{}| {}, {}".format(event["type"],
                                              event["key"],
                                              value))

            # This parses the entities data
            for key, val in res["entities"].iteritems():
                x, y, dx, dy = map(int, val["face_roi"])
                cv2.rectangle(frame, (x, y), (x+dx, y+dy), (0, 255, 0), 2)

        cv2.imshow("original", frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break

    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
def main(stream_index):
    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Video stream is of resolution {} x {}".format(
        camera.get(3), camera.get(4)))

    conn = angus.connect()
    service = conn.services.get_service("scene_analysis", version=1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()

        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        t = datetime.datetime.now(pytz.utc)
        job = service.process({"image": buff, "timestamp": t.isoformat()})
        res = job.result

        if "error" in res:
            print(res["error"])
        else:
            # This parses the entities data
            for key, val in res["entities"].iteritems():
                # display only gaze vectors
                # retrieving eyes points
                eyel, eyer = val["face_eye"]
                eyel = tuple(eyel)
                eyer = tuple(eyer)

                # retrieving gaze vectors
                psi = 0
                g_yaw, g_pitch = val["gaze"]
                theta = -g_yaw
                phi = g_pitch

                # Computing projection on screen
                # and drawing vectors on current frame
                length = 150
                xvec = int(
                    length *
                    (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
                yvec = int(
                    -length *
                    (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
                cv2.line(frame, eyel, (eyel[0] + xvec, eyel[1] + yvec),
                         (0, 140, 0), 3)

                xvec = int(
                    length *
                    (sin(phi) * sin(psi) - cos(phi) * sin(theta) * cos(psi)))
                yvec = int(
                    -length *
                    (sin(phi) * cos(psi) - cos(phi) * sin(theta) * sin(psi)))
                cv2.line(frame, eyer, (eyer[0] + xvec, eyer[1] + yvec),
                         (0, 140, 0), 3)

        cv2.imshow('original', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
import angus
from pprint import pprint

conn = angus.connect("https://gate.angus.ai")
service = conn.services.get_service("scene_analysis", version=1)

service.enable_session()

images = ["./G0061126.JPG", "./G0061128.JPG", "./G0061053.JPG"]

for i in images:
    job = service.process({"image": open(i, 'rb'),
                           "timestamp" : "2016-10-26T16:21:01.136287+00:00",
                           "camera_position": "ceiling",
                           "sensitivity": {
                                            "appearance": 0.7,
                                            "disappearance": 0.7,
                                            "age_estimated": 0.4,
                                            "gender_estimated": 0.5,
                                            "focus_locked": 0.9,
                                            "emotion_detected": 0.4,
                                            "direction_estimated" : 0.8
                                          }
                          })
    print str("***************************************************************")
    print "Results for: " + str(i)
    pprint(job.result)


service.disable_session()
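The hardcoded timestamp above stands in for the capture time of each still image; for live input, the webcam examples build the same ISO-format timestamp from the current UTC time:

import datetime
import pytz

t = datetime.datetime.now(pytz.utc)
job = service.process({"image": open(i, 'rb'), "timestamp": t.isoformat()})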
Example #21
    def analyze(self):
        try:
            services = ('age_and_gender_estimation', 'face_expression_estimation', 'gaze_analysis')
            url = "https://gate.angus.ai"
            c_id = "1269cac4-3537-11e6-819d-0242ac110002"
            a_tk = "b407f040-fca7-4dfa-80de-f7399deed597"
            conf = angus.get_default_configuration()
            conf.set_credential(client_id=c_id, access_token=a_tk)

            conn = angus.connect(url=url, conf=conf)
            s = conn.services.get_services(services)
            job = s.process({'image': open(self.photo.path, 'rb')})
            res = job.result

            res_age_gender = res['age_and_gender_estimation']
            res_expr = res['face_expression_estimation']
            res_gaze = res['gaze_analysis']
            self.faces = int(res_age_gender['nb_faces'])
        except Exception:
            return

        tags = []
        for face in res_age_gender['faces']:
            tags.append(face['gender'])
            if   face['age'] < 10: tags.append("baby")
            elif face['age'] < 40: tags.append("young")
            else:                  tags.append("old")

            size = res_age_gender['input_size']
            roi = face['roi']
            center = [roi[0] + roi[2]/2, roi[1] + roi[3]/2]
            c = False
            if   center[0] < size[0]/2 - size[0]/10: tags.append("left")
            elif center[0] > size[0]/2 + size[0]/10: tags.append("right")
            else: c = True

            if   center[1] < size[1]/2 - size[1]/10: tags.append("up")
            elif center[1] > size[1]/2 + size[1]/10: tags.append("down")
            else: c = True

            if c:
                tags.append("center")

        for face in res_expr['faces']:
            if face['sadness']   > 0.5: tags.append("sad")
            if face['neutral']   > 0.5: tags.append("neutral")
            if face['anger']     > 0.5: tags.append("anger")
            if face['surprise']  > 0.5: tags.append("surprise")
            if face['happiness'] > 0.5: tags.append("happiness")

        for face in res_gaze['faces']:
            if   face['head_pitch'] < -0.2: tags.append("head_down")
            elif face['head_pitch'] >  0.2: tags.append("head_up")
            else:                           tags.append("head_zero")

            if   face['head_yaw'] < -0.2: tags.append("head_right")
            elif face['head_yaw'] >  0.2: tags.append("head_left")
            else:                         tags.append("head_zero")

            if   face['head_roll'] < -0.2: tags.append("head_roll_right")
            elif face['head_roll'] >  0.2: tags.append("head_roll_left")
            else:                          tags.append("head_zero")

            if   face['gaze_pitch'] < -0.2: tags.append("eye_down")
            elif face['gaze_pitch'] >  0.2: tags.append("eye_up")
            else:                           tags.append("eye_zero")

            if   face['gaze_yaw'] < -0.2: tags.append("eye_right")
            elif face['gaze_yaw'] >  0.2: tags.append("eye_left")
            else:                         tags.append("eye_zero")

        Tag.init_tags()
        tags = set(tags)
        for t in tags:
            print "get", t
            tag = Tag.objects.get(tag=t)
            self.tags.add(tag)
        self.save()
        return self.faces
Example #22
def main(stream_index):

    p = pyaudio.PyAudio()

    # Device configuration
    conf = p.get_device_info_by_index(stream_index)
    channels = int(conf['maxInputChannels'])
    if channels < 1:
        raise RuntimeError("Bad device, no input channel")

    rate = int(conf['defaultSampleRate'])
    if rate < 16000:
        raise RuntimeError("Bad device, sample rate is too low")

    # Angus
    conn = angus.connect()
    service = conn.services.get_service('voice_detection', version=1)
    service.enable_session()

    # Record Process
    stream_queue = Queue.Queue()

    def chunk_callback(in_data, frame_count, time_info, status):
        in_data = prepare(in_data, channels, rate)
        stream_queue.put(in_data.tostring())
        return (in_data, pyaudio.paContinue)

    stream = p.open(format=PYAUDIO_FORMAT,
                    channels=channels,
                    rate=rate,
                    input=True,
                    frames_per_buffer=CHUNK,
                    input_device_index=stream_index,
                    stream_callback=chunk_callback)
    stream.start_stream()

    # Get data and send to Angus.ai
    while True:
        nb_buffer_available = stream_queue.qsize()

        if nb_buffer_available == 0:
            time.sleep(0.01)
            continue

        data = stream_queue.get()
        buff = StringIO.StringIO()

        wf = wave.open(buff, 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(p.get_sample_size(PYAUDIO_FORMAT))
        wf.setframerate(16000)
        wf.writeframes(data)
        wf.close()

        job = service.process({
            'sound': StringIO.StringIO(buff.getvalue()),
            'sensitivity': 0.2
        })

        res = job.result["voice_activity"]

        if res == "VOICE":
            print "\033[A                                             \033[A"
            print "***************************"
            print "*****   VOICE !!!!   ******"
            print "***************************"

    stream.stop_stream()
    stream.close()
    p.terminate()
def f(stream_index, width, height):

    camera = cv2.VideoCapture(stream_index)
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, int(width))
    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, int(height))
    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)

    if not camera.isOpened():
        print("Cannot open stream of index {}".format(stream_index))
        exit(1)

    print("Video stream is of resolution {} x {}".format(camera.get(3), camera.get(4)))

    stats = st.Stats("stats.json")
    animation = []
    engaged = []

    conn = angus.connect()
    service = conn.services.get_service("scene_analysis", version=1)
    service.enable_session()

    while camera.isOpened():
        ret, frame = camera.read()

        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, buff = cv2.imencode(".jpg", gray,  [cv2.IMWRITE_JPEG_QUALITY, 80])
        buff = StringIO.StringIO(np.array(buff).tostring())

        t = datetime.datetime.now(pytz.utc)
        job = service.process({"image": buff,
                               "timestamp" : t.isoformat(),
                               "camera_position": "facing",
                               "sensitivity": {
                                   "appearance": 0.7,
                                   "disappearance": 0.7,
                                   "age_estimated": 0.4,
                                   "gender_estimated": 0.5,
                                   "focus_locked": 0.9,
                                   "emotion_detected": 0.4,
                                   "direction_estimated": 0.8
                               }
        })

        res = job.result

        events = res["events"]
        entities = res["entities"]

        for idx, h in entities.iteritems():
            pt = ad.displayAge(frame, idx, h, 0.50, 0.35)
            ch = ad.displayHair(frame, idx, h)
            ad.displayAvatar(frame, h, pt, ch)
            ad.displayEmotion(frame, h, pt)
            ad.displayGender(frame, h, pt)
            ad.displayGaze(frame, idx, h, pt, 0.50)

        panel = ((width - 180, 40), (width-20, height - 40))
        ad.blur(frame, panel[0], panel[1], (255, 255, 255), 2)
        ad.computeConversion(res, events, entities, engaged, stats, animation, 0.5, 500)
        ad.displayConversion(frame, stats, (width - 100, int(0.3*height)))
        ad.displayAnimation(frame, animation)
        ad.display_logo(frame, 20, height - 60)

        cv2.imshow('window', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            stats.save()
            break

    service.disable_session()

    camera.release()
    cv2.destroyAllWindows()
Example #24
    def __init__(self):
        conn = angus.connect()
        self.service = conn.services.get_service('age_and_gender_estimation',
                                                 version=1)
        self.ageDist = self.parseAgeDict("ageDist.txt")