Example #1
    def __selectDrawFrameByFaces(self):
        # Fetch the dictionary used to total
        # the per-emotion scores
        emotionSumScores = face.Face().result()

        # Reset every total, just in case
        for emotionName in emotionSumScores.keys():
            emotionSumScores[emotionName] = 0.0

        # Detect faces
        self.__faces = self.__detectFaces()

        # Compute each face's emotion scores
        self.__faces = self.__computeFaceScores(self.__faces)

        # Sum the scores per emotion
        faceList = self.__faces.face()
        for aface in faceList:
            emotionScores = aface.result()
            for emotionName in emotionScores.keys():
                emotionSumScores[emotionName] += emotionScores[emotionName]

        # Find the emotion with the highest total score
        self.__bestEmotion = BestEmotion("None", -1)  # discard the previous result
        for emotionName in emotionSumScores.keys():
            emotionScore = emotionSumScores[emotionName]
            if emotionScore == 0:  # skip emotions that scored zero
                continue
            if emotionScore > self.__bestEmotion.score:
                self.__bestEmotion.set(emotionName, emotionScore)
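
Both this snippet and Example #16 below assume a small BestEmotion holder with name and score attributes and a set() updater. A minimal sketch of such a helper, purely as an assumption about the project's real class:

# Hypothetical stand-in for the BestEmotion helper assumed above.
class BestEmotion:
    def __init__(self, name, score):
        self.name = name    # emotion label, e.g. "happiness"
        self.score = score  # accumulated score for that label

    def set(self, name, score):
        self.name = name
        self.score = score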
Example #2
import importlib
import time

import face


def main(render_sdl):
    if render_sdl:
        from sdl.driver import sdl_init
        from sdl.driver import sdl_draw
        window, renderer, pixels = sdl_init()

    else:
        from dotstar import Adafruit_DotStar
        strip = Adafruit_DotStar()
        strip.begin()

    t0 = time.time()
    i = 0

    while True:
        t = time.time() - t0
        i += 1
        try:
            importlib.reload(face)
            f = face.Face()
            f.render(t=t, i=i)
        except ModuleNotFoundError:
            continue

        if render_sdl:
            print('rendering sdl')
            sdl_draw(pixels, window, f.grid)
        else:
            arr = face.to_arr()
            render_dotstar(strip, arr)
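
The try/except around importlib.reload(face) lets face.py be edited while the loop runs, with changes picked up on the next frame. A self-contained sketch of that live-reload pattern (the module name live_module is made up for this demo):

# Demonstrates reload-on-the-fly with a throwaway module; live_module is
# hypothetical and created here just for the demo.
import importlib
import pathlib

pathlib.Path("live_module.py").write_text("GREETING = 'hello from version one'\n")
importlib.invalidate_caches()  # make sure the new file is seen
import live_module
print(live_module.GREETING)

pathlib.Path("live_module.py").write_text("GREETING = 'v2'\n")
importlib.reload(live_module)  # re-executes the edited source in place
print(live_module.GREETING)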
Example #3
    def __init__(self, controller, window):
        Ui_MainWindow.__init__(self)
        self.setupUi(window)
        self.controller = controller
        self.face = face.Face(self.controller)
        expression_names = [name.capitalize() for name in self.face.get_expression_names()]
        self.expressionComboBox.addItems(expression_names)
        self.pushButton.clicked.connect(self.user_rq_expression_perf)
Example #4
    def read_file(self):
        fr = offfile.OffFile(self.filename)

        first_line = fr.nextLine().split()

        number_of_vertices = int(first_line[0])
        number_of_faces = int(first_line[1])
        number_of_edges = int(first_line[2])

        self.n = number_of_vertices

        # Every vertex in the .off
        self.verts = []
        self.verts_prime = []
        # Every face in the .off
        self.faces = []
        # The IDs of the faces incident to each vertex ID (i.e. vtf[i] contains the faces that contain vertex i)
        self.verts_to_face = []

        for i in range(self.n):
            vert_line = fr.nextLine().split()
            x = float(vert_line[0])
            y = float(vert_line[1])
            z = float(vert_line[2])
            self.verts.append(np.array([x, y, z]))
            self.verts_prime.append(np.array([x, y, z]))

            self.verts_to_face.append([])
        self.verts_prime = np.asmatrix(self.verts_prime)
        self.neighbour_matrix = np.zeros((self.n, self.n))

        print("Generating Adjacencies")
        for i in range(number_of_faces):
            face_line = fr.nextLine().split()
            v1_id = int(face_line[1])
            v2_id = int(face_line[2])
            v3_id = int(face_line[3])
            self.faces.append(face.Face(v1_id, v2_id, v3_id))
            # Add this face to each vertex face map
            self.assign_values_to_neighbour_matrix(v1_id, v2_id, v3_id)
            self.verts_to_face[v1_id].append(i)
            self.verts_to_face[v2_id].append(i)
            self.verts_to_face[v3_id].append(i)

        print("Generating Edge Matrix")
        self.edge_matrix = np.zeros((self.n, self.n))

        for row in range(self.n):
            self.edge_matrix[row][row] = self.neighbour_matrix[row].sum()
        print("Generating Laplacian Matrix")

        # n-element array of 3x3 matrices
        self.cell_rotations = np.zeros((self.n, 3, 3))

        print(str(len(self.verts)) + " vertices")
        print(str(len(self.faces)) + " faces")
        print(str(number_of_edges) + " edges")
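
For reference, a tiny input in the layout this parser appears to expect: a counts line (vertices, faces, edges), one vertex per line, then faces as "3 v1 v2 v3". Any leading OFF keyword is presumably consumed by OffFile; the sample below is hypothetical:

# Hypothetical minimal .off payload for read_file: 4 vertices, 2 faces, 5 edges.
sample_off = """4 2 5
0.0 0.0 0.0
1.0 0.0 0.0
0.0 1.0 0.0
0.0 0.0 1.0
3 0 1 2
3 0 1 3
"""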
Example #5
    def add_face_data(self, match_profile):
        denom = len(match_profile)
        import face
        f = face.Face()
        c = 0
        for match in match_profile:
            c += 1  # counts every member of match_profile, analyzed or not
            if match.anal == 0:
                pic_with_one = False  # True iff a photo with exactly one subject was found
                pic_num = 0
                face_count = -1  # sentinel: no facial analysis has run yet
                list_faces = []  # all faces could be collected here later for more complex
                for pic in match.photos:  # analysis, e.g. ruling out conflicting genders
                    pic_num += 1
                    temp_face = f.anal(pic)
                    if temp_face.status_code != 200:
                        # crude fallback: leave 'face' out of temp_face so the
                        # code below is skipped when the picture cannot be downloaded
                        temp_face = {}
                    else:
                        temp_face = temp_face.json()
                    if 'face' in temp_face:
                        face_count = len(temp_face['face'])
                        if face_count == 1:
                            match.facial_attributes = temp_face  # exactly one face in this pic: use it
                            match.photo_used = pic_num
                            pic_with_one = True
                            print('photo number ' + str(pic_num) +
                                  ' for _id ' + str(match._id) +
                                  ' has just one subject.')
                            break  # stop scanning this match's photos
                        else:
                            print('photo number ' + str(pic_num) +
                                  ' for _id ' + str(match._id) + ' has ' +
                                  str(face_count) +
                                  ' subjects. Trying next photo')

                if not pic_with_one:
                    # TODO: do not default to the first photo if it has zero subjects
                    print('no photos found with just one subject. Defaulting to first photo')
                    match.facial_attributes = f.anal(match.photos[0]).json()
                    match.photo_used = 1  # a later pass may check for photos where all but one subject is the wrong gender
                print(str(c) + '/' + str(denom) + ' face scans completed')
        return match_profile  # eventually this return will move once the steps are separated
Example #6
def process_frame(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.resize(frame, (800, 450))
    faces = detector(frame)
    face1 = face.Face(faces[0], frame)
    frame = face1.frame
    eye_center = (face1.left_eye.center / face1.left_eye.scale +
                  face1.left_eye.eye_origin)
    frame[int(eye_center[1]), int(eye_center[0])] = 255
    eye_center = (face1.right_eye.center / face1.right_eye.scale +
                  face1.right_eye.eye_origin)
    frame[int(eye_center[1]), int(eye_center[0])] = 255
    print(face1.left_eye.relative_center)
    return frame, face1
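
process_frame relies on a module-level detector. Assuming it is dlib's frontal-face detector (an assumption, as are the camera index and the availability of the project's face module), a minimal driver might be:

# Hedged driver for process_frame; needs dlib, the project's face module,
# and at least one visible face (faces[0] raises IndexError otherwise).
import cv2
import dlib

detector = dlib.get_frontal_face_detector()

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
cap.release()
if ok:
    annotated, f = process_frame(frame)
    cv2.imshow("annotated", annotated)
    cv2.waitKey(0)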
Example #7
def getFaceGivenLines(triangleMesh, vertexStartIndex, vertexIndexTable,
                      vertexes):
    "Add face given line index and lines."
    faceGivenLines = face.Face()
    faceGivenLines.index = len(triangleMesh.faces)
    for vertexIndex in range(vertexStartIndex, vertexStartIndex + 3):
        vertex = vertexes[vertexIndex]
        vertexUniqueIndex = len(vertexIndexTable)
        if str(vertex) in vertexIndexTable:
            vertexUniqueIndex = vertexIndexTable[str(vertex)]
        else:
            vertexIndexTable[str(vertex)] = vertexUniqueIndex
            triangleMesh.vertexes.append(vertex)
        faceGivenLines.vertexIndexes.append(vertexUniqueIndex)
    return faceGivenLines
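
A sketch of how getFaceGivenLines might be driven; the TriangleMesh stand-in is hypothetical (only the .faces and .vertexes attributes the function touches are provided), and the same face module the snippet imports must be available:

# Hypothetical harness for getFaceGivenLines.
class TriangleMesh:
    def __init__(self):
        self.faces = []
        self.vertexes = []

mesh = TriangleMesh()
vertexIndexTable = {}
vertexes = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
newFace = getFaceGivenLines(mesh, 0, vertexIndexTable, vertexes)
mesh.faces.append(newFace)
print(newFace.vertexIndexes)  # expected: [0, 1, 2]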
Example #8
    def setMesh(self, vertices, faces, edges):
        number_of_vertices = vertices.shape[0]
        number_of_faces = faces.shape[0]
        number_of_edges = edges.shape[0]
        self.n = number_of_vertices

        # Every vertex in the .off
        self.verts = []
        self.verts_prime = []
        # Every face in the .off
        self.faces = []
        # The IDs of the faces incident to each vertex ID (i.e. vtf[i] contains the faces that contain vertex i)
        self.verts_to_face = []

        for i in range(self.n):
            x, y, z = vertices[i]
            self.verts.append(np.array([x, y, z]))
            self.verts_prime.append(np.array([x, y, z]))
            self.verts_to_face.append([])

        self.verts_prime = np.asmatrix(self.verts_prime)
        self.neighbour_matrix = matrix((self.n, self.n))

        print("Generating Adjacencies")
        for i in range(number_of_faces):
            v1_id, v2_id, v3_id = faces[i]
            self.faces.append(face.Face(v1_id, v2_id, v3_id))
            # Add this face to each vertex face map
            self.assign_values_to_neighbour_matrix(v1_id, v2_id, v3_id)
            self.verts_to_face[v1_id].append(i)
            self.verts_to_face[v2_id].append(i)
            self.verts_to_face[v3_id].append(i)

        print("Generating Edge Matrix")
        self.edge_matrix = matrix((self.n, self.n))

        for row in range(self.n):
            self.edge_matrix[row, row] = self.neighbour_matrix[row].sum()
        print("Generating Laplacian Matrix")

        # n-element array of 3x3 matrices
        self.cell_rotations = np.zeros((self.n, 3, 3))

        print(str(len(self.verts)) + " vertices")
        print(str(len(self.faces)) + " faces")
        print(str(number_of_edges) + " edges")
Example #9
    def __init__(self, video_source=0):
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.ind_run_demo, self.ind_recog_face, self.ind_track_face, self.ind_caption = 0, 0, 0, 0
        self.caption = ""

        self.roboFace = face.Face(x_weight=0.8, y_weight=0.2)
        #################################################################
        # Set up tracker
        self.tracker = cv2.TrackerMedianFlow_create()
        self.Tracking_Period = 5  # set tracking period before re-initialisation in seconds
        # Load Neural Net model and meanFace
        self.model = load_model(
            '../face_detection/trained/pretrained_CelebA_normalised0203-05.h5')
        self.meanFace = np.load('../face_detection/mean_face_normalised.npy')
        # Load Face Cascade and Eye Cascade classifiers
        self.face_cascade = cv2.CascadeClassifier(
            '../face_detection/haarcascade_frontalface_alt.xml')
        self.eye_cascade = cv2.CascadeClassifier(
            '../face_detection/haarcascade_eye.xml')
        #################################################################
        # Set Speed for smoother movement
        self.roboFace.setSpeedAll(100)
        self.roboFace.setSpeedHead(80)
        self.flag = Event()
        self.flag.clear()
        #################################################################
        self.roboFace.neutral()
        self.probStream = None
        self.saidNothing = 0
        self.t1 = cv2.getTickCount()
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.waiting_phrases = [
            "Hi! Is anybody here?", "Greetings human! Nice to meet you! ",
            "My name is roboface! I am a friendly robot!",
            "Hello! It's a pleasure to meet you!", "I feel so lonely!"
        ]
Example #10
    def read_file(self):

        first_line = self.nextLine().split()

        number_of_vertices = int(first_line[0])
        number_of_faces = int(first_line[1])
        number_of_edges = int(first_line[2])

        n = number_of_vertices

        # Every vertex in the .off
        verts = []
        # Every face in the .off
        faces = []
        # The IDs of the faces incident to each vertex ID (i.e. vtf[i] contains the faces that contain vertex i)
        verts_to_face = []

        for i in range(n):
            vert_line = self.nextLine().split()
            x = float(vert_line[0])
            y = float(vert_line[1])
            z = float(vert_line[2])
            verts.append(np.array([x, y, z]))

            verts_to_face.append([])

        for i in range(number_of_faces):
            face_line = self.nextLine().split()
            v1_id = int(face_line[1])
            v2_id = int(face_line[2])
            v3_id = int(face_line[3])
            faces.append(face.Face(v1_id, v2_id, v3_id))
            # Add this face to each vertex face map
            verts_to_face[v1_id].append(i)
            verts_to_face[v2_id].append(i)
            verts_to_face[v3_id].append(i)
        print("Num of verts ", len(verts))
        print("Num of faces ", len(faces))
        return (verts, faces, verts_to_face)
Example #11
def serial_loop():
    with serial.Serial('COM5', 9600, timeout=0.1) as ser:

        setPortCount = 0

        if face_or_words == "face":
            arranged_data = face.Face(MODE, config.sensor_nums)
        elif face_or_words == "words":
            arranged_data = words.Words(MODE, config.sensor_nums)
        arranged_data.make_dir_train_or_test(is_new)  # create the output folder
        try:
            while True:
                s = ser.readline()
                m = None

                if setPortCount < 100:
                    print("waiting port now" + str(setPortCount))
                    ser.write(bytes(str(2), 'UTF-8'))

                try:
                    de = s.decode('utf-8')
                    m = re.match(r"-*\w+", de)
                except Exception:
                    pass
                if m is not None:

                    setPortCount = setPortCount + 1

                    config.is_calibration, make_serial_flush = arranged_data.fetch_numbers(
                        m.group())
                    if make_serial_flush:
                        ser.flushInput()
                else:
                    pass
                    #print(type(m))
        except:
            print("Unexpected error:", sys.exc_info()[0])
            raise
        ser.close()
Example #12
def face_detection(facesIn, window=False):
    """Take a faces object holding an image, detect faces, store them, and return the object.

    in: faces  out: faces
    """
    # constants
    ORG_WINDOW_NAME = "org"
    GAUSSIAN_WINDOW_NAME = "gaussian"

    # select the cascade classifier
    cascade_file = "haarcascade_frontalface_alt2.xml"
    cascade = cv2.CascadeClassifier(cascade_file)

    height, width, channels = facesIn.image().shape

    # prepare the display windows
    if window:
        cv2.namedWindow(ORG_WINDOW_NAME)
        cv2.namedWindow(GAUSSIAN_WINDOW_NAME)

    # grab the image and detect faces
    img = facesIn.image()
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_list = cascade.detectMultiScale(img_gray, minSize=(100, 100))

    # mark each detected face
    for (x, y, w, h) in face_list:
        color = (0, 0, 225)
        pen_w = 3
        cv2.rectangle(img_gray, (x, y), (x+w, y+h), color, thickness=pen_w)

        facesIn.set_face(face.Face(0, [(x, y), (w, h)]))

    # show the frames
    if window:
        cv2.imshow(ORG_WINDOW_NAME, img)
        cv2.imshow(GAUSSIAN_WINDOW_NAME, img_gray)

    return facesIn
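
The docstring says face_detection takes a container that holds an image and collects detected faces. A minimal sketch of such a container and a driver, purely as assumptions about the project's real Faces class (the cascade XML must be on disk for the call to work):

# Hypothetical stand-in for the project's Faces container; only the
# image()/set_face()/face() interface used by face_detection is provided.
import cv2
import numpy as np

class Faces:
    def __init__(self, image):
        self._image = image
        self._faces = []

    def image(self):
        return self._image

    def set_face(self, aface):
        self._faces.append(aface)

    def face(self):
        return self._faces

frame = np.zeros((300, 300, 3), dtype=np.uint8)  # stand-in BGR image
result = face_detection(Faces(frame))
print(len(result.face()), "face(s) detected")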
Example #13
    def __init__(self):
        self.image = None  # populated by the first frame grab below

        self.face = face.Face(x_weight=0.8, y_weight=0.2)
        # Set Speed for smoother movement
        self.face.setSpeedAll(100)
        self.face.setSpeedHead(80)
        self.face.neutral()
        self.face.setSpeedLips(127)

        self.talk_flag = Event()
        self.talk_flag.clear()

        self.model = load_model(
            '../face_detection/trained/pretrained_CelebA_normalised0203-05.h5')

        # 0 for the built-in webcam, 1 for the robot's camera
        self.video_capt = cv2.VideoCapture(1)

        if self.video_capt.isOpened():  # try to get the first frame
            self.rval, self.image = self.video_capt.read()
        else:
            self.rval = False
            self.say("Sorry, I cannot see you.")
Example #14
        data[guy] = curr_pics

    return data


data = load_data(data_dir)
keys = []
for key in data.keys():
    keys.append(key)
    print('folder: {}, image count: {}'.format(key, len(data[key])))

train_x = []
train_y = []
face_encoder = face.Encoder()
face_recognition = face.Recognition()
face_init = face.Face()

#lena = mpimg.imread('lena.jpg')
#faces=face_encoder.generate_embedding(lena)
#emb_data=np.array(faces)
#print(emb_data.shape)
##
for x in data[keys[0]]:

    emb_data = face_recognition.identify(x)
    train_x.append(emb_data)
    train_y.append(0)
    print(len(train_x))
Example #15
# This short demo was created by Athanasios Raptakis and Viacheslav Honcharenko
# during WS2017 for the Roboface lip-articulation Robotics Practical at Heidelberg University

import numpy as np
from threading import Thread, Event
import face
from time import sleep, time
import os
from scipy.io import wavfile
from scipy.ndimage.filters import maximum_filter1d, gaussian_filter
import string

#Create an Instance of Roboface class
roboFace = face.Face(x_weight=0.8, y_weight=0.2)


# Generate the lip trajectory from the synthesized speech waveform
def Undersampled_Lip_Trajectory(phrase, Sleep_Time):
    A = "espeak -z -s 100 -v female5 -w test.wav "
    A = A + "'" + phrase + "'"
    os.system(A)
    samplerate, data = wavfile.read('test.wav')
    dt = 1 / float(samplerate)
    times = np.arange(len(data)) / float(samplerate)
    N = len(times)
    max_data = maximum_filter1d(data, size=1000)
    max_data = gaussian_filter(max_data, sigma=100)
    max_Amplitude = 10
    Amplitude = max_Amplitude * (max_data / float(np.max(max_data)))
    n = Sleep_Time * samplerate
Example #16
    def decorate(
            self,
            faces  # instance of the Faces class
    ):
        if len(faces.face()) <= 0:
            return faces

        size = faces.image().shape[:2]
        if min(size) <= 0:  # shape[:2] is a tuple; compare its entries
            return faces

        # Prepare the dictionary used to total the emotion scores;
        # it maps emotion name -> score (see the Face class for details)
        emotionSumScores = face.Face().result()

        # Reset every total to 0.0, just in case
        for emotionName in emotionSumScores.keys():
            emotionSumScores[emotionName] = 0.0

        # Sum the scores per emotion
        for aface in faces.face():
            emotionScores = aface.result()
            for emotionName in emotionScores.keys():
                emotionSumScores[emotionName] += emotionScores[emotionName]

        # Find the emotion with the highest total score
        bestEmotion = BestEmotion("None", -1)  # discard the previous result
        for emotionName in emotionSumScores.keys():
            emotionScore = emotionSumScores[emotionName]
            #            if emotionScore == 0:  # skip emotions that scored zero
            #                continue
            if emotionScore > bestEmotion.score:
                bestEmotion.set(emotionName, emotionScore)

        #print("--- self.__emotionImages.getEmotionImage(bestEmotion.name) type is")
        #print(type(self.__emotionImages.getEmotionImage(bestEmotion.name)))
        #print("---")

        bestEmotionImage = self.__emotionImages.getEmotionImage(
            bestEmotion.name)

        cvCapImg = cv2.cvtColor(faces.image(), cv2.COLOR_BGR2RGB)
        pilCapImg = Image.fromarray(cvCapImg)
        pilCapRGBAImg = pilCapImg.convert('RGBA')

        #print("[Type] bestEmotionImage : ")
        #print(type(bestEmotionImage))

        cvFrameImg = cv2.cvtColor(bestEmotionImage, cv2.COLOR_BGRA2RGBA)
        pilFrameImg = Image.fromarray(cvFrameImg)
        pilFrameRGBAImg = pilFrameImg.convert('RGBA')

        pilRGBATmp = Image.new('RGBA', pilCapRGBAImg.size, (255, 255, 255, 0))

        point = (0, 0)
        pilRGBATmp.paste(pilFrameRGBAImg, point, pilFrameRGBAImg)

        pilResImg = \
            Image.alpha_composite(pilCapRGBAImg, pilRGBATmp)

        cvResImg = cv2.cvtColor(np.asarray(pilResImg), cv2.COLOR_RGBA2BGRA)

        faces.set_image(cvResImg)

        return faces
Example #17
def readOpenFoamMesh(directory):
  local_path = 'constant/polyMesh'

  points_file = path.join(directory, local_path, 'points')
  faces_file = path.join(directory, local_path, 'faces')
  owner_file = path.join(directory, local_path, 'owner')
  neighbour_file = path.join(directory, local_path, 'neighbour')

  data = None
  with open(points_file, 'r') as data_file:
    data = data_file.readlines()
  pointer = find_string_list(data, ['('])
  nNodes = int(data[pointer-1])
  nodes = []
  for i in range(nNodes):
    pointer += 1
    array = array_inside(data[pointer], '(', ')', float)
    nodes.append(nd.Node(i, array))

  with open(faces_file, 'r') as data_file:
    data = data_file.readlines()
  pointer = find_string_list(data, ['('])
  nFaces = int(data[pointer-1])
  faces = []
  for i in range(nFaces):
    pointer += 1
    array = array_inside(data[pointer], '(', ')', int)
    node_array = [nodes[j] for j in array]
    faces.append(fc.Face(i, node_array))

  with open(owner_file, 'r') as data_file:
    data = data_file.readlines()
  pointer = find_string_list(data, ['('])
  assert nFaces == int(data[pointer-1])
  # Finding total number of cells
  pointer2 = find_string_list(data, [')'], pointer)
  owner_cell_indexes = [int(i.strip())
                        for i in data[pointer+1:pointer2]]
  nCells = max(owner_cell_indexes) + 1
  cellfaces = [[] for i in range(nCells)]
  cellfacesigns = [[] for i in range(nCells)]
  for face_id, cell_id in enumerate(owner_cell_indexes):
    cellfaces[cell_id].append(faces[face_id])
    cellfacesigns[cell_id].append(1.0)

  with open(neighbour_file, 'r') as data_file:
    data = data_file.readlines()
  pointer = find_string_list(data, ['('])
  nIFaces = int(data[pointer-1])
  pointer2 = find_string_list(data, [')'], pointer)
  neighbour_cell_indexes = [int(i.strip())
                            for i in data[pointer + 1:pointer2]]
  for face_id, cell_id in enumerate(neighbour_cell_indexes):
    cellfaces[cell_id].append(faces[face_id])
    cellfacesigns[cell_id].append(-1.0)

  cells = []
  for i, face_list in enumerate(cellfaces):
    cells.append(cl.Cell(i, face_list, cellfacesigns[i]))

  return (nNodes, nodes, nFaces, nIFaces, faces, nCells, cells)
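
Each of these polyMesh files shares the same text layout: a count line, an opening "(" line, one entry per line, then ")". A hypothetical fragment of a points file in that layout, shown as a Python string for reference:

# Hypothetical constant/polyMesh/points fragment in the layout the reader
# above walks: count, "(", one "(x y z)" tuple per line, ")".
points_fragment = """4
(
(0 0 0)
(1 0 0)
(0 1 0)
(0 0 1)
)
"""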
Example #18
            roboFace.angry(movelips=False)
        elif emotion == 'unsure':
            roboFace.unsure(movelips=False)
        elif emotion == 'neutral':
            roboFace.neutral(movelips=False)
        else:
            print('*** ERROR ***')
            print("Invalid syntax or argument")
            break
        if phrase != "silence":
            Say(phrase)
        sleep(float(pause_time))


#Create an Instance of Roboface class
roboFace = face.Face()
roboFace.setSpeedAll(60)
roboFace.neutral()
sleep(1)

instructions = [
    '<neutral_lips> Hi! (0.01)',
    '<happy_lips> My name is Roboface! Welcome to the Robotics Lab!(0.1)',
    '<moveleft> silence (1)', '<moveright> silence (1)',
    '<neutral_lips>My purpose is to study Human Robot interaction. I can recognise human emotions and express my feelings through verbal and non verbal communication(0)',
    '<neutral_lips>I can express emotions like happiness(0)',
    '<happy_lips> silence (1)', '<happy> Anger (1)', '<angry_lips> silence(1)',
    '<angry> and Sadness! (1)', '<sad_lips> silence (1)',
    '<moveleft> silence (1)', '<moveright> silence (1)',
    '<neutral_lips>silence(0.5)', '<neutral_lips>I am not a common robot(0.1)',
    '<happy>I can think with a neural network and speak with a real human voice, through a text to speech device(0.1)',
Example #19
import numpy as np
import cv2
import face
import sort_vp
import ikine
import motor_operator

import intro_sketch

from multiprocessing import Process, Event
import time
import contour_paper

start_time = time.time()

face = face.Face()

tx = 40.
ty = 95.
init_tx = 40
init_ty = 95


def dist(x1, y1, x2, y2):
    dist = np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))
    return dist


letter_size = [279, 216]

ESC_ASCII_VALUE = 0x1b
Example #20
# Kevin Chen
# 12/01/17
# This function draws a face shape in a pygame window

import face
import pygame
import sys
from pygame.locals import *

pygame.init()
mainSurface = pygame.display.set_mode((700, 700), 0, 32)
myface = face.Face(mainSurface)


while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == MOUSEBUTTONDOWN:
            myface.drawFace(pygame.mouse.get_pos())

    pygame.display.update()
Example #21
    def run(self):
        cap = cv.VideoCapture(0)
        i = 0
        while True:
            if (self.end_learning is True):
                if (self.first_flag is True):
                    if (face.checking_one_face(self.Faces) == False):
                        self.lr = trainLR(self.Faces)
                else:
                    self.first_flag = True
                self.end_learning = False

            ok,img = cap.read()
            if ok is not True:
                msg  = "video device is busy"
                self.emit(QtCore.SIGNAL("error"), msg)
                break

            if img is not None:  # `img != None` on a NumPy array is ambiguous
                bb = align.getLargestFaceBoundingBox(img, skipMulti=True, scaling=self.scaling)
                if bb is not None:
                    landmaks_a = align.findLandmarks(img, bb)
                    if landmaks_a is not None:
                        self.image = face_aligned(img, bb, self.aligned_size, landmarks=landmaks_a, landmarks_i=self.landmarks_type, scaling=self.scaling)
            id = 0
            if (self.learning == 0):
                if (self.image is not None):
                    if (self.lr is not None):
                        rep = net.forward(self.image)
                        id = self.lr.predict_proba(rep)[0][1]
                    else:
                        id = 0

                    self.emit(QtCore.SIGNAL("probabil"), id)
                if self.loading_flag is True:
                    if os.path.exists(os.path.join(fileDir,"classifier.pkl")):
                        os.remove(os.path.join(fileDir,"classifier.pkl"))
                    fname = os.path.join(fileDir, "classifier.pkl")
                    with open(fname, 'wb') as f:  # pickle requires binary mode
                        pickle.dump(self.lr, f)
                        self.emit(QtCore.SIGNAL("p_control"), " NETWORK HAS BEEN SAVED ", "black")
                        self.loading_flag = False
                                                  # now id -> probability of id == 1.
                                                  # ^ - prob of 1. If there was 0, that would be prob of 0
                                                  # http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
                if (id > self.probth):
                    self.name = self.my_name
                else:
                    self.name = "Others"

                if bb is not None:
                    bl = (bb.left(), bb.bottom())
                    tr = (bb.right(), bb.top())
                    cv.rectangle(img, bl, tr, color=(153, 255, 204), thickness=3)
                    for p in self.landmarks_type:
                        cv.circle(img, center=landmaks_a[p], radius=3, color=(255, 100, 50), thickness=-1)
                    cv.putText(img, self.name, (bb.left(), bb.bottom() + 20), cv.FONT_HERSHEY_SIMPLEX, fontScale=0.75, color=(152, 255, 204), thickness=2)

            if (self.learning == 1):
                if(i < self.iters):
                    if self.image is not None:
                        if(self.caller.comboBox.currentText() == "Owner"):
                            fc = face.Face(self.image, 1)
                            self.owner += 1
                            self.emit(QtCore.SIGNAL("ownoth"), self.owner, self.others)
                        if(self.caller.comboBox.currentText() == "Others"):
                            fc = face.Face(self.image, 0)
                            self.others += 1
                            self.emit(QtCore.SIGNAL("ownoth"), self.owner, self.others)
                        self.Faces.append(fc)
                        self.emit(QtCore.SIGNAL("loading"), True, i*100/self.iters, "teaching from cameras")
                        i+=1
                        if ( i >= self.iters):
                            self.emit(QtCore.SIGNAL("loading"), False)
                            self.learning = 0
                            self.end_learning = True
                            i = 0

            j = 0
            if(self.learning == 2):
                file_list = os.listdir(self.caller.lineEdit_2.text())
                for filem in file_list:
                    img_l = cv.imread(os.path.join(str(self.caller.lineEdit_2.text()),filem))
                    if img_l is not None:
                        bb = align.getLargestFaceBoundingBox(img_l, skipMulti=True,scaling=self.scaling)
                        if bb is not None:
                            landmaks_a = align.findLandmarks(img_l, bb)
                            if landmaks_a is not None:
                                self.image = face_aligned(img_l, bb, self.aligned_size, landmarks=landmaks_a, landmarks_i=self.landmarks_type, scaling=self.scaling)
                                if self.image is not None:
                                    if (self.caller.comboBox.currentText() == "Owner"):
                                        fc = face.Face(self.image, 1)
                                        self.owner+=1
                                        self.emit(QtCore.SIGNAL("ownoth"), self.owner, self.others)
                                    if (self.caller.comboBox.currentText() == "Others"):
                                        fc = face.Face(self.image, 0)
                                        self.others+=1
                                        self.emit(QtCore.SIGNAL("ownoth"), self.owner, self.others)
                                    if bb is not None:
                                        bl = (bb.left(), bb.bottom())
                                        tr = (bb.right(), bb.top())
                                        cv.rectangle(img_l, bl, tr, color=(153, 255, 204), thickness=3)
                                        for p in self.landmarks_type:
                                            cv.circle(img_l, center=landmaks_a[p], radius=3, color=(255, 100, 50),
                                                      thickness=-1)
                                        cv.putText(img_l, "Others", (bb.left(), bb.bottom() + 20), cv.FONT_HERSHEY_SIMPLEX,
                                                   fontScale=0.75, color=(153, 255, 204), thickness=2)

                                    image_out = cvimage2qimage(img_l)
                                    self.emit(QtCore.SIGNAL("img_signal"), image_out)
                                    self.Faces.append(fc)
                                    self.emit(QtCore.SIGNAL("loading"), True, j * 100 / len(file_list), "teaching from folder " + str(filem))
                    if (j >= (len(file_list)-1)):
                        self.emit(QtCore.SIGNAL("loading"), False)
                        self.end_learning = True
                        self.learning = 0
                    j+=1

            if(self.learning == 3):
                self.vk_learning()
                self.end_learning = True
                self.learning = 0

            image_out = cvimage2qimage(img)
            self.emit(QtCore.SIGNAL("img_signal"), image_out)
Example #22
# Minwoo Rhee
# 20181211
# assignment_ten.py
# click to draw faces

import pygame, sys
from pygame.locals import *
import face

pygame.init()
mainSurface = pygame.display.set_mode((800, 800), 0, 32)
pygame.display.set_caption("Pygame faces")

while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == MOUSEBUTTONDOWN:
            face1 = face.Face(mainSurface)
            face1 = face1.draw_face(pygame.mouse.get_pos())
Example #23
def add_face(v_1, v_2, v_3, faces, neighbours):
    neighbours[v_1].append(v_2)
    neighbours[v_1].append(v_3)
    neighbours[v_2].append(v_1)
    neighbours[v_2].append(v_3)
    neighbours[v_3].append(v_2)
    neighbours[v_3].append(v_1)
    faces.append(face.Face(v_1, v_2, v_3))
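
A small usage sketch for add_face; the dict-of-lists shape for neighbours is an assumption, and the project's face module must be importable:

# Hedged usage sketch; neighbours maps vertex id -> adjacency list (assumed).
faces = []
neighbours = {0: [], 1: [], 2: []}
add_face(0, 1, 2, faces, neighbours)
print(neighbours[0])  # [1, 2]
print(len(faces))     # 1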
Example #24
    def vk_learning(self):
        access_token, user_id = vk_requests.get_saved_auth_params()
        if not access_token or not user_id:
            access_token, user_id = vk_requests.get_auth_params()

        if os.path.exists(IMG_FOLDER_NOTME):
            shutil.rmtree(IMG_FOLDER_NOTME)
        if os.path.exists(IMG_FOLDER_ME):
            shutil.rmtree(IMG_FOLDER_ME)

        if IMG_FOLDER_NOTME and not os.path.exists(IMG_FOLDER_NOTME):
            os.makedirs(IMG_FOLDER_NOTME)
        if IMG_FOLDER_ME and not os.path.exists(IMG_FOLDER_ME):
            os.makedirs(IMG_FOLDER_ME)

        num_friends = int(self.caller.loggingwidget.lineEdit_2.text())

        i = 0
        new_user_id = self.caller.loggingwidget.lineEdit.text()
        dictionary = vk_requests.get_me(access_token, new_user_id)
        d = dictionary[0]
        self.my_name = d["first_name"] + " " + d["last_name"]
        new_user_id = int(d["uid"])

        friends = vk_requests.get_my_friends_list(access_token, new_user_id)
        j = random.randrange(1000, 10001, 1) % (len(friends) - num_friends)
        while i < num_friends:
            self.emit(QtCore.SIGNAL("loading"), True, (i * 100 / num_friends), "loading friends...")
            f_img = vk_requests.get_imgs_metadata(access_token, friends[j % len(friends)])
            f_urls = vk_requests.get_photos_urls(f_img)
            self.save_photos(f_urls, IMG_FOLDER_NOTME)
            i += 1
            j += 34
        self.emit(QtCore.SIGNAL("loading"), False)
        self.emit(QtCore.SIGNAL("loading"), True, 0, "loading me...")
        imgs = vk_requests.get_imgs_metadata(access_token, new_user_id)
        urls = vk_requests.get_photos_urls(imgs)
        self.save_photos(urls, IMG_FOLDER_ME)
        self.emit(QtCore.SIGNAL("finishloading"))

        file_list1 = os.listdir(os.path.join(fileDir, IMG_FOLDER_NOTME))
        file_list2 = os.listdir(os.path.join(fileDir, IMG_FOLDER_ME))

        i = 0
        for file1 in file_list1:
            img1 = cv.imread(os.path.join(os.path.join(fileDir, IMG_FOLDER_NOTME), file1))
            if img1 is not None:
                bb = align.getLargestFaceBoundingBox(img1, skipMulti=True, scaling=self.scaling)
                if bb is not None:
                    landmarks1 = align.findLandmarks(img1, bb)
                    self.image = face_aligned(img1, bb, self.aligned_size, landmarks=landmarks1,
                                              landmarks_i=self.landmarks_type, scaling=self.scaling)
                    if self.image is not None:
                        fc = face.Face(self.image, 0)
                        self.Faces.append(fc)
                        self.others += 1
                        self.emit(QtCore.SIGNAL("ownoth"), self.owner, self.others)
                        bl = (bb.left(), bb.bottom())
                        tr = (bb.right(), bb.top())
                        cv.rectangle(img1, bl, tr, color=(153, 255, 204), thickness=3)
                        for p in self.landmarks_type:
                            cv.circle(img1, center=landmarks1[p], radius=3, color=(255, 100, 50), thickness=-1)
                        cv.putText(img1, "your friend", (bb.left(), bb.bottom() + 20), cv.FONT_HERSHEY_SIMPLEX, fontScale=0.75, color=(152, 255, 204), thickness=2)
                        image_out = cvimage2qimage(img1)
                        self.emit(QtCore.SIGNAL("img_signal"), image_out)
            i += 1
            self.emit(QtCore.SIGNAL("loading"), True, i * 100 / len(file_list1),"learning friend" + str(file1))

        self.emit(QtCore.SIGNAL("loading"), False)
        i = 0
        for file2 in file_list2:
            img2 = cv.imread(os.path.join(os.path.join(fileDir, IMG_FOLDER_ME), file2))
            if img2 is not None:
                bb = align.getLargestFaceBoundingBox(img2, skipMulti=True,scaling=self.scaling)
                if bb is not None:
                    landmarks2 = align.findLandmarks(img2, bb)
                    self.image = face_aligned(img2, bb, self.aligned_size, landmarks=landmarks2,
                                              landmarks_i=self.landmarks_type, scaling=self.scaling)
                    if self.image is not None:
                        fc = face.Face(self.image, 1)
                        self.owner += 1
                        self.emit(QtCore.SIGNAL("ownoth"), self.owner, self.others)
                        self.Faces.append(fc)
                        bl = (bb.left(), bb.bottom())
                        tr = (bb.right(), bb.top())
                        cv.rectangle(img2, bl, tr, color=(153, 255, 204), thickness=3)
                        for p in self.landmarks_type:
                            cv.circle(img2, center=landmarks2[p], radius=3, color=(255, 100, 50), thickness=-1)
                        cv.putText(img2, self.my_name, (bb.left(), bb.bottom() + 20), cv.FONT_HERSHEY_SIMPLEX, fontScale=0.75, color=(152, 255, 204), thickness=2)
                        image_out = cvimage2qimage(img2)
                        self.emit(QtCore.SIGNAL("img_signal"), image_out)
            i += 1
            self.emit(QtCore.SIGNAL("loading"),True, i * 100 / len(file_list2), "learning me" + str(file2))
        self.emit(QtCore.SIGNAL("loading"), False)