예제 #1
0
파일: mesh.py 프로젝트: caichun/FaceMorpher
def main(image1_path, image2_path):
    """Morph between two face images and play the looping animation.

    Parameters:
        image1_path, image2_path: paths to the two portrait images.

    Press Esc or 'q' in the display window to stop (the original looped
    forever with no way to exit).
    """
    img1, gray1 = readImage(image1_path)
    img2, gray2 = readImage(image2_path)

    # Load the FaceTracker model files (connectivity, triangulation,
    # tracker weights).
    conns = facetracker.LoadCon(r'external\FaceTracker\model\face.con')
    trigs = facetracker.LoadTri(r'external\FaceTracker\model\face.tri')
    tracker1 = facetracker.FaceTracker(
        r'external\FaceTracker\model\face.tracker')
    tracker2 = facetracker.FaceTracker(
        r'external\FaceTracker\model\face.tracker')
    tracker1.setWindowSizes((11, 9, 7))
    tracker2.setWindowSizes((11, 9, 7))

    # Locate matching feature points in both faces and build the
    # intermediate morph frames.
    feature_pair = getFeaturePoints(tracker1, tracker2, gray1, gray2)
    intermediate_feature = interpolatePts(feature_pair)
    frames = combineImages(intermediate_feature, TRIANGLES, image1_path,
                           image2_path)
    # Append the reversed sequence so the animation loops back smoothly.
    frames.extend(frames[::-1])

    # Play the animation until the user asks to quit.  The original
    # called waitKey(20) twice per frame and discarded the key code,
    # leaving no exit path.
    while True:
        for frame in frames:
            cv2.imshow("Cameras", frame)
            key = cv2.waitKey(40) & 0xFF
            if key in (27, ord('q')):  # Esc or 'q' quits
                cv2.destroyAllWindows()
                return
예제 #2
0
def get_tracker_points(args):
    """Detect face landmark points in a portrait and save them as .npy.

    FaceTracker settings:
    - clamp      : [0-4] 1 gives a very loose fit, 4 gives a very tight
                   fit
    - iterations : Number of iterations. Should be a number in the
                   range: 1-25, 1 is fast and inaccurate, 25 is slow and
                   accurate
    - tolerance  : Matching tolerance. Should be a double in the range:
                   .01-1.

    pyFaceTracker:
    http://pythonhosted.org/pyFaceTracker/generated/facetracker.FaceTracker.html

    FaceTracker:
    https://github.com/kylemcdonald/FaceTracker
    """

    portrait_path = args.portrait_path
    tracker_path = args.tracker_path
    clamp = args.clamp
    iterations = args.iterations
    tolerance = args.tolerance
    out_dir = args.output_dir
    plot = args.plot

    # Load portrait; the tracker operates on a grayscale numpy image.
    portrait = Image.open(portrait_path)
    portrait_gray = portrait.convert("L")
    portrait_gray_np = np.array(portrait_gray)

    # Load tracker
    tracker = facetracker.FaceTracker(tracker_path)

    # Configure and run the tracker.  clamp/tolerance are only set when
    # truthy so the model's defaults survive a 0/None argument.
    tracker.resetFrame()
    if clamp:
        tracker.clamp = clamp
    tracker.iterations = iterations
    if tolerance:
        tracker.tolerance = tolerance
    tracker.update(portrait_gray_np)

    # get2DShape() returns a flat vector [x0..xN, y0..yN]; split it into
    # (x, y) pairs.  Use integer division so the slice bounds stay ints
    # under Python 3 (true division yields a float and breaks slicing).
    malformed_points = tracker.get2DShape()[0]
    nb_coords = len(malformed_points)
    nb_points = nb_coords // 2
    xs = malformed_points[:nb_points]
    ys = malformed_points[nb_points:]
    # Materialize the zip: under Python 3, np.array(zip(...)) would
    # produce a useless 0-d object array instead of an (N, 2) array.
    points = list(zip(xs, ys))
    points_np = np.squeeze(np.array(points))

    # Save tracker points (out_dir is expected to end with a separator).
    name = rm_dir_and_ext(portrait_path)
    np.save(out_dir + name + ".npy", points_np)

    # Optionally save a diagnostic plot of the detected points.
    if plot:
        save_plot(portrait, points_np, name, out_dir=out_dir)
예제 #3
0
def main(img_path):
    """Track a face in a single image and visualize the fitted 3D model.

    Shows the image with the tracked mesh drawn on it, plus a 3D scatter
    of the model's landmark points when tracking succeeds.
    """
    #
    # Load image: color copy for drawing, grayscale for tracking
    #
    img = Image.open(img_path)
    gray = img.convert('L')
    img = np.asanyarray(img)
    gray = np.asarray(gray)

    #
    # Load face model
    #
    conns = facetracker.LoadCon(r'..\external\FaceTracker\model\face.con')
    trigs = facetracker.LoadTri(r'..\external\FaceTracker\model\face.tri')
    tracker = facetracker.FaceTracker(
        r'..\external\FaceTracker\model\face.tracker')

    #
    # Search for faces in the image
    #
    tracker.setWindowSizes((11, 9, 7))
    if tracker.update(gray):
        img = tracker.draw(img, conns, trigs)

        # get3DShape() is indexed as a 2-D column vector laid out
        # [x0..x65, y0..y65, z0..z65] -- presumably 66 landmarks
        # (TODO confirm against pyFaceTracker docs).
        obj3D = tracker.get3DShape()

        fig3d = plt.figure()
        ax = fig3d.add_subplot(111, projection='3d')
        ax.scatter(obj3D[:66, 0], obj3D[66:132, 0], obj3D[132:, 0])
        for i in range(66):
            # Index column 0 explicitly: obj3D[i] is a 1-element row,
            # but ax.text expects scalar coordinates (the scatter call
            # above already indexes the column).
            ax.text(obj3D[i, 0], obj3D[i + 66, 0], obj3D[i + 132, 0],
                    str(i))
        ax.view_init(-90, -90)

    else:
        print('Failed tracking face in image')

    plt.figure()
    plt.imshow(img)

    plt.show()
예제 #4
0
def tutorial1():
    """Track the Mona Lisa's face and save 2D overlay + 3D model figures."""

    # Color copy for drawing, grayscale for tracking.
    img = Image.open(os.path.join(IMAGES_BASE, 'MonaLisa.jpg'))
    gray = img.convert('L')
    img = np.asanyarray(img)
    gray = np.asarray(gray)

    #
    # Load face model
    #
    conns = facetracker.LoadCon(r'..\external\FaceTracker\model\face.con')
    trigs = facetracker.LoadTri(r'..\external\FaceTracker\model\face.tri')
    tracker = facetracker.FaceTracker(
        r'..\external\FaceTracker\model\face.tracker')

    #
    # Search for faces in the image
    #
    tracker.setWindowSizes((11, 9, 7))
    if tracker.update(gray):
        img = tracker.draw(img, conns, trigs)

        # get3DShape() is indexed as a 2-D column vector laid out
        # [x0..x65, y0..y65, z0..z65] -- presumably 66 landmarks
        # (TODO confirm against pyFaceTracker docs).
        obj3D = tracker.get3DShape()

        fig3d = plt.figure()
        ax = fig3d.add_subplot(111, projection='3d')
        ax.scatter(obj3D[:66, 0], obj3D[66:132, 0], obj3D[132:, 0])
        for i in range(66):
            # Index column 0 explicitly: obj3D[i] is a 1-element row,
            # but ax.text expects scalar coordinates (the scatter call
            # above already indexes the column).
            ax.text(obj3D[i, 0], obj3D[i + 66, 0], obj3D[i + 132, 0],
                    str(i))
        ax.view_init(-75, -90)
        plt.title('3D Model of Mona Lisa\'s face')
        savefig('3DMonaLisa')
    else:
        print('Failed tracking face in image')

    plt.figure()
    plt.imshow(img)
    plt.title('Face of the Mona Lisa')
    savefig('MonaLisaFace')
# -*- encoding:utf-8 -*-

import os
import random
import string
import numpy as np
import cv2
import facetracker
from werkzeug.utils import secure_filename
from PIL import Image

# Load the FaceTracker model assets (connectivity, triangulation, tracker
# weights) once at import time; shared by all request handlers below.
conns = facetracker.LoadCon('./static/model/face.con')
trigs = facetracker.LoadTri('./static/model/face.tri')
tracker = facetracker.FaceTracker('./static/model/face.tracker')
# OpenCV Haar cascade for frontal-face detection (Homebrew OpenCV path).
cascade_path = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
# Upload whitelist: only these image extensions are accepted.
allow_extends = set(['png', 'jpeg', 'jpg', 'gif'])
# NOTE(review): presumably the length of random names for saved uploads
# (random/string are imported above) -- confirm against the save code.
digit = 8


def allowed_file(filename):
    """Return True if *filename* has an extension in ``allow_extends``."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in allow_extends


def image_save(img):

    if img and allowed_file(img.filename):

        ImgName = img.filename
        result = "ok"
        img = cv2.imdecode(np.fromstring(img.read(), np.uint8),
예제 #6
0
        pass

    cv2.destroyAllWindows()

if __name__ == '__main__':
    import sys
    import getopt
    print(help_message)

    # Optional CLI overrides for the model files; the first positional
    # argument is the video source (defaults to camera 0).
    args, video_src = getopt.getopt(sys.argv[1:], '', ['face=', 'con=', 'tri='])
    try:
        video_src = video_src[0]
    except IndexError:
        # No positional argument given: fall back to the default camera.
        video_src = 0
    args = dict(args)
    # BUG FIX: the tracker path was read from '--con' (copy/paste slip),
    # so '--face' was silently ignored and '--con' clobbered the tracker
    # model path.
    face_fn = args.get('--face', r"../external/FaceTracker/model/face.tracker")
    con_fn = args.get('--con', r"../external/FaceTracker/model/face.con")
    tri_fn = args.get('--tri', r"../external/FaceTracker/model/face.tri")

    tracker = facetracker.FaceTracker(face_fn)
    conns = facetracker.LoadCon(con_fn)
    trigs = facetracker.LoadTri(tri_fn)

    cam = create_capture(video_src)
    tracker.setWindowSizes((7,))

    # Seed the mayavi 3D plot with random points; the tracked shape
    # presumably replaces them via ms once updates arrive -- confirm.
    shape3D = np.random.randn(3, 66)
    l = mlab.points3d(shape3D[0, :], shape3D[1, :], shape3D[2, :])
    ms = l.mlab_source

    print("Server is running on localhost:8001...")
    server = pywsgi.WSGIServer(('0.0.0.0', 8001), myapp, handler_class=WebSocketHandler)
    server.serve_forever()
예제 #7
0
from naoqi import ALProxy
import facetracker as track

# Start NAO face tracking as a module-level side effect on import.
tracker = track.FaceTracker()
tracker.startTracking()

# tts.say("")

예제 #8
0
 def __init__(self, recipe):
     """Store *recipe* and set up NAO face tracking and eye-LED control."""
     self.recipe = recipe
     # NOTE(review): use_nao is presumably a module/global flag or proxy
     # shared by both controllers -- confirm where it is defined.
     self.faceTracker = tracker.FaceTracker(use_nao)
     self.ledController = eye_leds.LedController(use_nao)