Code example #1
File: showModel.py Project: ryota-be/source
import dataHandle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import re


name = raw_input("name:")
# Look up the (x, y, z) coordinates registered under this name
x, y, z = dataHandle.getData(name)
r = re.compile(r"[^[][ ]+")
try:
    # Show the stored points as a 3D scatter plot
    fig3d = plt.figure()
    ax = fig3d.add_subplot(111, projection="3d")
    ax.scatter(x, y, z)
    ax.view_init(-90, -90)
    plt.show()
except NameError:
    print name + " is not registered"
    print "Register: python (calibration.py|pictureCalibration.py img_path)"
Code example #2
File: showDiff.py Project: ryota-be/source
    plt.ion()
    args, video_src = getopt.getopt(sys.argv[1:], "", ["face=", "con=", "tri="])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    face_fn = args.get("--face", r"..\pyFaceTracker-0.1.1\external\FaceTracker\model\face.tracker")
    con_fn = args.get("--con", r"..\pyFaceTracker-0.1.1\external\FaceTracker\model\face.con")
    tri_fn = args.get("--tri", r"..\pyFaceTracker-0.1.1\external\FaceTracker\model\face.tri")

    tracker = facetracker.FaceTracker(face_fn)
    conns = facetracker.LoadCon(con_fn)
    trigs = facetracker.LoadTri(tri_fn)
    name = raw_input("Comparison name:")
    comX, comY, comZ = dataHandle.getData(name)
    cam = create_capture(video_src)
    tracker.setWindowSizes((7,))

    try:
        while True:
            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Equalize the histogram of the grayscale image
            gray = cv2.equalizeHist(gray)
            if tracker.update(gray):
                obj3D = tracker.get3DShape()
                # drawMask(ax,obj3D)
                shape, visi = tracker.get2DShape()
                draw(img, shape, visi)
            else:
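The create_capture call above is presumably the helper from OpenCV's Python samples, which wraps cv2.VideoCapture and falls back to a camera index. If the samples are not on the path, a rough stand-in (the name and fallback behaviour here are assumptions) could look like this:

# Hypothetical stand-in for the create_capture helper used in showDiff.py.
import cv2

def create_capture(source=0):
    # Accept either a camera index ("0", 0) or a file/device path
    try:
        source = int(source)
    except (TypeError, ValueError):
        pass
    cap = cv2.VideoCapture(source)
    if not cap.isOpened():
        raise IOError("cannot open video source: %r" % (source,))
    return cap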
Code example #3
File: calibration.py Project: ryota-be/source
			gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
			# Equalize the histogram of the grayscale image
			gray = cv2.equalizeHist(gray)
			if tracker.update(gray):
				obj3D = tracker.get3DShape()
				drawMask(ax,obj3D)
				shape,visi = tracker.get2DShape()
				draw(img,shape,visi)
			else:
				tracker.setWindowSizes((11, 9, 7))
			cv2.imshow('facedetect', img)
			plt.show()
			key = cv2.waitKey(5)
			if key > 0:
				if key == ord('q'):
					break
				elif key == ord('s'):
					import dataHandle
					name = raw_input('Input name:')
					x = str(obj3D[:len(obj3D)/3])
					y = str(obj3D[len(obj3D)/3:len(obj3D)*2/3])
					z = str(obj3D[len(obj3D)*2/3:])
					dataHandle.insert(name,x,y,z)
					for row in dataHandle.getData(name):
						print row[0],row[1],row[2]

	except:
		pass
		
	cv2.destroyAllWindows()
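The draw and drawMask helpers used in examples #2 and #3 are defined elsewhere in the project. As a hedged sketch of what draw might do, the version below simply marks each visible 2D landmark on the frame; the assumption that shape lists all x coordinates followed by all y coordinates mirrors the x/y/z thirds used for obj3D above, but is not confirmed by this page.

# Hypothetical sketch of a draw() helper: mark each visible landmark on the frame.
# Assumes shape is a flat sequence of x coordinates followed by y coordinates.
import cv2

def draw(img, shape, visi):
    n = len(shape) // 2
    for i in range(n):
        if visi[i]:
            x, y = int(shape[i]), int(shape[i + n])
            cv2.circle(img, (x, y), 2, (0, 255, 0), -1)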