# Example 1
    def __init__(self, img_size):
        """Initialize calibrator state for images of the given (w, h) size."""
        self.img_size = img_size
        self.nintr = 9  # number of intrinsic parameters tracked
        self.unknowns = None  # number of unknowns in our equation system

        # Initial guess for the camera matrix: unit aspect ratio, principal
        # point at the image center, empirical 1000 px focal length.
        self.Kin = cv2.getDefaultNewCameraMatrix(
            np.diag([1000, 1000, 1]), img_size, True)
        self.K = self.Kin.copy()
        self.cdist = None  # distortion coefficients, none estimated yet

        self.flags = cv2.CALIB_USE_LU

        # accumulated calibration data
        self.keyframes = []
        self.reperr = float("NaN")
        # parameter covariance: one row/column per intrinsic parameter
        self.PCov = np.zeros((self.nintr, self.nintr))
        self.pose_var = np.zeros(6)
        self.disp_idx = None  # index of dispersion
# Example 2
import numpy as np
import cv2 as cv

# aruco: marker dictionary plus a reference marker image for printing
adict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
cv.imshow("marker", cv.aruco.drawMarker(adict, 0, 400))

# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)

# AR scene
cv.ovis.addResourceLocation("packs/Sinbad.zip")  # shipped with Ogre

win = cv.ovis.createWindow("arucoAR", imsize, flags=0)
win.setCameraIntrinsics(K, imsize)
win.createEntity("figure", "Sinbad.mesh", (0, 0, 5), (1.57, 0, 0))
win.createLightEntity("sun", (0, 0, 100))

# video capture
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv.CAP_PROP_FRAME_HEIGHT, imsize[1])

# render until ESC (27) is pressed in the ovis window
while cv.ovis.waitKey(1) != 27:
    # check the success flag: a failed grab returns (False, None), and
    # passing None to setBackground/detectMarkers would crash
    ok, img = cap.read()
    if not ok:
        break
    win.setBackground(img)
    corners, ids = cv.aruco.detectMarkers(img, adict)[:2]

    cv.waitKey(1)
# Example 3
import numpy as np
import cv2 as cv

# aruco: marker dictionary plus a reference marker image for printing
adict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
cv.imshow("marker", cv.aruco.drawMarker(adict, 0, 400))

# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)

# AR scene
cv.ovis.addResourceLocation("packs/Sinbad.zip") # shipped with Ogre

win = cv.ovis.createWindow("arucoAR", imsize, flags=0)
win.setCameraIntrinsics(K, imsize)
win.createEntity("figure", "Sinbad.mesh", (0, 0, 5), (1.57, 0, 0))
win.createLightEntity("sun", (0, 0, 100))

# video capture
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv.CAP_PROP_FRAME_HEIGHT, imsize[1])

# render until ESC (27) is pressed in the ovis window
while cv.ovis.waitKey(1) != 27:
    # check the success flag: a failed grab returns (False, None), and
    # passing None to setBackground/detectMarkers would crash
    ok, img = cap.read()
    if not ok:
        break
    win.setBackground(img)
    corners, ids = cv.aruco.detectMarkers(img, adict)[:2]

    cv.waitKey(1)
# Example 4
def main(ctx):
    """Run an AR demo: track one ArUco marker in webcam frames and render
    the Sinbad mesh at the marker's estimated pose with Ogre.

    ctx: Ogre application context exposing getRoot() and getRenderWindow()
    -- presumably an OgreBites.ApplicationContext; TODO confirm at caller.
    Exits when ESC (27) is pressed or the capture stops delivering frames.
    """
    ## random calibration data. your mileage may vary
    imsize = (800, 600)
    # intrinsics: 800 px focal length, unit aspect ratio, principal point
    # at the image center
    K = cv2.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)

    ## setup Ogre for AR
    scn_mgr = ctx.getRoot().createSceneManager()
    # register the scene manager with the RTSS so materials get shaders
    # generated for it -- done before any entities are created
    Ogre.RTShader.ShaderGenerator.getSingleton().addSceneManager(scn_mgr)

    cam = scn_mgr.createCamera("camera")
    cam.setNearClipDistance(5)
    ctx.getRenderWindow().addViewport(cam)

    camnode = scn_mgr.getRootSceneNode().createChildSceneNode()
    # convert OpenCV to OGRE coordinate system
    camnode.rotate((1, 0, 0), math.pi)
    camnode.attachObject(cam)

    # helpers defined elsewhere in this project: apply K to the Ogre camera
    # and create the background texture the video frames are blitted into
    set_camera_intrinsics(cam, K, imsize)
    bgtex = create_image_background(scn_mgr)

    ## setup 3D scene
    scn_mgr.setAmbientLight((.1, .1, .1))
    scn_mgr.getRootSceneNode().createChildSceneNode().attachObject(
        scn_mgr.createLight())

    # marker_node carries the tracked pose; mesh_node offsets/orients the
    # mesh relative to the marker and is hidden until a detection occurs
    marker_node = scn_mgr.getRootSceneNode().createChildSceneNode()
    mesh_node = marker_node.createChildSceneNode()
    mesh_node.attachObject(scn_mgr.createEntity("Sinbad.mesh"))
    mesh_node.rotate((1, 0, 0), math.pi / 2)
    mesh_node.translate((0, 0, 5))
    mesh_node.setVisible(False)

    ## video capture
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, imsize[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, imsize[1])

    ## aruco
    adict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
    # show a 400 px rendering of marker id 0 so it can be printed/scanned
    cv2.imshow("marker", cv2.aruco.drawMarker(adict, 0, 400))

    rvec, tvec = None, None
    # main loop: ESC (27) in the imshow window quits
    while cv2.waitKey(1) != 27:
        cont, img = cap.read()
        if not cont: break

        # wrap the BGR frame without copying (last arg False = no autodelete)
        im = Ogre.Image(Ogre.PF_BYTE_BGR, img.shape[1], img.shape[0], 1, img,
                        False)
        if bgtex.getBuffer():
            # texture already exists: blit the new frame into its buffer
            bgtex.getBuffer().blitFromMemory(im.getPixelBox())
        else:
            # first frame: create the texture contents from the image
            bgtex.loadImage(im)

        corners, ids = cv2.aruco.detectMarkers(img, adict)[:2]

        if ids is not None:
            # marker side length 5 -- same unit as the resulting tvec
            rvecs, tvecs = cv2.aruco.estimatePoseSingleMarkers(
                corners, 5, K, None)[:2]
            rvec, tvec = rvecs[0].ravel(), tvecs[0].ravel()

            # rvec is a Rodrigues vector: direction = axis, norm = angle;
            # Ogre's normalise() returns the previous length (the angle)
            ax = Ogre.Vector3(*rvec)
            ang = ax.normalise()
            marker_node.setOrientation(Ogre.Quaternion(ang, ax))
            marker_node.setPosition(tvec)
            mesh_node.setVisible(True)

        ctx.getRoot().renderOneFrame()
import cv2
import sys

# Haar cascade XML path comes from the first command-line argument
cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)

# Open capture device 1. The original code immediately overwrote this
# handle with cv2.getDefaultNewCameraMatrix() (a call that also lacks its
# required arguments), which broke every .read() below -- line removed.
video_capture = cv2.VideoCapture(1)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE

        # cv2.cv.CV_HAAR_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('Video', frame)