Example 1
    def __init__(self, widget):
        super().__init__()
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.widget = widget

        # set records_holder
        sl_model = QStringListModel()
        self.ui.records_holder.setModel(sl_model)

        # set ear detector
        self.camera = Camera(4, self.ui.camera_holder.width(),
                             self.ui.camera_holder.height())
        self.recognizer = Recognizer()

        # set update timer
        self.fps = 60
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000 // self.fps)

        # set updater
        self.updater = Updater(
            self.ui,
            Database(host='localhost',
                     database='eardoor',
                     user='******',
                     password='******',
                     table='records'), sl_model)
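
The Camera(4, width, height) call above hints at a thin wrapper around a capture device sized to the camera_holder widget. Below is a minimal sketch of such a wrapper; the constructor signature is taken from the call site, but the class body, the cv2.VideoCapture backend, and the read()/release() method names are assumptions, not the project's actual implementation.

import cv2


class Camera:
    """Hypothetical sketch of a capture wrapper matching Camera(index, width, height)."""

    def __init__(self, index, width, height):
        self.capture = cv2.VideoCapture(index)  # index 4 in the example above
        self.size = (width, height)             # target size of the camera_holder widget

    def read(self):
        # Grab one frame and scale it to the widget size; return None on failure.
        ok, frame = self.capture.read()
        if not ok:
            return None
        return cv2.resize(frame, self.size)

    def release(self):
        self.capture.release()
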
Example 2
    pyrr.Vector3((1.3,  -2.0, -2.5)),
    pyrr.Vector3((1.5,  2.0,  -2.5)),
    pyrr.Vector3((1.5,  0.2,  -1.5)),
    pyrr.Vector3((-1.3, 1.0,  -1.0))
]

pointLightPositions = [
    pyrr.Vector3((0.5, -0.4, 0.2)),
    pyrr.Vector3((2.3, -3.3, -4.0)),
    pyrr.Vector3((-4.0, 2.0, -12.0)),
    pyrr.Vector3((0.0, 0.0, -3.0))
]

vertices = np.array(vertices, dtype=np.float32)

camera = Camera(pos=pyrr.Vector3((0, 0, 2)))


def window_resize(window, width, height):
    glViewport(0, 0, width, height)


def move_camera(window):
    if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
        camera.move_forward()
    if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
        camera.move_backward()
    if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
        camera.strafe_right()
    if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
        camera.strafe_left()
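
In these GLFW examples, window_resize and move_camera are typically registered and polled from a main loop like the sketch below. The glfw and PyOpenGL calls used here exist as written, but the window size and title, and the commented-out camera.get_view_matrix() call, are assumptions; the excerpt does not show how the Camera class exposes its view matrix or how the scene is drawn.

# Hedged sketch of the surrounding main loop (not shown in the excerpt).
import glfw
from OpenGL.GL import (glClear, glEnable, GL_COLOR_BUFFER_BIT,
                       GL_DEPTH_BUFFER_BIT, GL_DEPTH_TEST)

if not glfw.init():
    raise RuntimeError("GLFW could not be initialized")

window = glfw.create_window(1280, 720, "Camera demo", None, None)  # size and title assumed
glfw.make_context_current(window)
glfw.set_window_size_callback(window, window_resize)
glEnable(GL_DEPTH_TEST)

while not glfw.window_should_close(window):
    glfw.poll_events()
    move_camera(window)                  # WASD handling defined above
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # view = camera.get_view_matrix()    # assumed Camera method; upload to the shader here
    # ... draw calls go here ...
    glfw.swap_buffers(window)

glfw.terminate()
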
Example 3
    0.5,
    0.5,
    0.0,
    1.0,
    0.0,
    -0.5,
    0.5,
    -0.5,
    0.0,
    1.0,
    0.0,
]

vertices = np.array(vertices, dtype=np.float32)

camera = Camera()


def window_resize(window, width, height):
    glViewport(0, 0, width, height)


def move_camera(window):
    if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
        camera.move_forward()
    if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
        camera.move_backward()
    if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
        camera.strafe_right()
    if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
        camera.strafe_left()
Example 4
    (-1.7, 3.0, -7.5),
    (1.3, -2.0, -2.5),
    (1.5, 2.0, -2.5),
    (1.5, 0.2, -1.5),
    (-1.3, 1.0, -1.0),
]

quad_vertices = [
    -1.0, 1.0, 0.0, 1.0, -1.0, -1.0, 0.0, 0.0, 1.0, -1.0, 1.0, 0.0, -1.0, 1.0,
    0.0, 1.0, 1.0, -1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0
]

cube_vertices = np.array(cube_vertices, dtype=np.float32)
quad_vertices = np.array(quad_vertices, dtype=np.float32)

camera = Camera(pyrr.Vector3((0.0, 0.0, 5.0)))


def window_resize(window, width, height):
    glViewport(0, 0, width, height)


def move_camera(window):
    if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
        camera.move_forward()
    if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
        camera.move_backward()
    if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
        camera.strafe_right()
    if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
        camera.strafe_left()
Example 5
]

# Custom Camera params
render_size = (640, 480)

camera_position = Vector3(0, 0, -100)
look_at_position = Vector3(0, 0, 100)
forward = Vector3(0, 0, 1)
right = Vector3.cross(forward, floor.normal)
down = Vector3.cross(forward, right)

# Defining Camera objects
cam = Camera(
    position=camera_position,
    forward=forward,
    right=right,
    down=down,
    fov=90,
    render_size=render_size,
)

# Tracing parameters
bounces = 3
antialiasing_samples = 1
bounce_samples = 5
energy = 2


def color_from_trace(x, y):
    # Getting coordinate on canvas
    frame_point = cam.canvas_origin + (cam.right *
                                       (cam.render_size[0] / 2 - x) +
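
The truncated color_from_trace above maps a pixel (x, y) onto the camera canvas before a ray is cast through it. The self-contained sketch below shows one way to write that pinhole mapping with plain NumPy; the function name, the sign conventions, and the use of NumPy arrays instead of the project's Vector3 class are all assumptions, not code taken from the example.

# Hedged, self-contained sketch of a pixel-to-ray mapping for a pinhole camera.
import numpy as np


def pixel_to_ray(x, y, position, forward, right, down, fov_deg, render_size):
    width, height = render_size
    # Distance from the eye to the canvas for the given horizontal field of view.
    canvas_distance = (width / 2) / np.tan(np.radians(fov_deg) / 2)
    canvas_center = position + forward * canvas_distance
    # Offset from the canvas center toward the requested pixel.
    point = (canvas_center
             + right * (width / 2 - x)
             + down * (y - height / 2))
    direction = point - position
    return position, direction / np.linalg.norm(direction)
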
Example 6
import sys

import dlib
from imutils import face_utils

from utils.Camera import Camera, CameraType

# Require the path to the trained shape predictor as the first argument.
if len(sys.argv) < 2:
    print("[Init] Please include trained model")
    sys.exit()

predictor_path = sys.argv[1]

window = dlib.image_window()
window.set_title("Test Video")

detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor(predictor_path)

camera = Camera(CameraType.WEB_CAM)
while True:
    image = camera.take_frame()

    dets = detector(image)

    window.clear_overlay()

    for k, d in enumerate(dets):
        shape = shape_predictor(image, d)

        new_shape = face_utils.shape_to_np(shape)
        (leftStart, leftEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (rightStart, rightEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

        leftEye = new_shape[leftStart:leftEnd]
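
The excerpt stops right after slicing out the left-eye landmarks. A common continuation of this pattern is the eye aspect ratio (EAR) used for blink or drowsiness detection; the sketch below shows that standard computation with NumPy. It is an assumption about where this script is headed, not code taken from it.

# Hedged sketch: eye aspect ratio over six eye landmarks, a common follow-up
# to the landmark slicing above (assumed, not shown in the excerpt).
import numpy as np


def eye_aspect_ratio(eye):
    # eye is a (6, 2) array of landmark coordinates in dlib's 68-point ordering.
    vertical_1 = np.linalg.norm(eye[1] - eye[5])
    vertical_2 = np.linalg.norm(eye[2] - eye[4])
    horizontal = np.linalg.norm(eye[0] - eye[3])
    return (vertical_1 + vertical_2) / (2.0 * horizontal)


# Usage with the slice from the example: ear = eye_aspect_ratio(leftEye)
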
Example 7
skybox_vertices = [
    -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0, 1.0,
    1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 1.0,
    -1.0, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0, -1.0,
    1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, -1.0,
    -1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
    -1.0, 1.0, -1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0,
    1.0, 1.0, 1.0, -1.0, 1.0, 1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, -1.0,
    -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0,
    1.0
]

cube_vertices = np.array(cube_vertices, dtype=np.float32)
skybox_vertices = np.array(skybox_vertices, dtype=np.float32)

camera = Camera(pyrr.Vector3((0, 0, 4)))


def window_resize(window, width, height):
    glViewport(0, 0, width, height)


def move_camera(window):
    if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:
        camera.move_forward()
    if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:
        camera.move_backward()
    if glfw.get_key(window, glfw.KEY_D) == glfw.PRESS:
        camera.strafe_right()
    if glfw.get_key(window, glfw.KEY_A) == glfw.PRESS:
        camera.strafe_left()
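
A skybox cube like the one above is usually drawn last, with the depth test relaxed so it only fills pixels the scene left at maximum depth. The fragment below sketches that common pattern; the skybox_vao name and the surrounding shader setup are placeholders, since the excerpt does not show the buffer objects or the render loop.

# Hedged sketch of a typical skybox draw call (skybox_vao is a placeholder name).
from OpenGL.GL import (glDepthFunc, glBindVertexArray, glDrawArrays,
                       GL_LEQUAL, GL_LESS, GL_TRIANGLES)


def draw_skybox(skybox_vao):
    glDepthFunc(GL_LEQUAL)             # pass the depth test where the buffer is still at 1.0
    glBindVertexArray(skybox_vao)
    glDrawArrays(GL_TRIANGLES, 0, 36)  # 36 vertices: 6 faces x 2 triangles x 3 vertices
    glBindVertexArray(0)
    glDepthFunc(GL_LESS)               # restore the default depth function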