Example #1
def main():
    webcam = Webcam()
    webcam.start()
    mnp = MarkNplay()

    while True:
        image = webcam.get_current_frame()
        mnp.screen_thread(image)

        cv2.imshow('AR Book Reader', image)
        cv2.waitKey(10)
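
Most of the examples on this page rely on a small Webcam helper that grabs frames on a background thread (a few use variants with getFrame()/get_frame()). A minimal sketch, inferred from how Example #1 calls it (Webcam(), start(), get_current_frame()) and not the original implementation:

import threading

import cv2


class Webcam:

    def __init__(self, camera_num=0):
        self.capture = cv2.VideoCapture(camera_num)
        self.current_frame = None
        self.running = False

    def start(self):
        # grab frames on a daemon thread so callers never block on camera I/O
        self.running = True
        threading.Thread(target=self._update, daemon=True).start()

    def _update(self):
        while self.running:
            ret, frame = self.capture.read()
            if ret:
                self.current_frame = frame

    def get_current_frame(self):
        return self.current_frame

    def stop(self):
        self.running = False
        self.capture.release()
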
Example #2
# imports assumed by this snippet
import os
import sys
import time
from datetime import datetime

import cv2
from webcam import Webcam

def take_imgs(chessboard_size=(11, 7), kSaveImageDeltaTime=1):
    sys.path.append("../")
    os.makedirs("./calib_images", exist_ok=True)
    camera_num = 0
    if len(sys.argv) == 2:
        camera_num = int(sys.argv[1])
    print('opening camera: ', camera_num)

    webcam = Webcam(camera_num)
    webcam.start()
    
    lastSaveTime = time.time()
 
    while True:
        
        # get image from webcam
        image = webcam.get_current_frame()
        if image is not None: 

            # check if pattern found
            ret, corners = cv2.findChessboardCorners(cv2.cvtColor(image,cv2.COLOR_BGR2GRAY), chessboard_size, None)
        
            if ret:
                print('found chessboard')
                # save image
                filename = datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.bmp'
                image_path="./calib_images/" + filename
                
                elapsedTimeSinceLastSave = time.time() - lastSaveTime
                do_save = elapsedTimeSinceLastSave > kSaveImageDeltaTime
                print(elapsedTimeSinceLastSave, kSaveImageDeltaTime)
                if do_save:
                    lastSaveTime = time.time()
                    print('saving file ', image_path)
                    cv2.imwrite(image_path, image)

                # draw the corners
                image = cv2.drawChessboardCorners(image, chessboard_size, corners, ret)                       

            cv2.imshow('camera', image)                

        else: 
            pass
            #print('empty image')                
                            
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break    
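
Once enough chessboard views are saved, the camera intrinsics can be recovered with cv2.calibrateCamera. A minimal sketch, assuming the same ./calib_images folder and 11x7 inner-corner pattern that take_imgs uses:

import glob

import cv2
import numpy as np


def calibrate_from_folder(folder="./calib_images", chessboard_size=(11, 7)):
    # one reference grid of 3D object points (z=0 plane, unit squares)
    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:chessboard_size[0],
                           0:chessboard_size[1]].T.reshape(-1, 2)

    objpoints, imgpoints = [], []
    gray = None
    for path in glob.glob(folder + "/*.bmp"):
        gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)

    # returns the camera matrix K and the distortion coefficients
    ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    return K, dist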
Example #3
# imports assumed by this and the later OpenGL examples (some of which also
# use "from numpy import *" names such as arctan, dot and eye); classes like
# Webcam, Glyphs, ConfigProvider and the robot helpers come from the project
# and are not shown
import cv2
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from PIL import Image


class SaltwashAR:
 
    # constants
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise glyphs
        self.glyphs = Glyphs()
        self.glyphs_cache = None

        # initialise browser
        self.browser = None
        
        if self.config_provider.browser:
            self.browser = Browser()

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start threads
        self.webcam.start()
        
        if self.browser: 
            self.browser.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.is_detected = False
        self.sporty_robot.is_detected = False

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background
        self._handle_background(image)

        # handle glyphs
        self._handle_glyphs(image)
       
        # handle browser
        self._handle_browser()

        glutSwapBuffers()

    def _handle_background(self, image):
        
        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)     
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)  # Pillow removed tostring(); tobytes() is its replacement
 
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
        
        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
        glPopMatrix()

    def _handle_glyphs(self, image):

        # attempt to detect glyphs
        glyphs = []

        try:
            glyphs = self.glyphs.detect(image)
        except Exception as ex:
            print(ex)

        # manage glyphs cache
        if glyphs:
            self.glyphs_cache = glyphs
        elif self.glyphs_cache: 
            glyphs = self.glyphs_cache
            self.glyphs_cache = None
        else:
            return

        for glyph in glyphs:
            
            rvecs, tvecs, _, glyph_name = glyph

            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])
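            # the element-wise product with INVERSE_MATRIX below flips the
            # sign of the y and z rows, converting OpenCV's camera frame
            # (y down, z forward) into OpenGL's (y up, z towards the viewer)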

            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if glyph_name == ROCKY_ROBOT:
                self.rocky_robot.is_detected = True
                
                if self.browser and self.browser.is_speaking:
                    self.rocky_robot.next_frame(True)
                else:
                    self.rocky_robot.next_frame(False)
            
            elif glyph_name == SPORTY_ROBOT:
                self.sporty_robot.is_detected = True
                
                if self.browser and self.browser.is_speaking:
                    self.sporty_robot.next_frame(True)
                else:
                    self.sporty_robot.next_frame(False)
            
            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def _handle_browser(self):

        # check browser instantiated
        if not self.browser: return

        # handle browser
        if self.rocky_robot.is_detected:
            self.browser.load(ROCK)
        elif self.sporty_robot.is_detected:
            self.browser.load(SPORT)
        else:
            self.browser.halt()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("SaltwashAR")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
Example #4
# imports assumed by this snippet (Network, Sound and Webcam are local
# helpers that are not shown)
import copy
import glob
import time

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.transforms import ToTensor


class CPPN:
    def __init__(self,
                 height,
                 width,
                 n_inputs,
                 n_hidden,
                 n_outputs,
                 non_linearity,
                 device,
                 webcam=False):
        self.height = height
        self.width = width

        self.n_inputs = n_inputs
        self.n_hidden = n_hidden
        self.n_outputs = n_outputs
        self.non_linearity = non_linearity
        self.device = device

        self.network = Network(n_inputs, n_hidden, n_outputs, non_linearity)
        self.network.to(device)

        self.network_new = Network(n_inputs, n_hidden, n_outputs,
                                   non_linearity)
        self.network_new.to(device)

        self.sound = Sound()

        self.visualisation_input = self._create_visualisation_tensor()

        if webcam:
            self.webcam = Webcam()
            self.webcam.start()

    def _create_visualisation_tensor(self):
        visualisation_input = np.zeros(
            (self.height, self.width, self.n_inputs))
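        # the first two input channels are the normalised (i, j) pixel
        # coordinates; the remaining n_inputs - 2 channels stay zero here and
        # are later filled with audio band levels in _visualise_np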

        for i in range(self.height):
            for j in range(self.width):
                visualisation_input[i, j] = [
                    i / float(self.height), j / float(self.width)
                ] + [0] * (self.n_inputs - 2)

        visualisation_input = torch.tensor(visualisation_input.reshape(
            -1, self.n_inputs),
                                           dtype=torch.float,
                                           device=self.device)

        return visualisation_input

    # def _create_kaleidoscope_visualisation_tensor(self):
    #     visualisation_input = np.zeros((self.height, self.width, self.n_inputs))
    #
    #     for i in range(self.height):
    #         for j in range(self.width):
    #             visualisation_input[i, j] = [i/float(self.height),j/float(self.width)] + [0]*(self.n_inputs-2)
    #
    #     visualisation_input = torch.tensor(visualisation_input.reshape(-1, self.n_inputs), dtype=torch.float, device=self.device)
    #
    #     return visualisation_input

    def _visualise_np(self, bands=None):
        with torch.no_grad():
            if bands is not None:
                o = torch.tensor(bands, dtype=torch.float).repeat(
                    self.visualisation_input.size(0), 1)

                self.visualisation_input[:, 2:] = o

            im = self.network(self.visualisation_input).reshape(
                self.width, self.height, self.n_outputs)

            return im.detach().cpu().numpy()

    def _visualise(self, name, save_im=False):
        with torch.no_grad():
            im = self.network(self.visualisation_input).reshape(
                self.width, self.height, 3).detach().cpu().numpy() * 255

            if save_im:
                cv2.imwrite(name + ".png", im)

            return im

    def start(self):
        cv2.namedWindow("CPPN", cv2.WINDOW_NORMAL)
        cv2.setWindowProperty("CPPN", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

        audio_generator = self.sound.read_audio()

        t = time.time()
        interpolating = 0
        training = 0
        generator = None

        while True:
            if time.time() - t > 20. and not training and not interpolating:
                # only option 1 (interpolation) is enabled here; options 0
                # (training on a target image) and 2 (training on a webcam
                # frame) are kept below for reference
                choice = np.random.choice([1])

                if choice == 0:
                    training = 1
                    target_images = glob.glob("target_images/*")
                    choice = np.random.choice(target_images)
                    image = cv2.imread(choice)
                    image = ToTensor()(image).float().to(self.device)

                    generator = self.train(image,
                                           self.height,
                                           self.width,
                                           0.001,
                                           save_im=False,
                                           image_folder="formation1",
                                           network_name="Santa",
                                           epochs=100,
                                           live=True,
                                           verbose=True)
                elif choice == 1:
                    self._random_network()
                    generator = self.interpolate(60)
                    interpolating = 1
                elif choice == 2:
                    print("webcam")
                    training = 1
                    image = self.webcam.read()

                    image = ToTensor()(image).float().to(self.device)

                    generator = self.train(image,
                                           self.height,
                                           self.width,
                                           0.001,
                                           save_im=False,
                                           image_folder="formation1",
                                           network_name="Santa",
                                           epochs=100,
                                           live=True,
                                           verbose=True)

                if choice == 0:
                    self.sound.sin = not self.sound.sin
                    self.sound.amplitudes = np.abs(np.random.rand(8))
                    self.sound.frequencies = np.abs(np.random.rand(8))
                elif choice == 1:
                    self.sound.calculate_bands = not self.sound.calculate_bands

                if not self.sound.calculate_bands and not self.sound.sin:
                    if np.random.rand() < 0.5:
                        self.sound.calculate_bands = True
                    else:
                        self.sound.sin = True

                t = time.time()

            if interpolating:
                try:
                    self.network = next(generator)
                except StopIteration:
                    interpolating = 0

            if training:
                try:
                    self.network = next(generator)
                except StopIteration:
                    training = 0
                    torch.cuda.empty_cache()

            frame = self._visualise_np(next(audio_generator))
            cv2.imshow(
                "CPPN",
                cv2.copyMakeBorder(frame, 0, 0, 0, 0, cv2.BORDER_CONSTANT, 0))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()
        self.sound.stream.stop_stream()
        self.sound.stream.close()
        self.sound.pa.terminate()

    def _resize_image(self, image, width, height):
        image = F.interpolate(image.unsqueeze(0), size=(width, height))
        image = torch.squeeze(image, 0)
        image = image.permute(1, 2, 0)

        return image

    def _random_network(self):
        self.network_new = Network(self.n_inputs, self.n_hidden,
                                   self.n_outputs, self.non_linearity)
        self.network_new.to(self.device)

    def train(self,
              image,
              width,
              height,
              loss_threshold,
              save_im=False,
              image_folder="",
              save_network=True,
              network_name="",
              epochs=1,
              live=False,
              verbose=False):
        image = self._resize_image(image, width, height)

        optimiser = torch.optim.Adam(self.network.parameters())
        criterion = torch.nn.MSELoss()

        previous_loss = None

        # shallow copy: shares parameter tensors with self.network, so the
        # optimiser above still updates the weights being evaluated here
        network = copy.copy(self.network)

        for i in range(epochs):
            loss = criterion(
                network(self.visualisation_input).reshape(width, height, 3),
                image)

            optimiser.zero_grad()
            loss.backward()

            optimiser.step()
            if verbose:
                print(i, loss)
            if save_im:
                if previous_loss is None or previous_loss - loss_threshold > loss:
                    previous_loss = loss
                    self._visualise(
                        "{}/image{:06d}.png".format(image_folder, i), save_im)
            if live:
                yield network

        if save_network:
            torch.save(self.network.state_dict(),
                       'trained_networks/network{}.pt'.format(network_name))

        torch.cuda.empty_cache()

    def load(self, network_name, new=False):
        if new:
            self.network_new.load_state_dict(
                torch.load("trained_networks/network" + network_name + ".pt"))
            self.network_new.to(self.device)
        else:
            self.network.load_state_dict(
                torch.load("trained_networks/network" + network_name + ".pt"))
            self.network.to(self.device)

    def interpolate(self, num_interpolation_frames, space="beta", beta=2.0):
        """
        beta parameter alters the steepness of the function
        """
        weights1 = []
        biases1 = []

        weights2 = []
        biases2 = []

        for idx, layer in enumerate(self.network.module_list):
            weights1.append(layer.weight.data)
            biases1.append(layer.bias.data)
            weights2.append(self.network_new.module_list[idx].weight.data)
            biases2.append(self.network_new.module_list[idx].bias.data)

        frame_distribution = np.linspace(0, 1, num=num_interpolation_frames)

        if space != "lin":
            f = lambda x: 1 / (1 + np.power(x / (1 + np.finfo(float).eps - x),
                                            -beta))
            frame_distribution = f(frame_distribution)
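            # f maps [0, 1] onto an S-curve: the endpoints are flattened and
            # the middle steepened, so the interpolation eases in and out; a
            # larger beta gives a sharper transition (eps guards the division
            # at x = 1)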

        weights = []
        for weight_pair in zip(weights1, weights2):
            difference = weight_pair[0] - weight_pair[1]
            new_weights = torch.zeros(
                num_interpolation_frames,
                list(weight_pair[0].size())[0],
                list(weight_pair[0].size())[1]).float().to(self.device)
            for idx, i in enumerate(frame_distribution):
                new_weights[idx] = weight_pair[0] - i * difference
            weights.append(new_weights)

        biases = []
        for bias_pair in zip(biases1, biases2):
            difference = bias_pair[0] - bias_pair[1]
            new_biases = torch.zeros(num_interpolation_frames,
                                     list(bias_pair[0].size())[0]).float().to(
                                         self.device)
            for idx, i in enumerate(frame_distribution):
                new_biases[idx] = bias_pair[0] - i * difference
            biases.append(new_biases)

        network = copy.deepcopy(self.network)

        for i in range(num_interpolation_frames):
            for idx, layer in enumerate(network.module_list):
                network.module_list[idx].weight.data = weights[idx][i]
                network.module_list[idx].bias.data = biases[idx][i]

            yield network

    def random_walk(self):
        pass

    def animate(self):
        pass

    def alzheimer(self):
        pass
Example #5
import numpy as np
from webcam import Webcam  # assumed local helper, as in the other examples
# compare_ssim was removed from skimage.measure; structural_similarity in
# skimage.metrics is its modern replacement
from skimage.metrics import structural_similarity as ssim


def mse(imageA, imageB):
    # the 'Mean Squared Error' between the two images is the
    # sum of the squared difference between the two images;
    # NOTE: the two images must have the same dimension
    err = np.sum((imageA.astype("float") - imageB.astype("float"))**2)
    err /= float(imageA.shape[0] * imageA.shape[1])
    return err


if __name__ == "__main__":
    camera = Webcam()
    if camera.isConnected():
        try:
            print("copying output")
            camera.start()
            while True:
                image1 = camera.getFrame()
                image2 = camera.getFrame()
                cor = ssim(image1, image2, channel_axis=2)  # channel_axis is needed for colour frames
                if cor < 0.78:
                    print("gettingframe:   ", cor)
                #camera.show(image1)
        except (KeyboardInterrupt, SystemExit):
            print("exiting")
            camera.stop()
    else:
        print("problems")
Example #6
class OpenGLGlyphs:

    ############################################################## initialisation
    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
        self.find = fp()
        self.find.set_img('sample.jpg')

        self.hei, self.wid = self.webcam.get_frame_shape()[:2]
        # initialise cube
        # self.d_obj = None
        self.img = None
        # initialise texture
        self.texture_background = None
        self.K = None
        self.mark_kp = None
        self.mark_des = None
        self.set_keypoint()
        self.new_kp = None

        self.mat_kp = None
        self.mat_des = None
        self.H = None

        # self.Rt=None

    ############################################################## camera setup
    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0) # clear colour (including alpha)
        glClearDepth(1.0) # initial value written to every pixel of the depth buffer
        glDepthFunc(GL_LESS) # with GL_LESS, an incoming fragment replaces the
                             # stored depth value only when its depth is smaller
        glEnable(GL_DEPTH_TEST) # draw fragments according to their depth order
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

        self.K = my_calibration((Height, Width))
        fx = self.K[0, 0]
        fy = self.K[1, 1]
        fovy = 2 * arctan(0.5 * Height / fy) * 180 / pi
        aspect = float(Width * fy) / (Height * fx)
        # define the near and far clipping planes
        near = 0.1
        far = 100.0
        # set perspective
        gluPerspective(fovy, aspect, near, far)

        glMatrixMode(GL_MODELVIEW)
        # self.d_obj=[OBJ('Rocket.obj')]
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
        # gluPerspective(33.7, 1.3, 0.1, 100.0)

    ############################################################## store the marker's kp and des
    def set_keypoint(self):

        self.find.start()
        self.mark_kp, self.mark_des = self.find.get_point()

    ############################################################## compute the K value
    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        Rt = self._my_cal(image)
        """
        if Rt!=None:
            box=ones((self.hei,self.wid),uint8)
            H_box=cv2.warpPerspective(box,self.H,(self.wid, self.hei))
            image=image*H_box[:,:,newaxis]
            image=cv2.drawKeypoints(image,self.mat_kp,flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        """
        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)

        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.wid, self.hei, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()

        # glTranslatef(0.0,0.0,0.0)
        gluLookAt(0.0, 0.0, 12.5, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)

        self._draw_background()
        glPopMatrix()
        ################ compute Rt and check whether a matching image exists

        if Rt is not None:
            self._set_modelview_from_camera(Rt)
            glEnable(GL_LIGHTING)
            glEnable(GL_LIGHT0)
            glEnable(GL_DEPTH_TEST)
            glEnable(GL_NORMALIZE)
            glClear(GL_DEPTH_BUFFER_BIT)
            glMaterialfv(GL_FRONT, GL_AMBIENT, [0.5, 0.5, 0.0, 1.0])
            glMaterialfv(GL_FRONT, GL_DIFFUSE, [0.9, 0.9, 0.0, 1.0])
            glMaterialfv(GL_FRONT, GL_SPECULAR, [1.0, 1.0, 1.0, 1.0])
            glMaterialf(GL_FRONT, GL_SHININESS, 0.25 * 128.0)  # shininess is a scalar, so glMaterialf
            glutSolidTeapot(0.1)

        glutSwapBuffers()

    ############################################################## convert Rt for OpenGL
    def _set_modelview_from_camera(self, Rt):

        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        Rx = array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])

        # set rotation to best approximation
        R = Rt[:, :3]

        # change sign of x-axis
        R[0, :] = -R[0, :]
        # set translation
        t = Rt[:, 3]
        t[0] = -t[0]

        # setup 4*4 model view matrix
        M = eye(4)
        M[:3, :3] = dot(R, Rx)
        M[:3, 3] = t  # bottom row stays [0, 0, 0, 1] from eye(4)

        # transpose and flatten to get column order
        M = M.T

        m = M.flatten()
        # replace model view with the new matrix
        glLoadMatrixf(m)

    ############################################################## return Rt
    def _my_cal(self, image):
        find_H = fp()
        find_H.set_cv_img(image)
        find_H.start()
        kp, des = find_H.get_point()

        self.H = self.match_images(self.mark_kp, self.mark_des, kp, des)
        if self.H is not None:
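            # pose from homography: model the marker plane as seen by a
            # reference camera cam1; mapping cam1 through H gives cam2, whose
            # rotation is re-orthogonalised (r3 = r1 x r2) before Rt is read off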
            cam1 = camera.Camera(hstack((self.K, dot(self.K, array([[0], [0], [-1]])))))
            # Rt1=dot(linalg.inv(self.K),cam1.P)
            cam2 = camera.Camera(dot(self.H, cam1.P))

            A = dot(linalg.inv(self.K), cam2.P[:, :3])
            A = array([A[:, 0], A[:, 1], cross(A[:, 0], A[:, 1])]).T
            cam2.P[:, :3] = dot(self.K, A)
            Rt = dot(linalg.inv(self.K), cam2.P)

            return Rt
        else:
            return None

    ##############################################################match image
    def match_images(self, kp1, des1, kp2, des2):
        matcher = cv2.BFMatcher()
        match_des = matcher.knnMatch(des1, des2, k=2)
        matches = []
        matA, matB = [], []
        matC = []

        for m in match_des:
            if m[0].distance < 0.8 * m[1].distance:
                matA.append(kp1[m[0].queryIdx])
                matB.append(kp2[m[0].trainIdx])
                matC.append(des1[m[0].queryIdx])

        if len(matA) > 50:
            ptsA = float32([m.pt for m in matA])
            ptsB = float32([n.pt for n in matB])
            H1 = []
            H1, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 5.0)
            H1 = self.homo_check(H1)
            self.mat_kp = array([matB[i] for i in range(status.shape[0]) if status[i] == 1])
            self.mat_des = array([matC[i] for i in range(status.shape[0]) if status[i] == 1])

            return H1
        else:
            return None

    ##############################################################homography check
    def homo_check(self, H1):
        if self.H is None:
            return H1
        else:
            if cv2.norm(H1, self.H) > 1.0:
                return H1
            else:
                return self.H

    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0);
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0);
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0);
        glVertex3f(4.0, 3.0, 0.0)
        glTexCoord2f(0.0, 0.0);
        glVertex3f(-4.0, 3.0, 0.0)
        glEnd()
        glDeleteTextures([self.texture_background])  # PyOpenGL expects a sequence of texture names

    def keyboard(self, *args):
        if args[0] == GLUT_KEY_UP:
            glutDestroyWindow(self.window_id)
            self.webcam.finish()
            sys.exit()

    ############################################################## OpenGL window initialisation

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(self.wid, self.hei)
        glutInitWindowPosition(400, 400)
        self.window_id = glutCreateWindow(b"OpenGL Glyphs")
        self._init_gl(self.wid, self.hei)
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        glutSpecialFunc(self.keyboard)
        glutMainLoop()
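
This example (like Example #13 below) calls a my_calibration helper that is not shown. A minimal sketch in the style of Programming Computer Vision, scaling a reference focal length to the current image size (the constants are placeholders for a real calibration of your camera):

from numpy import diag


def my_calibration(sz):
    # sz is (height, width); fx and fy are scaled from a calibration measured
    # at a 2592x1936 reference resolution (placeholder values)
    row, col = sz
    fx = 2555 * col / 2592
    fy = 2586 * row / 1936
    K = diag([fx, fy, 1])
    K[0, 2] = 0.5 * col
    K[1, 2] = 0.5 * row
    return K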
Example #7
from webcam import Webcam
import cv2
from datetime import datetime

webcam = Webcam()
webcam.start()

while True:

    # get image from webcam
    image = webcam.get_current_frame()

    # display image
    cv2.imshow('grid', image)
    cv2.waitKey(3000)

    # save image to file, if pattern found
    ret, corners = cv2.findChessboardCorners(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (7, 6), None)

    if ret:
        filename = datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.jpg'
        cv2.imwrite("pose/sample_images/" + filename, image)
Example #8
class VidMag():
    def __init__(self):
        self.webcam = Webcam()
        self.buffer_size = 40
        self.fps = 0
        self.times = []
        self.t0 = time.time()
        self.data_buffer = []
        #self.vidmag_frames = []
        self.frame_out = np.zeros((10, 10, 3), np.uint8)
        self.webcam.start()
        print("init")

    #--------------COLOR MAGNIFICATION---------------------#
    def build_gaussian_pyramid(self, src, level=3):
        s = src.copy()
        pyramid = [s]
        for i in range(level):
            s = cv2.pyrDown(s)
            pyramid.append(s)
        return pyramid

    def gaussian_video(self, video_tensor, levels=3):
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_gaussian_pyramid(frame, level=levels)
            gaussian_frame = pyr[-1]
            if i == 0:
                vid_data = np.zeros(
                    (video_tensor.shape[0], gaussian_frame.shape[0],
                     gaussian_frame.shape[1], 3))
            vid_data[i] = gaussian_frame
        return vid_data

    def temporal_ideal_filter(self, tensor, low, high, fps, axis=0):
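        # "ideal" temporal band-pass: zero every FFT bin whose frequency falls
        # outside [low, high] Hz (and the mirrored negative-frequency band),
        # then take the magnitude of the inverse transform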
        fft = fftpack.fft(tensor, axis=axis)
        frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)
        bound_low = (np.abs(frequencies - low)).argmin()
        bound_high = (np.abs(frequencies - high)).argmin()
        fft[:bound_low] = 0
        fft[bound_high:-bound_high] = 0
        fft[-bound_low:] = 0
        iff = fftpack.ifft(fft, axis=axis)
        return np.abs(iff)

    def amplify_video(self, gaussian_vid, amplification=70):
        return gaussian_vid * amplification

    def reconstruct_video(self, amp_video, origin_video, levels=3):
        final_video = np.zeros(origin_video.shape)
        for i in range(0, amp_video.shape[0]):
            img = amp_video[i]
            for x in range(levels):
                img = cv2.pyrUp(img)
            img = img + origin_video[i]
            final_video[i] = img
        return final_video

    def magnify_color(self,
                      data_buffer,
                      fps,
                      low=0.4,
                      high=2,
                      levels=3,
                      amplification=30):
        gau_video = self.gaussian_video(data_buffer, levels=levels)
        filtered_tensor = self.temporal_ideal_filter(gau_video, low, high, fps)
        amplified_video = self.amplify_video(filtered_tensor,
                                             amplification=amplification)
        final_video = self.reconstruct_video(amplified_video,
                                             data_buffer,
                                             levels=levels)
        #print("c")
        return final_video

    #-------------------------------------------------------------#

    #-------------------MOTION MAGNIFICATION---------------------#
    #build laplacian pyramid for video
    def laplacian_video(self, video_tensor, levels=3):
        tensor_list = []
        for i in range(0, video_tensor.shape[0]):
            frame = video_tensor[i]
            pyr = self.build_laplacian_pyramid(frame, levels=levels)
            if i == 0:
                for k in range(levels):
                    tensor_list.append(
                        np.zeros((video_tensor.shape[0], pyr[k].shape[0],
                                  pyr[k].shape[1], 3)))
            for n in range(levels):
                tensor_list[n][i] = pyr[n]
        return tensor_list

    #Build Laplacian Pyramid
    def build_laplacian_pyramid(self, src, levels=3):
        gaussianPyramid = self.build_gaussian_pyramid(src, levels)
        pyramid = []
        for i in range(levels, 0, -1):
            GE = cv2.pyrUp(gaussianPyramid[i])
            L = cv2.subtract(gaussianPyramid[i - 1], GE)
            pyramid.append(L)
        return pyramid

    # reconstruct video from laplacian pyramid
    def reconstruct_from_tensorlist(self, filter_tensor_list, levels=3):
        final = np.zeros(filter_tensor_list[-1].shape)
        for i in range(filter_tensor_list[0].shape[0]):
            up = filter_tensor_list[0][i]
            for n in range(levels - 1):
                up = cv2.pyrUp(up) + filter_tensor_list[n + 1][i]
            final[i] = up
        return final

    #butterworth bandpass filter
    def butter_bandpass_filter(self, data, lowcut, highcut, fs, order=5):
        omega = 0.5 * fs
        low = lowcut / omega
        high = highcut / omega
        b, a = signal.butter(order, [low, high], btype='band')
        y = signal.lfilter(b, a, data, axis=0)
        return y

    def magnify_motion(self,
                       video_tensor,
                       fps,
                       low=0.4,
                       high=1.5,
                       levels=3,
                       amplification=30):
        lap_video_list = self.laplacian_video(video_tensor, levels=levels)
        filter_tensor_list = []
        for i in range(levels):
            filter_tensor = self.butter_bandpass_filter(
                lap_video_list[i], low, high, fps)
            filter_tensor *= amplification
            filter_tensor_list.append(filter_tensor)
        recon = self.reconstruct_from_tensorlist(filter_tensor_list)
        final = video_tensor + recon
        return final

    #-------------------------------------------------------------#

    def buffer_to_tensor(self, buffer):
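        # the fixed 192x256 shape matches imutils.resize(frame, width=256) on
        # a 4:3 camera frame (see mainLoop); other aspect ratios would need a
        # different shape here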
        tensor = np.zeros((len(buffer), 192, 256, 3), dtype="float")
        i = 0
        for i in range(len(buffer)):
            tensor[i] = buffer[i]
        return tensor

    def run_color(self):
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)
        #print(self.data_buffer)

        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            #self.vidmag_frames = self.vidmag_frames[-self.buffer_size:]
            L = self.buffer_size

        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_color(data_buffer=tensor, fps=self.fps)
            #print(final_vid[0].shape)
            #self.vidmag_frames.append(final_vid[-1])
            #print(self.fps)
            self.frame_out = final_vid[-1]

    def run_motion(self):
        self.times.append(time.time() - self.t0)
        L = len(self.data_buffer)
        #print(L)

        if L > self.buffer_size:
            self.data_buffer = self.data_buffer[-self.buffer_size:]
            self.times = self.times[-self.buffer_size:]
            #self.vidmag_frames = self.vidmag_frames[-self.buffer_size:]
            L = self.buffer_size

        if len(self.data_buffer) > self.buffer_size - 1:
            self.fps = float(L) / (self.times[-1] - self.times[0])
            tensor = self.buffer_to_tensor(self.data_buffer)
            final_vid = self.magnify_motion(video_tensor=tensor, fps=self.fps)
            #print(self.fps)
            #self.vidmag_frames.append(final_vid[-1])
            self.frame_out = final_vid[-1]

    def key_handler(self):
        """
        A plotting or camera frame window must have focus for keypresses to be
        detected.
        """
        self.pressed = cv2.waitKey(1) & 255  # wait 1 ms for a keypress
        if self.pressed == 27:  # exit program on 'esc'
            print("[INFO] Exiting")
            self.webcam.stop()
            sys.exit()

    def mainLoop(self):
        frame = self.webcam.get_frame()
        f1 = imutils.resize(frame, width=256)
        #crop_frame = frame[100:228,200:328]
        self.data_buffer.append(f1)
        self.run_color()
        #print(frame)

        #if len(self.vidmag_frames) > 0:
        #print(self.vidmag_frames[0])
        cv2.putText(frame, "FPS {:.2f}".format(self.fps),
                    (20, 420), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)

        #frame[100:228,200:328] = cv2.convertScaleAbs(self.vidmag_frames[-1])
        cv2.imshow("Original", frame)
        #f2 = imutils.resize(cv2.convertScaleAbs(self.vidmag_frames[-1]), width = 640)
        f2 = imutils.resize(cv2.convertScaleAbs(self.frame_out), width=640)

        cv2.imshow("Color amplification", f2)

        self.key_handler()  # needed so the GUI windows keep refreshing
Example #9
class OpenGLGlyphs:
    # constants
    INVERSE_MATRIX = np.array([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0],
                               [-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()

        # textures
        self.texture_background = None
        self.texture_cube = None

    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # enable textures
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
        self.texture_cube = glGenTextures(1)

        # create cube texture
        image = Image.open("sample2.jpg")
        ix = image.size[0]
        iy = image.size[1]
        image = image.tobytes("raw", "RGBX", 0, -1)  # Pillow removed tostring(); tobytes() is its replacement

        glBindTexture(GL_TEXTURE_2D, self.texture_cube)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     image)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0, 0.0, -10.0)
        self._draw_background()
        glPopMatrix()

        # handle glyph
        image = self._handle_glyph(image)

        glutSwapBuffers()

    def _handle_glyph(self, image):

        # attempt to detect glyph
        rvecs = None
        tvecs = None

        try:
            rvecs, tvecs = detect_glyph(image)
        except Exception as ex:
            print(ex)

        if rvecs is None or tvecs is None:
            return

        # build view matrix
        rmtx = cv2.Rodrigues(rvecs)[0]

        view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0]],
                                [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[1]],
                                [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[2]],
                                [0.0, 0.0, 0.0, 1.0]])

        view_matrix = view_matrix * self.INVERSE_MATRIX

        view_matrix = np.transpose(view_matrix)

        # load view matrix and draw cube
        glBindTexture(GL_TEXTURE_2D, self.texture_cube)
        glPushMatrix()
        glLoadMatrixd(view_matrix)
        self._draw_cube()
        glPopMatrix()

    def _draw_cube(self):
        # draw cube
        glBegin(GL_QUADS)

        glTexCoord2f(0.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, 0.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, 0.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(0.0, 1.0, 0.0)

        glTexCoord2f(1.0, 0.0)
        glVertex3f(0.0, 0.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(0.0, 1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, 0.0, -1.0)

        glTexCoord2f(0.0, 1.0)
        glVertex3f(0.0, 1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(0.0, 1.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, 1.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)

        glTexCoord2f(1.0, 1.0)
        glVertex3f(0.0, 0.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 0.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, 0.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)

        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, 0.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 1.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, 0.0, 0.0)

        glTexCoord2f(0.0, 0.0)
        glVertex3f(0.0, 0.0, -1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(0.0, 1.0, 0.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(0.0, 1.0, -1.0)

        glEnd()

    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0, 3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0, 3.0, 0.0)
        glEnd()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("OpenGL Glyphs")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
Example #10
class SaltwashAR:
 
    # constants
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start webcam thread
        self.webcam.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.reset()
        self.sporty_robot.reset()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background
        self._handle_background(image.copy())

        # handle markers
        self._handle_markers(image.copy())
       
        # handle features
        self.features.handle(self.rocky_robot, self.sporty_robot, image.copy())

        glutSwapBuffers()

    def _handle_background(self, image):
        
        # let features update background image
        image = self.features.update_background_image(image)

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)     
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)
 
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
        
        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
        glPopMatrix()

    def _handle_markers(self, image):

        # attempt to detect markers
        markers = []

        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        # manage markers cache
        if markers:
            self.markers_cache = markers
        elif self.markers_cache: 
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        for marker in markers:
            
            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == ROCKY_ROBOT:
                self.rocky_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())
            elif marker_name == SPORTY_ROBOT:
                self.sporty_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(100, 100)
        glutCreateWindow('SaltwashAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
Example #11
class OpenGLGlyphs:
  
    # constants
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])
 
    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
 
        # initialise glyphs
        self.glyphs = Glyphs()
 
        # initialise shapes
        self.cone = None
        self.sphere = None
 
        # initialise texture
        self.texture_background = None
 
    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)
         
        # assign shapes
        self.cone = OBJ('cone.obj')
        self.sphere = OBJ('sphere.obj')
 
        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
 
    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
 
        # get image from webcam
        image = self.webcam.get_current_frame()
 
        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)     
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
  
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
         
        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        self._draw_background()
        glPopMatrix()
 
        # handle glyphs
        image = self._handle_glyphs(image)
 
        glutSwapBuffers()
 
    def _handle_glyphs(self, image):
 
        # attempt to detect glyphs
        glyphs = []
 
        try:
            glyphs = self.glyphs.detect(image)
        except Exception as ex: 
            print(ex)
 
        if not glyphs: 
            return
 
        for glyph in glyphs:
             
            rvecs, tvecs, glyph_name = glyph
 
            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]
 
            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])
 
            view_matrix = view_matrix * self.INVERSE_MATRIX
 
            view_matrix = np.transpose(view_matrix)
 
            # load view matrix and draw shape
            glPushMatrix()
            glLoadMatrixd(view_matrix)
 
            if glyph_name == SHAPE_CONE:
                glCallList(self.cone.gl_list)
            elif glyph_name == SHAPE_SPHERE:
                glCallList(self.sphere.gl_list)
 
            glPopMatrix()
 
    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
 
    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("OpenGL Glyphs")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
Example #12
class OpenGLGlyphs:
  
    # constants
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])
 
    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
 
        # initialise glyphs
        self.glyphs = Glyphs()
 
        # initialise shapes
        self.cone = None
        self.sphere = None
        # self.hp = HP()
        # initialise texture
        self.texture_background = None
 
    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)
         
        # assign shapes
        self.cone = OBJ('texturedCube.obj')
        self.sphere = OBJ('./sortedOut/hair.obj')
 
        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
 
    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
 
        # get image from webcam
        image = self.webcam.get_current_frame()
 
        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
  
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
         
        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        self._draw_background()
        glPopMatrix()
 
        # handle glyphs
        image = self._handle_headpose(image)
        
        glutSwapBuffers()
 
    def _handle_headpose(self, image):
 
        # attempt to detect glyphs
        glyphs = []
        # glyphs = self.hp.main(image)
        
        try:
            glyphs = self.glyphs.detect(image)
        except Exception as ex: 
            print(ex)
        
        
        if not glyphs: 
            return
 
        for glyph in glyphs:


            rvecs, tvecs, glyph_name = glyph

            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw shape
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if glyph_name == SHAPE_CONE:
                glCallList(self.cone.gl_list)
            elif glyph_name == SHAPE_SPHERE:
                glCallList(self.sphere.gl_list)


            glPopMatrix()
 
    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
 
    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow(b"OpenGL Glyphs")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
Example #13
class OpenGLGlyphs:
    ############################################################## initialisation
    def __init__(self):
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
        self.find = fp()
        self.find.set_img('book.jpg')

        self.hei, self.wid = self.webcam.get_frame_shape()[:2]
        # initialise cube
        self.d_obj = None
        self.img = None
        # initialise texture
        self.texture_background = None
        self.K = None
        self.mark_kp = None
        self.mark_des = None
        self.set_keypoint()
        self.new_kp = None

        self.mat_kp = None
        self.mat_des = None
        self.H = None

        # self.Rt=None

    ############################################################## camera setup
    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

        self.K = my_calibration((Height, Width))
        fx = self.K[0, 0]
        fy = self.K[1, 1]
        fovy = 2 * np.arctan(0.5 * Height / fy) * 180 / np.pi
        aspect = (Width * fy) / (Height * fx)
        # define the near and far clipping planes
        near = 0.1
        far = 100.0
        # set perspective
        gluPerspective(fovy, aspect, near, far)

        glMatrixMode(GL_MODELVIEW)
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    ############################################################## store the marker's keypoints and descriptors
    def set_keypoint(self):

        self.find.start()
        self.mark_kp, self.mark_des = self.find.get_point()

    ############################################################## get the K value
    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        Rt = self._my_cal(image)

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)

        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.wid, self.hei, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()

        # glTranslatef(0.0,0.0,0.0)
        gluLookAt(0.0, 0.0, 12, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)

        self._draw_background()
        glPopMatrix()
        ################ compute Rt and check whether a matching image exists

        if Rt is not None:
            self._set_modelview_from_camera(Rt)
            glEnable(GL_LIGHTING)
            glEnable(GL_LIGHT0)
            glEnable(GL_DEPTH_TEST)
            glEnable(GL_NORMALIZE)
            glClear(GL_DEPTH_BUFFER_BIT)
            ObjLoader("jnu.obj").render_scene()

        glutSwapBuffers()

    ############################################################## convert Rt for OpenGL
    def _set_modelview_from_camera(self, Rt):

        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        Rx = np.array([[0.2, 0, 0], [0, 0, 0.2], [0, 0.2, 0]])

        # set rotation to best approximation
        R = Rt[:, :3]

        # change sign of x-axis
        R[0, :] = -R[0, :]
        # set translation
        t = Rt[:, 3]
        t[0] = -t[0]

        # setup 4*4 model view matrix (bottom row stays [0, 0, 0, 1])
        M = np.eye(4)
        M[:3, :3] = np.dot(R, Rx)
        M[:3, 3] = t

        # transpose and flatten to get column order
        M = M.T

        m = M.flatten()
        # replace model view with the new matrix
        glLoadMatrixf(m)

    ############################################################## return Rt
    def _my_cal(self, image):
        find_H = fp()
        find_H.set_cv_img(image)
        find_H.start()
        kp, des = find_H.get_point()

        self.H = self.match_images(self.mark_kp, self.mark_des, kp, des)
        if self.H is not None:
            cam1 = camera.Camera(
                np.hstack((self.K, np.dot(self.K, np.array([[0], [0],
                                                            [-1]])))))
            # Rt1=dot(linalg.inv(self.K),cam1.P)
            cam2 = camera.Camera(np.dot(self.H, cam1.P))

            A = np.dot(np.linalg.inv(self.K), cam2.P[:, :3])
            A = np.array([A[:, 0], A[:, 1], np.cross(A[:, 0], A[:, 1])]).T
            cam2.P[:, :3] = np.dot(self.K, A)
            Rt = np.dot(np.linalg.inv(self.K), cam2.P)

            return Rt
        else:
            return None

    ##############################################################match image
    def match_images(self, kp1, des1, kp2, des2):
        matcher = cv2.BFMatcher()
        match_des = matcher.knnMatch(des1, des2, k=2)
        matches = []
        matA, matB = [], []
        matC = []

        for m in match_des:
            if m[0].distance < 0.8 * m[1].distance:
                matA.append(kp1[m[0].queryIdx])
                matB.append(kp2[m[0].trainIdx])
                matC.append(des1[m[0].queryIdx])

        if len(matA) > 50:
            ptsA = np.float32([m.pt for m in matA])
            ptsB = np.float32([n.pt for n in matB])
            H1, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 5.0)
            H1 = self.homo_check(H1)
            self.mat_kp = np.array(
                [matB[i] for i in range(status.shape[0]) if status[i] == 1])
            self.mat_des = np.array(
                [matC[i] for i in range(status.shape[0]) if status[i] == 1])

            return H1
        else:
            return None

    ##############################################################homography check
    def homo_check(self, H1):
        if self.H is None:
            return H1
        else:
            if cv2.norm(H1, self.H) > 1.0:
                return H1
            else:
                return self.H

    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0, 3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0, 3.0, 0.0)
        glEnd()

    def keyboard(self, *args):
        if args[0] == GLUT_KEY_UP:
            glutDestroyWindow(self.window_id)
            self.webcam.finish()
            sys.exit()

    ############################################################## OpenGL window init

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(self.wid, self.hei)
        glutInitWindowPosition(200, 200)
        self.window_id = glutCreateWindow(b"OpenGL Glyphs")
        self._init_gl(self.wid, self.hei)
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        glutMainLoop()
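
# my_calibration() is not shown in the example above. A hedged sketch of a
# typical implementation (following the "Programming Computer Vision"
# convention this code appears to follow): scale a reference calibration,
# measured once at a known resolution, to the current frame size. The
# constants below are placeholders, not the original calibration values.
import numpy as np

def my_calibration(sz):
    height, width = sz
    fx = 2555.0 * width / 2592.0    # assumed reference focal lengths
    fy = 2586.0 * height / 1936.0
    K = np.diag([fx, fy, 1.0])
    K[0, 2] = 0.5 * width           # principal point at the image centre
    K[1, 2] = 0.5 * height
    return K

# _init_gl() above then derives the vertical field of view from fy:
#     fovy = 2 * arctan(0.5 * Height / fy) * 180 / pi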
Example #14
class MUcamera:
    def __init__(self):
        self.w = Webcam()
        self.start = self.w.start()
        self.im = self.w.grab_image()
        self.w.register_callback(self.average_intensity, 1)
        self.avg_intensity = []
        self.images = []
        self.filtered = []
        self.f, self.ax = plt.subplots(1, 1)

#To find the average intensity

    def average_intensity(self, image):
        pix_val = list(image.getdata())
        pixel_intensity = []
        for x in pix_val:
            avg = sum(x) / len(x)
            pixel_intensity.append(avg)
        self.avg_pixel = np.average(pixel_intensity)
        self.avg_intensity.append(self.avg_pixel)
        return self.avg_intensity

#To find the average filtered intensity using width as 3

    def average_intensity_filtered(self):

        width = 3
        if len(self.avg_intensity) >= 5:
            for x in range(len(self.avg_intensity) - 2):
                self.filtered.append(
                    (self.avg_intensity[x] + self.avg_intensity[x + 1] +
                     self.avg_intensity[x + 2]) / width)
            return self.filtered
        else:
            self.filtered = self.avg_intensity
            return self.filtered

#To stop the execution of webcam.py

    def stop(self):
        self.w.stop()
        self.average_intensity_mean_plot()
        self.average_intensity_filtered_plot()
        self.daytime()
        self.most_common_color()

    #To plot the average intensity
    def average_intensity_mean_plot(self):

        self.ax.plot(self.avg_intensity, 'C1', label='Average')
        self.ax.legend()
        self.ax.set_xlabel('Image Number')
        self.ax.set_ylabel('Intensity')
        self.ax.set_title('Image Intensity')

#To plot the average filtered intensity

    def average_intensity_filtered_plot(self):
        self.average_intensity_filtered()
        self.ax.plot(self.filtered, 'C2', label='Filtered')
        self.ax.legend()

#To check if it is daytime or nighttime

    def daytime(self):
        self.average = np.mean(np.mean(self.im, axis=1))
        is_day = self.average >= 95
        print(is_day)
        return is_day


#To check the most common color

    def most_common_color(self):
        w, h = self.im.size
        pixels = self.im.getcolors(w * h)
        most_frequent_pixel = pixels[0]
        for count, color in pixels:
            if count > most_frequent_pixel[0]:
                most_frequent_pixel = (count, color)
        proportion = most_frequent_pixel[0] / len(pixels)
        print('The most common color is {}'.format(most_frequent_pixel[1]),
              'with a count of {}'.format(most_frequent_pixel[0]),
              'and the proportion of pixels is {}'.format(proportion))
        return most_frequent_pixel
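
# The 3-sample smoothing in average_intensity_filtered() above is a plain
# moving average; a compact numpy equivalent (a sketch, not part of the
# original class):
import numpy as np

def moving_average(values, width=3):
    values = np.asarray(values, dtype=float)
    if len(values) < width:
        return values
    # 'valid' yields len(values) - width + 1 samples, matching the loop above
    return np.convolve(values, np.ones(width) / width, mode='valid')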
Example #15
import cv2
from glyphfunctions import *
from glyphdatabase import *
from webcam import Webcam

webcam = Webcam()
webcam.start()

QUADRILATERAL_POINTS = 4
BLACK_THRESHOLD = 100
WHITE_THRESHOLD = 155

while True:

    # Stage 1: Read an image from our webcam
    image = webcam.get_current_frame()

    # Stage 2: Detect edges in image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(gray, 100, 200)

    # Stage 3: Find contours
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edges',edges)
    # contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    #
    # for contour in contours:
    #
    #     # Stage 4: Shape check
    #     perimeter = cv2.arcLength(contour, True)
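    #
    #     # A sketch of how Stage 4 typically continues in this kind of glyph
    #     # pipeline (an assumption -- the original loop is truncated here):
    #     # approximate the contour and keep only quadrilaterals.
    #     approx = cv2.approxPolyDP(contour, 0.01 * perimeter, True)
    #     if len(approx) == QUADRILATERAL_POINTS:
    #         pass  # Stage 5 would perspective-warp and threshold the glyph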
Example #16
def main():
    # initialise names used in the finally block, in case setup fails early
    collector = None
    movie = None
    recording = False

    try:

        camera_type = sys.argv[1]

        if len(sys.argv) == 3:
            if sys.argv[2] == "record":
                recording = True
            else:
                recording = False

        if camera_type == "webcam":

            collector = Webcam(video_width=640, video_height=480)
            collector.start()

        else:
            print("No such camera {camera_type}")
            collector = None
            exit(-1)

        if not os.path.isfile(MODEL_PATH):
            print("Downloading model, please wait...")
            download_file_from_google_drive(SOURCE, MODEL_PATH)
            print("Done downloading the model.")

        # get device
        device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        # initialise model
        model = get_model_instance_segmentation(NUMBER_OF_CLASSES)
        model.load_state_dict(
            torch.load(MODEL_PATH, map_location=device))
        model.to(device)
        model.eval()

        if recording:
            movie = cv2.VideoWriter(
                f'./recordings/hand_frcnn_{camera_type}.avi',
                cv2.VideoWriter_fourcc(*'DIVX'), 8, (640, 480))

        with torch.no_grad():

            while collector.started:

                image, _ = collector.read()

                if image is not None:

                    orig = image.copy()

                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    image = transforms.ToTensor()(image).to(device)

                    out = model([image])

                    boxes = get_prediction(pred=out, threshold=.7)

                    try:

                        for box in boxes:
                            cv2.rectangle(img=orig,
                                          pt1=(box[0], box[1]),
                                          pt2=(box[2], box[3]),
                                          color=(0, 255, 255),
                                          thickness=2)

                        if recording:
                            movie.write(orig)

                        cv2.imshow("mask", orig)
                        k = cv2.waitKey(1)

                        if k == ord('q'):
                            collector.stop()

                    except Exception as e:
                        print(e)

    finally:
        print("Stopping stream.")
        if collector is not None:
            collector.stop()
        if recording:
            movie.release()
        cv2.destroyAllWindows()
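
# get_prediction() is not defined in this example. A minimal sketch of what
# such a helper plausibly does (an assumption, not the original code): keep
# the boxes of the first image in the batch whose scores clear the threshold.
# torchvision detection models return a list of dicts with 'boxes', 'labels'
# and 'scores'.
def get_prediction(pred, threshold=0.7):
    scores = pred[0]['scores'].detach().cpu().numpy()
    boxes = pred[0]['boxes'].detach().cpu().numpy().astype(int)
    return [box for box, score in zip(boxes, scores) if score > threshold]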
Example #17
class OpenGLGlyphs:

    INVERSE_MATRIX = np.array([[1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0],
                               [-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        self.webcam = Webcam()
        self.webcam.start()

        self.glyphs = Glyphs()
        self.cone = None
        self.sphere = None

        self.texture_background = None

    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        self.cone = OBJ('cone.obj')
        self.sphere = OBJ('sphere.obj')

        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        image = self.webcam.get_current_frame()

        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     bg_image)

        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0, 0.0, -10.0)
        self._draw_background()
        glPopMatrix()

        image = self._handle_glyphs(image)

        glutSwapBuffers()

    def _handle_glyphs(self, image):

        glyphs = []

        try:
            glyphs = self.glyphs.detect(image)
        except Exception as ex:
            print(ex)

        if not glyphs:
            return

        for glyph in glyphs:

            rvecs, tvecs, glyph_name = glyph

            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array(
                [[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0]],
                 [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[1]],
                 [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[2]],
                 [0.0, 0.0, 0.0, 1.0]])

            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            glPushMatrix()
            glLoadMatrixd(view_matrix)
            print(view_matrix)

            if glyph_name == SHAPE_CONE:
                glCallList(self.cone.gl_list)
            elif glyph_name == SHAPE_SPHERE:
                glCallList(self.sphere.gl_list)

            glPopMatrix()

    def _draw_background(self):

        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0, 3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0, 3.0, 0.0)
        glEnd()

    def main(self):
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow(b"nichi Technologies")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
Example #18
class ArkwoodAR:
    def __init__(self):

        # initialise webcams
        self.webcam_one = Webcam(0)
        self.webcam_two = Webcam(1)

        # initialise config
        self.config_provider = ConfigProvider()

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # start webcam threads
        self.webcam_one.start()
        self.webcam_two.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        window_half_width = glutGet(GLUT_WINDOW_WIDTH) // 2
        window_height = glutGet(GLUT_WINDOW_HEIGHT)

        # get image from webcams
        image_one = self.webcam_one.get_current_frame()
        image_two = self.webcam_two.get_current_frame()

        # detect feature in images
        detection = self.features.detect(image_one, image_two)

        # render first image
        glViewport(0, 0, window_half_width, window_height)

        if detection:
            image_one = self.features.render(image_one, detection[0])

        self._handle_background(image_one)

        # render second image
        glViewport(window_half_width, 0, window_half_width, window_height)

        if detection:
            image_two = self.features.render(image_two, detection[1])

        self._handle_background(image_two)

        # swap buffers
        glutSwapBuffers()

    # handle background
    def _handle_background(self, image):

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0, 0.0, -10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0, 3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0, 3.0, 0.0)
        glEnd()
        glPopMatrix()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 360)
        glutInitWindowPosition(100, 100)
        glutCreateWindow('ArkwoodAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
Example #19
class ArucoFootball:

    # constants
    INVERSE_MATRIX = np.array([[1.0, 1.0, 1.0, 1.0],
                               [-1.0, -1.0, -1.0, -1.0],
                               [-1.0, -1.0, -1.0, -1.0],
                               [1.0, 1.0, 1.0, 1.0]])

    def __init__(self, btAddr):
        # init needed values
        self.btAddr = btAddr
        self.player = None
        self.texture_background = None
        self.set_ids = []
        self.set_players = []
        self.players = []
        self.calc_values()
        # initialise webcam and start thread
        self.webcam = Webcam()
        self.webcam.start()
        # connect wiimote and create model
        wiiModel = WiiModel.WiiModel(self.btAddr)
        # init openGl, Qt and Wiimote
        self.initOpenGL()
        self.initGUI()
        # run wiimote-connection-loop
        thread = threading.Thread(target=wiiModel.wiimoteLoop, args=(self.mainWindow, self.cursor))
        thread.start()
        # run opengl and camera in a thread
        thread = threading.Thread(target=glutMainLoop, args=())
        thread.start()
        # run Qt
        self.app.exec_()

    def initGUI(self):
        self.app = QApplication(sys.argv)
        self.mainWindow = MainWindow(self.players)
        self.mainWindow.show()
        self.set_player_widget = self.mainWindow.listWidgetB
        self.unset_player_widget = self.mainWindow.listWidgetA

        self.unset_player_widget.itemChanged.connect(self.removeID)
        self.mainWindow.setFocus()
        self.mainWindow.setWindowTitle("Tactic-Window")
        self.mainWindow.resize(600, 800)
        self.cursor = QCursor()

    def initOpenGL(self):
        # setup OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(1280, 960)
        glutInitWindowPosition(800, 400)
        self.window_id = glutCreateWindow("Footballfield")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)

        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # assign shapes
        player_model = "models/football-player-new.obj"
        self.player1 = OBJ(player_model, 1)
        self.player2 = OBJ(player_model, 2)
        self.player3 = OBJ(player_model, 3)
        # self.player4 = OBJ(player_model, 4)

        # add Players to list
        self.players.append(Player("Dani", "1", "player_images/dani-img.jpg", self.player1))
        self.players.append(Player("Maxi", "2", "player_images/maxi-img.jpg", self.player2))
        self.players.append(Player("Jonas", "3", "player_images/jonas-img.jpg", self.player3))
        # self.players.append(Player("Michi", "4", "player_images/michi-img.jpg", self.player4))

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    # if player removed from list -> remove set id
    def removeID(self):
        for index in range(self.unset_player_widget.count()):
            item = self.unset_player_widget.item(index)
            if item:
                data = item.data(Qt.UserRole)
                if data:
                    for player in self.players:
                        if player.number == data[0] and player.marker_num is not None:
                            self.set_ids.remove(player.marker_num)
                            player.marker_num = None

    # add players to set_players for all players on "Field"
    def setChangedListItems(self):
        items = []
        for index in range(self.set_player_widget.count()):
            item = self.set_player_widget.item(index)
            if item:
                data = item.data(Qt.UserRole)
                for player in self.players:
                    if player.number == data[0]:
                        items.append(player)

        self.set_players = items

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0, 0.0, -10.0)
        self._draw_background()
        glPopMatrix()

        # handle glyphs
        image = self._handle_aruco(image)

        glutSwapBuffers()

    def calc_values(self):
        path = 'calib_images/*.jpg'
        self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = Tracker.calculate_camera_values(path)

    def _handle_aruco(self, image):
        img = image
        corners, ids, _ = Tracker.preprocess(img)
        if ids is not None:
            # check for OpenCV output in different versions
            params = aruco.estimatePoseSingleMarkers(corners, 1, self.mtx, self.dist)
            if len(params) == 2:
                rvec, tvec = params
            else:
                rvec, tvec, _ = params
        else:
            return

        # set all players to list from "Field"
        self.setChangedListItems()
        for i in range(len(ids)):

            rvecs, tvecs, glyph_name = rvec[i], tvec[i], ids[i][0]
            # build view matrix
            rmtx = cv2.Rodrigues(rvecs)[0]
            view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0][0]],
                                    [rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[0][1]],
                                    [rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[0][2]],
                                    [0.0, 0.0, 0.0, 1.0]])

            view_matrix = view_matrix * self.INVERSE_MATRIX
            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw shape
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            # check if ID is set or not and set it
            if ids[i] not in self.set_ids:
                for player in self.set_players:
                    if player.marker_num is None:
                        player.marker_num = ids[i]
                        self.set_ids.append(ids[i])
                        break

            # if ID is set project model
            for player in self.set_players:
                if player.marker_num == ids[i]:
                    glCallList(player.model.gl_list)

            glPopMatrix()

    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0,  3.0, 0.0)
        glEnd()
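
# Tracker.preprocess() and Tracker.calculate_camera_values() are external to
# this example. A hedged sketch of the detection half, using only documented
# calls from the legacy cv2.aruco module (the dictionary choice is an
# assumption):
import cv2
import cv2.aruco as aruco

def preprocess(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    dictionary = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()
    corners, ids, rejected = aruco.detectMarkers(gray, dictionary,
                                                 parameters=parameters)
    return corners, ids, rejected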
Example #20
class MUCamera:
    def __init__(self):
        self.MU = Webcam()
        self.img_intensity = []
        self.img_time = []
        self.MU.start()
        self.filtered_average = []
        self.MU.register_callback(self._average_intensity, 1)
        self.img = []
        self.euclidean_dist = None

    def _average_intensity(self, image):
        '''
        A function that does the actual calculations
        :param image: the image is retrieved from the webcam.py file using the callback function
        :return: the average intensity of the captured images in a list, the time of capture, the image objects
            in a list
        '''
        self.img_intensity.append(np.mean(np.mean(image)))
        self.img_time.append(time.time())
        self.img.append(image)
        return np.mean(np.mean(image))

    def average_intensity(self):
        '''
        The function that should actually be called if you want to know the average intensity
        :return: The average intensity of the image that was most recently retrieved from the webcam
        '''
        while len(self.img_intensity) < 1:
            pass
        return self.img_intensity[-1]

    def filtered_average_intensity(self):
        '''
        Function that takes in the average intensity list from _average_intensity() and passes the data through
        a Butterworth filter.
        https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html
        :return: A list of the filtered average intensities
        '''
        b, a = signal.butter(5, 0.025)
        zi = signal.lfilter_zi(b, a)
        z, _ = signal.lfilter(b,
                              a,
                              self.img_intensity,
                              zi=zi * self.img_intensity[0])
        z2, _ = signal.lfilter(b, a, z, zi=zi * z[0])
        self.filtered_average = signal.filtfilt(b, a, self.img_intensity)

    def intensity_plot(self):
        '''
        Creates a plot of the raw average intensities and the filtered intensities
        Raw is a solid black line, filtered is a dashed red line
        :return: A plot
        '''
        t0 = self.img_time[0]
        t = [(x - t0) / 60 for x in self.img_time]
        y = self.img_intensity
        y_filtered = self.filtered_average

        plt.plot(t, y, 'k', t, y_filtered, 'r--')
        plt.xlabel('Minutes')
        plt.ylabel('Average Intensity')
        plt.legend(
            ('Average Image Intensity', 'Smoothed Average Image Intensity'),
            loc='best')
        plt.title(
            'Average Image Intensity from 5:45 PM to 8:30 AM PST, May 29th')
        plt.grid()
        plt.show()

    def daytime(self, threshold=75):
        '''
        Determines whether it is night or day from the retrieved webcam image
        :param threshold: the average intensity value used to determine the time of day;
        if the calculated average intensity is less than the threshold, it is night, otherwise it is day
        :return: True if daytime, False if nighttime
        '''
        while len(self.img_intensity) < 1:
            pass
        img = self.img[-1]
        intensity = np.mean(np.mean(img))
        if intensity < threshold:
            return False
        else:
            return True

    def common_color(self):
        '''
        Calculates the most common color in the retrieved webcam image
        Uses statistics library
        :return: color that occurs the most in a tuple (R,G,B)
        '''
        img = self.img[-1].getdata()
        m = statistics.mode(img)
        return m

    def stop(self):
        '''
        Function that terminates the callback function that is started in __init__
        Will call the filtering function and plotting function
        :return:
        '''
        self.MU.stop()
        self.filtered_average_intensity()
        self.intensity_plot()

    def motion(self):
        '''
        Determines whether or not motion took place between two images
        Waits until 25 images have been retrieved to make sure the two images being compared
        are actually different. The webcam updates about once a minute, so 25 images should be long enough
        :return: True if motion occurred, false if motion did not occur
        '''
        while len(self.img) < 25:
            pass

        img1 = self.img[-25]
        img2 = self.img[-1]
        img3 = ImageChops.subtract(img1, img2)
        self.euclidean_dist = mth.sqrt(np.sum(np.array(img3.getdata())**2))

        if self.euclidean_dist > 8000:
            return True
        else:
            return False

    def highlight_motion(self):
        '''
        Creates an image that highlights the motion between 2 webcam images in red
        Waits until 25 images have been retrieved to make sure the two images being compared
        are actually different. The webcam updates about once a minute, so 25 images should be long enough
        :return: The second picture, but with the different pixels highlighted in red
        '''
        while len(self.img) < 25:
            pass
        img1 = self.img[-25]
        img2 = self.img[-1]

        img3 = ImageChops.subtract(img1, img2)
        img2_data = np.asarray(img2)
        img3_data = np.asarray(img3)
        img2_data.setflags(write=1)
        for i in range(len(img3_data[1, :])):
            for j in range(len(img3_data[:, i])):
                avg = np.mean(img3_data[j, i])
                if avg > 35 and j > 250:
                    img2_data[j, i] = [255, 0, 0]

        img_new = Image.fromarray(img2_data, 'RGB')
        img_new.show()

    def event(self):
        '''
        Determines if there is an event going on in the quad. Based on the color and euclidean distance of
        two images in the quad
        :return: True if there is an event, false if otherwise
        Also displays the test cases.
        :return The image with the grey square is the baseline against which everything is compared. The grey was the most
        common color in a cropped version of the size of the square
        :return The image with the white square is the case where there is an event
        '''
        while len(self.img_intensity) < 1:
            pass

        pxl_coor = (250, 365, 500, 470)
        img_grey_large = np.asarray(self.img[-1])
        img_event = np.asarray(self.img[-1])
        img = self.img[-1].crop(pxl_coor)
        baseline = np.asarray(img)

        baseline.setflags(write=1)
        img_grey_large.setflags(write=1)
        img_event.setflags(write=1)

        for i in range(len(baseline[1, :])):
            for j in range(len(baseline[:, i])):
                baseline[j, i] = [170, 170, 168]

        for i in range(249, 500):
            for j in range(365, 470):
                img_grey_large[j, i] = [170, 170, 168]
                img_event[j, i] = [255, 255, 255]

        img_grey = Image.fromarray(baseline, 'RGB')
        img_grey_large = Image.fromarray(img_grey_large, 'RGB')
        img_event = Image.fromarray(img_event, 'RGB')

        img_compare = ImageChops.subtract(img, img_grey)
        euclidean_dist = mth.sqrt(np.sum(np.array(img_compare.getdata())**2))

        img_grey_large.show()
        img_event.show()

        if euclidean_dist > 8000:
            return True
        else:
            return False
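
# Both motion() and event() above reduce to a Euclidean distance between two
# frames; a compact numpy equivalent of the ImageChops computation (a sketch,
# with the 8000 threshold taken from the class above):
import numpy as np

def motion_between(img1, img2, threshold=8000.0):
    diff = np.asarray(img1, dtype=float) - np.asarray(img2, dtype=float)
    # ImageChops.subtract clips negative differences to zero; mirror that
    diff = np.clip(diff, 0, None)
    return float(np.sqrt(np.sum(diff ** 2))) > threshold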
Example #21
class MUcamera:
    def __init__(self):
        self.w = Webcam()

        self.start = self.w.start()
        self.im = self.w.grab_image()
        self.w.register_callback(self.average_intensity, 1)
        #        self.grabimage = self.w.grab_image()
        self.avg_intensity = []
        self.images = []
        self.filt_list = []
        self.f, self.ax = plt.subplots(1, 1)
        self.day = []

    def average_intensity(self, image=None):
        # the webcam callback passes an image; fall back to the stored frame
        if image is None:
            image = self.im
        pix_val = list(image.getdata())
        pixel_intensity = []
        for x in pix_val:
            avg = sum(x) / len(x)
            pixel_intensity.append(avg)
        self.avg_pixel = np.average(pixel_intensity)
        self.avg_intensity.append(self.avg_pixel)
        return self.avg_intensity
#        avg  = np.mean(np.mean(image,axis=1))
#        self.avg_list.append(avg)

    def average_intensity_filtered(self):
        width = 3
        #        i=0

        if len(self.avg_intensity) >= 5:
            for x in range(len(self.avg_intensity) - 2):
                self.filt_list.append(
                    (self.avg_intensity[x] + self.avg_intensity[x + 1] +
                     self.avg_intensity[x + 2]) / width)
            return self.filt_list
        else:
            return self.filt_list
#        while i+width <= len(self.filt_list):
#            y = self.filt_list[i:i+width]
#            total_sum=sum(y)/width
#            self.filt_list.append(total_sum)
#            i+=1

#    def stop(self):
#        self.w.stop()
#        self.average_intensity_mean_plot()
#        self.average_intensity_filtered_plot()
#
#    def average_intensity_mean_plot(self):
#        self.ax.plot(self.avg_intensity, 'C1')
#        self.ax.set_xlabel('Image Number')
#        self.ax.set_ylabel('Intensity')
#        self.ax.set_title('Image Intensity')
##
#    def average_intensity_filtered_plot(self):
#        self.average_intensity_filtered()
#        self.ax.plot(self.filt_list, 'C2')
#

    def daytime(self):
        self.average = np.mean(np.mean(self.im, axis=1))
        is_day = self.average >= 95
        print(is_day)
        return is_day

    def most_common_color(self):
        w, h = self.im.size
        pixels = self.im.getcolors(w * h)
        print(len(pixels))
        most_frequent_pixel = pixels[0]
        for count, color in pixels:
            if count > most_frequent_pixel[0]:
                most_frequent_pixel = (count, color)

    #        compare("Most Common", image, most_frequent_pixel[1])
    #    print(self.most_frequent_pixel)
        print(most_frequent_pixel[0] / len(pixels), most_frequent_pixel)
        return most_frequent_pixel

    def stop(self):
        self.w.stop()
        self.daytime()
        self.most_common_color()
        self.average_intensity_mean_plot()
        self.average_intensity_filtered_plot()

    def average_intensity_mean_plot(self):
        self.average_intensity()
        self.ax.plot(self.avg_intensity, 'C1')
        self.ax.set_xlabel('Image Number')
        self.ax.set_ylabel('Intensity')
        self.ax.set_title('Image Intensity')
#

    def average_intensity_filtered_plot(self):
        self.average_intensity_filtered()
        self.ax.plot(self.filt_list, 'C2')
Example #22
class HandTracker:
    def __init__(self):
        self.webcam = Webcam()
        self.webcam.start()

        self.detection = Detection()

        self.x_axis = 0.0
        self.z_axis = 0.0
        self.show_cube = False
        self.texture_background = None
        self.texture_cube = None

    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(Width) / float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # enable texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
        self.texture_cube = glGenTextures(1)

        # create cube texture
        image = Image.open("/home/annus/Pictures/image.jpeg")
        ix = image.size[0]
        iy = image.size[1]
        image = image.tobytes("raw", "RGBX", 0, -1)

        glBindTexture(GL_TEXTURE_2D, self.texture_cube)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     image)

    def _draw_scene(self):
        # handle any hand gesture
        self._handle_gesture()

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0, 0.0, -11.2)
        self._draw_background()
        glPopMatrix()

        # draw cube if enabled
        if self.show_cube:
            glColor4f(1.0, 1.0, 1.0, 1.0)
            glBlendFunc(GL_SRC_ALPHA, GL_ONE)
            glEnable(GL_BLEND)
            glDisable(GL_DEPTH_TEST)

            glBindTexture(GL_TEXTURE_2D, self.texture_cube)
            glPushMatrix()
            glTranslatef(0.0, 0.0, -7.0)
            glRotatef(self.x_axis, 1.0, 0.0, 0.0)
            glRotatef(0.0, 0.0, 1.0, 0.0)
            glRotatef(self.z_axis, 0.0, 0.0, 1.0)
            self._draw_cube()
            glPopMatrix()

            glDisable(GL_BLEND)
            glEnable(GL_DEPTH_TEST)

            # update rotation values
            self.x_axis = self.x_axis - 10
            self.z_axis = self.z_axis - 10

        glutSwapBuffers()

    def _handle_gesture(self):
        # get image from webcam
        image = self.webcam.get_current_frame()

        # detect hand gesture in image
        is_okay = self.detection.is_item_detected_in_image(
            'haarcascade_okaygesture.xml', image.copy())
        is_vicky = self.detection.is_item_detected_in_image(
            'haarcascade_vickygesture.xml', image.copy())

        if is_okay:
            # okay gesture shows cube
            self.show_cube = True
        elif is_vicky:
            # vicky gesture hides cube
            self.show_cube = False

        # convert image to OpenGL texture format
        image = cv2.flip(image, 0)
        gl_image = Image.fromarray(image)
        ix = gl_image.size[0]
        iy = gl_image.size[1]
        gl_image = gl_image.tobytes("raw", "BGRX", 0, -1)

        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     gl_image)

    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-4.0, -3.0, 4.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(4.0, -3.0, 4.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(4.0, 3.0, 4.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-4.0, 3.0, 4.0)
        glEnd()

    def _draw_cube(self):
        # draw cube
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-1.0, 1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(-1.0, -1.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(-1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, -1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-1.0, 1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, 1.0, 1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(-1.0, -1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, -1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(-1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, -1.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, -1.0, 1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-1.0, -1.0, -1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(-1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(-1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-1.0, 1.0, -1.0)
        glEnd()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        glutCreateWindow("OpenGL Hand Tracker")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
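
# Detection.is_item_detected_in_image() is not shown in this example. A
# minimal sketch of a Haar-cascade check with standard OpenCV calls (the
# cascade filenames are the ones the example already references):
import cv2

def is_item_detected_in_image(cascade_path, image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascade_path)
    items = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    return len(items) > 0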
Example #23
class OpenGLGlyphs:
	# constants
	INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
								[-1.0,-1.0,-1.0,-1.0],
								[-1.0,-1.0,-1.0,-1.0],
								[ 1.0, 1.0, 1.0, 1.0]])
	

	
	def __init__(self):
		# initialise webcam and start thread
		self.webcam = Webcam()
		self.webcam.start()
		
		#initialise
		self.hBox = causalBox(winSize = 10)
		self.vBox = causalBox(winSize = 10)
		
		playerName = ["John","Doe","Tommy","Emmanuel"]
		self.game = GameCtrler(playerName)
		
		# initialise shapes
		self.dragon = None
		self.fly = None
		self.ele = None
		self.boat = None
		self.horse = None
		self.house = None
		self.juk = None

		# textures
		self.texture_background = None

	def _init_gl(self, Width, Height):
		# initialPosition = (0,0,0) 
		glClearColor(0.0, 0.0, 0.0, 0.0)
		glClearDepth(1.0)
		glDepthFunc(GL_LESS)
		glEnable(GL_DEPTH_TEST)
		glShadeModel(GL_SMOOTH)
		
		# Projection matrix
		glMatrixMode(GL_PROJECTION)
		glLoadIdentity()
		fovy = 2*np.arctan(Height/1375.0)*180.0/np.pi
		gluPerspective(fovy, float(Width)/float(Height), 0.1, 1375.1)
		glViewport(0,0,Width,Height)
		glMatrixMode(GL_MODELVIEW)

		# assign shapes
		print("loading model 1/7")
		self.dragon = OBJ('Drgn9-6.obj')
		print("loading model 2/7")
		self.fly = OBJ('plane.obj')
		print("loading model 3/7")
		self.ele = OBJ('minion.obj')
		print("loading model 4/7")
		self.boat = OBJ('VikingShip.mtl.obj')
		print("loading model 5/7")
		self.horse = OBJ('Wooden_Toy_Truck.obj')
		print("loading model 6/7")
		self.house = OBJ('house_001.obj')
		print("loading model 7/7")
		self.juk = OBJ('Barrel_variation.obj')
		print("loading model done")

		# enable textures
		glEnable(GL_TEXTURE_2D)
		self.texture_background = glGenTextures(1)

	def _draw_scene(self):
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
		glLoadIdentity()
		
		# get image from webcam
		image = self.webcam.get_current_frame()

		self._draw_background(image)
		
		# handle glyphs
		image = self._handle_glyphs(image)
		
		glutSwapBuffers()
		
	def _handle_glyphs(self, image):
		
		
		# attempt to detect glyphs
		glyphs = []

		try:
			glyphs = detect_glyph(image, self.hBox, self.vBox, self.game)
		except Exception as ex: 
			print(ex)

		if not glyphs: 
			return
			
		for glyph in glyphs:
		
			rvecs, tvecs, glyph_name = glyph
			# build view matrix
			rmtx = cv2.Rodrigues(rvecs)[0]
			view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
									[rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
									[rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
									[0.0       ,0.0       ,0.0       ,1.0    ]])
			view_matrix =  view_matrix * self.INVERSE_MATRIX 
			view_matrix = np.transpose(view_matrix)
			
			# load view matrix and draw cube
			glPushMatrix()
			glLoadIdentity()
			glLoadMatrixd(view_matrix)
			if glyph_name == "B juk":
				glCallList(self.juk.gl_list)
			elif glyph_name == "R juk":
				glCallList(self.juk.gl_list)
			elif glyph_name == "B Phao":
				glCallList(self.house.gl_list)
			elif glyph_name == "R Phao":
				glCallList(self.house.gl_list)
			elif glyph_name == "B Horse":
				glCallList(self.horse.gl_list)
			elif glyph_name == "R Horse":
				glCallList(self.horse.gl_list)
			elif glyph_name == "B Boat":
				glCallList(self.boat.gl_list)
			elif glyph_name == "R Boat":
				glCallList(self.boat.gl_list)
			elif glyph_name == "B Ele":
				glCallList(self.ele.gl_list)
			elif glyph_name == "R Ele":
				glCallList(self.ele.gl_list)
			elif glyph_name == "B Fly":
				glCallList(self.fly.gl_list)
			elif glyph_name	== "R Fly":
				glCallList(self.fly.gl_list)
			elif glyph_name == "B T":
				glCallList(self.dragon.gl_list)
			elif glyph_name == "R T":
				glCallList(self.dragon.gl_list)				
			else:
				glCallList(self.dragon.gl_list)				
			glPopMatrix()

	def _draw_background(self, image):
	
		# convert image to OpenGL texture format
		bg_image = cv2.flip(image, 0)
		bg_image = Image.fromarray(bg_image)     
		ix = bg_image.size[0]
		iy = bg_image.size[1]
		bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
		
		# create background texture
		glBindTexture(GL_TEXTURE_2D, self.texture_background)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
		glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
		glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

		# draw background
		glBindTexture(GL_TEXTURE_2D, self.texture_background)
		glPushMatrix()
		glLoadIdentity()
		glTranslatef(-100,100,-1375)
		glBegin(GL_QUADS)
		i, j = 1520/2, 820/2
		glTexCoord2f(0.0, 1.0); glVertex3f(-i, -j, 0.0)
		glTexCoord2f(1.0, 1.0); glVertex3f( i, -j, 0.0)
		glTexCoord2f(1.0, 0.0); glVertex3f( i,  j, 0.0)
		glTexCoord2f(0.0, 0.0); glVertex3f(-i,  j, 0.0)
		glEnd()
		glPopMatrix()

	def main(self):
	
		width = 1520
		height = 820
		# setup and run OpenGL
		glutInit()
		glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
		glutInitWindowSize(760, 410)
		glutInitWindowPosition(100, 100)
		self.window_id = glutCreateWindow("OpenGL Glyphs")
		glutDisplayFunc(self._draw_scene)
		glutIdleFunc(self._draw_scene)
		self._init_gl(width, height)
		glutMainLoop()
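
# A note on the projection setup in _init_gl() above (an observation, not
# original code): for a pinhole camera with focal length f in pixels the
# vertical field of view is
#     fovy = 2 * arctan((Height / 2) / f)
# so 2*np.arctan(Height/1375.0) corresponds to f = 687.5 pixels, with the
# 1375.0 constant playing the role of 2*f. The far plane of 1375.1 and the
# glTranslatef(-100, 100, -1375) background placement put the video quad
# just inside the frustum at that depth.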
Example #24
class HandTracker:
    def __init__(self):
        self.webcam = Webcam()
        self.webcam.start()

        self.detection = Detection()

        self.x_axis = 0.0
        self.y_axis = 0.0
        self.z_axis = 0.0
        self.z_pos = -7.0

    def _handle_gesture(self):
        # get image from webcam
        image = self.webcam.get_current_frame()

        # detect hand gesture in image
        is_okay = self.detection.is_item_detected_in_image(
            'haarcascade_okaygesture.xml', image.copy())
        is_vicky = self.detection.is_item_detected_in_image(
            'haarcascade_vickygesture.xml', image.copy())

        if is_okay:
            # okay gesture moves cube towards us
            self.z_pos = self.z_pos + 1.0
        elif is_vicky:
            # vicky gesture moves cube away from us
            self.z_pos = self.z_pos - 1.0

    def _draw_cube(self):
        # draw cube
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-1.0, 1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(-1.0, -1.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(-1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, -1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-1.0, 1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, 1.0, 1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(-1.0, -1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, -1.0, -1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(-1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(1.0, -1.0, -1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(1.0, 1.0, -1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(1.0, -1.0, 1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-1.0, -1.0, -1.0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(-1.0, -1.0, 1.0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(-1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-1.0, 1.0, -1.0)
        glEnd()

    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(Width) / float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # initialize lighting
        glLightfv(GL_LIGHT0, GL_AMBIENT, (0.5, 0.5, 0.5, 1.0))
        glLightfv(GL_LIGHT0, GL_DIFFUSE, (1.0, 0.8, 0.0, 1.0))
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHTING)

        # initialize blending
        glColor4f(0.2, 0.2, 0.2, 0.5)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE)
        glEnable(GL_BLEND)

        # initialize texture
        image = Image.open("devil.jpg")
        ix = image.size[0]
        iy = image.size[1]
        image = image.tobytes("raw", "RGBX", 0, -1)

        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE,
                     image)
        glEnable(GL_TEXTURE_2D)

    def _draw_scene(self):
        # handle any hand gesture
        self._handle_gesture()

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # position and rotate cube
        glTranslatef(0.0, 0.0, self.z_pos)
        glRotatef(self.x_axis, 1.0, 0.0, 0.0)
        glRotatef(self.y_axis, 0.0, 1.0, 0.0)
        glRotatef(self.z_axis, 0.0, 0.0, 1.0)

        # position lighting
        glLightfv(GL_LIGHT0, GL_POSITION, (0.0, 0.0, 2.0, 1.0))

        # draw cube
        self._draw_cube()

        # update rotation values
        self.x_axis = self.x_axis - 10
        self.z_axis = self.z_axis - 10

        glutSwapBuffers()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        glutCreateWindow("OpenGL Hand Tracker")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
Example #25
class SaltwashAR:
 
    # constants
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture
        self.texture_background = None

    def _init_gl(self):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start webcam thread
        self.webcam.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.reset()
        self.sporty_robot.reset()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background
        self._handle_background(image.copy())

        # handle markers
        self._handle_markers(image.copy())
       
        # handle features
        self.features.handle(self.rocky_robot, self.sporty_robot, image.copy())

        glutSwapBuffers()

    def _handle_background(self, image):
        
        # let features update background image
        image = self.features.update_background_image(image)

        # convert image to OpenGL texture format
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)     
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)
 
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
        
        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd()
        glPopMatrix()

    def _handle_markers(self, image):

        # attempt to detect markers
        markers = []

        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        # manage markers cache
        if markers:
            self.markers_cache = markers
        elif self.markers_cache: 
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        for marker in markers:
            
            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix: Rodrigues turns the 3x1 rotation vector
            # into a 3x3 rotation matrix
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            # element-wise product (not a matrix multiply): flips the signs of
            # rows 2 and 3 to map OpenCV's y-down, z-forward camera frame onto
            # OpenGL's y-up, z-back convention
            view_matrix = view_matrix * self.INVERSE_MATRIX

            # transpose: numpy is row-major, glLoadMatrixd expects column-major
            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == ROCKY_ROBOT:
                self.rocky_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())
            elif marker_name == SPORTY_ROBOT:
                self.sporty_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(100, 100)
        self.window_id = glutCreateWindow('SaltwashAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
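
The heart of _handle_markers above is turning an OpenCV marker pose (rvecs, tvecs) into a matrix that glLoadMatrixd will accept. A standalone sketch of that conversion, assuming only NumPy and OpenCV (marker_pose_to_gl is a name of my own, not from the example):

import cv2
import numpy as np

# Sign mask: OpenCV's camera frame is y-down, z-forward; OpenGL's is y-up,
# z-back, so the second and third rows of the pose matrix change sign.
INVERSE_MATRIX = np.array([[ 1.0,  1.0,  1.0,  1.0],
                           [-1.0, -1.0, -1.0, -1.0],
                           [-1.0, -1.0, -1.0, -1.0],
                           [ 1.0,  1.0,  1.0,  1.0]])

def marker_pose_to_gl(rvec, tvec):
    # 3x1 rotation vector -> 3x3 rotation matrix
    rmtx, _ = cv2.Rodrigues(rvec)
    view = np.eye(4)
    view[:3, :3] = rmtx
    view[:3, 3] = np.asarray(tvec).ravel()
    view = view * INVERSE_MATRIX   # element-wise sign flip, not a matrix product
    return np.transpose(view)      # row-major numpy -> column-major OpenGL

Feeding the result to glPushMatrix()/glLoadMatrixd() places anything drawn afterwards on top of the detected marker, exactly as the loop in _handle_markers does.
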
Beispiel #26
0
class AR_Project:
 
    def __init__(self):
        # sigint interrupt initialize
        signal.signal(signal.SIGINT, self.signal_handler)    

        # initialize webcam
        self.webcam = Webcam()
        self.webcam.start()
          
        self.x_axis = 0.0
        self.y_axis = 0.0
        self.z_axis = 0.0
        self.z_pos = -7.0
        
        self.win = 0
        self.texture_background = None
        self.texture_teapot = None
     
    def signal_handler(self, sig, frame):
        print('\nYou pressed Ctrl+C!')
        self.webcam.close()
        sys.exit()

    def _get_background(self):
        # get image from webcam 
        image = self.webcam.get_current_frame()

        # convert image to OpenGL texture format
        image = cv2.flip(image, 0)
        image = cv2.flip(image, 1)
        gl_image = Image.fromarray(image)     
        ix = gl_image.size[0]
        iy = gl_image.size[1]
        gl_image = gl_image.tobytes("raw", "BGRX", 0, -1)
      
        # create background texture
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, gl_image)
 
    def _draw_background(self):
        # draw background
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 4.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 4.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 4.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 4.0)
        glEnd()
 
    def _init_gl(self, Width, Height):
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # note: the aspect ratio is deliberately nudged down by 0.2 here
        gluPerspective(45.0, float(Width)/float(Height)-.2, 0.1, 500.0)
        glMatrixMode(GL_MODELVIEW)
 
        # enable texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)
 
        # initialize lighting 
        #glLightfv(GL_LIGHT0, GL_AMBIENT, (0.5, 0.5, 0.5, 1.0))
        #glLightfv(GL_LIGHT0, GL_DIFFUSE, (1.0, 0.8, 0.0, 1.0)) 
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHTING)
 
        # initialize blending
        glColor4f(0.2, 0.2, 0.2, 0.5)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE)
        glEnable(GL_BLEND)
 
        #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        #glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
        #glEnable(GL_TEXTURE_2D)
 
    def _draw_scene(self):
        # refresh the background texture from the latest webcam frame
        self._get_background()
 
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
            
        # draw background
        #glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-11.2)
        self._draw_background()
        glPopMatrix()

        # position teapot
        glTranslatef(0.0,0.0,self.z_pos)
        glRotatef(self.x_axis,1.0,0.0,0.0)
        glRotatef(self.y_axis,0.0,1.0,0.0)
        glRotatef(self.z_axis,0.0,0.0,1.0)
 
        # draw teapot
        glutSolidTeapot(1.2)
 
        # rotate teapot 
        self.x_axis = self.x_axis - 2
        self.z_axis = self.z_axis - 2
 
        glutSwapBuffers()
 
    def main(self):
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(800, 400)
        self.win = glutCreateWindow("COS 429 AR Project")
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl(640, 480)
        glutMainLoop()
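
One detail all three examples rely on silently: the background quad is sized so that it exactly fills the view frustum at its depth. For the SaltwashAR projection (fovy 33.7°, aspect 1.3) with the quad at z = -10, the half-extents work out to roughly 4 x 3, which is why the quad vertices run from -4..4 and -3..3. A quick check (quad_half_extents is a name of my own):

import math

def quad_half_extents(fovy_deg, aspect, depth):
    # half-width/half-height of the frustum cross-section `depth` units
    # from the camera, as set up by gluPerspective(fovy_deg, aspect, ...)
    half_h = depth * math.tan(math.radians(fovy_deg) / 2.0)
    return aspect * half_h, half_h

print(quad_half_extents(33.7, 1.3, 10.0))  # ~(3.94, 3.03), matching the 4 x 3 quad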