def detect_objects(self, s: Settings) -> None:
        """
        Detect objects in the current camera frame using the edge-detection
        method selected in ``s.edge_detection_type`` (1 = threshold only,
        2 = threshold + Canny, 3 = blob + Canny, anything else = plain Canny).

        :param s: Settings object used to store thresholds and camera used data
        :type s: Settings
        :return: None
        """
        m = Markers(s)
        self.frame = self._get_video(s, self._cap)
        marker_frame, warped_frame = m.remove_warp(s, self.frame)
        self.grayscale = self._convert_to_grayscale(warped_frame)
        # ndarray.shape is (rows, cols, ...) i.e. (height, width); the
        # original unpacked these the wrong way around.
        height, width = self.frame.shape[0:2]

        if s.edge_detection_type == 1:  # Only threshold detection
            self.thrash = self._edge_threshold(s, self.grayscale)
            self.video_output = self._draw_contour(
                s, self.frame, self._edge_contour(self.thrash))
        elif s.edge_detection_type == 2:  # Threshold + Canny shape detection
            gaussian_blur = cv.GaussianBlur(self.grayscale, (5, 5), sigmaX=0)
            self.thrash = self._edge_threshold(s, gaussian_blur)
            self.canny = self._edge_canny(s, self.thrash)
            self.dilate = self._dilate_edges(self.canny,
                                             iterations=s.dilate_iterations)
            self.erode = self._erode_edges(self.dilate,
                                           iterations=s.erode_iterations)
            self.blur = self._blur_edges(s, self.erode)
            contours = self._edge_contour(self.blur)
            self.video_output = self._draw_contour(s, warped_frame, contours)
            # NOTE(review): debug artifact written on every call; consider
            # guarding this behind a setting.
            cv.imwrite('./output.bmp', self.video_output)
        elif s.edge_detection_type == 3:  # Blob + Canny shape detection
            self.blob = self._edge_blob(self.grayscale)
            self.canny = self._edge_canny(s, self.blob)
        else:  # Simple Canny method
            # The original computed the edges and discarded them; store the
            # result like branch 3 does so callers can read it.
            self.canny = self._edge_canny(s, self.grayscale)
 def __init__(self):
     """Create display/marker helpers and open the default camera."""
     self.frames_to_display = {}
     self.display = Display()
     self.markers = Markers()
     self.markers_cache = None
     # noinspection PyArgumentList
     self.cap = cv2.VideoCapture(0)
     # property id 15 appears to be exposure (CAP_PROP_EXPOSURE) -- TODO confirm
     self.cap.set(15, -7)
Example #3
0
    def __init__(self):
        """Prepare marker detection, the cube textures and the video source."""
        # marker detector
        self.markers = Markers()

        # per-marker texture dictionaries
        textures = self._load_textures()
        self.marker_one_textures, self.marker_two_textures = textures

        # video source, created unopened
        self.video_capture = cv2.VideoCapture()
Example #4
0
    def align(self, options):
        """Create a multiple sequence alignment from marker genes."""

        check_dir_exists(options.identify_dir)
        make_sure_path_exists(options.out_dir)

        # default the outgroup taxon when the caller did not supply one
        options.outgroup_taxon = getattr(options, 'outgroup_taxon', None)

        Markers(options.cpus).align(options.identify_dir,
                                    options.taxa_filter,
                                    options.min_perc_aa,
                                    options.custom_msa_filters,
                                    options.consensus,
                                    options.min_perc_taxa,
                                    options.out_dir,
                                    options.prefix,
                                    options.outgroup_taxon)

        self.logger.info('Done.')
Example #5
0
    def identify(self, options):
        """Identify marker genes in genomes."""

        # validate whichever input sources were supplied
        if options.genome_dir:
            check_dir_exists(options.genome_dir)
        if options.batchfile:
            check_file_exists(options.batchfile)

        make_sure_path_exists(options.out_dir)

        genome_set = self._genomes_to_process(options.genome_dir,
                                              options.batchfile,
                                              options.extension)

        marker_finder = Markers(options.cpus)
        marker_finder.identify(genome_set, options.out_dir, options.prefix)

        self.logger.info('Done.')
Example #6
0
def loop():
    """Grab one frame, detect ArUco markers, annotate the frame and send the
    markers' normalised centres over the socket.

    Reads the module-level ``capture``, ``markerDict`` and ``sock`` objects.
    Returns early when no frame is available.
    """
    frame = capture.get_frame()
    if frame is None:
        return

    (cap_height, cap_width, cap_channels) = frame.shape

    global markerDict
    (corners, ids, rejects) = aruco.detectMarkers(frame, markerDict)
    frame = aruco.drawDetectedMarkers(frame, corners, ids)

    # Collect rows first and build the array once: the original called
    # np.vstack per marker, which is quadratic in the number of markers.
    rows = []
    for i in range(len(corners)):
        corner = corners[i][0]
        # marker centre = midpoint of the diagonal corners 0 and 2
        p1 = corner[0]
        p3 = corner[2]
        x = int(p1[0] - ((p1[0] - p3[0]) / 2))
        y = int(p1[1] - ((p1[1] - p3[1]) / 2))

        # store id plus centre normalised to [0, 1] by the frame size
        rows.append([ids[i][0], x / cap_width, y / cap_height])
        cv2.circle(frame, (x, y), 10, (255, 255, 255), 2)

    detected_markers = (np.array(rows) if rows
                        else np.array([]).reshape(0, 3))

    global sock
    if not sock.is_available():
        sock.connect()
    else:
        send_data = Markers()
        for row in detected_markers:
            marker = Marker(int(row[0]), row[1], row[2])
            send_data.append(marker)
        sock.send(str(send_data))
Example #7
0
    def __init__(self):
        """Wire up configuration, robots, webcam, markers and features."""
        self.config_provider = ConfigProvider()

        # robot models rendered on top of detected markers
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # video input plus marker detection; the cache smooths over a
        # single missed detection
        self.webcam = Webcam()
        self.markers = Markers()
        self.markers_cache = None

        # optional behaviours driven by configuration
        self.features = Features(self.config_provider)

        # OpenGL texture id, allocated later during GL setup
        self.texture_background = None
Example #8
0
    def __init__(self):
        """Build all collaborators; no GL or camera work happens here."""
        # configuration source
        self.config_provider = ConfigProvider()

        # the two robots that can appear in the scene
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # camera input
        self.webcam = Webcam()

        # marker detector and the one-frame detection cache
        self.markers = Markers()
        self.markers_cache = None

        # configurable feature set
        self.features = Features(self.config_provider)

        # GL background texture handle (set up elsewhere)
        self.texture_background = None
Example #9
0
class SaltwashAR:
    """Augmented-reality application: shows the webcam feed as an OpenGL
    background and animates robot models on top of detected markers."""

    # constants
    # Sign mask multiplied elementwise into the marker view matrix below;
    # presumably converts the OpenCV camera axes to OpenGL's -- TODO confirm.
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        """Build all collaborators; no GL or webcam work happens here."""
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers; the cache lets one missed detection reuse the
        # previous frame's markers (see _handle_markers)
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture (GL texture id, allocated in _init_gl)
        self.texture_background = None

    def _init_gl(self):
        """One-time OpenGL state setup; also loads robot animation frames
        and starts the webcam capture thread."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start webcam thread
        self.webcam.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        """Per-frame GLUT callback: background, markers, then features."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.reset()
        self.sporty_robot.reset()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background (each stage gets its own copy of the frame)
        self._handle_background(image.copy())

        # handle markers
        self._handle_markers(image.copy())

        # handle features
        self.features.handle(self.rocky_robot, self.sporty_robot, image.copy())

        glutSwapBuffers()

    def _handle_background(self, image):
        """Upload *image* as a texture and draw it on a full-view quad."""

        # let features update background image
        image = self.features.update_background_image(image)

        # convert image to OpenGL texture format (flipped vertically for GL)
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)

        # create background texture
        # NOTE(review): pixels are packed as 'BGRX' but uploaded as GL_RGBA;
        # red/blue would appear swapped unless compensated elsewhere -- verify.
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
        glPopMatrix()

    def _handle_markers(self, image):
        """Detect markers in *image* and animate the matching robot."""

        # attempt to detect markers; failures are logged and treated as none
        markers = []

        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        # manage markers cache: a successful detection is remembered so a
        # single missed frame can reuse it exactly once
        if markers:
            self.markers_cache = markers
        elif self.markers_cache:
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        for marker in markers:

            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix from the marker rotation/translation
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            # '*' on ndarrays is elementwise, not matmul: applies the sign mask
            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw the robot for this marker
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == ROCKY_ROBOT:
                self.rocky_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())
            elif marker_name == SPORTY_ROBOT:
                self.sporty_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def main(self):
        """Create the GLUT window and enter the render loop (blocks)."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(100, 100)
        self.window_id = glutCreateWindow('SaltwashAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()
Example #10
0
class MediaCube:
    """Feature that draws textured cubes on two AR markers; marker one's
    front face streams frames from a looping video file."""

    # constants
    FILE_PATH = 'scripts/features/mediacube/'

    # Sign mask applied elementwise to the marker view matrix in render();
    # presumably converts OpenCV camera axes to OpenGL's -- TODO confirm.
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        """Create the marker detector, cube textures and (unopened) video."""
        # initialise markers
        self.markers = Markers()

        # initialise textures
        self.marker_one_textures, self.marker_two_textures = self._load_textures()

        # initialise video (opened lazily in _get_video_frame)
        self.video_capture = cv2.VideoCapture()

    def detect(self, image):
        """Return markers detected in *image*, or None when nothing is found
        or detection raises."""

        markers = None

        # detect markers; errors are logged and treated as 'none found'
        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        if not markers:
            return None

        return markers

    def render(self, markers):
        """Draw a textured cube at each marker's pose."""

        for marker in markers:

            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix from the marker rotation/translation
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            # '*' on ndarrays is elementwise, not matmul: applies the sign mask
            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw cube
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == MARKER_ONE:
                # marker one's front face shows the current video frame
                self.marker_one_textures[TEXTURE_FRONT] = cv2.flip(self._get_video_frame(), 0)
                self._draw_cube(marker_rotation, self.marker_one_textures)
            elif marker_name == MARKER_TWO:
                self._draw_cube(marker_rotation, self.marker_two_textures)

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def _load_textures(self):
        """Load face images from FILE_PATH and return the two texture dicts.
        Marker one's front face stays None until video frames arrive."""
        marker_one_textures = {}
        marker_two_textures = {}

        # load images (the jpgs are rotated 180 degrees for the cube faces)
        image_green = cv2.imread('{}green.png'.format(self.FILE_PATH))
        image_yellow = cv2.imread('{}yellow.png'.format(self.FILE_PATH))
        image_blue = cv2.imread('{}blue.png'.format(self.FILE_PATH))
        image_pink = cv2.imread('{}pink.png'.format(self.FILE_PATH))
        image_saltwash = np.rot90(cv2.imread('{}saltwash.jpg'.format(self.FILE_PATH)), 2)
        image_halo = np.rot90(cv2.imread('{}halo.jpg'.format(self.FILE_PATH)), 2)

        # load textures for marker one
        marker_one_textures[TEXTURE_FRONT] = None
        marker_one_textures[TEXTURE_RIGHT] = image_green
        marker_one_textures[TEXTURE_BACK] = image_yellow
        marker_one_textures[TEXTURE_LEFT] = image_blue
        marker_one_textures[TEXTURE_TOP] = image_pink

        # load textures for marker two
        marker_two_textures[TEXTURE_FRONT] = image_saltwash
        marker_two_textures[TEXTURE_RIGHT] = image_green
        marker_two_textures[TEXTURE_BACK] = image_halo
        marker_two_textures[TEXTURE_LEFT] = image_blue
        marker_two_textures[TEXTURE_TOP] = image_pink

        return (marker_one_textures, marker_two_textures)

    def _get_video_frame(self):
        """Return the next video frame, opening the file on first use and
        rewinding to frame 0 at end-of-stream."""

        # get latest frame from video
        success, frame = self.video_capture.read()
        if success: return frame

        # read failed: either the capture was never opened, or we hit the
        # end of the file and must rewind
        if not self.video_capture.isOpened():
            self.video_capture.open('{}channel_one.mp4'.format(self.FILE_PATH))
        else:
            # NOTE(review): cv2.cv.CV_CAP_PROP_POS_FRAMES is the OpenCV 2.x
            # name; cv2.cv was removed in OpenCV 3 -- confirm target version.
            self.video_capture.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 0)      

        return self.video_capture.read()[1]

    def _draw_cube(self, marker_rotation, marker_textures):
        """Dispatch to the cube renderer matching the marker rotation
        (0-3 mapping to 0/90/180/270 degrees)."""

        # draw cube
        if marker_rotation == 0:
            cube_degrees_0(marker_textures)
        elif marker_rotation == 1:
            cube_degrees_90(marker_textures)
        elif marker_rotation == 2:
            cube_degrees_180(marker_textures)
        elif marker_rotation == 3:
            cube_degrees_270(marker_textures)
class CopterMarkerVision:
    """Python 2 vision pipeline: detects markers in camera frames and derives
    telemetry offsets used to steer a copter toward the first marker."""

    # corner-refinement criteria and a 3x3 calibration pattern;
    # NOTE(review): these class attributes are not referenced by the methods
    # shown here -- possibly used by calibration code elsewhere.
    termination_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                            30, 0.001)
    pattern_size = (3, 3)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    axis = np.float32([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0], [0, 0, -3],
                       [0, 3, -3], [3, 3, -3], [3, 0, -3]])

    def __init__(self):
        """Open camera 0, tune property 15 (presumably exposure -- TODO
        confirm against CAP_PROP_* ids) and create helpers."""
        # noinspection PyArgumentList
        self.cap = cv2.VideoCapture(0)
        self.cap.set(15, -7)
        self.frames_to_display = {}
        self.display = Display()
        self.markers = Markers()
        self.markers_cache = None

    def process_frame(self, frame):
        """Detect markers in *frame* and return a telemetry dict with
        vertical/side/forward/yaw offsets, or None when there is no frame
        and no cached detection."""
        if frame is None:
            print 'Null frame'
            return
        if len(frame.shape) < 2:
            raise ValueError('Invalid frame, shape < 2')
        elif len(frame.shape) > 2:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        else:
            gray = frame
        # ret,thresh1 = cv2.threshold(frame,200,255,cv2.THRESH_BINARY)

        # erosion removes small bright specks before marker detection
        eroded = cv2.erode(gray, np.ones((5, 5), np.uint8))

        markers = self.markers.detect(eroded)
        self.frames_to_display['gray'] = eroded

        # manage markers cache: reuse the previous detection for exactly
        # one missed frame, then give up
        if markers:
            self.markers_cache = markers
        elif self.markers_cache:
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        telemetry = {}

        for marker in markers:
            src, marker_rotation, marker_name = marker

            # ratio of two corner y-spans minus 1; presumably estimates yaw
            # from perspective skew -- verify corner ordering in Markers
            skew = (src[0][1] - src[3][1]) / (src[1][1] - src[2][1]) - 1.0

            # Shoelace algorithm to find size (polygon area); larger area
            # means the marker is closer to the camera.
            relative_marker_camera_distance = np.sqrt(
                Markers.polygon_area(src))

            # Frame Center
            frame_height, frame_width = tuple(
                np.divide(gray.shape, 2).astype(int))
            frame_center = (frame_width, frame_height)

            # Target
            target_midpoint = tuple(np.mean(src, axis=0).astype(int))
            target_camera_closeness = 60.0  # decrease for further away

            # Draw box around pattern
            cv2.drawMarker(frame, target_midpoint, (0, 0, 255),
                           cv2.MARKER_CROSS)
            for i in range(len(src) - 1):
                cv2.line(frame, tuple(src[i]), tuple(src[i + 1]), (255, 0, 0),
                         2)
            cv2.line(frame, tuple(src[3]), tuple(src[0]), (255, 0, 0), 2)

            # Draw offset arrows
            cv2.arrowedLine(frame, frame_center, target_midpoint,
                            (0, 255, 255))
            cv2.arrowedLine(
                frame, frame_center,
                (int(frame_center[0] + skew * 100.0), frame_center[1]),
                (0, 255, 255))

            # Control copter: all values approach 0 when centred and at
            # the target distance
            telemetry = {
                'vertical_dist':
                float(target_midpoint[1]) / float(frame_center[1]) - 1.0,
                'side_dist':
                float(target_midpoint[0]) / float(frame_center[0]) - 1.0,
                'forward_dist':
                float(target_camera_closeness) /
                float(relative_marker_camera_distance) - 1.0,
                'yaw_dist':
                skew
            }
            # print '{:.02f} {:.02f} {:.02f} {:.02f}'.format(vertical_dist, side_dist, forward_dist, yaw_dist)
            # only the first marker drives the telemetry
            break

        self.frames_to_display['color'] = frame

        return telemetry

    def display_frames(self, frame_dict):
        """Show each non-None frame in its own named window, then clear
        the pending-frames dict."""
        for key, value in frame_dict.iteritems():
            if value is not None:
                cv2.imshow(key, value)
        self.frames_to_display = {}

    def capture_frame(self):
        """Read and return one frame from the camera (None on failure)."""
        ret, frame = self.cap.read()
        return frame

    def load_frame_from_file(self, filename):
        """Load *filename* as grayscale; release resources when it fails."""
        frame = cv2.imread(filename, 0)
        if frame is None:
            print("Failed to load", filename)
            self.cleanup()
        return frame

    def process(self):
        """One pipeline iteration: capture, process, display."""
        frame = self.capture_frame()
        self.process_frame(frame)
        self.display_frames(self.frames_to_display)

    def cleanup(self):
        """Release the camera and close all OpenCV windows."""
        self.cap.release()
        cv2.destroyAllWindows()
    def process_frame(self, frame):
        """Detect markers in *frame* (Python 2 code) and return a telemetry
        dict with vertical/side/forward/yaw offsets, or None when there is
        no frame and no cached detection."""
        if frame is None:
            print 'Null frame'
            return
        if len(frame.shape) < 2:
            raise ValueError('Invalid frame, shape < 2')
        elif len(frame.shape) > 2:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        else:
            gray = frame
        # ret,thresh1 = cv2.threshold(frame,200,255,cv2.THRESH_BINARY)

        # erosion removes small bright specks before marker detection
        eroded = cv2.erode(gray, np.ones((5, 5), np.uint8))

        markers = self.markers.detect(eroded)
        self.frames_to_display['gray'] = eroded

        # manage markers cache: reuse the previous detection for exactly
        # one missed frame, then give up
        if markers:
            self.markers_cache = markers
        elif self.markers_cache:
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        telemetry = {}

        for marker in markers:
            src, marker_rotation, marker_name = marker

            # ratio of two corner y-spans minus 1; presumably estimates yaw
            # from perspective skew -- verify corner ordering in Markers
            skew = (src[0][1] - src[3][1]) / (src[1][1] - src[2][1]) - 1.0

            # Shoelace algorithm to find size (polygon area); larger area
            # means the marker is closer to the camera.
            relative_marker_camera_distance = np.sqrt(
                Markers.polygon_area(src))

            # Frame Center
            frame_height, frame_width = tuple(
                np.divide(gray.shape, 2).astype(int))
            frame_center = (frame_width, frame_height)

            # Target
            target_midpoint = tuple(np.mean(src, axis=0).astype(int))
            target_camera_closeness = 60.0  # decrease for further away

            # Draw box around pattern
            cv2.drawMarker(frame, target_midpoint, (0, 0, 255),
                           cv2.MARKER_CROSS)
            for i in range(len(src) - 1):
                cv2.line(frame, tuple(src[i]), tuple(src[i + 1]), (255, 0, 0),
                         2)
            cv2.line(frame, tuple(src[3]), tuple(src[0]), (255, 0, 0), 2)

            # Draw offset arrows
            cv2.arrowedLine(frame, frame_center, target_midpoint,
                            (0, 255, 255))
            cv2.arrowedLine(
                frame, frame_center,
                (int(frame_center[0] + skew * 100.0), frame_center[1]),
                (0, 255, 255))

            # Control copter: all values approach 0 when centred and at
            # the target distance
            telemetry = {
                'vertical_dist':
                float(target_midpoint[1]) / float(frame_center[1]) - 1.0,
                'side_dist':
                float(target_midpoint[0]) / float(frame_center[0]) - 1.0,
                'forward_dist':
                float(target_camera_closeness) /
                float(relative_marker_camera_distance) - 1.0,
                'yaw_dist':
                skew
            }
            # print '{:.02f} {:.02f} {:.02f} {:.02f}'.format(vertical_dist, side_dist, forward_dist, yaw_dist)
            # only the first marker drives the telemetry
            break

        self.frames_to_display['color'] = frame

        return telemetry
Example #13
0
class SaltwashAR:
    """AR app that renders the webcam feed as an OpenGL background and
    animates robot models over detected markers (duplicate example)."""

    # constants
    # Sign mask multiplied elementwise into the marker view matrix below;
    # presumably converts the OpenCV camera axes to OpenGL's -- TODO confirm.
    INVERSE_MATRIX = np.array([[ 1.0, 1.0, 1.0, 1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [-1.0,-1.0,-1.0,-1.0],
                               [ 1.0, 1.0, 1.0, 1.0]])

    def __init__(self):
        """Construct all collaborators; GL and webcam start later."""
        # initialise config
        self.config_provider = ConfigProvider()

        # initialise robots
        self.rocky_robot = RockyRobot()
        self.sporty_robot = SportyRobot()

        # initialise webcam
        self.webcam = Webcam()

        # initialise markers; cache allows one missed detection to reuse
        # the previous frame's markers (see _handle_markers)
        self.markers = Markers()
        self.markers_cache = None

        # initialise features
        self.features = Features(self.config_provider)

        # initialise texture (GL texture id, allocated in _init_gl)
        self.texture_background = None

    def _init_gl(self):
        """One-time OpenGL setup; loads robot frames and starts the webcam."""
        glClearColor(0.0, 0.0, 0.0, 0.0)
        glClearDepth(1.0)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(33.7, 1.3, 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

        # load robots frames
        self.rocky_robot.load_frames(self.config_provider.animation)
        self.sporty_robot.load_frames(self.config_provider.animation)

        # start webcam thread
        self.webcam.start()

        # assign texture
        glEnable(GL_TEXTURE_2D)
        self.texture_background = glGenTextures(1)

    def _draw_scene(self):
        """Per-frame GLUT callback: background, markers, then features."""
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()

        # reset robots
        self.rocky_robot.reset()
        self.sporty_robot.reset()

        # get image from webcam
        image = self.webcam.get_current_frame()

        # handle background (each stage gets its own copy of the frame)
        self._handle_background(image.copy())

        # handle markers
        self._handle_markers(image.copy())

        # handle features
        self.features.handle(self.rocky_robot, self.sporty_robot, image.copy())

        glutSwapBuffers()

    def _handle_background(self, image):
        """Upload *image* as a texture and draw it on a full-view quad."""

        # let features update background image
        image = self.features.update_background_image(image)

        # convert image to OpenGL texture format (flipped vertically for GL)
        bg_image = cv2.flip(image, 0)
        bg_image = Image.fromarray(bg_image)
        ix = bg_image.size[0]
        iy = bg_image.size[1]
        bg_image = bg_image.tobytes('raw', 'BGRX', 0, -1)

        # create background texture
        # NOTE(review): pixels are packed as 'BGRX' but uploaded as GL_RGBA;
        # red/blue would appear swapped unless compensated elsewhere -- verify.
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)

        # draw background
        glBindTexture(GL_TEXTURE_2D, self.texture_background)
        glPushMatrix()
        glTranslatef(0.0,0.0,-10.0)
        glBegin(GL_QUADS)
        glTexCoord2f(0.0, 1.0); glVertex3f(-4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 1.0); glVertex3f( 4.0, -3.0, 0.0)
        glTexCoord2f(1.0, 0.0); glVertex3f( 4.0,  3.0, 0.0)
        glTexCoord2f(0.0, 0.0); glVertex3f(-4.0,  3.0, 0.0)
        glEnd( )
        glPopMatrix()

    def _handle_markers(self, image):
        """Detect markers in *image* and animate the matching robot."""

        # attempt to detect markers; failures are logged and treated as none
        markers = []

        try:
            markers = self.markers.detect(image)
        except Exception as ex:
            print(ex)

        # manage markers cache: a successful detection is remembered so a
        # single missed frame can reuse it exactly once
        if markers:
            self.markers_cache = markers
        elif self.markers_cache:
            markers = self.markers_cache
            self.markers_cache = None
        else:
            return

        for marker in markers:

            rvecs, tvecs, marker_rotation, marker_name = marker

            # build view matrix from the marker rotation/translation
            rmtx = cv2.Rodrigues(rvecs)[0]

            view_matrix = np.array([[rmtx[0][0],rmtx[0][1],rmtx[0][2],tvecs[0]],
                                    [rmtx[1][0],rmtx[1][1],rmtx[1][2],tvecs[1]],
                                    [rmtx[2][0],rmtx[2][1],rmtx[2][2],tvecs[2]],
                                    [0.0       ,0.0       ,0.0       ,1.0    ]])

            # '*' on ndarrays is elementwise, not matmul: applies the sign mask
            view_matrix = view_matrix * self.INVERSE_MATRIX

            view_matrix = np.transpose(view_matrix)

            # load view matrix and draw the robot for this marker
            glPushMatrix()
            glLoadMatrixd(view_matrix)

            if marker_name == ROCKY_ROBOT:
                self.rocky_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())
            elif marker_name == SPORTY_ROBOT:
                self.sporty_robot.next_frame(marker_rotation, self.features.is_speaking(), self.features.get_emotion())

            glColor3f(1.0, 1.0, 1.0)
            glPopMatrix()

    def main(self):
        """Create the GLUT window and enter the render loop (blocks)."""
        # setup and run OpenGL
        glutInit()
        glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
        glutInitWindowSize(640, 480)
        glutInitWindowPosition(100, 100)
        glutCreateWindow('SaltwashAR')
        glutDisplayFunc(self._draw_scene)
        glutIdleFunc(self._draw_scene)
        self._init_gl()
        glutMainLoop()