Example #1
 def __init__(self):
   #sys.argv = ['Main.py', '../res/videos/test-14.mpeg', '--hide_input']  # [debug: run with set command-line args]
   
   # * Initialize global context, passing in custom command line args (parsed by Context)
   argParser = argparse.ArgumentParser(add_help=False)
   showInputGroup = argParser.add_mutually_exclusive_group()
   showInputGroup.add_argument('--show_input', dest='show_input', action="store_true", default=True, help="show input video (emulate see-through display)?")
   showInputGroup.add_argument('--hide_input', dest='show_input', action="store_false", default=False, help="hide input video (show only virtual objects)?")
   argParser.add_argument('--task', default="Task", help="task to run (maps to class name)")
   argParser.add_argument('--scene', dest="scene_files", metavar='SCENE_FILE', nargs='+', help="scene fragment(s) to load (filenames in <res>/data/)")
   
   self.context = Context.createInstance(description="Tangible Data Exploration", parent_argparsers=[argParser])
   self.context.main = self  # hijack global context to share a reference to self
   # NOTE Most objects require an initialized context, so do this as soon as possible
   
   # * Obtain a logger (NOTE Context must be initialized first since it configures logging)
   self.logger = logging.getLogger(__name__)
   self.logger.info("Resource path: {}".format(self.context.resPath))
   if not haveCV:
     self.logger.warning("OpenCV library not available")
   
   # * Initialize GL rendering context and associated objects (NOTE order of initialization may be important)
   self.context.renderer = Renderer()
   self.context.controller = Controller()
   
   # * Initialize scene and load base scene fragments, including tools
   self.context.scene = Scene()
   self.context.scene.readXML(self.context.getResourcePath('data', 'CubeScene.xml'))  # just the cube
   #self.context.scene.readXML(self.context.getResourcePath('data', 'DragonScene.xml'))  # Stanford Dragon
   #self.context.scene.readXML(self.context.getResourcePath('data', 'BP3D-FMA7088-heart.xml'))  # BodyParts3D heart model hierarchy
   #self.context.scene.readXML(self.context.getResourcePath('data', 'RadialTreeScene.xml'))
   #self.context.scene.readXML(self.context.getResourcePath('data', 'PerspectiveScene.xml'))
   
   # ** Load scene fragments specified on commandline (only need to specify filename in data/ directory)
   self.logger.info("Scene fragment(s): %s", self.context.options.scene_files)
   if self.context.options.scene_files is not None:
     for scene_file in self.context.options.scene_files:
       self.context.scene.readXML(self.context.getResourcePath('data', scene_file))
   
   # * Initialize task (may load further scene fragments, including task-specific tools)
   try:
     taskModule = import_module('task.' + self.context.options.task)  # fetch module by name from task package
     taskType = getattr(taskModule, self.context.options.task)  # fetch class by name from corresponding module (same name, by convention)
     self.context.task = taskType()  # create an instance of specified task class
   except Exception as e:
     self.logger.error("Task initialization error: {}".format(e))
     self.context.task = Task()  # fallback to dummy task
   
   # * Finalize scene (resolves scene fragments into one hierarchy, builds ID-actor mapping)
   self.context.scene.finalize()  # NOTE should be called after all read*() methods have been called on scene
   
   # * Find cube in scene
   self.cubeActor = self.context.scene.findActorById('cube')
   self.cubeComponent = self.cubeActor.components['Cube'] if self.cubeActor is not None else None
   
   # * Open camera/input file
   self.logger.info("Input device/file: {}".format(self.context.options.input_source))
   self.camera = cv2.VideoCapture(self.context.options.input_source) if not self.context.isImage else cv2.imread(self.context.options.input_source)
   # TODO move some more options (e.g. *video*) to context; introduce config.yaml-like solution with command-line overrides
   self.options = {'gui': self.context.options.gui, 'debug': self.context.options.debug,
                   'isVideo': self.context.isVideo, 'loopVideo': self.context.options.loop_video,
                   'syncVideo': self.context.options.sync_video, 'videoFPS': self.context.options.video_fps,
                   'isImage': self.context.isImage,
                   'cameraWidth': cameraWidth, 'cameraHeight': cameraHeight,
                   'windowWidth': windowWidth, 'windowHeight': windowHeight}
   self.context.videoInput = VideoInput(self.camera, self.options)
   # TODO If live camera, let input image stabilize by eating up some frames, then configure camera
   #   e.g. on Mac OS, use uvc-ctrl to turn off auto-exposure:
   #   $ ./uvc-ctrl -s 1 3 10
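   #   A rough sketch of that warm-up (hypothetical; assumes self.camera is a live cv2.VideoCapture and ~30 frames suffice):
   #   for _ in range(30):
   #     self.camera.read()  # discard frames while auto-exposure/white-balance settle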
   
   # * Create image blitter, if input is to be shown
   if self.context.options.show_input:
     self.context.imageBlitter = FrameProcessorGL(self.options)  # CV-GL renderer that blits (copies) CV image to OpenGL window
     # TODO Evaluate 2 options: Have separate tracker and blitter/renderer or one combined tracker that IS-A FrameProcessorGL? (or make a pipeline?)
     self.logger.info("Video see-through mode enabled; input video underlay will be shown")
   else:
     self.logger.info("Video see-through mode disabled; only virtual objects will be shown")
   
   # * Setup tracking
   self.context.cubeTracker = CubeTracker(self.options)  # specialized cube tracker, available in context to allow access to cubeTracker's input and output images etc.
   if self.cubeComponent is not None:
     self.logger.info("Tracking setup: Cube has {} markers".format(len(self.cubeComponent.markers)))
     self.context.cubeTracker.addMarkersFromTrackable(self.cubeComponent)
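
The task initialization above resolves a class from its name by convention (module task.<Name> defines class <Name>). A minimal standalone sketch of that import_module/getattr pattern, with a hypothetical helper name and the same fallback idea:

    from importlib import import_module

    def load_task_class(name, fallback):
        """Resolve 'Foo' to class task.Foo.Foo; fall back if the module or class is missing."""
        try:
            module = import_module('task.' + name)  # e.g. package task, module Foo
            return getattr(module, name)            # class named after its module, by convention
        except (ImportError, AttributeError):
            return fallback

    # task = load_task_class(options.task, Task)()  # usage, mirroring the example's fallback to Task()
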
Example #2
    def __init__(self):
        #sys.argv = ['Main.py', '../res/videos/test-14.mpeg', '--hide_input']  # [debug: run with set command-line args]

        # * Initialize global context, passing in custom command line args (parsed by Context)
        argParser = argparse.ArgumentParser(add_help=False)
        showInputGroup = argParser.add_mutually_exclusive_group()
        showInputGroup.add_argument(
            '--show_input',
            dest='show_input',
            action="store_true",
            default=True,
            help="show input video (emulate see-through display)?")
        showInputGroup.add_argument(
            '--hide_input',
            dest='show_input',
            action="store_false",
            default=False,
            help="hide input video (show only virtual objects)?")
        argParser.add_argument('--task',
                               default="Task",
                               help="task to run (maps to class name)")
        argParser.add_argument(
            '--scene',
            dest="scene_files",
            metavar='SCENE_FILE',
            nargs='+',
            help="scene fragment(s) to load (filenames in <res>/data/)")

        self.context = Context.createInstance(
            description="Tangible Data Exploration",
            parent_argparsers=[argParser])
        self.context.main = self  # hijack global context to share a reference to self
        # NOTE Most objects require an initialized context, so do this as soon as possible

        # * Obtain a logger (NOTE Context must be initialized first since it configures logging)
        self.logger = logging.getLogger(__name__)
        self.logger.info("Resource path: {}".format(self.context.resPath))
        if not haveCV:
            self.logger.warning("OpenCV library not available")

        # * Initialize GL rendering context and associated objects (NOTE order of initialization may be important)
        self.context.renderer = Renderer()
        self.context.controller = Controller()

        # * Initialize scene and load base scene fragments, including tools
        self.context.scene = Scene()
        self.context.scene.readXML(
            self.context.getResourcePath('data',
                                         'CubeScene.xml'))  # just the cube
        #self.context.scene.readXML(self.context.getResourcePath('data', 'DragonScene.xml'))  # Stanford Dragon
        #self.context.scene.readXML(self.context.getResourcePath('data', 'BP3D-FMA7088-heart.xml'))  # BodyParts3D heart model hierarchy
        #self.context.scene.readXML(self.context.getResourcePath('data', 'RadialTreeScene.xml'))
        #self.context.scene.readXML(self.context.getResourcePath('data', 'PerspectiveScene.xml'))

        # ** Load scene fragments specified on commandline (only need to specify filename in data/ directory)
        self.logger.info("Scene fragment(s): %s",
                         self.context.options.scene_files)
        if self.context.options.scene_files is not None:
            for scene_file in self.context.options.scene_files:
                self.context.scene.readXML(
                    self.context.getResourcePath('data', scene_file))

        # * Initialize task (may load further scene fragments, including task-specific tools)
        try:
            taskModule = import_module(
                'task.' + self.context.options.task
            )  # fetch module by name from task package
            taskType = getattr(
                taskModule, self.context.options.task
            )  # fetch class by name from corresponding module (same name, by convention)
            self.context.task = taskType()  # create an instance of specified task class
        except Exception as e:
            self.logger.error("Task initialization error: {}".format(e))
            self.context.task = Task()  # fallback to dummy task

        # * Finalize scene (resolves scene fragments into one hierarchy, builds ID-actor mapping)
        self.context.scene.finalize()  # NOTE should be called after all read*() methods have been called on scene

        # * Find cube in scene
        self.cubeActor = self.context.scene.findActorById('cube')
        self.cubeComponent = self.cubeActor.components[
            'Cube'] if self.cubeActor is not None else None

        # * Open camera/input file
        self.logger.info("Input device/file: {}".format(
            self.context.options.input_source))
        self.camera = cv2.VideoCapture(
            self.context.options.input_source
        ) if not self.context.isImage else cv2.imread(
            self.context.options.input_source)
        # TODO move some more options (e.g. *video*) to context; introduce config.yaml-like solution with command-line overrides
        self.options = {
            'gui': self.context.options.gui,
            'debug': self.context.options.debug,
            'isVideo': self.context.isVideo,
            'loopVideo': self.context.options.loop_video,
            'syncVideo': self.context.options.sync_video,
            'videoFPS': self.context.options.video_fps,
            'isImage': self.context.isImage,
            'cameraWidth': cameraWidth,
            'cameraHeight': cameraHeight,
            'windowWidth': windowWidth,
            'windowHeight': windowHeight
        }
        self.context.videoInput = VideoInput(self.camera, self.options)
        # TODO If live camera, let input image stabilize by eating up some frames, then configure camera
        #   e.g. on Mac OS, use uvc-ctrl to turn off auto-exposure:
        #   $ ./uvc-ctrl -s 1 3 10

        # * Create image blitter, if input is to be shown
        if self.context.options.show_input:
            self.context.imageBlitter = FrameProcessorGL(
                self.options
            )  # CV-GL renderer that blits (copies) CV image to OpenGL window
            # TODO Evaluate 2 options: Have separate tracker and blitter/renderer or one combined tracker that IS-A FrameProcessorGL? (or make a pipeline?)
            self.logger.info(
                "Video see-through mode enabled; input video underlay will be shown"
            )
        else:
            self.logger.info(
                "Video see-through mode disabled; only virtual objects will be shown"
            )

        # * Setup tracking
        self.context.cubeTracker = CubeTracker(
            self.options
        )  # specialized cube tracker, available in context to allow access to cubeTracker's input and output images etc.
        if self.cubeComponent is not None:
            self.logger.info("Tracking setup: Cube has {} markers".format(
                len(self.cubeComponent.markers)))
            self.context.cubeTracker.addMarkersFromTrackable(
                self.cubeComponent)
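
Both examples carry a TODO about replacing the ad-hoc options dict with a config-file-plus-command-line-overrides scheme. One common way to sketch that idea (assuming PyYAML; the file name, flags, and keys are illustrative, not part of this project):

    import argparse
    import yaml

    def load_options(config_path='config.yaml'):
        """Read defaults from a YAML file, then let explicit command-line flags override them."""
        with open(config_path) as f:
            defaults = yaml.safe_load(f) or {}
        parser = argparse.ArgumentParser()
        parser.add_argument('--gui', action='store_true')
        parser.add_argument('--video_fps', type=float)
        parser.set_defaults(**defaults)       # YAML values become argparse defaults
        return vars(parser.parse_args())      # flags given on the command line win over the file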