def run(self):
        """ used when run as behavior """
        print "Running obstacledetector."
        self.set_known_objects(['obstacle_matrix'])
        ticker = Ticker(self.update_frequency)
        while 1:
            # update the detector. This will:
            #     calculate an obstacle matrix from retreived Kinect images
            #     put the obstacle matrix into the memory
            ticker.tick()

            # get the obstacle matrix
            obstacleMatrix = self.getObstacleMatrix(
            )  # matrix containing all the found obstacles

            # put the matrix into memory
            if obstacleMatrix:
                delay = time.time() - self.mtime
                self.add_property('name', 'obstacle_matrix')
                self.add_property('matrix', obstacleMatrix)
                self.add_property('mtime', self.mtime)
                self.add_property('local_delay', delay)

            if self.fail_count > 50:
                self.logger.error(
                    "Failed to obtain image %d times. Maybe a restart will help?"
                    % self.fail_count)
                break

            self.update()
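
# Ticker is provided by a utility module elsewhere in this codebase and is
# not shown in this listing. A minimal sketch of the assumed behavior, based
# on how the callers above and below use it (tick() sleeps away the remainder
# of each cycle to hold the loop at the requested frequency); the
# name/ignore_delay parameters and the optional sleep function are
# assumptions:
import time

class TickerSketch(object):
    def __init__(self, frequency, name=None, ignore_delay=False):
        self.interval = 1.0 / frequency
        self.name = name
        self.ignore_delay = ignore_delay
        self._last = time.time()

    def set_frequency(self, frequency):
        self.interval = 1.0 / frequency

    def tick(self, sleep_func=True):
        # Sleep away whatever remains of this cycle; Brain (further down)
        # passes an alternative sleep function such as rospy.sleep.
        remaining = self._last + self.interval - time.time()
        if remaining > 0:
            if callable(sleep_func):
                sleep_func(remaining)
            else:
                time.sleep(remaining)
        self._last = time.time()
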
    def __init__(self,
                 host,
                 port,
                 update_frequency,
                 source="webcam",
                 cascade="haarcascade_frontalface_default",
                 verbose=False,
                 rotation=0):
        """Initialize vision system with update frequency, ip adress, and Haar cascade"""
        self.logger = logging.getLogger("Borg.Brain.Vision.HaarFace_rotate")
        super(HaarFace_rotate, self).__init__(host, port)
        self.update_frequency = update_frequency
        self.__ticker = Ticker(frequency=update_frequency)

        cascadedir = os.environ['BORG'] + "/brain/data/haarcascades/" + cascade + ".xml"
        self.verbose = verbose
        self.cascade = cv.Load(cascadedir)
        self.source = [source]
        self.vid_mem_reader = util.vidmemreader.VidMemReader(self.source)
        # set the type of objects that this module can detect
        self.set_known_objects(['face'])
        # get a new image and check its resolution for the coordinate-to-angle/speed converter
        self.get_new_image()
        self.logger.info("Using haar cascade: " + cascadedir)

        self.rotationStep = 20  # degrees
        self.rotatedImages = []

        self.rotation = rotation

        # Angles (in degrees) at which detection is attempted.
        self.rotationList = range(0, 360, self.rotationStep)
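
# How the rotation list above is presumably consumed: rotate each frame to
# every angle in rotationList and run the Haar cascade on each rotated copy,
# so faces are found regardless of in-plane rotation. A sketch against the
# legacy OpenCV 2.x cv API used by this module; detect_rotated() is an
# illustration, not code from this repository:
import cv2.cv as cv  # OpenCV 2.x legacy bindings

def detect_rotated(image, cascade, rotation_list):
    found = []
    for angle in rotation_list:
        # Build the rotation matrix around the image center.
        mapping = cv.CreateMat(2, 3, cv.CV_32FC1)
        center = (image.width / 2.0, image.height / 2.0)
        cv.GetRotationMatrix2D(center, angle, 1.0, mapping)
        rotated = cv.CloneImage(image)
        cv.WarpAffine(image, rotated, mapping)
        # Run the cascade on the rotated frame.
        storage = cv.CreateMemStorage(0)
        rects = cv.HaarDetectObjects(rotated, cascade, storage,
                                     scale_factor=1.2, min_neighbors=2,
                                     flags=cv.CV_HAAR_DO_CANNY_PRUNING)
        for (x, y, w, h), neighbors in rects:
            found.append((angle, (x, y, w, h)))
    return found
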
class SnapShotSaver(AbstractVisionModule):
    """
    Simple vision module, used to store images from specified video source.
    """
    def __init__(self,
                 host,
                 port,
                 update_frequency,
                 prefix="",
                 destination=None,
                 source="webcam",
                 verbose=False):
        self.logger = logging.getLogger("Borg.Brain.Vision.SnapShotSaver")
        super(SnapShotSaver, self).__init__(host, port)
        self.update_frequency = update_frequency
        self.__ticker = Ticker(frequency=update_frequency)
        self.verbose = verbose
        self.source = [source]
        if destination is None:
            raise Exception("Destination should be specified!")
        if not os.path.exists(destination):
            raise Exception("Specified destination does not exist!")
        self.destination = destination
        self.prefix = prefix
        self.vid_mem_reader = util.vidmemreader.VidMemReader(self.source)

    def train(self):
        pass

    def run(self):
        """start loop which gets a new image, then processes it"""

        count = 0
        while True:
            self.__ticker.tick()

            self.update()
            img = self.get_new_image()
            if img is None:
                print "not receiving images yet..."
            else:
                if self.verbose:
                    cv.ShowImage("SnapShotSaver", img)
                    cv.WaitKey(10)
                cv.SaveImage(
                    "%s/%s_%d.png" % (self.destination, self.prefix, count),
                    img)
                count += 1

    def get_new_image(self):
        """ Get new image from video shared memory """
        img = self.vid_mem_reader.get_latest_image()
        if not img:
            return None
        img = img[0]
        return convert_16to8(img)
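
# A usage sketch for SnapShotSaver; the host and port here are hypothetical
# (in this codebase vision modules are normally launched with the actual
# controller address), and the destination directory must already exist:
if __name__ == "__main__":
    saver = SnapShotSaver("localhost", 49152,
                          update_frequency=2,
                          prefix="snapshot",
                          destination="/tmp/snapshots",
                          source="webcam")
    saver.run()
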
Example 5
    def run(self):
        """Start loop which simple sends a random fake observation every 5 secondes."""
        t = Ticker(self.freq)
        while True:
            t.tick()
            #Name, used to identify the observation in memory:
            Operators = self.Operators
            if Operators:
                self.add_property('name', 'HumanDetector')
                self.add_property('Operators', Operators)
                self.store_observation()

            self.update()
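
# add_property(), store_observation() and update() come from the
# AbstractVisionModule base class, which is not shown in this listing.
# A minimal sketch of the assumed contract (the real class also manages
# the socket connection to the controller):
class ObservationBufferSketch(object):
    def __init__(self):
        self._properties = {}
        self._observations = []

    def add_property(self, key, value):
        # Collect key/value pairs for the observation being built.
        self._properties[key] = value

    def store_observation(self):
        # Seal the current property set as one observation.
        self._observations.append(self._properties)
        self._properties = {}

    def update(self):
        # Presumably flushes pending observations to the controller.
        pending, self._observations = self._observations, []
        return pending
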
    def __init__(self,
                 host,
                 port,
                 blob_dir,
                 source="webcam",
                 tbd=None,
                 trainable=False,
                 denoise_value=5,
                 frequency=10):
        self.logger = logging.getLogger("Borg.Brain.Vision.BlobDetector")
        super(BlobDetector, self).__init__(host, port)

        self.ticker = Ticker(frequency=frequency,
                             name="BlobDetector",
                             ignore_delay=True)

        # TODO: not sure about this part:
        self.__vmr = util.vidmemreader.VidMemReader([source])
        self.__blob_dir = blob_dir
        #self.__colorlist_list = colorlist.load_color_file_list(self.__blob_dir)

        #self.__display_surface = None
        #self.__last_surface = None
        if not tbd:
            print "Using default empty boundaries."
            self.to_be_detected = [("default", [])]
        else:
            input_data = open(tbd, 'rb')
            self.to_be_detected = pickle.load(input_data)
            input_data.close()
        print trainable
        if trainable:
            self._trainable = True
            self.add_from_next_image = False
            self.add_x = None
            self.add_y = None
        else:
            self._trainable = False
        print "Number of target colors: " + str(len(self.to_be_detected))
        self.current_target = 0

        self.denoise_value = int(denoise_value)

        # Variables to tune the size of the colorcubes added by clicking:
        self.cube_size = 5  # height/width of the square area used to sample pixels.
        self.increase_h = 1  # How much the hue of the cube is widened on each side.
        self.increase_s = 5  # How much the saturation of the cube is widened on each side.
        self.increase_v = 5  # How much the value of the cube is widened on each side.

        #TODO DEBUG VALUES!
        self.most_recent_button = "Startup"
class Remote(avm):
    """
    Give typed-in text input for your system in a separate window.
    """
    def __init__(self,
                 controller_ip,
                 controller_port,
                 update_frequency=5):
        """
        Constructor
        """
        self.logger = logging.getLogger("Borg.Brain.Vision.Remote")
        super(Remote, self).__init__(controller_ip, controller_port)

        # Store configuration
        self.__ticker = Ticker(frequency=update_frequency)
        self.__last_print = 0

        self.given_input = ""
        print "Type in your command in this window.\nTake care not to press two keys in the same fifth of a second, as the press might be missed."


    def __del__(self):
        self.stop()

    def stop(self):
        self.disconnect()

    def train(self):
        pass

    def run(self):
        """start checking for typed characters, then sends them."""
        while self.is_connected():
            self.__ticker.tick() # Tick (sleep)

            c = getkey() 
            if c:
                if c == '\n':
                    self.add_property("name", "text_command")
                    self.add_property("text_command", self.given_input)
                    print ""
                    print ("Command read: " + self.given_input + "\n")
                    self.given_input = ""
                else:
                    self.given_input += c
            
            ############################
            # Send data
            self.update()
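
# getkey() is imported from a utility module elsewhere in this codebase.
# A sketch of the assumed behavior: a non-blocking single-character read
# from stdin that returns '' when no key is pending. The real
# implementation may differ:
import select
import sys
import termios
import tty

def getkey_sketch():
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setcbreak(fd)
        # Poll stdin without blocking; read one character if available.
        if select.select([sys.stdin], [], [], 0)[0]:
            return sys.stdin.read(1)
        return ''
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
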
    def __init__(self,
                 host,
                 port,
                 update_frequency,
                 source="webcam",
                 verbose=False):
        self.logger = logging.getLogger("Borg.Brain.Vision.smokeDetect")
        super(smokeDetect, self).__init__(host, port)
        self.update_frequency = update_frequency
        self.__ticker = Ticker(frequency=update_frequency)

        self.verbose = verbose

        self.source = [source]
        self.vid_mem_reader = util.vidmemreader.VidMemReader(self.source)
        # set the type of objects that this module can detect
        self.set_known_objects(['smoke'])

        self.windowLocations = []

        self.labels = []
        self.data = []

        self.frameAverage = 5
        self.svm = None
        self.load_svm()
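
# load_svm() is defined elsewhere in this module. A sketch of the assumed
# behavior, restoring a previously trained classifier from disk if one
# exists; the path and pickle format are assumptions, not this
# repository's code:
import os
import pickle

def load_svm_sketch(path="/tmp/smoke_svm.pkl"):
    if not os.path.exists(path):
        return None  # no trained model yet; self.svm stays None
    with open(path, "rb") as model_file:
        return pickle.load(model_file)
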
Example 10
    def __init__(self, param_dict):
        """
        Initialize brain class: create sensorintegrator, behaviorcontroller and bodycontroller
        """
        speed = int(param_dict.get_option('general', 'brain_speed', 10))
        self.logger = logging.getLogger('Borg.Brain.Brain')
        self.ticker = Ticker(frequency=speed, name="Brain", ignore_delay=True)
        starting_behavior = param_dict.get_option('general', 'starting_behavior')
        self.sensorintegrator = sensorintegrator.SensorIntegrator(param_dict)
        self.logger.info("Sensor Integrator initialized")
        self.behaviorcontroller = basebehavior.behaviorcontroller.BehaviorController(starting_behavior)
        self.logger.info("Behavior Controller initialized")
        self.set_brain_speed(speed)
        self.bodycontroller = body.bodycontroller.BodyController()
        self.bodycontroller.set_config(param_dict)
        self.logger.info("Body controller initialized")
        self.running = False
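
# param_dict is the parsed configuration handed in by the launcher. A
# minimal stand-in showing the get_option(section, option, default)
# contract used above; the real class presumably wraps a config file
# parser:
class ParamDictSketch(object):
    def __init__(self, options):
        # options: {(section, option): value}
        self._options = options

    def get_option(self, section, option, default=None):
        return self._options.get((section, option), default)

# e.g. ParamDictSketch({('general', 'brain_speed'): '15',
#                       ('general', 'starting_behavior'): 'idle'})
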
Example 11
    def _run(self):
        """
        The main loop of the VideoManager. It will communicate with the
        Communicator to start/stop video sources and to check which
        sources are available.
        """
        # Catch signals, shut down nicely
        signal.signal(signal.SIGTERM, self.stop)
        signal.signal(signal.SIGINT, self.stop)

        # Set up server listening on TCP socket and Unix Domain Socket
        self.__UD_server = VideoSourceServer(self, "VideoManager")
        # Use compression for TCP socket because that one will most likely
        # be used, if at all, by vision modules running on a different machine,
        # so bandwidth will be limited.
        self.__tcp_server = VideoSourceServer(self, 49953, compress=True, compress_level=6)

        # Set up the Queue for the image requests from vision modules
        self.__image_queue = Queue.Queue()

        self._ticker = Ticker(10)
        poller = iopoll.IOPoll()
        poller.register(self.__pipe)
        self.__logger = logging.getLogger('Borg.Brain.Communicator.VideoManager')
        self.__logger.info("VideoManager running")
        
        self._running = True
        #try:
        while self._running:
            self._ticker.tick()
            events = poller.poll(0)
            for fd, event in events:
                if event & iopoll.IO_ERROR:
                    self._running = False
                    break
                if event & iopoll.IO_READ:
                    self._handle_command()
                if event & iopoll.IO_WRITE:
                    self._send_heartbeat()
            if self.__vidmemwriter:
                self.__vidmemwriter.update()
        #except:
        #    self._running = False
        self._stop_video_sources()
        self.__logger.info("VideoManager stopping")
    def run(self):
        """ used when run as behavior """
        print "Running obstacledetector."
        self.set_known_objects(['obstacle_matrix'])
        ticker = Ticker(self.update_frequency)
        while 1:
            # update the detector. This will:
            #     calculate an obstacle matrix from retreived Kinect images
            #     put the obstacle matrix into the memory
            ticker.tick()

            # get the obstacle matrix
            obstacleMatrix = self.getObstacleMatrix() # matrix containing all the found obstacles
            delay = time.time() - self.mtime

            # put the matrix into memory
            self.add_property('name', 'obstacle_matrix')
            self.add_property('matrix', obstacleMatrix)
            self.add_property('mtime', self.mtime)
            self.add_property('local_delay', delay)
            self.update()
Example 14
    def __init__(self,
                 controller_ip,
                 controller_port,
                 update_frequency=5,
                 location_file=False,
                 use_pcd=False):
        """
        Constructor
        """
        self.logger = logging.getLogger("Borg.Brain.Vision.Remote")
        super(Remote, self).__init__(controller_ip, controller_port)

        # Store configuration
        self.__ticker = Ticker(frequency=update_frequency)
        self.last_odo = None
        if not location_file:
            self.location_file = os.environ['BORG'] + "/Brain/data/locations2/arena_istanbul.dat"
        else:
            self.location_file = location_file

        self.file_lock = threading.Lock()
        self.process = None
        self.__last_print = 0
Example 20
class VideoManager(multiprocessing.Process):
    """
    The VideoManager is a part of the Communicator. It handles the opening of
    video devices when requested and serves the images to the vision modules.

    The VideoManager is separated from the Communicator to avoid crashing the
    communicator when a video module fails. The communicator will automatically
    restart the video manager when it crashes.

    There are two modes available currently, which can be changed by setting the
    __server_mode boolean to True or False. When server mode is enabled, the
    VideoManager will set up a socket to accept incoming image requests and will
    serve the images over those sockets. When server mode is disabled, instead
    the VidMemWriter class will be used to write the images to /dev/shm/images,
    where the vision modules can collect them. The latter approach is the old
    behavior. While this works, it introduces more delay to the images because
    images are not served on request basis but depend on the frequency of the
    VideoManager. The new approach requests an image only when the vision module
    requires one, bringing the time of processing and the time of generation
    closer together. It also gives the opportunity to serve vision modules on
    a different host, by connecting over TCP/IP to port 49953. This will of
    course result in degraded performance because transmitting over the network
    takes a lot longer than over the Unix Domain Socket. Still, performance
    should be usable.
    """
    def __init__(self, pipe, profile=False):
        super(VideoManager, self).__init__()
        self.__vidmemwriter = None
        self.__pipe = pipe
        self.__heartbeat = time.time()
        self.__hb_interval = 2
        self.__camera = 0
        self.__nao_camera = None
        self.__logger = None
        self.__video_sources = []
        self.__profile = profile
        self.__server_mode = True

        self.__video_modules = {}
        self.__last_images = {}
        self.__video_locks = {}

        self.__image_queue = []
        self.__image_handlers = []

        try:
            # Use OpenNI for the Kinect when the bindings import cleanly.
            import util.openni_kinectvideo
            self.__use_openni = True
        except ImportError:
            self.__use_openni = False

    def stop(self, signum=None, frame=None):
        """
        Process a stop request, usually used as a signal handler
        """
        self.__logger.warning("Caught signal, exiting")
        self._running = False

    def run(self):
        """
        The run method is the method started when the new process starts. It
        wraps the _run method, using the cProfile when requested to profile
        the VideoManager.
        """
        global vminstance

        iomanager.clear_IOM()
        vminstance = self
        if self.__profile:
            import cProfile, pstats
            cProfile.runctx('vminstance._run()', globals(), locals(), 'videomanager.prof')
        else:
            self._run()

    def _run(self):
        """
        The main loop of the VideoManager. It will communicate with the
        Communicator to start/stop video sources and to check which
        sources are available.
        """
        # Catch signals, shut down nicely
        signal.signal(signal.SIGTERM, self.stop)
        signal.signal(signal.SIGINT, self.stop)

        # Set up server listening on TCP socket and Unix Domain Socket
        self.__UD_server = VideoSourceServer(self, "VideoManager")
        # Use compression for TCP socket because that one will most likely
        # be used, if at all, by vision modules running on a different machine,
        # so bandwidth will be limited.
        self.__tcp_server = VideoSourceServer(self, 49953, compress=True, compress_level=6)

        # Set up the Queue for the image requests from vision modules
        self.__image_queue = Queue.Queue()

        self._ticker = Ticker(10)
        poller = iopoll.IOPoll()
        poller.register(self.__pipe)
        self.__logger = logging.getLogger('Borg.Brain.Communicator.VideoManager')
        self.__logger.info("VideoManager running")
        
        self._running = True
        #try:
        while self._running:
            self._ticker.tick()
            events = poller.poll(0)
            for fd, event in events:
                if event & iopoll.IO_ERROR:
                    self._running = False
                    break
                if event & iopoll.IO_READ:
                    self._handle_command()
                if event & iopoll.IO_WRITE:
                    self._send_heartbeat()
            if self.__vidmemwriter:
                self.__vidmemwriter.update()
        #except:
        #    self._running = False
        self._stop_video_sources()
        self.__logger.info("VideoManager stopping")
    
    def _send_heartbeat(self):
        """
        Send a heartbeat to the Communicator to indicate that
        the VideoManager has not crashed.
        """
        if time.time() > self.__heartbeat + self.__hb_interval:
            self.__pipe.send({"command": "heartbeat"})
            self.__heartbeat = time.time()

    def _handle_command(self):
        """
        Process a command from the Communicator: starting or stopping
        of video modules or returning which video sources are active
        or available.
        """
        data = self.__pipe.recv()
        cmd = data['command']
        if cmd == "start":
            # Start a video source
            source = data['source']
            ip = None
            inputres = None
            outputres = None
            if "ip" in data:
                ip = data['ip']
            if "inputres" in data:
                inputres = data['inputres']
            if "outputres" in data:
                outputres = data['outputres']
            if "camera" in data:
                self.__camera = data['camera']
            result = self._start_video_source(source, ip, inputres, outputres)
            self.__check_image_handlers()
            self.__pipe.send({"command": "source_started", "result": result})
        elif cmd == "stop":
            # Stop a video source
            source = data['source']
            result = self._stop_video_source(source)
            self.__pipe.send({"command": "source_stopped", "result": result})
        elif cmd == "set_nao_camera":
            # Change the camera the NAO is using: chin or top
            if "camera" in data:
                camera = data['camera']
                if self.__nao_camera:
                    self.__nao_camera.change_camera(camera)
                else:
                    self.__logger.warning("Nao Video camera source is None, cannot change camera")
        elif cmd == "check_sources":
            # Check which sources are available/connected to this machine
            sources = self._check_sources()
            self.__pipe.send({"command": "available_sources", "available_sources": sources})
        elif cmd == "set_speed":
            # Change the running frequency of the VideoManager
            if "frequency" in data:
                freq = data['frequency']
                self.__logger.info("Setting Video Manager speed to %d Hz" % freq)
                self._ticker.set_frequency(freq)
        elif cmd == "quit":
            # End the VideoManager
            self._running = False

    def _start_video_source(self, source, ip=None, inputres="640x480", outputres="640x480"):
        """
        This method starts a new video source, with some optional arguments
        specified such as input and output resolution and an IP address.
        Currently, these settings only apply to the naovideo source.
        """
        if source in self.__video_sources:
            return True

        if not source or source == "None":
            return True

        result = self._start_vidmemwriter(source, ip, inputres, outputres)

        return result

    def _start_vidmemwriter(self, camType, ip=None, inputres="640x480", outputres="640x480"):
        """
        Start the vidmemwriter and a video source, if required. If in server
        mode, no vidmemwriter will be started; instead, the video module will
        be registered with the image handlers.
        """
        if not self.__vidmemwriter and not self.__server_mode:
            self.__vidmemwriter = vidmemwriter.VidMemWriter([], [])

        if camType in self.__video_sources:
            return True

        self.__logger.info("I'm starting %s" % camType)

        if ros_pattern.match(camType):
            # The first 4 characters "ros_" identify this as a ROS image
            # source; in "ros_<encoding>_<topic>" the middle part is the
            # encoding, while a bare "ros_/topic" uses passthrough:
            topic = camType[4:]
            encoding = "passthrough"
            self.__logger.info("camType: %s" % camType)
            if not camType[4] == '/':
                str_list = camType.split("_")
                topic = '_'.join(str_list[2:])
                encoding = str_list[1]
            ros_image_source = rosimage.RosImage(topic, encoding)

            if self.__server_mode:
                self.__register_video_source(camType, ros_image_source)
            else:
                self.__vidmemwriter.add_video_source(ros_image_source, camType)
            self.__video_sources.append(camType)
            self.__logger.info("rosimage started for topic: %s, with encoding: %s" % (topic, encoding))
            return True
        elif camType == "webcam":
            self.__logger.debug("I'm starting webcam")
            webcamsource = takeimages.TakeImages(self.__camera)
            img = webcamsource.get_image()
            if type(img) is type(""):
                self.__logger.error("No camera found. Please check connection!")
                return False

            if webcamsource.Nocamera:
                if self.__camera == -1:
                    self.__logger.error("No camera found. Please check connection!")
                else:
                    self.__logger.error("Camera %d not found. Please check connection!" % self.__camera)
                return False
            if self.__server_mode:
                self.__register_video_source('webcam', webcamsource)
            else:
                self.__vidmemwriter.add_video_source(webcamsource, "webcam")
            self.__video_sources.append("webcam")
            self.__logger.info("Webcam started")
            return True
        elif camType == 'kinect_openni':
            self.__logger.debug("I'm starting kinect using openni")
            import util.openni_kinectvideo as kv
            depth_source = kv.OpenNIKinect("depth")
            rgb_source = kv.OpenNIKinect("rgb")

            try:
                depth_source.get_image()
            except:
                self.__logger.error("Kinect not found. Please check connection!")
                return False

            if self.__server_mode:
                self.__register_video_source('kinect_depth', depth_source)
                self.__register_video_source('kinect_rgb', rgb_source)
            else:
                self.__vidmemwriter.add_video_source(depth_source, "kinect_depth")
                self.__vidmemwriter.add_video_source(rgb_source, "kinect_rgb")

            self.__video_sources.append("kinect_depth")
            self.__video_sources.append("kinect_rgb")
            self.__video_sources.append("kinect")
            self.__video_sources.append("kinect_openni")
        
            self.__logger.info("Kinect started")
            return True
        elif camType == 'kinect' or camType == 'kinect_rgb' or camType == 'kinect_depth':
            if self.__use_openni:
                self.__logger.info("I'm starting kinect using openni")
                import util.openni_kinectvideo as kv
                depth_source = kv.OpenNIKinect("depth")
                rgb_source = kv.OpenNIKinect("rgb")

                try:
                    depth_source.get_image()
                except:
                    self.__logger.error("Kinect not found. Please check connection!")
                    return False
            else:
                self.__logger.info("I'm starting kinect using freenect")
                try:
                    import util.kinectmemwriter
                except:
                    self.__logger.error("Could not load kinectmemwriter module. Check modules.")
                    return False

                depth_source = util.kinectmemwriter.KinectDepthSource()
                rgb_source = util.kinectmemwriter.KinectRGBSource()

                try:
                    depth_source.get_image()
                except:
                    self.__logger.error("Kinect not found. Please check connection!")
                    return False

            if self.__server_mode:
                self.__register_video_source('kinect_depth', depth_source)
                self.__register_video_source('kinect_rgb', rgb_source)
            else:
                self.__vidmemwriter.add_video_source(depth_source, "kinect_depth")
                self.__vidmemwriter.add_video_source(rgb_source, "kinect_rgb")

            self.__video_sources.append("kinect_depth")
            self.__video_sources.append("kinect_rgb")
            self.__video_sources.append("kinect")
        
            self.__logger.info("Kinect started")
            return True
        elif camType == "naovideo":
            self.__logger.debug("I'm starting naovideo")
            try:
                import util.naovideo as naovideo
            except:
                self.__logger.error("Could not load naovideo module. Check modules")
                return False
            #get ip of nao:
            #TODO: fix this dirty hack (it should be read from the config file)
            naoip = "129.125.178.232"
            if ip:
                naoip = ip
    
            self.__logger.warn("Using input resolution %s and output resolution %s" % (inputres, outputres))
            #use the naovideo module:
            if self.__camera != 0 and self.__camera != 1:
                self.__camera = 0
            try:
                naocamsource = naovideo.VideoModule(naoip, inputres, outputres, camera=self.__camera)
                naocamsource.get_image()
            except:
                self.__logger.error("Something went wrong using the camera of the nao (check connection!)")
                traceback.print_exc()
                return False

            if self.__server_mode:
                self.__register_video_source('naovideo', naocamsource)
            else:
                self.__vidmemwriter.add_video_source(naocamsource, "naovideo")
            self.__video_sources.append("naovideo")
            self.__nao_camera = naocamsource
            self.__logger.info("Naovideo started")
            return True
        else:
            self.__logger.warning("Invalid video source specified: %s" % camType)
            return False
    
    def __register_video_source(self, name, source):
        """
        Register a running video source, for use in server mode.
        """
        self.__video_modules[name] = source
        self.__last_images[name] = (time.time(), source.get_image())
        self.__video_locks[name] = threading.Lock()

    def _stop_video_sources(self):
        """
        Stop all video sources.
        """
        for source in self.__video_sources:
            self._stop_video_source(source)

    def _stop_video_source(self, video_source):
        """
        Stop the specified video source
        """
        if not video_source in self.__video_sources:
            self.__logger.warning("Not stopping video source %s - not running" % video_source)
            return True

        if self.__server_mode:
            if video_source == "kinect":
                if "kinect_rgb" in self.__video_modules:
                    del self.__video_modules["kinect_rgb"]
                    del self.__last_images["kinect_rgb"]
                    del self.__video_locks["kinect_rgb"]
                if "kinect_depth" in self.__video_modules:
                    del self.__video_modules["kinect_depth"]
                    del self.__last_images["kinect_depth"]
                    del self.__video_locks["kinect_depth"]
            else:
                if video_source in self.__video_modules:
                    if video_source == "naovideo":
                        self.__video_modules[video_source].unsubscribe()

                    del self.__video_modules[video_source]
                    del self.__last_images[video_source]
                    del self.__video_locks[video_source]

        if video_source == "kinect":
            import util.kinectvideo
            if not self.__server_mode:
                self.__vidmemwriter.stop_video_source("kinect_rgb")
                self.__vidmemwriter.stop_video_source("kinect_depth")
            for src in ['kinect', 'kinect_rgb', 'kinect_depth', 'kinect_openni']:
                if src in self.__video_sources:
                    self.__video_sources.remove(src)
            util.kinectvideo.StopKinect()
        else:
            self.__video_sources.remove(video_source)
            if not self.__server_mode:
                self.__vidmemwriter.stop_video_source(video_source)
         
        if not self.__video_sources:
            self.__logger.info("No video source active. Stopping vidmemwriter.")
            self.__vidmemwriter = None
        return True

    def _check_sources(self):
        """
        Check the available video sources. Use the list of active video sources
        as a starting point. Then, for each known video source not currently
        active, try to start it, and if this succeeds, add it to the list.
        """
        avail_sources = []
        for source in self.__video_sources:
            avail_sources.append(source)

        # Try to start each video source known. Naovideo is not in this list,
        # because without an explicit request, no IP and port is known so no
        # connection can be made.
        for source in ["webcam", "kinect"]:
            if source in avail_sources:
                continue

            # Status unknown, try to start and stop
            self.__logger.info("Trying to see if source %s is available" % source)
            try:
                if self._start_video_source(source):
                    avail_sources.append(source)
                    self._stop_video_source(source)
            except Exception, e:
                self.__logger.info("Error starting source %s, (error: %s) must not be available" % (source, str(e)))
        
        return avail_sources
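
# The Communicator drives the VideoManager over a multiprocessing pipe. A
# sketch of that side of the protocol, reconstructed from _handle_command()
# and _send_heartbeat() above; the actual Communicator is not part of this
# listing, so this is an illustration only:
import multiprocessing

def communicator_side_sketch():
    parent_end, child_end = multiprocessing.Pipe()
    manager = VideoManager(child_end)
    manager.start()

    # Ask the manager to open the webcam...
    parent_end.send({"command": "start", "source": "webcam"})
    # ...and wait for the reply, skipping interleaved heartbeats.
    while True:
        reply = parent_end.recv()
        if reply["command"] == "heartbeat":
            continue
        if reply["command"] == "source_started":
            print "webcam started:", reply["result"]
            break

    parent_end.send({"command": "quit"})
    manager.join()
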
Example 21
class BlobDetector(AbstractVisionModule):

    def __init__(self, host, port, blob_dir, source="webcam", tbd=None,
                 trainable=False, denoise_value=5, frequency=10):
        self.logger = logging.getLogger("Borg.Brain.Vision.BlobDetector")
        super(BlobDetector, self).__init__(host, port)

        self.ticker = Ticker(frequency=frequency, name="BlobDetector", ignore_delay=True)

        # TODO: not sure about this part:
        self.__vmr = util.vidmemreader.VidMemReader([source])
        self.__blob_dir = blob_dir
        #self.__colorlist_list = colorlist.load_color_file_list(self.__blob_dir)

        #self.__display_surface = None
        #self.__last_surface = None
        if not tbd:
            print "Using default empty boundaries."
            self.to_be_detected = [("default", [])]
        else:
            input_data = open(tbd, 'rb')
            self.to_be_detected = pickle.load(input_data)
            input_data.close()
        print trainable
        if trainable:
            self._trainable = True
            self.add_from_next_image = False
            self.add_x = None
            self.add_y = None
        else:
            self._trainable = False
        print "Number of target colors: " + str(len(self.to_be_detected))
        self.current_target = 0

        self.denoise_value = int(denoise_value)

        # Variables to tune the size of the colorcubes added by clicking:
        self.cube_size = 5 # height/width of the square area used to sample pixels.
        self.increase_h = 1 # How much the hue of the cube is widened on each side.
        self.increase_s = 5 # How much the saturation of the cube is widened on each side.
        self.increase_v = 5 # How much the value of the cube is widened on each side.

        #TODO DEBUG VALUES!
        self.most_recent_button = "Startup"


    def run(self):
        """Start loop which gets a new image, then processes it"""

        # Create display window.
        cv2.namedWindow("Blob Detector")
        cv2.moveWindow("Blob Detector",100,100)
        if self._trainable:
            print "Using mouse clicking."
            cv.SetMouseCallback("Blob Detector", self.onmouse)
        else:
            print "Not using mouse clicking."
        while True:

            self.ticker.tick(True)
            # Get the image; the reader returns None until a frame arrives.
            images = self.get_new_image()
            cv_im = images[0] if images else None
            if cv_im:

                # Pre-process the image
                HSV_image = self.preprocess_image(cv_im)

                # Create a copy image on which found contours are shown.
                self.draw_image = copy.deepcopy(HSV_image)

                if self._trainable and self.add_from_next_image:
                    # Try to add the area specified by the most recent click.
                    self.add_new_cube(HSV_image)

                self.detection_process(HSV_image)

                self.image_display(self.draw_image)

            else:
                # If no image is received, nothing can be done.
                print "No image received!"
                print "-------------------------------"

            self.update()

    def train(self):
        #We do not use this
        pass

    def get_new_image(self):
        #TODO: Get image from vmr:
        return self.__vmr.get_latest_image()

    def onmouse(self, event, x, y, flags, param):

        #print "Flags: " + str(flags)
        if not (flags == cv.CV_EVENT_FLAG_CTRLKEY or flags == cv.CV_EVENT_FLAG_SHIFTKEY or flags == cv.CV_EVENT_FLAG_ALTKEY):
            self.handle_nokey_click(event, x, y)
            self.most_recent_button = "None"
            return

        if flags == cv.CV_EVENT_FLAG_ALTKEY:
            # Hold Alt to store/delete trained data
            self.handle_alt_click(event)
            self.most_recent_button = "Alt"
            return

        if flags == cv.CV_EVENT_FLAG_SHIFTKEY:
            # Hold Shift to increase HSV width.
            self.handle_shift_click(event)
            self.most_recent_button = "Shift"
            return

        if flags == cv.CV_EVENT_FLAG_CTRLKEY:
            # Hold Ctrl to decrease HSV width.
            self.handle_ctrl_click(event)
            self.most_recent_button = "Ctrl"
            return

        #print "No usable event, so doing nothing with the click."
        return

    def handle_custom_commands(self, entry):
        """This function will handle commands to add cubes from a behavior"""
        print entry
        if self._trainable and entry['command'] == 'train':
            print "Trying to train by clicking."
            # Get the target points and image
            points = entry['params']
            image = self.get_new_image()[0]
            processed_image = self.preprocess_image(image)
            for point in points:
                # Set the internal variables
                self.add_x = point['x']
                self.add_y = point['y']
                print "Clicking on: " + str(point['x']) + ", " + str(point['y'])

                # Add the cube
                self.add_new_cube(processed_image)

############ Subfunctions

######## Image processing

    def preprocess_image(self, image):
        """This function converts the image to a Numpy array and transfers it to HSV space."""

        # Convert the image to a Numpy array.
        arr = numpy.asarray(image[:,:])
        #cv2.imshow("Blob Detector", arr)
        #print cv_im
        #print arr

        # Convert to HSV
        return cv2.cvtColor(arr, cv2.COLOR_BGR2HSV)

    def add_new_cube(self, image):
        """"This function takes care of adding a new colorcube to the range of detection cubes."""
        # Try to add the area specified by the most recent click.

        # Determine the area to use.
        # This is a square area of self.cube_size pixels.
        area_to_be_added = cv2.getRectSubPix(image, (self.cube_size,self.cube_size), (float(self.add_x), float(self.add_y)))

        # Determine the min and max values in that area.
        # Split the image
        (hue, saturation, value) = cv2.split(area_to_be_added)
        # Determine the min and max in the area.
        # These min and max are increased/decreased to improve detection.
        hue_max = numpy.amin([180, numpy.amax(hue) + self.increase_h])
        hue_min = numpy.amax([0, numpy.amin(hue) - self.increase_h])
        print hue
        if (hue_max - hue_min) > 100:
            # Ok, probably it is some kind of a red color (around 0),
            # so we need to determine the min and max around 0.
            # TODO: Make more efficient using numpy or so (see the sketch after this method):
            hue_min = 180
            hue_max = 0
            for hue_val_line in hue:
                for hue_val in hue_val_line:
                    print hue_val
                    if hue_val > 90:
                        if hue_val < hue_min:
                            hue_min = hue_val
                    if hue_val <= 90:
                        if hue_val > hue_max:
                            hue_max = hue_val

        saturation_max = numpy.amin([255, numpy.amax(saturation) + self.increase_s])
        saturation_min = numpy.amax([0, numpy.amin(saturation) - self.increase_s])

        value_max = numpy.amin([255, numpy.amax(value) + self.increase_v])
        value_min = numpy.amax([0, numpy.amin(value) - self.increase_v])

        # Add these to a dict.
        new_cube = {'h_lower': hue_min, 'h_upper': hue_max,
                    's_lower': saturation_min, 's_upper': saturation_max,
                    'v_lower': value_min, 'v_upper': value_max}

        print "Made new dict."
        print new_cube

        # Add the dict to the detection dicts.
        ((self.to_be_detected[self.current_target])[1]).append(copy.deepcopy(new_cube))

        # And it's done!
        self.add_from_next_image = False
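
    def _hue_bounds_wraparound(self, hue):
        """Hypothetical numpy version of the wrap-around scan above.

        Addresses the TODO in add_new_cube: when the sampled hues straddle
        the red wrap-around point (some values near 0, some near 180), take
        the minimum of the high-side values and the maximum of the low-side
        values, matching the loop-based code. A sketch, not part of the
        original module.
        """
        flat = hue.ravel()
        hue_min = int(flat[flat > 90].min())   # lower edge, near 180
        hue_max = int(flat[flat <= 90].max())  # upper edge, near 0
        return hue_min, hue_max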

    def store_contour(self, contour, tag):
        """"This function will store a found image contour in the central memory."""

        # Determine the bounding box
        x, y, w, h = cv2.boundingRect(contour)

        # Determine the area
        area = cv2.contourArea(contour)

        # Then, store that in memory.
        self.add_property('name', tag)
        self.add_property('x', x)
        self.add_property('width', w)
        self.add_property('y', y)
        self.add_property('height', h)
        self.add_property('surface', area)
        self.store_observation()

    def store_combined_contours(self, contours, tag):
        """This will store the combined contours in a list sorted by area."""
        contour_list = []
        # Determine the dict values for each contour.
        for contour in contours:
            # Determine x, y, w and h
            x, y, w, h = cv2.boundingRect(contour)
            # And the area.
            area = cv2.contourArea(contour)

            # Put this data into a dict
            contour_dict = {'x': x, 'width': w, 'y': y, 'height': h, 'surface': area}
            # And add it to the list.
            contour_list.append(copy.deepcopy(contour_dict))

        if len(contour_list) < 1:
            return
        sorted_contours = sorted(contour_list, key=lambda k: k['surface'], reverse=True)
        new_tag = "combined_" + str(tag)
        self.add_property('name', new_tag)
        self.add_property("sorted_contours", copy.deepcopy(sorted_contours))
        self.store_observation()

    def detection_process(self, image):
        """"This function runs the detection process for the recieved image."""
        # Now, go over the various colors to be detected.
        for target in self.to_be_detected:
            # Detect any blobs
            #print target
            tag, cubes = target
            found = colorblob_new.process_HSV(image, cubes, self.denoise_value)
            collection = []

            if not found:
                # No contours found.
                pass
            else:
                #Determine HSV color of contours:
                h = 0
                target_1 = cubes[0]
                if target_1['h_upper'] < target_1['h_lower']:
                    h = target_1['h_lower'] + (((180 - target_1['h_lower']) + target_1['h_upper']) / 2)
                    if h > 180:
                        h -= 180
                else:
                    h = target_1['h_lower'] + ((target_1['h_upper'] - target_1['h_lower']) / 2)

                # Draw the contours into the draw_image
                cv2.drawContours(self.draw_image, found, -1, (h, 150, 150), thickness=-1)

                for contour in found:
                    self.store_contour(contour, tag)
                    collection.append(contour)
            self.store_combined_contours(collection, tag)

    def image_display_line(self, image, position, text):
        cv2.putText(image, text, position, cv2.FONT_HERSHEY_SIMPLEX, 0.3,
                    (0, 0, 0), lineType=cv2.CV_AA)
        cv2.putText(image, text, (position[0] - 1, position[1] - 1),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255),
                    lineType=cv2.CV_AA)

    def image_display_line_list(self, image, position, line_list):
        line_left_offset = position[0]
        line_height = 10
        line_y_position = position[1] + line_height
        for line in line_list:
            self.image_display_line(image, (line_left_offset, line_y_position), line)
            line_y_position += line_height


    def image_display(self, image):
        """"This will display the image with the found contours and relevant text on it."""
        # Convert to BGR for display.
        disp_image = cv2.cvtColor(image,cv2.COLOR_HSV2BGR)
        if self._trainable:
            line_list = []

            # Display the colour clicks will be added to.
            line_list.append("Target: " + str((self.to_be_detected[self.current_target])[0]))
            # And the area used when clicking to add.
            line_list.append("Area: " + str(self.cube_size) + " x " + str(self.cube_size) + " px")
            # And the degree to which the H, S and V values found by clicking are widened.
            line_list.append("HSV wideing values: " + str(self.increase_h) + " " + str(self.increase_s) + " " + str(self.increase_v))
            #TODO DEBUG CODE!
            line_list.append(self.most_recent_button)

            self.image_display_line_list(disp_image, (3, 0), line_list)
        # Display the actual image.
        cv2.imshow("Blob Detector", disp_image)
        cv2.waitKey(10)

######## Mouse input

    def handle_ctrl_click(self, event):
        """This will handle a ctrl-click when training."""
        print "Control pressed."
        if event == cv.CV_EVENT_LBUTTONDOWN:
            # Click left to decrease hue
            print "Narrowing hue edges of color cube"
            self.increase_h -= 1
            if self.increase_h < 0:
                self.increase_h = 0
            return
        if event == cv.CV_EVENT_MBUTTONDOWN:
            # Click middle to decrease saturation
            print "Narrowing saturation edges of color cube"
            self.increase_s -= 5
            if self.increase_s < 0:
                self.increase_s = 0
            return
        if event == cv.CV_EVENT_RBUTTONDOWN:
            # Click right to decrease value
            print "Narrowing value edges of color cube"
            self.increase_v -= 5
            if self.increase_v < 0:
                self.increase_v = 0
            return

    def handle_alt_click(self, event):
        """This will handle a alt-click when training."""
        print "Alt pressed."
        # Save all added data
        #if (event == cv.CV_EVENT_LBUTTONDOWN):
        print "Saving added data."
        output = open(tbd, 'wb')
        pickle.dump(self.to_be_detected, output)
        output.close()
        return
        # Click right to remove last added cube
        #if (event == cv.CV_EVENT_LBUTTONDOWN) and tbd:
            #TODO Not working ATM.
            #return

    def handle_shift_click(self, event):
        """This will handle a shift-click when training."""
        print "Shift pressed."
        if event == cv.CV_EVENT_LBUTTONDOWN:
            # Click left to increase hue
            print "Widening hue edges of color cube"
            self.increase_h += 1
            if self.increase_h > 180:
                self.increase_h = 0
            return
        if event == cv.CV_EVENT_MBUTTONDOWN:
            # Click middle to increase saturation
            print "Widening saturation edges of color cube"
            self.increase_s += 5
            if self.increase_s > 255:
                self.increase_s = 0
            return
        if event == cv.CV_EVENT_RBUTTONDOWN:
            # Click right to increase value
            print "Widening value edges of color cube"
            self.increase_v += 5
            if self.increase_v > 255:
                self.increase_v = 0
            return

    def handle_nokey_click(self, event, x, y):
        """This will handle a keyless click when training."""
        #print "No CTRL, ALT or SHIFT pressed!"

        # Will add a blob to the current color and save it on a left-click.
        if event == cv.CV_EVENT_LBUTTONDOWN:
            print "Adding pixel cube for current click to the color."
            self.add_from_next_image = True
            self.add_x = copy.copy(x)
            self.add_y = copy.copy(y)
            # This is checked for in the processing if trainable is True.
            return
        # Will cycle through the colors on a right-click.
        if event == cv.CV_EVENT_RBUTTONDOWN:
            print "Switching colors."
            self.current_target += 1
            if self.current_target > len(self.to_be_detected) -1:
                self.current_target = 0
            return
        if (event == cv.CV_EVENT_MBUTTONDOWN) and self.tbd:
            #TODO: switch this to cycling through selection square sizes.
            print "Cycling cube size."
            self.cube_size += 1
            if self.cube_size > 10:
                self.cube_size = 1
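
    # A compact summary of the training controls implemented by the four
    # handlers above (illustrative only; the handlers themselves are
    # authoritative):
    #
    #   modifier   left click           middle click         right click
    #   --------   ------------------   ------------------   -------------------
    #   (none)     add pixel cube       cycle cube size      switch target color
    #   Shift      widen hue margin     widen saturation     widen value
    #   Ctrl       narrow hue margin    narrow saturation    narrow value
    #   Alt        save trained data    save trained data    save trained data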
class Brain(object):
    """
    Main class that starts the sensors and the behaviors and connects them
    together
    """

    def __init__(self, param_dict):
        """
        Initialize brain class: create sensorintegrator, behaviorcontroller and bodycontroller
        """
        speed = int(param_dict.get_option('general', 'brain_speed', 10))
        self.logger = logging.getLogger('Borg.Brain.Brain')
        self.ticker = Ticker(frequency=speed, name="Brain", ignore_delay=True)
        starting_behavior = param_dict.get_option('general', 'starting_behavior')
        self.sensorintegrator = sensorintegrator.SensorIntegrator(param_dict)
        self.logger.info("Sensor Integrator initialiazed")
        self.behaviorcontroller = basebehavior.behaviorcontroller.BehaviorController(starting_behavior)
        self.logger.info("Behavior Controller initialized")
        self.set_brain_speed(speed)
        self.bodycontroller = body.bodycontroller.BodyController()
        self.bodycontroller.set_config(param_dict)
        self.logger.info("Body controller initialized")
        self.running = False

    def set_brain_speed(self, Hertz = 10):
        """
        The speed of the brain is measured in Hertz
        """
        self.ticker.set_frequency(Hertz)
        self.brain_speed = Hertz

    def get_brain_speed(self):
        """
        Return the speed of the brain measured in Hertz
        """
        return self.brain_speed

    def start(self):
        """
        This starts the robot brain.
        This method needs to run in a separate thread; otherwise we cannot stop the brain
        """
        self.running = True
        self.logger.info("Running brain")

        sleep_func = True

        if self.bodycontroller.get_nr_pioneers():
            if body.bodycontroller.pioneer.ROS_ENABLED:
                self.logger.info("ROS enabled in body controller. " \
                                 "Using ros.sleep instead of time.sleep")
                sleep_func = body.bodycontroller.pioneer.rospy.sleep

        while self.running:
            self.ticker.tick(sleep_func)
            memory = self.sensorintegrator.update()
            try:
                self.behaviorcontroller.update()
            except basebehavior.behaviorcontroller.BehaviorFinished:
                self.logger.warn("All behaviors have finished or stopped. Stopping brain")
                self.stop()
            self.bodycontroller.update()

    def stop(self, signum=None, frame=None):
        """
        the stop function only works if start is threaded
        """
        if self.running:
            self.running = False
            self.sensorintegrator.stop()
            self.bodycontroller.stop()
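
The Ticker used throughout these modules is not part of this listing. A minimal
sketch of the fixed-frequency behavior that Brain.start() relies on could look
like the following (hypothetical stand-in; the real Ticker also accepts a name,
an ignore_delay flag, and more):

import time

class MinimalTicker(object):
    """Hypothetical stand-in for the Ticker used above."""
    def __init__(self, frequency):
        self.interval = 1.0 / frequency
        self.last = time.time()

    def tick(self, sleep_func=True):
        # Sleep away whatever remains of the current interval.
        remaining = self.interval - (time.time() - self.last)
        if remaining > 0:
            if callable(sleep_func):
                sleep_func(remaining)  # e.g. rospy.sleep in Brain.start()
            else:
                time.sleep(remaining)
        self.last = time.time()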
class Remote(avm):
    """
    Remote control the pioneer / scanner
    """
    def __init__(self,
                 controller_ip,
                 controller_port,
                 update_frequency=5,
                 location_file=False,
                 use_pcd=False):
        """
        Constructor
        """
        self.logger = logging.getLogger("Borg.Brain.Vision.Remote")
        super(Remote, self).__init__(controller_ip, controller_port)

        # Store configuration
        self.__ticker = Ticker(frequency=update_frequency)
        self.last_odo = None
        if not location_file:
            self.location_file = os.environ[
                'BORG'] + "/Brain/data/locations2/arena_istanbul.dat"
        else:
            self.location_file = location_file

        self.file_lock = threading.Lock()
        self.process = None
        self.__last_print = 0

    def __del__(self):
        self.stop()

    def stop(self):
        """
        When stopping the module, make sure the PCD Writer and
        SLAM6D handlers also stop
        """
        self.disconnect()

    def train(self):
        pass

    def run(self):
        """start loop which gets a new image, then processes it"""
        while self.is_connected():
            self.__ticker.tick()  # Tick (sleep)

            if self.process and self.process.is_alive():
                self.update()
                continue

            c = getkey()
            if c:
                if c == 'w':
                    print "Moving forward"
                    self.add_property("name", "pioneer_command")
                    self.add_property("pioneer_command", "forward")
                elif c == 'a':
                    print "Turning left"
                    self.add_property("name", "pioneer_command")
                    self.add_property("pioneer_command", "left")
                elif c == 'd':
                    print "Turning right"
                    self.add_property("name", "pioneer_command")
                    self.add_property("pioneer_command", "right")
                elif c == 's':
                    print "Moving backward"
                    self.add_property("name", "pioneer_command")
                    self.add_property("pioneer_command", "backward")
                elif c == 'q':
                    self.add_property("name", "pioneer_command")
                    self.add_property("pioneer_command", "stop")
                elif c == 'r':
                    self.add_property("name", "pioneer_command")
                    self.add_property("pioneer_command", "terminate")
                elif c == 't':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "make_scan")
                elif c == 'm':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "save_map")
                elif c == 'l':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "load_map")
                elif c == 'c':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "match_map")
                elif c == 'x':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "make_map")
                elif c == 'z':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "discard_map")
                elif c == 'p':
                    self.save_pose()
                elif c == 'g':
                    self.add_property("name", "map_command")
                    self.add_property("map_command", "toggle_autoscan")
                elif c == 'h':
                    print "[w] = forward    [a] = left         [s] = backward           [d] = right"
                    print "[q] = stop       [t] = take scan    [c] = matching mode      [x] = mapping mode"
                    print "[m] = save map   [l] = load map     [p] = save current pose  [g] = Toggle autoscanning"

            ############################
            # Send data
            self.update()

    def handle_custom_commands(self, entry):
        if entry['command'] == "odometry":
            odo = entry['params'][1]
            if odo != self.last_odo:
                if self.__last_print < time.time() - 2:
                    print "Current position: %s" % repr(odo)
                    self.__last_print = time.time()
                self.last_odo = odo

    def save_pose(self):
        if self.process:
            if self.process.is_alive():
                return
            else:
                self.process = None
        odo = self.last_odo
        self.process = threading.Thread(target=do_save_pose,
                                        args=(odo, self.location_file,
                                              self.file_lock))
        self.process.start()
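
Remote.run() polls getkey() for a single pressed key without blocking. That
helper is not included in this listing; on POSIX systems it is commonly
implemented along the following lines (illustrative sketch, not the project's
actual util code):

import sys
import select
import termios
import tty

def getkey():
    """Return one pending character from stdin, or None (non-blocking)."""
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setcbreak(fd)  # no line buffering, but keep signal handling
        ready, _, _ = select.select([sys.stdin], [], [], 0)
        if ready:
            return sys.stdin.read(1)
        return None
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)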
class VideoManager(multiprocessing.Process):
    """
    The VideoManager is a part of the Communicator. It handles the opening of
    video devices when requested and serves the images to the vision modules.

    The VideoManager is separated from the Communicator to avoid crashing the
    communicator when a video module fails. The communicator will automatically
    restart the video manager when it crashes.

    There are two modes available currently, which can be changed by setting the
    __server_mode boolean to True or False. When server mode is enabled, the
    VideoManager will set up a socket to accept incoming image requests and will
    serve the images over those sockets. When server mode is disabled, instead
    the VidMemWriter class will be used to write the images to /dev/shm/images,
    where the vision modules can collect them. The latter approach is the old
    behavior. While this works, it introduces more delay to the images because
    images are not served on request basis but depend on the frequency of the
    VideoManager. The new approach requests an image only when the vision module
    requires one, bringing the time of processing and the time of generation
    closer together. It also gives the opportunity to serve vision modules on
    a different host, by connecting over TCP/IP to port 49953 (see the
    VideoSourceServer set up in _run below). This will of
    course result in degraded performance because transmitting over the network
    takes a lot longer than over the Unix Domain Socket. Still, performance
    should be usable.
    """
    def __init__(self, pipe, profile=False):
        super(VideoManager, self).__init__()
        self.__vidmemwriter = None
        self.__pipe = pipe
        self.__heartbeat = time.time()
        self.__hb_interval = 2
        self.__camera = 0
        self.__nao_camera = None
        self.__logger = None
        self.__video_sources = []
        self.__profile = profile
        self.__server_mode = True

        self.__video_modules = {}
        self.__last_images = {}
        self.__video_locks = {}

        self.__image_queue = []
        self.__image_handlers = []

        try:
            import util.openni_kinectvideo
            # OpenNI is importable, but it is left disabled here; the
            # freenect path below handles the plain 'kinect' source.
            self.__use_openni = False
        except ImportError:
            self.__use_openni = False

    def stop(self, signum=None, frame=None):
        """
        Process a stop request, usually used as a signal handler
        """
        self.__logger.warning("Caught signal, exiting")
        self._running = False

    def run(self):
        """
        The run method is the method started when the new process starts. It
        wraps the _run method, using the cProfile when requested to profile
        the VideoManager.
        """
        global vminstance

        iomanager.clear_IOM()
        vminstance = self
        if self.__profile:
            import cProfile, pstats
            cProfile.runctx('vminstance._run()', globals(), locals(), 'videomanager.prof')
        else:
            self._run()

    def _run(self):
        """
        The main loop of the VideoManager. It will communicate with the
        Communicator to start/stop video sources and to check which
        sources are available.
        """
        # Catch signals, shut down nicely
        signal.signal(signal.SIGTERM, self.stop)
        signal.signal(signal.SIGINT, self.stop)

        # Set up server listening on TCP socket and Unix Domain Socket
        self.__UD_server = VideoSourceServer(self, "VideoManager")
        # Use compression for TCP socket because that one will most likely
        # be used, if at all, by vision modules running on a different machine,
        # so bandwidth will be limited.
        self.__tcp_server = VideoSourceServer(self, 49953, compress=True, compress_level=6)

        # Set up the Queue for the image requests from vision modules
        self.__image_queue = Queue.Queue()

        self._ticker = Ticker(10)
        poller = iopoll.IOPoll()
        poller.register(self.__pipe)
        self.__logger = logging.getLogger('Borg.Brain.Communicator.VideoManager')
        self.__logger.info("VideoManager running")
        
        self._running = True
        #try:
        while self._running:
            self._ticker.tick()
            events = poller.poll(0)
            for fd, event in events:
                if event & iopoll.IO_ERROR:
                    self._running = False
                    break
                if event & iopoll.IO_READ:
                    self._handle_command()
                if event & iopoll.IO_WRITE:
                    self._send_heartbeat()
            if self.__vidmemwriter:
                self.__vidmemwriter.update()
        #except:
        #    self._running = False
        self._stop_video_sources()
        self.__logger.info("VideoManager stopping")
    
    def _send_heartbeat(self):
        """
        Send a heartbeat to the Communicator to indicate that
        the VideoManager has not crashed.
        """
        if time.time() > self.__heartbeat + self.__hb_interval:
            self.__pipe.send({"command": "heartbeat"})
            self.__heartbeat = time.time()

    def _handle_command(self):
        """
        Process a command from the Communicator: starting or stopping
        of video modules or returning which video sources are active
        or available.
        """
        data = self.__pipe.recv()
        cmd = data['command']
        if cmd == "start":
            # Start a video source
            source = data['source']
            ip = None
            inputres = None
            outputres = None
            if "ip" in data:
                ip = data['ip']
            if "inputres" in data:
                inputres = data['inputres']
            if "outputres" in data:
                outputres = data['outputres']
            if "camera" in data:
                self.__camera = data['camera']
            result = self._start_video_source(source, ip, inputres, outputres)
            self.__check_image_handlers()
            self.__pipe.send({"command": "source_started", "result": result})
        elif cmd == "stop":
            # Stop a video source
            source = data['source']
            result = self._stop_video_source(source)
            self.__pipe.send({"command": "source_stopped", "result": result})
        elif cmd == "set_nao_camera":
            # Change the camera the NAO is using: chin or top
            if "camera" in data:
                camera = data['camera']
                if self.__nao_camera:
                    self.__nao_camera.change_camera(camera)
                else:
                    self.__logger.warning("Nao Video camera source is None, cannot change camera")
        elif cmd == "check_sources":
            # Check which sources are available/connected to this machine
            sources = self._check_sources()
            self.__pipe.send({"command": "available_sources", "available_sources": sources})
        elif cmd == "set_speed":
            # Change the running frequency of the VideoManager
            if "frequency" in data:
                freq = data['frequency']
                self.__logger.info("Setting Video Manager speed to %d Hz" % freq)
                self._ticker.set_frequency(freq)
        elif cmd == "quit":
            # End the VideoManager
            self._running = False

    def _start_video_source(self, source, ip=None, inputres="640x480", outputres="640x480"):
        """
        This method starts a new video source, with some optional arguments
        such as the input and output resolution and an IP address.
        Currently, these settings only apply to the naovideo source.
        """
        if source in self.__video_sources:
            return True

        if not source or source == "None":
            return True

        result = self._start_vidmemwriter(source, ip, inputres, outputres)

        return result

    def _start_vidmemwriter(self, camType, ip=None, inputres="640x480", outputres="640x480"):
        """
        Start the vidmemwriter and a video source, if required. If in server
        mode, no vidmemwriter will be started. Instead, the video module will
        be registered with the image request handlers.
        """
        if not self.__vidmemwriter and not self.__server_mode:
            self.__vidmemwriter = vidmemwriter.VidMemWriter([], [])

        if camType in self.__video_sources:
            return True

        self.__logger.info("I'm starting %s" % camType)

        if ros_pattern.match(camType):
            #The first 4 characters "ros_" identify a ROS image source.
            #The middle part *** in "ros_***/topic" is the encoding:
            topic = camType[4:]
            encoding = "passthrough"
            self.__logger.info("Parsing ROS video source: %s" % camType)
            if not camType[4] == '/':
                str_list = camType.split("_")
                topic = '_'.join(str_list[2:])
                encoding = str_list[1]
            ros_image_source = rosimage.RosImage(topic, encoding)

            if self.__server_mode:
                self.__register_video_source(camType, ros_image_source)
            else:
                self.__vidmemwriter.add_video_source(ros_image_source, camType)
            self.__video_sources.append(camType)
            self.__logger.info("rosimage started for topic: %s, with encoding: %s" % (topic, encoding))
            return True
        elif camType == "webcam":
            self.__logger.debug("I'm starting webcam")
            webcamsource = takeimages.TakeImages(self.__camera)
            img = webcamsource.get_image()
            if isinstance(img, str):
                self.__logger.error("No camera found. Please check connection!")
                return False

            if webcamsource.Nocamera:
                if self.__camera == -1:
                    self.__logger.error("No camera found. Please check connection!")
                else:
                    self.__logger.error("Camera %d not found. Please check connection!" % self.__camera)
                return False
            if self.__server_mode:
                self.__register_video_source('webcam', webcamsource)
            else:
                self.__vidmemwriter.add_video_source(webcamsource, "webcam")
            self.__video_sources.append("webcam")
            self.__logger.info("Webcam started")
            return True
        elif camType == 'kinect_openni':
            self.__logger.debug("I'm starting kinect using openni")
            import util.openni_kinectvideo as kv
            depth_source = kv.OpenNIKinect("depth")
            rgb_source = kv.OpenNIKinect("rgb")

            try:
                depth_source.get_image()
            except:
                self.__logger.error("Kinect not found. Please check connection!")
                return False

            if self.__server_mode:
                self.__register_video_source('kinect_depth', depth_source)
                self.__register_video_source('kinect_rgb', rgb_source)
            else:
                self.__vidmemwriter.add_video_source(depth_source, "kinect_depth")
                self.__vidmemwriter.add_video_source(rgb_source, "kinect_rgb")

            self.__video_sources.append("kinect_depth")
            self.__video_sources.append("kinect_rgb")
            self.__video_sources.append("kinect")
            self.__video_sources.append("kinect_openni")
        
            self.__logger.info("Kinect started")
            return True
        elif camType == 'kinect' or camType == 'kinect_rgb' or camType == 'kinect_depth':
            if self.__use_openni:
                self.__logger.info("I'm starting kinect using openni")
                import util.openni_kinectvideo as kv
                depth_source = kv.OpenNIKinect("depth")
                rgb_source = kv.OpenNIKinect("rgb")

                try:
                    depth_source.get_image()
                except:
                    self.__logger.error("Kinect not found. Please check connection!")
                    return False
            else:
                self.__logger.info("I'm starting kinect using freenect")
                try:
                    import util.kinectmemwriter
                except:
                    self.__logger.error("Could not load kinectmemwriter module. Check modules.")
                    return False

                depth_source = util.kinectmemwriter.KinectDepthSource()
                rgb_source = util.kinectmemwriter.KinectRGBSource()

                try:
                    depth_source.get_image()
                except:
                    self.__logger.error("Kinect not found. Please check connection!")
                    return False

            if self.__server_mode:
                self.__register_video_source('kinect_depth', depth_source)
                self.__register_video_source('kinect_rgb', rgb_source)
            else:
                self.__vidmemwriter.add_video_source(depth_source, "kinect_depth")
                self.__vidmemwriter.add_video_source(rgb_source, "kinect_rgb")

            self.__video_sources.append("kinect_depth")
            self.__video_sources.append("kinect_rgb")
            self.__video_sources.append("kinect")
        
            self.__logger.info("Kinect started")
            return True
        elif camType == "naovideo":
            self.__logger.debug("I'm starting naovideo")
            try:
                import util.naovideo as naovideo
            except:
                self.__logger.error("Could not load naovideo module. Check modules")
                return False
            #get ip of nao:
            #TODO: fix this dirty hack (it should be read from the config file)
            naoip = "129.125.178.232"
            if ip:
                naoip = ip
    
            self.__logger.warn("Using input resolution %s and output resolution %s" % (inputres, outputres))
            #use the naovideo module:
            if self.__camera != 0 and self.__camera != 1:
                self.__camera = 0
            try:
                naocamsource = naovideo.VideoModule(naoip, inputres, outputres, camera=self.__camera)
                naocamsource.get_image()
            except:
                self.__logger.error("Something went wrong using the camera of the nao (check connection!)")
                traceback.print_exc()
                return False

            if self.__server_mode:
                self.__register_video_source('naovideo', naocamsource)
            else:
                self.__vidmemwriter.add_video_source(naocamsource, "naovideo")
            self.__video_sources.append("naovideo")
            self.__nao_camera = naocamsource
            self.__logger.info("Naovideo started")
            return True
        else:
            self.__logger.warning("Invalid video source specified: %s" % camType)
            return False
    
    def __register_video_source(self, name, source):
        """
        Register a running video source, for use in server mode.
        """
        self.__video_modules[name] = source
        self.__last_images[name] = (time.time(), source.get_image())
        self.__video_locks[name] = threading.Lock()

    def _stop_video_sources(self):
        """
        Stop all video sources.
        """
        for source in self.__video_sources:
            self._stop_video_source(source)

    def _stop_video_source(self, video_source):
        """
        Stop the specified video source
        """
        if not video_source in self.__video_sources:
            self.__logger.warning("Not stopping video source %s - not running" % video_source)
            return True

        if self.__server_mode:
            if video_source == "kinect":
                if "kinect_rgb" in self.__video_modules:
                    del self.__video_modules["kinect_rgb"]
                    del self.__last_images["kinect_rgb"]
                    del self.__video_locks["kinect_rgb"]
                if "kinect_depth" in self.__video_modules:
                    del self.__video_modules["kinect_depth"]
                    del self.__last_images["kinect_depth"]
                    del self.__video_locks["kinect_depth"]
            else:
                if video_source in self.__video_modules:
                    if video_source == "naovideo":
                        self.__video_modules[video_source].unsubscribe()

                    del self.__video_modules[video_source]
                    del self.__last_images[video_source]
                    del self.__video_locks[video_source]

        if video_source == "kinect":
            import util.kinectvideo
            if not self.__server_mode:
                self.__vidmemwriter.stop_video_source("kinect_rgb")
                self.__vidmemwriter.stop_video_source("kinect_depth")
            for src in ['kinect', 'kinect_rgb', 'kinect_depth', 'kinect_openni']:
                if src in self.__video_sources:
                    self.__video_sources.remove(src)
            util.kinectvideo.StopKinect()
        else:
            self.__video_sources.remove(video_source)
            if not self.__server_mode:
                self.__vidmemwriter.stop_video_source(video_source)
         
        if not self.__video_sources:
            self.__logger.info("No video source active. Stopping vidmemwriter.")
            self.__vidmemwriter = None
        return True

    def _check_sources(self):
        """
        Check the available video sources. Use the list of active video sources
        as a starting point. Then, for each known video source not currently
        active, try to start it, and if this succeeds, add it to the list.
        """
        avail_sources = []
        for source in self.__video_sources:
            avail_sources.append(source)

        # Try to start each video source known. Naovideo is not in this list,
        # because without an explicit request, no IP and port is known so no
        # connection can be made.
        for source in ["webcam", "kinect"]:
            if source in avail_sources:
                continue

            # Status unknown, try to start and stop
            self.__logger.info("Trying to see if source %s is available" % source)
            try:
                if self._start_video_source(source):
                    avail_sources.append(source)
                    self._stop_video_source(source)
            except Exception, e:
                self.__logger.info("Error starting source %s, (error: %s) must not be available" % (source, str(e)))
        
        return avail_sources
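
The commands parsed by _handle_command above arrive over a multiprocessing
pipe from the Communicator. The Communicator itself is not part of this
listing, but driving the VideoManager by hand would conceptually look like
this sketch (message shapes taken from _handle_command; note that heartbeat
messages from the child may interleave with the replies):

import multiprocessing

parent_conn, child_conn = multiprocessing.Pipe()
manager = VideoManager(child_conn)
manager.start()

parent_conn.send({"command": "start", "source": "webcam", "camera": 0})
reply = parent_conn.recv()    # e.g. {"command": "source_started", "result": True}
parent_conn.send({"command": "check_sources"})
reply = parent_conn.recv()    # {"command": "available_sources", ...}
parent_conn.send({"command": "quit"})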
    def __image_request_handler(self):
        """
        This method is the main loop for an image handler worker thread. It will
        try to get an image request from the Queue and if available, it will
        try to fulfill the request. 

        As soon as the new image is obtained from the video module, it will
        be sent over the socket that requested the image.
        """
        self.__logger.info("Image Request Handling Thread started")
        ticker = Ticker(2)
        while self._running:
            timeout = ticker.end_tick(False)
            try:
                task = self.__image_queue.get(True, timeout)
            except Queue.Empty:
                ticker.start_tick()
                continue

            # There is a task to process
            ticker.start_tick()
            source, connection = task

            # Check if the connection has been closed. If it was,
            # do not bother processing the request.
            if not connection.connected():
                self.__logger.info("Skipping request for image of source %s"   \
                                   " because requesting client disconnected"   \
                                   % source)
                self.__image_queue.task_done()
                continue 

            # Obtain new image
            error = "No image available"
            image = None
            mtime = time.time()
            if source in self.__video_modules:
                try:
                    mtime, image = self.__get_image(source)
                except Exception as err:
                    error = "Obtaining image failed: %s" % repr(err)
            else:
                error = "Video source %s has not been started" % source

            if connection.connected():
                if image:
                    # Valid image was obtained
                    img_str = image.tostring()
                    data = {'name':         'image',
                            'source':       source,
                            'time':         mtime,
                            'shape':        (image.width, image.height),
                            'depth':        image.depth,
                            'nChannels':    image.nChannels}
                else:
                    # An error occurred; notify the vision module
                    self.__logger.info("Failed to obtain image for source %s. "\
                                       " Error message: %s" % (source, error))
                    img_str = ""
                    data = {'name':         'image',
                            'source':       source,
                            'time':         mtime,
                            'error':        error}
                # Send the data to the vision module.
                if not connection.sendall(data, img_str):
                    self.__logger.warning("Failed to send data to client. "   \
                                          "Probably disconnected")
            else:
                self.__logger.info("Image of source %s obtained but not "      \
                                   "sending because requesting client "        \
                                   "disconnected" % source)
            self.__image_queue.task_done()
        self.__logger.info("Image Request Handling Thread ended")
class HaarFace_rotate(AbstractVisionModule):
    """example vision class. Detects faces using the openCV face detector"""
    def __init__(self,
                 host,
                 port,
                 update_frequency,
                 source="webcam",
                 cascade="haarcascade_frontalface_default",
                 verbose=False,
                 rotation=0):
        """Initialize vision system with update frequency, ip adress, and Haar cascade"""
        self.logger = logging.getLogger("Borg.Brain.Vision.HaarFace_rotate")
        super(HaarFace_rotate, self).__init__(host, port)
        self.update_frequency = update_frequency
        self.__ticker = Ticker(frequency=update_frequency)

        cascadedir = os.environ[
            'BORG'] + "/brain/data/haarcascades/" + cascade + ".xml"
        self.verbose = verbose
        self.cascade = cv.Load(cascadedir)
        self.source = [source]
        self.vid_mem_reader = util.vidmemreader.VidMemReader(self.source)
        # set the type of objects that someone can detect
        self.set_known_objects(['face'])
        # get new image and see what's the resolution for the coordonate to angle/speed converter
        self.get_new_image()
        self.logger.info("Using haar cascade: " + cascadedir)

        self.rotationStep = 20  #Degrees
        self.rotatedImages = []
        self.rotationList = []

        self.rotation = rotation

        for i in range(0, 360, self.rotationStep):
            self.rotationList.append(i)

    def train(self):
        pass

    def run(self):
        """start loop which gets a new image, then processes it"""

        im = None
        while im is None:
            im = self.vid_mem_reader.get_latest_image()
            if im is None:
                print "not receiving images yet..."
                time.sleep(0.2)

        #Wait for video source to be ready:
        #TODO: Should use vidmemreader, but that one never seems to return a resolution (at time of writing):
        #res = self.vid_mem_reader.get_resolution()

        #TODO: This should work, but it doesn't, because OpenCV keeps complaining that im is not an IPL image
        #(although if you print it, it seems to be an IPL image).
        #print im
        size = cv.GetSize(im[0])
        #print size
        self.res = ({'width': size[0], 'height': size[1]})
        res = self.res

        self.transformer = util.speed_angle.SpeedAngle(None, res['width'],
                                                       res['height'])

        while True:
            self.__ticker.tick()
            start_time = time.time()
            img = self.get_new_image()
            ''' Parallel Process Inside this module
            
            im = np.asarray(img[:,:])
            time_spent = time.time() - start_time
            
            #Parallel process
            
            self.parallel_rotate_image(im)
            self.logger.debug("Set one finished")
            
            print "Image Length: ", self.rotatedImages
            for img in self.rotatedImages:
                self.get_faces(img[0])
                self.update()
            
            self.rotatedImages = []
            '''
            im = np.asarray(img[:, :])

            image = self.rotate_image(im, [self.rotation])
            self.get_faces(image)
            self.update()

            #TODO: To be removed and or configurable:
            directory = "/tmp/emergency/"
            if not os.path.exists(directory):
                os.makedirs(directory)
            try:
                cv.SaveImage(directory + "image.png", image)
            except:
                print "ERROR: Could not write image to /tmp/emergency/"

    def parallel_rotate_image(self, img):
        #This function distributes the rotations into all the cores of the pc, and then calls the rotation function
        queue = multiprocessing.Queue()
        processList = []
        cpu_count = multiprocessing.cpu_count()
        chunk = int(len(self.rotationList) / cpu_count)

        tempList = []
        lock = multiprocessing.Lock()

        for x in range(cpu_count):
            if x == cpu_count - 1:
                # The last chunk runs through the end of the list.
                tempList.append(self.rotationList[(x * chunk):])
            else:
                #print x*chunk, " :to: ", ((x+1)*chunk)
                tempList.append(self.rotationList[(x * chunk):((x + 1) *
                                                               chunk)])

            # Argument order matches rotate_image(image, rotationList, lock, queue).
            processList.append(
                multiprocessing.Process(target=self.rotate_image,
                                        args=(
                                            img,
                                            tempList[x],
                                            lock,
                                            queue,
                                        )))
            processList[x].start()

        self._AbstractVisionModule__send_heartbeat()
        for proc in processList:
            image = queue.get()
            pimg = pickle.loads(image[0])
            cv_im = cv.CreateImageHeader(image[1], cv.IPL_DEPTH_8U, 3)
            cv.SetData(cv_im, pimg)
            self.rotatedImages.append(cv_im)
            proc.terminate()
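
    # A simpler alternative to the hand-rolled process/queue fan-out above is
    # multiprocessing.Pool. Sketch only (a module-level worker is required
    # because bound methods do not pickle in Python 2):
    #
    #   import multiprocessing
    #   import cv2
    #
    #   def _rotate(args):
    #       img, angle = args
    #       (h, w) = img.shape[:2]
    #       mat = cv2.getRotationMatrix2D((w / 2., h / 2.), angle, 1.0)
    #       return cv2.warpAffine(img, mat, (w, h), flags=cv2.INTER_LINEAR)
    #
    #   pool = multiprocessing.Pool()
    #   rotated = pool.map(_rotate, [(im, a) for a in range(0, 360, 20)])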

    def rotate_image(self, image, rotationList, lock=None, queue=None):
        (h, w, c) = image.shape
        image_center = (w / 2., h / 2.)
        for angle in rotationList:
            try:
                rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
                # warpAffine's dsize argument is (width, height).
                result = cv2.warpAffine(image,
                                        rot_mat, (w, h),
                                        flags=cv2.INTER_LINEAR)
                image = cv2.cv.fromarray(result)
                self.logger.debug("Image type is, %s", image)
            except:
                self.logger.warning("Exception in rotating image.", exc_info=1)
                return cv.fromarray(np.asarray(image))

            if lock:
                image_str = image.tostring()
                pimg2 = pickle.dumps(image_str, -1)

                try:
                    lock.acquire()
                    queue.put([pimg2, cv.GetSize(image)])

                    #bitmap = cv.CreateImageHeader((h, w), cv.IPL_DEPTH_8U, c)
                    #cv.SetData(bitmap, result.tostring(), image.dtype.itemsize * 3 * w)
                    lock.release()
                except:
                    self.logger.debug(
                        "either not using parallelism or locking fails",
                        exc_info=1)
            else:
                return image

    def get_new_image(self):
        """ Get new image from video shared memory """
        return self.vid_mem_reader.get_latest_image()[0]

    def get_faces(self, img):
        """ Detect faces in image """
        results = self.detect_and_draw(img, False)
        (width, height) = cv.GetSize(img)
        for result in results:
            self.add_property('name', 'face')
            self.add_property('pixel_location', result[0])
            self.add_property(
                'relative_location',
                (float(result[0][0]) / width, float(result[0][1]) / height))
            self.add_property('angle_and_distance', result[1])
            self.add_property('face_area', result[2])
            self.add_property('width', result[4])
            self.add_property('height', result[5])
            self.add_property('relative_face_area',
                              (float(result[2]) / (width * height)))
            self.add_property('confidence', result[3])
            self.add_property('dimensions', (width, height))
            self.add_property('rotation', self.rotation)
            self.store_observation()

    def get_no_faces(self):
        return

    def detect_and_draw(self, img, videosettings):
        """
        This function demonstrates face and object detection using Haar-like features.
        The program finds faces in a camera image or video stream and displays a red box around them.

        Original C implementation by:  ?
        Python implementation by: Roman Stanchak, James Bowman
        
        Draws faces detected in img using cascade
        and returns list of tuples ((x, y, width, height), nNeighbours)"""
        if videosettings:
            haar_scale = 1.2
            min_neighbors = 2
            haar_flags = cv.CV_HAAR_DO_CANNY_PRUNING
        else:
            haar_scale = 1.1
            min_neighbors = 3
            haar_flags = 0
        min_size = (7, 7)
        image_scale = 2.4

        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(
            img.width / image_scale), cv.Round(img.height / image_scale)), 8,
                                   1)

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

        cv.EqualizeHist(small_img, small_img)

        # Initialize here so an empty list is returned even without a cascade.
        normfaces = []
        if self.cascade:
            store = cv.CreateMemStorage(1024)
            faces = cv.HaarDetectObjects(small_img, self.cascade, store,
                                         haar_scale, min_neighbors, haar_flags,
                                         min_size)
            if faces:
                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt2 = (int(
                        (x + w) * image_scale), int((y + h) * image_scale))
                    cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                    #normfaces.append(((int(round(x*image_scale)), int(round(y*image_scale)), int(round(w*image_scale)), int(round(h*image_scale))),n))

                    center_x = (int(round(w * image_scale) / 2) +
                                int(round(x * image_scale)))
                    center_y = (int(round(h * image_scale) / 2) +
                                int(round(y * image_scale)))
                    angle_x = self.transformer.get_x_angle(center_x)
                    # hardcoded speed according to the size of the face in the image
                    speed_y = self.transformer.get_speed(w, h)
                    normfaces.append(((center_x, center_y), (speed_y, angle_x),
                                      (w * h), n, w, h))
        if self.verbose:
            cv.ShowImage("Haar Detector rotation" + str(self.rotation), img)

            cv.WaitKey(10)
        return normfaces
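
    # For reference, the cv2 API performs the same detection in a few lines.
    # This sketch (illustrative, not used by the module) mirrors the
    # non-videosettings parameters above:
    #
    #   import cv2
    #   cascade = cv2.CascadeClassifier(cascadedir)  # same XML as cv.Load
    #   gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #   gray = cv2.equalizeHist(gray)
    #   faces = cascade.detectMultiScale(gray, scaleFactor=1.1,
    #                                    minNeighbors=3, minSize=(7, 7))
    #   # 'faces' holds (x, y, w, h) boxes in full-image coordinates, so no
    #   # image_scale rescaling step is needed.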

    def get_predator_distance(self, bb, depth):
        self.logger.debug("Bounding Box: " + str(bb))
        if bb[0] < 0:
            bb[0] = 0
        if bb[2] >= self.res['width']:
            bb[2] = self.res['width'] - 1
        if bb[1] < 0:
            bb[1] = 0
        if bb[3] >= self.res['height']:
            bb[3] = self.res['height'] - 1
        dist_rect = cv.CreateImage((bb[2] - bb[0], bb[3] - bb[1]),
                                   cv.IPL_DEPTH_8U, 1)
        dist_rect = cv.GetSubRect(depth,
                                  (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1]))
        return cv.Avg(dist_rect)[0]
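
get_predator_distance clamps the bounding box to the image and averages the
depth values inside it. With numpy arrays the same computation is a short
function (sketch, assuming the depth map is available as a 2D numpy array
rather than an IplImage):

import numpy as np

def bb_mean_depth(depth, bb):
    """bb = [x1, y1, x2, y2], clamped to the image as in the method above."""
    h, w = depth.shape[:2]
    x1, y1 = max(bb[0], 0), max(bb[1], 0)
    x2, y2 = min(bb[2], w - 1), min(bb[3], h - 1)
    return float(np.mean(depth[y1:y2, x1:x2]))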
class BlobDetector(AbstractVisionModule):
    def __init__(self,
                 host,
                 port,
                 blob_dir,
                 source="webcam",
                 tbd=None,
                 trainable=False,
                 denoise_value=5,
                 frequency=10):
        self.logger = logging.getLogger("Borg.Brain.Vision.BlobDetector")
        super(BlobDetector, self).__init__(host, port)

        self.ticker = Ticker(frequency=frequency,
                             name="BlobDetector",
                             ignore_delay=True)

        #TODO: not sure about this part:
        self.__vmr = util.vidmemreader.VidMemReader([source])
        self.__blob_dir = blob_dir
        #self.__colorlist_list = colorlist.load_color_file_list(self.__blob_dir)

        #self.__display_surface = None
        #self.__last_surface = None
        # Keep the boundary-file path so handle_alt_click can save back to it.
        self.tbd = tbd
        if not tbd:
            print "Using default empty boundaries."
            self.to_be_detected = [("default", [])]
        else:
            input_data = open(tbd)
            self.to_be_detected = pickle.load(input_data)
            input_data.close()
        print "Trainable: " + str(trainable)
        if trainable:
            self._trainable = True
            self.add_from_next_image = False
            self.add_x = None
            self.add_y = None
        else:
            self._trainable = False
        print "Number of target colors: " + str(len(self.to_be_detected))
        self.current_target = 0

        self.denoise_value = int(denoise_value)

        # Variables to tune the size of the colorcubes added by clicking:
        self.cube_size = 5  # height/width of the square area used to sample pixels.
        self.increase_h = 1  # How much the hue of the cube is widened on each side.
        self.increase_s = 5  # How much the saturation of the cube is widened on each side.
        self.increase_v = 5  # How much the value of the cube is widened on each side.

        #TODO DEBUG VALUES!
        self.most_recent_button = "Startup"
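
    # The colorcubes collected above are consumed by detection_process (not
    # shown in this listing). Conceptually each cube is a pair of HSV bounds,
    # which OpenCV turns into a binary mask in one call. Sketch, assuming a
    # cube stored as (h_min, h_max, s_min, s_max, v_min, v_max):
    #
    #   lower = numpy.array([h_min, s_min, v_min], dtype=numpy.uint8)
    #   upper = numpy.array([h_max, s_max, v_max], dtype=numpy.uint8)
    #   mask = cv2.inRange(hsv_image, lower, upper)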

    def run(self):
        """Start loop which gets a new image, then processes it"""

        # Create display window.
        cv2.namedWindow("Blob Detector")
        cv2.moveWindow("Blob Detector", 100, 100)
        if self._trainable:
            print "Using mouse clicking."
            cv.SetMouseCallback("Blob Detector", self.onmouse)
        else:
            print "Not using mouse clicking."
        while True:

            self.ticker.tick(True)
            # Get the image
            cv_im = self.get_new_image()[0]
            if cv_im:

                # Pre-process the image
                HSV_image = self.preprocess_image(cv_im)

                # Create a copy image on which found contours are shown.
                self.draw_image = copy.deepcopy(HSV_image)

                if self._trainable and self.add_from_next_image:
                    # Try to add the area specified by the most recent click.
                    self.add_new_cube(HSV_image)

                self.detection_process(HSV_image)

                self.image_display(self.draw_image)

            else:
                # If no image is received, nothing can be done.
                print "No image received!"
                print "-------------------------------"

            self.update()

    def train(self):
        #We do not use this
        pass

    def get_new_image(self):
        #TODO: Get image from vmr:
        return self.__vmr.get_latest_image()

    def onmouse(self, event, x, y, flags, param):

        #print "Flags: " + str(flags)
        if not (flags == cv.CV_EVENT_FLAG_CTRLKEY
                or flags == cv.CV_EVENT_FLAG_SHIFTKEY
                or flags == cv.CV_EVENT_FLAG_ALTKEY):
            self.handle_nokey_click(event, x, y)
            self.most_recent_button = "None"
            return

        if flags == cv.CV_EVENT_FLAG_ALTKEY:
            # Hold Alt to store/delete trained data
            self.handle_alt_click(event)
            self.most_recent_button = "Alt"
            return

        if flags == cv.CV_EVENT_FLAG_SHIFTKEY:
            # Hold Shift to increase HSV width.
            self.handle_shift_click(event)
            self.most_recent_button = "Shift"
            return

        if flags == cv.CV_EVENT_FLAG_CTRLKEY:
            # Hold Ctrl to decrease HSV width.
            self.handle_ctrl_click(event)
            self.most_recent_button = "Ctrl"
            return

        #print "No usable event, so doing nothing with the click."
        return
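
    # Summary of the click bindings dispatched above: no modifier adds a
    # cube, switches target or cycles cube size (handle_nokey_click); Alt
    # saves the trained data (handle_alt_click); Shift widens the HSV
    # margins (handle_shift_click); Ctrl narrows them (handle_ctrl_click).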

    def handle_custom_commands(self, entry):
        """This function will handle commands to add cubes from a behavior"""
        print entry
        if self._trainable and entry['command'] == 'train':
            print "Trying to train by clicking."
            # Get the target points and image
            points = entry['params']
            image = self.get_new_image()[0]
            processed_image = self.preprocess_image(image)
            for point in points:
                # Set the internal variables
                self.add_x = point['x']
                self.add_y = point['y']
                print "Clicking on: " + str(point['x']) + ", " + str(
                    point['y'])

                # Add the cube
                self.add_new_cube(processed_image)

        # Then save the additions
        self.handle_alt_click(None)
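
    # A minimal usage sketch (values hypothetical): a behavior could send
    #     {'command': 'train', 'params': [{'x': 120, 'y': 80}]}
    # which samples a cube around pixel (120, 80) and then persists the
    # updated cube list through handle_alt_click.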

############ Subfunctions

######## Image processing

    def preprocess_image(self, image):
        """This function converts the image to a Numpy array and transfers it to HSV space."""

        # Convert the image to a Numpy array.
        arr = numpy.asarray(image[:, :])
        #cv2.imshow("Blob Detector", arr)
        #print cv_im
        #print arr

        # Convert to HSV
        return cv2.cvtColor(arr, cv2.COLOR_BGR2HSV)
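
    # Note: OpenCV stores 8-bit HSV with hue in 0..180 (degrees halved) and
    # saturation/value in 0..255, which is why the cube bounds computed in
    # add_new_cube clamp to those limits.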

    def add_new_cube(self, image):
        """"This function takes care of adding a new colorcube to the range of detection cubes."""
        # Try to add the area specified by the most recent click.

        # Determine the area to use.
        # This is a square area of self.cube_size pixels.
        area_to_be_added = cv2.getRectSubPix(
            image, (self.cube_size, self.cube_size),
            (float(self.add_x), float(self.add_y)))

        # Determine the min and max values in that area.
        # Split the image
        (hue, saturation, value) = cv2.split(area_to_be_added)
        # Determine the min and max in the area.
        # These min and max are increased/decreased to improve detection.
        hue_max = numpy.amin([180, numpy.amax(hue) + self.increase_h])
        hue_min = numpy.amax([0, numpy.amin(hue) - self.increase_h])
        print hue
        if (hue_max - hue_min) > 100:
            # Such a wide range probably means a red-ish color: hue wraps
            # around 0, so determine the min and max around the 0/180 seam.
            # TODO: Make this more efficient using numpy (see the sketch
            # after this method).
            hue_min = 180
            hue_max = 0
            for hue_val_line in hue:
                for hue_val in hue_val_line:
                    print hue_val
                    if hue_val > 90:
                        if hue_val < hue_min:
                            hue_min = hue_val
                    if hue_val <= 90:
                        if hue_val > hue_max:
                            hue_max = hue_val

        saturation_max = numpy.amin(
            [255, numpy.amax(saturation) + self.increase_s])
        saturation_min = numpy.amax(
            [0, numpy.amin(saturation) - self.increase_s])

        value_max = numpy.amin([255, numpy.amax(value) + self.increase_v])
        value_min = numpy.amax([0, numpy.amin(value) - self.increase_v])

        # Add these to a dict.
        new_cube = {
            'h_lower': hue_min,
            'h_upper': hue_max,
            's_lower': saturation_min,
            's_upper': saturation_max,
            'v_lower': value_min,
            'v_upper': value_max
        }

        print "Made new dict."
        print new_cube

        # Add the dict to the detection dicts.
        ((self.to_be_detected[self.current_target])[1]).append(
            copy.deepcopy(new_cube))

        # And it's done!
        self.add_from_next_image = False
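
    # A minimal vectorized sketch for the hue-wraparound case above,
    # addressing the TODO in add_new_cube. Illustrative only and not wired
    # in; assumes hue is the uint8 numpy array produced by cv2.split.
    @staticmethod
    def _wrapped_hue_bounds_sketch(hue):
        flat = hue.ravel()
        upper = flat[flat > 90]   # hues just below the 180/0 seam
        lower = flat[flat <= 90]  # hues just above the seam
        # Fall back to the loop's initial values when a side is empty.
        hue_min = numpy.amin(upper) if upper.size else 180
        hue_max = numpy.amax(lower) if lower.size else 0
        return hue_min, hue_max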

    def store_contour(self, contour, tag):
        """"This function will store a found image contour in the central memory."""

        # Determine the bounding box
        x, y, w, h = cv2.boundingRect(contour)

        # Determine the area
        area = cv2.contourArea(contour)

        # Then, store that in memory.
        self.add_property('name', tag)
        self.add_property('x', x)
        self.add_property('width', w)
        self.add_property('y', y)
        self.add_property('height', h)
        self.add_property('surface', area)
        self.store_observation()

    def store_combined_contours(self, contours, tag):
        """This will store the combined contours in a list sorted by area."""
        contour_list = []
        # Determine the dict values for each contour.
        for contour in contours:
            # Determine x, y, w and h
            x, y, w, h = cv2.boundingRect(contour)
            # And the area.
            area = cv2.contourArea(contour)

            # Put this data into a dict
            contour_dict = {
                'x': x,
                'width': w,
                'y': y,
                'height': h,
                'surface': area
            }
            # And add it to the list.
            contour_list.append(copy.deepcopy(contour_dict))

        if len(contour_list) < 1:
            return
        sorted_contours = sorted(contour_list,
                                 key=lambda k: k['surface'],
                                 reverse=True)
        new_tag = "combined_" + str(tag)
        self.add_property('name', new_tag)
        self.add_property("sorted_contours", copy.deepcopy(sorted_contours))
        self.store_observation()
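
    # Illustrative shape of the stored observation (values hypothetical):
    # a reader of "combined_<tag>" sees sorted_contours such as
    #     [{'x': 10, 'y': 5, 'width': 40, 'height': 30, 'surface': 900.0},
    #      {'x': 80, 'y': 60, 'width': 12, 'height': 9, 'surface': 70.0}]
    # i.e. largest surface first.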

    def detection_process(self, image):
        """"This function runs the detection process for the recieved image."""
        # Now, go over the various colors to be detected.
        for target in self.to_be_detected:
            # Detect any blobs
            #print target
            tag, cubes = target
            found = colorblob_new.process_HSV(image, cubes, self.denoise_value)
            collection = []

            if not found:
                # No contours found.
                pass
            else:
                #Determine HSV color of contours:
                h = 0
                target_1 = cubes[0]
                if target_1['h_upper'] < target_1['h_lower']:
                    h = target_1['h_lower'] + ((
                        (180 - target_1['h_lower']) + target_1['h_upper']) / 2)
                    if h > 180:
                        h -= 180
                else:
                    h = target_1['h_lower'] + (
                        (target_1['h_upper'] - target_1['h_lower']) / 2)
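                # Example (values hypothetical): a wrapped cube with
                # h_lower=160 and h_upper=40 gives
                # 160 + ((180 - 160) + 40) / 2 = 190, which wraps to a
                # midpoint hue of 10.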

                # Draw the contours into the draw_image
                cv2.drawContours(self.draw_image,
                                 found,
                                 -1, (h, 150, 150),
                                 thickness=-1)

                for contour in found:
                    self.store_contour(contour, tag)
                    collection.append(contour)
            self.store_combined_contours(collection, tag)

    def image_display_line(self, image, position, text):
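        """Draw a text line twice (black, then white offset one pixel
        up-left) so the label stays readable on any background."""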
        cv2.putText(image,
                    text,
                    position,
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.3, (0, 0, 0),
                    lineType=cv2.CV_AA)
        cv2.putText(image,
                    text, (position[0] - 1, position[1] - 1),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.3, (255, 255, 255),
                    lineType=cv2.CV_AA)

    def image_display_line_list(self, image, position, line_list):
        line_left_offset = position[0]
        line_height = 10
        line_y_position = position[1] + line_height
        for line in line_list:
            self.image_display_line(image, (line_left_offset, line_y_position),
                                    line)
            line_y_position += line_height

    def image_display(self, image):
        """"This will display the image with the found contours and relevant text on it."""
        # Convert to BGR for display.
        disp_image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        if self._trainable:
            line_list = []

            # Display the colour clicks will be added to.
            line_list.append("Target: " + str(
                (self.to_be_detected[self.current_target])[0]))
            # And the area used when clicking to add.
            line_list.append("Area: " + str(self.cube_size) + " x " +
                             str(self.cube_size) + " px")
            # And the degree to which the H, S and V values found by clicking are widened.
            line_list.append("HSV wideing values: " + str(self.increase_h) +
                             " " + str(self.increase_s) + " " +
                             str(self.increase_v))
            #TODO DEBUG CODE!
            line_list.append(self.most_recent_button)

            self.image_display_line_list(disp_image, (3, 0), line_list)
        # Display the actual image.
        cv2.imshow("Blob Detector", disp_image)
        cv2.waitKey(10)

######## Mouse input

    def handle_ctrl_click(self, event):
        """This will handle a ctrl-click when training."""
        print "Control pressed."
        if event == cv.CV_EVENT_LBUTTONDOWN:
            # Click left to decrease hue
            print "Narrowing hue edges of color cube"
            self.increase_h -= 1
            if self.increase_h < 0:
                # Wrap back to the top of the hue range, mirroring the
                # > 180 wrap in handle_shift_click.
                self.increase_h = 180
            return
        if event == cv.CV_EVENT_MBUTTONDOWN:
            # Click middle to decrease saturation
            print "Narrowing saturation edges of color cube"
            self.increase_s -= 5
            if self.increase_s < 0:
                self.increase_s = 0
            return
        if event == cv.CV_EVENT_RBUTTONDOWN:
            # Click right to decrease value
            print "Narrowing value edges of color cube"
            self.increase_v -= 5
            if self.increase_v < 0:
                self.increase_v = 0
            return

    def handle_alt_click(self, event):
        """This will handle a alt-click when training."""
        print "Alt pressed."
        # Save all added data
        #if (event == cv.CV_EVENT_LBUTTONDOWN):
        print "Saving added data."
        output = open(tbd, 'wb')
        pickle.dump(self.to_be_detected, output)
        output.close()
        return
        # Click right to remove last added cube
        #if (event == cv.CV_EVENT_LBUTTONDOWN) and tbd:
        #TODO Not working ATM.
        #return

    def handle_shift_click(self, event):
        """This will handle a shift-click when training."""
        print "Shift pressed."
        if event == cv.CV_EVENT_LBUTTONDOWN:
            # Click left to increase hue
            print "Widening hue edges of color cube"
            self.increase_h += 1
            if self.increase_h > 180:
                self.increase_h = 0
            return
        if event == cv.CV_EVENT_MBUTTONDOWN:
            # Click middle to increase saturation
            print "Widening saturation edges of color cube"
            self.increase_s += 5
            if self.increase_s > 255:
                self.increase_s = 0
            return
        if event == cv.CV_EVENT_RBUTTONDOWN:
            # Click right to increase value
            print "Widening value edges of color cube"
            self.increase_v += 5
            if self.increase_v > 255:
                self.increase_v = 0
            return

    def handle_nokey_click(self, event, x, y):
        """This will handle a keyless click when training."""
        #print "No CTRL, ALT or SHIFT pressed!"

        # Will add a blob to the current color and save it on a left-click.
        if event == cv.CV_EVENT_LBUTTONDOWN:
            print "Adding pixel cube for current click to the color."
            self.add_from_next_image = True
            self.add_x = copy.copy(x)
            self.add_y = copy.copy(y)
            # This is checked for in the processing if trainable is True.
            return
        # Will cycle through the colors on a right-click.
        if event == cv.CV_EVENT_RBUTTONDOWN:
            print "Switching colors."
            self.current_target += 1
            if self.current_target > len(self.to_be_detected) - 1:
                self.current_target = 0
            return
        if (event == cv.CV_EVENT_MBUTTONDOWN) and tbd:
            # Cycle through the selection square sizes (1..10 px).
            print "Cycling cube size."
            self.cube_size += 1
            if self.cube_size > 10:
                self.cube_size = 1