def __init__(self):

    """Build the Alignment Explorer window from its Glade file and start
    the GTK idle-loop driver (Python 2 / PyGTK era: updateLoop.next)."""
    self.scriptPath = os.path.dirname(__file__)
    self.targetImageGray = None
    self.templateImageGray = None
    self.imageFlowFilter = ImageFlowFilter()

    # Load the GUI definition
    uiBuilder = gtk.Builder()
    uiBuilder.add_from_file(self.scriptPath + "/GUI/AlignmentExplorer.glade")

    self.window = uiBuilder.get_object("winMain")

    # Plain widget references, stored under their Glade ids
    for widgetName in ("adjDisplacementX", "adjDisplacementY",
                       "lblSSDDisplay", "lblTargetName", "lblTemplateName"):
        setattr(self, widgetName, uiBuilder.get_object(widgetName))

    # Drawing areas get wrapped in Display helpers
    for areaName in ("dwgTargetImage", "dwgMergedImage", "dwgTemplateImage",
                     "dwgErrorImage", "dwgSubtractImage"):
        setattr(self, areaName + "Display",
                Display(uiBuilder.get_object(areaName)))

    uiBuilder.connect_signals(self)

    # Drive self.update() as a generator from the idle loop
    updateLoop = self.update()
    gobject.idle_add(updateLoop.next)

    self.window.show()
    def __init__( self ):
        """Set up the Alignment Explorer GUI.

        Loads the Glade layout, caches widget references, wraps each drawing
        area in a Display helper and hooks the self.update() generator into
        the gobject idle loop (Python 2 generator protocol: .next).
        """
        self.scriptPath = os.path.dirname( __file__ )
        self.targetImageGray = None        # grayscale image, set on load
        self.templateImageGray = None      # grayscale image, set on load
        self.imageFlowFilter = ImageFlowFilter()

        # Load the GUI layout from the Glade file
        builder = gtk.Builder()
        builder.add_from_file( self.scriptPath + "/GUI/AlignmentExplorer.glade" )

        # Widgets looked up by their Glade ids
        self.window = builder.get_object( "winMain" )
        self.adjDisplacementX = builder.get_object( "adjDisplacementX" )
        self.adjDisplacementY = builder.get_object( "adjDisplacementY" )
        self.lblSSDDisplay = builder.get_object( "lblSSDDisplay" )
        self.lblTargetName = builder.get_object( "lblTargetName" )
        self.lblTemplateName = builder.get_object( "lblTemplateName" )

        # Drawing areas, each wrapped in a Display helper
        dwgTargetImage = builder.get_object( "dwgTargetImage" )
        dwgMergedImage = builder.get_object( "dwgMergedImage" )
        dwgTemplateImage = builder.get_object( "dwgTemplateImage" )
        dwgErrorImage = builder.get_object( "dwgErrorImage" )
        dwgSubtractImage = builder.get_object( "dwgSubtractImage" )
        self.dwgTargetImageDisplay = Display( dwgTargetImage )
        self.dwgMergedImageDisplay = Display( dwgMergedImage )
        self.dwgTemplateImageDisplay = Display( dwgTemplateImage )
        self.dwgErrorImageDisplay = Display( dwgErrorImage )
        self.dwgSubtractImageDisplay = Display( dwgSubtractImage )

        # Route Glade-declared signal handlers to methods on this object
        builder.connect_signals( self )

        # Drive self.update() as a generator from the idle loop
        updateLoop = self.update()
        gobject.idle_add( updateLoop.next )

        self.window.show()
# Esempio n. 3  (scrape artifact: example separator, commented out so the file parses)
# 0
    def __init__(self):
        """Set up the Impact Explorer GUI.

        Loads the Glade layout, caches widget references, wraps each drawing
        area in a Display helper and creates a fresh configuration via
        onMenuItemNewActivate.
        """
        self.scriptPath = os.path.dirname(__file__)

        self.accumulatorImage = None
        self.maskArray = None
        # NOTE(review): these look like re-entrancy guard flags, but their
        # use isn't visible in this chunk -- confirm against the handlers.
        self.fillingImageDataUI = False
        self.handlingFilePath = False

        self.imageFlowFilter = ImageFlowFilter()

        # Setup the GUI
        builder = gtk.Builder()
        builder.add_from_file(self.scriptPath + "/GUI/ImpactExplorer.glade")

        self.window = builder.get_object("winMain")
        self.comboCurImage = builder.get_object("comboCurImage")
        self.checkAddToMask = builder.get_object("checkAddToMask")
        self.adjDisplacementX = builder.get_object("adjDisplacementX")
        self.adjDisplacementY = builder.get_object("adjDisplacementY")
        self.filePathImage = builder.get_object("filePathImage")
        #self.lblSSDDisplay = builder.get_object( "lblSSDDisplay" )
        #self.lblTargetName = builder.get_object( "lblTargetName" )
        #self.lblTemplateName = builder.get_object( "lblTemplateName" )

        # Drawing areas, each wrapped in a Display helper
        dwgCurImage = builder.get_object("dwgCurImage")
        dwgMergedImage = builder.get_object("dwgMergedImage")
        dwgImpactMotionImage = builder.get_object("dwgImpactMotionImage")
        dwgAccumulatorImage = builder.get_object("dwgAccumulatorImage")
        dwgMaskImage = builder.get_object("dwgMaskImage")
        dwgSegmentedImage = builder.get_object("dwgSegmentedImage")
        self.dwgCurImageDisplay = Display(dwgCurImage)
        self.dwgMergedImageDisplay = Display(dwgMergedImage)
        self.dwgImpactMotionImageDisplay = Display(dwgImpactMotionImage)
        self.dwgAccumulatorImageDisplay = Display(dwgAccumulatorImage)
        self.dwgMaskImageDisplay = Display(dwgMaskImage)
        self.dwgSegmentedImageDisplay = Display(dwgSegmentedImage)

        # Get notified when the image file path widget changes
        self.filePathImage.setOnFilenameChangedCallback(
            self.onFilePathImageChanged)
        builder.connect_signals(self)
        self.onMenuItemNewActivate(None)  # Create new config

        self.window.show()
class MainWindow:
    """Alignment Explorer window.

    Lets the user load a 'target' and a 'template' image, shift the template
    with X/Y adjustments (or auto-align via ImageFlowFilter), and inspect a
    red/green composite, an SSD error bitmap and a target-minus-template
    subtraction image.

    NOTE(review): this is Python 2 / PyGTK era code (gtk, gobject,
    generator .next) -- confirm the runtime before modernising further.
    """

    #---------------------------------------------------------------------------
    def __init__(self):
        """Load the Glade UI, cache widget references and start the idle loop."""
        self.scriptPath = os.path.dirname(__file__)
        self.targetImageGray = None      # grayscale numpy image, set on load
        self.templateImageGray = None    # grayscale numpy image, set on load
        self.imageFlowFilter = ImageFlowFilter()

        # Setup the GUI
        builder = gtk.Builder()
        builder.add_from_file(self.scriptPath + "/GUI/AlignmentExplorer.glade")

        self.window = builder.get_object("winMain")
        self.adjDisplacementX = builder.get_object("adjDisplacementX")
        self.adjDisplacementY = builder.get_object("adjDisplacementY")
        self.lblSSDDisplay = builder.get_object("lblSSDDisplay")
        self.lblTargetName = builder.get_object("lblTargetName")
        self.lblTemplateName = builder.get_object("lblTemplateName")

        # Each drawing area is wrapped in a Display helper
        for dwgName in ("dwgTargetImage", "dwgMergedImage", "dwgTemplateImage",
                        "dwgErrorImage", "dwgSubtractImage"):
            setattr(self, dwgName + "Display",
                    Display(builder.get_object(dwgName)))

        builder.connect_signals(self)

        # Drive self.update() as a generator from the GTK idle loop
        updateLoop = self.update()
        gobject.idle_add(updateLoop.next)   # Python 2 generator protocol

        self.window.show()

    #---------------------------------------------------------------------------
    def onWinMainDestroy(self, widget, data=None):
        """GTK signal handler: quit the main loop when the window is destroyed."""
        gtk.main_quit()

    #---------------------------------------------------------------------------
    def main(self):
        """Run the application; blocks inside gtk.main() until quit."""
        gtk.main()

    #---------------------------------------------------------------------------
    def mergeImages(self):
        """Shift the template by the adjustment values, composite it with the
        target and refresh the merged/error/subtraction displays."""
        # 'is None', not '== None': once loaded this is a numpy array and
        # '==' becomes an elementwise comparison (ambiguous in an 'if').
        if self.targetImageGray is None:
            # Nothing to do
            return

        # Create a transformed version of the template image.
        # scipy.ndimage.shift is the supported spelling; the old
        # scipy.ndimage.interpolation namespace is deprecated.
        transformedImage = scipy.ndimage.shift(
            self.templateImageGray, (self.adjDisplacementY.get_value(),
                                     self.adjDisplacementX.get_value()))

        # Composite image: target in the red channel, shifted template in the
        # green channel, so matched pixels show as yellow.
        height, width = self.targetImageGray.shape
        mergedImage = np.zeros((height, width, 3), dtype=np.uint8)
        mergedImage[:, :, 0] = self.targetImageGray
        mergedImage[:, :, 1] = transformedImage
        self.dwgMergedImageDisplay.setImageFromNumpyArray(mergedImage)

        # Sum of Squared Differences (SSD), with per-pixel differences of
        # EPSILON or less gated out as noise.
        SSDValues = np.square(
            transformedImage.astype(np.int32) -
            self.targetImageGray.astype(np.int32))
        EPSILON = 128
        SSDValues[SSDValues <= EPSILON * EPSILON] = 0

        transformSSD = np.sum(SSDValues)
        self.lblSSDDisplay.set_text(str(transformSSD))

        # Display the per-pixel error as a bitmap
        self.dwgErrorImageDisplay.setImageFromNumpyArray(
            np.sqrt(SSDValues).astype(np.uint8))

        # Subtract a binarised version of the aligned template from the
        # target and display the remainder.
        transformedImage[transformedImage > 0] = 255
        subtractImage = self.targetImageGray.astype(
            np.int32) - transformedImage.astype(np.int32)
        subtractImage[subtractImage < 0] = 0
        self.dwgSubtractImageDisplay.setImageFromNumpyArray(
            subtractImage.astype(np.uint8))

    #---------------------------------------------------------------------------
    def chooseImageFile(self):
        """Show a file chooser and return the chosen path, or None on cancel."""
        dialog = gtk.FileChooserDialog(
            title="Choose Image File",
            # NOTE(review): SAVE action for a dialog used to *open* images --
            # confirm this is intentional.
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK,
                     gtk.RESPONSE_ACCEPT))

        dialog.set_current_folder(self.scriptPath + "/../../test_data")

        # Restrict to png/jpg ('fileFilter' avoids shadowing builtin 'filter')
        fileFilter = gtk.FileFilter()
        fileFilter.add_pattern("*.png")
        fileFilter.add_pattern("*.jpg")
        fileFilter.set_name("Image Files")
        dialog.add_filter(fileFilter)
        dialog.set_filter(fileFilter)

        result = dialog.run()
        if result == gtk.RESPONSE_ACCEPT:
            result = dialog.get_filename()

        dialog.destroy()
        return result

    #---------------------------------------------------------------------------
    def onMenuItemOpenTargetActivate(self, widget):
        """Menu handler: load the target image (grayscale), blank the template."""
        filename = self.chooseImageFile()

        if filename is not None:
            # Load in the target image and convert to grayscale
            imageCV = cv.LoadImageM(filename)
            self.targetImageGray = np.ndarray((imageCV.height, imageCV.width),
                                              dtype=np.uint8)
            cv.CvtColor(imageCV, self.targetImageGray, cv.CV_BGR2GRAY)

            # Display the image
            self.dwgTargetImageDisplay.setImageFromNumpyArray(
                self.targetImageGray)
            self.lblTargetName.set_text(os.path.split(filename)[1])

            # Clear the template image to a blank image of matching size
            self.templateImageGray = np.zeros(self.targetImageGray.shape,
                                              dtype=np.uint8)
            self.dwgTemplateImageDisplay.setImageFromNumpyArray(
                self.templateImageGray)
            self.lblTemplateName.set_text("")

            # Refresh the composite views
            self.mergeImages()

    #---------------------------------------------------------------------------
    def onMenuItemOpenTemplateActivate(self, widget):
        """Menu handler: load the template image; must match the target's size."""
        if self.targetImageGray is None:
            print("Error: Must load target image first")
            return

        filename = self.chooseImageFile()

        if filename is not None:
            # Load in the template image
            imageCV = cv.LoadImageM(filename)

            # Check that it has the same dimensions as the target image
            if imageCV.width != self.targetImageGray.shape[1] \
                or imageCV.height != self.targetImageGray.shape[0]:

                print("Error: The template image must have the same dimensions as the target image")
                return

            # Convert to grayscale and display
            self.templateImageGray = np.ndarray(
                (imageCV.height, imageCV.width), dtype=np.uint8)
            cv.CvtColor(imageCV, self.templateImageGray, cv.CV_BGR2GRAY)
            self.dwgTemplateImageDisplay.setImageFromNumpyArray(
                self.templateImageGray)
            self.lblTemplateName.set_text(os.path.split(filename)[1])

            # Refresh the composite views
            self.mergeImages()

    #---------------------------------------------------------------------------
    def onMenuItemQuitActivate(self, widget):
        """Menu handler: quit via the window-destroy handler."""
        self.onWinMainDestroy(widget)

    #---------------------------------------------------------------------------
    def onAdjDisplacementXValueChanged(self, widget):
        """Re-composite whenever the X displacement adjustment changes."""
        self.mergeImages()

    #---------------------------------------------------------------------------
    def onAdjDisplacementYValueChanged(self, widget):
        """Re-composite whenever the Y displacement adjustment changes."""
        self.mergeImages()

    #---------------------------------------------------------------------------
    def onBtnAutoAlignClicked(self, widget):
        """Button handler: estimate the displacement with ImageFlowFilter and
        write it into the adjustments."""
        if self.targetImageGray is None or self.templateImageGray is None:
            # Nothing to do
            return

        # Align the images
        (transX, transY, rotationAngle, newImage) = \
            self.imageFlowFilter.calcImageFlow(self.targetImageGray,
                                               self.templateImageGray)

        # Setting the adjustments fires their value-changed handlers, which
        # call mergeImages() for us (the explicit call was already disabled).
        self.adjDisplacementX.set_value(transX)
        self.adjDisplacementY.set_value(transY)

    #---------------------------------------------------------------------------
    def onDwgTargetImageExposeEvent(self, widget, data):
        """Expose handler: repaint the target image drawing area."""
        self.dwgTargetImageDisplay.drawPixBufToDrawingArea(data.area)

    #---------------------------------------------------------------------------
    def onDwgMergedImageExposeEvent(self, widget, data):
        """Expose handler: repaint the merged image drawing area."""
        self.dwgMergedImageDisplay.drawPixBufToDrawingArea(data.area)

    #---------------------------------------------------------------------------
    def onDwgTemplateImageExposeEvent(self, widget, data):
        """Expose handler: repaint the template image drawing area."""
        self.dwgTemplateImageDisplay.drawPixBufToDrawingArea(data.area)

    #---------------------------------------------------------------------------
    def onDwgErrorImageExposeEvent(self, widget, data):
        """Expose handler: repaint the error image drawing area."""
        self.dwgErrorImageDisplay.drawPixBufToDrawingArea(data.area)

    #---------------------------------------------------------------------------
    def onDwgSubtractImageExposeEvent(self, widget, data):
        """Expose handler: repaint the subtraction image drawing area."""
        self.dwgSubtractImageDisplay.drawPixBufToDrawingArea(data.area)

    #---------------------------------------------------------------------------
    def update(self):
        """Idle-callback generator: always yields True so gobject keeps
        scheduling it.

        The original sampled time.clock() each pass but never used the value;
        that dead timing code (and an unreachable 'yield False') has been
        removed -- observable behaviour is unchanged.
        """
        while 1:
            yield True
class MainWindow:
 
    #---------------------------------------------------------------------------
    def __init__( self ):
        """Set up the Alignment Explorer GUI.

        Loads the Glade layout, caches widget references, wraps each drawing
        area in a Display helper and hooks the self.update() generator into
        the gobject idle loop (Python 2 generator protocol: .next).
        """
        self.scriptPath = os.path.dirname( __file__ )
        self.targetImageGray = None        # grayscale image, set on load
        self.templateImageGray = None      # grayscale image, set on load
        self.imageFlowFilter = ImageFlowFilter()

        # Load the GUI layout from the Glade file
        builder = gtk.Builder()
        builder.add_from_file( self.scriptPath + "/GUI/AlignmentExplorer.glade" )

        # Widgets looked up by their Glade ids
        self.window = builder.get_object( "winMain" )
        self.adjDisplacementX = builder.get_object( "adjDisplacementX" )
        self.adjDisplacementY = builder.get_object( "adjDisplacementY" )
        self.lblSSDDisplay = builder.get_object( "lblSSDDisplay" )
        self.lblTargetName = builder.get_object( "lblTargetName" )
        self.lblTemplateName = builder.get_object( "lblTemplateName" )

        # Drawing areas, each wrapped in a Display helper
        dwgTargetImage = builder.get_object( "dwgTargetImage" )
        dwgMergedImage = builder.get_object( "dwgMergedImage" )
        dwgTemplateImage = builder.get_object( "dwgTemplateImage" )
        dwgErrorImage = builder.get_object( "dwgErrorImage" )
        dwgSubtractImage = builder.get_object( "dwgSubtractImage" )
        self.dwgTargetImageDisplay = Display( dwgTargetImage )
        self.dwgMergedImageDisplay = Display( dwgMergedImage )
        self.dwgTemplateImageDisplay = Display( dwgTemplateImage )
        self.dwgErrorImageDisplay = Display( dwgErrorImage )
        self.dwgSubtractImageDisplay = Display( dwgSubtractImage )

        # Route Glade-declared signal handlers to methods on this object
        builder.connect_signals( self )

        # Drive self.update() as a generator from the idle loop
        updateLoop = self.update()
        gobject.idle_add( updateLoop.next )

        self.window.show()
        
    #---------------------------------------------------------------------------
    def onWinMainDestroy( self, widget, data = None ):
        """GTK signal handler: quit the main loop when the window is destroyed."""
        gtk.main_quit()
        
    #---------------------------------------------------------------------------   
    def main( self ):
        """Run the application; blocks until the GTK main loop quits."""
        # All PyGTK applications must have a gtk.main(). Control ends here
        # and waits for an event to occur (like a key press or mouse event).
        gtk.main()
        
    #---------------------------------------------------------------------------
    def mergeImages( self ):
        """Shift the template by the adjustment values, composite it with the
        target image and refresh the merged/error/subtraction displays."""
        # 'is None', not '== None': once loaded this is a numpy array and
        # '==' becomes an elementwise comparison (ambiguous in an 'if').
        if self.targetImageGray is None:
            # Nothing to do
            return

        # Create a transformed version of the template image.
        # scipy.ndimage.shift is the supported spelling; the old
        # scipy.ndimage.interpolation namespace is deprecated.
        transformedImage = scipy.ndimage.shift(
            self.templateImageGray,
            ( self.adjDisplacementY.get_value(), self.adjDisplacementX.get_value() ) )

        # Composite image: target in the red channel, shifted template in the
        # green channel, so matched pixels show as yellow.
        height, width = self.targetImageGray.shape
        mergedImage = np.zeros( ( height, width, 3 ), dtype=np.uint8 )
        mergedImage[ :, :, 0 ] = self.targetImageGray
        mergedImage[ :, :, 1 ] = transformedImage
        self.dwgMergedImageDisplay.setImageFromNumpyArray( mergedImage )

        # Sum of Squared Differences (SSD), gating out per-pixel differences
        # of EPSILON or less as noise.
        SSDValues = np.square(
            transformedImage.astype( np.int32 ) - self.targetImageGray.astype( np.int32 ) )
        EPSILON = 128
        SSDValues[ SSDValues <= EPSILON*EPSILON ] = 0

        transformSSD = np.sum( SSDValues )
        self.lblSSDDisplay.set_text( str( transformSSD ) )

        # Display the per-pixel error as a bitmap
        self.dwgErrorImageDisplay.setImageFromNumpyArray(
            np.sqrt( SSDValues ).astype( np.uint8 ) )

        # Subtract a binarised version of the aligned template from the
        # target and display the remainder.
        transformedImage[ transformedImage > 0 ] = 255
        subtractImage = self.targetImageGray.astype( np.int32 ) - transformedImage.astype( np.int32 )
        subtractImage[ subtractImage < 0 ] = 0
        self.dwgSubtractImageDisplay.setImageFromNumpyArray( subtractImage.astype( np.uint8 ) )

    #---------------------------------------------------------------------------
    def chooseImageFile( self ):
        """Show a file chooser and return the chosen path, or None on cancel."""
        dialog = gtk.FileChooserDialog(
            title="Choose Image File",
            # NOTE(review): SAVE action for a dialog used to *open* images --
            # confirm this is intentional.
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                      gtk.STOCK_OK, gtk.RESPONSE_ACCEPT) )

        dialog.set_current_folder( self.scriptPath + "/../../test_data" )

        # Restrict to png/jpg ('fileFilter' avoids shadowing builtin 'filter')
        fileFilter = gtk.FileFilter()
        fileFilter.add_pattern( "*.png" )
        fileFilter.add_pattern( "*.jpg" )
        fileFilter.set_name( "Image Files" )
        dialog.add_filter( fileFilter )
        dialog.set_filter( fileFilter )

        result = dialog.run()

        if result == gtk.RESPONSE_ACCEPT:
            result = dialog.get_filename()

        dialog.destroy()

        return result
    
    #---------------------------------------------------------------------------
    def onMenuItemOpenTargetActivate( self, widget ):
        """Menu handler: load the target image as grayscale, display it and
        reset the template image to a blank of matching size."""
        filename = self.chooseImageFile()

        # 'is not None': chooseImageFile returns None on cancel
        if filename is not None:
            # Load in the target image and convert to grayscale
            imageCV = cv.LoadImageM( filename )
            self.targetImageGray = np.ndarray( ( imageCV.height, imageCV.width ), dtype=np.uint8 )
            cv.CvtColor( imageCV, self.targetImageGray, cv.CV_BGR2GRAY )

            # Display the image
            self.dwgTargetImageDisplay.setImageFromNumpyArray( self.targetImageGray )
            self.lblTargetName.set_text( os.path.split( filename )[ 1 ] )

            # Clear the template image
            self.templateImageGray = np.zeros( self.targetImageGray.shape, dtype=np.uint8 )
            self.dwgTemplateImageDisplay.setImageFromNumpyArray( self.templateImageGray )
            self.lblTemplateName.set_text( "" )

            # Refresh the composite views
            self.mergeImages()
            
    #---------------------------------------------------------------------------
    def onMenuItemOpenTemplateActivate( self, widget ):
        """Menu handler: load the template image; it must match the target's
        dimensions exactly."""
        # 'is None': '== None' on a loaded numpy array is elementwise
        if self.targetImageGray is None:
            print( "Error: Must load target image first" )
            return

        filename = self.chooseImageFile()

        if filename is not None:
            # Load in the template image
            imageCV = cv.LoadImageM( filename )

            # Check that it has the same dimensions as the target image
            if imageCV.width != self.targetImageGray.shape[ 1 ] \
                or imageCV.height != self.targetImageGray.shape[ 0 ]:

                print( "Error: The template image must have the same dimensions as the target image" )
                return

            # Convert to grayscale and display
            self.templateImageGray = np.ndarray( ( imageCV.height, imageCV.width ), dtype=np.uint8 )
            cv.CvtColor( imageCV, self.templateImageGray, cv.CV_BGR2GRAY )
            self.dwgTemplateImageDisplay.setImageFromNumpyArray( self.templateImageGray )
            self.lblTemplateName.set_text( os.path.split( filename )[ 1 ] )

            # Refresh the composite views
            self.mergeImages()
    
    #---------------------------------------------------------------------------
    def onMenuItemQuitActivate( self, widget ):
        """Menu handler: quit by routing through the window-destroy handler."""
        self.onWinMainDestroy( widget )
       
    #---------------------------------------------------------------------------
    def onAdjDisplacementXValueChanged( self, widget ):
        """Re-composite the images whenever the X displacement slider moves."""
        self.mergeImages()
    
    #---------------------------------------------------------------------------
    def onAdjDisplacementYValueChanged( self, widget ):
        """Re-composite the images whenever the Y displacement slider moves."""
        self.mergeImages()
        
    #---------------------------------------------------------------------------
    def onBtnAutoAlignClicked( self, widget ):
        """Button handler: estimate the template->target displacement with
        ImageFlowFilter and write it into the displacement adjustments."""
        # 'is None' rather than '== None': these are numpy arrays once loaded
        if self.targetImageGray is None or self.templateImageGray is None:
            # Nothing to do
            return

        # Align the images
        ( transX, transY, rotationAngle, newImage ) = \
            self.imageFlowFilter.calcImageFlow( self.targetImageGray, self.templateImageGray )

        # Setting the adjustments fires their value-changed handlers, which
        # call mergeImages() for us (the explicit call was already disabled).
        self.adjDisplacementX.set_value( transX )
        self.adjDisplacementY.set_value( transY )
    
    #---------------------------------------------------------------------------
    def onDwgTargetImageExposeEvent( self, widget, data ):
        """Expose handler: repaint the target image drawing area."""
        self.dwgTargetImageDisplay.drawPixBufToDrawingArea( data.area )
    
    #---------------------------------------------------------------------------
    def onDwgMergedImageExposeEvent( self, widget, data ):
        """Expose handler: repaint the merged image drawing area."""
        self.dwgMergedImageDisplay.drawPixBufToDrawingArea( data.area )
    
    #---------------------------------------------------------------------------
    def onDwgTemplateImageExposeEvent( self, widget, data ):
        """Expose handler: repaint the template image drawing area."""
        self.dwgTemplateImageDisplay.drawPixBufToDrawingArea( data.area )
    
    #---------------------------------------------------------------------------
    def onDwgErrorImageExposeEvent( self, widget, data ):
        """Expose handler: repaint the error image drawing area."""
        self.dwgErrorImageDisplay.drawPixBufToDrawingArea( data.area )
        
    #---------------------------------------------------------------------------
    def onDwgSubtractImageExposeEvent( self, widget, data ):
        """Expose handler: repaint the subtraction image drawing area."""
        self.dwgSubtractImageDisplay.drawPixBufToDrawingArea( data.area )
        
    #---------------------------------------------------------------------------
    def update( self ):
        """Idle-callback generator: always yields True so gobject keeps
        scheduling it.

        The original sampled time.clock() each pass but never used the value;
        that dead timing code (and an unreachable 'yield False') has been
        removed -- the observable behaviour, an endless stream of True
        values, is unchanged.
        """
        while 1:
            yield True
    def processBag( self, bag ):
    
        FLIP_IMAGE = bool( self.options.frameFlip == "True" )
        USING_OPTICAL_FLOW_FOR_MOTION = False
        print "frameFlip = ", FLIP_IMAGE
    
        bagFrameIdx = 0
        frameIdx = 0
        impactFrameIdx = None
        
        # Setup filters
        opticalFlowFilter = OpticalFlowFilter(
            self.OPTICAL_FLOW_BLOCK_WIDTH, self.OPTICAL_FLOW_BLOCK_HEIGHT, 
            self.OPTICAL_FLOW_RANGE_WIDTH, self.OPTICAL_FLOW_RANGE_HEIGHT )
            
        motionDetectionFilter = MotionDetectionFilter()
        imageFlowFilter = ImageFlowFilter()
        residualSaliencyFilter = ResidualSaliencyFilter()
            
        # Process bag file
        for topic, msg, t in bag.read_messages():
            
            if self.workCancelled:
                # We've been given the signal to quit
                break
            
            if msg._type == "sensor_msgs/Image":
                
                bagFrameIdx += 1
                if (bagFrameIdx-1)%self.PROCESSED_FRAME_DIFF != 0:
                    continue
                
                print "Processing image", frameIdx
                
                # Get input image
                image = cv.CreateMatHeader( msg.height, msg.width, cv.CV_8UC3 )
                cv.SetData( image, msg.data, msg.step )
                
                if FLIP_IMAGE:
                    cv.Flip( image, None, 1 )
                
                # Convert to grayscale
                grayImage = cv.CreateMat( msg.height, msg.width, cv.CV_8UC1 )
                cv.CvtColor( image, grayImage, cv.CV_BGR2GRAY )
                grayImageNumpPy = np.array( grayImage )
                
                # Calculate optical flow
                opticalFlowArrayX, opticalFlowArrayY = \
                    opticalFlowFilter.calcOpticalFlow( grayImage )
                    
                # Detect motion
                if USING_OPTICAL_FLOW_FOR_MOTION:
                    if frameIdx == 0:
                        motionImage = PyVarFlowLib.createMotionMask( 
                            grayImageNumpPy, grayImageNumpPy )
                    else:
                        motionImage = PyVarFlowLib.createMotionMask( 
                            np.array( self.grayScaleImageList[ frameIdx - 1 ] ), 
                            grayImageNumpPy )
                else:
                    motionImage = motionDetectionFilter.calcMotion( grayImage )
                
                
                # Work out the left most point in the image where motion appears
                motionTest = np.copy( motionImage )
                
                cv.Erode( motionTest, motionTest )
                if frameIdx == 0:
                    leftMostMotion = motionImage.shape[ 1 ]
                else:
                    leftMostMotion = self.leftMostMotionList[ frameIdx - 1 ]
                
                leftMostMotionDiff = 0
                for i in range( leftMostMotion ):
                    if motionTest[ :, i ].max() > 0:
                        leftMostMotionDiff = abs( leftMostMotion - i )
                        leftMostMotion = i
                        break
                
                segmentationMask = np.zeros( ( msg.height, msg.width ), dtype=np.uint8 )
                
                FRAMES_BACK = 3
                
                if impactFrameIdx == None:        
                    if leftMostMotionDiff > 18 and leftMostMotion < 0.75*msg.width:
                        
                        # Found impact frame
                        impactFrameIdx = frameIdx
                    
                else:
                    PROCESS_IMPACT = False
                    if PROCESS_IMPACT and frameIdx - impactFrameIdx == FRAMES_BACK:
                        
                        # Should now have enough info to segment object
                        impactMotionImage = self.motionImageList[ impactFrameIdx ]
                        
                        print "Aligning"
                        postImpactRealFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, motionImage )
                        print "Aligning"
                        postImpactFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx + 2 ] )
                        print "Aligning"
                        postImpactNearFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx + 1 ] )
                        
                        segmentationMask = np.maximum( np.maximum( np.maximum( 
                            impactMotionImage, postImpactNearFlow[ 3 ] ), postImpactFarFlow[ 3 ] ), postImpactRealFarFlow[ 3 ] )
                        cv.Dilate( segmentationMask, segmentationMask )
                        
                        print "Aligning"
                        preImpactRealFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx - 8 ] )
                        print "Aligning"
                        preImpactFarFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx - 6 ] )
                        print "Aligning"
                        preImpactNearFlow = imageFlowFilter.calcImageFlow( impactMotionImage, self.motionImageList[ impactFrameIdx - 4 ] )
                        
                        subMask = np.maximum( np.maximum( 
                            preImpactRealFarFlow[ 3 ], preImpactFarFlow[ 3 ] ), preImpactNearFlow[ 3 ] )
                        cv.Erode( subMask, subMask )
                        cv.Dilate( subMask, subMask )
                        cv.Dilate( subMask, subMask )
                        cv.Dilate( subMask, subMask )
                        
                        subMask[ subMask > 0 ] = 255
                        diffImage = segmentationMask.astype( np.int32 ) - subMask.astype( np.int32 )
                        diffImage[ diffImage < 0 ] = 0
                        diffImage = diffImage.astype( np.uint8 )
                        cv.Erode( diffImage, diffImage )
                        #diffImage[ diffImage > 0 ] = 255

                        #segmentationMask = subMask
                        segmentationMask = diffImage
                        #segmentationMask = np.where( diffImage > 128, 255, 0 ).astype( np.uint8 )
                
                # Calculate image flow
                #imageFlow = imageFlowFilter.calcImageFlow( motionImage )
                
                ## Calculate saliency map
                #saliencyMap, largeSaliencyMap = residualSaliencyFilter.calcSaliencyMap( grayImageNumpPy )
                
                #blobMap = np.where( largeSaliencyMap > 128, 255, 0 ).astype( np.uint8 )
                
                #blobMap, numBlobs = PyBlobLib.labelBlobs( blobMap )
                #print "found", numBlobs, "blobs"
                
                #largeSaliencyMap = np.where( largeSaliencyMap > 128, 255, 0 ).astype( np.uint8 )
                
                
                
                
                
                
                # Threshold the saliency map
                #largeSaliencyMap = (largeSaliencyMap > 128).astype(np.uint8) * 255
                #cv.AdaptiveThreshold( largeSaliencyMap, largeSaliencyMap, 255 )
                
                # Detect clusters within the saliency map
                #NUM_CLUSTERS = 5
                
                #numSamples = np.sum( saliencyMap )
                #sampleList = np.ndarray( ( numSamples, 2 ), dtype=np.float32 )
                
                #sampleListIdx = 0
                #for y in range( saliencyMap.shape[ 0 ] ):
                    #for x in range( saliencyMap.shape[ 1 ] ):
                        
                        #numNewSamples = saliencyMap[ y, x ]
                        #if numNewSamples > 0:
                            #sampleList[ sampleListIdx:sampleListIdx+numNewSamples, 0 ] = x
                            #sampleList[ sampleListIdx:sampleListIdx+numNewSamples, 1 ] = y
                            #sampleListIdx += numNewSamples
                            
                #sampleList[ 0:numSamples/2 ] = ( 20, 20 )
                #sampleList[ numSamples/2: ] = ( 200, 200 )
                
                #labelList = np.ndarray( ( numSamples, 1 ), dtype=np.int32 )
                #cv.KMeans2( sampleList, NUM_CLUSTERS, labelList, 
                    #(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01) )
                    
                #saliencyScaleX = float( largeSaliencyMap.shape[ 1 ] ) / saliencyMap.shape[ 1 ]
                #saliencyScaleY = float( largeSaliencyMap.shape[ 0 ] ) / saliencyMap.shape[ 0 ]
                clusterList = []
                #for clusterIdx in range( NUM_CLUSTERS ):
                    
                    #clusterSamples = sampleList[ 
                        #np.where( labelList == clusterIdx )[ 0 ], : ]

                    #if clusterSamples.size <= 0:
                        #mean = ( 0.0, 0.0 )
                        #stdDev = 0.0
                    #else:
                        #mean = clusterSamples.mean( axis=0 )
                        #mean = ( mean[ 0 ]*saliencyScaleX, mean[ 1 ]*saliencyScaleY )
                        #stdDev = clusterSamples.std()*saliencyScaleX
                    
                    #clusterList.append( ( mean, stdDev ) )
                
                
                
                
                # Work out the maximum amount of motion we've seen in a single frame so far
                #motionCount = motionImage[ motionImage > 0 ].size
                
                #if frameIdx == 0:
                    #lastMotionCount = 0
                #else:
                    #lastMotionCount = self.maxMotionCounts[ frameIdx - 1 ]
                    
                #if motionCount < lastMotionCount:
                    #motionCount = lastMotionCount
                
                ## Work out diffImage    
                #diffImage = np.array( motionImage, dtype=np.int32 ) \
                     #- np.array( imageFlow[ 3 ], dtype=np.int32 )
                #diffImage = np.array( np.maximum( diffImage, 0 ), dtype=np.uint8 )
                
                
                
                
                
                # Segment the image
                #workingMask = np.copy( motionImage )
                #workingMask = np.copy( diffImage )
                workingMask = np.copy( segmentationMask )
                kernel = cv.CreateStructuringElementEx( 
                    cols=3, rows=3, 
                    anchorX=1, anchorY=1, shape=cv.CV_SHAPE_CROSS )
                cv.Erode( workingMask, workingMask, kernel )
                cv.Dilate( workingMask, workingMask )
                
                extraExtraMask = np.copy( workingMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                cv.Dilate( extraExtraMask, extraExtraMask )
                
                allMask = np.copy( extraExtraMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                cv.Dilate( allMask, allMask )
                
                possibleForeground = workingMask > 0
            
                if workingMask[ possibleForeground ].size >= 100 \
                    and frameIdx >= 16:
                        
                    print "Msk size", workingMask[ possibleForeground ].size
                    print workingMask[ 0, 0:10 ]
                    
                    fgModel = cv.CreateMat( 1, 5*13, cv.CV_64FC1 )
                    bgModel = cv.CreateMat( 1, 5*13, cv.CV_64FC1 )
                    #workingMask[ possibleForeground ] = self.GC_FGD
                    #workingMask[ possibleForeground == False ] = self.GC_PR_BGD
                    
                    #workingMask[ : ] = self.GC_PR_BGD
                    #workingMask[ possibleForeground ] = self.GC_FGD
                    
                    workingMask[ : ] = self.GC_BGD
                    workingMask[ allMask > 0 ] = self.GC_PR_BGD
                    workingMask[ extraExtraMask > 0 ] = self.GC_PR_FGD
                    workingMask[ possibleForeground ] = self.GC_FGD
                    
                    
                    if frameIdx == 16:
                        # Save mask
                        maskCopy = np.copy( workingMask )
                        maskCopy[ maskCopy == self.GC_BGD ] = 0
                        maskCopy[ maskCopy == self.GC_PR_BGD ] = 64
                        maskCopy[ maskCopy == self.GC_PR_FGD ] = 128
                        maskCopy[ maskCopy == self.GC_FGD ] = 255
                        print "Unused pixels", \
                            maskCopy[ (maskCopy != 255) & (maskCopy != 0) ].size
                          
                        outputImage = cv.CreateMat( msg.height, msg.width, cv.CV_8UC3 )
                        cv.CvtColor( maskCopy, outputImage, cv.CV_GRAY2BGR )
                        
                        cv.SaveImage( "output.png", image );
                        cv.SaveImage( "outputMask.png", outputImage ); 
                        
                        print "Saved images"
                        #return 
                        
                    
                    #print "Set Msk size", workingMask[ workingMask == self.GC_PR_FGD ].size
                
                    imageToSegment = image #self.inputImageList[ frameIdx - FRAMES_BACK ]
                
                    imageCopy = np.copy( imageToSegment )
                    cv.CvtColor( imageCopy, imageCopy, cv.CV_BGR2RGB )
                
                    print "Start seg"
                    cv.GrabCut( imageCopy, workingMask, 
                        (0,0,0,0), fgModel, bgModel, 12, self.GC_INIT_WITH_MASK )
                    print "Finish seg"
                
                    segmentation = np.copy( imageToSegment )
                    segmentation[ (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD) ] = 0
                
                    
                    black = (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD)
                    #motionImage = np.where( black, 0, 255 ).astype( np.uint8 )
                    
                    # Refine the segmentation
                    REFINE_SEG = False
                    if REFINE_SEG:
                        motionImageCopy = np.copy( motionImage )
                        cv.Erode( motionImageCopy, motionImageCopy )
                        #cv.Erode( motionImageCopy, motionImageCopy )
                        #cv.Erode( motionImageCopy, motionImageCopy )
                        
                        workingMask[ motionImageCopy > 0 ] = self.GC_PR_FGD
                        workingMask[ motionImageCopy == 0 ] = self.GC_PR_BGD
                        
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        cv.Dilate( motionImageCopy, motionImageCopy )
                        workingMask[ motionImageCopy == 0 ] = self.GC_BGD
                        
                        print "Other seg"
                        cv.GrabCut( imageCopy, workingMask, 
                            (0,0,0,0), fgModel, bgModel, 12, self.GC_INIT_WITH_MASK )
                        print "Other seg done"
                            
                        segmentation = np.copy( imageToSegment )
                        segmentation[ (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD) ] = 0
                    
                        
                        black = (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD)
                        motionImage = np.where( black, 0, 255 ).astype( np.uint8 )
                    
                
                else:
                    segmentation = np.zeros( ( image.height, image.width ), dtype=np.uint8 )
                
                
                # Save output data
                self.inputImageList[ frameIdx ] = image
                self.grayScaleImageList[ frameIdx ] = grayImage
                self.opticalFlowListX[ frameIdx ] = opticalFlowArrayX
                self.opticalFlowListY[ frameIdx ] = opticalFlowArrayY
                self.motionImageList[ frameIdx ] = motionImage
                self.segmentationList[ frameIdx ] = segmentation
                self.segmentationMaskList[ frameIdx ] = segmentationMask
                #self.maxMotionCounts[ frameIdx ] = motionCount
                #self.imageFlowList[ frameIdx ] = imageFlow
                #self.saliencyMapList[ frameIdx ] = largeSaliencyMap
                #self.saliencyClusterList[ frameIdx ] = clusterList
                self.leftMostMotionList[ frameIdx ] = leftMostMotion
                
                frameIdx += 1
                self.numFramesProcessed += 1
                
        if not self.workCancelled:
            
            
            SAVE_MOTION_IMAGES = True
            BASE_MOTION_IMAGE_NAME = self.scriptPath + "/../../test_data/motion_images/motion_{0:03}.png"
            
            if SAVE_MOTION_IMAGES and len( self.motionImageList ) > 0:
                
                width = self.motionImageList[ 0 ].shape[ 1 ]
                height = self.motionImageList[ 0 ].shape[ 0 ]
                colourImage = np.zeros( ( height, width, 3 ), dtype=np.uint8 )
                
                for frameIdx, motionImage in enumerate( self.motionImageList ):
                    
                    colourImage[ :, :, 0 ] = motionImage
                    colourImage[ :, :, 1 ] = motionImage
                    colourImage[ :, :, 2 ] = motionImage
                    
                    outputName = BASE_MOTION_IMAGE_NAME.format( frameIdx + 1 )
                    cv.SaveImage( outputName, colourImage )
            
            # Recalculate impactFrameIdx
            width = self.motionImageList[ 0 ].shape[ 1 ]
            
            totalMotionDiff = 0
            maxMotionDiff = 0
            impactFrameIdx = None
            for motionIdx in range( 1, len( self.leftMostMotionList ) ):
            
                motionDiff = abs( self.leftMostMotionList[ motionIdx ] \
                    - self.leftMostMotionList[ motionIdx - 1 ] )
                totalMotionDiff += motionDiff
                    
                if motionDiff > maxMotionDiff and totalMotionDiff > 0.5*width:
                    maxMotionDiff = motionDiff
                    impactFrameIdx = motionIdx
            
            if maxMotionDiff <= 18:
                impactFrameIdx = None
                    
            
            if impactFrameIdx != None:
                
                preMotionImages = []
                postMotionImages = []
                impactMotionImage = None
                
                NUM_FRAMES_BEFORE = 3
                
                prefix = self.options.outputPrefix
                if prefix != "":
                    prefix += "_"
                
                BASE_MOTION_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "motion_{0:03}.png"
                START_MOTION_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "start_motion.png"
                START_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "start.png"
                IMPACT_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "impact.png"
                SEGMENTATION_IMAGE_NAME = self.scriptPath + "/../../test_data/impact_images/" + prefix + "segmentation.png"
                NUM_FRAMES_AFTER = 3
                
                width = self.motionImageList[ 0 ].shape[ 1 ]
                height = self.motionImageList[ 0 ].shape[ 0 ]
                colourImage = np.zeros( ( height, width, 3 ), dtype=np.uint8 )
                
                for frameIdx in range( impactFrameIdx - NUM_FRAMES_BEFORE,
                    impactFrameIdx + NUM_FRAMES_AFTER + 1 ):
                    
                    motionImage = self.motionImageList[ frameIdx ]  
                    
                    if frameIdx < impactFrameIdx:
                        preMotionImages.append( motionImage )
                    elif frameIdx == impactFrameIdx:
                        impactMotionImage = motionImage
                    else: # frameIdx > impactFrameIdx
                        postMotionImages.append( motionImage )
                    
                    colourImage[ :, :, 0 ] = motionImage
                    colourImage[ :, :, 1 ] = motionImage
                    colourImage[ :, :, 2 ] = motionImage
                    
                    outputName = BASE_MOTION_IMAGE_NAME.format( frameIdx - impactFrameIdx )
                    cv.SaveImage( outputName, colourImage )
                
                motionDetectionFilter.calcMotion( self.grayScaleImageList[ 0 ] )
                startMotionImage = motionDetectionFilter.calcMotion( 
                    self.grayScaleImageList[ impactFrameIdx ] )
                colourImage[ :, :, 0 ] = startMotionImage
                colourImage[ :, :, 1 ] = startMotionImage
                colourImage[ :, :, 2 ] = startMotionImage  
                cv.SaveImage( START_MOTION_IMAGE_NAME, colourImage )
                
                cv.CvtColor( self.inputImageList[ 0 ], colourImage, cv.CV_RGB2BGR )    
                cv.SaveImage( START_IMAGE_NAME, colourImage )
                cv.CvtColor( self.inputImageList[ impactFrameIdx ], colourImage, cv.CV_RGB2BGR )    
                cv.SaveImage( IMPACT_IMAGE_NAME, colourImage )
                
                print "Segmenting..."
                segmentation = self.produceSegmentation( self.inputImageList[ 0 ], 
                    impactMotionImage, preMotionImages, postMotionImages )
                cv.CvtColor( segmentation, colourImage, cv.CV_RGB2BGR )    
                cv.SaveImage( SEGMENTATION_IMAGE_NAME, colourImage )
                    
            self.refreshGraphDisplay()
            
            
        print "Finished processing bag file"
        if bool( self.options.quitAfterFirstSegmentation == "True" ):
            print "Trying to quit"
            self.onWinMainDestroy( None )
        else:
            print "Not trying to quit so neeah"
 def produceSegmentation( self, startFrame, impactMotionImage, 
                             preMotionImages, postMotionImages ):
     
     ROI_X = 0
     ROI_Y = 76
     ROI_WIDTH = 230
     ROI_HEIGHT = 100
     
     blankFrame = np.zeros( ( startFrame.height, startFrame.width ), dtype=np.uint8 )
     
     imageFlowFilter = ImageFlowFilter()    
     
     # Create the accumulator image
     accumulatorArray = np.copy( impactMotionImage ).astype( np.int32 )
         
     # Take maximum values from motion images after the impact but
     # don't add them in to de-emphasise the manipulator
     imageNum = 1
     for postMotionImage in postMotionImages:
         
         print "Aligning post impact image {0}...".format( imageNum )
         imageNum += 1
         
         ( transX, transY, rotationAngle, alignedImage ) = \
             imageFlowFilter.calcImageFlow( impactMotionImage, postMotionImage )
         accumulatorArray = np.maximum( accumulatorArray, alignedImage )
                 
     # Dilate and subtract motion images from before the impact
     imageNum = 1
     for preMotionImage in preMotionImages:
         
         print "Aligning pre impact image {0}...".format( imageNum )
         imageNum += 1
         
         ( transX, transY, rotationAngle, alignedImage ) = \
             imageFlowFilter.calcImageFlow( impactMotionImage, preMotionImage )
             
         cv.Dilate( alignedImage, alignedImage )
         cv.Dilate( alignedImage, alignedImage )
         cv.Dilate( alignedImage, alignedImage )
         accumulatorArray = accumulatorArray - alignedImage
         
     accumulatorImage = np.clip( accumulatorArray, 0, 255 ).astype( np.uint8 )
     
     # Create the segmentation mask from the accumulator image
     startMask = np.copy( accumulatorImage )
     cv.Dilate( startMask, startMask )
     cv.Erode( startMask, startMask )
     cv.Dilate( startMask, startMask )
     cv.Erode( startMask, startMask )
     startMask = scipy.ndimage.filters.gaussian_filter( 
         startMask, 5.0, mode='constant' )
     
     startMask[ startMask > 0 ] = 255
         
     # Find the larget blob in the ROI
     # Label blobs
     startMask, numBlobs = PyBlobLib.labelBlobs( startMask )
     
     # Find blobs in the region of interest
     testMap = np.copy( startMask )
     testMap[ :ROI_Y, : ] = 0       # Mask out area above the ROI
     testMap[ :, :ROI_X ] = 0       # Mask out area to the left of the ROI
     testMap[ ROI_Y+ROI_HEIGHT: ] = 0   # Mask out area below the ROI
     testMap[ :, ROI_X+ROI_WIDTH: ] = 0   # Mask out area to the right of the ROI
 
     biggestBlobIdx = None
     biggestBlobSize = 0
 
     for blobIdx in range( 1, numBlobs + 1 ):
         if testMap[ testMap == blobIdx ].size > 0:
             blobSize = startMask[ startMask == blobIdx ].size
             if blobSize > biggestBlobSize:
                 biggestBlobSize = blobSize
                 biggestBlobIdx = blobIdx
 
     # Isolate the largest blob
     if biggestBlobIdx != None:
         biggestBlobPixels = (startMask == biggestBlobIdx)
         startMask[ biggestBlobPixels ] = 255
         startMask[ biggestBlobPixels == False ] = 0
     else:
         print "No central blob"
         return blankFrame
         
     # Now expand it to get exclusion mask
     exclusionMask = np.copy( startMask )
     for i in range( 10 ):
         cv.Dilate( exclusionMask, exclusionMask )
     cv.Erode( exclusionMask, exclusionMask )
     cv.Erode( exclusionMask, exclusionMask )
     
     #----------------------------------------------------
     
     maskArray = np.copy( startMask )
     possiblyForeground = ( maskArray > 0 ) & ( accumulatorImage > 0 )
     maskArray[ possiblyForeground ] = self.GC_PR_FGD
     maskArray[ possiblyForeground == False ] = self.GC_PR_BGD
     maskArray[ exclusionMask == 0 ] = self.GC_BGD
     
     definiteMask = np.copy( accumulatorImage )
     definiteMask[ possiblyForeground ] = 255
     definiteMask[ possiblyForeground == False ] = 0
     cv.Erode( definiteMask, definiteMask )
     cv.Erode( definiteMask, definiteMask )
     maskArray[ definiteMask == 255 ] = self.GC_FGD
     
     # Now create the working mask and segment the image
     
     workingMask = np.copy( maskArray )
         
     fgModel = cv.CreateMat( 1, 5*13, cv.CV_64FC1 )
     cv.Set( fgModel, 0 )
     bgModel = cv.CreateMat( 1, 5*13, cv.CV_64FC1 )
     cv.Set( bgModel, 0 )
     
     workingImage = np.copy( startFrame )
     cv.GrabCut( workingImage, workingMask, 
         (0,0,0,0), fgModel, bgModel, 6, self.GC_INIT_WITH_MASK )
         
     cv.Set( fgModel, 0 )
     cv.Set( bgModel, 0 )
     bgdPixels = (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD)
     workingMask[ bgdPixels ] = 0
     workingMask[ bgdPixels == False ] = 255
     cv.Erode( workingMask, workingMask )
     bgdPixels = workingMask == 0
     workingMask[ bgdPixels ] = self.GC_PR_BGD
     workingMask[ bgdPixels == False ] = self.GC_PR_FGD
     workingMask[ exclusionMask == 0 ] = self.GC_BGD
     
     cv.GrabCut( workingImage, workingMask, 
         (0,0,0,0), fgModel, bgModel, 6, self.GC_INIT_WITH_MASK )
     
     segmentation = np.copy( startFrame )
     segmentation[ (workingMask != self.GC_PR_FGD) & (workingMask != self.GC_FGD) ] = 0
     
     # Remove everything apart from the biggest blob in the ROI
     graySeg = np.zeros( ( startFrame.height, startFrame.width ), dtype=np.uint8 )
     cv.CvtColor( segmentation, graySeg, cv.CV_RGB2GRAY )
     startMask = np.copy( graySeg )
     startMask[ startMask > 0 ] = 255
         
     # Find the larget blob in the ROI
     
     # Label blobs
     startMask, numBlobs = PyBlobLib.labelBlobs( startMask )
     
     # Find blobs in the region of interest
     testMap = np.copy( startMask )
     testMap[ :ROI_Y, : ] = 0       # Mask out area above the ROI
     testMap[ :, :ROI_X ] = 0       # Mask out area to the left of the ROI
     testMap[ ROI_Y+ROI_HEIGHT: ] = 0   # Mask out area below the ROI
     testMap[ :, ROI_X+ROI_WIDTH: ] = 0   # Mask out area to the right of the ROI
 
     biggestBlobIdx = None
     biggestBlobSize = 0
 
     for blobIdx in range( 1, numBlobs + 1 ):
         if testMap[ testMap == blobIdx ].size > 0:
             blobSize = startMask[ startMask == blobIdx ].size
             if blobSize > biggestBlobSize:
                 biggestBlobSize = blobSize
                 biggestBlobIdx = blobIdx
 
     # Isolate the largest blob
     if biggestBlobIdx != None:
         biggestBlobPixels = (startMask == biggestBlobIdx)
         segmentation[ biggestBlobPixels == False, 0 ] = 255
         segmentation[ biggestBlobPixels == False, 1 ] = 0
         segmentation[ biggestBlobPixels == False, 2 ] = 255
     else:
         print "No central blob after main segmentation"
         return blankFrame
     
     return segmentation