Example 1
class RecorderIface(TrackerIface):
    """
    This class extends TrackerIface to provide video acquisition and live detection (tracking).
    It uses OpenCV to drive the camera and should therefore work with any camera supported by OpenCV.
    It uses the first available USB/FireWire camera unless the platform is a Raspberry Pi,
    in which case it uses the Pi camera.
    """
    @pyqtSlot()
    def load(self):  # TODO: check if worth keeping
        pass

    @pyqtSlot(result=QVariant)
    def start(self):
        """
        Start the recording and tracking.
        
        :returns: True if the recording was started, False otherwise
        """
        if not hasattr(self.params, 'destPath'):
            return False
        vidExt = os.path.splitext(self.params.destPath)[1]
        if vidExt not in VIDEO_FORMATS:
            print('Unknown format: {}'.format(vidExt))
            return False

        self.positions = []  # reset between runs
        self.distancesFromArena = []

        bgStart = self.params.bgFrameIdx
        nBackgroundFrames = self.params.nBgFrames
        trackFrom = self.params.startFrameIdx
        trackTo = self.params.endFrameIdx if (
            self.params.endFrameIdx > 0) else None
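        # a non-positive endFrameIdx means no explicit end frame (track until the stream ends)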

        threshold = self.params.detectionThreshold
        minArea = self.params.objectsMinArea
        maxArea = self.params.objectsMaxArea
        teleportationThreshold = self.params.teleportationThreshold

        nSds = self.params.nSds
        clearBorders = self.params.clearBorders
        normalise = self.params.normalise
        extractArena = self.params.extractArena

        self.tracker = GuiTracker(
            self,
            srcFilePath=None,
            destFilePath=self.params.destPath,
            threshold=threshold,
            minArea=minArea,
            maxArea=maxArea,
            teleportationThreshold=teleportationThreshold,
            bgStart=bgStart,
            trackFrom=trackFrom,
            trackTo=trackTo,
            nBackgroundFrames=nBackgroundFrames,
            nSds=nSds,
            clearBorders=clearBorders,
            normalise=normalise,
            plot=True,
            fast=False,
            extractArena=extractArena,
            cameraCalibration=self.params.calib,
            callback=None)
        self.stream = self.tracker  # To comply with BaseInterface
        self._setDisplay()
        self._updateImgProvider()

        self.tracker.setRoi(self.roi)

        self.timer.start(self.timerSpeed)
        return True

    def getSamplingFreq(self):
        """
        Return the sampling frequency in Hz (note that this is a maximum; it can be limited by a slower CPU)
        """
        return 1.0 / (self.timerSpeed / 1000.0)  # timer speed in ms

    @pyqtSlot(result=QVariant)
    def camDetected(self):
        """
        Check whether a camera is available (tries to open the first OpenCV device)
        """
        cap = cv2.VideoCapture(0)
        detected = cap.isOpened()
        cap.release()
        return detected

    def getImg(self):
        self.display.reload()  # refresh the image displayed in the GUI
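
For orientation, below is a minimal, self-contained sketch of the camera-probing pattern that camDetected() relies on. The helper name first_available_camera and its max_index parameter are illustrative, not part of the project API; only the cv2 calls mirror the method above.

import cv2

def first_available_camera(max_index=3):
    """Return the index of the first camera OpenCV can open, or None."""
    for idx in range(max_index):
        cap = cv2.VideoCapture(idx)
        opened = cap.isOpened()
        cap.release()  # release the handle whether or not it opened
        if opened:
            return idx
    return None

if __name__ == '__main__':
    print('First available camera: {}'.format(first_available_camera()))
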
Example 3
class TrackerIface(BaseInterface):
    """
    This class implements the BaseInterface to provide a QML interface
    to the GuiTracker object of the tracking module.
    """
    def __init__(self, app, context, parent, params, displayName, providerName,
                 analysisProvider1, analysisProvider2):
        BaseInterface.__init__(self, app, context, parent, params, displayName,
                               providerName)

        self.positions = []
        self.roi = None
        self.analysisImageProvider = analysisProvider1
        self.analysisImageProvider2 = analysisProvider2

    @pyqtSlot(QVariant, result=QVariant)
    def getRow(self, idx):
        """
        Get the data (position and distancesFromArena) at row idx
        
        :param int idx: The index of the row to return
        :returns: The row as a list of strings, or -1 if idx is out of range
        """
        idx = int(idx)
        if 0 <= idx < len(self.positions):
            row = [idx] + list(self.positions[idx]) + list(
                self.distancesFromArena[idx])
            return [str(e) for e in row]
        else:
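            # out-of-range index: return a -1 sentinel instead of raising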
            return -1

    @pyqtSlot()
    def load(self):
        """
        Load the video and create the GuiTracker object.
        Also registers the analysis image providers (for the analysis tab) with Qt.
        """
        self.tracker = GuiTracker(self,
                                  srcFilePath=self.params.srcPath,
                                  destFilePath=None,
                                  nBackgroundFrames=1,
                                  plot=True,
                                  fast=False,
                                  cameraCalibration=self.params.calib,
                                  callback=None)
        self.stream = self.tracker  # To comply with BaseInterface
        self.tracker.roi = self.roi

        self.nFrames = self.tracker._stream.nFrames - 1
        self.currentFrameIdx = self.tracker._stream.currentFrameIdx

        if self.params.endFrameIdx == -1:
            self.params.endFrameIdx = self.nFrames

        self._setDisplay()
        self._setDisplayMax()
        self._updateImgProvider()

    @pyqtSlot()
    def start(self):
        """
        Start the tracking of the loaded video with the parameters from self.params
        """
        self.positions = []  # reset between runs
        self.distancesFromArena = []

        self.tracker._stream.bgStartFrame = self.params.bgFrameIdx
        nBackgroundFrames = self.params.nBgFrames
        self.tracker._stream.bgEndFrame = self.params.bgFrameIdx + nBackgroundFrames - 1
        self.tracker.trackFrom = self.params.startFrameIdx
        self.tracker.trackTo = self.params.endFrameIdx if (
            self.params.endFrameIdx > 0) else None

        self.tracker.threshold = self.params.detectionThreshold
        self.tracker.minArea = self.params.objectsMinArea
        self.tracker.maxArea = self.params.objectsMaxArea
        self.tracker.teleportationThreshold = self.params.teleportationThreshold

        self.tracker.nSds = self.params.nSds
        self.tracker.clearBorders = self.params.clearBorders
        self.tracker.normalise = self.params.normalise
        self.tracker.extractArena = self.params.extractArena

        self.tracker.setRoi(self.roi)

        self.timer.start(self.timerSpeed)

    @pyqtSlot()
    def stop(self):
        """
        The Qt slot wrapping self._stop()
        """
        self._stop('Recording stopped manually')

    def _stop(self, msg):
        """
        Stops the tracking gracefully
        
        :param string msg: The message to print upon stopping
        """
        self.timer.stop()
        self.tracker._stream.stopRecording(msg)

    @pyqtSlot(QVariant, QVariant, QVariant, QVariant, QVariant)
    def setRoi(self, width, height, x, y, diameter):
        """
        Sets the ROI (in which to check for the specimen) from the one drawn in the GUI.
        Scaling is applied to account for the resolution difference between the representation
        of the frames in the GUI (on which the user draws the ROI) and the internal
        representation used to compute the position of the specimen.
        
        :param width: The width of the image representation in the GUI
        :param height: The height of the image representation in the GUI
        :param x: The x coordinate of the top-left corner of the ROI bounding box (the centre is at x + radius)
        :param y: The y coordinate of the top-left corner of the ROI bounding box (the centre is at y + radius)
        :param diameter: The diameter of the ROI
        """
        if hasattr(self, 'tracker'):
            streamWidth, streamHeight = self.tracker._stream.size  # flipped for OpenCV
            horizontalScalingFactor = streamWidth / width
            verticalScalingFactor = streamHeight / height

            radius = diameter / 2.0
            scaledX = (x + radius) * horizontalScalingFactor
            scaledY = (y + radius) * verticalScalingFactor
            scaledRadius = radius * horizontalScalingFactor
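            # Worked example (illustrative numbers): a 640 px wide stream shown
            # in a 320 px wide GUI view gives a scaling factor of 2.0, so a ROI
            # drawn at x=100 with diameter=50 maps to scaledX = (100 + 25) * 2.0 = 250.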

            self.roi = Circle((scaledX, scaledY), scaledRadius)

    @pyqtSlot()
    def removeRoi(self):
        self.roi = None

    @pyqtSlot(QVariant)
    def save(self, defaultDest):
        """
        Save the data (positions and distancesFromArena) as a CSV-style file
        """
        diag = QFileDialog()
        if defaultDest:
            defaultDest = os.path.splitext(defaultDest)[0] + '.csv'
        else:
            defaultDest = os.getenv('HOME')
        destPath = diag.getSaveFileName(parent=diag,
                                        caption='Save file',
                                        directory=defaultDest,
                                        filter="Text (*.txt *.dat *.csv)",
                                        initialFilter="Text (*.csv)")
        destPath = destPath[0]
        if destPath:
            self.write(destPath)

    def write(self, dest):
        """
        The method called by save() to write the CSV file
        """
        with open(dest, 'w') as outFile:
            writer = csv.writer(outFile,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
            for fid, row in enumerate(self.positions):
                writer.writerow([fid] + list(row))

    @pyqtSlot(QVariant)
    def setFrameType(self, outputType):
        """
        Set the type of frame to display (source, difference with background, or binary mask).
        
        :param string outputType: The type of frame to display. One of ['Raw', 'Diff', 'Mask']
        """
        self.outputType = outputType.lower()

    @pyqtSlot()
    def analyseAngles(self):
        """
        Compute and plot the angles between the segments Pn -> Pn+1 and Pn+1 -> Pn+2
        """
        fig, ax = plt.subplots()
        angles = video_analysis.getAngles(self.positions)
        video_analysis.plotAngles(angles, self.getSamplingFreq())
        self.analysisImageProvider._fig = fig

    @pyqtSlot()
    def analyseDistances(self):
        """
        Compute and plot the distances between the points Pn and Pn+1
        """
        fig, ax = plt.subplots()
        distances = video_analysis.posToDistances(self.positions)
        video_analysis.plotDistances(distances, self.getSamplingFreq())
        self.analysisImageProvider2._fig = fig

    @pyqtSlot()
    def saveAnglesFig(self):
        """
        Save the graph as a PNG or JPEG image
        """
        diag = QFileDialog()
        destPath = diag.getSaveFileName(parent=diag,
                                        caption='Save file',
                                        directory=os.getenv('HOME'),
                                        filter="Image (*.png *.jpg)")
        destPath = destPath[0]
        if destPath:
            imsave(destPath, self.analysisImageProvider.getArray())

    def getSamplingFreq(self):
        return self.tracker._stream.fps  # the sampling frequency equals the video frame rate

    def getImg(self):
        if self.tracker._stream.currentFrameIdx < self.nFrames:
            self.display.reload()
            self._updateDisplayIdx()
        else:
            self._stop('End of recording reached')
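
To make the on-disk format concrete, here is a minimal standalone sketch of the CSV layout produced by write(): one row per frame, the frame index first, followed by the position. The file name and position values are made up for illustration.

import csv

positions = [(10.0, 12.5), (11.0, 12.9), (12.3, 13.4)]  # dummy (x, y) data

with open('positions.csv', 'w') as out_file:
    writer = csv.writer(out_file, delimiter=',', quotechar='|',
                        quoting=csv.QUOTE_MINIMAL)
    for fid, row in enumerate(positions):
        writer.writerow([fid] + list(row))  # frame index, then x, y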