Example #1
class Undistort(BasePlugin):

    configParameter = [
        ImageType('inputImage', input=True),
        ImageType('outputImage', output=True),
        StringType('calibrationFile'),
        NameType('outputCalibrationData', output=True)
    ]

    def __init__(self, **kwargs):
        super(Undistort, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')


    def preCyclicCall(self):
        with open(self.calibrationFile.value, 'rb') as f:
            self.camera_matrix = pickle.load(f)
            self.dist_coefs = pickle.load(f)

    def externalCall(self):

        image = self.inputImage.data

        if self.outputCalibrationData.data is None:
            self.outputCalibrationData.data, roi = cv2.getOptimalNewCameraMatrix(
                self.camera_matrix, self.dist_coefs,
                (image.shape[1], image.shape[0]), 0)

        image = cv2.undistort(image, self.camera_matrix, self.dist_coefs,
                              None, self.outputCalibrationData.data)


        self.outputImage.data = image
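The calibrationFile read in preCyclicCall is expected to be a pickle stream holding the camera matrix first and the distortion coefficients second, which matches the order written by the Calibrate plugin further down (Example #21). A minimal sketch of producing such a file outside the plugin framework, assuming chessboard correspondences and an image size are already available:

import pickle
import cv2

# obj_points, img_points and image_size are assumed to come from a prior
# chessboard-detection step (see the Calibrate plugin, Example #21);
# the file name is arbitrary and only has to match the calibrationFile setting.
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, image_size, None, None)

with open('camera_calibration.pkl', 'wb') as f:
    pickle.dump(camera_matrix, f, -1)  # read first by Undistort.preCyclicCall
    pickle.dump(dist_coefs, f, -1)     # read second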
Example #2
class CVTransformColor(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        ImageType('outputImageName', output=True),
        StringType('colorCode'),
    ]

    def __init__(self, **kwargs):
        super(CVTransformColor, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def preCyclicCall(self):
        self.colorCode.value = self.colorCode.value.upper()

        if not hasattr(cv2, self.colorCode.value):
            self.log.error('unknown colorCode <%s>, detaching module <%s>',
                           self.colorCode.value, self.logicSectionName)
            self.activeModule = False

    def externalCall(self):
        image = self.inputImageName.data
        image = cv2.cvtColor(image, getattr(cv2, self.colorCode.value))
        self.outputImageName.data = image
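colorCode is resolved with getattr(cv2, ...), so it has to name one of OpenCV's colour-conversion constants, e.g. 'COLOR_BGR2GRAY' or 'COLOR_BGR2HSV'. Stripped of the framework, the call reduces to roughly this (bgr_image is a hypothetical BGR numpy array):

import cv2
gray = cv2.cvtColor(bgr_image, getattr(cv2, 'COLOR_BGR2GRAY'))  # identical to passing cv2.COLOR_BGR2GRAY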
Example #3
class NewImage(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        ImageType('outputImageName', output=True),
        BoolType('likeImage'),
        IntType('width'),
        IntType('height'),
        IntType('depth'),
    ]

    def __init__(self, **kwargs):
        super(NewImage, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):

        if self.likeImage.value:
            image = np.zeros_like(self.inputImageName.data)
        else:
            shape = list()
            if self.height.value > 0:
                shape.append(self.height.value)
            if self.width.value > 0:
                shape.append(self.width.value)
            if self.depth.value > 0:
                shape.append(self.depth.value)
            shape = tuple(shape)

            image = np.zeros(shape)
        self.outputImageName.data = image
Example #4
class HStack(BasePlugin):

    configParameter = [
        ImageType('inputImageL', input=True),
        ImageType('inputImageR', input=True),
        ImageType('outputImage', output=True),
    ]

    def __init__(self, **kwargs):
        super(HStack, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        self.outputImage.data = np.hstack((self.inputImageL.data, self.inputImageR.data))
Example #5
class DirectVideoSource(BasePlugin):

    configParameter = [
        StringType('inputVideoFile'),
        ImageType('outputImageName', output=True),
    ]

    def __init__(self, **kwargs):
        super(DirectVideoSource, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def preCyclicCall(self):
        self.initVideo()

    def initVideo(self):
        self.inputVideoFile.data = cv2.VideoCapture(self.inputVideoFile.value)

    def externalCall(self):
        if self.inputVideoFile.data.isOpened():
            ret, image = self.inputVideoFile.data.read()
            if ret:
                self.outputImageName.data = image.copy()
            else:
                self.initVideo()
Example #6
class PyrUp(BasePlugin):

    configParameter = [
        ImageType('inputImage', input=True),
        ImageType('outputImage', output=True),
    ]

    def __init__(self, **kwargs):
        super(PyrUp, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        image = self.inputImage.data
        image = cv2.pyrUp(image)
        self.outputImage.data = image
Example #7
class DirectCVCamSource(BasePlugin):

    configParameter = [
        ImageType('outputImageName', output=True),
        IntType('camId'),
        IntType('frameWidth'),
        IntType('frameHeight'),
    ]

    def __init__(self, **kwargs):
        super(DirectCVCamSource, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def preCyclicCall(self):
        self.initCam()

    def initCam(self):
        self.cam = cv2.VideoCapture(self.camId.value)

        if self.frameWidth.value > 0:
            self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.frameWidth.value)
        if self.frameHeight.value > 0:
            self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.frameHeight.value)

    def timeBypassActions(self):
        self.cam.grab()

    def externalCall(self):
        self.cam.grab()
        i, image = self.cam.read()
        self.outputImageName.data = image.copy()
Example #8
class InvertImage(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        ImageType('outputImageName', output=True),
    ]

    def __init__(self, **kwargs):
        super(InvertImage, self).__init__(**kwargs)

    def externalCall(self):
        image = self.inputImageName.data

        image = cv2.subtract(255, image)

        self.outputImageName.data = image
Example #9
class CVBitwiseAnd(BasePlugin):

    configParameter = [
        ImageType('inputImageName1', input=True),
        ImageType('inputImageName2', input=True),
        ImageType('outputImageName', output=True),
    ]

    def __init__(self, **kwargs):
        super(CVBitwiseAnd, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        image = np.zeros(self.inputImageName1.data.shape, np.uint8)
        cv2.bitwise_and(self.inputImageName1.data, self.inputImageName2.data, image)
        self.outputImageName.data = image
Example #10
class DirectKinectSource(BasePlugin):

    configParameter = [
        IntType('camId'),
        ImageType('outputImageName', output=True),
        ImageType('outputDepthImageName', output=True),
        NameType('outputDepthRawName', output=True),
        BoolType('reverseDepthVisualisation'),
    ]

    def __init__(self, **kwargs):
        super(DirectKinectSource, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        #try:
        self.log.debug('try to get image from cam')
        imageData, _ = freenect.sync_get_video(index=self.camId.value)
        depthDataRaw, _ = freenect.sync_get_depth()
        #except TypeError:
        #    self.log.error('asd')

        imageData = np.array(imageData)
        imageData = cv2.cvtColor(imageData, cv2.COLOR_RGB2BGR)

        self.outputImageName.data = imageData.copy()

        depthDataImage = np.float32(depthDataRaw)

        #depthDataImage = (depthDataImage)/2047*256
        #depthDataImage = np.uint8(depthDataImage)
        #depthDataImage = np.float32(depthDataImage) * 255 / 130
        depthDataImage = np.uint8(depthDataImage)

        #depthDataImage = depthDataImage * 255 / 130

        #depthDataImage = np.uint8(cv2.normalize(depthDataImage, depthDataImage, 0, 255, cv2.NORM_MINMAX))

        if self.reverseDepthVisualisation.value:
            depthDataImage = 255 - depthDataImage

        self.outputDepthImageName.data = depthDataImage.copy()
        self.outputDepthRawName.data = depthDataRaw.copy()
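Note that freenect.sync_get_depth() returns 11-bit raw values (0-2047) by default, so the bare np.uint8 cast above wraps the depth modulo 256; the commented-out lines are presumably earlier attempts at scaling into the 8-bit range first. One of those variants, spelled out explicitly:

# scale the 11-bit Kinect depth into 0..255 before the uint8 cast
depthDataImage = np.uint8(np.float32(depthDataRaw) / 2047.0 * 255)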
Example #11
class Threshold(BasePlugin):

    configParameter = [
        ImageType('inputImage', input=True),
        ImageType('outputImage', output=True),
        IntType('threshold'),
        IntType('max'),
        IntType('type'),
    ]

    def __init__(self, **kwargs):
        super(Threshold, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        image = self.inputImage.data
        ret, image = cv2.threshold(image, self.threshold.value, self.max.value,
                                   self.type.value)
        self.outputImage.data = image
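threshold, max and type are handed straight to cv2.threshold, so type has to be one of OpenCV's integer THRESH flags (cv2.THRESH_BINARY is 0, THRESH_BINARY_INV 1, THRESH_TRUNC 2, THRESH_TOZERO 3, THRESH_TOZERO_INV 4). The equivalent bare call, with hypothetical values:

import cv2
ret, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)  # gray: a hypothetical single-channel image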
Example #12
class WeightChanels(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        ImageType('outputImageName', output=True),
        IntType('ofs'),
    ]

    def __init__(self, **kwargs):
        super(WeightChanels, self).__init__(**kwargs)

    def preCyclicCall(self):
        pass

    def externalCall(self):

        hlut = np.sin(np.linspace(0, np.pi, 31))
        hblk = np.zeros(180 - hlut.size, np.float32)
        hlut = np.concatenate((hlut, hblk))
        self.hlut = np.roll(hlut, self.ofs.value)

        flut = np.sin(np.linspace(0, np.pi * 0.5, 256)) * 255
        fblk = np.zeros(256 - flut.size, np.float32)
        self.flut = np.concatenate((flut, fblk))

        tflut = np.sin(np.linspace(0, np.pi * 0.8, 256)) * 255
        tfblk = np.zeros(256 - tflut.size, np.float32)
        self.tflut = np.concatenate((tflut, tfblk))

        image = self.inputImageName.data


        image = np.array(self.hlut[image[:, :, 0]] * \
            (0.2 * self.tflut[image[:, :, 1]] +
             0.8 * self.flut[image[:, :, 2]]), np.uint8)

        self.outputImageName.data = image
Example #13
class EqualizeHist(BasePlugin):

    configParameter = [
        ImageType('inputImage', input=True),
        ImageType('outputImage', output=True),
    ]

    def __init__(self, **kwargs):
        super(EqualizeHist, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        ch = 2
        image = self.inputImage.data
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hsv[..., ch] = cv2.equalizeHist(hsv[..., ch])

        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        #size = image.shape[0]
        #cv2.circle(image, (image.shape[1]//2,image.shape[0]+size//2), size, (0,0,0), -1)

        self.outputImage.data = image
Example #14
class CVDrawCicles(BasePlugin):
    """
    if inputContourIndexListName is empty, then draw all contours
    """

    configParameter = [
        ImageType('inputImageName', input=True),
        StringType('circleData', input=True),
        ImageType('outputImageName', output=True),
        BoolType('binarizedOutput'),
        IntListType('color'),
        IntType('thickness'),
    ]

    def __init__(self, **kwargs):
        super(CVDrawCicles, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):

        if self.binarizedOutput.value:
            if len(self.inputImageName.data.shape) > 2:
                image = np.zeros(tuple(self.inputImageName.data.shape[:2]),
                                 np.uint8)
            else:
                image = np.zeros_like(self.inputImageName.data)
        else:
            image = self.inputImageName.data.copy()

        if self.circleData.data is not None:
            for circ in self.circleData.data[0]:
                cv2.circle(image, (circ[0], circ[1]), circ[2],
                           self.color.value, self.thickness.value)

        self.outputImageName.data = image
Example #15
class Merge(BasePlugin):

    configParameter = [
        ImageListType('inputImageList', input=True),
        ImageType('outputImageName', output=True),
    ]

    def __init__(self, **kwargs):
        super(Merge, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):

        mdata = tuple(self.inputImageList.data[x] for x in self.inputImageList.value)
        self.outputImageName.data = cv2.merge(mdata)
Example #16
class CVGaussBlur(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        NameType('outputImageName', output=True),
        FloatType('sigmaX'), FloatType('sigmaY'),
        IntType('kSize', constraint=range(1,99,2)),
    ]

    def __init__(self, **kwargs):
        super(CVGaussBlur, self).__init__(**kwargs)

    def externalCall(self):
        image = self.inputImageName.data
        image = cv2.GaussianBlur(image, (self.kSize.value, self.kSize.value),
                                 sigmaX=self.sigmaX.value, sigmaY=self.sigmaY.value)
        self.outputImageName.data = image
Example #17
class Canny(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        NameType('outputImageName', output=True),
        IntType('threshold1', input=True),
        IntType('threshold2', input=True),
    ]

    def __init__(self, **kwargs):
        super(Canny, self).__init__(**kwargs)

    def externalCall(self):
        image = self.inputImageName.data
        image = cv2.GaussianBlur(image, (3, 3), 1)
        #image = cv2.Laplacian(image,cv2.CV_64F, delta=self.threshold1.value, scale=2)
        image = cv2.Canny(image,
                          self.threshold1.value,
                          self.threshold2.value,
                          L2gradient=False)  #, apertureSize=3)
        self.outputImageName.data = image
Example #18
class CVInRange(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        NameListType('cvValueListNames', input=True),
        NameListType('outputMaskListNames', output=True),
    ]

    def __init__(self, **kwargs):
        super(CVInRange, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):
        self.log.debug(
            'check parameter (dependency) for cvValueListNames (name, value data): '
            '<%s>  <%s>  <%s>', self.cvValueListNames.name,
            self.cvValueListNames.value, self.cvValueListNames.data)

        stepsize = 2
        for i in range(len(self.cvValueListNames.value) // stepsize):
            minkey = self.cvValueListNames.value[(i * stepsize)]
            maxkey = self.cvValueListNames.value[(i * stepsize) + 1]
            minvalue = self.cvValueListNames.data[minkey]
            maxvalue = self.cvValueListNames.data[maxkey]

            self.log.debug(
                'inRange action Nr. <%s> for inputImageName <%s> creating image <%s>',
                i, self.inputImageName.value,
                self.outputMaskListNames.value[i])
            self.log.debug(
                'checking lower border <%s> with <%s> up to upper border <%s> with <%s>',
                minkey, minvalue, maxkey, maxvalue)

            imagebin = cv2.inRange(self.inputImageName.data, minvalue,
                                   maxvalue).copy()
            self.outputMaskListNames.setDataValue(
                self.outputMaskListNames.value[i], imagebin)
Example #19
class HoughCircles(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        StringType('circleData', output=True),
        IntType('dp', constraint=range(1, 1000)),
        IntType('minDist', constraint=range(1, 1000)),
        IntType('minRad', constraint=range(1, 1000)),
        IntType('maxRad', constraint=range(1, 1000)),
        IntType('threshold1', constraint=range(1, 1000)),
        IntType('threshold2', constraint=range(1, 1000)),
        ImageType('inputOrgImageName', input=True),
        BoolType('doDrawCircles'),
        ImageType('outputOrgCircleImageName', output=True),
        BoolType('doCannyOutput'),
        ImageType('outputCannyImageName', output=True),
    ]

    def __init__(self, **kwargs):
        super(HoughCircles, self).__init__(**kwargs)

    def externalCall(self):

        reslist = cv2.HoughCircles(self.inputImageName.data,
                                   cv2.cv.CV_HOUGH_GRADIENT,
                                   dp=self.dp.value,
                                   minDist=self.minDist.value,
                                   minRadius=self.minRad.value,
                                   maxRadius=self.maxRad.value,
                                   param1=self.threshold1.value,
                                   param2=self.threshold2.value)

        self.circleData.data = reslist

        if self.doCannyOutput.value:
            image = self.inputImageName.data
            canny = cv2.Canny(image, self.threshold1.value,
                              self.threshold1.value // 2)
            self.outputCannyImageName.data = canny

        if self.doDrawCircles.value:
            resvisimage = self.inputOrgImageName.data.copy()

            if reslist is not None and len(reslist):
                for x in reslist[0]:
                    corr = 5
                    mask = np.zeros(tuple(self.inputImageName.data.shape[:2]),
                                    np.uint8)
                    cv2.circle(mask, (x[0], x[1]), int(x[2] - corr), 255, -1)

                    mean_val = cv2.mean(self.inputOrgImageName.data, mask=mask)
                    mv = np.zeros((1, 1, 3), np.uint8)

                    mv[..., 0] = mean_val[0]
                    mv[..., 1] = mean_val[1]
                    mv[..., 2] = mean_val[2]

                    mv2 = cv2.cvtColor(mv, cv2.COLOR_BGR2HSV)

                    #cv2.circle(resvisimage, (x[0], x[1]), int(x[2]-corr), (mean_val[0],mean_val[1],mean_val[2]), -1)
                    self.drawText(resvisimage, str(mv2[0, 0]), x[0] - 40,
                                  x[1] - self.maxRad.value - 4, 1)

                    if 28 > mv2[0, 0, 0] or mv2[0, 0, 0] > 32 or mv2[
                            0, 0, 1] < 70 or mv2[0, 0, 2] < 150:
                        #continue
                        pass

                    cv2.circle(resvisimage, (x[0], x[1]), self.minRad.value,
                               (100, 255, 100), 1)
                    cv2.circle(resvisimage, (x[0], x[1]), self.maxRad.value,
                               (100, 100, 255), 1)
                    cv2.circle(resvisimage, (x[0], x[1]), self.minDist.value,
                               (100, 100, 100), 1)
                    cv2.circle(resvisimage, (x[0], x[1]), x[2],
                               (255, 100, 100), 2)
                    cv2.circle(resvisimage, (x[0], x[1]), 4, (50, 50, 50), -1)

            self.outputOrgCircleImageName.data = resvisimage

    def drawText(self,
                 image,
                 text,
                 xpos,
                 ypos,
                 scale=1,
                 color=(225, 225, 225)):
        xpos = int(xpos)
        ypos = int(ypos)
        for i in range(3, 0, -1):
            if i < 2:
                cv2.putText(image, text, (xpos + i, ypos + i),
                            cv2.FONT_HERSHEY_PLAIN, scale, color)
            else:
                cv2.putText(image, text, (xpos + i, ypos + i),
                            cv2.FONT_HERSHEY_PLAIN, scale, (50, 50, 50))
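cv2.cv.CV_HOUGH_GRADIENT used above (like cv2.cv.RGB and cv2.FeatureDetector_create in the MSER example below) is OpenCV 2.4 API; on OpenCV 3 and later the cv2.cv submodule no longer exists and the constant is cv2.HOUGH_GRADIENT. A rough OpenCV 3+ equivalent, with hypothetical values:

import cv2
# gray: a hypothetical single-channel 8-bit image
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=100, param2=30, minRadius=5, maxRadius=50)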
Example #20
class MSER(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        ImageType('inputOrginalImageName', input=True),
        ImageType('outputImageName', output=True),
        ImageType('outputImageMask', output=True),
        ImageType('outputOrginalImageFilt', output=True),
        FloatType('maskScale'),
        IntType('minRadius'),
        IntType('maxRadius'),
    ]

    def __init__(self, **kwargs):
        super(MSER, self).__init__(**kwargs)


    def preCyclicCall(self):
        pass

    def externalCall(self):
        d_red = cv2.cv.RGB(200, 100, 100)
        l_red = cv2.cv.RGB(250, 200, 200)
        d_green = cv2.cv.RGB(100, 200, 100)
        l_green = cv2.cv.RGB(200, 250, 200)

        orig = self.inputOrginalImageName.data
        img = orig.copy()
        #img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img2 = self.inputImageName.data

        detector = cv2.FeatureDetector_create('MSER')
        fs = detector.detect(img2)
        #print dir(detector)

        fs.sort(key=lambda x: -x.size)


        def supress(x):
            for f in fs:
                distx = f.pt[0] - x.pt[0]
                disty = f.pt[1] - x.pt[1]
                dist = math.sqrt(distx * distx + disty * disty)
                # disabled extra filter: or (f.size > self.maxRadius.value) or (f.size < self.minRadius.value)
                if (f.size > x.size) and (dist < f.size / 2):
                    return True

        sfs = [x for x in fs if not supress(x)]

        mask = np.zeros_like(img2)

        for f in sfs:
            cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), 2, d_red, 1)
            cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size / 2), d_green, 1)
            cv2.circle(mask, (int(f.pt[0]), int(f.pt[1])), int(f.size / 1.5 * self.maskScale.value), 255, -1)

        h, w = orig.shape[:2]
        #vis = np.zeros((h, w*2+5), np.uint8)
        #vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        #vis[:h, :w] = orig
        #vis[:h, w+5:w*2+5] = img

        filt = np.zeros_like(orig)

        cv2.merge((mask, mask, mask), filt)

        filt = cv2.bitwise_and(orig, filt)

        self.outputImageMask.data = mask
        self.outputImageName.data = img
        self.outputOrginalImageFilt.data = filt
Example #21
class Calibrate(BasePlugin):

    configParameter = [
        ImageType('inputImage', input=True),
        ImageType('outputImage', output=True),
        IntType('visualTresholdLow'),
        IntType('gridX'),
        IntType('gridY'),
        IntType('dataPerGridCell'),
        IntType('successHue'),
        IntType('boardw'),
        IntType('boardh'),
        FloatType('delay'),
        StringType('outputFileName'),
        BoolType('syncCalculation'),
    ]

    def __init__(self, **kwargs):
        super(Calibrate, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

        self.data = np.zeros((self.gridY.value, self.gridX.value), np.uint8)

        self.board_w = self.boardw.value
        self.board_h = self.boardh.value

        self.board_n = self.board_w * self.board_h
        self.board_sz = (self.board_w, self.board_h)  # size of board

        pattern_size = (self.board_w, self.board_h)
        self.pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        self.pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
        #pattern_points *= cbsq

        self.obj_points = list()
        self.img_points = list()

        self.last_time = time.time()

        self.syncObj = None

        self.calculationDone = False
        self.calculationVisualisation = False

    def externalCall(self):

        waitForSync = False
        if self.syncCalculation.value:

            waitForSync = True

            if not self.syncObj:
                self.syncObj = StringType('syncSection')
                self.syncObj.value = self.sectionConfig['syncSection']
                self.syncObj.data = False
                self.sectionConfig['syncSection'] = self.syncObj

            syncObjOther = self.fullConfig[self.syncObj.value][
                self.syncObj.name]
            if isinstance(syncObjOther, StringType):
                waitForSync = not syncObjOther.data

            #print self.logicSectionName,

            #print self.logicSectionName, self.fullConfig[secObj.value]['foo']

        image = self.inputImage.data

        # PREPARE VISUALIZATION

        if self.data is None:
            self.data = np.zeros((self.gridY.value, self.gridX.value),
                                 np.uint8)

        hsv = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2HSV)

        hsv[..., 0] = 0
        hsv[..., 1] = 255

        dark = hsv[..., 2] < self.visualTresholdLow.value
        hsv[dark] = self.visualTresholdLow.value

        # DRAW VISUALIZATION

        totalx = image.shape[1]
        totaly = image.shape[0]
        xsize = totalx // self.gridX.value
        ysize = totaly // self.gridY.value

        for y, yline in enumerate(self.data):
            for x, value in enumerate(yline):
                hsv[ysize * y:ysize * (y + 1), xsize * x:xsize * (x + 1),
                    0] = value * int(
                        self.successHue.value / self.dataPerGridCell.value)

        doCalculate = self.data.sum() >= (self.dataPerGridCell.value *
                                          self.data.size)

        # AQUIRE BOARD DATA
        isBoard = False
        waitForDelay = time.time() - self.last_time <= self.delay.value
        if not doCalculate and not waitForDelay:

            isBoard, boardData = cv2.findChessboardCorners(
                image, self.board_sz,
                flags=cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FILTER_QUADS)

            attachToList = False

            if isBoard:
                cv2.drawChessboardCorners(hsv, self.board_sz, boardData, 0)
                for point in boardData:
                    x, y = point[0]
                    if self.data[y //
                                 ysize][x //
                                        xsize] < self.dataPerGridCell.value:
                        self.data[y // ysize][x // xsize] += 1
                        attachToList = True
                    self.last_time = time.time()

            if attachToList:
                self.img_points.append(boardData.reshape(-1, 2))
                self.obj_points.append(self.pattern_points)

        # CALCULATE
        self.syncObj.data = doCalculate
        if doCalculate and not waitForSync and not self.calculationDone and self.calculationVisualisation:

            self.calculationDone = True

            rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(
                self.obj_points, self.img_points, (totalx, totaly), None, None)

            print '*' * 100
            print self.logicSectionName
            print '*' * 100
            print "RMS:", rms
            print "camera matrix:\n", camera_matrix
            print "distortion coefficients: ", dist_coefs.ravel()
            print '*' * 100

            with open(self.outputFileName.value, 'wb') as f:
                pickle.dump(camera_matrix, f, -1)
                pickle.dump(dist_coefs, f, -1)
                pickle.dump(rms, f, -1)
                pickle.dump(rvecs, f, -1)
                pickle.dump(tvecs, f, -1)

        self.outputImage.data = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        if doCalculate:
            if waitForSync:
                textStr = 'wait for sync'
            else:
                if not self.calculationDone:
                    textStr = 'calculating'
                    self.calculationVisualisation = True
                else:
                    textStr = 'done'
        else:
            if isBoard or waitForDelay:
                textStr = 'board found'
            else:
                textStr = 'searching board'

        drawText(self.outputImage.data,
                 textStr,
                 50,
                 50,
                 scale=2,
                 thickness=3,
                 bgthickness=3)
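drawText() used at the end of externalCall is a module-level helper that is not part of this snippet, so its real signature is framework-specific. Judging from the call and from the drawText methods in Examples #19 and #22, a plausible stand-in might look like this:

# Hypothetical stand-in for the framework's drawText helper:
# a dark shadow pass first, then the foreground text on top.
def drawText(image, text, xpos, ypos, scale=1, color=(225, 225, 225),
             thickness=1, bgthickness=2):
    xpos, ypos = int(xpos), int(ypos)
    cv2.putText(image, text, (xpos + 1, ypos + 1), cv2.FONT_HERSHEY_PLAIN,
                scale, (50, 50, 50), bgthickness)
    cv2.putText(image, text, (xpos, ypos), cv2.FONT_HERSHEY_PLAIN,
                scale, color, thickness)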
Example #22
class Histogram(BasePlugin):

    configParameter = [
        ImageType('inputImageName', input=True),
        ImageType('outputImageName', output=True),
        IntType('scale'),
    ]

    def __init__(self, **kwargs):
        super(Histogram, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

        self.hsv_map = np.zeros((180, 256, 3), np.uint8)
        h, s = np.indices(self.hsv_map.shape[:2])
        self.hsv_map[:, :, 0] = h
        self.hsv_map[:, :, 1] = s
        self.hsv_map[:, :, 2] = 255
        self.hsv_map = cv2.cvtColor(self.hsv_map, cv2.COLOR_HSV2BGR)

    def externalCall(self):

        image = self.inputImageName.data
        image = cv2.pyrDown(image)

        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

        h = np.clip(h * 0.005 * self.scale.value, 0, 1)
        vis = self.hsv_map * h[:, :, np.newaxis] / 255.0

        vis = cv2.pyrUp(vis)

        colsub = (0.2, 0.6, 0.2)
        colmain = (0.3, 0.9, 0.3)

        blacksize = 5

        for i in range(10, 360, 10):
            cv2.line(vis, (0, i), (5, i), 0, blacksize)
            cv2.line(vis, (0, i), (5, i), colsub, 1)
        for i in range(30, 360, 30):
            cv2.line(vis, (0, i), (15, i), 0, blacksize)
            cv2.line(vis, (0, i), (15, i), colmain, 1)

        for i in range(20, 510, 20):
            cv2.line(vis, (i, 0), (i, 5), 0, blacksize)
            cv2.line(vis, (i, 0), (i, 5), colsub, 1)
        for i in range(100, 510, 100):
            cv2.line(vis, (i, 0), (i, 15), 0, blacksize)
            cv2.line(vis, (i, 0), (i, 15), colmain, 1)

        nvis = np.zeros((vis.shape[0] + 15, vis.shape[1] + 30, vis.shape[2]))
        nvis[15:, 30:, ...] = vis

        nvis[15:, 30, ...] = colsub
        self.drawText(nvis, 'SATURATION', 450, 10, 0.6, colsub)  # 32
        self.drawText(nvis, 'HUE', 4, 367, 0.6, colsub)

        for i in range(50, 300, 50):
            self.drawText(nvis, str(i), i * 2 + 17, 10, 0.75, colmain)

        for i in range(15, 180, 15):
            self.drawText(nvis, str(i), 0, i * 2 + 18, 0.75, colmain)

        #nvis = cv2.pyrUp(nvis)
        self.outputImageName.data = nvis

    def drawText(self,
                 image,
                 text,
                 xpos,
                 ypos,
                 scale=1,
                 color=(225, 225, 225)):
        xpos = int(xpos)
        ypos = int(ypos)
        for i in range(1, 0, -1):
            if i < 2:
                cv2.putText(image, text, (xpos + i, ypos + i),
                            cv2.FONT_HERSHEY_PLAIN, scale, color)
            else:
                cv2.putText(image, text, (xpos + i, ypos + i),
                            cv2.FONT_HERSHEY_PLAIN, scale, (50, 50, 50))
Example #23
class TestPlugin(BasePlugin):

    configParameter = [
        ImageType('inputImage', input=True),
    ]

    def __init__(self, **kwargs):
        super(TestPlugin, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):

        image = self.inputImage.data
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        hsv = cv2.pyrDown(hsv)

        hsv = cv2.pyrDown(hsv)
        hsv = cv2.pyrDown(hsv)

        green1 = hsv[..., 0] < 33
        green2 = hsv[..., 0] > 36

        hsv[...] = 255

        hsv[green1] = 0
        hsv[green2] = 0

        #se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
        #hsv[...,2] = cv2.morphologyEx(hsv[...,2], cv2.MORPH_OPEN, se)

        se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        hsv[..., 2] = cv2.morphologyEx(hsv[..., 2], cv2.MORPH_CLOSE, se)

        hsv = cv2.pyrUp(hsv)
        '''
        gray = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
        gray = cv2.cvtColor(gray,cv2.COLOR_BGR2HSV)
        edges = cv2.Canny(gray,100,50,apertureSize = 3)

        cv2.imshow('AR', edges)

        lines = cv2.HoughLines(edges,1,np.pi/180,100)

        if lines is not None:
            for rho,theta in lines[0]:
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a*rho
                y0 = b*rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*(a))
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*(a))

                cv2.line(hsv,(x1,y1),(x2,y2),(0,0,255),2)

        '''

        hsv = cv2.pyrUp(hsv)
        hsv = cv2.pyrUp(hsv)

        image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        th, thimagexx = cv2.threshold(gray, 1, 255, 0)
        cv2.imshow('thimagexx', thimagexx)

        cont, h = cv2.findContours(gray, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)

        #image = cv2.pyrUp(image)
        #image = cv2.pyrUp(image)

        cv2.drawContours(image, cont, -1, (0, 0, 255))

        cv2.imshow('FOO', image)
Example #24
class Rectify(BasePlugin):

    configParameter = [
        ImageType('inputImageLeft', input=True),
        ImageType('inputImageRight', input=True),
    ]

    def __init__(self, **kwargs):
        super(Rectify, self).__init__(**kwargs)
        self.log = logging.getLogger(__name__)
        self.log.debug('logging started')

    def externalCall(self):

        sift = cv2.xfeatures2d.SIFT_create()

        img1 = self.inputImageLeft.data
        img2 = self.inputImageRight.data

        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        good = []
        pts1 = []
        pts2 = []

        # ratio test as per Lowe's paper
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.8 * n.distance:
                good.append(m)
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)

        # Now we have the list of best matches from both the images. Let’s find the Fundamental Matrix.

        pts1 = np.int32(pts1)
        pts2 = np.int32(pts2)
        pts1 = np.float32(pts1)
        pts2 = np.float32(pts2)
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
        if F is None:
            print 'F IS NONE'
            return
        print F

        # We select only inlier points
        pts1 = pts1[mask.ravel() == 1]
        pts2 = pts2[mask.ravel() == 1]

        # Find epilines corresponding to points in right image (second image) and
        # drawing its lines on left image
        lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
        lines1 = lines1.reshape(-1, 3)
        img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)

        # Find epilines corresponding to points in left image (first image) and
        # drawing its lines on right image
        lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
        lines2 = lines2.reshape(-1, 3)
        img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)

        cv2.imshow("img5", img5)
        cv2.imshow("img3", img3)