Example #1
0
    def opencv_grey_image(self):
        """Build a single-channel grayscale IplImage from this object's raw data.

        Wraps self.image_data in a 3-channel 8-bit header, then converts
        it to grayscale using the conversion constant named after
        self.image_mode (e.g. 'RGB' -> cv.CV_RGB2GRAY).
        """
        source = cv.CreateImageHeader(self.image_size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(source, self.image_data)

        # Destination image: 8-bit depth, one channel.
        grey = cv.CreateImage(self.image_size, 8, 1)
        conversion = getattr(cv, 'CV_%s2GRAY' % self.image_mode)
        cv.CvtColor(source, grey, conversion)
        return grey
Example #2
0
def disp_thresh(lower, upper):
    """Grab a Kinect depth frame and display pixels within (lower, upper).

    Pixels whose depth lies strictly between the two bounds are shown
    white (255), everything else black.  Blocks ~10 ms in cv.WaitKey so
    the HighGUI window can repaint.
    """
    depth, timestamp = freenect.sync_get_depth()
    # Binary mask scaled to 8-bit: 255 inside the band, 0 outside.
    depth = 255 * np.logical_and(depth > lower, depth < upper)
    depth = depth.astype(np.uint8)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U, 1)
    # tobytes() replaces the deprecated ndarray.tostring() (removed in
    # NumPy 2.0); row step is itemsize * width for a 1-channel image.
    cv.SetData(image, depth.tobytes(), depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage('Depth', image)
    cv.WaitKey(10)
Example #3
0
def faces_from_pil_image(pil_image):
    "Return a list of (x,y,h,w) tuples for faces detected in the PIL image"
    storage = cv.CreateMemStorage(0)
    facial_features = cv.Load('haarcascade_frontalface_alt.xml', storage=storage)
    cv_im = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
    # tobytes() replaces Image.tostring(), which was removed in Pillow 3.0.
    cv.SetData(cv_im, pil_image.tobytes())
    faces = cv.HaarDetectObjects(cv_im, facial_features, storage)
    # faces includes a `neighbors` field that we aren't going to use here
    return [f[0] for f in faces]
Example #4
0
def convertToCvGrayscale(originalImage):
    """Convert an old-style cv image to a single-channel grayscale image.

    NOTE(review): the intermediate looks intended as a BGR->RGB copy of
    the source, but cv.CvtColor writes into the buffer that was just
    shared via SetData from the same source -- confirm this aliasing is
    intentional.
    """
    size = cv.GetSize(originalImage)

    #cv.SaveImage("original_cv.png", originalImage)

    # 3-channel header backed by the source image's raw bytes
    # (IplImage.tostring() here is the old cv API's own method).
    rgbImage = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(rgbImage, originalImage.tostring())
    cv.CvtColor(originalImage, rgbImage, cv.CV_BGR2RGB)

    #cv.SaveImage("rgb_cv.png", rgbImage)

    grayscaleImage = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 1)
    # Back the header with a dummy "0" * (w*h) byte buffer; CvtColor then
    # overwrites it with the grayscale result.
    cv.SetData(grayscaleImage, size[0] * size[1] * "0")
    cv.CvtColor(rgbImage, grayscaleImage, cv.CV_BGR2GRAY)

    #cv.SaveImage("grayscale_cv.png", grayscaleImage)

    return grayscaleImage
Example #5
0
def mat2cv(m):
    """Render matrix *m* with matplotlib's matshow and return the plot
    as an old-style cv image.

    Fixes vs. the previous version: the image is now actually returned,
    the in-memory PNG buffer is closed, and the removed PIL
    Image.tostring() call is replaced by tobytes().
    """
    fig = pylab.figure(1)
    ax = pylab.matshow(m, fignum=1)
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    pi = Image.open(buf)
    # NOTE(review): the header is 1-channel, but savefig PNGs normally
    # decode as RGB(A) -- confirm callers expect this layout (the
    # sibling mat2cv variants use 3 channels).
    cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 1)
    cv.SetData(cv_im, pi.tobytes())
    buf.close()
    return cv_im
Example #6
0
    def red_eye(self):
        """Reduce red-eye inside previously detected faces.

        For every focal point produced by the 'Face Detection' stage,
        run the eye Haar cascade inside the face region, then dampen
        pixels whose red channel is disproportionately bright relative
        to the green/blue average.
        """
        self.load_cascade_file()
        # Keep only focal points that came from the face-detection stage.
        faces = [
            face for face in self.context.request.focal_points
            if face.origin == 'Face Detection'
        ]
        if faces:
            engine = self.context.modules.engine
            mode, data = engine.image_data_as_rgb()
            mode = mode.lower()
            sz = engine.size
            # Wrap the engine's raw RGB buffer in a 3-channel cv header.
            image = cv.CreateImageHeader(sz, cv.IPL_DEPTH_8U, 3)
            cv.SetData(image, data)

            for face in faces:
                # Focal points are centre-based; convert to the top-left corner.
                face_x = int(face.x - face.width / 2)
                face_y = int(face.y - face.height / 2)

                face_roi = (int(face_x), int(face_y), int(face.width),
                            int(face.height))

                cv.SetImageROI(image, face_roi)

                # Detect eyes only inside the face rectangle.
                eyes = cv.HaarDetectObjects(image, self.cascade,
                                            cv.CreateMemStorage(0), HAAR_SCALE,
                                            MIN_NEIGHBORS, HAAR_FLAGS,
                                            MIN_SIZE)

                for (x, y, w, h), other in self.filter_eyes(eyes):
                    # Set the image Region of interest to be the eye area [this reduces processing time]
                    cv.SetImageROI(image, (face_x + x, face_y + y, w, h))

                    if self.context.request.debug:
                        # Outline the eye ROI when debug output is requested.
                        cv.Rectangle(image, (0, 0), (w, h),
                                     cv.RGB(255, 255, 255), 2, 8, 0)

                    for pixel in self.get_pixels(image, w, h, mode):
                        green_blue_avg = (pixel['g'] + pixel['b']) / 2

                        if not green_blue_avg:
                            # Avoid division by zero: a pixel with no
                            # green/blue is treated as at the threshold.
                            red_intensity = RED_THRESHOLD
                        else:
                            # Calculate the intensity compared to blue and green average
                            red_intensity = pixel['r'] / green_blue_avg

                        # If the red intensity is greater than 2.0, lower the value
                        if red_intensity >= RED_THRESHOLD:
                            new_red_value = (pixel['g'] + pixel['b']) / 2
                            # Insert the new red value for the pixel to the image
                            cv.Set2D(
                                image, pixel['y'], pixel['x'],
                                cv.RGB(new_red_value, pixel['g'], pixel['b']))

                    # Reset the image region of interest back to full image
                    cv.ResetImageROI(image)

            self.context.modules.engine.set_image_data(image.tostring())
Example #7
0
 def getCaptureImage(self):
     """Grab the latest frame from the video proxy and return it as a
     numpy array.
     """
     results = self.video.getImageRemote(self.subscriberID)  # fetch the newest image for processing
     ByteArray = bytearray(results[6])  # binary stream -> byte array
     nuArray = numpy.array(ByteArray)  # -> numpy vector
     bgrImage = nuArray.reshape(640,480,3)  # flat data -> 3-channel colour image (2-D would be grayscale)
     # NOTE(review): reshape(640, 480, 3) puts 640 on the row axis; for a
     # 640x480 (WxH) frame the usual order is (480, 640, 3) -- confirm.
     cv.SetData(self.imageHeader,bgrImage,0)
     ipltemp = cv.CloneImage(self.imageHeader)  # full copy (header, ROI, data); currently unused
     temp = self.imageHeader[:]  # temp is a cvmat view of the header
     return numpy.asarray(temp)  # cvmat -> array
def pretty_depth_cv(depth):
    """Convert a Kinect depth frame into a displayable 8-bit cv image.

    Delegates scaling to pretty_depth(), then wraps the resulting array
    in a single-channel IplImage header.
    """
    import cv
    depth = pretty_depth(depth)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
    # tobytes() replaces the deprecated ndarray.tostring() (removed in
    # NumPy 2.0); row step is itemsize * width for one channel.
    cv.SetData(image, depth.tobytes(),
               depth.dtype.itemsize * depth.shape[1])
    return image
Example #9
0
 def displayMap(self, mapbytes):
     """Copy a grayscale map into the BGR display buffer and refresh the
     cv image from it."""
     # Write the same grayscale byte into each of the B, G and R planes.
     for channel in range(3):
         self.bgrbytes[channel::3] = mapbytes

     # Hand the interleaved buffer to the pre-allocated cv image.
     cv.SetData(self.image, self.bgrbytes, self.map_size_pixels * 3)
Example #10
0
def imgmsg_to_cv(img_msg, desired_encoding = "passthrough"):
    """Convert a ROS image message to an old-style cv matrix.

    Tries the legacy cv_bridge API first; on failure (e.g. a cv_bridge
    build that only provides imgmsg_to_cv2) falls back to converting via
    cv2 and wrapping the raw data in a CvMat header.
    """
    try:
        return bridge.imgmsg_to_cv(img_msg, desired_encoding)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; `except Exception` keeps the intended
        # fallback while letting those propagate.
        cv2_im = bridge.imgmsg_to_cv2(img_msg, desired_encoding)
        img_msg = bridge.cv2_to_imgmsg(cv2_im)
        source_type = encoding_as_cvtype(img_msg.encoding)
        im = cv.CreateMatHeader(img_msg.height, img_msg.width, source_type)
        cv.SetData(im, img_msg.data, img_msg.step)
        return im
Example #11
0
 def callback(self, data):
     """ROS subscriber callback: convert the incoming message to an
     old-style cv image via cv2."""
     try:
         source = self.bridge.imgmsg_to_cv2(data, "bgr8")  #imgmsg_to_cv2
         bitmap = cv.CreateImageHeader((source.shape[1], source.shape[0]),
                                       cv.IPL_DEPTH_8U, 3)
         # tobytes() replaces the deprecated ndarray.tostring(); row step
         # is itemsize * 3 channels * width.
         cv.SetData(bitmap, source.tobytes(),
                    source.dtype.itemsize * 3 * source.shape[1])
         cv_image = bitmap
     except CvBridgeError as e:
         # `except E, e` is Python-2-only syntax; `as` works on 2.6+/3.x.
         print(e)
Example #12
0
def convert_np_to_ipl_image(img_np, num_colors=1):
    """Wrap a numpy image in an old-style IplImage header.

    Parameters:
        img_np     - uint8-compatible numpy array (H x W [x C]).
        num_colors - number of interleaved channels; the default of 1
                     preserves the original single-channel behaviour.

    Inspired from https://stackoverflow.com/questions/11528009/opencv-converting-from-numpy-to-iplimage-in-python
    """
    bitmap = cv.CreateImageHeader((img_np.shape[1], img_np.shape[0]),
                                  cv.IPL_DEPTH_8U, num_colors)
    # tobytes() replaces the deprecated ndarray.tostring(); row step is
    # bytes-per-element * channels * width.
    cv.SetData(bitmap, img_np.tobytes(),
               img_np.dtype.itemsize * num_colors * img_np.shape[1])
    return bitmap
Example #13
0
def msg2rgb(msg):
    """Decode a Bayer-patterned image message into a 3-channel RGB image."""
    multi_array = msg.uint8_data  # MultiArray
    sizes = dict((d.label, d.size) for d in multi_array.layout.dim)
    width, height = sizes['width'], sizes['height']

    # Wrap the raw Bayer bytes; the step equals the width for this layout.
    bayer = cv.CreateImageHeader((width, height), cv.IPL_DEPTH_8U,
                                 sizes['channel'])
    cv.SetData(bayer, multi_array.data, width)

    rgb = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)
    cv.CvtColor(bayer, rgb, cv.CV_BayerBG2RGB)
    return rgb
Example #14
0
 def cv_rep(self, depth=cv.IPL_DEPTH_8U, chan=1):
     """OpenCV representation of this image.

     Lazily converts the PIL image to 8-bit grayscale ('L'), wraps it in
     a cv image header, and caches the result on first use.
     """
     if not hasattr(self, '_cv_rep'):
         pc = self.img_pil.convert('L')
         cvi = cv.CreateImageHeader(pc.size, depth, chan)
         # tobytes() replaces Image.tostring(), removed in Pillow 3.0.
         cv.SetData(cvi, pc.tobytes())
         self.img_cv = cvi
         self._cv_rep = cvi
     return self._cv_rep
Example #15
0
def convert_image(filename, threshold, blue_threshold, green_threshold):
    '''convert a file'''
    pgm = cuav_util.PGM(filename)
    # 640x480 3-channel output buffer, filled in place by the converter.
    converted = numpy.zeros((480, 640, 3), dtype='uint8')
    scanner.thermal_convert(pgm.array, converted, threshold,
                            blue_threshold, green_threshold)

    # Wrap the numpy buffer in an old-style cv image header.
    result = cv.CreateImageHeader((640, 480), 8, 3)
    cv.SetData(result, converted)
    return result
def FindExtrinsicCameraParams(imagepoints, objectpoints, KK):
    """ Use OpenCV to solve for the affine transformation that matches imagepoints to object points
    imagepoints - 2xN array
    objectpoints - 3xN array
    KK - 3x3 array or 4 element array

    Returns a 4x4 homogeneous transform built from the solved rotation
    (axis-angle) and translation.
    """
    imagepoints = array(imagepoints, float)
    objectpoints = array(objectpoints, float)
    if len(KK.shape) == 1:
        # KK given as [fx, fy, cx, cy]: build the 3x3 intrinsics matrix.
        cvKK = cv.CreateMat(3, 3, cv.CV_32FC1)
        cvKK[0, 0] = KK[0]
        cvKK[0, 1] = 0
        cvKK[0, 2] = KK[2]
        cvKK[1, 0] = 0
        cvKK[1, 1] = KK[1]
        cvKK[1, 2] = KK[3]
        cvKK[2, 0] = 0
        cvKK[2, 1] = 0
        cvKK[2, 2] = 1
    else:
        cvKK = cv.fromarray(KK)
    # Assume an undistorted camera: all four distortion coefficients zero.
    cvDist = cv.CreateMat(4, 1, cv.CV_32FC1)
    cvDist[0, 0] = 0
    cvDist[1, 0] = 0
    cvDist[2, 0] = 0
    cvDist[3, 0] = 0
    rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
    tvec = cv.CreateMat(3, 1, cv.CV_32FC1)
    # Pack the transposed point arrays as raw float32 data; the step is
    # 4 bytes * N per row.
    object_points = cv.CreateMatHeader(3, objectpoints.shape[0], cv.CV_32FC1)
    cv.SetData(
        object_points,
        struct.pack('f' * (objectpoints.shape[0] * 3),
                    *transpose(objectpoints).flat), 4 * objectpoints.shape[0])
    image_points = cv.CreateMatHeader(2, imagepoints.shape[0], cv.CV_32FC1)
    cv.SetData(
        image_points,
        struct.pack('f' * (imagepoints.shape[0] * 2),
                    *transpose(imagepoints).flat), 4 * imagepoints.shape[0])
    # Solve for rotation (rvec, axis-angle) and translation (tvec).
    cv.FindExtrinsicCameraParams2(object_points, image_points, cvKK, cvDist,
                                  rvec, tvec)
    T = matrixFromAxisAngle((rvec[0, 0], rvec[1, 0], rvec[2, 0]))
    T[0:3, 3] = [tvec[0, 0], tvec[1, 0], tvec[2, 0]]
    return T
Example #17
0
def gaussiannoise(im, mean=0.0, std=15.0):
    """
    Applies Gaussian noise to the image.  This models sensor noise found
    in cheap cameras in low light etc.

    **Parameters:**
        * im - (cvArr) - The source image.
        * mean (float) - The mean value of the Gaussian distribution.
        * std (float) - The standard deviation of the Gaussian
          distribution.  Larger standard deviation means more noise.

    **Returns:**
        The noisy image.

    .. note::
        This function takes a while to run on large images.

    .. todo::
        * Argument for blue amplification to model bad sensors?
        * Use numpy to speed things up?

    .. seealso::
        :func:`saltandpepper()`
    """
    dst = create(im)
    # The 3-channel and 1-channel paths differed only in sample count and
    # matrix type, so the duplicated noise-generation code is factored out.
    if im.channels == 3:
        samples = im.width * im.height * 3
        mat_type = cv.CV_64FC3
    else:
        samples = im.width * im.height
        mat_type = cv.CV_64FC1
    data = array.array('d',
                       [random.gauss(mean, std) for i in xrange(samples)])
    noise = cv.CreateMatHeader(im.height, im.width, mat_type)
    cv.SetData(noise, data, cv.CV_AUTOSTEP)
    cv.Add(im, noise, dst)
    return dst
Example #18
0
def main():
    """Read an image from stdin, run Haar face detection, and print
    '<sha1-of-input>:<detections>'."""
    # Renamed from `hash`, which shadowed the builtin of the same name.
    digest = hashlib.sha1()
    buff = StringIO.StringIO()
    buff.write(sys.stdin.read())  # STDIN to buffer
    digest.update(buff.getvalue())
    buff.seek(0)
    pil_im = Image.open(buff)
    cv_im = cv.CreateImageHeader(pil_im.size, cv.IPL_DEPTH_8U, 3)
    # tobytes() replaces Image.tostring(), removed in Pillow 3.0.
    cv.SetData(cv_im, pil_im.tobytes())
    cascade = cv.Load("../src/main/resources/haarcascade_frontalface_default.xml")
    # print() with one argument behaves the same under Python 2 and 3.
    print(digest.hexdigest() + ":" + str(cv.HaarDetectObjects(
        cv_im, cascade, cv.CreateMemStorage(0), 1.2, 2, 0, (50, 50))))
Example #19
0
    def init(self):
        """Build the template-registration UI: camera capture, preview
        canvas, ten template buttons, and mouse/timer state.

        Bails out early with an explanatory label if normalization has
        not been performed yet (fewer than 4 calibration points).
        """
        if not config.normalize.points or len(config.normalize.points) < 4:
            self._label = tk.Label(self, text=u'まだ正規化が済んでいません。\n正規化を行ってください。')
            self._label.pack()
            return

        if not config.template.images:
            config.template.images = [None for i in xrange(10)]

        # Prepare the camera capture.
        self._camera = cv.CaptureFromCAM(config.camera.id)

        # Prepare the PhotoImage/Canvas used to display the camera feed.
        self._cvmat = None
        self._image = tk.PhotoImage(width=config.canvas.width,
                                    height=config.canvas.height)
        self._canvas = tk.Canvas(self,
                                 width=config.canvas.width,
                                 height=config.canvas.height)
        self._canvas.create_image(config.canvas.width / 2,
                                  config.canvas.height / 2,
                                  image=self._image,
                                  tags='image')
        self._canvas.pack(expand=1, fill=tk.BOTH)
        self._canvas.tag_bind('image', '<ButtonPress-1>', self.mouseDown)
        self._canvas.tag_bind('image', '<B1-Motion>', self.mouseDrag)
        self._canvas.tag_bind('image', '<ButtonRelease-1>', self.mouseUp)

        # Buttons 0-9, one per template slot.
        self._buttons = []
        for i in xrange(10):
            # Double lambda binds the loop variable per-button (avoids the
            # late-binding closure pitfall).
            command = (lambda id: lambda: self.fixation(id))(i)
            button = tk.Button(self, text=u'%d' % i, command=command)
            button.pack(side=tk.LEFT)
            self._buttons.append(button)
            # Restore each button's image from saved data.
            cvimageinfo = config.template.images[i]
            if cvimageinfo:
                cvmat = cv.CreateMatHeader(cvimageinfo.rows, cvimageinfo.cols,
                                           cvimageinfo.type)
                cv.SetData(cvmat, cvimageinfo.data)
                self.setButtonImage(i, cvmat)
        self.allButtonEnable(False)

        # Mouse coordinate state.
        self._mouse_down = None
        self._mouse_up = None

        # Variables used to clip/warp the camera image.
        self._clip_rect, self._perspective_points = Points2Rect(
            config.normalize.points)

        # Refresh the camera image once per second.
        self.addTiming(self.showImage, 1)
Example #20
0
 def convert_image(self, ros_image):
     """Convert a ROS image message to an old-style cv image via cv2."""
     try:
         cv2_image = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
         cv_image = cv.CreateImageHeader(
             (cv2_image.shape[1], cv2_image.shape[0]), 8, 3)
         # tobytes() replaces the deprecated ndarray.tostring(); row step
         # is itemsize * 3 channels * width.
         cv.SetData(cv_image, cv2_image.tobytes(),
                    cv2_image.dtype.itemsize * 3 * cv2_image.shape[1])
         return cv_image
     except CvBridgeError as e:
         # `except E, e` is Python-2-only syntax; `as` works on 2.6+/3.x.
         print(e)
Example #21
0
def mat2cv(m):
    """Render matrix *m* via matplotlib's matshow and return the plot as
    a 3-channel old-style cv image."""
    fig = pylab.figure(1)
    ax = pylab.matshow(m, fignum=1)
    buf = StringIO.StringIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    pi = Image.open(buf)
    cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 3)
    # tobytes() replaces Image.tostring() (removed in Pillow 3.0); the
    # row step is width * 3 channels.
    cv.SetData(cv_im, pi.tobytes(), pi.size[0] * 3)
    buf.close()
    return cv_im
Example #22
0
 def img(self):
     '''return a cv image for the thumbnail'''
     if self._img is None:
         # Build and cache the thumbnail on first access.
         thumb = cv.CreateImage((self.width, self.height), 8, 3)
         cv.SetData(thumb, self.imgstr)
         # Stored bytes are BGR; convert in place for display.
         cv.CvtColor(thumb, thumb, cv.CV_BGR2RGB)
         if self.border_width and self.border_colour is not None:
             cv.Rectangle(thumb, (0, 0),
                          (self.width - 1, self.height - 1),
                          self.border_colour, self.border_width)
         self._img = thumb
     return self._img
Example #23
0
 def loadTemplates(self):
     """Load the template images stored in the configuration."""
     self._templates = []
     for number, info in enumerate(config.template.images):
         # Rebuild a cvmat header around the saved raw data.
         header = cv.CreateMatHeader(info.rows, info.cols, info.type)
         cv.SetData(header, info.data)
         entry = A(
             image = cv.GetImage(header),
             number = number,
             result = None,
         )
         self._templates.append(entry)
Example #24
0
    def create_image(self, buffer):
        """Decode an in-memory encoded image buffer into an OpenCV image.

        Raises ValueError for GIF input: the old `cv` decoder segfaults
        on GIFs, so refusing loudly is the safer failure mode.
        (The parameter name shadows the Python 2 builtin `buffer`; it is
        kept for interface compatibility.)
        """
        # FIXME: opencv doesn't support gifs, even worse, the library
        # segfaults when trying to decoding a gif. An exception is a
        # less drastic measure.
        if FORMATS[self.extension] == 'GIF':
            raise ValueError("opencv doesn't support gifs")
        # Wrap the raw bytes in a 1-row matrix so DecodeImage can read them.
        imagefiledata = cv.CreateMatHeader(1, len(buffer), cv.CV_8UC1)
        cv.SetData(imagefiledata, buffer, len(buffer))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

        return img0
Example #25
0
def cv2ndarray_to_iplimage(source):
    """source is numpy array returned by cv2"""
    if len(source.shape) != 3 or source.shape[2] != 3:
        raise ValueError(
            "cv2ndarray_to_iplimage currently only supports 3 dimensional ndarrays with depth 3"
        )
    h, w, d = source.shape
    bitmap = cv.CreateImageHeader((w, h), cv.IPL_DEPTH_8U, 3)
    # tobytes() replaces the deprecated ndarray.tostring(); row step is
    # itemsize * 3 channels * width.
    cv.SetData(bitmap, source.tobytes(),
               source.dtype.itemsize * 3 * w)
    return bitmap
Example #26
0
    def get_image(self):
        """
        Get image through proxy, copy it, and return the copy (to avoid memory
        problems)
        """
        frame = self.camProxy.getImageRemote(self.nameId)
        # frame layout: [width, height, layers, colorSpace,
        #                timestamp_hi, timestamp_lo, raw data]
        # Row step = width * layers; valid for 8-bit-per-channel data only.
        cv.SetData(self.iplImageHeader, frame[6], frame[0] * frame[2])
        cv.Resize(self.iplImageHeader, self.output)
        return self.output
Example #27
0
def convert_np_to_cvmat(img_np):
    """Wrap a numpy image in an old-style CV_8UC3 matrix.

    Needed because plain ndarrays have no `from_array` attribute, so the
    data is shared via SetData instead.
    See https://stackoverflow.com/questions/5575108/how-to-convert-a-numpy-array-view-to-opencv-matrix
    """
    height, width = img_np.shape[:2]
    mat = cv.CreateMat(height, width, cv.CV_8UC3)
    # Share the ndarray's buffer; the step is the source's row stride.
    cv.SetData(mat, img_np.data, img_np.strides[0])
    return mat
Example #28
0
def image_from_archive(archive, name):
    """
    Load image PGM file from tar archive.

    Used for tarfile loading and unit test.
    """
    raw = archive.extractfile(archive.getmember(name)).read()
    # Put the encoded bytes in a 1-row matrix so cv can decode them.
    encoded = cv.CreateMat(1, len(raw), cv.CV_8UC1)
    cv.SetData(encoded, raw, len(raw))
    return cv.DecodeImageM(encoded)
Example #29
0
def mat2cv(m, dpi=80, scale=1):
    """Render matrix *m* with matplotlib at the given dpi/scale and
    return the plot as a 3-channel old-style cv image."""
    pylab.close(1)
    # Figure size in inches reproduces the matrix shape in pixels at `dpi`.
    fig = pylab.figure(1,
                       figsize=tuple(reversed([(float(i) / dpi) * scale
                                               for i in m.shape])),
                       dpi=dpi)
    ax = pylab.matshow(m, fignum=1)
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    pi = Image.open(buf).convert('RGB')
    cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 3)
    # tobytes() replaces Image.tostring() (removed in Pillow 3.0); the
    # row step is width * 3 channels.
    cv.SetData(cv_im, pi.tobytes(), pi.size[0] * 3)
    buf.close()
    return cv_im
Example #30
0
def show_depth():
    """Display Kinect depth pixels within +/- threshold of current_depth.

    Reads the module-level `threshold` and `current_depth` settings;
    matching pixels are shown white (255), everything else black.
    """
    global threshold
    global current_depth

    depth, timestamp = freenect.sync_get_depth()
    # Binary mask scaled to 8-bit: 255 inside the band, 0 outside.
    depth = 255 * np.logical_and(depth >= current_depth - threshold,
                                 depth <= current_depth + threshold)
    depth = depth.astype(np.uint8)
    image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                 cv.IPL_DEPTH_8U, 1)
    # tobytes() replaces the deprecated ndarray.tostring() (removed in
    # NumPy 2.0); row step is itemsize * width for one channel.
    cv.SetData(image, depth.tobytes(), depth.dtype.itemsize * depth.shape[1])
    cv.ShowImage('Depth', image)