Code Example #1
File: ImageMixer.py  Project: perchrn/TaktPlayer
    def _mixImageSubtract(self, wipeSettings, level, image1, image2, mixMat):
        # below full level, scale or wipe image2 before subtracting it from image1
        if(level < 0.99):
            wipeMode, wipePostMix, wipeConfig = wipeSettings
#            print "DEBUG pcn: mixImageSubtract: wipeSettings: " + str(wipeSettings)
            if((wipeMode == WipeMode.Fade) or (wipeMode == WipeMode.Default)):
                cv.ConvertScaleAbs(image2, image2, level, 0.0)
                cv.Sub(image1, image2, mixMat)
                return mixMat
            else:
                if(wipePostMix == False):
                    image2, _ = self._wipeImage(wipeMode, wipeConfig, level, image2, None, mixMat, False)
                    cv.Sub(image1, image2, mixMat)
                    return mixMat
                else:
                    cv.Sub(image1, image2, mixMat)
                    return self._wipeMix(wipeMode, wipeConfig, level, image1, mixMat, image2)
        cv.Sub(image1, image2, mixMat)
        return mixMat
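
Since every snippet on this page revolves around the same call, a quick note on it: cv.Sub(src1, src2, dst, mask=None) is the legacy pre-cv2 OpenCV Python binding for per-element saturating subtraction. A minimal sketch of the semantics, assuming the old cv module is importable (it shipped with OpenCV 2.x and earlier); the matrix values are illustrative only:

import cv

a = cv.CreateMat(2, 2, cv.CV_8UC1)
b = cv.CreateMat(2, 2, cv.CV_8UC1)
dst = cv.CreateMat(2, 2, cv.CV_8UC1)
cv.Set(a, (100, 0, 0, 0))
cv.Set(b, (150, 0, 0, 0))

# dst = saturate(a - b): for CV_8U, 100 - 150 clamps to 0 instead of wrapping
cv.Sub(a, b, dst)
print cv.Get2D(dst, 0, 0)  # (0.0, 0.0, 0.0, 0.0)

The clamping is what lets Example #1 scale image2 down with cv.ConvertScaleAbs and subtract it from image1 without underflow artifacts.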
Code Example #2
File: ImageMixer.py  Project: perchrn/TaktPlayer
    def _mixImageSelfMaskHue(self, wipeSettings, level, image1, image2, mixMat):
        cv.CvtColor(image1, mixMat, cv.CV_RGB2HSV)
        cv.Split(mixMat, self._mixMixMask1, None, None, None)  # hue plane of image1
        cv.CvtColor(image2, mixMat, cv.CV_RGB2HSV)
        cv.Split(mixMat, self._mixMixMask2, None, None, None)  # hue plane of image2
        cv.Sub(self._mixMixMask2, self._mixMixMask1, self._mixImageMask)  # saturating hue difference

        # keep pixels whose hue difference clears the level-dependent threshold
        cv.CmpS(self._mixImageMask, 255 - int(level * 254), self._mixImageMask, cv.CV_CMP_GT)
        return self._mixImageAlphaMask(wipeSettings, level, image1, image2, self._mixImageMask, mixMat)
Code Example #3
def precornerdetect(image):
    # assume that the image is floating-point 
    corners = cv.CloneMat(image)
    cv.PreCornerDetect(image, corners, 3)

    dilated_corners = cv.CloneMat(image)
    cv.Dilate(corners, dilated_corners, None, 1)

    corner_mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.Sub(corners, dilated_corners, corners)
    cv.CmpS(corners, 0, corner_mask, cv.CV_CMP_GE)
    return (corners, corner_mask)
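
The Dilate/Sub/CmpS sequence above is a standard local-maximum test: dilation replaces each pixel by the maximum of its neighborhood, so corners - dilated_corners >= 0 holds exactly where a pixel equals its local maximum (elsewhere the floating-point subtraction goes negative). A hedged usage sketch; the file name is a placeholder, and the input must be single-channel floating point, as the comment in the function requires:

import cv

img8 = cv.LoadImageM("input.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
img32 = cv.CreateMat(img8.rows, img8.cols, cv.CV_32FC1)
cv.ConvertScale(img8, img32, 1.0 / 255.0)  # convert to float in [0, 1]

corners, corner_mask = precornerdetect(img32)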
Code Example #4
    def filter(self, input_img):
        original_input_roi = cv.GetImageROI(input_img)
        filter_roi = shared_roi(input_img, self.base_img)
        
        if not filter_roi:
            raise Exception("input image and training image do not share a region of interest")
        
        cv.SetImageROI(input_img, filter_roi)

        # and that is it! Just subtract the difference we calibrated on from
        # the normal image, and tada!
        
        input_roi = cv.GetImageROI(input_img)
        cv.SetImageROI(self.base_img, input_roi)

        cv.Sub(input_img, self.base_img, input_img)
        cv.SetImageROI(input_img, original_input_roi)
Code Example #5
File: moustache.py  Project: nicolastrote/moustache
def detect_and_draw(img, cascade, mask):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:

                # Compute the position and size of the moustache overlay
                xmoustache = int((x * image_scale) + w * 0.5)
                ymoustache = int((y * image_scale) + h * 1.25)
                wmoustache = int(w * 0.5 * image_scale)
                hmoustache = int(h * 0.19 * image_scale)
                img_mask = cv.CreateImage((wmoustache, hmoustache), mask.depth,
                                          mask.nChannels)
                cv.SetImageROI(
                    img, (xmoustache, ymoustache, wmoustache, hmoustache))
                cv.Resize(mask, img_mask, cv.CV_INTER_LINEAR)

                # Apply the moustache by subtracting its mask from the image ROI
                cv.Sub(img, img_mask, img)
                cv.ResetImageROI(img)
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                #cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
Code Example #6
def sub_image(image1, image2):
    # allocate an output matching image1, so cv.Sub's operands agree in size and type
    new_image = cv.CreateImage((image1.width, image1.height), image1.depth,
                               image1.nChannels)
    cv.Sub(image1, image2, new_image, None)
    return new_image
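
Example #6 spells out cv.Sub's optional fourth argument: with None the subtraction runs everywhere, while an 8-bit single-channel mask restricts it to the mask's non-zero pixels. A small sketch of the masked form (file names are placeholders; grayscale inputs assumed):

import cv

img_a = cv.LoadImage("frame_a.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
img_b = cv.LoadImage("frame_b.png", cv.CV_LOAD_IMAGE_GRAYSCALE)

mask = cv.CreateImage(cv.GetSize(img_a), cv.IPL_DEPTH_8U, 1)
cv.Zero(mask)
cv.Rectangle(mask, (10, 10), (100, 100), cv.RGB(255, 255, 255), -1)  # -1 = filled

out = cv.CloneImage(img_a)
cv.Sub(img_a, img_b, out, mask)  # outside the rectangle, out keeps img_a's pixels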
Code Example #7
File: dmtx.py  Project: 2RoN4eG/Regula-Test-Task
    def find(self, img):
        started = time.time()
        gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
        sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)

        cv.Sobel(gray, sobel, 1, 0)
        cv.Sobel(gray, sobely, 0, 1)
        cv.Add(sobel, sobely, sobel)

        sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
        absnorm8(sobel, sobel8)
        cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)

        sobel_integral = self.Cached('sobel_integral', img.height + 1,
                                     img.width + 1, cv.CV_32SC1)
        cv.Integral(sobel8, sobel_integral)

        d = 16
        _x1y1 = cv.GetSubRect(
            sobel_integral,
            (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x1y2 = cv.GetSubRect(
            sobel_integral,
            (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y1 = cv.GetSubRect(
            sobel_integral,
            (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
        _x2y2 = cv.GetSubRect(
            sobel_integral,
            (d, d, sobel_integral.cols - d, sobel_integral.rows - d))

        summation = cv.CloneMat(_x2y2)
        cv.Sub(summation, _x1y2, summation)
        cv.Sub(summation, _x2y1, summation)
        cv.Add(summation, _x1y1, summation)
        sum8 = self.Cached('sum8', summation.height, summation.width,
                           cv.CV_8UC1)
        absnorm8(summation, sum8)
        cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)

        cv.ShowImage("sum8", sum8)
        seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
        subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
        t_cull = time.time() - started

        seqs = []
        while seq:
            seqs.append(seq)
            seq = seq.h_next()

        started = time.time()
        found = {}
        print 'seqs', len(seqs)
        for seq in seqs:
            area = cv.ContourArea(seq)
            if area > 1000:
                rect = cv.BoundingRect(seq)
                edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
                candidate = cv.GetSubRect(subimg, rect)
                sym = self.dm.decode(
                    candidate.width,
                    candidate.height,
                    buffer(candidate.tostring()),
                    max_count=1,
                    #min_edge = 6,
                    #max_edge = int(edge)      # Units of 2 pixels
                )
                if sym:
                    onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y)
                                for (x, y) in self.dm.stats(1)[1]]
                    found[sym] = onscreen
                else:
                    print "FAILED"
        t_brute = time.time() - started
        print "cull took", t_cull, "brute", t_brute
        return found
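
The four cv.GetSubRect views in this example evaluate the classic integral-image identity, sum(window) = I(x2, y2) - I(x1, y2) - I(x2, y1) + I(x1, y1), for every d x d window at once: the two cv.Sub calls and the cv.Add apply the identity to whole matrices rather than one window at a time. A minimal numpy check of the identity (illustrative only, not part of the project):

import numpy as np

d = 16
img = np.random.randint(0, 256, size=(64, 64)).astype(np.int64)

# integral image with the one-pixel zero border that cv.Integral also produces
I = np.zeros((65, 65), dtype=np.int64)
I[1:, 1:] = img.cumsum(axis=0).cumsum(axis=1)

# same shifted-view arithmetic as _x2y2 - _x1y2 - _x2y1 + _x1y1 above
window_sums = I[d:, d:] - I[:-d, d:] - I[d:, :-d] + I[:-d, :-d]
assert window_sums[0, 0] == img[:d, :d].sum()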
Code Example #8
File: houghlines.py  Project: xuhom/HBBR_Scanner
        print("no laser lines not found in settings!")

    print(str(settings))

    # do color thresholding
    src_hsv = cv.CreateImage(cv.GetSize(src2), 8, 3)
    src_diff = cv.CreateImage(cv.GetSize(src2), 8, 3)
    src = cv.CreateImage(cv.GetSize(src2), 8, 1)
    #    red     = cv.CreateImage(cv.GetSize(src2), 8, 1)
    #    redT    = cv.CreateImage(cv.GetSize(src2), 8, 1)
    #    green   = cv.CreateImage(cv.GetSize(src2), 8, 1)
    #    blue    = cv.CreateImage(cv.GetSize(src2), 8, 1)
    #    cv.Split(src2, red, green, blue, None);
    #    cv.Threshold(red, redT, 20, 255, cv.CV_THRESH_BINARY);

    cv.Sub(src2, src4, src_diff)
    cv.CvtColor(src_diff, src_hsv, cv.CV_RGB2HSV)
    #CV_BGR2HSV_FULL
    cv.InRangeS(src_hsv, RED_MIN, RED_MAX, src)

    #    cv.NamedWindow("Red", 1)
    #    cv.NamedWindow("Green", 1)
    #    cv.NamedWindow("Blue", 1)
    #    cv.NamedWindow("RedT", 1)

    #    cv.ShowImage("RedT", redT)
    #    cv.ShowImage("Red", red)
    #    cv.ShowImage("Green", green)
    #    cv.ShowImage("Blue", blue)

    #    k = cv.WaitKey(0) % 0x100
Code Example #9
File: manage.py  Project: viesal/CV
#
#     cv.Zero(g)
#     cv.Zero(b)
#     cv.Zero(diff)
#     cv.Zero(smoth)
#     cv.Zero(hsv)
#     cv.Zero(diff2)
#     print time.clock()

# img1 = cv.LoadImage(CreateImg(cap1, 'capture'))
# cv.ShowImage('img1', img1)
# cv.WaitKey()
# img2 = cv.LoadImage(CreateImg(cap1, 'capture'))
# cv.ShowImage('img2', img2)

cv.Sub(img1, img2, diff)
path = u"img/sub{}.jpg".format(datetime.strftime(
    datetime.now(), "%S%f "))  # Уникальное имя для каждого кадра
cv.SaveImage(path, diff)
cv.CvtColor(diff, hsv, cv.CV_BGR2HSV)

lower_white = cv.Scalar(50, 50, 50)
upper_white = cv.Scalar(255, 255, 255)

frame_threshed = cv.CreateImage(cv.GetSize(hsv), 8, 1)
cv.InRangeS(hsv, lower_white, upper_white, frame_threshed)
path = u"img/mask{}.jpg".format(datetime.strftime(
    datetime.now(), "%S%f "))  # Уникальное имя для каждого кадра
cv.SaveImage(path, frame_threshed)
cv.Copy(img2, diff2, frame_threshed)
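
Example #9's pipeline (saturating frame difference, HSV threshold, masked copy) maps directly onto the modern cv2 API. A hedged equivalent for current OpenCV; the file names are placeholders and the bounds mirror the snippet above:

import cv2

img1 = cv2.imread("frame1.jpg")
img2 = cv2.imread("frame2.jpg")

diff = cv2.subtract(img1, img2)  # saturating, like cv.Sub
hsv = cv2.cvtColor(diff, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (50, 50, 50), (255, 255, 255))
result = cv2.bitwise_and(img2, img2, mask=mask)  # like cv.Copy with a mask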
Code Example #10
	def detect(self):	

		self.log("Start detection thread")
		### open cam ###
		cap = vc.VideoCapture(self._videoInput, self).capture
		if cap == None:
			self.log("Can't open camera!")
			self.active = False
			return
		
		cap.setFPS(self._fps)
		cap.setVideoSize(self._videoSize)
		cap.setBin(self._bin)
		cap.setGain(self._gain)
		
		
		### init ###
		frame, ts = cap.getFrame()
		
		if not frame:
			self.log("Error reading first frame")
			self.active = False
			return
		frameSize = cv.GetSize(frame)
		
		darkframe = None
		if self._darkframe:
			try:
				darkframe = cv.LoadImage(self._darkframe)
				if frameSize != cv.GetSize(darkframe):
					darkframe = None
					self.log("Darkframe has wrong size")
			except:
				self.log("Darkframe not found")
				
		mask = None
		'''
		if self._mask:
			try:
				tmp = cv.LoadImage(self._mask)
				mask = cv.CreateImage( frameSize, cv.IPL_DEPTH_8U, 1)
				cv.CvtColor( tmp, mask, cv.CV_RGB2GRAY )
				if frameSize != cv.GetSize(mask):
					raise Exception();
			except:
				self.log("Mask not found or wrong size")
		'''
		small, smallSize = self.workingThumb(frame, frameSize)
		runAvg = cv.CreateImage( smallSize, cv.IPL_DEPTH_32F, small.channels)
		runAvgDisplay = cv.CreateImage(smallSize, small.depth, small.channels)
		differenceImg = cv.CreateImage(smallSize, small.depth, small.channels)
		grayImg = cv.CreateImage(smallSize, cv.IPL_DEPTH_8U, 1)
		historyBuffer = imagestack.Imagestack(self._prevFrames)
		capbuf = None
		videoGap = self._maxVideoGap
		postFrames = 0
		frameCount = 1
		detect = False
		newVideo = True
		
		### testwindow
		if self._showWindow:
			self.log("Show window")
			cv.NamedWindow("Thread " + str(self._thread), 1)
			
		### server
		if self._runServer:
			self.log("Start Server on port %d" % self._serverPort)
			self._server = httpserver.httpserver(self._serverPort)

		### capture loop ###
		while self._run:
			frame, ts = cap.getFrame()
			if ts == None:
				ts = time.time()
				
			### create small image for detection
			small, smallSize = self.workingThumb(frame, frameSize)
			
			videoGap += 1

			if small:
				frameCount += 1

				if 1/float(frameCount) < self._avgLevel and not detect:
					self.log("start detection")
					detect = True
				
				### subtract darkframe
				if darkframe:
					cv.Sub( frame, darkframe, frame )
	
				if self._deNoiseLevel > 0:
					cv.Smooth( small, small, cv.CV_MEDIAN, self._deNoiseLevel)
				cv.RunningAvg( small, runAvg, self._avgLevel, mask )
				cv.ConvertScale( runAvg, runAvgDisplay, 1.0, 0.0 )
				cv.AbsDiff( small, runAvgDisplay, differenceImg )
				
				if differenceImg.depth == grayImg.depth:
					grayImg = differenceImg
				else:
					cv.CvtColor( differenceImg, grayImg, cv.CV_RGB2GRAY )
				cv.Threshold( grayImg, grayImg, self._detectThresh, 255, cv.CV_THRESH_BINARY )
				contour = cv.FindContours( grayImg, cv.CreateMemStorage(0), cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)	
				

				## draw bounding rect
				while contour:
					bounding_rect = cv.BoundingRect( list(contour) )
					area = bounding_rect[2]*bounding_rect[3]
					if area > self._minRectArea and area < self._maxRectArea and detect:
						videoGap = 0
						#print(str(area))
						self.log("motion detected...")
						if self._drawRect:
							self.drawBoundingRect(frame, bounding_rect, smallSize = smallSize)

					contour = contour.h_next()
		
				## add text notations
				ms = "%04d" % int( (ts-int(ts)) * 10000 )
				t = time.strftime("%Y-%m-%d %H:%M:%S." + ms + " UTC", time.gmtime(ts))
				frame = self.addText(frame, self._texttl, t)

				
				## save / show frame
				if videoGap < self._maxVideoGap:
					if newVideo:
						self.log("Found motion, start capturing")
						capbuf = []
						newVideo = False						
						directory = os.path.join(self._videoDir,
											"%d/%02d/%s" % (time.gmtime(ts).tm_year, time.gmtime(ts).tm_mon, t))

						if not self.isWriteable(directory):
							self.log(directory + " is not writeable!")
							self._run = False
							continue

						capbuf.extend(historyBuffer.getImages())

					capbuf.append({'img' : frame, 'time' : t})
				else:
					if postFrames < self._postFrames and not newVideo:
						capbuf.append({'img' : frame, 'time' : t})
						postFrames += 1
					elif not newVideo:
						self.log("Stop capturing")
						### write images to hdd in new thread ###
						thread.start_new(self.saveVideo, (directory, capbuf))
						capbuf = None
						postFrames = 0
						newVideo = True


				######## Add Frame to history buffer ########
				historyBuffer.add(cv.CloneImage( frame ), t)


				######## Window ########
				if self._showWindow:
					cv.ShowImage("Thread " + str(self._thread), frame)
					cv.ShowImage("Thread %d avg" % self._thread, runAvgDisplay)
					cv.ShowImage("Thread %d diff" % self._thread, differenceImg)
					cv.WaitKey(1)
				
				######## Update Server ########
				if self._server:
					self._server.updateImage(cv.CloneImage( frame ))
				
				self.log("Proc: " + str(time.time() - ts))

			else:
				self.log("no more frames (" + str(frameCount) +" frames)")
				break

		self.log("Close camera")
		cap.close()
		
		if self._server:
			self.log("Close server")
			self._server.shutdown()
		
		self.log("end detection thread " + str(self._thread))
		self.active = False