Example #1
def main():
	print "\t\t########################################"
	print "\t\tOPTIMISED = ",cv2.useOptimized()," !!!!"
	print "\t\t########################################"

	while True:
		ret,img=cap.read()
		#img = cv2.medianBlur(img,3)    # 5 is a fairly small kernel size
		img = cv2.resize(img,None,fx=1.3,fy=1,interpolation = cv2.INTER_LINEAR)
		
		hand_box = [(0,50),(400,400)]
		head_box = [(500,50),(800,400)]
		cv2.rectangle(img,hand_box[0],hand_box[1],(255,255,255),2)
		cv2.rectangle(img,head_box[0],head_box[1],(50,50,50),2)
		
		head_frame = img[50:400,500:800]
		try:
			img[50:400,500:800] = lipSegment(head_frame)	
		except ValueError, e:
			#print e
			pass

		hand_frame = img[50:400,0:400]
		
		try:
			mask,counter,hull,(cx,cy),list_far,list_end = count_fingers(hand_frame)
			
			if(cv2.contourArea(hull)>3000) and list_far:
				cv2.drawContours(hand_frame,[hull],0,(0,255,0),1)
				[cv2.circle(hand_frame,far,5,[0,0,0],-1) for far in list_far]
				[cv2.circle(hand_frame,end,5,[150,150,150],-1) for end in list_end]
				cv2.putText(hand_frame,"Fingers = "+str(counter+1),(10,250),cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2,1)

		except ZeroDivisionError, e:
			print "Count_fingers ZeroDivisionError: ",e
Example #2
    def image_callback(self, msg):
        image = self.bridge.compressed_imgmsg_to_cv2(msg, desired_encoding='bgr8')
        imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        cv2.useOptimized()
        cv2.setUseOptimized(True)

        kp1, des1 = self.sift.detectAndCompute(self.blocking_img, None)
        kp2, des2 = self.sift.detectAndCompute(imageGray, None)
        
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        try:
            flann = cv2.FlannBasedMatcher(index_params, search_params)
            matches = flann.knnMatch(des1, des2, k=2)

        except Exception as ex:
            print('knnMatch error')
            
            return 
            
        good = []

        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        outer_dst_pts = np.float32([])

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

            outer_dst_pts = dst_pts

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            matchesMask = mask.ravel().tolist()

            h, w, d= self.blocking_img.shape

            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)

#            dst = None

            try:
                dst = cv2.perspectiveTransform(pts, M)

            except Exception as ex:
                print('perspectiveTransform error: %s' % ex)
                
                return
             
            image = cv2.polylines(image, [np.int32(dst)], True, (255, 0, 0), 3, cv2.LINE_AA)
            
            self.match = True
            
            rospy.logdebug('Parking sign detected: %s' % self.match)
            
        else:
            self.match = False
            
            rospy.logdebug("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))

            matchesMask = None

        #draw_params = dict(matchColor=(0, 255, 0),
        #                   singlePointColor=None,
        #                   matchesMask=matchesMask,
        #                   flags=2)

       # matches_img = cv2.drawMatches(self.blocking_img, kp1, image, kp2, good, None, **draw_params)
        
        if show_matched_points:
            for pt in outer_dst_pts:
                x,y = pt[0]

                cv2.circle(image, (int(x), int(y)), 3, (0,0,255), -1)
        
        self.match_pub.publish(self.match)
        
        #cv2.imshow('match', matches_img)
        cv2.imshow("image", image)

        cv2.waitKey(3)
import cv2 as cv
import numpy as np
import time

print(cv.useOptimized())  # True, enabled by default
ts1 = time.time()
img1 = cv.imread(r'/home/qiao/PythonProjects/Opencv_On_CT/Test_Img/9.jpg')
res1 = cv.medianBlur(img1, 49)
te1 = time.time()
print(te1 - ts1)

cv.setUseOptimized(False)
print(cv.useOptimized())
ts2 = time.time()
img2 = cv.imread(r'/home/qiao/PythonProjects/Opencv_On_CT/Test_Img/9.jpg')
res2 = cv.medianBlur(img2, 49)
te2 = time.time()
print(te2 - ts2)

# True
# 0.014556407928466797
# False
# 0.014634370803833008
# Why is there no performance improvement on my machine?
Example #4
# -*- coding: utf-8 -*-
# __author__ = 'corvin'

import cv2
import numpy as np
'''
Default optimization in OpenCV
Optimization is enabled by default at compile time, so OpenCV normally runs the optimized code.
If you turn optimization off, only the slower, unoptimized code is executed.
Use cv2.useOptimized() to check whether optimization is enabled,
and cv2.setUseOptimized() to turn it on or off.
'''
# check if optimization is enabled
"""
In [5]: cv2.useOptimized()
Out[5]: True
In [6]: %timeit res = cv2.medianBlur(img,49)
10 loops, best of 3: 34.9 ms per loop
# Disable it
In [7]: cv2.setUseOptimized(False)
In [8]: cv2.useOptimized()
Out[8]: False
In [9]: %timeit res = cv2.medianBlur(img,49)
# with optimization enabled, the median blur runs about twice as fast
"""
print(cv2.useOptimized())
cv2.setUseOptimized(False)
print(cv2.useOptimized())
"""
The DFT is most efficient when the array size is a power of two.
It is also quite efficient when the size is a product of 2, 3 and 5, so you can speed things up
by padding the input image (with zeros) to such a size.
With OpenCV you must pad manually; with Numpy you only need to specify the FFT size and it pads for you.

Optimal size: OpenCV provides the function cv2.getOptimalDFTSize() for this.

With the optimal size, OpenCV's DFT can be roughly 3x faster than Numpy's.
"""
import cv2
import numpy as np
import time

img = cv2.imread('./image/girl001.jpg', 0)
rows, cols = img.shape
print(rows, cols)

nrows = cv2.getOptimalDFTSize(rows)
ncols = cv2.getOptimalDFTSize(cols)
print(nrows, ncols)

nimg = np.zeros((nrows, ncols))
nimg[:rows, :cols] = img
if cv2.useOptimized():
    fft1 = np.fft.fft2(img)

    fft2 = np.fft.fft2(img, [nrows, ncols])

    dft1 = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    dft2 = cv2.dft(np.float32(nimg), flags=cv2.DFT_COMPLEX_OUTPUT)
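
# Timing sketch for the speed comparison mentioned above (assumption: it reuses
# `img`, `nimg`, `nrows` and `ncols` computed earlier in this snippet), comparing
# Numpy's padded FFT against cv2.dft on the zero-padded image:
e1 = cv2.getTickCount()
fft_np = np.fft.fft2(img, [nrows, ncols])
e2 = cv2.getTickCount()
dft_cv = cv2.dft(np.float32(nimg), flags=cv2.DFT_COMPLEX_OUTPUT)
e3 = cv2.getTickCount()
print('numpy fft2:', (e2 - e1) / cv2.getTickFrequency(), 's')
print('cv2.dft   :', (e3 - e2) / cv2.getTickFrequency(), 's')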

Example #6
def closing(img):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=2)
    return img

img = cv2.imread('F:\\Documents\\IMGW\\HomeWork1\\R6.jpg',0) 
cv2.imshow('origin', img)
#Binarization
img[img > 128] = 255
img[img <= 128] = 0
cv2.imwrite('F:\\Documents\\IMGW\\HomeWork1\\GRAY.jpg',img)

cv2.setUseOptimized(True) 
print('----------opening x100----------','\n')
print ('Enable AVX:',cv2.useOptimized())
opening_with_avx = img
start = timeit.default_timer()
for i in range (0,100):
    opening_with_avx = opening(opening_with_avx)
stop = timeit.default_timer()
t1 = stop - start
print('Runtime with AVX: ', t1,"\n")
            


opening_without_avx = img
cv2.setUseOptimized(False) 

print ('Enable AVX:',cv2.useOptimized())
start = timeit.default_timer()
Example #7
    def run(self):
        #Video goes here. New videos / camera setup need new parametrization:
        print(cv2.useOptimized())
        if self.inputType == 'FILE':
            cap = cv2.VideoCapture(self.input)
        else:
            cap = cv2.VideoCapture(0)
            cap.set(3,480)
            cap.set(4,360)
            cap.set(5,5)
            #camera = PiCamera()
            #camera.resolution = (480, 360)
            #camera.framerate = 5
            #rawCapture = PiRGBArray(camera, size=(480, 360))

            ## allow the camera to warmup
            #time.sleep(0.1)
        #Counter for all passed objects
        runningID = 0
        #Array of objects currently tracked
        trackedObjects = []
        #Objects not detected anymore in current frame
        toDelete = []
        #Labeling font
        font = cv2.FONT_HERSHEY_SIMPLEX
        #framecounter to skip n frames and improve performance
        framecount = 0
        #Counters for passed objects
        countIn = 2
        countOut = 0
        self.Running = True
        #do until user interruption
        while(self.Running == True):

            framecount=framecount + 1
            ret, frame = cap.read()
            #grab next frame and increase framecounter
            #change the modulo to skip frames. currently consider each frame
            if (framecount % self.frameRatio) == 0:
            #for capture in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
                #frame = capture.array
                #choose region of interest and apply foreground subtractor to it


                fgmask = self.fgbg.apply(frame)

                #Smooth the detected foreground

                blurred = cv2.medianBlur(fgmask,self.blurRadius)

                #Find countours around detected objects
                cv2.imshow('frame1', blurred)


                image, contours, hierarchy = cv2.findContours(blurred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

                #print('\n')
                #Loop through all detected objects in current frame
                a = int(round(time.time() * 1000))
                for cnt in contours:
                    #Get the moments (information about object density or so)
                    M = cv2.moments(cnt)
                    #Make sure the zero-order moment is non-zero (so we can divide by it) and the object has a certain size.
                    #Size threshold to be found empirically
                    if (M['m00'] > 0 and cv2.contourArea(cnt) > self.areaThreshold):
                        #Calculate centroid
                        cx = int(M['m10']/M['m00'])
                        cy = int(M['m01']/M['m00'])
                        #Store area
                        area = cv2.contourArea(cnt)
                        print('Area: ')
                        print(area)
                        #Redundant area check...first could be removed I guess
                        #Object of interest. We assume it is not tracked yet
                        tracked = False
                        #Loop through all tracked objects and calculate the distance of the centroid to the current object
                        for i in range(len(trackedObjects)):
                            #distance calculation...pythagoras
                            distance = math.sqrt((cx-trackedObjects[i][1])*(cx-trackedObjects[i][1])+(cy-trackedObjects[i][2])*(cy-trackedObjects[i][2]))
                            #print for debug
                            print('Distance; ')
                            print(distance)
                            #If the distance to a certain tracked object is smaller than a certain threshold, we
                            #assume it is the same object and distance is due to movement. More sophisticated criteria could be included here
                            #such as shape info etc.
                            if(distance < self.speedLimit):
                                #check whether the objects has passed an imaginary counting line in either direction since the last frame
                                if(cy < self.barrier and trackedObjects[i][2] > self.barrier):
                                    countIn+=1
                                    self.occupancy += 1
                                elif(cy > self.barrier and trackedObjects[i][2] < self.barrier):
                                    countOut +=1
                                    self.occupancy -=1
                                #Update information of the tracked object, since it is the same
                                trackedObjects[i][1] = cx
                                trackedObjects[i][2] = cy
                                trackedObjects[i][3] = area
                                #This is used to sort out the objects later that disappeared in the current frame
                                trackedObjects[i][4] = True
                                if(self.display==True):
                                #Draw a contour around it
                                    frame = cv2.drawContours(frame, [cnt], 0, (255,0,0), 3)
                                    cv2.putText(frame,str(trackedObjects[i][0]),(cx,cy), font, 2,(255,0,0),2,cv2.LINE_AA)
                                #Mark the object as tracked.
                                tracked = True
                                #Break loop. One object can not be tracked multiple times :)
                                break
                        #If the object is identified as new object
                        if tracked == False:
                            #assign new ID
                            runningID = runningID + 1
                            #Store its ID, centroid, area and track status in the array of tracked objects
                            trackedObjects.append([runningID,cx,cy,area,True])
                            if(self.display==True):
                                #Draw the bounding box
                                frame = cv2.drawContours(frame, [cnt], 0, (255,0,0), 3)
                                #Label it
                                cv2.putText(frame,str(runningID),(int(cx),int(cy)), font, 2,(255,0,0),2,cv2.LINE_AA)
                #Loop through all tracked objects and verify whether they still exist in the current frame
                for i in range(len(trackedObjects)):
                    if(trackedObjects[i][4]) == False:
                        toDelete.append(trackedObjects[i])
                    else:
                        trackedObjects[i][4] = False
                #Delete objects which left the frame
                for i in range(len(toDelete)):
                    trackedObjects.remove(toDelete[i])
                #Empty storage for objects to delete
                toDelete = []
                if(self.display==True):
                    #Draw counting line
                    cv2.line(frame,(0,self.barrier),(480,self.barrier),(0,255,0),3)
                    #Display passed objects in either direction
                    cv2.putText(frame,"In"+str(countIn),(20,180), font, 1,(0,255,0),1,cv2.LINE_AA)
                    cv2.putText(frame,"Out"+str(countOut),(160,180), font, 1,(0,255,0),1,cv2.LINE_AA)
                    #Finally, display the current frame with the tracked objects and counters
                    cv2.imshow('frame', frame)
                    cv2.waitKey(250)
                #rawCapture.truncate(0)
                b = int(round(time.time() * 1000))

                print("Frametime: ")
                c=b-a
                print(c)
Example #8
import cv2
import sys
from time import sleep

print("optimized cv="+str(cv2.useOptimized()))

def printTimeSinceLastMeasurement(name):
	global start_time
	print(name+" took "+str((cv2.getTickCount() - start_time)/cv2.getTickFrequency()))
	start_time=cv2.getTickCount()

### start init code
cap = cv2.VideoCapture(0)
if not cap.isOpened():
	print("No capture device")
	sys.exit()
# resolution xy
print(str(cap.get(3))+"x"+str(cap.get(4)))
ret = cap.set(3,320)
ret = cap.set(4,240)
print(" now set to "+str(cap.get(3))+"x"+str(cap.get(4)))
# framerate (will stay at 30)
ret = cap.set(5,60)
print(str(cap.get(5))+" fps")
#read one frame to init
_, img = cap.read()
### end Init code
start_time = cv2.getTickCount()
### ##


count=0
Example #9
# extract main background and logo foreground
im_fg = cv2.bitwise_and(im_roi, im_roi, mask=im_mask)
im_bg = cv2.bitwise_and(im_2, im_2, mask=im_imask)

# add the logo to the main image
im_edit = cv2.add(im_bg, im_fg)
im_1[0:rows, 0:cols] = im_edit
cv2.imshow('overlayed', im_1)

#------------------------------------------------------------
# How To Instrument Code
#------------------------------------------------------------
# One can also use the %timeit command in ipython
#------------------------------------------------------------
if not cv2.useOptimized(): # should be on by default
    cv2.setUseOptimized(True)

t1 = cv2.getTickCount()

# ... your code execution ...

t2 = cv2.getTickCount()
time = (t2 - t1) / cv2.getTickFrequency()
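
# A concrete, hedged instance of the pattern above (assumption: it reuses the
# `im_1` image loaded earlier in this script), timing a single median blur:
t1 = cv2.getTickCount()
blurred = cv2.medianBlur(im_1, 49)
t2 = cv2.getTickCount()
print('medianBlur(49) took %.4f s' % ((t2 - t1) / cv2.getTickFrequency()))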

#------------------------------------------------------------
# Color Conversion
#------------------------------------------------------------
im_gray = cv2.cvtColor(im_1, cv2.COLOR_BGR2GRAY)
im_hsv  = cv2.cvtColor(im_1, cv2.COLOR_BGR2HSV)
Example #10
def main():
    print(cv2.useOptimized())

    data_left = np.load('Left_calibrated.npy').item()
    data_right = np.load('Right_calibrated.npy').item()

    data = np.load('intrs.npy').item()
    objp_left, objp_right, imgp_left, imgp_right = data['OBJL'], data[
        'OBJR'], data['IMPL'], data['IMGR']
    KL, DL, DIML = data_left['K'], data_left['D'], data_left['DIM']
    KR, DR, DIMR = data_right['K'], data_right['D'], data_right['DIM']
    KL = np.array(KL)
    DL = np.array(DL)
    KR = np.array(KR)
    DR = np.array(DR)

    flags = 0
    # flags |= cv2.CALIB_ZERO_TANGENT_DIST
    flags |= cv2.fisheye.CALIB_FIX_SKEW
    # flags |= cv2.CALIB_ZERO_TANGENT_DIST
    flags |= cv2.fisheye.CALIB_CHECK_COND
    flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC
    flags |= cv2.fisheye.CALIB_FIX_K4

    termination_criteria_extrinsics = (cv2.TERM_CRITERIA_EPS +
                                       cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    (rms_stereo, _, _, _, _, R, T, E, F) = \
        cv2.stereoCalibrate(objp_right, imgp_left, imgp_right, KL, DL, KR, DR, DIML,
                            criteria=termination_criteria_extrinsics, flags = flags)
    print(rms_stereo)

    camL = cv2.VideoCapture(0)
    camR = cv2.VideoCapture(1)

    RGB_false(camL)
    RGB_false(camR)
    make_480p(camL)
    make_480p(camR)
    while camL.grab() and camR.grab():

        e1 = cv2.getTickCount()

        _, frameL = camL.retrieve()
        _, frameR = camR.retrieve()

        #print(frameR.shape)

        left_calibrated = undistorted(f8(frameL), data_left)
        right_calibrated = undistorted(f8(frameR), data_right)

        #fps = camL.get(cv2.CAP_PROP_FPS)
        #print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))

        stackedFrames = np.concatenate((left_calibrated, right_calibrated),
                                       axis=1)
        cv2.imshow("calibrated", stackedFrames)

        #cv2.imshow("calibrated", left_calibrated)

        key = cv2.waitKey(40) & 0xFF

        if key == ord('q'):
            break
        elif key == ord('s'):
            tmp = camL
            camL = camR
            camR = tmp

        e2 = cv2.getTickCount()
        t = (e2 - e1) / cv2.getTickFrequency()
        print(t)

    camL.release()
    cv2.destroyAllWindows()
Example #11
def appStarted(app):
    initConstants(app)
    app.running = False
    app.count = 0
    cv2.setUseOptimized(True)
    print(cv2.useOptimized())
import numpy as np
import cv2

cv2.setUseOptimized(True)
print('Optimized status:', cv2.useOptimized())

img = cv2.imread('omj.jpg')
e1 = cv2.getTickCount()

rng = range(5, 49, 2)
for i in rng:
    img = cv2.medianBlur(img, i)
e2 = cv2.getTickCount()
t = (e2 - e1) / cv2.getTickFrequency()
print('Elapsed time:', t, 's')
'''
cv2.useOptimized()
cv2.setUseOptimized(False)
'''
'''
cv2.imshow("figure", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
Example #13
# author: roczhang
# file: demo_3.py
# time: 2021/05/14
from timeit import timeit

import cv2 as cv
import numpy as np

img1 = cv.imread('/data/file/img/data/messi5.jpg')
e1 = cv.getTickCount()
for i in range(5, 49, 2):
    img1 = cv.medianBlur(img1, i)
e2 = cv.getTickCount()
t = (e2 - e1) / cv.getTickFrequency()
print(t)

cv.useOptimized()
Example #14

if __name__ == "__main__":
    args = get_args()
    coco_writer = None
    in_annots = None
    e_video_exts = ['webm']
    video_exts = ['mp4', 'avi']
    image_exts = ['jpg', 'png']
    e_kwargs = [dict() for _ in range(len(args.e_paths))]
    if args.kwargs:
        keys = args.kwargs[0]
        for i, values in enumerate(args.kwargs[1:len(e_kwargs) + 1]):
            e_kwargs[i] = {k: v for k, v in zip(keys, values)}

    print('OpenCV is optimized:', cv2.useOptimized())
    if args.skip_annotations:
        print('Not writing annotations!')

    if args.in_annotations is not None:
        in_annots = COCO(args.in_annotations)
        print('Building path to img id mapping...')
        path2img_id = dict()
        for img in in_annots.imgs.values():
            path2img_id[img['file_name']] = img['id']
        in_annots.__dict__['root_path'] = args.in_path
        in_annots.__dict__['path2img_id'] = path2img_id

    # Get path for effects
    e_readers, e_cfgs = [], []
    for i, (path, kwargs) in enumerate(zip(args.e_paths, e_kwargs)):
Example #15
        out[0:h, 0:w] = frame

        writer.write(out)

        cv2.imshow("Output", out)
        cv2.resize(out, (400, 400))

        if cv2.waitKey(1) & 0xFF == ord("q"):
            print()
            print("[WARNING] pressed q button...")
            print()
            break

    print("[INFO] cleaning up ....")
    print()
    cv2.destroyAllWindows()
    writer.release()
    vid.release()


if __name__ == "__main__":
    print("[INFO] checking for linux OS platform ...")
    print()
    if platform.lower() in {"linux", "linux2"}:
        if not cv2.useOptimized():
            cv2.setUseOptimized(True)
        main()
    elif platform.lower() in {"darwin", "win32"}:
        print("Not a linux platform ...")
        exit()
Example #16
###################################
##### Authors:                #####
##### Stephane Vujasinovic    #####
##### Frederic Uhrweiller     #####
#####                         #####
##### Creation: 2017          #####
###################################

import numpy as np
import cv2
import time

cv2.useOptimized()
print(
    'Starting the calibration. Just press the space bar to exit this part of the program\n'
)
print(
    'Press (s) to save the current image, or press (c) to see the next frame without saving it'
)

# Counter for saved images
i = 0

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points
objp = np.zeros((9 * 6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

# Arrays to store object points and image points from all images
Example #17
from objectDetector import ObjectDetector
from stream import Stream
from fps import FPS
import cv2

# enable optimised mode if possible
optimized = cv2.useOptimized()
if not optimized:
    cv2.setUseOptimized(True)

# initialise new frontal face detector
faceDetector = ObjectDetector(model='FRONTAL_FACE', minSize=35)

# start new video capture
capture = Stream(src=0).start()

# initialise an array to store detected objects
objects = []

# start FPS recorder
fps = FPS().start()

while True:

    ret, frame = capture.read()

    # optional resize to reduce computation
    # frame = cv2.resize(frame, (0, 0), fx=0.35, fy=0.35)

    # get new detected objects
    objects = faceDetector.detect(frame)
Example #18
from matplotlib import pyplot as plt




wincount = 0

def win(*img_list):
    global wincount
    for img in img_list:
        cv2.imshow('win%d'%wincount,img)
        wincount += 1

if __name__ =='__main__':
    #------ Main  ------
    print('OpenCV is %soptimized'%('' if cv2.useOptimized() else 'not '))
    
    img_name = r'color\colorruler.jpg'
    img = cv2.imread(r'F:\Workplace\GroundStation\imageprocess\%s'%img_name,cv2.IMREAD_COLOR)
    
    s = img.tostring()
    
    newimg = np.ndarray(shape=img.shape, dtype=img.dtype,buffer = s)
    cv2.imshow('',newimg)
    cv2.waitKey(0)
    
    # print(img) = [BGR(1,:);BGR(2,:);...;BGR(n,:)] 
    
    #------ matplotlib usage ------
    # plt.imshow(img[:,:,::-1], cmap = 'gray', interpolation = 'bicubic')#BGR to RGB
    # plt.show()
Example #19
# -*- coding: utf-8 -*-

"""
Created on May 10, 2016, 11:41:01 AM

@author: Thunderbolt.Lei (alias: Mu Lei)
@description: This example blends two images while preserving the original colors, recomputing the pixel values to produce the blend.
"""

import cv2
import numpy as np


# cv2.setUseOptimized(False)
# check if optimization is enabled, the default option is enabled
ret = cv2.useOptimized()
print ret

e1 = cv2.getTickCount()

# load the images
img1 = cv2.imread("../../../datas/imgs/Faces.jpg")
img2 = cv2.imread("../../../datas/imgs/bg01.png")

# I want to put logo on top-left corner, So I create a ROI
rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]

# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 175, 255, cv2.THRESH_BINARY)
Example #20
 def __init__(self, height=320, width=240, template='Puck'):
     self.template = cv2.imread('templates/' + template + '.png',0)
     self.cap = cv2.VideoCapture(0)
     cv2.useOptimized()
     self.cap.set(3, height)
     self.cap.set(4, width)
# Function runtime and performance optimization (has issues)
import cv2 as cv
import time
cv.setUseOptimized(True)
img = cv.imread("C:\\Users\\Admin\\Desktop\\image\\airfield.bmp")
# e1 = cv.getTickCount()  # starting clock-cycle count
e1 = time.time()  # method 2: use the time module
for i in range(5, 49, 2):
    img = cv.medianBlur(img, i)
# e2 = cv.getTickCount()  # ending clock-cycle count
e2 = time.time()
# t = (e2-e1)/cv.getTickFrequency()  # getTickFrequency() gives clock cycles per second
t = e2 - e1
print(t)

print(cv.useOptimized())
if cv.useOptimized() == True:
    cv.setUseOptimized(False)
print(cv.useOptimized())
e3 = time.time()  # method 2: use the time module
for i in range(5, 49, 2):
    img = cv.medianBlur(img, i)
e4 = time.time()
t = e4 - e3
print(t)
Example #22
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2, img2, mask=mask)

# Put logo in ROI and modify the main image
dst = cv2.add(img1_bg, img2_fg)
img1[0:rows, 0:cols] = dst

cv2.imshow('bitwise', img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
Default Optimization in OpenCV
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_core/py_optimization/py_optimization.html
'''

cv2.useOptimized()  # check if optimization is enabled
print(cv2.useOptimized())
res1 = cv2.medianBlur(img, 49)
# optimization is disabled
cv2.setUseOptimized(False)
cv2.useOptimized()
print(cv2.useOptimized())
res1 = cv2.medianBlur(img, 49)

cv2.imshow('res1', res1)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
Measuring Performance with OpenCV
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_core/py_optimization/py_optimization.html
'''
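
# A small sketch of the measurement described above, using Python's timeit module
# instead of IPython's %timeit since this is a plain script (assumption: the same
# `img` used earlier in this snippet is still in scope):
import timeit
per_call = timeit.timeit(lambda: cv2.medianBlur(img, 49), number=10) / 10
print('medianBlur(49): %.4f s per call' % per_call)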
Example #23
imgName = 'img.jpg'
dataFolder = '../DATA/'
img = cv2.imread(dataFolder + imgName, 1)

f = cv2.getTickFrequency(
)  #Frequency of clock-cycles (clock-cycles-num/second)

enableOpt = raw_input('Use optimization (yes/no): ')

if (enableOpt == 'yes'):
    cv2.setUseOptimized(True)
else:
    cv2.setUseOptimized(False)

print "Optimization: " + str(cv2.useOptimized())  #Optimization setting

e1 = cv2.getTickCount()  #Number of clock-cycles when execution started
t1 = time.time()  #Time when execution started

#Functions - mesuring execution time
for i in xrange(5, 49, 2):
    img1 = cv2.medianBlur(img, i)
print "NumZero: " + str(np.count_nonzero(img))

e2 = cv2.getTickCount()  #Number of clock-cycles when execution ended
t2 = time.time()  #Time when execution ended

time = (e2 - e1) / f  #Time of execution in seconds
print "Time: (" + str(e2) + "-" + str(e1) + ")" + "/" + str(f) + " = " + str(
    time)
Example #24
import cv2
from threading import Thread
import numpy as np
from time import sleep
import enum
import time
import math
import threading

print(cv2.useOptimized(), "use optimized")


class targetRegion(enum.Enum):
    topLeft = 0
    top = 1
    topRight = 2
    right = 3
    bottomRight = 4
    bottom = 5
    bottomLeft = 6
    left = 7
    center = 8


class tapePos:
    def __init__(self, qu):
        self.qu = qu
        self.img = None
        self.exposure = 157  #ms #ON CORAL
        self.white_balance = 100  #CHANGE #ON CORAL
        self.hue = (20, 255)  #Pixel
Example #25
 def __init__(self, parent, fps=15, height=240,width=320):
     wx.Frame.__init__(self, parent)
     self.SetName("Mask Tester Controller")
     
     #Boolean to handle exiting CV2 IMSHOW window
     self.exists = True
     
     #Colors for applying the mask
     self.upper_color = self.color_RGB2HSV([0,0,0])
     self.lower_color = self.color_RGB2HSV([0,0,0])
     
     self.cap = cv2.VideoCapture(0)
     cv2.useOptimized()
     self.cap.set(3, height)
     self.cap.set(4, width)
     
     ## GUI CODE BELOW ##
     MainBox    = wx.BoxSizer(wx.HORIZONTAL)
     
     StaticBox1 = wx.StaticBox(self,label='Lower Color')
     StaticBox2 = wx.StaticBox(self,label='Upper Color')
     LowerBox  = wx.StaticBoxSizer(StaticBox1, wx.VERTICAL)
     LowerSizer = wx.FlexGridSizer(3,3,10,15)
     UpperBox  = wx.StaticBoxSizer(StaticBox2, wx.VERTICAL)
     UpperSizer = wx.FlexGridSizer(3,3,10,15)
     
     ComboBoxSizer = wx.FlexGridSizer(1,2,10,15)
     self.ComboBoxText = wx.StaticText(self,id=1,label="Image from:\t")
     self.Choices  = ["Frame","HSV","Mask","Res"]
     self.ComboBox = wx.ComboBox(self,value=self.Choices[0],choices=self.Choices,style=wx.CB_READONLY)
     ComboBoxSizer.Add(self.ComboBoxText,flag=wx.ALIGN_CENTER)
     ComboBoxSizer.Add(self.ComboBox,flag=wx.ALIGN_CENTER)
     
     self.R_lowerTextLabel = wx.StaticText(self,id=1,label="R")
     self.G_lowerTextLabel = wx.StaticText(self,id=2,label="G")
     self.B_lowerTextLabel = wx.StaticText(self,id=3,label="B")
     self.R_lower = wx.Slider(self,id=1,value=0,minValue=0,maxValue=255,size=(200,20),style=wx.SL_HORIZONTAL)
     self.G_lower = wx.Slider(self,id=2,value=0,minValue=0,maxValue=255,size=(200,20),style=wx.SL_HORIZONTAL)
     self.B_lower = wx.Slider(self,id=3,value=0,minValue=0,maxValue=255,size=(200,20),style=wx.SL_HORIZONTAL)
     self.R_lowerText = wx.StaticText(self,id=1,label="0")
     self.G_lowerText = wx.StaticText(self,id=2,label="0")
     self.B_lowerText = wx.StaticText(self,id=3,label="0")
     
     LowerSizer.Add(self.R_lowerTextLabel,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.R_lower,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.R_lowerText,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.G_lowerTextLabel,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.G_lower,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.G_lowerText,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.B_lowerTextLabel,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.B_lower,flag=wx.ALIGN_CENTER)
     LowerSizer.Add(self.B_lowerText,flag=wx.ALIGN_CENTER)
     
     self.R_upperTextLabel = wx.StaticText(self,id=1,label="R")
     self.G_upperTextLabel = wx.StaticText(self,id=2,label="G")
     self.B_upperTextLabel = wx.StaticText(self,id=3,label="B")
     self.R_upper = wx.Slider(self,id=4,value=0,minValue=0,maxValue=255,size=(200,20),style=wx.SL_HORIZONTAL)
     self.G_upper = wx.Slider(self,id=5,value=0,minValue=0,maxValue=255,size=(200,20),style=wx.SL_HORIZONTAL)
     self.B_upper = wx.Slider(self,id=6,value=0,minValue=0,maxValue=255,size=(200,20),style=wx.SL_HORIZONTAL)
     self.R_upperText = wx.StaticText(self,id=4,label="0")
     self.G_upperText = wx.StaticText(self,id=5,label="0")
     self.B_upperText = wx.StaticText(self,id=6,label="0")
     
     UpperSizer.Add(self.R_upperTextLabel,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.R_upper,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.R_upperText,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.G_upperTextLabel,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.G_upper,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.G_upperText,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.B_upperTextLabel,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.B_upper,flag=wx.ALIGN_CENTER)
     UpperSizer.Add(self.B_upperText,flag=wx.ALIGN_CENTER)
     
     LowerBox.Add(LowerSizer,flag=wx.ALIGN_CENTER)
     UpperBox.Add(UpperSizer,flag=wx.ALIGN_CENTER)
     
     ControlSizer = wx.FlexGridSizer(3,1,1,1)
     ControlSizer.Add(ComboBoxSizer,flag=wx.ALIGN_CENTER)
     ControlSizer.Add(LowerBox,flag=wx.ALIGN_CENTER)
     ControlSizer.Add(UpperBox,flag=wx.ALIGN_CENTER)
     
     MainBox.Add(ControlSizer,flag=wx.ALIGN_CENTER)
     
     self.sliders = [self.R_lower,self.G_lower,self.B_lower,
                     self.R_upper,self.G_upper,self.B_upper]
     for slider in self.sliders:
         slider.Bind(wx.EVT_SLIDER,self.OnAdjust)
     
     self.ComboBox.Bind(wx.EVT_COMBOBOX, self.OnComboBox)
     self.Bind(wx.EVT_CLOSE, self.OnExit)
     
     self.SetSizer(MainBox)
     self.Centre()
     self.Fit()
     self.Show(True)
     self.GenerateFrame()
Example #26
File: Main.py Project: gunduru/EOT
def main():

    timeInterval = int(
        ConfigManager.ConfigSectionMap("Basic_Conf")['timeinterval']
    )  ##Assign time interval into variable from config/config.ini file
    threading.Timer(
        timeInterval,
        main).start()  ## called main() function every 'timeInterval' seconds

    cv2.useOptimized()
    blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN(
    )  # attempt KNN training

    if blnKNNTrainingSuccessful == False:  # if KNN training was not successful
        print(
            "\nerror: KNN traning was not successful\n")  # show error message
        return  # and exit program
    # end if

    photo_path = CamManager.get_image()  ##open camera and capture image
    imgOriginalScene = cv2.imread(
        "captured_img/last.png")  # open captured image from directory

    if imgOriginalScene is None:  # if image was not read successfully
        print("\nerror: image not read from file \n\n"
              )  # print error message to std out
        os.system("pause")  # pause so user can see error message
        return  # and exit program
    # end if

    listOfPossiblePlates = DetectPlates.detectPlatesInScene(
        imgOriginalScene)  # detect plates

    listOfPossiblePlates = DetectChars.detectCharsInPlates(
        listOfPossiblePlates)  # detect chars in plates

    if len(listOfPossiblePlates) == 0:  # if no plates were found
        print(
            "\nno plates were detected\n")  # inform user no plates were found
    else:  # else
        # if we get in here, the list of possible plates has at least one plate

        # sort the list of possible plates in DESCENDING order (most number of chars to least number of chars)
        listOfPossiblePlates.sort(
            key=lambda possiblePlate: len(possiblePlate.strChars),
            reverse=True)

        # suppose the plate with the most recognized chars (the first plate in sorted by string length descending order) is the actual plate
        licPlate = listOfPossiblePlates[0]

        if len(licPlate.strChars) == 0:  # if no chars were found in the plate
            print("\nno characters were detected\n\n")  # show message
            return  # and exit program
        # end if

        print("\ncharacters read from image = " + licPlate.strChars +
              "\n")  # write license plate text to std out
        print("----------------------------------------")

        mTime = datetime.now().strftime(
            '%Y-%m-%d %H:%M:%S'
        )  ##assign timestamp of measurement to variable 'mTime'
        DBManager.insert_data(
            mTime, int(licPlate.strChars), photo_path
        )  ##insert data into database; timestamp - measured data - photo path

    # end if else

    return
Example #27
# -*- coding: utf-8 -*-
import cv2
import numpy as np

img1 = cv2.imread('../assets/lena.jpg')

e1 = cv2.getTickCount()
for i in xrange(5,49,2):
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()

t = (e2 - e1)/cv2.getTickFrequency()
print "cv2.useOptimized()=",cv2.useOptimized(),", time=",t

cv2.setUseOptimized(False)

e1 = cv2.getTickCount()
for i in xrange(5,49,2):
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()

t = (e2 - e1)/cv2.getTickFrequency()
print "cv2.useOptimized()=",cv2.useOptimized(),", time=",t
Example #28
# cv2.getTickCount function returns the number of clock-cycles after a reference event (like the moment machine was switched ON)
# to the moment this function is called. So if you call it before and after the function execution, you get number of clock-cycles
# used to execute a function.
#
# cv2.getTickFrequency function returns the frequency of clock-cycles, or the number of clock-cycles per second. So to find the time
# of execution in seconds, you can do following:

import cv2

img1 = cv2.imread("pic.jpg")

e1 = cv2.getTickCount()
for i in xrange(5, 49, 2):
    img1 = cv2.medianBlur(img1, i)
e2 = cv2.getTickCount()
t = (e2 - e1) / cv2.getTickFrequency()
print t

print cv2.useOptimized()
# -*- coding: utf-8 -*-
import cv2
import numpy as np

'''
Default optimization in OpenCV
Optimization is enabled by default at compile time, so OpenCV normally runs the optimized code.
If you turn optimization off, only the slower, unoptimized code is executed.
Use cv2.useOptimized() to check whether optimization is enabled,
and cv2.setUseOptimized() to turn it on or off.
'''
# check if optimization is enabled
"""
In [5]: cv2.useOptimized()
Out[5]: True
In [6]: %timeit res = cv2.medianBlur(img,49)
10 loops, best of 3: 34.9 ms per loop
# Disable it
In [7]: cv2.setUseOptimized(False)
In [8]: cv2.useOptimized()
Out[8]: False
In [9]: %timeit res = cv2.medianBlur(img,49)
# with optimization enabled, the median blur runs about twice as fast
"""
print(cv2.useOptimized())
cv2.setUseOptimized(False)
print(cv2.useOptimized())
Example #30
# ----------------------#
cv2.imshow('tmp',img1)  #
cv2.waitKey(0)          #
# ----------------------#
e1 = cv2.getTickCount()
for i in xrange(5,49,2):
    # median blur
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
print t

# ----------------------#
cv2.imshow('tmp',img1)  #
cv2.waitKey(0)          #
# ----------------------#



# check if optimization is enabled
print cv2.useOptimized()

cv2.setUseOptimized(False)
print cv2.useOptimized()

e1 = cv2.getTickCount()
for i in xrange(5,49,2):
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
print t
Example #31
e2 = cv2.getTickCount()

t = (e2 - e1) / cv2.getTickFrequency()
print(t)

# measuring program running time in plain Python
start_time = time.time()
temp = 0
for i in range(1000):
    temp = temp * i

end_time = time.time()
print(end_time - start_time)
'''
Default optimization in OpenCV
    cv2.useOptimized() checks whether optimization is enabled
    cv2.setUseOptimized() turns optimization on or off
'''

print(cv2.useOptimized())  # check whether optimization is enabled
img = cv2.imread('logo.png')
e1 = cv2.getTickCount()
res = cv2.medianBlur(img, 49)
e2 = cv2.getTickCount()
print((e2 - e1) / cv2.getTickFrequency())

cv2.setUseOptimized(False)  # disable optimization
e1 = cv2.getTickCount()
res = cv2.medianBlur(img, 49)
e2 = cv2.getTickCount()
print((e2 - e1) / cv2.getTickFrequency())
Example #32
import cv2 as cv
import numpy as np

# Why is optimization needed? Because image processing performs an enormous number of operations.
# What we will use:
# cv.getTickCount
# cv.getTickFrequency

img = cv.imread("../1_arithmetic_operation/apple.jpg")
cv.setUseOptimized(True)  # turn the default optimization on

for i in range(0, 2):
    e1 = cv.getTickCount()  # start of the timed code
    for i in range(5, 49, 2):
        img = cv.medianBlur(img, i)
    e2 = cv.getTickCount()  # end of the timed code
    t = (e2 - e1) * 1000 / cv.getTickFrequency()  # total execution time
    print("Default OpenCV Optimization:", cv.useOptimized(), "\t", t, "ms")
    cv.setUseOptimized(False)  # turn the default optimization off
Example #33
    scipy.show_config();


    # See http://docs.scipy.org/doc/numpy/reference/generated/numpy.set_printoptions.html
    # We use 7 digits precision and suppress using scientific notation.
    np.set_printoptions(precision=7, suppress=True, \
                        threshold=70000, linewidth=4000);
                        #threshold=7000000, linewidth=4000);
                        #threshold=7000, linewidth=300);
                        #threshold=1000000, linewidth=3000);


    # Inspired from \OpenCV2-Python-Tutorials-master\source\py_tutorials\py_core\py_optimization

    # normally returns True - relates to using the SIMD extensions of x86: SSX, AVX
    common.DebugPrint("cv2.useOptimized() is %s" % str(cv2.useOptimized()));

    if False:
        cv2.setUseOptimized(True);
        cv2.useOptimized();

    """
    From http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#checkhardwaresupport
        CV_CPU_MMX - MMX
        CV_CPU_SSE - SSE
        CV_CPU_SSE2 - SSE 2
        CV_CPU_SSE3 - SSE 3
        CV_CPU_SSSE3 - SSSE 3
        CV_CPU_SSE4_1 - SSE 4.1
        CV_CPU_SSE4_2 - SSE 4.2
        CV_CPU_POPCNT - POPCOUNT
Example #34
import cv2 as cv
import numpy as np

img1 = cv.imread("Opencv/girl.png")
# cv.getTickCount: clock cycles since a reference point (similar role to time.time())
# cv.getTickFrequency: clock-cycle frequency, i.e. cycles per second

e1 = cv.getTickCount()
for i in range(5, 49, 2):
    img1 = cv.medianBlur(img1, i)  # median blur
e2 = cv.getTickCount()
time = (e2 - e1) / cv.getTickFrequency()
print(time)

# function optimization: returns whether optimization is enabled
print(cv.useOptimized())

# cv.setUseOptimized() enables optimization
# cv2.countNonZero() vs np.count_nonzero()
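# Quick sketch for the note above (assumption: the OpenCV optimization tutorial's
# claim that cv.countNonZero() is usually faster than np.count_nonzero() on
# single-channel arrays); both calls should return the same count:
gray = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
print(cv.countNonZero(gray), np.count_nonzero(gray))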
cv.imshow("Image", img1)
cv.waitKey(0)
cv.destroyAllWindows()
img = cv.imread('../../datas/images/fish.jpg')
# start timing
e1 = cv.getTickCount()

for i in range(5, 49, 2):
    img = cv.medianBlur(img, i)

# stop timing
e2 = cv.getTickCount()
# compute elapsed time
t = (e2 - e1) / cv.getTickFrequency()

print("operation time usage:", t)  # operation time usage: 0.31588224549033933

# check whether OpenCV optimization is already enabled
print(cv.useOptimized())  # True means it is enabled

# now disable optimization
cv.setUseOptimized(False)
# test again

# start timing
e1 = cv.getTickCount()

for i in range(5, 49, 2):
    img = cv.medianBlur(img, i)

# stop timing
e2 = cv.getTickCount()
# compute elapsed time
t = (e2 - e1) / cv.getTickFrequency()
Example #36
def main():
    if(SAVE_CSV):
        fp = open("output.log","w") 
    # is cv2 optimized
    if(not cv2.useOptimized()):
        cv2.setUseOptimized(True)
        logging.info("set optimized {0}".format(cv2.useOptimized()))

    # CNN spatial input image scaling coefficients
    in_width = args.sizex #YOLO input
    in_height = args.sizey
    # 1/alpha
    scale_factor = 1 / 255.0
    # RGB mean subtraction
    RGB_mean = (0,0,0)
    # swap RB channels
    swapRB = True

    # on object numbers
    total_in_screen = 0
    total_in = 0
    total_out = 0
    # track objects
    tracked_objects = []
    tracked_objs_in = []
    tracked_objs_out = []

    # capture the video stream
    cap = cv2.VideoCapture(args.video)    
    # load labels names
    labels = []
    with open(args.labels, 'r') as f:
        labels = [line.strip() for line in f.readlines()]
    # create random color space for labels
    COLORS = np.random.uniform(0, 255, size=(len(labels), 3))
    # customized label list for filtering
    filter_labels = ['person'] #labels
    # to improve timing set flags
    filter_label_active = [True if (itm in filter_labels) else False for itm in labels]
    for idx,itm in enumerate(labels):        
        logging.debug(itm)
        logging.debug(filter_label_active[idx])
    # read DNN into cv2 dnn format
    net = cv2.dnn.readNet(args.weights, args.config)
    # identify output layer of the NN
    output_layer = net.getUnconnectedOutLayersNames()
    vid_init = None
    vid_out=None
    while (True):        
        start_t = cv2.getTickCount()
        total_in_screen = 0
        # Capture frame-by-frame
        ret, frame = cap.read()
        # use follwoing line if Umat
        # ret, Oframe = cap.read()
        if not ret:
            logging.warning("failed to capture the frame")
            break
        # UMat should make things faster but is difficult to work with in python
        # should use C++ cuda
        #frame = cv2.UMat(Oframe)
        # resize the frame, convert it to grayscale, and blur it
        frame = cv2.resize(frame, (in_height,in_width))
        [frame_height, frame_width, *frame_rest] = frame.shape
        # use follwoing line if Umat
        #[frame_height, frame_width, *frame_rest] = Oframe.shape
        if(VID_OUT and not vid_init):
            vid_init = True
            # opencv write video to a file.
            # Define the codec and create VideoWriter object
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            vid_out = cv2.VideoWriter('output.avi',fourcc, 20.0, (frame_width,frame_height))
        # preprocessing the image to size for CNN spatial size
        blob = cv2.dnn.blobFromImage(
            frame, scale_factor, (in_width, in_height), RGB_mean, swapRB, crop=False)
        # set DNN inputs with scaled values
        net.setInput(blob)
        # DNN forward propagation. The newer async API and the optional [, outputs] argument can be used when only a few outputs are needed
        dnn_outputs = net.forward(output_layer)

        # output filtration        
        conf_threshold = 0.5
        nms_threshold = 0.4
        label_idxs = []
        boxes = []
        confidences = []
        # loop over each layer outputs
        for dnn_output in dnn_outputs:
            # loop over each detection of blobs
            for detection in dnn_output:
                # select the best label with highest output probability for blob
                # indices 0-3 are the box, index 4 is objectness; class scores start at index 5
                detection_val = detection[5:]
                label_idx = np.argmax(detection_val)
                if(not filter_label_active[label_idx]):
                    pass
                else:
                    confidence = detection_val[label_idx]
                    if confidence > conf_threshold:
                        # add to total persons
                        total_in_screen+=1
                        # scale the bounding box to frame
                        (cx,cy,w,h) = map(int,detection[0:4]*np.array([frame_width,frame_height, frame_width,frame_height]))
                        x = int(cx - w / 2)
                        y = int(cy - h / 2)
                        
                        # add predictions to lists for NMS normalization
                        label_idxs.append(label_idx)
                        confidences.append(float(confidence))
                        boxes.append([x, y, w, h])                        

        # non-maxima suppression to suppress overlapping blobs/boxes
        detects_normalized = cv2.dnn.NMSBoxes(
            boxes, confidences, conf_threshold, nms_threshold)
        # loop over detections left after normalization
        for i in detects_normalized:
            i = i[0]
            [x,y,w,h,*rest] = boxes[i]
            label = str(labels[label_idxs[i]])
            color = COLORS[label_idxs[i]]
            # recompute the box center here; cx/cy would otherwise be stale values
            # left over from the detection loop above
            cx, cy = x + w // 2, y + h // 2
            # mark center point of the object
            cv2.circle(frame, (int(cx), int(cy)), 1, (0, 0, 255), 5)
            # draw a rectangle
            cv2.rectangle(frame, (x, y), (x+w, y+h), color, 1)
            # draw label
            cv2.putText(frame, label, (x, y-15),cv2.FONT_HERSHEY_TRIPLEX, 0.4, color, 1)
            if(SAVE_CSV):
                fp.write("{},{},{},{},{},{}\n".format(
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), label, cx, cy, w, h))
            # tracking
            # if no object is there
            if(TRACK):
                #tracking variables
                max_move = frame_width//8
                # divide the frame by 2 on x direction
                margin_x = frame_width //2
                margin_y = frame_height
                obj_idx = 0
                if (len(tracked_objects)==0):
                    tracked_objects.append(Track_Object(label_idxs[i], cx,cy))
                # if objects are available
                else:
                    match_found = False
                    for idx,track_object in enumerate(tracked_objects):
                        # check objects that were close for a match
                        dir_x = track_object.last_loc_x - cx
                        if (abs(dir_x)<max_move):
                            dir_y = track_object.last_loc_y - cy
                            if (abs(dir_y)<max_move):
                                track_object.dir_x=round(dir_x,2)
                                track_object.dir_y=round(dir_y,2)
                                track_object.last_loc_x=cx
                                track_object.last_loc_y=cy
                                obj_idx = idx
                                match_found = True
                    if(not match_found):
                        obj_idx = len(tracked_objects)
                        tracked_objects.append(Track_Object(label_idxs[i], cx,cy))
                # print additional details about tracking and inward outward counts
                #total_in = 0
                #total_out = 0
                for idx,track_object in enumerate(tracked_objects):
                    cv2.putText(frame, str(idx), (cx+15, cy-+15),cv2.FONT_HERSHEY_TRIPLEX, 0.4, color, 1)
                    track_details = "x` :{1}   y`:{2}".format(idx,track_object.dir_x, track_object.dir_y)
                    cv2.putText(frame, track_details, (x, y-5),cv2.FONT_HERSHEY_TRIPLEX, 0.4, color, 1)
                    logging.debug(str(idx))
                    logging.debug(track_details)
                    track_details = "x  :{1}   y :{2}".format(idx,track_object.last_loc_x, track_object.last_loc_y)
                    logging.debug(track_details)
                    if(track_object.last_loc_x < margin_x and track_object.last_loc_x+track_object.dir_x > margin_x ):
                        # prevent double entry object already in cannot go again in
                        if(idx not in tracked_objs_in):
                            tracked_objs_in.append(idx)
                            if(idx in tracked_objs_out):
                                tracked_objs_out.remove(idx)
                    elif(track_object.last_loc_x > margin_x and track_object.last_loc_x+track_object.dir_x < margin_x ):
                        # prevent double entry
                        if(idx not in tracked_objs_out):
                            tracked_objs_out.append(idx)
                            if(idx in tracked_objs_in):
                                tracked_objs_in.remove(idx)
                    else:
                        # all other objects that do not cross the boundary or are stationary
                        pass
                    total_in = len(tracked_objs_in)
                    total_out = len(tracked_objs_out)

                # draw a in out margin
                cv2.line(frame, (margin_x,0),(margin_x, margin_y), (0,0,255), 1)
                    

        # fps calculation
        fps = 1/((cv2.getTickCount()-start_t)/cv2.getTickFrequency())        

        # print values on frame
        text_start_x = frame_width - 100
        text_start_y = 30
        text_seperation = 20
        cv2.putText(frame, "Total: {}".format(str(total_in_screen)), 
            (text_start_x, text_start_y+text_seperation*1),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 0, 0), 2)
        if(TRACK):
            cv2.putText(frame, "Inward: {}".format(str(total_in)), 
                (text_start_x, text_start_y+text_seperation*2),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 0, 0), 2)
            cv2.putText(frame, "Outward: {}".format(str(total_out)), 
                (text_start_x, text_start_y+text_seperation*3),
                        cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 0, 0), 2)
        cv2.putText(frame, "FPS: {}".format(str(round(fps,2))), 
            (text_start_x, text_start_y+text_seperation*4),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 0, 0), 2)
        cv2.imshow("Live Feed", frame)
        if(VID_OUT):
            vid_out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # clean up: release the capture and destroy opencv windows
    logging.info("cleaning up the app")
    if(SAVE_CSV):
        fp.close()
    cap.release()
    if (VID_OUT and vid_init):
        vid_out.release()
    cv2.destroyAllWindows()
Example #37
import cv2
import numpy as np
import time
import timeit

img1 = cv2.imread('Images\messi.jpg')

time1 = time.time()
e1 = cv2.getTickCount(
)  # Gives the no. of clock cycles till this function call
for i in xrange(5, 49, 2):
    img1 = cv2.medianBlur(img1, i)
e2 = cv2.getTickCount()
time2 = time.time()
t = (e2 - e1
     ) / cv2.getTickFrequency()  # Gives the number of clock cycles per second
print t  # So it will print the execution time of above function
print time2 - time1
print time1
print cv2.useOptimized()  # Check if optimization is enabled
time3 = time.time()
res = cv2.medianBlur(img1, 49)
time4 = time.time()
print "Optimized: " + str(time4 - time3)
cv2.setUseOptimized(False)
print cv2.useOptimized()
time3 = time.time()
res = cv2.medianBlur(img1, 49)
time4 = time.time()
print "Unoptimized: " + str(time4 - time3)
Example #38
#!/usr/bin/env python
#!encoding=utf8

import cv2
import numpy as np

def test_medianBlur(img):
    """docstring for test_medianBlur"""
    e1 = cv2.getTickCount()

    for i in xrange(5, 49, 2):
        img = cv2.medianBlur(img, i)
        e2 = cv2.getTickCount()
        t = (e2 - e1) / cv2.getTickFrequency()
        print t

        # cv2.imshow('medianBlur', img1)
        # cv2.waitKey()


print 'Default useOptimized: ', cv2.useOptimized()
img = cv2.imread('demo2.png')

cv2.setUseOptimized(False)
print 'useOptimized: ', cv2.useOptimized()
test_medianBlur(np.copy(img))

cv2.setUseOptimized(True)
print 'useOptimized: ', cv2.useOptimized()
test_medianBlur(np.copy(img))
Example #39
        # extract the logo foreground; everything else is black
        img2_fg = cv2.bitwise_and(img2, img2, mask=mask)

        # overlay
        dst = cv2.add(img1_bg, img2_fg)
        img1[0:rows, 0:cols] = dst

        cv2.imshow("img2", img2)
        cv2.imshow("mask", mask)
        cv2.imshow("mask_inv", mask_inv)
        cv2.imshow("img1_bg", img1_bg)
        cv2.imshow("img2_fg", img2_fg)

        cv2.imshow("img1", img1)

        cv2.waitKey(0)

    # performance optimization
    if 1:
        if cv2.useOptimized() == False:
            cv2.setUseOptimized(True)

        img = cv2.imread("./fengjing.jpg", cv2.IMREAD_COLOR)

        print("cv2.useOptimized() = ", cv2.useOptimized())

        time_1 = time.time()
        res = cv2.medianBlur(img, 49)
        time_2 = time.time()
        print("diff time = ", time_2 - time_1)
Example #40
'''
e1 = cv2.getTickCount()
# your code execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
'''

img1 = cv2.imread('messi.png')

e1 = cv2.getTickCount()
for i in xrange(5,49,2):
    img1 = cv2.medianBlur(img1,i)
e2 = cv2.getTickCount()
t = (e2 - e1)/cv2.getTickFrequency()
print t

# check if optimization is enabled
cv2.useOptimized()
# True

# %timeit res = cv2.medianBlur(img,49)
# 10 loops, best of 3: 34.9 ms per loop

# Disable it
cv2.setUseOptimized(False)
cv2.useOptimized()
# False

# %timeit res = cv2.medianBlur(img,49)
# 10 loops, best of 3: 64.1 ms per loop
    def run(self):

        # variables for syncing between threads
        global frames
        global timestamps
        global frame_width
        global frame_height
        global trackingupdate
        global quality
        global outputQuality # of the cropped video

        self.processed_frames = 0
        self.success_frames = 0
        self.error_frames = 0

        # phone and screen constants
        self.marker_size = 66 #rectangular (mm)
        self.secondary_marker_size = 20
        self.screen_height = int(config['DEFAULT']['screenHeightMM']) #mm
        self.screen_width = int(config['DEFAULT']['screenHeightMM']) #mm
        self.screen_pixel_height = int(config['DEFAULT']['screenHeightPX'])
        self.screen_pixel_width = int(config['DEFAULT']['screenHeightPX'])

        # precomputed calibration constants for the eyetracker
        self.dist = numpy.array([[0.05357947, -0.22872005, -0.00118557, -0.00126952, 0.2067489 ]])
        self.cameraMatrix = numpy.array([[1.12585498e+03, 0.00000000e+00, 9.34478069e+02], [0.00000000e+00, 1.10135217e+03, 5.84380561e+02], [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
        self.mtx = numpy.array([[1.12825274e+03, 0.00000000e+00, 9.35684715e+02], [0.00000000e+00, 1.10801151e+03, 5.86151765e+02], [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
        self.corner_refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        directory = './out/{0}-{1}/'.format(self.participant, datetime.datetime.now().strftime("%Y-%m-%d"))
        #directory = './out/2018-11-15-12-38/'
        videoFilename_processed = 'gaze_video_processed.avi'
        videoFilename_raw = 'gaze_video_raw.avi'
        fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
        firstFrame = frames.get()
        inputHeight, inputWidth, c = firstFrame.shape
        out_processed = cv2.VideoWriter(directory + videoFilename_processed,fourcc, 25, (self.screen_pixel_width/outputQuality,self.screen_pixel_height/outputQuality))
        out_raw = cv2.VideoWriter(directory + videoFilename_raw,fourcc, 25, (inputWidth,inputHeight))
        cv2.useOptimized()

        # open log files
        computedFrames = open(directory + "/computed_frames.txt", "a+")
        log = open(directory + 'log.txt', 'a+')

        self.counter = 0 # used to execute functions only for every xth frame
        self.logcounter = 0
        while not (frames.qsize() == 0 and recording_flag == False):
            if(not frames.empty()):
                self.current_frame = frames.get()

                # Tobii timestamp format: yyyy-mm-dd-hh-mm-ss-ffffff
                self.current_timestamp, self.current_absolute_timestamp = timestamps.get()

                self.raw_frame = self.current_frame
                self.current_frame = cv2.undistort(self.current_frame, self.mtx, self.dist, None, self.cameraMatrix)
                frame_gray = cv2.cvtColor(self.current_frame, cv2.COLOR_BGR2GRAY) # aruco.detectMarkers() requires gray image
                markers = cv2.aruco.detectMarkers(frame_gray,dictionary)

                self.processed_frames += 1
                if len(markers[0]) > 0:

                    # array order of corners is clockwise
                    corners, ids, rejectedImgPoints = markers

                    # get index of marker with id == 0
                    # bottom left == id1, bottom right = id3
                    try:
                        id0 = ids.tolist().index([0])
                    except ValueError:  # list.index raises ValueError when id 0 is absent
                        # No marker with id == 0 detected
                        self.error_frames += 1
                        # print('markers detected but the main one is not amongst them.')
                        logstr = str(self.current_absolute_timestamp) + "; " + str(self.success_frames) + ' / ' + str(self.processed_frames) + '; ' + str(self.error_frames) + ' errors' + '; ' + str((float(self.error_frames) / float(self.processed_frames)) * float(100)) + '%; (' + str(frames.qsize()) + ' buffered)'
                        self.logcounter += 1
                        if self.logcounter > 20:
                            log.write(logstr)
                            self.logcounter = 0
                        print(logstr)

                        self.counter += 1
                        if self.counter > 200:
                            self.counter = 0
                            cv2.imshow('frame',self.raw_frame)
                            cv2.waitKey(100)
                        continue

                    # Refine the detected corners of the main marker to sub-pixel accuracy
                    # before estimating its pose relative to the calibrated camera.
                    term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
                    corners_id0 = cv2.cornerSubPix(frame_gray, corners[id0], (5, 5), (-1, -1), term)
                    rvec, tvec, objPoints = cv2.aruco.estimatePoseSingleMarkers(corners_id0, self.marker_size, self.cameraMatrix, self.dist) #tvec is the translation vector of the markers center

                    # corner estimation based on single big marker on the top of the screen
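                    # The quad below is expressed in the main marker's own coordinate frame
                    # (origin at the marker centre, units in mm) and projected into image
                    # coordinates with cv2.projectPoints.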
                    screenCorners = numpy.float32([
                        [-self.marker_size/2, -self.marker_size/2 - 1, 0],
                        [self.marker_size/2, -self.marker_size/2 - 1, 0],
                        [-self.marker_size/2,-self.marker_size/2 - (self.screen_height + 15) - 1,0],
                        [self.marker_size/2,-self.marker_size/2 - (self.screen_height + 15) - 1,0]
                    ]).reshape(-1,3)
                    imgpts, jac = cv2.projectPoints(screenCorners, rvec, tvec, self.mtx, self.dist) #world coordinates to camera coordinates
                    #self.current_frame = cv2.aruco.drawAxis(self.current_frame, self.cameraMatrix, self.dist, rvec, tvec, 100)

                    screen_top_left = imgpts[0][0]
                    screen_top_right = imgpts[1][0]
                    screen_bottom_left = imgpts[2][0]
                    screen_bottom_right = imgpts[3][0]

                    id1 = None
                    id3 = None

                    # check if secondary markers were detected
                    try:
                        id1 = ids.tolist().index([1])
                        secondary_id = id1
                    except ValueError:
                        logstr = str(self.current_absolute_timestamp) + "; marker 1 not detected"
                        log.write(logstr)

                    try:
                        id3 = ids.tolist().index([3])
                        secondary_id = id3
                    except ValueError:
                        logstr = str(self.current_absolute_timestamp) + "; marker 3 not detected"
                        log.write(logstr)

                    # if one of them is detected: compute vector intersection
                    if id1 is not None or id3 is not None:
                        corners_secondary = cv2.cornerSubPix(frame_gray, corners[secondary_id], (5, 5), (-1, -1), term)
                        rvec, tvec, objPoints = cv2.aruco.estimatePoseSingleMarkers(corners_secondary, self.secondary_marker_size, self.cameraMatrix, self.dist)

                        screenCorners = numpy.float32([ # bottom markers are upsidedown
                            [self.secondary_marker_size/2, -self.secondary_marker_size/2 - 1, 0],
                            [-self.secondary_marker_size/2, -self.secondary_marker_size/2 - 1, 0]
                        ]).reshape(-1,3)
                        imgpts, jac = cv2.projectPoints(screenCorners, rvec, tvec, self.mtx, self.dist) #world coordinates to camera coordinates

                        secondary_marker_top_left = imgpts[0][0]
                        secondary_marker_top_right = imgpts[1][0]

                        # use the marker coordinates directly when all markers were detected
                        if id3 is not None:
                            screen_bottom_left = secondary_marker_top_left
                            screen_bottom_right = self.intersection(corners_id0[0][1],corners_id0[0][2],secondary_marker_top_left,secondary_marker_top_right)
                        else:
                            screen_bottom_right = secondary_marker_top_right
                            screen_bottom_left = self.intersection(corners_id0[0][0],corners_id0[0][3],secondary_marker_top_right,secondary_marker_top_left)

                        #self.current_frame = cv2.aruco.drawAxis(self.current_frame, self.cameraMatrix, self.dist, rvec, tvec, 100)

                    # self.drawCircle(self.current_frame, screen_top_left[0], screen_top_left[1])
                    # self.drawCircle(self.current_frame, screen_top_right[0], screen_top_right[1])
                    # self.drawCircle(self.current_frame, screen_bottom_left[0], screen_bottom_left[1])
                    # self.drawCircle(self.current_frame, screen_bottom_right[0], screen_bottom_right[1])

                    #Transform perspective according to QR code corner coordinates
                    pts1 = numpy.float32([screen_top_left, screen_top_right, screen_bottom_left, screen_bottom_right])
                    pts2 = numpy.float32([[0,0],[self.screen_pixel_width/outputQuality,0],[0,self.screen_pixel_height/outputQuality],[self.screen_pixel_width/outputQuality,self.screen_pixel_height/outputQuality]])

                    M = cv2.getPerspectiveTransform(pts1,pts2)
                    self.current_frame = cv2.warpPerspective(
                        self.current_frame,
                        M,
                        (int(self.screen_pixel_width/outputQuality),
                         int(self.screen_pixel_height/outputQuality))  # dsize must be integer
                    )
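                    # Hand the timestamps and the perspective matrix to the shared trackingupdate
                    # queue (presumably consumed by the gaze-mapping side of the pipeline).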

                    trackingupdate.put((self.current_timestamp, self.current_absolute_timestamp, M))

                    computedFrames.write(
                        str(self.current_timestamp) + "; " +
                        self.current_absolute_timestamp + "; " +
                        str(screen_top_left) +  '; ' +
                        str(screen_top_right) + '; ' +
                        str(screen_bottom_right) +  '; ' +
                        str(screen_bottom_left) + '; ' +
                        str(M) + "\n"
                    )

                    out_processed.write(self.current_frame)

                    frames.task_done()
                    timestamps.task_done()
                    trackingupdate.task_done()

                    self.success_frames += 1

                else:
                    #log frames without detected marker
                    log.write(str(self.current_absolute_timestamp) + "; no markers detected; " + str(self.current_timestamp) + '\n')

                    #command line feedback for success rate
                    self.error_frames +=1

                out_raw.write(self.raw_frame)
                logstr = str(self.current_absolute_timestamp) + "; " + str(self.success_frames) + ' / ' + str(self.processed_frames) + '; ' + str(self.error_frames) + ' errors' + '; ' + str((float(self.error_frames) / float(self.processed_frames)) * float(100)) + '%; (' + str(frames.qsize()) + ' buffered)'
                print(logstr)

                self.logcounter += 1
                if self.logcounter > 20:
                    log.write(logstr)
                    self.logcounter = 0

                self.counter += 1
                if self.counter > 200:
                    self.counter = 0
                    cv2.imshow('frame', self.raw_frame)
                    cv2.waitKey(100)

        computedFrames.close()
        log.close()

        out_processed.release()
        out_raw.release()
        cv2.destroyAllWindows()
        sys.exit(0)
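
For reference, calibration constants like self.mtx, self.dist and self.cameraMatrix in the example above are typically produced once with OpenCV's chessboard calibration. A minimal sketch of that workflow follows; the image folder and board size are hypothetical, not taken from this project:

import glob

import cv2
import numpy

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
board_cols, board_rows = 9, 6  # inner chessboard corners (hypothetical board)

# one set of 3D object points for the board, reused for every calibration image
objp = numpy.zeros((board_rows * board_cols, 3), numpy.float32)
objp[:, :2] = numpy.mgrid[0:board_cols, 0:board_rows].T.reshape(-1, 2)

objpoints, imgpoints = [], []
for fname in glob.glob('calibration/*.png'):  # hypothetical folder of chessboard photos
    gray = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, (board_cols, board_rows), None)
    if found:
        objpoints.append(objp)
        imgpoints.append(cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria))

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)
print('camera matrix:')
print(mtx)
print('distortion coefficients:')
print(dist)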