Code Example #1
    def draw(self, q):
        # Requires `import time` and `import cv2` at module level; `color` is
        # assumed to be a module-level string (e.g. an ANSI colour prefix).
        previousTime = time.perf_counter()  # time.clock() was removed in Python 3.8
        #plt.ion()
        last_time = time.perf_counter()
        nt = 0   # total number of values received so far
        pn = 0   # value count at the previous redraw
        y = 0.0
        while True:
            # Drain the queue until it is empty or the redraw interval has elapsed.
            while not (q.empty() or
                       (time.perf_counter() - previousTime) > self.interval):
                y = q.get()
                self.mem.append(y)
                nt += 1

            #self.render_gym()
            #self.render_matplot()
            #self.render_cv()
            self.render_sim(y, y)

            fps = 1.0 / (time.perf_counter() - last_time)
            print(
                color, "loop running at {} fps, receiving {} values/s".format(
                    fps, (nt - pn) * fps))
            pn = nt
            last_time = time.perf_counter()
            previousTime = time.perf_counter()
        cv2.destroyAllWindows()
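The method above assumes an enclosing class that provides self.interval, self.mem, and render_sim(). A minimal usage sketch that feeds the loop from a producer thread; the class and producer names here are assumptions, not part of the original project:

from queue import Queue
from threading import Thread
import time

q = Queue()
viewer = Viewer()  # hypothetical class that defines draw(), interval, mem and render_sim()
Thread(target=viewer.draw, args=(q,), daemon=True).start()

# producer: push one sample every 10 ms for the draw loop to consume
for i in range(1000):
    q.put(float(i))
    time.sleep(0.01)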
Code Example #2
File: rostros.py  Project: dvsivle/coursesUMAKER
def working():
    '''
    Detect faces and eyes in a static image using Haar cascade classifiers.
    '''

    face_cascade = cv2.CascadeClassifier('files/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('files/haarcascade_eye.xml')

    frame = cv2.imread('woman1.png')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces: scale factor 1.1, at least 5 neighbours per candidate rectangle
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)

    for (x, y, w, h) in faces:

        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 10)

        # Search for eyes only inside the detected face region
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]

        eyes = eye_cascade.detectMultiScale(roi_gray)

        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 10)

    while True:
        # Display colour (BGR) image
        cv2.imshow("Woman", frame)

        # Display gray image
        cv2.imshow("Gray Woman", gray)

        # Quit when the 'Esc' key (code 27) is pressed
        k = cv2.waitKey(0) & 0xFF
        if k == 27:
            break

    cv2.destroyAllWindows()
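If the XML files under files/ are not present, the Haar cascade data bundled with the opencv-python wheel can be used instead; a minimal sketch, assuming opencv-python is installed:

import os
import cv2

# cv2.data.haarcascades points at the directory of cascade files shipped with opencv-python
face_cascade = cv2.CascadeClassifier(
    os.path.join(cv2.data.haarcascades, 'haarcascade_frontalface_default.xml'))
eye_cascade = cv2.CascadeClassifier(
    os.path.join(cv2.data.haarcascades, 'haarcascade_eye.xml'))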
Code Example #3
        # Input and output tensors of the frozen object-detection graph
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        while cap.isOpened():
            vector_image = load_image()
            vector_image_hd = np.expand_dims(vector_image, axis=0)
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: vector_image_hd})

            # visualization: draw the detected boxes and labels on the frame
            vis_util.visualize_boxes_and_labels_on_image_array(
                vector_image,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=5)

            cv2.imshow('Frame', vector_image)

            if cv2.waitKey(10) & 0xFF == ord('q'):
                break

cap.release()
cv2.destroyAllWindows()
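The fragment above relies on detection_graph, sess, cap, category_index, and load_image() being set up earlier in the script. A minimal sketch of that setup with TensorFlow 1.x and the TensorFlow Object Detection API; the file paths, camera index, and load_image helper are assumptions:

import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

PATH_TO_FROZEN_GRAPH = 'frozen_inference_graph.pb'  # assumed path
PATH_TO_LABELS = 'label_map.pbtxt'                  # assumed path

# load the frozen detection graph
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        od_graph_def.ParseFromString(fid.read())
        tf.import_graph_def(od_graph_def, name='')

# map numeric class ids to human-readable labels
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS)

cap = cv2.VideoCapture(0)  # assumed: default webcam

def load_image():
    # assumed helper: grab one frame and convert BGR (OpenCV) to RGB (model input)
    ret, frame = cap.read()
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        # the loop from Code Example #3 runs here
        ...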


Code Example #4
File: scans.py  Project: adiagr/hackcbs
def main():

    blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN()  # attempt KNN training

    if not blnKNNTrainingSuccessful:  # if KNN training was not successful
        print("\nerror: KNN training was not successful\n")  # show error message
        return  # and exit program
    # end if

    imgOriginalScene = cv2.imread("LicPlateImages/1.png")  # open image

    if imgOriginalScene is None:  # if image was not read successfully
        print("\nerror: image not read from file\n\n")  # print error message to std out
        os.system("pause")  # pause so user can see error message (Windows only)
        return  # and exit program
    # end if

    listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene)  # detect plates

    listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates)  # detect chars in plates

    cv2.imshow("imgOriginalScene", imgOriginalScene)  # show scene image

    if len(listOfPossiblePlates) == 0:  # if no plates were found
        print("\nno license plates were detected\n")  # inform user no plates were found
    else:
        # if we get in here, the list of possible plates has at least one plate

        # sort the list of possible plates in DESCENDING order (most number of chars to least number of chars)
        listOfPossiblePlates.sort(
            key=lambda possiblePlate: len(possiblePlate.strChars),
            reverse=True)

        # assume the plate with the most recognized chars (the first plate after sorting by string length in descending order) is the actual plate
        licPlate = listOfPossiblePlates[0]

        cv2.imshow("imgPlate", licPlate.imgPlate)  # show crop of plate and threshold of plate
        cv2.imshow("imgThresh", licPlate.imgThresh)

        if len(licPlate.strChars) == 0:  # if no chars were found in the plate
            print("\nno characters were detected\n\n")  # show message
            return  # and exit program
        # end if

        drawRedRectangleAroundPlate(imgOriginalScene, licPlate)  # draw red rectangle around plate

        #print("\nlicense plate read from image = " + licPlate.strChars + "\n")  # write license plate text to std out
        #print("----------------------------------------")

        writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)  # write license plate text on the image

        cv2.imshow("imgOriginalScene", imgOriginalScene)  # re-show scene image

        cv2.imwrite("imgOriginalScene.png", imgOriginalScene)  # write image out to file
        print("Detected number-plate: MCLRNF1")
        print("Pinging", end=" ")
        for i in range(5):  # print a dot every second for five seconds (needs `from time import sleep`)
            print(".", end=" ")
            sleep(1)
    # end if else

    cv2.waitKey(0)  # hold windows open until user presses a key
    cv2.destroyAllWindows()
    return
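drawRedRectangleAroundPlate() and writeLicensePlateCharsOnImage() are helpers defined elsewhere in the project. A minimal sketch of the rectangle helper, assuming licPlate stores the plate's rotated bounding rect in an rrLocationOfPlateInScene attribute (the attribute name is an assumption):

def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
    # corner points of the plate's rotated bounding box (attribute name assumed)
    pts = cv2.boxPoints(licPlate.rrLocationOfPlateInScene).astype(int)
    for i in range(4):  # connect the four corners with red lines (BGR colour (0, 0, 255))
        p1 = (int(pts[i][0]), int(pts[i][1]))
        p2 = (int(pts[(i + 1) % 4][0]), int(pts[(i + 1) % 4][1]))
        cv2.line(imgOriginalScene, p1, p2, (0, 0, 255), 2)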
Code Example #5
import numpy as np
import cv2

cap = cv2.VideoCapture('vtest.avi')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # stop when the video ends or a frame cannot be read
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()