def predictImages(self, imagePathArg, pred_stagesArg, croppedImagepath, numPlateOrg):
    """Detect license plates in a single image and save the labelled/cropped output.

    Args:
        imagePathArg: path of the image file to read with cv2.imread.
        pred_stagesArg: prediction-stage selector; only 2 (plate-only detection
            on the full image) is implemented in this method.
        croppedImagepath: directory/path handed to self.getBoundingBox for the
            cropped plate output.
        numPlateOrg: object exposing a TF graph as ``.model`` and a category
            index as ``.categoryIdx`` — assumed from usage below; TODO confirm.

    Returns:
        The labelled image produced by self.getBoundingBox.

    Raises:
        ValueError: if the image file cannot be read.
    """
    # create a session to perform inference
    with numPlateOrg.model.as_default():
        with tf.Session(graph=numPlateOrg.model) as sess:
            # create a predicter, used to predict plates and chars
            predicter = Predicter(numPlateOrg.model, sess, numPlateOrg.categoryIdx)
            # load the image from disk; cv2.imread returns None on failure, which
            # would otherwise surface as a cryptic cvtColor error below
            image = cv2.imread(imagePathArg)
            if image is None:
                raise ValueError("Could not read image \"{}\"".format(imagePathArg))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            if pred_stagesArg == 2:
                # Perform inference on the full image, and then select only the plate boxes
                boxes, scores, labels = predicter.predictPlates(image, preprocess=False)
                licensePlateFound_pred, plateBoxes_pred, plateScores_pred = self.plateFinder.findPlatesOnly(
                    boxes, scores, labels)
                imageLabelled = self.getBoundingBox(image, plateBoxes_pred, imagePathArg, croppedImagepath)
            else:
                # NOTE(review): only the 2-stage path is implemented in this method,
                # so the message says so (the old text claimed 1 or 2 were valid).
                print("[ERROR] --pred_stages {}. This method only supports pred_stages == 2".format(
                    pred_stagesArg))
                quit()
            return imageLabelled
categoryIdx = label_map_util.create_category_index(categories) # create a plateFinder plateFinder = PlateFinder(args["min_confidence"], categoryIdx, rejectPlates=False, charIOUMax=0.3) # create plate displayer plateDisplay = PlateDisplay() # create a session to perform inference with model.as_default(): with tf.Session(graph=model) as sess: # create a predicter, used to predict plates and chars predicter = Predicter(model, sess, categoryIdx) imagePaths = paths.list_images(args["imagePath"]) frameCnt = 0 start_time = time.time() # Loop over all the images for imagePath in imagePaths: frameCnt += 1 # load the image from disk print("[INFO] Loading image \"{}\"".format(imagePath)) image = cv2.imread(imagePath) (H, W) = image.shape[:2] # If prediction stages == 2, then perform prediction on full image, find the plates, crop the plates from the image, # and then perform prediction on the plate images
def predictImages(modelArg, labelsArg, imagePathArg, num_classesArg, min_confidenceArg, image_displayArg, pred_stagesArg):
    """Run plate + character detection over every image under a directory.

    Args:
        modelArg: path to a frozen TF1 graph (.pb) loaded with tf.gfile.GFile.
        labelsArg: path to the object-detection label map.
        imagePathArg: directory scanned with paths.list_images.
        num_classesArg: max number of classes for the label-map categories.
        min_confidenceArg: minimum score threshold passed to PlateFinder.
        image_displayArg: when truthy, show each labelled image with cv2.imshow.
        pred_stagesArg: 2 = detect plates then chars per plate box;
            1 = detect plates and chars in one pass; anything else exits.

    Returns:
        dict with "numPlates" and "plates" (plateText / plateBoxLoc / charBoxLocs).
        NOTE(review): these are built AFTER the loop from the loop-final
        variables, so they describe only the LAST image processed — looks
        unintended for a multi-image run; confirm with callers.
    """
    # initialize the model graph
    model = tf.Graph()
    # make this model the default graph while importing the serialized graph def
    with model.as_default():
        graphDef = tf.GraphDef()
        # load the frozen graph from disk
        with tf.gfile.GFile(modelArg, "rb") as f:
            serializedGraph = f.read()
            graphDef.ParseFromString(serializedGraph)
            tf.import_graph_def(graphDef, name="")
    # load the class labels from disk and build the category index
    labelMap = label_map_util.load_labelmap(labelsArg)
    categories = label_map_util.convert_label_map_to_categories(
        labelMap, max_num_classes=num_classesArg, use_display_name=True)
    categoryIdx = label_map_util.create_category_index(categories)
    # create a plateFinder and a plate displayer
    plateFinder = PlateFinder(min_confidenceArg, categoryIdx, rejectPlates=False, charIOUMax=0.3)
    plateDisplay = PlateDisplay()
    # create a session to perform inference
    with model.as_default():
        with tf.Session(graph=model) as sess:
            # create a predicter, used to predict plates and chars
            predicter = Predicter(model, sess, categoryIdx)
            imagePaths = paths.list_images(imagePathArg)
            frameCnt = 0
            start_time = time.time()
            # start empty so an empty directory returns zero plates instead of
            # hitting a NameError in the result loop below
            plateBoxes_pred, charTexts_pred, charBoxes_pred, charScores_pred = [], [], [], []
            # Loop over all the images
            for imagePath in imagePaths:
                frameCnt += 1
                # load the image from disk
                print("[INFO] Loading image \"{}\"".format(imagePath))
                image = cv2.imread(imagePath)
                if image is None:
                    # cv2.imread returns None for unreadable files; skip rather than crash
                    print("[WARN] Could not read image \"{}\"; skipping".format(imagePath))
                    continue
                (H, W) = image.shape[:2]
                if pred_stagesArg == 2:
                    # Stage 1 of 2: inference on the full image, keep only plate boxes
                    boxes, scores, labels = predicter.predictPlates(image, preprocess=True)
                    licensePlateFound_pred, plateBoxes_pred, plateScores_pred = plateFinder.findPlatesOnly(
                        boxes, scores, labels)
                    # Stage 2 of 2: find chars inside each plate box, then scrub them
                    # with 'processPlates' into final boxes, texts and scores
                    plates = []
                    for plateBox in plateBoxes_pred:
                        boxes, scores, labels = predicter.predictChars(image, plateBox)
                        chars = plateFinder.findCharsOnly(
                            boxes, scores, labels, plateBox, image.shape[0], image.shape[1])
                        if len(chars) > 0:
                            plates.append(chars)
                        else:
                            plates.append(None)
                    plateBoxes_pred, charTexts_pred, charBoxes_pred, charScores_pred, plateAverageScores_pred = plateFinder.processPlates(
                        plates, plateBoxes_pred, plateScores_pred)
                elif pred_stagesArg == 1:
                    # Single pass: predict the plates and characters together
                    boxes, scores, labels = predicter.predictPlates(image, preprocess=False)
                    licensePlateFound_pred, plateBoxes_pred, charTexts_pred, charBoxes_pred, charScores_pred, plateScores_pred = plateFinder.findPlates(
                        boxes, scores, labels)
                else:
                    print(
                        "[ERROR] --pred_stages {}. The number of prediction stages must be either 1 or 2"
                        .format(pred_stagesArg))
                    quit()
                # Print plate text
                for charText in charTexts_pred:
                    print(" Found: ", charText)
                # Display the full image with predicted plates and chars
                if image_displayArg:
                    imageLabelled = plateDisplay.labelImage(
                        image, plateBoxes_pred, charBoxes_pred, charTexts_pred)
                    cv2.imshow("Labelled Image", imageLabelled)
                    cv2.waitKey(0)
            # print some performance statistics
            curTime = time.time()
            processingTime = curTime - start_time
            fps = frameCnt / processingTime
            print(
                "[INFO] Processed {} frames in {:.2f} seconds. Frame rate: {:.2f} Hz"
                .format(frameCnt, processingTime, fps))
            # Build the JSON-style result from the (last image's) predictions
            platesReturn = []
            for i, plateBox in enumerate(plateBoxes_pred):
                platesReturn.append({
                    'plateText': charTexts_pred[i],
                    'plateBoxLoc': list(plateBox),
                    'charBoxLocs': list([list(x) for x in charBoxes_pred[i]])
                })
            return {"numPlates": len(platesReturn), "plates": platesReturn}
conf["output_image_path"], conf["output_video_path"]) # create a session to perform inference with model.as_default(): with tf.Session(graph=model) as sess: # initialize the points to the video files stream = cv2.VideoCapture(videoPath) videoWriter = None # Prepare findFrameWithPlate for a new video sequence plateLogFlag = False firstPlateFound = False # create a predicter, used to predict plates and chars predicter = Predicter(model, sess, categoryIdx) # loop over frames from the video file stream while True: # grab the next frame #(grabbed, image) = stream.read() # read the next frame from the video stream grabbed = stream.grab() # grab frame but do not decode # We have reached the end of the video clip. Save any residual plates to log # Remove all the plate history if not grabbed: if platesReadyForLog == True: plateDictBest = plateHistory.selectTheBestPlates() # generate output files, ie cropped Images, full image and log file plateHistory.writeToFile(plateDictBest,