def get_predictions(self, raw_strokes, n):
    strokes = self.preprocess_strokes(raw_strokes)
    img = self.draw_cv2(strokes, time_color=True)
    x = np.zeros((1, self.SIZE, self.SIZE, 1))
    x[0, :, :, 0] = img
    x = preprocess_input(x).astype(np.float32)
    y_pred = self.model.predict(x)[0]
    top_n = np.argsort(-y_pred)[:n]
    # use `idx` for the class index so it does not shadow the input tensor `x`
    return [{
        'id': i,
        'label': self.labels[idx],
        'probability': float(y_pred[idx])
    } for i, idx in enumerate(top_n)]
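# A self-contained sketch of the top-n extraction used above:
# np.argsort(-y_pred) orders class indices by descending probability and
# the slice keeps the n best. All values here are illustrative only.
import numpy as np

y_pred = np.array([0.1, 0.6, 0.05, 0.25])
labels = ['cat', 'dog', 'axe', 'tree']
top_n = np.argsort(-y_pred)[:2]
print([(labels[idx], float(y_pred[idx])) for idx in top_n])
# -> [('dog', 0.6), ('tree', 0.25)]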
def img_to_bin(image):
    image_grey = image.convert('L')
    image_arr = np.array(image_grey, 'uint8')
    faces = ImageUtility.FACE_CASCADE.detectMultiScale(
        image_arr,
        scaleFactor=ImageUtility.SCALE_FACTOR,
        minNeighbors=5)
    roi = []
    for (x, y, w, h) in faces:
        roi.append(np.array(image, 'uint8')[y:y + h, x:x + w])
    # return early when no face was detected
    if len(roi) == 0:
        return None
    if len(roi) == 1:
        roi = ImageUtility.resize_roi(image, x, y, w, h)
    roi = preprocess_input(roi)
    return Binary(pickle.dumps(roi, protocol=2), subtype=128)
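# A minimal round-trip sketch for the Binary payload above, assuming the
# value is destined for MongoDB (bson ships with pymongo); the roi array
# and the insert_one call are illustrative only.
import pickle

import numpy as np
from bson.binary import Binary

roi = np.zeros((224, 224, 3), dtype='uint8')        # placeholder face crop
blob = Binary(pickle.dumps(roi, protocol=2), subtype=128)
# collection.insert_one({'face': blob})             # hypothetical pymongo call
restored = pickle.loads(blob)                       # back to an ndarray
print(restored.shape)                               # -> (224, 224, 3)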
def detect_mask_in_frame(frame):
    frame = imutils.resize(frame, width=500)
    # convert the frame from BGR to grayscale for the cascade detector
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(40, 40),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )
    faces_dict = {"faces_list": [], "faces_rect": []}
    for rect in faces:
        (x, y, w, h) = rect
        face_frame = frame[y:y + h, x:x + w]
        # preprocess the cropped face before batching
        face_frame_prepared = preprocess_face_frame(face_frame)
        faces_dict["faces_list"].append(face_frame_prepared)
        faces_dict["faces_rect"].append(rect)
    if faces_dict["faces_list"]:
        faces_preprocessed = preprocess_input(np.array(faces_dict["faces_list"]))
        preds = model.predict(faces_preprocessed)
        for i, pred in enumerate(preds):
            mask_or_not, confidence = decode_prediction(pred)
            write_bb(mask_or_not, confidence, faces_dict["faces_rect"][i], frame)
    return frame
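# A minimal driver sketch for detect_mask_in_frame, assuming a webcam on
# device 0; face_detector, model, preprocess_face_frame, decode_prediction,
# and write_bb must already be defined, as the function above expects.
import cv2

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    annotated = detect_mask_in_frame(frame)
    cv2.imshow('mask detection', annotated)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break
cap.release()
cv2.destroyAllWindows()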
# timer and counters for the progress bar
total_time = 0.0
counter = 0
i = 0
start = time.time()  # start the timer

# loop in order to append all new mask image values
for image in os.listdir(new_with_dir):
    # every 10 increments, update the bar
    if counter % 10 == 0:
        progress_bar.bar_method(i + 1, 1000,
                                prefix='Loading Faces... ',
                                suffix='Complete',
                                length=50,
                                time=float(total_time))
    i += 1
    # load the specified image from the directory
    image_main = load_img(new_with_dir + image)
    # convert the image to a NumPy array
    image_main = img_to_array(image_main)
    # preprocess the input as expected by the MobileNetV2 model
    image_main = preprocess_input(image_main)
    # append the (image values, label) tuple to the data list
    mask_set.append((image_main, 'mask'))
    counter += 1  # increment the counter by 1
    end = time.time()  # end time
    # total elapsed time since the loop started
    total_time = float(end - start)
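# progress_bar.bar_method is not shown anywhere in this snippet; a minimal
# sketch of a compatible terminal progress bar, with the signature inferred
# from the call above (an assumption, not the actual helper).
def bar_method(iteration, total, prefix='', suffix='', length=50, time=0.0):
    filled = int(length * iteration // total)
    bar = '#' * filled + '-' * (length - filled)
    print(f'\r{prefix}|{bar}| {100 * iteration / total:.1f}% {suffix} '
          f'({time:.1f}s elapsed)', end='', flush=True)
    if iteration >= total:
        print()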
# -*- coding: utf-8 -*-
"""
Created on Sat Apr  6 15:51:57 2019

@author: tak
"""
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.python.keras.applications.mobilenet_v2 import decode_predictions
from tensorflow.python.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.python.keras.preprocessing.image import load_img
from tensorflow.python.keras.preprocessing.image import img_to_array
import numpy as np

# instantiate MobileNetV2 with its default ImageNet weights,
# inspect the architecture, and save the model to disk
model = MobileNetV2()
model.summary()
model.save('mobilenetv2.h5')

# load a test image at the network's expected 224x224 input size
img_dog = load_img('dog.jpg', target_size=(224, 224))
arr_dog = preprocess_input(img_to_array(img_dog))
arr_input = np.stack([arr_dog, ])  # add the batch dimension

# run inference and decode the ImageNet class probabilities
probs = model.predict(arr_input)
results = decode_predictions(probs)
print(results[0])
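# For reference: decode_predictions returns one list per image of
# (class_id, class_name, probability) tuples, highest probability first,
# so the print above yields something like (values illustrative only):
# [('n02099712', 'Labrador_retriever', 0.87),
#  ('n02099601', 'golden_retriever', 0.05), ...]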
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class labels
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
data = []
labels = []

# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the filename
    label = imagePath.split(os.path.sep)[-2]

    # load the input image (224x224) and preprocess it
    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)
    image = preprocess_input(image)

    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)

# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)

# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

# partition the data into training and testing splits, using 75% of
# the data for training and the remaining 25% for testing
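# A minimal sketch of the split the trailing comment describes, assuming
# scikit-learn's train_test_split (the split call itself is not shown above).
from sklearn.model_selection import train_test_split

(trainX, testX, trainY, testY) = train_test_split(
    data, labels, test_size=0.25, stratify=labels, random_state=42)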
def predict_mask(image, model):
    # grab the image's height and width -> e.g. (224, 224)
    (h, w) = image.shape[:2]

    # construct a blob from the image; the blob is what the pre-trained
    # deep neural network consumes to find the face ROIs in the image
    blob = cv2.dnn.blobFromImage(image, size=(224, 224))

    # pass the blob into the face_net pre-trained face-detection model
    detection_net.setInput(blob)
    # get the predicted faces as the model's output
    preds = detection_net.forward()

    # for every face candidate the model sees in the image
    for i in range(preds.shape[2]):
        # proceed only when the detection confidence is above .6, i.e. the
        # model thinks there is at least a 60% chance that this is a face
        if preds[0, 0, i, 2] > .6:
            # scale the normalized box coordinates by w and h to get pixel
            # locations; this is the ROI (region of interest), the face
            box = preds[0, 0, i, 3:7] * np.array([w, h, w, h])
            # convert the box from float to int pixel coordinates
            (start_x, start_y, end_x, end_y) = box.astype("int")
            # clamp the box so it stays inside the image bounds
            (start_x, start_y) = (max(0, start_x), max(0, start_y))
            (end_x, end_y) = (min(w - 1, end_x), min(h - 1, end_y))

            # crop the face pixels out of the image
            face = image[start_y:end_y, start_x:end_x]
            # change the color space, resize, and preprocess the crop
            # so it can be passed into the mask classifier
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            # expand the dimensions to add the batch axis -> (1, 224, 224, 3)
            face = np.expand_dims(face, axis=0)

            # predict the mask / no-mask probabilities for this face
            mask_prob, no_mask_prob = model.predict(face)[0]

            # pick the label and box color; OpenCV colors are BGR, not RGB,
            # so (0, 255, 0) is green (mask) and (0, 0, 255) is red (no mask)
            if mask_prob > no_mask_prob:
                label = "Mask"
                color = (0, 255, 0)
            else:
                label = "No Mask"
                color = (0, 0, 255)

            # the resulting label displayed on top of the bounding box
            result = f'{label}: {round((max(mask_prob, no_mask_prob) * 100), 2)}%'

            # draw the label above the box and the rectangle marking the ROI
            cv2.putText(image, result, (start_x, start_y - 10),
                        cv2.FONT_HERSHEY_DUPLEX, .7, color, 2)
            cv2.rectangle(image, (start_x, start_y), (end_x, end_y), color, 6)
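# A minimal driver sketch for predict_mask. The file names are
# illustrative, and detection_net must exist at module level as a
# cv2.dnn face-detection network, as the function above assumes.
import cv2
from tensorflow.keras.models import load_model

detection_net = cv2.dnn.readNet('deploy.prototxt',
                                'res10_300x300_ssd_iter_140000.caffemodel')
mask_model = load_model('mask_model.h5')            # hypothetical model file
frame = cv2.imread('example.jpg')                   # hypothetical input image
predict_mask(frame, mask_model)                     # draws boxes in place
cv2.imwrite('annotated.jpg', frame)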
def write():
    st.title('Face Mask Detector')
    net = load_face_detector_and_model()
    model = load_cnn_model()
    selected_option = st.radio("Choose", ('File', 'Webcam'))

    if selected_option == 'File':
        uploaded_image = st.sidebar.file_uploader("Choose a JPG file",
                                                  type=FILE_TYPES)
        confidence_value = st.sidebar.slider('Confidence:', 0.0, 1.0, 0.5, 0.1)
        if uploaded_image:
            image1 = Image.open(uploaded_image)
            st.sidebar.image(image1, caption='Uploaded Image.',
                             use_column_width=True)
        show_file = st.empty()
        if not uploaded_image:
            show_file.info("Please upload a file of type: " +
                           ", ".join(FILE_TYPES))
            return
        file_type = get_file_type(uploaded_image)
        if file_type == FileType.IMAGE:
            show_file.image(image1)
        elif file_type == FileType.PYTHON:
            st.code(uploaded_image.getvalue())
        else:
            data = pd.read_csv(uploaded_image)
            st.dataframe(data.head(10))

        with open(get_file_name(uploaded_image), 'rb') as f:
            img_bytes = f.read()

        grad_cam_button = st.sidebar.button('Grad CAM')
        patch_size_value = st.sidebar.slider('Patch size:', 10, 90, 20, 10)
        occlusion_sensitivity_button = st.sidebar.button('Occlusion Sensitivity')

        image = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), 1)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        orig = image.copy()
        (h, w) = image.shape[:2]

        # detect faces with the pre-trained SSD face detector
        blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))
        net.setInput(blob)
        detections = net.forward()

        for i in range(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > confidence_value:
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                face = image[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)
                expanded_face = np.expand_dims(face, axis=0)

                (mask, withoutMask) = model.predict(expanded_face)[0]

                predicted_class = 0
                label = "No Mask"
                if mask > withoutMask:
                    label = "Mask"
                    predicted_class = 1
                color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
                cv2.putText(image, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
                st.image(image, width=640)
                st.write('### ' + label)

                if grad_cam_button:
                    data = ([face], None)
                    explainer = GradCAM()
                    grad_cam_grid = explainer.explain(
                        data, model, class_index=predicted_class,
                        layer_name="Conv_1")
                    st.image(grad_cam_grid)

                if occlusion_sensitivity_button:
                    data = ([face], None)
                    explainer = OcclusionSensitivity()
                    sensitivity_occlusion_grid = explainer.explain(
                        data, model, predicted_class, patch_size_value)
                    st.image(sensitivity_occlusion_grid)

    # PROGRAM FOR WEBCAM
    if selected_option == 'Webcam':
        labels_dict = {0: 'without_mask', 1: 'with_mask'}
        color_dict = {0: (0, 0, 255), 1: (0, 255, 0)}
        size = 4
        webcam = cv2.VideoCapture(0)  # use camera 0
        st.write("Webcam On")
        stframe_cam = st.empty()

        # load the Haar cascade classifier from its XML file
        classifier = cv2.CascadeClassifier(
            'src/pages/Services/frecog/haarcascade_frontalface_default.xml')

        while True:
            (rval, im) = webcam.read()
            if not rval:
                break
            im = cv2.flip(im, 1, 1)  # flip to act as a mirror

            # resize the image to speed up detection
            mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))

            # detect faces with detectMultiScale
            faces = classifier.detectMultiScale(mini)

            # draw rectangles around each face
            for f in faces:
                (x, y, w, h) = [v * size for v in f]  # scale the shape size back up
                # crop just the rectangular face region
                face_img = im[y:y + h, x:x + w]
                resized = cv2.resize(face_img, (150, 150))
                normalized = resized / 255.0
                reshaped = np.reshape(normalized, (1, 150, 150, 3))
                result = model.predict(reshaped)
                label = np.argmax(result, axis=1)[0]

                cv2.rectangle(im, (x, y), (x + w, y + h), color_dict[label], 2)
                cv2.rectangle(im, (x, y - 40), (x + w, y), color_dict[label], -1)
                cv2.putText(im, labels_dict[label], (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

            # show the annotated frame; OpenCV frames are BGR
            stframe_cam.image(im, channels='BGR')

            # if the Esc key is pressed, break out of the loop
            key = cv2.waitKey(10)
            if key == 27:  # the Esc key
                break

        # stop the video and close any started windows
        webcam.release()
        cv2.destroyAllWindows()
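# A minimal entry-point sketch, assuming this module doubles as a plain
# Streamlit script; the file name and the page-router setup around write()
# are assumptions, not shown in the code above.
# if __name__ == '__main__':
#     write()      # then launch with: streamlit run <this_file>.py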