Example #1
def perform_maskDetection(frame, outs, conf_threshold, nms_threshold):
    frame_height = frame.shape[0]
    frame_width = frame.shape[1]

    # Scan through all the bounding boxes output from the network and keep only
    # the ones with high confidence scores. Assign the box's class label as the
    # class with the highest score.
    confidences = []
    boxes = []
    final_boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > conf_threshold:
                center_x = int(detection[0] * frame_width)
                center_y = int(detection[1] * frame_height)
                width = int(detection[2] * frame_width)
                height = int(detection[3] * frame_height)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non maximum suppression to eliminate redundant
    # overlapping boxes with lower confidences.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,
                               nms_threshold)
    faces = []
    locs = []

    # NMSBoxes may return a flat array or an array of 1-element arrays
    # depending on the OpenCV version; flatten to handle both.
    for i in np.array(indices).flatten():
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        final_boxes.append(box)

        # detected face
        (startX, startY, endX, endY) = refined_box(left, top, width, height)

        # extract the face ROI, convert it from BGR to RGB channel
        # ordering, resize it to 224x224, and preprocess it
        try:
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            face = np.expand_dims(face, axis=0)
        except Exception:
            # skip boxes whose ROI is empty or otherwise invalid
            continue

        # add the face and bounding boxes to their respective
        # lists
        faces.append(face)
        locs.append((startX, startY, endX, endY))
    return (faces, locs)
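
# Note: refined_box is referenced above but not defined in this excerpt. A
# minimal sketch of what it plausibly does -- padding a (left, top, width,
# height) box and converting it to corner coordinates -- is given below; the
# margin factor is an assumption, not the original helper.
def refined_box(left, top, width, height):
    right = left + width
    bottom = top + height
    margin = int(0.1 * width)  # padding factor is an assumption
    startX = max(0, left - margin)
    startY = max(0, top - margin)
    endX = right + margin
    endY = bottom + margin
    return (startX, startY, endX, endY)
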
def mask_webcam():
    # facenet: the face detection model
    facenet = cv2.dnn.readNet(
        'models/deploy.prototxt',
        'models/res10_300x300_ssd_iter_140000.caffemodel')
    # model: the mask detection model
    model = load_model('models/realmodel.h5')

    # Read from the webcam (laptop's built-in camera)
    cap = cv2.VideoCapture(0)

    # Read from an external webcam (via a Logitech hub)
    #cap = cv2.VideoCapture(0)
    i = 0

    #url = "https://192.168.43.1:8080"  # IP Webcam URL
    #cap=cv2.VideoCapture(url+"/video")

    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break

        # extract the height and width of the image
        h, w = img.shape[:2]

        # preprocess the image
        # ref. https://www.pyimagesearch.com/2017/11/06/deep-learning-opencvs-blobfromimage-works/
        blob = cv2.dnn.blobFromImage(img,
                                     scalefactor=1.,
                                     size=(224, 224),
                                     mean=(104., 177., 123.))

        # set the blob as facenet's input
        facenet.setInput(blob)
        # run facenet inference; the detected faces are stored in dets
        dets = facenet.forward()

        # handle multiple faces within a single frame
        result_img = img.copy()

        # check whether a mask is being worn
        for i in range(dets.shape[2]):

            # confidence of the detection
            confidence = dets[0, 0, i, 2]
            # threshold the confidence at 0.5
            if confidence < 0.5:
                continue

            # compute the bounding box
            x1 = int(dets[0, 0, i, 3] * w)
            y1 = int(dets[0, 0, i, 4] * h)
            x2 = int(dets[0, 0, i, 5] * w)
            y2 = int(dets[0, 0, i, 6] * h)

            # extract the face region from the original image
            face = img[y1:y2, x1:x2]

            # preprocess the extracted face region

            try:
                face_input = cv2.resize(face, dsize=(224, 224))
                face_input = cv2.cvtColor(face_input, cv2.COLOR_BGR2RGB)
                face_input = preprocess_input(face_input)
                face_input = np.expand_dims(face_input, axis=0)
            except cv2.error:
                print("resize error")
                continue

            # get predictions from the mask detection model
            (hmask, mask, nomask) = model.predict(face_input).squeeze()

            # label the face according to which class has the highest probability
            if mask > nomask and mask > hmask:
                color = (0, 255, 0)
                label = 'Mask %d%%' % (mask * 100)

            elif hmask > mask and hmask > nomask:
                color = (255, 0, 0)
                label = 'Half Mask %d%%' % (hmask * 100)

            else:
                color = (0, 0, 255)
                label = 'No Mask %d%%' % (nomask * 100)

            # draw the face box and mask status on the frame
            cv2.rectangle(result_img,
                          pt1=(x1, y1),
                          pt2=(x2, y2),
                          thickness=2,
                          color=color,
                          lineType=cv2.LINE_AA)
            cv2.putText(result_img,
                        text=label,
                        org=(x1, y1 - 10),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.8,
                        color=color,
                        thickness=2,
                        lineType=cv2.LINE_AA)

        cv2.imshow('Mask Detection', result_img)

        # press 'q' to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
def predict_model(img_path):

    label = ''

    image_name = os.path.basename(img_path)
    #print("[INFO] ",image_name)
    
    # PATH="C:\\Users\\Gerosh Shibu George\\Desktop\\Face Mask Detection\\model"
    PATH=os.path.join(os.getcwd(),"model")
    
    #print("[INFO] ",PATH)
    
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.join(PATH, "deploy.prototxt")
    weightsPath = os.path.join(PATH,
                               "res10_300x300_ssd_iter_140000.caffemodel")
    net = cv2.dnn.readNet(prototxtPath, weightsPath)
    
    print("[INFO] loading face mask detector model...")
    model = load_model(os.path.join(PATH,'model1'))   
    

    # load the input image from disk, clone it, and grab the image spatial
    # dimensions
    image = cv2.imread(img_path)
    orig = image.copy()
    (h, w) = image.shape[:2]
    # construct a blob from the image
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                 (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the face detections
    print("[INFO] computing face detections...")
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > 0.6:
            
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")  
		
            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))  
  	
            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 128x128, and preprocess it
            face = image[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (128, 128))
            face = img_to_array(face)
            face = preprocess_input(face)
            face = np.expand_dims(face, axis=0)      
		
            # pass the face through the model to determine if the face
            # has a mask or not
            (without_Mask, with_Mask) = model.predict(face)[0]
    
            # determine the class label and color we'll use to draw
            # the bounding box and text
            label = "Mask" if with_Mask > without_Mask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)  
		
            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(with_Mask, without_Mask) * 100)
		
            # display the label and bounding box rectangle on the output
            # frame
            cv2.putText(image, label, (startX + 5, endY + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
    
    # show the output image
    print("[INFO] Saving resultant image:")
    
    #save_path="C:\\Users\\Gerosh Shibu George\\Desktop\\Face Mask Detection\\webapp\\static\\"+ image_name
    name, ext = os.path.splitext(image_name)
    image_name = name + "_predicted" + ext
    save_path = os.path.join(os.getcwd(), "webapp", "static", image_name)
    print("[INFO] ",save_path)
    
    result = {
        'label': '',
        'filename': '',
    }

    if label != '':
        if cv2.imwrite(save_path, image):
            result = {
                'label': label,
                'filename': image_name,
            }

    return result
    
    
# img_path=os.path.join(os.getcwd(),'uploads')
# img_path=os.path.join(img_path,'example1.jpg')
# print(img_path)
# print(predict_model(img_path))
Example #4
parser = argparse.ArgumentParser()
parser.add_argument('model_dir', help="SavedModel Directory")
args = parser.parse_args()

model_path = args.model_dir

##
batch_size = 8
batched_input = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)

print('Load Images')
for i in range(batch_size):
    img = read_img('./data/img%d.JPG' % (i % 4))
    x = np.expand_dims(img, axis=0)
    x = preprocess_input(x)
    batched_input[i, :] = x
batched_input = tf.constant(batched_input)
print('batched_input shape: ', batched_input.shape)

##
print('Load Model')
saved_model_loaded = tf.saved_model.load(model_path,
                                         tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
print(signature_keys)

infer = saved_model_loaded.signatures['serving_default']
print(infer.structured_outputs)
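
# A minimal sketch of actually running the loaded signature, mirroring the
# usage in Example #17 below; the 'predictions' output key is taken from that
# example and should be checked against infer.structured_outputs above.
labeling = infer(batched_input)
preds = labeling['predictions'].numpy()
print('predictions shape:', preds.shape)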

print('Pillow')
Example #5
    def detect_and_predict_mask(frame, faceNet, maskNet):
        # grab the dimensions of the frame and then construct a blob
        # from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),
                                     (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        faceNet.setInput(blob)
        detections = faceNet.forward()
        print(detections.shape)

        # initialize our list of faces, their corresponding locations,
        # and the list of predictions from our face mask network
        faces = []
        locs = []
        preds = []

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > 0.5:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # ensure the bounding boxes fall within the dimensions of
                # the frame
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                # extract the face ROI, convert it from BGR to RGB channel
                # ordering, resize it to 224x224, and preprocess it
                face = frame[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)

                # add the face and bounding boxes to their respective
                # lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

        # only make predictions if at least one face was detected
        if len(faces) > 0:
            # for faster inference we'll make batch predictions on *all*
            # faces at the same time rather than one-by-one predictions
            # in the above `for` loop
            faces = np.array(faces, dtype="float32")
            preds = maskNet.predict(faces, batch_size=32)

        # return a 2-tuple of the face locations and their corresponding
        # predictions
        return (locs, preds)
learning_rate = 1e-4
epochs = 20
batch_size = 32

print("loading images")
path = list(paths.list_images("Dataset"))
data = []
label = []

for imagePath in path:
    # extract class label from filename
    classLabel = imagePath.split(os.path.sep)[-2]
    # load image and preprocess it
    img = load_img(imagePath, target_size=(224, 224))
    img = img_to_array(img)
    img = preprocess_input(img)
    # update data and label list
    data.append(img)
    label.append(classLabel)
# convert data and label list to np array
data = np.array(data, dtype="float32")
label = np.array(label)

# perform encoding on label array
lb = LabelBinarizer()
label = lb.fit_transform(label)
label = to_categorical(label)

# partition dataset into train test set
(x_train, x_test, y_train, y_test) = train_test_split(data,
                                                      label,
Example #7
    def encode(self, input_image):
        """
        :param input_image: (batch, height, width, channel)
        """
        input_shape = input_image.get_shape()
        height = input_shape[1]
        net_name = self.net_name
        weights = "imagenet" if self.pretrained_weight else None

        jsonfile = op.join(opts.PROJECT_ROOT, "model", "build_model",
                           "scaled_layers.json")
        output_layers = self.read_output_layers(jsonfile)
        out_layer_names = output_layers[net_name]

        if net_name == "MobileNetV2":
            from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
            pproc_img = layers.Lambda(lambda x: preprocess_input(x),
                                      name="preprocess_mobilenet")(input_image)
            ptmodel = tfapp.MobileNetV2(input_shape=input_shape,
                                        include_top=False,
                                        weights=weights)

        elif net_name == "NASNetMobile":
            from tensorflow.keras.applications.nasnet import preprocess_input
            assert height == 128

            def preprocess_layer(x):
                x = preprocess_input(x)
                x = tf.image.resize(x,
                                    size=NASNET_SHAPE[:2],
                                    method="bilinear")
                return x

            pproc_img = layers.Lambda(lambda x: preprocess_layer(x),
                                      name="preprocess_nasnet")(input_image)
            ptmodel = tfapp.NASNetMobile(input_shape=NASNET_SHAPE,
                                         include_top=False,
                                         weights=weights)

        elif net_name == "DenseNet121":
            from tensorflow.keras.applications.densenet import preprocess_input
            pproc_img = layers.Lambda(lambda x: preprocess_input(x),
                                      name="preprocess_densenet")(input_image)
            ptmodel = tfapp.DenseNet121(input_shape=input_shape,
                                        include_top=False,
                                        weights=weights)

        elif net_name == "VGG16":
            from tensorflow.keras.applications.vgg16 import preprocess_input
            pproc_img = layers.Lambda(lambda x: preprocess_input(x),
                                      name="preprocess_vgg16")(input_image)
            ptmodel = tfapp.VGG16(input_shape=input_shape,
                                  include_top=False,
                                  weights=weights)

        elif net_name == "Xception":
            from tensorflow.keras.applications.xception import preprocess_input
            assert height == 128

            def preprocess_layer(x):
                x = preprocess_input(x)
                x = tf.image.resize(x,
                                    size=XCEPTION_SHAPE[:2],
                                    method="bilinear")
                return x

            pproc_img = layers.Lambda(lambda x: preprocess_layer(x),
                                      name="preprocess_xception")(input_image)
            ptmodel = tfapp.Xception(input_shape=XCEPTION_SHAPE,
                                     include_top=False,
                                     weights=weights)

        elif net_name == "ResNet50V2":
            from tensorflow.keras.applications.resnet import preprocess_input
            pproc_img = layers.Lambda(lambda x: preprocess_input(x),
                                      name="preprocess_resnet")(input_image)
            ptmodel = tfapp.ResNet50V2(input_shape=input_shape,
                                       include_top=False,
                                       weights=weights)

        elif net_name == "NASNetLarge":
            from tensorflow.keras.applications.nasnet import preprocess_input
            assert height == 128

            def preprocess_layer(x):
                x = preprocess_input(x)
                x = tf.image.resize(x,
                                    size=NASNET_SHAPE[:2],
                                    method="bilinear")
                return x

            pproc_img = layers.Lambda(lambda x: preprocess_layer(x),
                                      name="preprocess_nasnet")(input_image)
            ptmodel = tfapp.NASNetLarge(input_shape=NASNET_SHAPE,
                                        include_top=False,
                                        weights=weights)
        else:
            raise WrongInputException("Wrong pretrained model name: " +
                                      net_name)

        # collect multi scale convolutional features
        layer_outs = []
        for layer_name in out_layer_names:
            layer = ptmodel.get_layer(name=layer_name[1], index=layer_name[0])
            # print("extract feature layers:", layer.name, layer.get_input_shape_at(0), layer.get_output_shape_at(0))
            layer_outs.append(layer.output)

        # create model with multi scale features
        multi_scale_model = tf.keras.Model(ptmodel.input,
                                           layer_outs,
                                           name=net_name + "_base")
        features_ms = multi_scale_model(pproc_img)
        return features_ms
Example #8
def detect_mask_image(image):
    label = '_'
    image = cv2.imdecode(np.frombuffer(image.read(), np.uint8),
                         1)  # read the image from the in-memory buffer
    image = cv2.cvtColor(image,
                         cv2.COLOR_BGR2RGB)  # convert image from BGR to RGB
    orig = image.copy()  # get a copy of the image
    (h, w) = image.shape[:2]  # get image height and width
    blob = cv2.dnn.blobFromImage(
        image,
        1.0,
        (300, 300),  # construct a blob from the image
        (104.0, 177.0, 123.0))
    net.setInput(
        blob
    )  # pass the blob through the detection, get region that differ in propertes, and the face region
    detection = net.forward()

    for i in range(0, detection.shape[2]):  # loop through the detection

        confidence = detection[0, 0, i, 2]  # extract the confidence value

        if confidence > 0.50:  # if the confidence is greater than the selected confidence from the side bar

            box = detection[0, 0, i, 3:7] * np.array(
                [w, h, w, h])  # get x and y coordinate for the bounding box
            (startX, startY, endX, endY) = box.astype("int")

            (startX, startY) = (max(0, startX), max(0, startY))
            (endX,
             endY) = (min(w - 1, endX), min(h - 1, endY)
                      )  # ensure bounding box does not exceed image frame

            # extract the face ROI; the image was already converted to RGB
            # above, so no further color conversion is needed
            face = image[startY:endY, startX:endX]
            face = cv2.resize(face, (224, 224))  # resize to 224, 224
            face = img_to_array(face)  # convert resized face to an array
            face = preprocess_input(face)  # preprocess the array
            face = np.expand_dims(face, axis=0)  # add a batch dimension

            (mask, withoutMask) = model.predict(
                face
            )[0]  # pass the face through the mask model, detect if there is mask or not

            label = "Mask on" if mask > withoutMask else "No Mask"  # define label

            color = (0, 255, 0) if label == "Mask on" else (
                255, 0, 0)  # bbox is green if 'mask' else red

            label = "{}: {:.2f}%".format(label,
                                         max(mask, withoutMask) *
                                         100)  # add label probability

            cv2.putText(image, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.20, color, 2)
            cv2.rectangle(image, (startX, startY), (endX, endY), color,
                          2)  #display label and bbox rectangle in output frame

    return image, label  # return the annotated image and its label
Example #9
def DetectImage(imgpath):
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.join("face_detector", "deploy.prototxt")
    weightsPath = os.path.join(
        "face_detector", "res10_300x300_ssd_iter_140000.caffemodel")
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    print("[INFO] loading face mask detector model...")
    model = load_model('mask_model')
    #print(model.summary())

    frame = cv2.imread(imgpath)
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (x1, y1, x2, y2) = box.astype("int")
            #define face
            face = frame[y1:y2, x1:x2]
            #print(face.shape)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face) / 255.
            face = preprocess_input(face)
            face = np.expand_dims(face, axis=0)
            pred = model.predict(face)
            #print(pred)
            for z in pred:
                mask, without_mask = z
            label = "Mask" if mask > without_mask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            label = "{}: {:.2f}%".format(label, max(mask, without_mask) * 100)
            # include the probability in the label
            cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.45, color, 2)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            print(label)
    while True:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        #if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
Example #10
def main(ctx, msg):

    #load image from kps datasource
    unpacked_dict = msgpack.unpackb(msg, raw=False, max_bin_len=3145728)
    raw_image = np.frombuffer(unpacked_dict["Data"], dtype=unpacked_dict["DataType"])
    raw_image = raw_image.reshape((unpacked_dict["Height"], unpacked_dict["Width"], unpacked_dict["Channels"]))
    image = cv2.cvtColor(raw_image, cv2.COLOR_RGB2BGR)
    logging.info('Received image, starting model detection')

    # prepare image for detection
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
    facenet.setInput(blob)
    detections = facenet.forward()

    faces = []
    face_locations = []
    mask_predictions = []  # initialized to avoid a NameError when no face is found
    faces_without_mask = 0
    faces_with_mask = 0

    for i in range(0, detections.shape[2]):
        image_confidence = detections[0, 0, i, 2]

        if image_confidence > confidence:
            # compute the (x, y)-coordinates of the bounding box for the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype('int')

            # ensure the bounding boxes fall within the dimensions of the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel ordering, resize it to 224x224, and preprocess it
            face = image[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            # add the face and bounding boxes to their respective to result lists
            faces.append(face)
            face_locations.append((startX, startY, endX, endY))


    logging.info(f'Found {len(faces)} faces in the image')
    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        faces = np.array(faces, dtype="float32")
        mask_predictions = masknet.predict(faces, batch_size=32)

    # draw boxes around the faces
    for (box, pred) in zip(face_locations, mask_predictions):
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred

        # determine the class label and color we'll use to draw
        # the bounding box and text
        if mask > withoutMask:
            label = f'Mask: {mask:.2f}'
            faces_with_mask += 1
            color = (0, 255, 0)
        else:
            label = f'No Mask: {withoutMask:.2f}'
            faces_without_mask += 1
            color = (0, 0, 255)

        cv2.putText(image, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
        cv2.rectangle(image, (startX, startY), (endX, endY), color, 4)
    logging.info(f'detected ({faces_with_mask}) faces with mask and ({faces_without_mask}) without mask')
    _, img_encoded = cv2.imencode('.jpg', image)
    payload = {
        'values': [
            {'id': 'image_timestamp', 'name': 'Timestamp', 'value': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'icon': 'fa-hourglass-half'},
            {'id': 'mask_count', 'name': 'Masks', 'value': faces_with_mask, 'icon': 'fa-check-circle'},
            {'id': 'nomask_count', 'name': 'No masks', 'value': faces_without_mask, 'icon': 'fa-times-circle'}],
        'image': img_encoded.tobytes()
    }

    ctx.send(pickle.dumps(payload))
    return
Example #11
# initialize the list of data (i.e., images) and class labels
print("[INFO] loading images...")

data = []  # all images
labels = []  # all labels

for category in CATEGORIES:
    path = os.path.join(DIRECTORY, category)  # join the directory and category
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        # load_img comes from the Keras preprocessing utilities
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)  # the image becomes a 224x224 array
        # MobileNet models require their matching preprocess_input
        image = preprocess_input(image)

        data.append(image)
        labels.append(category)

# the labels are categorical, so perform one-hot encoding on them
lb = LabelBinarizer()  # scikit-learn preprocessing utility
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

# convert data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)

# train/test split
(trainX, testX, trainY, testY) = train_test_split(data,
Example #12
def pretrained_preprocess_input(img_arr: np.ndarray) -> np.ndarray:
    return preprocess_input(img_arr)
Example #13
def detector(frame, networkFace, networkMask):
    # Grab the dimensions of the current frame.
    (h, w) = frame.shape[:2]

    # Convert the frame into a BLOB (Binary Large OBject) for numerical processing.
    blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))

    # Pass the BLOB through the face detection network to detect faces.
    networkFace.setInput(blob)
    detections = networkFace.forward()

    # Print the detection results to the console.
    print(detections.shape)

    # Define lists for the faces, their locations, and the predictions from the face mask network.
    faces = []
    locations = []
    predictions = []

    # Loop over the detections obtained with faceNet.
    for i in range(0, detections.shape[2]):
        # Extract the confidence associated with the detection.
        confidence = detections[0, 0, i, 2]

        # Filter out detections that fall below our minimum confidence level.
        if confidence > 0.5:
            # Convert the detected region into (x, y) coordinates.
            location = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (x1, y1, x2, y2) = location.astype("int")

            # Make sure the detected region falls within the bounds of the frame.
            (x1, y1) = (max(0, x1), max(0, y1))
            (x2, y2) = (min(w - 1, x2), min(h - 1, y2))

            # Extract the face from the region of interest.
            face = frame[y1:y2, x1:x2]

            # Convert the extracted face from BGR to RGB color ordering.
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)

            # Resize the image to 224x224 for easier processing.
            face = cv2.resize(face, (224, 224))

            # Convert the face to a NumPy array for processing.
            face = img_to_array(face)

            # Preprocess the image.
            face = preprocess_input(face)

            # Append the detected face and its region to the corresponding lists.
            faces.append(face)
            locations.append((x1, y1, x2, y2))

    # Run mask detection only if at least one face is present in the frame.
    if len(faces) > 0:
        # To speed things up, rather than processing faces one by one, we try
        # to detect masks on all faces in the frame as a single batch.
        faces = np.array(faces, dtype="float32")
        predictions = networkMask.predict(faces, batch_size=32)

    # Return the face locations and their corresponding predictions as a 2-tuple.
    return (locations, predictions)
Example #14
# grab the list of images in our dataset directory, then initialize the list of data (i.e., images) and class images
print("[INFO] loading images...")
imagePaths = list(paths.list_images(ds_path))
data = []
labels_origin = []

# label_candidates = ['nomask','mask_inc', 'mask_cor']
# loop over the image paths
for imagePath in imagePaths:
	# extract the class label from the filename
	label = imagePath.split(os.path.sep)[-2]

	# Keras-based helpers, used in sequence: load_img -> img_to_array -> preprocess_input
	image = load_img(imagePath, target_size=(224, 224)) # resizing
	image = img_to_array(image) # convert to a NumPy array
	image = preprocess_input(image) # paired with load_img for compatible preprocessing; this is the MobileNetV2-specific preprocess, scaling each sample's pixels to [-1, 1]

	# update the data and labels lists, respectively
	data.append(image)
	labels_origin.append(label)
 
# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels_origin = np.array(labels_origin)
print(labels_origin)

# perform one-hot encoding on the labels
lb = LabelEncoder()
labels = lb.fit_transform(labels_origin)
labels = to_categorical(labels)
Example #15
        # print(f'***** face.shape={face.shape}')
        # faces.append(face)  # store the faces

        # check that the faces were stored correctly
        # for i, face in enumerate(faces):
        # plt.subplot(1,len(faces),i+1)
        # plt.imshow(face[:,:,::-1])

        # detect masks from faces
        # for i, face in enumerate(faces):  # loop that checks whether a mask is worn (detect mask from faces); covers lines 71-79 of the original file
        # lines 71-74 are the preprocessing code
        try:
            face_input = cv2.resize(face, dsize=(224, 224))  # resize the image to 224x224
            # print(f'***** face_input.shape={face_input.shape}')
            face_input = cv2.cvtColor(face_input, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR by default, so switch the color system to RGB
            face_input = preprocess_input(face_input)  # apply the same preprocessing that MobileNetV2 uses
            face_input = np.expand_dims(face_input, axis=0)  # the RGB array is (224, 224, 3) but the model expects (1, 224, 224, 3), so add a batch dimension at axis 0
            # print(f'***** face_input.shape={face_input.shape}')

            mask, nomask = model.predict(face_input).squeeze()  # probabilities of wearing / not wearing a mask
            # plt.subplot(1,len(faces),i+1)
            # plt.imshow(face[:,:,::-1])
            # plt.title('%2.f%%' % (mask*100))

            # loop up to line 79 above to annotate the photo with the mask / no-mask probabilities

            if mask > nomask:
                color = (0, 255, 0)
                label = 'Mask %d%%' % (mask * 100)
            else:
                color = (0, 0, 255)
                label = 'No Mask %d%%' % (nomask * 100)
                                # (the start of this snippet is cut off; the
                                # window slice presumably begins as below,
                                # with the array name assumed)
                                img_snippet = image[i*step_size[0]: i*step_size[0] + win_size[0],  # height
                                                    j*step_size[1]: j*step_size[1] + win_size[1],  # width
                                                    :]  # channels

                                # Resize to NN image size requirement
                                if img_snippet.shape[0:2] != target_img_size:
                                    img_snippet = cv2.resize(img_snippet, dsize=target_img_size, interpolation=cv2.INTER_CUBIC)

                                # Stack onto collection of images to run NN on
                                all_image_snippets = np.concatenate((all_image_snippets, np.expand_dims(img_snippet, axis=0)))
                                img_count += 1


                if all_image_snippets.shape[0] != 0:
                    # Pre process all image snippets
                    # print('Preprocessing images ...')
                    all_image_snippets = preprocess_input(all_image_snippets)

                    # Run model predictions
                    # print('Running model ...')
                    pred = model.predict(all_image_snippets)
                    # print('Done')

                    ## BIRD IDX

                    # Get index of classified birds/animals
                    bird_idx = np.argmax(pred, axis=1)

                    bird_idx_2d = np.zeros(num_steps).astype(int)
                    bird_idx_2d[y_s, x_s] = bird_idx

                    # Get rid of background detections
Example #17
width = 224
height = 224
cam_id = 0

camSet ='nvarguscamerasrc sensor-id=' + str(cam_id) + \
    ' ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1,format=NV12 ! nvvidconv flip-method=0 ! video/x-raw, ' + \
    'width=' + str(width) + ', height=' + str(height) + ', format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
cap = cv.VideoCapture(camSet)

try:
    while True:
        ret, frame = cap.read()
        x = cv.resize(frame, (height, width))
        x = np.expand_dims(x, axis=0)
        x = mobilenet_v2.preprocess_input(x)
        x = tf.constant(x)
        labeling = infer(x)
        preds = labeling['predictions'].numpy()
        print(preds)

        # prob_blocked = y.argmax()
        # if prob_blocked < 0.5:
        #     robot.forward(speed)
        # else:
        #     robot.right(speed)

        time.sleep(0.001)

        if cv.waitKey(1) == ord('q'):
            break
finally:
    # assumed cleanup; the original snippet is cut off after the loop
    cap.release()
Example #18
def preprocess(path):
    img = image.load_img(path, target_size=(224, 224))
    return preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
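
# A hypothetical call, assuming a Keras classifier named `model` and an image
# file 'test.jpg' (both placeholders):
#     x = preprocess('test.jpg')   # shape (1, 224, 224, 3)
#     preds = model.predict(x)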
Example #19
    def Detect_Face_Mask(self):
        try:
            # message box requesting the user to wait because it takes some amount of time to detect the face mask
            messagebox.showinfo("Loading",
                                "Please Wait while updated image is loading")

            # converting RGB to BGR
            img = cv2.cvtColor(np.array(self.saveimg), cv2.COLOR_RGB2BGR)

            # labels for mask or no mask
            labelfinder = {0: 'mask', 1: 'No Mask'}
            detector = MTCNN()
            try:
                # loading the model
                model = load_model('curr_best_model.h5')
            except:
                # if the model is not present inside the current directory
                print("Model Not Found Error")
                messagebox.showerror(
                    "Error",
                    "Please Add the curr_best_model.h5 file to current directory"
                )
                return self.myvar.image
            try:
                # firstly detecting faces inside an image
                faces = detector.detect_faces(img)
            except:
                # in case the image could not be processed as a valid jpg
                print("Face Not detected Error")
                messagebox.showerror(
                    "Error", "Please check that image is valid jpg type")

                return "File not a jpeg/jpg"

            # copy the image (slicing a NumPy array returns a view, so use copy)
            newimg = img.copy()
            # newimg = cv2.cvtColor(newimg,cv2.COLOR_BGR2RGB)
            if len(faces) == 0:
                messagebox.showinfo(
                    "No Face", "No face could be detected in the current image")

            # detecting the mask for every face detected inside the image.
            for face in faces:
                curr_box = (face['box'])
                # top-left coordinate of the box
                x = curr_box[0]
                y = curr_box[1]
                # width and height of the box (MTCNN returns [x, y, w, h])
                w = curr_box[2]
                h = curr_box[3]

                # cropping the image
                im1 = img[y:y + h, x:x + w]
                # converting color BGR to RGB
                im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2RGB)
                # print(x,y,h,w)
                #         plt.imshow(im1)
                #         plt.show()
                # resizing the image
                im1 = cv2.resize(im1, (64, 64))
                # converting it to array
                im1 = np.asarray(im1)
                im1 = preprocess_input(im1)
                a = []
                a.append(im1)
                # creating a numpy array
                a = np.array(a)

                # predicting the mask using our model
                pred = model.predict(a)
                # reading the label
                predLabel = labelfinder[np.argmax(pred)]
                # print(x,y,w,h)
                color = (random.randint(0, 255), random.randint(0, 255),
                         random.randint(0, 255))

                # enclosing inside a rectangle
                newimg = cv2.rectangle(newimg, (x, y), (x + w, y + h), (color),
                                       4)
                try:
                    # put the result on the image as a text
                    cv2.putText(newimg, predLabel, (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (color), 1)
                except:
                    cv2.putText(newimg, predLabel, (x, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (color), 1)

            newimg = cv2.cvtColor(newimg, cv2.COLOR_BGR2RGB)
            output = newimg
            # converting array to PIL Image format
            pil_image = Image.fromarray(output)
            self.saveimg = pil_image
            # converting PIL Image to ImageTk format to show inside the label
            tkimage = ImageTk.PhotoImage(pil_image)
            # updating the image inside label
            self.myvar.configure(image=tkimage)
            self.myvar.image = tkimage
            print("Image Updated")
            return tkimage
        except:
            print("Detect_Face_Mask Error")
            messagebox.showerror(
                "Error", "Please run only on RGB images in jpg format")
            return self.myvar.image
Example #20
def mask_image():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="path to input image")
    ap.add_argument("-m",
                    "--model",
                    type=str,
                    default="version_2/mask_detector.model",
                    help="path to trained face mask detector model")
    ap.add_argument("-c",
                    "--confidence",
                    type=float,
                    default=0.5,
                    help="minimum probability to filter weak detections")
    args = vars(ap.parse_args())

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
    weightsPath = os.path.sep.join(
        ["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"])
    net = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    model = load_model(args["model"])

    # load the input image from disk, clone it, and grab the image spatial
    # dimensions
    image_ogr = cv2.imread(args["image"])
    orig = image_ogr.copy()
    (h, w) = image_ogr.shape[:2]

    # construct a blob from the image
    blob = cv2.dnn.blobFromImage(image_ogr, 1.0, (300, 300),
                                 (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    print("[INFO] computing face detections...")
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it and preprocess it
            face = image_ogr[startY:endY, startX:endX]

            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (IMG_HEIGHT, IMG_WIDTH))
            face = image.img_to_array(face)
            face = preprocess_input(face)
            face = np.expand_dims(face, axis=0)

            # cv2.imwrite("test_images/face.jpg", face)
            # face = image.load_img("test_images/face.jpg", target_size=(IMG_HEIGHT, IMG_WIDTH))
            # face = image.img_to_array(face)
            # face = np.expand_dims(face, axis = 0)

            # pass the face through the model to determine if the face
            # has a mask or not
            (without_mask, with_mask) = model.predict(face)[0]
            print(with_mask, without_mask)

            # determine the class label and color we'll use to draw
            # the bounding box and text
            label = "Mask" if with_mask > without_mask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

            # include the probability in the label
            label = "{}: {:.2f}%".format(label,
                                         max(with_mask, without_mask) * 100)

            # display the label and bounding box rectangle on the output
            # frame
            cv2.putText(image_ogr, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(image_ogr, (startX, startY), (endX, endY), color, 2)

    # show the output image
    cv2.imshow("Output", image_ogr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #21
def preprocess_layer(x):
    x = preprocess_input(x)
    x = tf.image.resize(x,
                        size=XCEPTION_SHAPE[:2],
                        method="bilinear")
    return x
Example #22
    def get_frame(self):
        # playsound(starter)
        while True:
            ret, frame = self.cap.read()
            frame = imutils.resize(frame, width=500)
            (h, w) = frame.shape[:2]
            blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                         (104, 177, 123))

            face_detector.setInput(blob)
            detections = face_detector.forward()

            faces = []
            bbox = []
            results = []

            for i in range(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]

                if confidence > 0.5:
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")

                    face = frame[startY:endY, startX:endX]
                    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                    face = cv2.resize(face, (224, 224))
                    face = img_to_array(face)
                    face = preprocess_input(face)
                    face = np.expand_dims(face, axis=0)

                    faces.append(face)
                    bbox.append((startX, startY, endX, endY))

            if len(faces) > 0:
                # stack the (1, 224, 224, 3) faces into one batch
                results = mask_detector.predict(np.vstack(faces))

            for (face_box, result) in zip(bbox, results):
                (startX, startY, endX, endY) = face_box
                (mask, withoutMask) = result

                label = ""

                if mask > withoutMask:

                    label = "Mask"
                    color = (0, 255, 0)
                    if not voice.get_busy():
                        voice.play(sound1)

                else:
                    label = "No Mask"
                    color = (0, 0, 255)
                    if not voice.get_busy():
                        voice.play(sound)

                cv2.putText(frame, label, (startX, startY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
                # cv2.imshow("Frame", frame)
                # key = cv2.waitKey(1) & 0xFF

            return frame
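
# get_frame returns a raw BGR frame; in a typical streaming setup it would be
# JPEG-encoded before being sent to a client. A hypothetical caller, assuming
# a VideoCamera-style class wrapping the method above (the class name is an
# assumption):
#     camera = VideoCamera()
#     ok, jpeg = cv2.imencode('.jpg', camera.get_frame())
#     data = jpeg.tobytes() if ok else b''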
def detect_and_predict_mask(frame, faceNet, maskNet):
	# grab the dimensions of the frame and then construct a blob
	# from it
	(h, w) = frame.shape[:2]
	blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
		(104.0, 177.0, 123.0))

	# pass the blob through the network and obtain the face detections
	faceNet.setInput(blob)
	detections = faceNet.forward()
		

	# initialize our list of faces, their corresponding locations,
	# and the list of predictions from our face mask network
	faces = []
	locs = []
	preds = []
	formiddle = []
	Mid = []

	# loop over the detections
	try:

		for i in range(0, detections.shape[2]):
			# extract the confidence (i.e., probability) associated with
			# the detection
			confidence = detections[0, 0, i, 2]
	
			# filter out weak detections by ensuring the confidence is
			# greater than the minimum confidence
			if confidence > args["confidence"]:
				# compute the (x, y)-coordinates of the bounding box for
				# the object
				box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
				(startX, startY, endX, endY) = box.astype("int")
	
				# ensure the bounding boxes fall within the dimensions of
				# the frame
				(startX, startY) = (max(0, startX), max(0, startY))
				(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
	
				# extract the face ROI, convert it from BGR to RGB channel
				# ordering, resize it to 224x224, and preprocess it
				face = frame[startY:endY, startX:endX]
				face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
				face = cv2.resize(face, (224, 224))
				face = img_to_array(face)
				face = preprocess_input(face)
	
				# add the face and bounding boxes to their respective
				# lists
				faces.append(face)
				locs.append((startX, startY, endX, endY))
				formiddle.append([startX, startY, endX, endY])

	except:
		pass

	# only make predictions if at least one face was detected
	if len(faces) > 0:
		# for faster inference we'll make batch predictions on *all*
		# faces at the same time rather than one-by-one predictions
		# in the above `for` loop

		#if len(faces) >= 2:	
		#	D = dist.cdist(Mid, Mid, metric="euclidean")
		#	print(D)
		faces = np.array(faces, dtype="float32")


		preds = maskNet.predict(faces, batch_size=32)

	# return the face locations, their corresponding predictions, and the
	# face centroids
	violate = set()  # indices of faces violating the minimum distance
	if len(faces) >= 2:
		for i in range(len(faces)):
			MidX = int(formiddle[i][0] + (formiddle[i][2] - formiddle[i][0]) / 2)
			MidY = int(formiddle[i][1] + (formiddle[i][3] - formiddle[i][1]) / 2)
			Mid.append((MidX, MidY))
		Mid = np.asarray(Mid)
		distt = dist.cdist(Mid, Mid, metric="euclidean")

		# loop over the upper triangular part of the distance matrix
		for i in range(0, distt.shape[0]):
			for j in range(i + 1, distt.shape[1]):
				# check to see if the distance between any two
				# centroid pairs is less than the configured number
				# of pixels
				if distt[i, j] < MIN_DISTANCE:
					# update our violation set with the indexes of
					# the centroid pairs
					violate.add(i)
					violate.add(j)

		print(Mid)
		print(distt)
	return (locs, preds, Mid)
def detect_and_predict_mask(frame):
    # grab the dimensions of the frame and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()
    print(detections.shape)

    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = []
    locs = []
    preds = []

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > 0.5:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)

            # add the face and bounding boxes to their respective
            # lists
            faces.append(face)
            locs.append((startX, startY, endX, endY))

    # only make predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above `for` loop
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    # return a 2-tuple of the face locations and their corresponding
    # predictions
    return (locs, preds)


# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
# image = cv2.imread(args['input'])
# image = imutils.resize(image, width=400)

# detect faces in the image and determine if they are wearing a
# face mask or not
# (locs, preds) = detect_and_predict_mask(image)

# loop over the detected face locations and their corresponding
# # locations
# for (box, pred) in zip(locs, preds):
# 	# unpack the bounding box and predictions
# 	(startX, startY, endX, endY) = box
# 	(mask, withoutMask) = pred

# 	# determine the class label and color we'll use to draw
# 	# the bounding box and text
# 	label = "Mask" if mask > withoutMask else "No Mask"
# 	color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

# 	# include the probability in the label
# 	label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

# 	# display the label and bounding box rectangle on the output
# 	# image
# 	cv2.putText(image, label, (startX, startY - 10),
# 		cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
# 	cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)

# # show the output image
# cv2.imshow("image", image)
# key = cv2.waitKey(0)

# # do a bit of cleanup
# cv2.destroyAllWindows()
# vs.stop()
Example #25
        if confidence < 0.5:
            continue

        x1 = int(dets[0, 0, i, 3] * w)  # compute the bounding box
        y1 = int(dets[0, 0, i, 4] * h)
        x2 = int(dets[0, 0, i, 5] * w)
        y2 = int(dets[0, 0, i, 6] * h)
        # print(i, confidence, x1, y1, x2, y2)  # i is the face index, confidence is the probability it is really a face, followed by the coordinates
        face = img[y1:y2, x1:x2]  # crop just the face using the bounding box

        # predict whether a mask is worn
        # preprocessing
        face_input = cv2.resize(face, dsize=(224, 224))  # resize the image
        face_input = cv2.cvtColor(face_input,
                                  cv2.COLOR_BGR2RGB)  # change the image's color system
        face_input = preprocess_input(
            face_input)  # apply the same preprocessing that MobileNetV2 uses
        face_input = np.expand_dims(
            face_input, axis=0
        )  # the shape comes out as (224, 224, 3) but the model needs (1, 224, 224, 3), so add a batch dimension

        mask, nomask = model.predict(face_input).squeeze(
        )  # the loaded model's predict method returns the mask / no-mask probabilities

        if mask > nomask:
            color = (0, 255, 0)
            label = 'Mask %d%%' % (mask * 100)
        else:
            color = (0, 0, 255)
            label = 'No Mask %d%%' % (nomask * 100)

        # compute the mask probability and display the result; faces at earlier indices have already been processed and drawn onto the image
Example #26
    # (the start of this snippet is cut off; judging from the usage below,
    # faces_dict is presumably initialized as:)
    faces_dict = {"faces_list": [], "faces_rect": []}

    # Draw a rectangle around the faces
    for rect in faces:
        (x, y, w, h) = rect
        face_frame = frame[y:y + h, x:x + w]
        # preprocess image
        face_frame_prepared = preprocess_face_frame(face_frame)

        faces_dict["faces_list"].append(face_frame_prepared)
        faces_dict["faces_rect"].append(rect)

    if faces_dict["faces_list"]:
        faces_preprocessed = preprocess_input(np.array(faces_dict["faces_list"]))
        preds = model.predict(faces_preprocessed)

        for i, pred in enumerate(preds):
            mask_or_not, confidence = decode_prediction(pred)
            write_bb(mask_or_not, confidence, faces_dict["faces_rect"][i], frame)
        
    # convert the frame to a blob
    (H, W) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
    
    
    # pass the blob through the network to detect and predict
    nn.setInput(blob)
    detections = nn.forward()
        
Example #27
    # filter out weak detections by ensuring the confidence is greater than the minimum confidence
    if confidence > args["confidence"]:
        # compute the (x, y)-coordinates of the bounding box for the object
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")

        # ensure the bounding boxes fall within the dimensions of the frame
        (startX, startY) = (max(0, startX), max(0, startY))
        (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

        # extract the face ROI, convert it from BGR to RGB channel ordering, resize it to 224x224, and preprocess it
        face = image[startY:endY, startX:endX]
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
        face = cv2.resize(face, (224, 224))
        face = img_to_array(face)
        face = preprocess_input(face)
        face = np.expand_dims(face, axis=0)
        # pass the face through the model to determine if the face has a mask or not
        (mask, withoutMask) = model.predict(face)[0]

        # determine the class label and color we'll use to draw the bounding box and text
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        # include the probability in the label
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        print("[INFO] Face Detected: ", label)

        # display the label and bounding box rectangle on the output frame
        cv2.putText(image, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
Example #28
BATCH_SIZE = 32

# LOADING AND PREPROCESSING IMAGES(TRAINING DATA)
print("Loading images ...")
imagePaths = list(paths.list_images(args["dataset"]))
data = []
labels = []

for imagePath in imagePaths:
    label = imagePath.split(
        os.path.sep
    )[-2]  # yields "with_mask" or "without_mask" to associate with the image

    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)
    image = preprocess_input(
        image)  # scaling pixel intensities to range [-1, 1] for convenience

    data.append(image)
    labels.append(label)

# ENCODING LABELS
data = np.array(data, dtype="float32")
labels = np.array(labels)

label_bin = LabelBinarizer()
labels = label_bin.fit_transform(labels)
labels = to_categorical(labels)

#SPLITTING DATASET INTO TRAIN AND TEST SETS
(X_train, X_test, y_train, y_test) = train_test_split(data,
                                                      labels,

CATEGORIES = ["with_mask", "without_mask"]

# Grab the list of images in our data directory, then initialize the
# list of data (i.e., images) and the classes
print("[INFO] loading images...")

data = []
labels = []

for category in CATEGORIES:
    path = os.path.join(DIRECTORY, category)
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)

        data.append(image)
        labels.append(category)

# Perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

data = np.array(data, dtype="float32")
labels = np.array(labels)

(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.20,
Example #30
def get_number_masks(img):
    # load the input image from disk, clone it, and grab the image spatial
    # dimensions
    image = cv2.imread(img)
    orig = image.copy()
    (h, w) = image.shape[:2]
    # construct a blob from the image
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                 (104.0, 177.0, 123.0))
    # pass the blob through the network and obtain the face detections
    print("[INFO] computing face detections...")
    net.setInput(blob)
    detections = net.forward()

    masks = 0
    nonmasks = 0
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > CONFIDENCE:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # ensure the bounding boxes fall within the dimensions of
            # the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = image[startY:endY, startX:endX]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            face = cv2.resize(face, (224, 224))
            face = img_to_array(face)
            face = preprocess_input(face)
            face = np.expand_dims(face, axis=0)
            # pass the face through the model to determine if the face
            # has a mask or not
            (mask, withoutMask) = model.predict(face)[0]

            if mask > withoutMask:
                masks += 1
            else:
                nonmasks += 1

            # determine the class label and color we'll use to draw
            # the bounding box and text
            #label = "Mask" if mask > withoutMask else "No Mask"
            #color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            # include the probability in the label
            #label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
            # display the label and bounding box rectangle on the output
            # frame
            #cv2.putText(image, label, (startX, startY - 10),
            #	cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            #cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)

    return masks, nonmasks
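
# A hypothetical call, assuming `net` and `model` are already loaded as in the
# earlier examples and that the image path exists (both placeholders):
#     masks, nonmasks = get_number_masks('some_image.jpg')
#     print(f'{masks} with mask, {nonmasks} without')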