Example #1
 def __init__(self):
     '''Initialize the ROS publishers and subscriber.'''
     # topics where we publish the annotated image and the detected faces
     self.image_pub = rospy.Publisher("/face_mask/output/compressed",
                                      CompressedImage, queue_size=1)
     self.face_pub = rospy.Publisher("/face_mask/output/faces", MaskFrame,
                                     queue_size=1)
     # self.bridge = CvBridge()
     # subscribed topic: incoming compressed camera frames
     self.subscriber = rospy.Subscriber("/face_mask/input/compressed",
                                        CompressedImage, self._callback,
                                        queue_size=1)
     # self.model = load_pytorch_model('models/face_mask_detection.pth')
     self.model = load_pytorch_model('/ari_public_ws/src/FaceMaskDetection/models/model360.pth')
     # anchor configuration
     #feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
     feature_map_sizes = [[45, 45], [23, 23], [12, 12], [6, 6], [4, 4]]
     anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
     anchor_ratios = [[1, 0.62, 0.42]] * 5
     
     # generate anchors
     anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)
     
     # For inference the batch size is 1 and the model output shape is
     # [1, N, 4], so expand the anchors to [1, anchor_num, 4] to match.
     self.anchors_exp = np.expand_dims(anchors, axis=0)
     self.id2class = {0: 'Mask', 1: 'NoMask'}
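
The __init__ above belongs to a ROS node class whose surrounding definition is not shown. A minimal sketch of the entry point, assuming a hypothetical class name FaceMaskNode for it:

import rospy

if __name__ == '__main__':
    rospy.init_node('face_mask_detector', anonymous=True)
    node = FaceMaskNode()  # hypothetical name for the class whose __init__ is shown above
    rospy.spin()           # hand control to ROS; _callback runs on each incoming frame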
Example #2
import torch
from load_model.pytorch_loader import load_pytorch_model

model = load_pytorch_model('models/face_mask_detection.pth')

def convert(image):
    # torch.jit.trace records the model's forward pass on the example input
    # and saves it as a TorchScript module loadable from libtorch / C++.
    traced_script_module = torch.jit.trace(model, image)
    traced_script_module.save("models/face_mask_detection_libtorch.pth")


if __name__ == "__main__":
    # trace with a random example input; only the shape matters here
    convert(torch.rand(2, 3, 500, 500))
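
Once saved, the traced module can be reloaded without the original Python model definition. A minimal sketch, assuming the file written above and a detector that returns a (bboxes, scores) pair as in the inference examples below:

import torch

loaded = torch.jit.load("models/face_mask_detection_libtorch.pth")
loaded.eval()
with torch.no_grad():
    y_bboxes, y_cls = loaded(torch.rand(1, 3, 500, 500))  # assumed two-tensor output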

Example #3
# -*- coding:utf-8 -*-
import cv2
import time

import argparse
import numpy as np
from PIL import Image
from utils.anchor_generator import generate_anchors
from utils.anchor_decode import decode_bbox
from utils.nms import single_class_non_max_suppression
from load_model.pytorch_loader import load_pytorch_model, pytorch_inference

# model = load_pytorch_model('models/face_mask_detection.pth')
model = load_pytorch_model('models/model360.pth')
# anchor configuration
#feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
feature_map_sizes = [[45, 45], [23, 23], [12, 12], [6, 6], [4, 4]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45],
                [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5

# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)

# For inference the batch size is 1 and the model output shape is [1, N, 4],
# so we expand the anchors to [1, anchor_num, 4] to match.
anchors_exp = np.expand_dims(anchors, axis=0)

id2class = {0: 'Mask', 1: 'NoMask'}
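
As a sanity check on the [1, N, 4] comment: assuming generate_anchors follows the common SSD convention of len(sizes) + len(ratios) - 1 anchors per feature-map cell (an assumption about this helper; here 2 + 3 - 1 = 4), the anchor count works out as:

cells = sum(h * w for h, w in feature_map_sizes)  # 2025 + 529 + 144 + 36 + 16 = 2750
anchors_per_cell = 4                              # assumed: 2 sizes + 3 ratios - 1
print(cells * anchors_per_cell)                   # 11000, i.e. anchors_exp.shape == (1, 11000, 4)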

Example #4
import cv2
import time
import warnings

warnings.filterwarnings("ignore")

import argparse
import numpy as np
from PIL import Image
from utils.anchor_generator import generate_anchors
from utils.anchor_decode import decode_bbox
from utils.nms import single_class_non_max_suppression
from load_model.pytorch_loader import load_pytorch_model, pytorch_inference

# model = load_pytorch_model('models/face_mask_detection.pth')
model = load_pytorch_model('static/detection/models/model360.pth')
# anchor configuration
#feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
feature_map_sizes = [[45, 45], [23, 23], [12, 12], [6, 6], [4, 4]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45],
                [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5

# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)

# For inference the batch size is 1 and the model output shape is [1, N, 4],
# so we expand the anchors to [1, anchor_num, 4] to match.
anchors_exp = np.expand_dims(anchors, axis=0)

id2class = {0: 'Mask', 1: 'NoMask'}
Example #5
    def mask_recognize(self):
        model = load_pytorch_model(self.model_path)
        # generate anchors
        anchors = generate_anchors(self.feature_map_sizes, self.anchor_sizes,
                                   self.anchor_ratios)
        anchors_exp = np.expand_dims(anchors, axis=0)

        id2class = {0: 'Mask', 1: 'NoMask'}

        def inference(image,
                      conf_thresh=0.5,
                      iou_thresh=0.4,
                      target_shape=(160, 160),
                      draw_result=True,
                      show_result=True):
            output_info = []
            height, width, _ = image.shape
            image_resized = cv2.resize(image, target_shape)
            image_np = image_resized / 255.0  # normalize pixel values to [0, 1]
            image_exp = np.expand_dims(image_np, axis=0)

            # NHWC -> NCHW: PyTorch expects channel-first input
            image_transposed = image_exp.transpose((0, 3, 1, 2))

            y_bboxes_output, y_cls_output = pytorch_inference(
                model, image_transposed)
            # remove the batch dimension; the batch size is always 1 at inference
            y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
            y_cls = y_cls_output[0]
            # For speed, run single-class NMS instead of multi-class NMS.
            bbox_max_scores = np.max(y_cls, axis=1)
            bbox_max_score_classes = np.argmax(y_cls, axis=1)

            # keep_idxs holds the indices of the boxes that survive NMS.
            keep_idxs = single_class_non_max_suppression(
                y_bboxes,
                bbox_max_scores,
                conf_thresh=conf_thresh,
                iou_thresh=iou_thresh,
            )

            for idx in keep_idxs:
                conf = float(bbox_max_scores[idx])
                class_id = bbox_max_score_classes[idx]
                bbox = y_bboxes[idx]
                # clip the coordinates so they stay inside the image boundary
                xmin = max(0, int(bbox[0] * width))
                ymin = max(0, int(bbox[1] * height))
                xmax = min(int(bbox[2] * width), width)
                ymax = min(int(bbox[3] * height), height)

                if draw_result:
                    if class_id == 0:
                        color = (0, 255, 0)
                    else:
                        color = (255, 0, 0)
                    cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
                    cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf),
                                (xmin + 2, ymin - 2), cv2.FONT_HERSHEY_SIMPLEX,
                                0.8, color)
                output_info.append([class_id, conf, xmin, ymin, xmax, ymax])

            if show_result:
                Image.fromarray(image).show()
            return output_info

        cap = cv2.VideoCapture(0)
        if not cap.isOpened():
            raise ValueError("Video open failed.")
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        status = True
        while status:
            status, img_raw = cap.read()
            if not status:
                break
            # OpenCV delivers BGR frames; the model expects RGB
            img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
            inference(img_raw,
                      conf_thresh=0.5,
                      iou_thresh=0.5,
                      target_shape=(360, 360),
                      draw_result=True,
                      show_result=False)
            cv2.imshow('image', img_raw[:, :, ::-1])  # back to BGR for display
            k = cv2.waitKey(20)
            if k == 27:  # Esc key exits
                break
        cv2.destroyAllWindows()
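
mask_recognize reads self.model_path, self.feature_map_sizes, self.anchor_sizes and self.anchor_ratios from its host object, which is not shown. A minimal sketch of such a class, with a hypothetical name and the configuration values used in the examples above; pasting the method above into it makes it runnable:

class MaskRecognizer:  # hypothetical host class for the method above
    def __init__(self, model_path='models/model360.pth'):
        self.model_path = model_path
        self.feature_map_sizes = [[45, 45], [23, 23], [12, 12], [6, 6], [4, 4]]
        self.anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22],
                             [0.32, 0.45], [0.64, 0.72]]
        self.anchor_ratios = [[1, 0.62, 0.42]] * 5

    # mask_recognize, as defined above, would sit here as a method

MaskRecognizer().mask_recognize()  # streams from the webcam until Esc is pressed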