import argparse
import _pickle as cPickle

import cv2

# construct the argument parser and parse the arguments
# BUG FIX: the parser was never created — every ap.add_argument() call
# previously raised NameError at runtime.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to configuration file")
ap.add_argument("-i", "--image", required=True,
                help="path to the image being classified")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# load the classifier
# BUG FIX: pickle payloads are binary — the file must be opened in "rb"
# mode (text mode decodes/corrupts the bytes), and a context manager
# guarantees the handle is closed.
# NOTE(security): unpickling executes arbitrary code; only load classifier
# files from a trusted source.
with open(conf["classifier_path"], "rb") as f:
    model = cPickle.loads(f.read())

# initialise the HOG descriptor from the configuration values
hog = HOG(orientations=conf["orientations"],
          pixelsPerCell=tuple(conf["pixels_per_cell"]),
          cellsPerBlock=tuple(conf["cells_per_block"]),
          normalize=conf["normalize"])
od = ObjectDetector(model, hog)

# load the image, resize it (capped at 260px wide), and convert to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=min(260, image.shape[1]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect the objects
import cv2
import find_face
import use_model
from tensorflow.keras.models import load_model
from pyimagesearch.notifications.twilionotifier import TwilioNotifier
from pyimagesearch.utils.conf import Conf
import time

# Load runtime configuration and construct the Twilio SMS notifier from it.
conf = Conf('config/config.json')
tn = TwilioNotifier(conf)

# Pre-trained Keras mask classifier loaded from disk.
model = load_model('mask_model.h5')

# Open the default webcam (device index 0).
capture = cv2.VideoCapture(0)

# Main capture loop: grab a frame, display it, look for a face, and if one
# is found, run the mask classifier on the saved face crop.
while True:
    find = False
    ret, frame = capture.read()
    cv2.imshow("VideoFrame", frame)
    # find_face.img_processing is given the frame and a path
    # ('./sv_img/face.jpg') — presumably it saves the detected face crop
    # there and returns whether a face was found; TODO confirm against
    # find_face's implementation.
    find = find_face.img_processing('./sv_img/face.jpg', frame)
    # Any key press (waitKey returns > 0) exits the loop.
    if cv2.waitKey(1) > 0:
        break
    # No face in this frame: skip classification and grab the next frame.
    if not find:
        continue
    # Classify the most recent face crop; pred == 0 appears to mean
    # "no mask" — TODO confirm the label encoding in use_model.predict_mask.
    pred = use_model.predict_mask(model)
    if pred == 0:
        # Record when the no-mask condition started; the inner loop below
        # continues past the end of this chunk (source truncated here).
        start_time = time.time()
        while True:
            ret, frame = capture.read()