Example #1
# Assumed imports and camera start-up (not shown in the original snippet)
import argparse
import time

import cv2
import imutils

from VideoStream import VideoStream      # assumed local module, as in Example #2
from KeyClipWrite import KeyClipWriter   # assumed local module, as in Example #2

# start the threaded video stream and give the camera sensor time to warm up
vid = VideoStream(src=0).start()
time.sleep(2.0)

output = "AshirTestVid"
fourcc = "MJPG"
fps = 20
i = 0
buffersize = 64

# buffer the last `buffersize` frames; keep writing for 1.0 s after a key event ends
kwc = KeyClipWriter(buffersize, 1.0)
green1=(29,86,6)
green2=(64,255,255)
ap = argparse.ArgumentParser()
#ap.add_argument("-o", "--output", required=True,
	#help="path to output directory")
ap.add_argument("-c", "--codec", type=str, default="MJPG",
	help="codec of output video")
args = vars(ap.parse_args())
frame=vid.read()
frame=imutils.resize(frame,width=600)
kwc.update(frame)

while True:
    
    frame=vid.read()
    
    updateConsecFrame=True
    frame=imutils.resize(frame,width=600)
    blur=cv2.GaussianBlur(frame,(11,11),0)
    
    # convert to HSV so the green bounds defined above are applied in the intended color space
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, green1, green2)
    mask=cv2.erode(mask,None,iterations=2)
    mask=cv2.dilate(mask,None,iterations=2)
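    # Assumed continuation (the original snippet stops here): a minimal sketch of how
    # the mask typically drives the KeyClipWriter -- find contours, start a clip when
    # the green object appears, and keep the rolling buffer updated. The output path
    # pattern, the `recording` attribute and `vid.stop()` are assumptions based on a
    # PyImageSearch-style KeyClipWriter.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    if len(cnts) > 0 and not kwc.recording:
        # start a new clip named after the counter `i`
        kwc.start("{}_{}.avi".format(output, i),
                  cv2.VideoWriter_fourcc(*fourcc), fps)
        i += 1

    # push the current frame into the rolling buffer on every iteration
    kwc.update(frame)

    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# flush any clip that is still being written, then stop the stream
if kwc.recording:
    kwc.finish()
vid.stop()
cv2.destroyAllWindows()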
Example #2
import cv2
import imutils
import numpy as np

from VideoStream import VideoStream
from KeyClipWrite import KeyClipWriter
outputfile = "Ash"
write = None
vs = VideoStream(src=0).start()
fourcc = "MJPG"
fps = 20
(h, w) = (None, None)
#kcw=KeyClipWriter(32,1.0)
#frame=vs.read()
#frame=imutils.resize(frame,width=600)

#kcw.update(frame)
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=300)

    if write is None:
        out = "{}.avi".format(outputfile)
        (h, w) = frame.shape[:2]
        write = cv2.VideoWriter(out, cv2.VideoWriter_fourcc(*fourcc), fps,
                                (w * 2, h * 2), True)

    zeros = np.zeros(frame.shape[:2], dtype="uint8")
    (B, G, R) = cv2.split(frame)
    R = cv2.merge([zeros, zeros, R])
    G = cv2.merge([zeros, G, zeros])
    B = cv2.merge([B, zeros, zeros])
    #R=(zeros,zeros,R)
    #G=(zeros,G,zeros)
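    # Assumed continuation (the original snippet stops here): stitch the original
    # frame and the three single-channel views into a 2x2 montage -- matching the
    # (w * 2, h * 2) size the VideoWriter above was created with -- and write it.
    # The window name and the stop() call on the local VideoStream are assumptions.
    montage = np.zeros((h * 2, w * 2, 3), dtype="uint8")
    montage[0:h, 0:w] = frame
    montage[0:h, w:w * 2] = R
    montage[h:h * 2, w:w * 2] = G
    montage[h:h * 2, 0:w] = B

    write.write(montage)

    cv2.imshow("Output", montage)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# release the writer and the stream when the loop ends
write.release()
vs.stop()
cv2.destroyAllWindows()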
Example #3
import argparse
import time

import cv2
import imutils
from imutils.video import VideoStream  # assumed import; provides the usePiCamera option used below

from Stepper import Stepper

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# Create a VideoCapture object, specifying which camera to use for capturing
# the video via the input parameter 0.
cap = cv2.VideoCapture(0)

while True:

        frame = vs.read()
        frame = imutils.resize(frame, width=400)
	
        # Converting captured frame to monochrome
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Blurring the image using the GaussianBlur() method of the opencv object
        blur = cv2.GaussianBlur(gray, (9, 9), 0)

        # Using an opencv method to identify the threshold intensities and locations
        (darkest_value, brightest_value, darkest_loc, brightest_loc) = cv2.minMaxLoc(blur)

        # Threshold the blurred frame. The first argument is the source (grayscale)
        # image, the second is the threshold value used to classify pixel values, and
        # the third is maxVal, the value assigned to pixels that pass the threshold
        # (or, for inverted threshold types, fall below it).
        out, threshold = cv2.threshold(blur, brightest_value - 10, 230, cv2.THRESH_BINARY)
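        # Assumed continuation (the original snippet stops here): mark the brightest
        # region and display both views; the radius, colors and window names are
        # illustrative only.
        cv2.circle(frame, brightest_loc, 20, (0, 0, 255), 2)

        cv2.imshow("Frame", frame)
        cv2.imshow("Threshold", threshold)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

cv2.destroyAllWindows()
vs.stop()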
Example #4
class ONNXRuntimeModelDeploy(ObjectDetection, ImageClassification):
    """Object Detection class for ONNX Runtime
    """
    def __init__(self,
                 manifest_dir,
                 cam_type="video_file",
                 cam_source="/sample_video/video.mp4",
                 tu_flag_=False):
        # Default system params
        #ToDo make this twin property
        self.render = False

        # Application parameters
        self.img_width = 0
        self.img_height = 0
        self.cap_handle = None
        self.vs = None
        self.session = None

        self.cam_type = cam_type
        self.cam_source = cam_source
        self.video_handle = None
        self.twin_update_flag = tu_flag_
        self.m_parser(manifest_dir)

    def m_parser(self, model_dir):

        m_file = open(model_dir + str("/cvexport.manifest"))
        data = json.load(m_file)

        # cvexport manifest parameters
        self.domain_type = str(data["DomainType"])
        print("Domain Type:", self.domain_type)

        # default anchors
        if str(self.domain_type) == "ObjectDetection":
            objdet = ObjectDetection(data, model_dir, None)
            ret = self.model_inference(objdet, iot_hub_manager, 1)
        elif str(self.domain_type) == "Classification":
            imgcls = ImageClassification(data, model_dir)
            ret = self.model_inference(imgcls, iot_hub_manager, 0)
        else:
            print(
                "Error: No matching DomainType: Object Detection/Image Classificaiton \n"
            )
            print("Exiting.....!!!! \n")
            sys.exit(0)
        if ret == 1:
            print("App finished running Inference...Exiting...!!!")
            sys.exit(1)

    #def predict(self, preprocessed_image):
    #    inputs = np.array(preprocessed_image, dtype=np.float32)[np.newaxis,:,:,(2,1,0)] # RGB -> BGR
    #    inputs = np.ascontiguousarray(np.rollaxis(inputs, 3, 1))
    #    start = time.time()
    #    outputs = self.session.run(None, {self.input_name: inputs})
    #    end = time.time()
    #    inference_time = end - start
    #    return np.squeeze(outputs).transpose((1,2,0)), inference_time

    def create_video_handle(self,
                            cam_type="video_file",
                            cam_source="/sample_video/video.mp4"):
        global stream_handle
        print("cam_source:: " + cam_source + " cam_type :: " + cam_type)
        if cam_type == "video_file":
            video_dir = "sample_video"
            # By default video file name should be video.mp4/avi
            if os.path.exists(str(video_dir) + "/video.mp4"):
                #if cam_source:
                self.video_handle = str(str(video_dir) + "/video.mp4")
            elif os.path.exists(str(video_dir) + "/video.avi"):
                #if cam_source:
                self.video_handle = str(str(video_dir) + "/video.avi")
            else:
                print("\n ERROR: Camera source Not Found...!!!")
                print("\n Exiting inference...")
                sys.exit(0)
        elif cam_type == "rtsp_stream":
            if cam_source:
                self.video_handle = str(cam_source)
                print("settin cam_source to value :: " + cam_source)
            else:
                print("\n ERROR: Camera source Not Found...!!!")
                print("\n Exiting inference...")
                sys.exit(0)
        else:
            web_cam_found = False
            for i in range(4):
                if os.path.exists("/dev/video" + str(i)):
                    web_cam_found = True
                    break

            if web_cam_found:
                self.video_handle = "/dev/video" + str(i)
            else:
                print("\n Error: Input Camera device not found/detected")
                print("\n Exiting inference...")
                sys.exit(0)

        self.vs = VideoStream(self.video_handle).start()

        # Read the frame width and height
        self.img_width = int(self.vs.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.img_height = int(self.vs.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
        stream_handle = True

    def model_inference(self, obj, iot_hub_manager, pp_flag):
        global stream_handle
        last_time = time.time()
        # The default video source is a USB camera at /dev/video0.
        # It can be changed to video_file via module twin updates.
        # ***** Requirements for passing a video_file *****
        # The video file must have a .mp4/.avi extension and be named "video", e.g. video.mp4/avi
        # The video file should be provided as a URL link to a .zip folder
        self.create_video_handle(cam_type=self.cam_type,
                                 cam_source=self.cam_source)

        while self.vs.stream.isOpened():
            if iot_hub_manager.setRestartCamera == True:
                iot_hub_manager.setRestartCamera = False

                if iot_hub_manager.model_url == None:
                    model_folder = "./default_model"
                else:
                    model_folder = iot_hub_manager.model_dst_folder

                #self.cap_handle.release()
                obj.session = None
                #RunOptions.terminate = True
                self.vs.stream.release()
                if (self.render):
                    cv2.destroyAllWindows()

                if os.path.exists(
                        str(model_folder) + str('/cvexport.manifest')):
                    print("\n Reading cvexport.config file from model folder")
                    config_file_dir = str(model_folder)
                    #self.create_video_handle(iot_hub_manager.cam_type, iot_hub_manager.cam_source)
                    self.__init__(config_file_dir, iot_hub_manager.cam_type,
                                  iot_hub_manager.cam_source, True)
                elif os.path.exists("./default_model/cvexport.manifest"):
                    config_file_dir = "./default_model"
                    print(
                        "\n Reading cvexport.manifest file from default_model folder"
                    )
                    #self.create_video_handle(iot_hub_manager.cam_type, iot_hub_manager.cam_source)
                    self.__init__(config_file_dir, iot_hub_manager.cam_type,
                                  iot_hub_manager.cam_source, True)
                else:
                    print(
                        "\n ERROR: cvexport.manifest not found check root/model dir"
                    )
                    print("\n Exiting inference....")
                    sys.exit(0)
                #iot_hub_manager.setRestartCamera = False

            # Capture frame-by-frame
            frame = self.vs.read()
            print(frame)
            if self.twin_update_flag:
                predictions, infer_time = obj.predict_image(frame)
                print(pp_flag)
                # if Object Detection
                if pp_flag:
                    for d in predictions:
                        x = int(d['boundingBox']['left'] * self.img_width)
                        y = int(d['boundingBox']['top'] * self.img_height)
                        w = int(d['boundingBox']['width'] * self.img_width)
                        h = int(d['boundingBox']['height'] * self.img_height)

                        x_end = x + w
                        y_end = y + h

                        start = (x, y)
                        end = (x_end, y_end)
                        out_label = str(d['tagName'])
                        score = str(int(d['probability'] * 100))
                        print("Found label " + out_label +
                              " with probability :: " + score)
                        if 0.50 < d['probability']:
                            frame = cv2.rectangle(frame, start, end,
                                                  (100, 255, 100), 2)

                            out_label = str(d['tagName'])
                            score = str(int(d['probability'] * 100))
                            cv2.putText(frame, out_label, (x - 5, y),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (255, 255, 255), 2)
                            #cv2.putText(frame, score, (x+w-50, y), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 2)

                            message = {
                                "Label": out_label,
                                "Confidence": score,
                                "Position": [x, y, x_end, y_end],
                                "TimeStamp": datetime.datetime.utcnow().strftime(
                                    "%Y-%m-%d %H:%M:%S")
                            }

                            # Send message to IoT Hub
                            if iot_hub_manager is not None:
                                last_time = iot_hub_manager.send_message_to_upstream(
                                    json.dumps(message), last_time)

                else:  #Postprocessing for Classificaton model
                    res = obj.postprocess(predictions)
                    idx = np.argmax(res)

                    frame = cv2.putText(frame, obj.labels[idx], (15, 15),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (0, 255, 0), 2)

                    message = {
                        "Label": obj.labels[idx],
                        "TimeStamp": datetime.datetime.utcnow().strftime(
                            "%Y-%m-%d %H:%M:%S")
                    }

                    # Send message to IoT Hub
                    # ToDo send to module
                    # ToDo set the frequency from module twin
                    if iot_hub_manager is not None:
                        last_time = iot_hub_manager.send_message_to_upstream(
                            json.dumps(message), last_time)

                cv2.putText(frame, 'FPS: {}'.format(1.0 / infer_time),
                            (10, 40), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                            (255, 0, 255), 1)

            if self.render:
                # Displaying the image
                cv2.imshow("Inference results", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        cv2.destroyAllWindows()
        # when everything done, release the capture
        self.vs.__exit__(None, None, None)
        return True
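# A minimal usage sketch (assumed entry point). The model folder, camera values and
# `tu_flag_` below are illustrative; `iot_hub_manager` must already exist as a
# module-level object because m_parser() passes it to model_inference().
#
#   deploy = ONNXRuntimeModelDeploy("./default_model",
#                                   cam_type="usb_cam",
#                                   cam_source="/dev/video0",
#                                   tu_flag_=True)
#
# The constructor parses cvexport.manifest and immediately runs inference, so it
# blocks until the stream ends or a model/camera restart is requested via the twin.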
Example #5
class VideoCapture(object):
    def __init__(self,
                 videoPath="",
                 verbose=True,
                 displayW=1920,
                 displayH=1080,
                 fontScale=1.0,
                 inference=True,
                 confidenceLevel=0.5):

        self.verbose = verbose
        self._debug = False

        self.videoPath = videoPath
        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown
        self.videoStream = None

        self._videoReadyEvent = Event()

        self._capture_in_progress = False

        # Display Resolution
        # Will try to set camera's resolution to the specified resolution
        self._displayW = displayW
        self._displayH = displayH

        self._cameraW = 0
        self._cameraH = 0

        # Camera's FPS
        self._cameraFPS = 30

        # Font Scale for putText
        self._fontScale = float(fontScale)

        # turn inference on/off
        self.runInference = inference

        # confidence level threshold
        self.confidenceLevel = confidenceLevel

        # various frame data

        # frame data for UI
        self.displayFrame = None

        # wallpapers for UI
        self._frame_wp_init_system = cv2.imread(
            "./www/WP-InitializingSystem.png")
        self._frame_wp_no_video = cv2.imread("./www/WP-NoVideoData.png")
        self._frame_wp_init_iothub = cv2.imread(
            "./www/WP-InitializeIotHub.png")

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        logging.info(
            '===============================================================')
        logging.info(
            'Initializing Video Capture with the following parameters:')
        logging.info('   - OpenCV Version     : {}'.format(cv2.__version__))
        logging.info('   - Video path         : {}'.format(self.videoPath))
        logging.info('   - Display Resolution : {} x {}'.format(
            self._displayW, self._displayH))
        logging.info('   - Font Scale         : {}'.format(self._fontScale))
        logging.info('   - Inference?         : {}'.format(self.runInference))
        logging.info('   - ConfidenceLevel    : {}'.format(
            self.confidenceLevel))
        logging.info(
            '===============================================================')

        # set wallpaper
        self.set_Wallpaper(self._frame_wp_init_system)

        # set FPS
        self.fps = FPS()

        self.imageStreamHandler = None

        # Start Web Server for View
        self.imageServer = ImageServer(80, self)
        self.imageServer.start()

        # Set Video Source
        self.set_Video_Source(self.videoPath)

        self.set_Wallpaper(cv2.imread("./www/WP-InitializeAIEngine.png"))
        # logging.info('Yolo Inference Initializing\r\n')
        self.yoloInference = YoloInference(self._fontScale, sendMessage=False)
        # logging.info('Yolo Inference Initialized\r\n')

    def __enter__(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # self.set_Video_Source(self.videoPath)

        return self

    def videoStreamReadTimeoutHandler(self, signum, frame):
        raise Exception("VideoStream Read Timeout")

    #
    # Video Source Management
    #
    def _set_Video_Source_Type(self, videoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        self._reset_Video_Source()

        if '/dev/video' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Hololens

        if self.verbose:
            logging.info('<< ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(self._videoSourceType))

    def _get_Video_Source_Type(self, videoPath):

        videoType = CaptureDevice.Unknown

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        if '/dev/video' in videoPath.lower():
            videoType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            videoType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            videoType = CaptureDevice.Hololens

        return videoType

    #
    # Resets video capture/stream settings
    #
    def _reset_Video_Source(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        if self.videoStream:
            self.videoStream.stop()
        #    self.videoStream.close()
        #     self.videoStream = None

        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown

    def set_Video_Source(self, newVideoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        retVal = False
        realVideoPath = newVideoPath

        if self.videoPath == newVideoPath and self._videoSourceState == CaptureDeviceState.Running:
            return True

        if self.imageStreamHandler != None:
            statusMsg = '{{\"DeviceStatus\":\"Connecting to {}\",\"isSuccess\":{}}}'.format(
                self._remove_credential(newVideoPath), 1)
            self.imageStreamHandler.submit_write(statusMsg)

        self._videoSourceState = CaptureDeviceState.Stop

        if self._capture_in_progress:
            # wait for queue to drain and loop to exit
            time.sleep(1.0)

        self._capture_in_progress = False

        self._set_Video_Source_Type(realVideoPath)

        if self._videoSourceType == CaptureDevice.Unknown:
            self._videoSourceState = CaptureDeviceState.ErrorNotSupported
            logging.error('>> ' + self.__class__.__name__ + "." +
                          sys._getframe().f_code.co_name +
                          '() : Unsupported Video Source {}'.format(
                              self._videoSourceType))
        else:
            self._videoSourceState = CaptureDeviceState.Init

            if self._videoSourceType == CaptureDevice.Hololens:
                strHololens = realVideoPath.split('?')
                # disable audio
                realVideoPath = '{}?holo=true&pv=true&mic=false&loopback=false'.format(
                    strHololens[0])

            self.videoStream = VideoStream(videoCapture=self,
                                           path=realVideoPath)

            fps_override = 30

            if not self.videoStream.videoCapture == None:

                # get resolution
                cameraH1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                cameraW1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                cameraFPS1 = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                if self._videoSourceType == CaptureDevice.Webcam:

                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Rtsp:

                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Hololens:

                    holo_w = 1280
                    holo_h = 720

                    if 'live_med.mp4' in realVideoPath:
                        holo_w = 854
                        holo_h = 480
                    elif 'live_low.mp4' in realVideoPath:
                        holo_w = 428
                        holo_h = 240
                        fps_override = 15

                    self.videoStream.videoCapture.set(
                        cv2.CAP_PROP_FRAME_HEIGHT, holo_h)
                    self.videoStream.videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH,
                                                      holo_w)

                self.videoStream.videoCapture.set(cv2.CAP_PROP_FPS,
                                                  fps_override)

                self._cameraH = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                self._cameraW = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                self._cameraFPS = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                logging.info(
                    '==============================================================='
                )
                logging.info(
                    'Setting Video Capture with the following parameters:')
                logging.info('   - Video Source Type  : {}'.format(
                    self._videoSourceType))
                logging.info('   - Display Resolution : {} x {}'.format(
                    self._displayW, self._displayH))
                logging.info('   Original             : {} x {} @ {}'.format(
                    cameraW1, cameraH1, cameraFPS1))
                logging.info('   New                  : {} x {} @ {}'.format(
                    self._cameraW, self._cameraH, self._cameraFPS))
                logging.info(
                    '==============================================================='
                )

                if self.videoStream.start():
                    self._videoSourceState = CaptureDeviceState.Running
                    retVal = True
                else:
                    self._videoSourceState = CaptureDeviceState.ErrorRead
            else:

                if self._videoSourceType == CaptureDevice.Hololens or self._videoSourceType == CaptureDevice.Rtsp:
                    url_parsed = urlparse(realVideoPath)

                    if url_parsed.password != None or url_parsed.username != None:
                        url_parsed = url_parsed._replace(
                            netloc="{}".format(url_parsed.hostname))

                    ipAddress = url_parsed.netloc

                    ping_ret = subprocess.call(
                        ['ping', '-c', '5', '-W', '3', ipAddress],
                        stdout=open(os.devnull, 'w'),
                        stderr=open(os.devnull, 'w'))

                    if ping_ret == 0:
                        self._videoSourceState = CaptureDeviceState.ErrorOpen
                    else:
                        self._videoSourceState = CaptureDeviceState.ErrorNoNetwork

                logging.error('>> ' + self.__class__.__name__ + "." +
                              sys._getframe().f_code.co_name +
                              '() : Failed to open Video Capture')

        self.videoPath = realVideoPath

        if retVal == False:
            self.set_Wallpaper(self._frame_wp_no_video)
        else:
            self._videoReadyEvent.set()

        self.sendCurrentVideoPath(realVideoPath)

        return retVal

    def get_display_frame(self):
        return self.displayFrame

    def set_status(self, device_status):
        self._videoSourceState = device_status

        if self._videoSourceState != CaptureDeviceState.Running:
            self.sendCurrentVideoPath("")

    def sendCurrentVideoPath(self, videoPath):

        if videoPath == "":
            video_path = self._remove_credential(self.videoPath)
        else:
            video_path = self._remove_credential(videoPath)

        logging.info('>> Current Video Status {}'.format(
            self._videoSourceState))

        if self.imageStreamHandler != None:
            if self._videoSourceState == CaptureDeviceState.Running:
                strUserName = ""
                strPassword = ""

                videoType = self._get_Video_Source_Type(videoPath)

                if videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
                    url_parsed = urlparse(videoPath)

                    if url_parsed.password != None:
                        strPassword = url_parsed.password
                    if url_parsed.username != None:
                        strUserName = url_parsed.username

                statusMsg = '{{\"DevicePath\":\"{}\",\"isSuccess\":{},\"UserName\":\"{}\",\"Password\":\"{}\"}}'.format(
                    video_path, 1, strUserName, strPassword)
            else:
                statusMsg = '{{\"DeviceStatus\":\"Error ({}): {}\",\"isSuccess\":{},\"UserName\":\"\",\"Password\":\"\"}}'.format(
                    self._videoSourceState, video_path, 0)
            self.imageStreamHandler.submit_write(statusMsg)

    def setVideoPathFromUI(self, json_Data):

        videoPath = ""
        json_Data = json.loads(json_Data)
        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name +
                     '() : {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["UserName"]))
        logging.info('>> {}'.format(json_Data["Password"]))

        videoType = self._get_Video_Source_Type(json_Data["VideoPath"])

        if videoType == CaptureDevice.Webcam:
            videoPath = json_Data["VideoPath"].strip()
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
            url_parsed = urlparse(json_Data["VideoPath"].strip())

            if '@' in url_parsed.netloc or len(json_Data["UserName"]) == 0:
                # already contains password or user name not specified
                videoPath = json_Data["VideoPath"]
            else:
                url_parsed = url_parsed._replace(netloc='{}:{}@{}'.format(
                    json_Data["UserName"], json_Data["Password"],
                    url_parsed.netloc))
                videoPath = url_parsed.geturl()

        self.set_Video_Source(videoPath)

    def _remove_credential(self, videoPath):

        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name + '()')

        ret_Path = ""
        videoType = self._get_Video_Source_Type(videoPath)

        if videoType == CaptureDevice.Webcam:
            ret_Path = videoPath
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:

            url_parsed = urlparse(videoPath)

            if url_parsed.password != None or url_parsed.username != None:
                url_parsed = url_parsed._replace(
                    netloc="{}".format(url_parsed.hostname))

            ret_Path = url_parsed.geturl()

        return ret_Path
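    # Illustrative behaviour (hypothetical URL): an RTSP path such as
    #   rtsp://admin:secret@192.168.0.10:554/stream
    # is returned as
    #   rtsp://192.168.0.10/stream
    # Note that replacing netloc with hostname drops the port as well as the credentials.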

    def set_Wallpaper(self, image):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        self.displayFrame = cv2.imencode('.jpg', image)[1].tobytes()

    def start(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        while True:
            if self._videoSourceState == CaptureDeviceState.Running:
                self._capture_in_progress = True
                self.__Run__()
                self._capture_in_progress = False
            else:

                if self._videoSourceState == CaptureDeviceState.ErrorOpen or self._videoSourceState == CaptureDeviceState.ErrorRead:
                    self.set_Wallpaper(self._frame_wp_no_video)

                if self._videoSourceType == CaptureDevice.Unknown:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Unknown Device')
                    time.sleep(1.0)
                else:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Device Not Running')
                    # time.sleep(1.0)
                    logging.info('>> Video Ready Event Enter ---------------')
                    self._videoReadyEvent.wait()
                    logging.info('<< Video Ready Event Exit  ---------------')
                    self._videoReadyEvent.clear()

    def __Run__(self):

        if self.verbose:
            logging.info(
                '==============================================================='
            )
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # Check camera's FPS
        if self._cameraFPS == 0:
            logging.error('Error : Could not read FPS')
            # raise Exception("Unable to acquire FPS for Video Source")
            return

        logging.info('>> Frame rate (FPS)     : {}'.format(self._cameraFPS))
        logging.info('>> Run Inference {}'.format(self.runInference))

        perFrameTimeInMs = 1000 / self._cameraFPS

        self.fps.start()
        self.fps.reset()

        while True:

            # Get current time before we capture a frame
            tFrameStart = time.time()
            frame = np.array([])
            captureRet = False

            if not self._videoSourceState == CaptureDeviceState.Running:
                break

            captureRet, frame = self.videoStream.read()

            if captureRet == False:
                self._videoSourceState = CaptureDeviceState.ErrorRead
                logging.error("ERROR : Failed to read from video source")
                break

            if frame.size > 0:

                # Run Object Detection
                if self.runInference:
                    self.yoloInference.runInference(frame, self._cameraW,
                                                    self._cameraH,
                                                    self.confidenceLevel)

                # Calculate FPS
                currentFPS = self.fps.fps()

                if (currentFPS > self._cameraFPS):
                    # Cannot go faster than Camera's FPS
                    currentFPS = self._cameraFPS

                # Add FPS Text to the frame
                cv2.putText(frame, "FPS " + str(currentFPS),
                            (10, int(30 * self._fontScale)),
                            cv2.FONT_HERSHEY_SIMPLEX, self._fontScale,
                            (0, 0, 255), 2)

                self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()

            timeElapsedInMs = (time.time() - tFrameStart) * 1000

            if perFrameTimeInMs > timeElapsedInMs:
                # This is faster than image source (e.g. camera) can feed.
                waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs
                time.sleep(waitTimeBetweenFrames / 1000.0)

    def __exit__(self, exception_type, exception_value, traceback):

        self.imageServer.close()
        cv2.destroyAllWindows()
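# A minimal usage sketch (assumed entry point; the camera path and resolution are
# illustrative). start() blocks, while ImageServer serves the JPEG frames on port 80.
#
#   with VideoCapture(videoPath="/dev/video0", displayW=1280, displayH=720) as cap:
#       cap.start()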
Example #6
# Assumed imports for this snippet (not shown in the original)
import time

import numpy as np
from PIL import Image
from imutils.video import VideoStream  # assumed import; supports the usePiCamera flag below

# `spots` (a list of parking-spot corner coordinates) is assumed to be defined
# earlier in the original script.
number_of_spots = len(spots)
print("\nNumber of parking spots:", number_of_spots)

# Select camera
vs = VideoStream(usePiCamera=True, resolution=(1024, 768))
time.sleep(2)
vs = vs.start()
time.sleep(1)

# Capture webcam stream and classify the parking spots
while True:
    try:
        # Capture frame-by-frame
        im = vs.read()
        # im = getVideo()

        shape = im.shape
        im = Image.fromarray(im)
        # Needs to be set to image dimensions
        im = im.resize((shape[1], shape[0]))
        im = np.array(im)

        # Cut all spots out of the big image and store in images
        images = []

        for i, s in enumerate(spots):
            try:
                im_ = Image.fromarray(im[s[0][1]:s[1][1], s[0][0]:s[1][0]])
                # Resize to input size of CNN
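                # Assumed continuation (the original snippet is truncated here); the
                # CNN input size below is a placeholder value.
                im_ = im_.resize((64, 64))
                images.append(np.array(im_))
            except Exception as exc:
                print("Could not extract spot", i, ":", exc)

    except KeyboardInterrupt:
        # assumed handler for the outer try: stop the stream cleanly and exit
        vs.stop()
        break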
Example #7
class CameraCapture(object):

    def __IsInt(self, string):
        try:
            int(string)
            return True
        except ValueError:
            return False

    def __localize_text(self, key):
        value = None
        if self.speech_map is not None:
            result = list(
                filter(lambda text: text['key'] == key, self.speech_map))
            if len(result) > 0:
                value = result[0]['value']
        return value

    def __init__(
            self,
            videoPath,
            azureSpeechServiceKey,
            predictThreshold,
            imageProcessingEndpoint,
            sendToHubCallback,
            speechMapFileName
    ):
        self.videoPath = videoPath

        self.predictThreshold = predictThreshold
        self.imageProcessingEndpoint = imageProcessingEndpoint
        self.imageProcessingParams = ""
        self.sendToHubCallback = sendToHubCallback


        if self.__IsInt(videoPath):
            # case of a usb camera (usually mounted at /dev/video* where * is an int)
            self.isWebcam = True

        self.vs = None

        self.speech_map = None
        self.speech_voice = 'en-AU-Catherine'

        self.speech_map_filename = speechMapFileName

        if speechMapFileName is not None and os.path.isfile(self.speech_map_filename):
            with open(self.speech_map_filename, encoding='utf-8') as f:
                json_data = json.load(f)
                self.speech_voice = json_data.get('voice')
                self.speech_map = json_data.get('map')

        self.tts = text2speech.TextToSpeech(
            azureSpeechServiceKey, enableMemCache=True, enableDiskCache=True, voice=self.speech_voice)
        
        text = self.__localize_text('Starting scanner')
        self.tts.play('Starting scanner' if text is None else text)


    def __buildSentence(self, tag):
        vowels = ('a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U')
        sentence = 'You scanned '
        if tag.startswith(vowels):
            sentence = sentence + 'an '
        else:
            sentence = sentence + 'a '
        return sentence + tag
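    # Illustrative behaviour (hypothetical tags):
    #   __buildSentence("apple")  -> "You scanned an apple"
    #   __buildSentence("pencil") -> "You scanned a pencil"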

    def __sendFrameForProcessing(self, frame):
        global count, lastTagSpoken
        count = count + 1
        print("sending frame to model: " + str(count))

        headers = {'Content-Type': 'application/octet-stream'}

        retry = 0
        while retry < maxRetry:
            try:
                response = requests.post(self.imageProcessingEndpoint, headers=headers,
                                         params=self.imageProcessingParams, data=frame)
                break
            except:
                retry = retry + 1
                print(
                    'Image Classification REST Endpoint - Retry attempt # ' + str(retry))
                time.sleep(retry)

        if retry >= maxRetry:
            print("retry inference")
            return []

        predictions = response.json()['predictions']
        sortResponse = sorted(
            predictions, key=lambda k: k['probability'], reverse=True)[0]
        probability = sortResponse['probability']

        print("label: {}, probability {}".format(
            sortResponse['tagName'], sortResponse['probability']))

        if sortResponse['tagName'] == 'Hand':
            lastTagSpoken = sortResponse['tagName']
            return []

        if probability > self.predictThreshold and sortResponse['tagName'] != lastTagSpoken:
            lastTagSpoken = sortResponse['tagName']
            print('text to speech ' + lastTagSpoken)

            text = self.__localize_text(lastTagSpoken)
            self.tts.play(self.__buildSentence(lastTagSpoken) if text is None else text)

            return json.dumps(predictions)
        else:
            return []

    def __displayTimeDifferenceInMs(self, endTime, startTime):
        return str(int((endTime-startTime) * 1000)) + " ms"

    def __enter__(self):
        self.vs = VideoStream(int(self.videoPath)).start()
        # needed to load at least one frame into the VideoStream class
        time.sleep(1.0)

        return self

    def start(self):

        frameCounter = 0
        while True:
            frameCounter += 1
            frame = self.vs.read()

            if self.imageProcessingEndpoint != "":

                encodedFrame = cv2.imencode(".jpg", frame)[1].tobytes()
                try:
                    response = self.__sendFrameForProcessing(encodedFrame)
                    # print(response)
                    # forwarding outcome of external processing to the EdgeHub
                    if response != "[]" and self.sendToHubCallback is not None:
                        try:
                            self.sendToHubCallback(response)
                        except:
                            print(
                                'Issue sending telemetry')
                except:
                    print('connectivity issue')

            # slow things down a bit - 4 frames a second is fine for demo purposes, reduces battery drain, and keeps the Raspberry Pi CPU temperature lower
            time.sleep(0.25)

    def __exit__(self, exception_type, exception_value, traceback):
        pass
Example #8
# Assumed setup for this snippet (not shown in the original): imports, the model
# input size, and the initial frame-rate values used further down.
import time

import cv2
import numpy as np
from imutils.video import VideoStream  # assumed import path

imW, imH = 1280, 720      # camera resolution (assumed)
width, height = 300, 300  # detector input size (assumed)
frame_rate_calc = 1
freq = cv2.getTickFrequency()

# Initialize video stream
videostream = VideoStream(resolution=(imW, imH), framerate=30).start()
time.sleep(1)

# Create window
cv2.namedWindow('Pi Cam Test', cv2.WINDOW_NORMAL)

#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:

    # Start timer (for calculating frame rate)
    t1 = cv2.getTickCount()

    # Grab frame from video stream
    frame1 = videostream.read()

    # Acquire frame and resize to expected shape [1xHxWx3]
    frame = frame1.copy()
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame_resized = cv2.resize(frame_rgb, (width, height))
    input_data = np.expand_dims(frame_resized, axis=0)

    # Draw framerate in corner of frame
    cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc), (30, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)

    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Pi Cam Test', frame)

    # Calculate framerate
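    # Assumed continuation (the original snippet is truncated here), mirroring the
    # frame-rate calculation in Example #9.
    t2 = cv2.getTickCount()
    time1 = (t2 - t1) / freq
    frame_rate_calc = 1 / time1

    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break

# Clean up
cv2.destroyAllWindows()
videostream.stop()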
Example #9
class InitialiseVideoStream():
    t = None

    def __init__(self):
        self.videostream = None
        self.lista = []

    #def start(self):
    # Start the thread that reads frames from the video stream
    #Thread(target=self.compute(), args=()).start()
    #return self

    def stream(self):

        parameters = Parameters()
        model = Model(parameters.PATH_TO_CKPT)

        # Initialize frame rate calculation
        frame_rate_calc = 1
        freq = cv2.getTickFrequency()

        # Initialize video stream
        self.videostream = VideoStream(resolution=(parameters.imW,
                                                   parameters.imH),
                                       framerate=30)

        self.videostream.start()

        time.sleep(1)

        # for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
        while True:

            # Start timer (for calculating frame rate)
            t1 = cv2.getTickCount()

            # Grab frame from video stream
            frame1 = self.videostream.read()

            # Acquire frame and resize to expected shape [1xHxWx3]
            frame = frame1.copy()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb, (model.width, model.height))
            input_data = np.expand_dims(frame_resized, axis=0)

            # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
            if model.floating_model:
                input_data = (np.float32(input_data) -
                              model.input_mean) / model.input_std

            # Perform the actual detection by running the model with the image as input
            model.interpreter.set_tensor(model.input_details[0]['index'],
                                         input_data)
            model.interpreter.invoke()

            # Retrieve detection results
            boxes = model.interpreter.get_tensor(
                model.output_details[0]['index'])[
                    0]  # Bounding box coordinates of detected objects
            classes = model.interpreter.get_tensor(
                model.output_details[1]['index'])[
                    0]  # Class index of detected objects
            scores = model.interpreter.get_tensor(
                model.output_details[2]['index'])[
                    0]  # Confidence of detected objects
            # num = interpreter.get_tensor(output_details[3]['index'])[0]  # Total number of detected objects (inaccurate and not needed)

            # Loop over all detections and draw detection box if confidence is above minimum threshold
            for i in range(len(scores)):
                if ((scores[i] > parameters.min_conf_threshold)
                        and (scores[i] <= 1.0)):
                    # Get bounding box coordinates and draw box
                    # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
                    ymin = int(max(1, (boxes[i][0] * parameters.imH)))
                    xmin = int(max(1, (boxes[i][1] * parameters.imW)))
                    ymax = int(
                        min(parameters.imH, (boxes[i][2] * parameters.imH)))
                    xmax = int(
                        min(parameters.imW, (boxes[i][3] * parameters.imW)))

                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (10, 255, 0), 2)

                    if len(self.lista) < 2:
                        self.lista.append((xmin + xmax) / 2)

                    # with serial.Serial('COM4', 9600) as ser:
                    #     print(ser.read())
                    #     ser.close()

                    # Draw label
                    object_name = parameters.labels[int(
                        classes[i]
                    )]  # Look up object name from "labels" array using class index
                    label = '%s: %d%%' % (object_name, int(scores[i] * 100)
                                          )  # Example: 'person: 72%'
                    labelSize, baseLine = cv2.getTextSize(
                        label, cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        2)  # Get font size
                    label_ymin = max(
                        ymin, labelSize[1] + 10
                    )  # Make sure not to draw label too close to top of window
                    cv2.rectangle(
                        frame, (xmin, label_ymin - labelSize[1] - 10),
                        (xmin + labelSize[0], label_ymin + baseLine - 10),
                        (255, 255, 255),
                        cv2.FILLED)  # Draw white box to put label text in
                    cv2.putText(frame, label, (xmin, label_ymin - 7),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0),
                                2)  # Draw label text

            # Draw framerate in corner of frame
            cv2.putText(frame, 'FPS: {0:.2f}'.format(frame_rate_calc),
                        (30, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0),
                        2, cv2.LINE_AA)

            # All the results have been drawn on the frame, so it's time to display it.
            cv2.imshow('Press q to quit.', frame)

            # Calculate framerate
            t2 = cv2.getTickCount()
            time1 = (t2 - t1) / freq
            frame_rate_calc = 1 / time1

            # Press 'q' to quit
            if cv2.waitKey(1) == ord('q'):
                break

        # Clean up
        cv2.destroyAllWindows()
        self.videostream.stop()
        print(self.lista)
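# A minimal usage sketch (assumed): instantiate the class and start streaming;
# stream() blocks until 'q' is pressed in the preview window.
#
#   app = InitialiseVideoStream()
#   app.stream()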
Example #10
video = cv2.VideoCapture(0)
fps = FPS().start()
while (fps._numframes < limit):
    (grab, frames) = video.read()
    frames = imutils.resize(frames, width=400)

    if (display):
        cv2.imshow("Frames", frames)
        key = cv2.waitKey(1) & 0xFF
    fps.update()
fps.stop()
calc = fps.fps()
print("Elapsed time is : {:2f}".format(fps.elapsed()))
print("Total fps is : {:2f}".format(calc))

tvideo = VideoStream(src=0).start()
tfps = FPS().start()
while (tfps._numframes < limit):
    frame = tvideo.read()
    frame = imutils.resize(frame, width=400)

    if (display):
        cv2.imshow("TFrame", frame)
        key = cv2.waitKey(1) & 0xFF
    tfps.update()
tfps.stop()

calc = tfps.fps()
print("Elapsed time is : {:2f}".format(tfps.elapsed()))
print("Total fps is : {:2f}".format(calc))
Example #11
class CameraCapture(object):

    def __IsInt(self, string):
        try:
            int(string)
            return True
        except ValueError:
            return False

    def __init__(
            self,
            videoPath,
            imageProcessingEndpoint="",
            imageProcessingParams="",
            showVideo=False,
            verbose=False,
            loopVideo=True,
            convertToGray=False,
            resizeWidth=0,
            resizeHeight=0,
            annotate=False,
            sendToHubCallback=None):
        self.videoPath = videoPath
        if self.__IsInt(videoPath):
            # case of a usb camera (usually mounted at /dev/video* where * is an int)
            self.isWebcam = True
        else:
            # case of a video file
            self.isWebcam = False
        self.imageProcessingEndpoint = imageProcessingEndpoint
        if imageProcessingParams == "":
            self.imageProcessingParams = ""
        else:
            self.imageProcessingParams = json.loads(imageProcessingParams)
        self.showVideo = showVideo
        self.verbose = verbose
        self.loopVideo = loopVideo
        self.convertToGray = convertToGray
        self.resizeWidth = resizeWidth
        self.resizeHeight = resizeHeight
        self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo and annotate
        self.nbOfPreprocessingSteps = 0
        self.autoRotate = False
        self.sendToHubCallback = sendToHubCallback
        self.vs = None

        if self.convertToGray:
            self.nbOfPreprocessingSteps += 1
        if self.resizeWidth != 0 or self.resizeHeight != 0:
            self.nbOfPreprocessingSteps += 1
        if self.verbose:
            print("Initialising the camera capture with the following parameters: ")
            print("   - Video path: " + self.videoPath)
            print("   - Image processing endpoint: " +
                  self.imageProcessingEndpoint)
            print("   - Image processing params: " +
                  json.dumps(self.imageProcessingParams))
            print("   - Show video: " + str(self.showVideo))
            print("   - Loop video: " + str(self.loopVideo))
            print("   - Convert to gray: " + str(self.convertToGray))
            print("   - Resize width: " + str(self.resizeWidth))
            print("   - Resize height: " + str(self.resizeHeight))
            print("   - Annotate: " + str(self.annotate))
            print("   - Send processing results to hub: " +
                  str(self.sendToHubCallback is not None))
            print()

        self.displayFrame = None
        if self.showVideo:
            self.imageServer = ImageServer(5012, self)
            self.imageServer.start()

    def __annotate(self, frame, response):
        AnnotationParserInstance = AnnotationParser()
        # TODO: Make the choice of the service configurable
        listOfRectanglesToDisplay = AnnotationParserInstance.getCV2RectanglesFromProcessingService1(
            response)
        for rectangle in listOfRectanglesToDisplay:
            cv2.rectangle(frame, (rectangle[0], rectangle[1]),
                          (rectangle[2], rectangle[3]), (0, 0, 255), 4)
        return

    def __sendFrameForProcessing(self, frame):
        headers = {'Content-Type': 'application/octet-stream'}
        try:
            response = requests.post(
                self.imageProcessingEndpoint, headers=headers, params=self.imageProcessingParams, data=frame)
        except Exception as e:
            print('__sendFrameForProcessing Exception - ' + str(e))
            return "[]"

        if self.verbose:
            try:
                print("Response from external processing service: (" +
                      str(response.status_code) + ") " + json.dumps(response.json()))
            except Exception:
                print("Response from external processing service (status code): " +
                      str(response.status_code))
        return json.dumps(response.json())

    def __displayTimeDifferenceInMs(self, endTime, startTime):
        return str(int((endTime-startTime) * 1000)) + " ms"

    def __enter__(self):
        if self.isWebcam:
            # The VideoStream class always gives us the latest frame from the webcam. It uses another thread to read the frames.
            self.vs = VideoStream(int(self.videoPath)).start()
            # needed to load at least one frame into the VideoStream class
            time.sleep(1.0)
            #self.capture = cv2.VideoCapture(int(self.videoPath))
        else:
            # In the case of a video file, we want to analyze all the frames of the video thus are not using VideoStream class
            self.capture = cv2.VideoCapture(self.videoPath)
        return self

    def get_display_frame(self):
        return self.displayFrame

    def start(self):
        frameCounter = 0
        perfForOneFrameInMs = None
        while True:
            if self.showVideo or self.verbose:
                startOverall = time.time()
            if self.verbose:
                startCapture = time.time()

            frameCounter += 1
            if self.isWebcam:
                frame = self.vs.read()
            else:
                frame = self.capture.read()[1]
                if frameCounter == 1:
                    if self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) < self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT):
                        self.autoRotate = True
                if self.autoRotate:
                    # The counterclockwise direction is a guess; it could well be clockwise. Is there a way to auto-detect it?
                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
            if self.verbose:
                if frameCounter == 1:
                    if not self.isWebcam:
                        print("Original frame size: " + str(int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
                                                            ) + "x" + str(int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))))
                        print("Frame rate (FPS): " +
                              str(int(self.capture.get(cv2.CAP_PROP_FPS))))
                print("Frame number: " + str(frameCounter))
                print("Time to capture (+ straighten up) a frame: " +
                      self.__displayTimeDifferenceInMs(time.time(), startCapture))
                startPreProcessing = time.time()

            # Loop video
            if not self.isWebcam:
                if frameCounter == self.capture.get(cv2.CAP_PROP_FRAME_COUNT):
                    if self.loopVideo:
                        frameCounter = 0
                        self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    else:
                        break

            # Pre-process locally
            if self.nbOfPreprocessingSteps == 1 and self.convertToGray:
                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if self.nbOfPreprocessingSteps == 1 and (self.resizeWidth != 0 or self.resizeHeight != 0):
                preprocessedFrame = cv2.resize(
                    frame, (self.resizeWidth, self.resizeHeight))

            if self.nbOfPreprocessingSteps > 1:
                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                preprocessedFrame = cv2.resize(
                    preprocessedFrame, (self.resizeWidth, self.resizeHeight))

            if self.verbose:
                print("Time to pre-process a frame: " +
                      self.__displayTimeDifferenceInMs(time.time(), startPreProcessing))
                startEncodingForProcessing = time.time()

            # Process externally
            if self.imageProcessingEndpoint != "":

                # Encode frame to send over HTTP
                if self.nbOfPreprocessingSteps == 0:
                    encodedFrame = cv2.imencode(".jpg", frame)[1].tostring()
                else:
                    encodedFrame = cv2.imencode(".jpg", preprocessedFrame)[
                        1].tostring()

                if self.verbose:
                    print("Time to encode a frame for processing: " +
                          self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
                    startProcessingExternally = time.time()

                # Send over HTTP for processing
                response = self.__sendFrameForProcessing(encodedFrame)
                if self.verbose:
                    print("Time to process frame externally: " +
                          self.__displayTimeDifferenceInMs(time.time(), startProcessingExternally))
                    startSendingToEdgeHub = time.time()

                # forwarding outcome of external processing to the EdgeHub
                if response != "[]" and self.sendToHubCallback is not None:
                    self.sendToHubCallback(response)
                    if self.verbose:
                        print("Time to message from processing service to edgeHub: " +
                              self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                        startDisplaying = time.time()

            # Display frames
            if self.showVideo:
                try:
                    if self.nbOfPreprocessingSteps == 0:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(frame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),
                                        (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        if self.annotate:
                            # TODO: fix bug with annotate function
                            self.__annotate(frame, response)
                        self.displayFrame = cv2.imencode(
                            '.jpg', frame)[1].tobytes()
                    else:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(preprocessedFrame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),
                                        (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        if self.annotate:
                            # TODO: fix bug with annotate function
                            self.__annotate(preprocessedFrame, response)
                        self.displayFrame = cv2.imencode(
                            '.jpg', preprocessedFrame)[1].tobytes()
                except Exception as e:
                    print("Could not display the video to a web browser.")
                    print("Exception: " + str(e))
                if self.verbose:
                    if 'startDisplaying' in locals():
                        print("Time to display frame: " +
                              self.__displayTimeDifferenceInMs(time.time(), startDisplaying))
                    elif 'startSendingToEdgeHub' in locals():
                        print("Time to display frame: " +
                              self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                    else:
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(
                            time.time(), startEncodingForProcessing))
                perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
                if not self.isWebcam:
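                    # Wait for the remainder of the source frame interval (1000 / FPS ms),
                    # minus the time already spent on this frame, but never less than 1 ms.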
                    waitTimeBetweenFrames = max(
                        int(1000 / self.capture.get(cv2.CAP_PROP_FPS))-perfForOneFrameInMs, 1)
                    print("Wait time between frames: " +
                          str(waitTimeBetweenFrames))
                    if cv2.waitKey(waitTimeBetweenFrames) & 0xFF == ord('q'):
                        break

            if self.verbose:
                perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
                print("Total time for one frame: " +
                      self.__displayTimeDifferenceInMs(time.time(), startOverall))

    def __exit__(self, exception_type, exception_value, traceback):
        if not self.isWebcam:
            self.capture.release()
        if self.showVideo:
            self.imageServer.close()
            cv2.destroyAllWindows()
Ejemplo n.º 12
0
class Face_App:

    # Constructor
    def __init__(self):
        self.face_tools = ft()
        self.vs = VideoStream(0)
        self.vs.run()
        self.frame = None
        self.thread = None
        self.guanliyuan_accept_login = True
        self.run_manage = False
        self.change_list = self.vs.get_sources()
        self.source_number = 0
        self.update_label_flag = True

        # Main window
        self.root = tk.Tk()
        self.root.wm_resizable(False, False)  # keep the window size fixed
        self.root.wm_title("基于机器视觉的人脸识别系统")
        self.root.wm_protocol("WM_DELETE_WINDOW", self.close)

        # Face preview
        self.face_image = tk.Label()
        self.face_image.pack(side="top", padx=10, pady=10)

        # "Add face" button
        self.add_button_var = tk.StringVar()
        self.add_button_var.set("添加人脸")
        self.press_add = False
        self.add_button = tk.Button(self.root,
                                    textvariable=self.add_button_var,
                                    command=self.add)

        # "Switch source" button
        self.is_vedio_source = True
        self.change_source_var = tk.StringVar()
        self.change_source_var.set("切换图像")
        self.press_change_source = False
        self.change_source_button = tk.Button(
            self.root,
            textvariable=self.change_source_var,
            command=self.change_source)

        # "Face recognition" button
        self.recognize_button_var = tk.StringVar()
        self.recognize_button_var.set("人脸识别")
        self.press_recognize = False
        self.recognize_button = tk.Button(
            self.root,
            textvariable=self.recognize_button_var,
            command=self.recognize)
        self.max_face = []

        # "Face detection" button
        self.detect_button_var = tk.StringVar()
        self.detect_button_var.set("人脸检测")
        self.press_detect = False
        self.detect_button = tk.Button(self.root,
                                       textvariable=self.detect_button_var,
                                       command=self.detect)

        # Name and ID input widgets
        self.frame_h = tk.Frame(self.root)
        self.frame_l = tk.Frame(self.root)
        self.name_label = tk.Label(self.frame_h, text="姓名:")
        self.id_label = tk.Label(self.frame_l, text="编号:")
        self.name_entry = tk.Entry(self.frame_h)
        self.id_entry = tk.Entry(self.frame_l)
        self.ok_button = tk.Button(self.root,
                                   text="确认",
                                   width=20,
                                   command=self.save_id)

        # "It's me" button
        self.is_me_var = tk.StringVar()
        self.is_me_var.set("是我")
        self.is_me_button = tk.Button(self.root,
                                      textvariable=self.is_me_var,
                                      command=self.is_me)
        self.press_is_me_button = False

        # "Not me" button
        self.not_me_button = tk.Button(self.root,
                                       text="不是我",
                                       command=self.not_me)
        self.press_not_me_button = False

        # Video thread
        self.run_thread = True
        self.thread = threading.Thread(target=self.video_loop)
        self.thread.start()
        self.last_img = None
        self.last_max_face = [0, 0, 0, 0]
        self.pack_all_button()
        self.root.mainloop()

    # Video streaming loop
    def video_loop(self):
        try:
            print("启动画面")
            while self.run_thread:
                if self.press_change_source:
                    self.change_sources()
                    self.press_change_source = False
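                # read() returns (grabbed, frame); if grabbing fails while the loop is
                # still running, fall back to the next available source.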
                self.grabbed, self.frame = self.vs.read()
                if self.grabbed:
                    self.update_img(self.frame)
                elif self.run_thread:
                    self.change_sources()
                else:
                    break
        except RuntimeError:
            print("视频流出现异常")

    # Process each frame differently depending on the function selected by the user
    def update_img(self, img):
        if self.press_recognize:
            user_id, new_img = self.face_tools.face_recognize(img)
            self.update_label(new_img)
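            # user id '1' appears to be reserved for the administrator account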
            if user_id == '1' and self.guanliyuan_accept_login and self.is_vedio_source and self.press_recognize and self.source_number == 0:
                if tk.messagebox.askokcancel(message="你好像是管理员,你要登录吗?"):
                    self.guanliyuan_accept_login = True
                    self.run_manage = True
                    self.run_thread = False
                    tk.messagebox.showinfo(message="请关闭本页面在终端进行操作。")
                else:
                    self.guanliyuan_accept_login = False
        elif self.press_detect:
            new_img = self.face_tools.face_detection(img)
            self.update_label(new_img)
        elif self.press_add:
            if self.update_label_flag:
                new_img, self.max_face = self.face_tools.face_collect(img)
                self.update_label(new_img)
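            # max_face appears to be the bounding box of the largest detected face; once its
            # width exceeds 10 px the stream is paused until the user answers "it's me" / "not me".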
            if self.max_face[2] > 10:
                self.vs.stop()
                self.update_label_flag = False
            if self.press_is_me_button:
                self.press_is_me_button = False
                self.update_label_flag = True
                self.face_tools.face_save(img, self.max_face)
                self.is_me_var.set("是我" + " (" + str(self.face_tools.number) +
                                   ")")
                self.vs.go_on()
            elif self.press_not_me_button:
                self.update_label_flag = True
                self.press_not_me_button = False
                self.vs.go_on()
        else:
            self.update_label(img)

    # Update the displayed image
    def update_label(self, new_img):
        cv2image = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
        current_image = Image.fromarray(cv2image)  # wrap the array in a PIL Image object
        imgtk = ImageTk.PhotoImage(image=current_image)
        if imgtk is None:
            tk.messagebox.showerror(message="更新画面出现异常请关闭程序。")
            return
        if self.face_image is None:
            self.face_image = tk.Label(image=imgtk)
            self.face_image.image = imgtk
            self.face_image.pack(side="top", padx=10, pady=10)
        else:
            if not self.run_thread:
                return
            self.face_image.configure(image=imgtk)
            self.face_image.image = imgtk

    # Handler for the "face recognition" button
    def recognize(self):
        if not self.press_recognize:
            is_recognize = tk.messagebox.askokcancel(title='',
                                                     message="确定要开始识别吗?")
            if is_recognize:
                self.press_recognize = True
                self.destroy_all_button()
                self.recognize_button_var.set("取消识别")
                self.recognize_button.pack(fill="both",
                                           expand="yes",
                                           padx=200,
                                           pady=10)
                print("识别")
        else:
            is_cancel_recognize = tk.messagebox.askokcancel(
                title='', message="确定要取消识别吗?")
            if is_cancel_recognize:
                print('\n' + str(self.face_tools.predict_dict) + '\n')
                self.face_tools.predict_dict = {}
                self.face_tools.predict_number = 0
                self.press_recognize = False
                self.guanliyuan_accept_login = True
                self.destroy_all_button()
                self.recognize_button_var.set("人脸识别")
                self.pack_all_button()
                print("取消识别")

    # Handler for the "face detection" button
    def detect(self):
        if not self.press_detect:
            is_detect = tk.messagebox.askokcancel(
                title='', message="确定要开始检测吗?")  # returns True or False
            if is_detect:
                self.press_detect = True
                self.destroy_all_button()
                self.detect_button_var.set("取消检测")
                self.detect_button.pack(fill="both",
                                        expand="yes",
                                        padx=200,
                                        pady=10)
                print("人脸检测")
        else:
            is_cancel_detect = tk.messagebox.askokcancel(
                title='', message="确定要取消检测吗?")  # returns True or False
            if is_cancel_detect:
                self.press_detect = False
                self.detect_button_var.set("人脸检测")
                self.destroy_all_button()
                self.pack_all_button()
                print("取消检测")

    # Handler for the "add face" / "stop adding" button
    def add(self):
        if not self.press_add:
            is_add = tk.messagebox.askokcancel(
                title='', message="确定要添加人脸吗?")  # returns True or False
            if is_add:
                self.press_add = True
                self.destroy_all_button()
                self.add_button_var.set("停止添加")
                self.get_name_id()
                print("人脸识别")
        else:
            is_cancel_add = tk.messagebox.askokcancel(
                title='', message="确定要停止添加吗?")  # returns True or False
            if is_cancel_add:
                self.face_tools.number = 0
                self.is_me_var.set("是我")
                self.update_label_flag = True
                self.press_add = False
                self.destroy_all_button()
                self.add_button_var.set("添加人脸")
                self.pack_all_button()
                self.face_tools.face_train()
                tk.messagebox.showinfo(message="已添加新的人脸,正在重新训练模型!")
                self.vs.go_on()
                print("添加完成")

    # Request a switch of the video source (the switch itself happens in video_loop)
    def change_source(self):
        if not self.press_change_source:
            self.press_change_source = True

    # Handler for the "it's me" button
    def is_me(self):
        self.press_not_me_button = False
        self.press_is_me_button = True

    # Handler for the "not me" button
    def not_me(self):
        self.press_not_me_button = True
        self.press_is_me_button = False

    # Show the name and ID input widgets
    def get_name_id(self):
        self.frame_h.pack()
        self.frame_l.pack()
        self.name_label.pack(side="left", fill="both", padx=5, pady=5)
        self.name_entry.pack(side="right", fill="both", padx=5, pady=5)
        self.id_label.pack(side="left", fill="both", padx=5, pady=5)
        self.id_entry.pack(side="right", fill="both", padx=5, pady=5)
        self.ok_button.pack(side="bottom", fill="y", ipadx=5, pady=5)

    # Save the name and ID
    def save_id(self):
        get_name = self.name_entry.get()
        get_id = self.id_entry.get()
        if get_id != "" and get_name != "" and len(get_id) <= 6 and len(
                get_name) <= 20:
            if get_id.isdigit():
                if get_id in self.face_tools.usrs_dict.keys():
                    tk.messagebox.showinfo(
                        message="此id已使用,如果你再使用会影响之前用户!\n请联系管理员操作。")
                    message = "有一个名为%s的用户想使用已被使用编号%s被拒绝。" % (get_name, get_id)
                    self.face_tools.sent_message("有人要有已有编号", message)
                    return
                self.face_tools.usrs_dict[str(int(get_id))] = get_name
                self.face_tools.save_dict()
                message = "有一个名为%s,编号为%s的用户录入了人脸信息。" % (get_name, get_id)
                self.face_tools.sent_message("有新人脸添加", message)
                self.face_tools.face_id = get_id
                print("已经保存了您的姓名和编号")
                print(self.face_tools.usrs_dict)
                self.destroy_all_button()
                self.press_not_me_button = True
                self.is_me_button.pack(side="left",
                                       fill="both",
                                       expand="yes",
                                       padx=5,
                                       pady=5)
                self.not_me_button.pack(side="left",
                                        fill="both",
                                        expand="yes",
                                        padx=5,
                                        pady=5)
                self.add_button.pack(side="right",
                                     fill="both",
                                     expand="yes",
                                     padx=5,
                                     pady=5)
            else:
                tk.messagebox.showinfo(message="编号只能为数字哦!")
        else:
            tk.messagebox.showinfo(message="姓名和编号都不能为空且符合字数要求哦!")

    # Cycle to the next video source
    def change_sources(self):
        self.source_number += 1
        if self.source_number == len(self.change_list):
            self.source_number = 0
        self.vs.close()
        self.vs = VideoStream(self.change_list[self.source_number])
        self.vs.run()

    # Pack the four main buttons
    def pack_all_button(self):
        self.add_button.pack(side="right",
                             fill="both",
                             expand="yes",
                             padx=10,
                             pady=10)
        self.detect_button.pack(side="right",
                                fill="both",
                                expand="yes",
                                padx=10,
                                pady=10)
        self.recognize_button.pack(side="right",
                                   fill="both",
                                   expand="yes",
                                   padx=10,
                                   pady=10)
        self.change_source_button.pack(side="right",
                                       fill="both",
                                       expand="yes",
                                       padx=10,
                                       pady=10)

    # Remove all buttons from the layout
    def destroy_all_button(self):
        self.recognize_button.pack_forget()
        self.add_button.pack_forget()
        self.detect_button.pack_forget()
        self.frame_h.pack_forget()
        self.frame_l.pack_forget()
        self.name_label.pack_forget()
        self.name_entry.pack_forget()
        self.id_label.pack_forget()
        self.id_entry.pack_forget()
        self.ok_button.pack_forget()
        self.is_me_button.pack_forget()
        self.not_me_button.pack_forget()
        self.change_source_button.pack_forget()

    # Stop the video_loop thread and close the window
    def close(self):
        if self.run_manage:
            self.vs.close()
            self.root.destroy()
            print("关闭窗口")
            return
        self.run_thread = False
        is_quit = tk.messagebox.askyesno(title='请求确认', message="确定退出了吗?")
        if is_quit:
            self.vs.close()
            self.root.destroy()
        else:
            self.run_thread = True
            self.thread = threading.Thread(target=self.video_loop)
            self.thread.start()