Example #1
# assumed import; the snippet does not show its source module
from imageserver import ImageServer, ImageServerException
def application(env, start_response):
    uri = env['REQUEST_URI']
    im = ImageServer('imageserver.ini')

    try:
        image = im.resize(uri)
        with open(image, 'rb') as f:
            result = f.read()
    except ImageServerException:
        # serve default image or text
        result = b'Not Found'

    # headers
    status = '200 OK'
    headers = [
        ('Content-Type', im.get_mime()),
    ]
    start_response(status, headers)
    # WSGI expects an iterable of byte strings, so wrap the body in a list
    return [result]
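
A quick local smoke test is possible with the stdlib wsgiref server. This is only a sketch: wsgiref does not populate REQUEST_URI (uWSGI does), so a small shim derives it from PATH_INFO.

from wsgiref.simple_server import make_server

def app_with_request_uri(env, start_response):
    # shim for servers that do not set REQUEST_URI
    env.setdefault('REQUEST_URI', env.get('PATH_INFO', '/'))
    return application(env, start_response)

make_server('', 8080, app_with_request_uri).serve_forever()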
Example #3
import numpy as np

from ImageServer import ImageServer

# assumed definitions: the snippet uses these names below but their lines were
# cut off; the paths mirror the LFPW/HELEN test sets named in Example #7
commonSetImageDirs = ["../data/images/lfpw/testset/", "../data/images/helen/testset/"]
commonSetBoundingBoxFiles = ["../data/boxesLFPWTest.pkl", "../data/boxesHelenTest.pkl"]
challengingSetImageDirs = ["../data/images/ibug/"]
challengingSetBoundingBoxFiles = ["../data/boxesIBUG.pkl"]

w300SetImageDirs = [
    "../data/images/300W/01_Indoor/", "../data/images/300W/02_Outdoor/"
]
w300SetBoundingBoxFiles = [
    "../data/boxes300WIndoor.pkl", "../data/boxes300WOutdoor.pkl"
]

datasetDir = "../data/"

meanShape = np.load("../data/meanFaceShape.npz")["meanShape"]

commonSet = ImageServer(initialization='box')
commonSet.PrepareData(commonSetImageDirs, commonSetBoundingBoxFiles, meanShape,
                      0, 1000, False)
commonSet.LoadImages()
commonSet.CropResizeRotateAll()
commonSet.imgs = commonSet.imgs.astype(np.float32)
commonSet.Save(datasetDir, "commonSet.npz")

challengingSet = ImageServer(initialization='box')
challengingSet.PrepareData(challengingSetImageDirs,
                           challengingSetBoundingBoxFiles, meanShape, 0, 1000,
                           False)
challengingSet.LoadImages()
challengingSet.CropResizeRotateAll()
challengingSet.imgs = challengingSet.imgs.astype(np.float32)
challengingSet.Save(datasetDir, "challengingSet.npz")
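
The saved archives can be read back with the matching class method, exactly as the test snippets below do:

commonSet = ImageServer.Load(datasetDir + "commonSet.npz")
challengingSet = ImageServer.Load(datasetDir + "challengingSet.npz")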
Example #4
## Checks the data produced by the TrainingSetPreparation script (all image sets, the best landmarks, and the landmarks supplied by the files);
## it draws the landmark annotations on the images to verify that they really are face landmarks

import tensorflow as tf
import numpy as np
from ImageServer import ImageServer
from models_test import DAN
import os
import cv2
from PIL import Image
import matplotlib.pyplot as plt
##  Load the data
N_LANDMARK = 68
datasetDir = "../data/result/"
trainSet = ImageServer.Load(datasetDir + "dataset_nimgs=60960_perturbations=[0.2, 0.2, 20, 0.25]_size=[112, 112].npz")
validationSet = ImageServer.Load(datasetDir + "dataset_nimgs=100_perturbations=[]_size=[112, 112].npz")

##  Get the label of each sample: the coordinates of its 68 landmarks
def getLabelsForDataset(imageServer):
    nSamples = imageServer.gtLandmarks.shape[0]
    nLandmarks = imageServer.gtLandmarks.shape[1]
    y = imageServer.gtLandmarks.astype(np.float32)
    return y.reshape((nSamples, nLandmarks * 2))
## Get the number of samples, the image size, and the channel count
nSamples = trainSet.gtLandmarks.shape[0]
imageHeight = trainSet.imgSize[0]
imageWidth = trainSet.imgSize[1]
nChannels = trainSet.imgs.shape[3]
##  Get the image data and the corresponding landmark positions in each image
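The snippet is cut off here; a minimal sketch of the likely continuation, using only names already defined above:

Xtrain = trainSet.imgs
Xvalid = validationSet.imgs
Ytrain = getLabelsForDataset(trainSet)
Yvalid = getLabelsForDataset(validationSet)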
Example #5
# assumed reconstruction of the truncated opening, mirroring Example #3
import numpy as np

from ImageServer import ImageServer

commonSetImageDirs = ["../data/images/lfpw/testset/", "../data/images/helen/testset/"]
commonSetBoundingBoxFiles = [
    "../data/boxesLFPWTest.pkl", "../data/boxesHelenTest.pkl"
]

challengingSetImageDirs = ["../data/images/ibug/"]
challengingSetBoundingBoxFiles = ["../data/boxesIBUG.pkl"]

w300SetImageDirs = [
    "../data/images/300w/01_Indoor/", "../data/images/300w/02_Outdoor/"
]
w300SetBoundingBoxFiles = [
    "../data/boxes300WIndoor.pkl", "../data/boxes300WOutdoor.pkl"
]

datasetDir = "../data/"
trainSet = ImageServer.Load(
    datasetDir +
    "dataset_nimgs=62960_perturbations=[0.2, 0.2, 20, 0.25]_size=[112, 112].npz"
)
meanShape = np.load("../data/meanFaceShape.npz")["meanShape"]

# print(meanShape.shape)  (68,2)
'''
commonSet = ImageServer(initialization='box')
commonSet.PrepareData(commonSetImageDirs, commonSetBoundingBoxFiles, meanShape, 0, 1000, False)
commonSet.LoadImages()
commonSet.CropResizeRotateAll()
commonSet.imgs = commonSet.imgs.astype(np.float32)
commonSet.NormalizeImages(trainSet)  # subtract the mean, divide by the standard deviation
# commonSet.NormalizeImages()
commonSet.Save(datasetDir, "commonSet.npz")
'''

Example #6

# assumed imports; this snippet's header was cut off
import numpy as np

from ImageServer import ImageServer

imageDirs = [
    "../data/images/lfpw/trainset/", "../data/images/helen/trainset/",
    "../data/images/afw/"
]
boundingBoxFiles = [
    "../data/py3boxesLFPWTrain.pkl", "../data/py3boxesHelenTrain.pkl",
    "../data/py3boxesAFW.pkl"
]

datasetDir = "../data/"

meanShape = np.load("../data/meanFaceShape.npz")["meanShape"]

print("Load trainSet")
trainSet = ImageServer(initialization='rect')  # effectively trains without the bounding boxes; the crop box comes directly from the landmarks
trainSet.PrepareData(
    imageDirs, None, meanShape, 100, 100000, True
)  # prepares the list of image names, the per-image landmark lists, the per-image bounding boxes, and the mean shape. Oddly, startIdx=100 and nImgs=100000, although the 300W dataset has nowhere near that many images
trainSet.LoadImages()  # loads the images and fits the meanShape to each one
trainSet.GeneratePerturbations(10, [0.2, 0.2, 20, 0.25])  # displacement 0.2, rotation 20 degrees, scaling ±0.25
# import pdb; pdb.set_trace()
trainSet.NormalizeImages()  # subtract the mean, divide by the standard deviation
trainSet.Save(datasetDir)  # saved as a dict with keys 'imgs', 'initlandmarks', 'gtlandmarks'

print("Load validationSet")
validationSet = ImageServer(initialization='box')
validationSet.PrepareData(imageDirs, boundingBoxFiles, meanShape, 0, 100,
                          False)
validationSet.LoadImages()
validationSet.CropResizeRotateAll()
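
The snippet is cut off here; Example #8 below suggests the usual continuation:

validationSet.imgs = validationSet.imgs.astype(np.float32)
validationSet.NormalizeImages(trainSet)
validationSet.Save(datasetDir)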
Example #7
# assumed imports; the snippet starts mid-script (the module paths for
# tests and FaceAlignment are guesses)
import numpy as np
import scipy.io as sio

import tests
from FaceAlignment import FaceAlignment
from ImageServer import ImageServer
network = FaceAlignment(112, 112, 1, 2, n_T)
network.loadNetwork(networkFilename)

print ("Network being tested: " + networkFilename)
print ("Normalization is set to: " + normalization)
print ("Failure threshold is set to: " + str(failureThreshold))

extraLandmarks = sio.loadmat(datasetDir + 'extraLandmarks.mat')['extraLandmarks']
extraOrgsInd = np.array([[0, 18, 33, 42, 51, 58, 67, 79, 91, 104, 113, 124],
                         [18, 33, 42, 51, 58, 67, 79, 91, 104, 113, 124, 129]])

resultsDir_common = './300w_Results/Common Set/'
resultsDir_challenging = './300w_Results/Challenging Set/'
resultsDir_300w = './300w_Results/300w private set/'

commonSet = ImageServer.Load(datasetDir + "commonSet.npz")
challengingSet = ImageServer.Load(datasetDir + "challengingSet.npz")
w300 = ImageServer.Load(datasetDir + "w300Set.npz")

print ("Processing common subset of the 300W public test set (test sets of LFPW and HELEN)")
commonErrs = tests.LandmarkError(commonSet, network, extraLandmarks, extraOrgsInd,  resultsDir_common, normalization, showResults, verbose)
print ("Processing challenging subset of the 300W public test set (IBUG dataset)")
challengingErrs = tests.LandmarkError(challengingSet, network, extraLandmarks, extraOrgsInd, resultsDir_challenging, normalization, showResults, verbose)

fullsetErrs = commonErrs + challengingErrs
print ("Showing results for the entire 300W pulic test set (IBUG dataset, test sets of LFPW and HELEN")
print("Average error: {0}".format(np.mean(fullsetErrs)))
tests.AUCError(fullsetErrs, failureThreshold, showCurve=showCED)

print ("Processing 300W private test set")
w300Errs = tests.LandmarkError(w300, network, extraLandmarks, extraOrgsInd,  resultsDir_300w, normalization, showResults, verbose)
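
The snippet ends here; presumably it finishes like the public-set branch above:

print("Average error: {0}".format(np.mean(w300Errs)))
tests.AUCError(w300Errs, failureThreshold, showCurve=showCED)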
Example #8
from ImageServer import ImageServer

import numpy as np
# Prepare the data before training
# imageDirs = ["../data/images/lfpw/trainset/", "../data/images/helen/trainset/", "../data/images/afw/"]  # image directories
# boundingBoxFiles = ["../data/boxesLFPWTrain.pkl", "../data/boxesHelenTrain.pkl", "../data/boxesAFW.pkl"]  # pkl files holding the label info, read and written with the pickle module

imageDirs = ["../data/images/lfpw/trainset/"]  # image directories
boundingBoxFiles = ["../data/boxesLFPWTrain.pkl"]  # pkl files holding the label info, read and written with the pickle module

datasetDir = "../data/"

meanShape = np.load("../data/meanFaceShape.npz")["meanShape"]  # the mean of the 68 landmarks over all faces, used as the initial shape from which offsets are computed

trainSet = ImageServer(initialization='rect')
trainSet.PrepareData(imageDirs, None, meanShape, 100, 100000, True)
trainSet.LoadImages()
trainSet.GeneratePerturbations(10, [0.2, 0.2, 20, 0.25])  # displacement 0.2, rotation 20 degrees, scaling ±0.25
trainSet.NormalizeImages()
trainSet.Save(datasetDir)

validationSet = ImageServer(initialization='box')
validationSet.PrepareData(imageDirs, boundingBoxFiles, meanShape, 0, 100,
                          False)
validationSet.LoadImages()
validationSet.CropResizeRotateAll()
validationSet.imgs = validationSet.imgs.astype(np.float32)
validationSet.NormalizeImages(trainSet)
validationSet.Save(datasetDir)
Example #9
## Test-stage code
import utils
import tensorflow as tf
# import ImageServer
from ImageServer import ImageServer
from models import DAN
import numpy as np
from scipy import misc
from scipy import ndimage
from pylab import *
import os
import cv2

datasetDir = "../data/"
# testSet = ImageServer.Load(datasetDir + "commonSet.npz")
testSet = ImageServer.Load(datasetDir + "w300Set.npz")


def evaluateError(landmarkGt, landmarkP):
    e = np.zeros(68)
    #  ocular_dist = np.mean(np.linalg.norm(landmarkGt[36:42] - landmarkGt[42:48], axis=1))  # inter-pupil distance as the norm
    ocular_dist = np.mean(np.linalg.norm(landmarkGt[36] - landmarkGt[45]))  # outer-eye-corner distance as the norm
    for i in range(68):
        e[i] = np.linalg.norm(landmarkGt[i] - landmarkP[i])
    e = e / ocular_dist
    return e


def evaluateBatchError(landmarkGt, landmarkP, batch_size):
    # assumed completion: the body was cut off; reuse evaluateError per sample
    e = np.zeros([batch_size, 68])
    for b in range(batch_size):
        e[b] = evaluateError(landmarkGt[b], landmarkP[b])
    return e

Example #10

# assumed imports; this snippet's header was cut off
import numpy as np

from ImageServer import ImageServer

imageDirs = [
    "../data/images/lfpw/trainset/", "../data/images/helen/trainset/",
    "../data/images/afw/"
]
boundingBoxFiles = [
    "../data/py3boxesLFPWTrain.pkl", "../data/py3boxesHelenTrain.pkl",
    "../data/py3boxesAFW.pkl"
]

datasetDir = "../data/"

meanShape = np.load("../data/meanFaceShape.npz")["meanShape"]

trainSet = ImageServer(
    initialization='rect'
)  # effectively trains without the bounding boxes; the crop box comes directly from the landmarks
trainSet.PrepareData(
    imageDirs, None, meanShape, 0, 2, True
)  # prepares the list of image names, the per-image landmark lists, the per-image bounding boxes, and the mean shape. Oddly, startIdx=100 and nImgs=100000, although the 300W dataset has nowhere near that many images
trainSet.LoadImages()  # loads the images and fits the meanShape to each one
trainSet.GeneratePerturbations(
    10, [0.2, 0.2, 20, 0.25])  # displacement 0.2, rotation 20 degrees, scaling ±0.25
# import pdb; pdb.set_trace()
trainSet.NormalizeImages()  # subtract the mean, divide by the standard deviation
# trainSet.Save(datasetDir)  # saved as a dict with keys 'imgs', 'initlandmarks', 'gtlandmarks'

validationSet = ImageServer(initialization='box')
validationSet.PrepareData(imageDirs, boundingBoxFiles, meanShape, 0, 100,
                          False)
validationSet.LoadImages()
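
The snippet is cut off here; Example #8 above suggests the usual continuation:

validationSet.imgs = validationSet.imgs.astype(np.float32)
validationSet.NormalizeImages(trainSet)
validationSet.Save(datasetDir)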
Example #12
# assumed imports; the original module header is not shown. The module paths
# for the sample's own helpers are guesses; FPS, CaptureDevice, and
# CaptureDeviceState come from further sample modules not shown here.
import json
import logging
import os
import subprocess
import sys
import time
from threading import Event
from urllib.parse import urlparse

import cv2
import numpy as np
import requests

from AnnotationParser import AnnotationParser
from ImageServer import ImageServer
from VideoStream import VideoStream
from YoloInference import YoloInference
class VideoCapture(object):
    def __init__(self,
                 videoPath="",
                 verbose=True,
                 displayW=1920,
                 displayH=1080,
                 fontScale=1.0,
                 inference=True,
                 confidenceLevel=0.5):

        self.verbose = verbose
        self._debug = False

        self.videoPath = videoPath
        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown
        self.videoStream = None

        self._videoReadyEvent = Event()

        self._capture_in_progress = False

        # Display Resolution
        # Will try to set camera's resolution to the specified resolution
        self._displayW = displayW
        self._displayH = displayH

        self._cameraW = 0
        self._cameraH = 0

        # Camera's FPS
        self._cameraFPS = 30

        # Font Scale for putText
        self._fontScale = float(fontScale)

        # turn inference on/off
        self.runInference = inference

        # confidence level threshold
        self.confidenceLevel = confidenceLevel

        # various frame data

        # frame data for UI
        self.displayFrame = None

        # wallpapers for UI
        self._frame_wp_init_system = cv2.imread(
            "./www/WP-InitializingSystem.png")
        self._frame_wp_no_video = cv2.imread("./www/WP-NoVideoData.png")
        self._frame_wp_init_iothub = cv2.imread(
            "./www/WP-InitializeIotHub.png")

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        logging.info(
            '===============================================================')
        logging.info(
            'Initializing Video Capture with the following parameters:')
        logging.info('   - OpenCV Version     : {}'.format(cv2.__version__))
        logging.info('   - Video path         : {}'.format(self.videoPath))
        logging.info('   - Display Resolution : {} x {}'.format(
            self._displayW, self._displayH))
        logging.info('   - Font Scale         : {}'.format(self._fontScale))
        logging.info('   - Inference?         : {}'.format(self.runInference))
        logging.info('   - ConfidenceLevel    : {}'.format(
            self.confidenceLevel))
        logging.info(
            '===============================================================')

        # set wallpaper
        self.set_Wallpaper(self._frame_wp_init_system)

        # set FPS
        self.fps = FPS()

        self.imageStreamHandler = None

        # Start Web Server for View
        self.imageServer = ImageServer(80, self)
        self.imageServer.start()

        # Set Video Source
        self.set_Video_Source(self.videoPath)

        self.set_Wallpaper(cv2.imread("./www/WP-InitializeAIEngine.png"))
        # logging.info('Yolo Inference Initializing\r\n')
        self.yoloInference = YoloInference(self._fontScale, sendMessage=False)
        # logging.info('Yolo Inference Initialized\r\n')

    def __enter__(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # self.set_Video_Source(self.videoPath)

        return self

    def videoStreamReadTimeoutHandler(self, signum, frame):
        raise Exception("VideoStream Read Timeout")

    #
    # Video Source Management
    #
    def _set_Video_Source_Type(self, videoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        self._reset_Video_Source()

        if '/dev/video' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Hololens

        if self.verbose:
            logging.info('<< ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(self._videoSourceType))

    def _get_Video_Source_Type(self, videoPath):

        videoType = CaptureDevice.Unknown

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        if '/dev/video' in videoPath.lower():
            videoType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            videoType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            videoType = CaptureDevice.Hololens

        return videoType

    #
    # Resets video capture/stream settings
    #
    def _reset_Video_Source(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        if self.videoStream:
            self.videoStream.stop()
        #    self.videoStream.close()
        #     self.videoStream = None

        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown

    def set_Video_Source(self, newVideoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        retVal = False
        realVideoPath = newVideoPath

        if self.videoPath == newVideoPath and self._videoSourceState == CaptureDeviceState.Running:
            return True

        if self.imageStreamHandler is not None:
            statusMsg = '{{\"DeviceStatus\":\"Connecting to {}\",\"isSuccess\":{}}}'.format(
                self._remove_credential(newVideoPath), 1)
            self.imageStreamHandler.submit_write(statusMsg)

        self._videoSourceState = CaptureDeviceState.Stop

        if self._capture_in_progress:
            # wait for queue to drain and loop to exit
            time.sleep(1.0)

        self._capture_in_progress = False

        self._set_Video_Source_Type(realVideoPath)

        if self._videoSourceType == CaptureDevice.Unknown:
            self._videoSourceState = CaptureDeviceState.ErrorNotSupported
            logging.error('>> ' + self.__class__.__name__ + "." +
                          sys._getframe().f_code.co_name +
                          '() : Unsupported Video Source {}'.format(
                              self._videoSourceType))
        else:
            self._videoSourceState = CaptureDeviceState.Init

            if self._videoSourceType == CaptureDevice.Hololens:
                strHololens = realVideoPath.split('?')
                # disable audio
                realVideoPath = '{}?holo=true&pv=true&mic=false&loopback=false'.format(
                    strHololens[0])

            self.videoStream = VideoStream(videoCapture=self,
                                           path=realVideoPath)

            fps_override = 30

            if self.videoStream.videoCapture is not None:

                # get resolution
                cameraH1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                cameraW1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                cameraFPS1 = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                if self._videoSourceType == CaptureDevice.Webcam:

                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Rtsp:

                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Hololens:

                    holo_w = 1280
                    holo_h = 720

                    if 'live_med.mp4' in realVideoPath:
                        holo_w = 854
                        holo_h = 480
                    elif 'live_low.mp4' in realVideoPath:
                        holo_w = 428
                        holo_h = 240
                        fps_override = 15

                    self.videoStream.videoCapture.set(
                        cv2.CAP_PROP_FRAME_HEIGHT, holo_h)
                    self.videoStream.videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH,
                                                      holo_w)

                self.videoStream.videoCapture.set(cv2.CAP_PROP_FPS,
                                                  fps_override)

                self._cameraH = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                self._cameraW = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                self._cameraFPS = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                logging.info(
                    '==============================================================='
                )
                logging.info(
                    'Setting Video Capture with the following parameters:')
                logging.info('   - Video Source Type  : {}'.format(
                    self._videoSourceType))
                logging.info('   - Display Resolution : {} x {}'.format(
                    self._displayW, self._displayH))
                logging.info('   Original             : {} x {} @ {}'.format(
                    cameraW1, cameraH1, cameraFPS1))
                logging.info('   New                  : {} x {} @ {}'.format(
                    self._cameraW, self._cameraH, self._cameraFPS))
                logging.info(
                    '==============================================================='
                )

                if self.videoStream.start():
                    self._videoSourceState = CaptureDeviceState.Running
                    retVal = True
                else:
                    self._videoSourceState = CaptureDeviceState.ErrorRead
            else:

                if self._videoSourceType == CaptureDevice.Hololens or self._videoSourceType == CaptureDevice.Rtsp:
                    url_parsed = urlparse(realVideoPath)

                    if url_parsed.password is not None or url_parsed.username is not None:
                        url_parsed = url_parsed._replace(
                            netloc="{}".format(url_parsed.hostname))

                    ipAddress = url_parsed.netloc

                    ping_ret = subprocess.call(
                        ['ping', '-c', '5', '-W', '3', ipAddress],
                        stdout=open(os.devnull, 'w'),
                        stderr=open(os.devnull, 'w'))

                    if ping_ret == 0:
                        self._videoSourceState = CaptureDeviceState.ErrorOpen
                    else:
                        self._videoSourceState = CaptureDeviceState.ErrorNoNetwork

                logging.error('>> ' + self.__class__.__name__ + "." +
                              sys._getframe().f_code.co_name +
                              '() : Failed to open Video Capture')

        self.videoPath = realVideoPath

        if not retVal:
            self.set_Wallpaper(self._frame_wp_no_video)
        else:
            self._videoReadyEvent.set()

        self.sendCurrentVideoPath(realVideoPath)

        return retVal

    def get_display_frame(self):
        return self.displayFrame

    def set_status(self, device_status):
        self._videoSourceState = device_status

        if self._videoSourceState != CaptureDeviceState.Running:
            self.sendCurrentVideoPath("")

    def sendCurrentVideoPath(self, videoPath):

        if videoPath == "":
            video_path = self._remove_credential(self.videoPath)
        else:
            video_path = self._remove_credential(videoPath)

        logging.info('>> Current Video Status {}'.format(
            self._videoSourceState))

        if self.imageStreamHandler is not None:
            if self._videoSourceState == CaptureDeviceState.Running:
                strUserName = ""
                strPassword = ""

                videoType = self._get_Video_Source_Type(videoPath)

                if videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
                    url_parsed = urlparse(videoPath)

                    if url_parsed.password is not None:
                        strPassword = url_parsed.password
                    if url_parsed.username is not None:
                        strUserName = url_parsed.username

                statusMsg = '{{\"DevicePath\":\"{}\",\"isSuccess\":{},\"UserName\":\"{}\",\"Password\":\"{}\"}}'.format(
                    video_path, 1, strUserName, strPassword)
            else:
                statusMsg = '{{\"DeviceStatus\":\"Error ({}): {}\",\"isSuccess\":{},\"UserName\":\"\",\"Password\":\"\"}}'.format(
                    self._videoSourceState, video_path, 0)
            self.imageStreamHandler.submit_write(statusMsg)

    def setVideoPathFromUI(self, json_Data):

        videoPath = ""
        json_Data = json.loads(json_Data)
        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name +
                     '() : {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["UserName"]))
        logging.info('>> {}'.format(json_Data["Password"]))

        videoType = self._get_Video_Source_Type(json_Data["VideoPath"])

        if videoType == CaptureDevice.Webcam:
            videoPath = json_Data["VideoPath"].strip()
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
            url_parsed = urlparse(json_Data["VideoPath"].strip())

            if '@' in url_parsed.netloc or len(json_Data["UserName"]) == 0:
                # already contains password or user name not specified
                videoPath = json_Data["VideoPath"]
            else:
                url_parsed = url_parsed._replace(netloc='{}:{}@{}'.format(
                    json_Data["UserName"], json_Data["Password"],
                    url_parsed.netloc))
                videoPath = url_parsed.geturl()

        self.set_Video_Source(videoPath)

    def _remove_credential(self, videoPath):

        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name + '()')

        ret_Path = ""
        videoType = self._get_Video_Source_Type(videoPath)

        if videoType == CaptureDevice.Webcam:
            ret_Path = videoPath
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:

            url_parsed = urlparse(videoPath)

            if url_parsed.password is not None or url_parsed.username is not None:
                url_parsed = url_parsed._replace(
                    netloc="{}".format(url_parsed.hostname))

            ret_Path = url_parsed.geturl()

        return ret_Path

    def set_Wallpaper(self, image):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        self.displayFrame = cv2.imencode('.jpg', image)[1].tobytes()

    def start(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        while True:
            if self._videoSourceState == CaptureDeviceState.Running:
                self._capture_in_progress = True
                self.__Run__()
                self._capture_in_progress = False
            else:

                if self._videoSourceState == CaptureDeviceState.ErrorOpen or self._videoSourceState == CaptureDeviceState.ErrorRead:
                    self.set_Wallpaper(self._frame_wp_no_video)

                if self._videoSourceType == CaptureDevice.Unknown:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Unknown Device')
                    time.sleep(1.0)
                else:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Device Not Running')
                    # time.sleep(1.0)
                    logging.info('>> Video Ready Event Enter ---------------')
                    self._videoReadyEvent.wait()
                    logging.info('<< Video Ready Event Exit  ---------------')
                    self._videoReadyEvent.clear()

    def __Run__(self):

        if self.verbose:
            logging.info(
                '==============================================================='
            )
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # Check camera's FPS
        if self._cameraFPS == 0:
            logging.error('Error : Could not read FPS')
            # raise Exception("Unable to acquire FPS for Video Source")
            return

        logging.info('>> Frame rate (FPS)     : {}'.format(self._cameraFPS))
        logging.info('>> Run Inference {}'.format(self.runInference))

        perFrameTimeInMs = 1000 / self._cameraFPS

        self.fps.start()
        self.fps.reset()

        while True:

            # Get current time before we capture a frame
            tFrameStart = time.time()
            frame = np.array([])
            captureRet = False

            if self._videoSourceState != CaptureDeviceState.Running:
                break

            captureRet, frame = self.videoStream.read()

            if not captureRet:
                self._videoSourceState = CaptureDeviceState.ErrorRead
                logging.error("ERROR : Failed to read from video source")
                break

            if frame.size > 0:

                # Run Object Detection
                if self.runInference:
                    self.yoloInference.runInference(frame, self._cameraW,
                                                    self._cameraH,
                                                    self.confidenceLevel)

                # Calculate FPS
                currentFPS = self.fps.fps()

                if (currentFPS > self._cameraFPS):
                    # Cannot go faster than Camera's FPS
                    currentFPS = self._cameraFPS

                # Add FPS Text to the frame
                cv2.putText(frame, "FPS " + str(currentFPS),
                            (10, int(30 * self._fontScale)),
                            cv2.FONT_HERSHEY_SIMPLEX, self._fontScale,
                            (0, 0, 255), 2)

                self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()

            timeElapsedInMs = (time.time() - tFrameStart) * 1000

            if perFrameTimeInMs > timeElapsedInMs:
                # This is faster than image source (e.g. camera) can feed.
                waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs
                time.sleep(waitTimeBetweenFrames / 1000.0)

    def __exit__(self, exception_type, exception_value, traceback):

        self.imageServer.close()
        cv2.destroyAllWindows()
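
A hypothetical driver for the class above, using only the context-manager protocol it implements (the device path is an assumption):

if __name__ == '__main__':
    with VideoCapture(videoPath='/dev/video0', verbose=True) as videoCapture:
        videoCapture.start()
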
class CameraCapture(object):

    def __IsInt(self, string):
        try:
            int(string)
            return True
        except ValueError:
            return False

    def __init__(
            self,
            videoPath,
            imageProcessingEndpoint="",
            imageProcessingParams="",
            showVideo=False,
            verbose=False,
            loopVideo=True,
            convertToGray=False,
            resizeWidth=0,
            resizeHeight=0,
            annotate=False,
            sendToHubCallback=None):
        self.videoPath = videoPath
        if self.__IsInt(videoPath):
            # case of a usb camera (usually mounted at /dev/video* where * is an int)
            self.isWebcam = True
        else:
            # case of a video file
            self.isWebcam = False
        self.imageProcessingEndpoint = imageProcessingEndpoint
        if imageProcessingParams == "":
            self.imageProcessingParams = ""
        else:
            self.imageProcessingParams = json.loads(imageProcessingParams)
        self.showVideo = showVideo
        self.verbose = verbose
        self.loopVideo = loopVideo
        self.convertToGray = convertToGray
        self.resizeWidth = resizeWidth
        self.resizeHeight = resizeHeight
        self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo and annotate
        self.nbOfPreprocessingSteps = 0
        self.autoRotate = False
        self.sendToHubCallback = sendToHubCallback
        self.vs = None

        if self.convertToGray:
            self.nbOfPreprocessingSteps += 1
        if self.resizeWidth != 0 or self.resizeHeight != 0:
            self.nbOfPreprocessingSteps += 1
        if self.verbose:
            print("Initialising the camera capture with the following parameters: ")
            print("   - Video path: " + self.videoPath)
            print("   - Image processing endpoint: " +
                  self.imageProcessingEndpoint)
            print("   - Image processing params: " +
                  json.dumps(self.imageProcessingParams))
            print("   - Show video: " + str(self.showVideo))
            print("   - Loop video: " + str(self.loopVideo))
            print("   - Convert to gray: " + str(self.convertToGray))
            print("   - Resize width: " + str(self.resizeWidth))
            print("   - Resize height: " + str(self.resizeHeight))
            print("   - Annotate: " + str(self.annotate))
            print("   - Send processing results to hub: " +
                  str(self.sendToHubCallback is not None))
            print()

        self.displayFrame = None
        if self.showVideo:
            self.imageServer = ImageServer(5012, self)
            self.imageServer.start()

    def __annotate(self, frame, response):
        AnnotationParserInstance = AnnotationParser()
        # TODO: Make the choice of the service configurable
        listOfRectanglesToDisplay = AnnotationParserInstance.getCV2RectanglesFromProcessingService1(
            response)
        for rectangle in listOfRectanglesToDisplay:
            cv2.rectangle(frame, (rectangle[0], rectangle[1]),
                          (rectangle[2], rectangle[3]), (0, 0, 255), 4)
        return

    def __sendFrameForProcessing(self, frame):
        headers = {'Content-Type': 'application/octet-stream'}
        try:
            response = requests.post(
                self.imageProcessingEndpoint, headers=headers, params=self.imageProcessingParams, data=frame)
        except Exception as e:
            print('__sendFrameForProcessing Exception -' + str(e))
            return "[]"

        if self.verbose:
            try:
                print("Response from external processing service: (" +
                      str(response.status_code) + ") " + json.dumps(response.json()))
            except Exception:
                print("Response from external processing service (status code): " +
                      str(response.status_code))
        return json.dumps(response.json())

    def __displayTimeDifferenceInMs(self, endTime, startTime):
        return str(int((endTime-startTime) * 1000)) + " ms"

    def __enter__(self):
        if self.isWebcam:
            # The VideoStream class always gives us the latest frame from the webcam. It uses another thread to read the frames.
            self.vs = VideoStream(int(self.videoPath)).start()
            # needed to load at least one frame into the VideoStream class
            time.sleep(1.0)
            #self.capture = cv2.VideoCapture(int(self.videoPath))
        else:
            # In the case of a video file, we want to analyze all the frames of the video thus are not using VideoStream class
            self.capture = cv2.VideoCapture(self.videoPath)
        return self

    def get_display_frame(self):
        return self.displayFrame

    def start(self):
        frameCounter = 0
        perfForOneFrameInMs = None
        while True:
            if self.showVideo or self.verbose:
                startOverall = time.time()
            if self.verbose:
                startCapture = time.time()

            frameCounter += 1
            if self.isWebcam:
                frame = self.vs.read()
            else:
                frame = self.capture.read()[1]
                if frameCounter == 1:
                    if self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) < self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT):
                        self.autoRotate = True
                if self.autoRotate:
                    # The counterclockwise direction is a guess; it could well be clockwise. Is there a way to auto-detect it?
                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
            if self.verbose:
                if frameCounter == 1:
                    if not self.isWebcam:
                        print("Original frame size: " + str(int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
                                                            ) + "x" + str(int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))))
                        print("Frame rate (FPS): " +
                              str(int(self.capture.get(cv2.CAP_PROP_FPS))))
                print("Frame number: " + str(frameCounter))
                print("Time to capture (+ straighten up) a frame: " +
                      self.__displayTimeDifferenceInMs(time.time(), startCapture))
                startPreProcessing = time.time()

            # Loop video
            if not self.isWebcam:
                if frameCounter == self.capture.get(cv2.CAP_PROP_FRAME_COUNT):
                    if self.loopVideo:
                        frameCounter = 0
                        self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    else:
                        break

            # Pre-process locally
            if self.nbOfPreprocessingSteps == 1 and self.convertToGray:
                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if self.nbOfPreprocessingSteps == 1 and (self.resizeWidth != 0 or self.resizeHeight != 0):
                preprocessedFrame = cv2.resize(
                    frame, (self.resizeWidth, self.resizeHeight))

            if self.nbOfPreprocessingSteps > 1:
                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                preprocessedFrame = cv2.resize(
                    preprocessedFrame, (self.resizeWidth, self.resizeHeight))

            if self.verbose:
                print("Time to pre-process a frame: " +
                      self.__displayTimeDifferenceInMs(time.time(), startPreProcessing))
                startEncodingForProcessing = time.time()

            # Process externally
            if self.imageProcessingEndpoint != "":

                # Encode frame to send over HTTP
                if self.nbOfPreprocessingSteps == 0:
                    encodedFrame = cv2.imencode(".jpg", frame)[1].tobytes()
                else:
                    encodedFrame = cv2.imencode(
                        ".jpg", preprocessedFrame)[1].tobytes()

                if self.verbose:
                    print("Time to encode a frame for processing: " +
                          self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
                    startProcessingExternally = time.time()

                # Send over HTTP for processing
                response = self.__sendFrameForProcessing(encodedFrame)
                if self.verbose:
                    print("Time to process frame externally: " +
                          self.__displayTimeDifferenceInMs(time.time(), startProcessingExternally))
                    startSendingToEdgeHub = time.time()

                # forwarding outcome of external processing to the EdgeHub
                if response != "[]" and self.sendToHubCallback is not None:
                    self.sendToHubCallback(response)
                    if self.verbose:
                        print("Time to message from processing service to edgeHub: " +
                              self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                        startDisplaying = time.time()

            # Display frames
            if self.showVideo:
                try:
                    if self.nbOfPreprocessingSteps == 0:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(frame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),
                                        (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        if self.annotate:
                            # TODO: fix bug with annotate function
                            self.__annotate(frame, response)
                        self.displayFrame = cv2.imencode(
                            '.jpg', frame)[1].tobytes()
                    else:
                        if self.verbose and (perfForOneFrameInMs is not None):
                            cv2.putText(preprocessedFrame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),
                                        (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
                        if self.annotate:
                            # TODO: fix bug with annotate function
                            self.__annotate(preprocessedFrame, response)
                        self.displayFrame = cv2.imencode(
                            '.jpg', preprocessedFrame)[1].tobytes()
                except Exception as e:
                    print("Could not display the video to a web browser.")
                    print('Exception -' + str(e))
                if self.verbose:
                    if 'startDisplaying' in locals():
                        print("Time to display frame: " +
                              self.__displayTimeDifferenceInMs(time.time(), startDisplaying))
                    elif 'startSendingToEdgeHub' in locals():
                        print("Time to display frame: " +
                              self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
                    else:
                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(
                            time.time(), startEncodingForProcessing))
                perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
                if not self.isWebcam:
                    waitTimeBetweenFrames = max(
                        int(1000 / self.capture.get(cv2.CAP_PROP_FPS))-perfForOneFrameInMs, 1)
                    print("Wait time between frames :" +
                          str(waitTimeBetweenFrames))
                    if cv2.waitKey(waitTimeBetweenFrames) & 0xFF == ord('q'):
                        break

            if self.verbose:
                perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
                print("Total time for one frame: " +
                      self.__displayTimeDifferenceInMs(time.time(), startOverall))

    def __exit__(self, exception_type, exception_value, traceback):
        if not self.isWebcam:
            self.capture.release()
        if self.showVideo:
            self.imageServer.close()
            cv2.destroyAllWindows()
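
A hypothetical usage sketch for CameraCapture, again relying only on the context-manager protocol defined above (videoPath '0' selects the webcam branch via __IsInt; the parameter values are assumptions):

if __name__ == '__main__':
    with CameraCapture(videoPath='0', showVideo=True, verbose=True) as cameraCapture:
        cameraCapture.start()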