def kamera_ip(self):
        width = int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
        height = int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('../recording/' + str(self.waktu) + '.avi', fourcc, 20.0, (width, height))
        while self.cam.isOpened():
            #time.sleep(0.1)
            # fetch one JPEG snapshot from the IP webcam and decode it
            # (urllib.request is the Python 3 module; Python 2 used urllib.urlopen)
            imgResp = urllib.request.urlopen(self.url)
            imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)
            img = cv2.imdecode(imgNp, -1)

            if self.grabbed:
                out.write(img)
                #cv2.imshow('IPWebcam', img)
                if (cv2.waitKey(1) & 0xFF) == ord('q'):  # Hit `q` to exit
                    break
        out.release()
        self.cam.release()
        cv2.destroyAllWindows()
Example #2
def draw_geometric_shapes_example():
    """ draw_geometric_shapes_example function """
    img = np.zeros([1914, 1080, 3],
                   np.uint8)  # create img with black bg; shape is 1914 rows x 1080 cols (height x width)

    # draw line to img from (0, 0) to (255, 255) with red color (0, 0, 255) (B G R) and 5 thickness
    img = cv2.line(img, (0, 0), (255, 255), (0, 0, 255), 5)

    # draw arrow line to img from (0, 200) to (255, 500) with blue color (255, 0, 0) (B G R) and 5 thickness
    img = cv2.arrowedLine(img, (0, 200), (255, 500), (255, 0, 0), 5)

    # draw rectangle to img with green color (0, 255, 0) (B G R) and 5 thickness
    # x1,y1 -------
    # |           |
    # |           |
    # |           |
    # ------- x2,y2
    img = cv2.rectangle(img, (600, 100), (900, 400), (0, 255, 0), 5)

    # draw circle to img at (1000, 400) with radius 50, red color (0, 0, 255) (B G R) and 5 thickness
    img = cv2.circle(img, (1000, 400), 50, (0, 0, 255), 5)

    # put text msg "Hello world!!" to img at (70, 750) hershey font 5 font size with text color white (255, 255, 255)
    # and thickness 5 with line type AA
    img = cv2.putText(img, "Hello world!!", (70, 750),
                      cv2.FONT_HERSHEY_SIMPLEX, 5, (255, 255, 255), 5,
                      cv2.LINE_AA)

    cv2.imshow("Image", img)  # display img
    cv2.waitKey(0)
    cv2.destroyAllWindows()
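A couple of related drawing calls often accompany the ones above; a small sketch with arbitrary values, not part of the original example:

img = cv2.ellipse(img, (300, 600), (100, 50), 0, 0, 360, (0, 255, 255), 3)  # full ellipse, no rotation
pts = np.array([[100, 900], [200, 850], [300, 950]], np.int32).reshape(-1, 1, 2)
img = cv2.polylines(img, [pts], True, (255, 255, 0), 3)  # closed polygon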
Example #3
def prepareproposal(path):
    img =  cv.imread(path)
    imgmultiproposals = img.copy()
    proposals = []
    init_react1 = [10, 10, 200, 300]
    # 4 proposals
    proposals = genreateproposal(init_react1, proposals, 4)

    init_react2 = [100,120,300,500]

    proposals = genreateproposal(init_react2,proposals,5)

    proposals = np.array(proposals)

    for rec in proposals:
        cv.rectangle(imgmultiproposals,(rec[0],rec[1]), (rec[2],rec[3]),(0,0,255)) 
    cv.imshow("before",imgmultiproposals)

    keepindex = py_nms(proposals,0.5)
    imgnms = img.copy()
    for i in keepindex:
        rec = proposals[i]
        cv.rectangle(imgnms,(rec[0],rec[1]), (rec[2],rec[3]),(0,0,255)) 
    cv.imshow("after",imgnms)

    cv.waitKey(0)
    cv.destroyAllWindows()
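The py_nms helper used above is not shown in the example; a minimal sketch of plain IoU-based non-maximum suppression over [x1, y1, x2, y2] boxes, assuming input order stands in for score order since the example passes no scores:

def py_nms(boxes, thresh):
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.arange(len(boxes))  # no scores available, so keep input order
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # overlap of the current box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= thresh]  # drop boxes that overlap too much
    return keep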
Example #4
def gen(video):
    while True:
        tmpfile_list = os.listdir(input_dir)
        infile_list = [file for file in tmpfile_list if file.endswith(".jpg")]
        #        print(infile_list)

        if (len(infile_list) == 0):
            sleep(2)

        for i in range(0, len(infile_list)):
            infilename = input_dir + '/' + infile_list[i]

            #            print(infilename)
            image = cv.imread(infilename, 1)

            ret, jpeg = cv.imencode('.jpg', image)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            sleep(1)
            shutil.move(infilename, saved_dir + '/' + infile_list[i])
            #            shutil.copy(infilename,saved_dir+'/'+infile_list[i])
            sleep(1)
        tmpfile_list = ""


#        cv.waitKey(1000)
    cv.destroyAllWindows()
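The --frame multipart boundary above matches the usual Flask MJPEG-streaming pattern; a sketch of how such a generator is typically wired to a route (the Flask app and route name are assumptions, not part of the original):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # each yielded JPEG chunk replaces the previous frame in the browser
    return Response(gen(None),
                    mimetype='multipart/x-mixed-replace; boundary=frame')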
Example #5
def readVideo(skipFrame):  # skipFrame: index of the frame at which tracking setup starts
    selectXY = [(0, 0), (0, 0)]

    camera = cv2.VideoCapture(0)  # open the default camera
    if not camera.isOpened():  # if no camera is found, read a video file from a chosen path instead
        camera = cv2.VideoCapture(
            selectDir())  # e.g. a path such as 'D:/test/test.mp4'
    c = 1

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.rectangle(frame, selectXY[0], selectXY[1], (0, 255, 0), 2)
        if c >= skipFrame:
            mask = np.zeros(
                gray.shape,
                dtype=np.uint8)  # mask of the same size and type as the frame, all zeros; set the region of interest to 1 later
            if c == skipFrame:
                selectXY = get_rect(frame, title='get_rect')  # draw a rectangle with the mouse
                img01, img02 = frame, frame
                gray01, gray02 = gray, gray
            else:
                img1, img2 = prev_frame, frame
                gray1, gray2 = prev_gray, gray
            cv2.imshow('frame', frame)
        c = c + 1
        prev_gray = gray
        prev_frame = frame
        if cv2.waitKey(1) & 0xFF == ord('q'):  # click the video window and press q to quit
            break
    camera.release()
    cv2.destroyAllWindows()
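get_rect() is an external helper that is not shown here; OpenCV's built-in selector can stand in for it. cv2.selectROI returns (x, y, w, h), so the result needs converting to the two-corner form the loop expects (a sketch):

x, y, w, h = cv2.selectROI('get_rect', frame, showCrosshair=False)
selectXY = [(x, y), (x + w, y + h)]
cv2.destroyWindow('get_rect')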
Example #6
def record():
    cap = cv2.VideoCapture(0)
    date = time.gmtime(time.time())
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(
        str(date.tm_year) + str(date.tm_mon) + str(date.tm_mday) + '.avi',
        fourcc, 25.0, (640, 480))

    while (cap.isOpened()):
        ret, frame = cap.read()

        if ret:

            out.write(frame)

            cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()
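Note that cv2.VideoWriter silently drops frames whose dimensions differ from the (640, 480) size given at construction, which leaves an empty .avi; resizing defensively before writing guards against that (a sketch, not in the original):

frame = cv2.resize(frame, (640, 480))
out.write(frame)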
Example #7
def main(args):
    ic = Image_checker()
    rospy.init_node('node_estimate_checker.py', anonymous=True)
    print("node_estimate_checker")
    try:
        rospy.spin()

    except KeyboardInterrupt:
        print("Shut down")
    cv2.destroyAllWindows()
Example #8
def tracking():
    cap = cv2.VideoCapture(0)
    cap.set(3, 320)
    cap.set(4, 240)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow('video', frame)

        if cv2.waitKey(1) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #9
def img_move():
    img = cv2.imread('../images/joy.jpeg')
    h, w, c = img.shape[0:3]
    print(h, w, c)
    M = np.float32([[1, 0, 100], [0, 1, 50]])
    print(M.shape)
    img2 = cv2.warpAffine(img, M, (w, h))
    cv2.imshow('joy ^^', img)
    cv2.imshow('moved_joy ^^', img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
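The same cv2.warpAffine call performs rotation when the 2x3 matrix comes from cv2.getRotationMatrix2D; a small sketch with an arbitrary angle, not part of the original:

R = cv2.getRotationMatrix2D((w // 2, h // 2), 45, 1.0)  # rotate 45 degrees about the center, scale 1.0
img3 = cv2.warpAffine(img, R, (w, h))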
Example #10
def access_camera(cam_id):
    """
    cam_id : camera ID obtained from user.
    """
    # Invoke VideoCapture by passing the camera ID argument obtained from the user
    cap = cv2.VideoCapture(cam_id)

    # Check that the camera opened; if it didn't, raise an appropriate error
    if not cap.isOpened():
        raise IOError(
            "Cannot access camera, please check if you entered the ID correctly"
        )

    # While camera is opened
    while True:

        # Read the camera input
        """
        ret : return True if the frame is available from the camera reading operation below.
        frame : array of frames as it loops.
        """
        start_time = time.time()
        ret, frame = cap.read()

        # Resize to save resources and keep a defined window size
        frame = cv2.resize(frame, (640, 480))

        # FPS = 1 / time to process loop
        fps = 1.0 / (time.time() - start_time)
        print("FPS: ", fps)
        # fps = cv2.VideoCapture.get(CV_CAP_PROP_FPS)
        # print(fps)

        cv2.putText(frame, "FPS: {}".format(fps), (100, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, 255)

        # Display the result
        cv2.imshow("output", frame)

        # Check to see if escape key was pressed every defined time interval and break if pressed
        check = cv2.waitKey(1)
        if check == 27:
            break

    # Release the camera resource
    cap.release()

    # Destroy all windows
    cv2.destroyAllWindows()
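The instantaneous FPS printed above jumps around from frame to frame; an exponential moving average gives a steadier readout (a sketch; fps_smooth would be initialised to 0.0 before the loop):

fps_smooth = 0.9 * fps_smooth + 0.1 * fps
cv2.putText(frame, "FPS: {:.1f}".format(fps_smooth), (100, 100),
            cv2.FONT_HERSHEY_SIMPLEX, 2, 255)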
Example #11
def camShift():
    global frame, frame2, inputmode, trackWindow, roi_hist, out

    try:
        cap = cv2.VideoCapture(1)
        cap.set(3, 480)
        cap.set(4, 320)
    except:
        print('Failed to open the camera')
        return

    ret, frame = cap.read()

    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', onMouse, param=(frame, frame2))

    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if trackWindow is not None:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
            ret, trackWindow = cv2.CamShift(dst, trackWindow, termination)

            pts = cv2.boxPoints(ret)
            pts = np.int0(pts)
            cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

        cv2.imshow('frame', frame)

        k = cv2.waitKey(60) & 0xFF
        if k == 27:
            break
        if k == ord('i'):
            print('Select a region to track, then press any key')
            inputmode = True
            frame2 = frame.copy()

            while inputmode:
                cv2.imshow('frame', frame)
                cv2.waitKey(0)

    cap.release()
    cv2.destroyAllWindows()
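The onMouse callback that fills roi_hist is not shown; CamShift needs a hue histogram of the selected region, built roughly like this (the corner variables c, r, w and h are assumptions standing in for the mouse selection):

c, r, w, h = 100, 100, 80, 80  # hypothetical selection rectangle
roi = frame[r:r + h, c:c + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
trackWindow = (c, r, w, h)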
Example #12
def transform():
    img = cv2.imread('../images/joy.jpeg')
    h, w = img.shape[0:2]

    print(h, w)

    # source image, dsize tuple (None here), scale factors fx and fy, and the interpolation method
    img2 = cv2.resize(img, None, fx=0.5, fy=1, interpolation=cv2.INTER_AREA)
    img3 = cv2.resize(img, None, fx=1, fy=0.5, interpolation=cv2.INTER_AREA)
    img4 = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)

    cv2.imshow('joy', img)
    cv2.imshow('joy2', img2)
    cv2.imshow('joy3', img3)
    cv2.imshow('joy4', img4)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #13
def cam():

    global imgencode

    while True:

        ret1, img1 = cam1.read()

        if not ret1 or img1 is None:
            continue

        img1 = cv2.resize(img1, (320, 240))

        result, imgencode = cv2.imencode('.jpg', img1)

        print(imgencode)

        if cv2.waitKey(10) == 27:
            break
    cv2.destroyAllWindows()
Example #14
def main():
    global img1, img2

    img1 = cv2.imread('C:/Users/HP NOTE/Desktop/test/1.jpg')
    img2 = img1.copy()

    cv2.namedWindow('original'), cv2.namedWindow('backproj')
    cv2.setMouseCallback('original', onMouse, param=None)

    cv2.imshow('backproj', img2)

    while True:
        cv2.imshow('original', img1)

        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break

    cv2.destroyAllWindows()
Example #15
def tracking():
    try:
        print('Starting the camera')
        cap = cv2.VideoCapture('../videos/sekyung.mp4')
    except:
        print('Failed to open the camera')
        return

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        #frame = cv2.imread('../images/traffic_light3.png')
        # BGR -> HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        lower_blue = np.array([90, 100, 100])
        upper_blue = np.array([130, 255, 255])

        lower_green = np.array([30, 100, 100])
        upper_green = np.array([70, 255, 255])

        lower_red = np.array([0, 100, 100])
        upper_red = np.array([10, 255, 255])

        # threshold values for extracting blue, green and red from the HSV image
        mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
        mask_green = cv2.inRange(hsv, lower_green, upper_green)
        mask_red = cv2.inRange(hsv, lower_red, upper_red)

        # AND each mask with the original frame
        res1 = cv2.bitwise_and(frame, frame, mask=mask_blue)
        res2 = cv2.bitwise_and(frame, frame, mask=mask_green)
        res3 = cv2.bitwise_and(frame, frame, mask=mask_red)

        cv2.imshow('original', frame)
        cv2.imshow('blue', res1)
        cv2.imshow('green', res2)
        cv2.imshow('red', res3)

        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
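OpenCV hue runs 0..179, so red wraps around 0 and the [0, 10] range above only catches the low side; a common fix is to OR in a second mask for the high end (a sketch):

mask_red_low = cv2.inRange(hsv, np.array([0, 100, 100]), np.array([10, 255, 255]))
mask_red_high = cv2.inRange(hsv, np.array([170, 100, 100]), np.array([179, 255, 255]))
mask_red = cv2.bitwise_or(mask_red_low, mask_red_high)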
Example #16
def video():

    cap = cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        out.write(frame)

        cv2.imshow('frame', frame)
        cv2.imshow('gray', gray)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

    return
Example #17
def drawLines():
    img = np.full((400, 400, 3), 255, np.uint8)
    cv2.line(img, (50, 50), (200, 50), (0, 0, 255))
    cv2.line(img, (50, 100), (200, 100), (255, 0, 255), 3)
    cv2.line(img, (50, 150), (200, 150), (255, 0, 0), 10)
    cv2.line(img, (250, 50), (350, 100), (0, 0, 255), 1, cv2.LINE_4)
    cv2.line(img, (250, 70), (350, 120), (255, 0, 255), 1, cv2.LINE_8)
    cv2.line(img, (250, 90), (350, 140), (255, 0, 0), 1, cv2.LINE_AA)
    cv2.arrowedLine(img, (50, 200), (150, 200), (0, 0, 255), 1)
    cv2.arrowedLine(img, (50, 250), (350, 250), (255, 0, 255), 1)
    cv2.arrowedLine(img, (50, 300), (350, 300), (255, 0, 0), 1, cv2.LINE_8, 0,
                    0.05)
    cv2.drawMarker(img, (50, 350), (0, 0, 255), cv2.MARKER_CROSS)
    cv2.drawMarker(img, (100, 350), (0, 0, 255), cv2.MARKER_TILTED_CROSS)
    cv2.drawMarker(img, (150, 350), (0, 0, 255), cv2.MARKER_STAR)
    cv2.drawMarker(img, (200, 350), (0, 0, 255), cv2.MARKER_DIAMOND)
    cv2.drawMarker(img, (250, 350), (0, 0, 255), cv2.MARKER_SQUARE)
    cv2.drawMarker(img, (300, 350), (0, 0, 255), cv2.MARKER_TRIANGLE_UP)
    cv2.drawMarker(img, (350, 350), (0, 0, 255), cv2.MARKER_TRIANGLE_DOWN)
    cv2.imshow("img", img)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #18
def create(a, b):
    from Name import name
    user = name()
    filename = 'write_data.txt'
    with open(filename, 'a') as f:
        #id = input("Please input an id: ")
        #Sname = input("please input a name: ")
        id = a
        user.id.append(a)
        user.name.append(b)
        user.index = user.index + 1
        f.write(str(user.id[user.index - 1]) + " " +
                user.name[user.index - 1])
        f.write("\n")

    faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cam = cv2.VideoCapture(0)
    sampleNum = 0
    while True:
        ret, img = cam.read()
        if len(img.shape) == 3 or len(img.shape) == 4:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img
        faces = faceDetect.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            sampleNum = sampleNum + 1
            cv2.imwrite(
                "dataSet/User." + str(id) + "." + str(sampleNum) + ".jpg",
                gray[y:y + h, x:x + w])
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.waitKey(100)
        cv2.imshow("Face", img)
        cv2.waitKey(1)
        if sampleNum > 100:
            break
    cam.release()
    cv2.destroyAllWindows()
Example #19
def readVideo(skipFrame):  # skipFrame: index of the frame at which the selection starts
    selectXY = [(0, 0), (0, 0)]

    camera = cv2.VideoCapture(0)  # open the default camera
    if not camera.isOpened():  # if no camera is found, read a video file from a chosen path instead
        camera = cv2.VideoCapture(
            selectDir())  # e.g. a path such as 'D:/test/test.mp4'
    i = 0

    while camera.isOpened():
        ret, frame = camera.read()
        if not ret:
            break
        cv2.rectangle(frame, selectXY[0], selectXY[1], (255, 255, 0), 2)
        i += 1
        if i >= 20:
            if i == skipFrame:
                selectXY = get_rect(frame, title='get_rect')  # draw a rectangle with the mouse
                print(selectXY)
            cv2.imshow('frame', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):  # click the video window and press q to quit
            break
    camera.release()
    cv2.destroyAllWindows()
Example #20
def draw_circle(event, x, y, flags, param):
    # the original left the event test unfinished; EVENT_LBUTTONDOWN matches the
    # function's name, so draw a filled circle wherever the left button is pressed
    if event == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(img, (x, y), 50, (0, 255, 0), -1)

cv2.namedWindow(winname='my_drawing')

cv2.setMouseCallback('my_drawing', draw_circle)


## showing img with OpenCV

img = np.zeros((512, 512, 3), np.uint8)

while True:

    cv2.imshow('my_drawing', img)

    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()
Example #21
def genmask(video):
    while True:
        mask_list = os.listdir(mask_dir)
        #        print(mask_list)
        if (len(mask_list) == 0):
            detectmask()
            mask_list = os.listdir(mask_dir)

        for i in range(0, len(mask_list)):
            maskname = mask_dir + '/' + mask_list[i]
            image = cv.imread(maskname, 1)

            ret, jpeg = cv.imencode('.jpg', image)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            sleep(1)
            shutil.move(maskname, unmask_dir + '/' + mask_list[i])


#            os.remove(filename)
        cv.waitKey(1000)
    cv.destroyAllWindows()
Example #22
import os
import cv2
import numpy as np
from PIL import Image

recognizer = cv2.face.LBPHFaceRecognizer_create()  # current API; older OpenCV builds used cv2.createLBPHFaceRecognizer()
path = 'dataSet'


def getImagesWithID(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faces = []
    IDs = []
    for imagePath in imagePaths:
        faceImg = Image.open(imagePath).convert('L')
        faceNp = np.array(faceImg, 'uint8')
        ID = int(os.path.split(imagePath)[-1].split('.')[1])
        faces.append(faceNp)
        print(ID)
        IDs.append(ID)
        cv2.imshow("training", faceNp)
        cv2.waitKey(10)
    return IDs, faces


Ids, faces = getImagesWithID(path)
recognizer.train(faces, np.array(Ids))
recognizer.save('recognizer/trainingData.yml')
cv2.destroyAllWindows()
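Once trained, the saved model is typically loaded back for prediction along these lines (a sketch; the probe image path is an assumption):

recognizer2 = cv2.face.LBPHFaceRecognizer_create()
recognizer2.read('recognizer/trainingData.yml')
probe = cv2.imread('test_face.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical probe image
label, confidence = recognizer2.predict(probe)  # lower confidence means a closer match
print(label, confidence)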
Example #23
# E-MAIL: [email protected]
#
# ===========================================================
#
# DESCRIPTION: lab01.py
#
# The code below loads into memory an image stored in the same directory as
# the program, then displays it in a window named 'image'. The window stays
# open until the 'esc' key is pressed; if the 's' key is pressed instead, the
# image is saved to the program's directory in PNG format under the name
# 'messigray'.
#
# ===========================================================

# import the numpy library (scientific computing, array manipulation and linear algebra)
import numpy as np
# import the OpenCV library (Open Source Computer Vision Library)
import cv2 as cv

# load the image 'messi5.jpg' into the variable 'img'; the argument '0' loads it in grayscale
img = cv.imread('messi5.jpg', 0)
# display the image stored in 'img' in a window named 'image'
cv.imshow('image', img)

k = cv.waitKey(0)  # wait for a key press
if k == 27:  # if 'Esc' is pressed...
    cv.destroyAllWindows()  # close all windows
elif k == ord('s'):  # if 's' is pressed...
    cv.imwrite('messigray.png', img)  # save 'img' to the program's directory as 'messigray' in PNG format
    cv.destroyAllWindows()  # close all windows
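On some 64-bit builds cv.waitKey returns more than 8 significant bits, so masking the result is a common guard before comparing key codes (a sketch):

k = cv.waitKey(0) & 0xFF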
Example #24
def run(source=0, source2=0, source3=0):
    # ---- Initializing DNN  -----
    from sklearn.preprocessing import normalize
    caffe.set_mode_gpu()
    # cap=cv2.VideoCapture('../../vd1.mp4')
   #net = caffe.Net('../../segnet_inference.prototxt','../../segnet_iter_10000.caffemodel',caffe.TEST)
    #net = caffe.Net('../../enet_deploy_final.prototxt','../../segnet_iter_32000.caffemodel',caffe.TEST)
    net = caffe.Net('../../enet_deploy_final.prototxt','../../segnet_iter_55000.caffemodel',caffe.TEST)
    
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})

    background = [255,0,0]
    car = [0,255,0]
    numberplate = [0,0,255]
    label_colours = np.array([background, car, numberplate])
    label_colour = np.array([2, 1, 0])
    cropped=[]
    resizeimg=[]
    # ---- End of Initializing DNN  -----
    # ---- Initializing Tracker  -----
    # initializing yolo
    options = {"model": "cfg/tiny-yolo-voc.cfg", "load": "tiny-yolo-voc.weights", "threshold": 0.5}  # , "gpu":1.0
    tfnet = TFNet(options)

    #----violation detection initial data starts----
    temp_count = 0
    pre_plate=''
    URL = "fypandroid-gpu"
    slot1_violation = "no"
    slot2_violation = "no"
    slot1_occupancy = "free"
    slot2_occupancy = "free"
    slot1_violation_new = "no"
    slot2_violation_new = "no"
    slot1_occupancy_new = "free"
    slot2_occupancy_new = "free"
    update_na = 0
    slot1_num = "n/a"
    slot2_num = "n/a"
    updateonce = 0
    
    firebase.patch(URL, {'Slot_01_vehicle_number': "n/a"})
    firebase.patch(URL, {'Slot_02_vehicle_number': "n/a"})

    imgavgback = cv2.imread(avgbackgrndpath, 1)
    imgavgbackori = imgavgback.copy()
    imgavgbackorigray = cv2.cvtColor(imgavgback,cv2.COLOR_BGR2GRAY)
    imgavgback = imgavgback[80:400, 200:595]
    imgrayavgback = cv2.cvtColor(imgavgback,cv2.COLOR_BGR2GRAY)
    imgavgback -= 20

    #emptyavg1 = np.mean(imgavgbackorigray[100:180,400:470])
    #emptyavg2 = np.mean(imgavgbackorigray[205:300,400:470])

    MidPoints = []
    MidPoints.append([250,55]) #x,y slot 1
    MidPoints.append([255,170]) #x,y slot 2

    xyminmax = []
    xyminmax.append([135,390,5,120]) #xmin,xmax,ymin,ymax slot 1
    xyminmax.append([135,390,115,235]) #xmin,xmax,ymin,ymax slot 2
    #cap = cv2.VideoCapture('cam1.mp4')

    #----violation detection initial data ends----
    # Create the VideoCapture object
    cam = cv2.VideoCapture(source)
    cam2 = cv2.VideoCapture(source2)
    cap = cv2.VideoCapture(source3)
    # If Camera Device is not opened, exit the program
    if not cam.isOpened():
        print "Video device 1 or file 1 couldn't be opened"
        exit()
    if not cam2.isOpened():
        print "Video device 2 or file 2 couldn't be opened"
        exit()
    if not cap.isOpened():
        print "Video device 3 or file 3 couldn't be opened"
        exit()
    retval, img = cam.read()
    retval2, img2 = cam2.read()
    ret,frame = cap.read()
    if not retval:
        print "Cannot capture frame device 1"
        exit()
    if not retval2:
        print "Cannot capture frame device 2"
        exit()
    if not ret:
        print "Cannot capture frame device 3"
        exit()
    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.namedWindow("Image2", cv2.WINDOW_NORMAL)
    cv2.namedWindow("frame", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", img)
    cv2.imshow("Image2", img2)
    cv2.imshow("frame", frame)

    # values for tracking
    threshld = 10
    height = 480 -threshld
    width = 640 -threshld
    limit = height - 20
    tryframes = 8 

    points = []
    points2 = []

    # Create the tracker object
    tracker = [dlib.correlation_tracker() for _ in xrange(0)]
    tracker2 = [dlib.correlation_tracker() for _ in xrange(0)]
    # Provide the tracker the initial position of the object
    # [tracker[i].start_track(img, dlib.rectangle(*rect)) for i, rect in enumerate(points)]
    for k, rect in enumerate(points):
        tracker[k].start_track(img, dlib.rectangle(*rect))
    for k2, rect in enumerate(points2):
        tracker[k2].start_track(img2, dlib.rectangle(*rect))

    temp_tracker = list(tracker) #to remove the deleted trackers from tracker without affecting for loop 
    temp_tracker2 = list(tracker2)
    trigerYolo = 0
    justdetected = 100
    licenseplates = []
    licenseplates2 = []
    predetect = False
    # time.sleep(5)
    # ---- End Initializing Tracker  -----
    # count = 0  # facilitate to set the flag to start tracking (temporary) ****************
    # refimg=cv2.imread("111.jpg",0)
    # retref, refthresh = cv2.threshold(refimg, -1, 255,0)
    # refimg, refcontours, refhierarchy = cv2.findContours(refthresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # refcnt = refcontours[0]
    while True:
        # count += 1   # facilitate to set the flag to start tracking (temporary) ****************
        # print count
        # print "----- "+str(justdetected)+" --"
    # ---- DNN starts -----
        # time.sleep(.5)
        start=time.time()    
  
        # Read frame from device or file
        retval, img = cam.read()
        retval2, img2 = cam2.read()
        ret,frame=cap.read()
        img = cv2.resize(img, (640,480))
        img2 = cv2.resize(img2, (640,480))
        s=cv2.resize(frame,(480,360))
        img2ori = img2.copy()
        if not retval:
            print "Cannot capture frame device 1 | CODE TERMINATION....."
            exit()
        if not retval2:
            print "Cannot capture frame device 2 | CODE TERMINATION....."
            exit()
        if not ret:
            print "Cannot capture frame device 3 | CODE TERMINATION....."
            exit()

        justdetected += 1
        if justdetected >100 :
            justdetected =100 
        start_violation = 0
        start_tracking = False
        if justdetected > 4:
            
            #ret=cap.set(cv2.CAP_PROP_FPS,4)
            #ret=cap.set(3,360)
            #ret=cap.set(4,480)
            #frame = cv2.imread("myimage.png",1)
            transformer.set_transpose('data', (2,0,1))
            transformer.set_channel_swap('data', (2,1,0)) # if using RGB instead of BGR
            transformer.set_raw_scale('data', 255.0)
            net.blobs['data'].reshape(1,3,360,480)
            net.blobs['data'].data[...] = transformer.preprocess('data', s)

            output = net.forward()
            # print("---%s in sec"%(time.time()-start))
            predicted = net.blobs['prob'].data
            output = np.squeeze(predicted[0,:,:,:])
            ind = np.argmax(output, axis=0)
            p = ind.copy()
            d = ind.copy()
            r = ind.copy()
            g = ind.copy()
            b = ind.copy()
            for l in range(0,3):
                p[ind==l] = label_colour[l]
            cv2.imwrite("ind.png", p)
            ind = cv2.imread("ind.png",0)
            ctimage, contours, hierarchy = cv2.findContours(ind,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            # Vimgc = cv2.drawContours(s, contours, -1, (0,0,255), 3)
            #label_colours = np.array([Sky, Building, Pole])
            for l in range(0,3):
                r[ind==l] = label_colours[l,0]
                g[ind==l] = label_colours[l,1]
                b[ind==l] = label_colours[l,2]
            # cv2.imshow('framex',d)
            rgb = np.zeros((ind.shape[0], ind.shape[1], 3))
            rgb[:,:,0] = r/255.0
            rgb[:,:,1] = g/255.0
            rgb[:,:,2] = b/255.0
            #plt.figure()
            #plt.imshow(rgb,vmin=0, vmax=1)
            #plt.show()
             # im = cv2.imread(sys.argv[1])
             # im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) / 255.

            #f = numpy.load(sys.argv[2])
            #param_vals = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]

             # letter_probs = detect(im_gray, param_vals)


            #code = letter_probs_to_code(letter_probs)
             # print("Number Plate ->",code)
            #pre_plate='' 
            detectnow = False
            NoAllVContours = len(hierarchy[0])
            # print NoAllVContours 
            if(NoAllVContours<6):    
                for i in range(NoAllVContours):
                    M = cv2.moments(contours[i])
                    # print M['m00']
                    # print "#############################################################################################################################"
                    # print cv2.matchShapes(refcnt,contours[i],1,0.0)
                    if (5000>M['m00']>3000):
                        if (int(M['m01']/M['m00'])>170):
                            # if ( cv2.matchShapes(refcnt,contours[i],1,0.0) < 0.7):
                            x,y,w,h=cv2.boundingRect(contours[i])
                            cropped = s[y:y+h, x:x+w]
                            resizeimg=cv2.resize(cropped,(256,32))
                            cv2.imshow('framerty',resizeimg)
                            
                            im_gray = cv2.cvtColor(resizeimg, cv2.COLOR_BGR2GRAY) / 255.
                            start=time.time()
                            f = np.load("weights.npz")
                            param_vals = [f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))]
                            letter_probs = detect(im_gray, param_vals)
                            code = letter_probs_to_code(letter_probs)
                            print("---%s in sec"%(time.time()-start))
                            #tempcode = code
                            if code[0] == "Z":
                                code = code[1:]
                            print("Number Plate ->",code)
                            detectnow = True
                            if (detectnow & predetect) : #((pre_plate == code) & (temp_count == 1)):
                                temp_count = 0
                                start_tracking = True
                                justdetected = 0
                                with open('GUI/VehicleNumber.csv','w') as f:
                                    writer = csv.writer(f)
                                    writer.writerow([code])
                            else:
                                if (pre_plate!=code):
                                    temp_count = 1
                                pre_plate=code
                            # elif(cv2.matchShapes(refcnt,contours[i],1,0.0)==0.0):   
                                # cv2.imshow('framex',rgb)     
                                # cv2.imwrite("sd.png", rgb)
                        # print pre_plate
            predetect = detectnow 
            # print licenseplates
            #print cap.get(cv2.CAP_PROP_FPS)
            #print frame.shape
            cv2.imshow('frame2',rgb)
            if cv2.waitKey(1) & 0xff== ord('q'):
                break
            # DNN sets the flag to start tracking
            # if count == 36:  # facilitate to set the flag to start tracking (temporary) ****************
            #     start_tracking = True
            #     print licenseplates
        # ---- DNN stops -----
        # ---- Tracker starts -----
        # print temp_tracker
        tracker = list(temp_tracker) 
        tracker2 = list(temp_tracker2) 
        
        
        new_points = []
        if trigerYolo > 0:  # if detection did not happen for the last frame
            yoloresult = tfnet.return_predict(img)
            for detectObj in yoloresult:
                if (inRegion(detectObj)):
                    vehicleloc = (detectObj['topleft']['x'],detectObj['topleft']['y']+20,detectObj['bottomright']['x'],detectObj['bottomright']['y']+10)
                    new_points.append(vehicleloc)
                    licenseplates.append(code)
                    print ("Tracking started for the request")
                    trigerYolo = 0
                    break
            print("Vehicle detection fails for the request for the next frame as well.")   
            trigerYolo -= 1
            cv2.imwrite('track/im_'+str(trigerYolo)+'.png', img)

        if(start_tracking):  #new vehicle trying to add if "p" pressed
            cv2.imwrite('track/im_'+str(trigerYolo)+'.png', img) # to save photos at tracking
            # yolo detection
            yoloresult = tfnet.return_predict(img) 
            for detectObj in yoloresult:
                if (inRegion(detectObj)):
                    vehicleloc = (detectObj['topleft']['x'],detectObj['topleft']['y']+20,detectObj['bottomright']['x'],detectObj['bottomright']['y']+10)
                    new_points.append(vehicleloc)
                    licenseplates.append(code)
                    # print licenseplates
                    print ("Tracking started for the request")
                    break # only one vehicle will be added per one request *************** optimize this to reduce issues with multi vehicles trying to enter at the same time
            if (len(new_points)<1):
                print "no vehicle found in frame 1: trying few other frames triggered"
                trigerYolo = tryframes   # trying few frames to capture a vehicle if not detected from first few frames

        
        if len(new_points) > 0: # if there is a newly detected vehicle
            # Create the tracker object
            new_tracker = [dlib.correlation_tracker() for _ in xrange(len(new_points))]
            # Provide the tracker the new positions of the object
            for j, rect2 in enumerate(new_points):
                new_tracker[j].start_track(img, dlib.rectangle(*rect2))
            # [tracker[j].start_track(img, dlib.rectangle(*rect)) for j, rect in enumerate(points)]
            tracker.extend(new_tracker)
            print "Success: Tracking started for the newly entered vehicle."

        temp_tracker = list(tracker)
        # Update the tracker  
        for i in xrange(len(tracker)): #for number of objects to track
            confdnc = tracker[i].update(img)
            print confdnc
            
            # Get the position of the object, draw a
            # bounding box around it and display it.
            rect = tracker[i].get_position()
            pt1 = (int(rect.left()), int(rect.top()))
            pt2 = (int(rect.right()), int(rect.bottom()))


            if (pt1[0]< threshld or pt1[1]< threshld or pt2[0]>=width):
                del licenseplates[i]
                del temp_tracker[i]
                print "Object removed: out of the frame/ unexpected behavior"
                continue
            if (pt2[1]>= limit):
                licenseplates2.append(licenseplates[i])
                del licenseplates[i]
                del temp_tracker[i]
                print "Object removed: vehicle enters next view"
                new_points2 = [(10, 150, 120, 260)]
                # new_tracker2 = dlib.correlation_tracker()
                # # Provide the tracker the new positions of the object
                # new_tracker2.start_track(img2, dlib.rectangle(*new_points2[0]))
                # # [tracker[j].start_track(img, dlib.rectangle(*rect)) for j, rect in enumerate(points)]
                # tracker2.append(new_tracker2)

                new_tracker2 = [dlib.correlation_tracker() for _ in xrange(len(new_points2))]
                # Provide the tracker the new positions of the object
                for j2, rect2_2 in enumerate(new_points2):
                    new_tracker2[j2].start_track(img2, dlib.rectangle(*rect2_2))
                # [tracker[j].start_track(img, dlib.rectangle(*rect)) for j, rect in enumerate(points)]
                tracker2.extend(new_tracker2)


                print "Success: Tracking started from view 2."
                continue
            # if (confdnc < 3.5):
            #     print "Low Confidence"
            #     # continue

            cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
            # print "Object {} tracked at [{}, {}] \r".format(i, pt1, pt2),

            # label the box with the tracked vehicle's plate number
            loc = (int(rect.left()), int(rect.top()-20))
            txt = "Vehicle : [{}]".format(licenseplates[i])
            cv2.putText(img, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 1)
        temp_tracker2 = list(tracker2)
        for i2 in xrange(len(tracker2)): #for number of objects to track
            confdnc2 = tracker2[i2].update(img2)
            rect_2 = tracker2[i2].get_position()
            
            pt1_2 = (int(rect_2.left()), int(rect_2.top()))
            pt2_2 = (int(rect_2.right()), int(rect_2.bottom()))

            cv2.rectangle(img2, pt1_2, pt2_2, (255, 255, 255), 3)
            loc = (int(rect_2.left()), int(rect_2.top()-20))
            txt = "Vehicle : [{}]".format(licenseplates2[i2])
            cv2.putText(img2, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 1)
            if rect_2.right() >200:
                dist1 = ((MidPoints[0][0]- ((pt1_2[0]+pt2_2[0])/2))**2+(MidPoints[0][1]-((pt1_2[1]+pt2_2[1])/2))**2)
                dist2 = ((MidPoints[1][0]- ((pt1_2[0]+pt2_2[0])/2))**2+(MidPoints[1][1]-((pt1_2[1]+pt2_2[1])/2))**2)
                if dist1<dist2:
                    if slot1_num != txt:
                        firebase.patch(URL, {'Slot_01_vehicle_number': licenseplates2[i2]})
                        slot1_num = txt
                        if len(tracker2) == 1:
                            firebase.patch(URL, {'Slot_02_vehicle_number': "n/a"})
                else:
                    if slot2_num != txt:
                        firebase.patch(URL, {'Slot_02_vehicle_number': licenseplates2[i2]})
                        slot2_num = txt
                        if len(tracker2) == 1:
                            firebase.patch(URL, {'Slot_01_vehicle_number': "n/a"})
                start_violation = 1
                update_na = 1
            else:
                start_violation = 0
                if update_na == 1:
                    firebase.patch(URL, {'Slot_01_vehicle_number': "n/a"})
                    firebase.patch(URL, {'Slot_02_vehicle_number': "n/a"})
                    update_na = 0


        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)        # ***** commented *******
        # cv2.namedWindow("Image2", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
        cv2.imshow("Image2", img2)
        cv2.imshow('frame',s)
    # ---- end of tracker -----
    # ---- Violation detection starts ----
        #print "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq" + str(start_violation)
        if start_violation == 1:
            #retpark, carpark = cam2.read()
            carpark = img2ori
            # carpark = cv2.resize(carpark, (640,480))   # Width,Height
            #carparkori = carpark.copy()
            #carparkgrayori = cv2.cvtColor(carpark,cv2.COLOR_BGR2GRAY)

            #vehavg1 = np.mean(carparkgrayori[100:180,400:470])
            #vehavg2 = np.mean(carparkgrayori[205:300,400:470])
            updateonce = 0
            carpark = carpark[80:400, 200:595]
            carparkgray = cv2.cvtColor(carpark,cv2.COLOR_BGR2GRAY)

            #cv2.imshow('frame',frame)
            #if cv2.waitKey(1) & 0xFF == ord('q'):
                #break

            vehicles = cv2.subtract(carpark, imgavgback)
            #vehicles = carpark - imgavgback
            #vehiclesgray = imgavgbackgray - imgrayavgback
            vehiclesgray = cv2.cvtColor(vehicles,cv2.COLOR_BGR2GRAY)

            #retv,vehiclesgray = cv2.threshold(imgrayavgback,50,255,cv2.THRESH_BINARY)

            blur = cv2.GaussianBlur(vehiclesgray,(5,5),0)
            ret3,vehiclesgray = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            ret1,vehiclesgray = cv2.threshold(blur,ret3-25,255,cv2.THRESH_BINARY)

            #ret2,vehiclesgray = cv2.threshold(vehiclesgray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

            #vehiclesgray = cv2.adaptiveThreshold(vehiclesgray,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)
            #vehiclesgray = cv2.adaptiveThreshold(vehiclesgray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)


            vcontimage, vehicontours, vehihierarchy = cv2.findContours(vehiclesgray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

            NoAllContours = len(vehihierarchy[0])

            #print(NoAllContours)
            #print(vehicles)

            #Getting the parent Vcontour numbers
            VehicleNo = []

            #Mid points of vehicles
            VehicleMid = []

            for i in range(NoAllContours):
                #if (hierarchy[0,i,3]==-1):     
                    M = cv2.moments(vehicontours[i])
                    if ((M['m00']>3000)):
                            VehicleNo.append(i)
                            cx = int(M['m10']/M['m00'])
                            cy = int(M['m01']/M['m00'])
                            VehicleMid.append([cx,cy])


            #Number of all child countours
            NumOfVehicles = len(VehicleNo)

            #for i in range(NumOfVehicles):
                #Vimgc = cv2.drawContours(Pimg, contours, VehicleNo[i] , (0,0,255), 3)

            if (NumOfVehicles==0):
                    imgavgbackori[100:180,400:470] = (0, 255, 0)
                    imgavgbackori[205:300,400:470] = (0, 255, 0)
                    if slot1_occupancy != slot1_occupancy_new:
                        firebase.patch(URL, {'Slot_01_occupancy_status': 'free'})
                        slot1_occupancy = slot1_occupancy_new
                    if slot2_occupancy != slot2_occupancy_new:
                        firebase.patch(URL, {'Slot_02_occupancy_status': 'free'})
                        slot2_occupancy = slot2_occupancy_new
                    if slot1_violation != slot1_violation_new:
                        firebase.patch(URL, {'Slot_01_violations': 'no'})
                        slot1_violation = slot1_violation_new
                    if slot2_violation != slot2_violation_new:
                        firebase.patch(URL, {'Slot_02_violations': 'no'})
                        slot2_violation = slot2_violation_new
                    #if ((abs(vehavg1-emptyavg1))>30):
                        #img[100:180,400:470] = (0, 0, 255)
                    #else:
                        #img[100:180,400:470] = (0, 255, 0)
                    #if ((abs(vehavg2-emptyavg2))>30):
                        #img[205:300,400:470] = (0, 0, 255)
                    #else:
                        #img[205:300,400:470] = (0, 255, 0)
                #if (NumOfVehicles>0):
            else:
                    counter = 0
                    #error = 0

                    for i in VehicleNo:
                        NoIndices = len(vehicontours[i])
                        vehix = []  
                        vehiy = []
                        for j in range(NoIndices):
                                vehix.append(vehicontours[i][j][0,0])
                                vehiy.append(vehicontours[i][j][0,1])

                        #Vehicle Contour min max coordinate values
                        xmin = min(vehix)
                        xmax = max(vehix)
                        ymin = min(vehiy)
                        ymax = max(vehiy)

                        distance = []

                        for k in range(2):
                                vehid = ((MidPoints[k][0]-VehicleMid[counter][0])**2+(MidPoints[k][1]-VehicleMid[counter][1])**2)
                                distance.append(vehid)

                        parkingslot = distance.index(min(distance))

                        drawcon = 0
                        if (xmin < xyminmax[parkingslot][0]):
                                #error += 1
                                drawcon = 1
                        elif (xmax > xyminmax[parkingslot][1]):
                                #error += 1
                                drawcon = 1
                        elif (ymin < xyminmax[parkingslot][2]):
                                #error += 1
                                drawcon = 1
                        elif (ymax > xyminmax[parkingslot][3]):
                                #error += 1
                                drawcon = 1

                        if ((parkingslot == 0)&(drawcon == 1)):
                                veimgc = cv2.drawContours(carpark, vehicontours, i, (0,0,255), 3)
                                imgavgbackori[100:180,400:470] = (255, 0, 0)
                                slot1_violation_new = "yes"
                                slot1_occupancy_new = "occupied"
                                if slot1_occupancy != slot1_occupancy_new:
                                    firebase.patch(URL, {'Slot_01_occupancy_status': 'occupied'})
                                    slot1_occupancy = slot1_occupancy_new
                                if slot1_violation != slot1_violation_new:
                                    firebase.patch(URL, {'Slot_01_violations': 'yes'})
                                    slot1_violation = slot1_violation_new
                                if (NumOfVehicles == 1):
                                        imgavgbackori[205:300,400:470] = (0, 255, 0)
                                        slot2_violation_new = "no"
                                        slot2_occupancy_new = "free"
                                        if slot2_occupancy != slot2_occupancy_new:
                                            firebase.patch(URL, {'Slot_02_occupancy_status': 'free'})
                                            slot2_occupancy = slot2_occupancy_new
                                        if slot2_violation != slot2_violation_new:
                                            firebase.patch(URL, {'Slot_02_violations': 'no'})
                                            slot2_violation = slot2_violation_new
                        elif((parkingslot == 0)&(drawcon == 0)):
                                veimgc = cv2.drawContours(carpark, vehicontours, i, (0,255,0), 3)
                                imgavgbackori[100:180,400:470] = (0, 0, 255)
                                slot1_violation_new = "no"
                                slot1_occupancy_new = "occupied"
                                if slot1_occupancy != slot1_occupancy_new:
                                    firebase.patch(URL, {'Slot_01_occupancy_status': 'occupied'})
                                    slot1_occupancy = slot1_occupancy_new
                                if slot1_violation != slot1_violation_new:
                                    firebase.patch(URL, {'Slot_01_violations': 'no'})
                                    slot1_violation = slot1_violation_new
                                if (NumOfVehicles == 1):
                                        imgavgbackori[205:300,400:470] = (0, 255, 0)
                                        slot2_violation_new = "no"
                                        slot2_occupancy_new = "free"
                                        if slot2_occupancy != slot2_occupancy_new:
                                            firebase.patch(URL, {'Slot_02_occupancy_status': 'free'})
                                            slot2_occupancy = slot2_occupancy_new
                                        if slot2_violation != slot2_violation_new:
                                            firebase.patch(URL, {'Slot_02_violations': 'no'})
                                            slot2_violation = slot2_violation_new
                        elif ((parkingslot == 1)&(drawcon == 1)):
                                veimgc = cv2.drawContours(carpark, vehicontours, i, (0,0,255), 3)
                                imgavgbackori[205:300,400:470] = (255, 0, 0)
                                slot2_violation_new = "yes"
                                slot2_occupancy_new = "occupied"
                                if slot2_occupancy != slot2_occupancy_new:
                                    firebase.patch(URL, {'Slot_02_occupancy_status': 'occupied'})
                                    slot2_occupancy = slot2_occupancy_new
                                if slot2_violation != slot2_violation_new:
                                    firebase.patch(URL, {'Slot_02_violations': 'yes'})
                                    slot2_violation = slot2_violation_new
                                if (NumOfVehicles == 1):
                                        imgavgbackori[100:180,400:470] = (0, 255, 0)
                                        slot1_violation_new = "no"
                                        slot1_occupancy_new = "free"
                                        if slot1_occupancy != slot1_occupancy_new:
                                            firebase.patch(URL, {'Slot_01_occupancy_status': 'free'})
                                            slot1_occupancy = slot1_occupancy_new
                                        if slot1_violation != slot1_violation_new:
                                            firebase.patch(URL, {'Slot_01_violations': 'no'})
                                            slot1_violation = slot1_violation_new
                        elif((parkingslot == 1)&(drawcon == 0)):
                                veimgc = cv2.drawContours(carpark, vehicontours, i, (0,255,0), 3)
                                imgavgbackori[205:300,400:470] = (0, 0, 255)
                                slot2_violation_new = "no"
                                slot2_occupancy_new = "occupied"
                                if slot2_occupancy != slot2_occupancy_new:
                                    firebase.patch(URL, {'Slot_02_occupancy_status': 'occupied'})
                                    slot2_occupancy = slot2_occupancy_new
                                if slot2_violation != slot2_violation_new:
                                    firebase.patch(URL, {'Slot_02_violations': 'no'})
                                    slot2_violation = slot2_violation_new
                                if (NumOfVehicles == 1):
                                        imgavgbackori[100:180,400:470] = (0, 255, 0)
                                        slot1_violation_new = "no"
                                        slot1_occupancy_new = "free"
                                        if slot1_occupancy != slot1_occupancy_new:
                                            firebase.patch(URL, {'Slot_01_occupancy_status': 'free'})
                                            slot1_occupancy = slot1_occupancy_new
                                        if slot1_violation != slot1_violation_new:
                                            firebase.patch(URL, {'Slot_01_violations': 'no'})
                                            slot1_violation = slot1_violation_new

                        counter += 1
                    cv2.imshow('thresh',veimgc)
        else:
            imgavgbackori[100:180,400:470] = (0, 255, 0)
            imgavgbackori[205:300,400:470] = (0, 255, 0)
            if updateonce == 0:
                firebase.patch(URL, {'Slot_01_occupancy_status': 'free'})
                firebase.patch(URL, {'Slot_02_occupancy_status': 'free'})
                firebase.patch(URL, {'Slot_01_violations': 'no'})
                firebase.patch(URL, {'Slot_02_violations': 'no'})
                slot1_violation_new = 'no'
                slot2_violation_new = 'no'
                slot1_occupancy_new = 'free'
                slot2_occupancy_new = 'free'
                slot1_occupancy = slot1_occupancy_new
                slot2_occupancy = slot2_occupancy_new
                slot1_violation = slot1_violation_new
                slot2_violation = slot2_violation_new
                updateonce = 1

            # if slot1_occupancy != slot1_occupancy_new:
            #     firebase.patch(URL, {'Slot_01_occupancy_status': 'free'})
            #     slot1_occupancy = slot1_occupancy_new
            # if slot2_occupancy != slot2_occupancy_new:
            #     firebase.patch(URL, {'Slot_02_occupancy_status': 'free'})
            #     slot2_occupancy = slot2_occupancy_new
            # if slot1_violation != slot1_violation_new:
            #     firebase.patch(URL, {'Slot_01_violations': 'no'})
            #     slot1_violation = slot1_violation_new
            # if slot2_violation != slot2_violation_new:
            #     firebase.patch(URL, {'Slot_02_violations': 'no'})

        cv2.imshow('image',imgavgbackori)
        #cv2.imshow('imgrayneg',vehiclesgray)
    # ---- Violation detection ends ----

    # ---- Empty parking space detection starts ----
        #ret, carpark = cam2.read()
        #carparkgray = cv2.cvtColor(carpark, cv2.COLOR_BGR2GRAY)

        ##cv2.imshow('frame',frame)
        #if cv2.waitKey(1) & 0xFF == ord('q'):
            #break
    
        #vehavg = np.mean(carparkgray[250:300,400:470])

        #if abs(vehavg-emptyavg)>30:
            #imgavgback[250:300,400:470] = (0, 0, 255)
        #else:
            #imgavgback[250:300,400:470] = (0, 255, 0)

        #cv2.imshow('park',carpark)
        #cv2.imshow('empty',imgavgback)

    # ---- Empty parking space detection ends ----

    # Release the VideoCapture objects
    cam.release()
    cam2.release()
    cap.release()
    cv2.destroyAllWindows()
Example #25
def correspondence_problem(factor):
    img1 = cv2.imread('book.jpg', cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread('image2.jpeg', cv2.IMREAD_GRAYSCALE)

    ## resize
    img1 = cv2.resize(img1, dsize=(480, 640), interpolation=cv2.INTER_AREA)
    img2 = cv2.resize(img2, dsize=(480, 640), interpolation=cv2.INTER_AREA)

    # create the SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # detect SIFT keypoints
    kp1 = sift.detect(img1, None)
    kp2 = sift.detect(img2, None)

    # compute SIFT descriptors
    kp1, des1 = sift.compute(img1, kp1)
    kp2, des2 = sift.compute(img2, kp2)

    # FLANN matching
    FLANN_INDEX_KDTREE = 1  # KD-tree index, as in the OpenCV FLANN matcher docs
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    res = None
    good = []
    for m, n in matches:
        if m.distance < factor * n.distance:
            good.append(m)
    res = cv2.drawMatches(img1, kp1, img2, kp2, good, res, flags=2)

    # display the keypoint images
    img1_2, img2_2 = None, None
    img1_2 = cv2.drawKeypoints(img1, kp1, img1_2)
    img2_2 = cv2.drawKeypoints(
        img2, kp2, img2_2, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    #
    # cv2.imshow('SIFT1 detect', img1_2)
    # cv2.imshow('SIFT2 detect', img2_2)
    # cv2.imshow('Feature Matching', res)

    # homography to find objects
    MIN_MATCH_COUNT = 30
    print(len(good))
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        # There can be some matching errors which would affect the result; to
        # handle them the algorithm uses RANSAC or LEAST_MEDIAN (chosen via the flags).
        matchesMask = mask.ravel().tolist()
        # ravel(): return a contiguous flattened array; tolist(): convert the array to a list.

        h, w = img1.shape[:2]
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        dst += np.float32([w, 0])  # shift the box into the right half of the side-by-side match image
    else:
        print("not enough matches", len(good))
        matchesMask = None

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    # img4 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)

    # Draw the bounding box (blue in BGR) on the match visualisation
    if matchesMask is not None:
        img4 = cv2.polylines(res, [np.int32(dst)], True, (255, 0, 0), 2,
                             cv2.LINE_AA)
    else:
        img4 = res
    cv2.imshow('drawMatches', img4)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #26
def find_person_face(path_to_img=None,
                     path_to_save=None,
                     path_to_contr=None,
                     confid=90.0,
                     show=False,
                     cascade=args.home + '/' +
                     'haarcascade_frontalface_default.xml'):
    label_id = 0
    ids = list()
    datas = list()
    contr_list = dbh.sort_files(path_to_contr, ['.jpg', '.png'])
    os.makedirs(path_to_save, exist_ok=True)
    file_list = dbh.sort_files(path_to_img, ['.jpg', '.png'])
    face_cascade = None
    resize_const = 800

    if len(file_list) == 0:
        print('No matching files were found in the directory.')
        return 0
    try:
        face_cascade = cv2.CascadeClassifier(cascade)
    except:
        print('Error... [', cascade, '] not found')

    if show:
        cv2.namedWindow('Controls images', cv2.WINDOW_NORMAL)

    for name_img in contr_list:
        original_contr = cv2.imread(name_img)
        gray = cv2.cvtColor(original_contr, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=3,
                                              minSize=(30, 30))
        for (x, y, w, h) in faces:
            datas.append(gray[y:y + h, x:x + w])
            ids.append(label_id)
            label_id += 1
            if show:
                cv2.imshow('Controls images', gray[y:y + h, x:x + w])
                cv2.waitKey(50)
    if show:
        cv2.destroyAllWindows()

    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(datas, np.array(ids))

    if show:
        cv2.namedWindow('original', cv2.WINDOW_NORMAL)
        cv2.namedWindow('saving', cv2.WINDOW_NORMAL)

    for img_name in file_list:
        original = cv2.imread(img_name)
        s = original.shape
        ys = s[0]
        xs = s[1]
        if xs > resize_const and xs > ys:
            dim = xs / resize_const
            xs = resize_const
            ys = int(ys / dim)
        elif ys > resize_const and ys > xs:
            dim = ys / resize_const
            ys = resize_const
            xs = int(xs / dim)
        else:
            pass
        original = cv2.resize(original, (xs, ys), interpolation=cv2.INTER_AREA)
        if show:
            cv2.imshow('original', original)
            key = cv2.waitKey(50)
            if key == 27:
                cv2.destroyAllWindows()
                return
        gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=4,
                                              minSize=(30, 30))
        for (x, y, w, h) in faces:
            num, conf = recognizer.predict(gray[y:y + h, x:x + w])
            print('Num:', num, '  Conf:', round(conf, 5))
            if conf <= confid:
                cv2.imwrite(
                    path_to_save + '/' + 'person_' +
                    os.path.basename(img_name), original[y:y + h, x:x + w])
                print('Saving ' + 'person_' + os.path.basename(img_name), 'to',
                      path_to_save)
                if show:
                    cv2.imshow('saving', original[y:y + h, x:x + w])
                    key = cv2.waitKey(50)
                    if key == 27:
                        cv2.destroyAllWindows()
                        return
    if show:
        cv2.destroyAllWindows()
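
# Hypothetical usage sketch for the function above; the directory names are
# placeholders, not from the source. Note that LBPH confidence is a distance,
# so lower values mean a better match, which is why the code tests conf <= confid.
if __name__ == '__main__':
    find_person_face(path_to_img='photos',
                     path_to_save='found_faces',
                     path_to_contr='reference_faces',
                     confid=80.0,
                     show=True)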
Example #27
0
    def perspectiveTransform(self):

        self.video=str(self.fullpath)
        self.camera = cv2.VideoCapture(self.video)
        
        success, firstFrame = self.camera.read() #reads only the first image of the video for calibration function
        rows,cols,ch = firstFrame.shape
        self.xdist=[]
        self.ydist=[]
        self.pts1 = []
        self.pts2 = []
        self.count = 0
        #mouse callback function, draws points and captures coordinates
        def draw_circle(event,x,y,flags,param):
            if event == cv2.EVENT_LBUTTONDOWN:
                cv2.circle(firstFrame,(x,y),2,(0,255,0),-1)
                cv2.circle(firstFrame,(x,y),10,(255,0,0),1)
                cv2.circle(firstFrame,(x,y),15,(255,0,0),1)
                cv2.putText(firstFrame, label, (x+20, y+20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                
                ix=x
                iy=y
                self.xdist.append(ix)
                self.ydist.append(iy)
                a=[ix,iy]
                self.pts1.append(a)
                
                if self.count==0:
                    toAppend=[ix,iy]
                    self.pts2.append(toAppend)
                if self.count == 1:
                    toAppendix=ix
                    toAppendiy=self.pts2[0][1]
                    toAppend=[toAppendix,toAppendiy]
                    self.pts2.append(toAppend)
                if self.count == 2:
                    toAppendix=self.pts2[0][0]
                    toAppendiy=iy
                    toAppend=[toAppendix,toAppendiy]
                    self.pts2.append(toAppend)
                self.count=self.count+1
        
        cv2.namedWindow('firstframe')
        cv2.setMouseCallback('firstframe',draw_circle)
        
        while self.count <= 2:  # keep the window open until all three points are chosen
            cv2.imshow('firstframe',firstFrame)
            if self.count==0:
                cv2.putText(firstFrame, "1. Choose top left corner: (1)", (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                label="Point 1"
                
            if self.count==1:
                cv2.putText(firstFrame, "2. Choose top right corner:", (10, 40),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                label="Point 2"
                
            if self.count==2:
                cv2.putText(firstFrame, "3. Choose bottom right corner", (10, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                label="Point 3"
                
            k = cv2.waitKey(1) & 0xFF
            if k == ord('q'):  # abort point selection
                self.camera.release()
                cv2.destroyAllWindows()
                return


        self.pts1 = np.float32(self.pts1)
        self.pts2 = np.float32(self.pts2)

        self.M = cv2.getAffineTransform(self.pts1,self.pts2)
        dst = cv2.warpAffine(firstFrame,self.M,(cols,rows))
        cv2.imshow('dst',dst)
        
        print(self.M)
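        # Hedged aside (not part of the method above): getAffineTransform takes
        # exactly three point pairs, so it corrects rotation/scale/shear only.
        # A genuinely perspective-distorted view needs four pairs, e.g.:
        #   M = cv2.getPerspectiveTransform(np.float32(pts1), np.float32(pts2))
        #   dst = cv2.warpPerspective(firstFrame, M, (cols, rows))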
Example #28
0
    def doCalibration(self,rectify):
        self.cal_TE.append("Calibration window open:")
        self.rectify=rectify
        
        self.video=str(self.fullpath)
        self.camera = cv2.VideoCapture(self.video)

          
        success, firstFrame = self.camera.read() 
        
        if rectify==True:
            rows,cols,ch = firstFrame.shape
            firstFrame = cv2.warpAffine(firstFrame,self.M,(cols,rows))

        
        self.xdist=[]
        self.ydist=[]
        self.count = 0
        #mouse callback function, draws points and captures coordinates
        def draw_circle(event,x,y,flags,param):
        
            if event == cv2.EVENT_LBUTTONDOWN:
                cv2.circle(firstFrame,(x,y),2,(0,255,0),-1)
                cv2.circle(firstFrame,(x,y),10,(255,0,0),1)
                cv2.circle(firstFrame,(x,y),15,(255,0,0),1)
                cv2.putText(firstFrame, label, (x+20, y+20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                self.count=self.count+1
                
                ix=x
                iy=y
                self.xdist.append(ix)
                self.ydist.append(iy)
                self.cal_TE.append("Point %d chosen with x-pixel coordinates of %d and y-pixel coordinates of %d" % (self.count, ix, iy))
            
        cv2.namedWindow('Calibration frame')
        cv2.setMouseCallback('Calibration frame',draw_circle)

        while self.count <= 3:
            cv2.imshow('Calibration frame',firstFrame)

            if self.count==0:
                cv2.putText(firstFrame, "1. Place first point (1)", (10, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                label="Point 1"
                
            if self.count==1:
                cv2.putText(firstFrame, "2. Place second point (2)", (10, 40),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                label="Point 2"
                
            if self.count==2:
                cv2.putText(firstFrame, "3. Place point of known distance from flume entrance and bottom wall", (10, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                label="Known point"
                
            if self.count==3:
                cv2.putText(firstFrame, "Exit pop-up and enter parameters ...", (10, 80),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                
            self.k = cv2.waitKey(1) & 0xFF
            if self.k == ord('q'):
                self.cal_TE.append("Enter calibration parameters and then click output calibration")
                self.camera.release()
                cv2.destroyAllWindows()
                break
# -*- coding: utf-8 -*-
"""
Created on Tue Aug  7 15:04:08 2018

@author: Administrator
"""
'''
Video capture
'''

import cv2 as cv

vc = cv.VideoCapture(0)              # 0 - index of the video capture device
while True:
    frame = vc.read()[1]             # read() returns (ret, frame); take index 1, the frame
    cv.imshow('VideoCapture', frame)

    if cv.waitKey(33) == 27:
        # 27 is the ESC key; a 33 ms wait per frame gives roughly 30 fps
        break

vc.release()                         # release the capture device when done
cv.destroyAllWindows()               # destroy all windows, including hidden ones
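
# A minimal extension sketch (an assumption, not in the snippet above): the
# same loop can also save snapshots; press 's' to write the current frame,
# ESC to quit. Filenames are placeholders.
vc = cv.VideoCapture(0)
n = 0
while True:
    ret, frame = vc.read()
    if not ret:
        break
    cv.imshow('VideoCapture', frame)
    key = cv.waitKey(33)
    if key == 27:                        # ESC - quit
        break
    elif key == ord('s'):                # 's' - save a snapshot
        cv.imwrite('snapshot_%d.png' % n, frame)
        n += 1
vc.release()
cv.destroyAllWindows()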

Example #30
0
def align_video(video_path, predictor_path, out_path, showStabilized = False):
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    writer = cv2.VideoWriter(out_path, fourcc, 20, (640,480), True)

    # Initializing video capture object.
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        print("Unable to load video")

    winSize = 101
    maxLevel = 10
    fps = 30.0
    # Grab a frame
    ret,imPrev = cap.read()

    size = imPrev.shape[0:2]  # (height, width)

    detector = dlib.get_frontal_face_detector()
    landmarkDetector = dlib.shape_predictor(predictor_path)
    # Initializing the parameters
    points=[]
    pointsPrev=[]
    pointsDetectedCur=[]
    pointsDetectedPrev=[]

    eyeDistanceNotCalculated = True
    eyeDistance = 0
    isFirstFrame = True
    # Initial value, actual value calculated after 100 frames
    fps = 10
    count =0


    ret, frame1 = cap.read()
    imGrayPrev = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)

    try:
        while(True):
            if (count==0):
                t = cv2.getTickCount()
            # Grab a frame
            ret,im = cap.read()
            if im is None:
                break
            imDlib = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # Converting to grayscale
            imGray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            height = im.shape[0]
            IMAGE_RESIZE = float(height)/RESIZE_HEIGHT
            # Resize image for faster face detection
            imSmall = cv2.resize(im, None, fx=1.0/IMAGE_RESIZE, fy=1.0/IMAGE_RESIZE,interpolation = cv2.INTER_LINEAR)
            imSmallDlib = cv2.cvtColor(imSmall, cv2.COLOR_BGR2RGB)
            # Skipping the frames for faster processing
            if (count % SKIP_FRAMES == 0):
                faces = detector(imSmallDlib,0)
            # If no face was detected
            if len(faces)==0:
                print("No face detected")
            # If faces are detected, iterate through each image and detect landmark points
            else:
                for i in range(0,len(faces)):
                    # Face detector was run over a smaller image,
                    # so we scale the face rectangle back to full size.
                    newRect = dlib.rectangle(int(faces[i].left() * IMAGE_RESIZE),
                        int(faces[i].top() * IMAGE_RESIZE),
                        int(faces[i].right() * IMAGE_RESIZE),
                        int(faces[i].bottom() * IMAGE_RESIZE))

                    # Detect landmarks in current frame
                    landmarks = landmarkDetector(imDlib, newRect).parts()

                    # Handle the first frame of the video differently: for the first frame, copy the current frame points

                    if (isFirstFrame==True):
                        pointsPrev=[]
                        pointsDetectedPrev = []
                        [pointsPrev.append((p.x, p.y)) for p in landmarks]
                        [pointsDetectedPrev.append((p.x, p.y)) for p in landmarks]
                    # If not the first frame, copy points from previous frame.
                    else:
                        pointsPrev=[]
                        pointsDetectedPrev = []
                        pointsPrev = points
                        pointsDetectedPrev = pointsDetectedCur
                    # pointsDetectedCur stores results returned by the facial landmark detector
                    # points stores the stabilized landmark points
                    points = []
                    pointsDetectedCur = []
                    [points.append((p.x, p.y)) for p in landmarks]
                    [pointsDetectedCur.append((p.x, p.y)) for p in landmarks]
                    # Convert to numpy float array
                    pointsArr = np.array(points,np.float32)
                    pointsPrevArr = np.array(pointsPrev,np.float32)
                    # If eye distance was not calculated before
                    if eyeDistanceNotCalculated:
                        eyeDistance = interEyeDistance(landmarks)
                        print(eyeDistance)
                        eyeDistanceNotCalculated = False
                    if eyeDistance > 100:
                        dotRadius = 3
                    else:
                        dotRadius = 2
                    sigma = eyeDistance * eyeDistance / 400
                    s = 2*int(eyeDistance/4)+1
                    # Set up optical flow params
                    lk_params = dict(winSize  = (s, s), maxLevel = 5, criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 20, 0.03))
                    # Python bug: calculating pyramids and then calculating optical flow results in an error, so the images are used directly.
                    # ret, imGrayPyr= cv2.buildOpticalFlowPyramid(imGray, (winSize,winSize), maxLevel)
                    pointsArr,status, err = cv2.calcOpticalFlowPyrLK(imGrayPrev,imGray,pointsPrevArr,pointsArr,**lk_params)

                    # Converting to float
                    pointsArrFloat = np.array(pointsArr,np.float32)
                    # Converting back to list
                    points = pointsArrFloat.tolist()
                    # Final landmark points are a weighted average of
                    # detected landmarks and tracked landmarks
                    for k in range(0,len(landmarks)):
                        d = cv2.norm(np.array(pointsDetectedPrev[k]) - np.array(pointsDetectedCur[k]))
                        alpha = math.exp(-d*d/sigma)
                        points[k] = (1 - alpha) * np.array(pointsDetectedCur[k]) + alpha * np.array(points[k])
                    # Drawing over the stabilized landmark points
                    if showStabilized is True:
                        for p in points:
                            cv2.circle(im,(int(p[0]),int(p[1])),dotRadius, (255,0,0),-1)
                    else:
                        for p in pointsDetectedCur:
                            cv2.circle(im,(int(p[0]),int(p[1])),dotRadius, (0,0,255),-1)
                    isFirstFrame = False
                    count = count+1
                    # Calculating the fps value
                    if (count == NUM_FRAMES_FOR_FPS):
                        t = (cv2.getTickCount()-t)/cv2.getTickFrequency()
                        fps = NUM_FRAMES_FOR_FPS/t
                        count = 0
                        isFirstFrame = True
                    # Display the landmark points
                    cv2.putText(im, "{:.1f}-fps".format(fps), (50, size[0]-50), cv2.FONT_HERSHEY_COMPLEX, 1.5, (0, 0, 255), 3,cv2.LINE_AA)
                    winName = "Aligned facial landmark detector"
                    cv2.imshow(winName, im)
                    frame_1 = cv2.resize(im,(640,480)) # manually resize frame
                    writer.write(frame_1)
                    key = cv2.waitKey(25) & 0xFF
                    # Use spacebar to toggle between stabilized and unstabilized versions.
                    if key==32:
                        showStabilized = not showStabilized
                    # Stop the program.
                    if key==27:
                        sys.exit()
                    # Getting ready for next frame
                    imPrev = im
                    imGrayPrev = imGray
    except Exception as e:
        print("Exception occurred:", e)
        writer.release()
        cap.release()
        cv2.destroyAllWindows()
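
# NOTE: the function above references module-level names that are not shown in
# this excerpt. A plausible reconstruction, offered as an assumption only:
RESIZE_HEIGHT = 480          # target height for the fast face-detection pass
SKIP_FRAMES = 2              # run the detector only every SKIP_FRAMES frames
NUM_FRAMES_FOR_FPS = 100     # window used to estimate the displayed fps

def interEyeDistance(predict):
    # Points 36 and 45 are the outer eye corners in dlib's 68-point model.
    leftEyeLeftCorner = np.array([predict[36].x, predict[36].y], np.float32)
    rightEyeRightCorner = np.array([predict[45].x, predict[45].y], np.float32)
    return int(cv2.norm(rightEyeRightCorner - leftEyeLeftCorner))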
import numpy as np
import cv2

print('Loading image')

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
test = face_cascade.load(r'C:\opencv\sources\data\haarcascades\haarcascade_frontalface_default.xml')
print(test)


img = cv2.imread('test2.jpg')
grayimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

print('Loaded image, applying cascade classifiers')

faces = face_cascade.detectMultiScale(grayimg, 1.3, 5)

for (x, y, w, h) in faces:
	img = cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0), 2)
	roi_gray = grayimg[y:y+h, x:x+w]
	roi_color = img[y:y+h, x:x+w]
	
imgResized = cv2.resize(img, (0,0), fx=0.5, fy=0.5)

cv2.imshow('Face Detection', imgResized)
cv2.waitKey(0)
cv2.destroyAllWindows()
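
# Hedged aside (not part of the script above): with the opencv-python wheels the
# bundled cascades can be located portably instead of hard-coding a local path:
#   cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
#   face_cascade = cv2.CascadeClassifier(cascade_path)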