def mainWorker(camId):
    """Brightness-trigger acquisition loop for one camera.

    Streams frames from the GenICam device configured under ``camId``,
    tracks the mean gray level inside three regions of interest ("far",
    "truck", "close") and hits the corresponding flash-trigger URL when a
    region's brightness deviates from its reference by more than that
    region's threshold.  The reference is either a running average over
    roughly the last 200 frames, or user-set trackbar values when
    VARIANCE_METHOD is enabled.  Exits after the camera stops responding.

    NOTE(review): this definition is shadowed by a later
    ``def mainWorker`` in this module; only the last definition wins.
    """
    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']

    # ---- counters / display state ----
    lastTime = time.time()
    transposeTime = 0
    frameCount = 0
    avFrameRate = 0
    numberCars = 0
    numberClose = 0
    numberFar = 0
    numberTruck = 0
    lastSnapshot = None
    baseColor = (255,255,255)
    baseRes = scaledRes
    scale = 800/1920            # aspect factor of the native stream
    factor = baseRes/320        # thresholds below were tuned at 320 px wide
    showLines = False
    logsOn = False
    showYolo = False
    IS_CAM_OK = True
    MODE = "DAY"
    CLOSE_TRIGGER_METHOD = "DELAY" # DELAY, CALC
    triggerDelay = 0.5             # seconds before a zone may re-trigger
    VARIANCE_METHOD = False

    # SET THRESHES LOCALLY (placeholders; populated by setThresholds below)
    uproadThresh = 0
    truckThresh = 0
    closeThresh = 0
    extraThresh = 0
    leftBound = 0
    leftBound2 = 0
    rightBound = 0
    rightBound2 = 0
    marginOfError = 0

    isFarClear = True
    isTruckClear = True
    isCloseClear = True

    # Trigger-box placeholders (populated by setDefaultValues below).
    # BUGFIX: the original assignments each ended with a stray trailing
    # comma, which silently turned every value into a one-element tuple.
    farBoxCenter = [97, 298]
    farBoxWidth = 5
    farBoxHeight = 10
    truckBoxCenter = [97, 170]
    truckBoxWidth = 5
    truckBoxHeight = 10
    closeBoxCenter = [97, 50]
    closeBoxWidth = 15
    closeBoxHeight = 15
    sdThreshold = 30
    tsdThreshold = 30
    csdThreshold = 30

    def setDefaultValues(MODE):
        """Load the per-mode trigger-box geometry and standard-deviation
        thresholds from the module-level DEFAULT_VALUES table."""
        values = DEFAULT_VALUES[MODE]
        nonlocal farBoxCenter
        nonlocal farBoxWidth
        nonlocal farBoxHeight
        nonlocal truckBoxCenter
        nonlocal truckBoxWidth
        nonlocal truckBoxHeight
        nonlocal closeBoxCenter
        nonlocal closeBoxWidth
        nonlocal closeBoxHeight
        nonlocal sdThreshold
        nonlocal tsdThreshold
        nonlocal csdThreshold

        farBoxCenter = values['farBoxCenter']
        farBoxWidth = values['farBoxWidth']
        farBoxHeight = values['farBoxHeight']
        truckBoxCenter = values['truckBoxCenter']
        truckBoxWidth = values['truckBoxWidth']
        truckBoxHeight = values['truckBoxHeight']
        closeBoxCenter = values['closeBoxCenter']
        closeBoxWidth = values['closeBoxWidth']
        closeBoxHeight = values['closeBoxHeight']
        sdThreshold = values['sdThreshold']
        tsdThreshold = values['tsdThreshold']
        csdThreshold = values['csdThreshold']

    def setThresholds(MODE, factor):
        """Load the per-mode pixel thresholds from THRESHOLDS, scaled from
        the 320-px reference resolution by ``factor``."""
        thresh = THRESHOLDS[MODE]
        nonlocal uproadThresh
        nonlocal truckThresh
        nonlocal closeThresh
        nonlocal extraThresh
        nonlocal leftBound
        nonlocal leftBound2
        nonlocal rightBound
        nonlocal rightBound2
        nonlocal marginOfError
        uproadThresh = int(thresh['uproadThresh']*factor)
        truckThresh = int(thresh['truckThresh']*factor)
        closeThresh = int(thresh['closeThresh']*factor)
        extraThresh = int(thresh['extraThresh']*factor)
        leftBound = int(thresh['leftBound']*factor)
        leftBound2 = int(thresh['leftBound2']*factor)
        rightBound = int(thresh['rightBound']*factor)
        rightBound2 = int(thresh['rightBound2']*factor)
        marginOfError = int(thresh['marginOfError']*factor)

    setThresholds(MODE, factor)
    setDefaultValues(MODE)

    startTime = time.time()
    IS_CAM_OK = True
    while(IS_CAM_OK):
        print(camId, "Creating harvester modules and loading genlcam producers ...")
        h = Harvester()
        h.add_cti_file(CTI_FILE)
        h.update_device_info_list()
        cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
        cam.start_image_acquisition()
        cv2.namedWindow(WINDOW_NAME, flags=0) # create dedicated stream window

        def nothing(x):
            """No-op callback for trackbars that are polled each frame."""
            pass

        def changeDelay(x):
            """Trackbar callback: slider is in milliseconds, triggerDelay in seconds."""
            nonlocal triggerDelay
            triggerDelay = x/1000

        def toggleBoxes(x):
            """Show/hide the trigger-box overlay (1 = show)."""
            nonlocal showLines
            if x==1:
                showLines = True
                print("SHOWING TRIGGER BOXES")
            else:
                showLines = False
                print("HIDING TRIGGER BOXES")

        def toggleLogs(x):
            """Enable/disable per-frame console logging (1 = on)."""
            nonlocal logsOn
            if x==1:
                logsOn = True
                print("SHOWING LOGS")
            else:
                logsOn = False
                print("HIDING LOGS")

        def toggleVarianceMethod(x):
            """Select trigger reference: 1 = user trackbar values, 0 = running average."""
            nonlocal VARIANCE_METHOD
            if x==1:
                VARIANCE_METHOD = True
                print("SWITCHED TO USER CONTROLLED VALUES FOR TRIGGERING")
            else:
                VARIANCE_METHOD = False
                print("SWITCHED TO AVERAGE VALUES FOR TRIGGERING")

        def switchMode(x):
            """Switch DAY (1, auto exposure/gain) / NIGHT (0, manual) mode and
            reload the mode's default trigger-box values."""
            nonlocal MODE
            if x==1:
                MODE = "DAY"
                setDefaultValues(MODE)
                print("SWITCHED TO DAY MODE")
                cam.device.node_map.ExposureAuto = 'Continuous'
                cam.device.node_map.GainAuto = 'Continuous'
            else:
                MODE = "NIGHT"
                setDefaultValues(MODE)
                print("SWITCHED TO NIGHT MODE")
                cam.device.node_map.ExposureAuto = 'Off'
                cam.device.node_map.GainAuto = 'Off'
                #cam.device.node_map.ExposureTime.value = 150.69
                #cam.device.node_map.Gain.value = 15.5

        uproadLastTrigger = time.time()
        truckLastTrigger = time.time()
        closeLastTrigger = time.time()
        farStdAv = 0.0
        closeStdAv = 0.0
        truckStdAv = 0.0
        baseAv = 0.0

        # Trackbar labels double as their identifiers in the cv2 API.
        showBoxes = '0 : BOXES OFF \n1 : BOXES ON'
        outputLogs = '0 : LOGS OFF \n1 : LOGS ON'
        autoExposureSwitch = '0 : Auto Exp OFF \n1 : Auto Exp ON'
        autoGainSwitch = '0 : Auto Gain OFF \n1 : Auto Gain ON'
        modeSwitch = '0 : Night Mode\n1 : Day Mode'
        varianceMethodSwitch = '0 : Auto Variance\n1 : Manual Variance'

        currentHour = int(float(time.strftime('%H')))
        # NOTE(review): the original if/else on the current hour created an
        # identical trackbar in both branches, so the check was redundant.
        cv2.createTrackbar(modeSwitch,WINDOW_NAME,0,1,switchMode)

        cv2.createTrackbar(showBoxes,WINDOW_NAME,1,1,nothing)
        cv2.createTrackbar(outputLogs,WINDOW_NAME,0,1,nothing)
        # BUGFIX: this slider was wired to the no-op callback, leaving
        # changeDelay dead code and the slider without any effect.
        cv2.createTrackbar('Trigger Reset Delay ms',WINDOW_NAME,int(triggerDelay*1000),1000,changeDelay)

        cv2.createTrackbar(varianceMethodSwitch,WINDOW_NAME,0,1,nothing)
        cv2.createTrackbar('Far Gray',WINDOW_NAME,0,255,nothing)
        cv2.createTrackbar('Truck Gray',WINDOW_NAME,0,255,nothing)
        cv2.createTrackbar('Close Gray',WINDOW_NAME,0,255,nothing)

        # Last-seen trackbar positions, so callbacks fire only on change.
        showBoxesValue = None
        outputLogsValue = None
        triggerResetDelayValue = None
        modeSwitchValue = None
        varianceMethodSwitchValue = None
        farGrayValue = None
        truckGrayValue = None
        closeGrayValue = None

        while(IS_CAM_OK): # MAIN WHILE LOOP FOR IMAGE ACQUISITION
            frame = None
            try:
                with timeout(seconds=5, error_message='FETCH_ERROR'):
                    frame = cam.fetch_buffer()
            except Exception:
                # ROBUSTNESS: a stalled fetch previously raised out of the
                # worker uncaught; now we drop to the cleanup code below
                # (mirrors the handling in the sibling mainWorker).
                IS_CAM_OK = False
                print(camId, 'CAM TOOK TOO LONG TO FETCH BUFFER - STOPPING!')

            if(IS_CAM_OK and frame.payload.components):
                timestamp = time.strftime('%H:%M:%S')
                image = frame.payload.components[0].data
                frameScaled = cv2.resize(image, dsize=(baseRes, int(baseRes*scale)), interpolation=cv2.INTER_CUBIC)
                frameColorised = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2RGB)
                c, h1, w1 = frameColorised.shape[2], frameColorised.shape[1], frameColorised.shape[0]
                currentHour = int(float(time.strftime('%H')))
                # Auto-switch from NIGHT to DAY once inside day hours.
                if(MODE=="NIGHT" and currentHour>=DAY_MODE_HOUR and currentHour<NIGHT_MODE_HOUR):
                    switchMode(modeSwitchValue)
                    cv2.setTrackbarPos(modeSwitch,WINDOW_NAME, 1)
                # NOTE(review): the automatic DAY->NIGHT switch is deliberately
                # disabled (original guarded it with `False and ...`).
                if(False and MODE=="DAY" and currentHour>=NIGHT_MODE_HOUR):
                    switchMode(modeSwitchValue)
                    cv2.setTrackbarPos(modeSwitch,WINDOW_NAME, 0)

                # CHECKING POSITION OF ALL TRACKBARS (poll and dispatch on change)
                if cv2.getTrackbarPos(showBoxes,WINDOW_NAME)!=showBoxesValue:
                    showBoxesValue = cv2.getTrackbarPos(showBoxes,WINDOW_NAME)
                    toggleBoxes(showBoxesValue)
                if cv2.getTrackbarPos(outputLogs,WINDOW_NAME)!=outputLogsValue:
                    outputLogsValue = cv2.getTrackbarPos(outputLogs,WINDOW_NAME)
                    toggleLogs(outputLogsValue)
                if cv2.getTrackbarPos(modeSwitch,WINDOW_NAME)!=modeSwitchValue:
                    modeSwitchValue = cv2.getTrackbarPos(modeSwitch,WINDOW_NAME)
                    switchMode(modeSwitchValue)

                # Regions of interest sliced from the raw (Bayer) scaled frame.
                triggerBoxFar = frameScaled[farBoxCenter[0]-farBoxWidth:farBoxCenter[0]+farBoxWidth,farBoxCenter[1]:farBoxCenter[1]+farBoxHeight]
                triggerBoxTruck = frameScaled[truckBoxCenter[0]-truckBoxWidth:truckBoxCenter[0]+truckBoxWidth,truckBoxCenter[1]:truckBoxCenter[1]+truckBoxHeight]
                triggerBoxClose = frameScaled[closeBoxCenter[0]-closeBoxWidth:closeBoxCenter[0]+closeBoxWidth,closeBoxCenter[1]:closeBoxCenter[1]+closeBoxHeight]

                # ARRAY METRICS FOR TRIGGERING (mean gray level per region;
                # the 'Std' names are historical).
                triggerBoxFarStd= np.mean(triggerBoxFar)
                triggerBoxTruckStd= np.mean(triggerBoxTruck)
                triggerBoxCloseStd= np.mean(triggerBoxClose)

                # Exponential moving average with ~200-frame memory.
                numberOfFrames = 200
                farStdAv = farStdAv*(numberOfFrames-1)/numberOfFrames + triggerBoxFarStd/numberOfFrames
                truckStdAv = truckStdAv*(numberOfFrames-1)/numberOfFrames + triggerBoxTruckStd/numberOfFrames
                closeStdAv = closeStdAv*(numberOfFrames-1)/numberOfFrames + triggerBoxCloseStd/numberOfFrames

                if cv2.getTrackbarPos(varianceMethodSwitch,WINDOW_NAME)!=varianceMethodSwitchValue:
                    varianceMethodSwitchValue = cv2.getTrackbarPos(varianceMethodSwitch,WINDOW_NAME)
                    toggleVarianceMethod(varianceMethodSwitchValue)
                    # Seed the manual sliders from the current averages.
                    cv2.setTrackbarPos('Far Gray',WINDOW_NAME, int(farStdAv))
                    cv2.setTrackbarPos('Truck Gray',WINDOW_NAME, int(truckStdAv))
                    cv2.setTrackbarPos('Close Gray',WINDOW_NAME, int(closeStdAv))

                if cv2.getTrackbarPos('Far Gray',WINDOW_NAME)!=farGrayValue:
                    farGrayValue = cv2.getTrackbarPos('Far Gray',WINDOW_NAME)
                if cv2.getTrackbarPos('Truck Gray',WINDOW_NAME)!=truckGrayValue:
                    truckGrayValue = cv2.getTrackbarPos('Truck Gray',WINDOW_NAME)
                if cv2.getTrackbarPos('Close Gray',WINDOW_NAME)!=closeGrayValue:
                    closeGrayValue = cv2.getTrackbarPos('Close Gray',WINDOW_NAME)

                # Deviation of each region from its reference level.
                if (VARIANCE_METHOD == True):
                    farDiff = abs(farGrayValue -triggerBoxFarStd)
                    truckDiff = abs(truckGrayValue-triggerBoxTruckStd)
                    closeDiff = abs(closeGrayValue-triggerBoxCloseStd)
                else:
                    farDiff = abs(farStdAv -triggerBoxFarStd)
                    truckDiff = abs(truckStdAv-triggerBoxTruckStd)
                    closeDiff = abs(closeStdAv-triggerBoxCloseStd)

                currentTime = time.time()

                # WAS GOING TO BE USED TO IDENTIFY CARS, NOW JUST A VISUAL AID
                # FOR WHEN ZONES ARE RESETTING OR NOT
                if isFarClear and farDiff>sdThreshold:
                    isFarClear = False
                elif isFarClear == False and (currentTime-uproadLastTrigger)>triggerDelay:
                    isFarClear = True
                if isTruckClear and truckDiff>tsdThreshold:
                    isTruckClear = False
                elif isTruckClear == False and (currentTime-truckLastTrigger)>triggerDelay:
                    isTruckClear = True
                if isCloseClear and closeDiff>csdThreshold:
                    isCloseClear = False
                elif isCloseClear == False  and (currentTime-closeLastTrigger)>triggerDelay:
                    isCloseClear = True

                # Fire flashes, but only after the warm-up period so the
                # moving averages have settled, and at most once per
                # triggerDelay per zone.
                if currentTime-startTime>DELAY_TIME_FROM_INIT_TO_TRIGGER:
                    if farDiff>sdThreshold and (currentTime-uproadLastTrigger)>triggerDelay:
                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                        numberFar += 1
                        uproadLastTrigger = currentTime
                    if truckDiff>tsdThreshold and (currentTime-truckLastTrigger)>triggerDelay:
                        urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()
                        numberTruck += 1
                        truckLastTrigger = currentTime
                    if closeDiff>csdThreshold and (currentTime-closeLastTrigger)>triggerDelay:
                        urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                        numberClose += 1
                        closeLastTrigger = currentTime

                # SHOW LINES SECTION: green box = zone clear, red = triggered.
                if showLines:
                    cv2.rectangle(frameColorised, (farBoxCenter[1],farBoxCenter[0]-farBoxWidth),(farBoxCenter[1]+farBoxHeight,farBoxCenter[0]+farBoxWidth),(0,255,0) if isFarClear else (0,0,255))
                    cv2.rectangle(frameColorised, (truckBoxCenter[1],truckBoxCenter[0]-truckBoxWidth),(truckBoxCenter[1]+truckBoxHeight,truckBoxCenter[0]+truckBoxWidth),(0,255,0) if isTruckClear else (0,0,255))
                    cv2.rectangle(frameColorised, (closeBoxCenter[1],closeBoxCenter[0]-closeBoxWidth),(closeBoxCenter[1]+closeBoxHeight,closeBoxCenter[0]+closeBoxWidth),(0,255,0) if isCloseClear else (0,0,255))

                # DISPLAY FRAME IN WINDOW
                cv2.imshow(WINDOW_NAME, np.rot90(frameColorised) if IS_ROTATE else frameColorised)

                frame.queue()
                cv2.waitKey(1)
                # 50-frame moving average of the instantaneous frame rate.
                avFrameRate=avFrameRate*49/50+int(1.0/(time.time()-lastTime))/50
                if frameCount%1==0 and logsOn:
                    print("CF", numberFar, "CT", numberTruck,"CC", numberClose,"avFPS", int(avFrameRate),"FV", int(triggerBoxFarStd),"TV", int(triggerBoxTruckStd), "CV", int(triggerBoxCloseStd))
                lastTime = time.time()
                frameCount += 1

        #IF CAM NOT OK
        cam.stop_image_acquisition()
        cam.destroy()
def worker(camId):
    """YOLO-detection worker: stream frames from one camera, run pyyolo on
    each rotated frame, and fire all three flash URLs whenever a 'car' is
    detected on CAM_2.

    NOTE(review): this definition is shadowed by a later ``def worker`` in
    this module; only the last definition is callable.
    """
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)
    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']
    h = Harvester()
    h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
    h.update_device_info_list()
    try:
        cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
        print("Camera found")
    # BUGFIX: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        print("Camera Not Found")
        exit()

    cam.start_image_acquisition()

    #variable declarations
    lastTime = time.time()
    transposeTime = 0
    i = 0
    numberCars = 0
    lastSnapshot = None
    cv2.namedWindow(WINDOW_NAME, flags=0)

    while (True):
        buffer = cam.fetch_buffer()
        image = buffer.payload.components[0].data
        small = cv2.resize(image,
                           dsize=(320, 200),
                           interpolation=cv2.INTER_CUBIC)
        clone = small.copy()

        rgb = cv2.cvtColor(clone, cv2.COLOR_BayerRG2RGB)
        img = np.rot90(rgb)
        print(rgb.shape)
        # BUGFIX: the original unpacked c from shape[0] (rows of the rotated
        # image) and w from shape[2] (always 3).  For a (rows, cols, channels)
        # array the correct unpack is c=channels, h=rows, w=cols.
        # TODO(review): confirm pyyolo.detect's expected w/h argument order.
        c, h, w = img.shape[2], img.shape[0], img.shape[1]
        data = img.ravel() / 255.0
        predictions = pyyolo.detect(w, h, c, data, thresh, hier_thresh)

        for output in predictions:
            left, right, top, bottom, what, prob = output['left'], output[
                'right'], output['top'], output['bottom'], output[
                    'class'], output['prob']
            if (what == 'car'):
                print(output)
                numberCars += 1
                # Fixed debug rectangle — not the detection bounds.
                cv2.rectangle(rgb, (50, 50), (100, 150), (0, 255, 0), 5)
                if (camId == "CAM_2"):
                    urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                    urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                    urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()

        if IS_ROTATE:
            cv2.imshow(WINDOW_NAME, np.rot90(rgb))
        else:
            cv2.imshow(WINDOW_NAME, rgb)

        cv2.waitKey(1)
        buffer.queue()

        print("Count: ", numberCars, " Frame: ", i, " FPS: ",
              1.0 / (time.time() - lastTime))
        lastTime = time.time()
        i += 1

    # NOTE(review): unreachable — the loop above never breaks; kept so the
    # intended cleanup is visible if a break is ever added.
    cam.stop_image_acquisition()
    cam.destroy()
def mainWorker(camId):
    """Hybrid detection worker with automatic camera recovery.

    By DAY it runs the darknet ``Detector`` on each frame and fires the
    far/truck/close trigger URLs when a detection crosses the matching
    threshold line.  By NIGHT it thresholds the gray frame and tracks
    bright blobs (headlights) via contours instead.  The outer loop
    re-creates the Harvester/camera whenever acquisition fails, so the
    Yolo model is constructed once, outside that loop.
    """
    # declare yolo detector in isolation from camera object for reinitiation of camera under failure.
    print(camId, ' Constructing Yolo Model ...')
    net = Detector(bytes(cfgfile, encoding="utf-8"), bytes(weightfile, encoding="utf-8"), 0, bytes(datacfg, encoding="utf-8"))

    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']
    # main worker event loop, will keep restarting on camera dropout

    #variable declarations
    lastTime = time.time()
    transposeTime = 0
    frameCount = 0
    numberCars = 0
    numberClose = 0
    numberFar = 0
    numberTruck = 0
    lastSnapshot = None
    baseColor = (255,255,255)
    baseRes = scaledRes
    scale = 800/1920            # aspect factor of the native stream
    factor = baseRes/320        # thresholds below were tuned at 320 px wide
    showLines = False
    showYolo = False
    IS_CAM_OK = True
    MODE = "DAY"
    CLOSE_TRIGGER_METHOD = "DELAY" # DELAY, CALC
    # BUGFIX: triggerDelay is read throughout the trigger logic below but was
    # never defined in this function (NameError unless an unseen module-level
    # global existed).  0.5 s matches the sibling worker's default.
    triggerDelay = 0.5

    # SET THRESHES LOCALLY (placeholders; populated by setThresholds below)
    uproadThresh = 0
    truckThresh = 0
    closeThresh = 0
    extraThresh = 0
    leftBound = 0
    leftBound2 = 0
    rightBound = 0
    rightBound2 = 0
    marginOfError = 0

    def setThresholds(MODE, factor):
        """Load the per-mode pixel thresholds from THRESHOLDS, scaled from
        the 320-px reference resolution by ``factor``."""
        thresh = THRESHOLDS[MODE]
        nonlocal uproadThresh
        nonlocal truckThresh
        nonlocal closeThresh
        nonlocal extraThresh
        nonlocal leftBound
        nonlocal leftBound2
        nonlocal rightBound
        nonlocal rightBound2
        nonlocal marginOfError
        uproadThresh = int(thresh['uproadThresh']*factor)
        truckThresh = int(thresh['truckThresh']*factor)
        closeThresh = int(thresh['closeThresh']*factor)
        extraThresh = int(thresh['extraThresh']*factor)
        leftBound = int(thresh['leftBound']*factor)
        leftBound2 = int(thresh['leftBound2']*factor)
        rightBound = int(thresh['rightBound']*factor)
        rightBound2 = int(thresh['rightBound2']*factor)
        marginOfError = int(thresh['marginOfError']*factor)

    setThresholds(MODE, factor)
    while(True):
        try:
            print(camId, "Creating harvester modules and loading genlcam producers ...")
            h = Harvester()
            h.add_cti_file(CTI_FILE)
            h.update_device_info_list()

            try:
                cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
                print ("Camera found!")
                IS_CAM_OK = True
            # BUGFIX: was a bare `except:` — also trapped KeyboardInterrupt.
            except Exception:
                print ("Camera Not Found! Waiting 10 seconds and retrying ...")
                time.sleep(10) #sleep for 10 seconds and then retry!
                continue

            cam.start_image_acquisition()
            cv2.namedWindow(WINDOW_NAME, flags=0) # create dedicated stream window

            uproadLastTrigger = time.time()
            truckLastTrigger = time.time()
            closeLastTrigger = time.time()
            uproadTruckDelay = 0.0

            def setUproadTruckDelay():
                """Record the far->truck travel time when the truck line fires
                within 5 s of the far line (used only for logging)."""
                nonlocal uproadTruckDelay
                nonlocal uproadLastTrigger
                nonlocal truckLastTrigger
                if truckLastTrigger > uproadLastTrigger and truckLastTrigger-uproadLastTrigger<5:
                    uproadTruckDelay = truckLastTrigger-uproadLastTrigger

            while(IS_CAM_OK):
                try:
                    with timeout(seconds=3, error_message='FETCH_ERROR'):
                        frame = cam.fetch_buffer()
                # BUGFIX: was a bare `except:` — also trapped KeyboardInterrupt.
                except Exception:
                    IS_CAM_OK = False
                    print('CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!')
                    sendMessageToSlack('Streaming Camera has Failed - Restarting ...', '#ff3300')
                    cam.stop_image_acquisition()
                    cam.destroy()
                if(IS_CAM_OK and frame.payload.components):
                    image = frame.payload.components[0].data
                    if LOG:
                        print(image)
                    # USER CONTROLS (keyboard; comments give the key for each code)
                    user_input_key = cv2.waitKey(1)
                    if user_input_key==113: #q
                        showLines = True
                    elif user_input_key==97: #a
                        showLines = False
                    elif user_input_key==122: #z
                        showYolo = True
                    elif user_input_key==120: #x
                        showYolo = False
                    elif user_input_key==119: #w
                        MODE="DAY"
                        setThresholds("DAY", factor)
                        # CHANGE EXPOSURE AND GAIN
                    elif user_input_key==115: #s
                        MODE="NIGHT"
                        setThresholds("NIGHT", factor)
                    elif user_input_key==101: #e (original comment wrongly said 'o')
                        CLOSE_TRIGGER_METHOD = "CALC"
                    elif user_input_key==100: #d (original comment wrongly said 'l')
                        CLOSE_TRIGGER_METHOD = "DELAY"

                    frameScaled = cv2.resize(image, dsize=(baseRes, int(baseRes*scale)), interpolation=cv2.INTER_CUBIC)
                    frameColorised = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2RGB)
                    c, h1, w1 = frameColorised.shape[2], frameColorised.shape[1], frameColorised.shape[0]
                    # SHOW LINES SECTION (day overlay: thresholds +/- margin)
                    if showLines and camId=='CAM_1' and MODE=="DAY":
                        cv2.line(frameColorised, (uproadThresh,0), (uproadThresh, w1), (255,255,0), 1)
                        cv2.line(frameColorised, (uproadThresh+marginOfError,0), (uproadThresh+marginOfError, w1), (255,0,0), 1)
                        cv2.line(frameColorised, (uproadThresh-marginOfError,0), (uproadThresh-marginOfError, w1), (255,0,0), 1)
                        cv2.line(frameColorised, (truckThresh,0), (truckThresh, w1), (255,255,0), 1)
                        cv2.line(frameColorised, (truckThresh+marginOfError,0), (truckThresh+marginOfError, w1), (255,0,0), 1)
                        cv2.line(frameColorised, (truckThresh-marginOfError,0), (truckThresh-marginOfError, w1), (255,0,0), 1)
                        cv2.line(frameColorised, (closeThresh,0), (closeThresh, w1), (255,255,0), 1)
                        cv2.line(frameColorised, (closeThresh+marginOfError,0), (closeThresh+marginOfError, w1), (255,0,0), 1)
                        cv2.line(frameColorised, (closeThresh-marginOfError,0), (closeThresh-marginOfError, w1), (255,0,0), 1)
                        cv2.line(frameColorised, (0,rightBound), (h1, rightBound), (255,255,255), 1)
                        cv2.line(frameColorised, (0,leftBound2), (h1, leftBound2), (255,255,255), 1)
                        cv2.line(frameColorised, (0,leftBound), (h1, leftBound), (255,255,255), 1)

                    # Night overlay: thresholds only, no margins.
                    if showLines and camId=='CAM_1' and MODE=="NIGHT":
                        cv2.line(frameColorised, (uproadThresh,0), (uproadThresh, w1), (255,255,0), 1)
                        cv2.line(frameColorised, (truckThresh,0), (truckThresh, w1), (255,255,0), 1)
                        cv2.line(frameColorised, (closeThresh,0), (closeThresh, w1), (255,255,0), 1)
                        cv2.line(frameColorised, (0,rightBound), (h1, rightBound), (255,255,255), 1)
                        cv2.line(frameColorised, (0,leftBound), (h1, leftBound), (255,255,255), 1)
                        cv2.line(frameColorised, (0,leftBound2), (h1, leftBound2), (255,0,255), 1)

                    # PROCESSING SPECIFIC
                    if MODE=="NIGHT":
                        # Bright-blob (headlight) tracking on the gray frame.
                        frameGray = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2GRAY)
                        thresh = cv2.threshold(frameGray,  grayThresh, 255, cv2.THRESH_BINARY)[1]
                        labels = measure.label(thresh, neighbors=8, background=0)
                        mask = np.zeros(thresh.shape, dtype="uint8")
                        for label in np.unique(labels):
                            # if this is the background label, ignore it
                            if label == 0:
                                continue
                            labelMask = np.zeros(thresh.shape, dtype="uint8")
                            labelMask[labels == label] = 255
                            mask = cv2.add(mask, labelMask)
                        if len(np.unique(labels))>0:
                            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                            # findContours' return tuple differs between OpenCV 2 and 3.
                            cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                            # loop over the contours
                            for (i, c) in enumerate(cnts):
                                currentTime = time.time()
                                # draw the bright spot on the image
                                (x, y, w, h) = cv2.boundingRect(c)
                                ((cX, cY), radius) = cv2.minEnclosingCircle(c)
                                if showYolo:
                                    cv2.circle(frameColorised, (int(cX), int(cY)), int(5),
                                        (0, 0, 255), 3)
                                if cY <= rightBound and cY >= leftBound and camId=='CAM_1':
                                    if cX>=uproadThresh-marginOfError and cX<=uproadThresh+marginOfError and (currentTime-uproadLastTrigger)>triggerDelay and cY>=leftBound2:
                                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                                        uproadLastTrigger = currentTime
                                        numberCars += 1
                                    if cX>=truckThresh-marginOfError and cX<=truckThresh+marginOfError and (currentTime-truckLastTrigger)>triggerDelay:
                                        urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()
                                        truckLastTrigger = currentTime
                                        setUproadTruckDelay()
                                    if cX>=closeThresh-marginOfError and cX<=closeThresh+marginOfError and (currentTime-closeLastTrigger)>triggerDelay:
                                        urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                                        closeLastTrigger = currentTime
                    if MODE=="DAY":
                        # Run the detector on the rotated frame; detections are
                        # mapped back into the unrotated frame's coordinates.
                        img = np.rot90(frameColorised, 1)
                        img2 = Image(img)
                        results = net.detect(img2, thresh=yolo_thresh, nms=yolo_nms, hier_thresh=yolo_hier_thresh)
                        for cat, score, bounds in results:
                            x, y, w, h = bounds
                            x, y = (h1-int(y), int(x))  # undo the rot90 mapping
                            x1,y1,x2,y2 = [int(x-h/2),int(y-w/2),int(x+h/2),int(y+w/2)]

                            color = baseColor
                            if showYolo and h>5:
                                cv2.rectangle(frameColorised, (x1,y1),(x2,y2),color)
                                cv2.circle(frameColorised, (int(x), int(y)), int(2),(0, 255, 0), 3)

                            currentTime = time.time()
                            # Trigger on any sufficiently large detection
                            # inside the lane bounds (class is not checked).
                            if y <= rightBound and camId=='CAM_1' and h>10 and w>10:
                                if x>=uproadThresh-10 and x<=uproadThresh+10 and y>=leftBound2 and (currentTime-uproadLastTrigger)>triggerDelay:
                                    urllib.request.urlopen(TRIGGER_FAR_URL).read()
                                    numberFar += 1
                                    uproadLastTrigger = currentTime
                                if x>=truckThresh-marginOfError and x<=truckThresh+marginOfError and y>=leftBound and (currentTime-truckLastTrigger)>triggerDelay:
                                    urllib.request.urlopen(TRIGGER_TRUCK_URL).read()
                                    numberTruck += 1
                                    truckLastTrigger = currentTime
                                    setUproadTruckDelay()
                                if x>=closeThresh-marginOfError*2 and x<=closeThresh+marginOfError*2 and y>=leftBound and (currentTime-closeLastTrigger)>triggerDelay:
                                    urllib.request.urlopen(TRIGGER_CLOSE_URL).read()
                                    numberClose += 1
                                    closeLastTrigger = currentTime

                    # DISPLAY FRAME IN WINDOW
                    if IS_ROTATE:
                        cv2.imshow(WINDOW_NAME, np.rot90(frameColorised))
                    else:
                        cv2.imshow(WINDOW_NAME, frameColorised)

                    frame.queue()
                    cv2.waitKey(1)
                    if frameCount%10==0:
                        print("mode:", MODE,"close mode:", CLOSE_TRIGGER_METHOD, "Count Far", numberFar, "Count Truck", numberTruck,"Count Close", numberClose,"frame:", frameCount, "fps:", int(1.0/(time.time()-lastTime)),"trigger dif",uproadTruckDelay)
                    lastTime = time.time()
                    frameCount += 1

            #IF CAM NOT OK
            cam.stop_image_acquisition()
            cam.destroy()
        except Exception as e:
            print ("Critical Script error! Trying again in 5 seconds ...")
            print(e)
            time.sleep(5) #sleep for 5 seconds and then retry!
def worker(camId):
    """Single-camera detection loop: fetch frames from a GenICam camera via
    Harvester, run the darknet YOLO detector on each frame, and fire HTTP
    trigger URLs when a detection centre crosses the configured road
    thresholds.

    NOTE(review): unlike yoloWorker/openCvWorker there is no retry wrapper
    here -- a camera dropout (typically inside cam.fetch_buffer) raises out
    of this function and kills the worker.

    camId: key into CAM_CONFIG, e.g. 'CAM_1' or 'CAM_2'.
    """

    # declare Detector in isolation from Camera object for reinitiation of Camera under failure.
    #
    net = Detector(bytes(cfgfile, encoding="utf-8"),
                   bytes(weightfile, encoding="utf-8"), 0,
                   bytes(datacfg, encoding="utf-8"))

    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']
    h = Harvester()
    h.add_cti_file(CTI_FILE)
    h.update_device_info_list()

    try:
        cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
        print("Camera found")

    except:
        print("Camera Not Found")
        exit()

    cam.start_image_acquisition()

    #variable declarations

    lastTime = time.time()
    transposeTime = 0  # NOTE(review): never used
    i = 0  # frame counter
    numberCars = 0
    lastSnapshot = None  # NOTE(review): never used
    cv2.namedWindow(WINDOW_NAME, flags=0)

    # per-class overlay colours (BGR tuples)
    carColor = (255, 0, 0)
    busColor = (0, 255, 0)
    truckColor = (0, 0, 255)
    phoneColor = (0, 255, 255)
    baseColor = (255, 255, 255)

    baseRes = 400  # horizontal working resolution in pixels
    scale = 800 / 1920  # aspect ratio of the source stream

    # threshold positions, calibrated against a 320 px wide frame;
    # rescaled by `factor` below

    uproadThresh = 295
    truckThresh = 230
    closeThresh = 180
    extraThresh = 50
    leftBound = 50
    leftBound2 = 70
    rightBound = 125
    rightBound2 = 125
    marginOfError = 20

    factor = baseRes / 320
    uproadThresh = int(uproadThresh * factor)
    truckThresh = int(truckThresh * factor)
    closeThresh = int(closeThresh * factor)
    extraThresh = int(50 * factor)
    leftBound = int(leftBound * factor)
    leftBound2 = int(leftBound2 * factor)
    rightBound = int(125 * factor)
    rightBound2 = int(125 * factor)

    showLines = False  # overlay threshold lines (toggled with q / a)
    showYolo = False  # overlay detection markers (toggled with z / x)

    triggerDelay = 0.250  # min seconds between two triggers of the same kind
    uproadLastTrigger = time.time()
    truckLastTrigger = time.time()
    closeLastTrigger = time.time()

    while (True):
        buffer = cam.fetch_buffer()  #cam fails here often!
        payload = buffer.payload.components
        if LOG:
            print(payload)
        if (payload):
            image = payload[0].data
            if LOG:
                print(image)
            # downscale the raw Bayer frame, then demosaic to RGB
            small = cv2.resize(image,
                               dsize=(baseRes, int(baseRes * scale)),
                               interpolation=cv2.INTER_CUBIC)
            rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2RGB)
            img = np.rot90(rgb, 1)  # detector expects the rotated orientation
            c, h1, w1 = rgb.shape[2], rgb.shape[1], rgb.shape[0]

            img2 = Image(img)
            results = net.detect(img2)
            if LOG:
                print(results)
            k = cv2.waitKey(1)

            if k == 113:  # 'q' -> show threshold lines
                showLines = True
            elif k == 97:  # 'a' -> hide threshold lines
                showLines = False
            elif k == 122:  # 'z' -> show detections
                showYolo = True
            elif k == 120:  # 'x' -> hide detections
                showYolo = False

            # debug overlay: draw each threshold with its margin-of-error band
            if showLines and camId == 'CAM_2':
                cv2.line(rgb, (uproadThresh, 0), (uproadThresh, w1),
                         (255, 255, 0), 1)
                cv2.line(rgb, (uproadThresh + marginOfError, 0),
                         (uproadThresh + marginOfError, w1), (255, 0, 0), 1)
                cv2.line(rgb, (uproadThresh - marginOfError, 0),
                         (uproadThresh - marginOfError, w1), (255, 0, 0), 1)
                cv2.putText(rgb, 'Up-Road', (uproadThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                cv2.line(rgb, (truckThresh, 0), (truckThresh, w1),
                         (255, 255, 0), 1)
                cv2.line(rgb, (truckThresh + marginOfError, 0),
                         (truckThresh + marginOfError, w1), (255, 0, 0), 1)
                cv2.line(rgb, (truckThresh - marginOfError, 0),
                         (truckThresh - marginOfError, w1), (255, 0, 0), 1)
                cv2.putText(rgb, 'Truck', (truckThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                cv2.line(rgb, (closeThresh, 0), (closeThresh, w1),
                         (255, 255, 0), 1)
                cv2.line(rgb, (closeThresh + marginOfError, 0),
                         (closeThresh + marginOfError, w1), (255, 0, 0), 1)
                cv2.line(rgb, (closeThresh - marginOfError, 0),
                         (closeThresh - marginOfError, w1), (255, 0, 0), 1)
                cv2.putText(rgb, 'Close', (closeThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                cv2.line(rgb, (0, rightBound), (h1, rightBound),
                         (255, 255, 255), 1)
                cv2.line(rgb, (0, leftBound2), (h1, leftBound2),
                         (255, 255, 255), 1)
                cv2.line(rgb, (0, leftBound), (h1, leftBound), (255, 255, 255),
                         1)

            if showLines and camId == 'CAM_1':
                cv2.line(rgb, (extraThresh, 0), (extraThresh, w1),
                         (255, 255, 0), 1)
                cv2.putText(rgb, 'Up-Road', (extraThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                #cv2.line(rgb, (0,rightBound2), (h1, rightBound2), (255,255,255), 1)

            bounds = 100  # NOTE(review): dead assignment -- immediately shadowed by the loop variable below

            for cat, score, bounds in results:
                x, y, w, h = bounds
                # map detector coordinates (rotated frame) back onto `rgb`
                x, y = (h1 - int(y), int(x))
                x1, y1, x2, y2 = [
                    int(x - h / 2),
                    int(y - w / 2),
                    int(x + h / 2),
                    int(y + w / 2)
                ]

                type = str(cat.decode("utf-8"))  # NOTE(review): shadows the builtin `type`
                color = baseColor
                if (type == 'car'):
                    color = carColor
                if (type == 'bus'):
                    color = busColor
                if (type == 'truck'):
                    color = truckColor
                if (type == 'phone'):
                    color = phoneColor
                #x1,y1,x2,y2 = [int(x+w/2),int(y+h/2),int(x-w/2),int(y-h/2)]
                #cv2.rectangle(rgb, (int(x-w/2),int(y-h/2)),(int(x+w/2),int(y+h/2)),(255,0,0))
                #cv2.line(rgb, (x1,y1), (x1, y2), color, 2)
                if showYolo and h > 5:
                    #cv2.rectangle(rgb, (x1,y1),(x2,y2),color)
                    #cv2.putText(rgb, str(cat.decode("utf-8")), (int(x), int(y)), cv2.FONT_HERSHEY_COMPLEX, 1, color)
                    cv2.circle(rgb, (int(x), int(y)), int(2), (0, 255, 0), 3)

                currentTime = time.time()

                # fire the trigger URLs when a sufficiently large detection sits
                # inside a threshold band and the per-trigger cooldown elapsed
                if y <= rightBound and camId == 'CAM_2' and h > 10 and w > 10:
                    if x >= uproadThresh - 10 and x <= uproadThresh + 10 and y >= leftBound2 and (
                            currentTime - uproadLastTrigger) > triggerDelay:
                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                        if LOG:
                            print('FAR TRIG')
                        uproadLastTrigger = currentTime
                    #if x1<=truckThresh and x2>=truckThresh and (currentTime-truckLastTrigger)>triggerDelay:
                    if x >= truckThresh - marginOfError and x <= truckThresh + marginOfError and y >= leftBound and (
                            currentTime - truckLastTrigger) > triggerDelay:
                        urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()
                        if LOG:
                            print('TRUCK TRIG')
                        numberCars += 1
                        truckLastTrigger = currentTime
                    #if x1<=closeThresh and x2>=closeThresh and (currentTime-closeLastTrigger)>triggerDelay:
                    if x >= closeThresh - marginOfError * 2 and x <= closeThresh + marginOfError * 2 and y >= leftBound and (
                            currentTime - closeLastTrigger) > triggerDelay:
                        urllib.request.urlopen(TRIGGER_CLOSE_URL).read()
                        if LOG:
                            print('CLOSE TRIG')
                        closeLastTrigger = currentTime

                if camId == 'CAM_1':
                    # NOTE(review): `and False` permanently disables this trigger
                    if y1 <= rightBound2 and y2 >= rightBound2 and False:
                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                        numberCars += 1
            '''predictions = []
            for output in predictions:
                left, right, top, bottom, what, prob = output['left'],output['right'],output['top'],output['bottom'],output['class'],output['prob']
                #print(output)
                #lastSnapshot = snapshot.copy()
                #cv2.imshow("Snapshots", lastSnapshot)
                if( what == 'car' ):
                    print(output)
                    numberCars += 1
                    cv2.rectangle(rgb, (50,50), (100,150), (255, 255, 255), 20)
                    if ( camId =="CAM_2" ):
                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                        urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                        urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()'''

            if IS_ROTATE:
                cv2.imshow(WINDOW_NAME, np.rot90(rgb))
            else:
                cv2.imshow(WINDOW_NAME, rgb)

            cv2.waitKey(1)
            buffer.queue()  # return the buffer to the acquisition engine

            print("Count: ", numberCars, " Frame: ", i, " FPS: ",
                  1.0 / (time.time() - lastTime))
            lastTime = time.time()
            i += 1

    # NOTE(review): unreachable -- the while (True) loop above never breaks
    cam.stop_image_acquisition()
    cam.destroy()
# Ejemplo n.º 5  (scraped example separator -- commented out so the file parses)
def yoloWorker(camId):
    """Resilient YOLO detection loop for one GenICam camera.

    The outer while/try loop recreates the Harvester producer whenever the
    camera drops out or a frame fetch times out; a Slack notification is
    sent on failure. Detections whose centre crosses a configured threshold
    fire the corresponding flash trigger URLs.

    camId: key into CAM_CONFIG, e.g. 'CAM_1' or 'CAM_2'.
    """
    # declare yolo detector in isolation from camera object for reinitiation of camera under failure.
    print(camId, ' Constructing Yolo Model ...')
    net = Detector(bytes(cfgfile, encoding="utf-8"),
                   bytes(weightfile, encoding="utf-8"), 0,
                   bytes(datacfg, encoding="utf-8"))

    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']

    # main worker event loop, will keep restarting on camera dropout
    while (True):
        try:
            print(
                camId,
                ' Creating harvester modules and loading genlcam producers ...'
            )
            h = Harvester()
            h.add_cti_file(CTI_FILE)
            h.update_device_info_list()

            try:
                cam = h.create_image_acquisition_manager(
                    serial_number=CAM_NAME)
                print("Camera found!")
            except Exception:
                print("Camera Not Found! Waiting 10 seconds and retrying ...")
                time.sleep(10)  #sleep for 10 seconds and then retry!
                continue  #exit ()

            cam.start_image_acquisition()
            cv2.namedWindow(WINDOW_NAME,
                            flags=0)  # create dedicated stream window

            #variable declarations
            lastTime = time.time()
            i = 0  # frame counter
            numberCars = 0

            baseColor = (255, 255, 255)
            baseRes = 400  # horizontal working resolution in pixels
            scale = 800 / 1920  # aspect ratio of the source stream
            # threshold positions, calibrated against a 320 px wide frame
            uproadThresh = 235
            truckThresh = 155
            closeThresh = 110
            extraThresh = 50
            leftBound = 50
            leftBound2 = 60
            rightBound = 90
            rightBound2 = 125
            marginOfError = 15
            # rescaling
            factor = baseRes / 320
            uproadThresh = int(uproadThresh * factor)
            truckThresh = int(truckThresh * factor)
            closeThresh = int(closeThresh * factor)
            extraThresh = int(50 * factor)
            leftBound = int(leftBound * factor)
            leftBound2 = int(leftBound2 * factor)
            rightBound = int(rightBound * factor)
            rightBound2 = int(125 * factor)

            showLines = False  # overlay threshold lines (q / a)
            showYolo = False  # overlay detection markers (z / x)

            # FIX: triggerDelay was never defined in this function, so the
            # first threshold comparison raised NameError, which the outer
            # bare except swallowed -- producing an endless restart loop.
            # 0.250 s matches the cooldown used by worker().
            triggerDelay = 0.250
            uproadLastTrigger = time.time()
            truckLastTrigger = time.time()
            closeLastTrigger = time.time()

            IS_CAM_OK = True

            # experimental: fetch buffers in a subprocess with a hard timeout
            IS_MULTI = False

            def fetchBuffer(shared, camera):
                # runs in a child process; publishes the frame pixel data
                # through the shared manager dict
                frame = camera.fetch_buffer()
                shared['buffer'] = frame.payload.components[0].data
                frame.queue()

            while (IS_CAM_OK):
                if IS_MULTI:
                    manager = multiprocessing.Manager()
                    shared = manager.dict()

                    p = multiprocessing.Process(target=fetchBuffer,
                                                args=(shared, cam))
                    p.start()

                    # Wait for TIMEOUT_DELAY seconds or until process finishes
                    p.join(TIMEOUT_DELAY)

                    # If the fetch process is still alive, kill it and restart
                    if p.is_alive():
                        p.terminate()
                        p.join()
                        IS_CAM_OK = False

                try:
                    with timeout(seconds=3, error_message='FETCH_ERROR'):
                        frame = cam.fetch_buffer()
                except Exception:
                    IS_CAM_OK = False
                    print(
                        'CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!'
                    )
                    sendMessageToSlack(
                        'Streaming Camera has Failed - Restarting ...',
                        '#ff3300')

                # FIX: `shared` only exists on the IS_MULTI path; guard the
                # debug print so LOG mode cannot raise NameError here.
                if LOG and IS_MULTI:
                    print(shared['buffer'])

                if (IS_CAM_OK and frame.payload.components):
                    image = frame.payload.components[0].data
                    if LOG:
                        print(image)
                    # downscale the raw Bayer frame, then demosaic to RGB
                    small = cv2.resize(image,
                                       dsize=(baseRes, int(baseRes * scale)),
                                       interpolation=cv2.INTER_CUBIC)
                    rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2RGB)
                    img = np.rot90(rgb, 1)  # detector expects rotated frame
                    c, h1, w1 = rgb.shape[2], rgb.shape[1], rgb.shape[0]

                    img2 = Image(img)
                    results = net.detect(img2)

                    if LOG:
                        print(results)

                    user_input_key = cv2.waitKey(1)

                    if user_input_key == 113:  #q
                        showLines = True
                    elif user_input_key == 97:  #a
                        showLines = False
                    elif user_input_key == 122:  #z
                        showYolo = True
                    elif user_input_key == 120:  #x
                        showYolo = False

                    # debug overlay: thresholds with their margin-of-error bands
                    if showLines and camId == 'CAM_2':
                        cv2.line(rgb, (uproadThresh, 0), (uproadThresh, w1),
                                 (255, 255, 0), 1)
                        cv2.line(rgb, (uproadThresh + marginOfError, 0),
                                 (uproadThresh + marginOfError, w1),
                                 (255, 0, 0), 1)
                        cv2.line(rgb, (uproadThresh - marginOfError, 0),
                                 (uproadThresh - marginOfError, w1),
                                 (255, 0, 0), 1)
                        cv2.line(rgb, (truckThresh, 0), (truckThresh, w1),
                                 (255, 255, 0), 1)
                        cv2.line(rgb, (truckThresh + marginOfError, 0),
                                 (truckThresh + marginOfError, w1),
                                 (255, 0, 0), 1)
                        cv2.line(rgb, (truckThresh - marginOfError, 0),
                                 (truckThresh - marginOfError, w1),
                                 (255, 0, 0), 1)
                        cv2.line(rgb, (closeThresh, 0), (closeThresh, w1),
                                 (255, 255, 0), 1)
                        cv2.line(rgb, (closeThresh + marginOfError, 0),
                                 (closeThresh + marginOfError, w1),
                                 (255, 0, 0), 1)
                        cv2.line(rgb, (closeThresh - marginOfError, 0),
                                 (closeThresh - marginOfError, w1),
                                 (255, 0, 0), 1)
                        cv2.line(rgb, (0, rightBound), (h1, rightBound),
                                 (255, 255, 255), 1)
                        cv2.line(rgb, (0, leftBound2), (h1, leftBound2),
                                 (255, 255, 255), 1)
                        cv2.line(rgb, (0, leftBound), (h1, leftBound),
                                 (255, 255, 255), 1)

                    for cat, score, bounds in results:
                        x, y, w, h = bounds
                        # map detector coordinates (rotated frame) back to rgb
                        x, y = (h1 - int(y), int(x))
                        x1, y1, x2, y2 = [
                            int(x - h / 2),
                            int(y - w / 2),
                            int(x + h / 2),
                            int(y + w / 2)
                        ]

                        color = baseColor
                        if showYolo and h > 5:
                            cv2.rectangle(rgb, (x1, y1), (x2, y2), color)
                            cv2.circle(rgb, (int(x), int(y)), int(2),
                                       (0, 255, 0), 3)

                        currentTime = time.time()
                        # fire a trigger when a sufficiently large detection
                        # sits inside a threshold band and its cooldown elapsed
                        if y <= rightBound and camId == 'CAM_2' and h > 10 and w > 10:
                            if x >= uproadThresh - 10 and x <= uproadThresh + 10 and y >= leftBound2 and (
                                    currentTime -
                                    uproadLastTrigger) > triggerDelay:
                                urllib.request.urlopen(
                                    TRIGGER_FAR_FLASH_URL).read()
                                if LOG:
                                    print('FAR TRIG')
                                numberCars += 1
                                uproadLastTrigger = currentTime
                            if x >= truckThresh - marginOfError and x <= truckThresh + marginOfError and y >= leftBound and (
                                    currentTime -
                                    truckLastTrigger) > triggerDelay:
                                urllib.request.urlopen(
                                    TRIGGER_TRUCK_FLASH_URL).read()
                                if LOG:
                                    print('TRUCK TRIG')
                                truckLastTrigger = currentTime
                            if x >= closeThresh - marginOfError * 2 and x <= closeThresh + marginOfError * 2 and y >= leftBound and (
                                    currentTime -
                                    closeLastTrigger) > triggerDelay:
                                urllib.request.urlopen(
                                    TRIGGER_CLOSE_FLASH_URL).read()
                                if LOG:
                                    print('CLOSE TRIG')
                                closeLastTrigger = currentTime

                        if camId == 'CAM_1':
                            # NOTE(review): `and False` disables this trigger
                            if y1 <= rightBound2 and y2 >= rightBound2 and False:
                                urllib.request.urlopen(
                                    TRIGGER_FAR_FLASH_URL).read()
                                numberCars += 1

                    if IS_ROTATE:
                        cv2.imshow(WINDOW_NAME, np.rot90(rgb))
                    else:
                        cv2.imshow(WINDOW_NAME, rgb)
                    frame.queue()  # return buffer to the acquisition engine
                    cv2.waitKey(1)
                    print("Count: ", numberCars, " Frame: ", i, " FPS: ",
                          1.0 / (time.time() - lastTime))
                    lastTime = time.time()
                    i += 1

            #IF CAM NOT OK
            cam.stop_image_acquisition()
            cam.destroy()
        except Exception as e:
            # FIX: was a bare except that also hid the exception detail
            print("Critical Script error! Trying again in 5 seconds ...")
            print(e)
            time.sleep(5)  #sleep for 5 seconds and then retry!
# Ejemplo n.º 6  (scraped example separator -- commented out so the file parses)
def openCvWorker(camId):
    """Resilient bright-spot detection loop for one GenICam camera.

    Pure-OpenCV alternative to the YOLO workers: each frame is converted to
    grayscale and thresholded, connected bright components are extracted
    (skimage.measure.label + contours), and a component centre crossing a
    configured threshold fires the corresponding flash trigger URL. The
    outer while/try loop restarts the camera on any failure.

    camId: key into CAM_CONFIG, e.g. 'CAM_1' or 'CAM_2'.
    """
    while (True):
        try:
            CAM_NAME = CAM_CONFIG[camId]['name']
            WINDOW_NAME = CAM_CONFIG[camId]['window']
            IS_ROTATE = CAM_CONFIG[camId]['rotate']

            h = Harvester()
            h.add_cti_file(CTI_FILE)
            h.update_device_info_list()
            try:
                cam = h.create_image_acquisition_manager(
                    serial_number=CAM_NAME)
                print("Camera found")

            except Exception:
                print("Camera Not Found")
                exit()

            cam.start_image_acquisition()
            cv2.namedWindow(WINDOW_NAME, flags=0)

            lastTime = time.time()
            frame = 0  # frame counter
            numberCars = 0

            baseRes = 300  # horizontal working resolution in pixels
            scale = 800 / 1920  # aspect ratio of the source stream

            # threshold positions, calibrated against a 320 px wide frame

            uproadThresh = 230
            truckThresh = 155
            closeThresh = 90
            extraThresh = 50
            leftBound = 50
            leftBound2 = 60
            rightBound = 80
            rightBound2 = 125
            marginOfError = 10
            grayThresh = 150  # grayscale intensity cut-off for "bright spot"

            factor = baseRes / 320
            uproadThresh = int(uproadThresh * factor)
            truckThresh = int(truckThresh * factor)
            closeThresh = int(closeThresh * factor)
            extraThresh = int(50 * factor)
            leftBound = int(leftBound * factor)
            leftBound2 = int(leftBound2 * factor)
            rightBound = int(rightBound * factor)
            rightBound2 = int(125 * factor)

            showLines = False  # overlay threshold lines (q / a)
            showYolo = False  # overlay detected spot markers (z / x)

            triggerDelay = 0.500  # min seconds between triggers of one kind
            uproadLastTrigger = time.time()
            truckLastTrigger = time.time()
            closeLastTrigger = time.time()

            IS_CAM_OK = True

            while IS_CAM_OK:
                try:
                    with timeout(seconds=3, error_message='FETCH_ERROR'):
                        buffer = cam.fetch_buffer()
                except Exception:
                    IS_CAM_OK = False
                    print(
                        'CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!'
                    )
                    sendMessageToSlack(
                        'Streaming Camera has Failed - Restarting ...',
                        '#ff3300')
                # FIX: previously printed the undefined name `payload`,
                # raising NameError whenever LOG was enabled; also guard on
                # IS_CAM_OK because `buffer` is unbound after a failed fetch.
                if LOG and IS_CAM_OK:
                    print(buffer.payload.components)
                if (IS_CAM_OK and buffer.payload.components):
                    image = buffer.payload.components[0].data
                    if showLines or showYolo:
                        # keep an RGB copy (`small`) for the overlay window
                        conver = cv2.resize(image,
                                            dsize=(baseRes,
                                                   int(baseRes * scale)),
                                            interpolation=cv2.INTER_CUBIC)
                        small = cv2.cvtColor(conver, cv2.COLOR_BayerRG2RGB)
                        rgb = cv2.cvtColor(conver, cv2.COLOR_BayerRG2GRAY)
                    else:
                        small = cv2.resize(image,
                                           dsize=(baseRes,
                                                  int(baseRes * scale)),
                                           interpolation=cv2.INTER_CUBIC)
                        rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2GRAY)

                    # binarise: bright pixels (headlights) become 255
                    thresh = cv2.threshold(rgb, grayThresh, 255,
                                           cv2.THRESH_BINARY)[1]
                    # NOTE(review): `neighbors=8` is deprecated in newer
                    # scikit-image in favour of `connectivity=2` -- confirm
                    # the pinned skimage version before changing.
                    labels = measure.label(thresh, neighbors=8, background=0)
                    mask = np.zeros(thresh.shape, dtype="uint8")

                    if LOG:
                        print(labels)

                    # loop over the unique components
                    for label in np.unique(labels):
                        # if this is the background label, ignore it
                        if label == 0:
                            continue
                        labelMask = np.zeros(thresh.shape, dtype="uint8")
                        labelMask[labels == label] = 255
                        mask = cv2.add(mask, labelMask)

                    if len(np.unique(labels)) > 0:
                        # NOTE(review): this indexing assumes the 2-tuple /
                        # 3-tuple findContours return of a specific OpenCV
                        # major version; breaks under OpenCV 4 (use
                        # imutils.grab_contours) -- confirm installed version.
                        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
                        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                        # loop over the contours
                        for (i, c) in enumerate(cnts):
                            currentTime = time.time()
                            # draw the bright spot on the image
                            (x, y, w, h) = cv2.boundingRect(c)
                            ((cX, cY), radius) = cv2.minEnclosingCircle(c)
                            if showYolo:
                                cv2.circle(small, (int(cX), int(cY)), int(5),
                                           (0, 0, 255), 3)
                            # fire a trigger when a spot centre sits inside a
                            # threshold band and its cooldown has elapsed
                            if cY <= rightBound and cY >= leftBound and camId == 'CAM_2':
                                if cX >= uproadThresh - marginOfError and cX <= uproadThresh + marginOfError and (
                                        currentTime - uproadLastTrigger
                                ) > triggerDelay and cY >= leftBound2:
                                    urllib.request.urlopen(
                                        TRIGGER_FAR_FLASH_URL).read()
                                    uproadLastTrigger = currentTime
                                    numberCars += 1
                                if cX >= truckThresh - marginOfError and cX <= truckThresh + marginOfError and (
                                        currentTime -
                                        truckLastTrigger) > triggerDelay:
                                    urllib.request.urlopen(
                                        TRIGGER_TRUCK_FLASH_URL).read()
                                    truckLastTrigger = currentTime
                                if cX >= closeThresh - marginOfError and cX <= closeThresh + marginOfError and (
                                        currentTime -
                                        closeLastTrigger) > triggerDelay:
                                    urllib.request.urlopen(
                                        TRIGGER_CLOSE_FLASH_URL).read()
                                    closeLastTrigger = currentTime

                    # show the output image
                    k = cv2.waitKey(1)
                    h1, w1 = small.shape[1], small.shape[0]

                    if k == 113:  # 'q' -> show threshold lines
                        showLines = True
                    elif k == 97:  # 'a' -> hide threshold lines
                        showLines = False
                    elif k == 122:  # 'z' -> show spot markers
                        showYolo = True
                    elif k == 120:  # 'x' -> hide spot markers
                        showYolo = False

                    if showLines and camId == 'CAM_2':
                        cv2.line(small, (uproadThresh, 0), (uproadThresh, w1),
                                 (255, 255, 0), 1)
                        cv2.putText(small, 'Up-Road', (uproadThresh, 50),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.2,
                                    (255, 255, 0))
                        cv2.line(small, (truckThresh, 0), (truckThresh, w1),
                                 (255, 255, 0), 1)
                        cv2.putText(small, 'Truck', (truckThresh, 50),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.2,
                                    (255, 255, 0))
                        cv2.line(small, (closeThresh, 0), (closeThresh, w1),
                                 (255, 255, 0), 1)
                        cv2.putText(small, 'Close', (closeThresh, 50),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.2,
                                    (255, 255, 0))
                        cv2.line(small, (0, rightBound), (h1, rightBound),
                                 (255, 255, 255), 1)
                        cv2.line(small, (0, leftBound), (h1, leftBound),
                                 (255, 255, 255), 1)
                        cv2.line(small, (0, leftBound2), (h1, leftBound2),
                                 (255, 0, 255), 1)

                    if IS_ROTATE:
                        cv2.imshow(WINDOW_NAME, np.rot90(small))
                    else:
                        cv2.imshow(WINDOW_NAME, small)

                    cv2.waitKey(1)
                    buffer.queue()  # return buffer to the acquisition engine
                    print("Count: ", numberCars, " Frame: ", frame, " FPS: ",
                          1.0 / (time.time() - lastTime))
                    lastTime = time.time()
                    frame += 1

            cam.stop_image_acquisition()
            cam.destroy()
        except Exception as e:
            # FIX: was a bare except that also hid the exception detail
            print("Critical Script Error! Waiting 5 seconds and retrying ...")
            print(e)
            time.sleep(5)  #sleep for 5 seconds and then retry!
# Ejemplo n.º 7  (scraped example separator -- commented out so the file parses)
def mainWorker(camId):
    """Per-camera acquisition / trigger worker event loop (runs forever).

    Fetches frames from the GenICam camera configured in CAM_CONFIG[camId],
    computes the mean intensity of three fixed watch regions (far, truck,
    close) against a floating average, and fires the corresponding HTTP
    flash-trigger URLs when a region deviates past its threshold.  Any
    camera failure tears the connection down and reconnects.

    Depends on module-level names: CAM_CONFIG, CTI_FILE, scaledRes, LOG,
    THRESHOLDS, TRIGGER_FAR_FLASH_URL, TRIGGER_TRUCK_FLASH_URL,
    TRIGGER_CLOSE_FLASH_URL, sendMessageToSlack, timeout, Harvester,
    cv2, np, time, urllib.
    """
    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']

    # --- per-run state -------------------------------------------------
    lastTime = time.time()
    frameCount = 0
    avFrameRate = 0          # 50-frame floating-average FPS
    numberClose = 0          # trigger counters, printed every frame
    numberFar = 0
    numberTruck = 0
    baseRes = scaledRes
    scale = 800 / 1920       # height/width ratio of the native crop
    factor = baseRes / 320   # threshold scaling vs. the 320-px reference
    showLines = False        # overlay trigger boxes (keys q / a)
    showYolo = False         # kept for key-interface parity (keys z / x)
    IS_CAM_OK = True
    MODE = "DAY"
    CLOSE_TRIGGER_METHOD = "DELAY"  # DELAY, CALC
    # BUG FIX: triggerDelay was referenced below but never assigned in
    # this variant, so the first processed frame raised NameError, which
    # the broad outer except swallowed -> endless restart loop.  0.5 s
    # matches the sibling mainWorker variant earlier in this file.
    triggerDelay = 0.5       # seconds of dead-time after each trigger

    # SET THRESHES LOCALLY (populated by setThresholds below)
    uproadThresh = 0
    truckThresh = 0
    closeThresh = 0
    extraThresh = 0
    leftBound = 0
    leftBound2 = 0
    rightBound = 0
    rightBound2 = 0
    marginOfError = 0

    isFarClear = True
    isTruckClear = True
    isCloseClear = True

    def setThresholds(MODE, factor):
        """Load the THRESHOLDS[MODE] entry, scaled to the working resolution."""
        thresh = THRESHOLDS[MODE]
        nonlocal uproadThresh, truckThresh, closeThresh, extraThresh
        nonlocal leftBound, leftBound2, rightBound, rightBound2
        nonlocal marginOfError
        uproadThresh = int(thresh['uproadThresh'] * factor)
        truckThresh = int(thresh['truckThresh'] * factor)
        closeThresh = int(thresh['closeThresh'] * factor)
        extraThresh = int(thresh['extraThresh'] * factor)
        leftBound = int(thresh['leftBound'] * factor)
        leftBound2 = int(thresh['leftBound2'] * factor)
        rightBound = int(thresh['rightBound'] * factor)
        rightBound2 = int(thresh['rightBound2'] * factor)
        marginOfError = int(thresh['marginOfError'] * factor)

    def drawTriggerBox(img, center, halfWidth, depth, isClear):
        """Overlay one watch region: green when clear, red while occupied."""
        color = (0, 255, 0) if isClear else (0, 0, 255)
        cv2.rectangle(img,
                      (center[1], center[0] - halfWidth),
                      (center[1] + depth, center[0] + halfWidth),
                      color)

    setThresholds(MODE, factor)
    startTime = time.time()

    # Watch regions as [row, col] centres in the scaled frame.  Constant
    # per run, so hoisted out of the per-frame loop (originally they were
    # reassigned - twice - on every frame).
    farBoxCenter = [97, 298]
    farBoxWidth = 5
    farBoxHeight = 10
    truckBoxCenter = [97, 190]
    truckBoxWidth = 10
    truckBoxHeight = 10
    closeBoxCenter = [97, 50]
    closeBoxWidth = 15
    closeBoxHeight = 15
    # Reference patch for an overall scene-brightness floating average.
    baseValueCenter = 50
    baseValueWidth = 3
    baseValueHeight = 20
    baseValueThresh = 50
    # Mean-intensity deviation thresholds, one per watch region.
    sdThreshold = 32
    tsdThreshold = 32
    csdThreshold = 32
    numberOfFrames = 200     # window length of the floating averages

    while True:
        try:
            print(
                camId,
                "Creating harvester modules and loading genlcam producers ...")
            h = Harvester()
            h.add_cti_file(CTI_FILE)
            h.update_device_info_list()

            try:
                cam = h.create_image_acquisition_manager(
                    serial_number=CAM_NAME)
                print("Camera found!")
                IS_CAM_OK = True
            except Exception:
                # BUG FIX: the original called cam.stop_image_acquisition()
                # / cam.destroy() here, but `cam` is unbound when creation
                # fails, so that raised again and fell through to the
                # outer handler.  Just wait and retry.
                print("Camera Not Found! Waiting 20 seconds and retrying ...")
                time.sleep(20)
                continue

            cam.start_image_acquisition()
            cv2.namedWindow(WINDOW_NAME,
                            flags=0)  # create dedicated stream window

            uproadLastTrigger = time.time()
            truckLastTrigger = time.time()
            closeLastTrigger = time.time()
            uproadTruckDelay = 0.0
            farStdAv = 0.0
            closeStdAv = 0.0
            truckStdAv = 0.0
            baseAv = 0.0

            def setUproadTruckDelay():
                """Record the far->truck transit time when plausible (< 5 s)."""
                nonlocal uproadTruckDelay
                if truckLastTrigger > uproadLastTrigger and \
                        truckLastTrigger - uproadLastTrigger < 5:
                    uproadTruckDelay = truckLastTrigger - uproadLastTrigger

            while IS_CAM_OK:
                try:
                    with timeout(seconds=3, error_message='FETCH_ERROR'):
                        frame = cam.fetch_buffer()
                except Exception:
                    IS_CAM_OK = False
                    print(
                        'CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!'
                    )
                    sendMessageToSlack(
                        'Streaming Camera has Failed - Restarting ...',
                        '#ff3300')
                    cv2.destroyWindow(WINDOW_NAME)
                    cam.stop_image_acquisition()
                    cam.destroy()
                    time.sleep(5)

                if IS_CAM_OK and frame.payload.components:
                    image = frame.payload.components[0].data
                    if LOG:
                        print(image)

                    # USER CONTROLS
                    user_input_key = cv2.waitKey(1)
                    if user_input_key == 113:    # q - show overlay
                        showLines = True
                    elif user_input_key == 97:   # a - hide overlay
                        showLines = False
                    elif user_input_key == 122:  # z
                        showYolo = True
                    elif user_input_key == 120:  # x
                        showYolo = False
                    elif user_input_key == 119:  # w - day mode
                        MODE = "DAY"
                        setThresholds("DAY", factor)
                        # CHANGE EXPOSURE AND GAIN
                    elif user_input_key == 115:  # s - night mode
                        MODE = "NIGHT"
                        setThresholds("NIGHT", factor)
                    elif user_input_key == 101:  # e (original comment said 'o')
                        CLOSE_TRIGGER_METHOD = "CALC"
                    elif user_input_key == 100:  # d (original comment said 'l')
                        CLOSE_TRIGGER_METHOD = "DELAY"

                    frameScaled = cv2.resize(image,
                                             dsize=(baseRes,
                                                    int(baseRes * scale)),
                                             interpolation=cv2.INTER_CUBIC)
                    frameColorised = cv2.cvtColor(frameScaled,
                                                  cv2.COLOR_BayerRG2RGB)

                    # Slice the watch regions out of the (still raw/Bayer)
                    # scaled frame: rows = center[0] +/- halfWidth,
                    # cols = center[1] .. center[1] + depth.
                    triggerBoxFar = frameScaled[
                        farBoxCenter[0] - farBoxWidth:
                        farBoxCenter[0] + farBoxWidth,
                        farBoxCenter[1]:farBoxCenter[1] + farBoxHeight]
                    triggerBoxTruck = frameScaled[
                        truckBoxCenter[0] - truckBoxWidth:
                        truckBoxCenter[0] + truckBoxWidth,
                        truckBoxCenter[1]:truckBoxCenter[1] + truckBoxHeight]
                    triggerBoxClose = frameScaled[
                        closeBoxCenter[0] - closeBoxWidth:
                        closeBoxCenter[0] + closeBoxWidth,
                        closeBoxCenter[1]:closeBoxCenter[1] + closeBoxHeight]
                    baseAvBox = frameScaled[
                        baseValueCenter - baseValueWidth:
                        baseValueCenter + baseValueWidth,
                        baseValueThresh:baseValueThresh + baseValueHeight]

                    # ARRAY METRICS FOR TRIGGERING (despite the *Std names
                    # these are means, kept for continuity with the logs)
                    triggerBoxFarStd = np.mean(triggerBoxFar)
                    triggerBoxTruckStd = np.mean(triggerBoxTruck)
                    triggerBoxCloseStd = np.mean(triggerBoxClose)
                    # BUG FIX: the original averaged triggerBoxClose a
                    # second time here; baseAvBox was computed but unused.
                    baseAvStd = np.mean(baseAvBox)

                    # numberOfFrames-frame floating averages
                    farStdAv = (farStdAv * (numberOfFrames - 1)
                                + triggerBoxFarStd) / numberOfFrames
                    truckStdAv = (truckStdAv * (numberOfFrames - 1)
                                  + triggerBoxTruckStd) / numberOfFrames
                    closeStdAv = (closeStdAv * (numberOfFrames - 1)
                                  + triggerBoxCloseStd) / numberOfFrames
                    baseAv = (baseAv * (numberOfFrames - 1)
                              + baseAvStd) / numberOfFrames

                    farDiff = abs(farStdAv - triggerBoxFarStd)
                    truckDiff = abs(truckStdAv - triggerBoxTruckStd)
                    closeDiff = abs(closeStdAv - triggerBoxCloseStd)
                    currentTime = time.time()

                    # Clear/occupied latch per region (drives overlay colour)
                    if isFarClear and farDiff > sdThreshold:
                        isFarClear = False
                    elif not isFarClear and \
                            (currentTime - uproadLastTrigger) > triggerDelay:
                        isFarClear = True

                    if isTruckClear and truckDiff > tsdThreshold:
                        isTruckClear = False
                    elif not isTruckClear and \
                            (currentTime - truckLastTrigger) > triggerDelay:
                        isTruckClear = True

                    if isCloseClear and closeDiff > csdThreshold:
                        isCloseClear = False
                    elif not isCloseClear and \
                            (currentTime - closeLastTrigger) > triggerDelay:
                        isCloseClear = True

                    # Fire flashes.  The 20 s warm-up lets the floating
                    # averages settle before any trigger is allowed.
                    warmedUp = currentTime - startTime > 20
                    if warmedUp and farDiff > sdThreshold and \
                            (currentTime - uproadLastTrigger) > triggerDelay:
                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                        numberFar += 1
                        uproadLastTrigger = currentTime
                    if warmedUp and truckDiff > tsdThreshold and \
                            (currentTime - truckLastTrigger) > triggerDelay:
                        urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()
                        numberTruck += 1
                        truckLastTrigger = currentTime
                        setUproadTruckDelay()
                    if warmedUp and closeDiff > csdThreshold and \
                            (currentTime - closeLastTrigger) > triggerDelay:
                        urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                        numberClose += 1
                        closeLastTrigger = currentTime

                    # SHOW LINES SECTION
                    if showLines and MODE == "DAY":
                        drawTriggerBox(frameColorised, farBoxCenter,
                                       farBoxWidth, farBoxHeight, isFarClear)
                        drawTriggerBox(frameColorised, truckBoxCenter,
                                       truckBoxWidth, truckBoxHeight,
                                       isTruckClear)
                        drawTriggerBox(frameColorised, closeBoxCenter,
                                       closeBoxWidth, closeBoxHeight,
                                       isCloseClear)

                    # DISPLAY FRAME IN WINDOW
                    if IS_ROTATE:
                        cv2.imshow(WINDOW_NAME, np.rot90(frameColorised))
                    else:
                        cv2.imshow(WINDOW_NAME, frameColorised)

                    frame.queue()  # hand the buffer back to the driver
                    cv2.waitKey(1)
                    avFrameRate = avFrameRate * 49 / 50 + int(
                        1.0 / (time.time() - lastTime)) / 50
                    # per-frame status line (original gated on
                    # frameCount % 1 == 0, which is always true)
                    print("CF", numberFar, "CT", numberTruck, "CC",
                          numberClose, "avFPS", int(avFrameRate), "FV",
                          int(triggerBoxFarStd), "TV",
                          int(triggerBoxTruckStd), "CV",
                          int(triggerBoxCloseStd))
                    lastTime = time.time()
                    frameCount += 1

            # IF CAM NOT OK: the fetch handler above already stopped and
            # destroyed the camera, so just fall through and reconnect.
            # (The original stopped/destroyed it a second time here, which
            # raised and cost an extra 5 s sleep in the outer handler.)
        except Exception as e:
            print("Critical Script error! Trying again in 5 seconds ...")
            print(e)
            time.sleep(5)  # back off, then retry the whole connection
Ejemplo n.º 8
0
# Stand-alone smoke-test script: connect to one GigE camera through the
# Harvester GenICam wrapper and fetch a single buffer to prove the
# producer/camera path works.
from harvesters.core import Harvester

h = Harvester()

# Baumer GenTL producer; the alternative Daheng producer is kept below
# for reference.
h.add_cti_file('/usr/local/lib/baumer/libbgapi2_gige.cti')
#h.add_cti_file('/home/server/Desktop/ctifiles/GxGVTL.cti')

h.update_device_info_list()

print(h.device_info_list)

# Hard-coded serial number of the test camera.
iam = h.create_image_acquisition_manager(serial_number='QG0170070016')

iam.start_image_acquisition()

buffer = iam.fetch_buffer()

# NOTE(review): the buffer is never re-queued (buffer.queue()) and the
# acquisition is never stopped/destroyed — acceptable for a throwaway
# smoke test, but leaks the buffer if reused elsewhere.
print(buffer)
def worker(camId):
    """Bright-blob tracking worker for one camera (runs forever).

    Grabs frames via Harvester, thresholds bright spots, labels the
    connected components, and fires flash-trigger URLs when a blob centre
    crosses one of the configured x-thresholds (trigger logic is active
    for CAM_2 only).

    Depends on module-level names: CAM_CONFIG, LOG, Harvester, cv2, np,
    measure (skimage), imutils, TRIGGER_FAR_FLASH_URL,
    TRIGGER_TRUCK_FLASH_URL, TRIGGER_CLOSE_URL.
    """

    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']
    h = Harvester()
    h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
    h.update_device_info_list()
    try:
        cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
        print("Camera found")

    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        print("Camera Not Found")
        exit()

    cam.start_image_acquisition()

    # per-run state
    lastTime = time.time()
    transposeTime = 0  # unused
    frame = 0  # processed-frame counter
    numberCars = 0
    lastSnapshot = None  # unused
    cv2.namedWindow(WINDOW_NAME, flags=0)

    # colour palette (only referenced by the commented-out YOLO section)
    carColor = (255, 0, 0)
    busColor = (0, 255, 0)
    truckColor = (0, 0, 255)
    phoneColor = (0, 255, 255)
    baseColor = (255, 255, 255)

    baseRes = 300
    scale = 800 / 1920

    #as percentages

    # trigger-line x-positions at the 320-px reference resolution
    uproadThresh = 295
    truckThresh = 220
    closeThresh = 170
    extraThresh = 50
    leftBound = 50
    leftBound2 = 70
    rightBound = 100
    rightBound2 = 125
    marginOfError = 10

    # rescale everything to the working resolution (baseRes)
    factor = baseRes / 320
    uproadThresh = int(uproadThresh * factor)
    truckThresh = int(truckThresh * factor)
    closeThresh = int(closeThresh * factor)
    # NOTE(review): the literals 50 and 125 below duplicate extraThresh
    # and rightBound2 — same values today, but fragile if edited.
    extraThresh = int(50 * factor)
    leftBound = int(leftBound * factor)
    leftBound2 = int(leftBound2 * factor)
    rightBound = int(rightBound * factor)
    rightBound2 = int(125 * factor)

    showLines = False
    showYolo = False

    # minimum seconds between two firings of the same trigger
    triggerDelay = 0.250
    uproadLastTrigger = time.time()
    truckLastTrigger = time.time()
    closeLastTrigger = time.time()

    while (True):
        print('Fetching Buffer!')
        buffer = cam.fetch_buffer()
        print('Fetched Buffer!')
        payload = buffer.payload.components
        if LOG:
            print(payload)
        if (payload):
            image = payload[0].data
            # Resize, then build a grey copy for thresholding; the colour
            # demosaic is only needed when something will be displayed.
            if showLines or showYolo:
                conver = cv2.resize(image,
                                    dsize=(baseRes, int(baseRes * scale)),
                                    interpolation=cv2.INTER_CUBIC)
                small = cv2.cvtColor(conver, cv2.COLOR_BayerRG2RGB)
                rgb = cv2.cvtColor(conver, cv2.COLOR_BayerRG2GRAY)
            else:
                small = cv2.resize(image,
                                   dsize=(baseRes, int(baseRes * scale)),
                                   interpolation=cv2.INTER_CUBIC)
                rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2GRAY)

            #blurred = cv2.GaussianBlur(rgb, (11, 11), 0)
            # binary mask of bright pixels (value > 200)
            thresh = cv2.threshold(rgb, 200, 255, cv2.THRESH_BINARY)[1]
            #thresh = cv2.erode(thresh, None, iterations=2)
            #thresh = cv2.dilate(thresh, None, iterations=4)

            # NOTE(review): `neighbors=` was removed in newer scikit-image;
            # the modern equivalent is connectivity=2 — confirm the pinned
            # version before upgrading.
            labels = measure.label(thresh, neighbors=8, background=0)
            mask = np.zeros(thresh.shape, dtype="uint8")

            if LOG:
                print(labels)
            # loop over the unique components
            for label in np.unique(labels):
                # if this is the background label, ignore it
                if label == 0:
                    continue
                # add every non-background component to the mask
                # (the size filter below is disabled)
                labelMask = np.zeros(thresh.shape, dtype="uint8")
                labelMask[labels == label] = 255
                mask = cv2.add(mask, labelMask)
                """ # otherwise, construct the label mask and count the
                # number of pixels 
                labelMask = np.zeros(thresh.shape, dtype="uint8")
                labelMask[labels == label] = 255

                numPixels = cv2.countNonZero(labelMask)
            
                # if the number of pixels in the component is sufficiently
                # large, then add it to our mask of "large blobs"
                if numPixels > 300:
                    mask = cv2.add(mask, labelMask) """
            if len(np.unique(labels)) > 0:
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                # NOTE(review): this cv2/cv3 split breaks on OpenCV 4
                # (which returns 2 values again) — imutils.grab_contours
                # handles all versions; verify the pinned OpenCV first.
                cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                #cnts = contours.sort_contours(cnts)[0]

                # loop over the contours
                for (i, c) in enumerate(cnts):
                    currentTime = time.time()
                    # draw the bright spot on the image
                    # NOTE(review): `h` here shadows the Harvester instance
                    # above (harmless — `h` is not used again after setup).
                    (x, y, w, h) = cv2.boundingRect(c)
                    ((cX, cY), radius) = cv2.minEnclosingCircle(c)
                    if showYolo:
                        cv2.circle(small, (int(cX), int(cY)), int(5),
                                   (0, 0, 255), 3)
                    # trigger logic: blob centre within the vertical band
                    # and within marginOfError of a trigger line, rate
                    # limited by triggerDelay per line (CAM_2 only)
                    if cY <= rightBound and cY >= leftBound and camId == 'CAM_2':
                        if cX >= uproadThresh - marginOfError and cX <= uproadThresh + marginOfError and (
                                currentTime - uproadLastTrigger
                        ) > triggerDelay and cY >= leftBound2:
                            urllib.request.urlopen(
                                TRIGGER_FAR_FLASH_URL).read()
                            uproadLastTrigger = currentTime
                            numberCars += 1
                        if cX >= truckThresh - marginOfError and cX <= truckThresh + marginOfError and (
                                currentTime - truckLastTrigger) > triggerDelay:
                            urllib.request.urlopen(
                                TRIGGER_TRUCK_FLASH_URL).read()
                            truckLastTrigger = currentTime
                        if cX >= closeThresh - marginOfError and cX <= closeThresh + marginOfError and (
                                currentTime - closeLastTrigger) > triggerDelay:
                            urllib.request.urlopen(TRIGGER_CLOSE_URL).read()
                            closeLastTrigger = currentTime

            # show the output image
            # cv2.imshow("Image", rgb)
            k = cv2.waitKey(1)
            # NOTE(review): shape[1] is the width and shape[0] the height,
            # so these names are swapped relative to convention; the
            # drawing code below uses them consistently with this swap.
            h1, w1 = small.shape[1], small.shape[0]

            if k == 113:  # q — show overlay (comment said Esc; 113 is 'q')
                showLines = True
            elif k == 97:  # a — hide overlay
                showLines = False
            elif k == 122:  # z — show blob markers
                showYolo = True
            elif k == 120:  # x — hide blob markers
                showYolo = False

            # overlay the trigger lines for the main (CAM_2) view
            if showLines and camId == 'CAM_2':
                cv2.line(small, (uproadThresh, 0), (uproadThresh, w1),
                         (255, 255, 0), 1)
                cv2.putText(small, 'Up-Road', (uproadThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                cv2.line(small, (truckThresh, 0), (truckThresh, w1),
                         (255, 255, 0), 1)
                cv2.putText(small, 'Truck', (truckThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                cv2.line(small, (closeThresh, 0), (closeThresh, w1),
                         (255, 255, 0), 1)
                cv2.putText(small, 'Close', (closeThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                cv2.line(small, (0, rightBound), (h1, rightBound),
                         (255, 255, 255), 1)
                cv2.line(small, (0, leftBound), (h1, leftBound),
                         (255, 255, 255), 1)
                cv2.line(small, (0, leftBound2), (h1, leftBound2),
                         (255, 0, 255), 1)

            # CAM_1 only gets the single extra line
            if showLines and camId == 'CAM_1':
                cv2.line(small, (extraThresh, 0), (extraThresh, w1),
                         (255, 255, 0), 1)
                cv2.putText(small, 'Up-Road', (extraThresh, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0))

                #cv2.line(rgb, (0,rightBound2), (h1, rightBound2), (255,255,255), 1)

            # leftover scaffolding from a disabled YOLO prediction path
            bounds = 100
            results = []
            '''predictions = []
            for output in predictions:
                left, right, top, bottom, what, prob = output['left'],output['right'],output['top'],output['bottom'],output['class'],output['prob']
                #print(output)
                #lastSnapshot = snapshot.copy()
                #cv2.imshow("Snapshots", lastSnapshot)
                if( what == 'car' ):
                    print(output)
                    numberCars += 1
                    cv2.rectangle(rgb, (50,50), (100,150), (255, 255, 255), 20)
                    if ( camId =="CAM_2" ):
                        urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                        urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                        urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()'''

            if IS_ROTATE:
                cv2.imshow(WINDOW_NAME, np.rot90(small))
            else:
                cv2.imshow(WINDOW_NAME, small)

            cv2.waitKey(1)
            # return the buffer to the driver so acquisition can continue
            buffer.queue()

            print("Count: ", numberCars, " Frame: ", frame, " FPS: ",
                  1.0 / (time.time() - lastTime))
            print('getting time')
            lastTime = time.time()
            print('setting time')
            frame += 1
            print('setting frame')

    # NOTE(review): unreachable — the `while (True)` above never exits,
    # so the camera is never cleanly stopped.
    cam.stop_image_acquisition()
    cam.destroy()