class TestHarvesterCoreBase(unittest.TestCase):
    _cti_file_path = get_cti_file_path()
    sys.path.append(_cti_file_path)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._harvester = None
        self._ia = None
        self._thread = None
        self._logger = get_logger(name='harvesters', level=DEBUG)
        self._buffers = []

    def setUp(self):
        super().setUp()

        self._harvester = Harvester(logger=self._logger)
        self._harvester.add_cti_file(self._cti_file_path)
        self._harvester.update_device_info_list()

    def tearDown(self):
        if self.ia:
            self.ia.destroy()
        self._harvester.reset()
        self._ia = None
        super().tearDown()

    @property
    def harvester(self):
        return self._harvester

    @property
    def ia(self):
        return self._ia

    @ia.setter
    def ia(self, value):
        self._ia = value

    @property
    def general_purpose_thread(self):
        return self._thread

    @general_purpose_thread.setter
    def general_purpose_thread(self, value):
        self._thread = value

    def is_running_with_default_target(self):
        return 'TLSimu.cti' in self._cti_file_path
def main(unused_argv):
    if not create_output_dir(
            os.path.join(flags.FLAGS.local_data_dir, IMAGE_DIR_NAME)):
        print("Cannot create output annotations directory.")
        return

    use_s3 = flags.FLAGS.s3_bucket_name is not None
    if use_s3:
        if not s3_bucket_exists(flags.FLAGS.s3_bucket_name):
            use_s3 = False
            print("Bucket: %s either does not exist or you do not have access to it"
                  % flags.FLAGS.s3_bucket_name)
        else:
            print("Bucket: %s exists and you have access to it"
                  % flags.FLAGS.s3_bucket_name)

    h = Harvester()
    h.add_cti_file(flags.FLAGS.gentl_producer_path)
    if len(h.cti_files) == 0:
        print("No valid cti file found at %s" % flags.FLAGS.gentl_producer_path)
        h.reset()
        return
    print("Currently available genTL Producer CTI files: ", h.cti_files)

    h.update_device_info_list()
    if len(h.device_info_list) == 0:
        print("No compatible devices detected.")
        h.reset()
        return
    print("Available devices List: ", h.device_info_list)
    print("Using device: ", h.device_info_list[0])

    cam = h.create_image_acquirer(list_index=0)
    apply_camera_settings(cam)

    save_queue = queue.Queue()
    save_thread = threading.Thread(target=save_images, args=(save_queue, use_s3))
    save_thread.start()

    acquire_images(cam, save_queue)
    save_thread.join()

    # Clean up.
    cam.destroy()
    h.reset()
    print("Exiting.")
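`apply_camera_settings`, `acquire_images`, and `save_images` are defined elsewhere in this script; purely as an illustration of the consumer side of `save_queue`, a hedged sketch is given below. The item layout, the `None` sentinel, and the S3 handling are assumptions, not taken from the snippet.

# Hypothetical sketch of the queue consumer; the real save_images() lives
# elsewhere. Assumes the producer enqueues (file_name, image) tuples and a
# final None sentinel when acquisition ends.
import cv2

def save_images_sketch(save_queue, use_s3):
    while True:
        item = save_queue.get()
        if item is None:           # sentinel: producer has finished
            break
        file_name, image = item
        cv2.imwrite(file_name, image)
        # If use_s3 is True, the real implementation would also upload
        # file_name to the configured bucket here.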
class TestTutorials2(unittest.TestCase):
    def test_traversable_tutorial(self):
        # Create a Harvester object:
        self.harvester = Harvester()

        # The following block is just for administrative purpose;
        # you should not include it in your code:
        cti_file_path = get_cti_file_path()
        if 'TLSimu.cti' not in cti_file_path:
            return

        # Add a CTI file path:
        self.harvester.add_cti_file(cti_file_path)
        self.harvester.update_device_info_list()

        # Connect to the first camera in the list:
        self.ia = self.harvester.create_image_acquirer(0)

        num_images_to_acquire = 0

        # Then start image acquisition:
        self.ia.start_image_acquisition()

        while num_images_to_acquire < 100:
            with self.ia.fetch_buffer() as buffer:
                # self.do_something(buffer)
                pass
            num_images_to_acquire += 1

        # We don't need the ImageAcquirer object. Destroy it:
        self.ia.destroy()
class TestTutorials2(unittest.TestCase):
    def test_traversable_tutorial(self):
        # Create a Harvester object:
        self.harvester = Harvester()

        # Add a CTI file path:
        self.harvester.add_cti_file(get_cti_file_path())
        self.harvester.update_device_info_list()

        # Connect to the first camera in the list:
        self.ia = self.harvester.create_image_acquirer(0)

        num_images_to_acquire = 0

        # Then start image acquisition:
        self.ia.start_image_acquisition()

        while num_images_to_acquire < 100:
            with self.ia.fetch_buffer() as buffer:
                # self.do_something(buffer)
                pass
            num_images_to_acquire += 1

        # We don't need the ImageAcquirer object. Destroy it:
        self.ia.destroy()
class TestHarvesterCoreBase(unittest.TestCase):
    _path = 'C:/Users/z1533tel/dev/genicam/bin/Win64_x64/' \
        if is_running_on_windows() else \
        '/Users/kznr/dev/genicam/bin/Maci64_x64/'
    _filename = 'TLSimu.cti'
    sys.path.append(_path)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._harvester = None
        self._iam = None
        self._thread = None

    def setUp(self):
        super().setUp()

        self._harvester = Harvester()
        self._harvester.add_cti_file(self._path + self._filename)
        self._harvester.update_device_info_list()
        self._iam = None
        self._thread = None

    def tearDown(self):
        if self.iam:
            self.iam.destroy()
        self._harvester.reset()
        super().tearDown()

    @property
    def harvester(self):
        return self._harvester

    @property
    def iam(self):
        return self._iam

    @iam.setter
    def iam(self, value):
        self._iam = value

    @property
    def general_purpose_thread(self):
        return self._thread

    @general_purpose_thread.setter
    def general_purpose_thread(self, value):
        self._thread = value
import sys
import traceback

import cv2

from harvesters.core import Harvester


def main():
    h = Harvester()
    h.add_cti_file('C:/Program Files/JAI/SDK/bin/JaiGevTL.cti')  # JaiUSB3vTL JaiGevTL
    print(h.cti_files)

    h.update_device_info_list()
    print("file", h.device_info_list)
    print("dty", list(dict.fromkeys(h.device_info_list)))
    # x = unique(h.device_info_list)
    # print("pp", x)
    print("devices end")

    ia = h.create_image_acquirer(0)
    print("dsssx", ia.device)
    print("infor", ia.device.node_map.device_info)
    # ia.device.node_map.PixelFormat.value = 'RGB12'
    # ia.device.node_map.RGB12Packed.value = 'RGB12'
    # ia.device.node_map.TestPattern = 'HorizontalColorBar'
    print(ia.is_acquiring())

    try:
        print("acqui")
        ia.start_image_acquisition()
        print("acquired", ia.is_acquiring())
        print("acqui 2", ia.fetch_buffer())  # note: this buffer is fetched but never requeued

        i = 0
        done = False
        while not done:
            with ia.fetch_buffer() as buffer:
                print("checkr 1")
                img = buffer.payload.components[0].data
                img = img.reshape(buffer.payload.components[0].height,
                                  buffer.payload.components[0].width)
                img_copy = cv2.cvtColor(img, cv2.COLOR_BayerRG2RGB)
                cv2.namedWindow("window", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
                cv2.imshow("window", img_copy)
                fps = ia.statistics.fps
                print("FPS: ", fps)
                if cv2.waitKey(10) == ord('q'):
                    done = True
                    print('break')
                i = i + 1
    except Exception:
        traceback.print_exc(file=sys.stdout)
    finally:
        ia.stop_image_acquisition()
        ia.destroy()
        cv2.destroyAllWindows()
        print('fin')
        h.reset()
def camera_stream():
    global outputFrame, lock

    h = Harvester()
    h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
    h.update_device_info_list()
    ia = h.create_image_acquirer(0)
    # ia.device.node_map.PixelFormat.value = 'BayerRG8'
    # ia.device.node_map.TestPattern = 'HorizontalColorBar'

    try:
        ia.start_image_acquisition()
        i = 0
        done = False
        while not done:
            with ia.fetch_buffer() as buffer:
                img = buffer.payload.components[0].data
                img = img.reshape(buffer.payload.components[0].height,
                                  buffer.payload.components[0].width)
                img_copy = cv2.cvtColor(img, cv2.COLOR_BayerRG2RGB)
                if i == 0:
                    first = img_copy.copy()
                change = np.allclose(first, img_copy, 3)
                if not change:
                    # cv2.namedWindow("window", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
                    # cv2.imshow("window", img_copy)
                    # cv2.imwrite(f'./images/image_{i}.png', img_copy)
                    outputFrame = img_copy.copy()
                    print(outputFrame)
                    first = img_copy.copy()
                if cv2.waitKey(10) == ord('q'):
                    fps = ia.statistics.fps
                    print("FPS: ", fps)
                    done = True
                    print('break')
                i = i + 1
                if i == 500:
                    break
    except Exception:
        traceback.print_exc(file=sys.stdout)
    finally:
        ia.stop_image_acquisition()
        ia.destroy()
        cv2.destroyAllWindows()
        print('fin')
        h.reset()
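`camera_stream` only fills the module-level `outputFrame` declared above; a hedged sketch of a matching reader that turns the latest frame into an MJPEG stream follows. The function name, sleep interval, and the use of `lock` around reads are assumptions (the snippet itself never acquires the lock when writing).

# Hypothetical reader sketch (not part of the snippet above): encode the most
# recent outputFrame as JPEG for an MJPEG response, guarding access with the
# same module-level lock that camera_stream() declares.
import time

def stream_frames():
    global outputFrame, lock
    while True:
        with lock:
            if outputFrame is None:
                frame = None
            else:
                ok, encoded = cv2.imencode('.jpg', outputFrame)
                frame = encoded.tobytes() if ok else None
        if frame is None:
            time.sleep(0.01)      # nothing new yet; avoid a busy loop
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')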
def run(self):
    h = Harvester()
    h.add_cti_file(self._cti_file_path)
    h.update_device_info_list()
    try:
        ia = h.create_image_acquirer(0)
    except:
        # Transfer the exception anyway:
        self._message_queue.put(sys.exc_info())
    else:
        ia.start_image_acquisition()
        ia.stop_image_acquisition()
        ia.destroy()
    h.reset()
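For context, a minimal sketch of the launching side that re-raises an exception transferred through the queue this way; the `WorkerThread` wrapper and queue names are assumptions, not part of the snippet.

# Hypothetical caller-side sketch: re-raise an exception that the worker
# thread transferred through the message queue via sys.exc_info().
import queue

message_queue = queue.Queue()
worker = WorkerThread(cti_file_path, message_queue)   # hypothetical wrapper around run()
worker.start()
worker.join()

try:
    exc_type, exc_value, exc_traceback = message_queue.get_nowait()
    raise exc_value.with_traceback(exc_traceback)
except queue.Empty:
    pass  # the worker finished without transferring an exception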
def create(observer, sc=None):
    h = Harvester()
    h.add_cti_file(
        "C:\\Program Files\\MATRIX VISION\\mvIMPACT Acquire\\bin\\x64\\mvGenTLProducer.cti"
    )
    h.update_device_info_list()
    acquirer = h.create_image_acquirer(0)
    print("create")

    def on_new_buffer_arrival():
        buffer = acquirer.fetch_buffer()
        print("buffer")
        observer.on_next(buffer)
        buffer.queue()

    def dispose():
        # scheduler.NewThreadScheduler().schedule(lambda *args: acquirer.stop_image_acquisition())
        acquirer.stop_image_acquisition()

    acquirer.on_new_buffer_arrival = on_new_buffer_arrival
    acquirer.start_image_acquisition()
    return Disposable(dispose)
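A short usage sketch, assuming RxPY 3.x (`rx.create` accepts a subscription function with this `(observer, scheduler)` signature); the on_next handler is illustrative only. Note that the subscriber runs synchronously inside `on_new_buffer_arrival`, before the buffer is requeued.

# Sketch, assuming RxPY 3.x: wrap the subscription function above in an
# Observable and print the payload size of each buffer it pushes.
import rx

source = rx.create(create)
subscription = source.subscribe(
    on_next=lambda buffer: print(len(buffer.payload.components[0].data)),
    on_error=lambda e: print("error:", e),
)

# ... later, stop acquisition via the Disposable returned by create():
subscription.dispose()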
class Camera(QtCore.QObject):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.harvester = Harvester()
        self.acquirer = None

    def load_cti_from_env(self):
        if arch.is_64bits:
            env_name = "GENICAM_GENTL64_PATH"
        else:
            env_name = "GENICAM_GENTL32_PATH"
        genicam_paths = os.environ[env_name].split(os.pathsep)
        # os.path.join() expects a single path, so collect the CTI files from
        # each directory on the GenTL path individually.
        cti_files = []
        for path in genicam_paths:
            cti_files.extend(glob.glob(os.path.join(path, "*.cti")))
        for cti in cti_files:
            self.harvester.add_cti_file(cti)

    def reload_device(self):
        self.harvester.update_device_info_list()

    def list_devices(self, reload=False) -> DeviceInfoList:
        if reload:
            self.reload_device()
        return self.harvester.device_info_list

    def use_device(self, cam_id, cfg_file):
        devices = self.list_devices()
        self.acquirer = self.harvester.create_image_acquirer(id_=cam_id)
        parser = configparser.ConfigParser()
        parser.read(cfg_file)
        config: dict = parser["DEVICE_CONFIG"]
        for k, v in config.items():
            node_map: NodeMap = self.acquirer.device.node_map
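The `use_device` loop above stops before it writes anything to the node map; a hedged sketch of what the body might do with each key/value pair is given below. It assumes the INI keys are GenICam feature names and that simple type coercion from the node's current value is sufficient.

# Hypothetical continuation of use_device(): write each config entry to the
# matching GenICam feature. The key names and coercion rules are assumptions.
for k, v in config.items():
    node = self.acquirer.device.node_map.get_node(k)
    try:
        node.value = type(node.value)(v)   # coerce the INI string to the node's type
    except (ValueError, TypeError):
        node.value = v                     # fall back to the raw string (e.g. enumerations)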
from harvesters.core import Harvester
import numpy as np  # This is just for a demonstration.

h = Harvester()
h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
h.update_device_info_list()
print(h.device_info_list[0])
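A minimal sketch of how this demonstration typically continues, using only the calls that appear elsewhere in these snippets (create_image_acquirer, fetch_buffer, stop/destroy/reset); the assumption is that the producer above exposes at least one device delivering a single 2-D component.

# Sketch only: connect to the first device, grab one frame, then release everything.
ia = h.create_image_acquirer(0)          # first entry in device_info_list
ia.start_image_acquisition()

with ia.fetch_buffer() as buffer:        # the buffer is requeued when the block exits
    component = buffer.payload.components[0]
    frame = component.data.reshape(component.height, component.width).copy()

print(frame.shape)

ia.stop_image_acquisition()
ia.destroy()
h.reset()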
def mainWorker(camId): CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] # main worker event loop, will keep restarting on camera dropout #variable declarations lastTime = time.time() transposeTime = 0 frameCount = 0 avFrameRate = 0 numberCars = 0 numberClose = 0 numberFar = 0 numberTruck = 0 lastSnapshot = None baseColor = (255, 255, 255) baseRes = scaledRes scale = 800 / 1920 factor = baseRes / 320 showLines = False showYolo = False IS_CAM_OK = True MODE = "DAY" CLOSE_TRIGGER_METHOD = "DELAY" # DELAY, CALC # SET MODE BASED ON TIME # SET THRESHES LOCALLY uproadThresh = 0 truckThresh = 0 closeThresh = 0 extraThresh = 0 leftBound = 0 leftBound2 = 0 rightBound = 0 rightBound2 = 0 marginOfError = 0 isFarClear = True isTruckClear = True isCloseClear = True def setThresholds(MODE, factor): thresh = THRESHOLDS[MODE] nonlocal uproadThresh nonlocal truckThresh nonlocal closeThresh nonlocal extraThresh nonlocal leftBound nonlocal leftBound2 nonlocal rightBound nonlocal rightBound2 nonlocal marginOfError uproadThresh = int(thresh['uproadThresh'] * factor) truckThresh = int(thresh['truckThresh'] * factor) closeThresh = int(thresh['closeThresh'] * factor) extraThresh = int(thresh['extraThresh'] * factor) leftBound = int(thresh['leftBound'] * factor) leftBound2 = int(thresh['leftBound2'] * factor) rightBound = int(thresh['rightBound'] * factor) rightBound2 = int(thresh['rightBound2'] * factor) marginOfError = int(thresh['marginOfError'] * factor) setThresholds(MODE, factor) startTime = time.time() while (True): try: print( camId, "Creating harvester modules and loading genlcam producers ...") h = Harvester() h.add_cti_file(CTI_FILE) h.update_device_info_list() try: cam = h.create_image_acquisition_manager( serial_number=CAM_NAME) print("Camera found!") IS_CAM_OK = True except: print("Camera Not Found! Waiting 20 seconds and retrying ...") time.sleep(20) #sleep for 10 seconds and then retry! cam.stop_image_acquisition() cam.destroy() continue #exit () cam.start_image_acquisition() cv2.namedWindow(WINDOW_NAME, flags=0) # create dedicated stream window uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() uproadTruckDelay = 0.0 farStdAv = 0.0 closeStdAv = 0.0 truckStdAv = 0.0 baseAv = 0.0 def setUproadTruckDelay(): nonlocal uproadTruckDelay nonlocal uproadLastTrigger nonlocal truckLastTrigger if truckLastTrigger > uproadLastTrigger and truckLastTrigger - uproadLastTrigger < 5: uproadTruckDelay = truckLastTrigger - uproadLastTrigger while (IS_CAM_OK): #print(dir(cam.device.node_map)) try: with timeout(seconds=3, error_message='FETCH_ERROR'): frame = cam.fetch_buffer() except: IS_CAM_OK = False print( 'CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!' 
) sendMessageToSlack( 'Streaming Camera has Failed - Restarting ...', '#ff3300') cv2.destroyWindow(WINDOW_NAME) cam.stop_image_acquisition() cam.destroy() time.sleep(5) if (IS_CAM_OK and frame.payload.components): image = frame.payload.components[0].data if LOG: print(image) # USER CONTROLS user_input_key = cv2.waitKey(1) if user_input_key == 113: #q showLines = True elif user_input_key == 97: #a showLines = False elif user_input_key == 122: #z showYolo = True elif user_input_key == 120: #x showYolo = False elif user_input_key == 119: #w MODE = "DAY" setThresholds("DAY", factor) # CHANGE EXPOSURE AND GAIN elif user_input_key == 115: #s MODE = "NIGHT" setThresholds("NIGHT", factor) elif user_input_key == 101: #o CLOSE_TRIGGER_METHOD = "CALC" elif user_input_key == 100: #l CLOSE_TRIGGER_METHOD = "DELAY" frameScaled = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) frameColorised = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2RGB) c, h1, w1 = frameColorised.shape[2], frameColorised.shape[ 1], frameColorised.shape[0] boxWidth = 40 farBoxCenter = 97 farBoxWidth = 5 truckBoxCenter = 97 truckBoxWidth = 10 closeBoxCenter = 97 closeBoxWidth = 10 boxHeight = 10 closeBoxHeight = 15 baseValueCenter = 50 baseValueWidth = 3 baseValueHeight = 20 baseValueThresh = 50 farBoxCenter = [97, 298] # [97,285] farBoxWidth = 5 # 15 # 5 farBoxHeight = 10 truckBoxCenter = [97, 190] # [97, 155] truckBoxWidth = 10 #30 # 5 truckBoxHeight = 10 #10 closeBoxCenter = [97, 50] # [97, 70] closeBoxWidth = 15 #35 #5 closeBoxHeight = 15 #15 triggerBoxFar = frameScaled[ farBoxCenter[0] - farBoxWidth:farBoxCenter[0] + farBoxWidth, farBoxCenter[1]:farBoxCenter[1] + farBoxHeight] #frameScaled[uproadThresh:uproadThresh+boxHeight,farBoxCenter-farBoxWidth:farBoxCenter+farBoxWidth] triggerBoxTruck = frameScaled[ truckBoxCenter[0] - truckBoxWidth:truckBoxCenter[0] + truckBoxWidth, truckBoxCenter[1]:truckBoxCenter[1] + truckBoxHeight] #frameScaled[truckThresh:truckThresh+boxHeight,truckBoxCenter-truckBoxWidth:truckBoxCenter+truckBoxWidth] triggerBoxClose = frameScaled[ closeBoxCenter[0] - closeBoxWidth:closeBoxCenter[0] + closeBoxWidth, closeBoxCenter[1]:closeBoxCenter[1] + closeBoxHeight] #frameScaled[closeThresh:closeThresh+boxHeight,closeBoxCenter-closeBoxWidth:closeBoxCenter+closeBoxWidth] baseAvBox = frameScaled[ baseValueCenter - baseValueWidth:baseValueCenter + baseValueWidth, baseValueThresh:baseValueThresh + baseValueHeight] #frameScaled[closeThresh:closeThresh+boxHeight,closeBoxCenter-closeBoxWidth:closeBoxCenter+closeBoxWidth] # ARRAY METRICS FOR TRIGGERING triggerBoxFarStd = np.mean(triggerBoxFar) triggerBoxTruckStd = np.mean(triggerBoxTruck) triggerBoxCloseStd = np.mean(triggerBoxClose) baseAvStd = np.mean(triggerBoxClose) numberOfFrames = 200 farStdAv = farStdAv * ( numberOfFrames - 1 ) / numberOfFrames + triggerBoxFarStd / numberOfFrames # numberOfFrames frame floating average truckStdAv = truckStdAv * ( numberOfFrames - 1 ) / numberOfFrames + triggerBoxTruckStd / numberOfFrames # numberOfFrames frame floating average closeStdAv = closeStdAv * ( numberOfFrames - 1 ) / numberOfFrames + triggerBoxCloseStd / numberOfFrames # numberOfFrames frame floating average baseAv = baseAv * ( numberOfFrames - 1) / numberOfFrames + baseAvStd / numberOfFrames sdThreshold = 32 #30 #2 tsdThreshold = 32 #0.8 csdThreshold = 32 #0.3 farDiff = abs(farStdAv - triggerBoxFarStd) truckDiff = abs(truckStdAv - triggerBoxTruckStd) closeDiff = abs(closeStdAv - triggerBoxCloseStd) currentTime = time.time() if 
isFarClear and farDiff > sdThreshold: isFarClear = False elif isFarClear == False and ( currentTime - uproadLastTrigger) > triggerDelay: isFarClear = True if isTruckClear and truckDiff > tsdThreshold: isTruckClear = False elif isTruckClear == False and ( currentTime - truckLastTrigger) > triggerDelay: isTruckClear = True if isCloseClear and closeDiff > csdThreshold: isCloseClear = False elif isCloseClear == False and ( currentTime - closeLastTrigger) > triggerDelay: isCloseClear = True if currentTime - startTime > 20 and farDiff > sdThreshold and ( currentTime - uproadLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() numberFar += 1 uproadLastTrigger = currentTime if currentTime - startTime > 20 and truckDiff > tsdThreshold and ( currentTime - truckLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read() numberTruck += 1 truckLastTrigger = currentTime setUproadTruckDelay() if currentTime - startTime > 20 and closeDiff > csdThreshold and ( currentTime - closeLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read() numberClose += 1 closeLastTrigger = currentTime # SHOW LINES SECTION if showLines and MODE == "DAY": if isFarClear: cv2.rectangle(frameColorised, (farBoxCenter[1], farBoxCenter[0] - farBoxWidth), (farBoxCenter[1] + farBoxHeight, farBoxCenter[0] + farBoxWidth), (0, 255, 0)) else: cv2.rectangle(frameColorised, (farBoxCenter[1], farBoxCenter[0] - farBoxWidth), (farBoxCenter[1] + farBoxHeight, farBoxCenter[0] + farBoxWidth), (0, 0, 255)) if isTruckClear: cv2.rectangle(frameColorised, (truckBoxCenter[1], truckBoxCenter[0] - truckBoxWidth), (truckBoxCenter[1] + truckBoxHeight, truckBoxCenter[0] + truckBoxWidth), (0, 255, 0)) else: cv2.rectangle(frameColorised, (truckBoxCenter[1], truckBoxCenter[0] - truckBoxWidth), (truckBoxCenter[1] + truckBoxHeight, truckBoxCenter[0] + truckBoxWidth), (0, 0, 255)) if isCloseClear: cv2.rectangle(frameColorised, (closeBoxCenter[1], closeBoxCenter[0] - closeBoxWidth), (closeBoxCenter[1] + closeBoxHeight, closeBoxCenter[0] + closeBoxWidth), (0, 255, 0)) else: cv2.rectangle(frameColorised, (closeBoxCenter[1], closeBoxCenter[0] - closeBoxWidth), (closeBoxCenter[1] + closeBoxHeight, closeBoxCenter[0] + closeBoxWidth), (0, 0, 255)) # DISPLAY FRAME IN WINDOW if IS_ROTATE: cv2.imshow(WINDOW_NAME, np.rot90(frameColorised)) else: cv2.imshow(WINDOW_NAME, frameColorised) frame.queue() cv2.waitKey(1) avFrameRate = avFrameRate * 49 / 50 + int( 1.0 / (time.time() - lastTime)) / 50 if frameCount % 1 == 0: #print("mode:", MODE,"close mode:", CLOSE_TRIGGER_METHOD, "Count Far", numberFar, "Count Truck", numberTruck,"Count Close", numberClose,"avFPS:", avFrameRate ,"frame:", frameCount, "fps:", int(1.0/(time.time()-lastTime)),"trigger dif",uproadTruckDelay) print("CF", numberFar, "CT", numberTruck, "CC", numberClose, "avFPS", int(avFrameRate), "FV", int(triggerBoxFarStd), "TV", int(triggerBoxTruckStd), "CV", int(triggerBoxCloseStd)) lastTime = time.time() frameCount += 1 #IF CAM NOT OK cam.stop_image_acquisition() cam.destroy() except Exception as e: print("Critical Script error! Trying again in 5 seconds ...") print(e) time.sleep(5) #sleep for 10 seconds and then retry!
def worker(camId):
    pyyolo.init(darknet_path, datacfg, cfgfile, weightfile)

    CAM_NAME = CAM_CONFIG[camId]['name']
    WINDOW_NAME = CAM_CONFIG[camId]['window']
    IS_ROTATE = CAM_CONFIG[camId]['rotate']

    h = Harvester()
    h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
    h.update_device_info_list()
    try:
        cam = h.create_image_acquisition_manager(serial_number=CAM_NAME)
        print("Camera found")
    except:
        print("Camera Not Found")
        exit()

    cam.start_image_acquisition()

    lastTime = time.time()
    transposeTime = 0
    i = 0
    numberCars = 0
    lastSnapshot = None
    cv2.namedWindow(WINDOW_NAME, flags=0)

    while True:
        buffer = cam.fetch_buffer()
        image = buffer.payload.components[0].data
        small = cv2.resize(image, dsize=(320, 200), interpolation=cv2.INTER_CUBIC)
        clone = small.copy()
        rgb = cv2.cvtColor(clone, cv2.COLOR_BayerRG2RGB)
        # img = rgb.transpose(2, 0, 1)
        img = np.rot90(rgb)
        print(rgb.shape)
        # Note: this rebinds 'h' (the Harvester) to the image height.
        c, h, w = img.shape[0], img.shape[1], img.shape[2]
        data = img.ravel() / 255.0
        # data = np.ascontiguousarray(data, dtype=np.float32)
        predictions = pyyolo.detect(w, h, c, data, thresh, hier_thresh)

        for output in predictions:
            left, right, top, bottom, what, prob = (
                output['left'], output['right'], output['top'],
                output['bottom'], output['class'], output['prob'])
            if what == 'car':
                print(output)
                numberCars += 1
                cv2.rectangle(rgb, (50, 50), (100, 150), (0, 255, 0), 5)
                if camId == "CAM_2":
                    urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read()
                    urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read()
                    urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()

        if IS_ROTATE:
            cv2.imshow(WINDOW_NAME, np.rot90(rgb))
        else:
            cv2.imshow(WINDOW_NAME, rgb)
        cv2.waitKey(1)
        buffer.queue()

        print("Count: ", numberCars, " Frame: ", i, " FPS: ",
              1.0 / (time.time() - lastTime))
        lastTime = time.time()
        i += 1

    cam.stop_image_acquisition()
    cam.destroy()
def generate(previewName):
    """Video streaming generator function."""
    if previewName == 'harvesters':
        h = Harvester()
        h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti')
        h.update_device_info_list()
        ia = h.create_image_acquirer(0)
        ia.remote_device.node_map.ExposureTimeRaw.value = 20_000
        # ia.remote_device.node_map.PixelFormat.value = 'BayerRG8'
        # ia.remote_device.node_map.TestPattern = 'HorizontalColorBar'
        time.sleep(1)
        try:
            ia.start_image_acquisition()
            i = 0
            done = False
            while not done:
                with ia.fetch_buffer() as buffer:
                    img = buffer.payload.components[0].data
                    img = img.reshape(buffer.payload.components[0].height,
                                      buffer.payload.components[0].width)
                    img_copy = cv2.cvtColor(img, cv2.COLOR_BayerRG2RGB)
                    if i == 0:
                        first = img_copy.copy()
                    is_change = np.allclose(first, img_copy, 3)
                    if not is_change:
                        # cv2.imwrite(f'./images/image_{i}.png', img_copy)
                        img_copy_ = cv2.resize(img_copy, (640, 480))
                        frame = cv2.imencode('.jpg', img_copy_)[1].tobytes()
                        yield (b'--frame\r\n'
                               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
                        first = img_copy.copy()
                    if cv2.waitKey(10) == ord('q'):
                        fps = ia.statistics.fps
                        print("FPS: ", fps)
                        done = True
                        print('break')
                    i = i + 1
                    # if i == 200:
                    #     break
        except Exception:
            traceback.print_exc(file=sys.stdout)
        finally:
            ia.stop_image_acquisition()
            ia.destroy()
            print('fin')
            h.reset()
    else:
        cap = cv2.VideoCapture(0)
        # Read until the video is completed.
        while cap.isOpened():
            # Capture frame-by-frame.
            ret, img = cap.read()
            if ret:
                img = cv2.resize(img, (0, 0), fx=1, fy=1)
                frame = cv2.imencode('.jpg', img)[1].tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
                time.sleep(0.001)
            else:
                break
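A small sketch of the Flask side that usually consumes a generator like this one; the app object and route name are assumptions. The multipart MIME type is what makes the browser treat the response as a motion-JPEG stream.

# Hypothetical Flask route that streams the generator above as MJPEG.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed/<previewName>')
def video_feed(previewName):
    return Response(generate(previewName),
                    mimetype='multipart/x-mixed-replace; boundary=frame')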
def worker(camId): # declare Detector in isolation from Camera object for reinitiation of Camera under failure. # net = Detector(bytes(cfgfile, encoding="utf-8"), bytes(weightfile, encoding="utf-8"), 0, bytes(datacfg, encoding="utf-8")) CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] h = Harvester() h.add_cti_file(CTI_FILE) h.update_device_info_list() try: cam = h.create_image_acquisition_manager(serial_number=CAM_NAME) print("Camera found") except: print("Camera Not Found") exit() cam.start_image_acquisition() #variable declarations lastTime = time.time() transposeTime = 0 i = 0 numberCars = 0 lastSnapshot = None cv2.namedWindow(WINDOW_NAME, flags=0) carColor = (255, 0, 0) busColor = (0, 255, 0) truckColor = (0, 0, 255) phoneColor = (0, 255, 255) baseColor = (255, 255, 255) baseRes = 400 scale = 800 / 1920 #as percentages uproadThresh = 295 truckThresh = 230 closeThresh = 180 extraThresh = 50 leftBound = 50 leftBound2 = 70 rightBound = 125 rightBound2 = 125 marginOfError = 20 factor = baseRes / 320 uproadThresh = int(uproadThresh * factor) truckThresh = int(truckThresh * factor) closeThresh = int(closeThresh * factor) extraThresh = int(50 * factor) leftBound = int(leftBound * factor) leftBound2 = int(leftBound2 * factor) rightBound = int(125 * factor) rightBound2 = int(125 * factor) showLines = False showYolo = False triggerDelay = 0.250 uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() while (True): buffer = cam.fetch_buffer() #cam fails here often! payload = buffer.payload.components if LOG: print(payload) if (payload): image = payload[0].data if LOG: print(image) small = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2RGB) img = np.rot90(rgb, 1) c, h1, w1 = rgb.shape[2], rgb.shape[1], rgb.shape[0] img2 = Image(img) results = net.detect(img2) if LOG: print(results) k = cv2.waitKey(1) if k == 113: # Esc key to stop showLines = True elif k == 97: showLines = False elif k == 122: showYolo = True elif k == 120: showYolo = False if showLines and camId == 'CAM_2': cv2.line(rgb, (uproadThresh, 0), (uproadThresh, w1), (255, 255, 0), 1) cv2.line(rgb, (uproadThresh + marginOfError, 0), (uproadThresh + marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (uproadThresh - marginOfError, 0), (uproadThresh - marginOfError, w1), (255, 0, 0), 1) cv2.putText(rgb, 'Up-Road', (uproadThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(rgb, (truckThresh, 0), (truckThresh, w1), (255, 255, 0), 1) cv2.line(rgb, (truckThresh + marginOfError, 0), (truckThresh + marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (truckThresh - marginOfError, 0), (truckThresh - marginOfError, w1), (255, 0, 0), 1) cv2.putText(rgb, 'Truck', (truckThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(rgb, (closeThresh, 0), (closeThresh, w1), (255, 255, 0), 1) cv2.line(rgb, (closeThresh + marginOfError, 0), (closeThresh + marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (closeThresh - marginOfError, 0), (closeThresh - marginOfError, w1), (255, 0, 0), 1) cv2.putText(rgb, 'Close', (closeThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(rgb, (0, rightBound), (h1, rightBound), (255, 255, 255), 1) cv2.line(rgb, (0, leftBound2), (h1, leftBound2), (255, 255, 255), 1) cv2.line(rgb, (0, leftBound), (h1, leftBound), (255, 255, 255), 1) if showLines and camId == 'CAM_1': cv2.line(rgb, (extraThresh, 0), 
(extraThresh, w1), (255, 255, 0), 1) cv2.putText(rgb, 'Up-Road', (extraThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) #cv2.line(rgb, (0,rightBound2), (h1, rightBound2), (255,255,255), 1) bounds = 100 for cat, score, bounds in results: x, y, w, h = bounds x, y = (h1 - int(y), int(x)) x1, y1, x2, y2 = [ int(x - h / 2), int(y - w / 2), int(x + h / 2), int(y + w / 2) ] type = str(cat.decode("utf-8")) color = baseColor if (type == 'car'): color = carColor if (type == 'bus'): color = busColor if (type == 'truck'): color = truckColor if (type == 'phone'): color = phoneColor #x1,y1,x2,y2 = [int(x+w/2),int(y+h/2),int(x-w/2),int(y-h/2)] #cv2.rectangle(rgb, (int(x-w/2),int(y-h/2)),(int(x+w/2),int(y+h/2)),(255,0,0)) #cv2.line(rgb, (x1,y1), (x1, y2), color, 2) if showYolo and h > 5: #cv2.rectangle(rgb, (x1,y1),(x2,y2),color) #cv2.putText(rgb, str(cat.decode("utf-8")), (int(x), int(y)), cv2.FONT_HERSHEY_COMPLEX, 1, color) cv2.circle(rgb, (int(x), int(y)), int(2), (0, 255, 0), 3) currentTime = time.time() if y <= rightBound and camId == 'CAM_2' and h > 10 and w > 10: if x >= uproadThresh - 10 and x <= uproadThresh + 10 and y >= leftBound2 and ( currentTime - uproadLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() if LOG: print('FAR TRIG') uproadLastTrigger = currentTime #if x1<=truckThresh and x2>=truckThresh and (currentTime-truckLastTrigger)>triggerDelay: if x >= truckThresh - marginOfError and x <= truckThresh + marginOfError and y >= leftBound and ( currentTime - truckLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read() if LOG: print('TRUCK TRIG') numberCars += 1 truckLastTrigger = currentTime #if x1<=closeThresh and x2>=closeThresh and (currentTime-closeLastTrigger)>triggerDelay: if x >= closeThresh - marginOfError * 2 and x <= closeThresh + marginOfError * 2 and y >= leftBound and ( currentTime - closeLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_CLOSE_URL).read() if LOG: print('CLOSE TRIG') closeLastTrigger = currentTime if camId == 'CAM_1': if y1 <= rightBound2 and y2 >= rightBound2 and False: urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() numberCars += 1 '''predictions = [] for output in predictions: left, right, top, bottom, what, prob = output['left'],output['right'],output['top'],output['bottom'],output['class'],output['prob'] #print(output) #lastSnapshot = snapshot.copy() #cv2.imshow("Snapshots", lastSnapshot) if( what == 'car' ): print(output) numberCars += 1 cv2.rectangle(rgb, (50,50), (100,150), (255, 255, 255), 20) if ( camId =="CAM_2" ): urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read() urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()''' if IS_ROTATE: cv2.imshow(WINDOW_NAME, np.rot90(rgb)) else: cv2.imshow(WINDOW_NAME, rgb) cv2.waitKey(1) buffer.queue() print("Count: ", numberCars, " Frame: ", i, " FPS: ", 1.0 / (time.time() - lastTime)) lastTime = time.time() i += 1 cam.stop_image_acquisition() cam.destroy()
def worker(camId): CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] h = Harvester() h.add_cti_file('/opt/mvIMPACT_Acquire/lib/x86_64/mvGenTLProducer.cti') h.update_device_info_list() try: cam = h.create_image_acquisition_manager(serial_number=CAM_NAME) print("Camera found") except: print("Camera Not Found") exit() cam.start_image_acquisition() lastTime = time.time() transposeTime = 0 frame = 0 numberCars = 0 lastSnapshot = None cv2.namedWindow(WINDOW_NAME, flags=0) carColor = (255, 0, 0) busColor = (0, 255, 0) truckColor = (0, 0, 255) phoneColor = (0, 255, 255) baseColor = (255, 255, 255) baseRes = 300 scale = 800 / 1920 #as percentages uproadThresh = 295 truckThresh = 220 closeThresh = 170 extraThresh = 50 leftBound = 50 leftBound2 = 70 rightBound = 100 rightBound2 = 125 marginOfError = 10 factor = baseRes / 320 uproadThresh = int(uproadThresh * factor) truckThresh = int(truckThresh * factor) closeThresh = int(closeThresh * factor) extraThresh = int(50 * factor) leftBound = int(leftBound * factor) leftBound2 = int(leftBound2 * factor) rightBound = int(rightBound * factor) rightBound2 = int(125 * factor) showLines = False showYolo = False triggerDelay = 0.250 uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() while (True): print('Fetching Buffer!') buffer = cam.fetch_buffer() print('Fetched Buffer!') payload = buffer.payload.components if LOG: print(payload) if (payload): image = payload[0].data if showLines or showYolo: conver = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) small = cv2.cvtColor(conver, cv2.COLOR_BayerRG2RGB) rgb = cv2.cvtColor(conver, cv2.COLOR_BayerRG2GRAY) else: small = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2GRAY) #blurred = cv2.GaussianBlur(rgb, (11, 11), 0) thresh = cv2.threshold(rgb, 200, 255, cv2.THRESH_BINARY)[1] #thresh = cv2.erode(thresh, None, iterations=2) #thresh = cv2.dilate(thresh, None, iterations=4) labels = measure.label(thresh, neighbors=8, background=0) mask = np.zeros(thresh.shape, dtype="uint8") if LOG: print(labels) # loop over the unique components for label in np.unique(labels): # if this is the background label, ignore it if label == 0: continue labelMask = np.zeros(thresh.shape, dtype="uint8") labelMask[labels == label] = 255 mask = cv2.add(mask, labelMask) """ # otherwise, construct the label mask and count the # number of pixels labelMask = np.zeros(thresh.shape, dtype="uint8") labelMask[labels == label] = 255 numPixels = cv2.countNonZero(labelMask) # if the number of pixels in the component is sufficiently # large, then add it to our mask of "large blobs" if numPixels > 300: mask = cv2.add(mask, labelMask) """ if len(np.unique(labels)) > 0: cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if imutils.is_cv2() else cnts[1] #cnts = contours.sort_contours(cnts)[0] # loop over the contours for (i, c) in enumerate(cnts): currentTime = time.time() # draw the bright spot on the image (x, y, w, h) = cv2.boundingRect(c) ((cX, cY), radius) = cv2.minEnclosingCircle(c) if showYolo: cv2.circle(small, (int(cX), int(cY)), int(5), (0, 0, 255), 3) if cY <= rightBound and cY >= leftBound and camId == 'CAM_2': if cX >= uproadThresh - marginOfError and cX <= uproadThresh + marginOfError and ( currentTime - uproadLastTrigger ) > triggerDelay and cY >= leftBound2: 
urllib.request.urlopen( TRIGGER_FAR_FLASH_URL).read() uproadLastTrigger = currentTime numberCars += 1 if cX >= truckThresh - marginOfError and cX <= truckThresh + marginOfError and ( currentTime - truckLastTrigger) > triggerDelay: urllib.request.urlopen( TRIGGER_TRUCK_FLASH_URL).read() truckLastTrigger = currentTime if cX >= closeThresh - marginOfError and cX <= closeThresh + marginOfError and ( currentTime - closeLastTrigger) > triggerDelay: urllib.request.urlopen(TRIGGER_CLOSE_URL).read() closeLastTrigger = currentTime # show the output image # cv2.imshow("Image", rgb) k = cv2.waitKey(1) h1, w1 = small.shape[1], small.shape[0] if k == 113: # Esc key to stop showLines = True elif k == 97: showLines = False elif k == 122: showYolo = True elif k == 120: showYolo = False if showLines and camId == 'CAM_2': cv2.line(small, (uproadThresh, 0), (uproadThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Up-Road', (uproadThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(small, (truckThresh, 0), (truckThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Truck', (truckThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(small, (closeThresh, 0), (closeThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Close', (closeThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(small, (0, rightBound), (h1, rightBound), (255, 255, 255), 1) cv2.line(small, (0, leftBound), (h1, leftBound), (255, 255, 255), 1) cv2.line(small, (0, leftBound2), (h1, leftBound2), (255, 0, 255), 1) if showLines and camId == 'CAM_1': cv2.line(small, (extraThresh, 0), (extraThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Up-Road', (extraThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) #cv2.line(rgb, (0,rightBound2), (h1, rightBound2), (255,255,255), 1) bounds = 100 results = [] '''predictions = [] for output in predictions: left, right, top, bottom, what, prob = output['left'],output['right'],output['top'],output['bottom'],output['class'],output['prob'] #print(output) #lastSnapshot = snapshot.copy() #cv2.imshow("Snapshots", lastSnapshot) if( what == 'car' ): print(output) numberCars += 1 cv2.rectangle(rgb, (50,50), (100,150), (255, 255, 255), 20) if ( camId =="CAM_2" ): urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read() urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read()''' if IS_ROTATE: cv2.imshow(WINDOW_NAME, np.rot90(small)) else: cv2.imshow(WINDOW_NAME, small) cv2.waitKey(1) buffer.queue() print("Count: ", numberCars, " Frame: ", frame, " FPS: ", 1.0 / (time.time() - lastTime)) print('getting time') lastTime = time.time() print('setting time') frame += 1 print('setting frame') cam.stop_image_acquisition() cam.destroy()
def mainWorker(camId): # declare yolo detector in isolation from camera object for reinitiation of camera under failure. print(camId, ' Constructing Yolo Model ...') net = Detector(bytes(cfgfile, encoding="utf-8"), bytes(weightfile, encoding="utf-8"), 0, bytes(datacfg, encoding="utf-8")) CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] # main worker event loop, will keep restarting on camera dropout #variable declarations lastTime = time.time() transposeTime = 0 frameCount = 0 numberCars = 0 numberClose = 0 numberFar = 0 numberTruck = 0 lastSnapshot = None baseColor = (255,255,255) baseRes = scaledRes scale = 800/1920 factor = baseRes/320 showLines = False showYolo = False IS_CAM_OK = True MODE = "DAY" CLOSE_TRIGGER_METHOD = "DELAY" # DELAY, CALC # SET MODE BASED ON TIME # SET THRESHES LOCALLY uproadThresh = 0 truckThresh = 0 closeThresh = 0 extraThresh = 0 leftBound = 0 leftBound2 = 0 rightBound = 0 rightBound2 = 0 marginOfError = 0 def setThresholds(MODE, factor): thresh = THRESHOLDS[MODE] nonlocal uproadThresh nonlocal truckThresh nonlocal closeThresh nonlocal extraThresh nonlocal leftBound nonlocal leftBound2 nonlocal rightBound nonlocal rightBound2 nonlocal marginOfError uproadThresh = int(thresh['uproadThresh']*factor) truckThresh = int(thresh['truckThresh']*factor) closeThresh = int(thresh['closeThresh']*factor) extraThresh = int(thresh['extraThresh']*factor) leftBound = int(thresh['leftBound']*factor) leftBound2 = int(thresh['leftBound2']*factor) rightBound = int(thresh['rightBound']*factor) rightBound2 = int(thresh['rightBound2']*factor) marginOfError = int(thresh['marginOfError']*factor) setThresholds(MODE, factor) while(True): try: print(camId, "Creating harvester modules and loading genlcam producers ...") h = Harvester() h.add_cti_file(CTI_FILE) h.update_device_info_list() try: cam = h.create_image_acquisition_manager(serial_number=CAM_NAME) print ("Camera found!") IS_CAM_OK = True except: print ("Camera Not Found! Waiting 10 seconds and retrying ...") time.sleep(10) #sleep for 10 seconds and then retry! 
continue #exit () cam.start_image_acquisition() cv2.namedWindow(WINDOW_NAME, flags=0) # create dedicated stream window uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() uproadTruckDelay = 0.0 def setUproadTruckDelay(): nonlocal uproadTruckDelay nonlocal uproadLastTrigger nonlocal truckLastTrigger if truckLastTrigger > uproadLastTrigger and truckLastTrigger-uproadLastTrigger<5: uproadTruckDelay = truckLastTrigger-uproadLastTrigger while(IS_CAM_OK): #print(dir(cam.device.node_map)) try: with timeout(seconds=3, error_message='FETCH_ERROR'): frame = cam.fetch_buffer() except: IS_CAM_OK = False print('CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!') sendMessageToSlack('Streaming Camera has Failed - Restarting ...', '#ff3300') cam.stop_image_acquisition() cam.destroy() if(IS_CAM_OK and frame.payload.components): image = frame.payload.components[0].data if LOG: print(image) # USER CONTROLS user_input_key = cv2.waitKey(1) if user_input_key==113: #q showLines = True elif user_input_key==97: #a showLines = False elif user_input_key==122: #z showYolo = True elif user_input_key==120: #x showYolo = False elif user_input_key==119: #w MODE="DAY" setThresholds("DAY", factor) # CHANGE EXPOSURE AND GAIN elif user_input_key==115: #s MODE="NIGHT" setThresholds("NIGHT", factor) elif user_input_key==101: #o CLOSE_TRIGGER_METHOD = "CALC" elif user_input_key==100: #l CLOSE_TRIGGER_METHOD = "DELAY" frameScaled = cv2.resize(image, dsize=(baseRes, int(baseRes*scale)), interpolation=cv2.INTER_CUBIC) frameColorised = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2RGB) c, h1, w1 = frameColorised.shape[2], frameColorised.shape[1], frameColorised.shape[0] # SHOW LINES SECTION if showLines and camId=='CAM_1' and MODE=="DAY": cv2.line(frameColorised, (uproadThresh,0), (uproadThresh, w1), (255,255,0), 1) cv2.line(frameColorised, (uproadThresh+marginOfError,0), (uproadThresh+marginOfError, w1), (255,0,0), 1) cv2.line(frameColorised, (uproadThresh-marginOfError,0), (uproadThresh-marginOfError, w1), (255,0,0), 1) cv2.line(frameColorised, (truckThresh,0), (truckThresh, w1), (255,255,0), 1) cv2.line(frameColorised, (truckThresh+marginOfError,0), (truckThresh+marginOfError, w1), (255,0,0), 1) cv2.line(frameColorised, (truckThresh-marginOfError,0), (truckThresh-marginOfError, w1), (255,0,0), 1) cv2.line(frameColorised, (closeThresh,0), (closeThresh, w1), (255,255,0), 1) cv2.line(frameColorised, (closeThresh+marginOfError,0), (closeThresh+marginOfError, w1), (255,0,0), 1) cv2.line(frameColorised, (closeThresh-marginOfError,0), (closeThresh-marginOfError, w1), (255,0,0), 1) cv2.line(frameColorised, (0,rightBound), (h1, rightBound), (255,255,255), 1) cv2.line(frameColorised, (0,leftBound2), (h1, leftBound2), (255,255,255), 1) cv2.line(frameColorised, (0,leftBound), (h1, leftBound), (255,255,255), 1) if showLines and camId=='CAM_1' and MODE=="NIGHT": cv2.line(frameColorised, (uproadThresh,0), (uproadThresh, w1), (255,255,0), 1) cv2.line(frameColorised, (truckThresh,0), (truckThresh, w1), (255,255,0), 1) cv2.line(frameColorised, (closeThresh,0), (closeThresh, w1), (255,255,0), 1) cv2.line(frameColorised, (0,rightBound), (h1, rightBound), (255,255,255), 1) cv2.line(frameColorised, (0,leftBound), (h1, leftBound), (255,255,255), 1) cv2.line(frameColorised, (0,leftBound2), (h1, leftBound2), (255,0,255), 1) # PROCESSING SPECIFIC if MODE=="NIGHT": frameGray = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2GRAY) thresh = cv2.threshold(frameGray, grayThresh, 255, cv2.THRESH_BINARY)[1] labels = 
measure.label(thresh, neighbors=8, background=0) mask = np.zeros(thresh.shape, dtype="uint8") for label in np.unique(labels): # if this is the background label, ignore it if label == 0: continue labelMask = np.zeros(thresh.shape, dtype="uint8") labelMask[labels == label] = 255 mask = cv2.add(mask, labelMask) if len(np.unique(labels))>0: cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if imutils.is_cv2() else cnts[1] # loop over the contours for (i, c) in enumerate(cnts): currentTime = time.time() # draw the bright spot on the image (x, y, w, h) = cv2.boundingRect(c) ((cX, cY), radius) = cv2.minEnclosingCircle(c) if showYolo: cv2.circle(frameColorised, (int(cX), int(cY)), int(5), (0, 0, 255), 3) if cY <= rightBound and cY >= leftBound and camId=='CAM_1': if cX>=uproadThresh-marginOfError and cX<=uproadThresh+marginOfError and (currentTime-uproadLastTrigger)>triggerDelay and cY>=leftBound2: urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() uproadLastTrigger = currentTime numberCars += 1 if cX>=truckThresh-marginOfError and cX<=truckThresh+marginOfError and (currentTime-truckLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read() truckLastTrigger = currentTime setUproadTruckDelay() if cX>=closeThresh-marginOfError and cX<=closeThresh+marginOfError and (currentTime-closeLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read() closeLastTrigger = currentTime if MODE=="DAY": img = np.rot90(frameColorised, 1) img2 = Image(img) results = net.detect(img2, thresh=yolo_thresh, nms=yolo_nms, hier_thresh=yolo_hier_thresh) for cat, score, bounds in results: x, y, w, h = bounds x, y = (h1-int(y), int(x)) x1,y1,x2,y2 = [int(x-h/2),int(y-w/2),int(x+h/2),int(y+w/2)] type = str(cat.decode("utf-8")) color = baseColor if showYolo and h>5: cv2.rectangle(frameColorised, (x1,y1),(x2,y2),color) cv2.circle(frameColorised, (int(x), int(y)), int(2),(0, 255, 0), 3) currentTime = time.time() if y <= rightBound and camId=='CAM_1' and h>10 and w>10: if x>=uproadThresh-10 and x<=uproadThresh+10 and y>=leftBound2 and (currentTime-uproadLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_FAR_URL).read() numberFar += 1 uproadLastTrigger = currentTime if x>=truckThresh-marginOfError and x<=truckThresh+marginOfError and y>=leftBound and (currentTime-truckLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_TRUCK_URL).read() numberTruck += 1 truckLastTrigger = currentTime setUproadTruckDelay() if x>=closeThresh-marginOfError*2 and x<=closeThresh+marginOfError*2 and y>=leftBound and (currentTime-closeLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_CLOSE_URL).read() numberClose += 1 closeLastTrigger = currentTime # DISPLAY FRAME IN WINDOW if IS_ROTATE: cv2.imshow(WINDOW_NAME, np.rot90(frameColorised)) else: cv2.imshow(WINDOW_NAME, frameColorised) frame.queue() cv2.waitKey(1) if frameCount%10==0: print("mode:", MODE,"close mode:", CLOSE_TRIGGER_METHOD, "Count Far", numberFar, "Count Truck", numberTruck,"Count Close", numberClose,"frame:", frameCount, "fps:", int(1.0/(time.time()-lastTime)),"trigger dif",uproadTruckDelay) lastTime = time.time() frameCount += 1 #IF CAM NOT OK cam.stop_image_acquisition() cam.destroy() except Exception as e: print ("Critical Script error! Trying again in 5 seconds ...") print(e) time.sleep(5) #sleep for 10 seconds and then retry!
def yoloWorker(camId): # declare yolo detector in isolation from camera object for reinitiation of camera under failure. print(camId, ' Constructing Yolo Model ...') net = Detector(bytes(cfgfile, encoding="utf-8"), bytes(weightfile, encoding="utf-8"), 0, bytes(datacfg, encoding="utf-8")) CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] # main worker event loop, will keep restarting on camera dropout while (True): try: print( camId, ' Creating harvester modules and loading genlcam producers ...' ) h = Harvester() h.add_cti_file(CTI_FILE) h.update_device_info_list() try: cam = h.create_image_acquisition_manager( serial_number=CAM_NAME) print("Camera found!") except: print("Camera Not Found! Waiting 10 seconds and retrying ...") time.sleep(10) #sleep for 10 seconds and then retry! continue #exit () cam.start_image_acquisition() cv2.namedWindow(WINDOW_NAME, flags=0) # create dedicated stream window #variable declarations lastTime = time.time() transposeTime = 0 i = 0 numberCars = 0 lastSnapshot = None baseColor = (255, 255, 255) baseRes = 400 scale = 800 / 1920 #as percentages to do uproadThresh = 235 truckThresh = 155 closeThresh = 110 extraThresh = 50 leftBound = 50 leftBound2 = 60 rightBound = 90 rightBound2 = 125 marginOfError = 15 # rescaling factor = baseRes / 320 uproadThresh = int(uproadThresh * factor) truckThresh = int(truckThresh * factor) closeThresh = int(closeThresh * factor) extraThresh = int(50 * factor) leftBound = int(leftBound * factor) leftBound2 = int(leftBound2 * factor) rightBound = int(rightBound * factor) rightBound2 = int(125 * factor) showLines = False showYolo = False uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() IS_CAM_OK = True IS_MULTI = False def fetchBuffer(shared, camera): frame = camera.fetch_buffer() shared['buffer'] = frame.payload.components[0].data frame.queue() while (IS_CAM_OK): if IS_MULTI: dict = {"buffer": None} manager = multiprocessing.Manager() shared = manager.dict() p = multiprocessing.Process(target=fetchBuffer, args=(shared, cam)) p.start() # Wait for 5 seconds or until process finishes p.join(TIMEOUT_DELAY) print('3') # If thread is still active if p.is_alive(): p.terminate() p.join() IS_CAM_OK = False try: with timeout(seconds=3, error_message='FETCH_ERROR'): frame = cam.fetch_buffer() except: IS_CAM_OK = False print( 'CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!' 
) sendMessageToSlack( 'Streaming Camera has Failed - Restarting ...', '#ff3300') if LOG: print(shared['buffer']) if (IS_CAM_OK and frame.payload.components): image = frame.payload.components[0].data if LOG: print(image) small = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2RGB) img = np.rot90(rgb, 1) c, h1, w1 = rgb.shape[2], rgb.shape[1], rgb.shape[0] img2 = Image(img) results = net.detect(img2) if LOG: print(results) user_input_key = cv2.waitKey(1) if user_input_key == 113: #q showLines = True elif user_input_key == 97: #a showLines = False elif user_input_key == 122: #z showYolo = True elif user_input_key == 120: #x showYolo = False if showLines and camId == 'CAM_2': cv2.line(rgb, (uproadThresh, 0), (uproadThresh, w1), (255, 255, 0), 1) cv2.line(rgb, (uproadThresh + marginOfError, 0), (uproadThresh + marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (uproadThresh - marginOfError, 0), (uproadThresh - marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (truckThresh, 0), (truckThresh, w1), (255, 255, 0), 1) cv2.line(rgb, (truckThresh + marginOfError, 0), (truckThresh + marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (truckThresh - marginOfError, 0), (truckThresh - marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (closeThresh, 0), (closeThresh, w1), (255, 255, 0), 1) cv2.line(rgb, (closeThresh + marginOfError, 0), (closeThresh + marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (closeThresh - marginOfError, 0), (closeThresh - marginOfError, w1), (255, 0, 0), 1) cv2.line(rgb, (0, rightBound), (h1, rightBound), (255, 255, 255), 1) cv2.line(rgb, (0, leftBound2), (h1, leftBound2), (255, 255, 255), 1) cv2.line(rgb, (0, leftBound), (h1, leftBound), (255, 255, 255), 1) for cat, score, bounds in results: x, y, w, h = bounds x, y = (h1 - int(y), int(x)) x1, y1, x2, y2 = [ int(x - h / 2), int(y - w / 2), int(x + h / 2), int(y + w / 2) ] type = str(cat.decode("utf-8")) color = baseColor if showYolo and h > 5: cv2.rectangle(rgb, (x1, y1), (x2, y2), color) cv2.circle(rgb, (int(x), int(y)), int(2), (0, 255, 0), 3) currentTime = time.time() if y <= rightBound and camId == 'CAM_2' and h > 10 and w > 10: if x >= uproadThresh - 10 and x <= uproadThresh + 10 and y >= leftBound2 and ( currentTime - uproadLastTrigger) > triggerDelay: urllib.request.urlopen( TRIGGER_FAR_FLASH_URL).read() if LOG: print('FAR TRIG') numberCars += 1 uproadLastTrigger = currentTime if x >= truckThresh - marginOfError and x <= truckThresh + marginOfError and y >= leftBound and ( currentTime - truckLastTrigger) > triggerDelay: urllib.request.urlopen( TRIGGER_TRUCK_FLASH_URL).read() if LOG: print('TRUCK TRIG') truckLastTrigger = currentTime if x >= closeThresh - marginOfError * 2 and x <= closeThresh + marginOfError * 2 and y >= leftBound and ( currentTime - closeLastTrigger) > triggerDelay: urllib.request.urlopen( TRIGGER_CLOSE_FLASH_URL).read() if LOG: print('CLOSE TRIG') closeLastTrigger = currentTime if camId == 'CAM_1': if y1 <= rightBound2 and y2 >= rightBound2 and False: urllib.request.urlopen( TRIGGER_FAR_FLASH_URL).read() numberCars += 1 if IS_ROTATE: cv2.imshow(WINDOW_NAME, np.rot90(rgb)) else: cv2.imshow(WINDOW_NAME, rgb) frame.queue() cv2.waitKey(1) print("Count: ", numberCars, " Frame: ", i, " FPS: ", 1.0 / (time.time() - lastTime)) lastTime = time.time() i += 1 #IF CAM NOT OK cam.stop_image_acquisition() cam.destroy() except: print("Critical Script error! Trying again in 5 seconds ...") time.sleep(5) #sleep for 10 seconds and then retry!
def openCvWorker(camId): while (True): try: CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] h = Harvester() h.add_cti_file(CTI_FILE) h.update_device_info_list() try: cam = h.create_image_acquisition_manager( serial_number=CAM_NAME) print("Camera found") except: print("Camera Not Found") exit() cam.start_image_acquisition() cv2.namedWindow(WINDOW_NAME, flags=0) lastTime = time.time() transposeTime = 0 frame = 0 numberCars = 0 lastSnapshot = None carColor = (255, 0, 0) busColor = (0, 255, 0) truckColor = (0, 0, 255) phoneColor = (0, 255, 255) baseColor = (255, 255, 255) baseRes = 300 scale = 800 / 1920 #as percentages uproadThresh = 230 truckThresh = 155 closeThresh = 90 extraThresh = 50 leftBound = 50 leftBound2 = 60 rightBound = 80 rightBound2 = 125 marginOfError = 10 grayThresh = 150 factor = baseRes / 320 uproadThresh = int(uproadThresh * factor) truckThresh = int(truckThresh * factor) closeThresh = int(closeThresh * factor) extraThresh = int(50 * factor) leftBound = int(leftBound * factor) leftBound2 = int(leftBound2 * factor) rightBound = int(rightBound * factor) rightBound2 = int(125 * factor) showLines = False showYolo = False triggerDelay = 0.500 uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() IS_CAM_OK = True while IS_CAM_OK: try: with timeout(seconds=3, error_message='FETCH_ERROR'): buffer = cam.fetch_buffer() except: IS_CAM_OK = False print( 'CAM TOOK TOO LONG TO FETCH BUFFER - KILLING AND RESTARTING!' ) sendMessageToSlack( 'Streaming Camera has Failed - Restarting ...', '#ff3300') if LOG: print(payload) if (IS_CAM_OK and buffer.payload.components): image = buffer.payload.components[0].data if showLines or showYolo: conver = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) small = cv2.cvtColor(conver, cv2.COLOR_BayerRG2RGB) rgb = cv2.cvtColor(conver, cv2.COLOR_BayerRG2GRAY) else: small = cv2.resize(image, dsize=(baseRes, int(baseRes * scale)), interpolation=cv2.INTER_CUBIC) rgb = cv2.cvtColor(small, cv2.COLOR_BayerRG2GRAY) thresh = cv2.threshold(rgb, grayThresh, 255, cv2.THRESH_BINARY)[1] labels = measure.label(thresh, neighbors=8, background=0) mask = np.zeros(thresh.shape, dtype="uint8") if LOG: print(labels) # loop over the unique components for label in np.unique(labels): # if this is the background label, ignore it if label == 0: continue labelMask = np.zeros(thresh.shape, dtype="uint8") labelMask[labels == label] = 255 mask = cv2.add(mask, labelMask) if len(np.unique(labels)) > 0: cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if imutils.is_cv2() else cnts[1] # loop over the contours for (i, c) in enumerate(cnts): currentTime = time.time() # draw the bright spot on the image (x, y, w, h) = cv2.boundingRect(c) ((cX, cY), radius) = cv2.minEnclosingCircle(c) if showYolo: cv2.circle(small, (int(cX), int(cY)), int(5), (0, 0, 255), 3) if cY <= rightBound and cY >= leftBound and camId == 'CAM_2': if cX >= uproadThresh - marginOfError and cX <= uproadThresh + marginOfError and ( currentTime - uproadLastTrigger ) > triggerDelay and cY >= leftBound2: urllib.request.urlopen( TRIGGER_FAR_FLASH_URL).read() uproadLastTrigger = currentTime numberCars += 1 if cX >= truckThresh - marginOfError and cX <= truckThresh + marginOfError and ( currentTime - truckLastTrigger) > triggerDelay: urllib.request.urlopen( TRIGGER_TRUCK_FLASH_URL).read() truckLastTrigger = currentTime if cX >= closeThresh 
- marginOfError and cX <= closeThresh + marginOfError and ( currentTime - closeLastTrigger) > triggerDelay: urllib.request.urlopen( TRIGGER_CLOSE_FLASH_URL).read() closeLastTrigger = currentTime # show the output image k = cv2.waitKey(1) h1, w1 = small.shape[1], small.shape[0] if k == 113: # Esc key to stop showLines = True elif k == 97: showLines = False elif k == 122: showYolo = True elif k == 120: showYolo = False if showLines and camId == 'CAM_2': cv2.line(small, (uproadThresh, 0), (uproadThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Up-Road', (uproadThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(small, (truckThresh, 0), (truckThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Truck', (truckThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(small, (closeThresh, 0), (closeThresh, w1), (255, 255, 0), 1) cv2.putText(small, 'Close', (closeThresh, 50), cv2.FONT_HERSHEY_COMPLEX, 0.2, (255, 255, 0)) cv2.line(small, (0, rightBound), (h1, rightBound), (255, 255, 255), 1) cv2.line(small, (0, leftBound), (h1, leftBound), (255, 255, 255), 1) cv2.line(small, (0, leftBound2), (h1, leftBound2), (255, 0, 255), 1) if IS_ROTATE: cv2.imshow(WINDOW_NAME, np.rot90(small)) else: cv2.imshow(WINDOW_NAME, small) cv2.waitKey(1) buffer.queue() print("Count: ", numberCars, " Frame: ", frame, " FPS: ", 1.0 / (time.time() - lastTime)) lastTime = time.time() frame += 1 cam.stop_image_acquisition() cam.destroy() except: print("Critical Script Error! Waiting 5 seconds and retrying ...") time.sleep(5) #sleep for 10 seconds and then retry!
def mainWorker(camId): CAM_NAME = CAM_CONFIG[camId]['name'] WINDOW_NAME = CAM_CONFIG[camId]['window'] IS_ROTATE = CAM_CONFIG[camId]['rotate'] #variable declarations lastTime = time.time() transposeTime = 0 frameCount = 0 avFrameRate = 0 numberCars = 0 numberClose = 0 numberFar = 0 numberTruck = 0 lastSnapshot = None baseColor = (255,255,255) baseRes = scaledRes scale = 800/1920 factor = baseRes/320 showLines = False logsOn = False showYolo = False IS_CAM_OK = True MODE = "DAY" CLOSE_TRIGGER_METHOD = "DELAY" # DELAY, CALC triggerDelay = 0.5 VARIANCE_METHOD = False # SET MODE BASED ON CURRENT TIME # SET THRESHES LOCALLY uproadThresh = 0 truckThresh = 0 closeThresh = 0 extraThresh = 0 leftBound = 0 leftBound2 = 0 rightBound = 0 rightBound2 = 0 marginOfError = 0 isFarClear = True isTruckClear = True isCloseClear = True farBoxCenter= [97, 298], farBoxWidth= 5, farBoxHeight= 10, truckBoxCenter= [97, 170], truckBoxWidth= 5, truckBoxHeight= 10, closeBoxCenter= [97, 50], closeBoxWidth= 15, closeBoxHeight= 15, sdThreshold= 30, tsdThreshold= 30, csdThreshold= 30, def setDefaultValues(MODE): values = DEFAULT_VALUES[MODE] nonlocal farBoxCenter nonlocal farBoxWidth nonlocal farBoxHeight nonlocal truckBoxCenter nonlocal truckBoxWidth nonlocal truckBoxHeight nonlocal closeBoxCenter nonlocal closeBoxWidth nonlocal closeBoxHeight nonlocal sdThreshold nonlocal tsdThreshold nonlocal csdThreshold farBoxCenter = values['farBoxCenter'] farBoxWidth = values['farBoxWidth'] farBoxHeight = values['farBoxHeight'] truckBoxCenter = values['truckBoxCenter'] truckBoxWidth = values['truckBoxWidth'] truckBoxHeight = values['truckBoxHeight'] closeBoxCenter = values['closeBoxCenter'] closeBoxWidth = values['closeBoxWidth'] closeBoxHeight = values['closeBoxHeight'] sdThreshold = values['sdThreshold'] tsdThreshold = values['tsdThreshold'] csdThreshold = values['csdThreshold'] def setThresholds(MODE, factor): thresh = THRESHOLDS[MODE] nonlocal uproadThresh nonlocal truckThresh nonlocal closeThresh nonlocal extraThresh nonlocal leftBound nonlocal leftBound2 nonlocal rightBound nonlocal rightBound2 nonlocal marginOfError uproadThresh = int(thresh['uproadThresh']*factor) truckThresh = int(thresh['truckThresh']*factor) closeThresh = int(thresh['closeThresh']*factor) extraThresh = int(thresh['extraThresh']*factor) leftBound = int(thresh['leftBound']*factor) leftBound2 = int(thresh['leftBound2']*factor) rightBound = int(thresh['rightBound']*factor) rightBound2 = int(thresh['rightBound2']*factor) marginOfError = int(thresh['marginOfError']*factor) setThresholds(MODE, factor) setDefaultValues(MODE) startTime = time.time() IS_CAM_OK = True while(IS_CAM_OK): print(camId, "Creating harvester modules and loading genlcam producers ...") h = Harvester() h.add_cti_file(CTI_FILE) h.update_device_info_list() cam = h.create_image_acquisition_manager(serial_number=CAM_NAME) cam.start_image_acquisition() cv2.namedWindow(WINDOW_NAME, flags=0) # create dedicated stream window #print(dir(cam.device.node_map)) def nothing(x): pass def changeDelay(x): nonlocal triggerDelay triggerDelay = x/1000 def toggleBoxes(x): nonlocal showLines if x==1: showLines = True print("SHOWING TRIGGER BOXES") else: showLines = False print("HIDING TRIGGER BOXES") def toggleLogs(x): nonlocal logsOn if x==1: logsOn = True print("SHOWING LOGS") else: logsOn = False print("HIDING LOGS") def toggleVarianceMethod(x): nonlocal VARIANCE_METHOD if x==1: VARIANCE_METHOD = True print("SWITCHED TO USER CONTROLLED VALUES FOR TRIGGERING") else: VARIANCE_METHOD = False print("SWITCHED TO 
AVERAGE VALUES FOR TRIGGERING") def switchMode(x): nonlocal MODE if x==1: MODE = "DAY" setDefaultValues(MODE) #set camera defaults print("SWITCHED TO DAY MODE") cam.device.node_map.ExposureAuto = 'Continuous' cam.device.node_map.GainAuto = 'Continuous' else: MODE = "NIGHT" setDefaultValues(MODE) #set camera defaults print("SWITCHED TO NIGHT MODE") cam.device.node_map.ExposureAuto = 'Off' cam.device.node_map.GainAuto = 'Off' #cam.device.node_map.ExposureTime.value = 150.69 #cam.device.node_map.Gain.value = 15.5 def handleChangeInTrigger(x): nonlocal logsOn if x==1: logsOn = True print("SHOWING LOGS") else: logsOn = False print("HIDING LOGS") uproadLastTrigger = time.time() truckLastTrigger = time.time() closeLastTrigger = time.time() farStdAv = 0.0 closeStdAv = 0.0 truckStdAv = 0.0 baseAv = 0.0 # create variable track bars showBoxes = '0 : BOXES OFF \n1 : BOXES ON' outputLogs = '0 : LOGS OFF \n1 : LOGS ON' autoExposureSwitch = '0 : Auto Exp OFF \n1 : Auto Exp ON' autoGainSwitch = '0 : Auto Gain OFF \n1 : Auto Gain ON' modeSwitch = '0 : Night Mode\n1 : Day Mode' varianceMethodSwitch = '0 : Auto Variance\n1 : Manual Variance' currentHour = int(float(time.strftime('%H'))) if(currentHour>=DAY_MODE_HOUR and currentHour<=NIGHT_MODE_HOUR ): cv2.createTrackbar(modeSwitch,WINDOW_NAME,0,1,switchMode) else: cv2.createTrackbar(modeSwitch,WINDOW_NAME,0,1,switchMode) cv2.createTrackbar(showBoxes,WINDOW_NAME,1,1,nothing) cv2.createTrackbar(outputLogs,WINDOW_NAME,0,1,nothing) cv2.createTrackbar('Trigger Reset Delay ms',WINDOW_NAME,int(triggerDelay*1000),1000,nothing) cv2.createTrackbar(varianceMethodSwitch,WINDOW_NAME,0,1,nothing) cv2.createTrackbar('Far Gray',WINDOW_NAME,0,255,nothing) cv2.createTrackbar('Truck Gray',WINDOW_NAME,0,255,nothing) cv2.createTrackbar('Close Gray',WINDOW_NAME,0,255,nothing) showBoxesValue = None outputLogsValue = None triggerResetDelayValue = None modeSwitchValue = None varianceMethodSwitchValue = None farGrayValue = None truckGrayValue = None closeGrayValue = None while(IS_CAM_OK): # MAIN WHILE LOOP FOR IMAGE ACQUISITION with timeout(seconds=5, error_message='FETCH_ERROR'): frame = cam.fetch_buffer() if(IS_CAM_OK and frame.payload.components): timestamp = time.strftime('%H:%M:%S') image = frame.payload.components[0].data frameScaled = cv2.resize(image, dsize=(baseRes, int(baseRes*scale)), interpolation=cv2.INTER_CUBIC) frameColorised = cv2.cvtColor(frameScaled, cv2.COLOR_BayerRG2RGB) c, h1, w1 = frameColorised.shape[2], frameColorised.shape[1], frameColorised.shape[0] currentHour = int(float(time.strftime('%H'))) if(MODE=="NIGHT" and currentHour>=DAY_MODE_HOUR and currentHour<NIGHT_MODE_HOUR): switchMode(modeSwitchValue) cv2.setTrackbarPos(modeSwitch,WINDOW_NAME, 1) if(False and MODE=="DAY" and currentHour>=NIGHT_MODE_HOUR): switchMode(modeSwitchValue) cv2.setTrackbarPos(modeSwitch,WINDOW_NAME, 0) # CHECKING POSITION OF ALL TRACKBARS # cv2.setTrackbarPos(trackbarname, winname, pos) if cv2.getTrackbarPos(showBoxes,WINDOW_NAME)!=showBoxesValue: showBoxesValue = cv2.getTrackbarPos(showBoxes,WINDOW_NAME) toggleBoxes(showBoxesValue) if cv2.getTrackbarPos(outputLogs,WINDOW_NAME)!=outputLogsValue: outputLogsValue = cv2.getTrackbarPos(outputLogs,WINDOW_NAME) toggleLogs(outputLogsValue) if cv2.getTrackbarPos(modeSwitch,WINDOW_NAME)!=modeSwitchValue: modeSwitchValue = cv2.getTrackbarPos(modeSwitch,WINDOW_NAME) switchMode(modeSwitchValue) triggerBoxFar = frameScaled[farBoxCenter[0]-farBoxWidth:farBoxCenter[0]+farBoxWidth,farBoxCenter[1]:farBoxCenter[1]+farBoxHeight] 
#frameScaled[uproadThresh:uproadThresh+boxHeight,farBoxCenter-farBoxWidth:farBoxCenter+farBoxWidth] triggerBoxTruck = frameScaled[truckBoxCenter[0]-truckBoxWidth:truckBoxCenter[0]+truckBoxWidth,truckBoxCenter[1]:truckBoxCenter[1]+truckBoxHeight] #frameScaled[truckThresh:truckThresh+boxHeight,truckBoxCenter-truckBoxWidth:truckBoxCenter+truckBoxWidth] triggerBoxClose = frameScaled[closeBoxCenter[0]-closeBoxWidth:closeBoxCenter[0]+closeBoxWidth,closeBoxCenter[1]:closeBoxCenter[1]+closeBoxHeight] #frameScaled[closeThresh:closeThresh+boxHeight,closeBoxCenter-closeBoxWidth:closeBoxCenter+closeBoxWidth] # ARRAY METRICS FOR TRIGGERING triggerBoxFarStd= np.mean(triggerBoxFar) triggerBoxTruckStd= np.mean(triggerBoxTruck) triggerBoxCloseStd= np.mean(triggerBoxClose) numberOfFrames = 200 farStdAv = farStdAv*(numberOfFrames-1)/numberOfFrames + triggerBoxFarStd/numberOfFrames truckStdAv = truckStdAv*(numberOfFrames-1)/numberOfFrames + triggerBoxTruckStd/numberOfFrames closeStdAv = closeStdAv*(numberOfFrames-1)/numberOfFrames + triggerBoxCloseStd/numberOfFrames if cv2.getTrackbarPos(varianceMethodSwitch,WINDOW_NAME)!=varianceMethodSwitchValue: varianceMethodSwitchValue = cv2.getTrackbarPos(varianceMethodSwitch,WINDOW_NAME) toggleVarianceMethod(varianceMethodSwitchValue) cv2.setTrackbarPos('Far Gray',WINDOW_NAME, int(farStdAv)) cv2.setTrackbarPos('Truck Gray',WINDOW_NAME, int(truckStdAv)) cv2.setTrackbarPos('Close Gray',WINDOW_NAME, int(closeStdAv)) if cv2.getTrackbarPos('Far Gray',WINDOW_NAME)!=farGrayValue: farGrayValue = cv2.getTrackbarPos('Far Gray',WINDOW_NAME) if cv2.getTrackbarPos('Truck Gray',WINDOW_NAME)!=truckGrayValue: truckGrayValue = cv2.getTrackbarPos('Truck Gray',WINDOW_NAME) if cv2.getTrackbarPos('Close Gray',WINDOW_NAME)!=closeGrayValue: closeGrayValue = cv2.getTrackbarPos('Close Gray',WINDOW_NAME) if (VARIANCE_METHOD == True): farDiff = abs(farGrayValue -triggerBoxFarStd) truckDiff = abs(truckGrayValue-triggerBoxTruckStd) closeDiff = abs(closeGrayValue-triggerBoxCloseStd) else: farDiff = abs(farStdAv -triggerBoxFarStd) truckDiff = abs(truckStdAv-triggerBoxTruckStd) closeDiff = abs(closeStdAv-triggerBoxCloseStd) currentTime = time.time() # WAS GOING TO BE USED TO IDENTIFY CARS, NOW JUST A VISUAL AID FOR WHEN ZONES ARE RESETTING OR NOT if isFarClear and farDiff>sdThreshold: isFarClear = False elif isFarClear == False and (currentTime-uproadLastTrigger)>triggerDelay: isFarClear = True if isTruckClear and truckDiff>tsdThreshold: isTruckClear = False elif isTruckClear == False and (currentTime-truckLastTrigger)>triggerDelay: isTruckClear = True if isCloseClear and closeDiff>csdThreshold: isCloseClear = False elif isCloseClear == False and (currentTime-closeLastTrigger)>triggerDelay: isCloseClear = True if currentTime-startTime>DELAY_TIME_FROM_INIT_TO_TRIGGER: if farDiff>sdThreshold and (currentTime-uproadLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_FAR_FLASH_URL).read() numberFar += 1 uproadLastTrigger = currentTime if truckDiff>tsdThreshold and (currentTime-truckLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_TRUCK_FLASH_URL).read() numberTruck += 1 truckLastTrigger = currentTime if closeDiff>csdThreshold and (currentTime-closeLastTrigger)>triggerDelay: urllib.request.urlopen(TRIGGER_CLOSE_FLASH_URL).read() numberClose += 1 closeLastTrigger = currentTime # SHOW LINES SECTION if showLines: cv2.rectangle(frameColorised, (farBoxCenter[1],farBoxCenter[0]-farBoxWidth),(farBoxCenter[1]+farBoxHeight,farBoxCenter[0]+farBoxWidth),(0,255,0) if isFarClear else (0,0,255)) 
cv2.rectangle(frameColorised, (truckBoxCenter[1],truckBoxCenter[0]-truckBoxWidth),(truckBoxCenter[1]+truckBoxHeight,truckBoxCenter[0]+truckBoxWidth),(0,255,0) if isTruckClear else (0,0,255)) cv2.rectangle(frameColorised, (closeBoxCenter[1],closeBoxCenter[0]-closeBoxWidth),(closeBoxCenter[1]+closeBoxHeight,closeBoxCenter[0]+closeBoxWidth),(0,255,0) if isCloseClear else (0,0,255)) # DISPLAY FRAME IN WINDOW cv2.imshow(WINDOW_NAME, np.rot90(frameColorised) if IS_ROTATE else frameColorised) frame.queue() cv2.waitKey(1) avFrameRate=avFrameRate*49/50+int(1.0/(time.time()-lastTime))/50 if frameCount%1==0 and logsOn: print("CF", numberFar, "CT", numberTruck,"CC", numberClose,"avFPS", int(avFrameRate),"FV", int(triggerBoxFarStd),"TV", int(triggerBoxTruckStd), "CV", int(triggerBoxCloseStd)) lastTime = time.time() frameCount += 1 #IF CAM NOT OK cam.stop_image_acquisition() cam.destroy()
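The acquisition loop above wraps cam.fetch_buffer() in a timeout context manager that is not defined anywhere in this excerpt. A minimal sketch of one way it could be implemented, assuming a Unix platform where SIGALRM is available (the class name and the use of TimeoutError are assumptions, not taken from the original code):

import signal

class timeout:
    """Raise an exception if the body of the `with` block runs too long.

    Sketch only: relies on SIGALRM, so it works on the main thread of a
    Unix process and supports whole-second granularity.
    """
    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message

    def _handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self._handle_timeout)
        signal.alarm(self.seconds)

    def __exit__(self, exc_type, exc_value, traceback):
        # Cancel the pending alarm whether or not the block raised.
        signal.alarm(0)

A signal-based guard like this only needs second-level resolution, which matches the 5-second fetch guard used in the loop above.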
    for genicam_path in genicam_paths:
        glob_path = os.path.join(genicam_path, "*.cti")
        cti = glob.glob(glob_path)
        cti_list.extend(cti)
    return cti_list


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--cti", required=False, help="Path to .cti file", default=None)
    args = parser.parse_args()

    cti = args.cti
    if cti is None:
        # get from env
        cti = get_cti_list()
    if isinstance(cti, str):
        # Wrap a single path in a list; list(cti) would split the string into characters.
        cti = [cti]

    driver = Harvester()
    for cti_path in cti:
        driver.add_cti_file(cti_path)
    print(cti)

    driver.update_device_info_list()
    info_list: List[DeviceInfo] = driver.device_info_list
    for i, info in enumerate(info_list):
        print(f"Device {i}: ID={info.id_}")
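The fragment above begins inside get_cti_list(), so the code that builds genicam_paths is missing. A plausible reconstruction of the whole helper, assuming the search paths come from the conventional GENICAM_GENTL64_PATH environment variable (that variable name is an assumption about this setup, not confirmed by the excerpt):

import glob
import os
from typing import List

def get_cti_list() -> List[str]:
    """Collect candidate .cti producers from the GenTL search path.

    Sketch only: assumes GENICAM_GENTL64_PATH holds an os.pathsep-separated
    list of directories, as the GenTL standard suggests.
    """
    cti_list: List[str] = []
    genicam_paths = os.environ.get('GENICAM_GENTL64_PATH', '').split(os.pathsep)
    for genicam_path in filter(None, genicam_paths):
        cti_list.extend(glob.glob(os.path.join(genicam_path, '*.cti')))
    return cti_list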
class TestHarvesterCoreBase(unittest.TestCase): name = 'HARVESTER_TEST_TARGET' if name in os.environ: _cti_file = os.getenv(name) else: if is_running_on_windows(): _cti_file = 'C:/Users/z1533tel/dev/genicam/bin/Win64_x64' else: if is_running_on_macos(): _cti_file = '/Users/kznr/dev/genicam/bin/Maci64_x64' else: _cti_file = '/home/vagrant/dev/genicam/bin/Linux64_x64' _cti_file += '/TLSimu.cti' sys.path.append(_cti_file) def __init__(self, *args, **kwargs): # super().__init__(*args, **kwargs) # self._harvester = None self._ia = None self._thread = None self._logger = get_logger(name='harvesters', level=DEBUG) def setUp(self): # super().setUp() # self._harvester = Harvester(logger=self._logger) self._harvester.add_cti_file(self._cti_file) self._harvester.update_device_info_list() def tearDown(self): # if self.ia: self.ia.destroy() # self._harvester.reset() # self._ia = None # super().tearDown() @property def harvester(self): return self._harvester @property def ia(self): return self._ia @ia.setter def ia(self, value): self._ia = value @property def general_purpose_thread(self): return self._thread @general_purpose_thread.setter def general_purpose_thread(self, value): self._thread = value def is_running_with_default_target(self): return True if 'TLSimu.cti' in self._cti_file else False
def main(unused_argv): use_s3 = True if flags.FLAGS.s3_bucket_name is not None else False if use_s3: if not s3_bucket_exists(flags.FLAGS.s3_bucket_name): use_s3 = False print( "Bucket: %s either does not exist or you do not have access to it" % flags.FLAGS.s3_bucket_name ) else: print( "Bucket: %s exists and you have access to it" % flags.FLAGS.s3_bucket_name ) if use_s3: # Get the newest model s3_download_highest_numbered_file( flags.FLAGS.s3_bucket_name, "/".join([flags.FLAGS.s3_data_dir, MODEL_STATE_DIR_NAME]), os.path.join(flags.FLAGS.local_data_dir, MODEL_STATE_DIR_NAME), MODEL_STATE_FILE_TYPE, flags.FLAGS.network, ) label_file_path = os.path.join(flags.FLAGS.local_data_dir, LABEL_FILE_NAME) if not os.path.isfile(label_file_path): print("Missing file %s" % label_file_path) return # read in the category labels labels = open(label_file_path).read().splitlines() if len(labels) == 0: print("No label categories found in %s" % label_file_path) return # Add the background as the first class labels.insert(0, "background") print("Labels found:") print(labels) saved_model_file_path = ( flags.FLAGS.model_path if flags.FLAGS.model_path is not None else get_newest_saved_model_path( os.path.join(flags.FLAGS.local_data_dir, MODEL_STATE_DIR_NAME), flags.FLAGS.network, ) ) if saved_model_file_path is None: print("No saved model state found") return h = Harvester() h.add_cti_file(flags.FLAGS.gentl_producer_path) if len(h.cti_files) == 0: print("No valid cti file found at %s" % flags.FLAGS.gentl_producer_path) h.reset() return print("Currently available genTL Producer CTI files: ", h.cti_files) h.update_device_info_list() if len(h.device_info_list) == 0: print("No compatible devices detected.") h.reset() return print("Available devices List: ", h.device_info_list) print("Using device: ", h.device_info_list[0]) cam = h.create_image_acquirer(list_index=0) apply_camera_settings(cam) display_images(cam, labels, saved_model_file_path) # clean up cam.destroy() h.reset() print("Exiting.")
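This main() delegates camera configuration to apply_camera_settings(cam), which is not shown in this excerpt. The sketch below only illustrates the node-map pattern such a helper would use; the feature names (AcquisitionFrameRateEnable, AcquisitionFrameRate) and the 30 fps value are assumptions, not the project's actual settings:

def apply_camera_settings(cam) -> None:
    # Hypothetical settings helper: adjust GenICam features via the node map.
    node_map = cam.device.node_map
    try:
        # Assumed feature names; real cameras may expose different ones.
        node_map.AcquisitionFrameRateEnable.value = True
        node_map.AcquisitionFrameRate.value = 30.0
    except Exception:
        # Feature not available on this device; keep its defaults.
        pass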
from harvesters.core import Harvester

# Harvester object
h = Harvester()

# Add the .cti file from the mvGenTL installation path
cti_file_path = 'C:/MATRIX_VISION/mvIMPACT_Acquire/bin/x64/mvGenTLProducer.cti'
h.add_cti_file(cti_file_path)

# Update the device list (if any devices are connected, their info is read)
h.update_device_info_list()
print(h.device_info_list)
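The snippet ends after listing the detected devices. If the list is non-empty, a typical continuation is to connect to the first device, grab a single buffer, and release everything; a minimal sketch, assuming the code above has already run:

# Connect to the first listed device and fetch one frame.
ia = h.create_image_acquirer(list_index=0)
ia.start_image_acquisition()

with ia.fetch_buffer() as buffer:
    component = buffer.payload.components[0]
    print('Got a frame:', component.width, 'x', component.height)

# Release the acquirer and the Harvester resources.
ia.stop_image_acquisition()
ia.destroy()
h.reset()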
class TestTutorials2(unittest.TestCase):
    def setUp(self) -> None:
        # The following block is just for administrative purpose;
        # you should not include it in your code:
        self.cti_file_path = get_cti_file_path()
        if 'TLSimu.cti' not in self.cti_file_path:
            self.skipTest('The target is not TLSimu.')

        # Create a Harvester object:
        self.harvester = Harvester()

    def tearDown(self) -> None:
        #
        self.harvester.reset()

    def test_traversable_tutorial(self):
        # Add a CTI file path:
        self.harvester.add_file(self.cti_file_path)
        self.harvester.update()

        # Connect to the first camera in the list:
        ia = self.harvester.create_image_acquirer(0)

        #
        num_images_to_acquire = 0

        # Then start image acquisition:
        ia.start_acquisition()

        while num_images_to_acquire < 100:
            #
            with ia.fetch_buffer() as buffer:
                #
                self.do_something(buffer)
                pass
            num_images_to_acquire += 1

        # We don't need the ImageAcquirer object. Destroy it:
        ia.destroy()

    def test_ticket_127(self):
        #
        self.harvester.add_cti_file(self.cti_file_path)
        self.harvester.remove_cti_file(self.cti_file_path)

        #
        self.harvester.add_cti_file(self.cti_file_path)
        self.harvester.remove_cti_files()

        #
        self.harvester.add_cti_file(self.cti_file_path)
        self.assertIsNotNone(self.harvester.cti_files)

        #
        self.harvester.update_device_info_list()

        # Connect to the first camera in the list:
        ia = self.harvester.create_image_acquirer(0)

        #
        ia.start_image_acquisition()
        self.assertTrue(ia.is_acquiring_images())
        ia.stop_image_acquisition()
        self.assertFalse(ia.is_acquiring_images())
from harvesters.core import Harvester

h = Harvester()
h.add_cti_file('/usr/local/lib/baumer/libbgapi2_gige.cti')
#h.add_cti_file('/home/server/Desktop/ctifiles/GxGVTL.cti')
h.update_device_info_list()
print(h.device_info_list)

iam = h.create_image_acquisition_manager(serial_number='QG0170070016')
iam.start_image_acquisition()
buffer = iam.fetch_buffer()
print(buffer)
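The buffer fetched above is never returned to the acquisition engine, and the objects are never shut down. A minimal cleanup sketch that continues the snippet, using the same older image-acquisition-manager API:

# Read the image data out of the buffer, then hand the buffer back.
component = buffer.payload.components[0]
print('Frame size:', component.width, 'x', component.height)
buffer.queue()

# Tear everything down in reverse order of creation.
iam.stop_image_acquisition()
iam.destroy()
h.reset()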
# Load OpenCV, numpy and Harvester
import cv2
import time
import os
import datetime
import numpy as np
from harvesters.core import Harvester

# Create a Harvester object
h = Harvester()

# Load the cti file (the GeniCam system file)
# For USB3
#h.add_cti_file("bgapi2_usb.cti")
# For GigE
h.add_cti_file("./libbgapi2_gige.cti")

# Update the device list
h.update_device_info_list()

# Device list indices start at 0
print(h.device_info_list)

# Create a device object for index 0
ia = h.create_image_acquirer(0)
print("open first device")

# To change a device setting, put the node name after device.node_map. and read or write its .value
# Node names can be checked in the SDK's CameraExplorer viewer by switching the Profile (top right)
# to GeniCamGuru (parameter names contain no spaces)
# List the available nodes
#print(dir(ia.device.node_map))
#print(dir(ia._data_streams[0].local_node_map))
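The comments above describe the device.node_map pattern for reading and writing camera features. A short sketch of that pattern as a continuation of the snippet; ExposureAuto and ExposureTime are common GenICam feature names, but whether this particular camera exposes them is an assumption:

node_map = ia.device.node_map

# Read a feature value.
print('Current exposure [us]:', node_map.ExposureTime.value)

# Write feature values (turn off auto exposure first, if the camera has it).
node_map.ExposureAuto.value = 'Off'
node_map.ExposureTime.value = 10000.0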
class HarvestersSource(ImageSource):
    """
    This source disregards analyzer back pressure.
    The user has to control the frame rate.
    """
    config_prefix = "Harvesters_Source"

    def __init__(self):
        from harvesters.core import Harvester
        super().__init__()
        self.config = config.SettingAccessor(self.config_prefix)
        self.logger = logging.getLogger("console")
        self._stop = subject.Subject()
        self.driver = Harvester()
        self.driver.add_cti_file(self.config["cti_path"])
        self.acquirer = None
        self.simex_instance = None
        self.running = False
        self.scheduler = scheduler.NewThreadScheduler()
        self.scheduler.schedule(
            lambda sc, state: self.driver.update_device_info_list())

    def is_running(self):
        return self.running

    @staticmethod
    @config.DefaultSettingRegistration(config_prefix)
    def defaultSettings(configPrefix):
        config.default_settings(configPrefix, [
            config.SettingRegistry("cti_path", "/tmp/TL.cti", type="str", title="Path to .cti file"),
            config.SettingRegistry("gain", 0, type="float"),
            config.SettingRegistry(
                "invert_polarity", False, type="bool", title="Invert polarity"),
            config.SettingRegistry(
                "id", "", type="str", title="Camera Id (use camera_id.py)"),
            config.SettingRegistry(
                "fps", 5, type="float", title="frame per second")
        ])

    def _read_buffer(self):
        try:
            buffer: Buffer = self.acquirer.fetch_buffer(timeout=0.1)
            payload = buffer.payload
            component = payload.components[0]
            width = component.width
            height = component.height
            content = component.data.reshape(height, width)
            time = buffer.timestamp_ns
            self.next_image(
                AcquiredImage(content.copy(), time / 1e9, f"{time}.jpg"))
            buffer.queue()
        except TimeoutException as ex:
            pass
        except Exception as ex:
            self.logger.error(ex)

    def reload_camera_driver(self):
        id_ = self.config["id"]
        if not id_:
            self.acquirer = self.driver.create_image_acquirer(list_index=0)
        else:
            # Use the configured camera id; passing `id` would hand over the built-in function.
            self.acquirer = self.driver.create_image_acquirer(id_=id_)
        self.acquirer.on_new_buffer_arrival = self._read_buffer
        node = self.acquirer.device.node_map
        node.LineSelector.value = 'Line1'
        node.LineMode.value = 'Output'
        node.LineInverter.value = True
        node.LineSource.value = "ExposureActive"
        node.ExposureTime.value = 45.0
        node.AcquisitionFrameRateMode.value = "Basic"
        node.AcquisitionFrameRate.value = self.config["fps"]

    def start(self):
        super().start()
        self.reload_camera_driver()
        if self.acquirer:
            self.acquirer.start_image_acquisition()
        self.running = True

    def stop(self):
        if self.acquirer:
            self.acquirer.stop_image_acquisition()
            self.acquirer.destroy()
        self._stop.on_next(True)
        self.running = False
        super().stop()
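HarvestersSource drives acquisition through the acquirer's on_new_buffer_arrival callback instead of polling. Stripped of the config and Rx plumbing, the same pattern looks roughly like the standalone sketch below; the producer path is a placeholder and the two-second wait is arbitrary:

import time

from harvesters.core import Harvester

h = Harvester()
h.add_cti_file('/path/to/producer.cti')  # placeholder; point at a real GenTL producer
h.update_device_info_list()

ia = h.create_image_acquirer(list_index=0)

def on_buffer():
    # Invoked by harvesters whenever a filled buffer becomes available.
    with ia.fetch_buffer() as buffer:
        component = buffer.payload.components[0]
        print('frame:', component.width, 'x', component.height)

ia.on_new_buffer_arrival = on_buffer
ia.start_image_acquisition()
time.sleep(2.0)  # let a few callbacks fire
ia.stop_image_acquisition()
ia.destroy()
h.reset()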