def test_bounded_semaphore(timeout, check_interval, monkeypatch):
    n = 2
    name = random.random()
    monkeypatch.setattr(utils, 'DEFAULT_TIMEOUT', 0.0001)
    monkeypatch.setattr(utils, 'DEFAULT_CHECK_INTERVAL', 0.0005)

    semaphore_a = portalocker.BoundedSemaphore(n, name=name, timeout=timeout)
    semaphore_b = portalocker.BoundedSemaphore(n, name=name, timeout=timeout)
    semaphore_c = portalocker.BoundedSemaphore(n, name=name, timeout=timeout)

    semaphore_a.acquire(timeout=timeout)
    semaphore_b.acquire()

    with pytest.raises(portalocker.AlreadyLocked):
        semaphore_c.acquire(check_interval=check_interval, timeout=timeout)
def __init__(self, options={}):
    self.net = None
    self.classes = None
    self.options = options
    self.is_locked = False
    self.name = self.options.get('name', 'MobileNetSSD')
    self.processor = self.options.get('object_processor', 'cpu')
    self.lock_maximum = int(options.get(self.processor + '_max_processes', 1))
    self.lock_timeout = int(options.get(self.processor + '_max_lock_wait', 120))
    self.lock_name = 'pyzm_uid{}_{}_lock'.format(os.getuid(), self.processor)
    self.disable_locks = options.get('disable_locks', 'no')
    if self.disable_locks == 'no':
        g.logger.Debug(
            2, f'portalock: max:{self.lock_maximum}, name:{self.lock_name}, timeout:{self.lock_timeout}')
        self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                 name=self.lock_name,
                                                 timeout=self.lock_timeout)
    self.model_height = self.options.get('model_height', self.MODEL_HEIGHT)
    self.model_width = self.options.get('model_width', self.MODEL_WIDTH)
def __init__(self, options={}):
    self.net = None
    self.classes = None
    self.options = options
    self.is_locked = False
    self.processor = self.options.get('object_processor') or 'cpu'
    self.lock_maximum = int(options.get(self.processor + '_max_processes') or 1)
    self.lock_timeout = int(options.get(self.processor + '_max_lock_wait') or 120)
    #self.lock_name='pyzm_'+self.processor+'_lock'
    self.lock_name = 'pyzm_uid{}_{}_lock'.format(os.getuid(), self.processor)
    self.disable_locks = options.get('disable_locks', 'no')
    if self.disable_locks == 'no':
        g.logger.Debug(
            2, 'portalock: max:{}, name:{}, timeout:{}'.format(
                self.lock_maximum, self.lock_name, self.lock_timeout))
        self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                 name=self.lock_name,
                                                 timeout=self.lock_timeout)
    self.model_height = self.options.get('model_height', 416)
    self.model_width = self.options.get('model_width', 416)
def __init__(self, options={}, logger=None):
    Base.__init__(self, logger)
    self.classes = {}
    self.options = options
    #self.logger.Debug(1, 'UID:{} EUID:{}'.format(os.getuid(), os.geteuid()))
    #self.logger.Debug(4, 'TPU init params: {}'.format(options))
    self.processor = 'tpu'
    self.lock_maximum = int(options.get(self.processor + '_max_processes') or 1)
    self.lock_name = 'pyzm_uid{}_{}_lock'.format(os.getuid(), self.processor)
    self.lock_timeout = int(options.get(self.processor + '_max_lock_wait') or 120)
    self.disable_locks = options.get('disable_locks', 'no')
    if self.disable_locks == 'no':
        self.logger.Debug(
            2, f'portalock: max:{self.lock_maximum}, name:{self.lock_name}, timeout:{self.lock_timeout}')
        self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                 name=self.lock_name,
                                                 timeout=self.lock_timeout)
    self.is_locked = False
    self.model = None
    self.populate_class_labels()
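The pyzm-style constructors above only build the semaphore; none of these snippets shows it being taken or released. A minimal sketch of the pattern they presumably rely on follows, assuming hypothetical acquire_lock/release_lock helpers and a placeholder detect method (names and option keys invented for illustration; only the portalocker calls mirror the examples above):

import portalocker

class LockedDetectorSketch:
    """Hypothetical illustration only: guard inference with the semaphore created in __init__."""

    def __init__(self, options={}):
        self.options = options
        self.is_locked = False
        self.disable_locks = options.get('disable_locks', 'no')
        self.lock_maximum = int(options.get('cpu_max_processes') or 1)
        self.lock_timeout = int(options.get('cpu_max_lock_wait') or 120)
        if self.disable_locks == 'no':
            self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                     name='example_cpu_lock',
                                                     timeout=self.lock_timeout)

    def acquire_lock(self):
        # hypothetical helper: take one of the `maximum` slots before inference
        if self.disable_locks == 'yes' or self.is_locked:
            return
        try:
            self.lock.acquire()
            self.is_locked = True
        except portalocker.AlreadyLocked:
            raise ValueError('Timed out after {}s waiting for lock'.format(self.lock_timeout))

    def release_lock(self):
        # hypothetical helper: free the slot once inference is done
        if self.disable_locks == 'yes' or not self.is_locked:
            return
        self.lock.release()
        self.is_locked = False

    def detect(self, image):
        # placeholder inference guarded by the semaphore
        self.acquire_lock()
        try:
            return []
        finally:
            self.release_lock()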
def _atexit():
    this_pid = os.getpid()
    logging.debug(f"atExit handler pid={this_pid}")

    with portalocker.BoundedSemaphore(
            1, XCOREDeviceServer.lock_key,
            timeout=XCOREDeviceServer.FILE_LOCK_TIMEOUT):
        if XCOREDeviceServer.devices_path.is_file():
            # search (by parent pid) for this process in cached devices
            with open(XCOREDeviceServer.devices_path, "r+") as fd:
                cached_devices = json.loads(fd.read())
                for cached_device in cached_devices:
                    if this_pid == cached_device["parent_pid"]:
                        cached_device = XCOREDeviceServer._reset_device_use(
                            cached_device)

            with open(XCOREDeviceServer.devices_path, "w") as fd:
                fd.write(json.dumps(cached_devices))
def release(endpoint):
    with portalocker.BoundedSemaphore(
            1, XCOREDeviceServer.lock_key,
            timeout=XCOREDeviceServer.FILE_LOCK_TIMEOUT):
        if XCOREDeviceServer.devices_path.is_file():
            # search (by port) for endpoint in cached devices
            with open(XCOREDeviceServer.devices_path, "r+") as fd:
                cached_devices = json.loads(fd.read())
                for cached_device in cached_devices:
                    if endpoint.port == cached_device["xscope_port"]:
                        endpoint.disconnect()
                        if endpoint.error:
                            # the device was left in an error state so reset it
                            cached_device = XCOREDeviceServer._reset_device_use(
                                cached_device)
                        cached_device["in_use"] = False
                        logging.debug(f"Released device: {cached_device}")

            with open(XCOREDeviceServer.devices_path, "w") as fd:
                fd.write(json.dumps(cached_devices))
def __init__(self, options={}):
    self.classes = {}
    self.options = options
    self.processor = 'tpu'
    self.lock_maximum = int(options.get(self.processor + '_max_processes') or 1)
    self.lock_name = 'pyzm_uid{}_{}_lock'.format(os.getuid(), self.processor)
    self.lock_timeout = int(options.get(self.processor + '_max_lock_wait') or 120)
    self.disable_locks = options.get('disable_locks', 'no')
    if self.disable_locks == 'no':
        g.logger.Debug(
            2, 'portalock: max:{}, name:{}, timeout:{}'.format(
                self.lock_maximum, self.lock_name, self.lock_timeout))
        self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                 name=self.lock_name,
                                                 timeout=self.lock_timeout)
    self.is_locked = False
    self.model = None
    self.populate_class_labels()
def __init__(self, options={}):
    global g_diff_time
    self.options = options
    g.logger.Debug(1, 'Initializing face detection')
    self.processor = 'tpu'
    self.lock_maximum = int(options.get(self.processor + '_max_processes') or 1)
    self.lock_name = 'pyzm_uid{}_{}_lock'.format(os.getuid(), self.processor)
    self.lock_timeout = int(options.get(self.processor + '_max_lock_wait') or 120)
    self.disable_locks = options.get('disable_locks', 'no')
    if self.disable_locks == 'no':
        g.logger.Debug(
            2, 'portalock: max:{}, name:{}, timeout:{}'.format(
                self.lock_maximum, self.lock_name, self.lock_timeout))
        self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                 name=self.lock_name,
                                                 timeout=self.lock_timeout)
    self.is_locked = False
    self.model = None
def __init__(self, logger=None, options={}, upsample_times=1, num_jitters=0, model='hog'):
    super().__init__(logger)
    global g_diff_time
    #self.logger.Debug(4, 'Face init params: {}'.format(options))

    if dlib.DLIB_USE_CUDA and dlib.cuda.get_num_devices() >= 1:
        self.processor = 'gpu'
    else:
        self.processor = 'cpu'

    self.logger.Debug(
        1, 'perf: processor:{} Face Recognition library load time took: {}'.format(
            self.processor, g_diff_time))
    self.logger.Debug(
        1, 'Initializing face recognition with model:{} upsample:{}, jitters:{}'.format(
            model, upsample_times, num_jitters))

    self.disable_locks = options.get('disable_locks', 'no')
    self.upsample_times = upsample_times
    self.num_jitters = num_jitters

    if options.get('face_model'):
        self.face_model = options.get('face_model')
    else:
        self.face_model = model

    self.knn = None
    self.options = options
    self.is_locked = False
    self.lock_maximum = int(options.get(self.processor + '_max_processes') or 1)
    self.lock_timeout = int(options.get(self.processor + '_max_lock_wait') or 120)
    #self.lock_name='pyzm_'+self.processor+'_lock'
    self.lock_name = 'pyzm_uid{}_{}_lock'.format(os.getuid(), self.processor)

    if self.disable_locks == 'no':
        self.logger.Debug(
            2, f'portalock: max:{self.lock_maximum}, name:{self.lock_name}, timeout:{self.lock_timeout}')
        self.lock = portalocker.BoundedSemaphore(maximum=self.lock_maximum,
                                                 name=self.lock_name,
                                                 timeout=self.lock_timeout)

    if self.options.get('face_detection_framework') != 'dlib' and self.options.get('face_recognition_framework') != 'dlib':
        raise ValueError(
            'Error: As of now, only dlib is supported for face detection and recognition. Unknown {}/{}'.format(
                self.options.get('face_detection_framework'),
                self.options.get('face_recognition_framework')))

    encoding_file_name = self.options.get('known_images_path') + '/faces.dat'
    try:
        if os.path.isfile(self.options.get('known_images_path') + '/faces.pickle'):
            # old version, we no longer want it. begone
            self.logger.Debug(1, 'removing old faces.pickle, we have moved to clustering')
            os.remove(self.options.get('known_images_path') + '/faces.pickle')
    except Exception as e:
        self.logger.Error('Error deleting old pickle file: {}'.format(e))

    # to increase performance, read encodings from file
    if os.path.isfile(encoding_file_name):
        self.logger.Debug(
            1, 'pre-trained faces found, using that. If you want to add new images, remove: {}'.format(
                encoding_file_name))
        #self.known_face_encodings = data["encodings"]
        #self.known_face_names = data["names"]
    else:
        # no encodings, we have to read and train
        self.logger.Debug(
            1, 'trained file not found, reading from images and doing training...')
        self.logger.Debug(
            1, 'If you are using a GPU and run out of memory, do the training using zm_train_faces.py. In this case, other models like yolo may already take up a lot of GPU memory')
        train.FaceTrain(options=self.options).train()

    try:
        with open(encoding_file_name, 'rb') as f:
            self.knn = pickle.load(f)
    except Exception as e:
        self.logger.Error('Error loading KNN model: {}'.format(e))
def acquire():
    atexit.register(XCOREDeviceServer._atexit)

    with portalocker.BoundedSemaphore(
            1, XCOREDeviceServer.lock_key,
            timeout=XCOREDeviceServer.FILE_LOCK_TIMEOUT):
        logging.debug("Device acquire requested")

        # get the connected devices
        connected_devices = get_devices()

        # get the cached devices
        if XCOREDeviceServer.devices_path.is_file():
            with open(XCOREDeviceServer.devices_path, "r+") as fd:
                cached_devices = json.loads(fd.read())
        else:
            cached_devices = []

        logging.debug(f"Connected devices: {connected_devices}")
        logging.debug(f"Cached devices: {cached_devices}")

        # sync connected and cached devices
        synced_devices = []
        for connected_device in connected_devices:
            # check cached_devices for this connected device
            new_device = True  # until proven otherwise
            for cached_device in cached_devices:
                if cached_device["adaptor_id"] == connected_device["adaptor_id"]:
                    logging.debug(f"Found cached device: {cached_device}")
                    if cached_device.get("xrun_pid", None) == None:
                        # cached device but needs to be set up
                        cached_device = XCOREDeviceServer._setup_device_use(
                            cached_device)
                    elif cached_device["in_use"] == False:
                        # ensure device is responding
                        try:
                            ping_succeeded = XCOREDeviceServer._ping_device(
                                cached_device["xscope_port"], timeout=5)
                            logging.debug("Ping succeeded")
                        except:
                            logging.debug("Ping failed")
                            # device did not respond so reset it
                            cached_device = XCOREDeviceServer._reset_device_use(
                                cached_device)
                            # then setup the device
                            cached_device = XCOREDeviceServer._setup_device_use(
                                cached_device)
                    # this is a known device so save in synced devices
                    synced_devices.append(cached_device)
                    new_device = False
                    break

            if new_device:
                # connected device not found in cached devices so it must be new
                logging.debug(f"Found new device: {connected_device}")
                # setup this new device
                connected_device = XCOREDeviceServer._setup_device_use(
                    connected_device)
                # new devices are saved in synced devices
                synced_devices.append(connected_device)

        # now search synced_devices for an available device
        acquired_device = None
        for synced_device in synced_devices:
            if synced_device.get("in_use", False) == False:
                acquired_device = synced_device
                acquired_device["in_use"] = True
                break

        if not acquired_device:
            raise Exception(
                "Could not acquire device (ensure that device is connected)")

        # save the synced devices
        logging.debug(f"Saving synced devices: {synced_devices}")
        with open(XCOREDeviceServer.devices_path, "w") as fd:
            fd.write(json.dumps(synced_devices))

        logging.debug(f"Acquired device: {acquired_device}")

        # create endpoint from acquired_device and return
        ep = XCOREDeviceEndpoint(release_callback=XCOREDeviceServer.release)
        ep.connect(port=acquired_device["xscope_port"])
        return ep
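The XCOREDeviceServer helpers above (_atexit, release, acquire) all share the same shape: a single-slot BoundedSemaphore used as a named cross-process mutex, held via the with statement around a read-modify-write of a shared JSON device cache. A stripped-down sketch of just that locking pattern, with hypothetical file, lock-name, and timeout values (only the portalocker usage mirrors the examples above):

import json
import portalocker
from pathlib import Path

CACHE_PATH = Path("devices.json")   # hypothetical shared cache file
LOCK_KEY = "example_devices_lock"   # hypothetical lock name
LOCK_TIMEOUT = 20                   # seconds to wait for the lock before failing

def update_device_cache(update_fn):
    # BoundedSemaphore(1, ...) acts as a cross-process mutex; the with statement
    # acquires it on entry and releases it on exit, so only one process at a time
    # can read and rewrite the cache.
    with portalocker.BoundedSemaphore(1, LOCK_KEY, timeout=LOCK_TIMEOUT):
        entries = json.loads(CACHE_PATH.read_text()) if CACHE_PATH.is_file() else []
        entries = update_fn(entries)
        CACHE_PATH.write_text(json.dumps(entries))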