def waitForClient(self):
	"""Create the named pipe and block until a client connects.

	Returns a tuple ``(pipe_handle, client_id_string)`` on success.
	Raises ``Exception`` when the pipe cannot be created and
	``RuntimeError`` when the connect fails (the handle is closed first).
	"""
	logger.trace("Creating pipe %s", self._pipeName)
	# win32 constants for CreateNamedPipe / ConnectNamedPipe
	PIPE_ACCESS_DUPLEX = 0x3
	PIPE_TYPE_MESSAGE = 0x4
	PIPE_READMODE_MESSAGE = 0x2
	PIPE_WAIT = 0
	PIPE_UNLIMITED_INSTANCES = 255
	NMPWAIT_USE_DEFAULT_WAIT = 0
	INVALID_HANDLE_VALUE = -1
	# Duplex message-mode pipe; both buffers sized self.bufferSize
	self._pipe = windll.kernel32.CreateNamedPipeA(
		self._pipeName.encode("ascii"),
		PIPE_ACCESS_DUPLEX,
		PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
		PIPE_UNLIMITED_INSTANCES,
		self.bufferSize,
		self.bufferSize,
		NMPWAIT_USE_DEFAULT_WAIT,
		None)
	if self._pipe == INVALID_HANDLE_VALUE:
		raise Exception(
			f"Failed to create named pipe: {windll.kernel32.GetLastError()}"
		)
	logger.debug("Pipe %s created, waiting for client to connect", self._pipeName)
	# This call is blocking until a client connects
	fConnected = windll.kernel32.ConnectNamedPipe(self._pipe, None)
	# A client may already be connected between CreateNamedPipe and
	# ConnectNamedPipe; error 535 (ERROR_PIPE_CONNECTED) means success.
	if fConnected == 0 and windll.kernel32.GetLastError() == 535:  # ERROR_PIPE_CONNECTED
		fConnected = 1
	if fConnected == 1:
		logger.notice("Client connected to %s", self._pipeName)
		self._client_id += 1
		return (self._pipe, f"#{self._client_id}")
	# Connect failed for another reason: close the handle and report
	error = windll.kernel32.GetLastError()
	windll.kernel32.CloseHandle(self._pipe)
	raise RuntimeError(f"Failed to connect to pipe (error: {error})")
def config_getObjects(self, attributes=[], **filter):  # pylint: disable=dangerous-default-value,redefined-builtin
	"""Fetch Config objects from the wrapped backend, overriding the
	default values of 'clientconfig.depot.id' with this instance's depot.

	:param attributes: attribute list forwarded to the backend call
	:param filter:     backend filter keywords
	:return: the (possibly patched) list of Config objects
	"""
	fetched = self._backend.config_getObjects(attributes, **filter)
	for config_object in fetched:
		if config_object.id == 'clientconfig.depot.id':
			# Force the depot default to the locally selected depot
			config_object.defaultValues = [self._depotId]
	logger.trace("config_getObjects returning %s", fetched)
	return fetched
def setup(self):
	"""Create the listening AF_UNIX stream socket for this controller.

	Any previously created socket is torn down first; the new socket is
	bound to ``self._socketName`` and put into listening mode.
	"""
	logger.trace("Creating socket %s", self._socketName)
	# Drop any stale socket before creating a fresh one
	self.teardown()
	self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
	sock = self._socket
	sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
	sock.bind(self._socketName)
	# Backlog of one: a single pending client connection is expected
	sock.listen(1)
	logger.trace("Socket %s created", self._socketName)
def write(self, data):
	"""Send *data* over the current connection.

	``str`` payloads are encoded with the session encoding first.
	A no-op when there is no data or no connection; any send failure
	is wrapped in a ``RuntimeError``.
	"""
	if not data:
		return
	conn = self._connection
	if not conn:
		return
	logger.trace("Writing to connection %s", conn)
	payload = data if isinstance(data, bytes) else data.encode(self._encoding)
	conn.settimeout(self._writeTimeout)
	try:
		conn.sendall(payload)
	except Exception as err:  # pylint: disable=broad-except
		raise RuntimeError(f"Failed to write to socket: {err}") from err
def read(self):
	"""Receive up to 4096 bytes from the current connection.

	Returns the decoded text (an empty string when the peer closed the
	connection, which also triggers ``clientDisconnected()``), or
	``None`` when receiving or decoding fails for any reason.
	"""
	connection = self._connection
	logger.trace("Reading from connection %s", connection)
	connection.settimeout(self._readTimeout)
	try:
		received = connection.recv(4096)
		if not received:
			# An empty read means the peer closed the connection
			self.clientDisconnected()
		return received.decode(self._encoding)
	except Exception as err:  # pylint: disable=broad-except
		logger.trace("Failed to read from socket: %s", err)
		return None
def processEvent(self, event): logger.trace("check lock (ocd), currently %s -> locking if not True", self._opsiclientd.eventLock.locked()) # if triggered by Basic.py fire_event, lock is already acquired if not self._opsiclientd.eventLock.locked(): self._opsiclientd.eventLock.acquire() try: logger.info("GUI started") self._guiStarted.set() finally: logger.trace("release lock (WaitForGUI)") self._opsiclientd.eventLock.release()
def __init__(self, controller, connection, client_id):
	"""Thread handling a single connected client.

	:param controller: the owning connection controller
	:param connection: the established client connection
	:param client_id:  identifier assigned to this client
	"""
	threading.Thread.__init__(self)
	self._controller = controller
	self._connection = connection
	self.client_id = client_id
	# I/O parameters for talking to the client
	self._readTimeout = 1
	self._writeTimeout = 1
	self._encoding = "utf-8"
	# State reported by / about the client
	self.clientInfo = []
	self.login_capable = False
	# Serializes communication with the client; signals shutdown
	self.comLock = threading.Lock()
	self._stopEvent = threading.Event()
	self._stopEvent.clear()
	logger.trace(
		"%s created controller=%s connection=%s",
		self.__class__.__name__, self._controller, self._connection
	)
def _createInstanceMethods(self):
	"""Dynamically attach forwarding methods for the public backend API.

	For every public method of ``Backend`` and ``ConfigDataBackend``
	(except the ones listed below, which have dedicated local
	implementations or must not be forwarded) a wrapper with the same
	signature is bound to this instance; it delegates to
	``self._executeMethod``.
	"""
	# Methods that are implemented locally / must not be auto-forwarded
	skipped_methods = (
		'backend_info', 'backend_getLicensingInfo',
		'user_getCredentials', 'user_setCredentials', 'log_write',
		'licenseOnClient_getObjects', 'configState_getObjects',
		'config_getObjects'
	)
	for Class in (Backend, ConfigDataBackend):
		for methodName, funcRef in inspect.getmembers(Class, inspect.isfunction):
			if methodName.startswith('_') or methodName in skipped_methods:
				continue
			(argString, callString) = getArgAndCallString(funcRef)
			logger.trace("Adding method '%s' to execute on work backend", methodName)
			# Build the wrapper in an explicit namespace dict. exec() is
			# not guaranteed to make new names visible in the enclosing
			# function's local scope in Python 3, so the previous
			# exec() + eval(methodName) pair relied on a CPython
			# implementation detail.
			namespace = {}
			exec(  # pylint: disable=exec-used
				f'def {methodName}(self, {argString}): return self._executeMethod("{methodName}", {callString})',
				namespace
			)
			setattr(self, methodName, MethodType(namespace[methodName], self))
def cancelOthersAndWaitUntilReady(self):
	"""Cancel all running non-login event processing threads and wait
	until they have terminated.

	Waits up to ``WAIT_SECONDS`` per thread; raises ``ValueError`` when
	a thread does not stop in time. Afterwards the config cache is
	marked dirty because the cache mechanism is bypassed for the
	upcoming installation (best effort - failures are only logged).
	"""
	WAIT_SECONDS = 30
	with self._eptListLock:
		eptListCopy = self._eventProcessingThreads.copy()
		for ept in self._eventProcessingThreads:
			if ept.event.eventConfig.actionType != 'login':
				#trying to cancel all non-login events - RuntimeError if impossible
				logger.notice("Canceling event processing thread %s (ocd)", ept)
				ept.cancel(no_lock=True)
		logger.trace("Waiting for cancellation to conclude")
	# Use copy to allow for epts to be removed from eptList
	# (removal needs _eptListLock, which must be free while we wait)
	for ept in eptListCopy:
		if ept.event.eventConfig.actionType != 'login':
			logger.trace("Waiting for ending of ept %s (ocd)", ept)
			# Poll once per second instead of joining, so the timeout
			# can be enforced per thread
			for _ in range(WAIT_SECONDS):
				if not ept or not ept.running:
					break
				time.sleep(1)
			if ept and ept.running:
				raise ValueError(
					f"Event {ept.event.eventConfig.name} didn't stop after {WAIT_SECONDS} seconds - aborting"
				)
			logger.debug("Successfully canceled event '%s' of type %s", ept.event.eventConfig.name, ept.event.eventConfig.actionType)
	try:
		cache_service = self.getCacheService()
		logger.debug(
			"Got config_service with state: %s - marking dirty",
			cache_service.getConfigCacheState())
		# mark cache as dirty when bypassing cache mechanism for installation
		cache_service.setConfigCacheFaulty()
	except RuntimeError as err:
		logger.info(
			"Could not mark config service cache dirty: %s",
			err, exc_info=True)
def read(self):
	"""Read one complete message from the named pipe.

	Loops over ``ReadFile`` calls, accumulating partial reads while the
	pipe reports ERROR_MORE_DATA. Returns the decoded message (possibly
	empty). On ERROR_BROKEN_PIPE the client is marked disconnected.
	"""
	data = b""
	while True:
		logger.trace("Reading from pipe")
		chBuf = create_string_buffer(self._controller.bufferSize)
		cbRead = c_ulong(0)
		fSuccess = windll.kernel32.ReadFile(self._connection, chBuf, self._controller.bufferSize, byref(cbRead), None)
		logger.trace("Read %d bytes from pipe", cbRead.value)
		if cbRead.value > 0:
			# chBuf.value stops at the first NUL byte - the writer
			# NUL-terminates its messages (see write())
			data += chBuf.value
		if fSuccess != 1:
			if windll.kernel32.GetLastError() == 234:  # ERROR_MORE_DATA
				# Message larger than the buffer - keep reading
				continue
			if data:
				# Return whatever was collected despite the failure
				return data.decode()
			if windll.kernel32.GetLastError() == 109:  # ERROR_BROKEN_PIPE
				self.clientDisconnected()
		return data.decode()
def write(self, data):
	"""Write *data* (NUL-terminated) to the named pipe and flush it.

	``str`` payloads are encoded with the session encoding first.
	ERROR_NO_DATA / ERROR_BROKEN_PIPE mark the client as disconnected;
	any other failure, or a short write, raises ``RuntimeError``.
	"""
	if not data:
		return
	logger.trace("Writing to pipe")
	if not isinstance(data, bytes):
		data = data.encode(self._encoding)
	# Messages are NUL-terminated on the wire (see read())
	data += b"\0"
	cbWritten = c_ulong(0)
	success = windll.kernel32.WriteFile(
		self._connection, c_char_p(data), len(data), byref(cbWritten), None
	)
	windll.kernel32.FlushFileBuffers(self._connection)
	logger.trace("Wrote %d bytes to pipe", cbWritten.value)
	if not success:
		error = windll.kernel32.GetLastError()
		if error not in (232, 109):  # ERROR_NO_DATA, ERROR_BROKEN_PIPE
			raise RuntimeError(f"Failed to write to pipe (error: {error})")
		self.clientDisconnected()
		return
	if cbWritten.value != len(data):
		raise RuntimeError(
			f"Failed to write all bytes to pipe ({cbWritten.value}/{len(data)})",
		)
def processEvent(self, event):
	"""Process *event*: cancel conflicting events, then run it to
	completion in a dedicated EventProcessingThread.

	The opsiclientd-wide event lock is held while other events are
	canceled and the new thread is registered; it is released before the
	(blocking) wait for the thread to finish.
	"""
	logger.notice("Processing event %s", event)
	# Build a human-readable description for the timeline entry
	description = f"Event {event.eventConfig.getId()} occurred\n"
	description += "Config:\n"
	_config = event.eventConfig.getConfig()
	configKeys = list(_config.keys())
	configKeys.sort()
	for configKey in configKeys:
		description += f"{configKey}: {_config[configKey]}\n"
	logger.trace("check lock (ocd), currently %s -> locking if not True", self.eventLock.locked())
	# if triggered by Basic.py fire_event, lock is already acquired
	if not self.eventLock.locked():
		self.eventLock.acquire()  # pylint: disable=consider-using-with
	try:
		timeline.addEvent(title=f"Event {event.eventConfig.getName()}", description=description, category="event_occurrence")
		# if processEvent is called through Event.fireEvent(), this check is already done
		#self.canProcessEvent(event)
		# A user login event should not cancel running non-login Event
		if event.eventConfig.actionType != 'login':
			self.cancelOthersAndWaitUntilReady()
	except (ValueError, RuntimeError) as err:
		# skipping execution if event cannot be created
		logger.warning("Could not start event: %s", err, exc_info=True)
		logger.trace("release lock (ocd cannot process event)")
		self.eventLock.release()
		return
	try:
		logger.debug("Creating new ept (ocd)")
		eventProcessingThread = EventProcessingThread(self, event)
		self.createActionProcessorUser(recreate=False)
		with self._eptListLock:
			self._eventProcessingThreads.append(eventProcessingThread)
	finally:
		# Release before the blocking join below so new events can be
		# evaluated while this one runs
		logger.trace("release lock (ocd)")
		self.eventLock.release()
	try:
		eventProcessingThread.start()
		eventProcessingThread.join()
		logger.notice("Done processing event %s", event)
	finally:
		# Always deregister the thread; drop the action processor user
		# once no event threads remain
		with self._eptListLock:
			self._eventProcessingThreads.remove(eventProcessingThread)
			if not self._eventProcessingThreads:
				try:
					self.deleteActionProcessorUser()
				except Exception as err:  # pylint: disable=broad-except
					logger.warning(err)
def selectDepotserver(self, configService, mode="mount", event=None, productIds=[], masterOnly=False):  # pylint: disable=dangerous-default-value,too-many-arguments,too-many-locals,too-many-branches,too-many-statements,redefined-builtin
	"""Select the depot server (and access URL) for the given products.

	Order of precedence:
	1. the local product cache, when the event uses cached products;
	2. a fixed URL / depot id from the client's config states;
	3. the master depot from the client-to-depot assignment, optionally
	   replaced via the server-provided dynamic depot selection
	   algorithm when 'clientconfig.depot.dynamic' is enabled.
	The result is stored via self.set('depot_server', ...).

	:param configService: connected config service backend (required
	    unless the cached-products shortcut applies)
	:param mode: "mount" or "sync" (asserted)
	:param event: optional event whose config may override protocol
	    and force cached products
	:param productIds: products the depot must be able to serve
	:param masterOnly: when True, dynamic depot selection is disabled
	"""
	assert mode in ("mount", "sync")
	productIds = forceProductIdList(productIds)
	logger.notice("Selecting depot for products %s", productIds)
	logger.notice("MasterOnly --> '%s'", masterOnly)
	if event and event.eventConfig.useCachedProducts:
		# Shortcut: serve everything from the local depot cache
		cacheDepotDir = os.path.join(
			self.get('cache_service', 'storage_dir'),
			'depot').replace('\\', '/').replace('//', '/')
		logger.notice("Using depot cache: %s", cacheDepotDir)
		self.set_temporary_depot_path(cacheDepotDir)
		if RUNNING_ON_WINDOWS:
			self.setTemporaryDepotDrive(cacheDepotDir.split(':')[0] + ':')
		else:
			self.setTemporaryDepotDrive(cacheDepotDir)
		self.set(
			'depot_server', 'url',
			'smb://localhost/noshare/' + ('/'.join(cacheDepotDir.split('/')[1:])))
		return
	if not configService:
		raise Exception("Not connected to config service")
	selectedDepot = None
	configService.backend_setOptions({"addConfigStateDefaults": True})
	depotIds = []
	configStates = []
	dynamicDepot = False
	depotProtocol = 'cifs'  # default protocol
	configStates = configService.configState_getObjects(configId=[
		'clientconfig.depot.dynamic', 'clientconfig.depot.protocol',
		'opsiclientd.depot_server.depot_id', 'opsiclientd.depot_server.url'
	], objectId=self.get('global', 'host_id'))
	# Evaluate the client's depot-related config states
	for configState in configStates:
		if not configState.values or not configState.values[0]:
			continue
		if configState.configId == 'opsiclientd.depot_server.url' and configState.values:
			# A fixed depot URL overrides everything else
			try:
				depotUrl = forceUrl(configState.values[0])
				self.set('depot_server', 'depot_id', '')
				self.set('depot_server', 'url', depotUrl)
				logger.notice(
					"Depot url was set to '%s' from configState %s",
					depotUrl, configState)
				return
			except Exception as err:  # pylint: disable=broad-except
				logger.error(
					"Failed to set depot url from values %s in configState %s: %s",
					configState.values, configState, err)
		elif configState.configId == 'opsiclientd.depot_server.depot_id' and configState.values:
			try:
				depotId = forceHostId(configState.values[0])
				depotIds.append(depotId)
				logger.notice("Depot was set to '%s' from configState %s", depotId, configState)
			except Exception as err:  # pylint: disable=broad-except
				logger.error(
					"Failed to set depot id from values %s in configState %s: %s",
					configState.values, configState, err)
		elif not masterOnly and (configState.configId == 'clientconfig.depot.dynamic') and configState.values:
			dynamicDepot = forceBool(configState.values[0])
		elif configState.configId == 'clientconfig.depot.protocol' and configState.values:
			depotProtocol = configState.values[0]
			logger.info("Using depot protocol '%s' from config state '%s'", depotProtocol, configState.configId)
	# The event config may override the protocol from the config states
	if event and event.eventConfig.depotProtocol:
		logger.info("Using depot protocol '%s' from event '%s'", event.eventConfig.depotProtocol, event.eventConfig.getName())
		depotProtocol = event.eventConfig.depotProtocol
	if depotProtocol not in ("webdav", "cifs"):
		logger.error("Invalid protocol %s specified, using cifs", depotProtocol)
		depotProtocol = "cifs"
	#if depotProtocol == "webdav" and mode == "mount" and not RUNNING_ON_LINUX and not self.get('global', 'install_opsi_ca_into_os_store'):
	#	logger.error("Using cifs instead of webdav to mount depot share because global.install_opsi_ca_into_os_store is disabled")
	#	depotProtocol = "cifs"
	if dynamicDepot:
		if not depotIds:
			logger.info("Dynamic depot selection enabled")
		else:
			logger.info(
				"Dynamic depot selection enabled, but depot is already selected"
			)
	else:
		logger.info("Dynamic depot selection disabled")
	if not depotIds:
		# No fixed depot configured: ask the service for the assignment
		clientToDepotservers = configService.configState_getClientToDepotserver(
			clientIds=[self.get('global', 'host_id')],
			masterOnly=bool(not dynamicDepot),
			productIds=productIds)
		if not clientToDepotservers:
			raise Exception("Failed to get depot config from service")
		depotIds = [clientToDepotservers[0]['depotId']]
		if dynamicDepot:
			depotIds.extend(clientToDepotservers[0].get(
				'alternativeDepotIds', []))
	logger.debug("Fetching depot servers %s from config service", depotIds)
	masterDepot = None
	alternativeDepots = []
	# depotIds[0] is the master; all others are alternatives
	for depot in configService.host_getObjects(type='OpsiDepotserver', id=depotIds):
		logger.trace("Depot: %s", depot)
		if depot.id == depotIds[0]:
			masterDepot = depot
		else:
			alternativeDepots.append(depot)
	if not masterDepot:
		raise Exception(
			f"Failed to get info for master depot '{depotIds[0]}'")
	logger.info("Master depot for products %s is %s", productIds, masterDepot.id)
	selectedDepot = masterDepot
	if dynamicDepot:
		if alternativeDepots:
			logger.info("Got alternative depots for products: %s", productIds)
			for index, depot in enumerate(alternativeDepots, start=1):
				logger.info("%d. alternative depot is %s", index, depot.id)
			try:
				# Collect this client's network configuration for the
				# server-provided selection algorithm
				clientConfig = {
					"clientId": self.get('global', 'host_id'),
					"opsiHostKey": self.get('global', 'opsi_host_key'),
					"ipAddress": None,
					"netmask": None,
					"defaultGateway": None
				}
				try:
					gateways = netifaces.gateways()  # pylint: disable=c-extension-no-member
					clientConfig["defaultGateway"], iface_name = gateways[
						'default'][netifaces.AF_INET]  # pylint: disable=c-extension-no-member
					addr = netifaces.ifaddresses(iface_name)[
						netifaces.AF_INET][0]  # pylint: disable=c-extension-no-member
					clientConfig["netmask"] = addr["netmask"]
					clientConfig["ipAddress"] = addr["addr"]
				except Exception as gwe:
					raise RuntimeError(
						f"Failed to get network interface with default gateway: {gwe}"
					) from gwe
				logger.info(
					"Passing client configuration to depot selection algorithm: %s",
					clientConfig)
				# NOTE(review): executes Python source supplied by the
				# (trusted) config service; it must define selectDepot()
				depotSelectionAlgorithm = configService.getDepotSelectionAlgorithm()
				logger.trace("depotSelectionAlgorithm:\n%s", depotSelectionAlgorithm)
				currentLocals = locals()
				exec(depotSelectionAlgorithm, None, currentLocals)  # pylint: disable=exec-used
				selectDepot = currentLocals['selectDepot']
				selectedDepot = selectDepot(
					clientConfig=clientConfig,
					masterDepot=masterDepot,
					alternativeDepots=alternativeDepots)
				if not selectedDepot:
					selectedDepot = masterDepot
			except Exception as err:  # pylint: disable=broad-except
				logger.error("Failed to select depot: %s", err, exc_info=True)
		else:
			logger.info("No alternative depot for products: %s", productIds)
	logger.notice("Selected depot for mode '%s' is '%s', protocol '%s'", mode, selectedDepot, depotProtocol)
	self.set('depot_server', 'depot_id', selectedDepot.id)
	if depotProtocol == 'webdav':
		self.set('depot_server', 'url', selectedDepot.depotWebdavUrl)
	else:
		self.set('depot_server', 'url', selectedDepot.depotRemoteUrl)
def fireEvent(self, event=None, can_cancel=False):
	"""Notify all registered event listeners of *event*.

	Each listener is notified in its own thread (so a slow listener
	cannot block the others). The opsiclientd event lock is acquired
	here; when the listener threads are successfully started the lock is
	deliberately kept (the processing listeners release it later),
	otherwise it is released in the finally block.

	:raises ValueError: when the event lock cannot be acquired within
	    5 seconds, or when a listener's canProcessEvent() rejects it.
	"""
	logger.debug("Trying to fire event %s", event)
	if self._stopped:
		logger.debug('%s is stopped, not firing event.', self)
		return
	if not event:
		logger.info("No event to fire")
		return
	self._lastEventOccurence = time.time()
	logger.info("Firing event '%s'", event)
	logger.info("Event info:")
	for (key, value) in event.eventInfo.items():
		logger.info(" %s: %s", key, value)

	class FireEventThread(threading.Thread):
		# Per-listener notifier thread; honors the event's
		# configured notification delay before dispatching.
		def __init__(self, eventListener, event):
			threading.Thread.__init__(self)
			self._eventListener = eventListener
			self._event = event

		def run(self):
			with opsicommon.logging.log_context({
				'instance': 'event generator ' + self._event.eventConfig.getId()
			}):
				if self._event.eventConfig.notificationDelay > 0:
					logger.debug(
						"Waiting %d seconds before notifying listener '%s' of event '%s'",
						self._event.eventConfig.notificationDelay, self._eventListener, self._event)
					time.sleep(self._event.eventConfig.notificationDelay)
				try:
					logger.info("Calling processEvent on listener %s", self._eventListener)
					self._eventListener.processEvent(self._event)
				except Exception as err:  # pylint: disable=broad-except
					logger.error(err, exc_info=True)

	logger.info("Starting FireEventThread for listeners: %s", self._eventListeners)
	keep_lock = False
	logger.trace("acquire lock (Basic), currently %s", self._opsiclientd.eventLock.locked())
	# timeout should be less than 15s as this is default opsi-admin call timeout
	if not self._opsiclientd.eventLock.acquire(timeout=5):
		raise ValueError(
			"Could not get event handling lock due to another event currently running"
		)
	try:
		for listener in self._eventListeners:
			# Check if all event listeners can handle the event
			# raises ValueError if another event is already running
			listener.canProcessEvent(event, can_cancel=can_cancel)
		for listener in self._eventListeners:
			# Create a new thread for each event listener
			FireEventThread(listener, event).start()
		keep_lock = True
		logger.debug("keeping event processing lock (Basic)")
	finally:
		if not keep_lock:
			logger.trace("release lock (Basic)")
			self._opsiclientd.eventLock.release()