def hostControlSafe_start(self, hostIds=None):
    """
    Switch on remote computers using Wake-On-LAN (WOL).

    :param hostIds: IDs of the hosts to wake. Must be non-empty.
    :raises BackendMissingDataError: if no host ids are given.
    :return: dict mapping each host id to
        ``{"result": "sent", "error": None}`` on success or
        ``{"result": None, "error": <message>}`` on failure.
    """
    # FIX: the default used to be a mutable list ([]); None avoids the
    # shared-mutable-default pitfall while keeping identical behaviour
    # (an empty/missing value still raises below).
    if not hostIds:
        raise BackendMissingDataError(u"No matching host ids found")

    hosts = self._context.host_getObjects(attributes=['hardwareAddress'], id=hostIds)  # pylint: disable=maybe-no-member
    result = {}
    for host in hosts:
        try:
            if not host.hardwareAddress:
                raise BackendMissingDataError(
                    u"Failed to get hardware address for host '%s'" % host.id)

            mac = host.hardwareAddress.replace(':', '')
            # Magic packet: 6 x 0xFF synchronization stream followed by
            # the MAC address repeated 16 times (as hex digits here).
            data = ''.join(['FFFFFFFFFFFF', mac * 16])

            # FIX: build the raw byte payload with a single join instead
            # of repeated string concatenation (was quadratic).
            payload = ''.join(
                struct.pack('B', int(data[i:i + 2], 16))
                for i in range(0, len(data), 2)
            )

            for broadcastAddress, targetPorts in self._broadcastAddresses.items():
                logger.debug(u"Sending data to network broadcast {0} [{1}]", broadcastAddress, data)

                for port in targetPorts:
                    logger.debug("Broadcasting to port {0!r}", port)
                    # closing() guarantees the UDP socket is released even
                    # if sendto raises.
                    with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)) as sock:
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
                        sock.sendto(payload, (broadcastAddress, port))

            result[host.id] = {"result": "sent", "error": None}
        except Exception as error:
            # Best-effort per host: record the failure, keep going.
            logger.logException(error, LOG_DEBUG)
            result[host.id] = {"result": None, "error": forceUnicode(error)}

    return result
def productPackageFile(filename, tempDir, depotId):
    # Context-manager style generator: prepares a ProductPackageFile for the
    # given depot, yields it to the caller, and guarantees cleanup afterwards.
    # NOTE(review): `self` is used below but is not a parameter, so this
    # appears to be a closure defined inside a method of an object holding
    # `_depotBackend` -- confirm against the enclosing scope.
    try:
        depots = self._depotBackend._context.host_getObjects(id=depotId)
        depot = depots[0]
        del depots
    except IndexError:
        raise BackendMissingDataError(
            u"Depot '%s' not found in backend" % depotId)

    depotLocalUrl = depot.getDepotLocalUrl()
    # Only file:/// URLs can be mapped onto a local directory.
    if not depotLocalUrl.startswith(u'file:///'):
        raise BackendBadValueError(
            u"Value '%s' not allowed for depot local url (has to start with 'file:///')" % depotLocalUrl)
    # Strip the leading 'file://' (7 chars), keeping the absolute path.
    clientDataDir = depotLocalUrl[7:]

    ppf = ProductPackageFile(filename, tempDir=tempDir)
    ppf.setClientDataDir(clientDataDir)
    ppf.getMetaData()

    try:
        yield ppf
        # Only reached if the caller's with-body completed without raising.
        ppf.setAccessRights()
    finally:
        # Always attempt cleanup; a cleanup failure is logged, not raised,
        # so it cannot mask an exception from the with-body.
        try:
            ppf.cleanup()
        except Exception as cleanupError:
            logger.error("Cleanup failed: {0!r}", cleanupError)
def _getDepotConnection(self, depotId):
    """
    Return a JSON-RPC backend connection to the given depot.

    Connections are cached in ``self._depotConnections``; a request for
    this depot itself is answered locally with ``self``.

    :raises BackendMissingDataError: if our own opsi host key cannot be read.
    :raises BackendUnableToConnectError: if connecting to the depot fails.
    """
    depotId = forceHostId(depotId)
    if depotId == self._depotId:
        return self

    connection = self._depotConnections.get(depotId)
    if connection is not None:
        return connection

    if not self._opsiHostKey:
        # Lazily fetch our own host key; it serves as the password below.
        depots = self._context.host_getObjects(id=self._depotId)  # pylint: disable=maybe-no-member
        if not depots or not depots[0].getOpsiHostKey():
            raise BackendMissingDataError(
                u"Failed to get opsi host key for depot '{0}'".format(self._depotId))
        self._opsiHostKey = depots[0].getOpsiHostKey()

    try:
        connection = JSONRPCBackend(
            address=u'https://%s:4447/rpc/backend/dhcpd' % depotId,
            username=self._depotId,
            password=self._opsiHostKey
        )
    except Exception as error:
        raise BackendUnableToConnectError(
            u"Failed to connect to depot '%s': %s" % (depotId, error))

    self._depotConnections[depotId] = connection
    return connection
def hostControlSafe_getActiveSessions(self, hostIds=None):
    """
    Get the active sessions on the given client hosts via opsiclientd.

    :param hostIds: IDs of the hosts to query. Must be non-empty.
    :raises BackendMissingDataError: if no host ids are given.
    """
    # FIX: mutable default ([]) replaced by None; an empty or missing
    # value still raises below, so behaviour is unchanged.
    if not hostIds:
        raise BackendMissingDataError(u"No matching host ids found")

    hostIds = self._context.host_getIdents(id=hostIds, returnType='unicode')  # pylint: disable=maybe-no-member
    return self._opsiclientdRpc(hostIds=hostIds, method='getActiveSessions', params=[])
def hostControlSafe_showPopup(self, message, hostIds=None):
    """
    Show a popup message on the given client hosts via opsiclientd.

    :param message: Text to display on the clients.
    :param hostIds: IDs of the hosts to show the popup on. Must be non-empty.
    :raises BackendMissingDataError: if no host ids are given.
    """
    # FIX: mutable default ([]) replaced by None; an empty or missing
    # value still raises below, so behaviour is unchanged.
    if not hostIds:
        raise BackendMissingDataError(u"No matching host ids found")

    message = forceUnicode(message)
    hostIds = self._context.host_getIdents(id=hostIds, returnType='unicode')  # pylint: disable=maybe-no-member
    return self._opsiclientdRpc(hostIds=hostIds, method='showPopup', params=[message])
def hostControlSafe_fireEvent(self, event, hostIds=None):
    """
    Fire an opsiclientd event on the given client hosts.

    :param event: Name of the event to fire.
    :param hostIds: IDs of the hosts to fire the event on. Must be non-empty.
    :raises BackendMissingDataError: if no host ids are given.
    """
    # FIX: mutable default ([]) replaced by None; an empty or missing
    # value still raises below, so behaviour is unchanged.
    if not hostIds:
        raise BackendMissingDataError(u"No matching host ids found")

    event = forceUnicode(event)
    hostIds = self._context.host_getIdents(id=hostIds, returnType='unicode')  # pylint: disable=maybe-no-member
    return self._opsiclientdRpc(hostIds=hostIds, method='fireEvent', params=[event])
def __init__(self, backend, **kwargs):
    """
    Depotserver backend: wraps ``backend`` and adds package management.

    :raises BackendMissingDataError: if the local FQDN is not a known
        depot in the wrapped backend.
    """
    self._name = 'depotserver'
    ExtendedBackend.__init__(self, backend)

    self._packageLog = os.path.join(LOG_DIR, 'package.log')
    self._sshRSAPublicKeyFile = u'/etc/ssh/ssh_host_rsa_key.pub'

    depotId = forceHostId(getfqdn())
    self._depotId = depotId
    # Refuse to start on a machine that is not registered as a depot.
    if not self._context.host_getIdents(id=depotId):  # pylint: disable=maybe-no-member
        raise BackendMissingDataError(u"Depot '%s' not found in backend" % depotId)

    self._packageManager = DepotserverPackageManager(self)
def hostControlSafe_opsiclientdRpc(self, method, params=None, hostIds=None, timeout=None):
    """
    Execute an arbitrary RPC method on the opsiclientd of the given hosts.

    :param method: Name of the RPC method to call.
    :param params: Parameters passed to the method (defaults to []).
    :param hostIds: IDs of the hosts to call. Must be non-empty.
    :param timeout: Optional timeout forwarded to the RPC layer.
    :raises BackendMissingDataError: if no host ids are given.
    """
    # FIX: mutable defaults ([] for params and hostIds) replaced by None;
    # behaviour for callers relying on the defaults is unchanged.
    if params is None:
        params = []
    if not hostIds:
        raise BackendMissingDataError(u"No matching host ids found")

    hostIds = self._context.host_getIdents(id=hostIds, returnType='unicode')  # pylint: disable=maybe-no-member
    return self._opsiclientdRpc(hostIds=hostIds, method=method, params=params, timeout=timeout)
def _getScalabilityDepotConnection(self, depot, port):
    """
    Return (and cache) an external backend connection to ``depot`` on
    the given ``port``.

    :raises BackendMissingDataError: if the config server's opsi host key
        cannot be read.
    """
    if depot not in self._depotConnections:
        if not self._opsiHostKey:
            # The config server's host key serves as the connection password.
            configservers = self._context.host_getObjects(type="OpsiConfigserver")  # pylint: disable=maybe-no-member
            if not configservers or not configservers[0].getOpsiHostKey():
                raise BackendMissingDataError(
                    u"Failed to get opsi host key for depot '%s'" % self._depotId)
            self._opsiHostKey = configservers[0].getOpsiHostKey()

        self._depotConnections[depot] = self._getExternalBackendConnection(
            depot, self._depotId, self._opsiHostKey, port=port
        )

    return self._depotConnections[depot]
def hostControlSafe_execute(self, command, hostIds=None, waitForEnding=True, captureStderr=True, encoding=None, timeout=300):
    """
    Execute a command on the given client hosts via opsiclientd.

    :param command: Command line to run on the clients.
    :param hostIds: IDs of the hosts to run the command on. Must be non-empty.
    :param waitForEnding: Wait for the command to finish before returning.
    :param captureStderr: Also capture the command's stderr output.
    :param encoding: Optional output encoding forwarded to the client.
    :param timeout: Command timeout in seconds.
    :raises BackendMissingDataError: if no host ids are given.
    """
    # FIX: mutable default ([]) replaced by None; an empty or missing
    # value still raises below, so behaviour is unchanged.
    if not hostIds:
        raise BackendMissingDataError(u"No matching host ids found")

    command = forceUnicode(command)
    hostIds = self._context.host_getIdents(id=hostIds, returnType='unicode')  # pylint: disable=maybe-no-member
    return self._opsiclientdRpc(
        hostIds=hostIds,
        method='execute',
        params=[command, waitForEnding, captureStderr, encoding, timeout])
def _getDepotConnection(self, depotId):
    """
    Return an external backend connection to ``depotId``, cached in
    ``self._depotConnections``; this depot itself is served locally.

    :raises BackendMissingDataError: if our own opsi host key cannot be read.
    """
    depotId = forceHostId(depotId)
    if depotId == self._depotId:
        return self

    if depotId not in self._depotConnections:
        if not self._opsiHostKey:
            # Lazily fetch our own host key; it serves as the password below.
            depots = self._context.host_getObjects(id=self._depotId)  # pylint: disable=maybe-no-member
            if not depots or not depots[0].getOpsiHostKey():
                raise BackendMissingDataError(
                    u"Failed to get opsi host key for depot '%s'" % self._depotId)
            self._opsiHostKey = depots[0].getOpsiHostKey()

        self._depotConnections[depotId] = self._getExternalBackendConnection(
            depotId, self._depotId, self._opsiHostKey
        )

    return self._depotConnections[depotId]
def _replicateMasterToWorkBackend(self):  # pylint: disable=too-many-branches,too-many-locals,too-many-statements
    """
    Replicate the master (config service) backend into the local work
    backend and take a snapshot of the result.

    Steps visible below: fix up missing dependency action requests on the
    master side, replicate depot/client/product data into the work backend,
    copy license data for pending product actions, snapshot the work
    backend, and finally sync credentials and the audit hardware config.
    """
    if not self._masterBackend:
        raise BackendConfigurationError("Master backend undefined")

    # This is needed for the following situation:
    #  - package1 is set to "setup" which defines a dependency to package2 "setup after"
    #  - opsiclientd processes these actions with cached config
    #  - before opsiclientd config sync returns the results back to config service, package3 is set to "setup"
    #  - package3 also defines a dependency to package2 "setup after"
    #  - so package3 will be set to setup on service side too
    #  - now the sync starts again and the actions for package1 and package2 will be set to "none" on service side
    #  - setup of packgage2 which is required by package3 will not be exceuted
    productOnClients = {}
    product_ids_with_action = []
    for productOnClient in self._masterBackend.productOnClient_getObjects(
            clientId=self._clientId):
        productOnClients[productOnClient.productId] = productOnClient
        if productOnClient.actionRequest not in (None, 'none'):
            product_ids_with_action.append(productOnClient.productId)

    if productOnClients and product_ids_with_action:
        updateProductOnClients = []
        for productDependency in self._masterBackend.productDependency_getObjects(
                productId=product_ids_with_action):
            # A dependency fires when the depending product has the triggering
            # action set but the required product does not yet have the
            # required action.
            if (productDependency.requiredAction not in (None, '')
                    and productDependency.productId in productOnClients
                    and productOnClients[productDependency.productId].actionRequest == productDependency.productAction
                    and productDependency.requiredProductId in productOnClients
                    and productOnClients[productDependency.requiredProductId].actionRequest != productDependency.requiredAction):
                logger.notice(
                    "Setting missing required action for dependency %s/%s %s/%s",
                    productDependency.productId,
                    productDependency.productAction,
                    productDependency.requiredProductId,
                    productDependency.requiredAction)
                # NOTE(review): this assigns productAction, yet both the
                # condition and the log line above refer to requiredAction
                # -- verify which action is intended here.
                productOnClients[productDependency.requiredProductId].actionRequest = productDependency.productAction
                updateProductOnClients.append(
                    productOnClients[productDependency.requiredProductId])

        if updateProductOnClients:
            # Update is sufficient, creating a ProductOnClient is not required (see comment above)
            self._masterBackend.productOnClient_updateObjects(
                updateProductOnClients)

    self._cacheBackendInfo(self._masterBackend.backend_info())

    # Rebuild the work backend from scratch and replicate localboot data
    # for this depot/client only (no audit, no licenses yet).
    self._workBackend.backend_deleteBase()
    self._workBackend.backend_createBase()
    br = BackendReplicator(readBackend=self._masterBackend,
                           writeBackend=self._workBackend)
    br.replicate(serverIds=[],
                 depotIds=[self._depotId],
                 clientIds=[self._clientId],
                 groupIds=[],
                 productIds=[],
                 productTypes=['LocalbootProduct'],
                 audit=False,
                 licenses=False)

    self._snapshotBackend.backend_deleteBase()

    # License handling: for every product with a pending action, reuse or
    # acquire a licenseOnClient and copy the related license objects into
    # the work backend.
    licenseOnClients = self._masterBackend.licenseOnClient_getObjects(
        clientId=self._clientId)
    for productOnClient in self._workBackend.productOnClient_getObjects(
            clientId=self._clientId):
        if productOnClient.actionRequest in (None, 'none'):
            continue

        licensePools = self._masterBackend.licensePool_getObjects(
            productIds=[productOnClient.productId])
        if not licensePools:
            logger.debug("No license pool found for product '%s'",
                         productOnClient.productId)
            continue

        licensePool = licensePools[0]
        try:
            licenseOnClient = None
            for loc in licenseOnClients:
                if loc.licensePoolId == licensePool.id:
                    licenseOnClient = loc
                    logger.notice("Reusing existing licenseOnClient '%s'",
                                  licenseOnClient)
                    break
            else:
                logger.notice("Acquiring license for product '%s'",
                              productOnClient.productId)
                licenseOnClient = self._masterBackend.licenseOnClient_getOrCreateObject(
                    clientId=self._clientId,
                    productId=productOnClient.productId)

            if licenseOnClient:
                # Fake deletion
                # This will delete the licenseOnClient (free the license) while syncing config back to server
                # In case licenseOnClient_getObjects will be called on the CacheBackend the licenseOnClients
                # will be recreated, so the objects will be recreated after deletion
                self._fireEvent('objectsDeleted', [licenseOnClient])
                self._fireEvent('backendModified')

                statistics = {
                    "licensePools": 0,
                    "softwareLicenses": 0,
                    "licenseContracts": 0
                }
                for licensePool in self._masterBackend.licensePool_getObjects(
                        id=licenseOnClient.licensePoolId):
                    logger.debug("Storing LicensePool: %s", licensePool)
                    self._workBackend.licensePool_insertObject(licensePool)
                    statistics["licensePools"] += 1

                for softwareLicense in self._masterBackend.softwareLicense_getObjects(
                        id=licenseOnClient.softwareLicenseId):
                    logger.debug("Storing SoftwareLicense: %s",
                                 softwareLicense)
                    # A license's contract must be present before the license
                    # itself is stored.
                    for licenseContract in self._masterBackend.licenseContract_getObjects(
                            id=softwareLicense.licenseContractId):
                        logger.debug("Storing LicenseContract: %s",
                                     licenseContract)
                        self._workBackend.licenseContract_insertObject(
                            licenseContract)
                        statistics["licenseContracts"] += 1

                    self._workBackend.softwareLicense_insertObject(
                        softwareLicense)
                    statistics["softwareLicenses"] += 1

                logger.debug("Storing LicenseOnClient: %s", licenseOnClient)
                self._workBackend.licenseOnClient_insertObject(
                    licenseOnClient)
                logger.notice(
                    "LicenseOnClient stored for product '%s', %s",
                    productOnClient.productId, statistics)
        except Exception as license_sync_error:  # pylint: disable=broad-except
            # License acquisition is best-effort: log and continue with the
            # next product.
            logger.error("Failed to acquire license for product '%s': %s",
                         productOnClient.productId, license_sync_error)

    # Snapshot the freshly replicated work backend.
    self._snapshotBackend.backend_createBase()
    br = BackendReplicator(readBackend=self._workBackend,
                           writeBackend=self._snapshotBackend)
    br.replicate()

    if self._clientId != config.get('global', 'host_id'):
        logger.error(
            "Client id '%s' does not match config global.host_id '%s'",
            self._clientId, config.get('global', 'host_id'))

    clients = self._workBackend.host_getObjects(id=self._clientId)
    if not clients:
        # NOTE(review): this string uses {self._clientId} but has no
        # f-prefix, so the placeholder is emitted literally -- confirm
        # whether an f-string was intended.
        raise BackendMissingDataError(
            "Host '{self._clientId}' not found in replicated backend")

    opsiHostKey = clients[0].getOpsiHostKey()
    if opsiHostKey != config.get('global', 'opsi_host_key'):
        logger.error(
            "Host key '%s' from work backend does not match config global.opsi_host_key '%s'",
            opsiHostKey, config.get('global', 'opsi_host_key'))

    # NOTE(review): the username '******' below looks like a redacted
    # value from the original source -- verify the real account name.
    password = self._masterBackend.user_getCredentials(
        username='******', hostId=self._clientId)
    password = password['password']
    logger.notice(
        "Creating opsi passwd file '%s' using opsi host key '%s...'",
        self._opsiPasswdFile, opsiHostKey[:10])
    # The stored password is blowfish-encrypted with the host key.
    self.user_setCredentials(username='******',
                             password=blowfishDecrypt(opsiHostKey, password))

    # Persist the audit hardware config locally and apply it to the
    # work backend.
    auditHardwareConfig = self._masterBackend.auditHardware_getConfig()
    with codecs.open(self._auditHardwareConfigFile, 'w', 'utf8') as file:
        file.write(json.dumps(auditHardwareConfig))

    self._workBackend._setAuditHardwareConfig(auditHardwareConfig)  # pylint: disable=protected-access
    self._workBackend.backend_createBase()
def getDefaultConfigs(backend, configServer=None, pathToSMBConf=SMB_CONF):
    """
    Generator yielding a Config object for each opsi default configuration
    that is missing from ``backend``.

    :param backend: Backend whose existing config idents are checked.
    :param configServer: Optional config server object; configserver-dependent
        entries (configserver URL, depot id) are only yielded when given.
    :param pathToSMBConf: Path to the samba configuration used to read the
        Windows domain.
    :raises BackendMissingDataError: if ``configServer`` has no IP address.
    """
    configIdents = set(backend.config_getIdents(returnType='unicode'))  # pylint: disable=maybe-no-member

    if Posix.isUCS():
        # We have a domain present and people might want to change this.
        if u'clientconfig.depot.user' not in configIdents:
            LOGGER.debug("Missing clientconfig.depot.user - adding it.")

            depotuser = u'pcpatch'
            # Prefer the domain from UCR; fall back to the samba config.
            depotdomain = readWindowsDomainFromUCR()
            if not depotdomain:
                LOGGER.info(u"Reading domain from UCR returned no result. "
                            u"Trying to read from samba config.")
                depotdomain = readWindowsDomainFromSambaConfig(pathToSMBConf)

            if depotdomain:
                depotuser = u'\\'.join((depotdomain, depotuser))

            LOGGER.debug(u"Using {0!r} as clientconfig.depot.user.", depotuser)

            yield UnicodeConfig(id=u'clientconfig.depot.user',
                                description=u'User for depot share',
                                possibleValues=[],
                                defaultValues=[depotuser],
                                editable=True,
                                multiValue=False)

    if configServer and u'clientconfig.configserver.url' not in configIdents:
        LOGGER.debug("Missing clientconfig.configserver.url - adding it.")
        ipAddress = configServer.getIpAddress()
        if not ipAddress:
            raise BackendMissingDataError(
                "No IP address configured for the configserver {0}".format(
                    configServer.id))

        yield UnicodeConfig(
            id=u'clientconfig.configserver.url',
            description=u'URL(s) of opsi config service(s) to use',
            possibleValues=[u'https://%s:4447/rpc' % ipAddress],
            defaultValues=[u'https://%s:4447/rpc' % ipAddress],
            editable=True,
            multiValue=True)

    if configServer and u'clientconfig.depot.id' not in configIdents:
        LOGGER.debug(u"Missing clientconfig.depot.id - adding it.")
        yield UnicodeConfig(id=u'clientconfig.depot.id',
                            description=u'ID of the opsi depot to use',
                            possibleValues=[configServer.getId()],
                            defaultValues=[configServer.getId()],
                            editable=True,
                            multiValue=False)

    if u'clientconfig.depot.dynamic' not in configIdents:
        LOGGER.debug(u"Missing clientconfig.depot.dynamic - adding it.")
        yield BoolConfig(id=u'clientconfig.depot.dynamic',
                         description=u'Use dynamic depot selection',
                         defaultValues=[False])

    if u'clientconfig.depot.drive' not in configIdents:
        LOGGER.debug(u"Missing clientconfig.depot.drive - adding it.")
        # All possible Windows drive letters plus 'dynamic' (auto-select).
        yield UnicodeConfig(id=u'clientconfig.depot.drive',
                            description=u'Drive letter for depot share',
                            possibleValues=[
                                u'a:', u'b:', u'c:', u'd:', u'e:', u'f:',
                                u'g:', u'h:', u'i:', u'j:', u'k:', u'l:',
                                u'm:', u'n:', u'o:', u'p:', u'q:', u'r:',
                                u's:', u't:', u'u:', u'v:', u'w:', u'x:',
                                u'y:', u'z:', u'dynamic'
                            ],
                            defaultValues=[u'p:'],
                            editable=False,
                            multiValue=False)

    if u'clientconfig.depot.protocol' not in configIdents:
        LOGGER.debug(u"Missing clientconfig.depot.protocol - adding it.")
        yield UnicodeConfig(id=u'clientconfig.depot.protocol',
                            description=u'Protocol for file transfer',
                            possibleValues=['cifs', 'webdav'],
                            defaultValues=['cifs'],
                            editable=False,
                            multiValue=False)

    if u'clientconfig.windows.domain' not in configIdents:
        LOGGER.debug(u"Missing clientconfig.windows.domain - adding it.")
        yield UnicodeConfig(
            id=u'clientconfig.windows.domain',
            description=u'Windows domain',
            possibleValues=[],
            defaultValues=[readWindowsDomainFromSambaConfig(pathToSMBConf)],
            editable=True,
            multiValue=False)

    if u'opsi-linux-bootimage.append' not in configIdents:
        LOGGER.debug(u"Missing opsi-linux-bootimage.append - adding it.")
        yield UnicodeConfig(
            id=u'opsi-linux-bootimage.append',
            description=u'Extra options to append to kernel command line',
            possibleValues=[
                u'acpi=off', u'irqpoll', u'noapic', u'pci=nomsi',
                u'vga=normal', u'reboot=b'
            ],
            defaultValues=[u''],
            editable=True,
            multiValue=True)

    if u'license-management.use' not in configIdents:
        LOGGER.debug(u"Missing license-management.use - adding it.")
        yield BoolConfig(id=u'license-management.use',
                         description=u'Activate license management',
                         defaultValues=[False])

    if u'software-on-demand.active' not in configIdents:
        LOGGER.debug(u"Missing software-on-demand.active - adding it.")
        yield BoolConfig(id=u'software-on-demand.active',
                         description=u'Activate software-on-demand',
                         defaultValues=[False])

    if u'software-on-demand.product-group-ids' not in configIdents:
        LOGGER.debug(
            u"Missing software-on-demand.product-group-ids - adding it.")
        yield UnicodeConfig(
            id=u'software-on-demand.product-group-ids',
            description=(u'Product group ids containing products which are '
                         u'allowed to be installed on demand'),
            possibleValues=[u'software-on-demand'],
            defaultValues=[u'software-on-demand'],
            editable=True,
            multiValue=True)

    if u'product_sort_algorithm' not in configIdents:
        LOGGER.debug(u"Missing product_sort_algorithm - adding it.")
        yield UnicodeConfig(id=u'product_sort_algorithm',
                            description=u'Product sorting algorithm',
                            possibleValues=[u'algorithm1', u'algorithm2'],
                            defaultValues=[u'algorithm1'],
                            editable=False,
                            multiValue=False)

    if u'clientconfig.dhcpd.filename' not in configIdents:
        LOGGER.debug(u"Missing clientconfig.dhcpd.filename - adding it.")
        yield UnicodeConfig(
            id=u'clientconfig.dhcpd.filename',
            description=(
                u"The name of the file that will be presented to the "
                u"client on an TFTP request. For an client that should "
                u"boot via UEFI this must include the term 'elilo'."),
            possibleValues=[u'elilo'],
            defaultValues=[u''],
            editable=True,
            multiValue=False)