def process(self, context):
    '''Discover and report configuration files declared in the plug-in descriptor.

    @types: applications.ApplicationSignatureContext -> None
    Results are accumulated in context.resultsVector.
    '''
    component = context.application.getApplicationComponent()
    pluginDescriptor = component.getExplicitPluginById(ConfigurationFilesPlugin.PLUGIN_ID)
    if not pluginDescriptor:
        return
    root = pluginDescriptor.getRootElement()
    fileElements = root.getChildren(ConfigurationFilesPlugin.ELEMENT_CONFIG_FILE)
    # DOM -> raw file locations, lazily dropping empty values
    locations = ifilter(None, imap(self._getLocationFromElement, fileElements))
    # substitute parse-rule expressions inside each location
    ruleContexts = context.application.getParseRuleContexts()
    evaluate = fptools.partiallyApply(self._parseLocation, fptools._, ruleContexts)
    resolvedLocations = ifilter(None, imap(evaluate, locations))
    # fetch file content from the remote file system
    fs = file_system.createFileSystem(context.client)
    fetch = fptools.partiallyApply(self._retrieveConfigFile, fptools._, fs)
    files = ifilter(None, imap(fetch, resolvedLocations))
    # build OSHs for each successfully retrieved file and publish them
    reporter = file_topology.Reporter(file_topology.Builder())
    report = fptools.partiallyApply(self._reportConfigFile, fptools._, reporter, context)
    for osh in ifilter(None, imap(report, files)):
        context.resultsVector.add(osh)
def process(self, context):
    # Plug-in entry point: read config-file locations from the plug-in
    # descriptor, resolve them, retrieve the files and report them as OSHs.
    # NOTE: all intermediate collections are lazy (ifilter/imap), so the work
    # only happens while iterating `oshs` in the final loop.
    descriptor = context.application.getApplicationComponent().getExplicitPluginById(
        ConfigurationFilesPlugin.PLUGIN_ID
    )
    if not descriptor:
        return
    element = descriptor.getRootElement()
    configElements = element.getChildren(ConfigurationFilesPlugin.ELEMENT_CONFIG_FILE)
    # parse DOM
    paths = ifilter(None, imap(self._getLocationFromElement, configElements))
    # evaluate expressions
    parseRuleContexts = context.application.getParseRuleContexts()
    parseFn = fptools.partiallyApply(self._parseLocation, fptools._, parseRuleContexts)
    parsedPaths = ifilter(None, imap(parseFn, paths))
    # retrieve content
    fileSystem = file_system.createFileSystem(context.client)
    retrieveFn = fptools.partiallyApply(self._retrieveConfigFile, fptools._, fileSystem)
    configFiles = ifilter(None, imap(retrieveFn, parsedPaths))
    # report
    builder = file_topology.Builder()
    reporter = file_topology.Reporter(builder)
    reportFn = fptools.partiallyApply(self._reportConfigFile, fptools._, reporter, context)
    oshs = ifilter(None, imap(reportFn, configFiles))
    for osh in oshs:
        context.resultsVector.add(osh)
def _getCredentialsForIp(self, ip):
    ''' string -> list[string]
    Collect all available credentials for the given IP address.
    VMware-category credentials come first; credentials without a category
    are appended afterwards as a lower-priority fallback.
    '''
    allCredentials = self.framework.getAvailableProtocols(ip, cim.Protocol.SHORT)
    isVmware = fptools.partiallyApply(
        cim_discover.isCredentialOfCategory,
        fptools._, CimCategory.VMWARE, self.framework)
    isUncategorized = fptools.partiallyApply(
        cim_discover.isCredentialOfCategory,
        fptools._, cim.CimCategory.NO_CATEGORY, self.framework)
    prioritized = filter(isVmware, allCredentials)
    fallback = filter(isUncategorized, allCredentials)
    return prioritized + fallback
def get_common_topology_context(discoverer, dnsresolver, installpath, config):
    '''Assemble the three registries that drive generic HANA topology discovery.

    Returns (discovererRegistry, pdoBuilderRegistry, topologyBuilderRegistry):
      - discovererRegistry: CIT -> discovery callable; (CIT, linkCIT, CIT)
        triples map to predicates deciding whether two objects are linked
      - pdoBuilderRegistry: CIT -> callable building the parameter data object
      - topologyBuilderRegistry: CIT (or link CIT) -> OSH builder/reporter
    '''
    discovererRegistry = {}
    discovererRegistry[baseCits.node] = partial(discoverer.getDeploymentHosts, Sfn(dnsresolver.resolve_ips))
    discovererRegistry[baseCits.ip] = hana_host.Host.ips.fget
    discovererRegistry[cits.hanadb] = discoverer.getHanaDatabaseServer
    discovererRegistry[cits.hanadbInstance] = lambda host: discoverer.getHanaDatabaseInstance(host.name)
    discovererRegistry[baseCits.configFile] = lambda dbServer: discoverer.getHanaDbConfigFiles()
    discovererRegistry[baseCits.ipServiceEndpoint] = lambda host: discoverer.getHanaDbInstanceEndpoints(host.name)
    discovererRegistry[dbCits.schema] = lambda dbServer: discoverer.getHanaDbSchemas()
    discovererRegistry[dbCits.user] = lambda dbServer: discoverer.getHanaDbUsers()
    discovererRegistry[dbCits.dataFile] = lambda db_instance: discoverer.getHanaDbDataFiles(db_instance)
    discovererRegistry[dbCits.logFile] = lambda db_instance: discoverer.getHanaDbLogFiles(db_instance)
    discovererRegistry[dbCits.traceFile] = lambda db_instance: discoverer.getHanaDbTraceFiles(db_instance)
    # linkage condition
    discovererRegistry[(dbCits.user, baseCits.ownership, dbCits.schema)] = lambda user, schema: schema.owner == user.name
    discovererRegistry[(cits.hanadbInstance, baseCits.usage, baseCits.ipServiceEndpoint)] = lambda hana_instance, endpoint: endpoint.getAddress() == hana_instance.hostname and endpoint.getPortType() == hana.PortTypeEnum.HANA
    discovererRegistry[(cits.hanadb, baseCits.membership, cits.hanadbInstance)] = lambda hanaDb, hanaInstance: True
    discovererRegistry[(baseCits.node, baseCits.containment, baseCits.ip)] = lambda host, ip: ip in host.ips

    pdoBuilderRegistry = {}
    pdoBuilderRegistry[cits.hanadbInstance] = lambda instance: buildDatabaseInstancePdo(instance, installpath, sid=config.sid)
    pdoBuilderRegistry[dbCits.user] = buildDbUserPdoFromDatabaseUser
    pdoBuilderRegistry[baseCits.ipServiceEndpoint] = partial(buildEndpointPdoFromEndpoint,
                                                            Sfn(dnsresolver.resolve_ips))

    #Should be coming from core hana_topology module
    baseTopologyBuilderRegistry = {
        # ignore the name as it is could be an alias and not a real hostname
        baseCits.node: lambda node_pdo: hana_host.Builder().build_host(node_pdo._replace(name=None)),
        baseCits.ip: modeling.createIpOSH,
        baseCits.configFile: fptools.partiallyApply(modeling.createConfigurationDocumentOshByFile, fptools._, None),
        baseCits.ipServiceEndpoint: netutils.ServiceEndpointBuilder().visitEndpoint
    }
    linkReporter = hana.LinkReporter()
    # link builders share the signature (do1, osh1, do2, osh2); reporters that
    # only need the OSHs are wrapped in lambdas to drop the data objects
    linkReporterRegistry = {
        baseCits.containment: linkReporter.reportContainment,
        baseCits.composition: linkReporter.reportComposition,
        baseCits.membership: lambda do1, osh1, do2, osh2: linkReporter.reportMembership(osh1, osh2),
        baseCits.ownership: lambda do1, osh1, do2, osh2: linkReporter.reportOwnership(osh1, osh2),
        baseCits.usage: lambda do1, osh1, do2, osh2: linkReporter.reportUsage(osh1, osh2),
        baseCits.replicated: hana.DatabaseTopologyReporter().reportReplication,
    }
    topologyBuilderRegistry = {}
    topologyBuilderRegistry.update(baseTopologyBuilderRegistry)
    topologyBuilderRegistry.update(linkReporterRegistry)
    dbTopologyBuilder = hana.DatabaseTopologyBuilder()
    # the hanadb OSH is restored from its known CMDB id rather than rebuilt
    topologyBuilderRegistry[cits.hanadb] = lambda _: modeling.createOshByCmdbIdString(cits.hanadb, config.hanadb_cmdbid)
    topologyBuilderRegistry[cits.hanadbInstance] = dbTopologyBuilder.buildDatabaseInstanceOsh
    topologyBuilderRegistry[dbCits.schema] = dbTopologyBuilder.buildSchemaOsh
    topologyBuilderRegistry[dbCits.user] = dbTopologyBuilder.buildUserOsh
    topologyBuilderRegistry[dbCits.dataFile] = dbTopologyBuilder.buildDataFileOsh
    topologyBuilderRegistry[dbCits.logFile] = dbTopologyBuilder.buildLogFileOsh
    topologyBuilderRegistry[dbCits.traceFile] = dbTopologyBuilder.buildTraceFileOsh
    return discovererRegistry, pdoBuilderRegistry, topologyBuilderRegistry
def discoverEsxVirtualTopology(ipAddress, credentialsId, esxBiosUuid, framework):
    '''Discover reportable virtual machines hosted by the ESX server identified
    by its BIOS UUID. The CIM client is always closed before returning.
    @raise ValueError: when no ESX instance matches the UUID
    '''
    client = None
    try:
        client = cim_discover.createClient(framework, ipAddress,
                                           vmware_cim_discover.CimNamespace.ESXV2,
                                           credentialsId)
        computerSystems = vmware_cim_discover.getVmwareEsxComputerSystems(client)
        matchesUuid = fptools.partiallyApply(_esxMatchesBiosUuid,
                                             fptools._, esxBiosUuid.lower())
        esx = fptools.findFirst(matchesUuid, computerSystems)
        if not esx:
            raise ValueError("Cannot find ESX Server instance in '%s' namespace" % vmware_cim_discover.CimNamespace.ESXV2)
        allVms = vmware_cim_discover.getVirtualMachinesByEsx(client, esx)
        reportableVms = filter(_vmIsReportable, allVms)
        logger.debug("Virtual machines found: %s, filtered out: %s" % (len(allVms), len(allVms) - len(reportableVms)))
        return reportableVms
    finally:
        if client is not None:
            client.close()
def _parseServerPorts(self, document, instanceNumber, hostname):
    r'@types: IniDocument -> tuple[Endpoint]'
    # every indexed icm/server_port entry becomes an endpoint bound to the
    # given instance number and hostname; unparsable entries are dropped
    declarations = document.findIndexedValues('icm/server_port')
    toEndpoint = fptools.partiallyApply(
        InstanceProfileParser._parsePortDeclarationToEndpoint,
        fptools._, instanceNumber, hostname)
    endpoints = map(toEndpoint, declarations)
    return filter(None, endpoints)
def reportInstances(instances, sys_, sysOsh, get_inst_creds=None, connectionClientNr=None):
    r'''@types: list[InstanceInfo], System, osh, callable, str -> dict, oshv
    @type get_inst_creds: (InstanceInfo, list[_BaseIP] -> str?)
    @param get_inst_creds: function to get credentials for the specified instance
    '''
    vector = ObjectStateHolderVector()
    # resolve the IPs of every instance host up-front
    resolver = dns_resolver.SocketDnsResolver()
    resolveFn = fptools.partiallyApply(_resolveInstanceAddressToIps, fptools._, resolver)
    resolvedIps = map(resolveFn, instances)
    oshPerInstance = {}
    # report only instances whose address resolved to at least one IP
    # (`second` tests the resolved-IPs half of each pair)
    for info, ips in filter(second, zip(instances, resolvedIps)):
        serverOsh, instVector = reportInstanceWithSystem(info, ips, sys_, sysOsh, connectionClientNr)
        vector.addAll(instVector)
        oshPerInstance[info.instance] = serverOsh
    return oshPerInstance, vector
def executeCommandAndParse(self, command, parseFn, dumpRows=False):
    ''' string, function, boolean -> list(?)
    parseFn: ResultWrapper -> ? (result entity)
    Execute the command and parse each result row via parseFn.
    '''
    # bind the command text, leaving the query target as the free argument
    boundQueryFn = fptools.partiallyApply(queryFnByCommand, fptools._, command)
    return self.queryAndParse(boundQueryFn, parseFn, dumpRows)
def buildEndpointsFromNsOption(nsOption, dnsResolver):
    r'''@types: NsOption -> list[netutils.Endpoint]
    Resolve the option address to IPs and build one TCP endpoint per IP
    using the option's port; returns [] when resolution fails.
    '''
    resolve = fptools.safeFunc(dnsResolver.resolveIpsByHostname)
    ips = resolve(nsOption.address)
    if not ips:
        return []
    makeEndpoint = fptools.partiallyApply(netutils.createTcpEndpoint,
                                          fptools._, nsOption.port)
    return map(makeEndpoint, ips)
def get_db_sessions(executor, db_name=None, db_partition_number=None):
    r''' @types: command.ExecutorCmdlet, str?, str? -> tuple[db2_sql_v9x_discoverer.Session]
    '''
    # parse rows safely, dropping those that fail to parse (keep + safeFunc)
    safe_parse = fptools.safeFunc(_parse_session)
    to_sessions = command.FnCmdlet(fptools.partiallyApply(keep, safe_parse, fptools._))
    result = GetApplInfo(db_name) | executor | to_sessions
    return tuple(result)
def process(self, context):
    r''' @types: applications.ApplicationSignatureContext
    Resolves each configured ns-option address to endpoints and reports
    hosts, IPs, endpoints and client-server links into the results vector.
    '''
    shell = context.client
    dnsResolver = netutils.DnsResolverByShell(shell)
    # group the parsed endpoints by the address they came from
    addressToEndpoints = {}
    for nsOption in self._nsOptions:
        parsedEndpoints = buildEndpointsFromNsOption(nsOption, dnsResolver)
        addressToEndpoints.setdefault(nsOption.address, []).extend(parsedEndpoints)
    hostBuilder = HostBuilder()
    linkReporter = LinkReporter()
    endpointBuilder = netutils.ServiceEndpointBuilder()
    endpointReporter = netutils.EndpointReporter(endpointBuilder)
    endpointsOshs = []
    for address, endpoints in addressToEndpoints.items():
        # host is identified by the hostname part of the address
        hostOsh = hostBuilder.buildHostByHostname(parseHostnameFromAddress(address))
        context.resultsVector.add(hostOsh)
        ips = map(netutils.Endpoint.getAddress, endpoints)
        ipOshs = map(modeling.createIpOSH, ips)
        fptools.each(context.resultsVector.add, ipOshs)
        # containment links: host -> each of its IPs
        reportContainment = fptools.partiallyApply(linkReporter.reportContainment, hostOsh, fptools._)
        fptools.each(context.resultsVector.add, map(reportContainment, ipOshs))
        endpointsOshs.extend(map(fptools.partiallyApply(endpointReporter.reportEndpoint, fptools._, hostOsh), endpoints))
    applicationOsh = context.application.applicationOsh
    # client-server link from the application to every reported endpoint
    for endpointsOsh in endpointsOshs:
        context.resultsVector.add(endpointsOsh)
        clientServerOsh = linkReporter.reportClientServerRelation(applicationOsh, endpointsOsh)
        context.resultsVector.add(clientServerOsh)
def getAssociatorsWithTypeEnforcement(client, objectPath, associationClass, targetClass):
    ''' CIMObjectPath, String, String -> list[CIMInstance] or None
    Get associators and ensure only instances of specific class are returned.
    While API declares it will filter the classes it does not always happen, so we need to filter manually
    Limitation: does not support subclasses
    '''
    candidates = client.getAssociators(objectPath, associationClass, targetClass)
    matchesTarget = fptools.partiallyApply(isCimInstanceOfClass, fptools._, targetClass)
    return filter(matchesTarget, candidates)
def getSystemInfo(self, address, port):
    # Build the system-info command: the handler chain first extracts the raw
    # output, then turns it into a document usable for XPath queries.
    handlers = (
        lambda result: result.output,
        fptools.partiallyApply(_buildDocumentForXpath, fptools._, 0),
    )
    cmdlets = map(command.FnCmdlet, handlers)
    url = self._SYSTEM_INFO_URL_PATTERN % {'address': address, 'port': port}
    return command.Cmd(url, command.ChainedCmdlet(*cmdlets))
def __getSystemComponent(self, queriedJ2eeType, serviceClass):
    r'@types: str, T -> list[T]'
    # query MBeans of the requested j2eeType and parse each into the
    # requested service class; failed parses are dropped (keep + safeFunc)
    attributes = ("DisplayName", "ProviderName", "MinorVersion",
                  "MicroVersion", "Description", "MajorVersion",
                  "Name", "Jars")
    pattern = '*:*,j2eeType=%s' % queriedJ2eeType
    mbeans = self._getClient().getMbeansByNamePattern(pattern, attributes)
    parse = fptools.partiallyApply(self.__parseSystemComponent, fptools._, serviceClass)
    return keep(fptools.safeFunc(parse), mbeans)
def _discoverTopology(framework, credentialsId, hostId, applicationPort, tryToDiscoverGlobalCatalogFlag, result): r'@types: Framework, str, int, bool, DiscoveryResult -> ObjectStateHolderVector' discoveryFn = fptools.partiallyApply(discover, fptools._, hostId, None, result) try: return _withDaoService(framework, credentialsId, LdapEnvironmentBuilder(applicationPort), discoveryFn) except (Exception, JException), e: logger.warnException(str(e))
def _buildJavaInstance(system, dlgInstance):
    r'@types: System, SapJEEMonitoringXmlParser.DialogInstance -> sap.Instance'
    # gather candidate base paths from all worker nodes of the instance
    workerNodes = _getInstanceWorkers(dlgInstance)
    candidatePaths = []
    candidatePaths.extend(flatten(map(_getPathsWithInstanceBasePath, workerNodes)))
    parseFromHome = partiallyApply(parseInstFromHomeDir, system, fptools._)
    # try each path until one of them parses into an instance
    _, parsedInst = untilFirstTruth(safeFunc(parseFromHome), candidatePaths)
    if not parsedInst:
        raise Exception("Not enough information to build instance")
    hostname = _getDlgInstanceHostname(dlgInstance)
    return sap.Instance(parsedInst.name, parsedInst.number, hostname)
def _discoverTopology(framework, credentialsId, hostId, applicationPort, tryToDiscoverGlobalCatalogFlag, isOuUnitsTreeReportedAsConfig, result):
    r'@types: Framework, str, int, bool, bool, DiscoveryResult -> ObjectStateHolderVector'
    # bind all arguments of `discover` except the DAO service, which is
    # supplied by _withDaoService via the placeholder
    discoveryFn = fptools.partiallyApply(discover, fptools._, hostId, None, isOuUnitsTreeReportedAsConfig, result)
    try:
        return _withDaoService(framework, credentialsId, LdapEnvironmentBuilder(applicationPort), discoveryFn)
    except (Exception, JException), e:
        # NOTE(review): returns None on failure despite the declared return type
        logger.warnException(str(e))
def _getCredentialsForIp(self, ip):
    ''' string -> list[string]
    Method returns all available credentials for given IP address
    Credentials are marked as VMware category or no Category (lower priority)
    '''
    credentialsList = self.framework.getAvailableProtocols(
        ip, cim.Protocol.SHORT)
    # VMware-category credentials are preferred and placed first
    vmwareCredentialsFilter = fptools.partiallyApply(
        cim_discover.isCredentialOfCategory,
        fptools._, CimCategory.VMWARE, self.framework)
    vmwareCredentials = filter(vmwareCredentialsFilter, credentialsList)
    # credentials without any category act as a lower-priority fallback
    noCategoryCredentialsFilter = fptools.partiallyApply(
        cim_discover.isCredentialOfCategory,
        fptools._, cim.CimCategory.NO_CATEGORY, self.framework)
    noCategoryCredentials = filter(noCategoryCredentialsFilter, credentialsList)
    return vmwareCredentials + noCategoryCredentials
def _parseMsHttpPortConfigs(doc, number=None, hostname=None):
    r'''
    Get message server port configurations of format PROT=HTTP,PORT=81$$
    @types: IniDocument, str?, str? -> list[Endpoint]
    @param number: Instance number, where port declared
    @param hostname: Instance hostname, where port declared
    @return: list of port declarations in the order they are marked with
    corresponding index
    '''
    portValues = doc.findIndexedValues('ms/server_port')
    fn = InstanceProfileParser._parsePortDeclarationToEndpoint
    # NOTE(review): `partiallyApply` and the placeholder `__` are used
    # unqualified here, while sibling code uses fptools.partiallyApply and
    # fptools._ - presumably imported directly from fptools; verify `__`
    # really is the placeholder alias in this module
    portToEndpointFn = partiallyApply(fn, __, number, hostname)
    return keep(portToEndpointFn, portValues)
def reportPartition(self, partition, container_osh, node_osh=None, pg_oshs=None):
    r'@types: db2.topology.BaseBuilder.PartitionPdo, ObjectStateHolder, ObjectStateHolder, [ObjectStateHolder(db2_partition_group)] -> ObjectStateHolderVector'
    vector = ObjectStateHolderVector()
    partition_osh = self._builder.buildPartition(partition)
    partition_osh.setContainer(container_osh)
    vector.add(partition_osh)
    if node_osh:
        # the node contains this partition
        vector.add(self._link_reporter.report_containment(node_osh, partition_osh))
    if pg_oshs:
        # membership link from every partition group to this partition;
        # the group OSH fills the placeholder (first argument)
        link_to_partition = partiallyApply(self._link_reporter.report_membership,
                                           fptools._, partition_osh)
        map(comp(vector.add, link_to_partition), pg_oshs)
    return partition_osh, vector
def parse(self, url):
    r'''@types: str -> tuple[db.DatabaseServer]
    Parse a TNS connect URL into one DatabaseServer per declared address,
    all sharing the SID taken from the connect-data section.
    '''
    trimmed = self.trimUrlPrefix(url)
    record = OracleTnsRecordParser().parse(trimmed)
    addresses = self._filterAddresses(record)
    description = self._getDescription(record)
    sid = description.connect_data.sid.strip()
    # TODO: ek: vendor='oracle', do we need to set it here?
    makeServer = fptools.partiallyApply(self._buildDatabaseServer, fptools._, sid)
    return tuple(map(makeServer, addresses))
def cimConnectionCheck(credentialId, ipAddress, framework): credentialsCategory = framework.getProtocolProperty(credentialId, cim.ProtocolProperty.CATEGORY) categories = cim_discover.getCimCategories(framework) if credentialsCategory and credentialsCategory != cim.CimCategory.NO_CATEGORY: categories = [category for category in categories if category.getName() == credentialsCategory] namespaces = [ns for category in categories for ns in category.getNamespaces()] testFunction = fptools.partiallyApply(cim_discover.safeTestConnectionWithNamespace, framework, ipAddress, credentialId, fptools._) try: testedNamespaces = map(testFunction, namespaces) testedNamespaces = filter(None, testedNamespaces) if len(testedNamespaces) == 0: raise ValueError("Failed to establish connection to any namespace") return Result(True) except ValueError, ex: return Result(False, str(ex))
def process(self, context):
    # Derive the j2ee server name from the main process command line and
    # store it on the application OSH.
    shell = context.client
    fs = file_system.createFileSystem(shell)
    path_util = file_system.getPath(fs)
    application = context.application
    osh = application.getOsh()
    main_process = application.getMainProcesses()[0]
    descriptor = jee.JvmCommandLineDescriptor(main_process.commandLine)
    elements = descriptor.parseElements()
    java_options = filter(self.__is_java_option, elements)
    parse_fn = partiallyApply(self.parse_server_name, fptools._, path_util)
    # first option that yields a server name wins
    server_name = first(keep(parse_fn, java_options))
    logger.debug('server name: %s' % server_name)
    if server_name is not None:
        osh.setAttribute('j2eeserver_servername', server_name)
        #TODO: replace to jee.ServerTopologyBuilder._composeFullName
        osh.setAttribute('j2eeserver_fullname', server_name)
        modeling.setAppServerType(osh)
def connect(framework, checkConnectFn=None, parametersToCheck=None):
    r'''
    Returns ObjectStateHolderVector containing ObjectStateHolder instance with
    following attributes set:
    'status' - contains boolean value. 1 - check credential process succeeded,
    0 - check credential process ended with failure.
    'error_msg' - contains valid error message in case of connection failure.
    @types: FrameworkImpl, function, list[str] - ObjectStateHolderVector
    '''
    # NOTE(review): the function appears to continue beyond this chunk
    # (`vector` is built but never returned here) - do not assume this is
    # the full body.
    vector = ObjectStateHolderVector()
    ipAddress = framework.getDestinationAttribute(DESTINATION_DATA_IP_ADDRESS)
    credentialId = framework.getDestinationAttribute(ATTR_CREDENTIALS_ID)
    protocol_ = protocol.MANAGER_INSTANCE.getProtocolById(credentialId)
    protocolName = protocol_.getProtocolName()
    # default check: generic connect bound to the protocol name; the three
    # placeholders are (credentialId, ipAddress, framework)
    checkConnectFn = checkConnectFn or partiallyApply(_genericConnect, _, _, _, protocolName)
    if protocol_.isInScope(ipAddress):
        missingParams = []
        if parametersToCheck:
            missingParams = _checkProtocolParameters(protocol_, parametersToCheck)
        if len(missingParams) == 0:
            try:
                result = checkConnectFn(credentialId, ipAddress, framework)
            except java.lang.Exception, e:
                logger.debugException("Connection to %s by '%s' failed" % (ipAddress, protocolName))
                result = Result(False, 'Error: %s' % e.getMessage())
            except Exception, e:
                logger.debugException("Connection to %s by '%s' failed" % (ipAddress, protocolName))
                result = Result(False, 'Error: %s' % e.message)
            if not result:
                result = Result(False, 'Failed to get result')
def _reportInstance(sapSystem, serverInstance, systemOsh, clusterOsh, sapJeeVersionInfo, knownPortsConfigFile):
    r'''
    Report one SAP JEE dialog instance: its host, the instance itself, its
    dispatcher/server processes and the dispatcher endpoints.
    @param sapSystem: System
    @param serverInstance: SapJEEMonitoringXmlParser.DialogInstance
    @param sapJeeVersionInfo: JEEDiscovererByHTTP.SapJ2EEVersionInfo
    @rtype: ObjectStateHolderVector
    @raise ValueError:
    '''
    instanceReporter = sap_jee.InstanceReporter(sap_jee.InstanceBuilder())
    serverReporter = sap_jee.ServerReporter(sap_jee.ServerBuilder())
    endpointReporter = netutils.EndpointReporter(netutils.ServiceEndpointBuilder())
    linkReporter = sap.LinkReporter()
    vector = ObjectStateHolderVector()
    ip = serverInstance.host.ip
    hostOsh, _, vector_ = _buildHostAndIpOshs(ip)
    vector.addAll(vector_)
    systemOsh.setStringAttribute('data_note', 'This SAP System link to ' + hostOsh.getAttributeValue('host_key'))
    vector.add(systemOsh)
    instance = _buildJavaInstance(sapSystem, serverInstance)
    instanceOsh = instanceReporter.reportInstancePdo(
        sap_jee.InstanceBuilder.InstancePdo(instance, sapSystem,
                                            sap.createIp(ip), sapJeeVersionInfo),
        hostOsh)
    vector.add(instanceOsh)
    # the instance belongs both to the cluster and to the SAP system
    vector.add(linkReporter.reportMembership(clusterOsh, instanceOsh))
    vector.add(linkReporter.reportMembership(systemOsh, instanceOsh))
    if serverInstance.dispatcherServer:
        dispatcherProcess = _buildDispatcher(serverInstance.dispatcherServer)
        vector.add(serverReporter.reportServer(dispatcherProcess, instance, instanceOsh))
        httpPort = serverInstance.dispatcherServer.httpPort
        p4Port = serverInstance.dispatcherServer.p4Port
        telnetPort = serverInstance.dispatcherServer.telnetPort
        # keep only (ip, port, portName) triples with a defined port
        isPortDefined = lambda t: t[1]
        ports = filter(isPortDefined,
                       ((ip, httpPort, knownPortsConfigFile.getTcpPortName(httpPort)),
                        (ip, p4Port, knownPortsConfigFile.getTcpPortName(p4Port)),
                        (ip, telnetPort, knownPortsConfigFile.getTcpPortName(telnetPort))))
        endpoints = map(lambda t: netutils.createTcpEndpoint(*t), ports)
        reportEndpoint = endpointReporter.reportEndpoint
        reportEndpoint = fptools.partiallyApply(reportEndpoint, fptools._, hostOsh)
        endpointOshs = map(reportEndpoint, endpoints)
        fptools.each(vector.add, endpointOshs)
        # usage links: instance -> each dispatcher endpoint
        reportUsage = linkReporter.reportUsage
        reportUsage = fptools.partiallyApply(reportUsage, instanceOsh, fptools._)
        usageOshs = map(reportUsage, endpointOshs)
        fptools.each(vector.add, usageOshs)
    for serverProcess in serverInstance.serverProcesses:
        serverProcess = _buildServerProcess(serverProcess)
        vector.add(serverReporter.reportServer(serverProcess, instance, instanceOsh))
    #note:ek:no debug port reporting for now
    return vector
    r'''@types: str -> TopologyConfigParser.Node
    @raise TrexTopologyConfig.NodeNotFound
    '''
    # NOTE(review): the enclosing `def` of this lookup helper is outside the
    # visible chunk; it searches `node.children` for a child with the given name
    node = fptools.findFirst(lambda c, name=name: c.getName() == name, node.children)
    if not node:
        raise TrexTopologyConfig.NodeNotFound()
    return node


def _isProcessNameStartswith(prefixInLowerCase, process):
    r'@types: str, process.Process -> bool'
    # True when the process exists and its lower-cased name starts with the prefix
    return (process
            and process.getName().lower().startswith(prefixInLowerCase))


# predicates detecting TREX daemon / launcher processes by name prefix
isTrexDaemonProcess = fptools.partiallyApply(_isProcessNameStartswith,
                                             'trexdaemon', fptools._)
isTrexLaunchProcess = fptools.partiallyApply(_isProcessNameStartswith,
                                             'trx.sap', fptools._)


class SystemLayout(sap_discoverer.Layout):

    def getRfcServerConfigFilePath(self):
        r''' Get path to the TREX RFC Server configuration file
        @resource-file: <SID>/SYS/global/trex/TREXRfcServer.ini
        @types: -> str
        '''
        # NOTE(review): the docstring mentions global/trex/TREXRfcServer.ini
        # but the joined path includes custom/config segments - confirm which
        # location is correct
        return self._getPathTools().join(self.getRootPath(), 'global', 'trex',
                                         'custom', 'config', 'TREXRfcServer.ini')
def process(self, context):
    r''' @types: applications.ApplicationSignatureContext
    Discover an SMD (diagnostics) agent: parse its instance profile and
    runtime properties, then report the agent, its configuration files and
    its relations to the SLD server and Solution Manager.
    '''
    # ==================== DISCOVERY
    shell = context.client
    fs = file_system.createFileSystem(shell)
    pathtools = file_system.getPath(fs)
    # 1) get process related application
    application = context.application
    connectionIp = application.getConnectionIp()
    # 2) find out process where path to the instance profile is stored
    logger.info(" Get executable path of main process ")
    mainProcess = application.getMainProcesses()[0]
    # 3)
    logger.info("Found out path to instance profile")
    instanceProfilePath = self.__getProfilePath(mainProcess)
    # 4)
    logger.info("Instance profile path: ", instanceProfilePath, ". Get content")
    getContent = fptools.safeFunc(self.__getContent, Exception)
    profileFile = (instanceProfilePath
                   and getContent(shell, pathtools, instanceProfilePath))
    if not profileFile:
        logger.warn("Failed to get content of instance profile")
        return
    # 5) parse content using instance and default profile parsers
    logger.info("Make configuration parsing")
    iniParser = sap_discoverer.IniParser()
    instancePfParser = sap_discoverer.InstanceProfileParser(iniParser)
    try:
        instanceProfile = instancePfParser.parseContent(profileFile.content)
    except Exception:
        logger.warnException("Failed to parse instance profile")
    else:
        traceConfig = None
        runtimeConfig = None
        sapInstance = instanceProfile.instance
        sapInstance = sap.Instance(sapInstance.name + sapInstance.number,
                                   sapInstance.number, sapInstance.hostname)
        # 6) Process runtime.properties that contains information about
        # Solution Manager and SLD if present
        logger.info("Create agent layout")
        logger.info("Get content of runtime properties")
        agentLayout = fptools.safeFunc(sap_smd_discoverer.createAgentLayoutFromBinPath)(
            (pathtools.isAbsolute(mainProcess.executablePath)
             and mainProcess.executablePath
             or discoverExecutablePath(shell, mainProcess)),
            fs, pathtools)
        if agentLayout:
            propertiesFile = getContent(shell, pathtools,
                                        agentLayout.getRuntimePropertiesPath())
            if propertiesFile:
                parser = sap_smd_discoverer.RuntimePropertiesParser(
                    sap_discoverer.IniParser())
                try:
                    runtimeConfig = parser.parse(propertiesFile.content)
                except Exception:
                    logger.warnException("Failed to parse runtime properties")
            logger.info("Find out version information")
            devSmdAgentFile = getContent(shell, pathtools,
                                         agentLayout.getDevSmdAgentConfigFile())
            if devSmdAgentFile:
                configParser = sap_smd_discoverer.DevSmdAgentConfigParser()
                # find config with corresponding PID (of main process)
                hasMainProcessPid = lambda c, pid = mainProcess.getPid(): c.pid == pid
                traceConfig = fptools.findFirst(hasMainProcessPid,
                                                configParser.parse(devSmdAgentFile.content))
                if not traceConfig:
                    logger.warn("Failed to find trace information for the main process")
        # === REPORT ===
        # NOTE(review): `propertiesFile` is only bound inside the
        # `if agentLayout:` branch above - confirm the report section is
        # unreachable with it unbound when no agent layout is found
        smdAgentOsh = application.getOsh()
        vector = context.resultsVector
        endpointReporter = netutils.EndpointReporter(netutils.ServiceEndpointBuilder())
        configFileReporter = file_topology.Reporter(file_topology.Builder())
        linkReporter = sap.LinkReporter()
        smdAgentBuilder = sap_smd.Builder()
        softwareBuilder = sap.SoftwareBuilder()
        softwareReporter = sap.SoftwareReporter(sap.SoftwareBuilder())
        resolverByShell = netutils.DnsResolverByShell(shell)
        processOsh = mainProcess.getOsh()
        # x) update name of application using instance name
        softwareBuilder.updateName(smdAgentOsh, sapInstance.getName())
        # x) configuration files related to running_software
        vector.add(configFileReporter.report(profileFile, smdAgentOsh))
        if traceConfig:
            # x) update version information in application
            smdAgentOsh = softwareBuilder.updateVersionInfo(
                smdAgentOsh, traceConfig.versionInfo)
            if traceConfig.jstartVersionInfo:
                smdAgentOsh = smdAgentBuilder.updateJstartVersionInfo(
                    smdAgentOsh, traceConfig.jstartVersionInfo)
        # x) show relation between agent and
        # - SMD server / no enough information /
        # - message server of SCS OR Solution Manager, represented as agent connection endpoint
        # - SLD
        if propertiesFile and runtimeConfig:
            # x) report properties file as configuration document
            vector.add(configFileReporter.report(propertiesFile, smdAgentOsh))
            # x) Report relation between agent and SLD server and SolMan
            # Resolve endpoint addresses
            # make function that will accept endpoint only
            resolveEndpointFn = fptools.partiallyApply(
                self.__resolveEndpointAddress,
                fptools.safeFunc(resolverByShell.resolveIpsByHostname, []),
                fptools._
            )
            # - SLD relation
            if runtimeConfig.sldEndpoint:
                for endpoint in resolveEndpointFn(runtimeConfig.sldEndpoint):
                    sldHostOsh = endpointReporter.reportHostFromEndpoint(endpoint)
                    vector.add(sldHostOsh)
                    sldEndpointOsh = endpointReporter.reportEndpoint(endpoint, sldHostOsh)
                    vector.add(sldEndpointOsh)
                    # this unknown server type must be SLD server
                    sldOsh = softwareReporter.reportUknownSoftware(sldHostOsh)
                    vector.add(sldOsh)
                    vector.add(linkReporter.reportUsage(sldOsh, sldEndpointOsh))
                    # report link between process and SLD server endpoint
                    vector.add(linkReporter.reportClientServerRelation(processOsh, sldEndpointOsh))
            # - Solution Manager relation
            agentConnectionEndpoint = runtimeConfig.getAgentConnecitonEndpoint()
            if agentConnectionEndpoint:
                for endpoint in resolveEndpointFn(agentConnectionEndpoint):
                    hostOsh = endpointReporter.reportHostFromEndpoint(endpoint)
                    vector.add(hostOsh)
                    endpointOsh = endpointReporter.reportEndpoint(endpoint, hostOsh)
                    vector.add(endpointOsh)
                    softwareOsh = softwareReporter.reportUknownSoftware(hostOsh)
                    vector.add(softwareOsh)
                    vector.add(linkReporter.reportUsage(softwareOsh, endpointOsh))
                    # report link between process and SolMan end-point
                    vector.add(linkReporter.reportClientServerRelation(processOsh, endpointOsh))
import entity
from appilog.common.system.types import ObjectStateHolder
from java.lang import Integer
import modeling
import fptools


def _optional(type_, value):
    '''Safely convert `value` with constructor `type_`.

    Returns the converted value, or None when `value` is None or when the
    conversion fails (e.g. int('abc')).
    '''
    if value is not None:
        try:
            return type_(value)
        # was a bare `except:`, which also swallowed system-exiting
        # exceptions; conversion failures raise TypeError/ValueError
        except (TypeError, ValueError):
            pass


# safe constructors for int and long types
_int = fptools.partiallyApply(_optional, int, fptools._)
_long = fptools.partiallyApply(_optional, long, fptools._)


class Cluster(entity.Immutable):
    '''Immutable value object describing a cluster.'''

    def __init__(self, name, version=None, vendor=None, details=None):
        r'@types: str, str, str, ClusterDetails'
        assert name
        self.name = name
        self.version = version
        self.vendor = vendor
        self.details = details

    def __str__(self):
        return "Cluster(%s, %s, %s)" % (self.name, self.version, self.vendor)
from appilog.common.system.types import ObjectStateHolder
from java.lang import Integer
import modeling
import fptools


def _optional(type_, value):
    '''Safely convert `value` with constructor `type_`.

    Returns the converted value, or None when `value` is None or when the
    conversion fails (e.g. int('abc')).
    '''
    if value is not None:
        try:
            return type_(value)
        # was a bare `except:`, which also swallowed system-exiting
        # exceptions; conversion failures raise TypeError/ValueError
        except (TypeError, ValueError):
            pass


# safe constructors for int and long types
_int = fptools.partiallyApply(_optional, int, fptools._)
_long = fptools.partiallyApply(_optional, long, fptools._)


# NOTE(review): `entity` is not imported in this chunk - presumably imported
# earlier in the file; verify, otherwise `entity.Immutable` raises NameError
class Cluster(entity.Immutable):
    '''Immutable value object describing a cluster.'''

    def __init__(self, name, version=None, vendor=None, details=None):
        r'@types: str, str, str, ClusterDetails'
        assert name
        self.name = name
        self.version = version
        self.vendor = vendor
        self.details = details

    def __str__(self):
        return "Cluster(%s, %s, %s)" % (self.name, self.version, self.vendor)
def get_common_topology_context(discoverer, dnsresolver, installpath, config):
    r'''Assemble the three registries that drive common HANA topology
    reporting: discoverer callables per CIT (and per link triple),
    PDO builders per CIT, and topology/link OSH builders per CIT.
    @types: Discoverer, DnsResolver, str, Config -> (dict, dict, dict)
    '''
    safe_resolve_ips = Sfn(dnsresolver.resolve_ips)

    # -- how each CIT (and each CIT-link-CIT triple) is discovered
    discovererRegistry = {
        baseCits.node: partial(discoverer.getDeploymentHosts, safe_resolve_ips),
        baseCits.ip: hana_host.Host.ips.fget,
        cits.hanadb: discoverer.getHanaDatabaseServer,
        cits.hanadbInstance:
            lambda host: discoverer.getHanaDatabaseInstance(host.name),
        baseCits.configFile:
            lambda dbServer: discoverer.getHanaDbConfigFiles(),
        baseCits.ipServiceEndpoint:
            lambda host: discoverer.getHanaDbInstanceEndpoints(host.name),
        dbCits.schema: lambda dbServer: discoverer.getHanaDbSchemas(),
        dbCits.user: lambda dbServer: discoverer.getHanaDbUsers(),
        dbCits.dataFile:
            lambda db_instance: discoverer.getHanaDbDataFiles(db_instance),
        dbCits.logFile:
            lambda db_instance: discoverer.getHanaDbLogFiles(db_instance),
        dbCits.traceFile:
            lambda db_instance: discoverer.getHanaDbTraceFiles(db_instance),
        # linkage conditions: predicates deciding whether two objects relate
        (dbCits.user, baseCits.ownership, dbCits.schema):
            lambda user, schema: schema.owner == user.name,
        (cits.hanadbInstance, baseCits.usage, baseCits.ipServiceEndpoint):
            lambda hana_instance, endpoint:
                (endpoint.getAddress() == hana_instance.hostname
                 and endpoint.getPortType() == hana.PortTypeEnum.HANA),
        (cits.hanadb, baseCits.membership, cits.hanadbInstance):
            lambda hanaDb, hanaInstance: True,
        (baseCits.node, baseCits.containment, baseCits.ip):
            lambda host, ip: ip in host.ips,
    }

    # -- how discovered data is turned into PDOs
    pdoBuilderRegistry = {
        cits.hanadbInstance:
            lambda instance: buildDatabaseInstancePdo(instance, installpath,
                                                      sid=config.sid),
        dbCits.user: buildDbUserPdoFromDatabaseUser,
        baseCits.ipServiceEndpoint:
            partial(buildEndpointPdoFromEndpoint, safe_resolve_ips),
    }

    # -- base (non-DB-specific) OSH builders; should eventually come from a
    #    core hana_topology module
    baseBuilders = {
        # ignore the name as it could be an alias and not a real hostname
        baseCits.node:
            lambda node_pdo: hana_host.Builder().build_host(
                node_pdo._replace(name=None)),
        baseCits.ip: modeling.createIpOSH,
        baseCits.configFile:
            fptools.partiallyApply(modeling.createConfigurationDocumentOshByFile,
                                   fptools._, None),
        baseCits.ipServiceEndpoint:
            netutils.ServiceEndpointBuilder().visitEndpoint,
    }
    relationReporter = hana.LinkReporter()
    linkBuilders = {
        baseCits.containment: relationReporter.reportContainment,
        baseCits.composition: relationReporter.reportComposition,
        # adapters drop the data objects and report the link by OSH pair only
        baseCits.membership:
            lambda do1, osh1, do2, osh2: relationReporter.reportMembership(osh1, osh2),
        baseCits.ownership:
            lambda do1, osh1, do2, osh2: relationReporter.reportOwnership(osh1, osh2),
        baseCits.usage:
            lambda do1, osh1, do2, osh2: relationReporter.reportUsage(osh1, osh2),
        baseCits.replicated: hana.DatabaseTopologyReporter().reportReplication,
    }

    dbBuilder = hana.DatabaseTopologyBuilder()
    topologyBuilderRegistry = {}
    topologyBuilderRegistry.update(baseBuilders)
    topologyBuilderRegistry.update(linkBuilders)
    topologyBuilderRegistry.update({
        cits.hanadb:
            lambda _: modeling.createOshByCmdbIdString(cits.hanadb,
                                                       config.hanadb_cmdbid),
        cits.hanadbInstance: dbBuilder.buildDatabaseInstanceOsh,
        dbCits.schema: dbBuilder.buildSchemaOsh,
        dbCits.user: dbBuilder.buildUserOsh,
        dbCits.dataFile: dbBuilder.buildDataFileOsh,
        dbCits.logFile: dbBuilder.buildLogFileOsh,
        dbCits.traceFile: dbBuilder.buildTraceFileOsh,
    })
    return discovererRegistry, pdoBuilderRegistry, topologyBuilderRegistry
def DiscoveryMain(framework):
    r'''Job entry point: verify connectivity to WebLogic over JMX.
    @types: Framework -> ObjectStateHolderVector
    '''
    # bind the protocol name up front; client, credential and port
    # placeholders are filled in by the generic connect() flow
    checkFn = partiallyApply(jmxConnect, _, _, _, WEBLOGIC_PROTOCOL_NAME)
    return connect(framework,
                   checkConnectFn=checkFn,
                   parametersToCheck=[PROTOCOL_ATTRIBUTE_PORT])
def process(self, context):
    r'''Discover and report SAP TREX topology for the signed application.

    Flow: locate the TREX daemon process, read and parse its instance
    profile, derive system/instance layout, optionally parse topology.ini,
    resolve the version, then report OSHes into context.resultsVector.

    NOTE: bare strings piped to `info`/`warn`/`debug` are logging
    statements (custom `__ror__`-based DSL), not dead code.
    @types: applications.ApplicationSignatureContext
    '''
    # ------------------------------------------------------------ DISCOVERY
    "SAP TREX plug-in DISCOVERY start" | info
    shell = context.client
    fs = file_system.createFileSystem(shell)
    pathtools = file_system.getPath(fs)

    # x) get process related application
    hostOsh = context.hostOsh
    application = context.application
    destinationIp = application.getConnectionIp()

    "x) Find TREX Daemon process that has profile path as parameter" | info
    mainProcess = findFirst(isMainTrexProcess, context.application.getProcesses())
    profilePath = sap_discoverer.getProfilePathFromCommandline(mainProcess.commandLine)

    "x) Read profile content: %s" % profilePath | info
    # curried, exception-safe file reader: takes only the path
    getFileContent = Sfn(Fn(self.__getFileWithContent, shell, pathtools, __))
    profileFile = profilePath and getFileContent(profilePath)
    if not profileFile:
        "Plug-in flow broken. Failed to read instance profile\
 content based on path from the TREXDaemon command line" | warn
        return

    "x) Instance profile parsing" | info
    sapIniParser = sap_discoverer.IniParser()
    instanceProfileParser = sap_discoverer.InstanceProfileParser(sapIniParser)
    defaultProfileParser = sap_trex_discoverer.DefaultProfileParser(sapIniParser)
    try:
        resultAsIni = instanceProfileParser.parseAsIniResult(profileFile.content)
        instanceProfile = instanceProfileParser.parse(resultAsIni)
        defaultProfile = defaultProfileParser.parse(resultAsIni)
    except Exception:
        logger.warnException("Failed to parse instance profile")
        return

    rfcConfigs = []
    trexSystem = defaultProfile.getSystem()
    trexInstance = instanceProfile.getInstance()
    trexInstanceName = trexInstance.getName() + trexInstance.getNumber()

    isBiaProduct = 0
    versionInfo = None
    # # master by default, if topology file is not found that means
    # # that current one is the only instance
    # isMaster = 1
    trexTopology = None

    "x) Initialize TREX instance layout" | debug
    systemName = trexSystem.getName()
    systemBasePath = sap_discoverer.findSystemBasePath(
        mainProcess.getExecutablePath(), systemName)
    if systemBasePath:
        systemLayout = sap_trex_discoverer.SystemLayout(pathtools, systemBasePath, systemName)
        'System path: %s' % systemLayout.getRootPath() | info
        instancePath = systemLayout.composeInstanceDirPath(trexInstanceName)
        'Instance path: %s' % instancePath | debug
        instanceLayout = sap_trex_discoverer.InstanceLayout(pathtools, instancePath, trexInstanceName)

        "x) Get content of default profile as it contains information about product"
        "x) Determine whether we deal with BIA based on version information" | debug
        defaultProfilePath = systemLayout.getDefaultProfileFilePath()
        defaultProfileFile = getFileContent(defaultProfilePath)
        try:
            resultAsIni = instanceProfileParser.parseAsIniResult(defaultProfileFile.content)
            defaultProfile = defaultProfileParser.parse(resultAsIni)
        except Exception:
            logger.warnException("Failed to parse default profile")
        else:
            isBiaProduct = defaultProfile.getProductType() == sap_trex.Product.BIA
        (isBiaProduct and "BIA" or "non-BIA", "product detected") | info

        # get instance host name from profile name
        instanceHostname = None
        try:
            destinationSystem = sap_discoverer.parseSapSystemFromInstanceProfileName(profileFile.getName())
        except Exception:
            msg = "Failed to parse instance hostname from profile file name"
            logger.debugException(msg)
        else:
            instanceHostname = first(destinationSystem.getInstances()).getHostname()

        "x) Discover whole topology from (topology.ini)" | info
        # topology.ini file location and format differs depending on the
        # product:
        # -a) BIA (has plain-ini file at <SID>/sys/global/trex/data/topology.ini
        # -b) TREX (has several places where topology.ini can be stored)
        discoverTopologyIniFilePath = fptools.safeFunc(sap_trex_discoverer.discoverTopologyIniFilePath)
        topologyFilePath = (isBiaProduct
                            and systemLayout.getTopologyIniPath()
                            or discoverTopologyIniFilePath(fs, instanceLayout, instanceHostname))
        topologyFile = topologyFilePath and getFileContent(topologyFilePath)
        if topologyFile:
            try:
                configParser = sap_trex_discoverer.TopologyConfigParser()
                trexTopology = sap_trex_discoverer.TrexTopologyConfig(
                    configParser.parse(topologyFile.content))
                # find instance between master end-points
                # landscapeSnapshot = topology.getGlobals().getLandscapeSnapshot()
                # masterEndpoints = landscapeSnapshot.getMasterEndpoints()
                # activeMasterEndpoints = landscapeSnapshot.getActiveMasterEndpoints()
                # topologyNodes = topology.getHostNodes()
                ##
                # isEndpointWithInstanceHostname = (lambda
                #     ep, hostname = instanceHostname: ep.getAddress() == hostname)
                # isMaster = len(filter(isEndpointWithInstanceHostname,
                #     landscapeSnapshot.getMasterEndpoints()))
                # "host role is %s" % (isMaster and "master" or "slave") | info
            except:
                logger.warnException("Failed to parse topology configuration")
        else:
            logger.warn("Failed to get content for the topology configuration")

        "x) Discover TREX version information from saptrexmanifest.mf" | info
        # read version info from manifest file
        manifestFile = getFileContent(instanceLayout.getManifestFilePath())
        if manifestFile:
            manifestParser = sap_trex_discoverer.SapTrexManifestParser(sapIniParser)
            versionInfo = manifestParser.parseVersion(manifestFile.content)
        else:
            'Failed to discover version from manifest file' | warn
            'Second attept to get version from updateConfig.ini file' | info
            profileSystem = Sfn(sap_discoverer.parseSapSystemFromInstanceProfileName)(profileFile.getName())
            if profileSystem:
                hostname = first(profileSystem.getInstances()).getHostname()
                updateConfigFile = getFileContent(instanceLayout.composeUpdateConfigIniFilePath(hostname))
                versionInfo = updateConfigFile and sap.VersionInfo(updateConfigFile.content.strip())

        "x) Discover served systems ( in case if RFC configuration established )" | info
        rfcServerIniFilePath = (isBiaProduct
                                and systemLayout.getRfcServerConfigFilePath()
                                or instanceLayout.composeTrexRfcServerIniFilePath(instanceHostname))
        rfcServerIniFile = getFileContent(rfcServerIniFilePath)
        if rfcServerIniFile:
            rfcConfigs = filter(None, (fptools.safeFunc(
                sap_trex_discoverer.parseConnectionsInRfcServerIni)
                (rfcServerIniFile.content)))

    # -------------------------------------------------------- REPORTING
    "SAP TREX plug-in REPORTING start" | info
    trexOsh = application.getOsh()
    vector = context.resultsVector
    configFileReporter = file_topology.Reporter(file_topology.Builder())
    trexReporter = sap_trex.Reporter(sap_trex.Builder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()

    "x) - report profile content as configuration document for the application" | info
    vector.add(configFileReporter.report(profileFile, trexOsh))

    ("x) - report %s" % trexSystem) | info
    trexSystemOsh = trexReporter.reportSystem(trexSystem)
    vector.add(trexSystemOsh)
    vector.add(linkReporter.reportMembership(trexSystemOsh, trexOsh))

    "x) - report instance name and version" | info
    softwareBuilder.updateName(trexOsh, trexInstanceName)
    "x) report instance number: %s" % trexInstance.getNumber() | info
    instanceBuilder = sap_trex.Builder()
    instanceBuilder.updateInstanceNumber(trexOsh, trexInstance.getNumber())
    if versionInfo:
        softwareBuilder.updateVersionInfo(trexOsh, versionInfo)
    if isBiaProduct:
        softwareBuilder.updateDiscoveredProductName(trexOsh,
            sap_trex.Product.BIA.instanceProductName)

    "x) report RFC connections" | info
    dnsResolver = netutils.DnsResolverByShell(shell, destinationIp)
    vector.addAll(reportRfcConfigs(rfcConfigs, dnsResolver, hostOsh))
    "x) report all topology nodes" | info
    if trexTopology:
        # bind topology/product flag; map over host nodes and merge vectors
        reportHostNode = fptools.partiallyApply(reportTrexHostNode,
            fptools._, trexTopology, isBiaProduct)
        vectors = map(reportHostNode, trexTopology.getHostNodes())
        fptools.each(vector.addAll, vectors)
# This can lead to incorrect merging of to systems (inside OSH vector) # systemOsh = trexReporter.reportSystem(system) # vector.add(systemOsh) # oshs.append(linkReporter.reportMembership(sapOsh, instOsh)) return oshs def _reportHostByEndpoints(endpoints): ''' Return node osh, list of endpoint OSHs and all oshs in one list @types: list[Endpoint] -> osh, list[osh], oshv''' hostReporter = sap.HostReporter(sap.HostBuilder()) ips = map(netutils.Endpoint.getAddress, endpoints) hostOsh, vector = hostReporter.reportHostWithIps(*ips) reporter = netutils.EndpointReporter(netutils.ServiceEndpointBuilder()) oshs = [reporter.reportEndpoint(e, hostOsh) for e in endpoints] return hostOsh, oshs, oshs + [hostOsh] + list(vector) # function that returns true when process is TREXDaemon or sap.TREX launch process # implemented as paritially applied function isMainTrexProcess = fptools.partiallyApply( # create function composed of two that will return true if any of them return true fptools.anyFn( bool, # any true value (sap_trex_discoverer.isTrexDaemonProcess, sap_trex_discoverer.isTrexLaunchProcess)), # missed parameter - process itself fptools._)
def process(self, context):
    r'''Discover and report the SAP SMD (diagnostics) agent.

    Flow: locate the main agent process, read and parse its instance
    profile, then (optionally) runtime.properties and dev_smdagent trace
    config, and finally report the agent plus its SLD / Solution Manager
    relations into context.resultsVector.
    @types: applications.ApplicationSignatureContext
    '''
    # ==================== DISCOVERY
    shell = context.client
    fs = file_system.createFileSystem(shell)
    pathtools = file_system.getPath(fs)
    # 1) get process related application
    application = context.application
    connectionIp = application.getConnectionIp()
    # 2) find out process where path to the instance profile is stored
    logger.info(" Get executable path of main process ")
    mainProcess = application.getMainProcesses()[0]
    # 3)
    logger.info("Found out path to instance profile")
    instanceProfilePath = self.__getProfilePath(mainProcess)
    # 4)
    logger.info("Instance profile path: ", instanceProfilePath, ". Get content")
    # exception-safe reader: returns None instead of raising
    getContent = fptools.safeFunc(self.__getContent, Exception)
    profileFile = (instanceProfilePath
                   and getContent(shell, pathtools, instanceProfilePath))
    if not profileFile:
        logger.warn("Failed to get content of instance profile")
        return
    # 5) parse content using instance and default profile parsers
    logger.info("Make configuration parsing")
    iniParser = sap_discoverer.IniParser()
    instancePfParser = sap_discoverer.InstanceProfileParser(iniParser)
    try:
        instanceProfile = instancePfParser.parseContent(profileFile.content)
    except Exception:
        logger.warnException("Failed to parse instance profile")
    else:
        traceConfig = None
        runtimeConfig = None
        sapInstance = instanceProfile.instance
        # rebuild the instance with the composed name (<name><number>)
        sapInstance = sap.Instance(sapInstance.name + sapInstance.number,
                                   sapInstance.number,
                                   sapInstance.hostname)
        # 6) Process runtime.properties that contains information about
        #    Solution Manager and SLD if present
        logger.info("Create agent layout")
        logger.info("Get content of runtime properties")
        agentLayout = fptools.safeFunc(
            sap_smd_discoverer.createAgentLayoutFromBinPath)(
                (pathtools.isAbsolute(mainProcess.executablePath)
                 and mainProcess.executablePath
                 or discoverExecutablePath(shell, mainProcess)),
                fs, pathtools)
        if agentLayout:
            propertiesFile = getContent(shell, pathtools,
                                        agentLayout.getRuntimePropertiesPath())
            if propertiesFile:
                parser = sap_smd_discoverer.RuntimePropertiesParser(
                    sap_discoverer.IniParser())
                try:
                    runtimeConfig = parser.parse(propertiesFile.content)
                except Exception:
                    logger.warnException("Failed to parse runtime properties")

            logger.info("Find out version information")
            devSmdAgentFile = getContent(shell, pathtools,
                                         agentLayout.getDevSmdAgentConfigFile())
            if devSmdAgentFile:
                configParser = sap_smd_discoverer.DevSmdAgentConfigParser()
                # find config with corresponding PID (of main process)
                hasMainProcessPid = lambda c, pid=mainProcess.getPid(): c.pid == pid
                traceConfig = fptools.findFirst(
                    hasMainProcessPid, configParser.parse(devSmdAgentFile.content))
                if not traceConfig:
                    logger.warn("Failed to find trace information for the main process")

        # === REPORT ===
        smdAgentOsh = application.getOsh()
        vector = context.resultsVector
        endpointReporter = netutils.EndpointReporter(
            netutils.ServiceEndpointBuilder())
        configFileReporter = file_topology.Reporter(file_topology.Builder())
        linkReporter = sap.LinkReporter()
        smdAgentBuilder = sap_smd.Builder()
        softwareBuilder = sap.SoftwareBuilder()
        softwareReporter = sap.SoftwareReporter(sap.SoftwareBuilder())
        resolverByShell = netutils.DnsResolverByShell(shell)
        processOsh = mainProcess.getOsh()
        # x) update name of application using instance name
        softwareBuilder.updateName(smdAgentOsh, sapInstance.getName())
        # x) configuration files related to running_software
        vector.add(configFileReporter.report(profileFile, smdAgentOsh))
        if traceConfig:
            # x) update version information in application
            smdAgentOsh = softwareBuilder.updateVersionInfo(
                smdAgentOsh, traceConfig.versionInfo)
            if traceConfig.jstartVersionInfo:
                smdAgentOsh = smdAgentBuilder.updateJstartVersionInfo(
                    smdAgentOsh, traceConfig.jstartVersionInfo)
        # x) show relation between agent and
        #    - SMD server / no enough information /
        #    - message server of SCS OR Solution Manager, represented as
        #      agent connection endpoint
        #    - SLD
        if propertiesFile and runtimeConfig:
            # x) report properties file as configuration document
            vector.add(configFileReporter.report(propertiesFile, smdAgentOsh))
            # x) Report relation between agent and SLD server and SolMan
            # Resolve endpoint addresses
            # make function that will accept endpoint only
            resolveEndpointFn = fptools.partiallyApply(
                self.__resolveEndpointAddress,
                fptools.safeFunc(resolverByShell.resolveIpsByHostname, []),
                fptools._)
            # - SLD relation
            if runtimeConfig.sldEndpoint:
                for endpoint in resolveEndpointFn(runtimeConfig.sldEndpoint):
                    sldHostOsh = endpointReporter.reportHostFromEndpoint(endpoint)
                    vector.add(sldHostOsh)
                    sldEndpointOsh = endpointReporter.reportEndpoint(endpoint, sldHostOsh)
                    vector.add(sldEndpointOsh)
                    # this unknown server type must be SLD server
                    sldOsh = softwareReporter.reportUknownSoftware(sldHostOsh)
                    vector.add(sldOsh)
                    vector.add(linkReporter.reportUsage(sldOsh, sldEndpointOsh))
                    # report link between process and SLD server endpoint
                    vector.add(linkReporter.reportClientServerRelation(
                        processOsh, sldEndpointOsh))

            # - Solution Manager relation
            # (method name typo is part of the project API; do not "fix" here)
            agentConnectionEndpoint = runtimeConfig.getAgentConnecitonEndpoint()
            if agentConnectionEndpoint:
                for endpoint in resolveEndpointFn(agentConnectionEndpoint):
                    hostOsh = endpointReporter.reportHostFromEndpoint(endpoint)
                    vector.add(hostOsh)
                    endpointOsh = endpointReporter.reportEndpoint(endpoint, hostOsh)
                    vector.add(endpointOsh)
                    softwareOsh = softwareReporter.reportUknownSoftware(hostOsh)
                    vector.add(softwareOsh)
                    vector.add(linkReporter.reportUsage(softwareOsh, endpointOsh))
                    # report link between process and SolMan end-point
                    vector.add(linkReporter.reportClientServerRelation(
                        processOsh, endpointOsh))
def executeAlternateCmds(shell, cmdList):
    '''Execute the alternative commands one by one and return the first
    result for which isCommandSuccessful holds.

    The result stream is lazy, so no further command runs once a
    successful one is found.
    @types: Shell, list[str] -> result or None
    '''
    resultStream = imap(lambda cmd: executeCmd(shell, cmd), cmdList)
    return iteratortools.findFirst(isCommandSuccessful, resultStream)
def reportTrexHostNode(hostNode, topology, isBiaProduct):
    r'''Report one TREX topology host node: host, IPs, name-server
    endpoints, the TREX instance with its product name and name-server
    role (master / first master / slave), plus usage links.
    @types: TrexTopologyConfig.HostNode, TrexTopologyConfig, bool -> ObjectStateHolderVector'''
    trexBuilder = sap_trex.Builder()
    trexReporter = sap_trex.Reporter(trexBuilder)
    hostReporter = sap_trex.HostReporter(sap_trex.HostBuilder())
    endpointReporter = netutils.EndpointReporter(
        netutils.ServiceEndpointBuilder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()
    # x) create sap system
    system = hostNode.system
    vector = ObjectStateHolderVector()
    # process NameServer endpoints and ignore loopback endpoints
    isLoopbackEndpoint = lambda e: netutils.isLoopbackIp(e.getAddress())
    _, endpoints = fptools.partition(isLoopbackEndpoint,
                                     hostNode.nameServerEndpoints)
    # x) create host OSH
    hostOsh = hostReporter.reportHostByHostname(hostNode.name)
    vector.add(hostOsh)
    # x) report IPs
    ips = map(netutils.Endpoint.getAddress, endpoints)
    ipOshs = map(modeling.createIpOSH, ips)
    fptools.each(vector.add, ipOshs)
    #vector.addAll(ipOshs)
    # x) report containment between host nad ips
    reportContainment = fptools.partiallyApply(linkReporter.reportContainment,
                                               hostOsh, fptools._)
    fptools.each(vector.add, map(reportContainment, ipOshs))
    # x) report end-points
    reportEndpoint = fptools.partiallyApply(endpointReporter.reportEndpoint,
                                            fptools._, hostOsh)
    endpointOshs = map(reportEndpoint, endpoints)
    fptools.each(vector.add, endpointOshs)
    # x) report TREX instance itself
    instanceOsh = trexReporter.reportInstance(first(system.getInstances()),
                                              hostOsh)
    # x) mark as BIA or plain-TREX
    productName = (isBiaProduct
                   and sap_trex.Product.BIA.instanceProductName
                   or sap_trex.Product.TREX.instanceProductName)
    softwareBuilder.updateDiscoveredProductName(instanceOsh, productName)
    # x) set name server role (master, slave or 1st master)
    # NOTE(review): assumes `endpoints` is non-empty here — first() on an
    # empty sequence would fail; confirm against callers
    nameServerPort = first(endpoints).getPort()
    nameServerEndpoint = netutils.createTcpEndpoint(hostNode.name,
                                                    nameServerPort)
    topologyGlobals = topology.getGlobals()
    isMaster = nameServerEndpoint in (
        fptools.safeFunc(topologyGlobals.getMasterEndpoints)() or ())
    isActiveMaster = nameServerEndpoint in (
        fptools.safeFunc(topologyGlobals.getActiveMasterEndpoints)() or ())
    trexBuilder.updateNameServerMode(
        instanceOsh,
        (isMaster
         and (isActiveMaster
              and sap_trex.NameServerMode.FIRST_MASTER
              or sap_trex.NameServerMode.MASTER)
         or sap_trex.NameServerMode.SLAVE))
    vector.add(instanceOsh)
    # x) DO NOT report 'membership' between system and instance
    # Explanation:
    # sometimes you can discover systems that don't have relationship to current host.
    # This can lead to incorrect merging of to systems (inside OSH vector)
    # systemOsh = trexReporter.reportSystem(system)
    # vector.add(systemOsh)
    # vector.add(linkReporter.reportMembership(systemOsh, instanceOsh))
    # x) report 'usage' between instance and endpoints of name-server
    reportUsage = fptools.partiallyApply(linkReporter.reportUsage,
                                         instanceOsh, fptools._)
    fptools.each(vector.add, map(reportUsage, endpointOshs))
    return vector
def process(self, context):
    r'''Discover and report SAP TREX topology for the signed application.

    Flow: locate the TREX daemon process, read and parse its instance
    profile, derive system/instance layout, optionally parse topology.ini,
    resolve the version, then report OSHes into context.resultsVector.

    NOTE: bare strings piped to `info`/`warn`/`debug` are logging
    statements (custom `__ror__`-based DSL), not dead code.
    @types: applications.ApplicationSignatureContext
    '''
    # ------------------------------------------------------------ DISCOVERY
    "SAP TREX plug-in DISCOVERY start" | info
    shell = context.client
    fs = file_system.createFileSystem(shell)
    pathtools = file_system.getPath(fs)

    # x) get process related application
    hostOsh = context.hostOsh
    application = context.application
    destinationIp = application.getConnectionIp()

    "x) Find TREX Daemon process that has profile path as parameter" | info
    mainProcess = findFirst(isMainTrexProcess,
                            context.application.getProcesses())
    profilePath = sap_discoverer.getProfilePathFromCommandline(
        mainProcess.commandLine)

    "x) Read profile content: %s" % profilePath | info
    # curried, exception-safe file reader: takes only the path
    getFileContent = Sfn(
        Fn(self.__getFileWithContent, shell, pathtools, __))
    profileFile = profilePath and getFileContent(profilePath)
    if not profileFile:
        "Plug-in flow broken. Failed to read instance profile\
 content based on path from the TREXDaemon command line" | warn
        return

    "x) Instance profile parsing" | info
    sapIniParser = sap_discoverer.IniParser()
    instanceProfileParser = sap_discoverer.InstanceProfileParser(
        sapIniParser)
    defaultProfileParser = sap_trex_discoverer.DefaultProfileParser(
        sapIniParser)
    try:
        resultAsIni = instanceProfileParser.parseAsIniResult(
            profileFile.content)
        instanceProfile = instanceProfileParser.parse(resultAsIni)
        defaultProfile = defaultProfileParser.parse(resultAsIni)
    except Exception:
        logger.warnException("Failed to parse instance profile")
        return

    rfcConfigs = []
    trexSystem = defaultProfile.getSystem()
    trexInstance = instanceProfile.getInstance()
    trexInstanceName = trexInstance.getName() + trexInstance.getNumber()

    isBiaProduct = 0
    versionInfo = None
    # # master by default, if topology file is not found that means
    # # that current one is the only instance
    # isMaster = 1
    trexTopology = None

    "x) Initialize TREX instance layout" | debug
    systemName = trexSystem.getName()
    systemBasePath = sap_discoverer.findSystemBasePath(
        mainProcess.getExecutablePath(), systemName)
    if systemBasePath:
        systemLayout = sap_trex_discoverer.SystemLayout(
            pathtools, systemBasePath, systemName)
        'System path: %s' % systemLayout.getRootPath() | info
        instancePath = systemLayout.composeInstanceDirPath(
            trexInstanceName)
        'Instance path: %s' % instancePath | debug
        instanceLayout = sap_trex_discoverer.InstanceLayout(
            pathtools, instancePath, trexInstanceName)

        "x) Get content of default profile as it contains information about product"
        "x) Determine whether we deal with BIA based on version information" | debug
        defaultProfilePath = systemLayout.getDefaultProfileFilePath()
        defaultProfileFile = getFileContent(defaultProfilePath)
        try:
            resultAsIni = instanceProfileParser.parseAsIniResult(
                defaultProfileFile.content)
            defaultProfile = defaultProfileParser.parse(resultAsIni)
        except Exception:
            logger.warnException("Failed to parse default profile")
        else:
            isBiaProduct = defaultProfile.getProductType(
            ) == sap_trex.Product.BIA
        (isBiaProduct and "BIA" or "non-BIA", "product detected") | info

        # get instance host name from profile name
        instanceHostname = None
        try:
            destinationSystem = sap_discoverer.parseSapSystemFromInstanceProfileName(
                profileFile.getName())
        except Exception:
            msg = "Failed to parse instance hostname from profile file name"
            logger.debugException(msg)
        else:
            instanceHostname = first(
                destinationSystem.getInstances()).getHostname()

        "x) Discover whole topology from (topology.ini)" | info
        # topology.ini file location and format differs depending on the
        # product:
        # -a) BIA (has plain-ini file at <SID>/sys/global/trex/data/topology.ini
        # -b) TREX (has several places where topology.ini can be stored)
        discoverTopologyIniFilePath = fptools.safeFunc(
            sap_trex_discoverer.discoverTopologyIniFilePath)
        topologyFilePath = (isBiaProduct
                            and systemLayout.getTopologyIniPath()
                            or discoverTopologyIniFilePath(
                                fs, instanceLayout, instanceHostname))
        topologyFile = topologyFilePath and getFileContent(
            topologyFilePath)
        if topologyFile:
            try:
                configParser = sap_trex_discoverer.TopologyConfigParser()
                trexTopology = sap_trex_discoverer.TrexTopologyConfig(
                    configParser.parse(topologyFile.content))
                # find instance between master end-points
                # landscapeSnapshot = topology.getGlobals().getLandscapeSnapshot()
                # masterEndpoints = landscapeSnapshot.getMasterEndpoints()
                # activeMasterEndpoints = landscapeSnapshot.getActiveMasterEndpoints()
                # topologyNodes = topology.getHostNodes()
                ##
                # isEndpointWithInstanceHostname = (lambda
                #     ep, hostname = instanceHostname: ep.getAddress() == hostname)
                # isMaster = len(filter(isEndpointWithInstanceHostname,
                #     landscapeSnapshot.getMasterEndpoints()))
                # "host role is %s" % (isMaster and "master" or "slave") | info
            except:
                logger.warnException(
                    "Failed to parse topology configuration")
        else:
            logger.warn(
                "Failed to get content for the topology configuration")

        "x) Discover TREX version information from saptrexmanifest.mf" | info
        # read version info from manifest file
        manifestFile = getFileContent(instanceLayout.getManifestFilePath())
        if manifestFile:
            manifestParser = sap_trex_discoverer.SapTrexManifestParser(
                sapIniParser)
            versionInfo = manifestParser.parseVersion(manifestFile.content)
        else:
            'Failed to discover version from manifest file' | warn
            'Second attept to get version from updateConfig.ini file' | info
            profileSystem = Sfn(
                sap_discoverer.parseSapSystemFromInstanceProfileName)(
                    profileFile.getName())
            if profileSystem:
                hostname = first(
                    profileSystem.getInstances()).getHostname()
                updateConfigFile = getFileContent(
                    instanceLayout.composeUpdateConfigIniFilePath(
                        hostname))
                versionInfo = updateConfigFile and sap.VersionInfo(
                    updateConfigFile.content.strip())

        "x) Discover served systems ( in case if RFC configuration established )" | info
        rfcServerIniFilePath = (
            isBiaProduct and systemLayout.getRfcServerConfigFilePath()
            or instanceLayout.composeTrexRfcServerIniFilePath(
                instanceHostname))
        rfcServerIniFile = getFileContent(rfcServerIniFilePath)
        if rfcServerIniFile:
            rfcConfigs = filter(None, (fptools.safeFunc(
                sap_trex_discoverer.parseConnectionsInRfcServerIni)(
                    rfcServerIniFile.content)))

    # -------------------------------------------------------- REPORTING
    "SAP TREX plug-in REPORTING start" | info
    trexOsh = application.getOsh()
    vector = context.resultsVector
    configFileReporter = file_topology.Reporter(file_topology.Builder())
    trexReporter = sap_trex.Reporter(sap_trex.Builder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()

    "x) - report profile content as configuration document for the application" | info
    vector.add(configFileReporter.report(profileFile, trexOsh))

    ("x) - report %s" % trexSystem) | info
    trexSystemOsh = trexReporter.reportSystem(trexSystem)
    vector.add(trexSystemOsh)
    vector.add(linkReporter.reportMembership(trexSystemOsh, trexOsh))

    "x) - report instance name and version" | info
    softwareBuilder.updateName(trexOsh, trexInstanceName)
    "x) report instance number: %s" % trexInstance.getNumber() | info
    instanceBuilder = sap_trex.Builder()
    instanceBuilder.updateInstanceNumber(trexOsh, trexInstance.getNumber())
    if versionInfo:
        softwareBuilder.updateVersionInfo(trexOsh, versionInfo)
    if isBiaProduct:
        softwareBuilder.updateDiscoveredProductName(
            trexOsh, sap_trex.Product.BIA.instanceProductName)

    "x) report RFC connections" | info
    dnsResolver = netutils.DnsResolverByShell(shell, destinationIp)
    vector.addAll(reportRfcConfigs(rfcConfigs, dnsResolver, hostOsh))
    "x) report all topology nodes" | info
    if trexTopology:
        # bind topology/product flag; map over host nodes and merge vectors
        reportHostNode = fptools.partiallyApply(reportTrexHostNode,
                                                fptools._, trexTopology,
                                                isBiaProduct)
        vectors = map(reportHostNode, trexTopology.getHostNodes())
        fptools.each(vector.addAll, vectors)
def DiscoveryMain(framework):
    r'''Job entry point: verify connectivity to JBoss over JMX.
    @types: Framework -> ObjectStateHolderVector
    '''
    # bind the protocol name up front; client, credential and port
    # placeholders are filled in by the generic connect() flow
    checkFn = partiallyApply(jmxConnect, _, _, _, JBOSS_PROTOCOL_NAME)
    return connect(framework,
                   checkConnectFn=checkFn,
                   parametersToCheck=[PROTOCOL_ATTRIBUTE_PORT])
def reportTrexHostNode(hostNode, topology, isBiaProduct):
    r'''Report one TREX topology host node: host, IPs, name-server
    endpoints, the TREX instance with its product name and name-server
    role (master / first master / slave), plus usage links.
    @types: TrexTopologyConfig.HostNode, TrexTopologyConfig, bool -> ObjectStateHolderVector'''
    trexBuilder = sap_trex.Builder()
    trexReporter = sap_trex.Reporter(trexBuilder)
    hostReporter = sap_trex.HostReporter(sap_trex.HostBuilder())
    endpointReporter = netutils.EndpointReporter(
        netutils.ServiceEndpointBuilder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()
    # x) create sap system
    system = hostNode.system
    vector = ObjectStateHolderVector()
    # process NameServer endpoints and ignore loopback endpoints
    isLoopbackEndpoint = lambda e: netutils.isLoopbackIp(e.getAddress())
    _, endpoints = fptools.partition(isLoopbackEndpoint,
                                     hostNode.nameServerEndpoints)
    # x) create host OSH
    hostOsh = hostReporter.reportHostByHostname(hostNode.name)
    vector.add(hostOsh)
    # x) report IPs
    ips = map(netutils.Endpoint.getAddress, endpoints)
    ipOshs = map(modeling.createIpOSH, ips)
    fptools.each(vector.add, ipOshs)
    #vector.addAll(ipOshs)
    # x) report containment between host nad ips
    reportContainment = fptools.partiallyApply(linkReporter.reportContainment,
                                               hostOsh, fptools._)
    fptools.each(vector.add, map(reportContainment, ipOshs))
    # x) report end-points
    reportEndpoint = fptools.partiallyApply(endpointReporter.reportEndpoint,
                                            fptools._, hostOsh)
    endpointOshs = map(reportEndpoint, endpoints)
    fptools.each(vector.add, endpointOshs)
    # x) report TREX instance itself
    instanceOsh = trexReporter.reportInstance(first(system.getInstances()),
                                              hostOsh)
    # x) mark as BIA or plain-TREX
    productName = (isBiaProduct
                   and sap_trex.Product.BIA.instanceProductName
                   or sap_trex.Product.TREX.instanceProductName)
    softwareBuilder.updateDiscoveredProductName(instanceOsh, productName)
    # x) set name server role (master, slave or 1st master)
    # NOTE(review): assumes `endpoints` is non-empty here — first() on an
    # empty sequence would fail; confirm against callers
    nameServerPort = first(endpoints).getPort()
    nameServerEndpoint = netutils.createTcpEndpoint(hostNode.name,
                                                    nameServerPort)
    topologyGlobals = topology.getGlobals()
    isMaster = nameServerEndpoint in (
        fptools.safeFunc(topologyGlobals.getMasterEndpoints)() or ())
    isActiveMaster = nameServerEndpoint in (
        fptools.safeFunc(topologyGlobals.getActiveMasterEndpoints)() or ())
    trexBuilder.updateNameServerMode(
        instanceOsh,
        (isMaster
         and (isActiveMaster
              and sap_trex.NameServerMode.FIRST_MASTER
              or sap_trex.NameServerMode.MASTER)
         or sap_trex.NameServerMode.SLAVE))
    vector.add(instanceOsh)
    # x) DO NOT report 'membership' between system and instance
    # Explanation:
    # sometimes you can discover systems that don't have relationship to current host.
    # This can lead to incorrect merging of to systems (inside OSH vector)
    # systemOsh = trexReporter.reportSystem(system)
    # vector.add(systemOsh)
    # vector.add(linkReporter.reportMembership(systemOsh, instanceOsh))
    # x) report 'usage' between instance and endpoints of name-server
    reportUsage = fptools.partiallyApply(linkReporter.reportUsage,
                                         instanceOsh, fptools._)
    fptools.each(vector.add, map(reportUsage, endpointOshs))
    return vector
def getUnderlyingNodeByName(node, name):
    r'''Find the direct child of `node` whose name equals `name`.
    @types: str -> TopologyConfigParser.Node
    @raise TrexTopologyConfig.NodeNotFound
    '''
    # `name=name` default binds the value at definition time
    node = fptools.findFirst(lambda c, name = name: c.getName() == name,
                             node.children)
    if not node:
        raise TrexTopologyConfig.NodeNotFound()
    return node


def _isProcessNameStartswith(prefixInLowerCase, process):
    r'''True if the process name (lower-cased) starts with the given prefix.
    @types: str, process.Process -> bool'''
    return (process
            and process.getName().lower().startswith(prefixInLowerCase))


# predicates for the two known TREX process kinds, built by fixing the prefix
isTrexDaemonProcess = fptools.partiallyApply(
    _isProcessNameStartswith, 'trexdaemon', fptools._)
isTrexLaunchProcess = fptools.partiallyApply(
    _isProcessNameStartswith, 'trx.sap', fptools._)


class SystemLayout(sap_discoverer.Layout):

    def getRfcServerConfigFilePath(self):
        r''' Get path to the TREX RFC Server configuration file
        @resource-file: <SID>/SYS/global/trex/custom/config/TREXRfcServer.ini
        @types: -> str
        '''
        return self._getPathTools().join(
            self.getRootPath(), 'global', 'trex', 'custom', 'config',
            'TREXRfcServer.ini')
# vector.add(systemOsh) # oshs.append(linkReporter.reportMembership(sapOsh, instOsh)) return oshs def _reportHostByEndpoints(endpoints): ''' Return node osh, list of endpoint OSHs and all oshs in one list @types: list[Endpoint] -> osh, list[osh], oshv''' hostReporter = sap.HostReporter(sap.HostBuilder()) ips = map(netutils.Endpoint.getAddress, endpoints) hostOsh, vector = hostReporter.reportHostWithIps(*ips) reporter = netutils.EndpointReporter(netutils.ServiceEndpointBuilder()) oshs = [reporter.reportEndpoint(e, hostOsh) for e in endpoints] return hostOsh, oshs, oshs + [hostOsh] + list(vector) # function that returns true when process is TREXDaemon or sap.TREX launch process # implemented as paritially applied function isMainTrexProcess = fptools.partiallyApply( # create function composed of two that will return true if any of them return true fptools.anyFn(bool, # any true value (sap_trex_discoverer.isTrexDaemonProcess, sap_trex_discoverer.isTrexLaunchProcess) ), # missed parameter - process itself fptools._ )