def iterate_over_creds(main_fn, proto_name, stop_on_first=True, is_cred_ok_fn=identity):
    ''' Decorator for the DiscoveryMain function in case when connection
    attempts performed over available protocols

    @param main_fn: DiscoveryMain function reference
    @param proto_name: protocol to connect with
    @param stop_on_first: Stop on first successful discovery
    @param is_cred_ok_fn: predicate to check whether credentials are suitable
        Signature is (Framework, CredsManager, str -> bool)

    Usage:

        from fptools import paritallyApply as Fn, _ as __

        @Fn(iterate_over_creds, __, ClientsConsts.SSH_PROTOCOL_NAME)
        def DiscoveryMain(rich_framework, creds_manager, cred_id):
            '@types: Framework, CredsManager, str -> list[osh], list[str]'
            ...
            return oshs, warnings
    '''
    @wraps(main_fn)
    def decorator(framework):
        vector = ObjectStateHolderVector()
        # wrap raw framework to get convenience accessors
        framework = RichFramework(framework)
        creds_manager = CredsManager(framework)
        creds = creds_manager.get_creds_for_destination(proto_name)
        # keep only credentials the caller-supplied predicate accepts
        creds = filter(Fn(is_cred_ok_fn, framework, creds_manager, __), creds)
        if not creds:
            logger.reportErrorObject(_create_missed_creds_error(proto_name))
        else:
            connection_exs = []
            discovery_exs = []
            warnings = []
            at_least_once_discovered = False
            for cred_id in creds:
                try:
                    # `warnings` is rebound on every successful attempt, so
                    # only the last successful run's warnings are reported
                    oshs, warnings = main_fn(framework, creds_manager, cred_id)
                    vector.addAll(oshs)
                    at_least_once_discovered = True
                    if stop_on_first:
                        break
                except ConnectionException, ce:
                    logger.debugException(str(ce))
                    connection_exs.append(ce)
                except (DiscoveryException, Exception, JException), de:
                    logger.debugException(str(de))
                    discovery_exs.append(de)
            if at_least_once_discovered:
                if warnings:
                    each(logger.reportWarning, warnings)
            else:
                # nothing discovered at all: connection problems are warnings,
                # discovery problems are errors
                for ex in connection_exs:
                    obj = _create_connection_errorobj(proto_name, ex.message)
                    logger.reportWarningObject(obj)
                for ex in discovery_exs:
                    obj = _create_discovery_errorobj(proto_name, ex.message)
                    logger.reportErrorObject(obj)
    # NOTE(review): neither `vector` nor `decorator` is returned in this
    # chunk - the block looks truncated; confirm against the full module.
def wrapper(*args_, **kwargs_):
    '''Inner wrapper of a tool-injection decorator.

    Maps the caller's positional/keyword arguments onto the tool names
    declared by the decorator, merges tool factories, then builds the
    final call arguments for the wrapped function.

    NOTE(review): `args`, `basic_tool_factories` and `original_fn` are
    closure variables from the enclosing decorator - not visible here;
    confirm their contracts against the decorator definition.
    '''
    # args[0] (if any) is the tuple of declared tool names
    original_toolnames = args and args[0] or ()
    factories_by_name = basic_tool_factories.copy()
    tools = {}
    if 'tools' in kwargs_:
        # copy so we never mutate the caller's dict
        tools = kwargs_['tools'].copy()
    # names beyond the positional args must come in as keyword arguments
    kwargs_toolnames = original_toolnames[len(args_):]
    original_toolnames = original_toolnames[:len(args_)]
    for nr, toolname in enumerate(original_toolnames):
        tools[toolname] = args_[nr]
    for toolname in kwargs_toolnames:
        tools[toolname] = kwargs_[toolname]
    # remaining decorator arguments are extra factory dicts to merge in
    if len(args) > 1:
        each(factories_by_name.update, args[1:])
    if 'factories' in tools:
        factories_by_name.update(tools['factories'])
    if 'factories' in kwargs_:
        factories_by_name.update(kwargs_['factories'])
    tools['factories'] = factories_by_name
    # self-reference lets built tools look up the whole tool set
    tools['tools'] = tools
    required_toolnames = _get_fn_arg_names(original_fn)
    optional_toolnames = {}
    args_, kwargs_ = _build_tools(required_toolnames, optional_toolnames, factories_by_name, tools)
    return original_fn(*args_, **kwargs_)
def decorator_fn(original_fn):
    '''Decorator that drives a DiscoveryMain over client factories.

    For every factory provided by `client_factories_provider` a client is
    created (as a context manager) and `original_fn(framework, client,
    index)` is invoked; results are accumulated and errors classified.

    NOTE(review): `kwargs` and `default_client_factories_provider` are
    closure/outer names not visible in this chunk - confirm upstream.
    '''
    @wraps(original_fn)
    def wrapper(framework):
        client_factories_provider = kwargs.get('client_factories_provider')
        if not client_factories_provider:
            client_factories_provider = default_client_factories_provider
        proto_name = kwargs.get('protocol_name')
        stop_on_first = kwargs.get('stop_on_first')
        vector = ObjectStateHolderVector()
        framework = RichFramework(framework)
        if not proto_name:
            # fall back to the destination's declared protocol
            proto_name = framework.get_dest_attribute('Protocol')
        creds_manager = CredsManager(framework)
        client_factories = client_factories_provider(framework, creds_manager)
        # peek one item to distinguish "no factories" from a lazy iterator
        first_factory = take(0, 1, client_factories)
        if not first_factory:
            logger.reportErrorObject(_create_missed_creds_error(proto_name))
        else:
            connection_exs = []
            discovery_exs = []
            warnings = []
            at_least_once_discovered = False
            oshs = []
            # put the peeked factory back in front of the remaining ones
            client_factories = list(itertools.chain(first_factory, client_factories))
            main_fn = original_fn
            for index, client_factory in enumerate(client_factories):
                try:
                    with client_factory() as client:
                        args_ = (framework, client, index)
                        kwargs_ = {}
                        oshs_, warnings_ = main_fn(*args_, **kwargs_)
                        oshs.extend(oshs_)
                        warnings.extend(warnings_)
                        at_least_once_discovered = True
                        if stop_on_first:
                            break
                except ConnectionException, ce:
                    logger.debugException(str(ce))
                    connection_exs.append(ce)
                except (DiscoveryException, Exception), de:
                    logger.debugException(str(de))
                    discovery_exs.append(de)
            if at_least_once_discovered:
                each(logger.reportWarningObject, warnings)
            else:
                # nothing succeeded: connection issues -> warnings,
                # discovery issues -> errors
                for ex in connection_exs:
                    obj = _create_connection_errorobj(proto_name, ex.message)
                    logger.reportWarningObject(obj)
                for ex in discovery_exs:
                    obj = _create_discovery_errorobj(proto_name, ex.message)
                    logger.reportErrorObject(obj)
            vector.addAll(oshs)
    # NOTE(review): `wrapper` never returns `vector` and `decorator_fn`
    # never returns `wrapper` in this chunk - looks truncated; verify.
def discoverSwitches(client, ips_set, allowDnsLookup, ignoreNodesWithoutIP):
    '''Discover fibre-channel switches and their ports/IPs.

    @types: Client, set[IPAddress], bool, bool -> generator
    @param ips_set: shared set of already-seen IPs; discovered switch IPs
        are added to it (mutated in place)
    @return: generator of tuples with such elements
        switch descriptor
        switch osh
        ports-to-osh mapping
        iterable of IP-related oshs
    '''
    try:
        switches = _query_switches(client)
    except (Exception, JException):
        logger.warnException("Failed to get switches")
    else:
        for switch in switches:
            ips = _discover_switch_ips(switch, ips_set, allowDnsLookup)
            if ips:
                # change address of switch to IP as reporting depends on it
                switch = switch._replace(address=first(ips))
                each(ips_set.add, ips)
            elif ignoreNodesWithoutIP:
                logger.debug("%s is ignored due to missing IPs" % str(switch))
                continue
            switch_osh = _build_fc_switch_osh(switch)
            port_to_osh = _discoverPortsPerSwitch(client, switch, switch_osh)
            # flatten per-IP report vectors into one iterable of oshs
            ip_oshs = chain(*[_report_ips(switch_osh, ip) for ip in ips])
            yield switch, switch_osh, port_to_osh, ip_oshs
def DiscoveryMain(Framework):
    '''Job entry point: discover a SAP JMX destination endpoint.

    Builds a TCP endpoint from the `ip_port_pair` destination attribute,
    converts it to an HTTP endpoint for the `sap_jmx` service and runs
    discovery, reporting collected errors and warnings.
    @types: Framework -> ObjectStateHolderVector
    '''
    resultVector = ObjectStateHolderVector()
    portNameConfig = Framework.getConfigFile(CollectorsParameters.KEY_COLLECTORS_SERVERDATA_PORTNUMBERTOPORTNAME)
    rawPair = Framework.getDestinationAttribute('ip_port_pair')
    # rsplit tolerates IPv6-style addresses containing ':'
    host, portValue = rawPair.rsplit(':', 1)
    endpoint = netutils.createTcpEndpoint(host, portValue)
    serviceName = Framework.getDestinationAttribute('ip_service_name')
    if serviceName == 'sap_jmx':
        endpoint = convertToHttpEndpoint(endpoint)
    if not endpoint:
        # conversion yielded nothing usable - report and bail out
        protocolName = 'sapjmxprotocol'
        protocolLabel = errormessages.protocolNames[protocolName]
        messagePattern = errormessages.NO_HTTP_ENDPOINTS_TO_PROCESS_ERROR
        msg = errormessages.makeErrorMessage(protocolName, pattern=messagePattern)
        errCode = errorcodes.NO_HTTP_ENDPOINTS_TO_PROCESS
        errobj = errorobject.createError(errCode, [protocolLabel], msg)
        logger.reportWarningObject(errobj)
        return resultVector
    logger.debug('Current %s' % endpoint)
    resultVector, errors, warnings = _discover(Framework, portNameConfig, endpoint)
    logger.debug('Result vector size: %d' % resultVector.size())
    logger.debug('Errors: %s' % errors)
    logger.debug('Warnings: %s' % warnings)
    fptools.each(logger.reportErrorObject, errors)
    fptools.each(logger.reportWarningObject, warnings)
    return resultVector
def wrapper(*args_, **kwargs_):
    """Inner wrapper of a tool-injection decorator.

    Binds the caller's positional/keyword arguments to the tool names the
    decorator declared, merges all tool-factory dicts, and finally calls
    the wrapped function with arguments produced by `_build_tools`.
    (`args`, `basic_tool_factories`, `original_fn` come from the
    enclosing decorator scope.)
    """
    declared_names = args and args[0] or ()
    factories = basic_tool_factories.copy()
    toolmap = {}
    if "tools" in kwargs_:
        # copy to avoid mutating the caller's dict
        toolmap = kwargs_["tools"].copy()
    # names not covered positionally must arrive as keyword arguments
    names_from_kwargs = declared_names[len(args_):]
    names_from_args = declared_names[:len(args_)]
    for position, name in enumerate(names_from_args):
        toolmap[name] = args_[position]
    for name in names_from_kwargs:
        toolmap[name] = kwargs_[name]
    # any further decorator arguments are extra factory dicts
    if len(args) > 1:
        for extra_factories in args[1:]:
            factories.update(extra_factories)
    if "factories" in toolmap:
        factories.update(toolmap["factories"])
    if "factories" in kwargs_:
        factories.update(kwargs_["factories"])
    toolmap["factories"] = factories
    # self-reference so built tools can see the whole tool set
    toolmap["tools"] = toolmap
    needed_names = _get_fn_arg_names(original_fn)
    call_args, call_kwargs = _build_tools(needed_names, {}, factories, toolmap)
    return original_fn(*call_args, **call_kwargs)
def reportSoftwareCmpsAsCis(cmps, containerOsh):
    r'''Report SAP software components as CIs in the given container.
    @types: list[sap.SoftwareComponent], osh -> oshv'''
    reporter = sap.SoftwareComponentReporter(sap.SoftwareComponentBuilder())
    oshv = ObjectStateHolderVector()
    for component in cmps:
        oshv.add(reporter.reportSoftwareComponent(component, containerOsh))
    return oshv
def execute_reg_query(reg_provider, command):
    '''Run a Windows registry query described by `command`.

    Builds a registry query for the command's hive/key path, adds every
    declared attribute, executes it and hands the raw items to the
    command's own handler.
    @types: RegistryProvider, Cmd -> object
    '''
    builder = reg_provider.getBuilder(command.hkey, command.key_path)
    for attribute in command.ATTRIBUTES:
        builder.addAttribute(attribute)
    raw_items = reg_provider.getAgent().execQuery(builder)
    return command.handler(raw_items)
def DiscoveryMain(Framework):
    '''
    Discovery process consists of two steps:
    1. Connect domain controller and get whole topology
    2. Strive to connect to the same controller with the same credentials
       but in role of global catalog.
    2.1 GC indexes more hierarchical data but less object specific data,
        so not all data will be rediscovered.
    @types: Framework -> None
    '''
    vector = ObjectStateHolderVector()
    ## Destination Attribute Section
    hostId = Framework.getDestinationAttribute('hostId')
    credentialsId = Framework.getDestinationAttribute('credentials_id')
    applicationPort = Framework.getDestinationAttribute("application_port")
    serviceAddressPort = Framework.getDestinationAttribute('port')
    OU_REPORTING_PARAM = 'reportOUAsConfigurationDocument'
    isOuUnitsTreeReportedAsConfig = Framework.getParameter(OU_REPORTING_PARAM)
    isOuUnitsTreeReportedAsConfig = parseBoolean(isOuUnitsTreeReportedAsConfig)
    tryToDiscoverGlobalCatalogFlag = Boolean.parseBoolean(
        Framework.getParameter('tryToDiscoverGlobalCatalog'))
    globalCatalogPort = Framework.getParameter('globalCatalogPort')
    # fall back to the service address port when no explicit one is given
    if not applicationPort or applicationPort == 'NA':
        applicationPort = serviceAddressPort
    try:
        result = DiscoveryResult()
        vector.addAll(
            _discoverTopology(Framework, credentialsId, hostId,
                              applicationPort, None,
                              isOuUnitsTreeReportedAsConfig, result))
        #no reason to connect to the GC if port is specified in credentials
        if (tryToDiscoverGlobalCatalogFlag
                and str(globalCatalogPort).isdigit()
                and globalCatalogPort != applicationPort):
            vector.addAll(
                _discoverTopology(Framework, credentialsId, hostId,
                                  globalCatalogPort,
                                  tryToDiscoverGlobalCatalogFlag,
                                  isOuUnitsTreeReportedAsConfig, result))
        # OU configuration-document oshs collected across both passes
        dtoToOsh = result.getMap(DOMAINT_DTO_TO_CONFIG_OSH_TYPE)
        fptools.each(vector.add, dtoToOsh.values())
    except Exception, e:
        msg = 'Failure in discovering Active Directory Topology. %s' % e
        Framework.reportError(msg)
        logger.debug(logger.prepareFullStackTrace(msg))
        logger.errorException(msg)
def parse(self, url):
    r'''Parse an Oracle TNS connection URL into a single database server.

    The first address becomes the server itself; every remaining address
    is attached to it as an additional TCP endpoint.
    @types: str -> tuple[db.DatabaseServer]
    '''
    trimmed = self.trimUrlPrefix(url)
    record = OracleTnsRecordParser().parse(trimmed)
    addresses = self._filterAddresses(record)
    server = self._buildDatabaseServer(addresses.pop(0))
    for address_item in addresses:
        host_port = self._decomposeAddressPort(address_item)
        server.addEndpoint(netutils.createTcpEndpoint(*host_port))
    return (server, )
def discoverSystems(solman):
    r'''Discover SAP systems registered in Solution Manager.
    @types: saputils.SapSolman -> oshv, list[tuple[System, osh]]'''
    logger.info('Discover SAP Systems')
    executor = sap_abap_discoverer.TableQueryExecutor(solman)
    details = executor.executeQuery(GetSystems())
    # each detail row starts with the System itself
    systems = [first(row) for row in details]
    reporter = sap.Reporter(sap.Builder())
    oshs = [reporter.reportSystem(system) for system in systems]
    logger.info("Discovered %s systems" % len(oshs))
    oshv = ObjectStateHolderVector()
    for osh in oshs:
        oshv.add(osh)
    return oshv, zip(systems, oshs)
def unpackContext(fnContext):
    '''Normalize a callable "context" into a flat list of (fn, args) pairs.

    A context is one of:
      - a bare callable                     -> [(fn, ())]
      - a tuple/list (fn, arg1, arg2, ...)  -> [(fn, (arg1, arg2, ...))]
      - a tuple/list of nested contexts     -> concatenation of their pairs

    Replaces the previous `each`/`imap` indirection with a plain loop:
    same behavior, no dependency on functional helpers, readable in
    both Python 2 and 3.
    @types: callable or tuple or list -> list[tuple]
    '''
    fnToArgsPairs = []
    if isinstance(fnContext, (tuple, list)):
        head = fnContext[0]  # fn or another context
        if isinstance(head, (tuple, list)):
            # a sequence of nested contexts - flatten recursively
            for nested in fnContext:
                fnToArgsPairs.extend(unpackContext(nested))
        else:
            # (fn, *args): keep trailing items as the argument sequence
            fnToArgsPairs.append((head, fnContext[1:]))
    else:
        fnToArgsPairs.append((fnContext, ()))
    return fnToArgsPairs
def discoverServers(client, ips_set, ignoreNodesWithoutIP, allowDnsLookup):
    '''
    Discover host resources
    @types: Client, set[IPAddress], bool, bool -> generator
    @param ips_set: shared set of already-seen IPs, mutated in place
    @return: generator of tuples witch such elements
        host osh
        seq[osh] - built IPs
        seq[osh] - built CPUs
        list[tuple[Port, osh]]
        list[tuple[HostHba, osh]]
        list[tuple[LogicalVolume, osh]]
    '''
    try:
        hosts = _query_hosts(client)
        name_to_host = zip(imap(_get_host_name, hosts), hosts)
        has_hostname = comp(any, first)
        name_to_host = _drop("Hosts without hostname", has_hostname,
                             name_to_host)
        for (hostName, hostDnsName), host in name_to_host:
            logger.info("Discover (%s:%s) host topology" % (host.name, host.ip))
            ips = _discover_host_ips(host, hostName, hostDnsName,
                                     ips_set, allowDnsLookup)
            if ips:
                each(ips_set.add, ips)
                host = host._replace(ip=first(ips))
            elif ignoreNodesWithoutIP:
                logger.debug("(%s: %s) is ignored due to missing "
                             "or duplicated IP" % (host.id, host.name))
                continue
            hostOSH = _build_host(host, hostName)
            ipOshs = chain(*[_report_ips(hostOSH, ip, hostDnsName) for ip in ips])
            cpuOshs = _report_cpus(host, hostOSH)
            ports = _query_ports(partial(_query_host_ports, client, host.id))
            portOshs = (_report_port(hostOSH, port) for port in ports)
            port_2_osh = zip(ports, portOshs)
            host_hbas = _query_host_hbas(client, host.id)
            hbaOshs = (_report_host_hba(hostOSH, hba) for hba in host_hbas)
            hba_2_osh = zip(host_hbas, hbaOshs)
            volumes = _query_host_logical_volumes(client, host.id)
            # FIX: materialize the filter (py2 `filter` returns a list).
            # Previously `ifilter` produced a single iterator that was
            # consumed by BOTH `zip` and the generator below, mispairing
            # volumes with oshs built from *other* volumes.
            volumes = filter(LogicalVolume.name.fget, volumes)
            volumeOshs = (_report_logical_volume(hostOSH, v) for v in volumes)
            volume_2_osh = zip(volumes, volumeOshs)
            yield hostOSH, ipOshs, cpuOshs, port_2_osh, hba_2_osh, volume_2_osh
    # FIX: narrowed from a bare `except:` to match the error-handling
    # convention used by sibling discover* functions in this module
    except (Exception, JException):
        excInfo = logger.prepareJythonStackTrace('')
        logger.warn('[' + SCRIPT_NAME + ':discoverServers] Exception: <%s>' % excInfo)
def unpack_context(fnContext):
    '''Normalize a callable "context" into a flat list of (fn, args) pairs.

    A context is one of:
      - a bare callable                     -> [(fn, ())]
      - a tuple/list (fn, arg1, arg2, ...)  -> [(fn, (arg1, arg2, ...))]
      - a tuple/list of nested contexts     -> concatenation of their pairs

    Replaces the previous `each`/`imap` indirection with a plain loop:
    same behavior, no functional-helper dependency, portable syntax.
    @types: callable or tuple or list -> list[tuple]
    '''
    fnToArgsPairs = []
    if isinstance(fnContext, (tuple, list)):
        head = fnContext[0]  # fn or another context
        if isinstance(head, (tuple, list)):
            # a sequence of nested contexts - flatten recursively
            for nested in fnContext:
                fnToArgsPairs.extend(unpack_context(nested))
        else:
            # (fn, *args): keep trailing items as the argument sequence
            fnToArgsPairs.append((head, fnContext[1:]))
    else:
        fnToArgsPairs.append((fnContext, ()))
    return fnToArgsPairs
def DiscoveryMain(Framework): ''' Discovery process consists of two steps: 1. Connect domain controller and get whole topology 2. Strive to connect to the same controller with the same credentials but in role of global catalog. 2.1 GC indexes more hierarchical data but less object specific data, so not all data will be rediscovered. ''' vector = ObjectStateHolderVector() ## Destination Attribute Section hostId = Framework.getDestinationAttribute('hostId') credentialsId = Framework.getDestinationAttribute('credentials_id') applicationPort = Framework.getDestinationAttribute("application_port") serviceAddressPort = Framework.getDestinationAttribute('port') OU_REPORTING_PARAM = 'reportOUAsConfigurationDocument' isOuUnitsTreeReportedAsConfig = Framework.getParameter(OU_REPORTING_PARAM) isOuUnitsTreeReportedAsConfig = parseBoolean(isOuUnitsTreeReportedAsConfig) tryToDiscoverGlobalCatalogFlag = Boolean.parseBoolean( Framework.getParameter('tryToDiscoverGlobalCatalog')) globalCatalogPort = Framework.getParameter('globalCatalogPort') if not applicationPort or applicationPort == 'NA': applicationPort = serviceAddressPort try: result = DiscoveryResult() vector.addAll(_discoverTopology(Framework, credentialsId, hostId, applicationPort, None, isOuUnitsTreeReportedAsConfig, result)) #no reason to connect to the GC if port is specified in credentials if (tryToDiscoverGlobalCatalogFlag and str(globalCatalogPort).isdigit() and globalCatalogPort != applicationPort): vector.addAll(_discoverTopology(Framework, credentialsId, hostId, globalCatalogPort, tryToDiscoverGlobalCatalogFlag, isOuUnitsTreeReportedAsConfig, result)) dtoToOsh = result.getMap(DOMAINT_DTO_TO_CONFIG_OSH_TYPE) fptools.each(vector.add, dtoToOsh.values()) except Exception, e: msg = 'Failure in discovering Active Directory Topology. %s' % e Framework.reportError(msg) logger.debug(logger.prepareFullStackTrace(msg)) logger.errorException(msg)
def _discoverInstanceDetails(client, baseTopology):
    r'''Discover one SAP J2EE instance: host, processes, applications.

    @types: BaseSapJmxClient, str, System, osh, osh -> oshv
    NOTE(review): declared @types lists 5 inputs but the function takes
    2 - `baseTopology` is the (system, hostname, clusterOSH, systemOsh)
    tuple; confirm and fix the annotation upstream.
    '''
    system, hostname, clusterOSH, systemOsh = baseTopology
    inst, servers = ServerProcessQuery().getSystemDetails(client)
    # prefer hostname from the caller when the query did not provide one
    if not inst.hostname and hostname:
        inst = sap.Instance.replaceHostname(inst, hostname)
    instanceReporter = sap_jee.InstanceReporter(sap_jee.InstanceBuilder())
    # report host by resolved IPs
    hostname = inst.hostname
    if not hostname:
        logger.warn("Failed to determine hostname for %s" % inst)
        return ObjectStateHolderVector()
    dnsResolver = netutils.JavaDnsResolver()
    vector = ObjectStateHolderVector()
    try:
        ips = dnsResolver.resolveIpsByHostname(hostname)
    except netutils.ResolveException:
        # unresolved host: nothing else can be anchored, return empty vector
        logger.warn("Failed to resolve hostname of %s" % inst)
    else:
        hostReporter = sap.HostReporter(sap.HostBuilder())
        hostOSH, vector = hostReporter.reportHostWithIps(*ips)
        # report instance
        pdo = sap_jee.InstanceBuilder.InstancePdo(inst, system)
        instOsh = instanceReporter.reportInstancePdo(pdo, hostOSH)
        vector.add(instOsh)
        #report sap system
        systemOsh.setStringAttribute('data_note', 'This SAP System link to ' + hostOSH.getAttributeValue('host_key'))
        vector.add(systemOsh)
        # report j2ee_cluster -membership-> sap_app_server
        linkReporter = sap.LinkReporter()
        vector.add(linkReporter.reportMembership(clusterOSH, instOsh))
        vector.add(linkReporter.reportMembership(systemOsh, instOsh))
        # report server processes
        oshs = [_reportServerProcess(s, inst, instOsh) for s in servers]
        each(vector.add, oshs)
        # discover applications (worker processes only)
        serverToOshs = filter(comp(_isWorkerProcess, first), zip(servers, oshs))
        for server, osh in serverToOshs:
            id_ = server.id
            # Sf = safe call: application discovery failures are non-fatal
            appNameToOsh = Sf(discoverEjbApplications)(client, id_, osh, clusterOSH, vector)
            Sf(buildWebApplications)(client, id_, osh, clusterOSH, appNameToOsh, vector)
    return vector
def get_cmdline(self, wmicmd):
    '''Builds wmic command line for passed WMI command
    @param wmicmd: a WMI command to build command line for
    @type wmicmd: wmi_base_command.Cmd
    @return: wmic command line for passed WMI command
    @rtype: basestring
    '''
    wmi_clsname = wmicmd.get_wmi_class_name()
    # NOTE(review): `__WmicQueryBuilder` is referenced from inside a class
    # body, so Python name-mangles it to `_<EnclosingClass>__WmicQueryBuilder`
    # before lookup. This only works if a matching mangled name exists at
    # module level - verify this resolves correctly at runtime.
    query_builder = __WmicQueryBuilder(wmi_clsname, self.cmdline)
    each(query_builder.addQueryElement, wmicmd.fields)
    query_builder.usePathCommand(1)
    query_builder.useSplitListOutput(1)
    # namespace is prefixed with the double-backslash wmic expects
    query_builder.setNamespace('\\\\%s' % wmicmd.NAMESPACE)
    return query_builder.buildQuery()
def parse(self, url):
    r'''Parse an Oracle TNS URL into the servers of a RAC.

    Every address in the record becomes a database server carrying the
    OracleRacMember role derived from the record's service name.
    @types: str -> tuple[db.DatabaseServer]
    '''
    url = self.trimUrlPrefix(url)
    parser = OracleTnsRecordParser()
    obj = parser.parse(url)
    addresses = self._filterAddresses(obj)
    description = self._getDescription(obj)
    serviceName = description.connect_data.service_name.strip()
    oracleRacRole = db.OracleRacMember(serviceName)
    servers = map(self._buildDatabaseServer, addresses)
    # FIX: the former `lambda db: db.addRole(...)` shadowed the `db`
    # module used above; a plain loop avoids the shadowing entirely
    for server in servers:
        server.addRole(oracleRacRole)
    return tuple(servers)
def _discoverServers(solman, hostnameToAddress, sysPairsBySysName, sendVector, reportError, resolveIps):
    '''
    Discover SAP instances related to already discovered systems
    @type hostnameToAddress: dict[str, sap.Address]
    @type sysPairsBySysName: dict[str, tuple[System, osh]]
    @param sendVector: callback that flushes an OSH vector to the server
    @param reportError: callback that reports a discovery error message
    @param resolveIps: hostname -> ips resolver; rebound below to accept
        a Server directly
    '''
    try:
        # get servers by one of the specified queries
        queries = (GetServersWithNotActiveFlag(), GetServers())
        queryExecutor = TableQueryExecutor(solman)
        # Sfn makes each query safe; findFirst picks the first that worked
        result = imap(Sfn(queryExecutor.executeQuery), queries)
        servers = findFirst(truth, result) or ()
        # group servers by system name
        pairsBySysName = groupby(GetServers.Server.systemName.fget, servers)
        inDiscoveredSystems = comp(sysPairsBySysName.get, first)
        pairs = ifilter(inDiscoveredSystems, pairsBySysName.iteritems())
        resolveIps = comp(resolveIps, GetServers.Server.hostname.fget)
        for sysName, servers in pairs:
            logger.info("Found %s servers for %s system" % (len(servers), sysName))
            # collect parsed names for each server
            parseServerName = comp(GetServers.parseServerName,
                                   GetServers.Server.name.fget)
            parsedServerNames = imap(parseServerName, servers)
            # resolve IPs for each server
            ips = imap(resolveIps, servers)
            # get information for each server where name and IPs are present
            infoSeq = ifilter(all, izip(servers, parsedServerNames, ips))
            # not interested in server nodes - only instances
            infoSeq = ifilterfalse(isServerNode, infoSeq)
            # report each server
            system, systemOsh = sysPairsBySysName.get(sysName)
            reportServer = F(_reportServer, fptools._, fptools._, fptools._,
                             system, systemOsh)
            vector = ObjectStateHolderVector()
            each(vector.addAll, starmap(reportServer, infoSeq))
            sendVector(vector)
    except (Exception, JException):
        msg = "Failed to discover servers"
        logger.warnException(msg)
        reportError(msg)
def _discoverRfcDestinations(sapUtils, systemOsh, config):
    r'''Discover RFC destinations and report those with resolvable hosts.
    @types: SapUtils, osh, flow.DiscoveryConfigBuilder -> oshv'''
    if not config.discoverRFCConnections:
        return ObjectStateHolderVector()
    logger.info('Discover RFC connections')
    getRfcCmd = sap_abap_discoverer.GetRfcDestinationsRfcCommand()
    # best-effort: a failed query degrades to an empty connection list
    connections = Sfn(getRfcCmd.getAllRfcConnections)(sapUtils) or ()
    logger.info("Found %s possible RFC connections" % len(connections))
    connections = filter(comp(sap_abap_discoverer.isEnglishVersion, third),
                         connections)
    logger.info("Found %s RFC connections with EN language" % len(connections))
    # index connections by name for later description lookup
    connByName = applyMapping(first, connections)
    destinations = getRfcCmd.getAllRfcDestinations(sapUtils)
    logger.info("Found %s RFC destinations" % len(destinations))
    # get destinations with valid host
    destinations = [d for d in destinations if _isDestFull(d)]
    logger.info("Found %s destinations with host available" % len(destinations))
    destinationsByHost = groupby(lambda d: d.targetHost, destinations)
    # resolve each distinct host once; Sfn maps failures to None
    ips = map(Sfn(_resolve), destinationsByHost.iterkeys())
    pairIpToDestinations = zip(ips, destinationsByHost.itervalues())
    resolved, notResolved = partition(first, pairIpToDestinations)
    if notResolved:
        skippedDestsCount = sum([len(dests) for ip, dests in notResolved])
        logger.debug("%s destinations skipped due to not resolved %s hosts"
                     % (skippedDestsCount, len(notResolved)))
    vector = ObjectStateHolderVector()
    for ip, destinations in resolved:
        # TODO:
        # 1) query RFC connections (to get description) only for these
        # destinations as it will reduce amount of data fetched from system
        # One query for connections returns ~8K rows of data, while we are
        # interested in less than ~50 or even less
        # 2) another improvement query only records in English language
        countOfDests = len(destinations)
        host = first(destinations).targetHost
        reportDst = Sfn(_reportRfcDestination)
        logger.debug("%s destinations resolved for %s" % (countOfDests, host))
        vectors = (reportDst(dst, ip, connByName, systemOsh)
                   for dst in destinations)
        # drop None results of failed (safe) report calls
        each(vector.addAll, ifilter(None, vectors))
    return vector
def process(self, context):
    r'''Report endpoints parsed from ns options of the application.

    For every ns option: resolve its endpoints, report the host with its
    IPs and containment links, then link the application to each endpoint
    with a client-server relation.
    @types: applications.ApplicationSignatureContext
    '''
    shell = context.client
    dnsResolver = netutils.DnsResolverByShell(shell)
    # address -> flat list of endpoints parsed from all its ns options
    addressToEndpoints = {}
    for nsOption in self._nsOptions:
        parsedEndpoints = buildEndpointsFromNsOption(nsOption, dnsResolver)
        addressToEndpoints.setdefault(nsOption.address, []).extend(parsedEndpoints)
    hostBuilder = HostBuilder()
    linkReporter = LinkReporter()
    endpointBuilder = netutils.ServiceEndpointBuilder()
    endpointReporter = netutils.EndpointReporter(endpointBuilder)
    endpointsOshs = []
    for address, endpoints in addressToEndpoints.items():
        hostOsh = hostBuilder.buildHostByHostname(parseHostnameFromAddress(address))
        context.resultsVector.add(hostOsh)
        # report each endpoint's IP and its containment in the host
        ips = map(netutils.Endpoint.getAddress, endpoints)
        ipOshs = map(modeling.createIpOSH, ips)
        fptools.each(context.resultsVector.add, ipOshs)
        reportContainment = fptools.partiallyApply(linkReporter.reportContainment, hostOsh, fptools._)
        fptools.each(context.resultsVector.add, map(reportContainment, ipOshs))
        endpointsOshs.extend(map(fptools.partiallyApply(endpointReporter.reportEndpoint, fptools._, hostOsh), endpoints))
    applicationOsh = context.application.applicationOsh
    for endpointsOsh in endpointsOshs:
        context.resultsVector.add(endpointsOsh)
        # application acts as a client of every reported endpoint
        clientServerOsh = linkReporter.reportClientServerRelation(applicationOsh, endpointsOsh)
        context.resultsVector.add(clientServerOsh)
def discoverAllInstancesByNamesOnly(client):
    r'''Discover SAP J2EE instances knowing only their cluster-wide names.

    Can be only applied for discovery by JMX due to deserialization
    limitations of WebServices client
    @types: BaseSapJmxClient, DiscoveryConfig -> tuple[oshv, tuple[str]]'''
    discoverer = sap_jee_discoverer.ClusterDiscoverer(client)
    cluster, instanceNames = discoverer.getClusterDetails()
    # Sf = safe call; keep drops names that failed to parse
    parseInst = Sf(_createAnonymousInstFromFullName)
    insts = keep(parseInst, instanceNames)
    system = sap.System(cluster.getName())
    systemOsh, clusterOsh, vector = _reportSapSystem(system)
    # pair each instance with its resolved IPs (None when unresolved)
    ipsPerInst = zip(map(_resolveInstHostname, insts), insts)
    resolved, notResolved = partition(first, ipsPerInst)
    warnings = ()
    if notResolved:
        warnings = ("Some instances are not reported "
                    "due to unresolved address",)
    # third(...) extracts the osh vector from each instance report
    vectors = (third(reportInst(i, system, systemOsh, clusterOsh, ips))
               for ips, i in resolved)
    each(vector.addAll, vectors)
    return vector, warnings
def wrapper(*args_, **kwargs_):
    '''Inner wrapper of a tool-injection decorator.

    Binds caller arguments to the tool names declared by the decorator,
    merges extra factory dicts, and calls the wrapped function with the
    arguments `_build_tools` produces. (`args`, `basic_tool_factories`,
    `original_fn` come from the enclosing decorator scope.)
    '''
    declared_names = args[0]
    factories = basic_tool_factories.copy()
    # any further decorator arguments are extra factory dicts
    for extra_factories in args[1:]:
        factories.update(extra_factories)
    toolmap = {}
    # names beyond the positional args must arrive as keyword arguments
    positional_names = declared_names[:len(args_)]
    keyword_names = declared_names[len(args_):]
    for position, name in enumerate(positional_names):
        toolmap[name] = args_[position]
    for name in keyword_names:
        toolmap[name] = kwargs_[name]
    needed_names = _get_fn_arg_names(original_fn)
    call_args, call_kwargs = _build_tools(needed_names, {}, factories, toolmap)
    return original_fn(*call_args, **call_kwargs)
def _discoverClients(solman, sysPairsBySysName, sendVector, reportError):
    '''Discover SAP clients and report them per already-discovered system.

    @type sysPairsBySysName: dict[str, tuple[System, osh]]
    @param sendVector: callback that flushes an OSH vector to the server
    @param reportError: callback that reports a discovery error message
    '''
    try:
        logger.info("Discover Clients")
        getClientsQuery = GetClients()
        executeQuery = TableQueryExecutor(solman).executeQuery
        sysNameToClientPairs = executeQuery(getClientsQuery)
        # group (systemName, client) pairs by system name
        pairsBySysName = groupby(first, sysNameToClientPairs)
        logger.info("Discovered %s clients for %s systems" % (
            len(sysNameToClientPairs), len(pairsBySysName)))
        reporter = sap.ClientReporter(sap.ClientBuilder())
        # only systems previously discovered get their clients reported
        for systemName in ifilter(sysPairsBySysName.get,
                                  pairsBySysName.iterkeys()):
            _, systemOsh = sysPairsBySysName.get(systemName)
            report = F(reporter.report, fptools._, systemOsh)
            clients = imap(second, pairsBySysName.get(systemName))
            vector = ObjectStateHolderVector()
            each(vector.add, imap(report, clients))
            sendVector(vector)
    except (Exception, JException):
        msg = "Failed to discover clients"
        logger.warnException(msg)
        reportError(msg)
filter(None, map(weblogic_discoverer.ServerRuntime.findAdminServerEndpoint, runtimes)) ) # find unique endpoints if len(adminServerEndpointByName) > 1: logger.info("""After analyze of running processes in single domain \ found servers pointing to different administrative servers """) logger.debug(adminServerEndpointByName) adminServerEndpoint = (len(adminServerEndpointByName) == 1 and adminServerEndpointByName.values()[0]) domain = jee.Domain(domainDescriptor.getName()) # add known clusters to the domain clusters = domainDescriptor.getClusters() each(domain.addCluster, clusters) # add config.xml to the domain domain.addConfigFiles(jee.createXmlConfigFile(domainDescriptorFile)) deploymentTargetByName = applyMapping(jee.Cluster.getName, clusters) machineByName = applyMapping( weblogic_discoverer.DomainConfigurationDescriptor.Machine.getName, domainDescriptor.getMachines() ) # find corresponding process for each discovered server to # provide additional information that can be overridden resolvedServers = [] servers = domainDescriptor.getServers() # for 7-8 versions config.xml hasn't info about admin-server at all
continue for interf in ifaceList: if (interf.name and re.search('[Tt]eam', interf.name)) or (interf.description and re.search('[Tt]eam', interf.description)): #picking up interface with max interfaceIndex value and setting it aggregate role try: iface = reduce(lambda x,y: int(x.interfaceIndex) > int(y.interfaceIndex) and x or y, ifaceList) iface.role = 'aggregate_interface' except: logger.debugException('') # add all interfaces to the host vector.addAll(modeling.createInterfacesOSHV(interfaceList, hostOSH)) roleManager = networking_win.InterfaceRoleManager() builtInets = filter(modeling.NetworkInterface.getOsh, interfaceList) fptools.each(roleManager.assignInterfaceRole, builtInets) isCompleteAttr = hostOSH.getAttribute('host_iscomplete') for i in range(vector.size()): osh = vector.get(i) if osh.getObjectClass() == 'ip': if (isCompleteAttr != None and isCompleteAttr.getBooleanValue() == 1): link = modeling.createLinkOSH('contained', hostOSH, osh) vector.add(link) elif osh.getObjectClass() == 'network': link = modeling.createLinkOSH('member', osh, hostOSH) vector.add(link) if interfacesToUpdateList: for interfaceToUpdateOSH in interfacesToUpdateList: interfaceToUpdateOSH.setContainer(hostOSH)
at_least_once_discovered = True if stop_on_first: break except flow.ConnectionException, ce: logger.debugException('%s' % ce) connection_exs.append(ce) except (flow.DiscoveryException, Exception), de: logger.debugException('%s' % de) discovery_exs.append(de) except JException, je: logger.debugException('%s' % je) java_exs.append(je) warnings = filter(None, warnings) if at_least_once_discovered: each(logger.reportWarning, warnings) else: for ex in connection_exs: obj = flow._create_connection_errorobj(proto_name, ex.message) logger.reportErrorObject(obj) for ex in discovery_exs: obj = flow._create_discovery_errorobj(proto_name, ex.message) logger.reportErrorObject(obj) for ex in java_exs: obj = flow._create_discovery_errorobj(proto_name, '%s %s' % (ex.__class__, ex.getMessage())) logger.reportErrorObject(obj) return vector def get_applicable_credentials(framework, portPattern): r'@types: Framework, str -> iterator[tuple[int, str]]'
def _reportInstance(sapSystem, serverInstance, systemOsh, clusterOsh, sapJeeVersionInfo, knownPortsConfigFile):
    r'''Report a SAP J2EE instance: host, dispatcher, endpoints, processes.

    @param sapSystem: System
    @param serverInstance: SapJEEMonitoringXmlParser.DialogInstance
    @param sapJeeVersionInfo: JEEDiscovererByHTTP.SapJ2EEVersionInfo
    @rtype: ObjectStateHolderVector
    @raise ValueError:
    '''
    instanceReporter = sap_jee.InstanceReporter(sap_jee.InstanceBuilder())
    serverReporter = sap_jee.ServerReporter(sap_jee.ServerBuilder())
    endpointReporter = netutils.EndpointReporter(netutils.ServiceEndpointBuilder())
    linkReporter = sap.LinkReporter()
    vector = ObjectStateHolderVector()
    ip = serverInstance.host.ip
    hostOsh, _, vector_ = _buildHostAndIpOshs(ip)
    vector.addAll(vector_)
    systemOsh.setStringAttribute('data_note', 'This SAP System link to ' + hostOsh.getAttributeValue('host_key'))
    vector.add(systemOsh)
    instance = _buildJavaInstance(sapSystem, serverInstance)
    instanceOsh = instanceReporter.reportInstancePdo(
        sap_jee.InstanceBuilder.InstancePdo(instance, sapSystem,
                                            sap.createIp(ip),
                                            sapJeeVersionInfo), hostOsh)
    vector.add(instanceOsh)
    # instance belongs to both the cluster and the system
    vector.add(linkReporter.reportMembership(clusterOsh, instanceOsh))
    vector.add(linkReporter.reportMembership(systemOsh, instanceOsh))
    if serverInstance.dispatcherServer:
        dispatcherProcess = _buildDispatcher(serverInstance.dispatcherServer)
        vector.add(serverReporter.reportServer(dispatcherProcess, instance, instanceOsh))
        # report only the dispatcher ports that are actually defined
        httpPort = serverInstance.dispatcherServer.httpPort
        p4Port = serverInstance.dispatcherServer.p4Port
        telnetPort = serverInstance.dispatcherServer.telnetPort
        isPortDefined = lambda t: t[1]
        ports = filter(isPortDefined,
                       ((ip, httpPort, knownPortsConfigFile.getTcpPortName(httpPort)),
                        (ip, p4Port, knownPortsConfigFile.getTcpPortName(p4Port)),
                        (ip, telnetPort, knownPortsConfigFile.getTcpPortName(telnetPort))))
        endpoints = map(lambda t: netutils.createTcpEndpoint(*t), ports)
        reportEndpoint = endpointReporter.reportEndpoint
        reportEndpoint = fptools.partiallyApply(reportEndpoint, fptools._, hostOsh)
        endpointOshs = map(reportEndpoint, endpoints)
        fptools.each(vector.add, endpointOshs)
        # instance -usage-> endpoint for every reported endpoint
        reportUsage = linkReporter.reportUsage
        reportUsage = fptools.partiallyApply(reportUsage, instanceOsh, fptools._)
        usageOshs = map(reportUsage, endpointOshs)
        fptools.each(vector.add, usageOshs)
    for serverProcess in serverInstance.serverProcesses:
        serverProcess = _buildServerProcess(serverProcess)
        vector.add(serverReporter.reportServer(serverProcess, instance, instanceOsh))
    #note:ek:no debug port reporting for now
    return vector
def process(self, context):
    r'''Discover and report SAP TREX/BIA topology for the matched application.

    Flow: find the TREXDaemon process, read and parse its instance profile,
    derive the system/instance file-system layout, read the default profile
    (BIA vs plain TREX detection), topology.ini, the version manifest and the
    RFC server configuration, then report everything into the context
    result vector.

    @types: applications.ApplicationSignatureContext
    '''
    # NOTE: bare strings piped with '|' into info/warn/debug are log
    # statements -- the '|' operator is presumably overloaded by a logging
    # helper imported at module level (not visible here; confirm).
    # ------------------------------------------------------------ DISCOVERY
    "SAP TREX plug-in DISCOVERY start" | info
    shell = context.client
    fs = file_system.createFileSystem(shell)
    pathtools = file_system.getPath(fs)
    # x) get process related application
    hostOsh = context.hostOsh
    application = context.application
    destinationIp = application.getConnectionIp()
    "x) Find TREX Daemon process that has profile path as parameter" | info
    mainProcess = findFirst(isMainTrexProcess, context.application.getProcesses())
    profilePath = sap_discoverer.getProfilePathFromCommandline(mainProcess.commandLine)
    "x) Read profile content: %s" % profilePath | info
    # partially-applied, exception-safe file reader: path -> file-with-content or None
    getFileContent = Sfn(Fn(self.__getFileWithContent, shell, pathtools, __))
    profileFile = profilePath and getFileContent(profilePath)
    if not profileFile:
        "Plug-in flow broken. Failed to read instance profile\
content based on path from the TREXDaemon command line" | warn
        return
    "x) Instance profile parsing" | info
    sapIniParser = sap_discoverer.IniParser()
    instanceProfileParser = sap_discoverer.InstanceProfileParser(sapIniParser)
    defaultProfileParser = sap_trex_discoverer.DefaultProfileParser(sapIniParser)
    try:
        resultAsIni = instanceProfileParser.parseAsIniResult(profileFile.content)
        instanceProfile = instanceProfileParser.parse(resultAsIni)
        defaultProfile = defaultProfileParser.parse(resultAsIni)
    except Exception:
        logger.warnException("Failed to parse instance profile")
        return
    rfcConfigs = []
    trexSystem = defaultProfile.getSystem()
    trexInstance = instanceProfile.getInstance()
    # instance name is <name><number>, e.g. TRX00
    trexInstanceName = trexInstance.getName() + trexInstance.getNumber()
    isBiaProduct = 0
    versionInfo = None
    # # master by default, if topology file is not found that means
    # # that current one is the only instance
    # isMaster = 1
    trexTopology = None
    "x) Initialize TREX instance layout" | debug
    systemName = trexSystem.getName()
    systemBasePath = sap_discoverer.findSystemBasePath(mainProcess.getExecutablePath(), systemName)
    if systemBasePath:
        systemLayout = sap_trex_discoverer.SystemLayout(pathtools, systemBasePath, systemName)
        'System path: %s' % systemLayout.getRootPath() | info
        instancePath = systemLayout.composeInstanceDirPath(trexInstanceName)
        'Instance path: %s' % instancePath | debug
        instanceLayout = sap_trex_discoverer.InstanceLayout(pathtools, instancePath, trexInstanceName)
        "x) Get content of default profile as it contains information about product"
        "x) Determine whether we deal with BIA based on version information" | debug
        defaultProfilePath = systemLayout.getDefaultProfileFilePath()
        defaultProfileFile = getFileContent(defaultProfilePath)
        try:
            resultAsIni = instanceProfileParser.parseAsIniResult(defaultProfileFile.content)
            defaultProfile = defaultProfileParser.parse(resultAsIni)
        except Exception:
            logger.warnException("Failed to parse default profile")
        else:
            isBiaProduct = defaultProfile.getProductType() == sap_trex.Product.BIA
        (isBiaProduct and "BIA" or "non-BIA", "product detected") | info
        # get instance host name from profile name
        instanceHostname = None
        try:
            destinationSystem = sap_discoverer.parseSapSystemFromInstanceProfileName(profileFile.getName())
        except Exception:
            msg = "Failed to parse instance hostname from profile file name"
            logger.debugException(msg)
        else:
            instanceHostname = first(destinationSystem.getInstances()).getHostname()
        "x) Discover whole topology from (topology.ini)" | info
        # topology.ini file location and format differs depending on the
        # product:
        # -a) BIA (has plain-ini file at <SID>/sys/global/trex/data/topology.ini
        # -b) TREX (has several places where topology.ini can be stored)
        discoverTopologyIniFilePath = fptools.safeFunc(sap_trex_discoverer.discoverTopologyIniFilePath)
        topologyFilePath = (isBiaProduct
                            and systemLayout.getTopologyIniPath()
                            or discoverTopologyIniFilePath(fs, instanceLayout, instanceHostname))
        topologyFile = topologyFilePath and getFileContent(topologyFilePath)
        if topologyFile:
            try:
                configParser = sap_trex_discoverer.TopologyConfigParser()
                trexTopology = sap_trex_discoverer.TrexTopologyConfig(
                    configParser.parse(topologyFile.content))
                # find instance between master end-points
                # landscapeSnapshot = topology.getGlobals().getLandscapeSnapshot()
                # masterEndpoints = landscapeSnapshot.getMasterEndpoints()
                # activeMasterEndpoints = landscapeSnapshot.getActiveMasterEndpoints()
                # topologyNodes = topology.getHostNodes()
                ##
                # isEndpointWithInstanceHostname = (lambda
                # ep, hostname = instanceHostname: ep.getAddress() == hostname)
                # isMaster = len(filter(isEndpointWithInstanceHostname,
                # landscapeSnapshot.getMasterEndpoints()))
                # "host role is %s" % (isMaster and "master" or "slave") | info
            except:
                # NOTE(review): bare except also swallows system-exiting
                # exceptions; narrowing to Exception would be safer
                logger.warnException("Failed to parse topology configuration")
        else:
            logger.warn("Failed to get content for the topology configuration")
        "x) Discover TREX version information from saptrexmanifest.mf" | info
        # read version info from manifest file
        manifestFile = getFileContent(instanceLayout.getManifestFilePath())
        if manifestFile:
            manifestParser = sap_trex_discoverer.SapTrexManifestParser(sapIniParser)
            versionInfo = manifestParser.parseVersion(manifestFile.content)
        else:
            'Failed to discover version from manifest file' | warn
            # NOTE(review): 'attept' typo in the log message below
            # (runtime string -- deliberately left untouched here)
            'Second attept to get version from updateConfig.ini file' | info
            profileSystem = Sfn(sap_discoverer.parseSapSystemFromInstanceProfileName)(profileFile.getName())
            if profileSystem:
                hostname = first(profileSystem.getInstances()).getHostname()
                updateConfigFile = getFileContent(instanceLayout.composeUpdateConfigIniFilePath(hostname))
                versionInfo = updateConfigFile and sap.VersionInfo(updateConfigFile.content.strip())
        "x) Discover served systems ( in case if RFC configuration established )" | info
        rfcServerIniFilePath = (isBiaProduct
                                and systemLayout.getRfcServerConfigFilePath()
                                or instanceLayout.composeTrexRfcServerIniFilePath(instanceHostname))
        rfcServerIniFile = getFileContent(rfcServerIniFilePath)
        if rfcServerIniFile:
            rfcConfigs = filter(None, (fptools.safeFunc(
                sap_trex_discoverer.parseConnectionsInRfcServerIni)(rfcServerIniFile.content)))
    # -------------------------------------------------------- REPORTING
    "SAP TREX plug-in REPORTING start" | info
    trexOsh = application.getOsh()
    vector = context.resultsVector
    configFileReporter = file_topology.Reporter(file_topology.Builder())
    trexReporter = sap_trex.Reporter(sap_trex.Builder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()
    "x) - report profile content as configuration document for the application" | info
    vector.add(configFileReporter.report(profileFile, trexOsh))
    ("x) - report %s" % trexSystem) | info
    trexSystemOsh = trexReporter.reportSystem(trexSystem)
    vector.add(trexSystemOsh)
    vector.add(linkReporter.reportMembership(trexSystemOsh, trexOsh))
    "x) - report instance name and version" | info
    softwareBuilder.updateName(trexOsh, trexInstanceName)
    "x) report instance number: %s" % trexInstance.getNumber() | info
    instanceBuilder = sap_trex.Builder()
    instanceBuilder.updateInstanceNumber(trexOsh, trexInstance.getNumber())
    if versionInfo:
        softwareBuilder.updateVersionInfo(trexOsh, versionInfo)
    if isBiaProduct:
        softwareBuilder.updateDiscoveredProductName(trexOsh, sap_trex.Product.BIA.instanceProductName)
    "x) report RFC connections" | info
    dnsResolver = netutils.DnsResolverByShell(shell, destinationIp)
    vector.addAll(reportRfcConfigs(rfcConfigs, dnsResolver, hostOsh))
    "x) report all topology nodes" | info
    if trexTopology:
        reportHostNode = fptools.partiallyApply(reportTrexHostNode, fptools._, trexTopology, isBiaProduct)
        vectors = map(reportHostNode, trexTopology.getHostNodes())
        fptools.each(vector.addAll, vectors)
def reportTrexHostNode(hostNode, topology, isBiaProduct):
    r'''Report one TREX topology host node: the host itself, its non-loopback
    IPs, name-server endpoints, the TREX instance and its name-server role.

    @types: TrexTopologyConfig.HostNode, TrexTopologyConfig, bool -> ObjectStateHolderVector
    '''
    resultVector = ObjectStateHolderVector()
    instanceBuilder = sap_trex.Builder()
    instanceReporter = sap_trex.Reporter(instanceBuilder)
    hostReporter = sap_trex.HostReporter(sap_trex.HostBuilder())
    endpointReporter = netutils.EndpointReporter(netutils.ServiceEndpointBuilder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()
    system = hostNode.system
    # keep only name-server endpoints that are not bound to loopback
    endpoints = [endpoint for endpoint in hostNode.nameServerEndpoints
                 if not netutils.isLoopbackIp(endpoint.getAddress())]
    # host OSH first -- everything else is parented by it
    hostOsh = hostReporter.reportHostByHostname(hostNode.name)
    resultVector.add(hostOsh)
    # IPs of the node plus containment links from the host
    ipOshs = [modeling.createIpOSH(endpoint.getAddress()) for endpoint in endpoints]
    for ipOsh in ipOshs:
        resultVector.add(ipOsh)
    for ipOsh in ipOshs:
        resultVector.add(linkReporter.reportContainment(hostOsh, ipOsh))
    # name-server endpoints on the host
    endpointOshs = [endpointReporter.reportEndpoint(endpoint, hostOsh)
                    for endpoint in endpoints]
    for endpointOsh in endpointOshs:
        resultVector.add(endpointOsh)
    # the TREX instance itself, marked as BIA or plain TREX
    instanceOsh = instanceReporter.reportInstance(first(system.getInstances()), hostOsh)
    productName = (isBiaProduct
                   and sap_trex.Product.BIA.instanceProductName
                   or sap_trex.Product.TREX.instanceProductName)
    softwareBuilder.updateDiscoveredProductName(instanceOsh, productName)
    # name-server role (master, slave or 1st master): compare this node's
    # name-server endpoint against the master endpoint lists from globals
    nameServerPort = first(endpoints).getPort()
    nameServerEndpoint = netutils.createTcpEndpoint(hostNode.name, nameServerPort)
    topologyGlobals = topology.getGlobals()
    masterEndpoints = fptools.safeFunc(topologyGlobals.getMasterEndpoints)() or ()
    activeMasterEndpoints = fptools.safeFunc(topologyGlobals.getActiveMasterEndpoints)() or ()
    isMaster = nameServerEndpoint in masterEndpoints
    isActiveMaster = nameServerEndpoint in activeMasterEndpoints
    nameServerMode = (isMaster
                      and (isActiveMaster
                           and sap_trex.NameServerMode.FIRST_MASTER
                           or sap_trex.NameServerMode.MASTER)
                      or sap_trex.NameServerMode.SLAVE)
    instanceBuilder.updateNameServerMode(instanceOsh, nameServerMode)
    resultVector.add(instanceOsh)
    # 'membership' between system and instance is deliberately NOT reported:
    # discovered systems may have no relation to the current host, and
    # reporting the link could merge unrelated systems inside the OSH vector
    # 'usage' links between the instance and its name-server endpoints
    for endpointOsh in endpointOshs:
        resultVector.add(linkReporter.reportUsage(instanceOsh, endpointOsh))
    return resultVector
def create(cls, discoverers, default_discoverer): discoverer_by_version = {} add_discoverer = partial(Registry.add_discoverer, discoverer_by_version) fptools.each(add_discoverer, discoverers) return cls(discoverer_by_version, default_discoverer)
def decorator_fn(main_fn): @wraps(main_fn) def wrapper(framework): vector = ObjectStateHolderVector() framework = RichFramework(framework) creds_manager = CredsManager(framework) creds = get_credentials_fn(framework, creds_manager) if creds is None: return vector first_cred = take(0, 1, creds) if not first_cred: logger.reportErrorObject( _create_missed_creds_error(proto_name)) else: connection_exs = [] discovery_exs = [] warnings = [] at_least_once_discovered = False oshs = [] creds = list(itertools.chain(first_cred, creds)) if with_dns_resolver: local_client = framework.createClient( LOCAL_SHELL_PROTOCOL_NAME) local_shell = shellutils.ShellUtils(local_client) dns_resolver = _get_dns_resolver(local_shell) for args in starmap( functools.partial(cred_to_client_args, creds_manager), creds): try: with create_client(framework, *args) as client: args = with_dns_resolver and ( dns_resolver, ) + args or args oshs_, warnings_ = main_fn(client, framework, *args) oshs.extend(oshs_) warnings.extend(warnings_) at_least_once_discovered = True if stop_on_first: break except ConnectionException, ce: logger.debugException(str(ce)) connection_exs.append(ce) except (DiscoveryException, Exception), de: logger.debugException(str(de)) discovery_exs.append(de) if with_dns_resolver: fptools.safeFunc(local_shell.closeClient)() if at_least_once_discovered: each(logger.reportWarningObject, warnings) else: for ex in connection_exs: obj = _create_connection_errorobj( proto_name, ex.message) logger.reportErrorObject(obj) for ex in discovery_exs: obj = _create_discovery_errorobj( proto_name, '', ex.message) logger.reportErrorObject(obj) vector.addAll(oshs)
def reportTrexHostNode(hostNode, topology, isBiaProduct):
    r'''Report one TREX topology host node: the host itself, its non-loopback
    IPs, name-server endpoints, the TREX instance and its name-server role.

    @types: TrexTopologyConfig.HostNode, TrexTopologyConfig, bool -> ObjectStateHolderVector
    '''
    trexBuilder = sap_trex.Builder()
    trexReporter = sap_trex.Reporter(trexBuilder)
    hostReporter = sap_trex.HostReporter(sap_trex.HostBuilder())
    endpointReporter = netutils.EndpointReporter(
        netutils.ServiceEndpointBuilder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()
    # x) create sap system
    system = hostNode.system
    vector = ObjectStateHolderVector()
    # process NameServer endpoints and ignore loopback endpoints
    isLoopbackEndpoint = lambda e: netutils.isLoopbackIp(e.getAddress())
    _, endpoints = fptools.partition(isLoopbackEndpoint,
                                     hostNode.nameServerEndpoints)
    # x) create host OSH
    hostOsh = hostReporter.reportHostByHostname(hostNode.name)
    vector.add(hostOsh)
    # x) report IPs
    ips = map(netutils.Endpoint.getAddress, endpoints)
    ipOshs = map(modeling.createIpOSH, ips)
    fptools.each(vector.add, ipOshs)
    #vector.addAll(ipOshs)
    # x) report containment between host nad ips
    reportContainment = fptools.partiallyApply(linkReporter.reportContainment,
                                               hostOsh, fptools._)
    fptools.each(vector.add, map(reportContainment, ipOshs))
    # x) report end-points
    reportEndpoint = fptools.partiallyApply(endpointReporter.reportEndpoint,
                                            fptools._, hostOsh)
    endpointOshs = map(reportEndpoint, endpoints)
    fptools.each(vector.add, endpointOshs)
    # x) report TREX instance itself
    instanceOsh = trexReporter.reportInstance(first(system.getInstances()),
                                              hostOsh)
    # x) mark as BIA or plain-TREX
    productName = (isBiaProduct
                   and sap_trex.Product.BIA.instanceProductName
                   or sap_trex.Product.TREX.instanceProductName)
    softwareBuilder.updateDiscoveredProductName(instanceOsh, productName)
    # x) set name server role (master, slave or 1st master)
    # NOTE(review): if every name-server endpoint is loopback, `endpoints`
    # is empty and first(endpoints).getPort() will fail -- confirm callers
    # guarantee at least one non-loopback endpoint
    nameServerPort = first(endpoints).getPort()
    nameServerEndpoint = netutils.createTcpEndpoint(hostNode.name,
                                                    nameServerPort)
    topologyGlobals = topology.getGlobals()
    isMaster = nameServerEndpoint in (fptools.safeFunc(
        topologyGlobals.getMasterEndpoints)() or ())
    isActiveMaster = nameServerEndpoint in (fptools.safeFunc(
        topologyGlobals.getActiveMasterEndpoints)() or ())
    trexBuilder.updateNameServerMode(
        instanceOsh,
        (isMaster
         and (isActiveMaster
              and sap_trex.NameServerMode.FIRST_MASTER
              or sap_trex.NameServerMode.MASTER)
         or sap_trex.NameServerMode.SLAVE))
    vector.add(instanceOsh)
    # x) DO NOT report 'membership' between system and instance
    # Explanation:
    # sometimes you can discover systems that don't have relationship to current host.
    # This can lead to incorrect merging of to systems (inside OSH vector)
    # systemOsh = trexReporter.reportSystem(system)
    # vector.add(systemOsh)
    # vector.add(linkReporter.reportMembership(systemOsh, instanceOsh))
    # x) report 'usage' between instance and endpoints of name-server
    reportUsage = fptools.partiallyApply(linkReporter.reportUsage,
                                         instanceOsh, fptools._)
    fptools.each(vector.add, map(reportUsage, endpointOshs))
    return vector
def process(self, context):
    r'''Discover and report SAP TREX/BIA topology for the matched application.

    Flow: find the TREXDaemon process, read and parse its instance profile,
    derive the system/instance file-system layout, read the default profile
    (BIA vs plain TREX detection), topology.ini, the version manifest and the
    RFC server configuration, then report everything into the context
    result vector.

    @types: applications.ApplicationSignatureContext
    '''
    # NOTE: bare strings piped with '|' into info/warn/debug are log
    # statements -- the '|' operator is presumably overloaded by a logging
    # helper imported at module level (not visible here; confirm).
    # ------------------------------------------------------------ DISCOVERY
    "SAP TREX plug-in DISCOVERY start" | info
    shell = context.client
    fs = file_system.createFileSystem(shell)
    pathtools = file_system.getPath(fs)
    # x) get process related application
    hostOsh = context.hostOsh
    application = context.application
    destinationIp = application.getConnectionIp()
    "x) Find TREX Daemon process that has profile path as parameter" | info
    mainProcess = findFirst(isMainTrexProcess, context.application.getProcesses())
    profilePath = sap_discoverer.getProfilePathFromCommandline(
        mainProcess.commandLine)
    "x) Read profile content: %s" % profilePath | info
    # partially-applied, exception-safe file reader: path -> file-with-content or None
    getFileContent = Sfn(
        Fn(self.__getFileWithContent, shell, pathtools, __))
    profileFile = profilePath and getFileContent(profilePath)
    if not profileFile:
        "Plug-in flow broken. Failed to read instance profile\
content based on path from the TREXDaemon command line" | warn
        return
    "x) Instance profile parsing" | info
    sapIniParser = sap_discoverer.IniParser()
    instanceProfileParser = sap_discoverer.InstanceProfileParser(
        sapIniParser)
    defaultProfileParser = sap_trex_discoverer.DefaultProfileParser(
        sapIniParser)
    try:
        resultAsIni = instanceProfileParser.parseAsIniResult(
            profileFile.content)
        instanceProfile = instanceProfileParser.parse(resultAsIni)
        defaultProfile = defaultProfileParser.parse(resultAsIni)
    except Exception:
        logger.warnException("Failed to parse instance profile")
        return
    rfcConfigs = []
    trexSystem = defaultProfile.getSystem()
    trexInstance = instanceProfile.getInstance()
    # instance name is <name><number>, e.g. TRX00
    trexInstanceName = trexInstance.getName() + trexInstance.getNumber()
    isBiaProduct = 0
    versionInfo = None
    # # master by default, if topology file is not found that means
    # # that current one is the only instance
    # isMaster = 1
    trexTopology = None
    "x) Initialize TREX instance layout" | debug
    systemName = trexSystem.getName()
    systemBasePath = sap_discoverer.findSystemBasePath(
        mainProcess.getExecutablePath(), systemName)
    if systemBasePath:
        systemLayout = sap_trex_discoverer.SystemLayout(
            pathtools, systemBasePath, systemName)
        'System path: %s' % systemLayout.getRootPath() | info
        instancePath = systemLayout.composeInstanceDirPath(
            trexInstanceName)
        'Instance path: %s' % instancePath | debug
        instanceLayout = sap_trex_discoverer.InstanceLayout(
            pathtools, instancePath, trexInstanceName)
        "x) Get content of default profile as it contains information about product"
        "x) Determine whether we deal with BIA based on version information" | debug
        defaultProfilePath = systemLayout.getDefaultProfileFilePath()
        defaultProfileFile = getFileContent(defaultProfilePath)
        try:
            resultAsIni = instanceProfileParser.parseAsIniResult(
                defaultProfileFile.content)
            defaultProfile = defaultProfileParser.parse(resultAsIni)
        except Exception:
            logger.warnException("Failed to parse default profile")
        else:
            isBiaProduct = defaultProfile.getProductType(
            ) == sap_trex.Product.BIA
        (isBiaProduct and "BIA" or "non-BIA", "product detected") | info
        # get instance host name from profile name
        instanceHostname = None
        try:
            destinationSystem = sap_discoverer.parseSapSystemFromInstanceProfileName(
                profileFile.getName())
        except Exception:
            msg = "Failed to parse instance hostname from profile file name"
            logger.debugException(msg)
        else:
            instanceHostname = first(
                destinationSystem.getInstances()).getHostname()
        "x) Discover whole topology from (topology.ini)" | info
        # topology.ini file location and format differs depending on the
        # product:
        # -a) BIA (has plain-ini file at <SID>/sys/global/trex/data/topology.ini
        # -b) TREX (has several places where topology.ini can be stored)
        discoverTopologyIniFilePath = fptools.safeFunc(
            sap_trex_discoverer.discoverTopologyIniFilePath)
        topologyFilePath = (isBiaProduct
                            and systemLayout.getTopologyIniPath()
                            or discoverTopologyIniFilePath(
                                fs, instanceLayout, instanceHostname))
        topologyFile = topologyFilePath and getFileContent(
            topologyFilePath)
        if topologyFile:
            try:
                configParser = sap_trex_discoverer.TopologyConfigParser()
                trexTopology = sap_trex_discoverer.TrexTopologyConfig(
                    configParser.parse(topologyFile.content))
                # find instance between master end-points
                # landscapeSnapshot = topology.getGlobals().getLandscapeSnapshot()
                # masterEndpoints = landscapeSnapshot.getMasterEndpoints()
                # activeMasterEndpoints = landscapeSnapshot.getActiveMasterEndpoints()
                # topologyNodes = topology.getHostNodes()
                ##
                # isEndpointWithInstanceHostname = (lambda
                # ep, hostname = instanceHostname: ep.getAddress() == hostname)
                # isMaster = len(filter(isEndpointWithInstanceHostname,
                # landscapeSnapshot.getMasterEndpoints()))
                # "host role is %s" % (isMaster and "master" or "slave") | info
            except:
                # NOTE(review): bare except also swallows system-exiting
                # exceptions; narrowing to Exception would be safer
                logger.warnException(
                    "Failed to parse topology configuration")
        else:
            logger.warn(
                "Failed to get content for the topology configuration")
        "x) Discover TREX version information from saptrexmanifest.mf" | info
        # read version info from manifest file
        manifestFile = getFileContent(instanceLayout.getManifestFilePath())
        if manifestFile:
            manifestParser = sap_trex_discoverer.SapTrexManifestParser(
                sapIniParser)
            versionInfo = manifestParser.parseVersion(manifestFile.content)
        else:
            'Failed to discover version from manifest file' | warn
            # NOTE(review): 'attept' typo in the log message below
            # (runtime string -- deliberately left untouched here)
            'Second attept to get version from updateConfig.ini file' | info
            profileSystem = Sfn(
                sap_discoverer.parseSapSystemFromInstanceProfileName)(
                profileFile.getName())
            if profileSystem:
                hostname = first(
                    profileSystem.getInstances()).getHostname()
                updateConfigFile = getFileContent(
                    instanceLayout.composeUpdateConfigIniFilePath(
                        hostname))
                versionInfo = updateConfigFile and sap.VersionInfo(
                    updateConfigFile.content.strip())
        "x) Discover served systems ( in case if RFC configuration established )" | info
        rfcServerIniFilePath = (
            isBiaProduct and systemLayout.getRfcServerConfigFilePath()
            or instanceLayout.composeTrexRfcServerIniFilePath(
                instanceHostname))
        rfcServerIniFile = getFileContent(rfcServerIniFilePath)
        if rfcServerIniFile:
            rfcConfigs = filter(None, (fptools.safeFunc(
                sap_trex_discoverer.parseConnectionsInRfcServerIni)(
                rfcServerIniFile.content)))
    # -------------------------------------------------------- REPORTING
    "SAP TREX plug-in REPORTING start" | info
    trexOsh = application.getOsh()
    vector = context.resultsVector
    configFileReporter = file_topology.Reporter(file_topology.Builder())
    trexReporter = sap_trex.Reporter(sap_trex.Builder())
    linkReporter = sap.LinkReporter()
    softwareBuilder = sap.SoftwareBuilder()
    "x) - report profile content as configuration document for the application" | info
    vector.add(configFileReporter.report(profileFile, trexOsh))
    ("x) - report %s" % trexSystem) | info
    trexSystemOsh = trexReporter.reportSystem(trexSystem)
    vector.add(trexSystemOsh)
    vector.add(linkReporter.reportMembership(trexSystemOsh, trexOsh))
    "x) - report instance name and version" | info
    softwareBuilder.updateName(trexOsh, trexInstanceName)
    "x) report instance number: %s" % trexInstance.getNumber() | info
    instanceBuilder = sap_trex.Builder()
    instanceBuilder.updateInstanceNumber(trexOsh, trexInstance.getNumber())
    if versionInfo:
        softwareBuilder.updateVersionInfo(trexOsh, versionInfo)
    if isBiaProduct:
        softwareBuilder.updateDiscoveredProductName(
            trexOsh, sap_trex.Product.BIA.instanceProductName)
    "x) report RFC connections" | info
    dnsResolver = netutils.DnsResolverByShell(shell, destinationIp)
    vector.addAll(reportRfcConfigs(rfcConfigs, dnsResolver, hostOsh))
    "x) report all topology nodes" | info
    if trexTopology:
        reportHostNode = fptools.partiallyApply(reportTrexHostNode,
                                                fptools._, trexTopology, isBiaProduct)
        vectors = map(reportHostNode, trexTopology.getHostNodes())
        fptools.each(vector.addAll, vectors)
def decorator_fn(main_fn): @wraps(main_fn) def wrapper(framework): vector = ObjectStateHolderVector() framework = RichFramework(framework) creds_manager = CredsManager(framework) creds = get_credentials_fn(framework, creds_manager) if creds is None: return vector first_cred = take(0, 1, creds) if not first_cred: logger.reportErrorObject(_create_missed_creds_error(proto_name)) else: connection_exs = [] discovery_exs = [] warnings = [] at_least_once_discovered = False oshs = [] creds = list(itertools.chain(first_cred, creds)) if with_dns_resolver: local_client = framework.createClient(LOCAL_SHELL_PROTOCOL_NAME) local_shell = shellutils.ShellUtils(local_client) dns_resolver = _get_dns_resolver(local_shell) for args in starmap(functools.partial(cred_to_client_args, creds_manager), creds): try: with create_client(framework, *args) as client: args = with_dns_resolver and (dns_resolver,) + args or args oshs_, warnings_ = main_fn(client, framework, *args) oshs.extend(oshs_) warnings.extend(warnings_) at_least_once_discovered = True if stop_on_first: break except ConnectionException, ce: logger.debugException(str(ce)) connection_exs.append(ce) except (DiscoveryException, Exception), de: logger.debugException(str(de)) discovery_exs.append(de) if with_dns_resolver: fptools.safeFunc(local_shell.closeClient)() if at_least_once_discovered: each(logger.reportWarningObject, warnings) else: for ex in connection_exs: obj = _create_connection_errorobj(proto_name, ex.message) logger.reportErrorObject(obj) for ex in discovery_exs: obj = _create_discovery_errorobj(proto_name, '', ex.message) logger.reportErrorObject(obj) vector.addAll(oshs)
findAdminServerEndpoint, runtimes))) # find unique endpoints if len(adminServerEndpointByName) > 1: logger.info( """After analyze of running processes in single domain \ found servers pointing to different administrative servers """) logger.debug(adminServerEndpointByName) adminServerEndpoint = (len(adminServerEndpointByName) == 1 and adminServerEndpointByName.values()[0]) domain = jee.Domain(domainDescriptor.getName()) # add known clusters to the domain clusters = domainDescriptor.getClusters() each(domain.addCluster, clusters) # add config.xml to the domain domain.addConfigFiles( jee.createXmlConfigFile(domainDescriptorFile)) deploymentTargetByName = applyMapping(jee.Cluster.getName, clusters) machineByName = applyMapping( weblogic_discoverer.DomainConfigurationDescriptor.Machine. getName, domainDescriptor.getMachines()) # find corresponding process for each discovered server to # provide additional information that can be overridden resolvedServers = [] servers = domainDescriptor.getServers()