def process(self, context):
    '''
    Plugin gets system information from DEFAULT profile.
    In case if profile cannot be processed - system information is parsed
    from instance profile path.
    Default profile read for system (identified by its path) is shared
    between application component instances

    @param context: application-signature plugin context; provides the
        shell client, the application OSH and the host OSH
    '''
    shell = context.client
    osh = context.application.applicationOsh
    host_osh = context.hostOsh
    # profile path was stored on the application OSH by the instance builder
    attrName = sap.InstanceBuilder.INSTANCE_PROFILE_PATH_ATTR
    pf_path = osh.getAttributeValue(attrName)
    logger.info("Instance pf path is: %s" % pf_path)
    system, pf_name = sap_discoverer.parsePfDetailsFromPath(pf_path)
    logger.info("Parsed details from pf name: %s" % str((system, pf_name)))
    topology = self._discover_topology(shell, system, pf_path)
    if topology:
        #resolver = dns_resolver.SocketDnsResolver()
        resolver = dns_resolver.create(shell, local_shell=None,
                                       dns_server=None, hosts_filename=None)
        db_host_osh, oshs = _report_db_host(topology, resolver)
        application_ip = _report_application_ip(shell, topology, resolver)
        if application_ip:
            logger.info("application ip is: %s" % application_ip)
            osh.setAttribute('application_ip', str(application_ip))
            # re-parent the application OSH to the host owning the resolved IP
            host_app_osh = modeling.createHostOSH(str(application_ip))
            logger.info("set container: %s" % host_app_osh)
            osh.setContainer(host_app_osh)
        # NOTE(review): nesting reconstructed from a single-line paste -
        # topology is assumed to be reported even when no application IP
        # could be determined; confirm against the original layout
        oshs.extend(self._report_topology(osh, host_osh, db_host_osh, topology))
        context.resultsVector.addAll(oshs)
def handleDiskRow(self, fileSystem, mountedOn, size, usedSize=None):
    """
    Report a disk OSH for one row of disk/file-system output and, for
    remote mounts, link it to the network share it realizes.

    @param fileSystem: file system / device specification; for remote
        mounts it is parsed by getRemoteHostAndMountPoint
    @param mountedOn: mount point; used as a deduplication key
    @param usedSize: disk used size in 1K-blocks
    @param size: disk size in 1K-blocks
    """
    # each mount point is reported at most once
    if mountedOn in self.mountPointToDisk:
        logger.reportWarning("File system object already reported for the mount point; skipping new one")
        logger.warn(
            "File system object already reported for the mount point '%s'; skipping new one (mount point: '%s'; file system: '%s')"
            % (mountedOn, mountedOn, fileSystem)
        )
        return
    # non-numeric sizes (e.g. '-' placeholders) are reported as unknown
    if str(size).isdigit():
        sizeInMb = _kbToMb(size)
    else:
        sizeInMb = None
    if str(usedSize).isdigit():
        usedSizeInMb = _kbToMb(usedSize)
    else:
        usedSizeInMb = None
    type_ = modeling.UNKNOWN_STORAGE_TYPE
    diskOsh = modeling.createDiskOSH(
        self.containerOsh, mountedOn, type_, size=sizeInMb,
        name=fileSystem, usedSize=usedSizeInMb
    )
    if diskOsh:
        self.mountPointToDisk[mountedOn] = diskOsh
        self.resultVector.add(diskOsh)
        host_reporter = host_topology.Reporter()
        resolver = dns_resolver.create(shell=self.shell)
        # best-effort: failure to link the remote share must not break
        # disk reporting, hence the broad except below
        try:
            (remoteHost, remoteMountPoint) = getRemoteHostAndMountPoint(fileSystem)
            if remoteHost and remoteMountPoint:
                # strip IPv6-style bracket quoting around the host
                if remoteHost.startswith("[") and remoteHost.endswith("]"):
                    remoteHost = remoteHost[1:-1]
                host_osh = self.remoteHosts.get(remoteHost)
                if not host_osh:
                    host = host_base_parser.parse_from_address(remoteHost, resolver.resolve_ips)
                    # do not report hostname as it may be alias
                    host_osh, _, oshs = host_reporter.report_host_with_ips(host.ips)
                    self.remoteHosts[remoteHost] = host_osh
                    self.resultVector.addAll(oshs)
                remoteShareOsh = ObjectStateHolder("networkshare")
                remoteShareOsh.setContainer(host_osh)
                remoteShareOsh.setStringAttribute("data_name", remoteMountPoint)
                remoteShareOsh.setStringAttribute("share_path", remoteMountPoint)
                self.resultVector.add(remoteShareOsh)
                self.resultVector.add(modeling.createLinkOSH("realization", remoteShareOsh, diskOsh))
        except:
            stackTrace = logger.prepareFullStackTrace("Failed to link disk to the remote share.")
            logger.warn(stackTrace)
def discover(shell, iisOSH, hostIPs, iis_version, webservice_ext_filter=None):
    '''Discover IIS topology via appcmd and report it.

    @param shell: shell client used to run appcmd / ODBC discovery
    @param iisOSH: OSH of the IIS application server (topology root)
    @param hostIPs: IP addresses of the discovered host
    @param iis_version: IIS version, used while reporting
    @param webservice_ext_filter: optional list of web-service extensions
        used to filter vdirs and applications; defaults to an empty list
    @return: result of TopologyReporter.reportTopology
    '''
    # fix: '[]' as a default argument is shared across calls (mutable
    # default pitfall); normalize None to a fresh list instead
    if webservice_ext_filter is None:
        webservice_ext_filter = []
    appCmdDiscover = AppCmdDiscover(shell, hostIPs)
    endpoint_builder = netutils.ServiceEndpointBuilder()
    builder = iis_reporter.TopologyBuilder()
    endpoint_reporter = netutils.EndpointReporter(endpoint_builder)
    # local resolver (no remote shell) is sufficient for ODBC reporting
    resolver = dns_resolver.create(shell=None)
    odbcBuilder = odbc_reporter.TopologyBuilder(resolver)
    odbcReporter = odbc_reporter.Reporter(odbcBuilder)
    odbc_data_cache = discover_odbc_info(shell)
    reporter = iis_reporter.TopologyReporter(builder, endpoint_reporter,
                                             odbcReporter, odbc_data_cache)
    app_pools = appCmdDiscover.discover_app_pools()
    vdirs = appCmdDiscover.discover_vdirs(webservice_ext_filter)
    apps = appCmdDiscover.discover_apps(vdirs, shell, webservice_ext_filter)
    sites = appCmdDiscover.discover_sites(app_pools, apps)
    return reporter.reportTopology(app_pools.values(), sites, iisOSH,
                                   iis_version, webservice_ext_filter)
def getEndpointsFromListener(self):
    '''Parse (host, port) endpoints from the cached Oracle listener
    status output, resolving non-IP host names to IPs.

    @return: list of (ip, port) tuples; hosts that cannot be resolved
        are silently skipped (resolution failures are only debug-logged)
    '''
    endpoints = []
    resolver = dns_resolver.create(self.__shell)
    if self.__listenerStatus:
        # fix: raw string for the pattern - '\w', '\d', '\s' etc. are
        # invalid escape sequences in a plain string literal
        raw_endpoints = re.findall(r'HOST=([\w\.\-]+)\)\s*\(PORT=(\d+)\)',
                                   self.__listenerStatus)
        if raw_endpoints:
            for endpoint in raw_endpoints:
                if netutils.isValidIp(endpoint[0]):
                    endpoints.append(endpoint)
                else:
                    try:
                        ips = resolver.resolve_ips(endpoint[0])
                        for ip in ips:
                            # NOTE(review): this branch appends ip objects
                            # while the branch above appends strings;
                            # callers appear to tolerate both - confirm
                            # before unifying the element type
                            if netutils.isValidIp(str(ip)):
                                endpoints.append((ip, endpoint[1]))
                    except:
                        logger.debugException('')
    return endpoints
def process(self, context):
    '''Enrich a WebSEAL instance OSH with its name and link it to the
    policy server parsed from the instance configuration file.

    @param context: application-signature plugin context providing the
        shell client, the application OSH and the results vector
    '''
    webseal_instance_osh = context.application.applicationOsh
    command_line = self._main_process.commandLine
    # instance name is encoded in the config file name: webseald-<name>
    # (raw string: '\w' is an invalid escape in a plain literal)
    m = re.search(r'.*-config.+webseald-(\w+)', command_line)
    if m:
        logger.debug('Found instance name %s for Webseal Instance' % m.group(1))
        webseal_instance_osh.setStringAttribute('name', m.group(1))
    config_path = self._parseConfigPath(command_line)
    if not config_path:
        logger.warn('Failed to get more info')
        # fix: previously fell through and called _fetchConfigFileContent
        # with an empty config_path after warning
        return
    shell = context.client
    file_content = self._fetchConfigFileContent(shell, config_path)
    if not file_content:
        return
    master_host = self._parseMasterHostData(file_content)
    if not master_host:
        return
    ip = None
    if ip_addr.isValidIpAddressNotZero(master_host):
        ip = master_host
    else:
        # master host is a name - try to resolve it; best-effort only
        try:
            resolver = dns_resolver.create(shell)
            ips = resolver.resolve_ips(master_host)
            ip = ips and ips[0]
        except:
            logger.debugException('Failed to resolve host name %s' % master_host)
    if ip:
        host_osh = modeling.createHostOSH(str(ip))
        if host_osh:
            policy_osh = self.buildPolicyServer(host_osh)
            link_osh = modeling.createLinkOSH('usage', webseal_instance_osh,
                                              policy_osh)
            vector = context.resultsVector
            vector.add(host_osh)
            vector.add(policy_osh)
            vector.add(link_osh)
def process(self, context):
    '''
    Collect system information from the DEFAULT profile; when that profile
    cannot be processed, the system details are parsed from the instance
    profile path instead. The default profile read for a system (keyed by
    its path) is shared between application component instances.

    @param context: application-signature plugin context; provides the
        shell client, the application OSH and the host OSH
    '''
    shell = context.client
    application_osh = context.application.applicationOsh
    node_osh = context.hostOsh
    profile_attr = sap.InstanceBuilder.INSTANCE_PROFILE_PATH_ATTR
    profile_path = application_osh.getAttributeValue(profile_attr)
    logger.info("Instance pf path is: %s" % profile_path)
    sap_system, profile_name = sap_discoverer.parsePfDetailsFromPath(profile_path)
    logger.info("Parsed details from pf name: %s" % str((sap_system, profile_name)))
    topology = self._discover_topology(shell, sap_system, profile_path)
    if not topology:
        return
    resolver = dns_resolver.create(shell, local_shell=None,
                                   dns_server=None, hosts_filename=None)
    db_host_osh, oshs = _report_db_host(topology, resolver)
    application_ip = _report_application_ip(shell, topology, resolver)
    if application_ip:
        logger.info("application ip is: %s" % application_ip)
        application_osh.setAttribute('application_ip', str(application_ip))
        container_osh = modeling.createHostOSH(str(application_ip))
        logger.info("set container: %s" % container_osh)
        application_osh.setContainer(container_osh)
    oshs.extend(self._report_topology(application_osh, node_osh,
                                      db_host_osh, topology))
    context.resultsVector.addAll(oshs)
buffer = shell.safecat(configFileLocation) if not buffer.strip(): logger.warn('Config file is blank: %s' % configFileLocation) return OSHVResult logger.debug('Config file: %s' % buffer) match = re.search(regexMatcher, buffer, re.MULTILINE) if match: result = match.group(int(groupOfMatchResult)).strip() if netutils.isValidIp(result): ip = result else: dns = dns_resolver.create(shell) ip = dns.resolve_ips(result)[0] ipOsh = modeling.createIpOSH(ip) OSHVResult.add(ipOsh) else: logger.warn('Cannot find the target IP address') except Exception, ex: exInfo = ex.getMessage() errormessages.resolveAndReport(exInfo, protocol, Framework) except: exInfo = logger.prepareJythonStackTrace('') errormessages.resolveAndReport(exInfo, protocol, Framework) try:
def dnsresolver(localshell, shell=None, dns_server=None, hosts_filename=None):
    '''Convenience wrapper around dns_resolver.create with the local shell
    as the leading argument.

    @param localshell: local shell used for resolution fallbacks
    @param shell: optional remote shell to resolve through
    @param dns_server: optional explicit DNS server address
    @param hosts_filename: optional hosts-file path to consult
    @return: resolver instance produced by dns_resolver.create
    '''
    return dns_resolver.create(shell=shell, local_shell=localshell,
                               dns_server=dns_server,
                               hosts_filename=hosts_filename)
def dnsresolver(localshell, shell=None, dns_server=None, hosts_filename=None):
    '''Build a DNS resolver via the dns_resolver factory.

    @param localshell: local shell used for resolution fallbacks
    @param shell: optional remote shell to resolve through
    @param dns_server: optional explicit DNS server address
    @param hosts_filename: optional hosts-file path to consult
    @return: resolver instance produced by dns_resolver.create
    '''
    resolver = dns_resolver.create(local_shell=localshell,
                                   shell=shell,
                                   hosts_filename=hosts_filename,
                                   dns_server=dns_server)
    return resolver
def handleDiskRow(self, fileSystem, mountedOn, size, usedSize=None):
    '''Report one disk/file-system row; for remote mounts additionally
    report the network share and link the disk to it.

    @param fileSystem: file system / device specification; remote mounts
        are recognized by getRemoteHostAndMountPoint
    @param mountedOn: mount point, used as a deduplication key
    @param size: disk size in 1K-blocks
    @param usedSize: disk used size in 1K-blocks
    '''
    # every mount point is reported at most once
    if mountedOn in self.mountPointToDisk:
        logger.reportWarning('File system object already reported for the mount point; skipping new one')
        logger.warn("File system object already reported for the mount point '%s'; skipping new one (mount point: '%s'; file system: '%s')" % (mountedOn, mountedOn, fileSystem))
        return
    # non-numeric sizes (placeholders like '-') become unknown
    sizeInMb = _kbToMb(size) if str(size).isdigit() else None
    usedSizeInMb = _kbToMb(usedSize) if str(usedSize).isdigit() else None
    diskOsh = modeling.createDiskOSH(self.containerOsh, mountedOn,
                                     modeling.UNKNOWN_STORAGE_TYPE,
                                     size=sizeInMb, name=fileSystem,
                                     usedSize=usedSizeInMb)
    if not diskOsh:
        return
    self.mountPointToDisk[mountedOn] = diskOsh
    self.resultVector.add(diskOsh)
    reporter = host_topology.Reporter()
    resolver = dns_resolver.create(shell=self.shell)
    # best-effort: a failure here must not break the disk reporting above
    try:
        remoteHost, remoteMountPoint = getRemoteHostAndMountPoint(fileSystem)
        if remoteHost and remoteMountPoint:
            # strip IPv6-style bracket quoting
            if remoteHost.startswith('[') and remoteHost.endswith(']'):
                remoteHost = remoteHost[1:-1]
            hostOsh = self.remoteHosts.get(remoteHost)
            if not hostOsh:
                parsed = host_base_parser.parse_from_address(remoteHost,
                                                             resolver.resolve_ips)
                # report by IPs only - the hostname may be an alias
                hostOsh, _, hostOshs = reporter.report_host_with_ips(parsed.ips)
                self.remoteHosts[remoteHost] = hostOsh
                self.resultVector.addAll(hostOshs)
            shareOsh = ObjectStateHolder('networkshare')
            shareOsh.setContainer(hostOsh)
            shareOsh.setStringAttribute('data_name', remoteMountPoint)
            shareOsh.setStringAttribute('share_path', remoteMountPoint)
            self.resultVector.add(shareOsh)
            self.resultVector.add(modeling.createLinkOSH('realization',
                                                         shareOsh, diskOsh))
    except:
        logger.warn(logger.prepareFullStackTrace('Failed to link disk to the remote share.'))