def DiscoveryMain(Framework):
    """Job entry point: open a shell to the destination and discover F5 LTM topology.

    Returns the discovered topology vector, or None when only warnings
    were reported (no F5 found / general failure).
    """
    ipAddress = Framework.getDestinationAttribute('ip_address')
    shell = None
    try:
        client = Framework.createClient()
        shell = shellutils.ShellFactory().createShell(client)
        f5Discoverer = createF5Discoverer(shell, ipAddress)
        f5Discoverer.discover()
        return f5Discoverer.getTopology()
    except NoF5Exception:
        logger.reportWarning("No F5 LTM found on the remote machine")
    except:
        errorMsg = 'Failed to get general information'
        errobj = errorobject.createError(errorcodes.FAILED_GETTING_INFORMATION, ['shell', 'general information'], errorMsg)
        logger.debugException(errorMsg)
        logger.reportWarningObject(errobj)
    finally:
        try:
            # close the shell only if it was actually created
            shell and shell.closeClient()
        except:
            logger.debugException('')
            logger.error('Unable to close shell')
def discoverVolumeGroups(shell):
    """Run 'lsvg' on the remote host to list volume groups.

    Returns an empty dict when the command fails.
    NOTE(review): in this view the success path falls through and returns
    None instead of a parsed dict -- presumably the parsing of `output`
    continues in the full source; confirm before relying on the result.
    """
    try:
        output = ibm_hmc_lib.executeCommand(shell, 'lsvg')
    except ValueError, ex:
        logger.reportWarning('Failed to discover Logical Volumes')
        logger.warn(str(ex))
        return {}
def process(self, context, filter = DEFAULT_FILTER):
    """Instantiate and run every accepted plug-in against the given context.

    :param context: discovery context handed to each plug-in
    :param filter: plug-in filter; defaults to DEFAULT_FILTER
    PluginUncheckedException aborts the whole chain; any other failure is
    logged/reported as a warning and the chain continues.
    """
    acceptedPlugins = filter.filterPlugins(self.idToPluginDescriptor.values())
    logger.debug("Accepted plugins in chain: %d" % len (acceptedPlugins))
    for pluginDescriptor in acceptedPlugins:
        pluginId = pluginDescriptor.getId()
        logger.debug("Executing plug-in with ID '%s'" % pluginId)
        try:
            plugin = self.__instantiatePlugin(pluginDescriptor)
            if plugin:
                if plugin.isApplicable(context):
                    plugin.process(context)
                else:
                    logger.debug("Plug-in with ID '%s' is not applicable" % pluginId)
            else:
                logger.warn("Failed to instantiate plug-in with ID '%s'" % pluginId)
                logger.reportWarning("Failed to instantiate plug-in")
        except PluginUncheckedException, ex:
            # unchecked plug-in exceptions must propagate to the caller
            raise ex.__class__(ex)
        except (Exception, JavaException), e:
            logger.warnException("Exception during processing of plug-in with ID '%s'\n" % pluginId)
            # Java exceptions expose the message via getMessage()
            if isinstance(e, JavaException):
                msg = e.getMessage()
            else:
                msg = e.message
            logger.reportWarning("Exception during processing of plug-in:%s" % msg)
def DiscoveryMain(Framework):
    """Deprecated job stub: report a deprecation warning and return an empty vector."""
    logger.reportWarning('The job is deprecated. Use "Network Connectivity Data Analyzer" instead.')
    return ObjectStateHolderVector()
def __getOdbcItems(self, shell, scope, root, path): """ :param root: Root of registry in where discovery will be performed :param scope: scope of ODBC information (ScopeEnum.SYSTEM or ScopeEnum.USER) :param path: ODBC DataSources root path :type root: regutils.RegistryFolder :type path: str :type scope: str :return: map[str, DSNInfo] """ builder = self._provider.getBuilder(root, path) items = self._agent.execQuery(builder) result = [] for item in items: values = item.getAsDict() for name in values.keys(): driverName = values[name] odbc_entries = [] try: discoverer = self.getDiscoverer(shell, driverName) discoverer.discover(name, driverName, scope, shell) odbc_entries.append(discoverer.getInfo()) except NotImplementedError, ex: logger.debugException(str(ex), ex) except Exception, ex: logger.debugException(str(ex), ex) logger.reportWarning("Cannot discover some of ODBC information") result.extend(odbc_entries)
def process(self, context):
    """Discover the databases used by an HP uCMDB 9.x/10.x server.

    Reads cmdb.conf relative to the server process folder and reports DB
    topology for both the 'datamodel' and 'history' sections.
    Raises applications.IgnoreApplicationException for other versions.
    """
    ConfigBasedPlugin.process(self, context)
    processFolder = self.getProcessFolder(context)
    discoveredVersion = ""
    try:
        discoveredVersion = context.application.getOsh().getAttributeValue("application_version_number")
    except:
        logger.debugException('')
    logger.info("Discovered version is: %s" % discoveredVersion)
    # only uCMDB 9.x and 10.x keep their DB settings in cmdb.conf
    if discoveredVersion and not (discoveredVersion.startswith('9') or discoveredVersion.startswith('10')):
        raise applications.IgnoreApplicationException('UCMDB is not of a proper version')
    try:
        content = context.client.safecat(processFolder + '../../../conf/cmdb.conf')
    except:
        logger.reportWarning('Failed getting HP uCMDB configuration')
        return
    # --- datamodel section ---
    hostName = self.getPropertyByRegexp(r'dal\.datamodel\.host\.name=(.+)', content)
    dbType = self.getPropertyByRegexp(r'dal\.datamodel\.db\.type=(.+)', content)
    port = self.getPropertyByRegexp(r'dal\.datamodel\.port=(\d+)', content)
    sid = self.getPropertyByRegexp(r'dal\.datamodel\.sid=(\w+)', content)
    if ip_addr.isValidIpAddress(hostName):
        # the property holds an IP -- resolve it back to a host name
        ipAddress = hostName
        ipAddress = ipAddress.encode('utf8').strip()
        hostName = netutils.getHostName(ipAddress)
    if (not sid) and hostName:
        # fall back to the short host name as the SID
        sid = hostName.upper().split('.')[0]
    if hostName and dbType and port and sid:
        hostName = hostName.strip()
        resolver = netutils.IpResolver('', context.framework)
        dbHostIp = resolver.resolveHostIp(hostName)
        if dbHostIp:
            self.reportTopology(context, dbType, port, sid, dbHostIp)
        else:
            logger.warn('Failed resolving DB host "%s" ip address' % hostName)
    else:
        logger.warn('Failed parsing cmdb config file (datamodel part)')
    # --- history section (same steps against the dal.history.* keys) ---
    hostName = self.getPropertyByRegexp(r'dal\.history\.host\.name=(.+)', content)
    dbType = self.getPropertyByRegexp(r'dal\.history\.db\.type=(.+)', content)
    port = self.getPropertyByRegexp(r'dal\.history\.port=(\d+)', content)
    sid = self.getPropertyByRegexp(r'dal\.history\.sid=(\w+)', content)
    if ip_addr.isValidIpAddress(hostName):
        ipAddress = hostName
        ipAddress = ipAddress.encode('utf8').strip()
        hostName = netutils.getHostName(ipAddress)
    if (not sid) and hostName:
        sid = hostName.upper().split('.')[0]
    if hostName and dbType and port and sid:
        hostName = hostName.strip()
        resolver = netutils.IpResolver('', context.framework)
        dbHostIp = resolver.resolveHostIp(hostName)
        if dbHostIp:
            self.reportTopology(context, dbType, port, sid, dbHostIp)
        else:
            logger.warn('Failed resolving DB host "%s" ip address' % hostName)
    else:
        logger.warn('Failed parsing cmdb config file (history part)')
def ev2_getIplInfoOutput(ls):
    """Parse 'D IPLINFO' command output.

    :param ls: EView command executor
    :return: (zOsRelease, ieasymList, ieasysList, machineBootDate); each
             value defaults to '' when not found. Failures are logged and
             reported as errors.
    """
    # process IPL Info ---------------------------------------------------------
    zOsRelease = ''
    ieasymList = ''
    ieasysList = ''
    machineBootDate = ''
    try:
        output = ls.evMvsCmd(_CMD_D_IPLINFO)
        if isNotNull(output) and output.isSuccess() and len(output.cmdResponseList) > 0:
            # z/OS release, e.g. "... RELEASE z/OS 02.04.00 LICENSE ..."
            releaseList = output.getValuesFromLineList('s', output.cmdResponseList, 'RELEASE z/OS', 'LICENSE')
            if len(releaseList) > 0 and len(releaseList[0]) == 3:
                zOsRelease = releaseList[0][1] or ''
            ieasymList = output.getValuesFromLineList('s', output.cmdResponseList, 'IEASYM LIST =')
            if len(ieasymList) > 0 and len(ieasymList[0]) == 2:
                ieasymList = ieasymList[0][1] or ''
            ieasysList = output.getValuesFromLineList('s', output.cmdResponseList, 'IEASYS LIST =')
            # BUG FIX: the original condition tested ieasymList (already a
            # string by now), so ieasysList was never unpacked and the raw
            # list leaked to the caller
            if len(ieasysList) > 0 and len(ieasysList[0]) == 2:
                ieasysList = ieasysList[0][1] or ''
            bootList = output.getValuesFromLineList('s', output.cmdResponseList, 'SYSTEM IPLED AT', 'ON')
            # BUG FIX: two markers split the line into 3 fields (cf. the
            # releaseList check); the original required len == 2 yet read
            # index 2, which could never match / would raise IndexError
            if len(bootList) > 0 and len(bootList[0]) == 3:
                bootTime = bootList[0][1] or ''
                bootDate = bootList[0][2] or ''
                if eview_lib.isNotNull(bootDate) and eview_lib.isNotNull(bootTime):
                    machineBootDate = modeling.getDateFromString('%s %s' % (bootDate, bootTime), 'MM/dd/yyyy kk.mm.ss')
        else:
            logger.reportWarning('Unable to get output for command - %s' % _CMD_D_IPLINFO)
    except:
        errMsg = 'Failure in method ev2_getIplInfoOutput()'
        logger.error(errMsg)
        logger.reportError(errMsg)
    return (zOsRelease, ieasymList, ieasysList, machineBootDate)
def process(self, context):
    """Locate the DB settings of a uCMDB 8.x server in FndInfra.ini and report its topology."""
    logger.debug('UcmdbServer8VersionShellPlugin.process')
    ConfigBasedPlugin.process(self, context)
    processFolder = self.getProcessFolder(context)
    try:
        content = context.client.safecat(processFolder + '../../conf/FndInfra.ini')
    except:
        logger.reportWarning('Failed getting HP uCMDB configuration')
        return
    # pull the connection settings out of the ini content
    hostName = self.getPropertyByRegexp(r'dbHost="(.+)"', content)
    dbType = self.getPropertyByRegexp(r'dbType="(.+)"', content)
    port = self.getPropertyByRegexp(r'dbPort="(\d+)"', content)
    sid = self.getPropertyByRegexp(r'dbSID="(\w+)"', content)
    if not (hostName and dbType and port and sid):
        logger.warn('Failed parsing cmdb config file')
        return
    dbHostIp = netutils.getHostAddress(hostName.strip())
    if dbHostIp:
        self.reportTopology(context, dbType, port, sid, dbHostIp)
    else:
        logger.warn('Failed resolving DB host "%s" ip address' % hostName)
def resolveOsh(self, key):
    """Resolve an OSH from the inner dictionaries by reference key.

    Two key flavors are accepted:
    1) 'sheetName index' -- the normal key that uniquely identifies an imported OSH
    2) '%key1|key2|key3%' -- legacy composite-key reference kept for backward
       compatibility; the class name is not part of it, so several OSHes may
       collide and the last one wins. AVOID; will be deprecated.
    Returns None when nothing matches.
    """
    key = self.getOshKeyFromFormula(key)
    osh = self.keysToOsh.get(key)
    if osh is not None:
        return osh
    # legacy path: refer CIs by composition of key attributes (see fillOshDict)
    if self.oshByCompositeKeysCount.has_key(key):
        count = self.oshByCompositeKeysCount[key]
        if count > 1:
            logger.warn('More than one OSH can be referenced by key %s. OSH count: %i' % (key, count))
            logger.warn('Use last OSH')
            logger.reportWarning('More than one OSH can be referenced by key; use Excel references to avoid the problem.')
        return self.oshByCompositeKeys.get(key)
    logger.debug('unable to resolve OSH using %s' % key)
    return osh
def reportDatabaseTopology(self, dsnInfo, dependentObject):
    """Build the OSH vector for the database described by dsnInfo.

    :param dsnInfo: DSN to report topology for
    :type dsnInfo: odbc.DSNInfo
    :return: vector holding every OSH of the created topology; empty on
        DNS resolution failure
    :rtype: appilog.common.system.types.vectors.ObjectStateHolderVector
    """
    vector = ObjectStateHolderVector()
    try:
        database_pdo = self.__builder.buildDatabaseServerPdo(dsnInfo)
        platform = db_platform.findPlatformBySignature(dsnInfo.driver)
        db_reporter = db.getReporter(platform, db_builder.getBuilderByPlatform(platform))
        # report the host first; drop its name since it may be an alias
        host_descriptor = self.__builder.buildHostDescriptor(dsnInfo.address)._replace(name=None)
        reporter = host_topology.Reporter()
        host_osh, ip_oshs, oshs = reporter.report_host(host_descriptor)
        server_osh, ipseOsh, database_oshs, topology = db_reporter.reportServerWithDatabases(database_pdo, host_osh, [dependentObject])
        vector.addAll(topology)
        vector.addAll(oshs)
        return vector
    except dns_resolver.ResolveException:
        logger.reportWarning("Cannot resolve ip of node")
        return ObjectStateHolderVector()
def processReferences(self, processor):
    """
    Sets OSHs root_container reference.
    SheetProcessor -> None
    """
    sheetName = processor.getSheetName()
    for colNum in xrange(processor.getLastColNum(0)):
        attrName = processor.getCellValueAt(0, colNum)
        # only the 'root_container' column carries parent references
        if attrName == 'root_container':
            for rowNum in xrange(1, processor.getLastRowNum() + 1):
                # the cell holds a formula referencing the parent OSH
                formula = processor.getCellValueAt(rowNum, colNum, 0)
                if self.isRemoteLink(formula):
                    logger.reportWarning('Links on remote files are not currently supported.')
                    logger.warn('Links on remote files are not currently supported: (%s)' % formula)
                    continue
                parentOsh = self.resolveOsh(formula)
                currOsh = self.resolveOsh(self.getOshKey(sheetName, rowNum))
                if not (currOsh and parentOsh):
                    logger.warn('failed to resolve object container')
                else:
                    currOsh.setContainer(parentOsh)
def reportDatabaseTopology(self, dsnInfo, dependentObject):
    """
    Build the OSH vector for the database described by dsnInfo.

    :param dsnInfo: DSN to report topology for
    :type dsnInfo: odbc.DSNInfo
    :return: vector which include all osh's for created topology; empty
        vector when the node IP cannot be resolved
    :rtype: appilog.common.system.types.vectors.ObjectStateHolderVector
    """
    vector = ObjectStateHolderVector()
    try:
        database_pdo = self.__builder.buildDatabaseServerPdo(dsnInfo)
        platform = db_platform.findPlatformBySignature(dsnInfo.driver)
        builder = db_builder.getBuilderByPlatform(platform)
        db_reporter = db.getReporter(platform, builder)
        host_descriptor = self.__builder.buildHostDescriptor(dsnInfo.address)
        # drop the host name -- it may be an alias rather than the real name
        host_descriptor = host_descriptor._replace(name=None)
        reporter = host_topology.Reporter()
        host_osh, ip_oshs, oshs = reporter.report_host(host_descriptor)
        server_osh, ipseOsh, database_oshs, topology = db_reporter.reportServerWithDatabases(database_pdo, host_osh, [dependentObject])
        vector.addAll(topology)
        vector.addAll(oshs)
        return vector
    except dns_resolver.ResolveException:
        logger.reportWarning("Cannot resolve ip of node")
        return ObjectStateHolderVector()
def __getOdbcItems(self, shell, scope, root, path): """ :param root: Root of registry in where discovery will be performed :param scope: scope of ODBC information (ScopeEnum.SYSTEM or ScopeEnum.USER) :param path: ODBC DataSources root path :type root: regutils.RegistryFolder :type path: str :type scope: str :return: map[str, DSNInfo] """ builder = self._provider.getBuilder(root, path) items = self._agent.execQuery(builder) result = [] for item in items: values = item.getAsDict() for name in values.keys(): driverName = values[name] odbc_entries = [] try: discoverer = self.getDiscoverer(shell, driverName) discoverer.discover(name, driverName, scope, shell) odbc_entries.append(discoverer.getInfo()) except NotImplementedError, ex: logger.debugException(str(ex), ex) except Exception, ex: logger.debugException(str(ex), ex) logger.reportWarning( "Cannot discover some of ODBC information") result.extend(odbc_entries)
def DiscoveryMain(Framework):
    """Discover firewall configuration from the destination device.

    Picks a vendor-specific discoverer for the triggered CI and reports
    the firewall topology; any failure is reported as a job error.
    """
    OSHVResult = ObjectStateHolderVector()
    # BUG FIX: 'client' was unbound in the 'finally' clause when
    # Framework.createClient() itself raised -> NameError masked the real error
    client = None
    try:
        client = Framework.createClient()
        vendor = Framework.getDestinationAttribute("discovered_vendor")
        hostId = Framework.getDestinationAttribute("hostId")
        host_osh = modeling.createOshByCmdbId("firewall", hostId)
        discoverer = firewall_discoverer.getDiscoverer(vendor, client)
        if not discoverer:
            raise ValueError("Unsupported device.")
        firewall_config = discoverer.discover()
        OSHVResult.addAll(firewall.reportTopology(firewall_config, host_osh))
    except:
        import sys
        logger.debugException("")
        error_str = str(sys.exc_info()[1]).strip()
        logger.reportError(error_str)
    finally:
        client and client.close()
    ## Write implementation to return new result CIs here...
    if OSHVResult.size() == 0:
        logger.debug("No data discovered from destination.")
        logger.reportWarning("No data discovered from destination.")
    return OSHVResult
def process(self, context, filter=DEFAULT_FILTER):
    """Instantiate and run every accepted plug-in against the given context.

    :param context: discovery context handed to each plug-in
    :param filter: plug-in filter; defaults to DEFAULT_FILTER
    PluginUncheckedException aborts the whole chain; any other failure is
    logged/reported as a warning and the chain continues.
    """
    acceptedPlugins = filter.filterPlugins(self.idToPluginDescriptor.values())
    logger.debug("Accepted plugins in chain: %d" % len(acceptedPlugins))
    for pluginDescriptor in acceptedPlugins:
        pluginId = pluginDescriptor.getId()
        logger.debug("Executing plug-in with ID '%s'" % pluginId)
        try:
            plugin = self.__instantiatePlugin(pluginDescriptor)
            if plugin:
                if plugin.isApplicable(context):
                    plugin.process(context)
                else:
                    logger.debug("Plug-in with ID '%s' is not applicable" % pluginId)
            else:
                logger.warn("Failed to instantiate plug-in with ID '%s'" % pluginId)
                logger.reportWarning("Failed to instantiate plug-in")
        except PluginUncheckedException, ex:
            # unchecked plug-in exceptions must propagate to the caller
            raise ex.__class__(ex)
        except (Exception, JavaException), e:
            logger.warnException("Exception during processing of plug-in with ID '%s'\n" % pluginId)
            # Java exceptions expose the message via getMessage()
            if isinstance(e, JavaException):
                msg = e.getMessage()
            else:
                msg = e.message
            logger.reportWarning("Exception during processing of plug-in:%s" % msg)
def resolveOsh(self, key):
    """
    Resolves OSH in inner dictionary.
    Uses two types of the keys:
    1) sheetName index - 'normal' keys to uniquely identify imported OSH
    2) %key1|key2|key3% - used for the backward compatibility.
    This approach is a bit buggy because class name isn't used, a few OSH's can
    collide here. !AVOID such usage. Will be deprecated in further versions.
    Returns None when nothing matches.
    """
    key = self.getOshKeyFromFormula(key)
    osh = self.keysToOsh.get(key)
    if osh is None:
        #TODO: this part is left for backward compatibility
        #this code allows to refer CIs by single key attribute and should be deprecated.
        #Last CI which have this key (if more than one occurs) will be referenced.
        #@see: fillOshDict method
        if self.oshByCompositeKeysCount.has_key(key):
            count = self.oshByCompositeKeysCount[key]
            if count > 1:
                logger.warn('More than one OSH can be referenced by key %s. OSH count: %i' % (key, count))
                logger.warn('Use last OSH')
                logger.reportWarning('More than one OSH can be referenced by key; use Excel references to avoid the problem.')
            osh = self.oshByCompositeKeys.get(key)
        else:
            logger.debug('unable to resolve OSH using %s' % key)
    return osh
def DiscoveryMain(Framework):
    """Ping all live IPs in the network derived from the triggered net address/mask.

    Job parameters configure ICMP timeout/retry/protocol/thread pool;
    'byScopeDiscover' restricts pinging to the probe's configured ranges.
    """
    properties = Properties()
    properties.setProperty('timeoutDiscover', Framework.getParameter('timeoutDiscover'))
    properties.setProperty('retryDiscover', Framework.getParameter('retryDiscover'))
    properties.setProperty('pingProtocol', Framework.getParameter('pingProtocol'))
    properties.setProperty('threadPoolSize', Framework.getParameter('threadPoolSize'))
    virtualMode = Framework.getParameter('virtualModeDiscover').lower() == "true"
    byRangeFlag = Framework.getParameter("byScopeDiscover").lower() == "true"
    netAddress = Framework.getDestinationAttribute("netAddress")
    netMask = Framework.getDestinationAttribute("netMask")
    probeName = Framework.getDestinationAttribute("probeName")
    # global setting: whether client-type IPs should be skipped
    ignoreClientType = getGlobalSetting().getPropertyStringValue('pingClientTypeIp', "False").lower() == "false"
    try:
        client = Framework.createClient(ClientsConsts.ICMP_PROTOCOL_NAME, properties)
        try:
            ipRange = getRangeByNetwork(netAddress, netMask)
            if byRangeFlag:
                # intersect the network with the probe's configured ranges
                rangesList = icmp_utils.getProbeRanges([ipRange], probeName, Framework)
            else:
                rangesList = [ipRange]
            logger.info('Start working on range: ', len(rangesList))
            totalReportedIps = 0
            for aRange in rangesList:
                totalReportedIps += icmp_utils.pingIPsInRange(Framework, client, aRange, virtualMode, netAddress, netMask, ignoreClientType=ignoreClientType)
                # persist progress so an interrupted job can resume from here
                Framework.saveState(aRange.toRangeString())
            logger.debug('Total reported IPs %s ' % totalReportedIps)
            logger.info('Finished working on all ranges..')
            Framework.clearState()
            if not totalReportedIps:
                logger.reportWarning("No live DataCenter IPs found in probe ranges")
        finally:
            client.close()
    except:
        msg = logger.prepareJythonStackTrace('')
        errormessages.resolveAndReport(msg, ClientsConsts.ICMP_PROTOCOL_NAME, Framework)
    return ObjectStateHolderVector()
def processWorkbook (self):
    """
    Entry point to the importer
    -> OSHVResult
    """
    numSheets = self.workbook.getNumberOfSheets()
    #get all sheets
    allSheets = [self.workbook.getSheetAt(i) for i in xrange(numSheets)]
    #get sheet processors to all non-comment sheets
    allSheetProcessors = [SheetProcessor(sheet) for sheet in allSheets if not sheet.getSheetName().startswith(self.__COMMENT_START)]
    #list of OSH sheets
    oshSheetProcessors = [proc for proc in allSheetProcessors if not proc.getSheetName().startswith(self.__RELATIONSHIPS_START)]
    #list of relationships sheets
    realtionSheetProcessors = [proc for proc in allSheetProcessors if proc.getSheetName().startswith(self.__RELATIONSHIPS_START)]
    logger.info ('\t------------------------------------------------------------------')
    logger.info ('\tProcess all tabs and create OSH from them; Root containers and links will be created lately')
    logger.info ('\t------------------------------------------------------------------')
    # pass 1: build OSHes from every OSH sheet; a broken sheet is skipped, not fatal
    for processor in oshSheetProcessors:
        worksheetName = processor.getSheetName()
        logger.debug('Processing worksheet: %s' % worksheetName)
        try:
            colNames = processor.getSheetColumnNames()
            attrDefs = self.classModelUtils.getTypeDefs(worksheetName)
            xlsutils.validateSheet(colNames, attrDefs, worksheetName)
            self.fillOshDict(processor, attrDefs)
        except:
            logger.debugException('')
            logger.reportWarning('Skipping worksheet "%s" due to errors found' %(worksheetName))
    logger.info ('\t------------------------------------------------------------------')
    logger.info ('\tProcess root containers from early created OSHes')
    logger.info ('\t------------------------------------------------------------------')
    # pass 2: wire up root_container references between the created OSHes
    for processor in oshSheetProcessors:
        worksheetName = processor.getSheetName()
        logger.debug('Processing references of worksheet: %s' % worksheetName)
        self.processReferences(processor)
    logger.info ('\t------------------------------------------------------------------')
    logger.info ('\tReport OSH to vector')
    logger.info ('\t------------------------------------------------------------------')
    OSHVResult = ObjectStateHolderVector()
    # pass 3: report only OSHes that carry all key attributes
    for key, osh in self.keysToOsh.items():
        if self.classModelUtils.isValidOsh(osh):
            OSHVResult.add (osh)
        else:
            logger.warn('OSH at %s does not have all key attributes' % key)
            logger.reportWarning("Imported file doesn't contain mapping for key attribute")
    logger.debug('reported %i objects' % OSHVResult.size())
    logger.info ('\t------------------------------------------------------------------')
    logger.info ('\tProcess relationships')
    logger.info ('\t------------------------------------------------------------------')
    # pass 4: relationships tabs produce link OSHes
    for processor in realtionSheetProcessors:
        linksVec = self.processRelationshipTab(processor)
        OSHVResult.addAll(linksVec)
    if not len(realtionSheetProcessors):
        logger.info ('\tNo relationships tab was found for processing')
    return OSHVResult
def fillOshDict(self, processor, attrDefs):
    """
    Sets OSHs attributes and stores them in the inner dictionaries.
    SheetProcessor, {str: AttrTypeDef} -> None
    """
    sheetName = processor.getSheetName()
    for rowNum in xrange(1, processor.getLastRowNum()+1):
        # one OSH per data row; the sheet name is the CI type
        osh = ObjectStateHolder(sheetName)
        reference = self.getOshKey(sheetName, rowNum)
        compositeKey = None
        for colNum in xrange(processor.getLastColNum(0)):
            attrName = processor.getCellValueAt(0, colNum)
            if not attrName:
                logger.debug('Column doesn\'t have the header representing an attribute name')
                continue
            # comment columns and root_container references are handled elsewhere
            if attrName.startswith(self.__COMMENT_START) or attrName == 'root_container':
                continue
            try:
                attrValue = processor.getCellValueAt(rowNum, colNum)
                if attrValue and ((type(attrValue) == type(' ')) or (type(attrValue) == type(u' '))):
                    attrValue = attrValue.strip()
                if attrValue or self.set_empty_value:
                    self.classModelUtils.setCiAttribute(osh, attrName, attrValue, attrDefs[attrName].attrType)
                #TODO: this part is left for backward compatibility
                #this code allows to refer CIs by composition of key attributes
                #but without class name and should be deprecated
                #@see: resolveOsh method
                if attrDefs[attrName].isId:
                    if compositeKey:
                        if type(attrValue) in [type(u''), type('')]:
                            compositeKey += self.__KEY_DELIMITER + attrValue
                        else:
                            compositeKey += self.__KEY_DELIMITER + str(attrValue)
                    else:
                        compositeKey = attrValue
            except Exception, e:
                logger.debugException(reference)
                logger.reportWarning(e.args[0])
        # a row that produced no attributes at all is skipped entirely
        if osh.getAttributeAll().size() == 0:
            continue
        #prepare references to object in view like 'key1|key2|key3' -> OSH
        if compositeKey:
            if self.oshByCompositeKeys.has_key(compositeKey):
                logger.reportWarning('Object references are overlapped. Not all relations can be assigned')
                logger.warn("%s %i: Object reference '%s' is overlapped. It won't be accessible. " % (sheetName, rowNum, compositeKey))
                self.oshByCompositeKeysCount[compositeKey] = self.oshByCompositeKeysCount[compositeKey] + 1
                self.oshByCompositeKeys[compositeKey] = osh
            else:
                self.oshByCompositeKeys[compositeKey] = osh
                self.oshByCompositeKeysCount[compositeKey] = 1
        #prepare object reference in form 'sheetName rowNumber' to uniquely identify OSH
        if self.keysToOsh.has_key(reference):
            raise Exception, 'This can only happen if references map was modified outside'
        else:
            self.keysToOsh[reference] = osh
def createShellObj(shell, client, ip, langBund, language, codePage, arpMac=None, connectedShellCredId=None):
    """Build the shell OSH for a connected client.

    Shell, Client, str, langBundle, str, str -> ObjectStateHolder
    """
    # make sure that 'ip' is an ip and not a dns name
    # the reason is to make application_ip attribute hold an ip and not a dns name,
    # hence, when the application will be a trigger it will find the probe
    clientType = shell.getClientType()
    if clientType == ClientsConsts.NTCMD_PROTOCOL_NAME:
        clientType = "ntcmd"
    logger.debug('creating object for obj_name=%s' % clientType)
    ipObj = ip
    if ip_addr.isValidIpAddress(ip):
        ipObj = ip_addr.IPAddress(ip)
    else:
        # maybe it's a hostname?
        hostname = ip
        try:
            ips = SocketDnsResolver().resolve_ips(hostname)
            ipObj = ips[0]
        except ResolveException:
            # BUG FIX: the missing separator glued the hostname to the message
            # ("...hostnamemyhost")
            logger.reportWarning('Could not resolve hostname ' + hostname)
            ipObj = ip
    shellOsh = ObjectStateHolder(clientType)
    shellOsh.setAttribute('application_ip', str(ipObj))
    shellOsh.setAttribute('data_name', clientType)
    # NTCMD has no notion of a connection port
    if clientType != "ntcmd":
        shellOsh.setAttribute('application_port', shell.getPort())
    shellOsh.setContainer(modeling.createHostOSH(str(ipObj)))
    # UDA client has a property of version, it should be reported
    if clientType == ClientsConsts.DDM_AGENT_PROTOCOL_NAME:
        shellOsh.setAttribute('version', client.getVersion())
    if (language):
        shellOsh.setAttribute('language', language)
    if (codePage):
        shellOsh.setAttribute('codepage', codePage)
    shellOsh.setAttribute('credentials_id', shell.getCredentialId())
    if arpMac:
        shellOsh.setAttribute(TopologyConstants.ATTR_APPLICATION_ARP_MAC, arpMac)
    if connectedShellCredId:
        shellOsh.setAttribute(TopologyConstants.ATTR_CONN_OS_CRED_ID, connectedShellCredId)
    return shellOsh
def collectData(self,sqlServerId):
    """Collect replication publishers for the given SQL server.

    Always returns an ObjectStateHolderVector; on failure the (possibly
    empty) vector is still returned and a warning is reported.
    """
    # BUG FIX: create the vector outside the try block so 'oshv' is always
    # bound when the final return executes
    oshv = ObjectStateHolderVector()
    try:
        self.getPublishers(oshv,sqlServerId)
    except:
        logger.errorException("couldnt get cluster configuration for server: ", sqlServerId.toString())
        logger.reportWarning()
    return oshv
def discoverPhysicalVolumes(shell):
    """Run 'lspv' on the remote host to list physical volumes.

    Failures are reported as a warning and logged.
    NOTE(review): in this view the success path does not parse `output`
    into physVolumeDict -- parsing presumably follows in the full source;
    confirm before relying on the return value.
    """
    physVolumeDict = {}
    output = None # modified by Daniel La 29/03/2012 use to be output = ''
    try:
        output = ibm_hmc_lib.executeCommand(shell, 'lspv')
    except ValueError, ex:
        logger.reportWarning('Failed to discover Physical Volumes')
        logger.warn(str(ex))
def discoverPhysicalVolumes(shell):
    """Run 'lspv' on the remote host to list physical volumes.

    Failures are reported as a warning and logged.
    NOTE(review): in this view the success path does not parse `output`
    into physVolumeDict -- parsing presumably follows in the full source;
    confirm before relying on the return value.
    """
    physVolumeDict = {}
    output = ""
    try:
        output = ibm_hmc_lib.executeCommand(shell, "lspv")
    except ValueError, ex:
        logger.reportWarning("Failed to discover Physical Volumes")
        logger.warn(str(ex))
def discoverPhysicalVolumes(shell):
    """Run 'lspv' on the remote host to list physical volumes.

    Failures are reported as a warning and logged.
    NOTE(review): in this view the success path does not parse `output`
    into physVolumeDict -- parsing presumably follows in the full source;
    confirm before relying on the return value.
    """
    physVolumeDict = {}
    output = ''
    try:
        output = ibm_hmc_lib.executeCommand(shell, 'lspv')
    except ValueError, ex:
        logger.reportWarning('Failed to discover Physical Volumes')
        logger.warn(str(ex))
def discoverLinuxStorageInfo(shell):
    """Run vgdisplay on a Linux host and parse its storage layout.

    Returns the three dicts produced by parseLinuxStorageInfo, or three
    empty dicts when the command cannot be run or exits non-zero.
    """
    noInfo = ({}, {}, {})
    try:
        output = shell.execAlternateCmds('/usr/sbin/vgdisplay -v', 'vgdisplay -v')
    except:
        logger.reportWarning('Failed to discover storage info.')
        return noInfo
    if not output or shell.getLastCmdReturnCode() != 0:
        return noInfo
    return parseLinuxStorageInfo(output)
def collectData(self, sqlServerId):
    """Collect replication publishers for the given SQL server.

    Always returns an ObjectStateHolderVector; on failure the (possibly
    empty) vector is still returned and a warning is reported.
    """
    # BUG FIX: create the vector outside the try block so 'oshv' is always
    # bound when the final return executes
    oshv = ObjectStateHolderVector()
    try:
        self.getPublishers(oshv, sqlServerId)
    except:
        logger.errorException("couldnt get cluster configuration for server: ", sqlServerId.toString())
        logger.reportWarning()
    return oshv
def get_web_configs(self, webconfig_path_list, shell, webservice_ext_filter):
    """Fetch IIS web.config files (and sibling web-service files) from the host.

    :param webconfig_path_list: candidate web.config paths; entries may
        contain %ENV% variables that are dereferenced on the remote shell
    :param webservice_ext_filter: extensions of web-service files to pick up
    :return: list of iis.ConfigFile objects (possibly empty)
    """
    fs = file_system.createFileSystem(shell)
    configs = []
    for webconfig_path in webconfig_path_list:
        try:
            # expand %VAR% references in the path, if any
            webconfig_path = webconfig_path.find("%") != -1 and shell_interpreter.dereference_string(shell, webconfig_path) or webconfig_path
            default_configs = map(lambda obj: obj.find("%") != -1 and shell_interpreter.dereference_string(shell, obj) or obj, self.DEFAULT_WEB_CONFIGURATION_LOCATIONS)
            # skip machine-wide default configs; only site-specific files are interesting
            if not webconfig_path in default_configs and fs.exists(webconfig_path):
                file_attr = (file_topology.BASE_FILE_ATTRIBUTES + [file_topology.FileAttrs.CONTENT, file_topology.FileAttrs.LAST_MODIFICATION_TIME])
                logger.debug("getting config file:", webconfig_path)
                resource_path = ''
                # parent folder of the config file (strip the last path element)
                match = re.match('(.*)\\\\.*', webconfig_path)
                if match:
                    resource_path = match.group(1)
                logger.debug("getting config file path:", resource_path)
                # collect web-service files living next to the config
                files = fs.getFiles(resource_path, filters = [file_system.ExtensionsFilter(webservice_ext_filter)], fileAttrs = [file_topology.FileAttrs.NAME, file_topology.FileAttrs.PATH])
                for webservicefile in files:
                    logger.debug("getting webservice file:", webservicefile.path)
                    file = fs.getFile(webservicefile.path, file_attr)
                    if file:
                        content = file.content
                        config_file = iis.ConfigFile(webservicefile.path, content, file.lastModificationTime(),)
                        configs.append(config_file)
                webconfig = fs.getFile(webconfig_path, file_attr)
                if webconfig:
                    content = webconfig.content
                    content = content.strip()
                    # some shells prepend garbage before the XML declaration
                    xmlContentStartIndex = content.find('<?xml')
                    if xmlContentStartIndex != -1:
                        content = content[xmlContentStartIndex:]
                    # Lazy intilization of old code to prevent cyclic dependencies
                    from NTCMD_IIS import WebConfig
                    content = WebConfig.replacePasswords(content)
                    db_datasources = self.get_db_datasources(content)
                    config_file = iis.ConfigFile(webconfig_path, content, webconfig.lastModificationTime(), db_datasources)
                    configs.append(config_file)
        except:
            # best-effort: one bad path must not abort the whole discovery
            logger.warn("Unable to discover %s" % webconfig_path)
            logger.debugException("")
            logger.reportWarning("Unable to discover some of config files")
    return configs
def discoverPhysScsiAndRaid(shell, physicalVolumesDict):
    """Run 'lspath' to map physical volumes to their parent SCSI adapters.

    NOTE(review): in this view scsiList/physVolumes are initialized but the
    parsing of `output` is not shown -- presumably it follows in the full
    source; confirm before relying on the return value.
    """
    scsiList = []
    physVolumes = []
    output = ""
    try:
        output = ibm_hmc_lib.executeCommand(shell, "lspath -field name parent")
        # some devices report mangled names ('isk' / 'sc si'); normalize them
        if output and re.search(r"\s+isk\d+\s+sc\s+si\d+", output, re.DOTALL):
            output = output.replace('isk', 'hdisk').replace('sc si', 'scsi')
    except ValueError, ex:
        logger.reportWarning('Failed to run lspath.')
        logger.warn(str(ex))
def __discoverInterfaces(self):
    """Discover regular and CLIM network interfaces.

    Each discovery step is attempted independently so a failure in one
    does not prevent the other; failures are logged and reported.
    """
    steps = (
        (self.__discoverRegularInterfaces, 'Failed to discover interfaces', "Failed to discover network interfaces"),
        (self.__discoverClimInterfaces, 'Failed to discover CLIM interfaces', "Failed to discover CLIM network interfaces"),
    )
    for discover, warnMsg, reportMsg in steps:
        try:
            discover()
        except:
            logger.warnException(warnMsg)
            logger.reportWarning(reportMsg)
def handleDiskRow(self, fileSystem, mountedOn, size, usedSize=None):
    """
    Create a file-system (disk) OSH for one row of df-style output and,
    for network mounts, link it to the remote share.

    @param usedSize: disk used size in 1K-blocks
    @param size: disk size in 1K-blocks
    """
    # one OSH per mount point; later duplicates are skipped
    if mountedOn in self.mountPointToDisk:
        logger.reportWarning("File system object already reported for the mount point; skipping new one")
        logger.warn("File system object already reported for the mount point '%s'; skipping new one (mount point: '%s'; file system: '%s')" % (mountedOn, mountedOn, fileSystem))
        return
    # non-numeric sizes (e.g. '-' for pseudo file systems) become None
    if str(size).isdigit():
        sizeInMb = _kbToMb(size)
    else:
        sizeInMb = None
    if str(usedSize).isdigit():
        usedSizeInMb = _kbToMb(usedSize)
    else:
        usedSizeInMb = None
    type_ = modeling.UNKNOWN_STORAGE_TYPE
    diskOsh = modeling.createDiskOSH(self.containerOsh, mountedOn, type_, size=sizeInMb, name=fileSystem, usedSize=usedSizeInMb)
    if diskOsh:
        self.mountPointToDisk[mountedOn] = diskOsh
        self.resultVector.add(diskOsh)
    host_reporter = host_topology.Reporter()
    resolver = dns_resolver.create(shell=self.shell)
    try:
        # network file systems look like 'host:/mount'; link them to the share
        (remoteHost, remoteMountPoint) = getRemoteHostAndMountPoint(fileSystem)
        if remoteHost and remoteMountPoint:
            # strip brackets from IPv6 literals
            if remoteHost.startswith("[") and remoteHost.endswith("]"):
                remoteHost = remoteHost[1:-1]
            # cache remote host OSHes so each host is reported only once
            host_osh = self.remoteHosts.get(remoteHost)
            if not host_osh:
                host = host_base_parser.parse_from_address(remoteHost, resolver.resolve_ips)
                # do not report hostname as it may be alias
                host_osh, _, oshs = host_reporter.report_host_with_ips(host.ips)
                self.remoteHosts[remoteHost] = host_osh
                self.resultVector.addAll(oshs)
            remoteShareOsh = ObjectStateHolder("networkshare")
            remoteShareOsh.setContainer(host_osh)
            remoteShareOsh.setStringAttribute("data_name", remoteMountPoint)
            remoteShareOsh.setStringAttribute("share_path", remoteMountPoint)
            self.resultVector.add(remoteShareOsh)
            self.resultVector.add(modeling.createLinkOSH("realization", remoteShareOsh, diskOsh))
    except:
        # linking is best-effort; the disk OSH itself is already reported
        stackTrace = logger.prepareFullStackTrace("Failed to link disk to the remote share.")
        logger.warn(stackTrace)
def getSapSystemName(sapUtils):
    r'@types: saputils.SapUtils -> str'
    # the system name lives in the first cell of the sites table
    table = sapUtils.getSites()
    hasData = table and table.getRowCount() > 0 and table.getColumnCount() > 0
    if hasData:
        return table.getCell(0, 0)
    logger.reportWarning('Failed to get SAP System name since empty tree is returned.')
    logger.warn('Failed to get SAP System name since empty tree is returned.')
    logger.warn('''The discovery process could not proceed, it may be casued by missing values of the attributes: monitorSetName, monitor name requested by function "BAPI_SYSTEM_MON_GETTREE" are not configured in the CCMS monitoring tree on the server.''')
    return None
def discoverLinuxVScsiAdapters(shell):
    """Discover virtual SCSI adapters on a Linux VIO client via 'lsvio -s'.

    Runs vpdupdate first to refresh the VPD database. Returns a list of
    adapters; [] on any failure.
    """
    try:
        output = shell.execAlternateCmds('/usr/sbin/vpdupdate', 'vpdupdate')
    except:
        logger.reportWarning('Failed running vpdupdate.')
        return []
    if shell.getLastCmdReturnCode() != 0:
        logger.reportWarning('Failed running vpdupdate.')
        return []
    output = shell.execAlternateCmds('/usr/sbin/lsvio -s', 'lsvio -s')
    if output and shell.getLastCmdReturnCode() == 0:
        return parseLinuxVScsiAdapters(output)
    # BUG FIX: previously fell off the end and returned None on lsvio
    # failure; keep the failure contract consistent with the other exits
    return []
def DiscoveryMain(Framework):
    """Job entry point: ICMP-pings every IP range assigned to this probe and
    reports the live IPs.

    Ranges whose size exceeds the 'maxPingIPv6CountPerRange' global setting
    are skipped with a warning. Progress is checkpointed per range via
    Framework.saveState() and cleared once all ranges are processed.
    """
    # ICMP client tuning is taken straight from the job parameters.
    properties = Properties()
    properties.setProperty('timeoutDiscover', Framework.getParameter('timeoutDiscover'))
    properties.setProperty('retryDiscover', Framework.getParameter('retryDiscover'))
    properties.setProperty('pingProtocol', Framework.getParameter('pingProtocol'))
    properties.setProperty('threadPoolSize', Framework.getParameter('threadPoolSize'))
    excludePatterns = icmp_utils.preparePatterns(Framework.getParameter('excludePatternsList'))
    virtualMode = Framework.getParameter('virtualModeDiscover').lower() == "true"
    rangeString = Framework.getParameter('range') or 'NA'
    probeName = Framework.getDestinationAttribute('probeName')
    # 'pingClientTypeIp' == "False" (the default) means client-type IPs are ignored.
    ignoreClientType = getGlobalSetting().getPropertyStringValue('pingClientTypeIp', "False").lower() == "false"
    maxAllowedIPv6CountPerRange = long(getGlobalSetting().getPropertyStringValue('maxPingIPv6CountPerRange', str(DEFAULT_MAX_PING_IPV6_COUNT_PER_RANGE)))
    logger.debug("Max allowed IPv6 range size:", maxAllowedIPv6CountPerRange)
    isPingIPv4, isPingIPv6 = getIPSupport(Framework)
    try:
        client = Framework.createClient(ClientsConsts.ICMP_PROTOCOL_NAME, properties)
        try:
            totalReportedIps = 0
            selectedRangeList = _convertToRanges(rangeString)
            probeRanges = icmp_utils.getProbeRanges(selectedRangeList, probeName, Framework, isPingIPv4, isPingIPv6)
            logger.info('Start working on total probe ranges: ', len(probeRanges))
            logger.info('ignoreClientType = ', ignoreClientType)
            #probeRanges = getDataCenterIPRanges(probeRanges)
            for probeRange in probeRanges:
                # Guard against huge (typically IPv6) ranges that would never finish.
                rangeSize = long(probeRange.getRangeSize())
                if rangeSize > maxAllowedIPv6CountPerRange:
                    logger.reportWarning(
                        "The size of IPv6 range (%s) is %d, exceeds the max range size %d, will skip it."
                        % (probeRange.toRangeString(), rangeSize, maxAllowedIPv6CountPerRange))
                    continue
                totalReportedIps += icmp_utils.pingIPsInRange(Framework, client, probeRange, virtualMode, excludePatterns=excludePatterns, ignoreClientType=ignoreClientType)
                # Checkpoint: remember the last fully processed range for resume.
                Framework.saveState(probeRange.toRangeString())
            logger.debug('Total reported IPs %s ' % totalReportedIps)
            logger.info('Finished working on all Probes Ranges..')
            Framework.clearState()
            if not totalReportedIps:
                logger.reportWarning("No live IPs found in probe ranges")
        finally:
            client.close()
    except:
        msg = logger.prepareJythonStackTrace('')
        errormessages.resolveAndReport(msg, ClientsConsts.ICMP_PROTOCOL_NAME, Framework)
    return ObjectStateHolderVector()
def DiscoveryMain(Framework): rangeString = Framework.getParameter('range') probeName = Framework.getDestinationAttribute('probeName') nmapLocation = Framework.getParameter('nmap_location') or None protocol = Framework.getDestinationAttribute('Protocol') try: excludePatterns = icmp_utils.preparePatterns(Framework.getParameter('excludePatternsList')) client = Framework.createClient(ClientsConsts.LOCAL_SHELL_PROTOCOL_NAME) shell = ShellFactory().createShell(client) fs = file_system.createFileSystem(shell) try: if nmapLocation and fs.isDirectory(nmapLocation): path_tool = NtPath() nmapLocation = path_tool.join(nmapLocation, nmap.NMAP_EXECUTABLES[1]) except PathNotFoundException: logger.warn("Specified directory \"%s\" is not exists." % nmapLocation) if nmapLocation and not nmap.NmapPathValidator.get(fs).validate(nmapLocation): logger.warn("Specified Nmap path \"%s\" is not exists. Trying the system path..." % nmapLocation) nmapLocation = None nmapTool = nmap.getByShell(shell, nmapLocation) if not nmapTool.getVersion(): logger.reportWarning("NMAP command is not installed on the probe machine") return ObjectStateHolderVector() probeRanges = _buildProbeRanges(Framework, rangeString, probeName) logger.info('Start working on total probe ranges: ', len(probeRanges)) for probeRange in probeRanges: logger.debug("Start working on range ", probeRange.toRangeString()) rangeIps = probeRange.getAllIPs(probeRange.getTotalIPs()) byExcludePatterns = lambda ip, patterns = excludePatterns: icmp_utils.shouldPingIp(ip, patterns, None) filteredIps = filter(byExcludePatterns, rangeIps) excludedIpCount = len(rangeIps) - len(filteredIps) if excludedIpCount: logger.debug("Excluded IP's count: %s " % excludedIpCount) try: liveIps = nmapTool.doPingScan(filteredIps, issubclass(probeRange.__class__, IPv6Range) or issubclass(probeRange.__class__, IPv6RangeWIthDescription)) except Exception, ex: logger.warn(str(ex)) else: if liveIps: Framework.sendObjects(_reportIpOSHs(liveIps)) 
logger.info('Finished working on all Probes Ranges')
def createShellObj(shell, client, ip, langBund, language, codePage, arpMac=None, connectedShellCredId=None):
    """Build the shell OSH (ssh/telnet/ntcmd/uda) for a connected client.

    'Shell, str, langBundle, str, str -> osh'
    Makes sure 'ip' is an IP and not a DNS name, so the application_ip
    attribute holds an IP; when the application later becomes a trigger it
    will then find the probe.
    """
    clientType = shell.getClientType()
    if clientType == ClientsConsts.NTCMD_PROTOCOL_NAME:
        clientType = "ntcmd"
    logger.debug('creating object for obj_name=%s' % clientType)
    ipObj = ip
    if ip_addr.isValidIpAddress(ip):
        ipObj = ip_addr.IPAddress(ip)
    else:
        # maybe it's a hostname?
        hostname = ip
        try:
            ips = SocketDnsResolver().resolve_ips(hostname)
            ipObj = ips[0]
        except ResolveException:
            # Fix: the message previously lacked a space before the hostname
            # ("...hostnamefoo"), producing an unreadable warning.
            logger.reportWarning('Could not resolve hostname ' + hostname)
            ipObj = ip
    shellOsh = ObjectStateHolder(clientType)
    shellOsh.setAttribute('application_ip', str(ipObj))
    shellOsh.setAttribute('data_name', clientType)
    if clientType != "ntcmd":
        shellOsh.setAttribute('application_port', shell.getPort())
    shellOsh.setContainer(modeling.createHostOSH(str(ipObj)))
    # UDA client has a property of version, it should be reported
    if clientType == ClientsConsts.DDM_AGENT_PROTOCOL_NAME:
        shellOsh.setAttribute('version', client.getVersion())
    if language:
        shellOsh.setAttribute('language', language)
    if codePage:
        shellOsh.setAttribute('codepage', codePage)
    shellOsh.setAttribute('credentials_id', shell.getCredentialId())
    if arpMac:
        shellOsh.setAttribute(TopologyConstants.ATTR_APPLICATION_ARP_MAC, arpMac)
    if connectedShellCredId:
        shellOsh.setAttribute(TopologyConstants.ATTR_CONN_OS_CRED_ID, connectedShellCredId)
    return shellOsh
def _discoverSharePoint(discoverer): """ discoverer, resources -> None Provides exception handling during discovery process """ #do not catch exception here. If no ID found then discovery should be stopped farm = discoverer.getFarm() resources = SharePointResources(farm) try: for farmMember in discoverer.getFarmMembers(): try: resources.addFarmMember(farmMember) except SharePointTopologyException: logger.reportWarning() except SharePointException: logger.reportWarning() try: for webService in discoverer.getWebServices(): try: resources.addWebService(webService) except SharePointTopologyException: logger.reportWarning() except SharePointException: logger.reportWarning() return resources
def ev3_getOutQueue(ls):
    """Query the EView API for output queues (detail code '08').

    @param ls: EView API wrapper exposing evApiCmd()
    @return: list of records, each a list of '|'-separated fields; the EOF
        sentinel row is dropped. Empty list (with a reported warning) on failure.
    """
    result = []
    response = ls.evApiCmd(_CMD_GETOUT_QUEUE, '08')
    if not (response.isSuccess() and len(response.cmdResponseList) > 0):
        logger.reportWarning("Get Output Queue information command failed")
        #raise Exception, "Get Output Queue information command failed"
        return result
    for row in response.cmdResponseList:
        fields = row.split('|')
        if fields[0] != 'EOF':
            result.append(fields)
    return result
def ev3_getOutQueue(ls):
    """Fetch output-queue records from the EView API (command code '08').

    Each returned item is a list of '|'-delimited fields; the trailing EOF
    marker row is filtered out. On failure a warning is reported and an
    empty list is returned.
    """
    response = ls.evApiCmd(_CMD_GETOUT_QUEUE, '08')
    if response.isSuccess() and len(response.cmdResponseList) > 0:
        rows = [entry.split('|') for entry in response.cmdResponseList]
        return [fields for fields in rows if fields[0] != 'EOF']
    logger.reportWarning("Get Output Queue information command failed")
    #raise Exception, "Get Output Queue information command failed"
    return []
def ev5_getObjPgm(ls):
    """Query the EView API (command code '10') for all *PGM objects in all libraries.

    @param ls: EView API wrapper exposing evApiCmd()
    @return: list of records, each a list of '|'-separated fields; the EOF
        sentinel row is dropped. Empty list (with a reported warning) on failure.
    """
    pgmlist = []
    output = ls.evApiCmd(_CMD_GETOBJ_LIB, '10', '*ALL|*ALL|*PGM')
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        for line in output.cmdResponseList:
            splitline = line.split('|')
            if splitline[0] == 'EOF':
                continue
            pgmlist.append(splitline)
    else:
        # Fix: warning message had a typo ("GGet Object Program...").
        logger.reportWarning("Get Object Program information command failed")
        #raise Exception, "Get Object Program information command failed"
    return pgmlist
def ev5_getObjPgm(ls):
    """Query the EView API (command code '10') for all *PGM objects in all libraries.

    @param ls: EView API wrapper exposing evApiCmd()
    @return: list of records, each a list of '|'-separated fields; the EOF
        sentinel row is dropped. Empty list (with a reported warning) on failure.
    """
    pgmlist = []
    output = ls.evApiCmd(_CMD_GETOBJ_LIB, '10', '*ALL|*ALL|*PGM')
    if output.isSuccess() and len(output.cmdResponseList) > 0:
        for line in output.cmdResponseList:
            splitline = line.split('|')
            if splitline[0] == 'EOF':
                # EOF sentinel row - not a data record.
                continue
            pgmlist.append(splitline)
    else:
        # Fix: warning message had a typo ("GGet Object Program...").
        logger.reportWarning("Get Object Program information command failed")
        #raise Exception, "Get Object Program information command failed"
    return pgmlist
def ev1_getJobs(ls):
    """Query the EView API for jobs (command code '01').

    @param ls: EView API wrapper exposing evApiCmd()
    @return: list of records, each a list of '|'-separated fields; the EOF
        sentinel row is dropped. Empty list (with a reported warning) on failure.
    """
    response = ls.evApiCmd(_CMD_GETJOBS, '01')
    if not (response.isSuccess() and len(response.cmdResponseList) > 0):
        logger.reportWarning("Get Jobs information command failed")
        #raise Exception, "Get Jobs Queue information command failed"
        return []
    jobs = []
    for record in response.cmdResponseList:
        fields = record.split('|')
        if fields[0] != 'EOF':
            jobs.append(fields)
    return jobs
def ev1_getJobs(ls):
    """Fetch job records from the EView API (command code '01').

    Each returned item is a list of '|'-delimited fields; the trailing EOF
    marker row is filtered out. On failure a warning is reported and an
    empty list is returned.
    """
    response = ls.evApiCmd(_CMD_GETJOBS, '01')
    if response.isSuccess() and len(response.cmdResponseList) > 0:
        parsed = [row.split('|') for row in response.cmdResponseList]
        return [fields for fields in parsed if fields[0] != 'EOF']
    logger.reportWarning("Get Jobs information command failed")
    #raise Exception, "Get Jobs Queue information command failed"
    return []
def discoverLinuxVScsiAdapters(shell):
    """Discover virtual SCSI adapters on a Linux VIO client.

    Refreshes the VPD database via 'vpdupdate' (tried from /usr/sbin, /sbin,
    then PATH), then lists virtual I/O devices with 'lsvio -s'.
    @param shell: shell wrapper exposing execAlternateCmds/getLastCmdReturnCode
    @return: list of parsed adapters; empty list on any failure
    """
    try:
        output = shell.execAlternateCmds('/usr/sbin/vpdupdate', '/sbin/vpdupdate', 'vpdupdate')
    except:
        logger.reportWarning('Failed running vpdupdate.')
        return []
    if shell.getLastCmdReturnCode() != 0:
        logger.reportWarning('Failed running vpdupdate.')
        return []
    output = shell.execAlternateCmds('/usr/sbin/lsvio -s', '/sbin/lsvio -s', 'lsvio -s')
    if output and shell.getLastCmdReturnCode() == 0:
        return parseLinuxVScsiAdapters(output)
    # Fix: previously fell off the end returning None when lsvio failed;
    # keep the return type consistently a list.
    return []
def getNetworkingInformation(self):
    """
    Discover IBM HMC host networking information by running 'lshmc -n'.

    @return: host and ip information
    @rtype: instance of HostDo object
    @raise ValueError: re-raised when the command fails; a warning
        ('IBM HMC not detected.') is reported first
    """
    try:
        commandOutput = self.executeCommand('lshmc -n')
    except ValueError:
        # The command did not produce usable output - not an HMC.
        logger.reportWarning('IBM HMC not detected.')
        raise
    return self.parseNetworkingInformation(commandOutput)
def discoverNetworking(self):
    """
    Discover IBM FSM host networking information by running 'lsnetcfg -n'.

    @return: host and ip information
    @rtype: instance of HostDo object
    @raise ValueError: re-raised when the command fails; a warning
        ('IBM FSM not detected.') is reported first
    """
    try:
        commandOutput = self.shell.execCmd('lsnetcfg -n')
    except ValueError:
        # The command did not produce usable output - not an FSM.
        logger.reportWarning('IBM FSM not detected.')
        raise
    return self.parseNetworking(commandOutput)
def discoverNetworking(self):
    """
    Discovers basic networking information of an HP NonStop box, such as
    interfaces (including CLIM), IP addresses, and networks.
    @types: -> networking.UnixNetworking
    """
    # Interface discovery failures propagate to the caller;
    # IP discovery is best-effort and only logged/reported on failure.
    self.__discoverInterfaces()
    try:
        self.__discoverIps()
    except:
        logger.warnException('Failed to discover IPs')
        logger.reportWarning("Failed to discover IP addresses")
    return self.__getNetworking()
def getSapSystemName(sapUtils):
    r'@types: saputils.SapUtils -> str'
    # The system name is expected in cell (0, 0) of the CCMS sites table;
    # anything else is treated as an empty tree.
    table = sapUtils.getSites()
    if table and table.getRowCount() > 0 and table.getColumnCount() > 0:
        return table.getCell(0, 0)
    logger.reportWarning(
        'Failed to get SAP System name since empty tree is returned.')
    logger.warn(
        'Failed to get SAP System name since empty tree is returned.')
    logger.warn(
        '''The discovery process could not proceed, it may be casued by missing values of the attributes: monitorSetName, monitor name requested by function "BAPI_SYSTEM_MON_GETTREE" are not configured in the CCMS monitoring tree on the server.''')
    return None
def addOshToVector(self, resultVector):
    """Build the MS NLB cluster topology and add it to resultVector.

    Reports: the 'ms_nlb_cluster' OSH, a 'cluster_resource_group' host for
    the clustered server, 'contained' links tying cluster/server/IP/host
    together, and the cluster config document (self.config).
    Side effect: stores the cluster OSH in self.nlbClusterOSH.
    """
    osh = ObjectStateHolder('ms_nlb_cluster')
    # props holds the NLB cluster properties collected earlier (WMI/registry
    # key names such as 'ClusterIPAddress') - see where self.props is filled.
    props = self.props
    osh.setAttribute('vendor', 'microsoft_corp')
    osh.setAttribute('cluster_ip_address', props['ClusterIPAddress'])
    osh.setAttribute('cluster_network_mask', props['ClusterNetworkMask'])
    osh.setAttribute('mcast_ip_address', props['McastIPAddress'])
    osh.setAttribute('cluster_domain_name', props['ClusterName'])
    #have to be transformed as MAC address
    clusterNetworkAddress = props['ClusterNetworkAddress']
    if netutils.isValidMac(clusterNetworkAddress):
        clusterNetworkAddress = netutils.parseMac(clusterNetworkAddress)
    else:
        # Keep the raw value but warn; reportWarning gets a constant message
        # so identical warnings aggregate in the UI.
        msg = 'Invalid network address %s' % str(clusterNetworkAddress)
        logger.reportWarning('Invalid network address')
        logger.warn(msg)
    osh.setAttribute('cluster_network_address', clusterNetworkAddress)
    osh.setBoolAttribute('ip_to_mac_enable', resolveBoolean(props['IPToMACEnable']))
    osh.setBoolAttribute('multicast_support_enable', resolveBoolean(props['MulticastSupportEnable']))
    osh.setBoolAttribute('igmp_support', resolveBoolean(props['IGMPSupport']))
    # 'RemoteControlEnabled' may be absent - accessed with .get(), unlike the rest.
    osh.setBoolAttribute('remote_control_enabled', resolveBoolean(props.get('RemoteControlEnabled')))
    osh.setAttribute('data_name', 'MS NLB Cluster')
    resultVector.add(osh)
    # The clustered server is keyed by the cluster's (MAC) network address.
    clusteredServer = modeling.createCompleteHostOSH(
        'cluster_resource_group', clusterNetworkAddress)
    clusteredServer.setStringAttribute('name', props['ClusterName'])
    resultVector.add(clusteredServer)
    resultVector.add(
        modeling.createLinkOSH('contained', osh, clusteredServer))
    resultVector.add(self.clusterIpOSH)
    resultVector.add(
        modeling.createLinkOSH('contained', clusteredServer, self.clusterIpOSH))
    resultVector.add(self.hostOSH)
    resultVector.add(
        modeling.createLinkOSH('contained', self.hostOSH, self.clusterIpOSH))
    self.config.setContainer(osh)
    resultVector.add(self.config)
    # Expose the cluster OSH for later linking by the caller.
    self.nlbClusterOSH = osh
def __discoverClimInterface(self, climName):
    """Discover network interfaces and IPs of a single CLIM.

    Runs 'ifconfig -a' on the CLIM through gtacl/climcmd, parses interfaces
    (building alias-parent relations) and then IP/netmask pairs, and feeds
    them into self.__networking.
    @types: string -> None
    @raise ValueError: when command "gtacl -cv "climcmd %s /sbin/ifconfig -a" % <clim_name>"
        gives no output or fails
    """
    cmd = "climcmd %s /sbin/ifconfig -a" % climName
    cmdOutput = self.__shell.execCmd('gtacl -cv "%s"' % cmd)
    if not cmdOutput or self.__shell.getLastCmdReturnCode() != 0:
        raise ValueError('Failed to get CLIM')
    # gtacl echoes the command; everything after the echo is ifconfig output.
    (header, interfaceData) = cmdOutput.split(cmd)
    if header and interfaceData:
        interfacesByName = {}
        # Pass 1: interfaces with MACs; alias interfaces are linked to parents.
        matches = ShellDiscoverer.__INTERFACE_REGEXP.findall(interfaceData)
        for match in matches:
            name = match[0]
            # Interface names are qualified with the CLIM name to stay unique
            # across CLIMs.
            uniqueName = "%s.%s" % (climName, match[0])
            mac = match[1]
            if netutils.isValidMac(mac):
                interface = Interface(netutils.parseMac(mac), uniqueName)
                parentInterfaceName = self.__getParentInterfaceName(name)
                if parentInterfaceName and interfacesByName.has_key(parentInterfaceName):
                    parentInterface = interfacesByName[parentInterfaceName]
                    aliasRole = AliasRole()
                    aliasRole.parentInterface = parentInterface
                    interface._addRole(aliasRole)
                self.__networking.addInterface(interface)
                interfacesByName[name] = interface
        # Pass 2: IP/netmask pairs, attached to their interface when known.
        matches = ShellDiscoverer.__INTERFACE_AND_IP_REGEXP.findall(interfaceData)
        for match in matches:
            name = match[0]
            ip = match[2]
            netmask = match[4]
            if netutils.isValidIp(ip) and netutils.isValidIp(netmask):
                if interfacesByName.has_key(name):
                    interface = interfacesByName[name]
                    self.__networking.addIpAndNetwork(ip, netmask, interface.name)
                else:
                    self.__networking.addIpAndNetwork(ip, netmask)
    else:
        logger.warn('Unrecognized output')
        logger.reportWarning("Failed to discover CLIM network interfaces")
def _discover_sap_system(sapUtils, systemName): '@types: SapUtils, str -> System' sapSystem = sap.System(systemName) systemBuilder = sap.Builder() sapReporter = sap.Reporter(systemBuilder) systemOsh = sapReporter.reportSystem(sapSystem) # x) sap system version is represented by master component version discoverer = sap_abap_discoverer.SoftwareComponentDiscovererByJco(sapUtils) try: masterCmpVersion = discoverer.discoveryMasterComponentVersionDescription() except: logger.reportWarning('Failed to discover master component version') else: # x) report SAP master component version if masterCmpVersion: systemOsh = sapReporter.reportSystemMasterComponentVersion(systemOsh, masterCmpVersion) return systemOsh