def discover(self):
    protocol = self.framework.getDestinationAttribute('Protocol')
    try:
        captureProcessInformation = self.framework.getParameter('CaptureProcessInformation')
        numberOfTCPSnapshots = int(self.framework.getParameter('NumberOfTCPSnapshots'))
        delayBetweenTCPSnapshots = float(self.framework.getParameter('DelayBetweenTCPSnapshots'))
    except:
        logger.error(logger.prepareFullStackTrace(''))
        raise ValueError("Job parameters are invalid")
    else:
        if numberOfTCPSnapshots < 1 or delayBetweenTCPSnapshots <= 0:
            raise ValueError("Job parameters are invalid")
    try:
        if captureProcessInformation.lower() == 'true':
            self._discoverProcesses()
    except UnknownOSTypeException, ex:
        msg = str(ex)
        errormessages.resolveAndReport(msg, self.client.getClientType(), self.framework)
    except:
        # The body of this handler was truncated in the original excerpt; this is a
        # minimal fallback that mirrors the error handling used elsewhere in this module.
        msg = logger.prepareFullStackTrace('')
        errormessages.resolveAndReport(msg, self.client.getClientType(), self.framework)

def _discoverSqlMx(client, nodeName):
    '''
    @summary: discovers SQL/MX databases using mxci interactive shell
    @param client: raw SSH client (not Shell)
    @param nodeName: the name of the host
    @return: Map<string, SqlMx> (key in this map is SQL/MX catalog UID),
             or an empty dictionary if discovery failed
    @rtype: dictionary
    '''
    catalogUidToSqlMx = {}
    try:
        sqlmxVersion = _getMxVersion(client)
        ## Set default logical schema using current node name
        __getCommandOutput(client, 'set schema nonstop_sqlmx_%s.system_schema;' % nodeName)
        ## Get list of catalogs and corresponding UIDs in this database
        catalogNameAndIdOut = __getCommandOutput(client, 'select cat_name, cat_uid from catsys;')
        ## We have catalog names and IDs
        catalogNameAndIdLines = catalogNameAndIdOut.split('\n')
        # Filter all empty lines
        catalogNameAndIdLines = [line.strip() for line in catalogNameAndIdLines if line and line.strip()]
        for catalogNameAndIdLine in catalogNameAndIdLines:
            ## Skip header lines
            if re.search('CAT_NAME', catalogNameAndIdLine, re.I):
                continue
            ## Skip separator line
            if re.match(r'\s*-+\s+-+\s?', catalogNameAndIdLine):
                continue
            ## Skip last line showing number of rows selected
            if re.search(r'row\(s\)', catalogNameAndIdLine, re.I):
                break
            ## Get catalog name and catalog UID
            m = re.match(r'(\S+)\s+(\S+)', catalogNameAndIdLine.strip())
            if m:
                catalogName = m.group(1)
                catalogUID = m.group(2)
                sqlmx = _SqlMx()
                sqlmx.catalog_uuid = catalogUID
                sqlmx.database_dbsid = catalogName
                sqlmx.version = sqlmxVersion
                catalogUidToSqlMx[catalogUID] = sqlmx
    except:
        excInfo = logger.prepareFullStackTrace('')
        logger.warn("Failed to discover SQL/MX", excInfo)
    return catalogUidToSqlMx

def handleDiskRow(self, fileSystem, mountedOn, size, usedSize=None):
    '''
    @param usedSize: disk used size in 1K-blocks
    @param size: disk size in 1K-blocks
    '''
    if mountedOn in self.mountPointToDisk:
        logger.reportWarning("File system object already reported for the mount point; skipping new one")
        logger.warn("File system object already reported for the mount point '%s'; skipping new one "
                    "(mount point: '%s'; file system: '%s')" % (mountedOn, mountedOn, fileSystem))
        return
    if str(size).isdigit():
        sizeInMb = _kbToMb(size)
    else:
        sizeInMb = None
    if str(usedSize).isdigit():
        usedSizeInMb = _kbToMb(usedSize)
    else:
        usedSizeInMb = None
    type_ = modeling.UNKNOWN_STORAGE_TYPE
    diskOsh = modeling.createDiskOSH(self.containerOsh, mountedOn, type_,
                                     size=sizeInMb, name=fileSystem, usedSize=usedSizeInMb)
    if diskOsh:
        self.mountPointToDisk[mountedOn] = diskOsh
        self.resultVector.add(diskOsh)

        host_reporter = host_topology.Reporter()
        resolver = dns_resolver.create(shell=self.shell)
        try:
            (remoteHost, remoteMountPoint) = getRemoteHostAndMountPoint(fileSystem)
            if remoteHost and remoteMountPoint:
                if remoteHost.startswith('[') and remoteHost.endswith(']'):
                    remoteHost = remoteHost[1:-1]
                host_osh = self.remoteHosts.get(remoteHost)
                if not host_osh:
                    host = host_base_parser.parse_from_address(remoteHost, resolver.resolve_ips)
                    # do not report hostname as it may be alias
                    host_osh, _, oshs = host_reporter.report_host_with_ips(host.ips)
                    self.remoteHosts[remoteHost] = host_osh
                    self.resultVector.addAll(oshs)
                remoteShareOsh = ObjectStateHolder('networkshare')
                remoteShareOsh.setContainer(host_osh)
                remoteShareOsh.setStringAttribute('data_name', remoteMountPoint)
                remoteShareOsh.setStringAttribute('share_path', remoteMountPoint)
                self.resultVector.add(remoteShareOsh)
                self.resultVector.add(modeling.createLinkOSH('realization', remoteShareOsh, diskOsh))
        except:
            stackTrace = logger.prepareFullStackTrace('Failed to link disk to the remote share.')
            logger.warn(stackTrace)

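# The handler above relies on two module-level helpers that are not part of this
# excerpt: _kbToMb() and getRemoteHostAndMountPoint(). The sketches below are
# assumptions about their behavior (1K-block input, NFS-style 'host:/path' file
# system strings), not the original implementations.
def _kbToMb(sizeInKb):
    # Convert a size expressed in 1K-blocks to megabytes
    return int(sizeInKb) / 1024.0


def getRemoteHostAndMountPoint(fileSystem):
    # Split an NFS-style file-system string such as 'server:/export/home' or
    # '[fe80::1]:/export' into (remote host, remote mount point);
    # local file systems yield (None, None)
    import re
    match = re.match(r'(.+?):(/.*)$', fileSystem or '')
    if match:
        return match.group(1), match.group(2)
    return None, None
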
def DiscoveryMain(Framework):
    '''
    Discovery process consists of two steps:
    1. Connect to a domain controller and get the whole topology.
    2. Try to connect to the same controller with the same credentials,
       but in the role of global catalog.
       2.1 The GC indexes more hierarchical data but less object-specific data,
           so not all of the data will be rediscovered.
    '''
    vector = ObjectStateHolderVector()
    ## Destination Attribute Section
    hostId = Framework.getDestinationAttribute('hostId')
    credentialsId = Framework.getDestinationAttribute('credentials_id')
    applicationPort = Framework.getDestinationAttribute("application_port")
    serviceAddressPort = Framework.getDestinationAttribute('port')
    OU_REPORTING_PARAM = 'reportOUAsConfigurationDocument'
    isOuUnitsTreeReportedAsConfig = Framework.getParameter(OU_REPORTING_PARAM)
    isOuUnitsTreeReportedAsConfig = parseBoolean(isOuUnitsTreeReportedAsConfig)
    tryToDiscoverGlobalCatalogFlag = Boolean.parseBoolean(Framework.getParameter('tryToDiscoverGlobalCatalog'))
    globalCatalogPort = Framework.getParameter('globalCatalogPort')
    if not applicationPort or applicationPort == 'NA':
        applicationPort = serviceAddressPort
    try:
        result = DiscoveryResult()
        vector.addAll(_discoverTopology(Framework, credentialsId, hostId,
                                        applicationPort, None,
                                        isOuUnitsTreeReportedAsConfig, result))
        # no reason to connect to the GC if its port is already specified in the credentials
        if (tryToDiscoverGlobalCatalogFlag
                and str(globalCatalogPort).isdigit()
                and globalCatalogPort != applicationPort):
            vector.addAll(_discoverTopology(Framework, credentialsId, hostId,
                                            globalCatalogPort,
                                            tryToDiscoverGlobalCatalogFlag,
                                            isOuUnitsTreeReportedAsConfig, result))
        dtoToOsh = result.getMap(DOMAINT_DTO_TO_CONFIG_OSH_TYPE)
        fptools.each(vector.add, dtoToOsh.values())
    except Exception, e:
        msg = 'Failure in discovering Active Directory Topology. %s' % e
        Framework.reportError(msg)
        logger.debug(logger.prepareFullStackTrace(msg))
        logger.errorException(msg)
    # The original excerpt ends inside the except block; returning the collected
    # vector is the assumed completion for this DiscoveryMain.
    return vector

def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    ipAddress = Framework.getDestinationAttribute('ip_address')
    credentialsId = Framework.getDestinationAttribute('credentials_id')
    applicationPort = Framework.getDestinationAttribute("application_port")
    serviceAddressPort = Framework.getDestinationAttribute('port')
    if not applicationPort or applicationPort == 'NA':
        applicationPort = serviceAddressPort
    envBuilder = active_directory_utils.LdapEnvironmentBuilder(applicationPort)
    client = None
    daoService = None
    try:
        try:
            client = Framework.createClient(credentialsId, envBuilder.build())
            logger.debug("Connected to AD")
            configurationNamingContext = getConfigurationNamingContext(client)
            if not configurationNamingContext:
                raise ValueError, "Failed fetching configuration naming context from Active Directory"
            daoService = ms_exchange_ad_utils.BaseExchangeDaoService(client, Framework, ipAddress)
            exchangeDiscoverer = daoService.getExchange(configurationNamingContext)
            exchangeDiscoverer.discover()
            exchangeDiscoverer.addResultsToVector(OSHVResult)
        finally:
            if client is not None:
                try:
                    client.close()
                except:
                    logger.warn("Failed to close client")
            if daoService is not None:
                daoService.close()
    except:
        msg = logger.prepareFullStackTrace('')
        logger.debug(msg)
        if msg.find('No object found for name') != -1 or msg.find('Error while fetching Microsoft Exchange node ') != -1:
            msg = "Active Directory does not hold information about Microsoft Exchange"
        errormessages.resolveAndReport(msg, ms_exchange_ad_utils.LDAP_PROTOCOL_NAME, Framework)
    return OSHVResult

def _discoverSqlMp(client):
    '''
    @summary: discovers SQL/MP
    @param client: raw SSH client (not Shell)
    @return: list of _HpDatabase objects (one per SQL/MP catalog),
             or an empty list if discovery failed
    @rtype: list
    '''
    sqlmpList = []
    try:
        sqlmpVersion = _getMpVersion(client)
        ## Get catalog file information
        fileInfoOut = __getCommandOutput(client, 'fileinfo $system.system.sqlci2, detail;')
        m = re.search(r'CATALOG\s+(\$.*)\s?', fileInfoOut)
        if m:
            catalogFileName = m.group(1).strip()
        else:
            raise ValueError, "Failed to get catalog"
        ## Get list of catalogs in this database
        catalogNameOut = __getCommandOutput(client, 'select catalogname from %s.catalogs;' % catalogFileName)
        catalogNameLines = catalogNameOut.strip().split('\n')
        catalogNameLines = [line.strip() for line in catalogNameLines if line and line.strip()]
        for catalogName in catalogNameLines:
            ## Skip last line showing number of rows selected
            if re.search(r'row\(s\)', catalogName, re.I):
                break
            ## Skip header lines
            if re.search('CATALOGNAME', catalogName, re.I):
                continue
            ## Skip separator line
            if re.match(r'\s*-+', catalogName):
                continue
            sqlmp = _HpDatabase('NonStop SQL/MP')
            sqlmp.database_dbsid = catalogName
            sqlmp.version = sqlmpVersion
            sqlmpList.append(sqlmp)
    except:
        excInfo = logger.prepareFullStackTrace('')
        logger.warn("Failed to discover SQL/MP", excInfo)
    return sqlmpList

def getMapping(mappingFileName, bmcNamespace, ucmdbServerIp):
    try:
        objectTypeList = []
        relationshipList = []

        integrationAPI = IntegrationAPI(ucmdbServerIp, SCRIPT_NAME)
        integrationObjectList = integrationAPI.getMapping(mappingFileName)
        if not integrationObjectList:
            logger.warn('Unable to retrieve a list of objects from the mapping XML!')
            return
        else:
            debugPrint(4, '[' + SCRIPT_NAME + ':getMapping] Got <%s> objects and links from mapping XML' % len(integrationObjectList))

        for integrationObject in integrationObjectList:
            attList = []
            childList = []
            parentList = []
            ## Pull attribute list
            attributeMap = integrationObject.getAttributeMap()
            attributeList = attributeMap.getAttributeList()
            for attribute in attributeList:
                attList.append(attribute)
            ## Pull child list
            childHashMap = attributeMap.getChildList()
            for childName in childHashMap.keySet():
                childList.append([childName, childHashMap[childName]])
            ## Pull parent list
            parentHashMap = attributeMap.getParentList()
            for parentName in parentHashMap.keySet():
                parentList.append([parentName, parentHashMap[parentName]])

            nameSpace = integrationObject.getNameSpace() or bmcNamespace
            if integrationObject.isLink():
                relationshipList.append(SourceLinks(integrationObject.getObjectName(),
                                                    integrationObject.getEnd1Object(),
                                                    integrationObject.getEnd2Object(),
                                                    nameSpace,
                                                    integrationObject.getQuery(),
                                                    attList))
            else:
                objectTypeList.append(SourceObjects(integrationObject.getObjectName(),
                                                    nameSpace,
                                                    integrationObject.getQuery(),
                                                    attList, childList, parentList))

        if objectTypeList:
            debugPrint(3, '[' + SCRIPT_NAME + ':getMapping] Got <%s> objects from mapping XML' % len(objectTypeList))
        if relationshipList:
            debugPrint(3, '[' + SCRIPT_NAME + ':getMapping] Got <%s> links from mapping XML' % len(relationshipList))
        return (objectTypeList, relationshipList)
    except:
        excInfo = logger.prepareFullStackTrace('')
        logger.warn('[' + SCRIPT_NAME + ':getMapping] Exception: <%s>' % excInfo)
        pass

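# Usage sketch (hypothetical caller): getMapping() returns None both when the
# mapping XML yields no objects and when parsing fails, so guard before
# unpacking the (objectTypeList, relationshipList) tuple. The helper name and
# the mapping file name below are illustrative, not part of the original code.
def _loadMapping(bmcNamespace, ucmdbServerIp):
    mapping = getMapping('bmc_topology_mapping.xml', bmcNamespace, ucmdbServerIp)
    if not mapping:
        logger.warn('No mapping information available; nothing to process')
        return [], []
    objectTypeList, relationshipList = mapping
    return objectTypeList, relationshipList
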
def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    ipAddress = Framework.getDestinationAttribute('ip_address')
    hostname = Framework.getDestinationAttribute('hostname')
    if hostname is None:
        hostname = 'NA'
    else:
        hostname = hostname.upper()
    protocolType = Framework.getParameter('protocolType')
    hostOSH = modeling.createHostOSH(ipAddress)
    protocols = Framework.getAvailableProtocols(ipAddress, ClientsConsts.SQL_PROTOCOL_NAME)
    sidList = []
    for protocol in protocols:
        protocol_validation_status = isValidProtocol(Framework, protocol, protocolType)
        if protocol_validation_status == PROTOCOL_NO_PORT:
            logger.debug('Protocol ', protocol, ' has no defined port')
        elif protocol_validation_status == PROTOCOL_NO_SID:
            logger.debug('Protocol ', protocol, ' has no defined SID')
        elif dbutils.protocolMatch(Framework, protocol, protocolType, None, None):
            logger.debugException('Trying to connect with protocol:', protocol)
            dbClient = None
            try:
                try:
                    dbClient = Framework.createClient(protocol)
                    sid = getDbSid(dbClient, hostname)
                    if sid is None:
                        continue
                    if sid in sidList:
                        logger.debug('Database : ' + sid + ' already reported.')
                        continue
                    databaseServer = createDatabaseOSH(hostOSH, dbClient, sid,
                                                       dbClient.getDbVersion(),
                                                       dbClient.getAppVersion())
                    OSHVResult.add(databaseServer)
                    sidList.append(sid)
                except SQLException, sqlex:
                    logger.debug(sqlex.getMessage())
                except:
                    msg = logger.prepareFullStackTrace('')
                    errormessages.resolveAndReport(msg, ClientsConsts.SQL_PROTOCOL_NAME, Framework)
            finally:
                # The original excerpt is truncated after 'finally:'; closing the
                # database client is the natural completion of this block.
                if dbClient is not None:
                    dbClient.close()
    # Assumed completion; the original excerpt ends at the truncated finally block.
    return OSHVResult

def _discoverSqlMxSchemas(client):
    '''
    @summary: discovers SQL/MX schemas
    @param client: raw SSH client (not Shell)
    @return: Map<string, list>, key is catalog UID, value is a list of schema
             names; empty dictionary if parsing failed
    @rtype: dictionary
    '''
    catalogUidToSchemaNames = {}
    try:
        ## Get a list of schemas
        schemaAndCatIdOut = __getCommandOutput(client, 'select schema_name, cat_uid from schemata;')
        ## We have schema names
        schemaAndCatIdLines = schemaAndCatIdOut.strip().split('\n')
        # Filter all empty lines
        schemaAndCatIdLines = [line.strip() for line in schemaAndCatIdLines if line and line.strip()]
        for schemaAndCatIdLine in schemaAndCatIdLines:
            ## Skip header lines
            if re.search('SCHEMA_NAME', schemaAndCatIdLine, re.I):
                continue
            ## Skip separator line
            if re.match(r'\s*-+\s+-+\s?', schemaAndCatIdLine):
                continue
            ## Skip last line showing number of rows selected
            if re.search(r'row\(s\)', schemaAndCatIdLine, re.I):
                break
            ## Get schema names
            schemaAndCatIdMatch = re.match(r'(\S+)\s+(\S+)', schemaAndCatIdLine)
            if schemaAndCatIdMatch:
                schemaName = schemaAndCatIdMatch.group(1)
                catalogUID = schemaAndCatIdMatch.group(2)
                schemaNames = catalogUidToSchemaNames.get(catalogUID)
                if schemaNames is None:
                    schemaNames = []
                schemaNames.append(schemaName)
                catalogUidToSchemaNames[catalogUID] = schemaNames
    except:
        excInfo = logger.prepareFullStackTrace('')
        logger.warn("Failed to discover SQL/MX schemas", excInfo)
    return catalogUidToSchemaNames

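# The two SQL/MX helpers above both key their results by catalog UID:
# _discoverSqlMx() returns {catalogUID: _SqlMx} and _discoverSqlMxSchemas()
# returns {catalogUID: [schemaName, ...]}. A minimal sketch of how they could be
# combined, assuming _SqlMx instances accept a 'schemas' attribute (the helper
# name and that attribute are assumptions, not part of the original code).
def _attachSchemasToCatalogs(client, nodeName):
    catalogUidToSqlMx = _discoverSqlMx(client, nodeName)
    catalogUidToSchemaNames = _discoverSqlMxSchemas(client)
    for catalogUID, sqlmx in catalogUidToSqlMx.items():
        # Attach the schema names discovered for this catalog, if any
        sqlmx.schemas = catalogUidToSchemaNames.get(catalogUID) or []
    return catalogUidToSqlMx
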
def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    ipAddress = Framework.getDestinationAttribute('ip_address')
    try:
        snmpClient = Framework.createClient()
        try:
            a10Discoverer = createA10Discoverer(snmpClient, Framework, OSHVResult)
            a10Discoverer.getTopology(ipAddress)
        finally:
            snmpClient.close()
    except NoA10Exception:
        logger.reportWarning("No A10 vThunder found on the remote machine")
    except:
        # TODO: use errormessages here
        msg = logger.prepareFullStackTrace('')
        errobj = errormessages.resolveError(msg, 'snmp')
        logger.reportErrorObject(errobj)
        logger.debugException('')
    return OSHVResult

def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    ipAddress = Framework.getDestinationAttribute('ip_address')
    try:
        snmpClient = Framework.createClient()
        try:
            f5Discoverer = createF5Discoverer(snmpClient, Framework, OSHVResult)
            f5Discoverer.getTopology(ipAddress)
        finally:
            snmpClient.close()
    except NoF5Exception:
        logger.reportWarning("No F5 LTM found on the remote machine")
    except:
        # TODO: use errormessages here
        msg = logger.prepareFullStackTrace('')
        errobj = errormessages.resolveError(msg, 'snmp')
        logger.reportErrorObject(errobj)
        logger.debugException('')
    return OSHVResult

def discover(self):
    protocol = self.framework.getDestinationAttribute('Protocol')
    processes = []
    endpoints = []
    connections = []
    try:
        numberOfTCPSnapshots = int(self.framework.getParameter('NumberOfTCPSnapshots'))
        delayBetweenTCPSnapshots = float(self.framework.getParameter('DelayBetweenTCPSnapshots'))
    except:
        logger.error(logger.prepareFullStackTrace(''))
        raise ValueError("Job parameters are invalid")
        return [], []  # unreachable as written: the raise above always fires first
    if numberOfTCPSnapshots < 1 or delayBetweenTCPSnapshots <= 0:
        raise ValueError("Job parameters are invalid")
    try:
        processes = self._discoverProcesses()
    except UnknownOSTypeException, ex:
        msg = str(ex)
        errormessages.resolveAndReport(msg, self.client.getClientType(), self.framework)
    # NOTE: the excerpt ends here; endpoints and connections are initialized
    # above but their population is not part of this excerpt.

def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    ipAddress = Framework.getDestinationAttribute('ip_address')
    try:
        snmpClient = Framework.createClient()
        snmpAgent = SnmpAgent(CISCO_ACE_OID_BASE, snmpClient, Framework)
        try:
            cisco_Discoverer = Cisco_Discoverer(snmpAgent, OSHVResult, Framework)
            cisco_Discoverer.getTopology(ipAddress)
        finally:
            snmpClient.close()
    except NO_CISCO_ACE_Exception:
        logger.reportWarning("No Cisco ACE found on the remote machine")
    except:
        # TODO: use errormessages here
        msg = logger.prepareFullStackTrace('')
        errobj = errormessages.resolveError(msg, 'snmp')
        logger.reportErrorObject(errobj)
        logger.debugException('')
    return OSHVResult

# NOTE: fragment -- the enclosing function and the 'try:' blocks that the
# 'finally:'/'except' clauses below close are not part of this excerpt.
            interfaces_dict = discoverer.hostDataObject.interfaces
            ports_map = parsePorts(interfaces_dict.values())
            vlan_discoverer = layer2_shell_discoverer.VlanDiscoverer(shell)
            vlans = vlan_discoverer.discoverVlans()
            layer2_discoverer = layer2_shell_discoverer.Layer2Discoverer(shell)
            remote_peers_map = layer2_discoverer.discover()
            OSHVResult.addAll(layer2.reportTopology(switchOsh, interfaces_dict, vlans, remote_peers_map, ports_map))
        finally:
            try:
                shell and shell.closeClient()
            except:
                logger.debugException("")
                logger.error("Unable to close shell")
    except JException, ex:
        errorMessage = ex.getMessage()
    except:
        errorObject = sys.exc_info()[1]
        if errorObject:
            errorMessage = str(errorObject)
        else:
            errorMessage = logger.prepareFullStackTrace("")
    if errorMessage:
        logger.debugException(errorMessage)
        errormessages.resolveAndReport(errorMessage, protocol, Framework)
    return OSHVResult

def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    shell = None
    protocol = Framework.getDestinationAttribute('Protocol')
    listenerName = Framework.getDestinationAttribute('listenerName')
    listenerPath = Framework.getDestinationAttribute('listener_process_path')
    defOraHomes = Framework.getParameter('OracleHomes')
    listenerIp = Framework.getDestinationAttribute('listener_ip')
    listenedIPs = Framework.getTriggerCIDataAsList('listened_ips')
    try:
        try:
            client = Framework.createClient()
            shell = shellutils.ShellUtils(client)
            if listenerPath:
                envConf = UnixOracleEnvConfig(shell)
                if shell.isWinOs():
                    envConf = WindowsOracleEnvConfig(shell)
                envConf.setOracleHomeEnvVar(listenerPath)
            else:
                envConf = EnvConfigurator(shell, defOraHomes)
            if not listenedIPs:
                Framework.reportError('No listened_ips attribute values found.')
                return OSHVResult
            lookuper = LookupManager(listenedIPs)
            serviceToNodesMap = {}
            srvDiscoverer = SrvctlBasedDiscoverer(shell, envConf)
            databases = srvDiscoverer.getDatabases()
            for database in databases:
                instanceAndNodes = srvDiscoverer.getInstancesWithNodes(database)
                if instanceAndNodes:
                    serviceToNodesMap[database] = instanceAndNodes
                    for elem in instanceAndNodes:
                        resolver = DNSResolver(shell, elem.get('Node'))
                        ipAddr = resolver.resolveNSLookup()
                        if not ipAddr:
                            ipAddr = resolver.resolveNSLookupAliasBased()
                        ipAddr = ipAddr and ipAddr[0]
                        try:
                            if not ipAddr:
                                ipAddr = resolver.resolveHostsFile()
                        except:
                            pass
                        elem['ip'] = ipAddr
            for (serviceName, params) in serviceToNodesMap.items():
                try:
                    listeners = []
                    oracles = []
                    for elem in params:
                        ipAddr = elem.get('ip')
                        if not ipAddr:
                            raise ValueError('One of the Node Ip is not discovered. Can not create full topology.')
                        hostOSH = modeling.createHostOSH(ipAddr)
                        OSHVResult.add(hostOSH)
                        listenerOSH = createWeakListener(hostOSH)
                        if listenerIp == ipAddr:
                            listenerOSH.setStringAttribute('name', listenerName)
                        listeners.append(listenerOSH)
                        oracleOsh = modeling.createDatabaseOSH('oracle', elem['Instance'], None, ipAddr, hostOSH)
                        listeners.append(oracleOsh)
                        oracles.append(oracleOsh)
                    racName = ''
                    nodes = [x['Node'] for x in params if x['Node']]
                    nodes.sort()
                    racName = ':'.join(nodes)
                    racOsh = createRacOSH(racName, len(params), serviceName, None)
                    OSHVResult.add(racOsh)
                    for listener in listeners:
                        OSHVResult.add(listener)
                        OSHVResult.add(modeling.createLinkOSH('member', racOsh, listener))
                    for oracle in oracles:
                        OSHVResult.add(oracle)
                        OSHVResult.add(modeling.createLinkOSH('member', racOsh, oracle))
                except:
                    Framework.reportWarning('Failed to lookup host name of the node. Probably not all nodes were discovered by "Oracle Listener by Shell" Job. No RAC CI will be created.')
                    logger.warn('Failed to lookup host name for node with ip. No RAC CI will be created.')
            if not serviceToNodesMap:
                logger.warn('Failed to get information via srvctl. Will use old approach.')
            else:
                return OSHVResult
            # old flow
            tnsConfig = {}
            try:
                tnsConfig = TNSNamesConfig(shell, envConf)
            except:
                logger.debug('Failed to get tnsnames.ora. Trying different home.')
                envConf = EnvConfigurator(shell, defOraHomes)
                oraHome = envConf.findMatchingDefaultOracleHome()
                envConf = UnixOracleEnvConfig(shell)
                envConf.setOracleHomeEnvVar(oraHome)
                tnsConfig = TNSNamesConfig(shell, envConf)
            racParams = tnsConfig.getRacParams()
            servChec = ServiceChecker(shell, envConf, listenerName=listenerName)
            for racServiceName in racParams.keys():
                parametersDict = racParams[racServiceName]
                racNodeNameList = []
                racInstCount = len(parametersDict.keys())
                if (not servChec.isServiceRunning(racServiceName.upper())
                        or racInstCount == 0
                        or racInstCount != int(servChec.getServiceInstancesNumber(racServiceName))):
                    Framework.reportWarning('Oracle RAC is not running or not all Instances were detected')
                    continue
                racVersion = servChec.getVersion()
                shortVersion = servChec.getShortVersion()
                listeners = []
                oracles = []
                for ip in parametersDict.keys():
                    hostName = lookuper.lookupByIp(ip) or ' '
                    hostPrimIp = lookuper.getPrimaryIp(ip)
                    actIp = ip
                    if not hostName:
                        Framework.reportError('Failed to lookup host name of the node. Probably not all nodes were discovered by "Oracle Listener by Shell" Job. No RAC CI will be created.')
                        logger.error('Failed to lookup host name for node with ip %s . No RAC CI will be created.' % ip)
                        return ObjectStateHolderVector()
                    racNodeNameList.append(hostName)
                    dbSid = parametersDict[actIp]
                    if hostPrimIp:
                        actIp = hostPrimIp
                    hostOSH = modeling.createHostOSH(actIp)
                    OSHVResult.add(hostOSH)
                    listenerOSH = createWeakListener(hostOSH)
                    listeners.append(listenerOSH)
                    oracleOSH = modeling.createDatabaseOSH('oracle', dbSid, None, actIp, hostOSH,
                                                           None, None, None,
                                                           shortVersion, racVersion, shortVersion)
                    instanceNumber = servChec.getDbInstanceNumber(dbSid, racServiceName)
                    if instanceNumber:
                        oracleOSH.setAttribute('oracle_instancenumber', instanceNumber)
                    oracles.append(oracleOSH)
                racNodeNameList.sort()
                racName = ''
                for nodeName in racNodeNameList:
                    if racName == '':
                        racName = nodeName
                    else:
                        racName += ':' + nodeName
                racOSH = createRacOSH(racName, racInstCount, racServiceName, racVersion)
                OSHVResult.add(racOSH)
                for listener in listeners:
                    OSHVResult.add(listener)
                    OSHVResult.add(modeling.createLinkOSH('member', racOSH, listener))
                for oracle in oracles:
                    OSHVResult.add(oracle)
                    OSHVResult.add(modeling.createLinkOSH('member', racOSH, oracle))
        finally:
            try:
                shell and shell.closeClient()
            except:
                logger.debugException('')
                logger.error('Unable to close shell')
    except:
        msg = logger.prepareFullStackTrace('')
        errormessages.resolveAndReport(msg, protocol, Framework)
    return OSHVResult

def __init__(self, *args):
    Exception.__init__(self, *args)
    # (self.rootClass, self.rootValue, self.rootStacktrace) = sys.exc_info()
    self.rootStacktrace = logger.prepareFullStackTrace('')

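# Usage sketch (hypothetical): the constructor above captures the stack trace of
# whatever exception is currently being handled, so a wrapper raised from inside
# an except block keeps the root cause for later logging. 'DiscoveryException'
# stands in for the class that owns the __init__ above, and the helper name is
# illustrative; neither is part of the original code.
def _queryAgentSafely(queryFn):
    try:
        return queryFn()
    except:
        # Re-raise as the wrapper type; rootStacktrace still points at the
        # original failure even after this raise unwinds further.
        raise DiscoveryException('Failed to query the remote agent')
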
def doHPCmd(client, ntcmd_obj, ip_address, langBund, Framework,
            host_cmdbid=None, host_key=None, host_macs=None,
            uduid=None, nat_ip=None):
    'Shell, osh, str, Properties, Framework, .. -> oshVector'
    resultVector = ObjectStateHolderVector()
    ipAddress = ip_addr.IPAddress(ip_address)

    wmiProvider = wmiutils.WmicProvider(client)

    hostDiscoverer = WmiHostDiscoverer(wmiProvider)
    hostDo = hostDiscoverer.discover()

    hostDiscoverer = HostDiscovererByShell(client, langBund, Framework, hostDo)
    hostDiscoverer.discover()
    hostDo = hostDiscoverer.getResults()

    wmiDnsServersDiscoverer = WmiDnsServersDiscoverer(wmiProvider, ipAddress)
    wmiDnsServersDiscoverer.discover()
    dnsServersIpList = wmiDnsServersDiscoverer.getResults()
    if not dnsServersIpList:
        dnsServersDiscoverer = DnsServersDiscoverer(client, ipAddress, langBund, Framework)
        dnsServersDiscoverer.discover()
        dnsServersIpList = dnsServersDiscoverer.getResults()

    winsWmiServersDiscoverer = WmiWinsServersDiscoverer(wmiProvider, ipAddress)
    winsWmiServersDiscoverer.discover()
    winsServersIpList = winsWmiServersDiscoverer.getResults()
    if not winsServersIpList:
        winsServerDiscoverer = WinsServerDicoverer(client, ipAddress, langBund, Framework)
        winsServerDiscoverer.discover()
        winsServersIpList = winsServerDiscoverer.getResults()

    dhcpWmiServersDiscoverer = WmiDhcpServersDiscoverer(wmiProvider, ipAddress)
    dhcpWmiServersDiscoverer.discover()
    dhcpServersIpList = dhcpWmiServersDiscoverer.getResults()
    if not dhcpServersIpList:
        dhcpServerDiscoverer = DhcpServerDiscoverer(client, ipAddress, langBund, Framework)
        dhcpServerDiscoverer.discover()
        dhcpServersIpList = dhcpServerDiscoverer.getResults()

    interfaceDiscoverer = WmiInterfaceDiscoverer(wmiProvider, ipAddress)
    try:
        interfaceDiscoverer.discover()
        logger.debug('Interfaces successfully discovered via wmic.')
        try:
            shellIfaceDiscoverer = IpConfigInterfaceDiscoverer(client, ipAddress, Framework, langBund)
            shellIfaceDiscoverer.discover()
            ifaces = shellIfaceDiscoverer.getResults()
            interfaceDiscoverer.interfacesList.extend(ifaces)
        except:
            logger.debugException('')
    except:
        msg = logger.prepareFullStackTrace('')
        logger.debugException(msg)
        logger.warn('Failed getting interfaces information via wmic. Falling back to ipconfig.')
        interfaceDiscoverer = IpConfigInterfaceDiscoverer(client, ipAddress, Framework, langBund)
        interfaceDiscoverer.discover()

    hostDo.ipIsVirtual = interfaceDiscoverer.isIpVirtual()
    hostDo.ipIsNATed = interfaceDiscoverer.isIpNATed(nat_ip)
    interfacesList = interfaceDiscoverer.getResults()
    ucmdbversion = modeling.CmdbClassModel().version()

    topoBuilder = TopologyBuilder(interfacesList, hostDo, ipAddress, ntcmd_obj,
                                  dnsServersIpList, dhcpServersIpList, winsServersIpList,
                                  host_cmdbid, host_key, host_macs, ucmdbversion)
    topoBuilder.build()
    # access built host OSH to update UD UID attribute
    if topoBuilder.hostOsh and uduid:
        _updateHostUniversalDiscoveryUid(topoBuilder.hostOsh, uduid)
    topoBuilder.addResultsToVector(resultVector)

    return resultVector

# NOTE: fragment -- the enclosing function and the 'try:' block that the
# 'except' clauses below close are not part of this excerpt.
        if len(connections) > 1:
            logger.debug("More than one set of credentials found, the first one is used")
        connection = connections[0]
        strategy = nnmi.getDiscoveryStrategy(Framework, configuration)
        strategy.discover(connection)
    except nnmi.IntegrationException, ex:
        msg = str(ex)
        logger.error(msg)
        errormessages.resolveAndReport(msg, nnmi.NNM_PROTOCOL_NAME, Framework)
    except JException, ex:
        msg = ex.getMessage() or ''
        logger.debugException(msg)
        match = re.match(r'.*\(404\)\/NmsSdkService/(.*)', msg, re.I)
        if match:
            logger.debug("Service %s is not accessible" % match.group(1))
        else:
            logger.error(msg)
            errormessages.resolveAndReport(msg, nnmi.NNM_PROTOCOL_NAME, Framework)
    except:
        logger.errorException("")
        msg = logger.prepareFullStackTrace("")
        errormessages.resolveAndReport(msg, nnmi.NNM_PROTOCOL_NAME, Framework)
    return resultVector

# NOTE: fragment -- the enclosing function and the 'try:' blocks that the
# 'except'/'finally' clauses below close are not part of this excerpt.
            listenedFull = hostDnsName + ':' + hostPrimaryIP + '@' + listenedIPs
            listenerName = listenerConf.getListenerName()
            listenerVersion = listenerConf.getVersion()
            listenerOSH = createListenerOSH(hostOSH, listenedFull, listenerName, listenerVersion)
            if listenerOSH:
                OSHVResult.add(listenerOSH)
            else:
                Framework.reportWarning('Failed to create listener OSH. Either host name or listened ips are not defined.')
        except:
            logger.debugException('')
            Framework.reportWarning('Failed to discover one or more listeners parameters.')
        finally:
            try:
                shell and shell.closeClient()
            except:
                logger.debugException('')
                logger.error('Unable to close shell')
    except JException, ex:
        errorMessage = ex.getMessage()
    except:
        errorObject = sys.exc_info()[1]
        if errorObject:
            errorMessage = str(errorObject)
        else:
            errorMessage = logger.prepareFullStackTrace('')
    if errorMessage:
        logger.debugException(errorMessage)
        errormessages.resolveAndReport(errorMessage, protocol, Framework)
    return OSHVResult

def DiscoveryMain(Framework):
    OSHVResult = ObjectStateHolderVector()
    hostIp = Framework.getDestinationAttribute('ip_address')
    hostIdString = Framework.getDestinationAttribute('hostId')
    ports = Framework.getTriggerCIDataAsList('port_number')
    '''
    Having retrieved the list of LDAP ports, we try to connect to the domain
    controller in the member role first, so we choose the lowest port number
    in the list.
    '''
    if ports:
        ports = map(lambda port: int(port), ports)
        ports.sort()
    else:
        raise Exception("No LDAP ports provided to connect")
    protocol = "ldap"
    credentialIds = Framework.getAvailableProtocols(hostIp, protocol)
    client = None
    warningList = []
    defiscoveryPassed = 0
    connectedOnce = 0
    if not len(credentialIds):
        msg = 'Protocol not defined or IP out of protocol network range'
        Framework.reportError(msg)
    else:
        # go over all protocols and for each protocol try all available ports
        for credentialsId in credentialIds:
            portsToIterate = None
            protocolPort = Framework.getProtocolProperty(credentialsId, "protocol_port")
            if str(protocolPort).isdigit():
                portsToIterate = [protocolPort]
            else:
                portsToIterate = ports
            for port in portsToIterate:
                try:
                    # build environment and connect
                    try:
                        envBuilder = LdapEnvironmentBuilder(port)
                        client = Framework.createClient(credentialsId, envBuilder.build())
                        connectedOnce = 1
                        baseDn = active_directory_utils.getBaseDnFromJobsParameters(Framework)
                        daoService = LdapDaoService(client, baseDn)
                        # discover domain controller
                        warningList = []
                        hostOsh = modeling.createOshByCmdbIdString('host', hostIdString)
                        discoverer = AdDomainControllerDiscoverer(daoService, hostOsh)
                        OSHVResult = discoverer.discover()
                        # add container hosts for domain controllers to the result vector
                        containerOshs = discoverer.getResult().getContainerOshMap().values()
                        for osh in containerOshs:
                            OSHVResult.add(osh)
                        # skip other ports
                        defiscoveryPassed = 1
                        break
                    except:
                        msg = logger.prepareFullStackTrace('')
                        warning = errormessages.resolveError(msg, protocol)
                        warningList.append(warning)
                finally:
                    client and client.close()
            # skip other protocols in case when discovery passed for the current one
            if defiscoveryPassed:
                break
        if not connectedOnce:
            warning = errorobject.createError(errorcodes.CONNECTION_FAILED_NO_PROTOCOL_WITH_DETAILS,
                                              ['Tried all protocols'] * 2,
                                              'Failed to connect using all protocols')
            warningList = [warning]
    # print collected warning messages
    for warning in warningList:
        logger.reportWarningObject(warning)
    return OSHVResult
