def get_remote_databases(executor, shell_interpreter, instance_name, db2_home_path=None):
    r'''List databases catalogued with entry type REMOTE in the instance's db directory.

    @types: command.CmdExecutor, shell_interpreter.Interpreter, unicode, file_topology.Path -> list(db2_model.Database)
    @raise ExecuteError: on DB2INSTANCE variable setting failure
    @raise command.ExecuteException: on db2 cmdb execution failure
    '''
    # build a db2 command wrapper with environment (DB2INSTANCE) configured
    db2cmd = __get_configured_db2_cmd(shell_interpreter, instance_name, db2_home_path)
    list_db_directory = db2cmd.list_db_directory()
    # DCS directory listing is best-effort: safeFunc swallows failures -> None
    dcs_entries = fptools.safeFunc(executor)(db2cmd.list_dcs_directory()) or ()
    # (local alias, target database name) pairs used to resolve real db names
    lt_names = ((e.local_dbname, e.target_dbname) for e in dcs_entries)
    is_remote = fptools.comp(partial(operator.eq, DatabaseEntryTypes.REMOTE), Db2.DatabaseEntry.entry_type.fget)
    # execute the `list db directory` command through the executor pipeline
    remote_db_entries = list_db_directory | executor
    remote_db_entries = ifilter(is_remote, remote_db_entries)
    resolve_db_name = partial(_resolve_remotedb_name, dict(lt_names))
    remote_db_entries = imap(resolve_db_name, remote_db_entries)
    get_nodename = Db2.DatabaseEntry.node_name.fget
    # group entries first by node name, then inside each node by db name
    dbs_by_nodename = fptools.groupby(get_nodename, remote_db_entries)
    get_dbname = Db2.DatabaseEntry.name.fget
    node_map_pairs = [(node, fptools.groupby(get_dbname, db_entries))
                      for node, db_entries in dbs_by_nodename.iteritems()]
    return _parse_remote_databases(node_map_pairs)
def get_remote_databases(executor, shell_interpreter, instance_name, db2_home_path=None):
    r"""Discover databases catalogued as REMOTE for the given instance.

    @types: command.CmdExecutor, shell_interpreter.Interpreter, unicode, file_topology.Path -> list(db2_model.Database)
    @raise ExecuteError: on DB2INSTANCE variable setting failure
    @raise command.ExecuteException: on db2 cmdb execution failure
    """
    db2cmd = __get_configured_db2_cmd(shell_interpreter, instance_name, db2_home_path)
    list_db_cmd = db2cmd.list_db_directory()
    # DCS listing may fail; treat a failure as "no DCS entries"
    dcs_entries = fptools.safeFunc(executor)(db2cmd.list_dcs_directory()) or ()
    # map local alias -> target database name for remote entry resolution
    local_to_target = dict((entry.local_dbname, entry.target_dbname)
                           for entry in dcs_entries)
    entry_type_of = Db2.DatabaseEntry.entry_type.fget
    entries = list_db_cmd | executor
    remote_entries = [_resolve_remotedb_name(local_to_target, entry)
                      for entry in entries
                      if DatabaseEntryTypes.REMOTE == entry_type_of(entry)]
    # group by node name, then by database name within each node
    entries_by_node = fptools.groupby(Db2.DatabaseEntry.node_name.fget,
                                      remote_entries)
    name_of = Db2.DatabaseEntry.name.fget
    node_map_pairs = []
    for node, node_entries in entries_by_node.iteritems():
        node_map_pairs.append((node, fptools.groupby(name_of, node_entries)))
    return _parse_remote_databases(node_map_pairs)
def get_alias_tcid_2d(framework, cred_manager):
    r'''Collect alias trigger-CI data rows for each trigger IP, grouped by alias id.

    @types: db2_flow.RichFramework, db2_flow.CredsManager -> generator(tuple[tuple(str, str, str, str, str)])
    '''
    if framework.tcidHasValues('alias_id'):
        # the 5th column of each row is taken as the alias id
        # -- TODO confirm against the actual trigger-CI row layout
        alias_id = itemgetter(4)
        ips = framework.getTriggerCIDataAsList('ip_address')
        result = chain(*(get_alias_tcid_2d_for_ip(ip, framework, cred_manager)
                         for ip in ips))
        return fptools.groupby(alias_id, result).values()
    # NOTE(review): implicitly returns None when 'alias_id' carries no values;
    # callers must be prepared for a non-iterable result -- confirm.
def report(host_id, mgr_version, server_configs, vm_configs): '@types: str, str, list[ShowServerCmd.Config], list[ShowVmCmd.Config] -> list[osh]' # take manager from one of discovered server configs # if present build in one way otherwise in other get_manager_uuid = ovm_cli.ShowServerCmd.Config.manager_uuid.fget config = findFirst(get_manager_uuid, server_configs) mgr_osh, oshs = _report_manager(config.manager_uuid, mgr_version, host_id) get_server_id = comp(first, ShowVmCmd.Config.server.fget) vm_configs_by_server_id = groupby(get_server_id, vm_configs) for server_config in server_configs: # report server pool p_osh = report_server_pool(server_config.server_pool, mgr_osh) oshs.append(p_osh) # report OVM server ips = (server_config.server.ip_address,) server_name = server_config.server.name host_osh, h_oshs = report_server_node(server_name, ips, p_osh) oshs.extend(h_oshs) # report hypervisor hypervisor_osh = report_hypervisor(server_config, host_osh) oshs.append(hypervisor_osh) # report domain config oshs.append(report_server_domain_config(server_config, host_osh)) server_id = server_config.server.id # report VMs vms = vm_configs_by_server_id.get(server_id, ()) for vm_config in vms: vm_host_osh, _oshs = report_vm_node(vm_config, hypervisor_osh) if _oshs: oshs.extend(_oshs) oshs.append(report_vm_domain_config(vm_config, vm_host_osh)) return filter(None, oshs)
def _discoverDatabases(solman, sysPairsBySysName, sendVector, reportError, resolveIps):
    # Discover databases used by previously discovered systems and report them.
    try:
        logger.info("Discover Databases")
        query = GetDatabaseUsages()
        queryExecutor = TableQueryExecutor(solman)
        dbName = second
        usagePairsByDbName = groupby(dbName, queryExecutor.executeQuery(query))
        logger.info("Found %s databases in use" % len(usagePairsByDbName))
        query = GetDatabaseInstances()
        # a db instance is of interest only if one of its usages refers to an
        # already discovered system
        inDiscoveredSystems = F(_inDiscoveredSystems, fptools._, sysPairsBySysName)
        isUsedDb = comp(inDiscoveredSystems, usagePairsByDbName.get, _getDbInstance)
        findPlatform = comp(db_platform.findPlatformBySignature, _getDbVendor)
        dbInsts = queryExecutor.executeQuery(query)
        logger.info("Found %s database instances" % len(dbInsts))
        # three independent iterators over the same filtered sequence: one for
        # platform detection, one for IP resolution, one kept as the instance
        dbs_1, dbs_2, dbs_3 = tee(ifilter(isUsedDb, dbInsts), 3)
        platforms = imap(findPlatform, dbs_1)
        ips = imap(comp(resolveIps, _getAddress), dbs_2)
        # keep only triples where instance, platform and ips are all truthy
        dbs = ifilter(all, izip(dbs_3, platforms, ips))
        reported = len(map(comp(sendVector, _reportDatabase), dbs))
        logger.info("Reported %s databases" % reported)
    except Exception:
        msg = 'Failed to discover databases'
        logger.debugException(msg)
        reportError(msg)
def report(host_id, mgr_version, server_configs, vm_configs):
    '@types: str, str, list[ShowServerCmd.Config], list[ShowVmCmd.Config] -> list[osh]'
    # Manager topology is built from the first server config that carries a
    # manager UUID.
    manager_uuid_of = ovm_cli.ShowServerCmd.Config.manager_uuid.fget
    cfg_with_manager = findFirst(manager_uuid_of, server_configs)
    mgr_osh, oshs = _report_manager(cfg_with_manager.manager_uuid,
                                    mgr_version, host_id)
    # index VM configs by the id of the hosting server
    server_id_of = comp(first, ShowVmCmd.Config.server.fget)
    vms_by_server_id = groupby(server_id_of, vm_configs)
    for cfg in server_configs:
        # server pool
        pool_osh = report_server_pool(cfg.server_pool, mgr_osh)
        oshs.append(pool_osh)
        # OVM server node
        node_osh, node_oshs = report_server_node(cfg.server.name,
                                                 (cfg.server.ip_address,),
                                                 pool_osh)
        oshs.extend(node_oshs)
        # hypervisor on top of the node
        hypervisor_osh = report_hypervisor(cfg, node_osh)
        oshs.append(hypervisor_osh)
        # server domain configuration document
        oshs.append(report_server_domain_config(cfg, node_osh))
        # VMs hosted by this server
        for vm_cfg in vms_by_server_id.get(cfg.server.id, ()):
            vm_node_osh, vm_oshs = report_vm_node(vm_cfg, hypervisor_osh)
            if vm_oshs:
                oshs.extend(vm_oshs)
            oshs.append(report_vm_domain_config(vm_cfg, vm_node_osh))
    # filter out None entries before returning
    return filter(None, oshs)
def resolve_servicename(network_services, svcename, protocol=u"tcp"):
    """Find a network service entry by its (name, protocol) pair.

    [db2_base_shell_discoverer.NetworkService], str, str? -> db2_base_shell_discoverer.NetworkService
    """
    # removed unused inner helper `svcename_protocol_pairs` (dead code) and
    # fixed the `network_serivces` typo in the local mapping name
    make_key = juxt(NetworkService.service_name.fget, NetworkService.protocol.fget)
    network_services_by_key = fptools.groupby(make_key, network_services)
    # lookup yields None for an unknown key; first() is applied to the result
    # exactly as before, preserving the original missing-key behavior
    return first(network_services_by_key.get((svcename.strip(), protocol)))
def resolve_servicename(network_services, svcename, protocol=u'tcp'):
    '''Resolve a service entry by name and protocol.

    [db2_base_shell_discoverer.NetworkService], str, str? -> db2_base_shell_discoverer.NetworkService
    '''
    # dead inner function `svcename_protocol_pairs` removed; local name typo
    # (`serivces`) corrected
    key_fn = juxt(NetworkService.service_name.fget, NetworkService.protocol.fget)
    services_by_key = fptools.groupby(key_fn, network_services)
    return first(services_by_key.get((svcename.strip(), protocol)))
def get_databases_by_db2command(executor, shell_interpreter, instance_name, db2_home_path=None):
    r"""List catalogued databases via the `db2 list db directory` command.

    @types: command.CmdExecutor, shell_interpreter.Interpreter, unicode, file_topology.Path -> list(db2_model.Database)
    @raise ExecuteError: on DB2INSTANCE variable setting failure
    @raise command.ExecuteException: on db2 cmdb execution failure
    """
    db2cmd = __get_configured_db2_cmd(shell_interpreter, instance_name, db2_home_path)
    entries = db2cmd.list_db_directory() | executor
    name_of = Db2.DatabaseEntry.name.fget
    entries_by_name = fptools.groupby(name_of, entries)
    return _parse_databases(entries_by_name.iteritems())
def parse_config_file_content_from_MIniFileContents(items):
    """Rebuild ini-style file content from M_INIFILE_CONTENTS rows.

    Rows are grouped by their section name; each group is rendered as an
    `[section]` header followed by `key = value` lines, with a blank line
    separating sections.

    @types: list[_Item]-> str
    @tito: {r'''FILE_NAME,LAYER_NAME,TENANT_NAME,HOST,SECTION,KEY,VALUE
"some.ini","DEFAULT","","","communication","default_read_timeout","-1"
"some.ini","DEFAULT","","","communication","default_read_timeout_override","yes"
"some.ini","DEFAULT","","","communication","listenport","3$(SAPSYSTEM)03"
"some.ini","DEFAULT","","","communication","maxchannels","240"
"some.ini","DEFAULT","","","communication","maxendpoints","250"
"some.ini","DEFAULT","","","mergedog","active","no"
"some.ini","DEFAULT","","","persistence","log_segment_size_mb","10"
"some.ini","DEFAULT","","","trace","alert","error"
"some.ini","DEFAULT","","","trace","alertfilename","trace/scriptserver_alert"
"some.ini","DEFAULT","","","trace","default","error"
"some.ini","DEFAULT","","","trace","filename","trace/scriptserver"
"some.ini","DEFAULT","","","trace","flushinterval","5"
"some.ini","DEFAULT","","","trace","maxfiles","7"
"some.ini","DEFAULT","","","trace","maxfilesize","1003000"
"some.ini","DEFAULT","","","trace","saptracelevel","0"
15 rows selected (0 usec)'''
    : r'''[mergedog]
active = no

[communication]
default_read_timeout = -1
default_read_timeout_override = yes
listenport = 3$(SAPSYSTEM)03
maxchannels = 240
maxendpoints = 250

[trace]
alert = error
alertfilename = trace/scriptserver_alert
default = error
filename = trace/scriptserver
flushinterval = 5
maxfiles = 7
maxfilesize = 1003000
saptracelevel = 0

[persistence]
log_segment_size_mb = 10'''
    }
    """
    assert items
    groupedItems = fptools.groupby(lambda item: item.section, items)
    content = []
    # loop variable renamed so it no longer shadows the `items` parameter;
    # map(lambda ...) replaced with a generator expression
    for category, sectionItems in groupedItems.items():
        content.append(r'[%s]' % category)
        content.extend(r'%s = %s' % (item.key, item.value)
                       for item in sectionItems)
        # blank separator line after each section
        content.append('')
    return '\n'.join(content).strip()
def parse_config_file_content_from_MIniFileContents(items):
    """Render M_INIFILE_CONTENTS rows back into ini-file text, section by section.

    @types: list[_Item]-> str
    @tito: {r'''FILE_NAME,LAYER_NAME,TENANT_NAME,HOST,SECTION,KEY,VALUE
"some.ini","DEFAULT","","","communication","default_read_timeout","-1"
"some.ini","DEFAULT","","","communication","default_read_timeout_override","yes"
"some.ini","DEFAULT","","","communication","listenport","3$(SAPSYSTEM)03"
"some.ini","DEFAULT","","","communication","maxchannels","240"
"some.ini","DEFAULT","","","communication","maxendpoints","250"
"some.ini","DEFAULT","","","mergedog","active","no"
"some.ini","DEFAULT","","","persistence","log_segment_size_mb","10"
"some.ini","DEFAULT","","","trace","alert","error"
"some.ini","DEFAULT","","","trace","alertfilename","trace/scriptserver_alert"
"some.ini","DEFAULT","","","trace","default","error"
"some.ini","DEFAULT","","","trace","filename","trace/scriptserver"
"some.ini","DEFAULT","","","trace","flushinterval","5"
"some.ini","DEFAULT","","","trace","maxfiles","7"
"some.ini","DEFAULT","","","trace","maxfilesize","1003000"
"some.ini","DEFAULT","","","trace","saptracelevel","0"
15 rows selected (0 usec)'''
    : r'''[mergedog]
active = no

[communication]
default_read_timeout = -1
default_read_timeout_override = yes
listenport = 3$(SAPSYSTEM)03
maxchannels = 240
maxendpoints = 250

[trace]
alert = error
alertfilename = trace/scriptserver_alert
default = error
filename = trace/scriptserver
flushinterval = 5
maxfiles = 7
maxfilesize = 1003000
saptracelevel = 0

[persistence]
log_segment_size_mb = 10'''
    }
    """
    assert items
    grouped = fptools.groupby(lambda entry: entry.section, items)
    parts = []
    for section, entries in grouped.items():
        parts.append(r'[%s]' % section)
        for entry in entries:
            parts.append(r'%s = %s' % (entry.key, entry.value))
        parts.append('')
    return '\n'.join(parts).strip()
def get_databases_by_db2command(executor, shell_interpreter, instance_name, db2_home_path=None):
    r'''Parse `db2 list db directory` output into database models.

    @types: command.CmdExecutor, shell_interpreter.Interpreter, unicode, file_topology.Path -> list(db2_model.Database)
    @raise ExecuteError: on DB2INSTANCE variable setting failure
    @raise command.ExecuteException: on db2 cmdb execution failure
    '''
    db2cmd = __get_configured_db2_cmd(shell_interpreter, instance_name, db2_home_path)
    grouped = fptools.groupby(Db2.DatabaseEntry.name.fget,
                              db2cmd.list_db_directory() | executor)
    return _parse_databases(grouped.iteritems())
def get_local_databases(executor, shell_interpreter, instance_name, db2_home_path=None, db2cmdline=None):
    r"""Discover databases catalogued as local to this instance.

    @types: command.CmdExecutor, shell_interpreter.Interpreter, unicode, file_topology.Path -> list(db2_model.Database)
    @raise ExecuteError: on DB2INSTANCE variable setting failure
    @raise command.ExecuteException: on db2 cmdb execution failure
    """
    # environment configuration happens here even when db2cmdline is used below
    db2cmd = __get_configured_db2_cmd(shell_interpreter, instance_name, db2_home_path)
    entry_type_of = Db2.DatabaseEntry.entry_type.fget
    if db2cmdline:
        entries = Db2(db2cmdline).file_list_db_directory() | executor
    else:
        entries = db2cmd.list_db_directory() | executor
    local_entries = [entry for entry in entries
                     if DatabaseEntryTypes.is_local(entry_type_of(entry))]
    by_name = fptools.groupby(Db2.DatabaseEntry.name.fget, local_entries)
    return _parse_databases(by_name.iteritems())
def discover(self):
    ''' -> dict(int, Device), list(Connectivity)
    Main discovery method: collects devices and attaches their ports, modules,
    vlans and (optionally) configs; gathers layer-3 connectivity grouped by
    device id.
    '''
    devices = self.discoverDevices()
    if not devices:
        # NOTE(review): returns None here while the success path returns a
        # 2-tuple; callers must handle both shapes -- confirm.
        return
    devicesById = fptools.applyMapping(lambda d: d.deviceId, devices)
    # attach valid ports to their owning devices
    allPorts = getPorts(self.client)
    for port in allPorts:
        device = devicesById.get(port.deviceId)
        if device and self._isPortValid(port):
            device.portsById[port.portId] = port
    # attach valid modules to their owning devices, keyed by slot
    modules = getModules(self.client)
    validModules = itertools.ifilter(_isModuleValid, modules)
    for module in validModules:
        device = devicesById.get(module.deviceId)
        if device is not None:
            device.modulesBySlot[module.slot] = module
    for device in devicesById.values():
        vlansById = self.discoverVlansByDevice(device)
        device.vlansById = vlansById
    # NOTE(review): second return value is a dict of connectivities grouped by
    # device id, not a plain list as the doc line above suggests -- confirm.
    allConnections = getLayer3Topology(self.client)
    connectivitiesByDeviceId = fptools.groupby(lambda c: c.deviceId, allConnections)
    if self.reportDeviceConfigs:
        for device in devices:
            config = self.discoverDeviceConfig(device)
            if config:
                device.config = config
    for device in devices:
        self.analyseDevice(device)
    return devicesById, connectivitiesByDeviceId
def _discoverServers(solman, hostnameToAddress, sysPairsBySysName, sendVector, reportError, resolveIps):
    '''
    Discover SAP instances related to already discovered systems
    @type hostnameToAddress: dict[str, sap.Address]
    @type sysPairsBySysName: dict[str, tuple[System, osh]]
    '''
    try:
        # get servers by one of the specified queries (first query that
        # returns a truthy result wins)
        queries = (GetServersWithNotActiveFlag(), GetServers())
        queryExecutor = TableQueryExecutor(solman)
        result = imap(Sfn(queryExecutor.executeQuery), queries)
        servers = findFirst(truth, result) or ()
        # group servers by system name
        pairsBySysName = groupby(GetServers.Server.systemName.fget, servers)
        # keep only groups whose system was discovered earlier
        inDiscoveredSystems = comp(sysPairsBySysName.get, first)
        pairs = ifilter(inDiscoveredSystems, pairsBySysName.iteritems())
        resolveIps = comp(resolveIps, GetServers.Server.hostname.fget)
        for sysName, servers in pairs:
            logger.info("Found %s servers for %s system" % (len(servers), sysName))
            # collect parsed names for each server
            parseServerName = comp(GetServers.parseServerName,
                                   GetServers.Server.name.fget)
            parsedServerNames = imap(parseServerName, servers)
            # resolve IPs for each server
            ips = imap(resolveIps, servers)
            # get information for each server where name and IPs are present
            infoSeq = ifilter(all, izip(servers, parsedServerNames, ips))
            # not interested in server nodes - only instances
            infoSeq = ifilterfalse(isServerNode, infoSeq)
            # report each server
            system, systemOsh = sysPairsBySysName.get(sysName)
            reportServer = F(_reportServer, fptools._, fptools._, fptools._,
                             system, systemOsh)
            vector = ObjectStateHolderVector()
            each(vector.addAll, starmap(reportServer, infoSeq))
            sendVector(vector)
    except (Exception, JException):
        msg = "Failed to discover servers"
        logger.warnException(msg)
        reportError(msg)
def _discoverRfcDestinations(sapUtils, systemOsh, config):
    r'''Discover RFC destinations, resolve their target hosts and report them.
    @types: SapUtils, osh, flow.DiscoveryConfigBuilder -> oshv'''
    if not config.discoverRFCConnections:
        return ObjectStateHolderVector()
    logger.info('Discover RFC connections')
    getRfcCmd = sap_abap_discoverer.GetRfcDestinationsRfcCommand()
    # best-effort: Sfn converts failures into None
    connections = Sfn(getRfcCmd.getAllRfcConnections)(sapUtils) or ()
    logger.info("Found %s possible RFC connections" % len(connections))
    # keep only connections whose third field marks the English version
    connections = filter(comp(sap_abap_discoverer.isEnglishVersion, third),
                         connections)
    logger.info("Found %s RFC connections with EN language" % len(connections))
    connByName = applyMapping(first, connections)
    destinations = getRfcCmd.getAllRfcDestinations(sapUtils)
    logger.info("Found %s RFC destinations" % len(destinations))
    # get destinations with valid host
    destinations = [d for d in destinations if _isDestFull(d)]
    logger.info("Found %s destinations with host available" % len(destinations))
    destinationsByHost = groupby(lambda d: d.targetHost, destinations)
    # resolve each distinct target host to an IP (None when not resolvable)
    ips = map(Sfn(_resolve), destinationsByHost.iterkeys())
    pairIpToDestinations = zip(ips, destinationsByHost.itervalues())
    # split into resolved/unresolved groups by presence of an IP
    resolved, notResolved = partition(first, pairIpToDestinations)
    if notResolved:
        skippedDestsCount = sum([len(dests) for ip, dests in notResolved])
        logger.debug("%s destinations skipped due to not resolved %s hosts"
                     % (skippedDestsCount, len(notResolved)))
    vector = ObjectStateHolderVector()
    for ip, destinations in resolved:
        # TODO:
        # 1) query RFC connections (to get description) only for these
        # destinations as it will reduce amount of data fetched from system
        # One query for connections returns ~8K rows of data, while we are
        # interested in less than ~50 or even less
        # 2) another improvement query only records in English language
        countOfDests = len(destinations)
        host = first(destinations).targetHost
        reportDst = Sfn(_reportRfcDestination)
        logger.debug("%s destinations resolved for %s" % (countOfDests, host))
        vectors = (reportDst(dst, ip, connByName, systemOsh)
                   for dst in destinations)
        each(vector.addAll, ifilter(None, vectors))
    return vector
def discoverComponents(self):
    r''' Collect all data from different queries about available software
    components
    @types: -> list[sap.SoftwareComponent]'''
    descriptions = self.getComponentLocalizedDescriptions()
    # several localized descriptions may exist per component name
    descriptionsByName = fptools.groupby(
        self.ComponentLocalizedDescription.getName, descriptions)
    result = []
    for component in self.getComponents():
        # prefer the English description when one is present
        isEn = SoftwareComponentDiscovererByJco.isEnglishCmpDescription
        candidates = descriptionsByName.get(component.name, ())
        enDescription = fptools.findFirst(isEn, candidates)
        value = enDescription and enDescription.value
        result.append(sap.SoftwareComponent(component.name,
                                            component.type,
                                            value,
                                            component.versionInfo))
    return result
def discoverComponents(self):
    r''' Collect all data from different queries about available software
    components
    @types: -> list[sap.SoftwareComponent]'''
    componentDescriptions = self.getComponentLocalizedDescriptions()
    # may exist several localized descriptions per component
    descrsByName = fptools.groupby(
        self.ComponentLocalizedDescription.getName,
        componentDescriptions)
    components = []
    for component in self.getComponents():
        # find description in english
        isEn = SoftwareComponentDiscovererByJco.isEnglishCmpDescription
        descrs = descrsByName.get(component.name, ())
        description = fptools.findFirst(isEn, descrs)
        # description may be None; the component then carries None as value
        components.append(sap.SoftwareComponent(
            component.name, component.type,
            description and description.value,
            component.versionInfo))
    return components
def get_fc_hba_descriptors(executor):
    r'''
    Discovers fibre channel info using wmi
    `MSFC_FCAdapterHBAAttributes` and `MSFC_FibrePortHBAAttributes` classes
    @param executor: an executor instance to run WMI commands
    @type executor: command.ExecutorCmdlet
    @return: collection of FC HBA descriptor and its ports pairs
    @rtype: tuple[MSFC_FCAdapterHBAAttributesCmd.WMI_CLASS,
                  tuple[MSFC_FibrePortHBAAttributesCmd.WMI_CLASS]]
    @raise ValueError: if no executor passed
    @raise command.ExecutionException: on command execution failure
    @raise com.hp.ucmdb.discovery.library.clients.protocols.command.TimeoutException:
        on command timeout
    '''
    # the docstring promised ValueError on a missing executor but the code
    # never raised it; enforce the documented contract explicitly
    if not executor:
        raise ValueError('executor is not specified')
    executor = ChainedCmdlet(executor, command.cmdlet.produceResult)
    adapters = hba_wmi_command.MSFC_FCAdapterHBAAttributesCmd() | executor
    ports = hba_wmi_command.MSFC_FibrePortHBAAttributesCmd() | executor
    # ports are correlated with adapters via the WMI InstanceName field
    get_instance_name = operator.attrgetter('InstanceName')
    ports_by_instancename = fptools.groupby(get_instance_name, ports)
    result = []
    for adapter in adapters:
        adapter_ports = ports_by_instancename.get(adapter.InstanceName) or ()
        result.append((adapter, adapter_ports))
    return tuple(result)
def _discoverCmps(solman, sysPairsBySysName, sendVector, reportAsConfig, reportError):
    ''' Report components for systems that are discovered before
    @type sysNameToCmpPairs: tuple[str, sap.SoftwareComponent]
    @type sysPairsBySysName: dict[str, tuple[System, osh]]
    @type sendVector: oshv -> None
    '''
    try:
        sysNameToCmpPairs = _discoverSoftwareCmps(solman)
        # group (system name, component) pairs by system name
        cmpPairsBySysName = groupby(first, sysNameToCmpPairs)
        systems = sysPairsBySysName.iterkeys()
        hasComponents = cmpPairsBySysName.get
        # report only for systems that have at least one component
        for sysName in ifilter(hasComponents, systems):
            _, systemOsh = sysPairsBySysName.get(sysName)
            cmps = imap(second, cmpPairsBySysName.get(sysName))
            vec = sap_abap.reportSoftwareCmps(cmps, systemOsh, reportAsConfig)
            sendVector(vec)
    except (Exception, JException):
        msg = "Failed to discover Software Components"
        logger.warnException(msg)
        reportError(msg)
def _discoverClients(solman, sysPairsBySysName, sendVector, reportError):
    # Discover SAP clients and report them per already discovered system.
    try:
        logger.info("Discover Clients")
        getClientsQuery = GetClients()
        executeQuery = TableQueryExecutor(solman).executeQuery
        sysNameToClientPairs = executeQuery(getClientsQuery)
        # group (system name, client) pairs by system name
        pairsBySysName = groupby(first, sysNameToClientPairs)
        logger.info("Discovered %s clients for %s systems" % (
            len(sysNameToClientPairs), len(pairsBySysName)))
        reporter = sap.ClientReporter(sap.ClientBuilder())
        # only systems discovered earlier get their clients reported
        for systemName in ifilter(sysPairsBySysName.get,
                                  pairsBySysName.iterkeys()):
            _, systemOsh = sysPairsBySysName.get(systemName)
            report = F(reporter.report, fptools._, systemOsh)
            clients = imap(second, pairsBySysName.get(systemName))
            vector = ObjectStateHolderVector()
            each(vector.add, imap(report, clients))
            sendVector(vector)
    except (Exception, JException):
        msg = "Failed to discover clients"
        logger.warnException(msg)
        reportError(msg)
def get_local_databases(executor, shell_interpreter, instance_name, db2_home_path=None, db2cmdline=None):
    r'''List databases catalogued as local to the given instance.

    @types: command.CmdExecutor, shell_interpreter.Interpreter, unicode, file_topology.Path -> list(db2_model.Database)
    @raise ExecuteError: on DB2INSTANCE variable setting failure
    @raise command.ExecuteException: on db2 cmdb execution failure
    '''
    # environment (DB2INSTANCE) is configured here even when the db2cmdline
    # branch below is taken
    db2cmd = __get_configured_db2_cmd(shell_interpreter, instance_name, db2_home_path)
    get_type = Db2.DatabaseEntry.entry_type.fget
    is_local = fptools.comp(DatabaseEntryTypes.is_local, get_type)
    if db2cmdline:
        # explicit command line given: list the db directory through it
        local_db_entries = filter(
            is_local, Db2(db2cmdline).file_list_db_directory() | executor)
    else:
        local_db_entries = filter(is_local,
                                  db2cmd.list_db_directory() | executor)
    db_by_name = fptools.groupby(Db2.DatabaseEntry.name.fget, local_db_entries)
    return _parse_databases(db_by_name.iteritems())
def _countUniqueHosts(self, obj):
    # Number of distinct host strings among the filtered addresses of *obj*.
    addresses = self._filterAddresses(obj)
    hostKey = lambda item: str(item.address.host).strip()
    groupedByHost = fptools.groupby(hostKey, addresses)
    return len(groupedByHost.keys())
#discover tnsnames.ora file logger.debug("try to find tnsnames.ora file") hostId = Framework.getDestinationAttribute('hostId') Framework.sendObjects( jee_discoverer.discoverTnsnamesOra(hostId, client)) # In case if there is not processes found - discovery stops with # warning message to the UI if not processes: logger.reportWarning( "No Websphere processes currently running") return ObjectStateHolderVector() r'''1)''' runtimes = map(createRuntime, processes) # group runtimes of processes by configuration directory path runtimesByConfigDirPath = groupby( websphere_discoverer.ServerRuntime.getConfigDirPath, runtimes) debugGroupping(runtimesByConfigDirPath) # find out platform version for each runtime where several runtimes # may use the same binary installation placed in so called 'isntall root directory' # so to reduce FS calls for the same root directory we will group # runtimes by this path installRootDirPaths = applySet( websphere_discoverer.ServerRuntime.findInstallRootDirPath, runtimes) # for install root directory get platform version productInfoParser = websphere_discoverer.ProductInfoParser( loadExternalDtd) productInfoByInstallDirPath = applyReverseMapping( curry(determineVersion, _, productInfoParser, fs), installRootDirPaths)
#discover tnsnames.ora file logger.debug("try to find tnsnames.ora file") hostId = Framework.getDestinationAttribute('hostId') Framework.sendObjects(jee_discoverer.discoverTnsnamesOra(hostId, client)) # In case if there is not processes found - discovery stops with # warning message to the UI if not processes: logger.reportWarning("No Websphere processes currently running") return ObjectStateHolderVector() r'''1)''' runtimes = map(createRuntime, processes) # group runtimes of processes by configuration directory path runtimesByConfigDirPath = groupby( websphere_discoverer.ServerRuntime.getConfigDirPath, runtimes ) debugGroupping(runtimesByConfigDirPath) # find out platform version for each runtime where several runtimes # may use the same binary installation placed in so called 'isntall root directory' # so to reduce FS calls for the same root directory we will group # runtimes by this path installRootDirPaths = applySet( websphere_discoverer.ServerRuntime.findInstallRootDirPath, runtimes ) # for install root directory get platform version productInfoParser = websphere_discoverer.ProductInfoParser(loadExternalDtd) productInfoByInstallDirPath = applyReverseMapping( curry(determineVersion, _, productInfoParser, fs),
def get_fc_hbas(self, shell):
    # Collect FC HBA data on an ESX host via esxcli, mapping each FcHba to the
    # list of its (FcPort, target descriptor) pairs.
    result = defaultdict(list)
    executor = command.cmdlet.executeCommand(shell)
    esxcli = find_esxcli_impl(executor)()
    esxcli = esxcli.formatter('csv')
    esxcli_exec = command.get_exec_fn(esxcli, executor)
    storage = EsxcliStorageNamespace(esxcli)
    software = EsxcliSoftwareNamespace(esxcli)
    # SCSI paths grouped by adapter identifier, for target details lookup
    scsi_path_by_adapter_identifier = fptools.groupby(
        methodcaller('get', 'AdapterIdentifier'),
        esxcli_exec(storage.core.path.list()))
    adapters = esxcli_exec(storage.core.adapter.list())
    grouped_adapters = dict((adapter.get('HBAName'), adapter)
                            for adapter in adapters)
    # FC descriptors grouped by (vmhba name, node WWN)
    grouped = defaultdict(list)
    for descriptor in esxcli_exec(storage.san.fc.list()):
        grouped[(descriptor.get('Adapter'),
                 descriptor.get('NodeName'))].append(descriptor)
    get_vendor = Sfn(self.get_vendor)
    get_fchba_descriptor = Sfn(self._get_fchba_descriptor)
    for key, descriptors in grouped.iteritems():
        try:
            vmhba, nodewwn = key
            nodewwn = wwn.parse_from_str(nodewwn)
            name = vmhba
            id_ = vmhba
            adapter_descriptor = grouped_adapters.get(vmhba)
            driverversion = None
            vendor = get_vendor(vmhba, executor)
            model = None
            fwversion = None
            serialnum = None
            if adapter_descriptor:
                id_ = adapter_descriptor.get('UID')
                driver = adapter_descriptor.get('Driver')
                # driver version comes from the VIB of the driver package,
                # unless the vendor-specific descriptor below overrides it
                vib_descriptor = esxcli_exec(software.vib.get(vibname=driver))
                driverversion = vib_descriptor.get('Version')
                fchabdescriptor = get_fchba_descriptor(driver, vmhba, executor)
                if fchabdescriptor:
                    model = fchabdescriptor.model
                    fwversion = fchabdescriptor.firmwareversion
                    serialnum = fchabdescriptor.serialnumber
                    driverversion = fchabdescriptor.driverversion
            fchba = fc_hba_model.FcHba(id_, name, wwn=nodewwn,
                                       vendor=vendor, model=model,
                                       serial_number=serialnum,
                                       driver_version=driverversion,
                                       firmware_version=fwversion)
            ports = []
            for fcdescriptor in descriptors:
                try:
                    portwwn = fcdescriptor.get('PortName')
                    porttype = fcdescriptor.get('PortType')
                    portwwn = wwn.parse_from_str(portwwn)
                    portid = fcdescriptor.get('PortID')
                    port_speed = _parse_port_speed(fcdescriptor.get('Speed'))
                    # port id is reported as hex
                    portid = Sfn(int)(portid, 16)
                    adapter_identifier = self._compose_adapter_identifier(nodewwn, portwwn)
                    scsi_paths = scsi_path_by_adapter_identifier.get(adapter_identifier)
                    target_fcdescriptors = self._create_target_fchba_details(scsi_paths)
                    ports.append((fc_hba_model.FcPort(portid, portwwn,
                                                      porttype, None,
                                                      port_speed),
                                  target_fcdescriptors))
                except (command.ExecuteException, TypeError, ValueError), ex:
                    logger.debugException('Failed to create fcport data object')
            result[fchba].extend(ports)
        except (command.ExecuteException, TypeError, ValueError), ex:
            logger.debugException('Failed to create fchba data object')
    # NOTE(review): `result` is built but never returned in the visible code;
    # the chunk appears truncated (a trailing `return result` is likely
    # missing) -- confirm against the full source.
def _countUniqueHosts(self, obj):
    """Count the distinct host strings among the filtered addresses of obj."""
    def _host_of(item):
        return str(item.address.host).strip()
    grouped = fptools.groupby(_host_of, self._filterAddresses(obj))
    return len(grouped.keys())