def scan_address_job(
    ip_address=None,
    plugins=None,
    results=None,
    automerge=AUTOMERGE_MODE,
    called_from_ui=False,
    **kwargs
):
    """The function that is actually running on the worker.

    Runs the requested scan ``plugins`` against ``ip_address``. When the
    full set of available plugins is used, also runs postprocessing:
    either automerging the results (``automerge``) or triggering the
    configured postprocess jobs (skipped when ``called_from_ui``).
    Returns the plugin results.
    """
    job = rq.get_current_job()
    available_plugins = getattr(settings, 'SCAN_PLUGINS', {}).keys()
    if not plugins:
        plugins = available_plugins
    # Postprocessing only makes sense for a full scan with every plugin.
    run_postprocessing = not (set(available_plugins) - set(plugins))
    if ip_address and plugins:
        if not kwargs:
            ip, created = IPAddress.concurrent_get_or_create(
                address=ip_address,
            )
            if not (ip.snmp_name and ip.snmp_community):
                message = "SNMP name/community is missing. Forcing autoscan."
                job.meta['messages'] = [
                    (ip_address, 'ralph.scan', 'info', message)
                ]
                job.save()
                autoscan_address(ip_address)
                # autoscan_address may update the SNMP fields on the
                # IPAddress row -- re-fetch it so the plugins receive the
                # fresh values instead of the stale, pre-autoscan ones.
                ip = IPAddress.objects.get(address=ip_address)
            kwargs = {
                'snmp_community': ip.snmp_community,
                'snmp_version': ip.snmp_version,
                'http_family': ip.http_family,
                'snmp_name': ip.snmp_name,
            }
        results = _run_plugins(ip_address, plugins, job, **kwargs)
    if run_postprocessing:
        _scan_postprocessing(results, job, ip_address)
        if automerge and job.meta.get('changed', True):
            # Run only when automerge mode is enabled and some change was
            # detected. When `change` state is not available just run it...
            save_job_results(job.id)
        elif not called_from_ui and job.args and job.meta.get('changed', True):
            # Run only when some change was detected. When `change` state is
            # not available just run it...
            try:
                ip_obj = IPAddress.objects.select_related().get(
                    address=job.args[0]  # job.args[0] == ip_address
                )
            except IPAddress.DoesNotExist:
                pass
            else:
                for plugin_name in getattr(
                    settings, 'SCAN_POSTPROCESS_ENABLED_JOBS', []
                ):
                    try:
                        module = import_module(plugin_name)
                    except ImportError as e:
                        logger.error(unicode(e))
                    else:
                        module.run_job(ip_obj)
    return results
def _run_ssh_catalyst(ip):
    """Discover a Cisco Catalyst switch over SSH and persist it.

    Reads the base MAC address and the hardware inventory, reuses the base
    device for stacked switches, attaches ``ip`` as the management address
    and returns the device name.
    """
    ssh = _connect_ssh(ip)
    try:
        mac = '\n'.join(ssh.cisco_command(
            "show version | include Base ethernet MAC Address"
        ))
        raw = '\n'.join(ssh.cisco_command("show inventory"))
    finally:
        ssh.close()
    mac = mac.strip()
    if mac.startswith("Base ethernet MAC Address") and ':' in mac:
        ethernets = [
            Eth(
                "Base ethernet MAC Address",
                mac.split(':', 1)[1].strip(),
                None,
            ),
        ]
    else:
        ethernets = None
    inventory = list(cisco_inventory(raw))
    serials = [inv['sn'] for inv in inventory]
    dev_inv = inventory[0]
    try:
        dev = Device.objects.get(sn__in=serials)
    except MultipleObjectsReturned:
        # Interpolate the serials into the message -- passing them as a
        # second positional argument to Error left the %r placeholder
        # unformatted in the raised exception.
        raise Error(
            "Stacked devices with serials %r should be merged." % (serials,),
        )
    except Device.DoesNotExist:
        sn = dev_inv['sn']
        model_name = 'Cisco %s' % dev_inv['pid']
    else:
        # This is a stacked device, use the base device for it
        sn = dev.sn
        model_name = dev.model.name
    dev = Device.create(
        ethernets=ethernets,
        sn=sn,
        model_name=model_name,
        model_type=DeviceType.switch,
        name=dev_inv['descr'][:255],
    )
    dev.save(update_last_seen=True)
    for inv in inventory:
        cisco_component(dev, inv)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    if created:
        ip_address.hostname = network.hostname(ip_address.address)
    ip_address.device = dev
    ip_address.is_management = True
    ip_address.save(update_last_seen=True)
    return dev.name
def attach_ip(dev, ip):
    """Attach the IP address"""
    record, _ = IPAddress.concurrent_get_or_create(address=ip)
    record.device = dev
    record.is_management = False
    record.save()
def _run_ssh_catalyst(ip):
    """Inventory a Cisco Catalyst switch over SSH and record it in the DB."""
    connection = _connect_ssh(ip)
    try:
        raw = "\n".join(connection.cisco_command("show inventory"))
    finally:
        connection.close()
    parts = list(cisco_inventory(raw))
    known_serials = [entry["sn"] for entry in parts]
    try:
        dev = Device.objects.get(sn__in=known_serials)
    except Device.DoesNotExist:
        # Not seen before -- create it from the first inventory entry.
        base = parts[0]
        dev = Device.create(
            sn=base["sn"],
            model_name="Cisco %s" % base["pid"],
            model_type=DeviceType.switch,
            name=base["descr"][:255],
        )
    dev.save(update_last_seen=True)
    for entry in parts:
        cisco_component(dev, entry)
    addr, was_created = IPAddress.concurrent_get_or_create(address=str(ip))
    if was_created:
        addr.hostname = network.hostname(addr.address)
    addr.device = dev
    addr.is_management = True
    addr.save(update_last_seen=True)
    return dev.name
def _save_shares(dev, luns, mounts):
    """Persist disk-share mounts served by *dev* and prune stale ones.

    ``luns`` maps LUN names (ending in ``_<wwn-suffix>`` -- TODO confirm
    against the caller) to volume names; ``mounts`` maps volumes to client
    addresses.
    """
    wwns = []
    for lun, volume in luns.iteritems():
        rest, wwn_end = lun.rsplit('_', 1)
        try:
            share = DiskShare.objects.get(wwn__endswith=wwn_end)
        except DiskShare.DoesNotExist:
            # Unknown share -- skip silently.
            continue
        wwns.append(share.wwn)
        clients = mounts.get(volume, [])
        for client in clients:
            ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=client)
            mount, created = DiskShareMount.concurrent_get_or_create(
                address=ipaddr, device=ipaddr.device, share=share, server=dev)
            mount.volume = volume
            mount.save(update_last_seen=True)
        if not clients:
            # Record the share with no client attached.
            mount, created = DiskShareMount.concurrent_get_or_create(
                address=None, device=None, share=share, server=dev)
            mount.volume = volume
            mount.save(update_last_seen=True)
    # Remove mounts for shares this server no longer exposes.
    for mount in DiskShareMount.objects.filter(
        server=dev
    ).exclude(
        share__wwn__in=wwns
    ):
        mount.delete()
def run_idrac(ip):
    """Scan a Dell iDRAC controller at *ip* and register the server.

    Creates/updates the Device with its ethernets, CPU, memory, storage and
    FC cards, and attaches *ip* as the management address. Returns the
    detected model name.
    """
    idrac = IDRAC(ip)
    base_info = idrac.get_base_info()
    # Strip the " Inc." suffix from the manufacturer name, e.g. "Dell Inc.".
    model_name = "{} {}".format(
        base_info['manufacturer'].replace(" Inc.", ""),
        base_info['model']
    )
    ethernets = _save_ethernets(idrac.get_ethernets())
    ip_address, _ = IPAddress.concurrent_get_or_create(address=ip)
    ip_address.is_management = True
    ip_address.save()
    dev = Device.create(
        ethernets=ethernets,
        model_name=model_name,
        sn=base_info['sn'],
        model_type=DeviceType.rack_server,
    )
    dev.management = ip_address
    dev.save(priority=SAVE_PRIORITY)
    # Link the IP back to the device only after the device exists.
    ip_address.device = dev
    ip_address.save()
    _save_cpu(dev, idrac.get_cpu())
    _save_memory(dev, idrac.get_memory())
    _save_storage(dev, idrac.get_storage())
    _save_fc_cards(dev, idrac.get_fc_cards())
    return model_name
def perform_move(self, address, new_ip, new_hostname):
    """Move a device from ``address`` to ``new_ip``/``new_hostname``.

    Deletes DNS and DHCP entries for the old address (remembering the MAC),
    detaches the old IPAddress, resets DNS for the new name, rebinds the
    device to the new IPAddress and recreates the DHCP entry.
    """
    old_ipaddress = IPAddress.objects.get(address=address)
    device = old_ipaddress.device
    mac = None
    # Drop every DNS record that points at the old hostname or address.
    for r in Record.objects.filter(
        db.Q(name=old_ipaddress.hostname) |
        db.Q(content=old_ipaddress.hostname) |
        db.Q(content=old_ipaddress.address)
    ):
        r.delete()
    # Remove old DHCP entries; keep the last MAC seen to recreate the lease.
    for e in DHCPEntry.objects.filter(ip=old_ipaddress.address):
        mac = e.mac
        e.delete()
    old_ipaddress.device = None
    old_ipaddress.save()
    reset_dns(new_hostname, new_ip)
    new_ipaddress, c = IPAddress.concurrent_get_or_create(
        address=new_ip,
    )
    new_ipaddress.device = device
    new_ipaddress.hostname = new_hostname
    new_ipaddress.save()
    if mac:
        entry = DHCPEntry(ip=new_ip, mac=mac)
        entry.save()
    # Refresh cached pricing data for the moved device.
    pricing.device_update_cached(device)
def run_http(ip):
    """Detect the HTTP server family running at *ip* and store it."""
    headers, document = get_http_info(ip)
    detected_family = guess_family(headers, document)
    record, _ = IPAddress.concurrent_get_or_create(address=ip)
    record.http_family = detected_family
    record.save(update_last_seen=True)
    return detected_family
def make_device(ilo, ip):
    """Create a Device from iLO data and attach *ip* as its management IP."""
    device_type = (
        DeviceType.blade_server
        if ilo.model.startswith('HP ProLiant BL')
        else DeviceType.rack_server
    )
    eths = [Eth(label, mac, speed=None) for label, mac in ilo.ethernets]
    dev = Device.create(
        ethernets=eths,
        model_name=ilo.model,
        model_type=device_type,
        sn=ilo.sn,
        name=ilo.name,
        mgmt_firmware=ilo.firmware,
    )
    dev.save(update_last_seen=True, priority=SAVE_PRIORITY)
    addr, _ = IPAddress.concurrent_get_or_create(address=ip)
    addr.device = dev
    addr.is_management = True
    addr.save()
    # Blades inside an enclosure inherit the enclosure's management address.
    if dev.parent and dev.parent.management:
        dev.management = dev.parent.management
    else:
        dev.management = addr
    dev.save(priority=SAVE_PRIORITY)
    return dev
def _get_master(ssh, data=None):
    """Parse /etc/pve/cluster.cfg and return the master node's IPAddress.

    ``data`` may be passed in to skip the SSH read. Returns None implicitly
    when no node named ``master*`` with an ``IP`` entry is found.
    """
    if data is None:
        stdin, stdout, stderr = ssh.exec_command("cat /etc/pve/cluster.cfg")
        data = stdout.read()
    nodes = {}
    current_node = None
    # cluster.cfg is a block format: "<node-name> {", "key: value" lines, "}".
    for line in data.splitlines():
        line = line.strip()
        if line.endswith("{"):
            current_node = line.replace("{", "").strip()
            nodes[current_node] = {}
        elif line.endswith("}"):
            current_node = None
        elif ":" in line and current_node:
            key, value = (v.strip() for v in line.split(":", 1))
            nodes[current_node][key] = value
    for node, pairs in nodes.iteritems():
        is_master = node.startswith("master")
        try:
            ip = pairs["IP"]
        except KeyError:
            # Node entry without an IP -- skip it.
            continue
        if is_master:
            ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=ip)
            ipaddr.save()
            return ipaddr
def _run_ssh_catalyst(ip):
    """Read "show inventory" from a Catalyst switch and persist the device."""
    client = _connect_ssh(ip)
    try:
        output = '\n'.join(client.cisco_command("show inventory"))
    finally:
        client.close()
    items = list(cisco_inventory(output))
    sns = [item['sn'] for item in items]
    try:
        device = Device.objects.get(sn__in=sns)
    except Device.DoesNotExist:
        first = items[0]
        device = Device.create(
            sn=first['sn'],
            model_name='Cisco %s' % first['pid'],
            model_type=DeviceType.switch,
            name=first['descr'][:255],
        )
    device.save(update_last_seen=True)
    for item in items:
        cisco_component(device, item)
    address, fresh = IPAddress.concurrent_get_or_create(address=str(ip))
    if fresh:
        address.hostname = network.hostname(address.address)
    address.device = device
    address.is_management = True
    address.save(update_last_seen=True)
    return device.name
def scan_address_job(
    ip_address=None,
    plugins=None,
    results=None,
    automerge=AUTOMERGE_MODE,
    **kwargs
):
    """
    The function that is actually running on the worker.

    Runs the given scan plugins against ``ip_address`` and, when all
    available plugins were requested, runs postprocessing and (optionally)
    automerges the results. Returns the plugin results.
    """
    job = rq.get_current_job()
    available_plugins = getattr(settings, 'SCAN_PLUGINS', {}).keys()
    if not plugins:
        plugins = available_plugins
    # Postprocessing only applies to a full scan with every available plugin.
    run_postprocessing = not (set(available_plugins) - set(plugins))
    if ip_address and plugins:
        if not kwargs:
            # No explicit SNMP/HTTP hints -- take them from the stored row.
            ip, created = IPAddress.concurrent_get_or_create(
                address=ip_address,
            )
            kwargs = {
                'snmp_community': ip.snmp_community,
                'snmp_version': ip.snmp_version,
                'http_family': ip.http_family,
                'snmp_name': ip.snmp_name,
            }
        results = _run_plugins(ip_address, plugins, job, **kwargs)
    if run_postprocessing:
        _scan_postprocessing(results, job, ip_address)
        # Run only when automerge mode is enabled and some change was detected.
        # When `change` state is not available just run it...
        if automerge and job.meta.get('changed', True):
            save_job_results(job.id)
    return results
def _cisco_snmp_model(model_oid, sn_oid, **kwargs):
    """Query a Cisco switch over SNMP for its model and serial number.

    Creates the Device and its management IPAddress on success. Returns the
    plugin triple ``(matched, message, kwargs)``.
    """
    ip = str(kwargs['ip'])
    version = kwargs.get('snmp_version')
    if version == '3':
        community = SNMP_V3_AUTH
    else:
        community = str(kwargs['community'])
    model = snmp_command(
        ip,
        community,
        model_oid,
        attempts=2,
        timeout=3,
        snmp_version=version,
    )
    sn = snmp_command(
        ip,
        community,
        sn_oid,
        attempts=2,
        timeout=3,
        snmp_version=version,
    )
    if not (model and sn):
        # Host didn't answer either OID.
        return False, "silent.", kwargs
    sn = unicode(sn[0][1])
    model = 'Cisco %s' % unicode(model[0][1])
    dev = Device.create(sn=sn, model_name=model, model_type=DeviceType.switch)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device = dev
    ip_address.is_management = True
    ip_address.save()
    return True, sn, kwargs
def _save_shares(dev, luns, mounts):
    """Persist disk-share mounts served by *dev* and remove stale ones.

    ``luns`` maps LUN names (suffix after the last ``_`` is a WWN fragment --
    TODO confirm with the caller) to volumes; ``mounts`` maps volumes to
    client addresses.
    """
    wwns = []
    for lun, volume in luns.iteritems():
        rest, wwn_end = lun.rsplit('_', 1)
        try:
            share = DiskShare.objects.get(wwn__endswith=wwn_end)
        except DiskShare.DoesNotExist:
            # Unknown share -- ignore this LUN.
            continue
        wwns.append(share.wwn)
        clients = mounts.get(volume, [])
        for client in clients:
            ipaddr, ip_created = IPAddress.concurrent_get_or_create(
                address=client)
            mount, created = DiskShareMount.concurrent_get_or_create(
                address=ipaddr, device=ipaddr.device, share=share, server=dev)
            mount.volume = volume
            mount.save(update_last_seen=True)
        if not clients:
            # Keep a record of the share even when nothing mounts it.
            mount, created = DiskShareMount.concurrent_get_or_create(
                address=None, device=None, share=share, server=dev)
            mount.volume = volume
            mount.save(update_last_seen=True)
    # Drop mounts for shares this server no longer exposes.
    for mount in DiskShareMount.objects.filter(server=dev).exclude(
            share__wwn__in=wwns):
        mount.delete()
def run_http_ibm_system_x(ip):
    """Scan an IBM System X server through its management module's wsman API.

    Registers the device with its ethernets, then reconciles memory and
    processor components against what the endpoint reports. Returns the
    detected model name.
    """
    session_id = get_session_id(ip)
    management_url = "http://%s/wsman" % ip
    model_name = get_model_name(management_url, session_id)
    sn = get_sn(management_url, session_id)
    macs = get_mac_addresses(management_url, session_id)
    ethernets = [Eth(label=label, mac=mac, speed=0) for (label, mac) in macs]
    ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=ip)
    ipaddr.is_management = True
    ipaddr.save()
    dev = Device.create(
        ethernets=ethernets,
        model_name=model_name,
        sn=sn,
        model_type=DeviceType.rack_server,
    )
    dev.management = ipaddr
    dev.save(priority=SAVE_PRIORITY)
    ipaddr.device = dev
    ipaddr.save()
    detected_memory = get_memory(management_url, session_id)
    detected_memory_indexes = [x.get('index') for x in detected_memory]
    # Remove memory banks no longer reported by the endpoint.
    for m in dev.memory_set.exclude(index__in=detected_memory_indexes):
        m.delete()
    for m in detected_memory:
        index = m['index']
        mem, _ = Memory.concurrent_get_or_create(index=index, device=dev)
        mem.label = m['label']
        mem.size = m['size']
        # First save persists label/size; the second save below attaches the
        # component model built from those values.
        mem.save(priority=SAVE_PRIORITY)
        mem.model, c = ComponentModel.concurrent_get_or_create(
            name='RAM %s %dMiB' % (mem.label, mem.size),
            size=mem.size,
            type=ComponentType.memory.id,
            family=mem.label,
            cores=0
        )
        mem.save(priority=SAVE_PRIORITY)
    detected_processors = get_processors(management_url, session_id)
    detected_processors_keys = [x.get('index') for x in detected_processors]
    # Remove processors no longer reported.
    for cpu in dev.processor_set.exclude(index__in=detected_processors_keys):
        cpu.delete()
    # add new
    for p in detected_processors:
        processor_model, _ = ComponentModel.concurrent_get_or_create(
            name=p.get('label'),
            speed=p.get('speed'),
            type=ComponentType.processor.id,
            family=p.get('family'),
            cores=p.get('cores')
        )
        processor, _ = Processor.concurrent_get_or_create(
            device=dev,
            index=p.get('index'),
        )
        processor.label = p.get('label')
        processor.model = processor_model
        processor.speed = p.get('speed')
        processor.save()
    return model_name
def run_http_ibm_system_x(ip):
    """Scan an IBM System X server through its management module's wsman API.

    Registers the device with its ethernets, then reconciles memory and
    processor components against what the endpoint reports. Returns the
    detected model name.
    """
    session_id = get_session_id(ip)
    management_url = "http://%s/wsman" % ip
    model_name = get_model_name(management_url, session_id)
    sn = get_sn(management_url, session_id)
    macs = get_mac_addresses(management_url, session_id)
    ethernets = [Eth(label=label, mac=mac, speed=0) for (label, mac) in macs]
    ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=ip)
    ipaddr.is_management = True
    ipaddr.save()
    dev = Device.create(
        ethernets=ethernets,
        model_name=model_name,
        sn=sn,
        model_type=DeviceType.rack_server,
    )
    dev.management = ipaddr
    dev.save(priority=SAVE_PRIORITY)
    ipaddr.device = dev
    ipaddr.save()
    detected_memory = get_memory(management_url, session_id)
    detected_memory_indexes = [x.get('index') for x in detected_memory]
    # Remove memory banks no longer reported by the endpoint.
    for m in dev.memory_set.exclude(index__in=detected_memory_indexes):
        m.delete()
    for m in detected_memory:
        index = m['index']
        mem, _ = Memory.concurrent_get_or_create(index=index, device=dev)
        mem.label = m['label']
        mem.size = m['size']
        mem.model, c = ComponentModel.create(
            ComponentType.memory,
            size=mem.size,
            priority=SAVE_PRIORITY,
        )
        mem.save(priority=SAVE_PRIORITY)
    detected_processors = get_processors(management_url, session_id)
    detected_processors_keys = [x.get('index') for x in detected_processors]
    # Remove processors no longer reported.
    for cpu in dev.processor_set.exclude(index__in=detected_processors_keys):
        cpu.delete()
    # add new
    for p in detected_processors:
        processor_model, _ = ComponentModel.create(
            ComponentType.processor,
            speed=p.get('speed'),
            family=p.get('family'),
            cores=p.get('cores'),
            name=p.get('label'),
            priority=SAVE_PRIORITY,
        )
        processor, _ = Processor.concurrent_get_or_create(
            device=dev,
            index=p.get('index'),
        )
        processor.label = p.get('label')
        processor.model = processor_model
        processor.speed = p.get('speed')
        processor.save(priority=SAVE_PRIORITY)
    return model_name
def scan_address_job(ip_address=None, plugins=None, results=None,
                     automerge=AUTOMERGE_MODE, called_from_ui=False,
                     **kwargs):
    """The function that is actually running on the worker.

    Runs the requested scan plugins against ``ip_address``. When every
    available plugin was requested, also runs postprocessing: automerge
    of results, or (when not called from the UI) the configured
    postprocess jobs. Returns the plugin results.
    """
    job = rq.get_current_job()
    available_plugins = getattr(settings, 'SCAN_PLUGINS', {}).keys()
    if not plugins:
        plugins = available_plugins
    # Postprocessing only applies to a full scan with all plugins.
    run_postprocessing = not (set(available_plugins) - set(plugins))
    if ip_address and plugins:
        if not kwargs:
            ip, created = IPAddress.concurrent_get_or_create(
                address=ip_address,
            )
            if not (ip.snmp_name and ip.snmp_community):
                message = ("SNMP name and community is missing. Forcing "
                           " autoscan.")
                job.meta['messages'] = [(ip_address, 'ralph.scan', 'info',
                                         message)]
                job.save()
                autoscan_address(ip_address)
                # since autoscan_address can update some fields on IPAddress,
                # we need to refresh it here
                ip = IPAddress.objects.get(address=ip_address)
            kwargs = {
                'snmp_community': ip.snmp_community,
                'snmp_version': ip.snmp_version,
                'http_family': ip.http_family,
                'snmp_name': ip.snmp_name,
            }
        results = _run_plugins(ip_address, plugins, job, **kwargs)
    if run_postprocessing:
        _scan_postprocessing(results, job, ip_address)
        if automerge and job.meta.get('changed', True):
            # Run only when automerge mode is enabled and some change was
            # detected. When `change` state is not available just run it...
            save_job_results(job.id)
        elif not called_from_ui and job.args:
            try:
                ip_obj = IPAddress.objects.select_related().get(
                    address=job.args[0]  # job.args[0] == ip_address
                )
            except IPAddress.DoesNotExist:
                pass
            else:
                for plugin_name in getattr(settings,
                                           'SCAN_POSTPROCESS_ENABLED_JOBS',
                                           []):
                    try:
                        module = import_module(plugin_name)
                    except ImportError as e:
                        logger.error(unicode(e))
                    else:
                        module.run_job(ip_obj, plugins_results=results)
    return results
def save_device_data(data, remote_ip):
    """Create/update a Device from data reported by the discovery agent.

    Raises NoRequiredDataError when neither MACs nor a serial number are
    present, and NoRequiredIPAddressError when no IP address was reported.
    Returns the Device, or None if creation failed.
    """
    device = data['device']
    # Skip MACs from the blacklist of virtual/ephemeral prefixes.
    ethernets = [
        Eth(e.get('label'), MACAddressField.normalize(e.get('mac')),
            str_to_ethspeed(e.get('speed')))
        for e in data['ethernets']
        if MACAddressField.normalize(e.get('mac')) not in MAC_PREFIX_BLACKLIST]
    sn = device.get('sn')
    vendor = device.get('vendor', '')
    if not ethernets and not sn:
        raise NoRequiredDataError('No MAC addresses and no device SN.')
    ip_addresses = [
        e['ipaddress'] for e in data['ethernets'] if e.get('ipaddress')
    ]
    if not ip_addresses:
        raise NoRequiredIPAddressError(
            "Couldn't find any IP address for this device."
        )
    try:
        dev = Device.create(
            sn=sn,
            ethernets=ethernets,
            model_name='%s %s %s' % (
                device.get('caption'),
                vendor,
                device.get('version'),
            ),
            model_type=DeviceType.unknown,
            priority=SAVE_PRIORITY
        )
    except ValueError as e:
        DiscoveryWarning(
            message="Failed to create device: " + str(e),
            plugin=__name__,
            ip=str(remote_ip),
        ).save()
        return None
    dev.save(priority=SAVE_PRIORITY)
    os = data['operating_system']
    o = OperatingSystem.create(dev, os_name=os.get('label'), family='Windows',
                               priority=SAVE_PRIORITY)
    o.memory = int(os['memory'])
    o.storage = int(os['storage'])
    o.cores_count = int(os['corescount'])
    o.save(priority=SAVE_PRIORITY)
    for ip in ip_addresses:
        ip_address, _ = IPAddress.concurrent_get_or_create(address=str(ip))
        ip_address.device = dev
        ip_address.is_management = False
        ip_address.save()
    vendor = vendor.lower()
    is_virtual = any(virtual in vendor for virtual in CPU_VIRTUAL_LIST)
    save_processors(data['processors'], dev, is_virtual)
    save_memory(data['memory'], dev)
    save_storage(data['storage'], dev)
    # NOTE(review): save_shares receives the *last* ip_address from the loop
    # above -- presumably intentional; verify against the callee.
    save_shares(data['shares'], dev, ip_address)
    save_fibre_channel(data['fcs'], dev)
    save_software(data.get('software', []), dev)
    return dev
def save_device_data(data, remote_ip):
    """Create/update a Device from data reported by the discovery agent.

    Raises NoRequiredDataError when neither MACs nor a serial number are
    present, and NoRequiredIPAddressError when no IP address was reported.
    Returns the Device, or None if creation failed.
    """
    device = data['device']
    # Skip MACs from the blacklist of virtual/ephemeral prefixes.
    ethernets = [
        Eth(e.get('label'), MACAddressField.normalize(e.get('mac')),
            str_to_ethspeed(e.get('speed')))
        for e in data['ethernets']
        if MACAddressField.normalize(e.get('mac')) not in MAC_PREFIX_BLACKLIST
    ]
    sn = device.get('sn')
    vendor = device.get('vendor', '')
    if not ethernets and not sn:
        raise NoRequiredDataError('No MAC addresses and no device SN.')
    ip_addresses = [
        e['ipaddress'] for e in data['ethernets'] if e.get('ipaddress')
    ]
    if not ip_addresses:
        raise NoRequiredIPAddressError(
            "Couldn't find any IP address for this device.")
    try:
        dev = Device.create(sn=sn,
                            ethernets=ethernets,
                            model_name='%s %s %s' % (
                                device.get('caption'),
                                vendor,
                                device.get('version'),
                            ),
                            model_type=DeviceType.unknown,
                            priority=SAVE_PRIORITY)
    except ValueError as e:
        DiscoveryWarning(
            message="Failed to create device: " + str(e),
            plugin=__name__,
            ip=str(remote_ip),
        ).save()
        return None
    dev.save(priority=SAVE_PRIORITY)
    os = data['operating_system']
    o = OperatingSystem.create(dev, os_name=os.get('label'), family='Windows',
                               priority=SAVE_PRIORITY)
    o.memory = int(os['memory'])
    o.storage = int(os['storage'])
    o.cores_count = int(os['corescount'])
    o.save(priority=SAVE_PRIORITY)
    for ip in ip_addresses:
        ip_address, _ = IPAddress.concurrent_get_or_create(address=str(ip))
        ip_address.device = dev
        ip_address.is_management = False
        ip_address.save()
    vendor = vendor.lower()
    is_virtual = any(virtual in vendor for virtual in CPU_VIRTUAL_LIST)
    save_processors(data['processors'], dev, is_virtual)
    save_memory(data['memory'], dev)
    save_storage(data['storage'], dev)
    # NOTE(review): save_shares receives the *last* ip_address from the loop
    # above -- presumably intentional; verify against the callee.
    save_shares(data['shares'], dev, ip_address)
    save_fibre_channel(data['fcs'], dev)
    save_software(data.get('software', []), dev)
    return dev
def _add_dev_system(ip, pairs, parent, raw, counts, dev_id):
    """Register a blade-system device and its management IP address."""
    dev = _dev(DeviceType.blade_system, pairs, parent, raw)
    addr, fresh = IPAddress.concurrent_get_or_create(address=str(ip))
    if fresh:
        addr.hostname = network.hostname(addr.address)
    addr.device = dev
    addr.is_management = True  # FIXME: how do we know for sure?
    addr.save(update_last_seen=True)  # no priorities for IP addresses
    return dev
def _get_master(ssh, ip, data=None):
    """Return the IPAddress of the Proxmox cluster master.

    Reads /etc/pve/cluster.cfg (unless ``data`` is given). When the cluster
    config is empty, falls back to ``pvesh get /nodes`` and uses the first
    node's DNS entry; with neither source available, ``ip`` itself is used.
    Returns None when a cluster config exists but no master node with an IP
    can be found in it.
    """
    if data is None:
        stdin, stdout, stderr = ssh.exec_command("cat /etc/pve/cluster.cfg")
        data = stdout.read()
    if not data:
        # No cluster.cfg -- ask the pve API for the node list instead.
        stdin, stdout, stderr = ssh.exec_command("pvesh get /nodes")
        data = stdout.read()
        if data:
            nodes = json.loads(data)
            for node in nodes:
                node_name = node['node']
                stdin, stdout, stderr = ssh.exec_command(
                    'pvesh get "/nodes/%s/dns"' % node_name)
                dns = json.loads(stdout.read())
                ip = dns['dns1']
                break
        # Previously the IPAddress was only created when pvesh returned no
        # data, so a successfully derived DNS address was discarded and the
        # JSON fell through to the cluster.cfg parser below. Always return
        # the address in this fallback branch.
        ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=ip)
        ipaddr.save()
        return ipaddr
    nodes = {}
    current_node = None
    # cluster.cfg is a block format: "<node-name> {", "key: value" lines, "}".
    for line in data.splitlines():
        line = line.strip()
        if line.endswith('{'):
            current_node = line.replace('{', '').strip()
            nodes[current_node] = {}
        elif line.endswith('}'):
            current_node = None
        elif ':' in line and current_node:
            key, value = (v.strip() for v in line.split(':', 1))
            nodes[current_node][key] = value
    for node, pairs in nodes.iteritems():
        is_master = node.startswith('master')
        try:
            ip = pairs['IP']
        except KeyError:
            continue
        if is_master:
            ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=ip)
            ipaddr.save()
            return ipaddr
def _save_device(ip, name, model_name, sn, macs):
    """Create a storage Device with the given MACs and management IP."""
    eths = [Eth(mac=mac, label='MAC', speed=0) for mac in macs]
    dev = Device.create(
        sn=sn,
        model_name=model_name,
        ethernets=eths,
        model_type=DeviceType.storage,
        name=name,
    )
    mgmt, _ = IPAddress.concurrent_get_or_create(address=ip)
    mgmt.device = dev
    mgmt.is_management = True
    mgmt.save()
    dev.management = mgmt
    dev.save()
    return dev
def clean(deployment_id):
    """Prepare an existing device for deployment by cleaning old
    information."""
    deployment = Deployment.objects.get(id=deployment_id)
    if deployment.status != DeploymentStatus.open:
        # Nothing to do for closed/in-progress deployments.
        return True
    do_clean(deployment.device, deployment.user)
    addr, _ = IPAddress.concurrent_get_or_create(address=deployment.ip)
    addr.device = deployment.device
    addr.hostname = deployment.hostname
    addr.save()
    return True
def clean(deployment_id):
    """Reset stale data on a device that is about to be redeployed."""
    deployment = Deployment.objects.get(id=deployment_id)
    if deployment.status == DeploymentStatus.open:
        do_clean(deployment.device, deployment.user)
        ip_record, _ = IPAddress.concurrent_get_or_create(
            address=deployment.ip,
        )
        ip_record.device = deployment.device
        ip_record.hostname = deployment.hostname
        ip_record.save()
    return True
def run_ssh_asa(ip):
    """Discover a Cisco ASA firewall over SSH and register it.

    Parses "show version" for the hardware line, serial and interface MACs,
    and "show inventory" for components. Returns the model string.
    """
    ssh = _connect_ssh(ip)
    try:
        lines = ssh.asa_command(
            "show version | grep (^Hardware|Boot microcode|^Serial|address is)"
        )
        raw_inventory = '\n'.join(ssh.asa_command("show inventory"))
    finally:
        ssh.close()
    pairs = parse.pairs(lines=[line.strip() for line in lines])
    sn = pairs.get('Serial Number', None)
    # The Hardware line looks like "<model>, <ram>, <cpu>" -- TODO confirm.
    model, ram, cpu = pairs['Hardware'].split(',')
    boot_firmware = pairs['Boot microcode']
    ethernets = []
    # Interface lines are numbered; stop at the first missing index.
    for i in xrange(99):
        try:
            junk, label, mac = pairs['%d' % i].split(':')
        except KeyError:
            break
        mac = mac.split(',', 1)[0]
        mac = mac.replace('address is', '')
        mac = mac.replace('.', '').upper().strip()
        label = label.strip()
        ethernets.append(Eth(label, mac, speed=None))
    dev = Device.create(ethernets=ethernets, sn=sn, model_name=model,
                        model_type=DeviceType.firewall,
                        boot_firmware=boot_firmware)
    dev.save(update_last_seen=True)
    inventory = list(cisco_inventory(raw_inventory))
    for inv in inventory:
        cisco_component(dev, inv)
    ipaddr, created = IPAddress.concurrent_get_or_create(address=ip)
    ipaddr.device = dev
    ipaddr.is_management = True
    ipaddr.save()
    # Make sure every parsed interface exists as an Ethernet record.
    for label, mac, speed in ethernets:
        eth, created = Ethernet.concurrent_get_or_create(
            mac=mac, defaults={'device': dev},
        )
        eth.label = label
        eth.device = dev
        eth.save()
    return model
def _save_device(ip, name, model_name, sn, mac):
    """Create/update an Onstor storage device and its management IP."""
    model, _ = DeviceModel.concurrent_get_or_create(
        name='Onstor %s' % model_name,
        type=DeviceType.storage.id,
    )
    dev, _ = Device.concurrent_get_or_create(sn=sn, model=model)
    dev.save()
    mgmt, _ = IPAddress.concurrent_get_or_create(address=ip)
    mgmt.device = dev
    mgmt.is_management = True
    mgmt.save(update_last_seen=True)
    dev.management = mgmt
    dev.save(update_last_seen=True)
    return dev
def _save_device(ip, name, model_name, sn, mac):
    """Register an Onstor storage device with *ip* as its management IP."""
    device_model, was_model_created = DeviceModel.concurrent_get_or_create(
        name='Onstor %s' % model_name, type=DeviceType.storage.id)
    device, was_device_created = Device.concurrent_get_or_create(
        sn=sn, model=device_model)
    device.save()
    management_ip, was_ip_created = IPAddress.concurrent_get_or_create(
        address=ip)
    management_ip.device = device
    management_ip.is_management = True
    management_ip.save(update_last_seen=True)
    device.management = management_ip
    device.save(update_last_seen=True)
    return device
def assign_ips(dev, ip_addresses):
    """Make *dev* own exactly the given non-management IP addresses."""
    wanted = {str(ip) for ip in ip_addresses}
    # Detach addresses the device no longer has.
    for record in IPAddress.objects.filter(device=dev, is_management=False):
        if record.address not in wanted:
            record.device = None
            record.save()
    # Attach (or refresh) every wanted address.
    for address in wanted:
        record, _ = IPAddress.concurrent_get_or_create(address=address)
        record.device = dev
        record.last_puppet = datetime.datetime.now()
        record.save()
def _add_cluster_member(ssh, ip):
    """Create a Device for a Proxmox cluster member reachable at *ip*."""
    stdin, stdout, stderr = ssh.exec_command("ifconfig eth0 | head -n 1")
    mac = stdout.readline().split()[-1]
    member = Device.create(
        ethernets=[Eth(label='eth0', mac=mac, speed=0)],
        model_name='Proxmox',
        model_type=DeviceType.unknown,
    )
    Software.create(
        member, 'proxmox', 'Proxmox', family='Virtualization',
    ).save()
    record, _ = IPAddress.concurrent_get_or_create(address=ip)
    record.is_management = False
    record.device = member
    record.save()
    return member
def _save_device(ip, name, model_name, sn):
    """Create a 3PAR storage Device and attach *ip* as its management IP."""
    model, _ = DeviceModel.concurrent_get_or_create(
        name='3PAR %s' % model_name,
        defaults={'type': DeviceType.storage.id},
    )
    dev = Device.create(sn=sn, model=model)
    mgmt_ip, _ = IPAddress.concurrent_get_or_create(address=ip)
    mgmt_ip.device = dev
    mgmt_ip.is_management = True
    mgmt_ip.save()
    dev.management = mgmt_ip
    dev.save()
    return dev
def _run_ipmi(ip):
    """Scan the IPMI controller at *ip* and register it as a Device.

    Returns the product name read from the FRU inventory. Raises
    AnswerError when the FRU data has no usable top-level entry.
    """
    # Try anonymous login first, then fall back to common ADMIN credentials.
    try:
        ipmi = IPMI(ip)
        fru = ipmi.get_fru()
    except AuthError:
        try:
            ipmi = IPMI(ip, 'ADMIN')
            fru = ipmi.get_fru()
        except AuthError:
            ipmi = IPMI(ip, 'ADMIN', 'ADMIN')
            fru = ipmi.get_fru()
    mc = ipmi.get_mc()
    top = fru['/SYS']
    if not top:
        top = fru['Builtin FRU Device']
    if not top:
        raise AnswerError('Incompatible answer.')
    name, name_clean = _clean(top['Product Name'])
    sn, sn_clean = _clean(top['Product Serial'])
    # Blacklisted serials (placeholders/duplicates) are treated as unknown.
    if sn in SERIAL_BLACKLIST:
        sn = None
    model_type = DeviceType.rack_server
    if name.lower().startswith('ipmi'):
        model_type = DeviceType.unknown
    mac = ipmi.get_mac()
    if mac:
        ethernets = [Eth(label='IPMI MAC', mac=mac, speed=0)]
    else:
        ethernets = []
    ethernets.extend(_get_ipmi_ethernets(fru))
    dev = Device.create(ethernets=ethernets, priority=SAVE_PRIORITY, sn=sn,
                        model_name=name.title(), model_type=model_type)
    firmware = mc.get('Firmware Revision')
    if firmware:
        dev.mgmt_firmware = 'rev %s' % firmware
    _add_ipmi_lan(dev, mac)
    _add_ipmi_components(dev, fru)
    dev.save(update_last_seen=True, priority=SAVE_PRIORITY)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device = dev
    ip_address.is_management = True
    if created:
        ip_address.hostname = network.hostname(ip_address.address)
    ip_address.snmp_name = name
    ip_address.save(update_last_seen=True)  # no priorities for IP addresses
    return name
def ping(**kwargs):
    """Plugin step: check whether the host answers ICMP ping and refresh
    its hostname/DNS info."""
    ip = kwargs['ip']
    alive = network.ping(str(ip)) is not None
    message = 'up!' if alive else 'down.'
    record, _ = IPAddress.concurrent_get_or_create(address=str(ip))
    hostname = network.hostname(ip)
    if hostname:
        record.hostname = hostname
        record.dns_info = '\n'.join(network.descriptions(hostname))
    kwargs['community'] = record.snmp_community
    record.save(update_last_seen=True)
    return alive, message, kwargs
def save_device_data(data, remote_ip):
    """Create/update a Device from data reported by the discovery agent.

    Raises NoRequiredDataError when neither MACs nor a serial number are
    present, and NoRequiredIPAddressError when no IP address was reported.
    Returns the Device, or None if creation failed.
    """
    device = data["device"]
    # Skip MACs from the blacklist of virtual/ephemeral prefixes.
    ethernets = [
        Eth(e.get("label"), MACAddressField.normalize(e.get("mac")),
            str_to_ethspeed(e.get("speed")))
        for e in data["ethernets"]
        if MACAddressField.normalize(e.get("mac")) not in MAC_PREFIX_BLACKLIST
    ]
    sn = device.get("sn")
    vendor = device.get("vendor", "")
    if not ethernets and not sn:
        raise NoRequiredDataError("No MAC addresses and no device SN.")
    ip_addresses = [e["ipaddress"]
                    for e in data["ethernets"] if e.get("ipaddress")]
    if not ip_addresses:
        raise NoRequiredIPAddressError(
            "Couldn't find any IP address for this device.")
    try:
        dev = Device.create(
            sn=sn,
            ethernets=ethernets,
            model_name="%s %s %s" % (device.get("caption"), vendor,
                                     device.get("version")),
            model_type=DeviceType.unknown,
            priority=SAVE_PRIORITY,
        )
    except ValueError as e:
        DiscoveryWarning(message="Failed to create device: " + str(e),
                         plugin=__name__, ip=str(remote_ip)).save()
        return None
    dev.save(priority=SAVE_PRIORITY)
    os = data["operating_system"]
    o = OperatingSystem.create(dev, os_name=os.get("label"), family="Windows",
                               priority=SAVE_PRIORITY)
    o.memory = int(os["memory"])
    o.storage = int(os["storage"])
    o.cores_count = int(os["corescount"])
    o.save(priority=SAVE_PRIORITY)
    for ip in ip_addresses:
        ip_address, _ = IPAddress.concurrent_get_or_create(address=str(ip))
        ip_address.device = dev
        ip_address.is_management = False
        ip_address.save()
    vendor = vendor.lower()
    is_virtual = any(virtual in vendor for virtual in CPU_VIRTUAL_LIST)
    save_processors(data["processors"], dev, is_virtual)
    save_memory(data["memory"], dev)
    save_storage(data["storage"], dev)
    # NOTE(review): save_shares receives the *last* ip_address from the loop
    # above -- presumably intentional; verify against the callee.
    save_shares(data["shares"], dev, ip_address)
    save_fibre_channel(data["fcs"], dev)
    save_software(data.get("software", []), dev)
    return dev
def ping(**kwargs):
    """Plugin step: probe the address with ICMP and refresh its DNS info."""
    ip = kwargs['ip']
    if network.ping(str(ip)) is None:
        is_up, message = False, 'down.'
    else:
        is_up, message = True, 'up!'
    ip_record, _ = IPAddress.concurrent_get_or_create(
        address=str(ip))
    resolved = network.hostname(ip)
    if resolved:
        ip_record.hostname = resolved
        ip_record.dns_info = '\n'.join(network.descriptions(resolved))
    kwargs['community'] = ip_record.snmp_community
    ip_record.save(update_last_seen=True)
    return is_up, message, kwargs
def nortel_snmp(**kwargs):
    """SNMP discovery plugin for Nortel/BNT blade switches.

    Matches on the SNMP name, reads the serial number (falling back to the
    UUID OID), and registers the switch with its management IP. Returns the
    plugin triple ``(matched, message, kwargs)``.
    """
    sn_oid = (1, 3, 6, 1, 4, 1, 1872, 2, 5, 1, 3, 1, 18, 0)
    uuid_oid = (1, 3, 6, 1, 4, 1, 1872, 2, 5, 1, 3, 1, 17, 0)
    substrings = ["nortel layer2-3 gbe switch",
                  "bnt layer 2/3 copper gigabit ethernet "
                  "switch module for ibm bladecenter"]
    snmp_name = kwargs.get('snmp_name', '')
    if not (snmp_name and any(substring in kwargs['snmp_name'].lower()
                              for substring in substrings)):
        return False, "no match.", kwargs
    ip = str(kwargs['ip'])
    version = kwargs.get('snmp_version')
    if version == '3':
        community = SNMP_V3_AUTH
    else:
        community = str(kwargs['community'])
    # Fall back to the UUID OID when the serial-number OID is empty.
    sn = (
        snmp_command(
            ip,
            community,
            sn_oid,
            attempts=2,
            timeout=3,
            snmp_version=version,
        ) or snmp_command(
            ip,
            community,
            uuid_oid,
            attempts=2,
            timeout=3,
            snmp_version=version,
        )
    )
    if not sn:
        return False, "silent.", kwargs
    sn = unicode(sn[0][1])
    model = kwargs['snmp_name']
    dev = Device.create(sn=sn, model_name=model, model_type=DeviceType.switch)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device = dev
    ip_address.is_management = True
    ip_address.save()
    kwargs['model'] = model
    kwargs['sn'] = sn
    return True, sn, kwargs
def puppet(**kwargs):
    """Discovery plugin: import device data from the Puppet facts database.

    Looks up facts by IP set, then by hostname set; parses either the lshw
    fact or the plain facts. Returns the plugin triple
    ``(matched, message, kwargs)``.
    """
    if not settings.PUPPET_DB_URL:
        return False, "not configured", kwargs
    ip = str(kwargs['ip'])
    ip_set, hostname_set = get_ip_hostname_sets(ip)
    db = connect_db()
    facts = get_all_facts_by_ip_set(db, ip_set)
    if not facts and hostname_set:
        facts = get_all_facts_by_hostname_set(db, hostname_set)
    if not facts:
        return False, "host config not found.", kwargs
    try:
        is_virtual = is_host_virtual(facts)
        try:
            lshw = facts['lshw']
        except KeyError:
            # No lshw fact available -- fall back to plain facts parsing.
            dev, dev_name = parse_facts(facts, is_virtual)
        else:
            dev, dev_name = parse_lshw(lshw, facts, is_virtual)
    except MySQLdb.OperationalError as e:
        # 1205 = lock wait timeout, 1213 = deadlock: retry the plugin after
        # a short random back-off instead of failing the whole scan.
        if e.args[0] in (1205, 1213) and 'try restarting transaction' in e.args[1]:
            time.sleep(random.choice(range(10)) + 1)
            raise plugin.Restart(unicode(e), kwargs)
        raise
    if not dev:
        return False, dev_name, kwargs
    parse_wwn(facts, dev)
    parse_smartctl(facts, dev)
    parse_hpacu(facts, dev)
    parse_megaraid(facts, dev)
    parse_uptime(facts, dev)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device, message = dev, dev_name
    if created:
        ip_address.hostname = network.hostname(ip_address.address)
    ip_address.last_puppet = datetime.datetime.now()
    ip_address.save(update_last_seen=True)  # no priorities for IP addresses
    handle_facts_os(dev, facts, is_virtual)
    return True, message, kwargs
def run_ssh_proxmox(ip):
    """Scan a Proxmox node over SSH; register its cluster member and VMs.

    Raises ``NotProxmoxError`` when neither a cluster nor a storage
    config is present on the host.
    """
    ssh = _connect_ssh(ip)
    try:
        stdin, stdout, stderr = ssh.exec_command("cat /etc/pve/cluster.cfg")
        cluster_cfg = stdout.read()
        if cluster_cfg != "":
            master = _get_master(ssh, cluster_cfg)
        else:
            # No cluster config: a standalone node still has a storage
            # config; anything else is not a Proxmox host at all.
            stdin, stdout, stderr = ssh.exec_command(
                "cat /etc/pve/storage.cfg"
            )
            if stdout.read() == "":
                raise NotProxmoxError("this is not a PROXMOX server.")
            master, ip_created = IPAddress.concurrent_get_or_create(
                address=ip,
            )
        member = _add_cluster_member(ssh, ip)
        _add_virtual_machines(ssh, member, master)
    finally:
        ssh.close()
    return member.sn or member.name
def nortel_snmp(**kwargs):
    """SNMP detection of Nortel/BNT switches.

    NOTE(review): this appears to duplicate another ``nortel_snmp``
    definition in this file — confirm which one is actually registered.
    """
    sn_oid = (1, 3, 6, 1, 4, 1, 1872, 2, 5, 1, 3, 1, 18, 0)
    uuid_oid = (1, 3, 6, 1, 4, 1, 1872, 2, 5, 1, 3, 1, 17, 0)
    substrings = [
        "nortel layer2-3 gbe switch",
        "bnt layer 2/3 copper gigabit ethernet "
        "switch module for ibm bladecenter"
    ]
    # Guard clauses: bail out unless the SNMP name matches a known model.
    snmp_name = kwargs.get('snmp_name', '')
    if not snmp_name:
        return False, "no match.", kwargs
    lowered = kwargs['snmp_name'].lower()
    if not any(substring in lowered for substring in substrings):
        return False, "no match.", kwargs
    ip = str(kwargs['ip'])
    version = kwargs.get('snmp_version')
    if version == '3':
        community = SNMP_V3_AUTH
    else:
        community = str(kwargs['community'])
    # Serial number first; if the device stays silent, try the UUID OID.
    sn = snmp_command(
        ip, community, sn_oid, attempts=2, timeout=3, snmp_version=version,
    )
    if not sn:
        sn = snmp_command(
            ip, community, uuid_oid, attempts=2, timeout=3,
            snmp_version=version,
        )
    if not sn:
        return False, "silent.", kwargs
    sn = unicode(sn[0][1])
    model = kwargs['snmp_name']
    dev = Device.create(sn=sn, model_name=model, model_type=DeviceType.switch)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device = dev
    ip_address.is_management = True
    ip_address.save()
    kwargs['model'] = model
    kwargs['sn'] = sn
    return True, sn, kwargs
def _run_ipmi(ip):
    """Read FRU/MC data over IPMI and create the matching device.

    Tries anonymous access first, then 'ADMIN' user, then
    'ADMIN'/'ADMIN'; the final ``AuthError`` propagates to the caller.
    """
    credentials = ((), ('ADMIN',), ('ADMIN', 'ADMIN'))
    for attempt, auth in enumerate(credentials):
        try:
            ipmi = IPMI(ip, *auth)
            fru = ipmi.get_fru()
        except AuthError:
            if attempt == len(credentials) - 1:
                raise
        else:
            break
    mc = ipmi.get_mc()
    # The top-level FRU record lives under '/SYS' on some BMCs and under
    # 'Builtin FRU Device' on others.
    top = fru['/SYS'] or fru['Builtin FRU Device']
    if not top:
        raise AnswerError('Incompatible answer.')
    name, sn, model_type = _get_base_device_info(top)
    mac = ipmi.get_mac()
    ethernets = [Eth(label='IPMI MAC', mac=mac, speed=0)] if mac else []
    ethernets.extend(_get_ipmi_ethernets(fru))
    dev = Device.create(
        ethernets=ethernets,
        priority=SAVE_PRIORITY,
        sn=sn,
        model_name=name.title(),
        model_type=model_type,
    )
    firmware = mc.get('Firmware Revision')
    if firmware:
        dev.mgmt_firmware = 'rev %s' % firmware
    _add_ipmi_lan(dev, mac)
    _add_ipmi_components(dev, fru)
    dev.save(update_last_seen=True, priority=SAVE_PRIORITY)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device = dev
    ip_address.is_management = True
    if created:
        ip_address.hostname = network.hostname(ip_address.address)
    ip_address.snmp_name = name
    ip_address.save(update_last_seen=True)
    return name
def run_ssh_ganeti(ip):
    """Scan a Ganeti cluster master over SSH and sync its VMs.

    Only the cluster master is scanned; VMs that disappeared from the
    cluster are soft-deleted.
    """
    ssh = _connect_ssh(ip)
    master_hostname = get_master_hostname(ssh)
    try:
        master_ip = IPAddress.objects.get(
            Q(hostname=master_hostname) | Q(address=master_hostname)
        )
    except IPAddress.DoesNotExist:
        raise Error('unknown master hostname %r' % master_hostname)
    if master_ip.address != ip:
        raise Error('not a cluster master.')
    seen_macs = set()
    for hostname, primary_node, address, mac in get_instances(ssh):
        host_device = get_device(primary_node)
        seen_macs.add(mac)
        virtual = Device.create(
            ethernets=[Eth(label='eth0', mac=mac, speed=0)],
            parent=host_device,
            management=master_ip,
            model_name='Ganeti',
            model_type=DeviceType.virtual_server,
            family='Virtualization',
            priority=SAVE_PRIORITY,
        )
        virtual.name = hostname
        virtual.save(priority=SAVE_PRIORITY)
        if address:
            ip_address, created = IPAddress.concurrent_get_or_create(
                address=address,
            )
            ip_address.device = virtual
            ip_address.save()
    # Any Ganeti VM bound to this master whose MAC was not reported is
    # gone from the cluster — mark it deleted.
    stale = Device.objects.filter(
        management=master_ip,
        model__name='Ganeti',
    ).exclude(
        ethernet__mac__in=seen_macs,
    )
    for gone in stale:
        gone.deleted = True
        gone.save(priority=SAVE_PRIORITY)
    return master_hostname
def _get_ip_addresses_from_results(results):
    """Return ``IPAddress`` objects for every system IP in scan results.

    :param results: mapping of plugin name to that plugin's scan results
    :return: list of ``IPAddress`` instances (created on demand)
    """
    ip_addresses = set()
    # Only system ip addresses. This function will be used only with API
    # and only system ip addresses will be possible.
    # The plugin name is never used, so iterate values only (the original
    # iterated ``iteritems()`` and discarded the key).
    for plugin_results in results.itervalues():
        device = plugin_results.get('device')
        if not device:
            continue
        ip_addresses |= set(device.get('system_ip_addresses', []))
    result = []
    for address in ip_addresses:
        ip_address, created = IPAddress.concurrent_get_or_create(
            address=address,
        )
        result.append(ip_address)
    return result
def puppet(**kwargs):
    """Fetch host facts from puppet (DB or HTTP API) and register the device.

    The DB provider takes precedence over the API provider when both
    URLs are configured.
    """
    if not settings.PUPPET_API_URL and not settings.PUPPET_DB_URL:
        return False, "not configured", kwargs
    ip = str(kwargs['ip'])
    ip_set, hostname_set = get_ip_hostname_sets(ip)
    if settings.PUPPET_DB_URL:
        provider = PuppetDBProvider()
    else:
        provider = PuppetAPIProvider()
    facts = provider.get_facts(ip_set, hostname_set)
    if not facts:
        return False, "host config not found.", kwargs
    is_virtual = is_host_virtual(facts)
    # Prefer the detailed lshw dump when the host reported one.
    if 'lshw' in facts:
        dev, dev_name = parse_lshw(facts['lshw'], facts, is_virtual)
    else:
        dev, dev_name = parse_facts(facts, is_virtual)
    if not dev:
        return False, dev_name, kwargs
    for parse in (
        parse_wwn, parse_3ware, parse_smartctl, parse_hpacu,
        parse_megaraid, parse_uptime,
    ):
        parse(facts, dev)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device, message = dev, dev_name
    if created:
        ip_address.hostname = network.hostname(ip_address.address)
    ip_address.last_puppet = datetime.datetime.now()
    ip_address.save(update_last_seen=True)  # no priorities for IP addresses
    handle_facts_os(dev, facts, is_virtual)
    handle_facts_packages(dev, facts.get('packages'))
    return True, message, kwargs
def juniper_snmp(**kwargs):
    """Detect Juniper switches via SNMP.

    Reads the serial-number and model OIDs and registers the switch
    with a management IP address.
    """
    sn_oid = (1, 3, 6, 1, 4, 1, 2636, 3, 1, 3, 0)
    model_oid = (1, 3, 6, 1, 4, 1, 2636, 3, 1, 2, 0)
    version = kwargs.get('snmp_version')
    community = SNMP_V3_AUTH if version == '3' else str(kwargs['community'])
    snmp_name = kwargs.get('snmp_name')
    if not snmp_name or "juniper networks" not in snmp_name.lower():
        return False, "no match.", kwargs
    ip = str(kwargs['ip'])
    sn = snmp_command(
        ip, community, sn_oid, attempts=2, timeout=3, snmp_version=version,
    )
    model = snmp_command(
        ip, community, model_oid, attempts=2, timeout=3, snmp_version=version,
    )
    if not (sn and model):
        return False, "silent.", kwargs
    sn = unicode(str(sn[0][1]), encoding='utf-8')
    model = unicode(str(model[0][1]), encoding='utf-8')
    dev = Device.create(sn=sn, model_name=model, model_type=DeviceType.switch)
    ip_address, created = IPAddress.concurrent_get_or_create(address=str(ip))
    ip_address.device = dev
    ip_address.is_management = True
    ip_address.save()
    return True, sn, kwargs
def run_ssh_ssg(ip):
    """Scan a Juniper SSG firewall over SSH and register it as a device."""
    ssh = _connect_ssh(ip)
    lines = ssh.ssg_command('get system')
    # The interesting key/value pairs are all within the first ten lines.
    pairs = parse.pairs(lines=lines[:10])
    product = pairs['Product Name']
    hw_version = pairs['Hardware Version'].split(',', 1)[0]
    mac = pairs['Base Mac'].replace('.', '').upper()
    serial = pairs['Serial Number'].split(',', 1)[0]
    dev = Device.create(
        ethernets=[Eth(label='Base MAC', mac=mac, speed=0)],
        model_name='%s %s' % (product, hw_version),
        model_type=DeviceType.firewall,
        sn=serial,
        name=product,
        raw='\n'.join(lines),
    )
    dev.boot_firmware = pairs['Software Version'].split(',', 1)[0]
    dev.save(update_last_seen=True)
    ipaddr, created = IPAddress.concurrent_get_or_create(address=ip)
    ipaddr.device = dev
    ipaddr.is_management = True
    ipaddr.save()
    return dev.name