def process_servers(self, link_unresolved_to=None):
    """
    Analyze server connections and automatically set the server parent to the
    linked switch parent, if the server parent is None.
    :return: None
    """
    for server in Server.active.all():
        for server_port in ServerPort.active.filter(parent=server):
            switch_port = server_port.switch_port
            if not switch_port:
                continue

            switch = switch_port.typed_parent
            rack = switch.typed_parent

            if switch.is_mounted:
                server_parent_id = server.parent_id
                if server.parent and server.parent.id != rack.id:
                    # clear the unresolved parent group, to try to relink the server
                    server.parent = None

                if not server.parent:
                    logger.info("Update server %s parent %s->%s" % (server, server_parent_id, switch.parent_id))
                    server.mount_to(rack)

        if link_unresolved_to and not server.parent:
            server.parent = link_unresolved_to
            server.save()
            logger.info("Server %s linked to unresolved group: %s" % (server, server.parent_id))
def _handle_address_move(self, *args, **options):
    target_pool = Resource.active.get(pk=options['target-pool-id'])

    moved_ips = []
    if options['file']:
        with open(options['file']) as ip_list_file:
            for ip_line in ip_list_file:
                ip_line = ip_line.decode('utf-8').strip()
                if not ip_line:
                    continue

                ip_obj = GlobalIPManager.get_ip(ip_line)
                if ip_obj.move_to_pool(target_pool):
                    moved_ips.append(ip_obj)
    elif options['ip_start']:
        start_ip = options['ip_start'].decode('utf-8')
        end_ip = options['ip_end'].decode('utf-8')
        count = options['count']

        moved_ips = GlobalIPManager.move_ips(target_pool=target_pool,
                                             start_ip=start_ip,
                                             end_ip=end_ip,
                                             count=count)

    logger.info("IPs are moved to %s" % target_pool)

    self._print_addresses(moved_ips)
def _handle_hypervisors(self, *args, **options):
    if options['list']:
        table = PrettyTable(['node', 'group', 'label', 'hypervisor_driver', 'rating', 'agentd_heartbeat'])
        table.padding_width = 1
        table.sortby = 'rating'

        for hypervisor in self.cloud.get_hypervisors():
            hyper_driver = hypervisor.get_option_value('hypervisor_driver', default=None)
            if hyper_driver:
                current_time_stamp = int(time.time())
                agentd_heartbeat = hypervisor.get_option_value('agentd_heartbeat', default=0)
                # flag hypervisors whose agentd heartbeat is older than 90 seconds
                agentd_heartbeat_value = agentd_heartbeat \
                    if (current_time_stamp - int(agentd_heartbeat)) < 90 \
                    else "%s (!)" % agentd_heartbeat

                table.add_row([hypervisor.id,
                               hypervisor.get_option_value('group'),
                               hypervisor.get_option_value('label'),
                               hypervisor.get_option_value('hypervisor_driver'),
                               hypervisor.get_option_value('rating', default=0),
                               agentd_heartbeat_value])

        logger.info(table.get_string(reversesort=True))
def internal_send_task(self, task_class, target_node, **task_options):
    """
    Run a specific task remotely. Used to collect and send the task with its
    options to the remote node (worker).
    :param task_class: Task to run.
    :param target_node: Node that is used to run the task.
    :param task_options: Task options (context).
    :return: TaskTracker
    """
    assert target_node

    node_queue = target_node.get_option_value('agentd_taskqueue', default=None)
    if not node_queue:
        raise ValueError("Missing agentd_taskqueue in node %s" % target_node.id)

    logger.info("Send task %s to queue %s for node %s" % (task_class, node_queue, target_node.id))

    task_options['node'] = target_node.id
    task_options['driver'] = target_node.get_option_value('hypervisor_driver', default='unknown')

    return self.send_task(task_class, cmdb_node_id=target_node.id, queue=node_queue, options=task_options)
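# Usage sketch (not part of the original module): drives internal_send_task
# end to end. The task name 'tasks.noop' and the node pk are illustrative
# assumptions; any node with the 'agentd_taskqueue' option set would do.
def _example_internal_send_task(backend):
    target_node = Resource.active.get(pk=1)  # hypothetical node id
    tracker = backend.internal_send_task('tasks.noop', target_node)
    # the returned TaskTracker blocks in wait() until the worker reports back
    return tracker.wait()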
def wait(self):
    if self.is_success:
        return self.return_data

    if self.is_failed:
        return self.error

    try:
        last_data = ''
        while True:
            ready, result_data = self.task.poll()
            if ready:
                self.success(result_data)
                return result_data

            # in progress
            self.progress()
            if last_data != result_data:
                last_data = result_data
                logger.info("progress: %s" % last_data)

            time.sleep(1)
    except Exception as ex:
        self.failed(ex.message)
        raise ex
def _handle_address_delete(self, *args, **options):
    ip_address = options['ip-address'].decode('utf-8')

    ip = GlobalIPManager.get_ip(ip_address)
    ip.delete()

    logger.info("%s deleted" % ip_address)
def import_switch(self, switch_cmdb_id, l3switch):
    """
    Import data from a layer 3 switch.
    :param switch_cmdb_id: CMDB id of the switch resource being imported.
    :param l3switch: L3Switch
    """
    source_switch = Resource.active.get(pk=switch_cmdb_id)

    for l3port in l3switch.ports:
        if l3port.is_local:
            self._add_local_port(source_switch, l3port)
        else:
            self._add_foreign_port(l3port)

        # Import IP addresses
        for connected_mac in l3port.macs:
            server, server_port = self._add_server_and_port(connected_mac)

            for ip_address in l3port.switch.get_mac_ips(unicode(connected_mac)):
                self._add_ip(ip_address, parent=server_port)

    # There is only one connection from a single server port.
    logger.info("Clean extra PortConnections")
    for server_port in ServerPort.active.filter():
        port_connections = PortConnection.active.filter(linked_port_id=server_port.id).order_by('-last_seen')
        if len(port_connections) > 1:
            logger.warning("Server port %s has >1 PortConnection" % server_port)

            deleted_poconn = 0
            for port_connection in port_connections[1:]:
                logger.warning(" remove PortConnection %s" % port_connection)
                port_connection.delete()
                deleted_poconn += 1

            logger.warning(" deleted %s" % deleted_poconn)
def _add_ip(self, ip_address, parent=None):
    assert ip_address, "ip_address must be defined."

    added = False
    for ip_pool in self.available_ip_pools:
        if ip_pool.can_add(ip_address):
            added_ip, created = IPAddress.active.get_or_create(
                address__exact=ip_address,
                defaults=dict(address=ip_address, parent=ip_pool))
            added_ip.use(cascade=True)

            if created:
                logger.info("Added %s to %s" % (ip_address, ip_pool))
            else:
                added_ip.touch(cascade=True)

            if parent:
                if added_ip.parent and added_ip.parent.id != parent.id:
                    logger.info("IP %s moved from %s to %s" % (ip_address, added_ip.typed_parent, parent))

                added_ip.parent = parent
                added_ip.save()

            added = True
            break

    if not added:
        logger.error("%s is not added. IP pool is not available." % ip_address)
def _handle_switch(self, *args, **options):
    device_id = options['switch-id']
    switch = Resource.objects.get(pk=device_id)

    query = dict(parent=switch)
    if options['port'] and len(options['port']) > 0:
        query['number__in'] = options['port']

    port_link_data = []
    for switch_port in SwitchPort.active.filter(**query).order_by('-name'):
        for port_connection in PortConnection.active.filter(parent=switch_port):
            linked_server_port = port_connection.linked_port
            if linked_server_port:
                if isinstance(linked_server_port, ServerPort):
                    port_link_data.append([switch_port.number,
                                           port_connection.link_speed_mbit,
                                           linked_server_port.parent.name,
                                           linked_server_port.typed_parent.label,
                                           port_connection.last_seen])
                else:
                    logger.warning("PortConnection %s linked to missing ServerPort %s" % (
                        port_connection, linked_server_port.id))
                    continue

    if port_link_data:
        writer = ConsoleResourceWriter(port_link_data)
        writer.print_table(fields=['port_number', 'link_speed_mbit', 'server_name', 'label', 'last_seen'],
                           sort_by='port_number')
    else:
        logger.info("not connected")
def print_path(self, fields=None):
    assert fields

    indent = 0
    for resource in self.resources_iterable:
        columns = self._get_resource_data_row(resource, fields)
        logger.info("%s%s" % ("".ljust(indent * self.indent),
                              " ".join([unicode(col_value) for col_value in columns])))
        indent += 1
def _handle_daadd(self, *args, **options):
    cid = options['cid']
    lid = options['lid']
    ip = options['ip']

    license = DirectAdminLicense.register_license(cid=cid, lid=int(lid), ip_address=ip)

    logger.info(license)
def _recursive_delete(self, node):
    assert node

    logger.info("Deleting %s" % node)

    for child in node.filter_childs(Resource):
        if child.id != node.id:
            self._recursive_delete(child)

    node.delete()
def process_virtual_servers(self, link_unresolved_to=None):
    """
    Find unresolved virtual servers and link them to a special group.
    :return: None
    """
    for vps_server in VirtualServer.active.all():
        if link_unresolved_to and not vps_server.parent:
            vps_server.parent = link_unresolved_to
            vps_server.save()
            logger.info("Virtual server %s linked to unresolved group: %s" % (vps_server, vps_server.parent_id))
def print_tree(self, fields=None):
    assert fields

    for resource in self.resources_iterable:
        indent = self.resources_iterable.level - 1

        columns = ['']
        columns.extend(self._get_resource_data_row(resource, fields))
        columns.append('')

        logger.info("%s%s" % ("".ljust(indent * self.indent),
                              " | ".join([unicode(col_value) for col_value in columns])))
def _handle_auto(self, *args, **options):
    # update via snmp
    query = dict()
    if options['switch_id']:
        query['pk'] = options['switch_id']

    if not options['skip_arp']:
        for switch in itertools.chain(GatewaySwitch.active.filter(**query),
                                      Switch.active.filter(**query)):
            logger.info("* Found switch: %s" % switch)

            if switch.has_option('snmp_provider_key'):
                snmp_provider_key = switch.get_option_value('snmp_provider_key')
                if snmp_provider_key in self.registered_providers:
                    hostname = switch.get_option_value('snmp_host')
                    community = switch.get_option_value('snmp_community')

                    logger.info("host: %s" % hostname)

                    provider = self.registered_providers[snmp_provider_key]()
                    provider.from_snmp(hostname, community)

                    self.cmdb_importer.import_switch(switch.id, provider)
                else:
                    logger.warning("Unknown SNMP data provider: %s" % snmp_provider_key)

        Resource.objects.rebuild()

    logger.info("Process hypervisors.")
    for switch in Switch.active.all():
        for switch_port in SwitchPort.active.filter(parent=switch):
            self.cmdb_importer.process_hypervisors(switch_port)

    for switch in GatewaySwitch.active.all():
        for switch_port in SwitchPort.active.filter(parent=switch):
            self.cmdb_importer.process_hypervisors(switch_port)

    logger.info("Process server mounts")
    link_unresolved_to_container, created = RegionResource.objects.get_or_create(name='Unresolved servers')
    self.cmdb_importer.process_servers(link_unresolved_to=link_unresolved_to_container)

    logger.info("Process virtual server mounts")
    link_unresolved_to_container, created = RegionResource.objects.get_or_create(name='Unresolved VPS')
    self.cmdb_importer.process_virtual_servers(link_unresolved_to=link_unresolved_to_container)

    Resource.objects.rebuild()
def _handle_rack_unit(self, *args, **options):
    server = self._get_server_by_ip_or_id(options['ip-or-id'])

    assert RackMountable.is_rack_mountable(server)

    for option_name in options:
        if option_name.startswith('set_'):
            prop_name = option_name[4:]
            if options[option_name]:
                server.set_option(prop_name, options[option_name])

    self._dump_server(server)
    logger.info("")
def _handle_analyze(self, *args, **options):
    # hypervisors
    dry_run = options['dry_run']

    if options['merge_servers']:
        logger.info("Check if ports from different servers belong to the same server.")
        for server_port1 in ServerPort.active.all().order_by('id'):
            mac1 = int(server_port1.mac, 16)
            for server_port2 in ServerPort.active.all().order_by('id'):
                if server_port1.id == server_port2.id or server_port1.parent_id == server_port2.parent_id:
                    continue

                mac2 = int(server_port2.mac, 16)
                # adjacent MAC addresses usually mean NICs of the same machine
                if abs(mac1 - mac2) <= 5:
                    logger.info("Check if servers are the same: %s and %s" % (server_port1.device, server_port2.device))
                    logger.info(" %s and %s" % (server_port1, server_port2))
    elif options['hypervisors']:
        logger.info("Search for hypervisors in CMDB...")
        for switch in Switch.active.all():
            for switch_port in SwitchPort.active.filter(parent=switch):
                self._guess_hypervisor(switch_port, dry_run)

        for switch in GatewaySwitch.active.all():
            for switch_port in SwitchPort.active.filter(parent=switch):
                self._guess_hypervisor(switch_port, dry_run)
def dump_item(resource):
    assert resource

    # dump model fields
    for field in resource.__class__._meta.fields:
        field_value = getattr(resource, field.name)
        if isinstance(field, DateTimeField):
            field_value = timezone.localtime(field_value)

        logger.info("%s = %s" % (field.name, field_value))

    # dump resource options
    for option in resource.get_options():
        logger.info("%s" % option)
def _handle_vps(self, *args, **options):
    tracker = None
    vmid = int(options['vmid'])
    user_name = options['user']
    node_id = int(options['node'])

    if options['create']:
        ram = int(options['ram'])
        hdd = int(options['hdd'])
        cpu = int(options['cpu'])
        template = options['template']
        ip_addr = options['ip']

        tracker = self.backend.create_vps(node=node_id, vmid=vmid, template=template, user=user_name,
                                          ram=ram, hdd=hdd, cpu=cpu, ip=ip_addr)
    elif options['stop']:
        hyper_driver = options['driver']
        tracker = self.backend.stop_vps(node=node_id, vmid=vmid, user=user_name, driver=hyper_driver)
    elif options['start']:
        hyper_driver = options['driver']
        tracker = self.backend.start_vps(node=node_id, vmid=vmid, user=user_name, driver=hyper_driver)

    if tracker:
        logger.info("Attached to the task tracker %s. Ctrl-C to exit." % tracker.id)
        try:
            result_data = tracker.wait()
            logger.info(result_data)
        except Exception as ex:
            logger.error(ex.message)
def _handle_res_list(self, *args, **options):
    query = self._parse_reminder_arg(options['filter'])

    limit = options['limit']
    offset = (options['page'] - 1) * limit

    events_set = HistoryEvent.objects.filter(**query)

    if options['from_date']:
        events_begin_date = timezone.datetime.strptime(options['from_date'], '%d.%m.%Y %H:%M')
        events_set = events_set.filter(created_at__gte=events_begin_date)

    if options['order']:
        fields = options['order'].split(',')
        events_set = events_set.order_by(*fields)

    if limit > 0:
        # paginate: the slice must end at offset + limit, not at limit
        events_set = events_set[offset:offset + limit]

    table = PrettyTable(['id', 'created_at', 'type', 'resource_id', 'resource__type',
                         'field_name', 'field_old_value', 'field_new_value'])
    table.padding_width = 1
    table.align['id'] = 'r'
    table.align['resource_id'] = 'r'
    table.align['resource__type'] = 'l'
    table.align['field_name'] = 'l'
    table.align['field_old_value'] = 'l'
    table.align['field_new_value'] = 'l'

    for event in events_set:
        try:
            table.add_row([event.id,
                           timezone.localtime(event.created_at).strftime('%d.%m.%Y %H:%M'),
                           event.type,
                           event.resource.id,
                           event.resource.type,
                           event.field_name,
                           event.field_old_value,
                           event.field_new_value])
        except ObjectDoesNotExist:
            logger.debug("Removing event %s with missing resource %s" % (event.id, event.resource_id))
            event.delete()

    logger.info(unicode(table))
def create(self, request, *args, **kwargs):
    indata = CreateVpsSerializer(data=request.data)
    if not indata.is_valid():
        return Response(indata.errors, status=status.HTTP_400_BAD_REQUEST)

    logger.info("Creating VPS: %s" % indata.data)

    # hardcoded backend
    cloud = CmdbCloudConfig()
    backend = ProxMoxJBONServiceBackend(cloud)

    tracker = backend.create_vps(**indata.data)
    serializer = CloudTaskTrackerSerializer(tracker)

    return Response(serializer.data)
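# Hypothetical client call against the create() endpoint above. The URL path
# and token auth scheme are assumptions; the payload fields mirror the options
# passed to backend.create_vps by _handle_vps elsewhere in this module.
import requests

def _example_create_vps_request(base_url, token):
    payload = {'node': 1, 'vmid': 101, 'template': 'centos7', 'user': 'user1',
               'ram': 1024, 'hdd': 50, 'cpu': 2, 'ip': '192.0.2.10'}
    response = requests.post('%s/vps/' % base_url,
                             json=payload,
                             headers={'Authorization': 'Token %s' % token})
    # on success the body is the serialized CloudTaskTracker
    return response.json()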
def stop(self, request, pk=None):
    indata = StartStopSerializer(data=request.data)
    if not indata.is_valid():
        return Response(indata.errors, status=status.HTTP_400_BAD_REQUEST)

    logger.info("Stopping VPS: %s" % indata.data)

    # hardcoded backend
    cloud = CmdbCloudConfig()
    backend = ProxMoxJBONServiceBackend(cloud)

    tracker = backend.stop_vps(**indata.data)
    serializer = CloudTaskTrackerSerializer(tracker)

    return Response(serializer.data)
def execute(self):
    self.context['options']['tracker_id'] = self.tracker.id

    logger.info("Executing task %s with tracker id %s" % (self.task_name, self.tracker.id))

    async_task = self.REMOTE_WORKER.send_task(self.task_name,
                                              queue=self.queue,
                                              routing_key=self.queue,
                                              kwargs={'options': self.context['options']})

    logger.info(" got Celery ID %s" % async_task.id)

    tracker_context = self.tracker.context
    tracker_context['celery_task_id'] = async_task.id
    self.tracker.context = tracker_context
    self.tracker.save()
def print_table(self, fields=None, sort_by=None):
    assert fields

    table = PrettyTable(fields)
    table.padding_width = 1
    table.sortby = sort_by

    for afield in table.align:
        table.align[afield] = 'l'

    for resource in self.resources_iterable:
        if isinstance(resource, models.Model):
            table.add_row(self._get_resource_data_row(resource, fields))
        else:
            table.add_row(resource)

    logger.info(table.get_string())
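# Sketch of how ConsoleResourceWriter might be driven (based on the call in
# _handle_switch above): it accepts model instances or plain row lists. The
# field names here are illustrative assumptions.
def _example_print_servers():
    writer = ConsoleResourceWriter(Server.active.all())
    writer.print_table(fields=['id', 'label', 'status'], sort_by='id')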
def _add_local_port(self, source_switch, l3port):
    """
    Add a port and server from the local switch: the switch port, the server
    port, the server itself and the connection between switch port and
    server port.
    :param source_switch: Switch resource whose port is being added.
    :param l3port: L3 port of the switch.
    :return: SwitchPort
    """
    assert source_switch
    assert l3port

    switch_local_port, created = SwitchPort.active.get_or_create(
        number=l3port.number,
        parent=source_switch,
        defaults=dict(name=l3port.number, status=Resource.STATUS_INUSE))

    if created:
        logger.info("Added switch port: %s:%s (cmdbid:%s)" % (source_switch.id, l3port.number, switch_local_port.id))
    elif switch_local_port.uplink:
        logger.info("Port %s marked as UPLINK, purge port connections" % switch_local_port)
        PortConnection.active.filter(parent=switch_local_port).delete()
        return switch_local_port

    if len(l3port.macs) > 0:
        switch_local_port.use()
        logger.info("Switch port %s marked used" % switch_local_port)
    else:
        switch_local_port.free()
        logger.info("Switch port %s marked free" % switch_local_port)

    for connected_mac in l3port.macs:
        server, server_port = self._add_server_and_port(connected_mac)

        port_connection, created = PortConnection.active.get_or_create(
            parent=switch_local_port,
            linked_port_id=server_port.id)
        if created:
            logger.info("Added %s" % port_connection)
        else:
            port_connection.touch()

        port_connection.use()

    return switch_local_port
def _dump_trackers(self, status, limit):
    trackers = self.task_tracker.find(status=status).order_by('-id')[:limit]

    table = PrettyTable(['id', 'task_class', 'status', 'created', 'updated', 'time-delta'])
    table.padding_width = 1

    for tracker in trackers:
        table.add_row([tracker.id,
                       tracker.task_class,
                       tracker.status,
                       timezone.localtime(tracker.created_at).strftime('%d.%m.%Y %H:%M'),
                       timezone.localtime(tracker.updated_at).strftime('%d.%m.%Y %H:%M'),
                       (tracker.updated_at - tracker.created_at) if tracker.updated_at else '0'])

    logger.info(unicode(table))
def get(self, request, format=None, *args, **kwargs):
    ip_pools = request.query_params.getlist('pool', None)
    ip_count = int(request.query_params.get('count', 1))

    if not ip_pools:
        raise ParseError()

    logger.debug(request.query_params)
    logger.info("Getting %s new ip addresses from pools: %s" % (ip_count, ip_pools))

    rented_ips = IPAddressPool.lease_ips(ip_pools, ip_count)

    serializer = self.get_serializer(rented_ips, many=True)
    response = {'count': len(rented_ips), 'results': serializer.data}

    return Response(response)
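# Hypothetical client call for the IP lease endpoint above. The URL path and
# pool ids are assumptions; the 'pool' and 'count' query params are taken from
# the handler itself.
import requests

def _example_lease_ips(base_url):
    response = requests.get('%s/ip/lease/' % base_url,
                            params={'pool': [10, 11], 'count': 2})
    # expected shape: {'count': <int>, 'results': [<serialized IPs>]}
    return response.json()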
def poll(self):
    if self.is_success:
        return self.return_data

    if self.is_failed:
        return self.error

    try:
        ready, result_data = self.task.poll()
        if ready:
            self.success(result_data)
        else:
            logger.info("progress: %s" % result_data)
            self.progress()

        return result_data
    except Exception as ex:
        self.failed(ex.message)
        raise ex
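# Sketch contrasting poll() and wait() (assumes 'tracker' is a task tracker
# instance of the class above): poll() returns the current result immediately,
# wait() loops until the task is ready.
def _example_track_progress(tracker):
    partial = tracker.poll()  # non-blocking, may return progress data
    logger.info("partial: %s" % partial)
    return tracker.wait()     # blocks until success or failure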
def _handle_auto(self, *args, **options):
    # update via snmp
    query = dict(type__in=[GatewaySwitch.__name__, Switch.__name__])
    if options['switch_id']:
        query['pk'] = options['switch_id']

    if not options['skip_arp']:
        for switch in Resource.active.filter(**query):
            logger.info("* Found switch: %s" % switch)

            if switch.has_option('snmp_provider_key'):
                snmp_provider_key = switch.get_option_value('snmp_provider_key')
                if snmp_provider_key in self.registered_providers:
                    hostname = switch.get_option_value('snmp_host')
                    community = switch.get_option_value('snmp_community')

                    logger.info("host: %s" % hostname)

                    provider = self.registered_providers[snmp_provider_key]()
                    provider.from_snmp(hostname, community)

                    self.cmdb_importer.import_switch(switch.id, provider)
                else:
                    logger.warning("Unknown SNMP data provider: %s" % snmp_provider_key)

        Resource.objects.rebuild()

    logger.info("Process hypervisors.")
    for switch in Switch.active.all():
        for switch_port in SwitchPort.active.filter(parent=switch):
            self.cmdb_importer.process_hypervisors(switch_port)

    for switch in GatewaySwitch.active.all():
        for switch_port in SwitchPort.active.filter(parent=switch):
            self.cmdb_importer.process_hypervisors(switch_port)

    logger.info("Process server mounts")
    link_unresolved_to_container, created = RegionResource.objects.get_or_create(name='Unresolved servers')
    self.cmdb_importer.process_servers(link_unresolved_to=link_unresolved_to_container)

    logger.info("Process virtual server mounts")
    link_unresolved_to_container, created = RegionResource.objects.get_or_create(name='Unresolved VPS')
    self.cmdb_importer.process_virtual_servers(link_unresolved_to=link_unresolved_to_container)

    Resource.objects.rebuild()
def _add_ip(self, ip_address, parent=None):
    assert ip_address, "ip_address must be defined."

    logger.info("Processing IP %s" % ip_address)

    try:
        added_ip = GlobalIPManager.get_ip(ip_address)
        added_ip.use()
        added_ip.touch()

        if parent:
            if added_ip.parent and added_ip.parent.id != parent.id:
                logger.info("IP %s moved from %s to %s" % (ip_address, added_ip.parent, parent))

            added_ip.parent = parent
            added_ip.save()
    except Exception as ex:
        logger.exception(ex.message)
def _handle_trackers(self, *args, **options):
    if options['cancel']:
        tracker_id = int(options['cancel'])
        tracker = self.task_tracker.get(tracker_id)
        tracker.failed('Cancelled')
    elif options['attach']:
        tracker_id = int(options['attach'])
        tracker = self.task_tracker.get(tracker_id)

        logger.info("Attached to task tracker %s. Ctrl-C to detach." % tracker_id)
        print tracker.wait()
    else:
        limit = int(options['limit'])

        view_status = TaskTrackerStatus.STATUS_NEW
        if options['failed']:
            view_status = TaskTrackerStatus.STATUS_FAILED
        elif options['success']:
            view_status = TaskTrackerStatus.STATUS_SUCCESS
        elif options['progress']:
            view_status = TaskTrackerStatus.STATUS_PROGRESS

        self._dump_trackers(view_status, limit)
def _handle_daimport(self, *args, **options):
    cid = options['cid']

    da_pool = IPAddressPoolFactory.from_name(name="Imported DirectAdmin")

    with open(options['tsv-file']) as tsv_file:
        for line in tsv_file:
            line = line.strip()
            if line == '':
                continue

            (lid_id, ipaddr, lic_status) = line.decode('utf-8').split(None, 3)

            logger.info("> Processing: %s %s %s" % (lid_id, ipaddr, lic_status))

            try:
                ip_obj = GlobalIPManager.get_ip(ipaddr)
            except:
                # TODO: refactor IP creation
                ip_obj, created = IPAddressGeneric.objects.update_or_create(address=ipaddr, pool=da_pool)

            if lic_status == Resource.STATUS_FREE:
                if ip_obj.status == Resource.STATUS_FREE:
                    license = DirectAdminLicense.register_license(pool=da_pool, cid=cid, lid=int(lid_id),
                                                                  ip_address=ip_obj)
                    license.free()
                    logger.info("LIC %s (%s). Added as FREE" % (lid_id, ipaddr))
                else:
                    logger.warning("(!!) LIC %s (%s). You must change IP." % (lid_id, ipaddr))
            else:
                if ip_obj.status == Resource.STATUS_FREE:
                    license = DirectAdminLicense.register_license(pool=da_pool, cid=cid, lid=int(lid_id),
                                                                  ip_address=ip_obj)
                    license.free()
                    logger.warning("(!) LIC %s (%s). Added as FREE (changed License status to FREE)" % (lid_id, ipaddr))
                else:
                    license = DirectAdminLicense.register_license(pool=da_pool, cid=cid, lid=int(lid_id),
                                                                  ip_address=ip_obj)
                    license.use()
                    logger.info("LIC %s (%s). Added as USED." % (lid_id, ipaddr))
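# Expected --tsv-file format, inferred from the parser above: one license per
# line, whitespace-separated license id, IP address and license status. The
# concrete status strings depend on the Resource.STATUS_* values; 'free' and
# 'inuse' below are illustrative assumptions.
#
#   12345    203.0.113.7    free
#   12346    203.0.113.8    inuse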
def _add_server_and_port(self, connected_mac):
    """
    Add or get a server with its port. A bare metal or virtual server is
    selected based on the vendor code of the MAC address.
    :param connected_mac: MAC address seen on the switch port.
    :return: (server, server_port) tuple
    """
    assert connected_mac

    logger.debug("Found mac: %s" % connected_mac)

    server_port, created = Resource.active.get_or_create(
        mac=connected_mac.interface,
        type__in=[ServerPort.__name__, VirtualServerPort.__name__],
        defaults=dict(
            mac=connected_mac.interface,
            # a known vendor code means a physical NIC, otherwise a virtual one
            type="assets.%s" % (ServerPort.__name__ if connected_mac.vendor else VirtualServerPort.__name__),
            status=Resource.STATUS_INUSE))

    if created:
        logger.info("Added server port %s (%s)" % (server_port.id, connected_mac.interface))

        if server_port.__class__ == VirtualServerPort:
            server = VirtualServer.objects.create(label='VPS')
            logger.info("Added VPS %s (%s)" % (server, connected_mac))
        else:
            server = Server.objects.create(label=connected_mac.vendor, vendor=connected_mac.vendor)
            logger.info("Added metal server %s (%s)" % (server, connected_mac))

        # set parent for the port
        server_port.parent = server
        server_port.save()
    else:
        server_port.use()
        server_port.touch()
        server_port.parent.touch()

    return server_port.typed_parent, server_port
def _handle_household(self, *args, **options):
    last_seen_31days = timezone.now() - datetime.timedelta(days=31)
    last_seen_15days = timezone.now() - datetime.timedelta(days=15)

    # Clean IPs having parent=ip pool (free) with last_seen older than 31 days.
    # It means that the IP is not used and can be released.
    logger.info("Clean missing IP addresses: %s" % last_seen_31days)
    for free_ip_pool in Resource.active.filter(status=Resource.STATUS_FREE,
                                               type__in=IPAddressPool.ip_pool_types):
        logger.info(" pool %s" % free_ip_pool)

        for ip in IPAddress.active.filter(status=Resource.STATUS_INUSE,
                                          last_seen__lt=last_seen_31days,
                                          ipman_pool_id=free_ip_pool.id,
                                          version=4):
            logger.warning(" used ip %s from the FREE IP pool is not seen for 31 days. Free it." % ip)
            ip.free(cascade=True)

        for ip in IPAddress.active.filter(status=Resource.STATUS_LOCKED,
                                          last_seen__lt=last_seen_15days,
                                          ipman_pool_id=free_ip_pool.id,
                                          version=4):
            logger.warning(" locked ip %s from the FREE IP pool is not seen for 15 days. Free it." % ip)
            ip.free(cascade=True)

    logger.info("Clean missing virtual servers: %s" % last_seen_31days)
    for vm in VirtualServer.active.filter(last_seen__lt=last_seen_31days):
        logger.warning(" server %s not seen for 31 days. Removing..." % vm)
        for vm_child in vm:
            logger.info(" remove %s" % vm_child)
            vm_child.delete()

        vm.delete()

    logger.info("Clean unresolved PortConnections...")
    removed = 0
    for connection in PortConnection.active.all():
        if not connection.linked_port:
            connection.delete()
            removed += 1

    logger.info(" removed: %s" % removed)

    Resource.objects.rebuild()
def process_hypervisors(self, switch_port):
    """
    Search switch ports that carry one physical and many VPS servers. If a
    hypervisor is found on the port, set its role and link the VPS servers
    to it.
    :param switch_port: SwitchPort to analyze.
    :return: None
    """
    assert switch_port
    assert isinstance(switch_port, SwitchPort)

    result, physical_srv, virtual_srv = CmdbAnalyzer.guess_hypervisor(switch_port)
    if result:
        logger.info("Found hypervisor: %s" % physical_srv)
        physical_srv.set_option('role', 'hypervisor')

        for virtual_server in virtual_srv:
            virtual_server.parent = physical_srv
            virtual_server.save()
            logger.info(" virtual server %s is auto-linked to it" % virtual_server)
    else:
        logger.info("Switch port: %s" % switch_port)
        logger.info(" physicals: %s, virtuals: %s." % (len(physical_srv), len(virtual_srv)))

        logger.info("Physical servers:")
        for server in physical_srv:
            logger.info(unicode(server))

        logger.info("Virtual servers:")
        for vserver in virtual_srv:
            logger.info(unicode(vserver))
def _handle_metrics(self, *args, **options):
    if options['auto_poll']:
        for linked_metric in ZabbixMetric.objects.filter(
                cmdb_node_option__resource__status=Resource.STATUS_INUSE):
            cmdb_node = linked_metric.cmdb_node_option.resource
            cmdb_attr = linked_metric.cmdb_node_option.name
            metric_id = linked_metric.zbx_metric_id

            logger.info("Auto populate %s.%s from Zabbix item %s." % (cmdb_node.id, cmdb_attr, metric_id))

            linked_metric = self._populate_attribute(cmdb_node, cmdb_attr, metric_id)
            logger.info(linked_metric)
    elif options['list']:
        logger.info("Linked metrics:")
        for linked_metric in ZabbixMetric.objects.filter():
            logger.info(linked_metric)
    elif options['delete']:
        assert options['zbx_item']

        metric_id = int(options['zbx_item'])
        ZabbixMetric.objects.filter(zbx_metric_id=metric_id).delete()
    elif options['populate']:
        assert options['cmdb_node']
        assert options['zbx_item']

        metric_id = int(options['zbx_item'])
        cmdb_node_id = int(options['cmdb_node'])
        cmdb_attr = options['populate']

        cmdb_node = Resource.active.get(pk=cmdb_node_id)

        logger.info("Populate %s.%s from zabbix item %s" % (cmdb_node.id, cmdb_attr, metric_id))

        linked_metric = self._populate_attribute(cmdb_node, cmdb_attr, metric_id)
        logger.info(linked_metric)