def storages(request, hostname):
    """
    List of node storages.

    Renders the node storage list with a creation form and flags any zpool
    that is referenced by a NodeStorage row but no longer reported by the node.
    """
    context = collect_view_data(request, 'node_list')
    context['node'] = node = get_node(request, hostname)
    context['nodes'] = Node.all()
    # Materialize the keys: on Python 3 dict.keys() returns a view object,
    # which has no .remove() — the loop below would raise AttributeError.
    context['zpools'] = list(node.zpools.keys())
    context['zpools_missing'] = []
    context['storages'] = node.nodestorage_set.select_related('storage', 'storage__owner').order_by('zpool')\
        .annotate(Count('dc', distinct=True))
    context['form'] = NodeStorageForm(request, node, None, initial={
        'node': node.hostname,
        'owner': request.user.username,
        'access': Storage.PUBLIC,
        'size_coef': Storage.SIZE_COEF,
    })

    # Cross-check DB storages against zpools actually present on the node.
    for ns in context['storages']:
        try:
            context['zpools'].remove(ns.zpool)
        except ValueError:
            context['zpools_missing'].append(ns.zpool)  # zpool vanished from node

    return render(request, 'gui/node/storages.html', context)
def images_zpool(request, hostname, zpool):
    """
    List of images on node storages.

    Shows images stored on one node storage (identified by zpool), together
    with the VMs on this node that currently use each image.
    """
    context = collect_view_data(request, 'node_list')
    context['node'] = node = get_node(request, hostname)
    context['nodes'] = Node.all()

    try:
        ns = NodeStorage.objects.select_related('storage').get(node=node, zpool=zpool)
    except NodeStorage.DoesNotExist:
        raise Http404
    context['ns'] = ns

    storages_qs = node.nodestorage_set.select_related('storage').all().order_by('zpool')
    context['storages'] = storages_qs.annotate(imgs=Count('images__uuid'))
    context['images'] = ns.images.select_related('owner', 'dc_bound').all().order_by('name').annotate(dcs=Count('dc'))

    # Map image uuid -> list of {hostname, dc} for every VM on this node
    # that has a disk backed by the image on this zpool.
    image_vms = {}

    for vm in node.vm_set.select_related('dc').all().order_by('hostname'):
        for img_uuid in vm.get_image_uuids(zpool=zpool):
            image_vms.setdefault(img_uuid, []).append({'hostname': vm.hostname, 'dc': vm.dc.name})

    context['image_vms'] = image_vms
    context['form'] = NodeStorageImageForm(ns, initial={'node': hostname, 'zpool': zpool})
    context['last_img'] = request.GET.get('last_img', None)

    return render(request, 'gui/node/images.html', context)
def details(request, hostname):
    """
    Compute node details.

    Renders the node overview page: DC membership, VM counts, backup count
    (backup nodes only) and the node edit form.
    """
    dc1_settings = get_dc1_settings(request)
    context = collect_view_data(request, 'node_list')
    node = get_node(request, hostname, sr=('owner',))
    context['node'] = node
    context['nodes'] = Node.all()
    context['node_dcs'] = node.dc.all().values_list('alias', flat=True)
    context['node_vms'] = node.vm_set.count()
    context['node_real_vms'] = node.vm_set.filter(slavevm__isnull=True).count()
    context['form'] = NodeForm(request, node, initial=node.web_data)
    context['mon_sla_enabled'] = settings.MON_ZABBIX_ENABLED and dc1_settings.MON_ZABBIX_NODE_SLA
    # Backups only exist on nodes flagged as backup nodes.
    context['node_backups'] = node.backup_set.count() if node.is_backup else 0
    view_node_details.send(sender='gui.node.views.details', request=request, context=context)

    return render(request, 'gui/node/details.html', context)
def images(request, hostname):
    """
    Redirect to list of images on default node storage.

    Picks a zpool in this order: the node's own head zpool, the configured
    default storage, then the first available one. Without any storage,
    a "disabled" page is shown instead.
    """
    node = get_node(request, hostname)
    zpools = node.nodestorage_set.all().values_list('zpool', flat=True).order_by('zpool')

    if zpools:
        head_zpool = node.zpool

        if head_zpool and head_zpool in zpools:
            target = head_zpool
        elif settings.VMS_STORAGE_DEFAULT in zpools:
            target = settings.VMS_STORAGE_DEFAULT
        else:
            target = zpools[0]

        return redirect('node_images_zpool', hostname, target)

    # No node storages defined at all.
    context = collect_view_data(request, 'node_list')
    context['nodes'] = Node.all()
    context['node'] = node

    return render(request, 'gui/node/images_disabled.html', context)
def backup_definitions(request, hostname):
    """
    List of server backup definitions targeted onto this node.
    """
    context = collect_view_data(request, 'node_list')
    node = get_node(request, hostname)
    context['node'] = node
    context['nodes'] = Node.all()
    context['bkpdefs'] = get_node_bkpdefs(node)

    return render(request, 'gui/node/backup_definitions.html', context)
def node_vm_snapshot_sync_all():
    """
    This is a periodic beat task responsible for syncing node snapshot sizes of all VMs on a compute node.
    """
    for node in Node.all():
        # Only online compute nodes can be queried for snapshot sizes.
        if not (node.is_online() and node.is_compute):
            continue

        try:
            NodeVmSnapshotList.sync(node)
        except Exception as exc:
            # Best-effort per node: log and keep going with the next one.
            logger.exception(exc)
def node_status_all():
    """
    This is a special periodic task, run by Danube Cloud mgmt daemon (que.bootsteps.MgmtDaemon) every minute.
    It is responsible for running checks on an unreachable compute node.
    """
    for node in Node.all():
        # Reachable nodes need no extra checking here.
        if not node.is_unreachable():
            continue

        logger.info('Checking status of unreachable node %s', node)
        node_worker_status_update.call(node.hostname, queue=Q_FAST, status='unknown')
        node_check.send('node_status_all', node=node)  # Signal!
def node_list(request):
    """
    List of all compute nodes.
    """
    context = collect_view_data(request, 'node_list')
    context.update({
        'nodes': Node.all(),
        'node_list': get_nodes_extended(request),
        'status_form': NodeStatusForm(request, None),
    })
    # Let plugins/extensions enrich the context before rendering.
    view_node_list.send(sender='gui.node.views.list', request=request, context=context)

    return render(request, 'gui/node/list.html', context)
def maintenance(request):
    """
    System maintenance.
    """
    context = collect_view_data(request, 'system_maintenance')
    # Fetch current system version info via the internal API view.
    context['system'] = call_api_view(request, 'GET', system_version).data.get('result', {})
    context.update({
        'node_list': Node.all(),
        'current_view': 'maintenance',
        'status_form': NodeStatusForm(request, None),
        'update_form': UpdateForm(request, None),
        'node_update_form': NodeUpdateForm(request, None, prefix='node'),
    })

    return render(request, 'gui/system/maintenance.html', context)
def node_img_sources_sync(task_id, sender, **kwargs):
    """
    Task for updating imgadm sources on one or every compute node.
    Called by dc_settings_changed signal.
    """
    new_img_sources = ImageVm().sources

    for node in Node.all():
        # We update imgadm sources only on online nodes
        # But we will also run run_node_img_sources_sync() whenever node status is changed to online because
        # the node_startup handler runs the sysinfo update task
        if not node.is_online():
            # logger.warn() is a deprecated alias; logger.warning() is the documented API.
            logger.warning('Excluding node %s from updating imgadm sources because it is not in online state', node)
            continue

        run_node_img_sources_sync(node, new_img_sources=new_img_sources)
def vms(request, hostname, zpool=None):
    """
    List of servers defined on this compute node, optionally filtered by storage (#952).

    :param hostname: compute node hostname
    :param zpool: optional zpool name used to filter the VM list; an unknown
                  zpool is silently ignored (all VMs are listed instead)
    """
    context = collect_view_data(request, 'node_list')
    context['node'] = node = get_node(request, hostname)
    context['nodes'] = Node.all()
    context['node_online'] = node.is_online()
    context['can_edit'] = True
    context['storages'] = nss = node.nodestorage_set.select_related('storage').all().order_by('zpool')
    all_vms = node.vm_set.select_related('owner', 'dc', 'slavevm', 'slavevm__master_vm').order_by('hostname')
    context['vms_all_count'] = all_vms.count()
    _vms = []

    # Drop the filter if the requested zpool does not exist on this node.
    # NOTE: the set comprehension also evaluates (and caches) the nss queryset,
    # so the per-ns vms_count attributes set below survive into the template.
    if zpool and zpool not in {ns.zpool for ns in nss}:
        zpool = None

    for ns in nss:
        ns.vms_count = 0  # per-storage VM counter shown in the UI

    for vm in all_vms:
        vm_zpools = vm.get_used_disk_pools()
        # Attach resource info to each VM; sized per-zpool when filtering.
        vm.resources = vm.get_cpu_ram_disk(zpool=zpool)

        for ns in nss:
            if ns.zpool in vm_zpools:
                ns.vms_count += 1

                # Collect VMs matching the active zpool filter.
                if zpool and zpool == ns.zpool:
                    _vms.append(vm)

    if zpool:
        context['vms'] = _vms
    else:
        context['vms'] = all_vms

    context['zpool'] = zpool

    return render(request, 'gui/node/vms.html', context)
def mon_sync_all(task_id, dc_id, clear_cache=True, sync_groups=True, sync_nodes=True, sync_vms=True, **kwargs):
    """
    Clear Zabbix cache and sync everything in Zabbix. Related to a specific DC.
    Triggered by dc_settings_changed signal.
    """
    dc = Dc.objects.get_by_id(int(dc_id))

    if clear_cache:
        logger.info('Clearing zabbix cache in DC %s', dc)
        mon_clear_zabbix_cache(dc)
        # Cache new Zabbix instance for tasks below.
        get_monitoring(dc)

    if sync_groups:
        logger.info('Running monitoring group synchronization for all user groups in DC %s', dc)
        mon_all_groups_sync.call(task_id, dc_name=dc.name)

    if sync_nodes:
        logger.info('Running monitoring host synchronization for all compute nodes')

        for compute_node in Node.all():
            mon_node_sync.call(task_id, node_uuid=compute_node.uuid)

    if sync_vms:
        logger.info('Running monitoring host synchronization for all VMs in DC %s', dc)

        for uuid_ in dc.vm_set.values_list('uuid', flat=True):
            mon_vm_sync.call(task_id, vm_uuid=uuid_)
def monitoring(request, hostname, graph_type='cpu'):
    """
    Compute node related monitoring.

    :param hostname: compute node hostname
    :param graph_type: which monitoring page/graph set to render; unknown
                       values fall back to 'cpu'
    """
    dc1_settings = get_dc1_settings(request)
    context = collect_view_data(request, 'node_list')
    context['node'] = node = get_node(request, hostname)
    context['nodes'] = Node.all()

    # Node monitoring can be disabled per DC settings.
    if not dc1_settings.MON_ZABBIX_NODE_SYNC:
        return render(request, 'gui/node/monitoring_disabled.html', context)

    from api.mon.node.graphs import GRAPH_ITEMS

    context['graph_items'] = GRAPH_ITEMS
    context['obj_lifetime'] = node.lifetime
    # Graphs are only "operational" when the node is in a monitorable state;
    # for vm-* graph types there must also be at least one real (non-slave),
    # created VM on the node.
    context['obj_operational'] = node.status != Node.STATUS_AVAILABLE_MONITORING and (
        not graph_type.startswith('vm-') or
        node.vm_set.exclude(status=Vm.NOTCREATED).filter(slavevm__isnull=True).exists())

    # Build the graph list for the requested graph type.
    if graph_type == 'memory':
        graphs = (Graph('mem-usage'), Graph('swap-usage'))
    elif graph_type == 'network':
        context['node_nics'] = node_nics = node.used_nics.keys()
        # One bandwidth + one packets graph per used NIC.
        graphs = list(chain(*[(Graph('net-bandwidth', nic=i), Graph('net-packets', nic=i)) for i in node_nics]))
    elif graph_type == 'storage':
        context['zpools'] = node_zpools = node.zpools
        # Throughput, IO and space graphs per zpool.
        graphs = list(chain(*[(Graph('storage-throughput', zpool=i), Graph('storage-io', zpool=i),
                               Graph('storage-space', zpool=i)) for i in node_zpools]))
    elif graph_type == 'vm-cpu':
        graphs = (Graph('vm-cpu-usage'),)
    elif graph_type == 'vm-memory':
        graphs = (Graph('vm-mem-usage'),)
    elif graph_type == 'vm-disk-throughput':
        graphs = (
            Graph('vm-disk-logical-throughput-reads'),
            Graph('vm-disk-logical-throughput-writes'),
            Graph('vm-disk-physical-throughput-reads'),
            Graph('vm-disk-physical-throughput-writes'),
        )
    elif graph_type == 'vm-disk-io':
        graphs = (
            Graph('vm-disk-logical-io-reads'),
            Graph('vm-disk-logical-io-writes'),
            Graph('vm-disk-physical-io-reads'),
            Graph('vm-disk-physical-io-writes'),
        )
    else:
        # Unknown graph_type -> default CPU graphs.
        graph_type = 'cpu'
        graphs = (
            Graph('cpu-usage'),
            Graph('cpu-jumps'),
            Graph('cpu-load'),
        )

    context['graphs'] = graphs
    context['graph_type'] = graph_type

    return render(request, 'gui/node/monitoring_%s.html' % graph_type, context)