def put(self):
    """Update a DNS domain and its TSIG key associations (PUT domain_manage).

    Validates the main Domain form, extracts and processes ``tsig_*`` parameters
    separately, saves everything and re-links TSIG keys. On a domain rename, the
    SOA/NS records and any DC's VMS_VM_DOMAIN_DEFAULT setting are updated too
    (best effort - failures are logged only).
    """
    request = self.request
    domain = self.domain
    ser = DomainSerializer(request, domain, data=self.data, partial=True)

    # validate the main Domain form before processing TSIG params
    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=domain, dc_bound=False)

    tsig_data = {}

    # in case there will be more tsig_* parameters (but for now, there's only tsig_keys)
    # NOTE: iterate over a snapshot of the items, because we pop() from self.data inside
    # the loop - mutating a dict while iterating its live items() view raises RuntimeError.
    for key, val in list(self.data.items()):
        # remove tsig_* parameters from request data because they belong to other validator
        if key.startswith('tsig_'):
            self.data.pop(key)
            tsig_data[key[5:]] = val  # e.g: tsig_keys -> keys

    tsig_keys_new, tsig_serializers = self.process_tsig_keys(request, tsig_data)

    # save default serializer
    ser.object.save()

    # save tsig serializer(s) (plain loops instead of throw-away list comprehensions)
    for ser_tsig in tsig_serializers:
        ser_tsig.object.save()

    # link newly defined TSIG keys to this domain
    for new_key in tsig_keys_new:
        new_key.link_to_axfr_domain(domain)

    # unlink old TSIG keys that were defined for this domain but they were removed in this update
    tsig_keys_names = [key.name for key in tsig_keys_new]

    for linked_key in TsigKey.get_linked_axfr_keys(domain):
        if linked_key.name not in tsig_keys_names:
            linked_key.unlink_axfr_domain(domain)

    res = SuccessTaskResponse(request, ser.data, obj=domain, msg=LOG_DOMAIN_UPDATE,
                              detail_dict=ser.detail_dict(), dc_bound=False)

    if ser.name_changed:
        # Update SOA and NS records when MASTER/NATIVE Domain name changed
        from api.dns.record.views import dns_record

        try:
            data = {'name': domain.name}

            for record_id in domain.record_set.filter(name__iexact=ser.name_changed,
                                                      type__in=[Record.NS, Record.SOA])\
                                              .values_list('id', flat=True):
                call_api_view(request, 'PUT', dns_record, domain.name, record_id, data=data, log_response=True)
        except Exception as e:
            logger.exception(e)

        # Update VMS_VM_DOMAIN_DEFAULT if this domain was used as a default DC domain
        from api.dc.base.views import dc_settings

        try:
            for dc in Dc.objects.all():
                if dc.settings.VMS_VM_DOMAIN_DEFAULT == ser.name_changed:
                    call_api_view(request, 'PUT', dc_settings, dc.name,
                                  data={'VMS_VM_DOMAIN_DEFAULT': domain.name}, log_response=True)
        except Exception as e:
            logger.exception(e)

    return res
def post(self):
    """Create a DNS domain with optional TSIG keys (POST domain_manage).

    Validates the Domain form, processes ``tsig_*`` parameters separately, saves
    the domain, links new TSIG keys and then creates default SOA/NS records
    (best effort - failures are logged only). Finally attaches the domain to the
    current DC if it is DC-bound.
    """
    request = self.request
    dc1_settings = DefaultDc().settings
    domain = self.domain
    domain.owner = request.user  # just a default
    domain.type = dc1_settings.DNS_DOMAIN_TYPE_DEFAULT

    if not request.user.is_staff:
        self.data.pop('dc_bound', None)  # default DC binding cannot be changed when creating object

    ser = DomainSerializer(request, domain, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=domain, dc_bound=False)

    tsig_data = {}

    # in case there will be more tsig_* parameters (but for now, there's only tsig_keys)
    # NOTE: iterate over a snapshot of the items, because we pop() from self.data inside
    # the loop - mutating a dict while iterating its live items() view raises RuntimeError.
    for key, val in list(self.data.items()):
        # remove tsig_* parameters from request data because they belong to other validator
        if key.startswith('tsig_'):
            self.data.pop(key)
            tsig_data[key[5:]] = val  # e.g: tsig_keys -> keys

    tsig_keys_new, tsig_serializers = self.process_tsig_keys(request, tsig_data)

    # save default serializer
    ser.object.save()

    # save tsig serializer(s) (plain loops instead of throw-away list comprehensions)
    for ser_tsig in tsig_serializers:
        ser_tsig.object.save()

    # link newly defined TSIG keys to this domain
    for new_key in tsig_keys_new:
        new_key.link_to_axfr_domain(domain)

    res = SuccessTaskResponse(request, ser.data, status=HTTP_201_CREATED, obj=domain, dc_bound=False,
                              msg=LOG_DOMAIN_CREATE, detail_dict=ser.detail_dict())

    # Create SOA and NS records for new MASTER/NATIVE domain
    from api.dns.record.views import dns_record

    try:
        if dc1_settings.DNS_SOA_DEFAULT and dc1_settings.DNS_NAMESERVERS:
            soa_attrs = {'hostmaster': dc1_settings.DNS_HOSTMASTER.replace('@', '.'),
                         'nameserver': dc1_settings.DNS_NAMESERVERS[0]}
            soa_data = {'type': Record.SOA, 'name': domain.name,
                        'content': dc1_settings.DNS_SOA_DEFAULT.format(**soa_attrs)}
            call_api_view(request, 'POST', dns_record, domain.name, 0, data=soa_data, log_response=True)

        for ns in dc1_settings.DNS_NAMESERVERS:
            ns_data = {'type': Record.NS, 'name': domain.name, 'content': ns}
            call_api_view(request, 'POST', dns_record, domain.name, 0, data=ns_data, log_response=True)
    except Exception as e:
        logger.exception(e)

    if domain.dc_bound:
        # dc_bound stores a DC id and must match the current request DC
        assert request.dc.id == domain.dc_bound
        attach_dc_virt_object(res.data.get('task_id'), LOG_DOMAIN_ATTACH, domain, request.dc, user=request.user)

    return res
def put(self):
    """Update an existing DNS domain (PUT domain_manage) and propagate a rename.

    On a domain rename, the SOA/NS records of the MASTER/NATIVE domain are
    updated and any DC whose VMS_VM_DOMAIN_DEFAULT pointed at the old name is
    repointed to the new one (best effort - failures are logged only).
    """
    request = self.request
    domain = self.domain
    serializer = DomainSerializer(request, domain, data=self.data, partial=True)

    if not serializer.is_valid():
        return FailureTaskResponse(request, serializer.errors, obj=domain, dc_bound=False)

    serializer.object.save()
    response = SuccessTaskResponse(request, serializer.data, obj=domain, msg=LOG_DOMAIN_UPDATE,
                                   detail_dict=serializer.detail_dict(), dc_bound=False)

    if serializer.name_changed:
        # The domain was renamed -> update SOA and NS records of the MASTER/NATIVE domain
        from api.dns.record.views import dns_record

        try:
            record_data = {'name': domain.name}
            record_ids = domain.record_set.filter(name__iexact=serializer.name_changed,
                                                  type__in=[Record.NS, Record.SOA]).values_list('id', flat=True)

            for record_id in record_ids:
                call_api_view(request, 'PUT', dns_record, domain.name, record_id, data=record_data,
                              log_response=True)
        except Exception as exc:
            logger.exception(exc)

        # The old name may have been used as a default DC domain -> update VMS_VM_DOMAIN_DEFAULT
        from api.dc.base.views import dc_settings

        try:
            for dc in Dc.objects.all():
                if dc.settings.VMS_VM_DOMAIN_DEFAULT == serializer.name_changed:
                    call_api_view(request, 'PUT', dc_settings, dc.name,
                                  data={'VMS_VM_DOMAIN_DEFAULT': domain.name}, log_response=True)
        except Exception as exc:
            logger.exception(exc)

    return response
def post(self):
    """Create a new Datacenter (POST dc_manage).

    Copies non-global custom settings from the default DC, fixes DC-binding
    flags on affected groups and users, and attaches the default DNS domain and
    rescue ISO to the newly created DC.
    """
    dc = self.dc
    request = self.request

    # DC management may be disabled globally
    if not DefaultDc().settings.VMS_DC_ENABLED:
        raise PermissionDenied

    dc.owner = request.user  # just a default
    dc.alias = dc.name  # just a default
    ser = self.serializer(request, dc, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=dc)

    # Create default custom settings suitable for new DC (without global settings)
    default_custom_settings = DefaultDc().custom_settings.copy()

    for key in DefaultDcSettingsSerializer.get_global_settings():
        try:
            del default_custom_settings[key]
        except KeyError:
            pass

    # Copy custom settings from default DC and save new DC
    ser.object.custom_settings = default_custom_settings
    ser.save()
    res = SuccessTaskResponse(request, ser.data, status=status.HTTP_201_CREATED, obj=dc,
                              detail_dict=ser.detail_dict(), msg=LOG_DC_CREATE)
    dcs = dc.settings
    task_id = res.data.get('task_id')

    # Changing DC groups affects the group.dc_bound flag
    if dc.roles.exists():
        # The groups that are added to newly created DC should not be DC-bound anymore
        for group in dc.roles.all():
            if group.dc_bound:
                remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, group, user=request.user)

    # Creating new DC can affect the dc_bound flag on users (owner + users from dc.groups)
    self._remove_user_dc_binding(task_id, owner=dc.owner, groups=dc.roles.all())

    # Create association with default server domain
    if dcs.DNS_ENABLED:
        from api.dc.domain.views import dc_domain
        call_api_view(request, None, dc_domain, dcs.VMS_VM_DOMAIN_DEFAULT, data={'dc': dc}, log_response=True)

    # Create association with default rescue CD
    if dcs.VMS_ISO_RESCUECD:
        from api.dc.iso.views import dc_iso
        call_api_view(request, None, dc_iso, dcs.VMS_ISO_RESCUECD, data={'dc': dc}, log_response=True)

    return res
def imagestore_update(request, repo):
    """Ajax page for refreshing imagestore repositories."""
    known_repos = ImageStore.get_repositories(include_image_vm=request.user.is_staff)

    if repo not in known_repos:
        raise Http404

    res = call_api_view(request, 'PUT', imagestore_manage, repo, log_response=True)

    if res.status_code == 200:
        imagestore = res.data['result']
        msg = _('Downloaded metadata for %(image_count)d images from image repository %(name)s')
        messages.success(request, msg % imagestore)
        return redirect('imagestore_list_repo', repo=repo, query_string=request.GET)

    # Failure: a task error is rendered by the ImageStoreList JS (status 200);
    # anything else passes the API status code through unchanged.
    if res.data.get('result', {}).get('error', None):
        status = 200
    else:
        status = res.status_code

    return JSONResponse(res.data, status=status)
def save(self, request):
    """Change the user's password via PUT user_manage and return the HTTP status code."""
    username = self.user.username
    payload = {'password': self.cleaned_data['password1']}
    # The real password is never logged - only a masked placeholder
    logger.info('Calling API view PUT user_manage(%s, data=%s) by user %s in DC %s',
                username, {'password': '******'}, request.user, request.dc)
    response = call_api_view(request, 'PUT', user_manage, username, data=payload)

    return response.status_code
def cleanup(self):
    """Delete all unused (not referenced by any VM on the node) READY images from the node storage."""
    self._check_node()
    ns = self.ns
    zpool = ns.zpool
    node_hostname = ns.node.hostname

    # Collect UUIDs of every image referenced by a VM on this node/zpool
    used_images = set()
    for vm in ns.node.vm_set.all():
        used_images.update(vm.get_image_uuids(zpool=zpool))

    unused_images = self.img.exclude(uuid__in=used_images)
    res = {}

    from api.node.image.views import node_image

    for img in unused_images:
        # Only images in READY state on this node storage can be removed
        if img.get_ns_status(ns) == img.READY:
            r = call_api_view(self.request, 'DELETE', node_image, node_hostname, zpool, img.name,
                              log_response=True)
            res[img.name] = {'status_code': r.status_code, 'response': r.data}

    return SuccessTaskResponse(self.request, res, dc_bound=False)
def _delete_oldest(model, define, view_function, view_item, task_id, msg):
    """Helper for finding oldest snapshots/backups and running DELETE view_function().

    Returns None when nothing exceeds the definition's retention, otherwise the
    API response of the DELETE call.

    @type model: django.db.models.Model
    """
    vm = define.vm
    # TODO: check indexes
    # noinspection PyUnresolvedReferences
    total = model.objects.filter(vm=vm, disk_id=define.disk_id, define=define, status=model.OK).count()
    to_delete = total - define.retention

    if to_delete < 1:
        return None

    # List of snapshot or backup names to delete TODO: check indexes
    # Materialize the sliced queryset once - it is used both for the view data and,
    # on failure, for the error detail; without list() each use re-runs the query.
    # noinspection PyUnresolvedReferences
    oldest = list(model.objects.filter(vm=vm, disk_id=define.disk_id, define=define, status=model.OK)
                               .values_list('name', flat=True).order_by('id')[:to_delete])
    view_name = view_function.__name__
    view_data = {'disk_id': define.array_disk_id, view_item: tuple(oldest)}
    request = get_dummy_request(vm.dc, method='DELETE', system_user=True)
    request.define_id = define.id  # Automatic task

    # Go!
    logger.info('Running DELETE %s(%s, %s), because %s>%s', view_name, vm, view_data, total, define.retention)
    res = call_api_view(request, 'DELETE', view_function, vm.hostname, data=view_data)

    if res.status_code in (200, 201):
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('DELETE %s(%s, %s) was successful: %s', view_name, vm, view_data, res.data)
    else:
        logger.error('Running DELETE %s(%s, %s) failed: %s (%s): %s',
                     view_name, vm, view_data, res.status_code, res.status_text, res.data)
        MonitoringBackend.vm_send_alert(vm, 'Automatic deletion of old %ss %s/disk-%s failed to start.'
                                        % (model.__name__.lower(), vm.hostname, define.array_disk_id))
        # Need to log this, because nobody else does (+ there is no PENDING task)
        detail = 'hostname=%s, %s=%s, disk_id=%s, Error: %s' % (vm.hostname, view_item, ','.join(oldest),
                                                                define.array_disk_id,
                                                                get_task_error_message(res.data))
        task_log_error(task_id, msg, vm=vm, detail=detail, update_user_tasks=False)

    return res
def _call_api_view(self, method, viewspace, view, args, kwargs):
    """Look up an API view by name in *viewspace* and run it via call_api_view().

    Invoked from the socket.io handler: results and errors are emitted back to
    the client, and task IDs of successfully started tasks are remembered in
    self.last_tasks to avoid processing the same task twice.
    """
    kwargs = dict(kwargs)
    args = list(args)

    # Strip internal-only kwargs that must never be forwarded to the API view
    for kwarg in INTERNAL_API_KWARGS:
        kwargs.pop(kwarg, None)

    self.log('Calling %s on API view "%s.%s" with args: "%s" and kwargs: "%s"',
             method, viewspace.__name__, view, args, kwargs)

    if method not in ('GET', 'POST', 'PUT', 'DELETE'):
        self.log('Method "%s" not allowed', method, level=ERROR)
        self.emit('error', 'Method not allowed')
        return

    request = self.get_request(method)

    # Every api view called from here expects a data keyword parameter, which should not be None if called from sio
    if 'data' not in kwargs or not kwargs['data']:
        kwargs['data'] = {}

    try:
        if view.startswith('_'):
            # Never expose private helpers of the viewspace module
            raise AttributeError
        f = getattr(viewspace, view)
    except AttributeError:
        self.log('API view "%s.%s not found', viewspace.__name__, view, level=ERROR)
        self.emit('error', 'API view "%s.%s" not found' % (viewspace.__name__, view))
        return

    try:
        r = call_api_view(request, None, f, *args, **kwargs)
    except Exception as e:
        # Catch all, because this would break the user socket.io instance
        self.log('API view %s "%s.%s" failed', method, viewspace.__name__, view, level=ERROR)
        logger.exception(e)
        return

    if r.status_code in (200, 201):
        self.log('API view %s "%s.%s (%s, %s)" has finished (%s) with output "%s"',
                 method, viewspace.__name__, view, args, kwargs, r.status_code, r.data)

        if isinstance(r.data, dict) and 'task_id' in r.data:
            task_id = r.data['task_id']

            # Skip the final emit for tasks we already know about
            if task_id in self.last_tasks:
                self.log('Ignoring new task %s, because we already know (1)', task_id)
                return

            self.last_tasks.append(task_id)
    else:
        # API call failed because of some validation error
        self.log('API view %s "%s.%s (%s, %s)" has finished (%s) with output "%s"',
                 method, viewspace.__name__, view, args, kwargs, r.status_code, r.data, level=WARNING)

    self.emit('message', view, method, r.status_code, r.data, args, kwargs,
              getattr(r, 'apiview', {}), getattr(r, 'apidata', {}))
def api_call(cls, action, obj, request, args=(), data=()):
    """Run the class's API view for *action* via call_api_view() and return the API response."""
    method = cls._api_method[action]
    view_name = cls._api_call.__name__
    logger.info('Calling API view %s %s(%s, data=%s) by user %s in DC %s',
                method, view_name, args, data, request.user, request.dc)

    return call_api_view(request, method, cls._api_call.__func__, *args, data=dict(data), log_response=True)
def _vm_update(vm):
    """Run PUT vm_manage for *vm* using an internal system request and log the outcome."""
    logger.info('Running PUT vm_manage(%s)', vm)
    from api.vm.base.views import vm_manage

    dummy_request = get_dummy_request(vm.dc, method='PUT', system_user=True)
    response = call_api_view(dummy_request, 'PUT', vm_manage, vm.hostname)

    if response.status_code == 201:
        logger.info('PUT vm_manage(%s) was successful: %s', vm, response.data)
    else:
        logger.error('PUT vm_manage(%s) failed: %s (%s): %s',
                     vm, response.status_code, response.status_text, response.data)
def post(self):
    """Attach a compute node to a DC (POST dc_node), optionally attaching its storages too.

    The ``add_storage`` GUI parameter selects which of the node's storages get
    attached to the DC after the DcNode itself is created.
    """
    node, dcnode = self.node, self.dcnode
    request, data = self.request, self.data

    # Set defaults for Shared strategy (default)
    try:
        strategy = int(data.get('strategy', DcNode.SHARED))
    except ValueError:
        strategy = DcNode.SHARED

    if strategy == DcNode.SHARED:
        dcnode.cpu = dcnode.ram = dcnode.disk = 0  # Value doesn't matter => will be set in save/update_resources
        data.pop('cpu', None)
        data.pop('ram', None)
        data.pop('disk', None)

    # Used in GUI
    try:
        add_storage = int(data.pop('add_storage', DcNode.NS_ATTACH_NONE))
    except ValueError:
        add_storage = DcNode.NS_ATTACH_NONE

    ser = DcNodeSerializer(request, dcnode, data=data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=node)

    # Save without resource update first, then recalculate resources for all DcNodes of this node
    ser.object.save(update_resources=False)
    DcNode.update_all(node=node)
    ser.reload()

    if add_storage:
        from api.utils.views import call_api_view
        from api.dc.storage.views import dc_storage

        ns = NodeStorage.objects.filter(node=node)

        if add_storage != DcNode.NS_ATTACH_ALL:
            # Attach only storages with the requested access level
            ns = ns.filter(storage__access=add_storage)

        for zpool in ns.values_list('zpool', flat=True):
            try:
                zpool_node = '%s@%s' % (zpool, node.hostname)
                res = call_api_view(request, 'POST', dc_storage, zpool_node, data={}, log_response=True)

                if res.status_code == 201:
                    logger.info('POST dc_storage(%s) was successful: %s', zpool_node, res.data)
                else:
                    logger.error('POST dc_storage(%s) failed: %s: %s', zpool_node, res.status_code, res.data)
            except Exception as ex:
                # Storage attachment is best effort - keep going with the next zpool
                logger.exception(ex)

    return SuccessTaskResponse(request, ser.data, status=status.HTTP_201_CREATED, obj=node,
                               detail_dict=ser.detail_dict(), msg=LOG_NODE_ATTACH)
def vm_snapshot_beat(snap_define_id):
    """This is a periodic beat task. Run POST vm_snapshot according to snapshot definition."""
    from api.vm.snapshot.views import vm_snapshot

    snap_define = SnapshotDefine.objects.get(id=snap_define_id)
    snap_name = snap_define.generate_snapshot_name()
    vm = snap_define.vm
    disk_id = snap_define.array_disk_id
    request = get_dummy_request(vm.dc, method='POST', system_user=True)
    request.define_id = snap_define.id  # Automatic task

    # Go!
    res = call_api_view(request, 'POST', vm_snapshot, vm.hostname, snap_name,
                        data={'disk_id': disk_id, 'fsfreeze': snap_define.fsfreeze})

    if res.status_code == 201:
        logger.info('POST vm_snapshot(%s, %s, {disk_id=%s}) was successful: %s', vm, snap_name, disk_id, res.data)
    else:
        # Need to log this, because nobody else does (+ there is no PENDING task)
        detail = 'snapname=%s, disk_id=%s, type=%s. Error: %s' % (snap_name, disk_id, Snapshot.AUTO,
                                                                  get_task_error_message(res.data))
        task_log_error(task_id_from_task_id(vm_snapshot_beat.request.id, dc_id=vm.dc.id), LOG_SNAP_CREATE,
                       vm=vm, detail=detail, update_user_tasks=False)

        if res.status_code == HTTP_423_LOCKED:
            # VM is busy -> warn only; no monitoring alert for a locked VM
            logger.warning('Running POST vm_snapshot(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                           vm, snap_name, disk_id, res.status_code, res.status_text, res.data)
        else:
            logger.error('Running POST vm_snapshot(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                         vm, snap_name, disk_id, res.status_code, res.status_text, res.data)
            MonitoringBackend.vm_send_alert(vm, 'Automatic snapshot %s/disk-%s@%s failed to start.'
                                            % (vm.hostname, disk_id, snap_define.name))
def vm_backup_beat(bkp_define_id):
    """This is a periodic beat task. Run POST vm_backup according to backup definition."""
    from api.vm.backup.views import vm_backup

    bkp_define = BackupDefine.objects.get(id=bkp_define_id)
    vm = bkp_define.vm
    disk_id = bkp_define.array_disk_id
    defname = bkp_define.name
    request = get_dummy_request(vm.dc, method='POST', system_user=True)
    request.define_id = bkp_define.id  # Automatic task

    # Go!
    res = call_api_view(request, 'POST', vm_backup, vm.hostname, defname,
                        data={'disk_id': disk_id, 'fsfreeze': bkp_define.fsfreeze})

    if res.status_code == 201:
        logger.info('POST vm_backup(%s, %s, {disk_id=%s}) was successful: %s', vm, defname, disk_id, res.data)
    else:
        # Need to log this, because nobody else does (+ there is no PENDING task)
        detail = 'hostname=%s, bkpname=%s, disk_id=%s, Error: %s' % (vm.hostname,
                                                                     bkp_define.generate_backup_name(),
                                                                     disk_id, get_task_error_message(res.data))
        task_log_error(task_id_from_task_id(vm_backup_beat.request.id, dc_id=vm.dc.id), LOG_BKP_CREATE,
                       vm=vm, detail=detail, update_user_tasks=False)

        if res.status_code == HTTP_423_LOCKED:
            # VM is busy -> warn only; no monitoring alert for a locked VM
            logger.warning('Running POST vm_backup(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                           vm, defname, disk_id, res.status_code, res.status_text, res.data)
        else:
            logger.error('Running POST vm_backup(%s, %s, {disk_id=%s}) failed: %s (%s): %s',
                         vm, defname, disk_id, res.status_code, res.status_text, res.data)
            Zabbix.vm_send_alert(vm, 'Automatic backup %s/disk-%s@%s failed to start.'
                                 % (vm.hostname, disk_id, defname))
def maintenance(request):
    """System maintenance."""
    context = collect_view_data(request, 'system_maintenance')
    version_response = call_api_view(request, 'GET', system_version)
    context.update({
        'system': version_response.data.get('result', {}),
        'node_list': Node.all(),
        'current_view': 'maintenance',
        'status_form': NodeStatusForm(request, None),
        'update_form': UpdateForm(request, None),
        'node_update_form': NodeUpdateForm(request, None, prefix='node'),
    })

    return render(request, 'gui/system/maintenance.html', context)
def post(self):
    """Import an image from the repository by delegating to POST image_manage."""
    img = self.get_image()
    data = self.data
    data['manifest_url'] = self.repo.get_image_manifest_url(img['uuid'])
    data.pop('file_url', None)

    # Fall back to the repository image name when no name was supplied
    image_name = data.get('name', None)
    if not image_name:
        image_name = data['name'] = img['name']

    return call_api_view(self.request, 'POST', image_manage, image_name, data=data)
def vm_update(vm):
    """Internal API used for updating VM if there were changes in json detected."""
    # Fixed duplicated word in the original log message ("has changed changed")
    logger.info('Running PUT vm_manage(%s), because something (vnc port?) has changed', vm)
    from api.vm.base.views import vm_manage
    from api.utils.request import get_dummy_request
    from api.utils.views import call_api_view

    request = get_dummy_request(vm.dc, method='PUT', system_user=True)
    res = call_api_view(request, 'PUT', vm_manage, vm.hostname)

    if res.status_code == 201:
        # logger.warn is a deprecated alias of logger.warning
        logger.warning('PUT vm_manage(%s) was successful: %s', vm, res.data)
    else:
        logger.error('PUT vm_manage(%s) failed: %s (%s): %s', vm, res.status_code, res.status_text, res.data)
def _api_cmd(request, method, view, *args, **kwargs):
    """Call api view and raise exception according to result"""
    res = call_api_view(request, method, view, *args, data=kwargs)
    code = res.status_code
    output = res.data
    call_repr = '%s %s(%s, data=%s)' % (method, view.__name__, args, kwargs)

    if status.is_success(code):
        logger.info('%s was successful (%s): %s', call_repr, code, output)
        return res

    # Do not fail if created object already exists
    if method == 'POST' and code == status.HTTP_406_NOT_ACCEPTABLE:
        logger.warning('%s failed (%s): %s', call_repr, code, output)
        return res

    logger.error('%s failed (%s): %s', call_repr, code, output)
    raise APIViewError(str(output))
def internal_response(cls, request, method, record, data, task_id=None, related_obj=None):
    """Called by VmDefineSerializer"""
    view_kwargs = {
        'data': data,
        'record': record,
        'task_id': task_id,
        'related_obj': related_obj,
        'api_class': True,
        'log_response': True,
    }

    return call_api_view(request, method, cls, record.domain.name, record.id, **view_kwargs)
def alert_list_table(request):
    """Render the monitoring alert table for the POSTed alert filter."""
    context = collect_view_data(request, 'mon_alert_list')

    try:
        api_data = json.loads(request.POST.get('alert_filter', None))
    except (ValueError, TypeError):
        # Missing or malformed JSON in the POSTed filter
        context['error'] = 'Unexpected error: could not parse alert filter.'
    else:
        context['alert_filter'] = api_data
        res = call_api_view(request, 'GET', mon_alert_list, data=api_data)
        code = res.status_code

        if code == 200:
            context['alerts'] = res.data['result']
        elif code == 201:
            context['error'] = 'Unexpected error: got into an API loop.'
        else:
            context['error'] = res.data.get('result', {}).get('error', res.data)

    return render(request, 'gui/mon/alert_table.html', context)
def save(self, request=None):
    """Change the user's password through PUT user_manage and return the HTTP status code."""
    if request is None:
        request = self.request

    assert request

    username = self.user.username
    payload = {'password': self.cleaned_data['password1']}
    # The real password is never logged - only a masked placeholder
    logger.info('Calling API view PUT user_manage(%s, data=%s) by user %s in DC %s',
                username, {'password': '******'}, request.user, request.dc)
    res = call_api_view(request, 'PUT', user_manage, username, data=payload)

    if res.status_code == 200:
        logger.info('Password for user "%s" was changed successfully', username)
    else:
        logger.error('Failed to change password for user "%s" (%s)', username, res.data)

    return res.status_code
def post(self):
    """Import an image from the repository, refusing duplicates, via POST image_manage."""
    img = self.get_image()
    uuid = img['uuid']
    data = self.data
    data['manifest_url'] = self.repo.get_image_manifest_url(uuid)
    data.pop('file_url', None)

    # Fall back to the repository image name when no name was supplied
    image_name = data.get('name', None)
    if not image_name:
        image_name = data['name'] = img['name']

    # Although this is also checked inside the image_manage, doing it here is better because:
    # - checking the uniqueness of the UUID is done differently in image_manage and the result is not a 406 error
    # - it is faster - in case the name/uuid is not unique we don't have to call another view
    if Image.objects.filter(Q(uuid=uuid) | Q(name=image_name)).exists():
        raise ObjectAlreadyExists(model=Image)

    return call_api_view(self.request, 'POST', image_manage, image_name, data=data)
def post(self):
    """Create a new DNS domain (POST domain_manage) with default SOA/NS records.

    Defaults (owner, domain type) come from the default DC settings. SOA and NS
    record creation afterwards is best effort - failures are logged only.
    """
    request = self.request
    dc1_settings = DefaultDc().settings
    domain = self.domain
    domain.owner = request.user  # just a default
    domain.type = dc1_settings.DNS_DOMAIN_TYPE_DEFAULT

    if not request.user.is_staff:
        self.data.pop('dc_bound', None)  # default DC binding cannot be changed when creating object

    ser = DomainSerializer(request, domain, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=domain, dc_bound=False)

    ser.object.save()
    res = SuccessTaskResponse(request, ser.data, status=HTTP_201_CREATED, obj=domain, dc_bound=False,
                              msg=LOG_DOMAIN_CREATE, detail_dict=ser.detail_dict())

    # Create SOA and NS records for new MASTER/NATIVE domain
    from api.dns.record.views import dns_record

    try:
        if dc1_settings.DNS_SOA_DEFAULT and dc1_settings.DNS_NAMESERVERS:
            soa_attrs = {'hostmaster': dc1_settings.DNS_HOSTMASTER.replace('@', '.'),
                         'nameserver': dc1_settings.DNS_NAMESERVERS[0]}
            soa_data = {'type': Record.SOA, 'name': domain.name,
                        'content': dc1_settings.DNS_SOA_DEFAULT.format(**soa_attrs)}
            call_api_view(request, 'POST', dns_record, domain.name, 0, data=soa_data, log_response=True)

        for ns in dc1_settings.DNS_NAMESERVERS:
            ns_data = {'type': Record.NS, 'name': domain.name, 'content': ns}
            call_api_view(request, 'POST', dns_record, domain.name, 0, data=ns_data, log_response=True)
    except Exception as e:
        logger.exception(e)

    if domain.dc_bound:
        # dc_bound stores a DC id and must match the current request DC
        assert request.dc.id == domain.dc_bound
        attach_dc_virt_object(res.data.get('task_id'), LOG_DOMAIN_ATTACH, domain, request.dc, user=request.user)

    return res
def _api_task_status(self, task_id):
    """Fetch the status of *task_id* through the task_status API view."""
    request = self.get_request('GET')

    return call_api_view(request, None, api.task.views.task_status, task_id=task_id)
def vm_replica_failover_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """A callback function for api.vm.replica.views.vm_replica_failover.

    Promotes the replica to the new master VM, shuts down and degrades the old
    master, resyncs snapshots and optionally starts the new master.
    """
    slave_vm = SlaveVm.get_by_uuid(slave_vm_uuid, sr=('vm', 'master_vm', 'vm__node', 'vm__dc',))
    vm = slave_vm.master_vm
    assert vm.uuid == vm_uuid
    action = result['meta']['apiview']['method']
    force = result['meta']['apiview']['force']
    result, jsons = _parse_vm_replica_result(result, vm, slave_vm, action, key_json_idx=-1,
                                             cb_name='vm_replica_failover')
    sync_status = _save_svc_state(slave_vm, jsons)

    if result['returncode'] != 0:
        if sync_status is not None:
            slave_vm.save(update_fields=('sync_status',))

        vm.revert_notready()
        msg = result['detail']
        logger.error('Found nonzero returncode in result from %s vm_replica_failover(%s, %s). Error: %s',
                     action, vm_uuid, slave_vm_uuid, msg)
        errmsg = _update_task_result_failure(result, msg)
        raise TaskException(result, errmsg)

    # New master VM was born
    # Delete tasks for old master
    if force:
        tasks = list(vm.tasks.keys())

        try:
            # Keep the current task - only other pending tasks are removed
            tasks.remove(task_id)
        except ValueError:
            pass

        _delete_tasks(vm, tasks)

    # Create internal shutdown task of old master VM
    old_vm_status = result['meta']['apiview']['orig_status']
    _vm_shutdown(vm)

    # Save new master, degrade old master
    slave_vm.master_vm.revert_notready(save=False)
    new_vm = slave_vm.fail_over()

    # Re-check status of old master (current degraded slave) because it was shut down,
    # but the state wasn't save (it was notready back then)
    vm_status_one(task_id, vm)

    # Continue with prompting of new master and degradation of old
    SlaveVm.switch_vm_snapshots_node_storages(new_vm, nss=vm.get_node_storages())
    # Force update of zabbix
    vm_json_active_changed.send(task_id, vm=new_vm, old_json_active={}, force_update=True)  # Signal!

    if new_vm.node != vm.node:
        vm_node_changed.send(task_id, vm=new_vm, force_update=True)  # Signal!

    msg = 'Server replica was successfully promoted to master'
    _update_task_result_success(result, slave_vm, action, msg)
    task_log_cb_success(result, task_id, vm=new_vm, **result['meta'])
    request = get_dummy_request(vm.dc, method='PUT', system_user=True)

    # Mark pending backups as "lost" :( TODO: implement vm_backup_sync
    new_vm.backup_set.filter(status=Backup.PENDING).update(status=Backup.LOST)

    # Sync snapshots on new master VM (mark missing snapshots as "lost")
    for disk_id, _ in enumerate(new_vm.json_active_get_disks(), start=1):
        call_api_view(request, 'PUT', vm_snapshot_list, new_vm.hostname, data={'disk_id': disk_id},
                      log_response=True)

    if old_vm_status == Vm.RUNNING:
        # Start new master VM
        call_api_view(request, 'PUT', vm_status, new_vm.hostname, action='start', log_response=True)

    return result
def vm_define_all(request, vm_details, method='POST'):
    """
    Run all API functions to define VM
    we run vm_define, vm_define_disk a vm_define_nic, it also supports to pass request method DELETE
    to remove server. Return status_code and vm_details
    """
    ieb = ImportExportBase()
    vm, nics, disks = ieb.prepare_vm(json.loads(vm_details['json']))
    logger.debug('Extracted json %s', json.loads(vm_details['json']))
    hostname = vm['hostname']
    success = True
    vm_details['_vm_defined'] = False

    try:
        # Set server zpool from first disk pool
        vm['zpool'] = disks[0]['zpool']
    except (IndexError, KeyError):
        pass

    # API: POST vm_define()
    logger.info('Calling API view %s vm_define(%s, data=%s) by user %s in DC %s',
                method, hostname, vm, request.user, request.dc)
    res = call_api_view(request, method, vm_define, hostname, data=vm, disable_throttling=True)

    # when deleting server, system would delete nic and disk automatically
    if method == 'DELETE':
        success = False

    if res.status_code not in (200, 201) and method == 'POST':
        success = False
        logger.warning('vm_define: "%s" status_code: "%s" data: %s', hostname, res.status_code, res.data)
        vm_details = process_errors(res.data, vm_details, 0)
    else:
        vm_details['_vm_defined'] = True

    # Try to create NIC and DISK only if server has been created
    if success:
        nic_id = 1
        html_row_counter = 0

        for nic in nics:
            # API: POST vm_define_nic()
            logger.info('Calling API view vm_define_nic(%s, %s, data=%s) by user %s in DC %s',
                        hostname, nic_id, nic, request.user, request.dc)
            res = call_api_view(request, 'POST', vm_define_nic, hostname, nic_id, data=nic,
                                disable_throttling=True)

            if res.status_code not in (200, 201):
                success = False
                logger.warning('vm_define_nic: "%s" nic_id: "%s" status_code: "%s" data: %s',
                               hostname, nic_id, res.status_code, res.data)
                vm_details = process_errors(res.data, vm_details, html_row_counter)
            else:
                # Only advance the nic index on success; the row counter always advances
                nic_id += 1

            html_row_counter += 1

        disk_id = 1
        html_row_counter = 0

        for i, disk in enumerate(disks):
            # API: POST vm_define_disk()
            logger.info('Calling API view vm_define_disk(%s, %s, data=%s) by user %s in DC %s',
                        hostname, disk_id, disk, request.user, request.dc)

            # disk_id 1 for zone is created automatically we can just update it here...
            if i == 0 and vm['ostype'] in Vm.ZONE_OSTYPES:
                res = call_api_view(request, 'PUT', vm_define_disk, hostname, disk_id, data=disk,
                                    disable_throttling=True)
            else:
                res = call_api_view(request, 'POST', vm_define_disk, hostname, disk_id, data=disk,
                                    disable_throttling=True)

            if res.status_code not in (200, 201):
                success = False
                logger.warning('vm_define_disk: "%s" disk_id: "%s" status_code: "%s" data: %s',
                               hostname, disk_id, res.status_code, res.data)
                vm_details = process_errors(res.data, vm_details, html_row_counter)
            else:
                # Only advance the disk index on success; the row counter always advances
                disk_id += 1

            html_row_counter += 1

    if success:
        logger.debug('Server %s has been defined.', hostname)
        return 201, vm_details
    else:
        return 400, vm_details
def vm_migrate_cb(result, task_id, vm_uuid=None, slave_vm_uuid=None):
    """A callback function for api.vm.migrate.views.vm_migrate.

    On success, deletes the placeholder (ghost) VM, saves the migrated VM's json
    from SmartOS, updates node/storage resources and runs an internal PUT
    vm_manage if the json still differs. On failure, raises TaskException.
    """
    ghost_vm = SlaveVm.get_by_uuid(slave_vm_uuid)
    msg = result.get('message', '')

    if result['returncode'] == 0 and msg and 'Successfully migrated' in msg:
        # Save node and delete placeholder VM first
        node = ghost_vm.vm.node
        nss = set(ghost_vm.vm.get_node_storages())
        ghost_vm.delete()  # post_delete signal will update node and storage resources

        # Fetch VM after ghost_vm is deleted, because it updates vm.slave_vms array
        vm = Vm.objects.select_related('node', 'dc').get(uuid=vm_uuid)
        changing_node = vm.node != ghost_vm.vm.node
        json = result.pop('json', None)

        try:  # save json from smartos
            json_active = vm.json.load(json)
            vm.json_active = json_active
            vm.json = json_active
        except Exception as e:
            logger.exception(e)
            logger.error('Could not parse json output from vm_migrate(%s). Error: %s', vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        nss.update(list(vm.get_node_storages()))
        # Revert status and set new node (should trigger node resource update)
        vm.revert_notready(save=False)

        if changing_node:
            vm.set_node(node)

        vm.save(update_node_resources=True, update_storage_resources=nss)
        SlaveVm.switch_vm_snapshots_node_storages(vm, nss=nss)
        vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!
    else:
        vm = Vm.objects.get(uuid=vm_uuid)
        _vm_migrate_cb_failed(result, task_id, vm, ghost_vm)
        logger.error('Found nonzero returncode in result from vm_migrate(%s). Error: %s', vm_uuid, msg)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])

    if vm.json_changed():
        # Fixed duplicated word in the original log message ("has changed changed")
        logger.info('Running PUT vm_manage(%s), because something (vnc port?) has changed', vm)
        from api.vm.base.views import vm_manage
        from api.utils.request import get_dummy_request
        from api.utils.views import call_api_view

        request = get_dummy_request(vm.dc, method='PUT', system_user=True)
        res = call_api_view(request, 'PUT', vm_manage, vm.hostname)

        if res.status_code == 201:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning('PUT vm_manage(%s) was successful: %s', vm, res.data)
        else:
            logger.error('PUT vm_manage(%s) failed: %s (%s): %s',
                         vm, res.status_code, res.status_text, res.data)

    return result