def put(self):
    """Update node definition.

    Validates the node definition serializer, saves it, emits task-log responses and
    fires node status/monitoring signals when the relevant attributes changed.
    """
    node = self.node
    ser = NodeDefineSerializer(self.request, node, data=self.data, partial=True)

    # Refuse to modify a node that has pending tasks of its own
    if node.tasks:
        raise NodeHasPendingTasks

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, obj=node, dc_bound=False)

    # Switching the node to OFFLINE is also refused while related objects have pending tasks
    if ser.status_changed == Node.OFFLINE and node.has_related_tasks():
        raise NodeHasPendingTasks('Node has related objects with pending tasks')

    # Changing cpu or disk coefficients can lead to negative numbers in node.cpu/ram_free or dc_node.cpu/ram_free;
    # This is solved by running the DB update inside a transaction and checking for negative values (=> rollback)
    errors = ser.save()

    if errors:
        return FailureTaskResponse(self.request, errors, obj=node, dc_bound=False)

    res = SuccessTaskResponse(self.request, ser.data, obj=node, detail_dict=ser.detail_dict(), dc_bound=False,
                              msg=LOG_DEF_UPDATE)
    task_id = TaskID(res.data.get('task_id'), request=self.request)

    # Delete obsolete IP address and DNS records and create new ones if possible
    self._post_update(task_id, ser)

    # Signals section (should go last)
    if ser.status_changed:
        node_status_changed.send(task_id, node=node, automatic=False)  # Signal!

        if node.is_online():
            node_online.send(task_id, node=node, automatic=False)  # Signal!
        elif node.is_offline():
            node_offline.send(task_id, node=node)  # Signal!

    if ser.monitoring_changed or ser.address_changed:
        node_json_changed.send(task_id, node=node)  # Signal!

    return res
def put(self):
    """Execute a guest-agent command inside a running HVM virtual machine.

    Builds a ``qga-client`` shell command and dispatches it as a task on the
    compute node's fast queue.
    """
    request, vm, command = self.request, self.vm, self.command

    # The QGA socket only exists for hardware-virtualized machines
    if not vm.is_hvm():
        raise OperationNotSupported

    if vm.status not in (vm.RUNNING, vm.STOPPING):
        raise VmIsNotOperational

    if command not in COMMANDS:
        raise InvalidInput('Invalid command')

    ser = VmQGASerializer(request, command, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=vm)

    apiview = {
        'view': 'vm_qga',
        'method': request.method,
        'hostname': vm.hostname,
        'command': command,
    }
    cmd = 'qga-client %s %s 2>&1' % (vm.qga_socket_path, ' '.join(ser.get_full_command()))
    lock = 'vm_qga vm:%s' % vm.uuid
    meta = {
        'output': {
            'returncode': 'returncode',
            'stdout': 'message'
        },
        # Replace the internal VM uuid with the hostname in task output shown to users
        'replace_stdout': ((vm.uuid, vm.hostname), ),
        'apiview': apiview,
        'msg': LOG_QGA_COMMAND,
        'vm_uuid': vm.uuid,
        'check_returncode': True,
    }

    # callback=None means that an implicit LOGTASK callback will be used (task_log_cb)
    tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, msg=LOG_QGA_COMMAND, obj=vm, api_view=apiview, data=self.data,
                            detail_dict=ser.detail_dict())
def post(self):
    """Create a new VM disk snapshot by running an ``esnapshot create`` task on the compute node.

    The Snapshot DB object is pre-created in PENDING state and deleted again if the
    task could not be submitted.
    """
    self._check_vm_status()
    apiview, detail = self._get_apiview_detail()
    request, vm, snap = self.request, self.vm, self.snap
    snap.status = snap.PENDING
    snap.define_id = self.snap_define_id
    snap.type = self.snaptype
    ser = SnapshotSerializer(request, snap, data=self.data)
    fsfreeze = ''

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    # Filesystem freeze (via the guest agent socket) is only attempted for HVM guests
    # that actually have a QGA socket; the socket path is only passed to the command
    # when the VM is not stopped
    if vm.is_hvm() and self.data.get('fsfreeze', False):
        qga_socket = vm.qga_socket_path

        if qga_socket:
            snap.fsfreeze = True

            if vm.status != vm.STOPPED:
                fsfreeze = '"%s"' % qga_socket

    self._check_snap_limit()
    self._check_snap_size_limit()  # Issue #chili-848
    self._check_snap_dc_size_limit()  # Issue #chili-848
    snap.zpool = vm.node.nodestorage_set.get(zpool=self.zpool)
    snap.save()
    detail += ', type=%s, fsfreeze=%s' % (self.snaptype, str(snap.fsfreeze).lower())
    msg = LOG_SNAP_CREATE
    lock = self.LOCK % (vm.uuid, snap.disk_id)
    cmd = 'esnapshot create "%s@%s" "es:snapname=%s" %s 2>&1' % (self.zfs_filesystem, snap.zfs_name, snap.name,
                                                                 fsfreeze)
    tid, err = execute(request, vm.owner.id, cmd, meta=snap_meta(vm, msg, apiview, detail), lock=lock,
                       callback=snap_callback(vm, snap), queue=vm.node.fast_queue, tt=self.tt)

    if err:
        # Task submission failed -> remove the pre-created PENDING snapshot object
        snap.delete()
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, detail=detail, data=self.data)
def put(self, ns):
    """Update node-storage.

    Saves the Storage and NodeStorage objects inside one DB transaction and rolls
    back (via a fake IntegrityError) when resource counters would go negative.
    """
    ser = NodeStorageSerializer(self.request, ns, data=self.data, partial=True)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, obj=ns, dc_bound=False)

    update_storage_resources = ser.update_storage_resources
    # True when this zpool is the node's default pool (used for DcNode resource accounting)
    is_zones_pool = ser.object.zpool == ser.object.node.zpool

    try:
        with transaction.atomic():
            ser.object.storage.save()
            ser.object.save(update_resources=update_storage_resources, update_dcnode_resources=is_zones_pool)

            # Raise inside the atomic block so the whole update is rolled back
            if update_storage_resources:
                if ns.storage.size_free < 0:
                    raise IntegrityError('disk_check')
                elif is_zones_pool and ns.node.dcnode_set.filter(dc__in=ns.dc.all(), ram_free__lt=0).exists():
                    raise IntegrityError('disk_check')
    except IntegrityError as exc:
        # size constraint was violated on vms_dcnode (can happen when DcNode strategy is set to RESERVED)
        # OR an exception was raised above
        if 'disk_check' in str(exc):
            errors = {'size_coef': ser.error_negative_resources}
            return FailureTaskResponse(self.request, errors, obj=ns, dc_bound=False)
        else:
            raise exc

    if ser.update_storage_resources:  # size_free changed
        ser.reload()

    return SuccessTaskResponse(self.request, ser.data, obj=ns, detail_dict=ser.detail_dict(), msg=LOG_NS_UPDATE,
                               dc_bound=False)
def get_current_status(self, force_change=False):
    """Get current VM status.

    Runs ``vmadm list`` on the compute node and lets a callback task compare
    the reported state with the last known status.

    :param force_change: passed through to the callback
        (presumably forces a status update even without a detected change — TODO confirm)
    """
    request, vm = self.request, self.vm

    # The status can only be queried when the compute node itself is operational
    if vm.node.status not in vm.node.STATUS_OPERATIONAL:
        raise NodeIsNotOperational

    apiview = self.apiview
    msg = LOG_STATUS_GET
    cmd = 'vmadm list -p -H -o state uuid=' + vm.uuid
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'stdout', 'stderr': 'stderr', 'hostname': vm.hostname},
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
        # Stored so the callback can compare the node-reported state with the DB state
        'last_status': vm.status,
    }
    callback = (
        'api.vm.status.tasks.vm_status_current_cb',
        {'vm_uuid': vm.uuid, 'force_change': force_change}
    )
    tid, err = execute(request, vm.owner.id, cmd, meta=meta, callback=callback, queue=vm.node.fast_queue,
                       nolog=True)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, vm=vm, api_view=apiview, data=self.data)  # No msg
def put(self):
    """Update the user's profile and emit the profile-changed signal after DB commit."""
    req = self.request
    profile_ser = self.serializer(req, self.profile, data=self.data, partial=True)

    if not profile_ser.is_valid():
        return FailureTaskResponse(req, profile_ser.errors, obj=self.profile, dc_bound=False)

    profile_ser.save()
    response = SuccessTaskResponse(req, profile_ser.data, obj=self.user, detail_dict=profile_ser.detail_dict(),
                                   owner=profile_ser.object.user, msg=LOG_PROFILE_UPDATE, dc_bound=False)
    task_id = response.data.get('task_id')
    # Defer the notification until the surrounding DB transaction commits
    connection.on_commit(lambda: user_profile_changed.send(task_id, user_name=self.user.username))  # Signal!

    return response
def get(self):
    """Fetch monitoring history graph data for a compute node."""
    request = self.request
    node = self.node
    graph = self.graph_type

    if node.status not in node.STATUS_AVAILABLE_MONITORING:
        raise NodeIsNotOperational

    try:
        graph_settings = GRAPH_ITEMS.get_options(graph, node)
    except KeyError:
        raise InvalidInput('Invalid graph')

    # Pick the serializer according to the graph-name prefix
    if graph.startswith(('nic-', 'net-')):
        serializer_class = NetworkNodeMonHistorySerializer
    elif graph.startswith(('storage-', )):
        serializer_class = StorageNodeMonHistorySerializer
    else:
        serializer_class = MonHistorySerializer

    ser = serializer_class(obj=self.node, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=node)

    return call_mon_history_task(request, t_mon_node_history, view_fun_name='mon_node_history', obj=self.node,
                                 dc_bound=False, serializer=ser, data=self.data, graph=graph,
                                 graph_settings=graph_settings)
def put(self):
    """Performs call to execute_sysinfo"""
    node = self.node

    # sysinfo can only be refreshed on an online node
    if not node.is_online():
        raise NodeIsNotOperational()

    apiview = {
        'view': 'node_sysinfo',
        'method': self.request.method,
        'hostname': node.hostname,
    }
    task_meta = {
        'apiview': apiview,
        'msg': LOG_NODE_UPDATE,
        'node_uuid': node.uuid,
    }
    task_id, err = execute_sysinfo(self.request, node.owner.id, queue=node.fast_queue, meta=task_meta,
                                   node_uuid=node.uuid, check_user_tasks=True)

    if err:
        return FailureTaskResponse(self.request, err)

    return TaskResponse(self.request, task_id, api_view=apiview, obj=node, msg=LOG_NODE_UPDATE, data=self.data)
def post(self):
    """Create a new network object, attaching it to its DC when dc-bound."""
    net, request = self.net, self.request

    if not request.user.is_staff:
        self.data.pop('dc_bound', None)  # default DC binding cannot be changed when creating object

    net.owner = request.user  # just a default
    net.alias = net.name  # just a default
    ser = NetworkSerializer(request, net, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=net, dc_bound=False)

    ser.object.save()
    res = SuccessTaskResponse(request, ser.data, status=HTTP_201_CREATED, obj=net, dc_bound=False,
                              detail_dict=ser.detail_dict(), msg=LOG_NET_CREATE)

    # Attach the freshly created network to the DC it is bound to
    if net.dc_bound:
        attach_dc_virt_object(res.data.get('task_id'), LOG_NETWORK_ATTACH, net, net.dc_bound, user=request.user)

    return res
def put(self):
    """Update DC settings.

    Clears the zabbix cache when monitoring settings changed and emits the
    dc_settings_changed signal when any setting actually changed.
    """
    dc = self.dc
    ser = self.serializer(self.request, dc, data=self.data, partial=True)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, obj=dc)

    dcs = dc.custom_settings
    # BUG FIX: snapshot the old settings *before* dcs.update() mutates the dict in place.
    # The original read dc.custom_settings only after the update, so when the property
    # returns the same dict object, old_settings aliased the new settings and the
    # dc_settings_changed signal below could never fire.
    old_settings = dcs.copy()
    dcs.update(ser.settings)
    new_settings = dcs
    dc.custom_settings = dcs
    dc.save()
    data = ser.data
    dd = ser.detail_dict()
    res = SuccessTaskResponse(self.request, data, obj=dc, detail_dict=dd, msg=LOG_DC_SETTINGS_UPDATE)

    # Check if monitoring settings have been changed
    if any('MON_ZABBIX' in i for i in dd):
        logger.info('Monitoring settings have been changed in DC %s. Running task for clearing zabbix cache', dc)

        try:
            mon_clear_zabbix_cache.call(dc.id, full=True)
        except Exception as e:
            logger.exception(e)

    # Check if compute node SSH key was added to VMS_NODE_SSH_KEYS_DEFAULT
    task_id = TaskID(res.data.get('task_id'), request=self.request)

    if old_settings != new_settings:
        dc_settings_changed.send(task_id, dc=dc, old_settings=old_settings, new_settings=new_settings)  # Signal

    return res
def ssl_certificate(self):
    """PUT /system/settings/ssl-certificate - runs a script, which checks the certificate by running openssl,
    replaces the PEM file and reloads haproxy"""
    # Only callable from the default DC
    assert self.request.dc.id == DefaultDc().id
    ser = SSLCertificateSerializer(self.request, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, dc_bound=False)

    cert = ser.object['cert']
    update_script = os.path.join(settings.PROJECT_DIR, self.SSL_CERTIFICATE_UPDATE_CMD)
    res = {
        'action': 'SSL Certificate Update',
        'returncode': '???',
        'message': ''
    }
    # Write the PEM data to a named temp file; delete=False because the update script
    # (run via sudo) must open it by name, and we remove it ourselves in the finally block
    cert_file = NamedTemporaryFile(dir=settings.TMPDIR, mode='w', delete=False)
    cert_file.write(cert)
    cert_file.close()

    try:
        proc = Popen(['sudo', update_script, cert_file.name], bufsize=0, close_fds=True, stdout=PIPE,
                     stderr=STDOUT)
        # NOTE(review): communicate() returns bytes on Python 3 — confirm downstream handling of res['message']
        res['message'], _ = proc.communicate()
        res['returncode'] = proc.returncode
    finally:
        os.remove(cert_file.name)

    if proc.returncode == 0:
        response_class = SuccessTaskResponse
    else:
        response_class = FailureTaskResponse

    return response_class(self.request, res, msg=LOG_SYSTEM_SETTINGS_UPDATE, detail_dict=res, dc_bound=False)
def put(self):
    """Start the system update management task.

    Refuses to run when the requested version equals the running version,
    unless ``force`` was supplied.
    """
    # System update may only be started from the default DC
    assert self.request.dc.is_default()
    ser = UpdateSerializer(self.request, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, dc_bound=False)

    version = ser.data['version']
    from core.version import __version__ as mgmt_version

    # noinspection PyUnboundLocalVariable
    if version == ('v' + mgmt_version) and not ser.data.get('force'):
        raise PreconditionRequired('System is already up-to-date')

    obj = self.request.dc
    msg = LOG_SYSTEM_UPDATE
    _apiview_ = {
        'view': 'system_update',
        'method': self.request.method,
        'version': version,
    }
    meta = {
        'apiview': _apiview_,
        'msg': LOG_SYSTEM_UPDATE,
    }
    task_kwargs = ser.data.copy()
    task_kwargs['dc_id'] = obj.id
    # tidlock serializes system updates — only one such task can run at a time
    tid, err, res = system_update.call(self.request, None, (), kwargs=task_kwargs, meta=meta, tg=TG_DC_UNBOUND,
                                       tidlock=self.LOCK)

    if err:
        msg = obj = None  # Do not log an error here

    return mgmt_task_response(self.request, tid, err, res, msg=msg, obj=obj, api_view=_apiview_, dc_bound=False,
                              data=self.data, detail_dict=ser.detail_dict(force_full=True))
def put(self):
    """Partially update an existing DNS record via RecordSerializer."""
    record_ser = RecordSerializer(self.request, self.record, data=self.data, partial=True)

    if record_ser.is_valid():
        record_ser.object.save()
        detail = self._fix_detail_dict(record_ser.detail_dict())

        return SuccessTaskResponse(self.request, record_ser.data, obj=self.domain, msg=LOG_RECORD_UPDATE,
                                   dc_bound=False, task_id=self.task_id, detail_dict=detail)

    return FailureTaskResponse(self.request, record_ser.errors, obj=self.domain, dc_bound=False,
                               task_id=self.task_id, **self.log_failure(LOG_RECORD_UPDATE))
def put(self):
    """Sync snapshots in DB with snapshots on compute node and update snapshot status and size."""
    request, vm = self.request, self.vm
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, self.data)
    self._check_vm_status()

    # Prepare task data
    apiview = {
        'view': 'vm_snapshot_list',
        'method': request.method,
        'hostname': vm.hostname,
        'disk_id': disk_id,
    }
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'data', 'stderr': 'message'},
        # Replace the internal VM uuid with the hostname in task output shown to users
        'replace_text': ((vm.uuid, vm.hostname),),
        'msg': LOG_SNAPS_SYNC,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    detail = 'disk_id=%s' % disk_id
    cmd = 'esnapshot list "%s"' % zfs_filesystem
    lock = self.LOCK % (vm.uuid, real_disk_id)
    callback = ('api.vm.snapshot.tasks.vm_snapshot_sync_cb', {'vm_uuid': vm.uuid, 'disk_id': real_disk_id})

    # Run task
    tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, callback=callback,
                       queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, msg=LOG_SNAPS_SYNC, vm=vm, api_view=apiview, detail=detail,
                            data=self.data)
def put(self, vm, data, task_id=None, **kwargs):
    """Common code for updating VM properties used in vm_define and gui.vm.forms.ServerSettingsForm"""
    ser = VmDefineSerializer(self.request, vm, data=data, partial=True)

    if ser.is_valid():
        # Serializer-computed change flags decide which related data must be refreshed on save
        ser.object.save(sync_json=True, update_hostname=ser.hostname_changed,
                        update_node_resources=ser.update_node_resources,
                        update_storage_resources=ser.update_storage_resources)

        if ser.hostname_changed:
            # Task event for GUI
            VmDefineHostnameChanged(self.request, vm, ser.old_hostname).send()

        return SuccessTaskResponse(self.request, ser.data, vm=vm, task_id=task_id, msg=LOG_DEF_UPDATE,
                                   detail_dict=ser.detail_dict())

    return FailureTaskResponse(self.request, ser.errors, vm=vm, task_id=task_id)
def post(self):
    """Create a new server template, attaching it to its DC when dc-bound."""
    template, request = self.template, self.request

    if not request.user.is_staff:
        self.data.pop('dc_bound', None)  # default DC binding cannot be changed when creating object

    template.owner = request.user  # just a default
    template.alias = template.name  # just a default
    ser = TemplateSerializer(request, template, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=template, dc_bound=False)

    ser.object.save()
    res = SuccessTaskResponse(request, ser.data, status=HTTP_201_CREATED, obj=template, dc_bound=False,
                              detail_dict=ser.detail_dict(), msg=LOG_TEMPLATE_CREATE)

    # Attach the freshly created template to the DC it is bound to
    if template.dc_bound:
        attach_dc_virt_object(res.data.get('task_id'), LOG_TEMPLATE_ATTACH, template, template.dc_bound,
                              user=request.user)

    return res
def delete(self): user = self.user # Predefined users can not be deleted if user.id in (settings.ADMIN_USER, settings.SYSTEM_USER, self.request.user.id): raise PermissionDenied relations = user.get_relations() if relations: message = { 'detail': _('Cannot delete user, because he has relations to some objects.'), 'relations': relations } return FailureTaskResponse(self.request, message, obj=user, dc_bound=False) dd = {'email': user.email, 'date_joined': user.date_joined} was_staff = user.is_staff old_roles = list(user.roles.all()) ser = self.serializer(self.request, user) ser.object.delete() res = SuccessTaskResponse(self.request, None, obj=user, msg=LOG_USER_DELETE, detail_dict=dd, dc_bound=False) # User was removed, which may affect the cached list of DC admins for DCs which are attached to user's groups # So we need to clear the list of admins cached for each affected DC affected_dcs = Dc.objects.distinct().filter(roles__in=old_roles, roles__permissions__id=AdminPermission.id) for dc in affected_dcs: User.clear_dc_admin_ids(dc) if was_staff: User.clear_super_admin_ids() return res
def post(self, vm, data, hostname_or_uuid=None):
    """
    Create VM definition

    In this case, hostname_or_uuid parameter has to be only hostname, never uuid
    """
    ser = VmDefineSerializer(self.request, data=data, hostname=hostname_or_uuid)

    if ser.is_valid():
        ser.object.save(sync_json=True, update_node_resources=ser.update_node_resources)
        vm = ser.object

        try:
            res = SuccessTaskResponse(self.request, ser.data, status=scode.HTTP_201_CREATED, vm=vm,
                                      msg=LOG_DEF_CREATE, detail_dict=ser.detail_dict())
            vm_defined.send(TaskID(res.data.get('task_id'), request=self.request), vm=vm)  # Signal!

            return res
        finally:
            # Create disk/nics if defined in template
            # (in a finally block so it runs even when building the response or sending the signal raises)
            if vm.template:
                self._create_disks_and_nics(vm)

    return FailureTaskResponse(self.request, ser.errors)
def put(self, vm, disk_id, disks, disk, data):
    """Update VM disk definition"""
    # disk.copy() so validation works on a copy of the current disk definition
    ser = VmDefineDiskSerializer(self.request, vm, disk.copy(), disk_id=disk_id, data=data, partial=True)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, vm=vm)

    disks[disk_id] = ser.jsondata
    vm.save_disks(disks, update_node_resources=ser.update_node_resources,
                  update_storage_resources=ser.update_storage_resources)
    # disk_id is 0-based internally, but 1-based in the user-facing detail string
    res = SuccessTaskResponse(self.request, ser.data, vm=vm, msg=LOG_DISK_UPDATE,
                              detail='disk_id=' + str(disk_id + 1), detail_dict=ser.detail_dict())
    self._update_vm_tags(vm, ser.img, ser.img_old, data, task_id=res.data.get('task_id'))

    return res
def put(self):
    """Run the system update, serialized by a task lock so only one update runs at a time."""
    # Only callable from the default DC
    assert self.request.dc.id == DefaultDc().id
    ser = UpdateSerializer(self.request, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, task_id=self.task_id, dc_bound=False)

    version = ser.object['version']
    from core.version import __version__ as mgmt_version

    # NOTE(review): unlike the other system-update view, there is no `force` bypass here — confirm intended
    if version == ('v' + mgmt_version):
        raise PreconditionRequired('System is already up-to-date')

    lock = TaskLock(self._lock_key, desc='System task')

    # 7200 s lock timeout gives the update up to two hours before the lock expires
    if not lock.acquire(self.task_id, timeout=7200, save_reverse=False):
        raise TaskIsAlreadyRunning

    try:
        return self._update(version, key=ser.object.get('key'), cert=ser.object.get('cert'))
    finally:
        lock.delete(fail_silently=True, delete_reverse=False)
def post(self, ns):
    """Create node-storage"""
    ns.storage = Storage(name='%s@%s' % (ns.zpool, ns.node.hostname), alias=ns.zpool, owner=self.request.user)

    try:
        # Pre-fill the storage size from the zpool info reported by the compute node, when available
        ns.storage.size = ns.node.zpools[ns.zpool]['size']
    except KeyError:
        pass

    ser = NodeStorageSerializer(self.request, ns, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, obj=ns, dc_bound=False)

    # Save Storage and NodeStorage together so a failure leaves no half-created pair
    with transaction.atomic():
        storage = ser.object.storage
        storage.save()
        ser.object.storage = storage
        # update DcNode resources only when this zpool is the node's default pool
        ser.object.save(update_dcnode_resources=(ser.object.zpool == ser.object.node.zpool))

    return SuccessTaskResponse(self.request, ser.data, status=HTTP_201_CREATED, obj=ns, dc_bound=False,
                               detail_dict=ser.detail_dict(), msg=LOG_NS_CREATE)
def post(self, vm, disk_id, disks, disk, data):
    """Create VM disk definition"""
    # (docstring fixed: it previously said "nic", but this view creates a disk — see LOG_DISK_CREATE below)
    # Adding disks to an already deployed non-KVM (zone) VM is not supported
    if not vm.is_kvm() and vm.is_deployed():
        raise OperationNotSupported

    ser = VmDefineDiskSerializer(self.request, vm, disk_id=disk_id, data=data)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, vm=vm)

    disks[disk_id] = ser.jsondata
    vm.save_disks(disks, update_node_resources=ser.update_node_resources,
                  update_storage_resources=ser.update_storage_resources)
    # disk_id is 0-based internally, but 1-based in the user-facing detail string
    res = SuccessTaskResponse(self.request, ser.data, status=scode.HTTP_201_CREATED, vm=vm, msg=LOG_DISK_CREATE,
                              detail='disk_id=' + str(disk_id + 1), detail_dict=ser.detail_dict())
    self._update_vm_tags(vm, ser.img, ser.img_old, data, task_id=res.data.get('task_id'))

    return res
def put(self):
    """Update DC settings and emit dc_settings_changed when anything actually changed."""
    dc = self.dc
    ser = self.serializer(self.request, dc, data=self.data, partial=True)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, obj=dc)

    dcs = dc.custom_settings
    # BUG FIX: snapshot the old settings *before* dcs.update() mutates the dict in place.
    # The original read dc.custom_settings only after the update, so when the property
    # returns the same dict object, old_settings aliased the new settings and the
    # dc_settings_changed signal below could never fire.
    old_settings = dcs.copy()
    dcs.update(ser.settings)
    new_settings = dcs
    dc.custom_settings = dcs
    dc.save()
    data = ser.data  # Prepare ser._data for ser.detail_dict() to work
    res = SuccessTaskResponse(self.request, data, obj=dc, detail_dict=ser.detail_dict(),
                              msg=LOG_DC_SETTINGS_UPDATE)
    task_id = TaskID(res.data.get('task_id'), request=self.request)

    if old_settings != new_settings:
        dc_settings_changed.send(task_id, dc=dc, old_settings=old_settings, new_settings=new_settings)  # Signal!

    return res
def user_modify(self, update=False, serializer=None):
    """Shared implementation for creating (update=False) and updating (update=True) a user.

    After saving, invalidates the cached super-admin and per-DC admin lists that the
    change may have affected and notifies listeners once the DB transaction commits.
    """
    affected_groups = ()

    if not serializer:
        serializer = self.serializer

    user = self.user
    ser = serializer(self.request, user, data=self.data, partial=update)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, obj=user, dc_bound=False)

    ser.save()

    if update:
        msg = LOG_USER_UPDATE
        status = HTTP_200_OK
    else:
        msg = LOG_USER_CREATE
        status = HTTP_201_CREATED

    res = SuccessTaskResponse(self.request, ser.data, status=status, obj=user, msg=msg, owner=ser.object,
                              detail_dict=ser.detail_dict(), dc_bound=False)

    if serializer == UserSerializer:
        # User's is_staff attribute was changed -> Clear the cached list of super admins
        if ser.is_staff_changed:
            User.clear_super_admin_ids()

        # User's groups were changed, which may affect the cached list of DC admins for DCs which are attached
        # to these groups. So we need to clear the list of admins cached for each affected DC
        # noinspection PyProtectedMember
        if user._roles_to_save is not None:
            # noinspection PyProtectedMember
            affected_groups = set(user._roles_to_save)
            affected_groups.update(ser.old_roles)
            affected_dcs = Dc.objects.distinct().filter(roles__in=affected_groups,
                                                        roles__permissions__id=AdminPermission.id)

            for dc in affected_dcs:
                User.clear_dc_admin_ids(dc)

        # User was removed from some groups and may loose access to DCs which are attached to this group
        # So we better set his current_dc to default_dc
        if ser.old_roles and not user.is_staff:
            user.reset_current_dc()

    connection.on_commit(lambda: user_relationship_changed.send(
        user_name=ser.object.username, affected_groups=tuple(group.id for group in affected_groups)))

    return res
def get(self):
    """List monitoring alerts.

    For dc-unbound requests the view switches request.dc to the default DC before
    dispatching the cached mon_alert_list management task.
    """
    request = self.request
    ser = AlertSerializer(request, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors)

    dc_bound = ser.data['dc_bound']

    if dc_bound:
        tg = TG_DC_BOUND
    else:
        tg = TG_DC_UNBOUND

        if not request.dc.is_default():
            request.dc = DefaultDc()  # Warning: Changing request.dc
            # BUG FIX: the format string had five arguments but only four placeholders
            # (the user placeholder was missing), so this log call failed to render.
            logger.info('"%s %s" user="%s" _changed_ dc="%s" permissions=%s', request.method, request.path,
                        request.user.username, request.dc.name, request.dc_user_permissions)

    if not request.dc.settings.MON_ZABBIX_ENABLED:  # dc1_settings
        raise Http404

    _apiview_ = {'view': 'mon_alert_list', 'method': request.method}
    _tidlock = [
        'mon_alert_list',
        'dc_id=%s' % request.dc.id,
        'vm_uuids=%s' % ','.join(ser.vms or ()),
        'node_uuids=%s' % ','.join(ser.nodes or ()),
    ]
    task_kwargs = {
        'vm_uuids': ser.vms,
        'node_uuids': ser.nodes,
    }

    for key, val in ser.data.items():
        _apiview_[key] = val

        # vm_*/node_* parameters are already part of task_kwargs and the tidlock
        if not key.startswith(('vm_', 'node_')):
            task_kwargs[key] = val
            _tidlock.append('%s=%s' % (key, to_string(val)))

    tidlock = ':'.join(_tidlock)
    ter = mon_alert_list.call(request, None, (request.dc.id, ), kwargs=task_kwargs, meta={'apiview': _apiview_},
                              tg=tg, tidlock=tidlock, cache_result=tidlock, cache_timeout=self.cache_timeout)

    return mgmt_task_response(request, *ter, obj=request.dc, api_view=_apiview_, dc_bound=dc_bound,
                              data=self.data, detail_dict=ser.detail_dict(force_full=True))
def save_note(self):
    """Persist a partial backup update (e.g. changing the backup note)."""
    req = self.request
    backup_ser = BackupSerializer(req, self.bkp, data=self.data, partial=True)

    if not backup_ser.is_valid():
        return FailureTaskResponse(req, backup_ser.errors)

    backup_ser.object.save()

    return SuccessTaskResponse(req, backup_ser.data)
def post(self):
    """Create a new DNS domain, its TSIG keys, and default SOA/NS records.

    TSIG parameters are stripped from the request data and validated separately;
    SOA/NS record creation is best-effort (failures are only logged).
    """
    request = self.request
    dc1_settings = DefaultDc().settings
    domain = self.domain
    domain.owner = request.user  # just a default
    domain.type = dc1_settings.DNS_DOMAIN_TYPE_DEFAULT

    if not request.user.is_staff:
        self.data.pop('dc_bound', None)  # default DC binding cannot be changed when creating object

    ser = DomainSerializer(request, domain, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=domain, dc_bound=False)

    tsig_data = {}  # in case there will be more tsig_* parameters (but for now, there's only tsig_keys)

    # remove tsig_* parameters from request data because they belong to other validator
    # BUG FIX: iterate over a list() copy — popping from self.data while iterating
    # self.data.items() raises RuntimeError on Python 3
    for key, val in list(self.data.items()):
        if key.startswith('tsig_'):
            self.data.pop(key)
            tsig_data[key[5:]] = val  # e.g: tsig_keys -> keys

    tsig_keys_new, tsig_serializers = self.process_tsig_keys(request, tsig_data)
    # save default serializer
    ser.object.save()

    # save tsig serializer(s)
    for ser_tsig in tsig_serializers:
        ser_tsig.object.save()

    # link newly defined TSIG keys to this domain
    for new_key in tsig_keys_new:
        new_key.link_to_axfr_domain(domain)

    res = SuccessTaskResponse(request, ser.data, status=HTTP_201_CREATED, obj=domain, dc_bound=False,
                              msg=LOG_DOMAIN_CREATE, detail_dict=ser.detail_dict())

    # Create SOA and NS records for new MASTER/NATIVE domain
    from api.dns.record.views import dns_record

    try:
        if dc1_settings.DNS_SOA_DEFAULT and dc1_settings.DNS_NAMESERVERS:
            soa_attrs = {'hostmaster': dc1_settings.DNS_HOSTMASTER.replace('@', '.'),
                         'nameserver': dc1_settings.DNS_NAMESERVERS[0]}
            soa_data = {'type': Record.SOA, 'name': domain.name,
                        'content': dc1_settings.DNS_SOA_DEFAULT.format(**soa_attrs)}
            call_api_view(request, 'POST', dns_record, domain.name, 0, data=soa_data, log_response=True)

            for ns in dc1_settings.DNS_NAMESERVERS:
                ns_data = {'type': Record.NS, 'name': domain.name, 'content': ns}
                call_api_view(request, 'POST', dns_record, domain.name, 0, data=ns_data, log_response=True)
    except Exception as e:
        # Best-effort: a failure to create default records must not fail the domain creation
        logger.exception(e)

    if domain.dc_bound:
        assert request.dc.id == domain.dc_bound
        attach_dc_virt_object(res.data.get('task_id'), LOG_DOMAIN_ATTACH, domain, request.dc, user=request.user)

    return res
def put(self):
    """Update a DNS domain and its TSIG key links.

    When the domain name changed, SOA/NS records and any DC using the old name as
    VMS_VM_DOMAIN_DEFAULT are updated best-effort (failures are only logged).
    """
    request = self.request
    domain = self.domain
    ser = DomainSerializer(request, domain, data=self.data, partial=True)

    # validate the main Domain form before processing TSIG params
    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=domain, dc_bound=False)

    tsig_data = {}  # in case there will be more tsig_* parameters (but for now, there's only tsig_keys)

    # remove tsig_* parameters from request data because they belong to other validator
    # BUG FIX: iterate over a list() copy — popping from self.data while iterating
    # self.data.items() raises RuntimeError on Python 3
    for key, val in list(self.data.items()):
        if key.startswith('tsig_'):
            self.data.pop(key)
            tsig_data[key[5:]] = val  # e.g: tsig_keys -> keys

    tsig_keys_new, tsig_serializers = self.process_tsig_keys(request, tsig_data)
    # save default serializer
    ser.object.save()

    # save tsig serializer(s)
    for ser_tsig in tsig_serializers:
        ser_tsig.object.save()

    # link newly defined TSIG keys to this domain
    for new_key in tsig_keys_new:
        new_key.link_to_axfr_domain(domain)

    # unlink old TSIG keys that were defined for this domain but they were removed in this update
    tsig_keys_names = {key.name for key in tsig_keys_new}

    for linked_key in TsigKey.get_linked_axfr_keys(domain):
        if linked_key.name not in tsig_keys_names:
            linked_key.unlink_axfr_domain(domain)

    res = SuccessTaskResponse(request, ser.data, obj=domain, msg=LOG_DOMAIN_UPDATE, detail_dict=ser.detail_dict(),
                              dc_bound=False)

    if ser.name_changed:
        # Update SOA and NS records when MASTER/NATIVE Domain name changed
        from api.dns.record.views import dns_record

        try:
            data = {'name': domain.name}

            for record_id in domain.record_set.filter(name__iexact=ser.name_changed,
                                                      type__in=[Record.NS, Record.SOA])\
                                              .values_list('id', flat=True):
                call_api_view(request, 'PUT', dns_record, domain.name, record_id, data=data, log_response=True)
        except Exception as e:
            logger.exception(e)

        # Update VMS_VM_DOMAIN_DEFAULT if this domain was used as a default DC domain
        from api.dc.base.views import dc_settings

        try:
            for dc in Dc.objects.all():
                if dc.settings.VMS_VM_DOMAIN_DEFAULT == ser.name_changed:
                    call_api_view(request, 'PUT', dc_settings, dc.name,
                                  data={'VMS_VM_DOMAIN_DEFAULT': domain.name}, log_response=True)
        except Exception as e:
            logger.exception(e)

    return res
def put(self):
    """Update a network.

    Addressing-related fields become read-only once IP addresses exist or the
    network is used by VMs.
    """
    net, request = self.net, self.request
    ser = NetworkSerializer(request, net, data=self.data, partial=True)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=net, dc_bound=False)

    data = ser.data
    dd = ser.detail_dict()
    # These fields cannot be updated when net is used by some VM
    updated_ro_fields = {
        i
        for i in ('network', 'netmask', 'gateway', 'nic_tag', 'vlan_id', 'vxlan_id', 'mtu', 'dhcp_passthrough')
        if i in dd
    }
    # These fields cannot be updated when IP addresses exist
    updated_ro_fields2 = updated_ro_fields.intersection(('network', 'netmask'))

    if updated_ro_fields2 and net.ipaddress_set.exists():
        # BUG FIX: attach this error only to the network/netmask fields (updated_ro_fields2);
        # the original flagged every updated read-only field with the IP-address message.
        err = ser.update_errors(updated_ro_fields2, _('This field cannot be updated '
                                                      'because network has existing IP addresses.'))
        return FailureTaskResponse(request, err, obj=net, dc_bound=False)

    if updated_ro_fields and net.is_used_by_vms():
        err = ser.update_errors(updated_ro_fields, _('This field cannot be updated '
                                                     'because network is used by some VMs.'))
        return FailureTaskResponse(request, err, obj=net, dc_bound=False)

    ser.object.save()

    return SuccessTaskResponse(self.request, data, obj=net, detail_dict=dd, msg=LOG_NET_UPDATE, dc_bound=False)
def _run_execute(self, msg, cmd, status):
    """Run an image import/delete command on the compute node.

    Marks the image with the given status on the node storage for the duration of the
    task and reverts the status flag when the task could not be created.
    """
    self._check_img()
    request, ns, img = self.request, self.ns, self.img
    node = ns.node
    detail = 'image=%s' % img.name
    apiview = {
        'view': 'node_image',
        'method': request.method,
        'hostname': node.hostname,
        'zpool': ns.zpool,
        'name': img.name,
    }

    # Set importing/deleting status
    img.set_ns_status(ns, status)

    # Create task
    tid, err = execute(
        request, ns.storage.owner.id, cmd, tg=TG_DC_UNBOUND, queue=node.image_queue,
        meta={
            'output': {
                'returncode': 'returncode',
                'stdout': 'message'
            },
            # Replace internal uuids with human-readable names in user-visible output
            'replace_stdout': ((node.uuid, node.hostname), (img.uuid, img.name)),
            'msg': msg,
            'nodestorage_id': ns.id,
            'apiview': apiview
        },
        callback=('api.node.image.tasks.node_image_cb', {
            'nodestorage_id': ns.id,
            'zpool': ns.zpool,
            'img_uuid': img.uuid
        }),
        lock='node_image ns:%s img:%s' % (ns.id, img.uuid),  # Lock image per node storage
        expires=IMAGE_TASK_EXPIRES)

    if err:
        # Task was not created -> revert the importing/deleting status flag
        img.del_ns_status(ns)
        return FailureTaskResponse(request, err, obj=ns)
    else:
        return TaskResponse(request, tid, msg=msg, obj=ns, api_view=apiview, detail=detail, data=self.data)