def put(self):
    """Partially update a datacenter and propagate owner/group side effects.

    Validates the request via the DC serializer; on failure returns a
    FailureTaskResponse with serializer errors. On success it saves the DC,
    then:
      - emits group/DC relationship-changed signals after the DB transaction
        commits,
      - clears the dc_bound flag on groups added to or removed from the DC,
      - invalidates the cached admin-user-id list for this DC,
      - clears dc_bound on the new owner and affected group users,
      - resets current_dc for users who may have lost access to a
        non-default DC (get_dc() uses current_dc as a shortcut).

    Returns a SuccessTaskResponse logged with LOG_DC_UPDATE.
    """
    dc = self.dc
    request = self.request
    ser = self.serializer(request, dc, data=self.data, partial=True)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=dc)

    ser.save()
    res = SuccessTaskResponse(request, ser.data, obj=dc, detail_dict=ser.detail_dict(), msg=LOG_DC_UPDATE)
    task_id = res.data.get('task_id')

    # Changing DC groups affects the group.dc_bound flag
    if ser.groups_changed:
        # The groups that are removed or added should not be DC-bound anymore
        for group in ser.groups_changed:
            # BUGFIX: bind the loop variable as a default argument. A plain
            # closure is late-bound, so every on_commit callback would have
            # fired with the *last* group of the loop instead of its own.
            connection.on_commit(lambda group=group: group_relationship_changed.send(
                dc_name=dc.name, group_name=group.name))

            if group.dc_bound:
                remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, group, user=request.user)

    # After changing the DC owner or changing DC groups we have to invalidate the list of admins for this DC
    if ser.owner_changed or ser.groups_changed:
        connection.on_commit(lambda: dc_relationship_changed.send(dc_name=dc.name))
        User.clear_dc_admin_ids(dc)
        # Remove user.dc_bound flag for new DC owner
        # Remove user.dc_bound flag for users in new dc.groups, which are DC-bound, but not to this datacenter
        self._remove_user_dc_binding(task_id, owner=dc.owner, groups=ser.groups_added)

        # When a user is removed as owner from non-default DC or groups are changed on a non-default DC
        # we have to update the current_dc on every affected user, because he could remain access to this DC
        # (this is because get_dc() uses current_dc as a shortcut)
        if not dc.is_default():
            if ser.owner_changed and not ser.owner_changed.is_staff:
                ser.owner_changed.reset_current_dc()

            if ser.removed_users:
                for user in ser.removed_users.select_related('default_dc').exclude(is_staff=True):
                    user.reset_current_dc()

    return res
def post(self):
    """Create a new datacenter.

    Refuses with PermissionDenied when DC creation is disabled in the
    default DC settings (VMS_DC_ENABLED). Seeds the new DC with default
    owner/alias values, validates it through the DC serializer, copies the
    default DC's custom settings (minus global-only keys) onto it, saves it
    and then wires up post-create side effects: clearing dc_bound on
    attached groups and users, and associating the default DNS domain and
    rescue CD when configured.

    Returns a 201 SuccessTaskResponse logged with LOG_DC_CREATE, or a
    FailureTaskResponse with serializer errors.
    """
    dc, request = self.dc, self.request

    if not DefaultDc().settings.VMS_DC_ENABLED:
        raise PermissionDenied

    # Seed defaults; the serializer may overwrite these from request data.
    dc.owner = request.user  # just a default
    dc.alias = dc.name  # just a default
    ser = self.serializer(request, dc, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=dc)

    # Create default custom settings suitable for the new DC: start from the
    # default DC's custom settings and drop every global-only key.
    new_custom_settings = DefaultDc().custom_settings.copy()
    for global_key in DefaultDcSettingsSerializer.get_global_settings():
        new_custom_settings.pop(global_key, None)

    # Copy custom settings from default DC and save new DC
    ser.object.custom_settings = new_custom_settings
    ser.save()
    res = SuccessTaskResponse(request, ser.data, status=status.HTTP_201_CREATED, obj=dc,
                              detail_dict=ser.detail_dict(), msg=LOG_DC_CREATE)
    dc_settings = dc.settings
    task_id = res.data.get('task_id')

    # Changing DC groups affects the group.dc_bound flag
    if dc.roles.exists():
        # Groups attached to a freshly created DC should not stay DC-bound
        for grp in dc.roles.all():
            if grp.dc_bound:
                remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, grp, user=request.user)

    # Creating a new DC can affect the dc_bound flag on users (owner + users from dc.groups)
    self._remove_user_dc_binding(task_id, owner=dc.owner, groups=dc.roles.all())

    # Associate the new DC with the default server domain
    if dc_settings.DNS_ENABLED:
        from api.dc.domain.views import dc_domain
        call_api_view(request, None, dc_domain, dc_settings.VMS_VM_DOMAIN_DEFAULT, data={'dc': dc},
                      log_response=True)

    # Associate the new DC with the default rescue CD
    if dc_settings.VMS_ISO_RESCUECD:
        from api.dc.iso.views import dc_iso
        call_api_view(request, None, dc_iso, dc_settings.VMS_ISO_RESCUECD, data={'dc': dc}, log_response=True)

    return res
def delete(self):
    """Delete a datacenter and release its DC-bound objects.

    Refuses (PreconditionRequired) to delete the default DC, a DC that
    still has nodes (which also covers DC backups), VMs, or backups.
    After deletion it purges the cached task log for the DC, emits a
    dc_relationship_changed signal on transaction commit, and marks every
    previously DC-bound object as DC-unbound (re-bound to the default DC).

    Returns an unlogged SuccessTaskResponse.
    """
    dc, request = self.dc, self.request

    if dc.is_default():
        raise PreconditionRequired(_('Default datacenter cannot be deleted'))

    if dc.dcnode_set.exists():
        raise PreconditionRequired(_('Datacenter has nodes'))  # also "checks" DC backups

    if dc.vm_set.exists():
        raise PreconditionRequired(_('Datacenter has VMs'))

    if dc.backup_set.exists():
        raise PreconditionRequired(_('Datacenter has backups'))  # should be checked by dcnode check above

    dc_id = dc.id
    ser = self.serializer(request, dc)
    dc_bound_objects = dc.get_bound_objects()
    # After deleting a DC the current_dc is automatically set to DefaultDc by the on_delete db field parameter
    ser.object.delete()
    # Remove cached tasklog for this DC (DB tasklog entries will be remove automatically)
    delete_tasklog_cached(dc_id)
    connection.on_commit(lambda: dc_relationship_changed.send(dc_name=ser.object.name))
    res = SuccessTaskResponse(request, None)  # no msg => won't be logged
    # Every DC-bound object looses their DC => becomes DC-unbound
    task_id = res.data.get('task_id')
    # BUGFIX: the original passed dc_id=DefaultDc.id — the class attribute,
    # i.e. a model field descriptor, not an integer primary key. We need the
    # id of a DefaultDc *instance*; fetch it once before the loop.
    default_dc_id = DefaultDc().id

    # Update bound virt objects to be DC-unbound after DC removal
    for model, objects in dc_bound_objects.items():
        msg = LOG_VIRT_OBJECT_UPDATE_MESSAGES.get(model, None)

        if objects and msg:
            for obj in objects:
                if obj.dc_bound:
                    # noinspection PyUnresolvedReferences
                    remove_dc_binding_virt_object(task_id, msg, obj, user=request.user, dc_id=default_dc_id)

    return res
def _remove_dc_binding(self, task_id):
    """Clear the DC-bound flag on this view's group, if it is set.

    Logged under LOG_GROUP_UPDATE with the given task_id.
    """
    if not self.role.dc_bound:
        return

    remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, self.role, user=self.request.user)
def _remove_dc_binding(self, res):
    """Clear the DC-bound flag on this view's group, if it is set.

    Logged under LOG_GROUP_UPDATE with the task_id taken from *res*.
    """
    if not self.role.dc_bound:
        return

    task_id = res.data.get('task_id')
    remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, self.role, user=self.request.user)
def _remove_dc_binding(self, res):
    """Clear the DC-bound flag on this view's image, if it is set.

    Logged under LOG_IMAGE_UPDATE with the task_id taken from *res*.
    """
    if not self.img.dc_bound:
        return

    task_id = res.data.get('task_id')
    remove_dc_binding_virt_object(task_id, LOG_IMAGE_UPDATE, self.img, user=self.request.user)
def _remove_dc_binding(self, res):
    """Clear the DC-bound flag on this view's DNS domain, if it is set.

    Logged under LOG_DOMAIN_UPDATE with the task_id taken from *res*.
    """
    if not self.domain.dc_bound:
        return

    task_id = res.data.get('task_id')
    remove_dc_binding_virt_object(task_id, LOG_DOMAIN_UPDATE, self.domain, user=self.request.user)
def _remove_dc_binding(self, res):
    """Clear the DC-bound flag on this view's VM template, if it is set.

    Logged under LOG_TEMPLATE_UPDATE with the task_id taken from *res*.
    """
    if not self.vmt.dc_bound:
        return

    task_id = res.data.get('task_id')
    remove_dc_binding_virt_object(task_id, LOG_TEMPLATE_UPDATE, self.vmt, user=self.request.user)