def __init__(self, request, hostname, data):
    """Resolve either one node + its DC association, or all nodes of the current DC."""
    super(DcNodeView, self).__init__(request)
    self.data = data
    self.hostname = hostname

    if not hostname:
        # Many: self.node holds the model class; self.dcnode is a queryset of DC<->node links.
        self.node = Node
        self.dcnode = get_dc_nodes(request, prefetch_vms_count=self.extended,
                                   order_by=self.order_by)
        return

    # One: fetch the Node first, then the DcNode binding it to the current datacenter.
    self.node = get_object(request, Node, {'hostname': hostname},
                           exists_ok=True, noexists_fail=True)
    self.dcnode = get_object(request, DcNode,
                             {'dc': request.dc, 'node': self.node},
                             sr=('dc', 'node'))
def _set_record(self):
    """Resolve self.domain and self.record from self.domain_name / self.record_id.

    self.record ends up as either a queryset (record_id is None), a fresh unsaved
    Record (record_id == 0) or one existing Record fetched via get_object.
    Raises ExpectationFailed for write requests on slave domains and InvalidInput
    when the optional ``records`` list filter is malformed.
    """
    request = self.request
    record_id = self.record_id
    # Check IsSuperAdmin or IsDomainOwner permissions in get_domain
    self.domain = get_domain(request, self.domain_name, exists_ok=True, noexists_fail=True)

    # Records for slave domains cannot be modified
    if request.method != 'GET' and self.domain.type in (Domain.SLAVE, Domain.SUPERSLAVE):
        raise ExpectationFailed(_('Changing DNS records is not allowed for %s domain') % self.domain.type)

    if record_id is None:
        # Get many — optionally narrowed down by an explicit list of record IDs
        records = self.data.get('records', None)
        qs = self.domain.record_set.select_related('domain').order_by(*self.order_by)

        if records is None:
            self.record = qs
        else:
            if not isinstance(records, (tuple, list)):
                raise InvalidInput('Invalid records')
            self.record = qs.filter(id__in=records)
    else:
        if record_id == 0:
            # New record — unsaved instance bound to the resolved domain
            self.record = Record(domain=self.domain)
        else:
            # Existing record — must belong to the resolved domain
            self.record = get_object(request, Record, {'domain': self.domain, 'id': record_id},
                                     sr=('domain',), noexists_fail=True)
def __init__(self, request, username, data):
    """Resolve one user or the filtered list of users visible in the current DC."""
    super(DcUserView, self).__init__(request)
    self.data = data
    self.username = username
    dc = request.dc

    # Internal/system accounts are never exposed; non-public DCs additionally
    # restrict the listing to the DC owner and members of the DC's groups.
    user_filter = ExcludeInternalUsers
    if dc.access != dc.PUBLIC:
        user_filter &= Q(id=dc.owner.id) | Q(roles__in=dc.roles.all())

    if username:
        self.user = get_object(request, User, {'username': username}, where=user_filter,
                               sr=('dc_bound', 'default_dc'), exists_ok=True, noexists_fail=True)
        return

    qs = (User.objects.distinct()
          .filter(user_filter)
          .filter(is_active=self.active)
          .order_by(*self.order_by))

    if self.full or self.extended:
        qs = qs.select_related('dc_bound', 'default_dc').prefetch_related('roles')

    self.user = qs
def get_node(request, hostname, attrs=None, where=None, exists_ok=True, noexists_fail=True, sr=(), pr=(), dc=False,
             api=True, extra=None, annotate=None):
    """Call get_object for Node model identified by hostname.

    This function should be called by staff users or DC admins only.
    With api=False the lookup bypasses get_object and queries the ORM directly
    (pr/extra/annotate are ignored in that case).
    """
    attrs = {} if attrs is None else attrs

    if not request.user.is_staff:
        # DC admin — limit the lookup to the current datacenter
        attrs['dc'] = request.dc

    if dc:
        # An explicitly requested DC overrides the implicit restriction above
        attrs['dc'] = dc

    attrs['hostname'] = hostname

    if api:
        return get_object(request, Node, attrs, where=where, exists_ok=exists_ok,
                          noexists_fail=noexists_fail, sr=sr, pr=pr, extra=extra, annotate=annotate)

    qs = Node.objects.select_related(*sr) if sr else Node.objects

    if where:
        qs = qs.filter(where)

    return qs.get(**attrs)
def __init__(self, request, name, data):
    """Resolve one Dc object (name given) or the list of DCs visible to the user."""
    super(DcView, self).__init__(request)
    self.data = data
    self.name = name

    if request.user.is_staff:
        self.serializer = SuperDcSerializer

    if self.extended:
        self.serializer = ExtendedDcSerializer
        extra = {'select': ExtendedDcSerializer.extra_select}
    else:
        extra = None

    if not name:
        # GET many is available for anybody
        pr = ('roles',) if (request.user.is_staff or self.full or self.extended) else None
        self.dc = get_dcs(request, sr=('owner',), pr=pr, extra=extra, order_by=self.order_by)
        return

    # Return dc from cache (this will also check user permissions for requested DC)
    # We do this because GET is available for anybody
    if request.method == 'GET':
        get_dc(request, name)

    # But we will use this fresh DC object
    self.dc = get_object(request, Dc, {'name': name}, sr=('owner',), extra=extra)
    # Update current datacenter to log tasks for this dc
    request.dc = self.dc
def __init__(self, request, name, data):
    """Resolve one Role (group) or all roles attached to the current DC."""
    super(DcGroupView, self).__init__(request)
    self.data = data
    self.name = name
    self.dc = request.dc

    if name:
        lookup = {'name': name}
        # POST attaches an existing global group to this DC, so don't filter by DC
        if request.method != 'POST':
            lookup['dc'] = request.dc
        result = get_object(request, Role, lookup, sr=('dc_bound', ),
                            exists_ok=True, noexists_fail=True)
    else:
        result = self.dc.roles.all().order_by(*self.order_by)
        if self.full or self.extended:
            result = result.select_related('dc_bound', ).prefetch_related('permissions', 'user_set')

    self.role = result
def __init__(self, request, hostname_or_uuid, bkpname, data):
    """Resolve the Backup object (self.bkp) plus related VM/define/disk context.

    POST: ``bkpname`` is actually a backup *definition* name; a new backup name
    is generated from the definition. Other methods look up an existing backup,
    optionally for a VM that no longer exists (lookup by stored hostname).
    """
    super(VmBackup, self).__init__(request)

    if request.method == 'POST':  # Got bkpdef instead of bkpname
        vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
        disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
        # TODO: check indexes
        define = get_object(request, BackupDefine, {'name': bkpname, 'vm': vm, 'disk_id': real_disk_id},
                            exists_ok=True, noexists_fail=True, sr=('vm', 'node'))
        bkpname = define.generate_backup_name()
        # NOTE: vm_disk_id is stored zero-based, hence disk_id - 1
        bkp_get = {'name': bkpname, 'vm_hostname': vm.hostname, 'vm_disk_id': disk_id - 1, 'vm': vm}
    else:
        try:
            if 'hostname' in data:  # Force original hostname
                raise ObjectNotFound
            # Only target VM status and backup node status are important
            vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True, check_node_status=None)
        except ObjectNotFound:
            # VM is gone (or original hostname forced) — find the backup by the
            # hostname recorded at backup time instead of a VM object
            vm = None
            bkp_get = {'name': bkpname, 'vm_hostname': hostname_or_uuid}
        else:
            bkp_get = {'vm': vm, 'name': bkpname}

        define = None
        real_disk_id = None
        zfs_filesystem = None
        bkp_get = filter_disk_id(None, bkp_get, data, default=1)  # vm_disk_id instead of disk_id

    bkp_get['dc'] = request.dc
    # Backup instance
    self.bkp = bkp = get_object(request, Backup, bkp_get, sr=('node', 'define', 'vm'))
    self.disk_id = bkp.array_disk_id
    self.hostname = bkp.vm_hostname_real
    self.define = define
    self.real_disk_id = real_disk_id
    self.zfs_filesystem = zfs_filesystem
    self.vm = vm
    self.data = data

    # Task type (a = automatic, e = manual)
    if getattr(request, 'define_id', None):
        self.tt = TT_AUTO
    else:
        self.tt = TT_EXEC
def node_image_list(request, hostname, zpool, data=None):
    """
    List (:http:get:`GET </node/(hostname)/storage/(zpool)/image>`) all images imported on a compute node storage
    or remove (:http:delete:`DELETE </node/(hostname)/storage/(zpool)/image>`) all unused images imported on
    a compute node storage.

    .. http:get:: /node/(hostname)/storage/(zpool)/image

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg data.full: Return list of objects with all image details (default: false)
        :type data.full: boolean
        :arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name`` (default: ``name``)
        :type data.order_by: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found

    .. http:delete:: /node/(hostname)/storage/(zpool)/image

        .. note:: This API function will run \
:http:delete:`DELETE node_image </node/(hostname)/storage/(zpool)/image/(name)>` for every unused image.

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found
        :status 423: Node is not operational
    """
    # Resolve the node storage first — 404 if the node/zpool pair does not exist
    ns = get_object(request, NodeStorage, {'node__hostname': hostname, 'zpool': zpool},
                    exists_ok=True, noexists_fail=True, sr=('node',))
    images = ns.images.select_related('owner', 'dc_bound').order_by(*NodeImageView.get_order_by(data))
    node_image_view = NodeImageView(request, ns, images, data)

    if request.method == 'DELETE':
        # Remove every unused image from this storage
        return node_image_view.cleanup()
    else:
        return node_image_view.get(many=True)
def __init__(self, request, name, data):
    """Resolve one NodeStorage ("zpool@hostname") or all storages of the current DC.

    Raises ObjectNotFound for a malformed name and PreconditionRequired when the
    storage's node is not attached to the current datacenter.
    """
    super(DcStorageView, self).__init__(request)
    self.data = data
    self.name = name
    dc = request.dc

    if name:
        # The storage identifier has the form "zpool@hostname"
        try:
            zpool, hostname = name.split('@')
            if not (zpool and hostname):
                raise ValueError
        except ValueError:
            raise ObjectNotFound(model=NodeStorage)

        attrs = {'node__hostname': hostname, 'zpool': zpool}

        # POST attaches a storage to this DC, so don't filter by DC
        if request.method != 'POST':
            attrs['dc'] = dc

        ns = get_object(request, NodeStorage, attrs, sr=('node', 'storage', 'storage__owner',),
                        exists_ok=True, noexists_fail=True)
        ns.set_dc(dc)

        try:  # Bug #chili-525 + checks if node is attached to Dc (must be!)
            ns.set_dc_node(DcNode.objects.get(node=ns.node, dc=dc))
        except DcNode.DoesNotExist:
            raise PreconditionRequired(_('Compute node is not available'))
    else:  # many
        ns = NodeStorage.objects.filter(dc=dc).order_by(*self.order_by)

        if self.full or self.extended:
            # Prefetch DcNode objects per hostname so each storage can be annotated
            dc_nodes = {dn.node.hostname: dn
                        for dn in DcNode.objects.select_related('node').filter(dc=request.dc)}
            ns = ns.select_related('node', 'storage', 'storage__owner')

            for i in ns:  # Bug #chili-525
                i.set_dc_node(dc_nodes.get(i.node.hostname, None))
                i.set_dc(dc)

    self.ns = ns
def get_domain(request, name, attrs=None, fetch_dc=False, data=None, count_records=False, **kwargs):
    """Return Domain object according to name.

    SuperAdmins have access to all domains and users can access only domains
    which they own. This function also acts as IsDomainOwner permission.

    Side effects: may switch request.dc (and request.dc_user_permissions) to the
    domain's bound DC. With fetch_dc=True, attaches a ``dc`` list to the domain.
    NOTE(review): ``data`` must be a dict when the lookup may create a new domain
    (domain.new), otherwise ``data.get`` raises AttributeError.
    """
    user = request.user

    if attrs is None:
        attrs = {}

    if user.is_staff:
        dom_filter = None  # staff see everything
    else:
        dom_filter = Q(user=user.id)
        if request.dcs:
            # noinspection PyAugmentAssignment
            dom_filter = dom_filter | Q(dc_bound__in=[dc.id for dc in request.dcs])

    if count_records:
        # noinspection SqlDialectInspection,SqlNoDataSourceInspection
        kwargs['extra'] = {
            'select': {'records': 'SELECT COUNT(*) FROM "records" WHERE "records"."domain_id" = "domains"."id"'}
        }

    attrs['name'] = name.lower()  # The domain name must be always lowercased (DB requirement)
    domain = get_object(request, Domain, attrs, where=dom_filter, **kwargs)

    if domain.new:
        domain.dc_bound = get_dc(request, data.get('dc', request.dc.name)).id

    if not (user.is_staff or domain.user == user.id):
        # request.dcs is brought by IsAnyDcPermission
        if not (domain.dc_bound and Dc.objects.get_by_id(domain.dc_bound) in request.dcs):
            raise PermissionDenied  # only DC-bound objects are visible by non-superadmin users

    if domain.dc_bound:  # Change DC according to domain.dc_bound flag
        if request.dc.id != domain.dc_bound:
            request.dc = Dc.objects.get_by_id(domain.dc_bound)  # Warning: Changing request.dc

            if not user.is_staff:
                request.dc_user_permissions = request.dc.get_user_permissions(user)

            # FIX: the format string was missing the placeholder for user.username
            # (5 lazy arguments, only 4 conversion specifiers) -> logging error.
            logger.info('"%s %s" user="%s" _changed_ dc="%s" permissions=%s', request.method, request.path,
                        user.username, request.dc.name, request.dc_user_permissions)

    if fetch_dc:
        if domain.id:
            domain.dc = list(Dc.objects.filter(domaindc__domain_id=domain.id))
        else:
            domain.dc = []

    return domain
def __init__(self, request, hostname_or_uuid, repname, data):
    """Resolve master VM and one replica (repname given) or all of its replicas."""
    super(VmReplicaBaseView, self).__init__(request)
    self.repname = repname
    self.data = data
    self._success = False
    self.vm = vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True,
                          check_node_status=self._check_node_status)

    if repname is not None:
        self.slave_vm = get_object(request, SlaveVm, {'master_vm': vm, 'name': repname},
                                   sr=('vm', 'master_vm', 'vm__node'),
                                   create_attrs={'_master_vm': vm, 'name': repname})
        return

    # Many: all named slave VMs of this master; nameless entries are migration
    # ghost VMs and are excluded.
    qs = SlaveVm.objects.select_related('vm', 'master_vm', 'vm__node')
    self.slave_vm = qs.filter(master_vm=vm).exclude(name=u'').order_by('name')
def get_user(request, username, where=None, sr=('dc_bound',), **kwargs):
    """Return a User object by username.

    Profile owners may only fetch themselves; otherwise the caller is expected
    to be a SuperAdmin or UserAdmin. Internal users are always excluded.
    """
    user = request.user
    where = (where & ExcludeInternalUsers) if where else ExcludeInternalUsers

    if not getattr(request, 'is_profile_owner', False):
        # Is SuperAdmin or UserAdmin
        return get_virt_object(request, User, sr=sr, get_attrs={'username': username}, where=where, **kwargs)

    if user.username != username:
        raise PermissionDenied

    # IsProfileOwner — fetching own profile
    return get_object(request, User, {'username': username}, where=where, sr=sr, **kwargs)
def __init__(self, request, name, data):
    """Resolve one Image or all visible images attached to the current DC."""
    super(DcImageView, self).__init__(request)
    self.data = data
    self.name = name

    if not name:
        # Many: all images in this DC, minus invisible (e.g. deleted) ones
        self.img = (Image.objects.select_related('owner', 'dc_bound')
                    .filter(dc=request.dc)
                    .exclude(access__in=Image.INVISIBLE)
                    .order_by(*self.order_by))
        return

    lookup = {'name': name}
    # POST attaches an existing image to this DC, so don't filter by DC
    if request.method != 'POST':
        lookup['dc'] = request.dc

    self.img = get_object(request, Image, lookup, sr=('owner', 'dc_bound'),
                          exists_ok=True, noexists_fail=True)
def _get_vm_from_db(request, attrs, where, sr, api, **kwargs):
    """Fetch one Vm either through the api-level get_object helper (api=True)
    or directly via the ORM. Used in get_vm below."""
    if api:
        return get_object(request, Vm, attrs, where=where, sr=sr, **kwargs)

    qs = Vm.objects

    if sr:
        qs = qs.select_related(*sr)

    if where:
        qs = qs.filter(where)

    return qs.get(**attrs)
def __init__(self, request, hostname_or_uuid, snapname, data):
    """Resolve the VM, the target disk and the Snapshot object for this request."""
    super(VmSnapshot, self).__init__(request)
    self.data = data
    # Node status is verified for POST/DELETE here; put() performs its own check
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True,
                check_node_status=('POST', 'DELETE'))
    self.vm = vm
    self.disk_id, real_disk_id, self.zfs_filesystem = get_disk_id(request, vm, data)
    snap_lookup = {'name': snapname, 'vm': vm, 'disk_id': real_disk_id}
    self.snap = get_object(request, Snapshot, snap_lookup, sr=('define', ))
def create_from_template(cls, request, vm, vm_define_backup, log=logger):
    """Create backup definitions from vm.template.vm_define_backup list.

    Best-effort: a failure for one definition is logged and does not stop the
    remaining definitions from being created.
    """
    if vm_define_backup and isinstance(vm_define_backup, list):
        request = set_request_method(request, 'POST')

        for i, data in enumerate(vm_define_backup):
            try:
                # The definition name may be given as 'bkpdef' or (legacy) 'name'
                try:
                    bkpdef = data['bkpdef']
                except KeyError:
                    bkpdef = data['name']

                disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
                log.info('Creating backup definition [%d] "%s" for vm=%s, disk_id=%d defined by template %s',
                         i, bkpdef, vm, disk_id, vm.template)
                define = get_object(request, BackupDefine, {'name': bkpdef, 'vm': vm, 'disk_id': real_disk_id})
                res = cls(request, data=data).post(vm, define, vm_template=True)

                if res.status_code != scode.HTTP_201_CREATED:
                    raise APIError('vm_define_backup error [%s]: %s' % (res.status_code, res.data))
            except Exception as ex:
                # FIX: logger.warn() is a deprecated alias — use warning()
                log.warning('Failed to create backup definition [%d] for vm=%s defined by template %s with '
                            'data="%s". Error: %s', i, vm, vm.template, data, ex)
def get_node_storage(request, hostname, zpool, sr=('node', 'storage', 'storage__owner')):
    """Return NodeStorage object. Used only by staff users!"""
    node = get_node(request, hostname, exists_ok=True, noexists_fail=True)
    extended = output_extended(request)

    # With extended output, add the extra SELECT columns and prefetch DCs
    extra = {'select': extended} if extended else None
    pr = ('dc', ) if extended else ()

    return get_object(request, NodeStorage, {'node': node, 'zpool': zpool},
                      sr=sr, pr=pr, extra=extra)
def __init__(self, request, name, data):
    """Resolve one DNS Domain or all domains attached to the current DC."""
    super(DcDomainView, self).__init__(request)
    self.data = data
    self.name = name
    # IDs of domains attached to the current DC (lazy-evaluated per branch)
    if name:
        if request.method == 'POST':
            # POST attaches an existing domain to this DC — no DC restriction
            restriction = None
        else:
            attached_ids = list(request.dc.domaindc_set.values_list('domain_id', flat=True))
            restriction = Q(id__in=attached_ids)

        self.domain = get_object(request, Domain, {'name': name.lower()}, where=restriction,
                                 exists_ok=True, noexists_fail=True)
    else:
        attached_ids = list(request.dc.domaindc_set.values_list('domain_id', flat=True))
        self.domain = Domain.objects.filter(id__in=attached_ids).order_by(*self.order_by)
def vm_define_snapshot(request, hostname_or_uuid, snapdef, data=None):
    """
    Show (:http:get:`GET </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`),
    create (:http:post:`POST </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`),
    remove (:http:delete:`DELETE </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`) or
    update (:http:put:`PUT </vm/(hostname_or_uuid)/define/snapshot/(snapdef)>`)
    a VM snapshot definition and schedule.

    .. http:get:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.extended: Include total number of snapshots (default: false)
        :type data.extended: boolean
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: VM not found / Snapshot definition not found
        :status 412: Invalid disk_id

    .. http:post:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name (predefined: hourly, daily, weekly, monthly)
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: **required** - Schedule in UTC CRON format (e.g. ``30 4 * * 6``)
        :type data.schedule: string
        :arg data.retention: **required** - Maximum number of snapshots to keep
        :type data.retention: integer
        :arg data.active: Enable or disable snapshot schedule (default: true)
        :type data.active: boolean
        :arg data.desc: Snapshot definition description
        :type data.desc: string
        :arg data.fsfreeze: Whether to send filesystem freeze command to QEMU agent socket before \
creating snapshot (requires QEMU Guest Agent) (default: false)
        :type data.fsfreeze: boolean
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Snapshot definition already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:put:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: Schedule in UTC CRON format (e.g. ``30 4 * * 6``)
        :type data.schedule: string
        :arg data.retention: Maximum number of snapshots to keep
        :type data.retention: integer
        :arg data.active: Enable or disable snapshot schedule
        :type data.active: boolean
        :arg data.desc: Snapshot definition description
        :type data.desc: string
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Snapshot definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:delete:: /vm/(hostname_or_uuid)/define/snapshot/(snapdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapdef: **required** - Snapshot definition name
        :type snapdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Snapshot definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
    """
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    # Optional extra SELECT for the total snapshot count (data.extended)
    extra = output_extended_snap_count(request, data)
    define = get_object(request, SnapshotDefine, {'name': snapdef, 'vm': vm, 'disk_id': real_disk_id},
                        sr=('vm', 'periodic_task', 'periodic_task__crontab'), extra={'select': extra})

    return SnapshotDefineView(request, data=data).response(vm, define, extended=bool(extra))
def __init__(self, request, name, ip, data, dc=None, many=False):
    """Resolve the Subnet (self.net) and one IPAddress or a filtered IP queryset.

    When ``dc`` is set, the subnet lookup and the IP filter are restricted to
    that DC (only VM-owned addresses). Raises InvalidInput for malformed
    ``ips``/``usage`` input.
    """
    super(NetworkIPView, self).__init__(request)
    self.data = data
    self.many = many
    self.dc = dc
    net_filter = {'name': name}
    ip_filter = []  # list of Q objects, later AND-ed together via reduce

    if dc:
        net_filter['dc'] = dc
        # Only addresses used by VMs, and only VMs belonging to this DC
        ip_filter.append(Q(usage__in=[IPAddress.VM, IPAddress.VM_REAL]))
        ip_filter.append((Q(vm__isnull=False) & Q(vm__dc=dc)) | (~Q(vms=None) & Q(vms__dc=dc)))
    elif not request.user.is_staff:
        # Non-staff users must not see node addresses
        ip_filter.append(~Q(usage=IPAddress.NODE))

    self.net = net = get_virt_object(request, Subnet, data=data, sr=('dc_bound', ), get_attrs=net_filter,
                                     exists_ok=True, noexists_fail=True)
    ip_filter.append(Q(subnet=net))

    if many:
        # Optional explicit list of IP addresses to narrow the queryset
        self.ips = ips = data.get('ips', None)

        if ips is not None:
            if not isinstance(ips, (tuple, list)):
                raise InvalidInput('Invalid ips')
            ip_filter.append(Q(ip__in=ips))

        if request.method == 'GET':
            usage = data.get('usage', None)

            if usage and not dc:
                # Validate usage against the known real usage values
                try:
                    usage = int(usage)
                    if usage not in dict(IPAddress.USAGE_REAL):
                        raise ValueError
                except ValueError:
                    raise InvalidInput('Invalid usage')
                else:
                    ip_filter.append(Q(usage=usage))

        try:
            # TypeError from reduce/filter indicates unusable ips input
            ip_filter = reduce(operator.and_, ip_filter)
            self.ip = IPAddress.objects.select_related('vm', 'vm__dc', 'subnet')\
                                       .prefetch_related('vms', 'vms__dc')\
                                       .filter(ip_filter)\
                                       .order_by(*self.order_by).distinct()
        except TypeError:
            raise InvalidInput('Invalid ips')
    else:
        ip_filter = reduce(operator.and_, ip_filter)
        self.ip = get_object(request, IPAddress, {'ip': ip}, where=ip_filter, sr=('vm', 'vm__dc', 'subnet'))
def vm_define_backup(request, hostname_or_uuid, bkpdef, data=None):
    """
    Show (:http:get:`GET </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
    create (:http:post:`POST </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
    remove (:http:delete:`DELETE </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`) or
    update (:http:put:`PUT </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`)
    a VM backup definition and schedule.

    .. http:get:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |VmOwner|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.extended: Include total number of backups (default: false)
        :type data.extended: boolean
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id

    .. http:post:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name (predefined: hourly, daily, weekly, monthly)
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.type: **required** - Backup type (1 - dataset, 2 - file) (default: 1)
        :type: data.type: integer
        :arg data.node: **required** - Name of the backup node
        :type data.node: string
        :arg data.zpool: **required** - The zpool used on the backup node (default: zones)
        :type data.zpool: string
        :arg data.schedule: **required** - Schedule in UTC CRON format (e.g. ``30 4 * * 6``)
        :type data.schedule: string
        :arg data.retention: **required** - Maximum number of backups to keep
        :type data.retention: integer
        :arg data.active: Enable or disable backup schedule (default: true)
        :type data.active: boolean
        :arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2) (default: 0)
        :type data.compression: integer
        :arg data.bwlimit: Transfer rate limit in bytes (default: null => no limit)
        :type data.bwlimit: integer
        :arg data.desc: Backup definition description
        :type data.desc: string
        :arg data.fsfreeze: Whether to send filesystem freeze command to QEMU agent socket before \
creating backup snapshot (requires QEMU Guest Agent) (default: false)
        :type data.fsfreeze: boolean
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Backup definition already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:put:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.schedule: Schedule in UTC CRON format (e.g. ``30 4 * * 6``)
        :type data.schedule: string
        :arg data.retention: Maximum number of backups to keep
        :type data.retention: integer
        :arg data.active: Enable or disable backup schedule
        :type data.active: boolean
        :arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2)
        :type data.compression: integer
        :arg data.bwlimit: Transfer rate limit in bytes
        :type data.bwlimit: integer
        :arg data.desc: Backup definition description
        :type data.desc: string
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational

    .. http:delete:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |Admin|
        :Asynchronous?:
            * |async-no|
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg bkpdef: **required** - Backup definition name
        :type bkpdef: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :status 200: SUCCESS
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found / Backup definition not found
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
    """
    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    # Optional extra SELECT for the total backup count (data.extended)
    extra = output_extended_backup_count(request, data)
    define = get_object(request, BackupDefine, {'name': bkpdef, 'vm': vm, 'disk_id': real_disk_id},
                        sr=('vm', 'vm__dc', 'node', 'periodic_task', 'periodic_task__crontab'),
                        extra={'select': extra})

    return BackupDefineView(request, data=data).response(vm, define, extended=bool(extra))
def node_image(request, hostname, zpool, name, data=None):
    """
    Show (:http:get:`GET </node/(hostname)/storage/(zpool)/image/(name)>`),
    import (:http:post:`POST </node/(hostname)/storage/(zpool)/image/(name)>`) or
    delete (:http:delete:`DELETE </node/(hostname)/storage/(zpool)/image/(name)>`)
    an image (name) on a compute node (hostname) storage (zpool).

    .. http:get:: /node/(hostname)/storage/(zpool)/image/(name)

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg name: **required** - Image name
        :type name: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found / Image not found

    .. http:post:: /node/(hostname)/storage/(zpool)/image/(name)

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg name: **required** - Image name
        :type name: string
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: Storage not found / Image not found
        :status 406: Image already exists
        :status 423: Node is not operational
        :status 428: Image requires newer node version / Image requires newer node version

    .. http:delete:: /node/(hostname)/storage/(zpool)/image/(name)

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg zpool: **required** - Node storage pool name
        :type zpool: string
        :arg name: **required** - Image name
        :type name: string
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: Storage not found / Image not found
        :status 423: Node is not operational
        :status 428: Image is used by some VMs
    """
    # Resolve the node storage first — 404 if the node/zpool pair does not exist
    ns = get_object(request, NodeStorage, {'node__hostname': hostname, 'zpool': zpool}, exists_ok=True,
                    noexists_fail=True, sr=('node', 'storage'))
    attrs = {'name': name}

    # For GET/DELETE the image must already be imported on this storage;
    # POST imports it, so no nodestorage restriction there
    if request.method != 'POST':
        attrs['nodestorage'] = ns

    img = get_object(request, Image, attrs, sr=('owner', 'dc_bound'), exists_ok=True, noexists_fail=True)

    return NodeImageView(request, ns, img, data).response()
def node_vm_snapshot_list(request, hostname, zpool, data=None):
    """
    List (:http:get:`GET </node/(hostname)/storage/(zpool)/snapshot>`) all VM snapshots on compute node storage
    or synchronize (:http:put:`PUT </node/(hostname)/storage/(zpool)/snapshot>`) snapshots of all VM's disks
    on a compute node storage with snapshots saved in database.

    .. http:get:: /node/(hostname)/storage/(zpool)/snapshot

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-no|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :arg data.full: Return list of objects with all snapshot details (default: false)
        :type data.full: boolean
        :arg data.disk_id: Filter by disk number/ID
        :type data.disk_id: integer
        :arg data.type: Filter by snapshot type (1 - Automatic, 2 - Manual)
        :type data.type: integer
        :arg data.vm: Filter by server hostname
        :type data.vm: string
        :arg data.define: Filter by snapshot definition name
        :type data.define: string
        :arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, ``hostname``, \
``size``, ``created`` (default: ``-created``)
        :type data.order_by: string
        :status 200: SUCCESS
        :status 403: Forbidden
        :status 404: Storage not found
        :status 412: Invalid disk_id / Invalid snapshot type

    .. http:put:: /node/(hostname)/storage/(zpool)/snapshot

        :DC-bound?:
            * |dc-no|
        :Permissions:
            * |SuperAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg hostname: **required** - Node hostname
        :type hostname: string
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: Storage not found
        :status 423: Node is not operational
    """
    # Resolve the node storage first — 404 if the node/zpool pair does not exist
    ns = get_object(request, NodeStorage, {'node__hostname': hostname, 'zpool': zpool},
                    exists_ok=True, noexists_fail=True, sr=('node', 'storage'))

    return NodeVmSnapshotList(request, ns, data).response()
def image_snapshot(request, hostname_or_uuid, snapname, name, data=None):
    """
    Create (:http:post:`POST </vm/(hostname_or_uuid)/snapshot/(snapname)/image/(name)>`) a server disk image
    from a disk snapshot.

    .. note:: A global image server (:http:put:`VMS_IMAGE_VM </dc/(dc)/settings>`) must be configured
        in the system.

    .. http:post:: /vm/(hostname_or_uuid)/snapshot/(snapname)/image/(name)

        :DC-bound?:
            * |dc-yes|
        :Permissions:
            * |ImageAdmin|
        :Asynchronous?:
            * |async-yes|
        :arg name: **required** - Server disk image name
        :type name: string
        :arg hostname_or_uuid: **required** - Server hostname or uuid
        :type hostname_or_uuid: string
        :arg snapname: **required** - Snapshot name
        :type snapname: string
        :arg data.disk_id: **required** - Disk number/ID (default: 1)
        :type data.disk_id: integer
        :arg data.alias: Short image name (default: ``name``)
        :type data.alias: string
        :arg data.access: Access type (1 - Public, 3 - Private, 4 - Deleted) (default: 3)
        :type data.access: integer
        :arg data.owner: User that owns the image (default: logged in user)
        :type data.owner: string
        :arg data.desc: Image description
        :type data.desc: string
        :arg data.version: Image version (default: 1.0)
        :type data.version: string
        :arg data.resize: Whether the image is able to resize the disk during an initial start or deploy process \
(default: false)
        :type data.resize: boolean
        :arg data.deploy: Whether the image is able to shut down the server after an initial start (default: false)
        :type data.deploy: boolean
        :status 200: SUCCESS
        :status 201: PENDING
        :status 400: FAILURE
        :status 403: Forbidden
        :status 404: VM not found
        :status 406: Image already exists
        :status 412: Invalid disk_id
        :status 423: Node is not operational / VM is not operational
        :status 417: Image status is not OK / VM snapshot status is not OK
        :status 428: Image server is not available
    """
    # Local imports — presumably here to avoid circular imports between api modules
    from api.utils.db import get_object
    from api.vm.utils import get_vm
    from api.vm.snapshot.utils import get_disk_id
    from vms.models import Snapshot

    vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
    disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
    snap = get_object(request, Snapshot, {'name': snapname, 'vm': vm, 'disk_id': real_disk_id},
                      exists_ok=True, noexists_fail=True)
    # Sanity check: the resolved disk must be the one the snapshot was taken from.
    # NOTE(review): assert is stripped under python -O — consider an explicit check.
    assert zfs_filesystem == snap.zfs_filesystem

    return ImageView(request, name, data).create(vm, snap)