def validate(self):
    logger.error('Matching name: %s', self.name)

    if not re.match(r'^[A-Za-z0-9._/-]{1,250}$', self.name):
        logger.error('Matching name 2x: %s', self.name)
        raise InvalidInput('Invalid TSIG name: "%s"' % self.name)

    if len(self.secret) > 250:
        raise InvalidInput('TSIG secret too long')

    if self.algorithm not in self.ALGORITHMS:
        raise InvalidInput('Invalid TSIG algorithm: "%s". Must be one of: %s' % (self.algorithm, self.ALGORITHMS))

    return True
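# A minimal standalone sketch of the name check above (mirrors the regex; the
# sample names are illustrative): letters, digits, '.', '_', '/' and '-' are
# allowed, up to 250 characters.
#
#   import re
#   TSIG_NAME_RE = re.compile(r'^[A-Za-z0-9._/-]{1,250}$')
#   assert TSIG_NAME_RE.match('key.example.com')
#   assert TSIG_NAME_RE.match('backup_key-1')
#   assert not TSIG_NAME_RE.match('bad key!')  # spaces are rejected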
def __init__(self, request, subnet, data, dc=None):
    super(NetworkIPPlanView, self).__init__(request)
    self.data = data
    self.dc = dc
    ip_filter = []

    if subnet:
        try:
            ipi = cidr_validator(subnet, return_ip_interface=True)
        except ValidationError:
            raise InvalidInput('Invalid subnet')

        network, netmask = ipi.with_netmask.split('/')
        net_filter = {'network': network, 'netmask': netmask}

        if dc:
            net_filter['dc'] = dc

        nets = Subnet.objects.filter(**net_filter)

        if not nets.exists():
            raise ObjectNotFound(model=Subnet)

        ip_filter.append(Q(subnet__in=nets))

    if dc:
        ip_filter.append(Q(usage=IPAddress.VM))
        ip_filter.append((Q(vm__isnull=False) & Q(vm__dc=dc)) | (~Q(vms=None) & Q(vms__dc=dc)))

    usage = data.get('usage', None)

    if usage and not dc:
        try:
            usage = int(usage)
            if usage not in dict(IPAddress.USAGE_REAL):
                raise ValueError
        except ValueError:
            raise InvalidInput('Invalid usage')
        else:
            ip_filter.append(Q(usage=usage))

    if ip_filter:
        ip_filter = reduce(operator.and_, ip_filter)
    else:
        ip_filter = Q()

    self.ips = IPAddress.objects.select_related('vm', 'vm__dc', 'subnet')\
                                .prefetch_related('vms', 'vms__dc')\
                                .filter(ip_filter)\
                                .order_by(*self.order_by).distinct()
def _set_record(self):
    request = self.request
    record_id = self.record_id
    # Check IsSuperAdmin or IsDomainOwner permissions in get_domain
    self.domain = get_domain(request, self.domain_name, exists_ok=True, noexists_fail=True)

    # Records for slave domains cannot be modified
    if request.method != 'GET' and self.domain.type in (Domain.SLAVE, Domain.SUPERSLAVE):
        raise ExpectationFailed(_('Changing DNS records is not allowed for %s domain') % self.domain.type)

    if record_id is None:
        # Get many
        records = self.data.get('records', None)
        qs = self.domain.record_set.select_related('domain').order_by(*self.order_by)

        if records is None:
            self.record = qs
        else:
            if not isinstance(records, (tuple, list)):
                raise InvalidInput('Invalid records')
            self.record = qs.filter(id__in=records)
    else:
        if record_id == 0:
            # New record
            self.record = Record(domain=self.domain)
        else:
            # Existing record
            self.record = get_object(request, Record, {'domain': self.domain, 'id': record_id},
                                     sr=('domain',), noexists_fail=True)
def parse_yyyymm(yyyymm, min_value):
    """Process the yyyymm string and return a (yyyymm, since, until, current_month) tuple consisting of:
    - validated yyyymm string,
    - since and until datetime objects,
    - current_month boolean.

    Used in SLA views.
    """
    # noinspection PyBroadException
    try:
        yyyymm = str(yyyymm)
        since = datetime(year=int(yyyymm[:4]), month=int(yyyymm[4:]), day=1)
    except Exception:
        raise InvalidInput('Invalid yyyymm')

    now = datetime.now()
    yyyymm = since.strftime('%Y%m')
    current_month = now.strftime('%Y%m') == yyyymm

    if current_month:
        until = now
    else:
        until = since + relativedelta(months=+1)

    if until < min_value or since > now:
        raise ExpectationFailed('Monitoring data not available')

    return yyyymm, since, until, current_month
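# Usage sketch (illustrative values; a past month yields until = since + 1 month,
# the current month yields until = datetime.now() and current_month = True):
#
#   parse_yyyymm('201701', datetime(2016, 1, 1))
#   # -> ('201701', datetime(2017, 1, 1, 0, 0), datetime(2017, 2, 1, 0, 0), False)
#   parse_yyyymm('2017-01', datetime(2016, 1, 1))
#   # -> raises InvalidInput('Invalid yyyymm')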
def get(self):
    request, node, graph = self.request, self.node, self.graph_type

    if node.status not in node.STATUS_AVAILABLE_MONITORING:
        raise NodeIsNotOperational

    try:
        graph_settings = GRAPH_ITEMS.get_options(graph, node)
    except KeyError:
        raise InvalidInput('Invalid graph')

    if graph.startswith(('nic-', 'net-')):
        ser_class = NetworkNodeMonHistorySerializer
    elif graph.startswith(('storage-',)):
        ser_class = StorageNodeMonHistorySerializer
    else:
        ser_class = MonHistorySerializer

    ser = ser_class(obj=self.node, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=node)

    return call_mon_history_task(request, t_mon_node_history, view_fun_name='mon_node_history', obj=self.node,
                                 dc_bound=False, serializer=ser, data=self.data, graph=graph,
                                 graph_settings=graph_settings)
def validate_order_by(cls, order_by):
    """Check that the order_by list contains only valid values"""
    try:
        return [cls._get_db_field(f.strip()) for f in order_by]
    except (ValueError, TypeError):
        # noinspection PyTypeChecker
        raise InvalidInput('Invalid order_by; Possible sort fields are: %s'
                           % ', '.join(cls._get_available_order_by_fields()))
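# Usage sketch (field names are hypothetical; _get_db_field() is expected to map
# an API sort field to a DB column and raise ValueError for unknown fields):
#
#   SomeView.validate_order_by(['hostname', '-created'])
#   # -> list of mapped DB fields
#   SomeView.validate_order_by(['no_such_field'])
#   # -> raises InvalidInput listing the available sort fields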
def put(self):
    request, vm, command = self.request, self.vm, self.command

    if not vm.is_hvm():
        raise OperationNotSupported

    if vm.status not in (vm.RUNNING, vm.STOPPING):
        raise VmIsNotOperational

    if command not in COMMANDS:
        raise InvalidInput('Invalid command')

    ser = VmQGASerializer(request, command, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, obj=vm)

    apiview = {
        'view': 'vm_qga',
        'method': request.method,
        'hostname': vm.hostname,
        'command': command,
    }
    cmd = 'qga-client %s %s 2>&1' % (vm.qga_socket_path, ' '.join(ser.get_full_command()))
    lock = 'vm_qga vm:%s' % vm.uuid
    meta = {
        'output': {'returncode': 'returncode', 'stdout': 'message'},
        'replace_stdout': ((vm.uuid, vm.hostname),),
        'apiview': apiview,
        'msg': LOG_QGA_COMMAND,
        'vm_uuid': vm.uuid,
        'check_returncode': True,
    }
    # callback=None means that an implicit LOGTASK callback will be used (task_log_cb)
    tid, err = execute(request, vm.owner.id, cmd, meta=meta, lock=lock, queue=vm.node.fast_queue)

    if err:
        return FailureTaskResponse(request, err, vm=vm)
    else:
        return TaskResponse(request, tid, msg=LOG_QGA_COMMAND, obj=vm, api_view=apiview, data=self.data,
                            detail_dict=ser.detail_dict())
def get_backups(request, bkp_filter, data):
    """Return Backup queryset according to filter and backup names in data"""
    bkpnames = data.get('bkpnames', None)

    if not (bkpnames and isinstance(bkpnames, (list, tuple))):  # List and not empty
        raise InvalidInput('Invalid bkpnames')

    bkp_filter['name__in'] = bkpnames
    # TODO: check indexes
    bkps = Backup.objects.select_related('node', 'vm').filter(**bkp_filter)

    if not bkps:
        raise ObjectNotFound(model=Backup)

    bkp = bkps[0]

    for i in bkps:
        if i.node != bkp.node or i.vm != bkp.vm:
            raise InvalidInput('Invalid bkpnames')

    return bkps, bkpnames
def load_from_schema(request: Request = None, schema=None, data: dict = None, context: dict = None) -> dict:
    instance_schema = schema()

    if context:
        instance_schema.context.update(context)

    data, errors = instance_schema.load((request.json if request else data) or {})

    if not errors:
        return data

    raise InvalidInput(errors)
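# Usage sketch (assumes a marshmallow 2.x style schema, whose load() returns a
# (data, errors) tuple; HostnameSchema is a hypothetical example):
#
#   from marshmallow import Schema, fields
#
#   class HostnameSchema(Schema):
#       hostname = fields.String(required=True)
#
#   load_from_schema(schema=HostnameSchema, data={'hostname': 'web01'})
#   # -> {'hostname': 'web01'}
#   load_from_schema(schema=HostnameSchema, data={})
#   # -> raises InvalidInput({'hostname': ['Missing data for required field.']})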
def get(self):
    request, vm, graph = self.request, self.vm, self.graph_type

    if not vm.is_zabbix_sync_active():
        raise ExpectationFailed('VM monitoring disabled')

    if vm.status not in vm.STATUS_OPERATIONAL:
        raise VmIsNotOperational

    try:
        graph_settings = GRAPH_ITEMS.get_options(graph, vm)
    except KeyError:
        raise InvalidInput('Invalid graph')
    else:
        required_ostype = graph_settings.get('required_ostype', None)

        if required_ostype is not None and vm.ostype not in required_ostype:
            raise InvalidInput('Invalid OS type')

    if graph.startswith(('nic-', 'net-')):
        ser_class = NetworkVmMonHistorySerializer
    elif graph.startswith(('disk-', 'hdd-', 'fs-')):
        ser_class = DiskVmMonHistorySerializer
    else:
        ser_class = MonHistorySerializer

    ser = ser_class(obj=self.vm, data=self.data)

    if not ser.is_valid():
        return FailureTaskResponse(request, ser.errors, vm=vm)

    return call_mon_history_task(request, t_mon_vm_history, view_fun_name='mon_vm_history', obj=self.vm,
                                 dc_bound=True, serializer=ser, data=self.data, graph=graph,
                                 graph_settings=graph_settings)
def report(self):
    """api.task.views.task_log_report"""
    try:
        startime = timezone.now() - timedelta(seconds=int(self.data.get('last', 86400)))
    except Exception:
        # This also catches the OverflowError raised during startime calculation
        raise InvalidInput('Invalid "last" parameter')

    qs = get_tasklog(self.request, sr=(), time__gte=startime)
    report = TaskLogReportSerializer.get_report(qs)

    return TaskSuccessResponse(self.request, TaskLogReportSerializer(report).data)
def get_stats(self):
    """api.task.views.task_log_stats"""
    try:
        last = int(self.data.get('last', 86400))
        startime = timezone.now() - timedelta(seconds=last)
    except Exception:
        # This also catches the OverflowError raised during startime calculation
        raise InvalidInput('Invalid "last" parameter')

    qs = get_tasklog(self.request, sr=(), time__gte=startime)
    res = self._get_stats_result(qs)
    res['_last'] = last

    return TaskSuccessResponse(self.request, res)
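# Standalone sketch of the OverflowError mentioned in the comment above: a huge
# "last" value survives int() but overflows the timedelta normalization:
#
#   from datetime import timedelta
#   timedelta(seconds=10 ** 20)
#   # -> OverflowError (normalized days must have magnitude <= 999999999)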
def post(self):
    dd = {}

    if 'note' in self.data:
        dd['note'] = note = self.data['note']
    else:
        note = ''

    if 'usage' in self.data:
        dd['usage'] = usage = self.data['usage']
    else:
        usage = IPAddress.VM

    if self.many:
        ips = self.ips

        if not ips:
            raise InvalidInput('Invalid ips')

        if self.ip.exists():  # SELECT count(*) from IPAddress
            raise ObjectAlreadyExists(model=IPAddress)

        msg = LOG_IPS_CREATE
        data = [{'ip': ip, 'usage': usage, 'note': note} for ip in ips]
        dd['ips'] = '%s-%s' % (ips[0], ips[-1])
    else:
        msg = LOG_IP_CREATE
        data = dd
        data['ip'] = self.ip.ip

    ser = NetworkIPSerializer(self.net, data=data, many=self.many)

    if not ser.is_valid():
        return FailureTaskResponse(self.request, ser.errors, dc_bound=False)

    if self.many:
        IPAddress.objects.bulk_create(ser.object)  # INSERT into IPAddress
    else:
        ser.object.save()  # INSERT into IPAddress

    return SuccessTaskResponse(self.request, ser.data, status=HTTP_201_CREATED, obj=self.net, msg=msg,
                               detail_dict=dd, dc_bound=False)
def filter_snap_type(query_filter, data):
    """Validate snapshot type and update dictionary used for queryset filtering"""
    stype = data.get('type', None)

    if stype:
        # noinspection PyBroadException
        try:
            stype = int(stype)
            if stype not in dict(Snapshot.TYPE):
                raise ValueError
            query_filter['type'] = stype
        except Exception:
            raise InvalidInput('Invalid snapshot type')

    return query_filter
def get_disk_id(request, vm, data, key='disk_id', default=1):
    """Get disk_id from data and return additional disk information"""
    disk_id = data.get(key, default)

    # noinspection PyBroadException
    try:
        disk_id = int(disk_id)
        if not disk_id > 0:
            raise ValueError
        disk = vm.json_active_get_disks()[disk_id - 1]
        zfs_filesystem = disk['zfs_filesystem']
        real_disk_id = Snapshot.get_real_disk_id(disk)
    except Exception:
        raise InvalidInput('Invalid %s' % key)

    return disk_id, real_disk_id, zfs_filesystem
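# Usage sketch (illustrative values; disk_id is a 1-based index into the VM's
# active disk list, and the zfs_filesystem value is an example path):
#
#   get_disk_id(request, vm, {'disk_id': 1})
#   # -> (1, <real_disk_id>, 'zones/<vm-uuid>-disk0')
#   get_disk_id(request, vm, {'disk_id': 0})
#   # -> raises InvalidInput('Invalid disk_id')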
def filter_disk_id(vm, query_filter, data, default=None):
    """Validate disk_id and update dictionary used for queryset filtering"""
    disk_id = data.get('disk_id', default)

    if disk_id is not None:
        # noinspection PyBroadException
        try:
            disk_id = int(disk_id)
            if not disk_id > 0:
                raise ValueError
            if vm:
                query_filter['disk_id'] = Snapshot.get_disk_id(vm, disk_id)
            else:
                query_filter['vm_disk_id'] = disk_id - 1
        except Exception:
            raise InvalidInput('Invalid disk_id')

    return query_filter
def get_snapshots(request, vm, real_disk_id, data):
    """Return Snapshot queryset according to disk_id and snapshot names in data"""
    snapnames = data.get('snapnames', None)

    if not (snapnames and isinstance(snapnames, (list, tuple))):  # List and not empty
        raise InvalidInput('Invalid snapnames')

    # Stringify the names, because if a name is a number, the form data sent via socket.io contains numbers
    snapnames = [str(name) for name in snapnames]
    # TODO: check indexes
    snaps = Snapshot.objects.select_related('vm').filter(vm=vm, disk_id=real_disk_id, name__in=snapnames)

    if not snaps:
        raise ObjectNotFound(model=Snapshot)

    return snaps, snapnames
def __init__(self, request, name, ip, data, dc=None, many=False):
    super(NetworkIPView, self).__init__(request)
    self.data = data
    self.many = many
    self.dc = dc
    net_filter = {'name': name}
    ip_filter = []

    if dc:
        net_filter['dc'] = dc
        ip_filter.append(Q(usage__in=[IPAddress.VM, IPAddress.VM_REAL]))
        ip_filter.append((Q(vm__isnull=False) & Q(vm__dc=dc)) | (~Q(vms=None) & Q(vms__dc=dc)))
    elif not request.user.is_staff:
        ip_filter.append(~Q(usage=IPAddress.NODE))

    self.net = net = get_virt_object(request, Subnet, data=data, sr=('dc_bound',), get_attrs=net_filter,
                                     exists_ok=True, noexists_fail=True)
    ip_filter.append(Q(subnet=net))

    if many:
        self.ips = ips = data.get('ips', None)

        if ips is not None:
            if not isinstance(ips, (tuple, list)):
                raise InvalidInput('Invalid ips')
            ip_filter.append(Q(ip__in=ips))

        if request.method == 'GET':
            usage = data.get('usage', None)

            if usage and not dc:
                try:
                    usage = int(usage)
                    if usage not in dict(IPAddress.USAGE_REAL):
                        raise ValueError
                except ValueError:
                    raise InvalidInput('Invalid usage')
                else:
                    ip_filter.append(Q(usage=usage))

        try:
            ip_filter = reduce(operator.and_, ip_filter)
            self.ip = IPAddress.objects.select_related('vm', 'vm__dc', 'subnet')\
                                       .prefetch_related('vms', 'vms__dc')\
                                       .filter(ip_filter)\
                                       .order_by(*self.order_by).distinct()
        except TypeError:
            raise InvalidInput('Invalid ips')
    else:
        ip_filter = reduce(operator.and_, ip_filter)
        self.ip = get_object(request, IPAddress, {'ip': ip}, where=ip_filter, sr=('vm', 'vm__dc', 'subnet'))
def put(self):
    request, vm = self.request, self.vm

    # only admin
    if not (request.user and request.user.is_admin(request)):
        raise PermissionDenied

    node = vm.node
    apiview = self.apiview
    apiview['force'] = bool(ForceSerializer(data=self.data, default=False))
    queue = vm.node.fast_queue
    new_node_uuid = None
    detail_dict = {}

    if vm.status not in (vm.RUNNING, vm.STOPPED):
        raise VmIsNotOperational('VM is not stopped or running')

    if apiview['force']:
        detail_dict['force'] = True
        # final cmd and empty stdin
        cmd = 'vmadm get %s 2>/dev/null' % vm.uuid
        stdin = None
        block_key = None
        node_param = self.data.get('node')

        if node_param:
            if not request.user.is_staff:
                raise PermissionDenied

            node = get_node(request, node_param, dc=request.dc, exists_ok=True, noexists_fail=True)

            if node.hostname == vm.node.hostname:
                raise InvalidInput('VM already has the requested node set in DB')

            apiview['node'] = detail_dict['node'] = node.hostname
            queue = node.fast_queue
            new_node_uuid = node.uuid

    elif vm.json_changed():
        if vm.locked:
            raise VmIsLocked

        json_update = vm.json_update()
        self.check_update(json_update)

        if (vm.json_disks_changed() or vm.json_nics_changed()) and vm.tasks:
            raise VmHasPendingTasks

        # create json suitable for update
        stdin, cmd1 = self.fix_update(json_update)
        self.validate_update(vm, stdin, cmd1)
        stdin = stdin.dump()

        # final cmd
        cmd = cmd1 + 'vmadm update %s >&2; e=$?; vmadm get %s 2>/dev/null; exit $e' % (vm.uuid, vm.uuid)
        # Possible node_image import task which will block this task on node worker
        block_key = self.node_image_import(vm.node, json_update.get('add_disks', []))

    else:  # JSON unchanged and not force
        detail = 'Successfully updated VM %s (locally)' % vm.hostname
        res = SuccessTaskResponse(request, detail, msg=LOG_VM_UPDATE, vm=vm, detail=detail)
        vm_updated.send(TaskID(res.data.get('task_id'), request=request), vm=vm)  # Signal!

        return res

    # Check compute node status after we know which compute node the task is going to be run on
    # The internal vm.node.status checking is disabled in get_vm() in __init__
    if node.status != node.ONLINE:
        raise NodeIsNotOperational

    msg = LOG_VM_UPDATE
    meta = {
        'output': {'returncode': 'returncode', 'stderr': 'message', 'stdout': 'json'},
        'replace_stderr': ((vm.uuid, vm.hostname),),
        'msg': msg,
        'vm_uuid': vm.uuid,
        'apiview': apiview,
    }
    callback = ('api.vm.base.tasks.vm_update_cb', {'vm_uuid': vm.uuid, 'new_node_uuid': new_node_uuid})

    logger.debug('Updating VM %s with json: """%s"""', vm, stdin)

    err = True
    vm.set_notready()

    try:
        tid, err = execute(request, vm.owner.id, cmd, stdin=stdin, meta=meta, lock=self.lock, callback=callback,
                           queue=queue, block_key=block_key)

        if err:
            return FailureTaskResponse(request, err, vm=vm)
        else:
            return TaskResponse(request, tid, msg=msg, vm=vm, api_view=apiview, data=self.data,
                                detail_dict=detail_dict)
    finally:
        if err:
            vm.revert_notready()
def process_tsig_keys(self, request, tsig_data):
    """
    :param request:
    :param tsig_data: dict that contains a 'keys' key with a str containing comma-separated key definitions
                      (for the key definition format see TsigKey.parse_tsig_string())
    :return: On success:
        arg1: new_keys - list of new keys that are not yet in the database (link them to this domain on commit)
        arg2: tsig_serializers - list of verified but not saved serializer classes (call .save() over each on commit)
    On error:
    :raises InvalidInput()
    """
    # remove spaces, ignore empty values
    try:
        tsig_keys = tsig_data['keys'].replace(' ', '')
        tsig_keys = [x for x in tsig_keys.split(',') if x]
    except (KeyError, AttributeError):
        # no tsig keys found
        return [], []

    tsig_serializers = []
    tsig_keys_new = []

    for tsig_key_str in tsig_keys:
        tsig_key_new_tmp = TsigKey.parse_tsig_string(tsig_key_str)

        if not tsig_key_new_tmp:
            raise InvalidInput('Invalid TSIG key: "%s"' % tsig_key_str)

        tsig_key_from_db = TsigKey.objects.filter(name=tsig_key_new_tmp.name)

        if not tsig_key_from_db:
            # key is not in DB. Create it.
            tsig_key = tsig_key_new_tmp
        else:
            # A key with such a name is already present. Check whether the signature is also the same.
            # We assume that only one key with such a name can be present in the DB.
            # If our assumption is wrong, PDNS would probably fail anyway.
            tsig_key_from_db = tsig_key_from_db[0]

            if tsig_key_from_db.secret != tsig_key_new_tmp.secret:
                linked_domains = tsig_key_from_db.get_linked_axfr_domains()

                if len(linked_domains) == 0:
                    # this is probably DB inconsistency... we have a key but it's not used anywhere
                    pass
                elif len(linked_domains) == 1 and linked_domains[0].id == self.domain.id:
                    # Only one domain has this TSIG key defined - the domain we're editing just now.
                    # Therefore it's safe to edit the key.
                    tsig_key_from_db.secret = tsig_key_new_tmp.secret
                else:
                    raise InvalidInput('TSIG key with the same name "%s" and a different secret is already used '
                                       'for another domain. Please use another key name.' % tsig_key_new_tmp.name)
            elif tsig_key_from_db.algorithm != tsig_key_new_tmp.algorithm:
                raise InvalidInput('TSIG key\'s "%s" algorithm ("%s") does not match the one already saved in '
                                   'the database ("%s")' % (tsig_key_new_tmp.name, tsig_key_new_tmp.algorithm,
                                                            tsig_key_from_db.algorithm))
            else:
                # the key is the same, we don't need to save it again
                tsig_keys_new.append(tsig_key_from_db)  # but we need it in this list
                continue

            tsig_key = tsig_key_from_db

        ser_tsig = TsigKeySerializer(request, tsig_key, read_only=False, many=False, data=tsig_data)

        if not ser_tsig.is_valid():
            return FailureTaskResponse(request, ser_tsig.errors, obj=self.domain, dc_bound=False)

        # custom validation
        tsig_key.validate()

        tsig_serializers.append(ser_tsig)
        tsig_keys_new.append(tsig_key)

    return tsig_keys_new, tsig_serializers
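# Commit sketch (hypothetical caller, following the contract documented in the
# docstring above; the exact step for linking new keys to the domain depends on
# the Domain/TsigKey relation and is not shown here):
#
#   new_keys, tsig_serializers = self.process_tsig_keys(request, tsig_data)
#   for ser in tsig_serializers:
#       ser.save()  # persist new/updated TSIG keys
#   # ...then link each key in new_keys to self.domain on commit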