def test_copy_mutable(self):
    """ test if mutable objects (dicts and lists) are copied by value """
    obj = {'foo': [{'bar': 42}, {'baz': 3.14}], 'boo': []}
    # copy object somewhere
    res = jsonpatch.apply_patch(obj, [{'op': 'copy', 'from': '/foo/0',
                                       'path': '/boo/0'}])
    self.assertEqual(res, {'foo': [{'bar': 42}, {'baz': 3.14}],
                           'boo': [{'bar': 42}]})
    # modify original object
    res = jsonpatch.apply_patch(res, [{'op': 'add', 'path': '/foo/0/zoo',
                                       'value': 255}])
    # check that this did not modify the copied object
    self.assertEqual(res['boo'], [{'bar': 42}])
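For context, a minimal standalone sketch of the behaviour this test relies on: `jsonpatch.apply_patch` returns a new document by default, and the `copy` op copies by value, so later edits to the source path do not leak into the copy (the document and variable names here are illustrative):

import jsonpatch

doc = {'src': [{'k': 1}], 'dst': []}
# 'copy' deep-copies the value at 'from' into 'path'
out = jsonpatch.apply_patch(doc, [{'op': 'copy', 'from': '/src/0',
                                   'path': '/dst/0'}])
assert out is not doc  # a new document is returned by default
out2 = jsonpatch.apply_patch(out, [{'op': 'add', 'path': '/src/0/extra',
                                    'value': 2}])
assert out2['dst'] == [{'k': 1}]  # the copied value is unaffected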
def get_record_orig_or_mem(self, recid):
    # We upload patches at the end, so we're not picking up our own patches here
    # XXX Could put a lock around this but ugh.
    record = get_record_orig(recid)
    record_hash = hash(record)
    sorted_fullpatches = get_fullpatches(recid)

    for fullpatch in sorted_fullpatches:
        # FIXME: Check record hash? Or check in `get_fullpatches`? see conftest2.py:408
        # assert fullpatch['record_hash'] == record_hash
        self.patches_we_applied_add(fullpatch)
        jsonpatch.apply_patch(record, fullpatch['patch'], in_place=True)
    return record
def handle_worker_completion(task_id):
    """Commit patches at the completion of a single worker.

    If `rule.confirm_hash_on_commit` is enabled, then only records whose
    hash has not changed in the meantime are committed.

    .. note:: currently `CheckerRuleExecution.should_commit` depends on `dry_run`.

    :type task_id: str
    :param task_id: task_id as returned by `run_test`
    """
    from invenio_records.api import get_record as get_record_orig
    with elioterize("finalize worker", worker_id=task_id):
        worker = RedisWorker(task_id)
        should_commit = worker.master.get_execution().should_commit
        Message.log(message_type='commit decision', commit=should_commit)
        recids_we_committed_changes_to = intbitset()
        if should_commit:
            for recid, patches in worker.all_patches.items():
                # recid: record ID
                # patches: patches for this record ID
                record = get_record_orig(recid)
                first_patch = True
                for patch in patches:
                    # The record hash is bound to change once we've applied
                    # the first patch, not to mention there is no reason to
                    # check twice.
                    if first_patch:
                        first_patch = False
                        if worker.master.rule.confirm_hash_on_commit:
                            if hash(record) != patch['hash']:
                                Message.log(message_type='skipping record',
                                            recid=record['id'],
                                            worker_id=task_id)
                                break  # No commits for this record, kthx.
                    recids_we_committed_changes_to += recid
                    jsonpatch.apply_patch(record, patch, in_place=True)
                record.commit()
        Message.log(message_type='committing complete',
                    patches_count=len(recids_we_committed_changes_to))
        worker.master.rule.mark_recids_as_checked(
            recids_we_committed_changes_to)
        db.session.commit()
        worker.status = StatusWorker.committed
def test_apply_patch_from_string(self):
    obj = {"foo": "bar"}
    patch = '[{"op": "add", "path": "/baz", "value": "qux"}]'
    res = jsonpatch.apply_patch(obj, patch)
    self.assertTrue(obj is not res)
    self.assertTrue("baz" in res)
    self.assertEqual(res["baz"], "qux")
def fn(_src, _dst):
    patch = list(jsonpatch.make_patch(_src, _dst))
    # Check if there are only 'move' operations
    for p in patch:
        self.assertEqual(p['op'], 'move')
    res = jsonpatch.apply_patch(_src, patch)
    self.assertEqual(res, _dst)
def handle_results(task_ids):
    """Commit patches.

    :type task_ids: list of str
    :param task_ids: values returned by `run_test` instances
    """
    with with_eliot(action_type='handle results', worker_id=task_ids[0]):
        for task_id in task_ids:
            redis_worker = RedisWorker(task_id)
            for recid, patches in redis_worker.all_patches.items():
                record = get_record_orig(recid)
                for patch in patches:
                    jsonpatch.apply_patch(record, patch, in_place=True)
                record.commit()
            redis_worker.master.rule.mark_recids_as_checked(
                redis_worker.bundle_requested_recids)
            redis_worker.status = StatusWorker.committed
def _test(self, test):
    if 'doc' not in test or 'patch' not in test:
        # incomplete
        return

    if test.get('disabled', False):
        # test is disabled
        return

    if 'error' in test:
        self.assertRaises(
            (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException),
            jsonpatch.apply_patch,
            test['doc'], test['patch']
        )
    else:
        try:
            res = jsonpatch.apply_patch(test['doc'], test['patch'])
        except jsonpatch.JsonPatchException as jpe:
            raise Exception(test.get('comment', '')) from jpe

        # if there is no 'expected' we only verify that applying the patch
        # does not raise an exception
        if 'expected' in test:
            self.assertEqual(res, test['expected'], test.get('comment', ''))
def test_add_nested(self):
    # see http://tools.ietf.org/html/draft-ietf-appsawg-json-patch-03#appendix-A.10
    src = {"foo": "bar"}
    patch_obj = [{"op": "add", "path": "/child",
                  "value": {"grandchild": {}}}]
    res = jsonpatch.apply_patch(src, patch_obj)
    expected = {"foo": "bar", "child": {"grandchild": {}}}
    self.assertEqual(expected, res)
def patch(self, uuid, patch):
    """Update an existing node."""
    if self._from_chassis:
        raise exception.OperationNotPermitted

    node = objects.Node.get_by_uuid(pecan.request.context, uuid)
    node_dict = node.as_dict()

    utils.validate_patch(patch)
    patch_obj = jsonpatch.JsonPatch(patch)

    # Prevent states from being updated
    state_rel_path = ['/power_state', '/target_power_state',
                      '/provision_state', '/target_provision_state']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(_("Changing states is not allowed "
                                         "here; You must use the "
                                         "nodes/%s/state interface.") % uuid)

    # Prevent node from being updated when there's a state
    # change in progress
    if any(node.get(tgt) for tgt in ["target_power_state",
                                     "target_provision_state"]):
        raise wsme.exc.ClientSideError(_("Can not update node %s while "
                                         "a state transition is in "
                                         "progress.") % uuid,
                                       status_code=409)

    try:
        patched_node = jsonpatch.apply_patch(node_dict, patch_obj)
    except jsonpatch.JsonPatchException as e:
        LOG.exception(e)
        raise wsme.exc.ClientSideError(_("Patching Error: %s") % e)

    try:
        self._convert_chassis_uuid_to_id(patched_node)
        defaults = objects.Node.get_defaults()
        for key in defaults:
            # Internal values that shouldn't be part of the patch
            if key in ['id', 'updated_at', 'created_at']:
                continue

            # In case of a remove operation, add the missing fields back
            # to the document with their default value
            if key in node_dict and key not in patched_node:
                patched_node[key] = defaults[key]

            # Update only the fields that have changed
            if node[key] != patched_node[key]:
                node[key] = patched_node[key]

        node = pecan.request.rpcapi.update_node(pecan.request.context,
                                                node)
    except exception.IronicException as e:
        with excutils.save_and_reraise_exception():
            LOG.exception(e)

    return Node.convert_with_links(node)
def patch(self, patch):
    """Update the community's metadata with a json-patch.

    Args:
        patch (dict): json-patch which can modify the following fields:
            name, description, logo.

    Returns:
        :class:`Community`: self

    Raises:
        jsonpatch.JsonPatchConflict: the json patch conflicts on the
            community.
        jsonpatch.InvalidJsonPatch: the json patch is invalid.
        b2share.modules.communities.errors.InvalidCommunityError: The
            community patch failed because the resulting community is
            not valid.
    """
    data = apply_patch({
        'name': self.model.name,
        'description': self.model.description,
        'logo': self.model.logo,
        'publication_workflow': self.model.publication_workflow,
        'restricted_submission': self.model.restricted_submission,
    }, patch, True)
    self.update(data)
    return self
def account_json_patch_loader(user=None, **kwargs):
    """Accounts REST API data loader for JSON Patch input."""
    data = request.get_json(force=True)
    if data is None:
        abort(400)
    modified_fields = {
        cmd['path'][1:] for cmd in data
        if 'path' in cmd and 'op' in cmd and cmd['op'] != 'test'
    }
    errors = [
        FieldError(field, 'Unknown or immutable field {}.'.format(field))
        # "active" is the only mutable field
        for field in modified_fields if field != 'active'
    ]
    if len(errors) > 0:
        raise RESTValidationError(errors=errors)
    original = {
        'active': user.active
    }
    try:
        patched = apply_patch(original, data)
    except (JsonPatchException, JsonPointerException):
        raise PatchJSONFailureRESTError()
    return patched
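A minimal sketch of the same validation idea in isolation: inspect the patch's paths before applying it and reject operations that touch anything outside a whitelist of mutable fields (the whitelist and function name here are assumptions for illustration):

import jsonpatch

MUTABLE_FIELDS = {'active'}  # hypothetical whitelist

def apply_restricted(doc, ops):
    # 'test' ops only assert values, so they are exempt from the check
    touched = {op['path'].lstrip('/').split('/')[0]
               for op in ops if op.get('op') != 'test'}
    illegal = touched - MUTABLE_FIELDS
    if illegal:
        raise ValueError('immutable fields: %s' % ', '.join(sorted(illegal)))
    return jsonpatch.apply_patch(doc, ops)

print(apply_restricted({'active': False},
                       [{'op': 'replace', 'path': '/active', 'value': True}]))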
def update(self, key, patch, user):
    previous = self._state.get(key)
    if isinstance(patch, dict):
        patch = self._generate_patch(previous.data, patch)
    patch = self._validate_patch(patch)
    schema_updates = [
        p for p in patch if p["path"] == "/schema" and p["op"] != "remove"
    ]
    for update in schema_updates:
        # ensure new schema(s) are valid
        self.load_schema(update["value"])
    try:
        data = jsonpatch.apply_patch(previous.data, patch)
    except jsonpatch.JsonPatchTestFailed as e:
        raise exceptions.JsonPatchTestFailed(e)
    if self.schema:
        self.schema.validate(data)
    data_object = self._storage.create(data)
    return self._state.apply(
        self._logger.create(
            key,
            Operation.UPDATE,
            data_object.ref,
            user,
            previous=previous,
            operation_parameters={"patch": list(patch)},
        ),
        data,
    )
def get_record_orig_or_mem(self, recid):
    """Return the record from the database, after applying existing patches.

    .. note:: We upload patches at the end, so we're not picking up our
        own patches here.
    """
    # XXX Should we put a lock around this?
    from invenio_records.api import get_record as get_record_orig
    record = get_record_orig(recid)
    sorted_fullpatches = get_sorted_fullpatches(recid, self.uuid)

    for fullpatch in sorted_fullpatches:
        jsonpatch.apply_patch(record, fullpatch["patch"], in_place=True)
    return record
def patch(self, uuid, patch):
    """Update an existing chassis."""
    chassis = objects.Chassis.get_by_uuid(pecan.request.context, uuid)
    chassis_dict = chassis.as_dict()

    utils.validate_patch(patch)
    try:
        patched_chassis = jsonpatch.apply_patch(chassis_dict,
                                                jsonpatch.JsonPatch(patch))
    except jsonpatch.JsonPatchException as e:
        LOG.exception(e)
        raise wsme.exc.ClientSideError(_("Patching Error: %s") % e)

    defaults = objects.Chassis.get_defaults()
    for key in defaults:
        # Internal values that shouldn't be part of the patch
        if key in ['id', 'updated_at', 'created_at']:
            continue

        # In case of a remove operation, add the missing fields back
        # to the document with their default value
        if key in chassis_dict and key not in patched_chassis:
            patched_chassis[key] = defaults[key]

        # Update only the fields that have changed
        if chassis[key] != patched_chassis[key]:
            chassis[key] = patched_chassis[key]

    chassis.save()
    return Chassis.convert_with_links(chassis)
def test_move_object_key(self):
    obj = {'foo': {'bar': 'baz', 'waldo': 'fred'},
           'qux': {'corge': 'grault'}}
    res = jsonpatch.apply_patch(obj, [{'op': 'move', 'from': '/foo/waldo',
                                       'path': '/qux/thud'}])
    self.assertEqual(res, {'qux': {'thud': 'fred', 'corge': 'grault'},
                           'foo': {'bar': 'baz'}})
def test_append(self):
    obj = {'foo': [1, 2]}
    res = jsonpatch.apply_patch(obj, [
        {'op': 'add', 'path': '/foo/-', 'value': 3},
        {'op': 'add', 'path': '/foo/-', 'value': 4},
    ])
    self.assertEqual(res['foo'], [1, 2, 3, 4])
def apply_jsonpatch(doc, patch):
    """Apply a JSON patch, one operation at a time.

    If the patch fails to apply, this allows us to determine which operation
    failed, making the error message a little less cryptic.

    :param doc: The JSON document to patch.
    :param patch: The JSON patch to apply.
    :returns: The result of the patch operation.
    :raises: PatchError if the patch fails to apply.
    :raises: wsme.exc.ClientSideError if the patch adds a new root attribute.
    """
    # Prevent the addition of new root attributes.
    for p in patch:
        if p['op'] == 'add' and p['path'].count('/') == 1:
            if p['path'].lstrip('/') not in doc:
                msg = _('Adding a new attribute (%s) to the root of '
                        'the resource is not allowed')
                raise wsme.exc.ClientSideError(msg % p['path'])

    # Apply operations one at a time, to improve error reporting.
    for patch_op in patch:
        try:
            doc = jsonpatch.apply_patch(doc, jsonpatch.JsonPatch([patch_op]))
        except _JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch_op, reason=e)

    return doc
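Stripped of the WSME/Ironic plumbing, the one-operation-at-a-time idea looks roughly like this; wrapping a single-op list in `jsonpatch.JsonPatch` (as the function above does) applies a sub-patch, and the failing operation is available in the handler (the error type raised here is an illustrative stand-in):

import jsonpatch

def apply_one_by_one(doc, ops):
    # Applying each op separately pinpoints which one failed.
    for op in ops:
        try:
            doc = jsonpatch.apply_patch(doc, jsonpatch.JsonPatch([op]))
        except jsonpatch.JsonPatchException as e:
            raise RuntimeError('operation %r failed: %s' % (op, e))
    return doc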
def getHostConfig(update):
    """
    Load host configuration.

    Read device information from networkDevices.
    Store host configuration in hostConfig.
    """
    # TODO We need to check for changes in hardware. If a new device was
    # added, we should try to automatically configure it. If a device was
    # removed, we should be aware of what is no longer valid.
    devices = update.cache_get('networkDevices')
    config = prepareHostConfig(devices)

    # update.old is not guaranteed to contain the old host configuration, so
    # save a backup copy in update.new. This will be used by revertHostConfig
    # if we need to back out.
    update.cache_set('oldHostConfig', config)

    # If this is a sethostconfig operation, then read the host config from
    # the update object. Ordinary chute operations should not alter the host
    # configuration.
    if update.updateType == 'sethostconfig':
        config = update.hostconfig

    elif update.updateType == 'patchhostconfig':
        config = jsonpatch.apply_patch(config, update.patch)

    # For factoryreset, try to load the default configuration or
    # automatically generate a new one if the file is not found.
    elif update.updateType == 'factoryreset':
        config = prepareHostConfig(
            devices, hostConfigPath=settings.DEFAULT_HOST_CONFIG_FILE)

    update.cache_set('hostConfig', config)
def apply_jsonpatch(doc, patch):
    for p in patch:
        if p["op"] == "add" and p["path"].count("/") == 1:
            if p["path"].lstrip("/") not in doc:
                msg = _("Adding a new attribute (%s) to the root of "
                        "the resource is not allowed")
                raise wsme.exc.ClientSideError(msg % p["path"])
    return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
def patch(self, config_path):
    '''Update only part of a json.'''
    app.logger.debug('Patching: ' + config_path)
    if not self.get_json():
        return self.return_obj()
    patches = json.loads(self.jsons)
    self.load_json()
    app.logger.debug('JSON: ' + json.dumps(self.json))
    app.logger.debug('patches: ' + json.dumps(patches))
    try:
        self.json = jsonpatch.apply_patch(self.json, patches)
    except (jsonpatch.JsonPatchConflict,
            jsonpointer.JsonPointerException) as e:
        self.return_code = 422
        self.description = str(e)
        app.logger.error(self.description)
        return self.return_obj()
    except Exception:
        self.return_code = 422
        self.description = 'Unknown error in patch.'
        app.logger.error(self.description)
        return self.return_obj()
    if not self.save_json():
        return self.return_obj()
    self.git_commit('add')
    self.return_code = 200
    self.description = 'File patched successfully'
    return self.return_obj()
def patch(self, brick_uuid, patch):
    """Update an existing brick.

    :param brick_uuid: UUID of a brick.
    :param patch: a json PATCH document to apply to this brick.
    """
    check_policy(pecan.request.context, 'update')

    req_ctx = pecan.request.context
    tenant_id = req_ctx.tenant_id if not req_ctx.is_admin else None
    rpc_brick = objects.Brick.get_by_uuid(pecan.request.context,
                                          brick_uuid,
                                          tenant_id=tenant_id)
    try:
        brick = Brick(**jsonpatch.apply_patch(rpc_brick.as_dict(),
                                              jsonpatch.JsonPatch(patch)))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.Brick.fields:
        if rpc_brick[field] != getattr(brick, field):
            rpc_brick[field] = getattr(brick, field)

    rpc_brick.save()
    return Brick.convert_with_links(rpc_brick)
def patch(self, port_uuid, patch):
    """Update an existing port.

    :param port_uuid: UUID of a port.
    :param patch: a json PATCH document to apply to this port.
    """
    if self._from_nodes:
        raise exception.OperationNotPermitted

    rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
    try:
        port = Port(**jsonpatch.apply_patch(rpc_port.as_dict(),
                                            jsonpatch.JsonPatch(patch)))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.Port.fields:
        if rpc_port[field] != getattr(port, field):
            rpc_port[field] = getattr(port, field)

    rpc_node = objects.Node.get_by_uuid(pecan.request.context,
                                        rpc_port.node_id)
    topic = pecan.request.rpcapi.get_topic_for(rpc_node)

    new_port = pecan.request.rpcapi.update_port(pecan.request.context,
                                                rpc_port, topic)

    return Port.convert_with_links(new_port)
def test_arrays_one_element_sequences(self):
    """ Tests the case of multiple common one element sequences inside an array """
    # see https://github.com/stefankoegl/python-json-patch/issues/30#issuecomment-155070128
    src = [1, 2, 3]
    dst = [3, 1, 4, 2]
    patch = jsonpatch.make_patch(src, dst)
    res = jsonpatch.apply_patch(src, patch)
    self.assertEqual(res, dst)
def test_use_move_instead_of_remove_add(self):
    src = {"foo": [4, 1, 2, 3]}
    dst = {"foo": [1, 2, 3, 4]}
    patch = list(jsonpatch.make_patch(src, dst))
    self.assertEqual(len(patch), 1)
    self.assertEqual(patch[0]["op"], "move")
    res = jsonpatch.apply_patch(src, patch)
    self.assertEqual(res, dst)
def test_should_just_add_new_item_not_rebuild_all_list(self):
    src = {'foo': [1, 2, 3]}
    dst = {'foo': [3, 1, 2, 3]}
    patch = list(jsonpatch.make_patch(src, dst))
    self.assertEqual(len(patch), 1)
    self.assertEqual(patch[0]['op'], 'add')
    res = jsonpatch.apply_patch(src, patch)
    self.assertEqual(res, dst)
def patch(self, patch):
    """Patch record metadata.

    :param patch: Dictionary of record metadata.
    :returns: A new :class:`Record` instance.
    """
    data = apply_patch(dict(self), patch)
    return self.__class__(data, model=self.model)
def test_use_move_instead_of_add_remove(self):
    src = {'foo': [1, 2, 3]}
    dst = {'foo': [3, 1, 2]}
    patch = list(jsonpatch.make_patch(src, dst))
    self.assertEqual(len(patch), 1)
    self.assertEqual(patch[0]['op'], 'move')
    res = jsonpatch.apply_patch(src, patch)
    self.assertEqual(res, dst)
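The generate-then-apply round trip these tests exercise reduces to a few lines: `make_patch` diffs two documents and the resulting patch transforms the first into the second (a minimal sketch with illustrative data):

import jsonpatch

src = {'foo': [1, 2, 3]}
dst = {'foo': [3, 1, 2]}
patch = jsonpatch.make_patch(src, dst)  # a JsonPatch; list(patch) shows the ops
assert jsonpatch.apply_patch(src, patch) == dst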
def test_move_array_item_into_other_item(self):
    obj = [{"foo": []}, {"bar": []}]
    patch = [{"op": "move", "from": "/0", "path": "/0/bar/0"}]
    res = jsonpatch.apply_patch(obj, patch)
    self.assertEqual(res, [{'bar': [{"foo": []}]}])
def test_copy_array_item(self):
    obj = {'foo': ['all', 'grass', 'cows', 'eat']}
    res = jsonpatch.apply_patch(obj, [{'op': 'copy', 'from': '/foo/1',
                                       'path': '/foo/3'}])
    self.assertEqual(res, {'foo': ['all', 'grass', 'cows', 'grass', 'eat']})
def test_add_replace_whole_document(self):
    obj = {'foo': 'bar'}
    new_obj = {'baz': 'qux'}
    res = jsonpatch.apply_patch(obj, [{'op': 'add', 'path': '',
                                       'value': new_obj}])
    self.assertEqual(res, new_obj)
test_data4 = {
    'title': 'Unknown film',
    'year': 4242,
    'stars': 5,
}

test_patch = [
    {'op': 'replace', 'path': '/year', 'value': 1985},
]

test_data_patched = apply_patch(test_data, test_patch)


def create_record(data):
    """Create a test record."""
    with db.session.begin_nested():
        data = copy.deepcopy(data)
        rec_uuid = uuid.uuid4()
        pid = current_pidstore.minters['recid'](rec_uuid, data)
        record = Record.create(data, id_=rec_uuid)
    return pid, record


def control_num(data, cn=1):
    """Inject a control number in data."""
    data = copy.deepcopy(data)
def patch(self, cpu_uuid, patch):
    """Update an existing cpu."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.cpu.get_by_uuid(pecan.request.context, cpu_uuid)

    # only allow patching allocated_function and capabilities
    # replace ihost_uuid and inode_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    from_profile = False
    action = None
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

        if p['path'] == '/inode_uuid':
            p['path'] = '/forinodeid'
            try:
                inode = objects.node.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = inode.id
            except exception.SysinvException:
                p['value'] = None

        if p['path'] == '/allocated_function':
            from_profile = True

        if p['path'] == '/action':
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
                action = value

    # Clean up patch
    extra_args = {}
    for p in patch[:]:
        path = p['path']
        if 'num_cores_on_processor' in path:
            extra_args[path.lstrip('/')] = p['value']
            patch.remove(p)
        if path == '/function':
            extra_args[path.lstrip('/')] = p['value']
            patch.remove(p)

    # Apply patch
    try:
        cpu = CPU(**jsonpatch.apply_patch(rpc_port.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    for key, val in extra_args.items():
        setattr(cpu, key, val)

    # Semantic checks
    ihost = pecan.request.dbapi.ihost_get(cpu.forihostid)
    _check_host(ihost)
    if not from_profile:
        _check_cpu(cpu, ihost)

    # Update only the fields that have changed
    try:
        for field in objects.cpu.fields:
            if rpc_port[field] != getattr(cpu, field):
                rpc_port[field] = getattr(cpu, field)

        rpc_port.save()

        if action == constants.APPLY_ACTION:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_grub_config(pecan.request.context)

        return CPU.convert_with_links(rpc_port)
    except exception.HTTPNotFound:
        msg = _("Cpu update failed: host %s cpu %s : patch %s"
                % (ihost.hostname, cpu.uuid, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, infra_uuid, patch):
    """Update the current infrastructure network config."""
    if self._from_isystems:
        raise exception.OperationNotPermitted

    rpc_infra = objects.infra_network.get_by_uuid(pecan.request.context,
                                                  infra_uuid)

    infra_orig = copy.deepcopy(rpc_infra)

    action = None
    for p in patch:
        if '/action' in p['path']:
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
                action = value
            break

    # replace isystem_uuid and iinfra_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    state_rel_path = ['/uuid', '/id', '/forisystemid', '/isystem_uuid',
                      '/created_at', '/updated_at']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(
            _("The following fields can not be "
              "modified: %s from this level." % state_rel_path))

    self._check_host_states()
    if action == constants.APPLY_ACTION:
        self._check_host_interfaces()

    for p in patch_obj:
        if p['path'] == '/isystem_uuid':
            isystem = objects.system.get_by_uuid(pecan.request.context,
                                                 p['value'])
            p['path'] = '/forisystemid'
            p['value'] = isystem.id

    try:
        infra = InfraNetwork(
            **jsonpatch.apply_patch(rpc_infra.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    infra = self._check_infra_data(infra.as_dict(), infra_orig.as_dict())

    changed_fields = []
    try:
        # Update only the fields that have changed
        for field in objects.infra_network.fields:
            if rpc_infra[field] != infra[field]:
                rpc_infra[field] = infra[field]
                changed_fields.append(field)

        rpc_infra.save()

        if action == constants.APPLY_ACTION:
            # perform rpc to conductor to perform config apply
            pecan.request.rpcapi.update_infra_config(pecan.request.context)

        return InfraNetwork.convert_with_links(rpc_infra)
    except exception.HTTPNotFound:
        msg = _("Infrastructure IP update failed: system %s infra %s: "
                "patch %s" % (isystem['systemname'], infra, patch))
        raise wsme.exc.ClientSideError(msg)
def update_baseline(baseline_id, system_baseline_patch):
    """
    update a baseline
    """
    ensure_rbac_write()
    validate_uuids([baseline_id])

    account_number = view_helpers.get_account_number(request)

    _check_for_whitespace_in_display_name(
        system_baseline_patch["display_name"])

    # this query is a bit different than what's in
    # _check_for_existing_display_name, since it's OK if the display name
    # is used by the baseline we are updating
    existing_display_name_query = SystemBaseline.query.filter(
        SystemBaseline.account == account_number,
        SystemBaseline.id != baseline_id,
        SystemBaseline.display_name == system_baseline_patch["display_name"],
    )

    if existing_display_name_query.count() > 0:
        message = ("display_name '%s' already used for this account"
                   % system_baseline_patch["display_name"])
        current_app.logger.audit(message, request=request, success=False)
        raise HTTPError(
            HTTPStatus.BAD_REQUEST,
            message=message,
        )

    query = SystemBaseline.query.filter(
        SystemBaseline.account == account_number,
        SystemBaseline.id == baseline_id)
    baseline = query.first_or_404()

    message = "read baselines"
    current_app.logger.audit(message, request=request)

    try:
        updated_facts = jsonpatch.apply_patch(
            baseline.baseline_facts,
            system_baseline_patch["facts_patch"])
        _validate_facts(updated_facts)
        baseline.baseline_facts = updated_facts
    except FactValidationError as error:
        message = error.message
        current_app.logger.audit(message, request=request, success=False)
        raise HTTPError(HTTPStatus.BAD_REQUEST, message=message)
    except (jsonpatch.JsonPatchException, jsonpointer.JsonPointerException):
        message = "unable to apply patch to baseline"
        current_app.logger.audit(message, request=request, success=False)
        raise HTTPError(HTTPStatus.BAD_REQUEST, message=message)

    baseline.display_name = system_baseline_patch["display_name"]

    baseline.baseline_facts = _sort_baseline_facts(baseline.baseline_facts)
    db.session.add(baseline)
    db.session.commit()

    message = "updated baselines"
    current_app.logger.audit(message, request=request)

    # pull baseline again so we have the correct updated timestamp and
    # fact count
    query = SystemBaseline.query.filter(
        SystemBaseline.account == account_number,
        SystemBaseline.id == baseline_id)
    return [query.first().to_json()]
def test_apply_patch_to_copy(self):
    obj = {'foo': 'bar'}
    res = jsonpatch.apply_patch(obj, [{'op': 'add', 'path': '/baz',
                                       'value': 'qux'}])
    self.assertTrue(obj is not res)
def patch(self, memory_uuid, patch):
    """Update an existing memory."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.memory.get_by_uuid(pecan.request.context,
                                          memory_uuid)

    if 'forihostid' in rpc_port:
        ihostId = rpc_port['forihostid']
    else:
        ihostId = rpc_port['ihost_uuid']

    host_id = pecan.request.dbapi.ihost_get(ihostId)
    if host_id['personality'] == constants.STORAGE:
        raise exception.OperationNotPermitted

    vm_hugepages_nr_2M_pending = None
    vm_hugepages_nr_1G_pending = None
    vswitch_hugepages_reqd = None
    vswitch_hugepages_size_mib = None
    platform_reserved_mib = None
    vm_pending_as_percentage = None
    for p in patch:
        if p['path'] == '/platform_reserved_mib':
            platform_reserved_mib = p['value']
        if p['path'] == '/vm_hugepages_nr_2M_pending':
            vm_hugepages_nr_2M_pending = p['value']
        if p['path'] == '/vm_hugepages_nr_1G_pending':
            vm_hugepages_nr_1G_pending = p['value']
        if p['path'] == '/vswitch_hugepages_reqd':
            vswitch_hugepages_reqd = p['value']
        if p['path'] == '/vswitch_hugepages_size_mib':
            vswitch_hugepages_size_mib = p['value']
        if p['path'] == '/vm_pending_as_percentage':
            vm_pending_as_percentage = p['value']

    if vm_pending_as_percentage is None:
        vm_pending_as_percentage = rpc_port["vm_pending_as_percentage"]
    elif vm_pending_as_percentage == "True":
        if vm_hugepages_nr_2M_pending is not None:
            patch.append({
                'op': 'replace',
                'path': '/vm_hugepages_2M_percentage',
                'value': vm_hugepages_nr_2M_pending
            })
        if vm_hugepages_nr_1G_pending is not None:
            patch.append({
                'op': 'replace',
                'path': '/vm_hugepages_1G_percentage',
                'value': vm_hugepages_nr_1G_pending
            })

    # The host must be locked
    if host_id:
        _check_host(host_id)
    else:
        raise wsme.exc.ClientSideError(
            _("Hostname or uuid must be defined"))

    if cutils.host_has_function(host_id, constants.WORKER):
        try:
            # Semantics checks and update hugepage memory accounting
            patch = _check_huge_values(
                rpc_port, patch,
                vm_hugepages_nr_2M_pending,
                vm_hugepages_nr_1G_pending,
                vswitch_hugepages_reqd,
                vswitch_hugepages_size_mib,
                platform_reserved_mib,
                vm_pending_as_percentage)
        except wsme.exc.ClientSideError as e:
            inode = pecan.request.dbapi.inode_get(
                inode_id=rpc_port.forinodeid)
            numa_node = inode.numa_node
            msg = _('Processor {0}:'.format(numa_node)) + e.message
            raise wsme.exc.ClientSideError(msg)
    else:
        # Standard/system controller or storage node
        if (vm_hugepages_nr_2M_pending is not None or
                vm_hugepages_nr_1G_pending is not None or
                vswitch_hugepages_reqd is not None or
                vswitch_hugepages_size_mib is not None):
            raise wsme.exc.ClientSideError(
                _("Hugepages memory configuration is not supported "
                  "for this node."))

    # Semantics checks for platform memory
    _check_memory(pecan.request.dbapi, rpc_port, host_id,
                  platform_reserved_mib,
                  vm_hugepages_nr_2M_pending,
                  vm_hugepages_nr_1G_pending,
                  vswitch_hugepages_reqd,
                  vswitch_hugepages_size_mib,
                  vm_pending_as_percentage)

    # only allow patching allocated_function and capabilities
    # replace ihost_uuid and inode_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

        if p['path'] == '/inode_uuid':
            p['path'] = '/forinodeid'
            try:
                inode = objects.node.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = inode.id
            except exception.SysinvException:
                p['value'] = None

    try:
        memory = Memory(
            **jsonpatch.apply_patch(rpc_port.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.memory.fields:
        if rpc_port[field] != getattr(memory, field):
            rpc_port[field] = getattr(memory, field)

    rpc_port.save()

    pecan.request.rpcapi.update_grub_config(pecan.request.context,
                                            host_id['uuid'], force=True)

    return Memory.convert_with_links(rpc_port)
def _patch(storfile_uuid, patch):
    # Obtain current storage object.
    rpc_storfile = objects.storage_file.get_by_uuid(pecan.request.context,
                                                    storfile_uuid)

    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/capabilities':
            p['value'] = jsonutils.loads(p['value'])

    ostorfile = copy.deepcopy(rpc_storfile)

    # perform checks based on the current vs. requested modifications
    _pre_patch_checks(rpc_storfile, patch_obj)

    # Obtain a storage object with the patch applied.
    try:
        storfile_config = StorageFile(
            **jsonpatch.apply_patch(rpc_storfile.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update current storage object.
    for field in objects.storage_file.fields:
        if (field in storfile_config.as_dict() and
                rpc_storfile[field] != storfile_config.as_dict()[field]):
            rpc_storfile[field] = storfile_config.as_dict()[field]

    # Obtain the fields that have changed.
    delta = rpc_storfile.obj_what_changed()
    if len(delta) == 0:
        raise wsme.exc.ClientSideError(
            _("No changes to the existing backend settings were detected."))

    allowed_attributes = ['services', 'capabilities', 'task']
    for d in delta:
        if d not in allowed_attributes:
            raise wsme.exc.ClientSideError(
                _("Can not modify '%s' with this operation." % d))

    LOG.info("SYS_I orig storage_file: %s " % ostorfile.as_dict())
    LOG.info("SYS_I new storage_file: %s " % storfile_config.as_dict())

    # Execute the common semantic checks for all backends, if backend is
    # not present this will not return
    api_helper.common_checks(constants.SB_API_OP_MODIFY,
                             rpc_storfile.as_dict())

    # Run the backend specific semantic checks
    _check_backend_file(constants.SB_API_OP_MODIFY,
                        rpc_storfile.as_dict(),
                        True)

    try:
        rpc_storfile.save()

        # Enable the backend changes:
        _apply_backend_changes(constants.SB_API_OP_MODIFY, rpc_storfile)

        return StorageFile.convert_with_links(rpc_storfile)
    except exception.HTTPNotFound:
        msg = _("StorFile update failed: storfile %s : "
                "patch %s" % (storfile_config, patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, sensor_uuid, patch):
    """Update an existing sensor."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_sensor = objects.sensor.get_by_uuid(pecan.request.context,
                                            sensor_uuid)
    if rpc_sensor.datatype == 'discrete':
        rpc_sensor = objects.sensor_discrete.get_by_uuid(
            pecan.request.context, sensor_uuid)
    elif rpc_sensor.datatype == 'analog':
        rpc_sensor = objects.sensor_analog.get_by_uuid(
            pecan.request.context, sensor_uuid)
    else:
        raise wsme.exc.ClientSideError(_("Invalid datatype=%s" %
                                         rpc_sensor.datatype))

    rpc_sensor_orig = copy.deepcopy(rpc_sensor)

    # replace ihost_uuid and isensorgroup_uuid with corresponding
    utils.validate_patch(patch)
    patch_obj = jsonpatch.JsonPatch(patch)
    my_host_uuid = None
    for p in patch_obj:
        if p['path'] == '/host_uuid':
            p['path'] = '/host_id'
            host = objects.host.get_by_uuid(pecan.request.context,
                                            p['value'])
            p['value'] = host.id
            my_host_uuid = host.uuid

        if p['path'] == '/sensorgroup_uuid':
            p['path'] = '/sensorgroup_id'
            try:
                sensorgroup = objects.sensorgroup.get_by_uuid(
                    pecan.request.context, p['value'])
                p['value'] = sensorgroup.id
                LOG.info("sensorgroup_uuid=%s id=%s" % (p['value'],
                                                        sensorgroup.id))
            except exception.SysinvException:
                p['value'] = None

    try:
        sensor = Sensor(**jsonpatch.apply_patch(rpc_sensor.as_dict(),
                                                patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    if rpc_sensor.datatype == 'discrete':
        fields = objects.sensor_discrete.fields
    else:
        fields = objects.sensor_analog.fields

    for field in fields:
        if rpc_sensor[field] != getattr(sensor, field):
            rpc_sensor[field] = getattr(sensor, field)

    delta = rpc_sensor.obj_what_changed()

    sensor_suppress_attrs = ['suppress']
    force_action = False
    if any(x in delta for x in sensor_suppress_attrs):
        valid_suppress = ['True', 'False', 'true', 'false', 'force_action']
        if rpc_sensor.suppress.lower() not in valid_suppress:
            raise wsme.exc.ClientSideError(_("Invalid suppress value, "
                                             "select 'True' or 'False'"))
        elif rpc_sensor.suppress.lower() == 'force_action':
            LOG.info("suppress=%s" % rpc_sensor.suppress.lower())
            rpc_sensor.suppress = rpc_sensor_orig.suppress
            force_action = True

    self._semantic_modifiable_fields(patch_obj, force_action)

    if not pecan.request.user_agent.startswith('hwmon'):
        hwmon_sensor = cutils.removekeys_nonhwmon(rpc_sensor.as_dict())

        if not my_host_uuid:
            host = objects.host.get_by_uuid(pecan.request.context,
                                            rpc_sensor.host_id)
            my_host_uuid = host.uuid
            LOG.warn("Missing host_uuid updated=%s" % my_host_uuid)

        hwmon_sensor.update({'host_uuid': my_host_uuid})

        hwmon_response = hwmon_api.sensor_modify(
            self._api_token, self._hwmon_address, self._hwmon_port,
            hwmon_sensor, constants.HWMON_DEFAULT_TIMEOUT_IN_SECS)

        if not hwmon_response:
            hwmon_response = {'status': 'fail',
                              'reason': 'no response',
                              'action': 'retry'}

        if hwmon_response['status'] != 'pass':
            msg = _("HWMON has returned with "
                    "a status of %s, reason: %s, "
                    "recommended action: %s") % (
                hwmon_response.get('status'),
                hwmon_response.get('reason'),
                hwmon_response.get('action'))

            if force_action:
                LOG.error(msg)
            else:
                raise wsme.exc.ClientSideError(msg)

    rpc_sensor.save()
    return Sensor.convert_with_links(rpc_sensor)
def patch(self, ntp_uuid, patch):
    """Update the current NTP configuration."""
    if self._from_isystems:
        raise exception.OperationNotPermitted

    rpc_ntp = objects.ntp.get_by_uuid(pecan.request.context, ntp_uuid)

    action = None
    for p in patch:
        if '/action' in p['path']:
            value = p['value']
            patch.remove(p)
            if value in (constants.APPLY_ACTION, constants.INSTALL_ACTION):
                action = value
            break

    # replace isystem_uuid and intp_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    state_rel_path = ['/uuid', '/id', 'forisystemid', 'isystem_uuid']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(
            _("The following fields can not be "
              "modified: %s" % state_rel_path))

    for p in patch_obj:
        if p['path'] == '/isystem_uuid':
            isystem = objects.system.get_by_uuid(pecan.request.context,
                                                 p['value'])
            p['path'] = '/forisystemid'
            p['value'] = isystem.id

    try:
        # Keep an original copy of the ntp data
        ntp_orig = rpc_ntp.as_dict()

        ntp = NTP(**jsonpatch.apply_patch(rpc_ntp.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    LOG.warn("ntp %s" % ntp.as_dict())
    ntp = _check_ntp_data("modify", ntp.as_dict())

    try:
        # Update only the fields that have changed
        for field in objects.ntp.fields:
            if rpc_ntp[field] != ntp[field]:
                rpc_ntp[field] = ntp[field]

        delta = rpc_ntp.obj_what_changed()
        if delta:
            rpc_ntp.save()

            if action == constants.APPLY_ACTION:
                # perform rpc to conductor to perform config apply
                pecan.request.rpcapi.update_ntp_config(pecan.request.context)
        else:
            LOG.info("No NTP config changes")

        return NTP.convert_with_links(rpc_ntp)
    except Exception as e:
        # rollback database changes
        for field in ntp_orig:
            if rpc_ntp[field] != ntp_orig[field]:
                rpc_ntp[field] = ntp_orig[field]
        rpc_ntp.save()

        msg = _("Failed to update the NTP configuration")
        if e == exception.HTTPNotFound:
            msg = _("NTP update failed: system %s if %s : patch %s" %
                    (isystem['systemname'], ntp['ifname'], patch))
        raise wsme.exc.ClientSideError(msg)
def patch(self, isystem_uuid, patch):
    """Update an existing isystem.

    :param isystem_uuid: UUID of a isystem.
    :param patch: a json PATCH document to apply to this isystem.
    """
    rpc_isystem = objects.system.get_by_uuid(pecan.request.context,
                                             isystem_uuid)
    system_dict = rpc_isystem.as_dict()
    updates = self._get_updates(patch)
    change_https = False
    change_sdn = False
    change_dc_role = False
    vswitch_type = None

    # prevent description field from being updated
    for p in jsonpatch.JsonPatch(patch):
        if p['path'] == '/software_version':
            raise wsme.exc.ClientSideError(_("software_version field "
                                             "cannot be modified."))

        if p['path'] == '/system_type':
            if rpc_isystem is not None:
                if rpc_isystem.system_type is not None:
                    raise wsme.exc.ClientSideError(_("system_type field "
                                                     "cannot be modified."))

        if (p['path'] == '/system_mode' and
                p.get('value') != rpc_isystem.system_mode):
            if rpc_isystem is not None and \
                    rpc_isystem.system_mode is not None:
                if rpc_isystem.system_type != constants.TIS_AIO_BUILD:
                    raise wsme.exc.ClientSideError(
                        "system_mode can only be modified on an "
                        "AIO system")
                system_mode_options = [constants.SYSTEM_MODE_DUPLEX,
                                       constants.SYSTEM_MODE_DUPLEX_DIRECT]
                new_system_mode = p['value']
                if rpc_isystem.system_mode == \
                        constants.SYSTEM_MODE_SIMPLEX:
                    msg = _("Cannot modify system mode when it is "
                            "already set to %s." % rpc_isystem.system_mode)
                    raise wsme.exc.ClientSideError(msg)
                elif new_system_mode == constants.SYSTEM_MODE_SIMPLEX:
                    msg = _("Cannot modify system mode to simplex when "
                            "it is set to %s " % rpc_isystem.system_mode)
                    raise wsme.exc.ClientSideError(msg)
                if new_system_mode not in system_mode_options:
                    raise wsme.exc.ClientSideError(
                        "Invalid value for system_mode, it can only "
                        "be modified to '%s' or '%s'" %
                        (constants.SYSTEM_MODE_DUPLEX,
                         constants.SYSTEM_MODE_DUPLEX_DIRECT))

        if p['path'] == '/timezone':
            timezone = p['value']
            if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
                raise wsme.exc.ClientSideError(_("Timezone file %s "
                                                 "does not exist." %
                                                 timezone))

        if p['path'] == '/sdn_enabled':
            sdn_enabled = p['value'].lower()
            patch.remove(p)

        if p['path'] == '/https_enabled':
            https_enabled = p['value'].lower()
            patch.remove(p)

        if p['path'] == '/distributed_cloud_role':
            distributed_cloud_role = p['value']
            patch.remove(p)

        if p['path'] == '/vswitch_type':
            vswitch_type = p['value']
            patch.remove(p)

        if p['path'] == '/security_feature':
            security_feature = p['value']
            patch.remove(p)

    try:
        patched_system = jsonpatch.apply_patch(system_dict,
                                               jsonpatch.JsonPatch(patch))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    if 'sdn_enabled' in updates:
        if sdn_enabled != rpc_isystem['capabilities']['sdn_enabled']:
            self._check_hosts()
            change_sdn = True
            if sdn_enabled == 'true':
                self._verify_sdn_enabled()
                patched_system['capabilities']['sdn_enabled'] = True
            else:
                self._verify_sdn_disabled()
                patched_system['capabilities']['sdn_enabled'] = False

    if 'https_enabled' in updates:
        if https_enabled != rpc_isystem['capabilities']['https_enabled']:
            change_https = True
            if https_enabled == 'true':
                patched_system['capabilities']['https_enabled'] = True
            else:
                patched_system['capabilities']['https_enabled'] = False
        else:
            raise wsme.exc.ClientSideError(_("https_enabled is already set "
                                             "as %s" % https_enabled))

    if 'distributed_cloud_role' in updates:
        # At this point dc role cannot be changed after config_controller
        # and config_subcloud
        if rpc_isystem['distributed_cloud_role'] is None and \
                distributed_cloud_role in \
                [constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER,
                 constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD]:
            change_dc_role = True
            patched_system['distributed_cloud_role'] = distributed_cloud_role
        else:
            raise wsme.exc.ClientSideError(
                _("distributed_cloud_role is already set as %s" %
                  rpc_isystem['distributed_cloud_role']))

    if 'vswitch_type' in updates:
        if vswitch_type == rpc_isystem['capabilities']['vswitch_type']:
            raise wsme.exc.ClientSideError(_("vswitch_type is already set "
                                             "as %s" % vswitch_type))
        patched_system['capabilities']['vswitch_type'] = vswitch_type

    if 'security_feature' in updates:
        # Security feature string must be translated from user values to
        # kernel options
        if (security_feature in
                constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS):
            security_feature_value = \
                constants.SYSTEM_SECURITY_FEATURE_SPECTRE_MELTDOWN_OPTS[
                    security_feature]
            patched_system['security_feature'] = security_feature_value
        else:
            raise wsme.exc.ClientSideError(
                _("Unexpected value %s specified for "
                  "security_feature" % security_feature))

    # Update only the fields that have changed
    name = ""
    contact = ""
    location = ""
    system_mode = ""
    timezone = ""
    capabilities = {}
    distributed_cloud_role = ""
    security_feature = ""

    for field in objects.system.fields:
        if rpc_isystem[field] != patched_system[field]:
            rpc_isystem[field] = patched_system[field]
            if field == 'name':
                name = rpc_isystem[field]
            if field == 'contact':
                contact = rpc_isystem[field]
            if field == 'location':
                location = rpc_isystem[field]
            if field == 'system_mode':
                system_mode = rpc_isystem[field]
            if field == 'timezone':
                timezone = rpc_isystem[field]
            if field == 'capabilities':
                capabilities = rpc_isystem[field]
            if field == 'distributed_cloud_role':
                distributed_cloud_role = rpc_isystem[field]
            if field == 'security_feature':
                security_feature = rpc_isystem[field]

    delta = rpc_isystem.obj_what_changed()
    delta_handle = list(delta)
    rpc_isystem.save()

    if name:
        LOG.info("update system name")
        pecan.request.rpcapi.configure_isystemname(pecan.request.context,
                                                   name)
    if name or location or contact:
        LOG.info("update SNMP config")
        pecan.request.rpcapi.update_snmp_config(pecan.request.context)
    if 'system_mode' in delta_handle:
        LOG.info("update system mode %s" % system_mode)
        pecan.request.rpcapi.update_system_mode_config(
            pecan.request.context)
    if timezone:
        LOG.info("update system timezone to %s" % timezone)
        pecan.request.rpcapi.configure_system_timezone(
            pecan.request.context)
    if capabilities:
        if change_sdn:
            LOG.info("update sdn to %s" % capabilities)
            pecan.request.rpcapi.update_sdn_enabled(pecan.request.context)
        if change_https:
            LOG.info("update https to %s" % capabilities)
            pecan.request.rpcapi.configure_system_https(
                pecan.request.context)
        if vswitch_type:
            LOG.info("update vswitch_type to %s" % capabilities)
            pecan.request.rpcapi.update_vswitch_type(pecan.request.context)

    if distributed_cloud_role and change_dc_role:
        LOG.info("update distributed cloud role to %s" %
                 distributed_cloud_role)
        pecan.request.rpcapi.update_distributed_cloud_role(
            pecan.request.context)

    if 'security_feature' in delta_handle:
        LOG.info("update security_feature %s" % security_feature)
        pecan.request.rpcapi.update_security_feature_config(
            pecan.request.context)

    return System.convert_with_links(rpc_isystem)
def patch(self, cephmon_uuid, patch):
    """Update the current storage configuration."""
    if not StorageBackendConfig.has_backend_configured(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH):
        raise wsme.exc.ClientSideError(
            _("Ceph backend is not configured."))

    rpc_cephmon = objects.ceph_mon.get_by_uuid(pecan.request.context,
                                               cephmon_uuid)
    is_ceph_mon_gib_changed = False

    patch = [p for p in patch if '/controller' not in p['path']]

    # Check if either ceph mon size or disk has to change.
    for p in patch:
        if '/ceph_mon_gib' in p['path']:
            if rpc_cephmon.ceph_mon_gib != p['value']:
                is_ceph_mon_gib_changed = True

    if not is_ceph_mon_gib_changed:
        LOG.info("ceph_mon parameters are not changed")
        raise wsme.exc.ClientSideError(
            _("Warning: ceph_mon parameters are not changed."))

    # replace isystem_uuid and ceph_mon_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    state_rel_path = ['/uuid', '/id', '/forihostid',
                      '/device_node', '/device_path']
    if any(p['path'] in state_rel_path for p in patch_obj):
        raise wsme.exc.ClientSideError(
            _("The following fields can not be "
              "modified: %s" % state_rel_path))

    try:
        cephmon = CephMon(**jsonpatch.apply_patch(
            rpc_cephmon.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    if is_ceph_mon_gib_changed:
        _check_ceph_mon(cephmon.as_dict(), rpc_cephmon.as_dict())
        controller_fs_utils._check_controller_fs(
            ceph_mon_gib_new=cephmon.ceph_mon_gib)

    for field in objects.ceph_mon.fields:
        if rpc_cephmon[field] != cephmon.as_dict()[field]:
            rpc_cephmon[field] = cephmon.as_dict()[field]

    LOG.info("SYS_I cephmon: %s " % cephmon.as_dict())

    try:
        rpc_cephmon.save()
    except exception.HTTPNotFound:
        msg = _("Ceph Mon update failed: uuid %s : "
                " patch %s" % (rpc_cephmon.uuid, patch))
        raise wsme.exc.ClientSideError(msg)

    if is_ceph_mon_gib_changed:
        # Update the task for ceph storage backend.
        StorageBackendConfig.update_backend_states(
            pecan.request.dbapi,
            constants.CINDER_BACKEND_CEPH,
            task=constants.SB_TASK_RESIZE_CEPH_MON_LV
        )

        # Mark controllers and storage node as Config out-of-date.
        pecan.request.rpcapi.update_storage_config(
            pecan.request.context,
            update_storage=is_ceph_mon_gib_changed,
            reinstall_required=False
        )

    return CephMon.convert_with_links(rpc_cephmon)
def apply_data_patch(item, changes):
    patch_changes = []
    prepare_patch(patch_changes, item, changes)
    if not patch_changes:
        return {}
    return apply_patch(item, patch_changes)
def test_add_object_key(self):
    obj = {'foo': 'bar'}
    res = jsonpatch.apply_patch(obj, [{'op': 'add', 'path': '/baz',
                                       'value': 'qux'}])
    self.assertTrue('baz' in res)
    self.assertEqual(res['baz'], 'qux')
def _load_additional_permissions(self):
    permissions = []
    new_deposit = None
    # Check submit/publish actions
    if (request.method == 'PATCH' and
            request.content_type == 'application/json-patch+json'):
        # FIXME: need some optimization on Invenio side. We are applying
        # the patch twice
        patch = deposit_patch_input_loader(self.deposit)
        new_deposit = deepcopy(self.deposit)
        # Generate 'external_pids' field in order to give
        # the illusion that this field actually exists.
        external_pids = generate_external_pids(self.deposit)
        if external_pids:
            new_deposit['external_pids'] = deepcopy(external_pids)

        apply_patch(new_deposit, patch, in_place=True)

        external_pids_changed = False
        if external_pids:
            external_pids_changed = (
                external_pids != new_deposit['external_pids']
            )
            del new_deposit['external_pids']
    else:
        abort(400)

    # Create permission for updating the state_field
    if (new_deposit is not None and new_deposit['publication_state'] !=
            self.deposit['publication_state']):
        state_permission = StrictDynamicPermission()
        state_permission.explicit_needs.add(
            update_deposit_publication_state_need_factory(
                community=self.deposit['community'],
                old_state=self.deposit['publication_state'],
                new_state=new_deposit['publication_state']
            )
        )
        # Owners of a record can always "submit" it.
        if (self.deposit['publication_state'] ==
                PublicationStates.draft.name and
                new_deposit['publication_state'] ==
                PublicationStates.submitted.name or
                # Owners have also the right to move the record from
                # submitted to draft again.
                self.deposit['publication_state'] ==
                PublicationStates.submitted.name and
                new_deposit['publication_state'] ==
                PublicationStates.draft.name):
            # Owners are allowed to update
            for owner_id in self.deposit['_deposit']['owners']:
                state_permission.explicit_needs.add(UserNeed(owner_id))
        permissions.append(state_permission)

    # Create permission for updating generic metadata fields.
    # Only superadmin can modify published draft.
    if self.deposit['publication_state'] != 'published':
        new_state = new_deposit['publication_state']
        # Check if any metadata has been changed
        del new_deposit['publication_state']
        original_metadata = deepcopy(self.deposit)
        del original_metadata['publication_state']
        if original_metadata != new_deposit:
            permissions.append(
                UpdateDepositMetadataPermission(self.deposit, new_state)
            )

    if external_pids_changed:
        permissions.append(
            DepositFilesPermission(self.deposit, 'bucket-update')
        )

    if len(permissions) > 1:
        self.permissions.add(AndPermissions(*permissions))
    elif len(permissions) == 1:
        self.permissions.add(permissions[0])
    elif len(permissions) == 0:
        # Avoid forbidding requests doing nothing. This can be useful if
        # a script replays an action.
        self.permissions.add(
            UpdateDepositMetadataPermission(
                self.deposit,
                new_deposit['publication_state']
            )
        )
def test_add_array_item(self):
    obj = {'foo': ['bar', 'baz']}
    res = jsonpatch.apply_patch(obj, [{'op': 'add', 'path': '/foo/1',
                                       'value': 'qux'}])
    self.assertEqual(res['foo'], ['bar', 'qux', 'baz'])
def patch(self, memory_uuid, patch):
    """Update an existing memory."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted

    rpc_port = objects.memory.get_by_uuid(pecan.request.context,
                                          memory_uuid)

    if 'forihostid' in rpc_port:
        ihostId = rpc_port['forihostid']
    else:
        ihostId = rpc_port['ihost_uuid']

    host_id = pecan.request.dbapi.ihost_get(ihostId)

    vm_hugepages_nr_2M_pending = None
    vm_hugepages_nr_1G_pending = None
    vswitch_hugepages_reqd = None
    vswitch_hugepages_size_mib = None
    platform_reserved_mib = None
    for p in patch:
        if p['path'] == '/platform_reserved_mib':
            platform_reserved_mib = p['value']
        if p['path'] == '/vm_hugepages_nr_2M_pending':
            vm_hugepages_nr_2M_pending = p['value']
        if p['path'] == '/vm_hugepages_nr_1G_pending':
            vm_hugepages_nr_1G_pending = p['value']
        if p['path'] == '/vswitch_hugepages_reqd':
            vswitch_hugepages_reqd = p['value']
        if p['path'] == '/vswitch_hugepages_size_mib':
            vswitch_hugepages_size_mib = p['value']

    # The host must be locked
    if host_id:
        _check_host(host_id)
    else:
        raise wsme.exc.ClientSideError(
            _("Hostname or uuid must be defined"))

    try:
        # Semantics checks and update hugepage memory accounting
        patch = _check_huge_values(rpc_port, patch,
                                   vm_hugepages_nr_2M_pending,
                                   vm_hugepages_nr_1G_pending,
                                   vswitch_hugepages_reqd,
                                   vswitch_hugepages_size_mib,
                                   platform_reserved_mib)
    except wsme.exc.ClientSideError as e:
        inode = pecan.request.dbapi.inode_get(inode_id=rpc_port.forinodeid)
        numa_node = inode.numa_node
        msg = _('Processor {0}:'.format(numa_node)) + e.message
        raise wsme.exc.ClientSideError(msg)

    # Semantics checks for platform memory
    _check_memory(rpc_port, host_id, platform_reserved_mib,
                  vm_hugepages_nr_2M_pending, vm_hugepages_nr_1G_pending,
                  vswitch_hugepages_reqd, vswitch_hugepages_size_mib)

    # only allow patching allocated_function and capabilities
    # replace ihost_uuid and inode_uuid with corresponding
    patch_obj = jsonpatch.JsonPatch(patch)

    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id

        if p['path'] == '/inode_uuid':
            p['path'] = '/forinodeid'
            try:
                inode = objects.node.get_by_uuid(pecan.request.context,
                                                 p['value'])
                p['value'] = inode.id
            except exception.SysinvException:
                p['value'] = None

    try:
        memory = Memory(
            **jsonpatch.apply_patch(rpc_port.as_dict(), patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.memory.fields:
        if rpc_port[field] != getattr(memory, field):
            rpc_port[field] = getattr(memory, field)

    rpc_port.save()
    return Memory.convert_with_links(rpc_port)
def update_many(self, isystem_uuid, patch):
    """Update the current controller_fs configuration."""
    if self._from_isystems and not isystem_uuid:
        raise exception.InvalidParameterValue(
            _("System id not specified."))

    # Validate input filesystem names
    controller_fs_list = pecan.request.dbapi.controller_fs_get_list()
    valid_fs_list = []
    if controller_fs_list:
        valid_fs_list = {fs.name: fs.size for fs in controller_fs_list}

    reinstall_required = False
    reboot_required = False
    modified_fs = []
    for p_list in patch:
        p_obj_list = jsonpatch.JsonPatch(p_list)
        for p_obj in p_obj_list:
            if p_obj['path'] == '/name':
                fs_name = p_obj['value']
            elif p_obj['path'] == '/size':
                size = p_obj['value']

        if fs_name not in valid_fs_list.keys():
            msg = _("ControllerFs update failed: invalid filesystem "
                    "'%s' " % fs_name)
            raise wsme.exc.ClientSideError(msg)
        elif not cutils.is_int_like(size):
            msg = _("ControllerFs update failed: filesystem '%s' "
                    "size must be an integer " % fs_name)
            raise wsme.exc.ClientSideError(msg)
        elif int(size) <= int(valid_fs_list[fs_name]):
            msg = _("ControllerFs update failed: size for filesystem "
                    "'%s' should be bigger than %s " %
                    (fs_name, valid_fs_list[fs_name]))
            raise wsme.exc.ClientSideError(msg)

        if fs_name in constants.SUPPORTED_REPLICATED_FILEYSTEM_LIST:
            if utils.is_drbd_fs_resizing():
                raise wsme.exc.ClientSideError(
                    _("A drbd sync operation is currently in progress. "
                      "Retry again later."))

        modified_fs += [fs_name]

    controller_fs_list_new = []
    for fs in controller_fs_list:
        replaced = False
        for p_list in patch:
            p_obj_list = jsonpatch.JsonPatch(p_list)
            for p_obj in p_obj_list:
                if p_obj['value'] == fs['name']:
                    try:
                        controller_fs_list_new += [ControllerFs(
                            **jsonpatch.apply_patch(fs.as_dict(),
                                                    p_obj_list))]
                        replaced = True
                        break
                    except utils.JSONPATCH_EXCEPTIONS as e:
                        raise exception.PatchError(patch=p_list, reason=e)
            if replaced:
                break
        if not replaced:
            controller_fs_list_new += [fs]

    cgtsvg_growth_gib = _check_controller_multi_fs_data(
        pecan.request.context, controller_fs_list_new)

    if _check_controller_state():
        _check_controller_multi_fs(controller_fs_list_new,
                                   cgtsvg_growth_gib=cgtsvg_growth_gib)
        for fs in controller_fs_list_new:
            if fs.name in modified_fs:
                value = {'size': fs.size}
                if fs.replicated:
                    value.update({
                        'state':
                            constants.CONTROLLER_FS_RESIZING_IN_PROGRESS
                    })
                pecan.request.dbapi.controller_fs_update(fs.uuid, value)

    try:
        # perform rpc to conductor to perform config apply
        pecan.request.rpcapi.update_storage_config(
            pecan.request.context,
            update_storage=False,
            reinstall_required=reinstall_required,
            reboot_required=reboot_required,
            filesystem_list=modified_fs)
    except Exception as e:
        msg = _("Failed to update filesystem size ")
        LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
        raise wsme.exc.ClientSideError(msg)
def test_remove_object_key(self):
    obj = {'foo': 'bar', 'baz': 'qux'}
    res = jsonpatch.apply_patch(obj, [{'op': 'remove', 'path': '/baz'}])
    self.assertTrue('baz' not in res)
def update_service(self, project_id, service_id,
                   auth_token, service_updates, force_update=False):
    """update.

    :param project_id
    :param service_id
    :param auth_token
    :param service_updates
    :param force_update
    :raises LookupError, ValueError
    """
    # get the current service object
    try:
        service_old = self.storage_controller.get_service(
            project_id,
            service_id)
    except ValueError:
        raise errors.ServiceNotFound("Service not found")

    if service_old.operator_status == u'disabled':
        raise errors.ServiceStatusDisabled(
            u'Service {0} is disabled'.format(service_id))

    if (service_old.status not in [u'deployed', u'failed'] and
            force_update is False):
        raise errors.ServiceStatusNeitherDeployedNorFailed(
            u'Service {0} neither deployed nor failed'.format(service_id))

    # Fixing the operator_url domain for ssl
    # for schema validation
    existing_shared_domains = {}
    for domain in service_old.domains:
        if domain.protocol == 'https' and domain.certificate == 'shared':
            customer_domain = domain.domain.split('.')[0]
            existing_shared_domains[customer_domain] = domain.domain
            domain.domain = customer_domain
        # old domains need to bind as well
        elif domain.certificate == 'san':
            cert_for_domain = (
                self.ssl_certificate_storage.get_certs_by_domain(
                    domain.domain,
                    project_id=project_id,
                    flavor_id=service_old.flavor_id,
                    cert_type=domain.certificate))
            if cert_for_domain == []:
                cert_for_domain = None
            domain.cert_info = cert_for_domain

    service_old_json = json.loads(json.dumps(service_old.to_dict()))

    # remove fields that cannot be part of PATCH
    del service_old_json['service_id']
    del service_old_json['status']
    del service_old_json['operator_status']
    del service_old_json['provider_details']

    for domain in service_old_json['domains']:
        if 'cert_info' in domain:
            del domain['cert_info']

    service_new_json = jsonpatch.apply_patch(service_old_json,
                                             service_updates)

    # add any default rules so it's explicitly defined
    self._append_defaults(service_new_json, operation='update')

    # validate the updates
    schema = service_schema.ServiceSchema.get_schema("service", "POST")
    validators.is_valid_service_configuration(service_new_json, schema)

    try:
        self.flavor_controller.get(service_new_json['flavor_id'])
    # raise a lookup error if the flavor is not found
    except LookupError as e:
        raise e

    # must be valid, carry on
    service_new_json['service_id'] = service_old.service_id
    service_new = service.Service.init_from_dict(project_id,
                                                 service_new_json)

    store = str(uuid.uuid4()).replace('-', '_')
    service_new.provider_details = service_old.provider_details

    # fixing the old and new shared ssl domains in service_new
    for domain in service_new.domains:
        if domain.protocol == 'https':
            if domain.certificate == 'shared':
                customer_domain = domain.domain.split('.')[0]
                # if this domain is from service_old
                if customer_domain in existing_shared_domains:
                    domain.domain = existing_shared_domains[customer_domain]
                else:
                    domain.domain = self._pick_shared_ssl_domain(
                        customer_domain,
                        service_new.service_id,
                        store)
            elif domain.certificate == 'san':
                cert_for_domain = (
                    self.ssl_certificate_storage.get_certs_by_domain(
                        domain.domain,
                        project_id=project_id,
                        flavor_id=service_new.flavor_id,
                        cert_type=domain.certificate))
                if cert_for_domain == []:
                    cert_for_domain = None
                domain.cert_info = cert_for_domain

                # retrofit the access url info into
                # certificate_info table
                # Note(tonytan4ever): this is for backward
                # compatibility
                if domain.cert_info is None and \
                        service_new.provider_details is not None:
                    # Note(tonytan4ever): right now we assume
                    # only one provider per flavor, that's
                    # why we use values()[0]
                    access_url_for_domain = (
                        service_new.provider_details.values()
                        [0].get_domain_access_url(domain.domain))
                    if access_url_for_domain is not None:
                        providers = (self.flavor_controller.get(
                            service_new.flavor_id).providers)
                        san_cert_url = access_url_for_domain.get(
                            'provider_url')
                        # Note(tonytan4ever): stored san_cert_url
                        # two times, that's intentional;
                        # a little extra info does not hurt
                        new_cert_detail = {
                            providers[0].provider_id.title():
                            json.dumps(dict(
                                cert_domain=san_cert_url,
                                extra_info={
                                    'status': 'deployed',
                                    'san cert': san_cert_url,
                                    'created_at': str(
                                        datetime.datetime.now())
                                }))
                        }
                        new_cert_obj = ssl_certificate.SSLCertificate(
                            service_new.flavor_id,
                            domain.domain,
                            'san',
                            project_id,
                            new_cert_detail)
                        self.ssl_certificate_storage.create_certificate(
                            project_id,
                            new_cert_obj)
                        # deserialize cert_details dict
                        new_cert_obj.cert_details[
                            providers[0].provider_id.title()] = json.loads(
                            new_cert_obj.cert_details[
                                providers[0].provider_id.title()])
                        domain.cert_info = new_cert_obj

    if hasattr(self, store):
        delattr(self, store)

    # check if the service domain names already exist;
    # existing ones do not count!
    for d in service_new.domains:
        if self.storage_controller.domain_exists_elsewhere(
                d.domain,
                service_id) is True and \
                d.domain not in existing_shared_domains.values():
            raise ValueError(
                "Domain {0} has already been taken".format(d.domain))

    # set status in provider details to u'update_in_progress'
    provider_details = service_old.provider_details
    for provider in provider_details:
        provider_details[provider].status = u'update_in_progress'
    service_new.provider_details = provider_details
    self.storage_controller.update_service(
        project_id,
        service_id,
        service_new)

    kwargs = {
        'project_id': project_id,
        'service_id': service_id,
        'auth_token': auth_token,
        'service_old': json.dumps(service_old.to_dict()),
        'service_obj': json.dumps(service_new.to_dict()),
        'time_seconds': self.determine_sleep_times(),
        'context_dict': context_utils.get_current().to_dict()
    }

    self.distributed_task_controller.submit_task(
        update_service.update_service, **kwargs)

    return
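# A minimal sketch of the core update pattern used above: serialize the
# stored object to plain JSON types, strip the read-only fields, apply the
# JSON Patch, then validate before restoring the immutable id. The helper
# name, the field list, and the `validate` callable are illustrative
# assumptions, not Poppy's API.
import json

import jsonpatch


def apply_service_updates(service_old_dict, service_updates, validate):
    # round-trip through JSON so the patch operates on plain types only
    doc = json.loads(json.dumps(service_old_dict))
    # read-only fields must not be patchable
    for field in ('service_id', 'status'):
        doc.pop(field, None)
    new_doc = jsonpatch.apply_patch(doc, service_updates)
    validate(new_doc)  # expected to raise on an invalid configuration
    # restore the immutable id before rebuilding the model object
    new_doc['service_id'] = service_old_dict['service_id']
    return new_doc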
def patch(self, patch):
    model = self.model
    data = apply_patch(dict(self), patch)
    return self.__class__(data, model=model)
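# Hedged usage sketch of the pattern above: `patch` leaves the current
# instance untouched and returns a new instance built from the patched
# dict. `Record` is a hypothetical dict subclass, not from the source.
from jsonpatch import apply_patch


class Record(dict):
    def __init__(self, data, model=None):
        super(Record, self).__init__(data)
        self.model = model

    def patch(self, patch):
        return self.__class__(apply_patch(dict(self), patch),
                              model=self.model)


r1 = Record({'title': 'draft'}, model='demo')
r2 = r1.patch([{'op': 'replace', 'path': '/title', 'value': 'final'}])
assert r1['title'] == 'draft' and r2['title'] == 'final'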
def test_remove_array_item(self):
    obj = {'foo': ['bar', 'qux', 'baz']}
    res = jsonpatch.apply_patch(obj, [{'op': 'remove', 'path': '/foo/1'}])
    self.assertEqual(res['foo'], ['bar', 'baz'])
def apply_patch(self, patch):
    # mutate the stored spec directly rather than returning a patched copy
    jsonpatch.apply_patch(self._spec, self.normalize_patch(patch),
                          in_place=True)
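# Sketch of the in-place variant used above: with `in_place=True`,
# jsonpatch mutates the given document instead of returning a patched
# copy. The `spec` dict here is an illustrative stand-in.
import jsonpatch

spec = {'replicas': 1}
jsonpatch.apply_patch(spec,
                      [{'op': 'replace', 'path': '/replicas', 'value': 3}],
                      in_place=True)
assert spec == {'replicas': 3}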
def test_replace_object_key(self):
    obj = {'foo': 'bar', 'baz': 'qux'}
    res = jsonpatch.apply_patch(
        obj, [{'op': 'replace', 'path': '/baz', 'value': 'boo'}])
    self.assertEqual(res['baz'], 'boo')
def patch(s, p):
    with open(os.path.join(SCHEMA.root, p)) as f:
        return jsonpatch.apply_patch(s, json.load(f))
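# Self-contained sketch of the same idea: the patch lives in a JSON file
# on disk and is parsed with json.load before jsonpatch applies it. The
# temp directory and file name are illustrative, not from the source repo.
import json
import os
import tempfile

import jsonpatch

with tempfile.TemporaryDirectory() as root:
    patch_path = os.path.join(root, 'add_title.json')
    with open(patch_path, 'w') as f:
        json.dump([{'op': 'add', 'path': '/title', 'value': 'Demo'}], f)

    schema = {}
    with open(patch_path) as f:
        schema = jsonpatch.apply_patch(schema, json.load(f))
    assert schema == {'title': 'Demo'}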
def test_move_array_item(self):
    obj = {'foo': ['all', 'grass', 'cows', 'eat']}
    res = jsonpatch.apply_patch(
        obj, [{'op': 'move', 'from': '/foo/1', 'path': '/foo/3'}])
    self.assertEqual(res, {'foo': ['all', 'cows', 'eat', 'grass']})
def patch(self, stor_uuid, patch):
    """Update an existing stor."""
    if self._from_ihosts:
        raise exception.OperationNotPermitted
    if self._from_tier:
        raise exception.OperationNotPermitted

    try:
        rpc_stor = objects.storage.get_by_uuid(pecan.request.context,
                                               stor_uuid)
    except exception.ServerNotFound:
        raise wsme.exc.ClientSideError(_("No stor with the provided"
                                         " uuid: %s" % stor_uuid))

    # replace ihost_uuid and tier_uuid with the corresponding internal ids
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            ihost = objects.host.get_by_uuid(pecan.request.context,
                                             p['value'])
            p['value'] = ihost.id
        elif p['path'] == '/tier_uuid':
            p['path'] = '/fortierid'
            tier = objects.tier.get_by_uuid(pecan.request.context,
                                            p['value'])
            p['value'] = tier.id

    try:
        stor = Storage(**jsonpatch.apply_patch(rpc_stor.as_dict(),
                                               patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Semantic Checks
    _check_host(stor.as_dict())
    _check_disk(stor.as_dict())

    if (hasattr(stor, 'journal_size_mib') or
            hasattr(stor, 'journal_location')):
        _check_journal(rpc_stor, stor.as_dict())

    # Journal partitions can be either collocated with the OSD or
    # external. Any location change requires that the device nodes of the
    # remaining journals on the external journal disk be updated,
    # therefore we back up the external journal stor before updating it
    # with the new value.
    journal_stor_uuid = None
    if rpc_stor['journal_location'] != getattr(stor, 'journal_location'):
        if rpc_stor['uuid'] == getattr(stor, 'journal_location'):
            # journal partition becomes collocated, backup the prev journal
            journal_stor_uuid = rpc_stor['journal_location']
            setattr(stor, 'journal_size_mib',
                    CONF.journal.journal_default_size)
        else:
            # journal partition moves to external journal disk
            journal_stor_uuid = getattr(stor, 'journal_location')
    else:
        if (hasattr(stor, 'journal_size_mib') and
                rpc_stor['uuid'] == rpc_stor['journal_location']):
            raise wsme.exc.ClientSideError(_(
                "Invalid update: Size of collocated journal is fixed."))

    # Update only the fields that have changed
    updated = False
    for field in objects.storage.fields:
        if rpc_stor[field] != getattr(stor, field):
            rpc_stor[field] = getattr(stor, field)
            updated = True

    if not updated:
        # None of the data fields have been updated, return!
        return Storage.convert_with_links(rpc_stor)

    # Set status for newly created OSD.
    if rpc_stor['function'] == constants.STOR_FUNCTION_OSD:
        ihost_id = rpc_stor['forihostid']
        ihost = pecan.request.dbapi.ihost_get(ihost_id)
        if ihost['operational'] == constants.OPERATIONAL_ENABLED:
            # We are running live manifests
            rpc_stor['state'] = constants.SB_STATE_CONFIGURING
        else:
            rpc_stor['state'] = constants.SB_STATE_CONFIGURING_ON_UNLOCK

    # Save istor
    rpc_stor.save()

    # Update device nodes for the journal disk
    if journal_stor_uuid:
        try:
            pecan.request.dbapi.journal_update_dev_nodes(journal_stor_uuid)
            # Refresh device node for current stor, if changed by prev call
            st = pecan.request.dbapi.istor_get(rpc_stor['id'])
            rpc_stor['journal_path'] = st.journal_path
        except Exception as e:
            LOG.exception(e)

    # Run runtime manifests to update configuration
    runtime_manifests = False
    if (rpc_stor['state'] == constants.SB_STATE_CONFIGURING and
            rpc_stor['function'] == constants.STOR_FUNCTION_OSD):
        runtime_manifests = True

    pecan.request.rpcapi.update_ceph_osd_config(pecan.request.context,
                                                ihost,
                                                rpc_stor['uuid'],
                                                runtime_manifests)

    return Storage.convert_with_links(rpc_stor)
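# Minimal sketch of the path-translation step above: REST-level patch
# paths are rewritten to internal database columns before the patch is
# applied. Iterating a JsonPatch yields the underlying op dicts, so (as
# the handler above relies on) they can be rewritten in place. The
# uuid-to-id lookup is faked with a plain dict; in the source it goes
# through the objects layer.
import jsonpatch


def translate_patch(patch, uuid_to_id):
    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/ihost_uuid':
            p['path'] = '/forihostid'
            p['value'] = uuid_to_id[p['value']]
    return patch_obj


stor = {'forihostid': 1, 'function': 'osd'}
patch = [{'op': 'replace', 'path': '/ihost_uuid', 'value': 'abc-123'}]
patched = jsonpatch.apply_patch(stor,
                                translate_patch(patch, {'abc-123': 2}))
assert patched['forihostid'] == 2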
def test_test_whole_obj(self):
    obj = {'baz': 1}
    # an empty JSON Pointer ('') addresses the whole document
    jsonpatch.apply_patch(obj, [{'op': 'test', 'path': '', 'value': obj}])
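# Companion sketch: the 'test' op with an empty path compares the whole
# document, and a mismatch raises jsonpatch.JsonPatchTestFailed.
import jsonpatch

try:
    jsonpatch.apply_patch({'baz': 1},
                          [{'op': 'test', 'path': '', 'value': {'baz': 2}}])
except jsonpatch.JsonPatchTestFailed:
    pass  # the document did not match the expected value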