def persist_new_state(epoch_path, patches_path, new_state, persisted_state_cache=None):
    """Persist ``new_state`` as an epoch file or an appended JSON patchset.

    If ``epoch_path`` does not exist yet, ``new_state`` is written there
    verbatim (the "0-day" snapshot). Otherwise the previously persisted
    state is reconstructed (from the optional cache, or by merging the
    epoch file with the accumulated patches) and the diff between it and
    ``new_state`` is appended to ``patches_path`` as one patchset per line.

    :param epoch_path: path of the baseline JSON snapshot file.
    :param patches_path: path of the newline-delimited JSON patch log.
    :param new_state: JSON-serializable object representing current state.
    :param persisted_state_cache: optional mutable mapping used to avoid
        re-reading the epoch/patches pair; keyed by ``epoch_path:patches_path``.
    """
    if not os.path.exists(epoch_path):
        # First run for this epoch file: write the full snapshot, no patches.
        with open(epoch_path, 'w') as epoch_fp:
            json.dump(new_state, epoch_fp)
        logger.info('Created file: %s', epoch_path)
    else:
        # Reconstruct the last persisted state: prefer the cache, fall back
        # to replaying patches on top of the 0-day snapshot.
        cache_key = epoch_path + ':' + patches_path
        persisted_state = (persisted_state_cache.get(cache_key)
                           if persisted_state_cache is not None else None)
        if not persisted_state:
            logger.debug('Cache miss (%s), loading from epoch-patches pair: %s, %s',
                         cache_key, epoch_path, patches_path)
            persisted_state = merge_epoch_and_patches(epoch_path, patches_path)
        else:
            logger.info('Previous state retrieved from cache: %s', cache_key)
        diff = JsonPatch.from_diff(persisted_state, new_state)
        if len(diff.patch) > 0:
            # Append one JSON patchset per line so the log can be replayed.
            with open(patches_path, 'a') as patches_fp:
                # logger.warn() is deprecated; logger.warning() is canonical.
                logger.warning('Writing %d-patch patchset to %s',
                               len(diff.patch), patches_path)
                json.dump(diff.patch, patches_fp)
                patches_fp.write(os.linesep)
        else:
            logger.info('No patches to write to %s', patches_path)
        if persisted_state_cache is not None:
            # Cache the state we just persisted so the next call skips the merge.
            logger.debug('Adding state to persisted cache: %s', cache_key)
            persisted_state_cache[cache_key] = new_state
def mutate(controller, init_image, processor_addr, req):
    """Build a mutation patch for a pod CREATE admission request.

    Injects an ``esk-init`` init container (plus its volumes and per-container
    volume mounts) into the pod spec for every secret binding declared in the
    pod's annotations, and returns the change as a base64-encoded JSON patch.

    :param controller: lookup object providing ``get_secretbinding_spec``.
    :param init_image: image to use for the injected init container.
    :param processor_addr: address passed to the init container.
    :param req: admission-review request dict with ``operation`` and ``object``.
    :returns: ``(True, "")`` when no mutation applies, otherwise
        ``(True, <base64 JSON patch>)``.
    """
    # Only mutate pod CREATEs that opt in via annotations.
    if req["operation"] != "CREATE" or not should_mutate(
            req["object"]["metadata"]['annotations']):
        return True, ""

    # Work on a deep copy so the diff against the original stays clean.
    req_obj = copy.deepcopy(req).get('object')
    raw_binds = get_bind_names_from_annotations(
        req_obj.get('metadata').get('annotations'))
    if not raw_binds:
        return True, ""

    namespace = req_obj.get('metadata').get('namespace')
    initContainer = InitContainer('esk-init', init_image, processor_addr)
    use_default_volume = False
    for bind in raw_binds:
        spec = controller.get_secretbinding_spec(
            bind.get('name'), namespace).get('spec')
        current_app.logger.debug(bind)
        # Fall back to the binding spec's target when the annotation omits it.
        if bind.get('target') is None:
            bind['target'] = spec.get('target')
        # The shared default volume is needed iff any bind targets /esk/secrets.
        use_default_volume = use_default_volume or bind.get(
            'target').startswith('/esk/secrets')
        initContainer.add_bind(bind, namespace, spec)

    renderedContainer = initContainer.get()

    # Add our init container
    req_obj['spec'].setdefault('initContainers', []).append(renderedContainer)

    pod_vols = initContainer.get_volumes()
    if use_default_volume:
        pod_vols.append(__DEFAULT_VOLUME)

    # Add our secret volumes
    req_obj['spec'].setdefault('volumes', [])
    req_obj['spec']['volumes'] += pod_vols

    # Add volumeMounts to every container
    for container in req_obj['spec']['containers']:
        container.setdefault('volumeMounts', [])
        container['volumeMounts'] += renderedContainer.get('volumeMounts')

    patch = JsonPatch.from_diff(req["object"], req_obj)
    return True, base64.b64encode(str(patch).encode()).decode()
def load_data(datafile):
    """Seed the database from a JSON file.

    Loads *datafile*, expresses its contents as a patch against an empty
    document, and applies that patch through the normal request/merge flow
    under the synthetic user ``initial-data-loader``.
    """
    with app.app_context():
        with open(datafile) as fp:
            initial = json.load(fp)
        loader_id = "initial-data-loader"
        bootstrap_patch = JsonPatch.from_diff({}, initial)
        request_id = patching.create_request(bootstrap_patch, loader_id)
        patching.merge(request_id, loader_id)
        database.commit()
def _update(self):
    """Refresh level state and return a patch describing what changed.

    Returns a JsonPatch when the level's dict form differs from the cached
    snapshot, otherwise None. On the very first call the cache is seeded
    and None is returned.
    """
    # TODO: Should be possible to be smarter here and not generate
    # the dicts on each update if nothing actually happened.
    self.level.update_entities()
    snapshot = self.level.to_dict()
    if not self._cache:
        # First update: just remember the snapshot, nothing to diff against.
        self._cache = snapshot
        return None
    delta = JsonPatch.from_diff(self._cache, snapshot)
    if not delta:
        return None
    # Something changed — advance the cache and report the diff.
    self._cache = snapshot
    return delta
def create_reverse_jsonpatch(self, original_entity):
    """Return a patch that undoes ``self.json_patch``.

    Applies the stored patch to *original_entity* (without mutating it),
    then diffs the result back against the original to produce the inverse.
    """
    forward_result = self.json_patch.apply(original_entity, False)
    return JsonPatch.from_diff(forward_result, original_entity)