def test_nan(self):
    value = float('nan')
    diffed = list(diff([value], [value]))
    assert [] == diffed

    diffed = list(diff([value], [3.5]))
    assert [('change', [0], (value, 3.5))] == diffed
def test_path_limit_addition(self):
    first = {}
    second = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    p = PathLimit([('author',)])
    diffed = list(diff(first, second, path_limit=p))
    res = [('add', '',
            [('author', {'first_name': 'John', 'last_name': 'Doe'})])]
    assert res == diffed

    first = {}
    second = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    p = PathLimit([('author',)])
    diffed = list(diff(first, second, path_limit=p, expand=True))
    res = [('add', '',
            [('author', {'first_name': 'John', 'last_name': 'Doe'})])]
    assert res == diffed

    first = {}
    second = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    p = PathLimit()
    diffed = list(diff(first, second, path_limit=p, expand=True))
    res = [('add', '', [('author', {})]),
           ('add', 'author', [('first_name', 'John')]),
           ('add', 'author', [('last_name', 'Doe')])]
    assert len(diffed) == 3
    for patch in res:
        assert patch in diffed
def test_change(self):
    first = {'a': 'b'}
    second = {'a': 'c'}
    diffed = next(diff(first, second))
    assert ('change', 'a', ('b', 'c')) == diffed

    first = {'a': None}
    second = {'a': 'c'}
    diffed = next(diff(first, second))
    assert ('change', 'a', (None, 'c')) == diffed

    first = {'a': 'c'}
    second = {'a': None}
    diffed = next(diff(first, second))
    assert ('change', 'a', ('c', None)) == diffed

    first = {'a': 'c'}
    second = {'a': u'c'}
    diffed = list(diff(first, second))
    assert [] == diffed

    first = {'a': 'b'}
    second = {'a': None}
    diffed = next(diff(first, second))
    assert ('change', 'a', ('b', None)) == diffed
def test_ignore_missing_complex_keys(self):
    first = {'a': {1: {'a': 'a', 'b': 'b'}}}
    second = {'a': {1: {'a': 1}}}

    diffed = next(diff(first, second, ignore=[['a', 1, 'b']]))
    assert ('change', ['a', 1, 'a'], ('a', 1)) == diffed

    diffed = next(diff(second, first, ignore=[['a', 1, 'b']]))
    assert ('change', ['a', 1, 'a'], (1, 'a')) == diffed
def test_ignore_with_unicode_sub_keys(self):
    first = {u'a': {u'aא': {u'aa': 'A'}}}
    second = {u'a': {u'aא': {u'aa': 'B'}}}

    assert len(list(diff(first, second))) == 1
    assert len(list(diff(first, second, ignore=[u'a.aא.aa']))) == 0
    assert len(list(diff(first, second, ignore=[[u'a', u'aא', u'aa']]))) == 0
def test_unicode_keys(self):
    first = {u'привет': 1}
    second = {'hello': 1}

    diffed = list(diff(first, second))
    assert ('add', '', [('hello', 1)]) in diffed
    assert ('remove', '', [(u'привет', 1)]) in diffed

    diffed = list(diff(first, second, ignore=['hello']))
    assert ('remove', '', [(u'привет', 1)]) == diffed[0]
def test_revert(self):
    first = {'a': [1, 2]}
    second = {'a': []}
    diffed = diff(first, second)
    patched = patch(diffed, first)
    assert patched == second

    diffed = diff(first, second)
    reverted = revert(diffed, second)
    assert reverted == first
def testRemoveItem(self):
    data1 = {'a': 1, 'b': 2, 'X': 3, 'c': ['d', {'X': 4, 'e': 5}]}
    data1R = {'a': 1, 'b': 2, 'c': ['d', {'e': 5}]}
    data2 = ['a', 'b', {'c': 1, 'd': [{'X': 2, 'e': 3}, 'f']}]
    data2R = ['a', 'b', {'c': 1, 'd': [{'e': 3}, 'f']}]

    data1 = remove_item(data1, 'X')
    self.assertEqual(len(list(dictdiffer.diff(data1, data1R))), 0)

    data2 = remove_item(data2, 'X')
    self.assertEqual(len(list(dictdiffer.diff(data2, data2R))), 0)
def test_complex_diff(self):
    """Check regression on issue #4."""
    from decimal import Decimal

    d1 = {
        'id': 1,
        'code': None,
        'type': u'foo',
        'bars': [{'id': 6934900},
                 {'id': 6934977},
                 {'id': 6934992},
                 {'id': 6934993},
                 {'id': 6935014}],
        'n': 10,
        'date_str': u'2013-07-08 00:00:00',
        'float_here': 0.454545,
        'complex': [{'id': 83865,
                     'goal': Decimal('2.000000'),
                     'state': u'active'}],
        'profile_id': None,
        'state': u'active'
    }
    d2 = {
        'id': u'2',
        'code': None,
        'type': u'foo',
        'bars': [{'id': 6934900},
                 {'id': 6934977},
                 {'id': 6934992},
                 {'id': 6934993},
                 {'id': 6935014}],
        'n': 10,
        'date_str': u'2013-07-08 00:00:00',
        'float_here': 0.454545,
        'complex': [{'id': 83865,
                     'goal': Decimal('2.000000'),
                     'state': u'active'}],
        'profile_id': None,
        'state': u'active'
    }

    assert len(list(diff(d1, {}))) > 0
    assert d1['id'] == 1
    assert d2['id'] == u'2'
    assert d1 is not d2
    assert d1 != d2
    assert len(list(diff(d1, d2))) > 0
def main(argv):
    parser = OptionParser(
        usage="usage clitool.py %prog -i file -o file.config...",
        description="""Make cli file to cmdconfig file , read example file """
    )
    parser.add_option("-i", "--input", dest="input_file",
                      help="Input cli file", metavar="FILE")
    parser.add_option("-o", "--output", dest="output_file",
                      help="Output file")
    (options, args) = parser.parse_args(argv)

    cli_line = load_file(options.input_file)
    root = cli_line[0].split()[0]
    out_file = clean_file(root, options.output_file)

    conf = Shconfig.config(name="", conffile=out_file)
    print conf
    if conf is None:
        print "output file is not correct format"
        return

    d = conf.dict
    main_dict = {root: {}}
    for cli in cli_line:
        cmd = cli.split()
        res = list_to_deep_dict(cmd)
        result = diff(main_dict, res, nodel=True)
        patched = patch(result, main_dict, set_remove=False)
        main_dict = patched

    print json.dumps(main_dict, indent=2)
    cof = conf.dict
    print cof
    cof[root] = main_dict[root]
    print cof
    print conf.save()
def test_dict_subclasses(self):
    class Foo(dict):
        pass

    first = Foo({2014: [
        dict(month=6, category=None, sum=672.00),
        dict(month=6, category=1, sum=-8954.00),
        dict(month=7, category=None, sum=7475.17),
        dict(month=7, category=1, sum=-11745.00),
        dict(month=8, category=None, sum=-12140.00),
        dict(month=8, category=1, sum=-11812.00),
        dict(month=9, category=None, sum=-31719.41),
        dict(month=9, category=1, sum=-11663.00),
    ]})

    second = Foo({2014: [
        dict(month=6, category=None, sum=672.00),
        dict(month=6, category=1, sum=-8954.00),
        dict(month=7, category=None, sum=7475.17),
        dict(month=7, category=1, sum=-11745.00),
        dict(month=8, category=None, sum=-12141.00),
        dict(month=8, category=1, sum=-11812.00),
        dict(month=9, category=None, sum=-31719.41),
        dict(month=9, category=2, sum=-11663.00),
    ]})

    diffed = next(diff(first, second))
    assert ('change', [2014, 4, 'sum'], (-12140.0, -12141.0)) == diffed
def _diff_dev(self, cached, curr):
    """
    Given two dicts of information/status about a device, one cached from
    the last run of this program and one from the current run, return
    either a formatted string showing the differences between them, or
    None if there are no differences.

    Note that attributes and other indicators which should normally change
    over time are ignored from the diff; this is limited to things which
    may indicate a health problem.

    :param cached: cached data on the device, from the last run
    :type cached: dict
    :param curr: data on the device from the current run
    :type curr: dict
    :return: human-readable diff, or None
    :rtype: :py:obj:`str` or :py:data:`None`
    """
    # remove values we don't want included in the diff
    cached = self._prep_dict_for_diff(cached)
    curr = self._prep_dict_for_diff(curr)
    # do the diff
    s = ''
    for d in diff(cached, curr):
        if d[0] != 'change':
            logger.debug('Ignoring diff: %s', d)
            continue
        k = d[1]
        if isinstance(k, type([])):
            k = ' '.join(['%s' % x for x in k])
        print(d)
        s += "%s changed from %s to %s\n" % (k, d[2][0], d[2][1])
    if s == '':
        return None
    return s
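# For reference, a minimal self-contained sketch of the tuples that
# _diff_dev above filters on. The device fields here are invented for
# illustration; only the ('change', key, (old, new)) shape is relied upon.
from dictdiffer import diff

cached = {'temp_c': 40, 'health': 'PASSED', 'serial': 'ABC123'}
curr = {'temp_c': 41, 'health': 'FAILED', 'serial': 'ABC123'}

changes = [d for d in diff(cached, curr) if d[0] == 'change']
# e.g. [('change', 'temp_c', (40, 41)), ('change', 'health', ('PASSED', 'FAILED'))]
for _, key, (old, new) in changes:
    print("%s changed from %s to %s" % (key, old, new))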
def test_list_change(self):
    """Produced diffs should not contain empty list instructions (#30)."""
    first = {"a": {"b": [100, 101, 201]}}
    second = {"a": {"b": [100, 101, 202]}}
    result = list(diff(first, second))
    assert len(result) == 1
    assert result == [('change', ['a', 'b', 2], (201, 202))]
def test_numpy_array(self):
    """Compare NumPy arrays (#68)."""
    import numpy as np
    first = np.array([1, 2, 3])
    second = np.array([1, 2, 4])
    result = list(diff(first, second))
    assert result == [('change', [2], (3, 4))]
def test_immutable_diffs(self):
    first = {'a': 'a'}
    second = {'a': {'b': 'b'}}
    result = list(diff(first, second))
    assert result[0][2][1]['b'] == 'b'
    second['a']['b'] = 'c'  # result MUST stay unchanged
    assert result[0][2][1]['b'] == 'b'
def _test_dictize_update_shallow(fixture_name, dtype, changeset):
    '''Test from_dict in shallow-update mode'''
    x0 = getattr(fixtures, fixture_name)
    df0 = x0.to_dict(flat=1, opts={'serialize-keys': 1})

    d = updates[dtype][changeset]
    df = flatten(d, lambda k: '.'.join(map(str, k)))

    x1 = copy.deepcopy(x0)
    x1.from_dict(d, is_flat=0, opts={'update': True})
    df1 = x1.to_dict(flat=1, opts={'serialize-keys': 1})

    for k in (set(x0.get_fields()) - set(d.keys())):
        assert_equal(getattr(x1, k), getattr(x0, k))

    for change, key, desc in dictdiffer.diff(df0, df1):
        if change == 'change':
            val0, val1 = desc
            assert ((val1 is None) and not (key in df)) or df[key] == val1
            assert df1[key] == val1
            assert df0[key] == val0
        elif change == 'add':
            for key1, val1 in desc:
                assert ((val1 is None) and not (key1 in df)) or df[key1] == val1
                assert df1[key1] == val1
                assert key1 not in df0
        elif change == 'remove':
            for key0, val0 in desc:
                assert df0[key0] == val0
                assert key0 not in df1
                assert not (key0 in df) or (df[key0] is None)
def diff_dict_list():
    """Compare dictionary-like objects.

    For keys present in both dicts, the parent ``diff`` function is called
    recursively so that nested containers are compared in turn. Added and
    removed keys are yielded as ``add`` and ``remove`` changes.
    """
    for key in intersection:
        # If the value's type has not changed, recurse with the extended
        # node path; a type change is reported by the caller as a single
        # `change` entry instead.
        recurred = diff(
            first[key],
            second[key],
            node=node + [str(key) if isinstance(key, int) else key])
        for diffed in recurred:
            yield diffed

    if addition:
        # For additions, yield a list of (key, new value) pairs.
        yield ADD, dotted_node, [(key, second[key]) for key in addition]

    if deletion:
        # For deletions, yield the list of removed keys and values.
        yield REMOVE, dotted_node, [(key, first[key]) for key in deletion]
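# A small sketch (not part of the original code) showing the three change
# types that the generator above yields for nested dicts; the exact ordering
# of entries may vary between dictdiffer versions.
from dictdiffer import diff

first = {'settings': {'theme': 'dark', 'lang': 'en'}}
second = {'settings': {'theme': 'light', 'tz': 'UTC'}}

print(list(diff(first, second)))
# Typically:
# [('change', 'settings.theme', ('dark', 'light')),
#  ('add', 'settings', [('tz', 'UTC')]),
#  ('remove', 'settings', [('lang', 'en')])]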
def test_ignore_with_ignorecase(self):
    class IgnoreCase(set):
        def __contains__(self, key):
            return set.__contains__(self, str(key).lower())

    assert list(diff({'a': 1, 'b': 2},
                     {'A': 3, 'b': 4},
                     ignore=IgnoreCase('a'))) == [('change', 'b', (2, 4))]
def test_numpy_nan(self):
    """Compare NumPy NaNs (#114)."""
    import numpy as np
    first = {'a': np.float32('nan')}
    second = {'a': float('nan')}
    result = list(diff(first, second))
    assert result == []
def test_path_limit_as_list(self):
    first = {}
    second = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    diffed = list(diff(first, second, path_limit=[('author',)]))
    res = [('add', '',
            [('author', {'first_name': 'John', 'last_name': 'Doe'})])]
    assert res == diffed
def create_diff(staged, commited):
    def listify(t):
        # we need all values as lists, because we need the same behaviour
        # in pre and post save situations
        return list(map(listify, t)) if isinstance(t, (list, tuple)) else t

    res = tuple(dictdiffer.diff(commited, staged))
    return listify(res)
def test_tolerance(self):
    first = {'a': 'b'}
    second = {'a': 'c'}
    diffed = next(diff(first, second, tolerance=0.1))
    assert ('change', 'a', ('b', 'c')) == diffed

    first = {'a': None}
    second = {'a': 'c'}
    diffed = next(diff(first, second, tolerance=0.1))
    assert ('change', 'a', (None, 'c')) == diffed

    first = {'a': 10.0}
    second = {'a': 10.5}
    diffed = list(diff(first, second, tolerance=0.1))
    assert [] == diffed

    diffed = next(diff(first, second, tolerance=0.01))
    assert ('change', 'a', (10.0, 10.5)) == diffed
def diff(self, other=None):
    new_values = self._serialize(other)
    if new_values != self.original_values:
        changes = self.changes.create()
        changes.diff = diff(self.original_values, new_values)
        changes.save()
        self.original_values = new_values
def _record_has_changed(obj, eng, record, extra_data):
    """Check whether a record is different from its state in the database.

    :param record: record
    :type record: invenio.modules.records.api.Record or AmendableRecord

    :returns: whether the record has changed
    :rtype: bool
    """
    recid = record['recid']
    modified_record = record.dumps()
    db_record = get_record(recid).dumps()

    def log_changes(changes):
        """Log the changes done to this record by the last check."""
        try:
            rule_name = record.check.rule_name
        except AttributeError:
            # Not an AmendableRecord, not running a check
            pass
        else:
            obj.log.info(
                "{rule} made the following changes on record {recid}: {changes}"
                .format(rule=rule_name, recid=recid, changes=changes))

    # Try against `extra_data`
    try:
        extra_data_record = extra_data['modified_records'][recid]
    except KeyError:
        # We have not previously stored this record
        pass
    else:
        changes = tuple(diff(extra_data_record, modified_record))
        if changes:
            log_changes(changes)
            return True

    # Try against the database
    changes = tuple(diff(db_record, modified_record))
    if changes:
        log_changes(changes)
        return True

    return False
def test_path_limit_deletion(self):
    first = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    second = {}
    p = PathLimit([('author',)])
    diffed = list(diff(first, second, path_limit=p, expand=True))
    res = [('remove', '',
            [('author', {'first_name': 'John', 'last_name': 'Doe'})])]
    assert res == diffed
def merge_changes(deposition, dest, a, b):
    """Find changes between two dicts and apply them to a destination dict.

    This method is useful when A is a subset of the destination dictionary.
    """
    # Generate patch
    patch = dictdiffer.diff(a, b)
    # Apply patch (returns a deep copy of dest with patch applied)
    return dictdiffer.patch(patch, dest)
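# A hedged usage sketch for merge_changes: the record values below are made
# up, but diff() and patch() are the dictdiffer calls used above. patch()
# returns a patched deep copy by default, leaving `dest` untouched.
import dictdiffer

a = {'title': 'Old title'}
b = {'title': 'New title', 'year': 2024}
dest = {'title': 'Old title', 'authors': ['Doe, J.']}

merged = dictdiffer.patch(dictdiffer.diff(a, b), dest)
# merged == {'title': 'New title', 'authors': ['Doe, J.'], 'year': 2024}
print(merged)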
def test_expand_deletion(self):
    first = {'foo': 'bar', 'apple': 'banana'}
    second = {}
    diffed = list(diff(first, second, expand=True))
    res = [('remove', '', [('foo', 'bar')]),
           ('remove', '', [('apple', 'banana')])]
    assert len(diffed) == 2
    for patch in res:
        assert patch in diffed
def resolve(base, local, remote):
    for op, path, value in diff(base, remote):
        if op == 'remove' and path == 'anime-list.anime':
            value = list(on_anime_removed(base, local, remote, value))

        # Ignore empty changes
        if not value:
            continue

        # Yield change
        yield op, path, value
def test_in_place_patch_and_revert(self):
    first = {'a': 1}
    second = {'a': 2}
    changes = list(diff(first, second))

    patched_copy = patch(changes, first)
    assert first != patched_copy

    reverted_in_place = revert(changes, patched_copy, in_place=True)
    assert first == reverted_in_place
    assert patched_copy == reverted_in_place

    patched_in_place = patch(changes, first, in_place=True)
    assert first == patched_in_place
def test_path_limit_change(self):
    first = {'author': {'last_name': 'Do', 'first_name': 'John'}}
    second = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    p = PathLimit([('author',)])
    diffed = list(diff(first, second, path_limit=p, expand=True))
    res = [('change', ['author'],
            ({'first_name': 'John', 'last_name': 'Do'},
             {'first_name': 'John', 'last_name': 'Doe'}))]
    assert res == diffed

    first = {'author': {'last_name': 'Do', 'first_name': 'John'}}
    second = {'author': {'last_name': 'Doe', 'first_name': 'John'}}
    p = PathLimit()
    diffed = list(diff(first, second, path_limit=p, expand=True))
    res = [('change', 'author.last_name', ('Do', 'Doe'))]
    assert res == diffed
def apply_deletion_for_node_id(api, channel_tree, channel_id, node_id, deletion_dict):
    results = find_nodes_by_node_id(channel_tree, node_id)
    assert results, 'no match found based on node_id search'
    assert len(results) == 1, 'multiple matches found...'
    tree_node = results[0]
    studio_id = tree_node['id']
    # node_before = unresolve_children(tree_node)
    node_before = api.get_contentnode(studio_id)

    # PREPARE data for DELETE request
    data = {}
    data['id'] = node_before['id']

    # DELETE
    print('DELETE studio_id=', studio_id, 'node_id=', node_id)
    response_data = api.delete_contentnode(data, channel_id)

    # Check what changed
    node_after = api.get_contentnode(studio_id)
    diffs = list(dictdiffer.diff(node_before, node_after))
    print(' diff=', diffs)
    return response_data
def diff_round_tripped(self, original, round_tripped, ignore_fields=[]):
    differ = False
    for diff_type, field_path, values in list(
            dictdiffer.diff(round_tripped.toJsonDict(), original.toJsonDict())):
        if type(field_path).__name__ in ['unicode', 'str']:
            field_path = [field_path]
        if self.is_field_ignored(field_path, ignore_fields):
            continue
        expected = values[1] if len(values) > 1 else None
        observed = values[0]
        if observed in self._empty_values and expected in self._empty_values:
            continue
        if self.is_hashable(expected) and self._equal_values.get(
                expected, "not the same") == observed:
            continue
        if expected == observed:
            continue
        logging.error("{}: {} expected '{}' found '{}'".format(
            diff_type, ".".join(list(map(str, field_path))),
            expected, observed))
        differ = True
    return differ
def atomic(tracked, remote=False):
    """
    Context manager used to ensure that all the changes are applied to the
    tracked data structure, or none.

    Delays any change notifications, saves and syncs until successful
    completion of the context.
    """
    with tracked:
        handler(tracked).track = False
        backup_copy = copy.deepcopy(tracked)
        try:
            yield  # transient_copy
        except:
            tracked.__subject__ = backup_copy
            handler(tracked).start_to_track(tracked, [], force=True)
            handler(tracked).track = True
            raise
        else:
            all_changes = list(dictdiffer.diff(backup_copy, tracked))
            handler(tracked).track = True
            handler(tracked).on_change(tracked, all_changes, remote)
        # finally:
        #     tracked._tracker.handler.tracked = True
def character_diff(self, old, new):
    """
    Diff two character dicts; return a human-readable representation

    :param old: old (cache) character dict
    :type old: dict
    :param new: new (battlenet) character dict
    :type new: dict
    :rtype: string
    """
    d = diff(old, new)
    s = ''
    for x in sorted(list(d)):
        if x[0] == 'change':
            s += 'change {item} from {a} to {b}\n'.format(
                item=x[1], a=x[2][0], b=x[2][1])
        elif x[0] == 'remove':
            s += 'remove {a} {b}\n'.format(a=x[1], b=x[2])
        else:
            s += 'add {a} {b}\n'.format(a=x[1], b=x[2])
    s = s.strip()
    return s
def check_rules(self) -> YieldFlake8Error:
    """Check missing sections and missing key/value pairs in setup.cfg."""
    setup_cfg = ConfigParser()
    with self.file_path.open() as handle:
        setup_cfg.read_file(handle)

    actual_sections = set(setup_cfg.sections())
    missing = self.get_missing_output(actual_sections)
    if missing:
        yield self.flake8_error(1, " has some missing sections. Use this:", missing)

    for section in self.expected_sections - self.missing_sections:
        expected_dict = self.file_dict[section]
        actual_dict = dict(setup_cfg[section])
        # TODO: add a class Ini(BaseFormat) and move this dictdiffer code there
        for diff_type, key, values in dictdiffer.diff(actual_dict, expected_dict):
            if diff_type == dictdiffer.CHANGE:
                yield from self.compare_different_keys(
                    section, key, values[0], values[1])
            elif diff_type == dictdiffer.ADD:
                yield from self.show_missing_keys(section, key, values)
def _diff_dicts(active, current, ignore=('changed', )):
    res = {}
    for t, key, value in diff(active, current, ignore=ignore):
        if t not in res:
            res[t] = {}
        if t == 'change':
            if isinstance(key, list):
                # list value has changed
                key = key[0]
                value = (active[key], current[key])
            res[t][key] = value
        else:
            # add, remove
            if key:
                # list value has changed
                if 'change' not in res:
                    res['change'] = {}
                res['change'][key] = (active[key], current[key])
            else:
                for k, v in value:
                    res[t][k] = v
    return res
def test_identify_subthreshold_hyperpol_with_amplitudes(subthreshold_sweeps):
    class SomeSweeps:
        @property
        def sweeps(self):
            ndata = len(subthreshold_sweeps["data"][0])
            nsweeps = len(subthreshold_sweeps["index"])
            return np.arange(nsweeps * ndata).reshape(nsweeps, ndata)

    obtained, _ = fv.identify_subthreshold_hyperpol_with_amplitudes(
        {"subthreshold_sweeps": pd.DataFrame(**subthreshold_sweeps)},
        SomeSweeps()
    )

    expected = {
        -30.0: np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
        -50.0: np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]),
        -70.0: np.array([22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])
    }

    differing = list(diff(expected, obtained))
    assert not differing, differing
def edit_config(self):
    '''
    Open editor to update configuration
    '''
    try:
        from cloudlift.version import VERSION
        current_configuration = self.get_config(VERSION)
        updated_configuration = edit(
            json.dumps(current_configuration,
                       indent=4,
                       sort_keys=True,
                       cls=DecimalEncoder))
        if updated_configuration is None:
            if self.new_service:
                self.set_config(current_configuration)
                log_warning("Using default configuration.")
            else:
                log_warning("No changes made.")
        else:
            updated_configuration = json.loads(updated_configuration)
            differences = list(
                dictdiffer.diff(current_configuration, updated_configuration))
            if not differences:
                log_warning("No changes made.")
            else:
                print_json_changes(differences)
                if confirm('Do you want to update the config?'):
                    self.set_config(updated_configuration)
                else:
                    log_warning("Changes aborted.")
    except ClientError:
        raise UnrecoverableException(
            "Unable to fetch service configuration from DynamoDB.")
def diff(self, previous_results, new_results):
    all_keys = set(
        itertools.chain(previous_results.keys(), new_results.keys()))
    diff_payload = {}
    for key in all_keys:
        old_data = previous_results.get(key, {})
        new_data = new_results.get(key, {})
        unified_diff = "\n".join(list(difflib.unified_diff(
            json.dumps(old_data, indent=4, sort_keys=True,
                       cls=TachikomaJSONEncoder).split("\n"),
            json.dumps(new_data, indent=4, sort_keys=True,
                       cls=TachikomaJSONEncoder).split("\n"),
        )))
        dict_diff = list(dictdiffer.diff(old_data, new_data))
        deep_diff = deepdiff.DeepDiff(old_data, new_data,
                                      ignore_order=True, view="tree")
        diff_payload[key] = {
            "unified": unified_diff,
            "dict_diff": list(dict_diff),
            "deep_diff": deep_diff
        }
    return diff_payload
def evaluateMarkerSequence(self):
    full_d = {}
    current_reading = {}

    # d_list is a list of the 5 most recent readings.
    for d in self.d_list:
        for d_key in d:
            if d_key not in full_d:
                # full_d is a single dict holding all info from the
                # 5 most recent readings.
                full_d[d_key] = []
            full_d[d_key].append(d[d_key])

    for d_key in full_d:
        c = Counter(full_d[d_key])
        # Get the most frequent value from the 5 most recent readings.
        value = c.most_common()[0][0]
        # current_reading holds the most frequent values from the
        # 5 most recent readings.
        current_reading[d_key] = value

    major_diff_detected = False
    for diff in list(dictdiffer.diff(self.prev_reading, current_reading)):
        if diff[0] == 'change':
            m1 = diff[2][0]  # Old reading
            m2 = diff[2][1]  # New reading
            # A difference of more than 1 cm means actual movement.
            if not isInBoundary(m1, m2, 1):
                major_diff_detected = True
                aDict = createChangeDict(diff)
                self.worker.enQueue(aDict)
        else:
            eventList = diff[2]
            for e in eventList:
                aDict = createAddOrRemoveDict(diff[0], e)
                self.worker.enQueue(aDict)
                major_diff_detected = True

    self.prev_reading = current_reading
    self.d_list = []

    if major_diff_detected:
        return True
    else:
        return False
def test_load_dict(testdata_dir, tmp_trestle_dir):
    """Test loading of distributed dict."""
    # prepare trestle project dir with the file
    test_utils.ensure_trestle_config_dir(tmp_trestle_dir)

    test_data_source = testdata_dir / 'split_merge/step4_split_groups_array/catalogs'

    catalogs_dir = Path('catalogs/')
    mycatalog_dir = catalogs_dir / 'mycatalog'
    catalog_dir = mycatalog_dir / 'catalog'

    # Copy files from test/data/split_merge/step4
    shutil.rmtree(catalogs_dir)
    shutil.copytree(test_data_source, catalogs_dir)

    actual_model_type, actual_model_alias, actual_model_instance = _load_dict(
        catalog_dir / 'metadata/responsible-parties')

    expected_model_instance = {
        'contact': ResponsibleParty.oscal_read(
            catalog_dir / 'metadata/responsible-parties/contact__responsible-party.json'),
        'creator': ResponsibleParty.oscal_read(
            catalog_dir / 'metadata/responsible-parties/creator__responsible-party.json')
    }

    assert len(list(dictdiffer.diff(actual_model_instance, expected_model_instance))) == 0
    assert actual_model_alias == 'catalog.metadata.responsible-parties'

    expected_model_type, _ = fs.get_contextual_model_type(
        (catalog_dir / 'metadata/responsible-parties/').absolute())
    assert actual_model_type.__fields__['__root__'].outer_type_ == expected_model_type
def diffConf(confA, confB):
    def listConf(inputString):
        return inputString.strip().split("\n")

    class ConfigParser(iniparser.BaseParser):
        comment_called = False
        values = None
        section = ''

        def __init__(self):
            self.values = {}

        def assignment(self, key, value):
            self.values.setdefault(self.section, {})
            self.values[self.section][key] = value

        def new_section(self, section):
            self.section = section

        def comment(self, section):
            self.comment_called = True

    parserA = ConfigParser()
    parserA.parse(listConf(confA))
    # print "parserA.values \n"
    # print parserA.values

    parserB = ConfigParser()
    parserB.parse(listConf(confB))
    # print "parserB.values \n"
    # print parserB.values

    diffList = list(diff(parserA.values, parserB.values))
    # print "diff \n"
    # print diffList

    return [diffList, parserA.values, parserB.values]
def update_data(self):
    check_data = diff(self.child_asset.data, self.request.data["data"])
    check_data = list(check_data)
    tup_dic = {
        "change": [],
        "add": [],
        "remove": [],
    }
    for tup in check_data:
        # [('change', 'name', ('eth331', 'eth3311')), ('change', 'test', ('11131', '111311'))]
        # [('add', '', [('33', 33)]), ('remove', '', [('22', '22')])]
        if tup[0] == "change":
            # "attribute <key>: old value -> new value"
            tup_dic[tup[0]].append(
                f"属性<{tup[1]}>: 旧值:{tup[2][0]} -> 新值: {tup[2][1]}")
        elif tup[0] == "add":
            # "attribute: key -> value"
            tup_dic[tup[0]].append(
                f"属性: {tup[2][0][0]} -> 值: {tup[2][0][1]}")
        elif tup[0] == "remove":
            # "attribute: key -> value"
            tup_dic[tup[0]].append(
                f"属性: {tup[2][0][0]} -> 值: {tup[2][0][1]}")

    for k, v in tup_dic.items():
        if v:
            if hasattr(self, k):
                getattr(self, k)(v)
def compare_logs(log1, log2, ignore_fields=None, ignore_msgs=None, tolerance=None):
    if ignore_fields is None:
        ignore_fields = []
    if ignore_msgs is None:
        ignore_msgs = []

    log1, log2 = [list(filter(lambda m: m.which() not in ignore_msgs, log))
                  for log in (log1, log2)]
    assert len(log1) == len(log2), \
        "logs are not same length: " + str(len(log1)) + " VS " + str(len(log2))

    diff = []
    for msg1, msg2 in tqdm(zip(log1, log2)):
        if msg1.which() != msg2.which():
            print(msg1, msg2)
            raise Exception("msgs not aligned between logs")

        msg1_bytes = remove_ignored_fields(msg1, ignore_fields).as_builder().to_bytes()
        msg2_bytes = remove_ignored_fields(msg2, ignore_fields).as_builder().to_bytes()

        if msg1_bytes != msg2_bytes:
            msg1_dict = msg1.to_dict(verbose=True)
            msg2_dict = msg2.to_dict(verbose=True)

            tolerance = EPSILON if tolerance is None else tolerance
            dd = dictdiffer.diff(msg1_dict, msg2_dict, ignore=ignore_fields)

            # dictdiffer only supports relative tolerance, we also want to check for absolute
            def outside_tolerance(diff):
                a, b = diff[2]
                if isinstance(a, numbers.Number) and isinstance(b, numbers.Number):
                    return abs(a - b) > max(tolerance, tolerance * max(abs(a), abs(b)))
                return True

            dd = list(filter(outside_tolerance, dd))
            diff.extend(dd)
    return diff
def processar_dados(dados):
    filename = 'ultimos_dados.json'
    file_exists = os.path.exists(filename)
    if not file_exists:
        # "First run, so creating baseline data..."
        logger.info(f'Primeira vez rodando, então criando dados de parâmetro...')
        with open(filename, 'w') as f:
            json.dump(dados, f)
    else:
        with open(filename, 'r') as f:
            dados_antigos = json.load(f)

        diffs = list(dictdiffer.diff(dados_antigos, dados))
        dict_diff = Dict()
        for status, key, (old, new) in diffs:
            if status == 'change':
                path = ''
                for x in key.split('.'):
                    path += f'["{x}"]'
                exec(f'dict_diff{path} = "{new}"')

        alunos = []
        for key, value in dict_diff.items():
            if 'Autor última mensagem' in value.keys():
                if any([x in value['Autor última mensagem']
                        for x in ['JORDANA', 'SEDIR']]):
                    continue
            alunos.append(key)

        if alunos:
            # "There is at least one message from: ..."
            logger.info(f'Existe pelo menos uma mensagem de: {", ".join(alunos)}')
        else:
            # "Nothing new under the sun."
            logger.info(f'Nada novo sob o sol.')

        with open(filename, 'w') as f:
            json.dump(dados, f)
        return alunos
def print_evolution(new, previous_printed):
    if DBS not in new:
        return previous_printed

    new_keys = list_of_dict_keys(new[DBS])
    if TIMES in new and 'modification' in new[TIMES]:
        last_modified = new[TIMES]['modification']
    else:
        last_modified = 'N/A'
    formatted_keys = f'\nVersion {new[APP_VERSION]} ({last_modified}):\t{new_keys}'

    if previous_printed is None:
        print(formatted_keys)
        return new

    if DBS not in previous_printed:
        return new

    old_keys = list_of_dict_keys(previous_printed[DBS])
    if new_keys == old_keys:
        diff_result = list(diff(previous_printed[DBS], new[DBS]))
        if len(diff_result) == 0:
            return new

        print(formatted_keys)
        for key in new_keys:
            if key in old_keys:
                print(
                    f"Number of tables:\t [New] {len(new[DBS][key]['tables'])}\t [Old] {len(previous_printed[DBS][key]['tables'])}\t ({key})"
                )
            else:
                print(
                    f"Number of tables:\t [New] {len(new[DBS][key]['tables'])}\t [Old] 0\t ({key})"
                )
        print('\tDiffers inside the databases: ')
        for each_diff in diff_result:
            print(f'\t\t{each_diff}')
    else:
        print(formatted_keys)
    return new
def test_definitions_match(self, name, check):
    test_obj = FacilityTest.objects.get(name=check["fields"]["name"])
    generated_definition = json.loads(test_obj.definition)
    print("---" * 10, check["fields"]["name"])
    customised_definition = json.loads(check["fields"]["definition"])
    ignore_list = {
        "type.calculations",
        "type.comparisons",
        "type.name",
        "type.models",
        "operator.name",
        "python_class",
        ("groups", 0, "model", "formulations"),
        ("groups", 1, "model", "formulations"),
        ("groups", 0, "model", "tracingFormulations"),
        ("groups", 1, "model", "tracingFormulations"),
        ("groups", 0, "model", "overrideOptions"),
        ("groups", 1, "model", "overrideOptions"),
        ("groups", 0, "model", "allowOverride"),
        ("groups", 1, "model", "allowOverride"),
        ("groups", 0, "model", "fields"),
        ("groups", 1, "model", "fields"),
        ("groups", 0, "model", "selectId"),
        ("groups", 1, "model", "selectId"),
        ("groups", 0, "cycles"),
        ("groups", 1, "cycles"),
        ("groups", 0, "aggregation"),
        ("groups", 1, "aggregation"),
    }
    output = list(
        diff(generated_definition, customised_definition, ignore=ignore_list))
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(output)
    self.assertLess(len(output), 3)
def driver():
    # start the initial scan
    file_handler.log(LOG_FILE, "Starting the initial scan...")
    INITIAL_FILE_HASHES = scan()

    # save the initial scan dictionary of hashes
    file_handler.save_dict(INITIAL_FILE_HASHES, SCAN_STORAGE, LOG_FILE)
    file_handler.log(LOG_FILE, "Initial scan completed!")

    # start the integrity check
    file_handler.log(LOG_FILE, "Starting the integrity check...")
    while True:
        # get the file hashes
        new_hash = scan()

        # load the old hashes
        old_hash = file_handler.load_dict(SCAN_STORAGE, LOG_FILE)

        # compare the two dicts of hashes
        for diff in list(dictdiffer.diff(old_hash, new_hash)):
            # ALERT
            file_handler.log(ALERT_FILE, diff)

        # save the new hashes
        file_handler.save_dict(new_hash, SCAN_STORAGE, LOG_FILE)

        # wait
        time.sleep(SLEEP_TIME_SECONDS)
def eq_assemblies(self, a1, a2):
    for e1, e2 in zip(a1.edges(), a2.edges()):
        # check assembly nodes
        assert e1[0] == e2[0]
        assert e1[1] == e2[1]

        d1 = dict(e1[2])
        d2 = dict(e2[2])

        del d1["groups"]
        del d2["groups"]

        d1["type_def"] = d1["type_def"].__dict__
        d2["type_def"] = d2["type_def"].__dict__

        d1["group"] = str(d1["group"])
        d2["group"] = str(d2["group"])

        diff = list(dictdiffer.diff(d1, d2))
        print(d1)
        print(d2)
        assert not diff
def test_list_of_different_length(self):
    """Check that one can revert list with different length."""
    first = [1]
    second = [1, 2, 3]
    result = list(diff(first, second))
    assert first == revert(result, second)
def _fetch_comparison(hsps):
    n = 0
    while n < len(hsps) - 1:
        yield diff(hsps[n], hsps[n + 1])
        n = n + 1
def test_addition(self):
    first = {}
    second = {'a': 'b'}
    diffed = next(diff(first, second))
    assert ('add', '', [('a', 'b')]) == diffed
def test_pull(self):
    first = {'a': ['b']}
    second = {'a': []}
    diffed = next(diff(first, second))
    assert ('pull', 'a', ['b']) == diffed
def test_nodes(self):
    first = {'a': {'b': {'c': 'd'}}}
    second = {'a': {'b': {'c': 'd', 'e': 'f'}}}
    diffed = next(diff(first, second))
    assert ('add', 'a.b', [('e', 'f')]) == diffed
def test_deletion(self):
    first = {'a': 'b'}
    second = {}
    diffed = next(diff(first, second))
    assert ('remove', '', [('a', 'b')]) == diffed
def import_policy_definition_list(self, policy_definition_list, update=False,
                                  push=False, check_mode=False, force=False):
    policy_definition_updates = []
    for definition in policy_definition_list:
        policy_definition_dict = self.get_policy_definition_dict(
            definition['type'], remove_key=False)
        diff = []
        payload = {
            "name": definition['name'],
            "description": definition['description'],
            "type": definition['type'],
        }
        if 'defaultAction' in definition:
            payload.update({'defaultAction': definition['defaultAction']})
        if 'sequences' in definition:
            payload.update({'sequences': definition['sequences']})
        if 'definition' in definition:
            payload.update({'definition': definition['definition']})

        if definition['name'] in policy_definition_dict:
            existing_definition = policy_definition_dict[definition['name']]
            if 'defaultAction' in payload:
                diff.extend(list(dictdiffer.diff(
                    existing_definition['defaultAction'],
                    payload['defaultAction'])))
            if 'sequences' in payload:
                diff.extend(list(dictdiffer.diff(
                    existing_definition['sequences'],
                    payload['sequences'])))
            if 'definition' in payload:
                diff.extend(list(dictdiffer.diff(
                    existing_definition['definition'],
                    payload['definition'])))
            if len(diff):
                if 'definition' in definition:
                    self.convert_list_name_to_id(definition['definition'])
                if 'sequences' in definition:
                    self.convert_sequences_to_id(definition['sequences'])
                if 'rules' in definition:
                    self.convert_sequences_to_id(definition['rules'])
                if not check_mode and update:
                    self.update_policy_definition(
                        definition,
                        policy_definition_dict[definition['name']]['definitionId'])
                policy_definition_updates.append({
                    'name': definition['name'],
                    'diff': diff
                })
        else:
            diff = list(dictdiffer.diff({}, payload))
            policy_definition_updates.append({
                'name': definition['name'],
                'diff': diff
            })
            # List does not exist
            if 'definition' in definition:
                self.convert_list_name_to_id(definition['definition'])
            if 'sequences' in definition:
                self.convert_list_name_to_id(definition['sequences'])
            if 'rules' in definition:
                self.convert_list_name_to_id(definition['rules'])
            if not check_mode:
                self.add_policy_definition(definition)
    return policy_definition_updates
def test_dict_combined_key_type(self):
    first = {0: {'1': {2: 3}}}
    second = {0: {'1': {2: '3'}}}
    first_patch = [('change', [0, '1', 2], (3, '3'))]
    assert second == patch(first_patch, first)
    assert first_patch[0] == list(diff(first, second))[0]
def test_collection_subclasses(self):
    class DictA(MutableMapping):
        def __init__(self, *args, **kwargs):
            self.__dict__.update(*args, **kwargs)

        def __setitem__(self, key, value):
            self.__dict__[key] = value

        def __getitem__(self, key):
            return self.__dict__[key]

        def __delitem__(self, key):
            del self.__dict__[key]

        def __iter__(self):
            return iter(self.__dict__)

        def __len__(self):
            return len(self.__dict__)

    class DictB(MutableMapping):
        def __init__(self, *args, **kwargs):
            self.__dict__.update(*args, **kwargs)

        def __setitem__(self, key, value):
            self.__dict__[key] = value

        def __getitem__(self, key):
            return self.__dict__[key]

        def __delitem__(self, key):
            del self.__dict__[key]

        def __iter__(self):
            return iter(self.__dict__)

        def __len__(self):
            return len(self.__dict__)

    class ListA(MutableSequence):
        def __init__(self, *args, **kwargs):
            self._list = list(*args, **kwargs)

        def __getitem__(self, index):
            return self._list[index]

        def __setitem__(self, index, value):
            self._list[index] = value

        def __delitem__(self, index):
            del self._list[index]

        def __iter__(self):
            for value in self._list:
                yield value

        def __len__(self):
            return len(self._list)

        def insert(self, index, value):
            self._list.insert(index, value)

    daa = DictA(a=ListA(['a', 'A']))
    dba = DictB(a=ListA(['a', 'A']))
    dbb = DictB(a=ListA(['b', 'A']))
    assert list(diff(daa, dba)) == []
    assert list(diff(daa, dbb)) == [('change', ['a', 0], ('a', 'b'))]
    assert list(diff(dba, dbb)) == [('change', ['a', 0], ('a', 'b'))]
def test_list_same(self):
    """Diff for the same list should be empty."""
    first = {1: [1]}
    assert len(list(diff(first, first))) == 0