def check_items_equal(actual_value, expected_value, msg=""):
    """Assert equality with a type-appropriate datadiff failure message.

    :param actual_value: value under test (list/tuple/dict/str/bool/other)
    :param expected_value: value to compare against
    :param msg: caller-supplied prefix for the failure message
    :return: None; raises AssertionError via the assert_* helpers on mismatch
    """
    # Collections get a (+)/(-) item-diff legend; scalars get a simpler one.
    if isinstance(actual_value, (list, dict, tuple)):
        msg = "\n" + msg + "\n\nDiffering items :\nFirst Argument(Usually Actual) marked with (-)," \
                           "Second Argument(Usually Expected) marked with (+)"
    else:
        msg = "\n" + msg + "\nFirst Argument(Usually Actual), Second Argument(Usually Expected)"
    # Empty/None on either side: plain equality check (a diff would be noise).
    if not actual_value or not expected_value:
        assert_equal(actual_value, expected_value,
                     u"{}\n{} != {}".format(msg, actual_value, expected_value))
    elif isinstance(actual_value, (list, tuple)):
        # Order-insensitive: sort both sides before comparing and diffing.
        assert_items_equal(sorted(actual_value), sorted(expected_value),
                           u"{}\n{}".format(msg, unicode(diff(sorted(actual_value), sorted(expected_value)))))
    elif isinstance(actual_value, dict):
        assert_dict_equal(actual_value, expected_value,
                          u"{}\n{}".format(msg, unicode(diff(actual_value, dict(expected_value)))))
    elif isinstance(actual_value, (str, bool)):
        assert_equal(actual_value, expected_value,
                     u"{}\n{} != {}".format(msg, unicode(actual_value), unicode(expected_value)))
    else:
        assert_equal(actual_value, expected_value,
                     u"{}\n{} != {}".format(msg, actual_value, expected_value))
def multi_type_in_cluster(request):
    """Django view: bulk-register/update vSphere clusters from a POSTed JSON list.

    Each item must carry exactly the keys Name/IP/User/Pwd; credentials are
    verified with a throwaway pyVmomi connection before any DB write.
    Responds with JSON listing 'Done', 'Error' and 'Delete' items.
    """
    if request.method == 'POST':
        try:
            received_json_data = json.loads(request.body)
        except ValueError:
            return HttpResponse('ValueError: No JSON Object could be decoded')
        error_list = []
        done_list = []
        del_list = []
        for new_info_dict in received_json_data:
            # Require exactly the four expected keys, nothing more.
            if len(new_info_dict.keys()) == 4 and {
                    'Name', 'IP', 'User', 'Pwd'
            }.issubset(new_info_dict):
                cluster_name = new_info_dict['Name']
                cluster_ip = new_info_dict['IP']
                user = new_info_dict['User']
                pwd = new_info_dict['Pwd']
                try:
                    # Probe the credentials; the connection is discarded right away.
                    mysi = connect.ConnectNoSSL(host=cluster_ip, port=443,
                                                user=user, pwd=pwd)
                except Exception:
                    error_list.append(new_info_dict)
                else:
                    connect.Disconnect(mysi)
                    t_info = ClusterInfo.objects.filter(IP=cluster_ip)
                    if len(t_info) == 1:
                        # Known cluster: archive the previous record, then update.
                        t_info_dict = list(t_info.values())[0]
                        t_info_dict.pop('id')
                        if datadiff.diff([t_info_dict], [new_info_dict]):
                            old_info = OldClusterInfo.objects.filter(
                                IP=cluster_ip)
                            if len(old_info) == 1:
                                old_info_dict = list(old_info.values())[0]
                                old_info_dict.pop('id')
                                # Drop the stale archive copy if it differs too.
                                if datadiff.diff([old_info_dict], [t_info_dict]):
                                    old_info.delete()
                                    del_list.append(old_info_dict)
                            OldClusterInfo.objects.get_or_create(**t_info_dict)
                            t_info.update(Name=cluster_name, User=user, Pwd=pwd)
                            done_list.append(new_info_dict)
                    else:
                        # New cluster: create it directly.
                        ClusterInfo.objects.get_or_create(**new_info_dict)
                        done_list.append(new_info_dict)
            else:
                error_list.append(new_info_dict)
        return HttpResponse(
            json.dumps({
                'Done': done_list,
                'Error': error_list,
                'Delete': del_list
            }))
    else:
        return HttpResponse(
            'Can Only Use POST Request - from multi_type_in_cluster')
def diff_profiles(user, password, entity_id):
    """Print pairwise diffs between stored profiles of one entity (Python 2)."""
    print "Property: ", entity_id
    profiles_dict = profiles.get_all_profiles(user, password, entity_id, 'return')
    for k, v in profiles_dict.items():
        print k
        # v appears to be a pair of single-key {name: data} dicts — show which
        # two profiles are being compared, then diff their payloads.
        print v[0].keys()[0], " VS ", v[1].keys()[0]
        print diff(v[0].values()[0], v[1].values()[0])
def diff(self, b):
    """Print a datadiff between this mapping and ``b`` when they differ (Python 2)."""
    try:
        # assert_equal raises AssertionError on any difference.
        datadiff.tools.assert_equal(dict(self), dict(b))
        equal = True
    except AssertionError as e:
        equal = False
    if not equal:
        print datadiff.diff(dict(self), dict(b))
def callback(new, old):
    """Longpoll update hook: print a diff of old vs new state (Python 2)."""
    try:
        import datadiff
        # First update has no previous state to diff against.
        if old is None:
            return
        print datadiff.diff(old, new)
    except ImportError:
        # datadiff is optional; degrade to a plain notice.
        print 'received longpoll update, datadiff not available'
def diff(self, b):
    """Print a datadiff between this mapping and ``b`` when they differ (Python 2)."""
    try:
        # assert_equal raises AssertionError on any difference.
        datadiff.tools.assert_equal(dict(self), dict(b))
        equal = True
    except AssertionError as e:
        equal = False
    if not equal:
        print datadiff.diff(dict(self), dict(b))
def test_eval_bool():
    """A diff result is truthy exactly when its two operands differ."""
    cases = (
        ([1], [1], False),
        ([1], [2], True),
        (dict(a=1), dict(a=1), False),
    )
    for left, right, should_differ in cases:
        result = diff(left, right, fromfile="x", tofile="y")
        assert_equal(bool(result), should_differ)
def test_diff_oneline_strings(): try: diff("foobar", "baz", fromfile="x", tofile="y") except Exception: e = sys.exc_info()[1] assert_equal(type(e), DiffNotImplementedForType, "Raised exception should be DiffNotImplementedForType") assert_equal(e.attempted_type, str) else: raise AssertionError("Should've raised a DiffNotImplementedForType")
def test_diff_oneline_strings():
    """diff() must refuse single-line str inputs with DiffNotImplementedForType."""
    try:
        diff('foobar', 'baz', fromfile="x", tofile="y")
    except Exception:
        # sys.exc_info() keeps the exception access Py2/Py3-compatible.
        e = sys.exc_info()[1]
        assert_equal(type(e), DiffNotImplementedForType,
                     "Raised exception should be DiffNotImplementedForType")
        assert_equal(e.attempted_type, str)
    else:
        raise AssertionError("Should've raised a DiffNotImplementedForType")
def test_diff_objects():
    """Arbitrary (non-sequence, non-mapping) objects are not diffable."""
    class Foo(object):
        pass
    try:
        diff(Foo(), Foo(), fromfile="x", tofile="y")
    except Exception:
        # sys.exc_info() keeps the exception access Py2/Py3-compatible.
        e = sys.exc_info()[1]
        assert_equal(type(e), DiffNotImplementedForType,
                     "Raised exception should be DiffNotImplementedForType")
        assert_equal(e.attempted_type, Foo)
    else:
        raise AssertionError("Should've raised a DiffNotImplementedForType")
def compare_pps(old_pp, new_pp):
    """Given two sets of problem params; old_pp from a stored file/db and
    new_pp as current arguments, compare them and if they differ, ask which
    one to use and return it

    Returns:
        (use_old_pp, pp): flag telling whether the stored params were chosen,
        and the chosen params dict.
    """
    # Remember whether old_pp carried a 'dt' timestamp BEFORE stripping it.
    # BUG FIX: the original re-tested `'dt' in old_pp` after the deletion
    # below, which was always False, so the "added at {dt}" line never printed.
    has_dt = 'dt' in old_pp
    if has_dt:
        dt = old_pp['dt']
        del old_pp['dt']
    # '_id' is a storage artifact, never part of the params proper.
    if '_id' in old_pp:
        del old_pp['_id']
    # Dims are converted from list to tuple in DB, so don't diff them;
    # temporarily remove, then restore after diffing.
    dims = new_pp['dims']
    del new_pp['dims']
    if 'dims' in old_pp:
        del old_pp['dims']
    pp_diff = diff(old_pp, new_pp)
    new_pp['dims'] = dims
    old_pp['dims'] = dims
    pp = new_pp
    use_old_pp = False
    if old_pp != new_pp:
        if has_dt:
            print(f"Found old problem params in MongoDB added at {dt}")
        print(
            f"Diff('a': old, from DB. 'b': specified, from args):\n{pp_diff}")
        ans = ''
        # Keep prompting until the user answers y or n.
        while ans not in ['y', 'n']:
            ans = input("Use old pp (Y) instead of specified (N)?: ").lower()
        if ans == 'y':
            use_old_pp = True
            pp = old_pp
    return (use_old_pp, pp)
def do_test(log, check_errors=True):
    """
    Test a single log.

    ``log`` - the url prefix of the log to test.  Also searches for the
    result file with the same prefix.
    """
    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))
    builder = BuildbotLogViewArtifactBuilder(url, check_errors=check_errors)
    lpc = ArtifactBuilderCollection(url, builders=builder)
    lpc.parse()
    act = lpc.artifacts[builder.name]
    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act
    del(act["logurl"])
    exp = test_utils.load_exp("{0}.logview.json".format(log))
    # :: use to create the ``exp`` files, if you're making a lot of them
    # with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
    #     f.write(json.dumps(act, indent=4))
    #     f.close()
    # log urls won't match in tests, since they're machine specific
    # but leave it in the exp file as an example of what the real structure
    # should look like.
    del(exp["logurl"])
    # datadiff output makes a mismatch readable on failure.
    assert act == exp, diff(exp, act)
def _hash_compare(hash1, hash2):
    """Return True when two parse-tree dicts are similar enough.

    Similarity = 1 - (diff penalty / total feature count), compared against
    TREE_SIMILARITY_THRESHOLD.
    """
    # quickly: if the top-level tag doesn't match, ditch it
    if list(hash1.keys())[0] != list(hash2.keys())[0]:
        return False
    # otherwise, diff it and find differences
    diff = datadiff.diff(hash1, hash2)
    hash1_string = json.dumps(hash1)
    hash2_string = json.dumps(hash2)
    total = len(FEATURE_REGEX.findall(hash1_string)) + len(FEATURE_REGEX.findall(hash2_string))
    score = 0
    for line in str(diff).splitlines():
        if line.lstrip().startswith(('+', '-')):
            if line.lstrip().startswith(('+++', '---')):
                # it's a diff header line, skip
                continue
            elif '{' in line and '}' in line:
                # not a modification
                # penalise by 2 (parent + children as a group)
                # and reduce the total by (len)
                count = len(FEATURE_REGEX.findall(line))
                score += 2
                total -= count
            else:
                # modification, only half a point since it does it twice
                score += 0.5 * len(FEATURE_REGEX.findall(line))
    # NOTE(review): if every feature lands in removed blocks, total can reach 0
    # and this divides by zero — confirm inputs guarantee surviving features.
    res = 1 - score / total
    return res >= TREE_SIMILARITY_THRESHOLD
def main(files):
    """Load two schema CSVs and print the datadiff between them.

    files[0] is the old schema, files[1] the new one.
    """
    old_schema = get_dict(files[0])
    new_schema = get_dict(files[1])
    print(diff(old_schema, new_schema))
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    url = add_log_response(
        "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50.txt.gz"
    )
    lpc = ArtifactBuilderCollection(url)
    # Mark every parser finished so parse() only emits the empty skeleton.
    for builder in lpc.builders:
        builder.parser.complete = True
    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
            "logurl": url,
        },
        "Job Info": {
            "job_details": [],
            "logurl": url,
        }
    }
    assert exp == lpc.artifacts, diff(exp, lpc.artifacts)
def _update(self, service_guid):
    """Updates the service if it's different in the appstack and in the live
    environment.

    Args:
        service_guid (str): GUID of a service.

    Returns:
        list[str]: List of applications (their guids) that need to be restarted
            because of the update of this service. This list will be empty
            when there's nothing to restart
    """
    service_name = self.service.name
    appstack_credentials = self.service.credentials
    live_credentials = cf_api.get_upsi_credentials(service_guid)
    if live_credentials != appstack_credentials:
        _log.info('User provided service %s is different in the live environment and appstack. '
                  'Will update it...', service_name)
        _log.debug('Service credentials differences:\n%s',
                   datadiff.diff(live_credentials, appstack_credentials,
                                 fromfile='live env', tofile='appstack'))
        cf_cli.update_user_provided_service(service_name, json.dumps(appstack_credentials))
        # Updated credentials don't propagate to already-bound apps:
        # recreate the bindings and report which apps need a restart.
        service_bindings = cf_api.get_upsi_bindings(service_guid)
        _log.info('Rebinding apps to service instance %s...', service_name)
        self._recreate_bindings(service_bindings)
        app_guids = [binding['entity']['app_guid'] for binding in service_bindings]
        return app_guids
    else:
        _log.info('Service %s already exists and is up-to-date. No need to do anything...',
                  service_name)
        return []
def _print_conf_diff(self, local, remote):
    """Log a syntax-highlighted diff between remote and local configuration."""
    # json round-trip normalises `local` (tuples->lists, keys->str, etc.)
    # so it compares cleanly against the remote JSON-derived structure.
    str_diff = diff(remote, json.loads(json.dumps(local))).__str__()
    # Move the +/- marker to column 0 so the 'diff' lexer highlights the line.
    str_diff = re.compile(r'^( +)([\-\+])', re.MULTILINE).sub(r'\2\1', str_diff)
    self._logger.warn('[API] Configuration:\n{}'.format(highlight(
        str_diff, get_lexer_by_name('diff'), TerminalFormatter())))
def compare_and_view(self):
    """Diff the spec XML against the CBA XML and render it in the text widget."""
    bench = {}
    tree = ET.parse(self.spec)
    root = tree.getroot()
    # Collect every xpath of both documents into flat dicts for diffing.
    XMLUtil.get_all_xpath(root, bench, root.tag)
    cba_version = {}
    tree2 = ET.parse(self.cba)
    root2 = tree2.getroot()
    XMLUtil.get_all_xpath(root2, cba_version, root2.tag)
    diffs = diff(bench, cba_version).diffs
    # Blue for spec-only entries, red for cba-only entries.
    self.diff_text.tag_config("spec", foreground="blue", font=("Arial 10 italic", 13))
    self.diff_text.tag_config("cba", foreground="red", font=("Arial 10 italic", 13))
    for l in diffs:
        if l[0] != 'equal':
            printl0 = l[0]
            if l[0] == 'delete':
                printl0 = 'spec has '
                # NOTE(review): ("spec") is just the string "spec", not a tuple.
                tags = ("spec")
            else:
                printl0 = 'cba has '
                tags = ("cba")
            self.diff_text.insert(END, printl0, tags)
            for i in l[1]:
                self.diff_text.insert(END, i[0].replace('-', '') + i[1] + os.linesep, tags)
def test_elasticsearch_doc_request_body(test_project, eleven_jobs_stored):
    """
    Test the request body is created correctly
    """
    bug_id = 12345678
    job_id = 1
    classification_timestamp = int(time())
    who = "*****@*****.**"
    req = ElasticsearchDocRequest(test_project, job_id, bug_id,
                                  classification_timestamp, who)
    req.generate_request_body()
    expected = {
        "buildname": "39643b5073cfb9473042884bfd3ced0289b3d7dd",
        "machinename": "bld-linux64-ec2-132",
        "os": "b2g-emu-jb",
        # I'm using the request time date here, as start time is not
        # available for pending jobs
        "date": "2013-11-13",
        "type": "B2G Emulator Image Build",
        "buildtype": "debug",
        "starttime": "1384353553",
        "tree": test_project,
        "rev": "45f8637cb9f78f19cb8463ff174e81756805d8cf",
        "bug": str(bug_id),
        "who": who,
        "timestamp": str(classification_timestamp),
        "treeherder_job_id": job_id,
    }
    # datadiff output makes a mismatch readable on failure.
    assert req.body == expected, diff(expected, req.body)
def record_gamestate(self, state):
    """Snapshot the observed game state, log it, and sanity-check that the
    previous snapshot plus the last move reproduces it exactly."""
    p1insts = collections.defaultdict(int)
    p2insts = collections.defaultdict(int)
    # Count living, non-spell units per side, keyed by instance name.
    for unit in state.units:
        if not unit.dead and not unit.card.spell:
            inst = self.unit_to_inst(unit)
            if unit.side == 1:
                p1insts[str(inst)] += 1
            else:
                p2insts[str(inst)] += 1
    # Normalise supply names: lowercase, spaces removed.
    p1supplies = {n.lower().replace(" ", ""): sup for (n, sup) in state.p1supplies.items()}
    p2supplies = {n.lower().replace(" ", ""): sup for (n, sup) in state.p2supplies.items()}
    p1 = Side(int(state.p1res[0]), int(state.p1res[1]), p1insts, p1supplies)
    p2 = Side(int(state.p2res[0]), int(state.p2res[1]), p2insts, p2supplies)
    gs = GameState(self.setup, p1, p2, state.ply, int(state.oppres[5]))
    self.game_states.append(gs)
    self.log(gs.to_json())
    if len(self.game_states) > 1:
        # Replay the last move on the previous state; the result must match
        # the freshly observed state, otherwise the move model is wrong.
        gs2 = self.game_states[-2].do_move(self.moves[-1])
        if gs2.to_json() != gs.to_json():
            self.log(gs2.to_json())
            self.log_plain(str(datadiff.diff(gs.to_json(), gs2.to_json())))
            assert False
def test_diff_list_context():
    """Long equal runs are elided; only the changed region plus context shows."""
    a = [1]*50 + [2, 3, 4, 5, 6, 7, 8] + [1]*10
    b = [1]*50 + [3, 9, 10] + [1]*10
    d = diff(a, b, fromfile="x", tofile="y")
    # Context lines carry a single leading space; bare @@ lines mark elision.
    expected = dedent('''\
        --- x
        +++ y
        [
        @@ -46,59 +46,55 @@
         1,
         1,
         1,
        -2,
         3,
        -4,
        -5,
        -6,
        -7,
        -8,
        +9,
        +10,
         1,
         1,
         1,
        @@
        @@
        ]''')
    assert_equal(str(d), expected)
def test_elasticsearch_doc_request_body(test_project, eleven_jobs_stored):
    """
    Test the request body is created correctly
    """
    bug_id = 12345678
    job_id = 1
    # Seed a compressed buildapi artifact so the request picks up buildername.
    sample_artifact = {"build_id": 39953854,
                       "buildername": "b2g_emulator_vm mozilla-inbound opt test crashtest-2"}
    placeholders = [[job_id, "buildapi", "json",
                     zlib.compress(json.dumps(sample_artifact)), job_id, "buildapi"]]
    with ArtifactsModel(test_project) as artifacts_model:
        artifacts_model.store_job_artifact(placeholders)
    classification_timestamp = int(time())
    who = "*****@*****.**"
    req = ElasticsearchDocRequest(test_project, job_id, bug_id,
                                  classification_timestamp, who)
    req.generate_request_body()
    expected = {
        "buildname": "b2g_emulator_vm mozilla-inbound opt test crashtest-2",
        "machinename": "bld-linux64-ec2-132",
        "os": "b2g-emu-jb",
        # I'm using the request time date here, as start time is not
        # available for pending jobs
        "date": "2013-11-13",
        "type": "B2G Emulator Image Build",
        "buildtype": "debug",
        "starttime": "1384353553",
        "tree": test_project,
        "rev": "45f8637cb9f78f19cb8463ff174e81756805d8cf",
        "bug": str(bug_id),
        "who": who,
        "timestamp": str(classification_timestamp),
        "treeherder_job_id": job_id,
    }
    # datadiff output makes a mismatch readable on failure.
    assert req.body == expected, diff(expected, req.body)
def diffFiles (leftFile, rightFile):
    """Diff two JSON files; print Match/Differ and return True on match (Python 2)."""
    try:
        with open(leftFile, 'r') as fp:
            left = json.load(fp)
    except Exception as e:
        print "{}: Error reading left file: {}".format(leftFile, e.message if e.message else " ".join(e.args))
        return False
    try:
        with open(rightFile, 'r') as fp:
            right = json.load(fp)
    except Exception as e:
        print "{}: Error reading right file: {}".format(rightFile, e.message if e.message else " ".join(e.args))
        return False
    try:
        # Very large context so the whole structure is shown, not just hunks.
        dd = datadiff.diff(left, right, fromfile=leftFile, tofile=rightFile, context=10000)
    except TypeError as e:
        print "{} - {}: Error: {}".format(leftFile, rightFile, e.message if e.message else " ".join(e.args))
        return False
    # An empty diff is falsy -> the documents match.
    isMatch = not bool(dd)
    if isMatch:
        print "{} - {}: Match".format(leftFile, rightFile)
        return True
    print "{} - {}: Differ".format(leftFile, rightFile)
    print str(dd)
    return False
def _hash_compare(hash1, hash2):
    """Return True when two parse-tree dicts are similar enough.

    Similarity = 1 - (diff penalty / total feature count), compared against
    TREE_SIMILARITY_THRESHOLD.
    """
    # quickly: if the top-level tag doesn't match, ditch it
    if list(hash1.keys())[0] != list(hash2.keys())[0]:
        return False
    # otherwise, diff it and find differences
    diff = datadiff.diff(hash1, hash2)
    hash1_string = json.dumps(hash1)
    hash2_string = json.dumps(hash2)
    total = len(FEATURE_REGEX.findall(hash1_string)) + len(
        FEATURE_REGEX.findall(hash2_string))
    score = 0
    for line in str(diff).splitlines():
        if line.lstrip().startswith(('+', '-')):
            if line.lstrip().startswith(('+++', '---')):
                # it's a diff header line, skip
                continue
            elif '{' in line and '}' in line:
                # not a modification
                # penalise by 2 (parent + children as a group)
                # and reduce the total by (len)
                count = len(FEATURE_REGEX.findall(line))
                score += 2
                total -= count
            else:
                # modification, only half a point since it does it twice
                score += 0.5 * len(FEATURE_REGEX.findall(line))
    # NOTE(review): if every feature lands in removed blocks, total can reach 0
    # and this divides by zero — confirm inputs guarantee surviving features.
    res = 1 - score / total
    return res >= TREE_SIMILARITY_THRESHOLD
def test(self):
    """Parsed address must match the expected validator dict; failures show
    the address and a structural diff."""
    #self.assertEqual( sa.parse( addr ), addr_parsed )
    addr_parsed = sa.parse(addr)
    assert addr_parsed, 'Could not parse address "{}"'.format(addr)
    assert addr_parsed == addr_parsed_validator, (
        addr + "\n" + str(diff(addr_parsed, addr_parsed_validator)))
def test_diff_dict():
    """Dict diffs show added/removed/changed keys; key order of the context
    lines is arbitrary, so assert on individual lines instead of the whole
    rendered string."""
    a = dict(zero=0, one=1, two=2, three=3, nine=9, ten=10)
    b = dict(zero='@', one=1, three=3, four=4, nine=9, ten=10)
    d = diff(a, b, fromfile="x", tofile="y")
    # result should look something like this:
    dedent('''\
        --- x
        +++ y
        {
        +'four': 4,
         'nine': 9,
         'ten': 10,
         'three': 3,
        -'two': 2,
        -'zero': 0,
        +'zero': '@',
        @@
        @@
        }''')
    # but due to arbitrary key ordering, may differ in the
    # context.
    diff_str = str(d)
    assert "+'four': 4," in diff_str
    assert "-'two': 2," in diff_str
    assert "-'zero': 0," in diff_str
    assert "+'zero': '@'," in diff_str
    # FIX: raw string — \w and \d are regex escapes, not Python string
    # escapes; the non-raw form raises a SyntaxWarning (future SyntaxError)
    # on modern CPython.
    context_pattern = r"^ '\w+': \d+,$"
    assert_equal(_count_lines(context_pattern, diff_str), 3)
def test_nested_unhashable_compare_with_func_diff2():
    """compare_with_func diffs nested dicts inside tuples element-wise."""
    a = [1, 2, 3, (dict(foo = dict(user_id=false, mount_point='Wiki'), bar = true), 6, 7, 8, 'diff', 9)]
    b = [1, 2, 3, (dict(foo = dict(user_id='badf00d', mount_point='Wiki'), bar = 'qaz'), 6, 7, 8, 9)]
    d = diff(a, b, fromfile="x", tofile="y", compare_with_func=True)
    # %r of `false` keeps the expectation correct on both Py2 and Py3
    # (False reprs differently than a string would).
    expected = dedent('''\
        --- x
        +++ y
        [
        @@ -0,3 +0,3 @@
         1,
         2,
         3,
        (
        @@ -0,5 +0,4 @@
        {
         'bar': 'qaz',
         'foo': {
         'mount_point': 'Wiki',
        -'user_id': %r,
        +'user_id': 'badf00d',
        },
        },
         6,
         7,
         8,
        -'diff',
         9,
        ),
        ]''') % (false,)
    assert_equal(str(d), expected)
def main(): """ Read, write or check sync status. Reads/writes from/to yaml files or compares to current status. """ parser = argparse.ArgumentParser() parser.add_argument('-s', nargs='?', const=DEFAULT_DIR, default=None) group = parser.add_mutually_exclusive_group() group.add_argument('-r', nargs='?', const='__current__', default=None) group.add_argument('-w', nargs='?', const=DEFAULT_DIR, default=None) args = parser.parse_args() if args.w: io_write(args.w, tree_map()) if args.r: if args.r == '__current__': printer.pprint(tree_map()) else: printer.pprint(io_read(args.r)) if args.s: logger.info("\nReading from: {}".format(args.s)) filelst, currlst = io_read(args.s), tree_map() sync_state = filelst == currlst print("\n:: ==SYNC== :: [{}] ".format(sync_state)) if not sync_state: print("\na:Stored:--- <<[DIFFS]>> b:Etheral:+++") print(diff(filelst, currlst, context=0, depth=15))
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    log = "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50"
    url = "file://{0}".format(
        SampleData().get_log_path("{0}.txt.gz".format(log)))
    lpc = ArtifactBuilderCollection(
        url,
    )
    # Mark every parser finished so parse() only emits the empty skeleton.
    for builder in lpc.builders:
        builder.parser.complete = True
    lpc.parse()
    exp = {
        "text_log_summary": {
            "step_data": {
                "steps": [],
                "errors_truncated": False
            },
        },
        "Job Info": {
            "job_details": []
        }
    }
    act = lpc.artifacts
    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act["text_log_summary"]
    assert "logurl" in act["Job Info"]
    del(act["Job Info"]["logurl"])
    del(act["text_log_summary"]["logurl"])
    assert exp == lpc.artifacts, diff(exp, lpc.artifacts)
def check_update(arn):
    """Poll Device Farm runs for ``arn``, persist changed snapshots to Mongo,
    and notify Slack about newly-completed runs.
    """
    res = devicefarm.list_runs(arn=arn)
    if res['ResponseMetadata']['HTTPStatusCode'] != 200:
        return
    res['runs'] = [ensure_utc(run) for run in res['runs']]
    collection = db[arn]
    cursor = collection.find(limit=1, sort=[('_id', DESCENDING)])
    if cursor.count() == 0:
        # First snapshot for this ARN: store and exit, nothing to compare.
        res['created'] = datetime.now(tz)
        collection.insert_one(res)
        return
    data_runs = [ensure_utc(run) for run in cursor[0]['runs']]
    if not diff(res['runs'], data_runs):
        # No change since the last snapshot.
        return
    res['created'] = datetime.now(tz)
    collection.insert_one(res)
    res['runs'].reverse()
    for i, run in enumerate(res['runs']):
        old_run = find_run_by_arn(data_runs, run['arn'])
        # Presumably guards against notifying for runs that were already
        # complete when first seen — TODO confirm ordering after reverse().
        if old_run is None and i < 2 and run['status'] == 'COMPLETED':
            continue
        if run['status'] != 'COMPLETED':
            continue
        # FIX: `is None` instead of `== None` — identity test for the
        # missing-run case (PEP 8; `==` can misfire on objects overriding
        # __eq__).
        if old_run is None or run['status'] != old_run['status']:
            notify_to_slack(run)
def test_diff_seq_objects():
    """Custom sequence types diff like lists, rendered with the class name."""
    class FooSeq(object):
        def __init__(self, list):
            self.list = list

        def __len__(self):
            return len(self.list)

        def __iter__(self):
            return iter(self.list)

        def __getitem__(self, x):
            return self.list[x]

    d = diff(FooSeq([1]), FooSeq([1, 2]), fromfile="x", tofile="y")
    expected = dedent(
        """\
        --- x
        +++ y
        FooSeq([
        @@ -0 +0,1 @@
         1,
        +2,
        ])"""
    )
    assert_equal(str(d), expected)
def test_all_builders_complete():
    """test when parse.complete is true creates correct structure"""
    log = "mozilla-central_fedora-b2g_test-crashtest-1-bm54-tests1-linux-build50"
    url = "file://{0}".format(SampleData().get_log_path(
        "{0}.txt.gz".format(log)))
    lpc = ArtifactBuilderCollection(url, )
    # Mark every parser of every builder finished so parse() only emits the
    # empty skeleton.
    for builder in lpc.builders:
        for parser in builder.parsers:
            parser.complete = True
    lpc.parse()
    exp = {
        "text_log_summary": {
            "header": {},
            "step_data": {
                "all_errors": [],
                "steps": [],
                "errors_truncated": False
            },
        },
        "Job Info": {
            "job_details": []
        }
    }
    act = lpc.artifacts
    # we can't compare the "logurl" field, because it's a fully qualified url,
    # so it will be different depending on the config it's run in.
    assert "logurl" in act["text_log_summary"]
    assert "logurl" in act["Job Info"]
    del (act["Job Info"]["logurl"])
    del (act["text_log_summary"]["logurl"])
    assert exp == lpc.artifacts, diff(exp, lpc.artifacts)
def _error(p, r):
    """Report a primary/replica row mismatch; optionally abort the process."""
    id_ = p['id']
    print(Fore.RED, '\bRows at id = {} are different'.format(id_), Fore.RESET)
    print(diff(p, r))
    _debug2('primary: {}'.format(p))
    _debug2('replica: {}'.format(r))
    # Opt-in hard stop (e.g. for CI): any non-empty value triggers it.
    if os.getenv('EXIT_ON_ERROR'):
        exit(1)
def diff(self, left, right, fromfile, tofile):
    """Return a unicode datadiff of left vs right, or None when the values
    cannot be diffed (Python 2: uses `unicode`)."""
    import datadiff
    try:
        diff = datadiff.diff(left, right, fromfile=fromfile, tofile=tofile)
    except (datadiff.DiffNotImplementedForType, datadiff.DiffTypeError):
        # Undiffable types: fall through and implicitly return None.
        pass
    else:
        return unicode(diff)
def assertDictEqual(self, d1, d2):
    """
    Show the differences between the dicts in a human readable way.

    It's just a helper for debugging API responses.
    """
    import datadiff
    # The diff is passed as the assertion message; note it is computed
    # eagerly even when the dicts are equal.
    return super().assertDictEqual(d1, d2, datadiff.diff(d1, d2))
def test_api_meta_data(self):
    """Does the content of the search API have the right meta data?"""
    self.client.login(username='******', password='******')
    # Map endpoint name -> extra query params and the API versions exposing it.
    api_endpoints = {
        'audio': {'params': '', 'api_versions': ('v2',)},
        'citation': {'params': '', 'api_versions': ('v1', 'v2',)},
        'cited-by': {'params': '&id=1', 'api_versions': ('v1', 'v2',)},
        'cites': {'params': '&id=1', 'api_versions': ('v1', 'v2',)},
        'docket': {'params': '', 'api_versions': ('v2',)},
        # opinion --> document in v2.
        'opinion': {'params': '', 'api_versions': ('v1',)},
        'document': {'params': '', 'api_versions': ('v2',)},
        'jurisdiction': {'params': '', 'api_versions': ('v1', 'v2',)},
        'search': {'params': '', 'api_versions': ('v1', 'v2',)},
    }
    # Alphabetical ordering makes the tests run consistently
    api_endpoints_ordered = OrderedDict(sorted(
        api_endpoints.items(), key=lambda t: t[0])
    )
    for endpoint, endpoint_dict in api_endpoints_ordered.iteritems():
        for v in endpoint_dict['api_versions']:
            r = self.client.get('/api/rest/%s/%s/?format=json%s' %
                                (v, endpoint, endpoint_dict['params']))
            actual = simplejson.loads(r.content)
            # Fixture file with the expected response per version/endpoint.
            with open(
                    os.path.join(
                        settings.INSTALL_ROOT, 'alert', 'search', 'test_assets',
                        'api_%s_%s_test_results.json' % (v, endpoint)
                    ), 'r') as f:
                correct = simplejson.load(f)
                # Strip machine/run-specific fields before comparing.
                actual, correct = self.strip_varying_data(
                    endpoint, actual, correct
                )
                msg = "Response from API did not match expected " \
                      "results (api version: %s, endpoint: %s):\n%s" % (
                          v, endpoint,
                          diff(actual, correct, fromfile='actual', tofile='correct')
                      )
                self.assertEqual(
                    actual, correct,
                    msg=msg,
                )
def _assert_same(input_, output):
    """Assert both collections contain exactly the same members."""
    for item in input_:
        if item not in output:
            # Print the structural diff before failing, for debuggability.
            print(datadiff.diff(input_, output))
            assert item in output
    # Same members AND same cardinality.
    assert len(input_) == len(output)
def _properties_differ(app_properties, app_summary):
    """Compares application's properties from appstack to those taken from
    a live environment.

    Args:
        app_properties (dict): Application's properties from appstack.
        app_summary (dict): Application's properties from Cloud Foundry.

    Returns:
        bool: True if one of the properties present in `app_properties` is
            different in `app_summary`. False otherwise.
    """
    for key, value in app_properties.items():
        if key == 'env':
            # Appstack env must be a subset of the live environment_json.
            if not _dict_is_part_of(app_summary['environment_json'], value):
                _log.info("Differences in application's env:\n%s",
                          datadiff.diff(app_summary['environment_json'], value,
                                        fromfile='live env', tofile='appstack'))
                return True
        elif key in ('disk_quota', 'memory'):
            # Values in the manifest will be strings and have suffix M, MB, G or GB,
            # while values in summary will be ints specifying the number of megabytes.
            megabytes_in_properties = _normalize_to_megabytes(value)
            if megabytes_in_properties != app_summary[key]:
                _log.info("Difference in application's %s field: %s (live env) vs. %s (appstack).",
                          key, app_summary[key], megabytes_in_properties)
                return True
        elif key == 'services':
            # Every appstack service must already be bound in the live env.
            summary_services = [service['name'] for service in app_summary[key]]
            if not set(value).issubset(set(summary_services)):
                _log.info("Difference in application's services: \n%s",
                          datadiff.diff(app_summary[key], value,
                                        fromfile='live env', tofile='appstack'))
                return True
        elif key == 'host':
            summary_hosts = [route['host'] for route in app_summary['routes']]
            if value not in summary_hosts:
                _log.info("Application's hosts in live env don't contain %s.", value)
                return True
        else:
            # Any other property: direct equality comparison.
            if value != app_summary[key]:
                _log.info("Difference in application's %s field: %s (live env) vs. %s (appstack).",
                          key, app_summary[key], value)
                return True
    return False
def diff(self, other): """Get a naive diff between two derivations, just comparing their dictionary representation.""" selfdict, otherdict = vars(self), vars(other) # Convert outputs to a format that doesn't include the output # file path, since we know this will be different if the two # derivations are different. selfdict["outputs"] = list(sorted(selfdict["outputs"].keys())) otherdict["outputs"] = list(sorted(otherdict["outputs"].keys())) return datadiff.diff(selfdict, otherdict)
def test_when_enabled(self):
    """When datadiff support is enabled, the assertion message embeds the
    diff (Python 2)."""
    if TDDSPRY_USE_DATADIFF:
        if not datadiff:
            # datadiff not installed: behaves exactly like the disabled case.
            self.test_when_disabled(True)
        else:
            try:
                self.assert_equal(TEST_BAR, TEST_FOO)
            except AssertionError, e:
                # The failure message must be a newline plus the datadiff text.
                diff = datadiff.diff(TEST_BAR, TEST_FOO)
                self.assert_unicode(e, "\n" + smart_str(diff))
def test_diff_multiline_strings():
    """Multi-line strings diff line-by-line like a classic unified diff."""
    d = diff('abc\ndef\nghi', 'abc\nghi', fromfile="x", tofile="y")
    # Context lines carry a single leading space.
    expected = dedent('''\
        --- x
        +++ y
        @@ -1,3 +1,2 @@
         abc
        -def
         ghi''')
    assert_equal(str(d), expected)
def assert_equal(first, second, msg=None):
    """Raise AssertionError unless ``first == second``.

    When no message is supplied, a datadiff of the two values is used as the
    failure message; if the types cannot be diffed, fall back to a plain
    ``a != b`` repr message.
    """
    if first == second:
        return
    if msg is None:
        try:
            delta = diff(first, second)
        except DiffTypeError:
            msg = '%r != %r' % (first, second)
        else:
            msg = "\n" + str(delta)
    raise AssertionError(msg)
def test_datadiff_assert_equal(self):
    """datadiff_assert_equal only exists when datadiff is installed, and its
    failure message embeds the diff (Python 2)."""
    if not datadiff:
        self.assert_false(hasattr(self, "datadiff_assert_equal"))
    else:
        # Equal values: must pass silently.
        self.datadiff_assert_equal(TEST_BAR, TEST_BAR)
        try:
            self.datadiff_assert_equal(TEST_BAR, TEST_FOO)
        except AssertionError, e:
            # The failure message must be a newline plus the datadiff text.
            diff = datadiff.diff(TEST_BAR, TEST_FOO)
            self.assert_unicode(e, "\n" + smart_str(diff))
def test_tuple():
    """Tuples diff like lists but render with parentheses."""
    d = diff((1,2), (1,3), fromfile="x", tofile="y")
    # Context lines carry a single leading space.
    expected = dedent('''\
        --- x
        +++ y
        (
        @@ -0,1 +0,1 @@
         1,
        -2,
        +3,
        )''')
    assert_equal(str(d), expected)
def test_diff_dict_complex():
    """A removed key with a nested dict value renders on a single '-' line."""
    a = dict(a=1, b=dict(foo='bar'))
    b = dict(a=1)
    d = diff(a, b, fromfile="x", tofile="y")
    expected = dedent('''\
        --- x
        +++ y
        {
         'a': 1,
        -'b': {'foo': 'bar'},
        }''')
    assert_equal(str(d), expected)
def test_diff_unicode_vs_str(): d = diff(u'abc\ndef\nghi', b'abc\nghi', fromfile="x", tofile="y") # accommodate python 2.6 having trailing spaces after --- and +++ lines d = '\n'.join(line.rstrip() for line in d.split('\n')) expected = dedent('''\ --- x +++ y @@ -1,3 +1,2 @@ abc -def ghi''') assert_equal(str(d), expected)
def test_seq_compare_with_func_diff2():
    """compare_with_func handles removal down to an empty sequence."""
    a = [1]
    b = []
    d = diff(a, b, fromfile="x", tofile="y", compare_with_func=True)
    expected = dedent('''\
        --- x
        +++ y
        [
        @@ -0 +0 @@
        -1,
        ]''')
    assert_equal(str(d), expected)
def test_diff_multiline_strings(): d = diff('abc\ndef\nghi', 'abc\nghi', fromfile="x", tofile="y") # accommodate python 2.6 having trailing spaces after --- and +++ lines d = '\n'.join(line.rstrip() for line in d.split('\n')) expected = dedent('''\ --- x +++ y @@ -1,3 +1,2 @@ abc -def ghi''') assert_equal(str(d), expected)
def compare_two_documents(kls, doc1, doc2):
    """Compare two documents by converting them into json objects and back to strings and compare

    Generator: yields nothing when the documents are equivalent, otherwise
    yields a parse-error message or the diff lines one at a time.
    """
    first = doc1
    if isinstance(doc1, six.string_types):
        try:
            first = json.loads(doc1)
        except (ValueError, TypeError) as error:
            log.warning("Failed to convert doc into a json object\terror=%s", error)
            yield error.args[0]
            return
    second = doc2
    if isinstance(doc2, six.string_types):
        try:
            second = json.loads(doc2)
        except (ValueError, TypeError) as error:
            log.warning("Failed to convert doc into a json object\terror=%s", error)
            yield error.args[0]
            return

    # Ordering the principals because the ordering amazon gives me hates me
    def sort_statement(statement):
        # Sort list-valued principal entries in place.
        for principal in (statement.get("Principal", None), statement.get("NotPrincipal", None)):
            if principal:
                for principal_type in ("AWS", "Federated", "Service"):
                    if principal_type in principal and type(principal[principal_type]) is list:
                        principal[principal_type] = sorted(principal[principal_type])

    def sort_key(statement, key):
        # Sort a list-valued statement entry in place.
        if key in statement and type(statement[key]) is list:
            statement[key] = sorted(statement[key])

    # Normalise both documents so ordering differences don't produce a diff.
    for document in (first, second):
        if "Statement" in document:
            if type(document["Statement"]) is dict:
                sort_statement(document["Statement"])
                sort_key(document["Statement"], "Action")
                sort_key(document["Statement"], "NotAction")
                sort_key(document["Statement"], "Resource")
                sort_key(document["Statement"], "NotResource")
            else:
                for statement in document["Statement"]:
                    sort_statement(statement)
                    sort_key(statement, "Action")
                    sort_key(statement, "NotAction")
                    sort_key(statement, "Resource")
                    sort_key(statement, "NotResource")

    difference = diff(first, second, fromfile="current", tofile="new").stringify()
    if difference:
        lines = difference.split('\n')
        if not first or not second or first != second:
            for line in lines:
                yield line
def compare_two_documents(self, doc1, doc2):
    """Compare two documents by converting them into json objects and back to strings and compare

    Generator: yields nothing when the documents are equivalent or
    unparseable, otherwise yields the diff lines one at a time.
    """
    try:
        first = json.loads(doc1)
    except (ValueError, TypeError):
        # Unparseable input: silently yield nothing (best-effort compare).
        return
    try:
        second = json.loads(doc2)
    except (ValueError, TypeError):
        return

    # Ordering the principals because the ordering amazon gives me hates me
    def sort_statement(statement):
        # Sort list-valued principal entries in place.
        for principal in (statement.get("Principal", None),
                          statement.get("NotPrincipal", None)):
            if principal:
                for principal_type in ("AWS", "Federated", "Service"):
                    if principal_type in principal and type(
                            principal[principal_type]) is list:
                        principal[principal_type] = sorted(
                            principal[principal_type])

    def sort_key(statement, key):
        # Sort a list-valued statement entry in place.
        if key in statement and type(statement[key]) is list:
            statement[key] = sorted(statement[key])

    # Normalise both documents so ordering differences don't produce a diff.
    for document in (first, second):
        if "Statement" in document:
            if type(document["Statement"]) is dict:
                sort_statement(document["Statement"])
                sort_key(document["Statement"], "Action")
                sort_key(document["Statement"], "NotAction")
                sort_key(document["Statement"], "Resource")
                sort_key(document["Statement"], "NotResource")
            else:
                for statement in document["Statement"]:
                    sort_statement(statement)
                    sort_key(statement, "Action")
                    sort_key(statement, "NotAction")
                    sort_key(statement, "Resource")
                    sort_key(statement, "NotResource")

    difference = diff(first, second, fromfile="current", tofile="new").stringify()
    if difference:
        lines = difference.split('\n')
        # Only emit when a real hunk header (@@ ... @@) exists or a side is empty.
        if not first or not second or any(line.strip().startswith("@@") and
                                          line.strip().endswith("@@")
                                          for line in lines):
            for line in lines:
                yield line