def test_junosphere_olive():
    """Compile the multias topology for Junosphere (Olive platform) and
    compare the generated topology and router config against stored
    reference copies, logging an ndiff on mismatch."""
    config.settings["Junosphere"]["platform"] = "Olive"
    master_dir = resource_filename(__name__, "junosphere_olive")
    inet = AutoNetkit.internet.Internet("multias", junosphere=True)
    inet.compile()

    def assert_files_equal(test_path, master_path, skiplines=None):
        # "Ur" open mode was removed in Python 3.11; universal newline
        # translation is the default in text mode.  Context managers close
        # the handles (the original leaked them).
        with open(test_path) as f:
            test_file = f.read()
        with open(master_path) as f:
            master_file = f.read()
        if skiplines:
            test_file = remove_skiplines(test_file, skiplines)
            master_file = remove_skiplines(master_file, skiplines)
        if test_file != master_file:
            message = "".join(difflib.ndiff(test_file.splitlines(True),
                                            master_file.splitlines(True)))
            LOG.warning(message)  # warn() is a deprecated alias of warning()
            raise AssertionError(message)

    assert_files_equal(
        os.path.join(config.ank_main_dir, "junos_lab", "topology.vmm"),
        os.path.join(master_dir, "topology.vmm"))
    # Router config: lines matching "message" (e.g. MOTD banners) are
    # volatile and excluded from the comparison.
    assert_files_equal(
        os.path.join(config.junos_dir, "configset", "1c_AS1.conf"),
        os.path.join(master_dir, "1c_AS1.conf"),
        skiplines=["message"])
def test_dns():
    """Compile multias with hierarchical DNS enabled and compare the
    generated bind configuration files against stored reference copies."""
    try:
        config.settings['DNS']['hierarchical'] = True
        master_dir = (resource_filename(__name__, "dns"))
        inet = AutoNetkit.internet.Internet("multias", netkit=True)
        inet.compile()
    finally:
        # Always restore the global setting so later tests are unaffected.
        config.settings['DNS']['hierarchical'] = False

    def assert_bind_files_equal(host, filenames):
        # "Ur" open mode was removed in Python 3.11; context managers
        # close the handles (the original leaked them).
        for name in filenames:
            f_test = os.path.join(config.lab_dir, host, "etc", "bind", name)
            with open(f_test) as f:
                test_file = f.read()
            with open(os.path.join(master_dir, host, "etc", "bind", name)) as f:
                master_file = f.read()
            if test_file != master_file:
                message = ''.join(difflib.ndiff(test_file.splitlines(True),
                                                master_file.splitlines(True)))
                LOG.warning(message)  # warn() is a deprecated alias
                raise AssertionError(message)

    # Root DNS server files, then level-3 DNS server files.
    assert_bind_files_equal("rootdns1_AS2", ["db.root", "named.conf"])
    assert_bind_files_equal(
        "l31dns1_AS1",
        ["db.0.10.in-addr.arpa", "db.AS1", "db.root", "named.conf"])
def diff_only_removes_notebook_or_bokeh(self, old):
    """Check whether the diff is exclusively removing 'bokeh' or 'notebook'.

    This is used for a hack, because we can auto-add 'bokeh' or 'notebook'
    packages when we conda-kapsel init, and that alone shouldn't result
    in being out of sync with the environment.yml.
    """
    def changed_lines(before, after):
        # Keep only the '+ '/'- ' lines of the ndiff, dropping context.
        return [entry for entry in difflib.ndiff(before, after)
                if entry.startswith("- ") or entry.startswith("+ ")]

    conda_changes = changed_lines(old.conda_packages, self.conda_packages)
    # Removals of these two packages are tolerated (see docstring).
    for ignorable in ("- bokeh", "- notebook"):
        if ignorable in conda_changes:
            conda_changes.remove(ignorable)
    if conda_changes:
        return False
    if changed_lines(old.channels, self.channels):
        return False
    if changed_lines(old.pip_packages, self.pip_packages):
        return False
    return True
def test_dumps():
    """Compile the multias topology, dump its state, and compare the dump
    files against stored reference copies."""
    master_dir = (resource_filename(__name__, "comparisons"))
    inet = AutoNetkit.internet.Internet("multias")
    inet.compile()
    inet.dump()

    def assert_dump_equal(name):
        # "Ur" open mode was removed in Python 3.11; plain text mode
        # already applies universal newline translation, and the context
        # managers close the handles (the original leaked both).
        with open(os.path.join(config.log_dir, name)) as f:
            test_file = f.read()
        with open(os.path.join(master_dir, name)) as f:
            master_file = f.read()
        if test_file != master_file:
            message = ''.join(difflib.ndiff(test_file.splitlines(True),
                                            master_file.splitlines(True)))
            LOG.warning(message)  # warn() is a deprecated alias
            raise AssertionError(message)

    assert_dump_equal("bgp.txt")
    assert_dump_equal("physical.txt")
def getDifferences(scoreMatrix, speciesName, threshold):
    '''
    Given a pairwise distance matrix and the corresponding list of names,
    return the pairs of names whose (Levenshtein) distance is within
    ``threshold``, together with their character-level differences.

    Parameters
    ----------
    scoreMatrix : 2-D sequence of pairwise distances indexed like speciesName
    speciesName : list of strings the matrix rows/columns refer to
    threshold : maximum distance for two names to count as a pair

    Returns
    -------
    namePairs : list of [shorter, longer] name pairs with distance
        <= threshold (on a length tie the higher-index name comes first,
        matching the original ordering)
    differenceList : list of tuples of ndiff change markers ('-'/'+'/'?'
        entries) between the two names of each pair
    '''
    namePairs = []
    # Only walk the upper triangle (idx < idx2) so each pair is seen once.
    for idx, row in enumerate(scoreMatrix):
        for idx2 in range(idx + 1, len(row)):
            if row[idx2] <= threshold:
                if len(speciesName[idx]) < len(speciesName[idx2]):
                    namePairs.append([speciesName[idx], speciesName[idx2]])
                else:
                    namePairs.append([speciesName[idx2], speciesName[idx]])

    differenceList = []
    for shorter, longer in namePairs:
        # Pairs are stored shorter-first, so diff shorter -> longer
        # (case-insensitively); keep only the lines that mark a change.
        difference = difflib.ndiff(shorter.lower(), longer.lower())
        differenceList.append(tuple(d for d in difference if d[0] != ' '))
    return namePairs, differenceList
def diff_from(self, old):
    """A string showing the comparison between this env spec and another one."""
    # ndiff each section; a section list is empty only when both sides are.
    diffs = {
        name: list(difflib.ndiff(getattr(old, name), getattr(self, name)))
        for name in ("channels", "conda_packages", "pip_packages")
    }
    pip_section = diffs["pip_packages"]
    if pip_section:
        pip_section = [" pip:"] + [" " + entry for entry in pip_section]
    channel_section = diffs["channels"]
    if channel_section:
        channel_section = [" channels:"] + [" " + entry for entry in channel_section]
    return "\n".join(channel_section + diffs["conda_packages"] + pip_section)
def get_sitelevel_changes(self, file_name):
    """Parse a debug log file for site-level name lists and build a dict
    mapping each original site name to its changed name.

    Reads every '<prefix>list: ...' entry from the log into
    self.log_dict (skipping 'siteid' entries), then aligns the
    'sitelevels_list' names with the 'sitelevels_changed_site_list'
    names via a character-level ndiff round trip.

    Returns a dict {original_site_name: changed_site_name}.
    """
    with open(file_name) as f:
        for i, ln in enumerate(f.readlines()):
            # DEBUG log data
            # (Contain data for site level names and/or data)
            site_level_obj = re.search('\s(\w*)list:\s(.*[^\s+])', ln)
            if site_level_obj is not None:
                sitelevels = site_level_obj.group().strip().split(':')
                if sitelevels[1].strip()[0:6] == 'siteid':
                    pass
                else:
                    self.log_dict[
                        'sitelevels_{}'.format(sitelevels[0])] = sitelevels[1].strip()
            else:
                pass
    f.close()  # NOTE(review): redundant -- the with-block already closed f
    site_to = self.log_dict['sitelevels_changed_site_list']
    site_to_list = site_to.split()
    site_from = self.log_dict['sitelevels_list']
    if ' or ' in site_from:
        print('in or block')
        # Protect a literal ' or ' inside a site name from whitespace
        # splitting, then restore the spaces afterwards.
        # NOTE(review): the re.sub below also rewrites any pre-existing
        # underscores in names -- confirm that cannot occur.
        site_from = site_from.replace(' or ', '_or_')
        site_from_list = site_from.split()
        site_from_list = [re.sub('_', ' ', x) for x in site_from_list]
    else:
        site_from_list = site_from.split()
    original_site_list = [None]*len(site_to_list)
    indexer = 0
    # Character-level round trip through ndiff/restore to re-align the
    # original names against the changed names.
    changed = list(difflib.restore(difflib.ndiff(site_to, site_from), 1))
    revert = list(difflib.restore(difflib.ndiff(site_to, site_from), 2))
    print(changed)
    print(revert)
    if len(changed) == len(revert):
        # Rebuild each original name character by character, advancing to
        # the next slot on every space in the changed stream.
        # NOTE(review): `word` can be referenced before assignment if the
        # first characters do not hit the i == 0 branch -- confirm inputs.
        for i, s in enumerate(changed):
            if i == 0 or s == ' ':
                word = revert[i]
                if i == 0:
                    original_site_list[indexer] = word
                if s == ' ':
                    indexer += 1
            elif i > 0:
                word = word + revert[i]
                original_site_list[indexer] = word
        original_site_list = [x.strip() for x in original_site_list]
        site_from_list = original_site_list
    elif len(site_to_list) == len(site_from_list):
        pass
    change_dict = {}
    for i, value in enumerate(site_from_list):
        change_dict[value] = site_to_list[i]
    return change_dict
def processObject(tempObj):
    """Insert or refresh *tempObj* in the in-memory cache and queue an
    edgeCacheObject describing the change for the edge cache.

    ObjectsOnMem maps url -> [location, object, filename, last-modified
    epoch, hash] (layout inferred from the indexing below -- confirm).
    """
    global writeToDiskMem, ObjectsOnMem, edgeCache
    if tempObj.url in ObjectsOnMem:
        if tempObj.hash != ObjectsOnMem[tempObj.url][4]:
            # since there was a change, we need to calculate and update the rate of change and also update the max-age in the headers
            getRateOfChange (tempObj, ObjectsOnMem[tempObj.url][3]) #[3] last modified date
            if ObjectsOnMem[tempObj.url][0] == 'memory':
                if tempObj.isText:
                    # NOTE(review): str/bytes have no .newlines() method --
                    # this looks like it should be .splitlines(1); confirm
                    # against the actual content type stored here.
                    diff = difflib.ndiff(ObjectsOnMem[tempObj.url][1].content.newlines(1), tempObj.content.newlines(1))
                    edgeObject=edgeCacheObject(tempObj.headers, tempObj.url, diff, tempObj.status, True)
                else:
                    edgeObject=edgeCacheObject(tempObj.headers, tempObj.url, tempObj.content, tempObj.status, False)
            elif tempObj.isText:
                # Previous copy lives on disk: read it back to compute the diff.
                # NOTE(review): the handle is never closed; same .newlines()
                # concern as above.
                contentFile = open ('cache/'+ObjectsOnMem[tempObj.url][2], 'rb')
                content = contentFile.read()
                diff = difflib.ndiff(content.newlines(1), tempObj.content.newlines(1))
                edgeObject=edgeCacheObject(tempObj.headers, tempObj.url, diff, tempObj.status, True)
            else:
                edgeObject=edgeCacheObject(tempObj.headers, tempObj.url, tempObj.content, tempObj.status, False)
            # 1. update the memory with the new content
            del ObjectsOnMem[tempObj.url][1]
            ObjectsOnMem[tempObj.url].insert(1, tempObj)
            ObjectsOnMem[tempObj.url][0] = 'memory'
            ObjectsOnMem[tempObj.url][3] = int(time.time())
            ObjectsOnMem[tempObj.url][4] = tempObj.hash
            if tempObj.isText:
                # 2. update the object on disk (only text based files are stored)
                writeToDiskMem.append(tempObj)
        else:
            # Unchanged object: nothing to update and nothing to forward.
            return
    else:
        # new object, add it to memory
        if tempObj.isText:
            filename=cacheFileName()
            ObjectsOnMem[tempObj.url] = ['memory', tempObj, filename, int(time.time()), tempObj.hash]
            tempObj.fileName=filename
            # 2. update the object on disk (only text based files are stored)
            writeToDiskMem.append(tempObj)
        else:
            ObjectsOnMem[tempObj.url] = ['memory', None, "", int(time.time()),tempObj.hash]
        edgeObject=edgeCacheObject(tempObj.headers, tempObj.url, tempObj.content, tempObj.status, False)
    # 3. Send the object content/diff to edge cache
    edgeCache.append(edgeObject)
def test_store_passive_message(self):
    """JRPC test: services.communiations.storePassiveMessage (2)

    Simple test for validating the storage of passive messages.
    """
    if self.__verbose_testing:
        print('>>> test_store_passive_message')

    # First stored message is expected to get primary key 1.
    self.assertEqual(
        comms_jrpc.store_passive_message(
            groundstation_id=self.__gs_1_id,
            timestamp=misc.get_utc_timestamp(misc.get_now_utc()),
            doppler_shift=0.0,
            message=db_tools.MESSAGE_BASE64
        ),
        1,
        'Message ID expected not to be none'
    )

    # The stored base64 message must round-trip through the DB unchanged.
    message = comms_models.PassiveMessage.objects.get(pk=1).message
    self.assertEqual(
        db_tools.MESSAGE_BASE64.decode(), message,
        'In-database stored message differs, diff = ' + str(
            difflib.ndiff(db_tools.MESSAGE_BASE64.decode(), message))
    )

    if self.__verbose_testing:
        print('>>> message_1 (RAW) = ' + str(message))
        print('>>> message_1 (STR) = ' + str(base64.b64decode(message)))

    # Storing the same payload again must create a second row (pk 2).
    self.assertEqual(
        comms_jrpc.store_passive_message(
            groundstation_id=self.__gs_1_id,
            timestamp=misc.get_utc_timestamp(misc.get_now_utc()),
            doppler_shift=0.0,
            message=db_tools.MESSAGE_BASE64
        ),
        2,
        'Message ID expected to be 2'
    )

    message = comms_models.PassiveMessage.objects.get(pk=2).message
    self.assertEqual(
        db_tools.MESSAGE_BASE64.decode(), message,
        'In-database stored message differs, diff = ' + str(
            difflib.ndiff(db_tools.MESSAGE_BASE64.decode(), message))
    )

    if self.__verbose_testing:
        print('>>> message_2 (RAW) = ' + str(message))
        print('>>> message_2 (STR) = ' + str(base64.b64decode(message)))
def process_file(self, filename):
    """Align the .txt word list with the .wrd word list for *filename*.

    Runs the per-file parsers, then walks an ndiff of the normalized
    word sequences, inserting missing words into (or correcting words
    in) self._word_list so it matches self._wrd_list.  Prints a debug
    trace of every correction and dumps the remaining diff if the two
    sequences still disagree afterwards.
    """
    self.process_wrd_file(filename)
    self.process_pos_file(filename)
    self.process_transcript_file(filename)
    self.process_txt_file(filename)
    #print("Words in .wrd:", len(self._wrd_list))
    #print("Words in .txt: ", len(self._word_list))
    wrds = [self.normalize(x) for x, time in self._wrd_list]
    txt = [self.normalize(x) for x in self._word_list]
    # Drop '?' guide lines and blank entries; what remains is one entry
    # per word with a ' ', '-' or '+' marker.
    diff = [x for x in difflib.ndiff(wrds, txt)
            if not x.startswith("?") and x.strip()]
    wrds_counter = 0
    txt_counter = 0
    if len(diff) != len(txt):  # '!=' replaces the Python-2-only '<>'
        print(filename)
    for i, x in enumerate(diff):
        if x.startswith("-"):
            # Word present in .wrd but absent/different in .txt.  Guard
            # against running off the end of the diff (the original
            # indexed diff[i+1] unconditionally -> IndexError on a
            # trailing deletion).
            next_is_add = i + 1 < len(diff) and diff[i + 1].startswith("+")
            if not next_is_add:
                # Pure deletion in the txt stream: insert the .wrd word.
                print("Insert '{}' at {}".format(self._wrd_list[wrds_counter][0], txt_counter))
                self._word_list.insert(txt_counter, self._wrd_list[wrds_counter][0])
                txt_counter += 1
                wrds_counter += 1
            else:
                # A '-' immediately followed by '+': a substitution.
                print("Change from '{}' to '{}'".format(self._word_list[txt_counter], self._wrd_list[wrds_counter][0]))
                self._word_list[txt_counter] = self._wrd_list[wrds_counter][0]
        else:
            txt_counter += 1
            wrds_counter += 1
    # Re-diff after the fixes and report anything still unaligned.
    # (A stray bare name `asd` -- a guaranteed NameError -- and the
    # unused fields/code/word parsing were removed from the original.)
    wrds = [self.normalize(x) for x, time in self._wrd_list]
    txt = [self.normalize(x) for x in self._word_list]
    diff = list(difflib.ndiff(wrds, txt))
    if len(diff) != len(txt):
        print(filename)
        for i, x in enumerate(diff):
            print(x)
def test_dynagen():
    """Compile the multias topology for Dynagen and compare the generated
    lab file and router config against stored reference copies."""
    config.settings["Dynagen"] = {
        "image": "/space/c7200-is-mz.124-19.image",
        "working dir": "/home/autonetkit/",
        "model": 7200,
        "interfaces": [
            "Ethernet0/0", "Ethernet0/1",
            "Ethernet1/0", "Ethernet1/1",
            "Ethernet2/0", "Ethernet2/1",
            "Ethernet3/0", "Ethernet3/1",
            "Ethernet4/0", "Ethernet4/1",
            "Ethernet5/0", "Ethernet5/1",
            "Ethernet6/0", "Ethernet6/1",
        ],
        "Slots": {"slot1": "PA-2FE-TX", "slot2": "PA-2FE-TX"},
        "Options": {"idlepc": "0x6085af60", "ram": 128},
        "Hypervisor": {"server": "127.0.0.1", "port": 7202},
    }
    master_dir = resource_filename(__name__, "dynagen")
    inet = AutoNetkit.internet.Internet("multias", dynagen=True)
    inet.compile()

    def assert_files_equal(test_path, master_path):
        # "Ur" open mode was removed in Python 3.11; text mode already
        # applies universal newline translation.  Context managers close
        # the handles (the original leaked them).
        with open(test_path) as f:
            test_file = f.read()
        with open(master_path) as f:
            master_file = f.read()
        if test_file != master_file:
            message = "".join(difflib.ndiff(test_file.splitlines(True),
                                            master_file.splitlines(True)))
            LOG.warning(message)  # warn() is a deprecated alias
            raise AssertionError(message)

    assert_files_equal(os.path.join(config.dynagen_dir, "lab.net"),
                       os.path.join(master_dir, "lab.net"))
    assert_files_equal(os.path.join(config.dynagen_dir, "configs", "1c_AS1.conf"),
                       os.path.join(master_dir, "1c_AS1.conf"))
def compare_dump_files(message, label, expected, actual, ignore_uuid=False, expect_content_length_always=False, ignore_empty_prop_sections=False, ignore_number_of_blank_lines=False):
    """Parse two dump files EXPECTED and ACTUAL, both of which are lists
    of lines as returned by run_and_verify_dump, and check that the same
    revisions, nodes, properties, etc. are present in both dumps.

    The ignore_*/expect_* flags normalize both parsed dumps before the
    comparison (UUIDs, implied zero content lengths, empty property
    sections, and blank-line counts).  Raises svntest.Failure with an
    ndiff of the parsed structures on mismatch.

    NOTE: Python 2 module (print statements below).
    """
    parsed_expected = DumpParser(expected).parse()
    parsed_actual = DumpParser(actual).parse()

    if ignore_uuid:
        # Neutralize the UUIDs so they compare equal.
        parsed_expected['uuid'] = '<ignored>'
        parsed_actual['uuid'] = '<ignored>'

    # Normalize each node action record in both parsed dumps.
    for parsed in [parsed_expected, parsed_actual]:
        for rev_name, rev_record in parsed.items():
            #print "Found %s" % (rev_name,)
            if 'nodes' in rev_record:
                #print "Found %s.%s" % (rev_name, 'nodes')
                for path_name, path_record in rev_record['nodes'].items():
                    #print "Found %s.%s.%s" % (rev_name, 'nodes', path_name)
                    for action_name, action_record in path_record.items():
                        #print "Found %s.%s.%s.%s" % (rev_name, 'nodes', path_name, action_name)
                        if expect_content_length_always:
                            # A missing content length is equivalent to 0.
                            if action_record.get('content_length') == None:
                                #print 'Adding: %s.%s.%s.%s.%s' % (rev_name, 'nodes', path_name, action_name, 'content_length=0')
                                action_record['content_length'] = '0'
                        if ignore_empty_prop_sections:
                            # prop_length 10 is an empty property section
                            # ("PROPS-END\n"); drop it and shrink the
                            # content length accordingly.
                            if action_record.get('prop_length') == '10':
                                #print 'Removing: %s.%s.%s.%s.%s' % (rev_name, 'nodes', path_name, action_name, 'prop_length')
                                action_record['prop_length'] = None
                                del action_record['props']
                                old_content_length = int(action_record['content_length'])
                                action_record['content_length'] = str(old_content_length - 10)
                        if ignore_number_of_blank_lines:
                            action_record['blanks'] = 0

    if parsed_expected != parsed_actual:
        # Show the raw diff for context, then fail with the parsed diff.
        print 'DIFF of raw dumpfiles (including expected differences)'
        print ''.join(ndiff(expected, actual))
        raise svntest.Failure('DIFF of parsed dumpfiles (ignoring expected differences)\n'
                              + '\n'.join(ndiff(
                                  pprint.pformat(parsed_expected).splitlines(),
                                  pprint.pformat(parsed_actual).splitlines())))
def compare_events(self, line_offset, events, expected_events):
    """Assert that the traced *events*, after shifting their line numbers
    down by *line_offset*, equal *expected_events*; fail with an ndiff
    of the two event lists otherwise."""
    normalized = [(line - line_offset, event) for (line, event) in events]
    if normalized == expected_events:
        return
    diff_lines = difflib.ndiff([str(e) for e in expected_events],
                               [str(e) for e in normalized])
    self.fail("events did not match expectation:\n" + "\n".join(diff_lines))
def func(args):
    """Print a diff between args.FILE1 and args.FILE2 to stdout, in the
    format selected by the args flags (unified / ndiff / html, otherwise
    context diff), with args.lines lines of context."""
    # Get modification times
    fromdate = time.ctime(os.stat(args.FILE1).st_mtime)
    todate = time.ctime(os.stat(args.FILE2).st_mtime)

    # Read both files.  'U' mode was removed in Python 3.11 (universal
    # newlines are the text-mode default).  On error we now return after
    # reporting -- the original fell through and crashed with a NameError
    # on the undefined fromlines/tolines.
    try:
        with open(args.FILE1) as fd:
            fromlines = fd.readlines()
    except IOError:
        print("Error opening file " + args.FILE1, file=sys.stderr)
        return
    try:
        with open(args.FILE2) as fd:
            tolines = fd.readlines()
    except IOError:
        print("Error opening file " + args.FILE2, file=sys.stderr)
        return

    # Create diff
    if args.unified:
        diff = difflib.unified_diff(fromlines, tolines, args.FILE1, args.FILE2, fromdate, todate, n=args.lines)
    elif args.ndiff:
        diff = difflib.ndiff(fromlines, tolines)
    elif args.html:
        diff = difflib.HtmlDiff().make_file(fromlines, tolines, args.FILE1, args.FILE2, context=args.context, numlines=args.lines)
    else:
        diff = difflib.context_diff(fromlines, tolines, args.FILE1, args.FILE2, fromdate, todate, n=args.lines)

    # we're using writelines because diff is a generator
    sys.stdout.writelines(diff)
def getVersionDiff( self, fromDate, toDate ):
    """Fetch two compressed configuration versions over RPC and return an
    ndiff generator of their decompressed lines; return an empty list if
    the retrieval fails."""
    retVal = self.rpcClient.getVersionContents( [ fromDate, toDate ] )
    if not retVal[ 'OK' ]:
        return []
    # Decompress both payloads before the line-wise comparison.
    fromData = zlib.decompress( retVal[ 'Value' ][0] )
    toData = zlib.decompress( retVal[ 'Value' ][1] )
    return difflib.ndiff( fromData.split( "\n" ), toData.split( "\n" ) )
def side_by_side_diff(old, new):
    """Build a compact word-level diff of two strings, returning the pair
    (left, right): deletions appear on the left, insertions on the
    right, unchanged words on both sides."""
    left, right = [], []

    def emit(tag, text):
        # <ins> goes right, <del> goes left, anything else to both sides.
        if tag == '<ins>':
            right.append(text)
        elif tag == '<del>':
            left.append(text)
        else:
            left.append(text)
            right.append(text)

    # Generates a compact word diff: consecutive words with the same
    # change marker are merged into a single tagged run.
    run_open, run_close = None, ''
    run_text = ''
    for item in difflib.ndiff(old.split(), new.split()):
        tag = tags.get(item[0])
        if not tag:
            continue
        if run_open == tag[0]:
            # Same kind of change as the previous word: extend the run.
            run_text += item[1:]
        else:
            # Close out the previous run, then start a new one.
            emit(run_open, run_text + run_close)
            run_open, run_close = tag
            run_text = run_open + item[1:]
    emit(run_open, run_text + run_close)
    return " ".join(left), " ".join(right)
def diff_check(file_one, file_two):
    """Print an ndiff of two files: removed lines as-is, added lines
    indented with a tab; unchanged and guide lines are suppressed."""
    # Context managers close both handles (the original leaked them), and
    # the added-line branch now uses the print() function -- the original
    # used a Python-2-only print statement.
    with open(file_one, 'r') as f_one, open(file_two, 'r') as f_two:
        for line in ndiff(f_one.readlines(), f_two.readlines()):
            if line.startswith('-'):
                print(line)
            elif line.startswith('+'):
                print('\t{0}'.format(line))
def report_diff_values(fileobj, a, b, ind=0):
    """Write a diff between two values to the specified file-like object.

    Floats are compared via repr; ndarray pairs are compared element-wise
    with at most the first three differing indices reported; anything
    else is diffed line-by-line through difflib.ndiff.  `ind` is the
    indentation level handed to the (module-level) indent() helper.
    """
    if isinstance(a, float):
        a = repr(a)
    if isinstance(b, float):
        b = repr(b)
    if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
        diff_indices = np.where(a != b)
        # Product over the lengths of the per-axis index arrays.
        num_diffs = reduce(lambda x, y: x * y,
                           (len(d) for d in diff_indices),
                           1)
        # Recurse into at most the first three differing positions.
        # NOTE(review): izip is the Python-2 itertools spelling; on
        # Python 3 this would be plain zip.
        for idx in islice(izip(*diff_indices), 3):
            fileobj.write(indent(' at %r:\n' % list(idx), ind))
            report_diff_values(fileobj, a[idx], b[idx], ind=ind + 1)
        if num_diffs:
            # NOTE(review): printed even when num_diffs <= 3, which can
            # report a non-positive "more indices" count -- confirm intent.
            fileobj.write(indent(u' ...and at %d more indices.\n' % (num_diffs - 3), ind))
        return
    # Fallback: textual line diff, relabelling '-'/'+' as 'a>'/'b>'.
    for line in difflib.ndiff(str(a).splitlines(), str(b).splitlines()):
        if line[0] == '-':
            line = 'a>' + line[1:]
        elif line[0] == '+':
            line = 'b>' + line[1:]
        else:
            line = ' ' + line
        fileobj.write(indent(u' %s\n' % line.rstrip('\n'), ind))
def showCurrentDiff( self ):
    """Return an ndiff between the remote configuration (fetched and
    decompressed over RPC) and the local cfgData, or an empty list if
    the remote data cannot be retrieved."""
    retVal = self.rpcClient.getCompressedData()
    if not retVal[ 'OK' ]:
        return []
    remoteData = zlib.decompress( retVal[ 'Value' ] ).splitlines()
    localData = str( self.cfgData ).splitlines()
    return difflib.ndiff( remoteData, localData )
def test_import_bloggers():
    """Import `legacy_yaml` and verify the database contents."""
    import_bloggers(StringIO(legacy_yaml))
    bloggers = Blogger.query.order_by(Blogger.name).all()

    # We sort the data below to get a deterministic ordering, but
    # dictionaries in python 3 aren't ordered, so we need to supply
    # a custom key function:
    def keyfn(val):
        return sorted(list(val))

    # Re-shape the ORM rows into plain dicts/lists so they can be
    # compared directly against the expected literal below.
    actual = [
        {
            'display_name': blogger.name,
            'start_date': blogger.start_date,
            'blogs': sorted([
                {
                    'title': blog.title,
                    'page_url': blog.page_url,
                    'feed_url': blog.feed_url,
                }
                for blog in blogger.blogs
            ], key=keyfn),
        }
        for blogger in bloggers
    ]
    expected = [
        {
            'display_name': 'alice',
            # Hour is off from midnight, since we're parsing this as EDT.
            'start_date': datetime(2015, 4, 1, 4, 0),
            'blogs': [
                {
                    'title': 'Fun With Crypto',
                    'page_url': 'http://example.com/alice/blog.html',
                    'feed_url': 'http://example.com/alice/rss.xml',
                },
            ],
        },
        {
            'display_name': 'bob',
            'start_date': datetime(2015, 4, 8, 4, 0),
            'blogs': sorted([
                {
                    'title': 'Secret Messages',
                    'page_url': 'http://example.com/bob/secrets/blog.html',
                    'feed_url': 'http://example.com/bob/secrets/feed',
                },
                {
                    'title': 'Kittens',
                    'page_url': 'http://example.com/bob/kittens',
                    'feed_url': 'http://example.com/bob/kittens/feed.atom',
                },
            ], key=keyfn)
        },
    ]
    # On mismatch, the assertion message carries a line diff of the
    # pretty-printed structures to ease debugging.
    diff = difflib.ndiff(pformat(actual).split('\n'),
                         pformat(expected).split('\n'))
    assert actual == expected, \
        "Import differs from expected result: %s" % pformat(list(diff))
def main():
    """Command-line diff tool: compare two files in context (default),
    unified, ndiff, or HTML side-by-side format and write the result to
    stdout."""
    usage = "usage: %prog [options] fromfile tofile"
    parser = optparse.OptionParser(usage)
    parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
    parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
    parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
    parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
    parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    if len(args) != 2:
        parser.error("need to specify both a fromfile and tofile")

    n = options.lines
    fromfile, tofile = args
    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    todate = time.ctime(os.stat(tofile).st_mtime)
    # 'U' (universal-newlines) open mode was removed in Python 3.11;
    # universal newline translation is the text-mode default.  Context
    # managers close the handles, which the original leaked.
    with open(fromfile) as f:
        fromlines = f.readlines()
    with open(tofile) as f:
        tolines = f.readlines()

    if options.u:
        diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
    elif options.n:
        diff = difflib.ndiff(fromlines, tolines)
    elif options.m:
        diff = difflib.HtmlDiff().make_file(fromlines, tolines, fromfile, tofile, context=options.c, numlines=n)
    else:
        diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)

    sys.stdout.writelines(diff)
def _generate_chain_diff_iptables_commands(chain, old_chain_rules, new_chain_rules):
    """Generate iptables '-D <chain> <n>' / '-I <chain> <n> <rule>'
    statements that transform old_chain_rules into new_chain_rules,
    driven by an ndiff of the two rule lists."""
    # keep track of the old index because we have to insert rules
    # in the right position
    old_index = 1
    statements = []
    for line in difflib.ndiff(old_chain_rules, new_chain_rules):
        if line.startswith('?'):
            # skip ? because that's a guide string for intraline differences
            continue
        elif line.startswith('-'):  # line deleted
            statements.append('-D %s %d' % (chain, old_index))
            # since we are removing a line from the old rules, we
            # backup the index by 1
            old_index -= 1
        elif line.startswith('+'):  # line added
            # strip the chain name since we have to add it before the index
            rule = line[5:].split(' ', 1)[-1]
            # IptablesRule does not add trailing spaces for rules, so we
            # have to detect that here by making sure this chain isn't
            # referencing itself
            if rule == chain:
                rule = ''
            # rule inserted at this position
            statements.append('-I %s %d %s' % (chain, old_index, rule))
        # NOTE(review): this increment runs for unchanged and added lines
        # (deletions net to zero) so old_index tracks positions in the
        # evolving chain -- indentation reconstructed; confirm against
        # upstream neutron's iptables_manager.
        old_index += 1
    return statements
def print_stack_diff(conn, stack_name, tpl_file, config):
    """Render the local template and print a line ndiff against the live
    stack template; exits on missing stack name or live-template errors,
    prints nothing when the two templates are identical."""
    local_template, metadata, errors = gen_template(tpl_file, config)

    # The stack name may come from the CLI or from the template metadata.
    name_from_metadata = metadata.get('name', None) if metadata else None
    stack_name = stack_name or name_from_metadata
    if not stack_name:
        print('Stack name must be specified via command line argument or stack metadata.')
        sys.exit(1)

    # Local template errors are reported but not fatal.
    if errors:
        for err in errors:
            print('ERROR: ' + err)

    live_template, errors = get_stack_template(conn, stack_name)
    # Errors fetching the live template are fatal.
    if errors:
        for err in errors:
            print('ERROR: ' + err)
        sys.exit(1)

    if local_template == live_template:
        return
    for delta_line in difflib.ndiff(live_template.split('\n'),
                                    local_template.split('\n')):
        print(delta_line)
def _diffStrings(self, a, b, originalMessage): """Compares the conents of two strins Compares the contents of two strings, generates a difference between them, and appends that to the message passed in as originalMessage Inputs ------ a : the first string b : the second string originalMessage: the original message before the string content difference is added Returns ------- A message with the contents of orignalMessage followed by a summary of the differnece between the two strings.""" from WellBehavedPython.api import expect aList = a.split('\n'); bList = b.split('\n') generator = difflib.ndiff(aList, bList) message = originalMessage + "\nDifference is:" for line in generator: message += '\n' message += line return message
def CompareToGoldenLines(self, text, golden_lines):
    """Assert that *text*, split into lines (keeping line endings),
    matches golden_lines, attaching an ndiff to the failure message."""
    actual_lines = text.splitlines(1)
    diff_text = "".join(difflib.ndiff(golden_lines, actual_lines))
    failure_message = "Text doesn't match golden. Diff:\n" + diff_text
    self.assertEqual(golden_lines, actual_lines, failure_message)
def diff(before, after):
    """Produce a word-tagged XML diff: flatten both documents to rows of
    cells, ndiff the rows, mark insertions/deletions with 'ins'/'del'
    tags, and unflatten the result back into a document.

    Returns *before* unchanged when the two inputs are equal.
    """
    if before == after:
        return before
    before = [' '.join(row) for row in flatten_xml_from_string(before)]
    after = [' '.join(row) for row in flatten_xml_from_string(after)]
    difference = []
    delta = list(difflib.ndiff(before, after))
    # Track runs of consecutive '+'/'-' rows so a multi-row change shares
    # one tag position (mode_index) instead of re-opening per row.
    previous_mode = None
    mode_index = None
    mode_map = {'+': 'ins', '-': 'del'}
    for row in delta:
        cells = row.split()
        current_mode = cells[0]
        if current_mode in mode_map:
            cells.pop(0)  # drop the ndiff '+'/'-' marker itself
            if mode_index is None or current_mode != previous_mode:
                # First row of a new run: open the ins/del tag by
                # splitting the last cell around the tag name.
                mode_index = len(cells) - 1
                mode_tag = mode_map.get(current_mode)
                last_item = cells[-1]
                cells[-1] = mode_tag
                difference.append(list(cells))
                cells.append(last_item)
            else:
                # Continuation of the same run: insert the tag in place.
                cells.insert(mode_index, mode_map.get(current_mode))
                difference.append(cells)
            previous_mode = current_mode
        else:
            # Unchanged (or '?' guide) row: reset the run tracking.
            previous_mode = None
            mode_index = None
            difference.append(cells)
    return unflatten(difference)
def code_diff_report(python_source, new_source, filename):
    """Return a human-readable, line-by-line change report between
    python_source and new_source.

    A deleted line that is immediately re-added at the same position is
    reported as "Line N: old -> new"; a pure addition as "Line N: text".
    Line numbers refer to positions in new_source.  The filename
    parameter is currently unused but kept for interface compatibility.
    """
    line_commentary = []
    pending_removals = {}  # new-source line number -> removed text
    import difflib
    line_number = 1
    for line in difflib.ndiff(python_source.split(u"\n"), new_source.split(u"\n")):
        state = line[0]
        line_text = line[2:].rstrip()
        if state == " ":
            line_number += 1
        elif state == "+":
            if line_number in pending_removals:
                # '-' followed by '+' at the same position: a change.
                old_text = pending_removals.pop(line_number)
                line_commentary.append(
                    "Line {}: {} -> {}".format(line_number, old_text, line_text))
            else:
                # BUG FIX: pure additions were formatted but never
                # recorded in the original -- they are now reported too.
                line_commentary.append(
                    "Line {}: {}".format(line_number, line_text))
            line_number += 1
        elif state == "-":
            pending_removals[line_number] = line_text
        elif state == "?":
            pass  # intraline guide strings carry no report content
    return "\n".join(line_commentary)
def diff(self, other):
    """
    Return a detailed comparison of a CVODE wrapper object to another.

    Returns 0 if the objects are equal, otherwise a string.

    >>> from example_ode import vdp, exp_growth
    >>> from pprint import pprint
    >>> a, b = [Cvodeint(vdp, [0, 20], [0, -2]) for i in range(2)]
    >>> print a.diff(b)
    <BLANKLINE>
    >>> a.y[0] = 1
    >>> print a.diff(b)
    - Cvodeint(f_ode=vdp, t=array([ 0., 20.]), y=[1.0, -2.0],
    ? ^
    + Cvodeint(f_ode=vdp, t=array([ 0., 20.]), y=[0.0, -2.0],
    ? ^
    >>> c = Cvodeint(exp_growth, t=[0,2], y=[0.25])
    >>> print a.diff(c)
    - Cvodeint(f_ode=vdp, t=array([ 0., 20.]), y=[1.0, -2.0],
    ? ^^ - - ^ ---- ^^
    + Cvodeint(f_ode=exp_growth, t=array([ 0., 2.]), y=[0.25],
    ? ^^ +++++++ ^ ^

    NOTE(review): the alignment of the '?' guide lines above was lost
    when this file was reformatted and is almost certainly wrong;
    restore the exact spacing from upstream before trusting these
    doctests.  The code below is Python 2 (`for x in self, other`).
    """
    # Wrap both reprs into fixed-width lines, ndiff them, and keep only
    # the lines that differ (those not starting with a space).
    import textwrap, difflib
    s, o = [textwrap.wrap(repr(x)) for x in self, other]
    return "\n".join([li.strip() for li in difflib.ndiff(s, o) if not li.startswith(" ")])
def subDiff(adiff, bdiff, rdiff):
    """Refine a one-line-vs-one-line change into a word-level diff
    appended to rdiff (entries formatted '<marker>.<word>'); for any
    other shape just concatenate both sides onto rdiff."""
    if len(adiff) == 1 and len(bdiff) == 1:
        word_diff = difflib.ndiff(adiff[0][2:].split(' '),
                                  bdiff[0][2:].split(' '))
        for entry in word_diff:
            rdiff.append(entry[0] + '.' + entry[2:])
    else:
        rdiff.extend(adiff)
        rdiff.extend(bdiff)
def diff(t1, t2):
    """Print a diff between two strings (e.g. a page and its altered
    version), showing only the added/removed lines via pywikibot."""
    for delta_line in difflib.ndiff(t1.splitlines(1), t2.splitlines(1)):
        if delta_line[0] in "-+":
            pywikibot.output(delta_line.rstrip('\n'))
# Top-level CLI handling: rewrite config entries from command-line flags,
# then (when no flags are given) poll the subreddit, log new submissions,
# and post newly added log lines to Slack.
if args.r:
    config_change('subreddit=', args.r)
if args.u:
    # NOTE(review): the line below contains redacted credentials
    # ('******') and is not valid Python as committed; restore the
    # original config_change calls (user/password/clientid) before use.
    config_change('user='******'password='******'clientid=', args.cid)
if args.secret:
    config_change('clientsecret=', args.secret)
if not any(vars(args).values()):
    # No flags: record every submission's title/link in the log file.
    for submission in search_subreddit:
        topics_dict = f"{submission.title}: {submission.shortlink}"
        log.seek(0)
        log.write(f'{topics_dict}\n')
        #print(topics_dict)
    # Deduplicate the on-disk log in place.
    subprocess.check_call(['sort', '-u', '-o', 'log.txt', 'log.txt'])
    # Lines present in `log` but not in `tmp` are new -> notify Slack.
    diff = difflib.ndiff(log.readlines(), tmp.readlines())
    for line in diff:
        minus = '-'
        channel = 'UABLM0KRP'
        if line[0] == minus:
            print(line)
            slack_message(line, channel)
def compare(text1, text2):
    """Utility function for comparing text and returning differences."""
    lines1 = str(text1).splitlines(True)
    lines2 = str(text2).splitlines(True)
    delta = difflib.ndiff(lines1, lines2)
    return '\n' + '\n'.join(delta)
def _make_explanation(a, b): """Explanation for a failed assert -- the a and b arguments are List[str]""" return ["--- actual / +++ expected" ] + [line.strip('\n') for line in difflib.ndiff(a, b)]
# Report-generation script fragment (Parts 8-10): exercises collectionTasks
# queries and appends each part's results to the report file `f`, which --
# like std_names and some_com_ids -- is defined earlier in the script,
# outside this excerpt.
f.write('For coms : {}\n'.format(some_com_ids))
r = collectionTasks.getComponentReport(some_com_ids)
assert (type(r) == dict)
for elem in sorted(r.items()):
    f.write("\t" + elem[0] + 'was used : ' + str(elem[1]) + ' times\n')
f.write('---------------------------Part 8--ENDS--------------------------------------------\n')
# Part9
f.write("------------------Part 9----------------------------------------------------------\n")
some_std = std_names[23:43]
f.write('-----std-----\n'+ "\n".join(some_std) + '\n----Circuits----\n')
r = collectionTasks.getCircuitByStudent(set(some_std))
r = sorted(list(r))
for e in r:
    f.write("\t{}\n".format(e))
f.write('---------------------------Part 9--ENDS--------------------------------------------\n')
# Part10
f.write("------------------Part 10----------------------------------------------------------\n")
some_com_ids = {'RTD-159', 'NVC-327', 'MGC-590', 'OLW-497', 'LKZ-532', 'SLT-436', 'TMS-946'}
f.write('-----COM_ID-----\n'+ "\n".join(sorted(list(some_com_ids))) + '\n----Circuits----\n')
r = collectionTasks.getCircuitByComponent(set(some_com_ids))
r = sorted(list(r))
for e in r:
    f.write("\t{}\n".format(e))
f.write('---------------------------Part 10--ENDS--------------------------------------------\n')
# Compare the freshly generated report with the reference output and
# print any line differences (handles are left open -- script-style code).
a = open('myResult.txt', 'r')
b = open('myResult_kai.txt', 'r')
diff = difflib.ndiff(a.readlines(), b.readlines())
print(''.join(diff))
def diff(a, b):
    """Return only the changed ndiff lines of a vs b, newline-joined and
    preceded by a leading newline."""
    changed = (line for line in ndiff(a, b) if not line.startswith(' '))
    return '\n' + '\n'.join(changed)
def add_application_data(
    self,
    data: Dict[str, Any],
    source: str,
    timestamp: datetime,
    store: AppStore,
):
    """Store scraped metadata for one application, or reconcile it with
    an existing cache entry for the same (app, store, source, timestamp).

    On a conflict: auto-update when self.auto_update is set, otherwise
    show a colorized ndiff of the canonical JSON and prompt the user to
    update / keep / abort (abort raises CommandError).
    """
    assert 'id' in data
    app_id = int(data['id'])
    app, app_created = Application.objects.get_or_create(itunes_id=app_id)
    metadatas = Metadata.objects.filter(
        application=app,
        store=store,
        source=source,
        timestamp=timestamp,
    )
    if metadatas.exists():
        assert metadatas.count() == 1
        metadata = metadatas.first()
        # Canonical JSON (sorted keys) so the comparison ignores key order.
        existing_data = json.dumps(metadata.data, sort_keys=True, indent=2).splitlines()
        new_data = json.dumps(data, sort_keys=True, indent=2).splitlines()
        if existing_data != new_data:
            self.warn(
                f"Cached entries are different:\n App: {app}\n Store: {store}\n Source: {source}\n Timestamp: {timestamp}"
            )
            if self.auto_update:
                answer = 'u'
            else:
                # Colorize the ndiff: red deletions, green additions,
                # white guide lines, plain context.
                diff = difflib.ndiff(existing_data, new_data)
                for line in diff:
                    if line.startswith('- '):
                        self.secho(line, fg='red')
                    elif line.startswith(' '):
                        self.secho(line)
                    elif line.startswith('+ '):
                        self.secho(line, fg='green')
                    elif line.startswith('? '):
                        self.secho(line, fg='white')
                    else:
                        assert False
                self.secho("Update [u] / Keep [k] / Abort [a]:", fg='white', bold=True)
                answer = input("Select an option: ")
                # Re-prompt until a valid option is given.
                while True:
                    if answer in ['u', 'k', 'a']:
                        break
                    answer = input("Please select a valid option: ")
            if answer == 'u':
                # Update
                metadata.data = data
                metadata.full_clean()
                metadata.save()
            elif answer == 'k':
                # Keep
                assert not app_created
                return
            elif answer == 'a':
                # Abort
                raise CommandError("Aborted")
            else:
                assert False, f"Unhandled answer: {answer}"
    else:
        # No cached entry yet: create and validate a new Metadata row.
        metadata = Metadata(
            application=app,
            store=store,
            source=source,
            timestamp=timestamp,
            data=data,
        )
        metadata.full_clean()
        metadata.save()
    if app_created:
        self.success(f"Added new application: {app}")
def check_std_output(expected_path, result_path, result_str, result_manip=None, program_args=None):
    """
    compares output generated by Uncrustify (std out/err) with a the content
    of a file

    Parameters
    ----------------------------------------------------------------------------
    :param expected_path: string
        path of the file that will be compared with the output of Uncrustify

    :param result_path: string
        path to which the Uncrustifys output will be saved in case of a mismatch

    :param result_str: string (utf8)
        the output string generated by Uncrustify

    :param result_manip: lambda OR list or tuple of lambdas
        see result_manip for check_generated_output

    :param program_args: tuple of options
        a collection of multiple options used to add extra functionality to the
        script (i.e. auto apply changes or show diffs on command line)

    :return: bool
    ----------------------------------------------------------------------------
        True or False depending on whether both files have the same content
    """
    exp_txt = get_file_content(expected_path)
    if exp_txt is None:
        return False
    # Apply the caller-supplied normalizations to the raw output first.
    if result_manip is not None:
        if type(result_manip) is list or type(result_manip) is tuple:
            for m in result_manip:
                result_str = m(result_str)
        else:
            result_str = result_manip(result_str)
    if result_str != exp_txt:
        # Persist the mismatching output so it can be inspected/applied.
        with open(result_path, 'w', encoding="utf-8", newline="\n") as f:
            f.write(result_str)
        # NOTE(review): program_args defaults to None but is dereferenced
        # unconditionally below -- a mismatch with program_args=None would
        # raise AttributeError; confirm callers always pass it.
        if program_args.apply and program_args.auto_output_path:
            write_to_output_path(program_args.auto_output_path, result_str)
            return True
        if program_args.diff:
            print("\n************************************")
            print("Problem with %s" % result_path)
            print("************************************")
            file_diff = difflib.ndiff(result_str.splitlines(True),
                                      exp_txt.splitlines(True))
            for line in file_diff:
                pprint.PrettyPrinter(indent=4).pprint(line)
        else:
            print("\nProblem with %s" % result_path)
            print("use: '--diff' to find out why %s %s are different"
                  % (result_path, expected_path))
        return False
    return True
def main():
    """Command-line entry point: diff two files in the chosen format.

    Reads ``fromfile`` and ``tofile`` from the positional arguments and
    writes a context (default), unified (-u), ndiff (-n), or HTML
    side-by-side (-m) diff to stdout.
    """
    usage = "usage: %prog [options] fromfile tofile"
    parser = optparse.OptionParser(usage)
    parser.add_option("-c", action="store_true", default=False,
                      help='Produce a context format diff (default)')
    parser.add_option("-u", action="store_true", default=False,
                      help='Produce a unified format diff')
    parser.add_option(
        "-m", action="store_true", default=False,
        help='Produce HTML side by side diff (can use -c and -l in conjunction)'
    )
    parser.add_option("-n", action="store_true", default=False,
                      help='Produce a ndiff format diff')
    parser.add_option("-l", "--lines", type="int", default=3,
                      help='Set number of context lines (default 3)')
    opts, positional = parser.parse_args()

    # No arguments at all: show help and bail out with an error status.
    if not positional:
        parser.print_help()
        sys.exit(1)
    if len(positional) != 2:
        parser.error("need to specify both a fromfile and tofile")

    context_lines = opts.lines
    src_path, dst_path = positional
    src_date = file_mtime(src_path)
    dst_date = file_mtime(dst_path)
    with open(src_path) as src:
        src_lines = src.readlines()
    with open(dst_path) as dst:
        dst_lines = dst.readlines()

    # Pick the requested diff flavor; context diff is the fallback.
    if opts.u:
        result = difflib.unified_diff(src_lines, dst_lines, src_path, dst_path,
                                      src_date, dst_date, n=context_lines)
    elif opts.n:
        result = difflib.ndiff(src_lines, dst_lines)
    elif opts.m:
        result = difflib.HtmlDiff().make_file(src_lines, dst_lines, src_path,
                                              dst_path, context=opts.c,
                                              numlines=context_lines)
    else:
        result = difflib.context_diff(src_lines, dst_lines, src_path, dst_path,
                                      src_date, dst_date, n=context_lines)

    sys.stdout.writelines(result)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
    """An equality assertion for ordered sequences (like lists and tuples).

    For the purposes of this function, a valid ordered sequence type is one
    which can be indexed, has a length, and has an equality operator.

    Args:
        seq1: The first sequence to compare.
        seq2: The second sequence to compare.
        seq_type: The expected datatype of the sequences, or None if no
                datatype should be enforced.
        msg: Optional message to use on failure instead of a list of
                differences.
    """
    # Optional type enforcement: both inputs must be instances of seq_type.
    if seq_type is not None:
        seq_type_name = seq_type.__name__
        if not isinstance(seq1, seq_type):
            raise self.failureException('First sequence is not a %s: %s'
                                        % (seq_type_name, safe_repr(seq1)))
        if not isinstance(seq2, seq_type):
            raise self.failureException('Second sequence is not a %s: %s'
                                        % (seq_type_name, safe_repr(seq2)))
    else:
        seq_type_name = "sequence"

    differing = None
    # Probe both inputs for len(); failure means "not sequence-like".
    try:
        len1 = len(seq1)
    except (TypeError, NotImplementedError):
        differing = 'First %s has no length. Non-sequence?' % (
            seq_type_name)

    if differing is None:
        try:
            len2 = len(seq2)
        except (TypeError, NotImplementedError):
            differing = 'Second %s has no length. Non-sequence?' % (
                seq_type_name)

    if differing is None:
        # Fast path: equal sequences pass the assertion with no message.
        if seq1 == seq2:
            return

        seq1_repr = safe_repr(seq1)
        seq2_repr = safe_repr(seq2)
        # Truncate long reprs so the header line stays readable.
        if len(seq1_repr) > 30:
            seq1_repr = seq1_repr[:30] + '...'
        if len(seq2_repr) > 30:
            seq2_repr = seq2_repr[:30] + '...'
        elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
        differing = '%ss differ: %s != %s\n' % elements

        # Walk the common prefix to find the first unindexable position or
        # the first pair of items that differ.
        # NOTE(review): xrange implies Python 2 — confirm target runtime.
        for i in xrange(min(len1, len2)):
            try:
                item1 = seq1[i]
            except (TypeError, IndexError, NotImplementedError):
                differing += ('\nUnable to index element %d of first %s\n' %
                              (i, seq_type_name))
                break

            try:
                item2 = seq2[i]
            except (TypeError, IndexError, NotImplementedError):
                differing += ('\nUnable to index element %d of second %s\n' %
                              (i, seq_type_name))
                break

            if item1 != item2:
                differing += ('\nFirst differing element %d:\n%s\n%s\n' %
                              (i, safe_repr(item1), safe_repr(item2)))
                break
        else:
            # Loop ran to completion: the common prefix matches item-wise.
            if (len1 == len2 and seq_type is None and
                    type(seq1) != type(seq2)):
                # The sequences are the same, but have differing types.
                return

        # Report which side has extra trailing elements, if any.
        if len1 > len2:
            differing += ('\nFirst %s contains %d additional '
                          'elements.\n' % (seq_type_name, len1 - len2))
            try:
                differing += ('First extra element %d:\n%s\n' %
                              (len2, safe_repr(seq1[len2])))
            except (TypeError, IndexError, NotImplementedError):
                differing += ('Unable to index element %d '
                              'of first %s\n' % (len2, seq_type_name))
        elif len1 < len2:
            differing += ('\nSecond %s contains %d additional '
                          'elements.\n' % (seq_type_name, len2 - len1))
            try:
                differing += ('First extra element %d:\n%s\n' %
                              (len1, safe_repr(seq2[len1])))
            except (TypeError, IndexError, NotImplementedError):
                differing += ('Unable to index element %d '
                              'of second %s\n' % (len1, seq_type_name))

    # Attach a full pformat-based ndiff, truncated via the usual helper,
    # then fail with the combined message.
    standardMsg = differing
    diffMsg = '\n' + '\n'.join(
        difflib.ndiff(pprint.pformat(seq1).splitlines(),
                      pprint.pformat(seq2).splitlines()))
    standardMsg = self._truncateMessage(standardMsg, diffMsg)
    msg = self._formatMessage(msg, standardMsg)
    self.fail(msg)
def diff2(fromlines, tolines):
    """Basic linewise diff of two lists of strings.

    Returns the merged text with conflicting lines duplicated: every
    context ('  '), added ('+ '), and removed ('- ') ndiff entry is kept,
    with its two-character prefix stripped; '? ' hint lines are dropped.
    """
    merged = []
    for entry in difflib.ndiff(fromlines, tolines):
        if entry[0] in (' ', '+', '-'):
            merged.append(entry[2:])
    return merged
def psdiff(cls, pid_pool, old, new, rvid):
    """
    Compares 2 versions of text at a word level to identify the
    individual edits (insertions and deletions).

    Walks the ndiff of *old* vs *new* and groups consecutive '+' lines
    into ADD patches and consecutive '-' lines into DELETE patches, each
    appended to a new instance of this class.  *deletes* tracks how many
    deleted lines have been consumed so far so that *index* stays a
    position in the *new* text while ``diff[start + deletes:index + deletes]``
    still slices the right span of the raw diff.
    """
    ptype = None          # PatchType of the patch currently being built
    ps = cls()
    start = None          # start index (in new-text coordinates) of that patch

    # Obtain a list of differences between the texts
    diff = difflib.ndiff(old, new)
    # unified_diff = difflib.unified_diff(old, new)
    # for line in unified_diff:
    #     print(line)

    # debugging
    diff = [line for line in diff]

    # ignore helper lines
    diff = [line for line in diff if not line.startswith('?')]
    reorder_diff(diff)
    # print("diffset: \n%s" % '\n'.join(diff))
    # print("")

    # print("old: %s" % '\n'.join(old))
    # print("new: %s" % '\n'.join(new))
    # print("diffset: %s" % '\n'.join(diff))
    # print("")

    # Split the differences into Patches
    index = 0
    deletes = 0
    for line in diff:
        if line[0] == ' ':
            # If equal, terminate any current patch.
            if ptype is not None:
                ps.append_patch(
                    Patch(pid_pool.next_pid(), ptype, start, index,
                          diff[start + deletes:index + deletes], rvid))
                if ptype == PatchType.DELETE:
                    # Deleted lines do not advance the new-text position.
                    deletes += index - start
                    index = start
                ptype = None
            index += 1
        elif line[0] == '+':
            # If addition, terminate any current DELETE patch.
            if ptype == PatchType.DELETE:
                ps.append_patch(
                    Patch(pid_pool.next_pid(), ptype, start, index,
                          diff[start + deletes:index + deletes], rvid))
                deletes += index - start
                index = start
                ptype = None
            # Begin a new ADD patch, or extend an existing one.
            if ptype is None:
                ptype = PatchType.ADD
                start = index
            index += 1
        elif line[0] == '-':
            # If deletion, terminate any current ADD patch.
            if ptype == PatchType.ADD:
                ps.append_patch(
                    Patch(pid_pool.next_pid(), ptype, start, index,
                          diff[start + deletes:index + deletes], rvid))
                ptype = None
            # Begin a new DELETE patch, or extend an existing one.
            if ptype is None:
                ptype = PatchType.DELETE
                start = index
            index += 1
        # Skip line[0] == '?' completely.

    # Terminate and add any remaining patch.
    if ptype is not None:
        ps.append_patch(Patch(pid_pool.next_pid(), ptype, start, index, diff[
            start + deletes:index + deletes], rvid))
        # print "Patch: "
        # print "".join([line[1:] for line in diff[start:index]])

    return ps
def print_diff(case_a, case_b):
    """Print the ndiff entries in which *case_a* and *case_b* differ.

    Context entries (prefix ' ') are filtered out; the remaining '+', '-',
    and '?' entries are printed as a single list.
    """
    delta = difflib.ndiff(case_a, case_b)
    changed = [entry for entry in delta if not entry.startswith(' ')]
    print(changed)
def one_all_f(sel_row, sel_col, sel_find, file):
    """Compare the first selected CSV row against every other selected row.

    Reads *file* with pandas, keeps only the 1-based columns listed in
    *sel_col*, and compares the first row in *sel_row* (also 1-based, with
    a -2 offset into the DataFrame) against each subsequent row.  Per-cell
    results are rendered as Texttable tables with ANSI color codes
    highlighting differing characters and the hard-coded keyword.

    NOTE(review): relies on module-level color codes (blackb, greenf, bold,
    reset, redb, whitef), helpers (get_color_string, diff_color,
    diff_color_keyword, bcolors) and `tt` (texttable) — confirm availability.
    NOTE(review): the `file` parameter shadows the py2 builtin.
    """
    sel_find = sel_find
    keyword = 'cccZZZ'
    filename = file
    df = pd.read_csv(filename)
    # Get columns headers
    index = len(df.index)
    # Get only the selected columns
    columns = []
    for i in sel_col:
        columns.append(df.columns[i-1])
    columns_index = len(columns)
    # !!! Test find in columns
    # print('___')
    # print(df.columns.str.contains("A|C")==True)
    new_list = []
    old_list = []
    group = []
    results = []
    row_group = []
    row_old = ''
    row_new = ''
    find_flag = True
    for i, data in enumerate(sel_row):
        # first row
        if i == 0:
            # pass # ??? does it need to check i==0 ???
            old_list = df.loc[data - 2, columns].tolist()
            row_old = data # row number
            # !!! Test searching
            # print(df.loc[data - 2, columns].str.contains("aaa6|aaa4")==True)
        # next rows
        else:
            new_list = df.loc[data - 2, columns].tolist()
            row_group.append(new_list)
            row_new = data # row number
            # !!! Test searching
            # print(df.loc[data - 2, columns].str.contains("dddd")==True)
            # Group of rows. ie. #2 and #3, #3 and #4...
            group = []
            # for each column
            for ii, old in enumerate(old_list):
                # Fill empty cells with <EMPTY> to allow difflib.ndiff
                # (pandas reads empty cells as float NaN).
                if type(old) == type(0.1):
                    old = '<EMPTY>'
                if type(new_list[ii]) == type(0.1):
                    new_list[ii] = '<EMPTY>'
                # append first row of columns to group
                if len(group) == 0:
                    row_info = ['Row / Col', row_old, row_new, 'Compare']
                    # Feature Find: Append Find if searching for a keyword
                    if find_flag == True:
                        row_info.append('Find')
                    # Append first column
                    group.append(row_info)
                # Feature Compare: SAME values on temp list "col"
                if old == new_list[ii]:
                    col = [columns[ii], old, new_list[ii], '-']
                    # Feature Find: color formatting and adding Yes on Find row
                    if col[1] == keyword:
                        keyword_format = f'{blackb}{greenf}{bold}{keyword}{reset}'
                        col[1] = keyword_format
                        col.append('Yes')
                    if col[2] == keyword:
                        keyword_format = f'{blackb}{greenf}{bold}{keyword}{reset}'
                        col[2] = keyword_format
                        col.append('Yes')
                    else:
                        col.append('-')
                # Feature Compare: DIFFERENT values on temp list "col"
                else:
                    col = [columns[ii], old, new_list[ii], get_color_string(bcolors.RED,"diff")]
                    if col[1] == keyword:
                        col.append('Yes')
                    elif col[2] == keyword:
                        col.append('Yes')
                    else:
                        col.append('-')
                    # 1st row formatting: apply color for each different character
                    abc = ''
                    a = old
                    b = new_list[ii]
                    if keyword == a:
                        diff_char = difflib.ndiff(b, a)
                        abc = diff_color_keyword(diff_char)
                    else:
                        diff_char = difflib.ndiff(b, a)
                        abc = diff_color(diff_char)
                    # for iii, s in enumerate(difflib.ndiff(b, a)):
                    #     # APPLY different colors
                    #     abc = ''
                    #
                    #     if a == keyword:
                    #         abc = diff_color_keyword(s, abc)
                    #         # print(abc)
                    #         # if s[0]==' ':
                    #         #     abc = abc + s[-1]
                    #         # elif s[0]=='-':
                    #         #     br_color = f'{redb}{greenf}{bold}{s[-1]}{reset}'
                    #         #
                    #         # elif s[0]=='+':
                    #         #     br_color = f'{redb}{greenf}{bold}{s[-1]}{reset}'
                    #         #     abc = abc + br_color
                    #
                    #     else:
                    #         abc = diff_color(s, abc)
                    #         # print(abc)
                    #         # if s[0]==' ':
                    #         #     abc = abc + s[-1]
                    #         # elif s[0]=='-':
                    #         #     br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                    #         #
                    #         # elif s[0]=='+':
                    #         #     br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                    #         #     abc = abc + br_color
                    # Assign wrong characters colored in red
                    col[1] = abc
                    # 2nd row formatting: apply color for each different character
                    abc = ''
                    a = old
                    b = new_list[ii]
                    if keyword == b:
                        diff_char = difflib.ndiff(a, b)
                        abc = diff_color_keyword(diff_char)
                    else:
                        diff_char = difflib.ndiff(a, b)
                        abc = diff_color(diff_char)
                    # abc = ''
                    # for iii, s in enumerate(difflib.ndiff(a, b)):
                    #     if s[0]==' ':
                    #         abc = abc + s[-1]
                    #     elif s[0]=='-':
                    #         br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                    #     elif s[0]=='+':
                    #         br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                    #         abc = abc + br_color
                    # Assign wrong characters colored in red
                    col[2] = abc
                    # Check if find keyword
                    # if col[1] == keyword:
                    #     k = ' '
                    #     keyword_format = f'{greenb}{greenf}{bold}{k}{reset}'
                    #     col[1] = keyword_format + col[1] + keyword_format
                    # if col[2] == keyword:
                    #     k = 'X'
                    #     keyword_format = f'{greenb}{greenf}{bold}{k}{reset}'
                    #     col[2] = keyword_format + col[2] + keyword_format
                    #     col[2] = keyword_format
                    # print('+++')
                    t1 = f'{reset}{col[2]}{reset}'
                    print(t1)
                # !!! Test find in columns
                # for col_index, col_item in enumerate(col):
                #     if 'aaa4' == col_item:
                #         col[col_index] = get_color_string(bcolors.GREEN, col_item)
                # print(f'col {col}')
                group.append(col)
                # add group to results and reset the lists
                if ii == (columns_index - 1):
                    results.append(group)
                    # clean lists
                    new = []
                    group = []
            # DISABLE this reassignment to compare One row VS Rest rows
            # Re-assign new list and index to old list and index
            # old_list = new_list # List with row values
            # row_old = row_new # Variable with row number value
    print(f'------------ one vs all ------------------')
    for group in results:
        tab = tt.Texttable()
        tab.set_deco(tab.HEADER)
        for row in zip(*group):
            tab.add_row(row)
        s = tab.draw()
        print(s, '\n')
def stringDiff(actual, expected):
    """Return a newline-joined ndiff of *actual* vs *expected*.

    Both strings are split with keepends=True so each diff entry carries
    its original line ending.

    Bug fix: the second splitlines() was called with the magic value 2;
    splitlines takes a boolean *keepends*, so 2 only worked by accident
    of truthiness.  Both calls now pass True explicitly (same behavior,
    no latent typo).
    """
    diff = list(difflib.ndiff(actual.splitlines(True), expected.splitlines(True)))
    return "\n".join(diff)
def compare_events(self, line_offset, events, expected_events):
    """Fail unless *events*, shifted down by *line_offset*, equal *expected_events*.

    Each event is a (line, name) pair; the shift makes recorded absolute
    line numbers comparable to expectation-relative ones.  On mismatch the
    failure message contains an ndiff of the stringified event lists.
    """
    normalized = [(line - line_offset, event) for line, event in events]
    if normalized == expected_events:
        return
    expected_repr = [str(item) for item in expected_events]
    actual_repr = [str(item) for item in normalized]
    delta = "\n".join(difflib.ndiff(expected_repr, actual_repr))
    self.fail("events did not match expectation:\n" + delta)
def check_row_pairs(sel_row, sel_col, sel_find, file):
    """Compare consecutive pairs of selected CSV rows column by column.

    Reads *file* with pandas, restricts to the 1-based columns in *sel_col*,
    then compares row N with row N+1 for every consecutive pair in *sel_row*
    (row numbers are 1-based with a -2 offset into the DataFrame).  Results
    are printed as Texttable tables with differing characters colored.

    NOTE(review): relies on module-level color codes (redb, whitef, bold,
    reset), get_color_string/bcolors, and `tt` (texttable) — confirm these
    exist at module scope.
    NOTE(review): in the character-diff loops below, br_color is computed in
    the '-' branch but only appended in the '+' branch — '-' characters are
    silently dropped; looks intentional ("wrong characters colored in red")
    but confirm.
    NOTE(review): the `file` parameter shadows the py2 builtin.
    """
    sel_find = sel_find
    filename = file
    df = pd.read_csv(filename)
    # Get columns headers
    index = len(df.index)
    # Get only the selected columns
    columns = []
    for i in sel_col:
        columns.append(df.columns[i-1])
    # !!! Test find in columns
    # print('___')
    # print(df.columns.str.contains("A|C")==True)
    columns_index = len(columns)
    new_list = []
    old_list = []
    group = []
    results = []
    row_group = []
    row_old = ''
    row_new = ''
    for i, data in enumerate(sel_row):
        # first row
        if i == 0:
            # pass # ??? does it need to check i==0 ???
            old_list = df.loc[data - 2, columns].tolist()
            row_old = data # row number
            # !!! Test searching
            # print(df.loc[data - 2, columns].str.contains("aaa6|aaa4")==True)
        # next rows
        else:
            new_list = df.loc[data - 2, columns].tolist()
            row_group.append(new_list)
            row_new = data # row number
            # !!! Test find in columns
            # print(df.loc[data - 2, columns].str.contains("dddd")==True)
            # Group of rows. ie. #2 and #3, #3 and #4...
            group = []
            # for each column
            for ii, old in enumerate(old_list):
                # Fill empty cells with <EMPTY> to allow difflib.ndiff
                # (pandas reads empty cells as float NaN).
                if type(old) == type(0.1):
                    old = '<EMPTY>'
                if type(new_list[ii]) == type(0.1):
                    new_list[ii] = '<EMPTY>'
                # append first row of columns to group
                if len(group) == 0:
                    row_info = ['Row / Col', row_old, row_new, 'Result']
                    group.append(row_info)
                # compare each column, same values on temp list "col"
                if old == new_list[ii]:
                    col = [columns[ii], old, new_list[ii], '-']
                # compare column, different values
                else:
                    # Test to apply color in 1 word
                    br = f'{redb}{whitef}{bold}{new_list[ii]}{reset}'
                    col = [columns[ii], old, br, get_color_string(bcolors.RED,"diff")]
                    a = old
                    b = new_list[ii]
                    # 1st row formatting: apply color for each different character
                    abc = ''
                    for iii, s in enumerate(difflib.ndiff(b, a)):
                        if s[0]==' ':
                            abc = abc + s[-1]
                        elif s[0]=='-':
                            br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                        elif s[0]=='+':
                            br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                            abc = abc + br_color
                    # Assign wrong characters colored in red
                    col[1] = abc
                    # 2nd row formatting: apply color for each different character
                    abc = ''
                    for iii, s in enumerate(difflib.ndiff(a, b)):
                        if s[0]==' ':
                            abc = abc + s[-1]
                        elif s[0]=='-':
                            br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                        elif s[0]=='+':
                            br_color = f'{redb}{whitef}{bold}{s[-1]}{reset}'
                            abc = abc + br_color
                    # Assign wrong characters colored in red
                    col[2] = abc
                for col_index, col_item in enumerate(col):
                    if 'aaa4' == col_item:
                        col[col_index] = get_color_string(bcolors.GREEN, col_item)
                # print(f'col {col}')
                group.append(col)
                # add group to results and reset the lists
                if ii == (columns_index - 1):
                    results.append(group)
                    # clean lists
                    new = []
                    group = []
            # Re-assign new list and index to old list and index
            old_list = new_list # List with row values
            row_old = row_new # Variable with row number value
    print(f'------------ table ------------------')
    for group in results:
        tab = tt.Texttable()
        tab.set_deco(tab.HEADER)
        for row in zip(*group):
            tab.add_row(row)
        s = tab.draw()
        print(s, '\n')
def check_docstring_parameters(func, doc=None, ignore=None):
    """Helper to check docstring

    Parameters
    ----------
    func : callable
        The function object to test.
    doc : str, optional (default: None)
        Docstring if it is passed manually to the test.
    ignore : None | list
        Parameters to ignore.

    Returns
    -------
    incorrect : list
        A list of string describing the incorrect results.
    """
    from numpydoc import docscrape
    incorrect = []
    ignore = [] if ignore is None else ignore

    func_name = _get_func_name(func)
    # Only check sklearn's own public API; skip vendored externals.
    if (not func_name.startswith('sklearn.') or
            func_name.startswith('sklearn.externals')):
        return incorrect
    # Don't check docstring for property-functions
    if inspect.isdatadescriptor(func):
        return incorrect
    # Don't check docstring for setup / teardown pytest functions
    if func_name.split('.')[-1] in ('setup_module', 'teardown_module'):
        return incorrect
    # Dont check estimator_checks module
    if func_name.split('.')[2] == 'estimator_checks':
        return incorrect
    # Get the arguments from the function signature
    param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
    # drop self
    if len(param_signature) > 0 and param_signature[0] == 'self':
        param_signature.remove('self')

    # Analyze function's docstring
    if doc is None:
        with warnings.catch_warnings(record=True) as w:
            try:
                doc = docscrape.FunctionDoc(func)
            except Exception as exp:
                incorrect += [func_name + ' parsing error: ' + str(exp)]
                return incorrect
        # Any warning during numpydoc parsing is treated as a hard error.
        if len(w):
            raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))

    param_docs = []
    for name, type_definition, param_doc in doc['Parameters']:
        # Type hints are empty only if parameter name ended with :
        if not type_definition.strip():
            if ':' in name and name[:name.index(':')][-1:].strip():
                incorrect += [func_name +
                              ' There was no space between the param name and '
                              'colon (%r)' % name]
            elif name.rstrip().endswith(':'):
                incorrect += [func_name +
                              ' Parameter %r has an empty type spec. '
                              'Remove the colon' % (name.lstrip())]

        # Create a list of parameters to compare with the parameters gotten
        # from the func signature
        if '*' not in name:
            param_docs.append(name.split(':')[0].strip('` '))

    # If one of the docstring's parameters had an error then return that
    # incorrect message
    if len(incorrect) > 0:
        return incorrect

    # Remove the parameters that should be ignored from list
    param_docs = list(filter(lambda x: x not in ignore, param_docs))

    # The following is derived from pytest, Copyright (c) 2004-2017 Holger
    # Krekel and others, Licensed under MIT License. See
    # https://github.com/pytest-dev/pytest
    message = []
    # Report only the FIRST positional name mismatch, then stop.
    for i in range(min(len(param_docs), len(param_signature))):
        if param_signature[i] != param_docs[i]:
            message += ["There's a parameter name mismatch in function"
                        " docstring w.r.t. function signature, at index %s"
                        " diff: %r != %r" %
                        (i, param_signature[i], param_docs[i])]
            break
    if len(param_signature) > len(param_docs):
        message += ["Parameters in function docstring have less items w.r.t."
                    " function signature, first missing item: %s" %
                    param_signature[len(param_docs)]]
    elif len(param_signature) < len(param_docs):
        message += ["Parameters in function docstring have more items w.r.t."
                    " function signature, first extra item: %s" %
                    param_docs[len(param_signature)]]

    # If there wasn't any difference in the parameters themselves between
    # docstring and signature including having the same length then return
    # empty list
    if len(message) == 0:
        return []

    import difflib
    import pprint

    param_docs_formatted = pprint.pformat(param_docs).splitlines()
    param_signature_formatted = pprint.pformat(param_signature).splitlines()

    message += ["Full diff:"]

    message.extend(line.strip() for line in difflib.ndiff(
        param_signature_formatted, param_docs_formatted))

    incorrect.extend(message)

    # Prepend function name
    incorrect = ['In function: ' + func_name] + incorrect

    return incorrect
def CompareToGoldenLines(self, text, golden_lines):
    """Assert that *text* splits (keeping line endings) into *golden_lines*.

    On failure the assertion message contains an ndiff between the golden
    lines and the lines actually found.
    """
    actual_lines = text.splitlines(1)
    delta = ''.join(difflib.ndiff(golden_lines, actual_lines))
    message = "Text doesn't match golden. Diff:\n" + delta
    self.assertEqual(golden_lines, actual_lines, message)
def sequence_diff(seq1, seq2, seq_type=None):
    """Build a human-readable description of how two sequences differ.

    Mirrors unittest's assertSequenceEqual message construction but returns
    the message (or None when the sequences are equal / same-content with
    differing types) instead of raising.

    NOTE(review): xrange implies Python 2 — confirm target runtime.
    """
    # Optional type enforcement: both inputs must be instances of seq_type.
    if seq_type is not None:
        seq_type_name = seq_type.__name__
        if not isinstance(seq1, seq_type):
            return 'First sequence is not a %s: %s' % \
                (seq_type_name, safe_repr(seq1))
        if not isinstance(seq2, seq_type):
            return 'Second sequence is not a %s: %s' % \
                (seq_type_name, safe_repr(seq2))
    else:
        seq_type_name = "sequence"

    differing = None
    # Probe both inputs for len(); failure means "not sequence-like".
    try:
        len1 = len(seq1)
    except (TypeError, NotImplementedError):
        differing = 'First %s has no length. Non-sequence?' % (
            seq_type_name)

    if differing is None:
        try:
            len2 = len(seq2)
        except (TypeError, NotImplementedError):
            differing = 'Second %s has no length. Non-sequence?' % (
                seq_type_name)

    if differing is None:
        # Equal sequences: no diff to report (returns None).
        if seq1 == seq2:
            return

        seq1_repr = safe_repr(seq1)
        seq2_repr = safe_repr(seq2)
        # Truncate long reprs so the header line stays readable.
        if len(seq1_repr) > 30:
            seq1_repr = seq1_repr[:30] + '...'
        if len(seq2_repr) > 30:
            seq2_repr = seq2_repr[:30] + '...'
        elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
        differing = '%ss differ: %s != %s\n' % elements

        # Walk the common prefix to find the first unindexable position or
        # the first pair of items that differ.
        for i in xrange(min(len1, len2)):
            try:
                item1 = seq1[i]
            except (TypeError, IndexError, NotImplementedError):
                differing += (
                    '\nUnable to index element %d of first %s\n' %
                    (i, seq_type_name))
                break

            try:
                item2 = seq2[i]
            except (TypeError, IndexError, NotImplementedError):
                differing += (
                    '\nUnable to index element %d of second %s\n' %
                    (i, seq_type_name))
                break

            if item1 != item2:
                differing += ('\nFirst differing element %d:\n%s\n%s\n' %
                              (i, item1, item2))
                break
        else:
            # Loop ran to completion: the common prefix matches item-wise.
            if (len1 == len2 and seq_type is None and
                    type(seq1) != type(seq2)):
                # The sequences are the same, but have differing types.
                return

        # Report which side has extra trailing elements, if any.
        if len1 > len2:
            differing += ('\nFirst %s contains %d additional '
                          'elements.\n' % (seq_type_name, len1 - len2))
            try:
                differing += ('First extra element %d:\n%s\n' %
                              (len2, seq1[len2]))
            except (TypeError, IndexError, NotImplementedError):
                differing += ('Unable to index element %d '
                              'of first %s\n' % (len2, seq_type_name))
        elif len1 < len2:
            differing += ('\nSecond %s contains %d additional '
                          'elements.\n' % (seq_type_name, len2 - len1))
            try:
                differing += ('First extra element %d:\n%s\n' %
                              (len1, seq2[len1]))
            except (TypeError, IndexError, NotImplementedError):
                differing += ('Unable to index element %d '
                              'of second %s\n' % (len1, seq_type_name))

    # Append a full pformat-based ndiff of the two sequences.
    standardMsg = differing
    diffMsg = '\n' + '\n'.join(
        difflib.ndiff(
            pprint.pformat(seq1).splitlines(),
            pprint.pformat(seq2).splitlines()))
    return standardMsg + '\n' + diffMsg
def graph_diagnostic_info():
    """Diagnose why a traced graph and its check-run counterpart diverge.

    Returns a (graph_diff_errors, tensor_compare_errors) pair of optional
    strings: a textual graph/node diff when the canonicalized graph strings
    differ, and a report on the first pair of Tensor-valued prim::Constant
    nodes whose values are not allclose.

    NOTE(review): this is a closure — traced_func, check_mod_func and
    indent come from the enclosing scope, which is not visible here.
    """
    # Canonicalize + inline both graphs and strip shape info / mangle ids
    # so the string comparison is insensitive to naming noise.
    mod_canonicalized = torch._C._jit_pass_canonicalize(traced_func.graph)
    torch._C._jit_pass_inline(mod_canonicalized)
    torch._C._jit_pass_erase_shape_information(mod_canonicalized)
    mod_str = str(mod_canonicalized)
    mod_str = re.sub(r"___torch_mangle_[0-9]+\.", "", mod_str)
    check_canonicalized = torch._C._jit_pass_canonicalize(check_mod_func.graph)
    torch._C._jit_pass_inline(check_canonicalized)
    torch._C._jit_pass_erase_shape_information(check_canonicalized)
    check_str = str(check_canonicalized)
    check_str = re.sub(r"___torch_mangle_[0-9]+\.", "", check_str)

    graph_diff_errors = None
    if mod_str != check_str:
        import difflib

        graph_diff = difflib.ndiff(
            mod_str.splitlines(True), check_str.splitlines(True)
        )
        graph_diff_errors = "Graph diff:\n" + indent("".join(graph_diff)) + "\n"

        # Locate the first node whose printed form diverges and show its
        # diff plus source locations from both runs.
        for n_mod, n_check in zip(
            mod_canonicalized.nodes(), check_canonicalized.nodes()
        ):
            if str(n_mod) != str(n_check):
                graph_diff_errors += "First diverging operator:\n"
                node_diff = difflib.ndiff(
                    str(n_mod).splitlines(True), str(n_check).splitlines(True)
                )
                source_printout = (
                    "Node diff:\n" + indent("".join(node_diff)) + "\n"
                )
                mod_stack = n_mod.sourceRange()
                if mod_stack:
                    source_printout += (
                        "Trace source location:\n" + indent(mod_stack) + "\n"
                    )
                check_stack = n_check.sourceRange()
                if check_stack:
                    source_printout += (
                        "Check source location:\n" + indent(check_stack) + "\n"
                    )
                graph_diff_errors += source_printout
                break  # For now, only print out the first pair of nodes that diverges

    tensor_compare_errors = None
    # Check Tensor-valued constant nodes
    for n_mod, n_check in zip(
        mod_canonicalized.nodes(), check_canonicalized.nodes()
    ):
        if n_mod.kind() != n_check.kind():
            break  # Graphs have already diverged
        if n_mod.kind() == "prim::Constant" and not (
            n_mod.mustBeNone() or n_check.mustBeNone()
        ):
            if not n_mod.hasAttribute("value"):
                continue
            # Only compare constants whose attribute kind is tensor ("t").
            if n_mod.kindOf("value") != "t" or n_check.kindOf("value") != "t":
                continue

            mod_tensor_val = n_mod.t("value")
            check_tensor_val = n_check.t("value")

            try:
                torch.testing.assert_allclose(mod_tensor_val, check_tensor_val)
            except (RuntimeError, AssertionError) as e:
                if tensor_compare_errors is None:
                    tensor_compare_errors = ""
                tensor_compare_errors += "Node:\n" + indent(str(n_mod)) + "\n"
                compare_stack = n_mod.sourceRange()
                if compare_stack:
                    tensor_compare_errors += (
                        "Source Location:\n" + indent(compare_stack) + "\n"
                    )
                tensor_compare_errors += "Comparison exception: " + indent(
                    str(e)
                )
                break  # For now, only print the first diverging pair

    return graph_diff_errors, tensor_compare_errors
class Commands:
    """
    Each static method can be called from the command line. Add a new
    static method here to add a new command to the program.
    """

    @staticmethod
    def display(language):
        """
        Print all the code listings in the .rst files.
        """
        for f in restFiles:
            listings = language.listings.findall(open(f).read())
            if not listings:
                continue
            print('=' * 60 + "\n" + f + "\n" + '=' * 60)
            for n, l in enumerate(listings):
                print("\n".join(shift(l)))
                if n < len(listings) - 1:
                    print('-' * 60)

    @staticmethod
    def extract(language):
        """
        Pull the code listings from the .rst files and write each listing
        into its own file. Will not overwrite if code files and .rst files
        disagree unless you say "extract -force".
        """
        force = len(sys.argv) == 3 and sys.argv[2] == '-force'
        paths = set()
        for listing in [shift(listing) for f in restFiles
                        for listing in language.listings.findall(open(f).read())]:
            # First listing line carries the target path after the comment tag.
            path = listing[0][len(language.commentTag):].strip()
            if path in paths:
                print("ERROR: Duplicate file name: %s" % path)
                sys.exit(1)
            else:
                paths.add(path)
            path = os.path.join("..", "code", path)
            dirname = os.path.dirname(path)
            if dirname and not os.path.exists(dirname):
                os.makedirs(dirname)
            if os.path.exists(path) and not force:
                # Refuse to overwrite when the on-disk file has diverged.
                for i in difflib.ndiff(open(path).read().splitlines(), listing):
                    if i.startswith("+ ") or i.startswith("- "):
                        print("ERROR: Existing file different from .rst")
                        print("Use 'extract -force' to force overwrite")
                        Commands.check(language)
                        return
            # Bug fix: was the Python 2 builtin `file(path, 'w').write(...)`,
            # which is gone in Python 3 and leaked the handle; use a context
            # manager instead.
            with open(path, 'w') as out:
                out.write("\n".join(listing))

    @staticmethod
    def check(language):
        """
        Ensure that external code files exist and check which external
        files have changed from what's in the .rst files. Generate files
        in the _deltas subdirectory showing what has changed.
        """
        class Result: # Messenger
            # Bug fix: the argument was corrupted to a backtick-`kwargs
            # (a syntax error); it must be **kwargs.
            def __init__(self, **kwargs):
                self.__dict__ = kwargs
        result = Result(missing = [], deltas = [])
        listings = [Result(code = shift(code), file = f)
                    for f in restFiles
                    for code in language.listings.findall(open(f).read())]
        paths = [os.path.normpath(os.path.join("..", "code", path))
                 for path in [listing.code[0].strip()[len(language.commentTag):].strip()
                              for listing in listings]]
        if os.path.exists("_deltas"):
            shutil.rmtree("_deltas")
        for path, listing in zip(paths, listings):
            if not os.path.exists(path):
                result.missing.append(path)
            else:
                code = open(path).read().splitlines()
                # On the first differing line, emit an HTML side-by-side
                # delta file and record the divergence.
                for i in difflib.ndiff(listing.code, code):
                    if i.startswith("+ ") or i.startswith("- "):
                        d = difflib.HtmlDiff()
                        if not os.path.exists("_deltas"):
                            os.makedirs("_deltas")
                        html = os.path.join("_deltas",
                            os.path.basename(path).split('.')[0] + ".html")
                        with open(html, 'w') as out:
                            out.write(
                                "<html><h1>Left: %s<br>Right: %s</h1>" %
                                (listing.file, path) +
                                d.make_file(listing.code, code))
                        result.deltas.append(Result(file = listing.file,
                            path = path, html = html, code = code))
                        break
        if result.missing:
            print("Missing %s files:\n%s" %
                  (language.__name__, "\n".join(result.missing)))
        for delta in result.deltas:
            print("%s changed in %s; see %s" %
                  (delta.file, delta.path, delta.html))
        return result
for item in difference2: lines.append(repr(item)) return '\n'.join(lines) diff = None if not isinstance(d1, type(d2)): return diff if d1 == d2: return diff if isinstance(d1, dict): diff = ('\n' + '\n'.join( difflib.ndiff( pprint.pformat(d1).splitlines(), pprint.pformat(d2).splitlines()))) elif isinstance(d1, list): diff = sequence_diff(d1, d2, seq_type=list) elif isinstance(d1, tuple): diff = sequence_diff(d1, d2, seq_type=tuple) elif isinstance(d1, set): diff = set_diff(d1, d2) elif isinstance(d1, frozenset): diff = set_diff(d1, d2) return diff class ItemDiff(object): ITEM_NAME = "Item"
def check_generated_output(gen_expected_path, gen_result_path,
                           result_manip=None, program_args=None):
    """
    compares the content of two files,

    is intended to compare a file that was generated during a call of
    Uncrustify with a file that has the expected content

    Parameters
    ----------------------------------------------------------------------------
    :param gen_expected_path: string
        path to a file that will be compared with the generated file

    :param gen_result_path: string
        path to the file that will be generated by Uncrustify

    :param result_manip: lambda OR list or tuple of lambdas
        optional lambda function(s) that will be applied (before the
        comparison) on the content of the generated file,
        the lambda function(s) should accept one string parameter

    :param program_args: tuple of options, or None
        a collection of multiple options used to add extra functionality to
        the script (i.e. auto apply changes or show diffs on command line);
        when None, neither auto-apply nor diff printing is attempted

    :return: bool
    ----------------------------------------------------------------------------
        True or False depending on whether both files have the same content

    >>> check_generated_output("/dev/null", "/dev/null")
    True
    """
    gen_exp_txt = get_file_content(gen_expected_path)
    if gen_exp_txt is None:
        return False

    gen_res_txt = get_file_content(gen_result_path)
    if gen_res_txt is None:
        return False

    # Apply the optional caller-supplied transformation(s) to the generated
    # file's content before comparing.
    if result_manip is not None:
        if type(result_manip) is list or type(result_manip) is tuple:
            for m in result_manip:
                gen_res_txt = m(gen_res_txt)
        else:
            gen_res_txt = result_manip(gen_res_txt)

    if gen_res_txt != gen_exp_txt:
        # Persist the (manipulated) mismatching content for inspection.
        with open(gen_result_path, 'w', encoding="utf-8", newline="") as f:
            f.write(gen_res_txt)

        # Bug fix: program_args defaults to None, but the original accessed
        # program_args.apply / .diff unconditionally -> AttributeError on a
        # mismatch when no options were supplied.
        if program_args is not None and program_args.apply \
                and program_args.auto_output_path:
            # Auto-apply mode treats the new output as the accepted result.
            write_to_output_path(program_args.auto_output_path, gen_res_txt)
            return True
        elif program_args is not None and program_args.diff:
            print("\n************************************")
            print("Problem with %s" % gen_result_path)
            print("************************************")

            file_diff = difflib.ndiff(gen_res_txt.splitlines(True),
                                      gen_exp_txt.splitlines(True))
            for line in file_diff:
                pprint.PrettyPrinter(indent=4).pprint(line)
            return False
        else:
            print("\nProblem with %s" % gen_result_path)
            print("use(gen): '--diff' to find out why %s %s are different"
                  % (gen_result_path, gen_expected_path))
            return False

    # Contents match: the generated file is no longer needed.
    remove(gen_result_path)

    return True
def _diff_text(left, right, verbose=0):
    """Return the explanation for the diff between text or bytes.

    Unless --verbose is used this will skip leading and trailing
    characters which are identical to keep the diff minimal.

    If the input are bytes they will be safely converted to text.
    """
    from difflib import ndiff

    explanation = []

    def escape_for_readable_diff(binary_text):
        """
        Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode.
        This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape
        newlines and carriage returns (#429).
        """
        r = six.text_type(repr(binary_text)[1:-1])
        r = r.replace(r"\n", "\n")
        r = r.replace(r"\r", "\r")
        return r

    if isinstance(left, bytes):
        left = escape_for_readable_diff(left)
    if isinstance(right, bytes):
        right = escape_for_readable_diff(right)
    if verbose < 1:
        # Find the length of the common leading prefix...
        i = 0  # just in case left or right has zero length
        for i in range(min(len(left), len(right))):
            if left[i] != right[i]:
                break
        # ...and only skip it when it is long enough to matter (the -10
        # keeps a little context around the first difference).
        if i > 42:
            i -= 10  # Provide some context
            explanation = [
                u"Skipping %s identical leading characters in diff, use -v to show" % i
            ]
            left = left[i:]
            right = right[i:]
        # Same idea for the common trailing suffix, but only when both
        # sides have equal length after the prefix strip.
        # NOTE(review): at i == 0 `left[-i]` is `left[0]`, not the last
        # character — looks like an off-by-one quirk; confirm upstream
        # before changing, the -v escape hatch masks it in practice.
        if len(left) == len(right):
            for i in range(len(left)):
                if left[-i] != right[-i]:
                    break
            if i > 42:
                i -= 10  # Provide some context
                explanation += [
                    u"Skipping {} identical trailing "
                    u"characters in diff, use -v to show".format(i)
                ]
                left = left[:-i]
                right = right[:-i]
    keepends = True
    if left.isspace() or right.isspace():
        # Whitespace-only strings diff unreadably; show their reprs instead.
        left = repr(str(left))
        right = repr(str(right))
        explanation += [
            u"Strings contain only whitespace, escaping them using repr()"
        ]
    explanation += [
        line.strip("\n")
        for line in ndiff(left.splitlines(keepends), right.splitlines(keepends))
    ]
    return explanation
def difflib_ndiff():
    """Print an ndiff of the module-level text1_lines vs text2_lines."""
    print('\n'.join(difflib.ndiff(text1_lines, text2_lines)))
def test_tempfiles():
    """Verify gffutils' _keep_tempfiles behavior, including custom suffixes
    and parallel database creation.

    BUG FIX: the AssertionError handler at the bottom referenced an
    undefined name ``this`` (NameError); it now diffs the actual file
    contents against ``expected``.
    """
    # specify a writeable temp dir for testing
    tempdir = '/tmp/gffutils-test'

    def clean_tempdir():
        # Point the tempfile module at our dir and reset its contents.
        tempfile.tempdir = tempdir
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
        os.makedirs(tempdir)

    clean_tempdir()

    # default keep_tempfiles=False should give us nothing.
    db = gffutils.create_db(
        gffutils.example_filename('FBgn0031208.gtf'), ':memory:')
    assert len(os.listdir(tempdir)) == 0

    # adding keep_tempfiles=True should give us 1 tempfile for gtf...
    db = gffutils.create_db(
        gffutils.example_filename('FBgn0031208.gtf'), ':memory:',
        _keep_tempfiles=True)
    filelist = os.listdir(tempdir)
    assert len(filelist) == 1, filelist
    assert filelist[0].endswith('.gffutils')

    # ...and another one for gff. This time, make sure the suffix
    db = gffutils.create_db(
        gffutils.example_filename('FBgn0031208.gff'), ':memory:',
        _keep_tempfiles=True)
    filelist = os.listdir(tempdir)
    assert len(filelist) == 2, filelist
    for i in filelist:
        assert i.endswith('.gffutils')

    # OK, now delete what we have so far...
    clean_tempdir()

    # Make sure that works for custom suffixes
    db = gffutils.create_db(
        gffutils.example_filename('FBgn0031208.gtf'), ':memory:',
        _keep_tempfiles='.GTFtmp')
    filelist = os.listdir(tempdir)
    assert len(filelist) == 1, filelist
    assert filelist[0].endswith('.GTFtmp')

    clean_tempdir()
    # NOTE(review): this block still uses the .gtf example file even though
    # the suffix says GFF — presumably only the suffix matters here; confirm.
    db = gffutils.create_db(
        gffutils.example_filename('FBgn0031208.gtf'), ':memory:',
        _keep_tempfiles='.GFFtmp')
    filelist = os.listdir(tempdir)
    assert len(filelist) == 1, filelist
    assert filelist[0].endswith('.GFFtmp')

    # Test n parallel instances of gffutils across PROCESSES processes.
    #
    # Note that travis-ci doesn't like it when you use multiple cores, so the
    # .travis.yml file sets this to 1. This also means that
    # 1) `n` shouldn't be too large because travis-ci will run one at a time,
    # but more importantly,
    # 2) this will only truly test parallel processes on a local machine with
    # multiple cpus.
    clean_tempdir()

    # .travis.yml sets the PROCESSES env var; otherwise use all available.
    PROCESSES = int(os.environ.get("PROCESSES", multiprocessing.cpu_count()))
    pool = multiprocessing.Pool(PROCESSES)
    n = 100
    res = pool.map(make_db, range(n))
    assert sorted(list(res)) == list(range(n))
    filelist = os.listdir(tempdir)
    assert len(filelist) == n, len(filelist)

    expected = dedent("""\
    FBtr0300689 chr2L 7529 9484 + transcript 4681 {"transcript_id":["FBtr0300689"],"gene_id":["FBgn0031208"]}
    FBgn0031208 chr2L 7529 9484 + gene 4681 {"gene_id":["FBgn0031208"]}
    FBtr0300690 chr2L 7529 9484 + transcript 4681 {"transcript_id":["FBtr0300690"],"gene_id":["FBgn0031208"]}
    transcript_Fk_gene_1 chr2L 10000 11000 - transcript 4681 {"transcript_id":["transcript_Fk_gene_1"],"gene_id":["Fk_gene_1"]}
    Fk_gene_1 chr2L 10000 11000 - gene 4681 {"gene_id":["Fk_gene_1"]}
    transcript_Fk_gene_2 chr2L 11500 12500 - transcript 4681 {"transcript_id":["transcript_Fk_gene_2"],"gene_id":["Fk_gene_2"]}
    Fk_gene_2 chr2L 11500 12500 - gene 4681 {"gene_id":["Fk_gene_2"]}
    """)

    def matches_expected(fn):
        """
        Python 3 has unpredictable dictionary ordering. This function
        checks the *semantic* similarity of lines by parsing the attributes
        into a dictionary.
        """
        exp_features = expected.splitlines(True)
        new_features = list(open(fn))
        assert len(exp_features) == len(new_features)
        for expline, newline in zip(exp_features, new_features):
            exp_toks = expline.split()
            new_toks = newline.split()
            # All columns except the JSON attributes must match verbatim;
            # the attributes are compared as parsed dicts (order-insensitive).
            assert exp_toks[:-1] == new_toks[:-1]
            assert json.loads(exp_toks[-1]) == json.loads(new_toks[-1])

    # make sure that each of the `n` files matches the expected output.
    for fn in filelist:
        fn = os.path.join(tempdir, fn)
        try:
            matches_expected(fn)
        except AssertionError:
            # Fixed: diff the real file contents (was an undefined `this`).
            with open(fn) as fh:
                observed = fh.read()
            print(''.join(difflib.ndiff(expected.splitlines(True),
                                        observed.splitlines(True))))
            raise

    clean_tempdir()
def diference(case_a, case_b):
    """Return only the changed lines (added/removed/hints) from an ndiff of the inputs."""
    changed = []
    for delta in difflib.ndiff(case_a, case_b):
        # ndiff prefixes unchanged lines with a space; keep everything else.
        if delta[0] != ' ':
            changed.append(delta)
    return changed
def _pdiff(str1, str2): # For debugging from difflib import ndiff print(''.join(ndiff(str1.splitlines(True), str2.splitlines(True))))
def update_event(self, inp=-1):
    """Set output 0 to the ndiff of inputs 0 and 1, using inputs 2 and 3 as the line/char junk filters."""
    # Gather the four node inputs in order (same evaluation order as before).
    first = self.input(0)
    second = self.input(1)
    linejunk = self.input(2)
    charjunk = self.input(3)
    self.set_output_val(0, difflib.ndiff(first, second, linejunk, charjunk))
def render_text_diff(text1, text2):
    """Render a line-by-line diff of the two texts as a Markdown ```diff fenced block."""
    delta_lines = difflib.ndiff(text1.splitlines(), text2.splitlines())
    body = "\n".join(delta_lines)
    return "```diff\n{}\n```\n".format(body)