Example #1
1
def compare_site_and_disk(config, diff, site, docs, push, get):
    """Compare every tracked document on disk against its wiki page.

    When a page differs, optionally print a unified diff and then sync
    one way: ``get`` copies the site text to disk, ``push`` saves the
    disk text to the site.  Push and get are mutually exclusive by design.
    """
    for name in docs:
        path = "./" + name + ".mediawiki"

        with open(path) as handle:
            disk_text = handle.read()
        disk_hash = hashlib.new(config["hashalg"])
        disk_hash.update(disk_text.encode("utf-8"))

        page = site.Pages[name]
        # A trailing newline is appended so the page hashes the same as a
        # conventionally newline-terminated file.
        site_bytes = page.text().encode("utf-8") + "\n".encode("utf-8")
        site_hash = hashlib.new(config["hashalg"])
        site_hash.update(site_bytes)

        if disk_hash.digest() == site_hash.digest():
            continue

        print("Page {} differ.".format(name))
        if diff:
            # Push-side diff by default; reverse the operands for a get.
            if get:
                pair = (site_bytes.splitlines(1), disk_text.splitlines(1))
            else:
                pair = (disk_text.splitlines(1), site_bytes.splitlines(1))
            sys.stdout.writelines(
                difflib.unified_diff(blist2str(pair[0]), blist2str(pair[1])))

        # Sync in exactly one direction, never both.
        if get:
            print("Getting {} from site to disk.".format(name))
            with open(path, "w") as handle:
                handle.write(site_bytes.decode("utf-8"))
        elif push:
            check_repos_is_current(config)
            print("Pushing {} from disk to site.".format(name))
            page.save(disk_text, summary=u"Automated sync from {}".format(config["repos"]))
Example #2
0
def testrr_helper (filee, jalangi, norm_fn, record_fn, instrument_fn=instrument):
    try:
        shutil.rmtree("jalangi_tmp")
    except: pass
    os.mkdir("jalangi_tmp")
    os.mkdir("jalangi_tmp/out")
    os.putenv("JALANGI_HOME", jalangi.get_home())
    os.chdir("jalangi_tmp")
    (instrumented_f, out) = instrument_fn(os.path.join(os.pardir,filee), jalangi=jalangi)
    try:                    # Ignore failures on first iteration
        os.remove("inputs.js")
        shutil.copy("jalangi_inputs{}.js".format(i), "inputs.js")
    except:
        pass
    if not os.path.isfile("inputs.js"):
        util.mkempty("inputs.js")
    print "---- Running without instrumentation ----"
    try:
        os.remove("jalangi_trace")
    except:
        pass
    norm = norm_fn(os.path.join(os.pardir,filee + ".js"), jalangi=jalangi)
    with open("jalangi_normal", "w") as normfile:
        normfile.write(norm)
    print norm
    print "---- Recording execution of {} ----".format(filee)
    rec = record_fn(os.path.join(os.pardir,filee), instrumented_f)
    with open("jalangi_record", "w") as recfile:
        recfile.write(rec)
    print rec
    print "---- Replaying {} ----".format(filee)
    os.putenv("JALANGI_MODE", "replay")
    os.putenv("JALANGI_ANALYSIS", "none")
    rep = replay()
    with open("jalangi_replay", "w") as repfile:
        repfile.write(rep)
    print rep
    try:
	    wcl = util.count_lines("jalangi_trace")
	
	    with open("../jalangi_test_results", 'a') as f:
		f.write("# of lines in jalangi_trace for {}: {}".format(filee,str(wcl)))
		f.write("\n")
    except: pass	
    if norm != rep: #TODO: Factor out this.
        print "{}.js failed".format(filee)
        import difflib
        with open("../jalangi_test_results", 'a') as f:
            f.write("\n")
            for line in difflib.unified_diff(norm.splitlines(1), rec.splitlines(1), fromfile='normal.{}'.format(filee), tofile='replay.{}'.format(filee)):
                f.write(line)
    if rec != rep:
        print "{}.js failed".format(filee)
        import difflib
        with open("../jalangi_test_results", 'a') as f:
            f.write("\n")
            for line in difflib.unified_diff(rec.splitlines(1), rep.splitlines(1), fromfile='record.{}'.format(filee), tofile='replay.{}'.format(filee)):
                f.write(line)
        
    util.move_coverage(jalangi)
Example #3
0
	def unusedClangcheckFile(self, path, ext):
		"""Check one source file against clang-format.

		Returns self.SKIP for non-source extensions, True when the file is
		already conformant, and False on style or unicode problems (after
		printing a diagnostic and, for style, a whitespace-highlighted diff).
		"""
		if ext not in self.SRCEXTS:
			return self.SKIP

		with open(path, 'rt') as source:
			try:
				raw = source.read()
			except UnicodeDecodeError as e:
				print('File "{}" has invalid unicode: {}'.format(path, e))
				return False
			current_lines = raw.splitlines(keepends=True)

		# Pipe the file through clang-format using the project .clang-format.
		proc = subprocess.Popen(
			('clang-format', '-style=file'),
			stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		out, err = proc.communicate(raw.encode())

		assert proc.returncode == 0, "Return code {}".format(proc.returncode)
		assert len(err) == 0, "clang-format error: {}".format(err)
		try:
			formatted_lines = out.decode().splitlines(keepends=True)
		except UnicodeDecodeError as e:
			print('File "{}" has invalid unicode during formatting: {}'.format(path, e))
			return False

		if not list(difflib.unified_diff(current_lines, formatted_lines,
				'current', 'formatted')):
			return True

		print('File "{}" has invalid style'.format(path))
		for line in difflib.unified_diff(
				self.hightLightSpaces(current_lines),
				self.hightLightSpaces(formatted_lines),
				'current', 'formatted'):
			print(line, end='')
		return False
Example #4
0
    def test_same_output_with_different_engine_incr(self):
        """Reference implementation should match mustache and mako implem"""

        # Configure gitchangelog to render with the mustache engine, then
        # regenerate the incremental changelog for the 0.0.2..0.0.3 range.
        w("""cat <<EOF > .gitchangelog.rc

output_engine = mustache('restructuredtext')

EOF
        """)
        changelog = w('$tprog show 0.0.2..0.0.3')
        # On mismatch the assertion message embeds a unified diff so the
        # failing output is visible directly in the test report.
        self.assertEqual(
            changelog, self.INCR_REFERENCE_002_003,
            msg="Mustache output should match our reference output... "
            "diff of changelogs:\n%s"
            % '\n'.join(difflib.unified_diff(self.INCR_REFERENCE_002_003.split("\n"),
                                             changelog.split("\n"),
                                             lineterm="")))

        # Same check with the mako template engine.
        w("""cat <<EOF > .gitchangelog.rc

output_engine = makotemplate('restructuredtext')

EOF
        """)
        changelog = w('$tprog show 0.0.2..0.0.3')
        self.assertEqual(
            changelog, self.INCR_REFERENCE_002_003,
            msg="Mako output should match our reference output... "
            "diff of changelogs:\n%s"
            % '\n'.join(difflib.unified_diff(self.INCR_REFERENCE_002_003.split("\n"),
                                             changelog.split("\n"),
                                             lineterm="")))
Example #5
0
    def show_diff(self, chart, installed_chart,
                  installed_values, target_chart, target_values):
        '''
        Log a unified diff of the installed chart/values vs our intention
        and report whether anything differs.

        TODO(alanmeadows): This needs to be rewritten to produce better
        unified diff output and avoid the use of print
        '''

        old_chart_lines = installed_chart.SerializeToString().split('\n')
        new_chart_lines = target_chart.split('\n')
        chart_diff = list(difflib.unified_diff(old_chart_lines,
                                               new_chart_lines))
        if chart_diff:
            LOG.info("Chart Unified Diff (%s)", chart.release_name)
            for line in chart_diff:
                LOG.debug(line)

        old_values_lines = installed_values.split('\n')
        new_values_lines = yaml.safe_dump(target_values).split('\n')
        values_diff = list(difflib.unified_diff(old_values_lines,
                                                new_values_lines))
        if values_diff:
            LOG.info("Values Unified Diff (%s)", chart.release_name)
            for line in values_diff:
                LOG.debug(line)

        return bool(chart_diff) or bool(values_diff)
Example #6
0
    def runTest(self, fileName, sedCmd, changeCmd = None):
        """Patch-application test (Python 2): diff the original file against
        its sed-edited form, apply that diff to a (possibly independently
        changed) target via patch(), and print any divergence from the
        expected merged result.

        fileName  -- path of the file to transform
        sedCmd    -- sed script producing the "new" version
        changeCmd -- optional second sed script simulating an independent
                     edit; when given, the two scripts must commute
        """
        # "new" = fileName transformed by sedCmd (shelling out to sed).
        new = os.popen("sed '%s' < %s" % (sedCmd, fileName)).readlines()

        orig = open(fileName).readlines()

        if changeCmd:
            # target: the file with only the independent change applied.
            target = os.popen("sed '%s' < %s" % (changeCmd, fileName)).readlines()
            # final/check: both scripts applied in each order; if the orders
            # disagree the test setup itself is invalid.
            final = os.popen("sed '%s' < %s | sed '%s'" % (sedCmd, fileName, 
                             changeCmd)).readlines()
            check = os.popen("sed '%s' < %s | sed '%s'" % (changeCmd, fileName, 
                             sedCmd)).readlines()
            if "".join(final) != "".join(check):
                self.fail("sed scripts have conflicting results")
        else:
            target = orig
            final = new

        diff = difflib.unified_diff(orig, new)
        # advance past header
        diff.next()
        diff.next()
        d = [ x for x in diff ]

        # Apply the hunks to the target; patch() returns the merged lines
        # and any conflicts.
        (new2, conflicts) = patch(target, d)

        # If the patched output differs from the expected merge, the diff
        # below is non-empty and its first .next() succeeds.
        diff = difflib.unified_diff(final, new2)
        try:
            diff.next()
            print "%s '%s' failed:" % (fileName, sedCmd)
            diff.next()
            for line in diff:
                line = line[:-1]
                print "\t%s" % line
        except StopIteration:
            # Empty diff: patched result matches the expectation.
            pass
Example #7
0
def main():
    """Run the selected psycopg2 test suite opt.nruns times, dump debug and
    object snapshots after each run, then print a diff of the last two runs.

    Returns 1 when the final two debug dumps differ, otherwise 0.
    """
    opt = parse_args()

    import psycopg2.tests
    suite = psycopg2.tests
    if opt.suite:
        suite = getattr(suite, opt.suite)

    sys.stdout.write("test suite %s\n" % suite.__name__)

    for run_no in range(1, opt.nruns + 1):
        sys.stdout.write("test suite run %d of %d\n" % (run_no, opt.nruns))
        unittest.TextTestRunner().run(suite.test_suite())
        dump(run_no, opt)

    def _diff_last_two(pattern):
        # Diff the dumps of the two final runs, writing to stdout.
        prev = open(pattern % (opt.nruns - 1)).readlines()
        last = open(pattern % opt.nruns).readlines()
        sys.stdout.writelines(difflib.unified_diff(
            prev, last, "run %d" % (opt.nruns - 1), "run %d" % opt.nruns))
        return prev, last

    f1, f2 = _diff_last_two('debug-%02d.txt')
    rv = 1 if f1 != f2 else 0

    if opt.objs:
        _diff_last_two('objs-%02d.txt')

    return rv
Example #8
0
def file_diff(filename1, filename2, lineno=None, context=None):
    """Return a unified diff (filename2 -> filename1) of two files, which may
    be compressed (see open_possibly_compressed_file).

    lineno  -- when given, only a window of lines around this line number is
               compared
    context -- half-width of that window (default 3); ignored without lineno

    Leading/trailing whitespace on every line is ignored for the comparison.
    """
    def _stripped_lines(filename):
        # Read all lines with surrounding whitespace removed, closing the
        # handle even if readlines() raises.
        handle = open_possibly_compressed_file(filename)
        try:
            return [line.strip() for line in handle.readlines()]
        finally:
            handle.close()

    lines1 = _stripped_lines(filename1)
    lines2 = _stripped_lines(filename2)

    if lineno is None:
        region1, region2 = lines1, lines2
    else:
        if context is None:
            context = 3
        # Clamp the window to both files' bounds.
        start = max(lineno - context, 0)
        stop = min(lineno + context, len(lines1), len(lines2))
        region1, region2 = lines1[start:stop], lines2[start:stop]

    # The inputs were stripped, so unified_diff yields unterminated lines;
    # re-add the newlines.  A single join avoids the quadratic += loop.
    return "".join(
        line + "\n"
        for line in difflib.unified_diff(
            region2, region1, fromfile=filename2, tofile=filename1))
Example #9
0
def compare_site_and_disk(config, diff, site, docs, push, get):
    ''' Does both compare and push/get since it's quite similar code-wide.

    For each doc name, hash the on-disk .mediawiki file and the wiki page
    text; when they differ, optionally print a unified diff and then sync
    one way (get: site -> disk, push: disk -> site, never both).

    NOTE(review): this version feeds str objects straight into
    hashlib.update() and concatenates the encoded page text with a str
    newline, so it appears to target Python 2 -- confirm before running
    under Python 3.
    '''
    for f in docs:
        full_path = './'+f+'.mediawiki'
        # Hash of the local copy.
        m_ondisk = hashlib.new(config['hashalg'])
        with open(full_path) as fd:
            on_disk = fd.read()
        m_ondisk.update(on_disk)

        # Hash of the wiki page; a trailing newline is appended so the page
        # compares equal to a newline-terminated file.
        m_onsite = hashlib.new(config['hashalg'])
        page = site.Pages[f]
        on_site = page.text().encode('utf-8')+'\n'
        m_onsite.update(on_site)

        if m_ondisk.digest() != m_onsite.digest():
            print("Page {} differ.".format(f))
            if (diff):
                #Just display diff in the correct order, we default to push-side-diff
                if get:
                    mydiff = difflib.unified_diff(on_site.splitlines(1), on_disk.splitlines(1))
                else:
                    mydiff = difflib.unified_diff(on_disk.splitlines(1), on_site.splitlines(1))

                sys.stdout.writelines(mydiff)

            #Now push or get whatever is needed to sync
            #But never do both push and get at once, would make no sense
            if get:
                print("Getting {} from site to disk.".format(f))
                with open(full_path, 'w') as fd:
                    fd.write(on_site)
            elif push:
                print("Pushing {} from disk to site.".format(f))
                page.save(on_disk, summary=u'Automated sync from {}'.format(config['repos']))
Example #10
0
def generate_header_diff_list(pre_header_dict, header_dict):
    """Build per-section unified diffs between two header snapshots.

    pre_header_dict -- previous snapshot, or a falsy value when there is none
    header_dict     -- current snapshot (must not be None); both snapshots
                       carry 'state_machine_id' and a 4-element 'header_list'
                       whose entries hold 'hdr_info' line lists

    Returns a list of dicts, one per section in protocol order, each with a
    'name' and a lazy 'hdr_diff' unified-diff generator (lineterm='').
    """
    section_names = ('Client Request', 'Server Request',
                     'Server Response', 'Client Response')

    # Only diff against the previous snapshot when it belongs to the same
    # state machine; otherwise every section diffs against empty content.
    same_txn = bool(pre_header_dict) and \
        pre_header_dict['state_machine_id'] == header_dict['state_machine_id']

    header_diff_list = []
    for idx, name in enumerate(section_names):
        new_hdr = header_dict['header_list'][idx]['hdr_info']
        old_hdr = pre_header_dict['header_list'][idx]['hdr_info'] if same_txn else []
        header_diff_list.append({
            'name': name,
            'hdr_diff': difflib.unified_diff(old_hdr, new_hdr, lineterm=''),
        })
    return header_diff_list
Example #11
0
def get_diff() -> str:
    """Get a HTML diff for the old config files."""
    def _significant_lines(filename: str) -> typing.MutableSequence[str]:
        # Collect the non-blank, non-comment lines of one old config file.
        lines = []  # type: typing.MutableSequence[str]
        path = os.path.join(standarddir.config(), filename)
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip() and not line.startswith('#'):
                    lines.append(line.rstrip())
        return lines

    conf_diff = '\n'.join(difflib.unified_diff(
        OLD_CONF.lstrip().splitlines(),
        _significant_lines('qutebrowser.conf')))
    key_diff = '\n'.join(difflib.unified_diff(
        OLD_KEYS_CONF.lstrip().splitlines(),
        _significant_lines('keys.conf')))

    # pylint: disable=no-member
    # WORKAROUND for https://bitbucket.org/logilab/pylint/issue/491/
    lexer = pygments.lexers.DiffLexer()
    formatter = pygments.formatters.HtmlFormatter(
        full=True, linenos='table',
        title='Diffing pre-1.0 default config with pre-1.0 modified config')
    # pylint: enable=no-member
    return pygments.highlight(conf_diff + key_diff, lexer, formatter)
Example #12
0
def context_dict(revision, ready_for_l10n=False, revision_approved=False):
    """Return a dict that fills in the blanks in KB notification templates.

    The 'diff' entry compares *revision* against the most relevant older
    revision: the previous ready-for-localization revision, the previous
    approved revision, or the document's current revision -- in that order
    of precedence.  Both the diff and the fulltext are sanitized with
    clean().
    """
    def _label(doc, rev_id):
        # '[locale] title #id' header used for both sides of the diff.
        return u'[%s] %s #%s' % (doc.locale, doc.title, rev_id)

    def _sanitized_diff(old_rev, new_rev):
        # Unified diff of the two revisions' content, HTML-sanitized.
        doc = new_rev.document
        return clean(
            u''.join(
                difflib.unified_diff(
                    old_rev.content.splitlines(1),
                    new_rev.content.splitlines(1),
                    fromfile=_label(doc, old_rev.id),
                    tofile=_label(doc, new_rev.id))
                ),
            ALLOWED_TAGS, ALLOWED_ATTRIBUTES)

    document = revision.document
    diff = ''
    l10n = document.revisions.filter(is_ready_for_localization=True)
    approved = document.revisions.filter(is_approved=True)
    if ready_for_l10n and l10n.count() > 1:
        diff = _sanitized_diff(l10n.order_by('-created')[1], revision)
    elif revision_approved and approved.count() > 1:
        diff = _sanitized_diff(approved.order_by('-created')[1], revision)
    elif document.current_revision is not None:
        diff = _sanitized_diff(document.current_revision, revision)

    return {
        'document_title': document.title,
        'creator': revision.creator,
        'host': Site.objects.get_current().domain,
        'diff': diff,
        'fulltext': clean(revision.content, ALLOWED_TAGS, ALLOWED_ATTRIBUTES)}
Example #13
0
 def __str__(self):
   """Render the base result; in verbose mode append unified diffs of the
   stored content files, otherwise just list their paths."""
   def _file_lines(path):
     # Read and close promptly -- the original leaked the open handles.
     with open(path) as handle:
       return handle.read().split("\n")

   ret = TestResult.__str__(self)
   if self.verbose:
     if self.content and self.content_old:
       diff = difflib.unified_diff(_file_lines(self.content),
                                   _file_lines(self.content_old),
                                   "Non-Tor1", "Non-Tor2",
                                   lineterm="")
       for line in diff:
         ret += line + "\n"
     if self.content and self.content_exit:
       diff = difflib.unified_diff(_file_lines(self.content),
                                   _file_lines(self.content_exit),
                                   "Non-Tor", "Exit",
                                   lineterm="")
       for line in diff:
         ret += line + "\n"
   else:
     if self.content:
       ret += " "+self.content+"\n"
     if self.content_old:
       ret += " "+self.content_old+"\n"
     if self.content_exit:
       ret += " "+self.content_exit+"\n"
   return ret
Example #14
0
    def update(self, content):
        """Update the vim buffer with the new content.

        Computes a unified diff between self.linelist (the lines currently
        mirrored in the buffer) and *content*, then replays every hunk as
        'insert'/'remove' editing commands via self.send_function, keeping
        self.len and self.linelist in step with the editor.
        """
        self.dirty = False
        if not self.buf.registered:
            return

        # build the list of the offsets of the beginning of each line
        newlist = content.splitlines(1)
        num_lines = len(newlist)
        offsets = [offset for offset in misc.offset_gen(newlist)]

        started = False
        hunk_a = hunk_b = 0  # remaining old/new-side line counts of the current hunk
        send_function = self.send_function
        try:
            if logger.level <= logging.DEBUG:
                for line in difflib.unified_diff(self.linelist, newlist):
                    debug(line.strip('\n'))

            for line in difflib.unified_diff(self.linelist, newlist):
                if not started:
                    # Skip the '---'/'+++' file header; hunks follow '+++'.
                    if line.startswith('+++'):
                        started = True
                    continue
                if hunk_a == hunk_b == 0:
                    # Both counters exhausted: expect an '@@ ... @@' control
                    # line opening the next hunk.
                    matchobj = re_unidiff.match(line.strip())
                    if matchobj:
                        lnum = int(matchobj.group('lnum'))
                        hunk_a = int(matchobj.group('a'))
                        hunk_b = int(matchobj.group('b'))
                    else:
                        assert False, "missing unified-diff control line"
                    continue
                if line[0] == ' ':
                    # Context line: present on both sides, just advance.
                    lnum += 1
                    hunk_a -= 1
                    hunk_b -= 1
                elif line[0] == '+':
                    # Added line: insert its text at the byte offset of lnum.
                    delta = len(line) - 1
                    send_function('insert',
                        '%s %s' % (str(offsets[lnum-1]), misc.quote(line[1:])))
                    self.len += delta
                    lnum += 1
                    hunk_b -= 1
                elif line[0] == '-':
                    # Removed line: delete delta bytes at that offset.
                    delta = len(line) - 1
                    if lnum <= num_lines:
                        self.remove(offsets[lnum-1], delta)
                    else:
                        # removing (one of) the last line(s)
                        self.remove(len(content), delta)
                    self.len -= delta
                    hunk_a -= 1
                else:
                    assert False, "unknown unified-diff line type"
        finally:
            self.terminate_editing()

        self.linelist = newlist
Example #15
0
def system(name, **kwargs):
    '''
    Ensure that global network settings are configured properly.

    name
        Custom name to represent this configuration change.

    kwargs
        The global parameters for the system.

    '''

    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Global network settings are up to date.'
    }
    apply_net_settings = False
    kwargs['test'] = __opts__['test']
    # Build global network settings
    try:
        old = __salt__['ip.get_network_settings']()
        # NOTE(review): kwargs is passed as a single positional dict here,
        # not expanded with ** -- confirm ip.build_network_settings expects
        # that calling convention.
        new = __salt__['ip.build_network_settings'](kwargs)
        if __opts__['test']:
            # Dry run: report what would change without applying anything.
            if old == new:
                return ret
            if not old and new:
                ret['result'] = None
                ret['comment'] = 'Global network settings are set to be added.'
                return ret
            elif old != new:
                # Record the pending change as a unified diff.
                diff = difflib.unified_diff(old, new)
                ret['result'] = None
                ret['comment'] = 'Global network settings are set to be updated.'
                ret['changes']['network_settings'] = ''.join(diff)
                return ret
        if not old and new:
            apply_net_settings = True
            ret['changes']['network_settings'] = 'Added global network settings.'
        elif old != new:
            diff = difflib.unified_diff(old, new)
            apply_net_settings = True
            ret['changes']['network_settings'] = ''.join(diff)
    except AttributeError as error:
        # NOTE(review): error.message is Python 2 only -- under Python 3 this
        # line itself raises AttributeError; confirm the target interpreter.
        ret['result'] = False
        ret['comment'] = error.message
        return ret

    # Apply global network settings
    if apply_net_settings:
        try:
            __salt__['ip.apply_network_settings'](kwargs)
        except AttributeError as error:
            ret['result'] = False
            ret['comment'] = error.message
            return ret

    return ret
Example #16
0
def run(d):
    """Execute one converted-bedtool test case and assert its output.

    *d* is a test-case dict carrying the method name, the input bedtool, a
    map of argument converters, the keyword arguments and the expected
    output.  On a mismatch, a detailed report (call, outputs, diffs, and a
    version with tabs/newlines made visible) is printed and the
    AssertionError re-raised.
    """
    method = d['method']
    kwargs = d['kw'].copy()
    expected = d['test_case']['expected']
    convert = d['convert']

    # Convert the primary bedtool and every converted keyword argument.
    bedtool = converters[convert.pop('bedtool')](
        pybedtools.example_bedtool(d['bedtool']))
    for arg_name, conv_name in convert.items():
        kwargs[arg_name] = converters[conv_name](
            pybedtools.example_bedtool(kwargs[arg_name]))

    result = getattr(bedtool, method)(**kwargs)
    res = str(result)
    expected = fix(expected)
    try:
        assert res == expected

    except AssertionError:
        print(result.fn)
        print('Method call:')
        call_args = ', '.join(
            ('%s=%s' % (key, val)).strip() for key, val in kwargs.items())
        print('BedTool.{}({})'.format(method, call_args))
        for label, text in (('Got:', res), ('Expected:', expected)):
            print(label)
            print(text)
        print('Diff:')
        for line in difflib.unified_diff(res.splitlines(1),
                                         expected.splitlines(1)):
            print(line, end=' ')

        # Make tabs and newlines visible
        spec_res = res.replace('\t', '\\t').replace('\n', '\\n\n')
        spec_expected = expected.replace('\t', '\\t').replace('\n', '\\n\n')

        print('Showing special characters:')
        for label, text in (('Got:', spec_res), ('Expected:', spec_expected)):
            print(label)
            print(text)
        print('Diff:')
        for line in difflib.unified_diff(spec_res.splitlines(1),
                                         spec_expected.splitlines(1)):
            print(line, end=' ')
        raise
Example #17
0
def show_diff(old_opt, old_label, new_opt, new_label):
	"""Write a unified diff of the two option lists to stdout, sorting both
	sides first when --sort was requested."""
	left, right = old_opt, new_opt
	if args.sort:
		left, right = sorted(old_opt), sorted(new_opt)
	sys.stdout.writelines(difflib.unified_diff(
		left, right, fromfile=old_label, tofile=new_label))
Example #18
0
def routes(name, **kwargs):
    '''
    Manage network interface static routes.

    name
        Interface name to apply the route to.

    kwargs
        Named routes
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Interface {0} routes are up to date.'.format(name),
    }
    apply_routes = False
    kwargs['test'] = __opts__['test']
    # Build interface routes
    try:
        old = __salt__['ip.get_routes'](name)
        new = __salt__['ip.build_routes'](name, **kwargs)
        if __opts__['test']:
            # Dry run: report what would change without applying anything.
            if old == new:
                return ret
            if not old and new:
                ret['result'] = None
                ret['comment'] = 'Interface {0} routes are set to be added.'.format(name)
                return ret
            elif old != new:
                # Record the pending change as a unified diff.
                diff = difflib.unified_diff(old, new)
                ret['result'] = None
                ret['comment'] = 'Interface {0} routes are set to be updated.'.format(name)
                ret['changes']['network_routes'] = ''.join(diff)
                return ret
        if not old and new:
            apply_routes = True
            ret['changes']['network_routes'] = 'Added interface {0} routes.'.format(name)
        elif old != new:
            diff = difflib.unified_diff(old, new)
            apply_routes = True
            ret['changes']['network_routes'] = ''.join(diff)
    except AttributeError as error:
        # NOTE(review): error.message is Python 2 only -- under Python 3 this
        # line itself raises AttributeError; confirm the target interpreter.
        ret['result'] = False
        ret['comment'] = error.message
        return ret

    # Apply interface routes
    if apply_routes:
        try:
            __salt__['ip.apply_network_settings'](**kwargs)
        except AttributeError as error:
            ret['result'] = False
            ret['comment'] = error.message
            return ret

    return ret
Example #19
0
 def diff(self):
     """Return the concatenated unified diffs for every changed file.

     self.change_dct maps old_path -> (new_path, old_lines, new_lines).
     When old_path is set, the diff carries the from/to file labels;
     otherwise a bare, unlabelled diff is produced.
     """
     texts = []
     for old_path, (new_path, old_l, new_l) in self.change_dct.items():
         # BUG FIX: the branches were inverted -- the labelled diff was
         # produced only when old_path was empty, and vice versa.
         if old_path:
             udiff = difflib.unified_diff(old_l, new_l, old_path, new_path)
         else:
             udiff = difflib.unified_diff(old_l, new_l)
         texts.append('\n'.join(udiff))
     return '\n'.join(texts)
def check_lexer(lx, fn):
    """Round-trip one example file through lexer *lx*: the concatenated token
    values must reproduce the input text exactly, no Error tokens may be
    produced, and (when STORE_OUTPUT is set) the token stream must match the
    pickled output of a previous run.  Timing stats are recorded in STATS.
    """
    absfn = os.path.join(TESTDIR, 'examplefiles', fn)
    fp = open(absfn, 'rb')
    try:
        text = fp.read()
    finally:
        fp.close()
    # Normalize line endings and ensure exactly one trailing newline.
    text = text.replace(b'\r\n', b'\n')
    text = text.strip(b'\n') + b'\n'
    try:
        text = text.decode('utf-8')
        # Drop a UTF-8 BOM if present.
        if text.startswith(u'\ufeff'):
            text = text[len(u'\ufeff'):]
    except UnicodeError:
        # Fall back to latin1, which never fails to decode.
        text = text.decode('latin1')
    ntext = []
    tokens = []
    import time
    t1 = time.time()
    for type, val in lx.get_tokens(text):
        ntext.append(val)
        assert type != Error, \
            'lexer %s generated error token for %s: %r at position %d' % \
            (lx, absfn, val, len(u''.join(ntext)))
        tokens.append((type, val))
    t2 = time.time()
    # (input length, total ms, ms per character)
    STATS[os.path.basename(absfn)] = (len(text),
                                      1000 * (t2 - t1), 1000 * (t2 - t1) / len(text))
    if u''.join(ntext) != text:
        # Lossy lexing: show where the reconstruction diverges.
        print('\n'.join(difflib.unified_diff(u''.join(ntext).splitlines(),
                                             text.splitlines())))
        raise AssertionError('round trip failed for ' + absfn)

    # check output against previous run if enabled
    if STORE_OUTPUT:
        # no previous output -- store it
        outfn = os.path.join(TESTDIR, 'examplefiles', 'output', fn)
        if not os.path.isfile(outfn):
            fp = open(outfn, 'wb')
            try:
                pickle.dump(tokens, fp)
            finally:
                fp.close()
            return
        # otherwise load it and compare
        fp = open(outfn, 'rb')
        try:
            stored_tokens = pickle.load(fp)
        finally:
            fp.close()
        if stored_tokens != tokens:
            # Pretty-print both token streams and diff them for the report.
            f1 = pprint.pformat(stored_tokens)
            f2 = pprint.pformat(tokens)
            print('\n'.join(difflib.unified_diff(f1.splitlines(),
                                                 f2.splitlines())))
            assert False, absfn
Example #21
0
def routes(name, **kwargs):
    """
    Manage network interface static routes.

    name
        Interface name to apply the route to.

    kwargs
        Named routes
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": "Interface {0} routes are up to date.".format(name)}
    apply_routes = False
    # Idiom fix (PEP 8 / E713): "x not in d" instead of "not x in d".
    if "test" not in kwargs:
        kwargs["test"] = __opts__.get("test", False)

    # Build interface routes
    try:
        old = __salt__["ip.get_routes"](name)
        new = __salt__["ip.build_routes"](name, **kwargs)
        if kwargs["test"]:
            # Dry run: only report what would change.
            if old == new:
                return ret
            if not old and new:
                ret["result"] = None
                ret["comment"] = "Interface {0} routes are set to be added.".format(name)
                return ret
            elif old != new:
                # Record the pending change as a unified diff.
                diff = difflib.unified_diff(old, new)
                ret["result"] = None
                ret["comment"] = "Interface {0} routes are set to be updated.".format(name)
                ret["changes"]["network_routes"] = "".join(diff)
                return ret
        if not old and new:
            apply_routes = True
            ret["comment"] = "Interface {0} routes added.".format(name)
            ret["changes"]["network_routes"] = "Added interface {0} routes.".format(name)
        elif old != new:
            diff = difflib.unified_diff(old, new)
            apply_routes = True
            ret["comment"] = "Interface {0} routes updated.".format(name)
            ret["changes"]["network_routes"] = "".join(diff)
    except AttributeError as error:
        # NOTE(review): error.message is Python 2 only -- under Python 3 this
        # line itself raises AttributeError; confirm the target interpreter.
        ret["result"] = False
        ret["comment"] = error.message
        return ret

    # Apply interface routes
    if apply_routes:
        try:
            __salt__["ip.apply_network_settings"](**kwargs)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = error.message
            return ret

    return ret
Example #22
0
def _get_debug_error_message(module, old_lines, new_lines):
    """Build a bug-report message describing a diff-parser mismatch."""
    import parso
    # What the parsed module actually produced, versus what was expected.
    actual_lines = split_lines(module.get_code(), keepends=True)
    expected_diff = ''.join(difflib.unified_diff(old_lines, new_lines))
    actual_diff = ''.join(difflib.unified_diff(new_lines, actual_lines))
    return (
        "There's an issue with the diff parser. Please "
        "report (parso v%s) - Old/New:\n%s\nActual Diff (May be empty):\n%s"
        % (parso.__version__, expected_diff, actual_diff)
    )
Example #23
0
def system(name, **kwargs):
    """
    Ensure that global network settings are configured properly.

    name
        Custom name to represent this configuration change.

    kwargs
        The global parameters for the system.

    """

    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Global network settings are up to date.",
    }
    apply_net_settings = False
    kwargs["test"] = __opts__["test"]
    # Build global network settings and compare with the current ones.
    try:
        current = __salt__["ip.get_network_settings"]()
        desired = __salt__["ip.build_network_settings"](**kwargs)
        if __opts__["test"]:
            # Dry run: describe the pending change without applying it.
            if current == desired:
                return ret
            if not current and desired:
                ret["result"] = None
                ret["comment"] = "Global network settings are set to be added."
                return ret
            elif current != desired:
                pending = difflib.unified_diff(current, desired)
                ret["result"] = None
                ret["comment"] = "Global network settings are set to be updated."
                ret["changes"]["network_settings"] = "".join(pending)
                return ret
        if not current and desired:
            apply_net_settings = True
            ret["changes"]["network_settings"] = "Added global network settings."
        elif current != desired:
            pending = difflib.unified_diff(current, desired)
            apply_net_settings = True
            ret["changes"]["network_settings"] = "".join(pending)
    except AttributeError as error:
        ret["result"] = False
        ret["comment"] = error.message
        return ret

    # Apply the settings only when a real change was detected.
    if apply_net_settings:
        try:
            __salt__["ip.apply_network_settings"](**kwargs)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = error.message
            return ret

    return ret
Example #24
0
def main():
    """Regenerate FML patch files by diffing MCP src-base against src-work.

    sys.argv[1] is the MCP root directory, sys.argv[2] the FML directory.
    A .patch file is written (or a stale empty one deleted) for every
    .java file, and finally runtime/commands.py is diffed against its
    backup copy.
    """
    print("Creating patches")
    mcp = os.path.normpath(sys.argv[1])
    fml_dir = os.path.normpath(sys.argv[2])
    patchd = os.path.normpath(os.path.join(fml_dir, 'patches'))
    base = os.path.normpath(os.path.join(mcp, 'src-base'))  # pristine sources
    work = os.path.normpath(os.path.join(mcp, 'src-work'))  # edited sources
    
    for path, _, filelist in os.walk(work, followlinks=True):
        for cur_file in fnmatch.filter(filelist, '*.java'):
            # Mirror the work-relative path into the base tree; normalize
            # separators to '/' so patch headers are platform independent.
            file_base = os.path.normpath(os.path.join(base, path[len(work)+1:], cur_file)).replace(os.path.sep, '/')
            file_work = os.path.normpath(os.path.join(work, path[len(work)+1:], cur_file)).replace(os.path.sep, '/')
            
            # 'U' = universal newlines, so CRLF/LF sources diff identically.
            fromlines = open(file_base, 'U').readlines()
            tolines = open(file_work, 'U').readlines()
            
            patch = ''.join(difflib.unified_diff(fromlines, tolines, '../' + file_base[len(mcp)+1:], '../' + file_work[len(mcp)+1:], '', '', n=3))
            patch_dir = os.path.join(patchd, path[len(work)+1:])
            patch_file = os.path.join(patch_dir, cur_file + '.patch')
            
            if len(patch) > 0:
                # Python 2 print statement (this script predates py3).
                print patch_file[len(patchd)+1:]
                patch = patch.replace('\r\n', '\n')
                
                if not os.path.exists(patch_dir):
                    os.makedirs(patch_dir)
                with open(patch_file, 'wb') as fh:
                    fh.write(patch)
            else:
                # No differences: drop any stale patch from earlier runs.
                if os.path.isfile(patch_file):
                    print("Deleting empty patch: %s"%(patch_file))
                    os.remove(patch_file)
                    

    cleanDirs(patchd)
    
    backup = os.path.join(mcp, 'runtime', 'commands.py.bck')
    runtime = os.path.join(mcp, 'runtime', 'commands.py')
    patch_file = os.path.join(fml_dir, 'commands.patch')
    
    # Keep a pristine copy of commands.py the first time through.
    if not os.path.exists(backup):
        shutil.copy(runtime, backup)
    
    patch = ''.join(difflib.unified_diff(open(backup, 'U').readlines(), open(runtime, 'U').readlines(), 'commands.py', 'commands.py', '', '', n=3))
    if len(patch) > 0:
        print 'Creating commands.py patch'
        patch = patch.replace('\r\n', '\n')
        
        with open(patch_file, 'wb') as fh:
            fh.write(patch)
    else:
        if os.path.isfile(patch_file):
            print("Deleting empty commands.py patch")
            os.remove(patch_file)
Example #25
0
	def getPrettyDiff(self):
		"""Return a unified diff between the old and new blob contents.

		Reads both blobs by SHA from the commit's repository.  Falls
		back to latin-1 decoding when the blobs are not valid UTF-8
		(smart_unicode raises DjangoUnicodeDecodeError).
		"""
		blobOld = Blob(self.commit.commit.repo, unhexlify(self.oldSha)).data_stream.read()
		blobNew = Blob(self.commit.commit.repo, unhexlify(self.newSha)).data_stream.read()
		diff = ''
		try:
			diffs = unified_diff(smart_unicode(blobOld).splitlines(), smart_unicode(blobNew).splitlines())
		except DjangoUnicodeDecodeError:
			# NOTE(review): str(...).decode(...) is Python 2 only.
			diffs = unified_diff(str(blobOld).decode('latin1').splitlines(), str(blobNew).decode('latin1').splitlines())
		for line in diffs:
			diff += line + '\n'
		# Collapse doubled newlines introduced by appending '\n' above
		# (the diff header lines already carry their own newline).
		diff = diff.replace('\n\n', '\n')
		return diff
Example #26
0
def diff_freqs(a, b, output):
    """Compare two frequency listings line-wise; write diffs to *output*.

    Each line is expected to look like "<rank> <frequency...>".  Returns
    True when both the rank columns and the (sorted) frequency columns
    match; otherwise writes two unified diffs to *output* and returns
    False.
    """
    a, b = edgews_normalize(a, b)
    a, b = sepws_normalize(a, b)
    # Rank column: everything before the first space.  NOTE(review):
    # l.find(' ') is -1 when a line has no space, which slices off the
    # last character instead -- presumably every line contains a space.
    ranks_a, ranks_b = [[l[:l.find(' ') or None] + '\n' for l in x] for x in [a, b]]
    # Frequency column: everything after the first space, sorted so pure
    # ordering differences do not count as a mismatch.  NOTE(review):
    # the `len(l) > 1` guard reads `l` leaked from the comprehension
    # above -- Python 2 scoping; verify before porting to Python 3.
    freqs_a, freqs_b = [sorted(l[(l.find(' ') or 0)+1:] for l in x) if len(l) > 1 else [] for x in [a, b]]
    if not (ranks_a == ranks_b and freqs_a == freqs_b):
            output.write("Diff in sorted rank numbers:\n")
            output.writelines(difflib.unified_diff(ranks_a, ranks_b))
            output.write("Diff in sorted frequencies:\n")
            output.writelines(difflib.unified_diff(freqs_a, freqs_b))
            return False
    return True
Example #27
0
def present(name,
            config=None,
            **kwargs):
    '''
    Ensure the job is present in the Jenkins
    configured jobs

    name
        The unique name for the Jenkins job

    config
        The Salt URL for the file to use for
        configuring the job.
    '''

    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ['Job {0} is up to date.'.format(name)]}

    _job_exists = __salt__['jenkins.job_exists'](name)

    if _job_exists:
        # Diff the live job config against the cached source file and
        # update only when they actually differ.
        _current_job_config = __salt__['jenkins.get_job_config'](name)
        buf = six.moves.StringIO(_current_job_config)
        _current_job_config = buf.readlines()

        cached_source_path = __salt__['cp.cache_file'](config, __env__)
        with salt.utils.fopen(cached_source_path) as _fp:
            new_config_xml = _fp.readlines()

        if _current_job_config != new_config_xml:
            diff = difflib.unified_diff(_current_job_config, new_config_xml, lineterm='')
            __salt__['jenkins.update_job'](name, config, __env__)
            ret['changes'] = ''.join(diff)
            ret['comment'].append('Job {0} updated.'.format(name))

    else:
        cached_source_path = __salt__['cp.cache_file'](config, __env__)
        with salt.utils.fopen(cached_source_path) as _fp:
            new_config_xml = _fp.read()

        __salt__['jenkins.create_job'](name, config, __env__)

        buf = six.moves.StringIO(new_config_xml)
        _current_job_config = buf.readlines()

        # BUG FIX: the diff must compare two lists of lines; the original
        # passed an empty *string* and the already-exhausted StringIO
        # object, producing a nonsense diff.  Diff from nothing to the
        # new config lines instead.
        diff = difflib.unified_diff([], _current_job_config, lineterm='')
        ret['changes'] = ''.join(diff)
        ret['comment'].append('Job {0} added.'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
Example #28
0
def compareGeom(golden, outfile, headerDecoration):
    if not os.path.exists(golden):
        raise RuntimeError("Failed to find the orignal geometry '%s'" % golden)
    if not os.path.exists(outfile):
        raise RuntimeError("Failed to find the new geometry '%s'" % outfile)

    oldData = open(golden, 'r').readlines()
    newData = open(outfile, 'r').readlines()

    # do the "quick" check to see if anything has been changed
    diff = difflib.unified_diff(oldData, newData, golden, outfile, n=0)
    leftDiff = 0
    rightDiff = 0
    oldModified=None
    newModified=None
    for line in diff:
        if line.startswith('-') or line.startswith('+'):
            if not (line.startswith('---') or line.startswith('+++')):
                if 'last-modified' in line:
                    modified = getModified(line)
                    newline = line.replace(modified, "")
                    if line.startswith('-'):
                        oldModified = (modified, newline[1:])
                    else:
                        newModified = (modified, newline[1:])
                        if oldModified is None or oldModified[1] != newModified[1]:
                            leftDiff += 1
                            rightDiff += 1
                else:
                    if line.startswith('-'):
                        leftDiff += 1
                    if line.startswith('+'):
                        rightDiff += 1

    # log the modified dates according to the file
    if oldModified is not None:
        logging.info(golden+" "+oldModified[0])
    else:
        logging.info(golden+" oldModified is None")
    logging.info(outfile+" "+newModified[0])

    # only print diff if there is a meaningful one
    if (leftDiff+rightDiff) > 0:
        if headerDecoration:
            print "========================================"
        print "%d lines removed and %d lines added" % (leftDiff, rightDiff)
        # rerun and write out
        diff = difflib.unified_diff(oldData, newData, golden, outfile)
        sys.stdout.writelines(diff)
    else:
        print "%s and %s match" % (os.path.split(golden)[1], \
                                   os.path.split(outfile)[1])
def compareSnapshots(snapfile1, snapfile2):
    """Load two pickled snapshots and report added/removed dirs and files.

    Each snapshot file contains two consecutive pickles: the directory
    list first, then the file list.
    """
    try:
        pkl_file = open(snapfile1, 'rb')
        dirs1 = pickle.load(pkl_file)
        files1 = pickle.load(pkl_file)
        pkl_file.close()

        pk2_file = open(snapfile2, 'rb')
        dirs2 = pickle.load(pk2_file)
        files2 = pickle.load(pk2_file)
        pk2_file.close()
    except Exception:
        # Narrowed from a bare except so Ctrl-C still interrupts here.
        print("Problems encountered accessing snapshot files!")
        raw_input("\n\nPress [Enter] to continue...")
        return

    result_dirs = list(difflib.unified_diff(dirs1, dirs2))
    result_files = list(difflib.unified_diff(files1, files2))

    added_dirs = []
    removed_dirs = []
    added_files = []
    removed_files = []

    # BUG FIX: the original tested `result.endswith("\n") == -1`, which
    # is never true (endswith returns a bool, not an index), so nothing
    # was ever classified.  unified_diff's header lines ('---', '+++',
    # '@@') carry a trailing newline of their own, while content lines
    # echo the input entries verbatim -- assumed newline-free here, so
    # skipping lines that end with "\n" skips exactly the headers.
    for result in result_files:
        if not result.endswith("\n"):
            if result.startswith('+'):
                added_files.append(result.strip('+'))
            elif result.startswith('-'):
                removed_files.append(result.strip('-'))

    for result in result_dirs:
        if not result.endswith("\n"):
            if result.startswith('+'):
                added_dirs.append(result.strip('+'))
            elif result.startswith('-'):
                removed_dirs.append(result.strip('-'))

    print("\n\nAdded Directories:\n")
    printList(added_dirs)
    print("\n\nAdded Files:\n")
    printList(added_files)
    print("\n\nRemoved Directories:\n")
    printList(removed_dirs)
    print("\n\nRemoved Files:\n")
    printList(removed_files)
    raw_input("\n\nPress [Enter] to continue...")
def compareGeom(golden, outfile):
    """Compare two geometry files, discounting last-modified-only churn.

    Logs a warning and returns early when either file is missing.
    Otherwise logs the dates extracted from the '<instrument' header
    lines, and prints the full unified diff only when lines other than
    the modification stamp actually changed.
    """
    if not os.path.exists(golden):
        logging.warning(" Failed to find the original geometry " + golden + " - not comparing")
        return
    if not os.path.exists(outfile):
        logging.warning(" Failed to find the new geometry " + outfile)
        return

    baseline = open(golden).readlines()
    candidate = open(outfile).readlines()

    # Quick pass with zero context lines just to count real differences.
    quick = difflib.unified_diff(baseline, candidate, golden, outfile, n=0)

    removed = 0
    added = 0
    oldModified = None
    oldDate = None
    newDate = None

    for entry in quick:
        if entry.startswith('-<instrument'):
            oldDate, oldModified = getModified(entry)
        elif entry.startswith('+<instrument'):
            newDate, newModified = getModified(entry)
            # Count the header pair only when something besides the
            # last-modified stamp changed.
            if oldModified != newModified:
                removed += 1
                added += 1
        else:
            if entry.startswith('-') and not entry.startswith('---'):
                removed += 1
            if entry.startswith('+') and not entry.startswith('+++'):
                added += 1

    if oldDate is None:
        logging.info(golden+" oldDate is None")
    else:
        logging.info(" Compare " + golden + " " + oldDate)
    if newDate is None:
        logging.info(outfile+" newDate is None")
    else:
        logging.info(" With " + outfile + " " + newDate)

    # Only print the diff when there is a meaningful one.
    if removed + added > 0:
        logging.info(" ========================================")
        logging.info(" " + str(removed) + " line(s) removed and " + str(added) + " line(s) added")
        # Rerun with default context and write the full diff out.
        full = difflib.unified_diff(baseline, candidate, golden, outfile)
        sys.stdout.writelines(full)
    else:
        logging.info(" " + os.path.split(golden)[1] + " and " + os.path.split(outfile)[1] + " match")
Example #31
0
def main(argv=None):
    """Command-line driver for regenerating doc examples.

    Parses -d/--diff, -r/--run, -s/--simple_diff and -u/--update flags;
    only the --diff path is implemented in this block (the other flags
    are parsed but presumably handled elsewhere -- not visible here).
    With --diff, each .in file is processed in memory and compared
    against its sibling .xml file.
    """
    if argv is None:
        argv = sys.argv

    parser = optparse.OptionParser()
    parser.add_option(
        '-d',
        '--diff',
        action='store_true',
        dest='diff',
        default=False,
        help=
        'create examples for the .in file and output a unified diff against the related .xml file'
    )
    parser.add_option(
        '-r',
        '--run',
        action='store_true',
        dest='run',
        default=False,
        help='create examples for the .in file, but do not change any files')
    parser.add_option(
        '-s',
        '--simple_diff',
        action='store_true',
        dest='simple',
        default=False,
        help='use a simpler output for the diff mode (no unified diff!)')
    parser.add_option(
        '-u',
        '--update',
        action='store_true',
        dest='update',
        default=False,
        help='create examples for the .in file and update the related .xml file'
    )

    opts, args = parser.parse_args(argv[1:])

    if opts.diff:
        import StringIO
        import difflib

        # No explicit args: process every doc/user/*.in file.
        if not args:
            args = glob.glob('doc/user/*.in')
        for arg in sorted(args):
            diff = None
            s = StringIO.StringIO()
            # Render the .in file into the in-memory buffer.
            process(arg, s)
            filename = arg[:-2] + 'xml'
            try:
                fxml = open(filename, 'r')
                xmlcontent = fxml.read()
                fxml.close()
                if opts.simple:
                    diff = list(
                        difflib.context_diff(xmlcontent.splitlines(),
                                             s.getvalue().splitlines(),
                                             fromfile=arg,
                                             tofile=filename))
                else:
                    diff = list(
                        difflib.unified_diff(xmlcontent.splitlines(),
                                             s.getvalue().splitlines(),
                                             fromfile=arg,
                                             tofile=filename,
                                             lineterm=''))
            # Python 2 except syntax: a missing/unreadable .xml is
            # reported but does not abort the loop.
            except EnvironmentError, e:
                sys.stderr.write('%s: %s\n' % (filename, e))

            s.close()
            if diff:
                print "%s:" % arg
                print '\n'.join(diff)
def main():
    """Copy checked-in mc.exe outputs into place and, on Windows, verify
    that running mc.exe reproduces them exactly.

    sys.argv[1] is an environment-block file; the remaining args are the
    mc.exe command line, from which -h (header dir), -r (resource dir)
    and the positional .mc/.man input file are extracted.  Exits nonzero
    when mc.exe output differs from the checked-in files.
    """
    env_file, rest = sys.argv[1], sys.argv[2:]

    # Parse some argument flags.
    header_dir = None
    resource_dir = None
    input_file = None
    for i, arg in enumerate(rest):
        if arg == '-h' and len(rest) > i + 1:
            assert header_dir == None
            header_dir = rest[i + 1]
        elif arg == '-r' and len(rest) > i + 1:
            assert resource_dir == None
            resource_dir = rest[i + 1]
        elif arg.endswith('.mc') or arg.endswith('.man'):
            assert input_file == None
            input_file = arg

    # Copy checked-in outputs to final location.
    THIS_DIR = os.path.abspath(os.path.dirname(__file__))
    assert header_dir == resource_dir
    source = os.path.join(THIS_DIR, "..", "..", "third_party",
                          "win_build_output",
                          re.sub(r'^(?:[^/]+/)?gen/', 'mc/', header_dir))
    distutils.dir_util.copy_tree(source, header_dir, preserve_times=False)

    # On non-Windows, that's all we can do.
    if sys.platform != 'win32':
        return

    # On Windows, run mc.exe on the input and check that its outputs are
    # identical to the checked-in outputs.

    # Read the environment block from the file. This is stored in the format used
    # by CreateProcess. Drop last 2 NULs, one for list terminator, one for
    # trailing vs. separator.
    env_pairs = open(env_file).read()[:-2].split('\0')
    env_dict = dict([item.split('=', 1) for item in env_pairs])

    extension = os.path.splitext(input_file)[1]
    if extension in ['.man', '.mc']:
        # For .man files, mc's output changed significantly from Version 10.0.15063
        # to Version 10.0.16299.  We should always have the output of the current
        # default SDK checked in and compare to that. Early out if a different SDK
        # is active. This also happens with .mc files.
        # TODO(thakis): Check in new baselines and compare to 16299 instead once
        # we use the 2017 Fall Creator's Update by default.
        mc_help = subprocess.check_output(['mc.exe', '/?'],
                                          env=env_dict,
                                          stderr=subprocess.STDOUT,
                                          shell=True)
        version = re.search(r'Message Compiler\s+Version (\S+)',
                            mc_help).group(1)
        if version != '10.0.15063':
            return

    # mc writes to stderr, so this explicitly redirects to stdout and eats it.
    try:
        # Redirect mc.exe output into a temp dir so the checked-in files
        # in header_dir/resource_dir stay untouched for the comparison.
        tmp_dir = tempfile.mkdtemp()
        delete_tmp_dir = True
        if header_dir:
            rest[rest.index('-h') + 1] = tmp_dir
            header_dir = tmp_dir
        if resource_dir:
            rest[rest.index('-r') + 1] = tmp_dir
            resource_dir = tmp_dir

        # This needs shell=True to search the path in env_dict for the mc
        # executable.
        subprocess.check_output(['mc.exe'] + rest,
                                env=env_dict,
                                stderr=subprocess.STDOUT,
                                shell=True)
        # We require all source code (in particular, the header generated here) to
        # be UTF-8. jinja can output the intermediate .mc file in UTF-8 or UTF-16LE.
        # However, mc.exe only supports Unicode via the -u flag, and it assumes when
        # that is specified that the input is UTF-16LE (and errors out on UTF-8
        # files, assuming they're ANSI). Even with -u specified and UTF16-LE input,
        # it generates an ANSI header, and includes broken versions of the message
        # text in the comment before the value. To work around this, for any invalid
        # // comment lines, we simply drop the line in the header after building it.
        # Also, mc.exe apparently doesn't always write #define lines in
        # deterministic order, so manually sort each block of #defines.
        if header_dir:
            header_file = os.path.join(
                header_dir,
                os.path.splitext(os.path.basename(input_file))[0] + '.h')
            header_contents = []
            with open(header_file, 'rb') as f:
                define_block = []  # The current contiguous block of #defines.
                for line in f.readlines():
                    if line.startswith('//') and '?' in line:
                        continue
                    if line.startswith('#define '):
                        define_block.append(line)
                        continue
                    # On the first non-#define line, emit the sorted preceding #define
                    # block.
                    header_contents += sorted(define_block,
                                              key=lambda s: s.split()[-1])
                    define_block = []
                    header_contents.append(line)
                # If the .h file ends with a #define block, flush the final block.
                header_contents += sorted(define_block,
                                          key=lambda s: s.split()[-1])
            with open(header_file, 'wb') as f:
                f.write(''.join(header_contents))

        # mc.exe invocation and post-processing are complete, now compare the output
        # in tmp_dir to the checked-in outputs.
        diff = filecmp.dircmp(tmp_dir, source)
        if diff.diff_files or set(diff.left_list) != set(diff.right_list):
            print 'mc.exe output different from files in %s, see %s' % (
                source, tmp_dir)
            diff.report()
            for f in diff.diff_files:
                if f.endswith('.bin'): continue
                fromfile = os.path.join(source, f)
                tofile = os.path.join(tmp_dir, f)
                print ''.join(
                    difflib.unified_diff(
                        open(fromfile, 'U').readlines(),
                        open(tofile, 'U').readlines(), fromfile, tofile))
            # Keep the temp dir around so the differences can be inspected.
            delete_tmp_dir = False
            sys.exit(1)
    except subprocess.CalledProcessError as e:
        print e.output
        sys.exit(e.returncode)
    finally:
        if os.path.exists(tmp_dir) and delete_tmp_dir:
            shutil.rmtree(tmp_dir)
Example #33
0
def testme(testDir, testObj, exeext):
    """Runs a single test, comparing output and RC to expected output and RC

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by tester() and reported

    testDir -- directory holding the test's input/output fixture files.
    testObj -- dict describing the test: 'exec', 'args', and optionally
               'input', 'output_cmp', 'return_code', 'error_txt'.
    exeext  -- platform executable suffix appended to the exec name.
    """
    # Get the exec names and arguments
    execprog = testObj['exec'] + exeext
    execargs = testObj['args']
    execrun = [execprog] + execargs

    # Read the input data (if there is any)
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = testDir + "/" + testObj['input']
        inputData = open(filename).read()
        stdinCfg = subprocess.PIPE

    # Read the expected output data (if there is any)
    outputFn = None
    outputData = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        outputType = os.path.splitext(outputFn)[1][
            1:]  # output type from file extension (determines how to compare)
        try:
            outputData = open(testDir + "/" + outputFn).read()
        except:
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception

    # Run the test
    proc = subprocess.Popen(execrun,
                            stdin=stdinCfg,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    try:
        # outs is the (stdout, stderr) pair from communicate().
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise

    if outputData:
        # Parse command output and expected output
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' %
                          (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' %
                          (outputFn, outputType, e))
            raise
        # Compare data (semantic comparison of the parsed structures)
        if a_parsed != b_parsed:
            data_mismatch_message = "Output data mismatch for " + outputFn + " (format " + outputType + ")"
            logging.error(data_mismatch_message)
            logging.info(outs[0])
            raise Exception
        # Compare formatting (exact byte-for-byte text comparison)
        if outs[0] != outputData:
            formatting_mismatch_message = "Output formatting mismatch for " + outputFn + ":\n"
            formatting_mismatch_message += "".join(
                difflib.unified_diff(outputData.splitlines(True),
                                     outs[0].splitlines(True),
                                     fromfile=outputFn,
                                     tofile="returned"))
            logging.error(formatting_mismatch_message)
            raise Exception

    # Compare the return code to the expected return code
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        logging.error("Return code mismatch for " + outputFn)
        raise Exception

    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Compare error text
        # TODO: ideally, we'd compare the strings exactly and also assert
        # That stderr is empty if no errors are expected. However, bitcoin-tx
        # emits DISPLAY errors when running as a windows application on
        # linux through wine. Just assert that the expected error text appears
        # somewhere in stderr
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error +
                          "\nReceived: " + outs[1].rstrip())
            raise Exception
Example #34
0
        def review_config(self):
            """Present the proposed iptables rules to the user for review.

            Builds a diff between the currently installed rules and the
            freshly generated ones.  If the current rules also differ
            from the example file written by a previous setup, warn that
            they were manually edited.  Interactive confirmation is
            skipped when FIREWALL_CHANGES_REVIEW is already set in the
            environment.  Raises RuntimeError if the user rejects the
            proposed configuration.
            """
            diff_lines = ''
            current_rules = ''
            current_modified_since_prev_setup = False
            interactive = True
            if os.path.isfile(osetupcons.FileLocations.SYSCONFIG_IPTABLES):
                with open(osetupcons.FileLocations.SYSCONFIG_IPTABLES,
                          'r') as current:
                    current_rules = current.read().splitlines()
            if os.path.isfile(osetupcons.FileLocations.OVIRT_IPTABLES_EXAMPLE):
                with open(osetupcons.FileLocations.OVIRT_IPTABLES_EXAMPLE,
                          'r') as prev_setup_example:
                    prev_setup_rules = prev_setup_example.read().splitlines()
                    # Any difference here means the live rules no longer
                    # match what the previous setup generated, i.e. they
                    # were presumably edited by hand.
                    diff_prev_cur = difflib.unified_diff(
                        current_rules,
                        prev_setup_rules,
                        lineterm='',
                    )
                    diff_lines = '\n'.join(diff_prev_cur)
                    if len(diff_lines) > 0:
                        current_modified_since_prev_setup = True
            # Diff the current rules against the newly proposed ones;
            # this intentionally overwrites diff_lines from the check
            # above (that value was only needed to set the flag).
            diff = difflib.unified_diff(
                current_rules,
                self._get_rules().splitlines(),
                lineterm='',
                fromfile=_('current'),
                tofile=_('proposed'),
            )
            diff_lines = '\n'.join(diff)
            if len(diff_lines) > 0:
                if current_modified_since_prev_setup:
                    # NOTE(review): "seams" is a typo for "seems", but the
                    # string is a gettext msgid -- fix it together with
                    # the translation catalogs, not in isolation here.
                    self.logger.warning(
                        _("It seams that previously generated iptables "
                          "configuration was manually edited,\n"
                          "please carefully review the proposed "
                          "configuration"))
                if self.environment[
                        osetupcons.ConfigEnv.FIREWALL_CHANGES_REVIEW] is None:
                    self.environment[
                        osetupcons.ConfigEnv.
                        FIREWALL_CHANGES_REVIEW] = dialog.queryBoolean(
                            dialog=self.plugin.dialog,
                            name='OVESETUP_REVIEW_IPTABLES_CHANGES',
                            note=_('Generated iptables rules are different '
                                   'from current ones.\n'
                                   'Do you want to review them? '
                                   '(@VALUES@) [@DEFAULT@]: '),
                            prompt=True,
                            true=_('Yes'),
                            false=_('No'),
                            default=False,
                        )
                else:
                    # Answer was pre-seeded: do not prompt interactively.
                    interactive = False

                if self.environment[osetupcons.ConfigEnv.
                                    FIREWALL_CHANGES_REVIEW] and interactive:
                    confirmed = dialog.queryBoolean(
                        dialog=self.plugin.dialog,
                        name='OVESETUP_CONFIRM_IPTABLES_CHANGES',
                        note=_('Please review the changes:\n\n'
                               '{diff}\n\n'
                               'Do you want to proceed with firewall '
                               'configuration? '
                               '(@VALUES@) [@DEFAULT@]: ').format(
                                   diff=diff_lines),
                        prompt=True,
                        true=_('Yes'),
                        false=_('No'),
                        default=True,
                    )
                    if not confirmed:
                        raise RuntimeError(
                            _('iptables proposed configuration '
                              'was rejected by user'))
    def _notify_trunk_changes(self, changeset_attribs, branch_attribs):
        """Email owner and/or access-list users on changes to trunk.

        changeset_attribs -- dict with at least 'revision', 'author',
                             'prefix' describing the commit.
        branch_attribs    -- dict describing the branch change: 'sid',
                             'changed_lines', 'status_info_file',
                             'old_info', 'info'.
        Silently returns when notification is not configured or no
        recipient email addresses can be determined.
        """

        # Notify only if users' email addresses can be determined
        conf = ResourceLocator.default().get_conf()
        user_tool_name = conf.get_value(["rosa-svn", "user-tool"])
        if not user_tool_name:
            return
        notify_who_str = conf.get_value(
            ["rosa-svn", "notify-who-on-trunk-commit"], "")
        if not notify_who_str.strip():
            return
        notify_who = shlex.split(notify_who_str)

        # Build the message text
        info_file_path = "%s/trunk/%s" % ("/".join(
            branch_attribs["sid"]), self.INFO_FILE)
        text = ""
        for changed_line in branch_attribs["changed_lines"]:
            text += changed_line
            # For suite info file change, add diff as well
            if (changed_line[4:].strip() == info_file_path and
                    branch_attribs["status_info_file"] == self.ST_MODIFIED):
                old_strio = StringIO()
                metomi.rose.config.dump(branch_attribs["old_info"], old_strio)
                new_strio = StringIO()
                metomi.rose.config.dump(branch_attribs["info"], new_strio)
                # Label the diff sides with the previous/current revisions.
                for diff_line in unified_diff(
                        old_strio.getvalue().splitlines(True),
                        new_strio.getvalue().splitlines(True),
                        "@%d" % (int(changeset_attribs["revision"]) - 1),
                        "@%d" % (int(changeset_attribs["revision"]))):
                    text += " " * 4 + diff_line

        # Determine who to notify: owner and access-list from both the
        # old and new suite info (covers ownership handovers).
        users = set()
        for key in ["old_info", "info"]:
            if branch_attribs[key] is not None:
                info_conf = branch_attribs[key]
                if "owner" in notify_who:
                    users.add(info_conf.get_value(["owner"]))
                if "access-list" in notify_who:
                    users.update(
                        info_conf.get_value(["access-list"], "").split())
        users.discard("*")

        # Determine email addresses
        user_tool = self.usertools_manager.get_handler(user_tool_name)
        if "author" in notify_who:
            users.add(changeset_attribs["author"])
        else:
            # Do not notify the author about their own commit.
            users.discard(changeset_attribs["author"])
        emails = sorted(user_tool.get_emails(users))
        if not emails:
            return

        # Send notification
        msg = MIMEText(text)
        msg.set_charset("utf-8")
        msg["From"] = conf.get_value(["rosa-svn", "notification-from"],
                                     "notications@" + socket.getfqdn())
        msg["To"] = ", ".join(emails)
        msg["Subject"] = "%s-%s/trunk@%d" % (
            changeset_attribs["prefix"], branch_attribs["sid"],
            int(changeset_attribs["revision"]))
        smtp_host = conf.get_value(["rosa-svn", "smtp-host"],
                                   default="localhost")
        smtp = SMTP(smtp_host)
        smtp.sendmail(msg["From"], emails, msg.as_string())
        smtp.quit()
Example #36
0
def diff_not_equal(path, want, have):
  """Build a Difference describing a unified diff between *want* and *have*.

  The diff labels *path* as the "from" side and the literal "running" as
  the "to" side, mirroring an intended-config vs. device-state comparison.
  """
  delta = difflib.unified_diff(
      want.splitlines(), have.splitlines(),
      fromfile=path, tofile="running", lineterm="")
  return Difference("Diff:\n%s", path, "\n".join(delta))
Example #37
0
def __changeset(request, repo, rev):
    """Render the details of a single changeset, including per-file diffs.

    ``repo``
        the repository wrapper object
    ``rev``
        the revision in the changesets history
    """
    ctx = repo.get_context().repository[rev]
    to_rev = rev
    files = []
    for f in ctx.files():
        # (type, encoding) tuple; type is None for unrecognized extensions.
        mimetype = mimetypes.guess_type(f)
        try:
            fc = ctx[f]
            if mimetype[0] is not None:
                if mimetype[0] not in BINARY_MIMETYPES:
                    data1 = fc.data().split('\n')

                    # presumably the revision this file was last changed at;
                    # TODO confirm filerev() semantics against Mercurial API
                    from_rev = int(rev) - int(fc.filerev())
                    ctx2 = repo.get_context().repository[from_rev]
                    data2 = ctx2.filectx(f).data().split('\n')

                    # NOTE(review): data1 is the *newer* content but is passed
                    # as the "from" sequence while the labels say from_rev ->
                    # to_rev; the sides look swapped — confirm intended diff
                    # direction before changing.
                    diff = difflib.unified_diff(data1,
                                                data2,
                                                fromfile=f + '@' +
                                                str(from_rev),
                                                tofile=f + '@' + str(to_rev))
                else:
                    # Binary file: no diff is rendered.
                    diff = None
            else:
                # Unknown mimetype: treated like binary, no diff.
                diff = None

            files.append({
                'name': f,
                'size': len(fc.data()),
                'mimetype': mimetype[0],
                'diff': diff
            })
        except:
            # NOTE(review): bare except silently records size -1 for any
            # failure (missing file, lookup error) — consider narrowing.
            files.append({'name': f, 'size': -1})

    # Fall back to the repo's changeset listing when no referer is available.
    if 'HTTP_REFERER' in request.META:
        referer = request.META['HTTP_REFERER']
    else:
        referer = reverse('hg-repo-action',
                          kwargs={
                              'name': repo.name,
                              'action': 'changesets'
                          })

    return render_to_response('django_hg/changeset.html', {
        'ctx': {
            "user": ctx.user(),
            "description": ctx.description(),
            "time": datetime.fromtimestamp(ctx.date()[0]).time(),
            "date": date.fromtimestamp(ctx.date()[0]),
            "hash": ctx,
            "rev": rev,
            "files_count": len(ctx.files())
        },
        'files': files,
        'repo': repo,
        'referer': referer,
        'rev': rev,
        'groups': _get_left_side_cont(),
    },
                              context_instance=RequestContext(request))
Example #38
0
def diff(source_data, target_data, source_channel, target_channel):
    """Return the unified diff between two line sequences as a list.

    *source_channel* and *target_channel* become the from/to labels in
    the diff header.
    """
    delta = unified_diff(source_data, target_data,
                         source_channel, target_channel)
    return [line for line in delta]
Example #39
0
def update_copyright(fn_path, year, pretend=False):
    """Check a file for a Copyright statement, and update its year.

    The patterns used for replacing copyrights are taken from echangelog.
    Only the first lines of each file that start with a hash ('#') are
    considered, until a line is found that doesn't start with a hash.
    Files are read and written in binary mode, so that this function
    will work correctly with files encoded in any character set, as
    long as the copyright statements consist of plain ASCII.

    @param fn_path: file path
    @type str
    @param year: current year
    @type str
    @param pretend: pretend mode (report the diff but do not rewrite)
    @type bool
    @rtype: bool
    @return: True if copyright update was needed, False otherwise
    """

    try:
        fn_hdl = io.open(_unicode_encode(fn_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='rb')
    except EnvironmentError:
        # NOTE(review): returns None (falsy) here although the docstring
        # promises a bool — callers relying on truthiness are unaffected.
        return

    orig_header = []
    new_header = []

    # Scan the leading hash-comment block; the first blank or non-'#' line
    # terminates the header and is copied through unchanged.
    for line in fn_hdl:
        line_strip = line.strip()
        orig_header.append(line)
        if not line_strip or line_strip[:1] != b'#':
            new_header.append(line)
            break

        line = update_copyright_year(year, line)
        new_header.append(line)

    # Count diff lines while echoing them; n=0 suppresses context lines.
    difflines = 0
    for diffline in difflib.unified_diff(
        [_unicode_decode(diffline) for diffline in orig_header],
        [_unicode_decode(diffline) for diffline in new_header],
            fromfile=fn_path,
            tofile=fn_path,
            n=0):
        util.writemsg_stdout(diffline, noiselevel=-1)
        difflines += 1
    util.writemsg_stdout("\n", noiselevel=-1)

    # unified diff has three lines to start with
    # (---, +++ and @@ headers), so >3 means an actual content change.
    if difflines > 3 and not pretend:
        # write new file with changed header
        f, fnnew_path = mkstemp()
        f = io.open(f, mode='wb')
        for line in new_header:
            f.write(line)
        # fn_hdl's iterator is positioned just past the header, so this
        # copies the untouched remainder of the original file.
        for line in fn_hdl:
            f.write(line)
        f.close()
        try:
            fn_stat = os.stat(fn_path)
        except OSError:
            fn_stat = None

        shutil.move(fnnew_path, fn_path)

        # Preserve the original permissions, or apply a sane default.
        if fn_stat is None:
            util.apply_permissions(fn_path, mode=0o644)
        else:
            util.apply_stat_permissions(fn_path, fn_stat)
    fn_hdl.close()
    return difflines > 3
Example #40
0
def system(name, **kwargs):
    """
    Ensure that global network settings are configured properly.

    name
        Custom name to represent this configuration change.

    kwargs
        The global parameters for the system.

    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Global network settings are up to date.",
    }
    apply_net_settings = False
    kwargs["test"] = __opts__["test"]
    # Build global network settings
    try:
        old = __salt__["ip.get_network_settings"]()
        new = __salt__["ip.build_network_settings"](**kwargs)
        if __opts__["test"]:
            # Dry run: report what would change without applying anything.
            if old == new:
                return ret
            if not old and new:
                ret["result"] = None
                ret["comment"] = "Global network settings are set to be added."
                return ret
            elif old != new:
                diff = difflib.unified_diff(old, new, lineterm="")
                ret["result"] = None
                ret["comment"] = (
                    "Global network settings are set to be "
                    "updated:\n{}".format("\n".join(diff))
                )
                return ret
        if not old and new:
            apply_net_settings = True
            ret["changes"]["network_settings"] = "Added global network settings."
        elif old != new:
            diff = difflib.unified_diff(old, new, lineterm="")
            apply_net_settings = True
            ret["changes"]["network_settings"] = "\n".join(diff)
    except (AttributeError, KeyError) as error:
        # Both failure modes were handled by two byte-identical blocks in
        # the original; a single tuple handler expresses the same behavior.
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Apply global network settings
    if apply_net_settings:
        try:
            __salt__["ip.apply_network_settings"](**kwargs)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = str(error)
            return ret

    return ret
Example #41
0
def routes(name, **kwargs):
    """
    Manage network interface static routes.

    name
        Interface name to apply the route to.

    kwargs
        Named routes
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Interface {} routes are up to date.".format(name),
    }
    apply_routes = False
    # Honour an explicit test flag in kwargs, otherwise use the global one.
    kwargs.setdefault("test", __opts__.get("test", False))

    # Build interface routes
    try:
        old = __salt__["ip.get_routes"](name)
        new = __salt__["ip.build_routes"](name, **kwargs)
        if kwargs["test"]:
            # Dry run: describe the pending change without applying it.
            if old == new:
                return ret
            ret["result"] = None
            if new and not old:
                ret["comment"] = "Interface {} routes are set to be added.".format(name)
                return ret
            route_diff = difflib.unified_diff(old, new, lineterm="")
            ret["comment"] = (
                "Interface {} routes are set to be "
                "updated:\n{}".format(name, "\n".join(route_diff))
            )
            return ret
        if new and not old:
            apply_routes = True
            ret["comment"] = "Interface {} routes added.".format(name)
            ret["changes"]["network_routes"] = "Added interface {} routes.".format(name)
        elif old != new:
            route_diff = difflib.unified_diff(old, new, lineterm="")
            apply_routes = True
            ret["comment"] = "Interface {} routes updated.".format(name)
            ret["changes"]["network_routes"] = "\n".join(route_diff)
    except AttributeError as error:
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Apply interface routes
    if apply_routes:
        try:
            __salt__["ip.apply_network_settings"](**kwargs)
        except AttributeError as error:
            ret["result"] = False
            ret["comment"] = str(error)
            return ret

    return ret
Example #42
0
def test_build(
    compiled,
    format,
    available_targets,
    generate_reference,
    update_reference,
    archive_differences,
):
    """Run one integration scenario and compare its output to reference files.

    Builds the scenario with ``make <format>``, scrubs volatile content from
    both generated and reference files, and asserts they match.  Per-file
    failures are accumulated so every pair is checked before the final
    assertion reports them all at once.
    """
    name = compiled
    scrub = SCRUBBERS[format]
    output_pattern = OUTPUT_PATTERN[format]
    assert_equals = ASSERT_EQUALS.get(format, None)

    encoding = "utf8"
    if format == "html" and name.startswith("html-encoding-"):
        # The scenario name encodes the expected charset,
        # e.g. "html-encoding-cp1252".
        encoding = re.match("^html-encoding-(.*)$", name).group(1)

    os.chdir(os.path.join(basedir, name))
    assert run(["make", format])

    if generate_reference:  # pragma: no cover
        generate_reference_data(output_pattern)

    whole_diff_output = []
    for coverage_file, reference_file in find_reference_files(output_pattern):
        with io.open(coverage_file, encoding=encoding) as f:
            coverage = scrub(f.read())
        with io.open(reference_file, encoding=encoding) as f:
            reference = scrub(f.read())

        try:
            if assert_equals is not None:
                assert_equals(reference, coverage)
            else:
                diff_out = list(
                    difflib.unified_diff(
                        reference.splitlines(keepends=True),
                        coverage.splitlines(keepends=True),
                        fromfile=reference_file,
                        tofile=coverage_file,
                    ))
                diff_is_empty = len(diff_out) == 0
                assert diff_is_empty, "".join(diff_out)
        except Exception as e:  # pragma: no cover
            # BUG FIX: the original used `whole_diff_output += <str>`, which
            # extends the list with the string's individual characters.  The
            # joined report was identical, but append() keeps one entry per
            # failure and is the intended idiom.
            whole_diff_output.append("  " + str(e) + "\n")
            if update_reference:
                reference_file = update_reference_data(coverage_file,
                                                       reference_file)
            if archive_differences:
                archive_difference_data(name, coverage_file, reference_file)

        if generate_reference or update_reference:  # pragma: no cover
            remove_duplicate_data(encoding, scrub, coverage, coverage_file,
                                  reference_file)

    diff_is_empty = len(whole_diff_output) == 0
    assert diff_is_empty, "Diff output:\n" + "".join(whole_diff_output)

    # some tests require additional cleanup after each test
    if "clean-each" in available_targets:  # pragma: no cover
        assert run(["make", "clean-each"])

    os.chdir(basedir)
def main(argv):
    """Run all code generators for this repository.

    Modes:
      (default)       generate directly into layers/generated
      -i/--incremental  generate into a temp dir, copy only changed files
      -v/--verify       generate into a temp dir, diff against the repo

    Returns a process exit code (0 on success, 1 on failure).
    """
    parser = argparse.ArgumentParser(description='Generate source code for this repository')
    parser.add_argument('registry', metavar='REGISTRY_PATH', help='path to the Vulkan-Headers registry directory')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-i', '--incremental', action='store_true', help='only update repo files that change')
    group.add_argument('-v', '--verify', action='store_true', help='verify repo files match generator output')
    args = parser.parse_args(argv)

    # One lvl_genvk.py invocation per generated file, plus the two
    # stand-alone generator scripts appended at the end.
    gen_cmds = [*[[common_codegen.repo_relative('scripts/lvl_genvk.py'),
                   '-registry', os.path.abspath(os.path.join(args.registry,  'vk.xml')),
                   '-quiet',
                   filename] for filename in ["chassis.cpp",
                                              "chassis.h",
                                              "chassis_dispatch_helper.h",
                                              "layer_chassis_dispatch.cpp",
                                              "layer_chassis_dispatch.h",
                                              "object_tracker.cpp",
                                              "object_tracker.h",
                                              "parameter_validation.cpp",
                                              "parameter_validation.h",
                                              "synchronization_validation_types.cpp",
                                              "synchronization_validation_types.h",
                                              "thread_safety.cpp",
                                              "thread_safety.h",
                                              "vk_dispatch_table_helper.h",
                                              "vk_enum_string_helper.h",
                                              "vk_extension_helper.h",
                                              "vk_layer_dispatch_table.h",
                                              "vk_object_types.h",
                                              "vk_safe_struct.cpp",
                                              "vk_safe_struct.h",
                                              "lvt_function_pointers.cpp",
                                              "lvt_function_pointers.h",
                                              "vk_typemap_helper.h",
                                              "best_practices.h",
                                              "best_practices.cpp",
                                              "spirv_validation_helper.cpp",
                                              "corechecks_optick_instrumentation.cpp",
                                              "corechecks_optick_instrumentation.h",
                                              "command_counter_helper.cpp",
                                              "command_counter_helper.h"]],
                [common_codegen.repo_relative('scripts/vk_validation_stats.py'),
                 os.path.abspath(os.path.join(args.registry, 'validusage.json')),
                 '-export_header'],
                [common_codegen.repo_relative('scripts/external_revision_generator.py'),
                 '--json_file', common_codegen.repo_relative('scripts/known_good.json'),
                 '--json_keys', 'repos,0,commit',
                 '-s', 'SPIRV_TOOLS_COMMIT_ID',
                 '-o', 'spirv_tools_commit_id.h']]

    repo_dir = common_codegen.repo_relative('layers/generated')

    # get directory where generators will run
    if args.verify or args.incremental:
        # generate in temp directory so we can compare or copy later
        # (temp_obj must stay referenced so the directory isn't deleted early)
        temp_obj = tempfile.TemporaryDirectory(prefix='VulkanVL_generated_source_')
        temp_dir = temp_obj.name
        gen_dir = temp_dir
    else:
        # generate directly in the repo
        gen_dir = repo_dir

    # run each code generator
    for cmd in gen_cmds:
        print(' '.join(cmd))
        try:
            subprocess.check_call([sys.executable] + cmd, cwd=gen_dir)
        except Exception as e:
            print('ERROR:', str(e))
            return 1

    # optional post-generation steps
    if args.verify:
        # compare contents of temp dir and repo
        temp_files = set(os.listdir(temp_dir))
        repo_files = set(os.listdir(repo_dir))
        files_match = True
        for filename in sorted((temp_files | repo_files) - set(verify_exclude)):
            temp_filename = os.path.join(temp_dir, filename)
            repo_filename = os.path.join(repo_dir, filename)
            if filename not in repo_files:
                print('ERROR: Missing repo file', filename)
                files_match = False
            elif filename not in temp_files:
                print('ERROR: Missing generator for', filename)
                files_match = False
            elif not filecmp.cmp(temp_filename, repo_filename, shallow=False):
                print('ERROR: Repo files do not match generator output for', filename)
                files_match = False
                # print line diff on file mismatch
                with open(temp_filename) as temp_file, open(repo_filename) as repo_file:
                    print(''.join(difflib.unified_diff(temp_file.readlines(),
                                                       repo_file.readlines(),
                                                       fromfile='temp/' + filename,
                                                       tofile=  'repo/' + filename)))

        # return code for test scripts
        if files_match:
            print('SUCCESS: Repo files match generator output')
            return 0
        return 1

    elif args.incremental:
        # copy missing or differing files from temp directory to repo
        for filename in os.listdir(temp_dir):
            temp_filename = os.path.join(temp_dir, filename)
            repo_filename = os.path.join(repo_dir, filename)
            if not os.path.exists(repo_filename) or \
               not filecmp.cmp(temp_filename, repo_filename, shallow=False):
                print('update', repo_filename)
                shutil.copyfile(temp_filename, repo_filename)

    return 0
Example #44
0
File: main.py Project: kpj/jupyfmt
def format_file(
    notebook_path: PathLike,
    mode: black.FileMode,
    check: bool,
    diff: bool,
    compact_diff: bool,
    assert_consistent_execution: bool,
):
    """Format all code cells of a notebook with black.

    Returns a ``(cells_errored, cells_changed, cells_unchanged)`` tuple.
    The notebook is rewritten in place only when not in check/diff mode
    and no cell raised an error.
    """
    with open(notebook_path) as fd:
        nb = nbf.read(fd, as_version=4)

    cells_errored = 0
    cells_changed = 0
    cells_unchanged = 0

    current_execution_count = 1
    for i, cell in enumerate(nb['cells']):
        if cell['cell_type'] != 'code':
            continue

        # check execution consistency
        # (cells must have been executed top-to-bottom: 1, 2, 3, ...)
        if (
            assert_consistent_execution
            and cell['execution_count'] != current_execution_count
        ):
            if check:
                cells_errored += 1
                continue
            else:
                raise RuntimeError(
                    f'[{notebook_path}] Cell {i} has inconsistent execution count'
                )
        current_execution_count += 1

        # format code
        orig_source = cell['source']

        # check whether we should really process cell
        if any(orig_source.startswith(f'%%{magic}') for magic in SKIPPABLE_MAGIC_CODES):
            continue

        # black expects empty line at end of non-empty file
        # for notebook cells, this does not make sense
        if len(orig_source) > 0:
            orig_source += '\n'

        # Jupyter cell magic can mess up black
        # TODO: this is a bad hack
        # Line magics (%) and shell escapes (!) are commented out before
        # formatting and restored afterwards.
        orig_source = re.sub('^%', '#%#jupylint#', orig_source, flags=re.M)
        orig_source = re.sub('^!', '#!#jupylint#', orig_source, flags=re.M)

        try:
            fmted_source = black.format_str(orig_source, mode=mode)
        except black.InvalidInput as e:
            if check:
                print(f'[{notebook_path}] Error while formatting cell {i}: {e}')
                cells_errored += 1
                continue
            else:
                raise RuntimeError(
                    f'[{notebook_path}] Error while formatting cell {i}: {e}'
                )

        if orig_source != fmted_source:
            # Restore the magic/shell-escape markers hidden above.
            fmted_source = re.sub('^#%#jupylint#', '%', fmted_source, flags=re.M)
            fmted_source = re.sub('^#!#jupylint#', '!', fmted_source, flags=re.M)

            if compact_diff:
                diff_result = difflib.unified_diff(
                    orig_source.splitlines(keepends=True),
                    fmted_source.splitlines(keepends=True),
                    fromfile=f'{notebook_path} - Cell {i} (original)',
                    tofile=f'{notebook_path} - Cell {i} (formatted)',
                )
            elif diff:
                diff_result = difflib.ndiff(
                    orig_source.splitlines(keepends=True),
                    fmted_source.splitlines(keepends=True),
                )

            # diff_result is only bound inside this same guard, so no
            # NameError is possible here.
            if compact_diff or diff:
                diff_str = ''.join(diff_result)
                print(diff_str)

            if fmted_source.endswith('\n'):
                # remove dummy newline we added earlier
                fmted_source = fmted_source[:-1]
            fmted_cell = nbf.v4.new_code_cell(fmted_source)
            nb['cells'][i] = fmted_cell

            cells_changed += 1
        else:
            cells_unchanged += 1

    # Only write back when actually formatting (not checking/diffing)
    # and no cell failed to parse.
    if cells_errored == 0 and not check and not compact_diff and not diff:
        with open(notebook_path, 'w') as fd:
            nbf.write(nb, fd)

    if check:
        if not diff and not compact_diff:
            print(notebook_path)

        if cells_errored > 0:
            print(f'{cells_errored} cell(s) raised parsing errors 🤕')
        if cells_changed > 0:
            print(f'{cells_changed} cell(s) would be changed 😬')
        if cells_unchanged > 0:
            print(f'{cells_unchanged} cell(s) would be left unchanged 🎉')
        print()

    return cells_errored, cells_changed, cells_unchanged
Example #45
0
def _AssertGolden(expected_lines, actual_lines):
    expected = list(expected_lines)
    actual = list(l + '\n' for l in actual_lines)
    assert actual == expected, (
        'Did not match .golden.\n' +
        ''.join(difflib.unified_diff(expected, actual, 'expected', 'actual')))
Example #46
0
    def __svnDiff(self, path, content, modified):
        """Performs diff of the given content vs. the repository version.

        path: file system path of the item under version control
        content: the text to compare (possibly an unsaved editing buffer)
        modified: True when the content comes from a modified buffer, which
                  only changes the wording of the displayed labels

        Shows the resulting diff in the IDE, or logs a message when there
        is no difference or an error occurred.
        """
        # Get the SVN content first
        client = self.getSVNClient(self.getSettings())

        try:
            localStatus = self.getLocalStatus(path)
            localRevisionNumber = getLocalRevisionNumber(client, path)
            reposRevisionNumber = getReposRevisionNumber(client, path)

            if reposRevisionNumber is None:
                logging.info("The path " + path +
                             " does not exist in repository")
                return

            # When the local copy is up to date but behind HEAD, the local
            # content is shown on the left (older) side of the diff.
            localAtLeft = False
            if localStatus == IND_UPTODATE:
                if localRevisionNumber < reposRevisionNumber:
                    localAtLeft = True

            repositoryVersion = client.cat(path)

            # Calculate difference
            if localAtLeft:
                diff = difflib.unified_diff(content.splitlines(),
                                            repositoryVersion.splitlines(),
                                            n=5)
            else:
                diff = difflib.unified_diff(repositoryVersion.splitlines(),
                                            content.splitlines(),
                                            n=5)
            nodiffMessage = path + " has no difference to the " \
                                   "repository at revision HEAD"
            if modified:
                nodiffMessage = "Editing buffer with " + nodiffMessage
            # NOTE(review): unified_diff returns a generator and never None,
            # so this branch looks unreachable — confirm before removing.
            if diff is None:
                logging.info(nodiffMessage)
                return

            # There are changes, so replace the text and tell about the changes
            diffAsText = '\n'.join(list(diff))
            if diffAsText.strip() == '':
                logging.info(nodiffMessage)
                return

            # Replace the bare "---"/"+++" headers with descriptive labels.
            if modified:
                localSpec = "editing buffer with " + \
                            os.path.basename(path) + " based on rev." + \
                            str(localRevisionNumber)
            else:
                localSpec = "local " + os.path.basename(path) + \
                            " (rev." + str(localRevisionNumber) + ")"
            reposSpec = "repository at revision HEAD (rev." + \
                        str(reposRevisionNumber) + ")"
            if localAtLeft:
                diffAsText = diffAsText.replace("--- ", "--- " + localSpec, 1)
                diffAsText = diffAsText.replace("+++ ", "+++ " + reposSpec, 1)
            else:
                diffAsText = diffAsText.replace("+++ ", "+++ " + localSpec, 1)
                diffAsText = diffAsText.replace("--- ", "--- " + reposSpec, 1)
        except Exception as excpt:
            logging.error(str(excpt))
            return

        self.ide.mainWindow.showDiff(diffAsText, "SVN diff for " + path)
Example #47
0
 def show_diff(self, location: Tuple[str, int] = None) -> List[str]:
     """Return a unified diff between the 'diff' option file and the
     current file, as a list of diff lines.

     *location* is accepted for interface compatibility and is not used.
     """
     current = self.read_file(self.filename)
     baseline_name = self.options.get('diff')
     baseline = self.read_file(baseline_name)
     delta = unified_diff(baseline, current, baseline_name, self.filename)
     return [entry for entry in delta]
Example #48
0
def print_diff(store1, store2):
    """Print a unified diff of two stores' serialized content, line by line.

    Each store is serialized via bytes() and decoded with its own declared
    encoding attribute before being split into lines for comparison.
    """
    def as_lines(store):
        # Decode the store's byte serialization using its own encoding.
        return bytes(store).decode(store.encoding).split('\n')

    for diff_line in difflib.unified_diff(as_lines(store1), as_lines(store2)):
        print(diff_line)
Example #49
0
def managed(name, enabled=True, **kwargs):
    """
    Ensure that the named interface is configured properly.

    name
        The name of the interface to manage

    type : eth
        Type of interface and configuration

        .. versionchanged:: 3002

    enabled
        Designates the state of this interface.
    """
    # For this function we are purposefully overwriting a bif
    # to enhance the user experience. This does not look like
    # it will cause a problem. Just giving a heads up in case
    # it does create a problem.
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Interface {} is up to date.".format(name),
    }
    if "test" not in kwargs:
        kwargs["test"] = __opts__.get("test", False)

    # set ranged status
    apply_ranged_setting = False

    # Pull interface type out of kwargs
    iface_type = str(kwargs.pop("type", "eth"))

    # Accept the legacy 'addr' argument name, mapping it to 'hwaddr'
    # unless 'hwaddr' was also given explicitly.
    if "addr" in kwargs:
        hwaddr = kwargs.pop("addr")
        msg = "'addr' is not a valid argument name, "
        if "hwaddr" not in kwargs:
            msg += "its value has been assigned to 'hwaddr' instead."
            kwargs["hwaddr"] = hwaddr
        else:
            msg += "it has been ignored in favor of 'hwaddr'."
        msg += " Update your SLS file to get rid of this warning."
        ret.setdefault("warnings", []).append(msg)

    # Build interface
    try:
        old = __salt__["ip.get_interface"](name)
        new = __salt__["ip.build_interface"](name, iface_type, enabled, **kwargs)
        if kwargs["test"]:
            # NOTE(review): sibling states (system/routes) return ret here;
            # this 'pass' makes test mode fall through to the bond checks
            # below even when nothing changed — confirm this is intentional.
            if old == new:
                pass
            if not old and new:
                ret["result"] = None
                ret["comment"] = "Interface {} is set to be added.".format(name)
            elif old != new:
                diff = difflib.unified_diff(old, new, lineterm="")
                ret["result"] = None
                ret["comment"] = "Interface {} is set to be updated:\n{}".format(
                    name, "\n".join(diff)
                )
        else:
            if not old and new:
                ret["comment"] = "Interface {} added.".format(name)
                ret["changes"]["interface"] = "Added network interface."
                apply_ranged_setting = True
            elif old != new:
                diff = difflib.unified_diff(old, new, lineterm="")
                ret["comment"] = "Interface {} updated.".format(name)
                ret["changes"]["interface"] = "\n".join(diff)
                apply_ranged_setting = True
    except AttributeError as error:
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Debian based system can have a type of source
    # in the interfaces file, we don't ifup or ifdown it
    if iface_type == "source":
        return ret

    # Setup up bond modprobe script if required
    if iface_type == "bond" and "ip.get_bond" in __salt__:
        try:
            old = __salt__["ip.get_bond"](name)
            new = __salt__["ip.build_bond"](name, **kwargs)
            if kwargs["test"]:
                if not old and new:
                    ret["result"] = None
                    ret["comment"] = "Bond interface {} is set to be added.".format(
                        name
                    )
                elif old != new:
                    diff = difflib.unified_diff(old, new, lineterm="")
                    ret["result"] = None
                    ret["comment"] = (
                        "Bond interface {} is set to be "
                        "updated:\n{}".format(name, "\n".join(diff))
                    )
            else:
                if not old and new:
                    ret["comment"] = "Bond interface {} added.".format(name)
                    ret["changes"]["bond"] = "Added bond {}.".format(name)
                    apply_ranged_setting = True
                elif old != new:
                    diff = difflib.unified_diff(old, new, lineterm="")
                    ret["comment"] = "Bond interface {} updated.".format(name)
                    ret["changes"]["bond"] = "\n".join(diff)
                    apply_ranged_setting = True
        except AttributeError as error:
            # TODO Add a way of reversing the interface changes.
            ret["result"] = False
            ret["comment"] = str(error)
            return ret

    # Test mode stops here: nothing below may touch the system.
    if kwargs["test"]:
        return ret

    # For Redhat/Centos ranged network
    if "range" in name:
        if apply_ranged_setting:
            try:
                ret["result"] = __salt__["service.restart"]("network")
                ret["comment"] = "network restarted for change of ranged interfaces"
                return ret
            except Exception as error:  # pylint: disable=broad-except
                ret["result"] = False
                ret["comment"] = str(error)
                return ret
        ret["result"] = True
        ret["comment"] = "no change, passing it"
        return ret

    # Bring up/shutdown interface
    try:
        # Get Interface current status
        # An alias/secondary address carrying this name also counts as "up".
        interfaces = salt.utils.network.interfaces()
        interface_status = False
        if name in interfaces:
            interface_status = interfaces[name].get("up")
        else:
            for iface in interfaces:
                if "secondary" in interfaces[iface]:
                    for second in interfaces[iface]["secondary"]:
                        if second.get("label", "") == name:
                            interface_status = True
                if iface == "lo":
                    if "inet" in interfaces[iface]:
                        inet_data = interfaces[iface]["inet"]
                        if len(inet_data) > 1:
                            for data in inet_data:
                                if data.get("label", "") == name:
                                    interface_status = True
                    if "inet6" in interfaces[iface]:
                        inet6_data = interfaces[iface]["inet6"]
                        if len(inet6_data) > 1:
                            for data in inet6_data:
                                if data.get("label", "") == name:
                                    interface_status = True
        if enabled:
            if "noifupdown" not in kwargs:
                if interface_status:
                    if ret["changes"]:
                        # Interface should restart to validate if it's up
                        __salt__["ip.down"](name, iface_type)
                        __salt__["ip.up"](name, iface_type)
                        ret["changes"][
                            "status"
                        ] = "Interface {} restart to validate".format(name)
                else:
                    __salt__["ip.up"](name, iface_type)
                    ret["changes"]["status"] = "Interface {} is up".format(name)
        else:
            if "noifupdown" not in kwargs:
                if interface_status:
                    __salt__["ip.down"](name, iface_type)
                    ret["changes"]["status"] = "Interface {} down".format(name)
    except Exception as error:  # pylint: disable=broad-except
        ret["result"] = False
        ret["comment"] = str(error)
        return ret

    # Try to enslave bonding interfaces after master was created
    if iface_type == "bond" and "noifupdown" not in kwargs:

        if "slaves" in kwargs and kwargs["slaves"]:
            # Check that there are new slaves for this master
            present_slaves = __salt__["cmd.run"](
                ["cat", "/sys/class/net/{}/bonding/slaves".format(name)]
            ).split()
            desired_slaves = kwargs["slaves"].split()
            missing_slaves = set(desired_slaves) - set(present_slaves)

            # Enslave only slaves missing in master
            if missing_slaves:
                ifenslave_path = __salt__["cmd.run"](["which", "ifenslave"]).strip()
                if ifenslave_path:
                    log.info(
                        "Adding slaves '%s' to the master %s",
                        " ".join(missing_slaves),
                        name,
                    )
                    cmd = [ifenslave_path, name] + list(missing_slaves)
                    __salt__["cmd.run"](cmd, python_shell=False)
                else:
                    log.error("Command 'ifenslave' not found")
                ret["changes"]["enslave"] = "Added slaves '{}' to master '{}'".format(
                    " ".join(missing_slaves), name
                )
            else:
                log.info(
                    "All slaves '%s' are already added to the master %s"
                    ", no actions required",
                    " ".join(missing_slaves),
                    name,
                )

    if enabled and interface_status:
        # Interface was restarted, return
        return ret

    # Make sure that the network grains reflect any changes made here
    __salt__["saltutil.refresh_grains"]()
    return ret
Example #50
0
def main(argv=None):
    """Check C/C++ code style using uncrustify.

    Runs uncrustify repeatedly (up to 5 passes) until its output settles,
    prints a unified diff for every divergent file, optionally reformats
    the files in place (``--reformat``) and/or writes an xunit-compatible
    XML report (``--xunit-file``).

    :param argv: command line arguments; ``None`` means ``sys.argv[1:]``.
        The sentinel avoids capturing ``sys.argv`` at import time, which
        the previous mutable-looking default did.
    :returns: 0 if no style divergence was found, 1 on any error or
        divergence
    """
    if argv is None:
        argv = sys.argv[1:]

    config_file = os.path.join(os.path.dirname(__file__), 'configuration',
                               'ament_code_style.cfg')

    extensions = ['c', 'cc', 'cpp', 'cxx', 'h', 'hh', 'hpp', 'hxx']

    parser = argparse.ArgumentParser(
        description='Check code style using uncrustify.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c',
                        metavar='CFG',
                        default=config_file,
                        dest='config_file',
                        help='The config file')
    parser.add_argument(
        '--linelength',
        metavar='N',
        type=int,
        help='The maximum line length (default: specified in the config file)')
    parser.add_argument(
        'paths',
        nargs='*',
        default=[os.curdir],
        help='The files or directories to check. For directories files ending '
        'in %s will be considered.' %
        ', '.join(["'.%s'" % e for e in extensions]))
    parser.add_argument(
        '--exclude',
        nargs='*',
        default=[],
        help='Exclude specific file names and directory names from the check')
    parser.add_argument('--reformat',
                        action='store_true',
                        help='Reformat the files in place')
    # not using a file handle directly
    # in order to prevent leaving an empty file when something fails early
    parser.add_argument('--xunit-file',
                        help='Generate a xunit compliant XML file')
    args = parser.parse_args(argv)

    if not os.path.exists(args.config_file):
        # fixed message: previously read "Could not config file"
        print("Could not find config file '%s'" % args.config_file,
              file=sys.stderr)
        return 1

    temp_config = None
    temp_path = None
    try:
        if args.linelength is not None:
            # check if different from config file
            config = ConfigParser()
            with open(args.config_file, 'r') as h:
                config_str = h.read()
            config.read_string('[DEFAULT]\n' + config_str)
            code_width = config['DEFAULT']['code_width']
            # strip a trailing inline comment from the config value
            code_width = int(re.split('[ \t#]', code_width, maxsplit=1)[0])
            if args.linelength != code_width:
                # generate temporary config file with custom line length
                temp_config_fd, args.config_file = tempfile.mkstemp(
                    prefix='uncrustify_')
                temp_config = os.fdopen(temp_config_fd, 'w')
                temp_config.write(config_str +
                                  '\ncode_width=%d' % args.linelength)
                temp_config.close()

        if args.xunit_file:
            start_time = time.time()

        files = get_files(args.paths, extensions, args.exclude)
        if not files:
            print('No files found', file=sys.stderr)
            return 1

        uncrustify_bin = find_executable('uncrustify')
        if not uncrustify_bin:
            print("Could not find 'uncrustify' executable", file=sys.stderr)
            return 1

        suffix = '.uncrustify'

        report = []
        temp_path = tempfile.mkdtemp(prefix='uncrustify_')

        # invoke uncrustify on all files
        input_files = [os.path.abspath(f) for f in files]

        # on Windows uncrustify fails to concatenate
        # the absolute prefix path with the absolute input files
        # https://github.com/bengardner/uncrustify/issues/364
        cwd = None
        if os.name == 'nt':
            cwd = os.path.commonprefix(input_files)
            if not os.path.isdir(cwd):
                cwd = os.path.dirname(cwd)
                assert os.path.isdir(cwd), \
                    'Could not determine common prefix of input files'
            input_files = [os.path.relpath(f, start=cwd) for f in input_files]

        try:
            cmd = [
                uncrustify_bin, '-c', args.config_file, '--prefix', temp_path,
                '--suffix', suffix
            ]
            cmd.extend(input_files)
            subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            if e.output:
                print(e.output.decode(), file=sys.stderr)
            print(
                "The invocation of 'uncrustify' failed with error code %d: %s"
                % (e.returncode, e),
                file=sys.stderr)
            return 1

        if cwd:
            # input files are relative
            # prepend temp path, append suffix
            output_files = [
                os.path.join(temp_path, f + suffix) for f in input_files
            ]
        else:
            # input files are absolute
            # remove leading slash, prepend temp path, append suffix
            output_files = [
                os.path.join(temp_path,
                             os.sep.join(f.split(os.sep)[1:]) + suffix)
                for f in input_files
            ]

        # uncrustify is not idempotent: rerun it on its own output until
        # the result stops changing (or give up after 5 passes)
        uncrustified_files = output_files
        i = 1
        while True:
            # identify files which have changed since the latest uncrustify run
            changed_files = []
            for input_filename, output_filename in zip(input_files,
                                                       uncrustified_files):
                if cwd and not os.path.isabs(input_filename):
                    input_filename = os.path.join(cwd, input_filename)
                if not filecmp.cmp(input_filename, output_filename):
                    if output_filename == input_filename + suffix:
                        # for repeated invocations
                        # replace the previous uncrustified file
                        os.rename(output_filename, input_filename)
                        changed_files.append(input_filename)
                    else:
                        # after first invocation remove suffix
                        # otherwise uncrustify behaves different
                        output_filename_without_suffix = \
                            output_filename[:-len(suffix)]
                        os.rename(output_filename,
                                  output_filename_without_suffix)
                        changed_files.append(output_filename_without_suffix)
            if not changed_files:
                break
            # reinvoke uncrustify for previously changed files
            input_files = changed_files
            try:
                cmd = [
                    uncrustify_bin, '-c', args.config_file, '--suffix', suffix
                ]
                cmd.extend(input_files)
                subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                if e.output:
                    # decode for readable output (was printing raw bytes)
                    print(e.output.decode(), file=sys.stderr)
                print(
                    "The invocation of 'uncrustify' failed with error code %d: %s"
                    % (e.returncode, e),
                    file=sys.stderr)
                return 1

            uncrustified_files = [f + suffix for f in input_files]
            i += 1
            if i >= 5:
                print("'uncrustify' did not settle on a final result even "
                      "after %d invocations" % i,
                      file=sys.stderr)
                return 1

        # compute diff
        for index, filename in enumerate(files):
            modified_filename = output_files[index]
            with open(filename, 'r', encoding='utf-8') as original_file:
                with open(modified_filename, 'r',
                          encoding='utf-8') as modified_file:
                    diff_lines = list(
                        difflib.unified_diff(original_file.readlines(),
                                             modified_file.readlines(),
                                             fromfile=filename,
                                             tofile=filename + suffix,
                                             n=0))
                    report.append((filename, diff_lines))
            if args.reformat:
                # overwrite original with reformatted file
                with open(filename, 'wb') as original_file:
                    with open(modified_filename, 'rb') as modified_file:
                        original_file.write(modified_file.read())
    finally:
        # remove the temporary config file and scratch directory even on
        # early return / exception
        if temp_config:
            os.remove(args.config_file)
        if temp_path and os.path.exists(temp_path):
            shutil.rmtree(temp_path)

    # output diffs
    for (filename, diff_lines) in report:
        if diff_lines:
            if not args.reformat:
                print("Code style divergence in file '%s':" % filename,
                      file=sys.stderr)
                print('', file=sys.stderr)
                for line in diff_lines:
                    print(line.rstrip('\r\n'), file=sys.stderr)
                print('', file=sys.stderr)
            else:
                print("Code style divergence in file '%s': reformatted file" %
                      filename)
        else:
            print("No code style divergence in file '%s'" % filename)
            if not args.reformat:
                print('')

    # output summary
    error_count = sum(1 for _, diff_lines in report if diff_lines)
    if not error_count:
        print('No problems found')
        rc = 0
    else:
        print('%d files with code style divergence' % error_count,
              file=sys.stderr)
        rc = 1

    # generate xunit file
    if args.xunit_file:
        folder_name = os.path.basename(os.path.dirname(args.xunit_file))
        file_name = os.path.basename(args.xunit_file)
        suffix = '.xml'
        if file_name.endswith(suffix):
            file_name = file_name[0:-len(suffix)]
            suffix = '.xunit'
            if file_name.endswith(suffix):
                file_name = file_name[0:-len(suffix)]
        testname = '%s.%s' % (folder_name, file_name)

        xml = get_xunit_content(report, testname, time.time() - start_time)
        path = os.path.dirname(os.path.abspath(args.xunit_file))
        if not os.path.exists(path):
            os.makedirs(path)
        with open(args.xunit_file, 'w') as f:
            f.write(xml)

    return rc
Example #51
0
    # Go !
    # NOTE(review): this fragment is Python 2 (bare ``print`` statement at
    # the end) and is the tail of a CLI entry point; ``parser``, ``options``
    # and ``args`` are defined earlier, outside this excerpt.
    # Exactly two document paths are required.
    if len(args) != 2:
        parser.print_help()
        exit(1)

    # Open the 2 documents, diff only for ODT
    doc1 = odf_get_document(args[0])
    doc2 = odf_get_document(args[1])
    if doc1.get_type() != 'text' or doc2.get_type() != 'text':
        parser.print_help()
        exit(1)

    # Convert in text before the diff
    # splitlines(True) keeps line endings so the diff output re-joins cleanly.
    text1 = doc1.get_formatted_text(True).splitlines(True)
    text2 = doc2.get_formatted_text(True).splitlines(True)

    # Make the diff !
    if options.ndiff:
        # ndiff mode: drop unchanged lines (those starting with a space)
        result = ndiff(text1, text2, None, None)
        result = [ line for line in result if not line.startswith(u' ') ]
    else:
        # unified diff mode: label each side with its path and mtime
        fromdate = ctime(stat(args[0]).st_mtime)
        todate = ctime(stat(args[1]).st_mtime)
        result = unified_diff(text1, text2, args[0], args[1], fromdate, todate)
    result = u''.join(result)
    # Encode for the current stdout; fall back to UTF-8 when the stream
    # has no declared encoding (e.g. when piped).
    encoding = stdout.encoding if stdout.encoding is not None else 'utf-8'
    result = result.encode(encoding)

    # And print it !
    print result
Example #52
0
    # NOTE(review): fragment of a result-set comparison script; it is
    # truncated below and ``config_files``, ``sequences`` and ``suffixes``
    # are presumably module-level lists defined outside this excerpt --
    # TODO confirm.
    # The two positional CLI arguments are the result-set directories.
    result_set_1 = Path(sys.argv[1])
    result_set_2 = Path(sys.argv[2])

    config_1 = result_set_1 / "config"
    config_2 = result_set_2 / "config"

    # Look for the differences between configurations and output
    for c_file in config_files:
        text_1 = (config_1 / c_file).read_text()
        lines_1 = text_1.split("\n")
        text_2 = (config_2 / c_file).read_text()
        lines_2 = text_2.split("\n")
        print("Comparing " + c_file)
        for line in difflib.unified_diff(lines_1,
                                         lines_2,
                                         "file_1",
                                         "file_2",
                                         lineterm="\n"):
            print(line)

    # plot the error from each sequence
    error_path_1 = result_set_1 / 'plot_error'
    error_path_2 = result_set_2 / 'plot_error'
    for seq in sequences:
        for suffix in suffixes:
            error_1 = (error_path_1 / (seq + suffix)).read_text().splitlines()
            error_2 = (error_path_2 / (seq + suffix)).read_text().splitlines()
            # x/y pairs collected for plotting (plotting code is outside
            # this excerpt)
            x1 = []
            y1 = []
            x2 = []
            y2 = []
Example #53
0
def change(name, context=None, changes=None, lens=None, load_path=None, **kwargs):
    """
    .. versionadded:: 2014.7.0

    This state replaces :py:func:`~salt.states.augeas.setvalue`.

    Issue changes to Augeas, optionally for a specific context, with a
    specific lens.

    name
        State name

    context
        A file path, prefixed by ``/files``. Should resolve to an actual file
        (not an arbitrary augeas path). This is used to avoid duplicating the
        file name for each item in the changes list (for example, ``set bind 0.0.0.0``
        in the example below operates on the file specified by ``context``). If
        ``context`` is not specified, a file path prefixed by ``/files`` should be
        included with the ``set`` command.

        The file path is examined to determine if the
        specified changes are already present.

        .. code-block:: yaml

            redis-conf:
              augeas.change:
                - context: /files/etc/redis/redis.conf
                - changes:
                  - set bind 0.0.0.0
                  - set maxmemory 1G

    changes
        List of changes that are issued to Augeas. Available commands are
        ``set``, ``setm``, ``mv``/``move``, ``ins``/``insert``, and
        ``rm``/``remove``.

    lens
        The lens to use, needs to be suffixed with `.lns`, e.g.: `Nginx.lns`.
        See the `list of stock lenses <http://augeas.net/stock_lenses.html>`_
        shipped with Augeas.

    .. versionadded:: 2016.3.0

    load_path
        A list of directories that modules should be searched in. This is in
        addition to the standard load path and the directories in
        AUGEAS_LENS_LIB.


    Usage examples:

    Set the ``bind`` parameter in ``/etc/redis/redis.conf``:

    .. code-block:: yaml

        redis-conf:
          augeas.change:
            - changes:
              - set /files/etc/redis/redis.conf/bind 0.0.0.0

    .. note::

        Use the ``context`` parameter to specify the file you want to
        manipulate. This way you don't have to include this in the changes
        every time:

        .. code-block:: yaml

            redis-conf:
              augeas.change:
                - context: /files/etc/redis/redis.conf
                - changes:
                  - set bind 0.0.0.0
                  - set databases 4
                  - set maxmemory 1G

    Augeas is aware of a lot of common configuration files and their syntax.
    It knows the difference between for example ini and yaml files, but also
    files with very specific syntax, like the hosts file. This is done with
    *lenses*, which provide mappings between the Augeas tree and the file.

    There are many `preconfigured lenses`_ that come with Augeas by default,
    and they specify the common locations for configuration files. So most
    of the time Augeas will know how to manipulate a file. In the event that
    you need to manipulate a file that Augeas doesn't know about, you can
    specify the lens to use like this:

    .. code-block:: yaml

        redis-conf:
          augeas.change:
            - lens: redis.lns
            - context: /files/etc/redis/redis.conf
            - changes:
              - set bind 0.0.0.0

    .. note::

        Even though Augeas knows that ``/etc/redis/redis.conf`` is a Redis
        configuration file and knows how to parse it, it is recommended to
        specify the lens anyway. This is because by default, Augeas loads all
        known lenses and their associated file paths. All these files are
        parsed when Augeas is loaded, which can take some time. When specifying
        a lens, Augeas is loaded with only that lens, which speeds things up
        quite a bit.

    .. _preconfigured lenses: http://augeas.net/stock_lenses.html

    A more complex example, this adds an entry to the services file for Zabbix,
    and removes an obsolete service:

    .. code-block:: yaml

        zabbix-service:
          augeas.change:
            - lens: services.lns
            - context: /files/etc/services
            - changes:
              - ins service-name after service-name[last()]
              - set service-name[last()] "zabbix-agent"
              - set "service-name[. = 'zabbix-agent']/port" 10050
              - set "service-name[. = 'zabbix-agent']/protocol" tcp
              - set "service-name[. = 'zabbix-agent']/#comment" "Zabbix Agent service"
              - rm "service-name[. = 'im-obsolete']"
            - unless: grep "zabbix-agent" /etc/services

    .. warning::

        Don't forget the ``unless`` here, otherwise it will fail on next runs
        because the service is already defined. Additionally you have to quote
        lines containing ``service-name[. = 'zabbix-agent']`` otherwise
        :mod:`augeas_cfg <salt.modules.augeas_cfg>` execute will fail because
        it will receive more parameters than expected.

    .. note::

        Order is important when defining a service with Augeas, in this case
        it's ``port``, ``protocol`` and ``#comment``. For more info about
        the lens check `services lens documentation`_.

    .. _services lens documentation:

    http://augeas.net/docs/references/lenses/files/services-aug.html#Services.record

    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Validate inputs: 'changes' is mandatory and must be a non-empty list.
    if not isinstance(changes, list) or not changes:
        ret["comment"] = "'changes' must be specified as a list"
        return ret

    if load_path is not None:
        if not isinstance(load_path, list):
            ret["comment"] = "'load_path' must be specified as a list"
            return ret
        # Augeas expects a colon-separated search-path string.
        load_path = ":".join(load_path)

    # Determine which on-disk file the changes target: either parse it out
    # of the commands themselves, or derive it from the supplied context.
    if context is None:
        try:
            filename = _check_filepath(changes)
        except ValueError as err:
            ret["comment"] = "Error: {}".format(err)
            return ret
    else:
        filename = re.sub("^/files|/$", "", context)

    # Dry run (test=True): report what would be executed, make no changes.
    if __opts__["test"]:
        ret["result"] = True
        comment = "Executing commands"
        if context:
            comment += ' in file "{}":\n'.format(context)
        ret["comment"] = comment + "\n".join(changes)
        return ret

    # Snapshot the target file so a diff can be produced afterwards.
    before_lines = []
    if filename is not None and os.path.isfile(filename):
        with salt.utils.files.fopen(filename, "r") as fh_:
            before_lines = [salt.utils.stringutils.to_unicode(line) for line in fh_.readlines()]

    result = __salt__["augeas.execute"](
        context=context, lens=lens, commands=changes, load_path=load_path
    )
    ret["result"] = result["retval"]

    if ret["result"] is False:
        ret["comment"] = "Error: {}".format(result["error"])
        return ret

    if filename is not None and os.path.isfile(filename):
        # Re-read the file and report the unified diff as the state change.
        with salt.utils.files.fopen(filename, "r") as fh_:
            after_lines = [salt.utils.stringutils.to_unicode(line) for line in fh_.readlines()]
        diff = "".join(difflib.unified_diff(before_lines, after_lines, n=0))

        if diff:
            ret["comment"] = "Changes have been saved"
            ret["changes"] = {"diff": diff}
        else:
            ret["comment"] = "No changes made"
    else:
        # No file to diff against; report the issued commands instead.
        ret["comment"] = "Changes have been saved"
        ret["changes"] = {"updates": changes}

    return ret
Example #54
0
def compare_outputs(outputs,
                    raise_error=True,
                    ignore_keywords=[],
                    ignore_hdus=[],
                    ignore_fields=[],
                    rtol=0.0,
                    atol=0.0,
                    input_path=[],
                    docopy=True,
                    results_root=None,
                    verbose=True):
    """
    Compare output with "truth" using appropriate
    diff routine; namely:

    * ``fitsdiff`` for FITS file comparisons.
    * ``unified_diff`` for ASCII products.

    Only after all elements of ``outputs`` have been
    processed will the method report any success or failure, with
    failure of any one comparison *not* preventing the rest of the
    comparisons to be performed.

    Parameters
    ----------
    outputs : list of tuple or dict
        This list defines what outputs from running the test will be
        compared.  Three distinct types of values as list elements
        are supported:

        * 2-tuple : ``(test output filename, truth filename)``
        * 3-tuple : ``(test output filename, truth filename, HDU names)``
        * dict : ``{'files': (output, truth), 'pars': {key: val}}``

        If filename contains extension such as ``[hdrtab]``,
        it will be interpreted as specifying comparison of just that HDU.

    raise_error : bool
        Raise ``AssertionError`` if difference is found.

    ignore_keywords : list of str
        List of FITS header keywords to be ignored by
        ``FITSDiff`` and ``HDUDiff``.

    ignore_hdus : list of str
        List of FITS HDU names to ignore by ``FITSDiff``.
        This is only available for ``astropy>=3.1``.

    ignore_fields : list of str
        List FITS table column names to be ignored by
        ``FITSDiff`` and ``HDUDiff``.

    rtol, atol : float
        Relative and absolute tolerance to be used by
        ``FITSDiff`` and ``HDUDiff``.

    input_path : list or tuple
        A series of sub-directory names under :func:`get_bigdata_root`
        that leads to the path of the 'truth' files to be compared
        against. If not provided, it assumes that 'truth' is in the
        working directory. For example, with :func:`get_bigdata_root`
        pointing to ``/grp/test_data``, a file at::

            /grp/test_data/pipeline/dev/ins/test_1/test_a.py

        would require ``input_path`` of::

            ["pipeline", "dev", "ins", "test_1"]

    docopy : bool
        If `True`, 'truth' will be copied to output directory before
        comparison is done.

    results_root : str or `None`
        If not `None`, for every failed comparison, the test output
        is automatically renamed to the given 'truth' in the output
        directory and :func:`generate_upload_schema` will be called
        to generate a JSON scheme for Artifactory upload.
        If you do not need this functionality, use ``results_root=None``.

    verbose : bool
        Print extra info to screen.

    Returns
    -------
    creature_report : str
        Report from FITS or ASCII comparator.
        This is part of error message if ``raise_error=True``.

    Examples
    --------
    There are multiple use cases for this method, specifically
    related to how ``outputs`` are defined upon calling this method.
    The specification of the ``outputs`` can be any combination of the
    following patterns:

    1. 2-tuple inputs::

           outputs = [('file1.fits', 'file1_truth.fits')]

       This definition indicates that ``file1.fits`` should be compared
       as a whole with ``file1_truth.fits``.

    2. 2-tuple inputs with extensions::

           outputs = [('file1.fits[hdrtab]', 'file1_truth.fits[hdrtab]')]

       This definition indicates that only the HDRTAB extension from
       ``file1.fits`` will be compared to the HDRTAB extension from
       ``file1_truth.fits``.

    3. 3-tuple inputs::

           outputs = [('file1.fits', 'file1_truth.fits', ['primary', 'sci'])]

       This definition indicates that only the PRIMARY and SCI extensions
       should be compared between the two files. This creates a temporary
       ``HDUList`` object comprising only the given extensions for comparison.

    4. Dictionary of inputs and parameters::

           outputs = [{'files': ('file1.fits', 'file1_truth.fits'),
                       'pars': {'ignore_keywords': ['ROOTNAME']}}]

       This definition indicates that ROOTNAME will be ignored during
       the comparison between the files specified in ``'files'``.
       Any input parameter for ``FITSDiff`` or ``HDUDiff`` can be specified
       as part of the ``'pars'`` dictionary.
       In addition, the input files listed in ``'files'`` can also include
       an extension specification, such as ``[hdrtab]``, to limit the
       comparison to just that extension.

    This example from an actual test definition demonstrates
    how multiple input defintions can be used at the same time::

        outputs = [
            ('jw99999_nircam_f140m-maskbar_psfstack.fits',
             'jw99999_nircam_f140m-maskbar_psfstack_ref.fits'
            ),
            ('jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',
             'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'
            ),
            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits'),
             'pars': {'ignore_hdus': ['HDRTAB']}
            },
            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits',
                       ['primary','sci','dq']),
             'pars': {'rtol': 0.000001}
            },
            {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits[hdrtab]',
                       'jw99999_nircam_f140m-maskbar_i2d_ref.fits[hdrtab]'),
             'pars': {'ignore_keywords': ['NAXIS1', 'TFORM*'],
                      'ignore_fields': ['COL1', 'COL2']}
            }]

    .. note:: Each ``outputs`` entry in the list gets interpreted and processed
              separately.

    """
    # astropy < 3.1 has no ignore_hdus support in FITSDiff.
    if ASTROPY_LT_3_1:
        if len(ignore_hdus) > 0:  # pragma: no cover
            raise ValueError('ignore_hdus cannot be used for astropy<3.1')
        default_kwargs = {
            'rtol': rtol,
            'atol': atol,
            'ignore_keywords': ignore_keywords,
            'ignore_fields': ignore_fields
        }
    else:
        default_kwargs = {
            'rtol': rtol,
            'atol': atol,
            'ignore_keywords': ignore_keywords,
            'ignore_fields': ignore_fields,
            'ignore_hdus': ignore_hdus
        }

    all_okay = True
    creature_report = ''
    updated_outputs = []  # To track outputs for Artifactory JSON schema

    for entry in outputs:
        # deepcopy so per-entry 'pars' never leak into later entries
        diff_kwargs = copy.deepcopy(default_kwargs)
        extn_list = None
        # len() of a dict counts its keys; the dict case is handled first
        # below, so num_entries is only meaningful for the tuple cases.
        num_entries = len(entry)

        if isinstance(entry, dict):
            entry_files = entry['files']
            actual = entry_files[0]
            desired = entry_files[1]
            if len(entry_files) > 2:
                extn_list = entry_files[2]
            diff_kwargs.update(entry.get('pars', {}))
        elif num_entries == 2:
            actual, desired = entry
        elif num_entries == 3:
            actual, desired, extn_list = entry
        else:
            all_okay = False
            creature_report += '\nERROR: Cannot handle entry {}\n'.format(
                entry)
            continue

        # TODO: Use regex?
        # A trailing ']' means a single-HDU spec like 'file.fits[hdrtab]';
        # this is mutually exclusive with an explicit extension list.
        if actual.endswith(']'):
            if extn_list is not None:
                all_okay = False
                creature_report += (
                    '\nERROR: Ambiguous extension requirements '
                    'for {} ({})\n'.format(actual, extn_list))
                continue
            actual_name, actual_extn = actual.split('[')
            actual_extn = actual_extn.replace(']', '')
        else:
            actual_name = actual
            actual_extn = None

        if desired.endswith(']'):
            if extn_list is not None:
                all_okay = False
                creature_report += (
                    '\nERROR: Ambiguous extension requirements '
                    'for {} ({})\n'.format(desired, extn_list))
                continue
            desired_name, desired_extn = desired.split('[')
            desired_extn = desired_extn.replace(']', '')
        else:
            desired_name = desired
            desired_extn = None

        # Get "truth" image
        # NOTE(review): get_bigdata presumably downloads/copies the truth
        # file and returns its local path -- confirm against its docs.
        try:
            desired = get_bigdata(*input_path, desired_name, docopy=docopy)
        except BigdataError:
            all_okay = False
            creature_report += '\nERROR: Cannot find {} in {}\n'.format(
                desired_name, input_path)
            continue

        if desired_extn is not None:
            desired_name = desired
            desired = "{}[{}]".format(desired, desired_extn)

        if verbose:
            print("\nComparing:\n {} \nto\n {}".format(actual, desired))

        if actual.endswith('.fits') and desired.endswith('.fits'):
            # Build HDULists for comparison based on user-specified extensions
            if extn_list is not None:
                with fits.open(actual) as f_act:
                    with fits.open(desired) as f_des:
                        actual_hdu = fits.HDUList(
                            [f_act[extn] for extn in extn_list])
                        desired_hdu = fits.HDUList(
                            [f_des[extn] for extn in extn_list])
                        fdiff = FITSDiff(actual_hdu, desired_hdu,
                                         **diff_kwargs)
                        creature_report += '\na: {}\nb: {}\n'.format(
                            actual, desired)  # diff report only gives hash
            # Working with FITS files...
            else:
                fdiff = FITSDiff(actual, desired, **diff_kwargs)

            creature_report += fdiff.report()

            if not fdiff.identical:
                all_okay = False
                # Only keep track of failed results which need to
                # be used to replace the truth files (if OK).
                updated_outputs.append((actual, desired))

        elif actual_extn is not None or desired_extn is not None:
            if 'ignore_hdus' in diff_kwargs:  # pragma: no cover
                diff_kwargs.pop('ignore_hdus')  # Not applicable

            # Specific element of FITS file specified
            with fits.open(actual_name) as f_act:
                with fits.open(desired_name) as f_des:
                    actual_hdu = f_act[actual_extn]
                    desired_hdu = f_des[desired_extn]
                    fdiff = HDUDiff(actual_hdu, desired_hdu, **diff_kwargs)

            creature_report += '\na: {}\nb: {}\n'.format(actual, desired)
            creature_report += fdiff.report()

            if not fdiff.identical:
                all_okay = False
                # Only keep track of failed results which need to
                # be used to replace the truth files (if OK).
                updated_outputs.append((actual_name, desired_name))

        else:
            # ASCII-based diff
            with open(actual) as afile:
                actual_lines = afile.readlines()
            with open(desired) as dfile:
                desired_lines = dfile.readlines()

            udiff = unified_diff(actual_lines,
                                 desired_lines,
                                 fromfile=actual,
                                 tofile=desired)
            udiffIO = StringIO()
            udiffIO.writelines(udiff)
            udiff_report = udiffIO.getvalue()
            udiffIO.close()

            if len(udiff_report) == 0:
                creature_report += ('\na: {}\nb: {}\nNo differences '
                                    'found.\n'.format(actual, desired))
            else:
                all_okay = False
                creature_report += udiff_report
                # Only keep track of failed results which need to
                # be used to replace the truth files (if OK).
                updated_outputs.append((actual, desired))

    if not all_okay and results_root is not None:  # pragma: no cover
        schema_pattern, tree, testname = generate_upload_params(
            results_root, updated_outputs, verbose=verbose)
        generate_upload_schema(schema_pattern, tree, testname)

    if not all_okay and raise_error:
        raise AssertionError(os.linesep + creature_report)

    return creature_report
def main():
    """Run the translation_unit clang tool over its test files and compare
    each generated ``.filepaths`` output against its ``.expected`` golden
    file, printing gtest-style RUN/OK/FAILED lines.

    Exits with status 1 if run_tool.py itself fails.  On a comparison
    failure the output file is kept for inspection; the temporary compile
    database is removed only when every test passes.
    """
    tools_clang_directory = os.path.dirname(
        os.path.dirname(os.path.realpath(__file__)))
    tools_clang_scripts_directory = os.path.join(tools_clang_directory,
                                                 'scripts')
    test_directory_for_tool = os.path.join(tools_clang_directory,
                                           'translation_unit', 'test_files')
    compile_database = os.path.join(test_directory_for_tool,
                                    'compile_commands.json')
    compile_database_template = compile_database + '.template'
    source_files = glob.glob(os.path.join(test_directory_for_tool, '*.cc'))

    # Generate a temporary compilation database to run the tool over.
    with open(compile_database, 'w') as f:
        f.write(
            _GenerateCompileCommands(compile_database_template,
                                     test_directory_for_tool))

    args = [
        'python',
        os.path.join(tools_clang_scripts_directory, 'run_tool.py'),
        'translation_unit', test_directory_for_tool
    ]
    args.extend(source_files)
    run_tool = subprocess.Popen(args, stdout=subprocess.PIPE)
    stdout, _ = run_tool.communicate()
    if run_tool.returncode != 0:
        # communicate() yields bytes on Python 3; decode for readable output.
        print('run_tool failed:\n%s' % stdout.decode('utf-8', 'replace'))
        sys.exit(1)

    passed = 0
    failed = 0
    for actual in source_files:
        actual += '.filepaths'
        expected = actual + '.expected'
        print('[ RUN      ] %s' % os.path.relpath(actual))
        with open(expected, 'r') as f:
            expected_output = f.readlines()
        with open(actual, 'r') as f:
            actual_output = f.readlines()
        # zip() stops at the shorter sequence, so a truncated output file
        # with a matching prefix used to pass silently; compare lengths too.
        has_same_filepaths = len(expected_output) == len(actual_output)
        for expected_line, actual_line in zip(expected_output, actual_output):
            # Only the part after '//' in a line is compared.
            if '//' in actual_line:
                actual_line = '//' + actual_line.split('//')[1]

            # Compare basenames only: absolute path prefixes differ per host.
            if ntpath.basename(expected_line) != ntpath.basename(actual_line):
                sys.stdout.write('expected: %s' %
                                 ntpath.basename(expected_line))
                sys.stdout.write('actual: %s' % ntpath.basename(actual_line))
                has_same_filepaths = False
                break
        if not has_same_filepaths:
            failed += 1
            for line in difflib.unified_diff(
                    expected_output,
                    actual_output,
                    fromfile=os.path.relpath(expected),
                    tofile=os.path.relpath(actual)):
                sys.stdout.write(line)
            print('[  FAILED  ] %s' % os.path.relpath(actual))
            # Don't clean up the file on failure, so the results can be
            # referenced more easily.
            continue
        print('[       OK ] %s' % os.path.relpath(actual))
        passed += 1
        os.remove(actual)

    if failed == 0:
        os.remove(compile_database)

    print('[==========] %s ran.' % _NumberOfTestsToString(len(source_files)))
    if passed > 0:
        print('[  PASSED  ] %s.' % _NumberOfTestsToString(passed))
    if failed > 0:
        print('[  FAILED  ] %s.' % _NumberOfTestsToString(failed))
Example #56
0
    def assertXMLEqual(self, response, expected, msg=''):
        """Assert two XML documents are equal, comparing line by line.

        Tags, element text content and attributes are compared separately,
        with attributes sorted by name so attribute order does not matter.
        Both inputs are treated as bytes (lines are ``.decode('utf8')``-ed
        for the diff and compared against bytes literals).

        :param response: actual XML document (bytes)
        :param expected: expected XML document (bytes)
        :param msg: extra text prepended to every assertion message
        """
        response_lines = response.splitlines()
        expected_lines = expected.splitlines()
        line_no = 1

        # Build a unified diff up front; it is only used to make the
        # line-count assertion message below readable.
        diffs = []
        for diff in difflib.unified_diff(
            [l.decode('utf8') for l in expected_lines],
            [l.decode('utf8') for l in response_lines]):
            diffs.append(diff)

        # Line counts must match before any per-line comparison is valid.
        self.assertEqual(
            len(expected_lines), len(response_lines),
            "Expected and response have different number of lines!\n{}\n{}".
            format(msg, '\n'.join(diffs)))
        for expected_line in expected_lines:
            expected_line = expected_line.strip()
            response_line = response_lines[line_no - 1].strip()
            # Normalize exponent formatting ('e+6' vs 'e+06') before comparing.
            response_line = response_line.replace(b'e+6', b'e+06')
            # Compare tag
            if re.match(RE_ELEMENT, expected_line):
                expected_elements = re.findall(RE_ELEMENT, expected_line)
                response_elements = re.findall(RE_ELEMENT, response_line)
                # First match is the (opening) tag name; it must agree.
                self.assertEqual(expected_elements[0],
                                 response_elements[0],
                                 msg=msg +
                                 "\nTag mismatch on line %s: %s != %s" %
                                 (line_no, expected_line, response_line))
                # Compare content
                # Two matches with equal names means an <x>...</x> pair on
                # one line, so the enclosed text can be compared as well.
                if len(expected_elements
                       ) == 2 and expected_elements[0] == expected_elements[1]:
                    expected_element_content = re.findall(
                        RE_ELEMENT_CONTENT, expected_line)
                    response_element_content = re.findall(
                        RE_ELEMENT_CONTENT, response_line)
                    self.assertEqual(
                        len(expected_element_content),
                        len(response_element_content),
                        msg=msg + "\nContent mismatch on line %s: %s != %s" %
                        (line_no, expected_line, response_line))
                    if len(expected_element_content):
                        self.assertEqual(
                            expected_element_content[0],
                            response_element_content[0],
                            msg=msg +
                            "\nContent mismatch on line %s: %s != %s" %
                            (line_no, expected_line, response_line))
            else:
                # Non-element line (text, declaration, ...): exact comparison.
                self.assertEqual(expected_line,
                                 response_line,
                                 msg=msg +
                                 "\nTag line mismatch %s: %s != %s\n%s" %
                                 (line_no, expected_line, response_line, msg))
            # print("---->%s\t%s == %s" % (line_no, expected_line, response_line))
            # Compare attributes
            if re.findall(RE_ATTRIBUTES, expected_line):  # has attrs
                # Sort (name, value) pairs so attribute order is irrelevant.
                expected_attrs, expected_values = zip(
                    *sorted(re.findall(RE_ATTRIBUTES, expected_line)))
                self.assertTrue(
                    re.findall(RE_ATTRIBUTES, response_line),
                    msg=msg +
                    "\nXML attributes differ at line {0}: {1} != {2}".format(
                        line_no, expected_line, response_line))
                response_attrs, response_values = zip(
                    *sorted(re.findall(RE_ATTRIBUTES, response_line)))
                self.assertEqual(
                    expected_attrs,
                    response_attrs,
                    msg=msg +
                    "\nXML attributes differ at line {0}: {1} != {2}".format(
                        line_no, expected_attrs, response_attrs))
                self.assertEqual(
                    expected_values,
                    response_values,
                    msg=msg +
                    "\nXML attribute values differ at line {0}: {1} != {2}".
                    format(line_no, expected_values, response_values))
            line_no += 1
Example #57
0
    if m:
        actual += m.expand(r"\1\2\n")

# Collect the expected errors (from "!ERROR: " annotations) together with
# the number of the first non-annotation line that follows them.
errors = []
for num, text in enumerate(src, 1):
    annotation = re.search(r"(?:^\s*!ERROR: )(.*)", text)
    if annotation:
        errors.append(annotation.group(1))
        continue
    if errors:
        # Flush the accumulated messages against this source line.
        expect += "".join(f"{num}: {message}\n" for message in errors)
        errors = []

# Diff the compiler's errors against the expected ones, relabelling the
# +/- markers so the report reads "actual at N:" / "expect at N:".
for line in unified_diff(actual.split("\n"), expect.split("\n"), n=0):
    line = re.sub(r"(^\-)(\d+:)", r"\nactual at \g<2>", line)
    line = re.sub(r"(^\+)(\d+:)", r"\nexpect at \g<2>", line)
    diffs += line

if diffs != "":
    print(diffs)
    print()
    print("FAIL")
    sys.exit(1)
else:
    print()
    print("PASS")

Example #58
0
'''

    slow += '}'

    open('fuzz.slow.js', 'w').write(slow)
    open('fuzz.cpp', 'w').write(fast)
    print '_'
    slow_out = subprocess.Popen(['mozjs', '-m', '-n', 'fuzz.slow.js'],
                                stdout=subprocess.PIPE).communicate()[0]

    print '.'
    subprocess.call(['g++', 'fuzz.cpp', 'Relooper.o', '-o', 'fuzz', '-g'])
    print '*'
    subprocess.call(['./fuzz'], stdout=open('fuzz.fast.js', 'w'))
    print '-'
    fast_out = subprocess.Popen(['mozjs', '-m', '-n', 'fuzz.fast.js'],
                                stdout=subprocess.PIPE).communicate()[0]
    print

    if slow_out != fast_out:
        print ''.join([
            a.rstrip() + '\n'
            for a in difflib.unified_diff(slow_out.split('\n'),
                                          fast_out.split('\n'),
                                          fromfile='slow',
                                          tofile='fast')
        ])
        assert False

    #break
Example #59
0
            ap = ArchivesParserStorage()

            # Parse the old message, so we can
            ap.parse(StringIO(rawtxt))
            ap.analyze()

            if ap.bodytxt != bodytxt.decode('utf8'):
                print "Failed to fix %s!" % msgid

                # Generate diff to show what we changed
                print "CHANGED:"
                print "\n".join(
                    difflib.unified_diff(s.getvalue(),
                                         fixed,
                                         fromfile='old',
                                         tofile='new',
                                         n=2,
                                         lineterm=''))
                print "----"
                # Generate a diff to show what's left
                print "REMAINING:"
                print "\n".join(
                    difflib.unified_diff(bodytxt.decode('utf8').splitlines(),
                                         ap.bodytxt.splitlines(),
                                         fromfile='old',
                                         tofile='new',
                                         n=2,
                                         lineterm=''))
                print "--------------"
                while True:
                    a = raw_input('Save this change anyway?').lower()
Example #60
0
def export_notebook(notebook,
                    output_path=None,
                    ext='.ty',
                    identifier='_export_',
                    diff=True,
                    invert=False,
                    stale_time=None):
    """
    Given a v3-format .ipynb notebook file (from IPython 0.13 or
    later), allows the contents of labelled code cells to be exported
    to a plaintext file.  The typical use case is for generating a
    runnable plain Python source file from selected cells of an
    IPython notebook.

    Code is selected for export by placing the given identifier on the first
    line of the chosen code cells. By default, only the labelled cells are
    exported. This behavior may be inverted to exclude labelled cells by
    setting the invert flag.

    notebook    The filename of the notebook (.ipynb).
    output_path Optional output file path. Otherwise, uses the notebook basename.
    ext         The file extension of the output.
    identifier  The identifier used to label cells.
    diff        Whether to print a diff when overwriting content.
    invert      When set, only the non-labelled cells are exported.
    stale_time  Number of seconds that may elapse since the last notebook
                save before a staleness warning is issued. Useful when
                exporting from an active IPython notebook.
    """
    print("Deprecation Warning: Please use the %define_exporter magic instead")
    lines = []
    if output_path is None:
        output_path = os.path.splitext(os.path.basename(notebook))[0] + ext

    # Assumes the v3 version of the ipynb format.
    import IPython.nbformat.current
    # Close the notebook file deterministically instead of leaking the handle.
    with open(notebook, 'r') as nbfile:
        nbnode = IPython.nbformat.current.read(nbfile, 'ipynb')
    for cell in nbnode['worksheets'][0]['cells']:
        if cell['cell_type'] == 'code':
            celllines = cell['input'].rsplit("\n")
            labelled = (celllines[0].strip() == identifier)
            # Export labelled cells normally, unlabelled ones when inverted
            # (i.e. labelled XOR invert); the identifier line is dropped.
            if labelled != invert:
                lines.append('\n'.join(celllines[1:]))

    if stale_time:
        modified_time = time.time() - os.path.getmtime(notebook)
        if modified_time > stale_time:
            # Was a Python 2 print statement: a syntax error under Python 3,
            # clashing with the print() call already used above.
            print("Notebook last saved %.1f seconds ago." % modified_time)

    new_contents = "\n".join(lines)
    overwrite = os.path.isfile(output_path)
    if overwrite:
        with open(output_path, 'r') as oldfile:
            old_contents = oldfile.read()
    with open(output_path, 'w') as outfile:
        outfile.write(new_contents)

    if diff and overwrite:
        deltas = difflib.unified_diff(old_contents.splitlines(),
                                      new_contents.splitlines(),
                                      lineterm='')
        print('\n'.join(deltas))