Example 1
def lalInitProject(self, args):
    unit_provider = lal.UnitProvider.for_project(args[0])
    self.ctx = lal.AnalysisContext(unit_provider=unit_provider)
    self.auto_provider = False
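
# Hedged sketch (not part of the original snippet): the auto_provider flag
# above suggests an alternate setup that builds the provider from an
# explicit list of source files via lal.UnitProvider.auto, e.g.:
import libadalang as lal

def lalInitAuto(self, files):
    # Hypothetical counterpart to lalInitProject; "files" is a list of
    # source file paths.
    self.ctx = lal.AnalysisContext(
        unit_provider=lal.UnitProvider.auto(files))
    self.auto_provider = True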
Example 2
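# This example exercises an aggregate project: "agg.gpr" aggregates the
# "p" and "q" sub-projects, and the same source file resolves to a
# different next-part unit depending on which sub-project the provider is
# built for.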
import libadalang as lal

provider_p = lal.UnitProvider.for_project("agg.gpr", project="p/p.gpr")
ctx_p = lal.AnalysisContext(unit_provider=provider_p)

provider_q = lal.UnitProvider.for_project("agg.gpr", project="q/q.gpr")
ctx_q = lal.AnalysisContext(unit_provider=provider_q)

u_p = ctx_p.get_from_file("common/common_pack.ads")
u_q = ctx_q.get_from_file("common/common_pack.ads")

print(u_p)
print(u_q)

fun_p = u_p.root.findall(lambda x: x.text == "Common_Fun")[0]
print(fun_p.p_next_part.unit.filename)

fun_q = u_q.root.findall(lambda x: x.text == "Common_Fun")[0]
print(fun_q.p_next_part.unit.filename)

fun_p = u_p.root.findall(lambda x: x.text == "Common_Fun")[0]
print(fun_p.p_next_part.unit.filename)

fun_q = u_q.root.findall(lambda x: x.text == "Common_Fun")[0]
print(fun_q.p_next_part.unit.filename)

Example 3
import libadalang as lal
from libadalang import _py2to3

ctx = lal.AnalysisContext()
unit = ctx.get_from_file('test.adb')
unit2 = ctx.get_from_file('test2.adb')

print('First and last tokens for test.adb:')
print('  * {}'.format(unit.first_token))
print('  * {}'.format(unit.last_token))
print('')

print('Whole source buffer for test.adb:')
print(
    _py2to3.text_repr(lal.Token.text_range(unit.first_token, unit.last_token)))
print('')

# Deliberately swapped bounds: "first" is taken from near the end of the
# unit and "last" from near the beginning, so the range between them is
# empty.
last = unit.first_token.next
first = unit.last_token.previous
print('Empty range for the following bounds:')
print('  * {}'.format(first))
print('  * {}'.format(last))
print(_py2to3.text_repr(lal.Token.text_range(first, last)))
print('')

print('Source excerpts for all Basic_Decl in test.adb:')
for n in unit.root.findall(lal.BasicDecl):
    print('  * {}'.format(n))
    print('    {}'.format(_py2to3.text_repr(n.text)))
    print('')
Example 4
"""
Test that env shedding works correctly when resolving from the body of a
generic instantiation.
"""

from __future__ import absolute_import, division, print_function

import libadalang as lal


def type_from_subp_body(call):
    return (
        call.p_referenced_decl().p_body_part.f_subp_spec
        .f_subp_returns.p_designated_type_decl.entity_repr
    )


c = lal.AnalysisContext('utf-8')
u = c.get_from_file("main.adb")

ps = u.root.find(lambda n: n.text == 'Foo').parent
print(type_from_subp_body(ps))

ps = u.root.find(lambda n: n.text == 'Bar').parent
print(type_from_subp_body(ps))

ps = u.root.find(lambda n: n.text == 'Baz').parent
print(type_from_subp_body(ps))
Example 5
        # to no crash.
    ('project with unknown target', 'p.gpr', {
        'target': 'nosuchtarget',
        'runtime': 'nosuchrts'
    }),
]:
    pflush('Trying to load {}:'.format(label))
    try:
        ufp = libadalang.UnitProvider.for_project(project_file, **kwargs)
    except libadalang.InvalidProjectError as exc:
        pflush('   ... got an exception: {}'.format(exc))
    else:
        pflush('   ... got no exception')

# Then do something that is supposed to work
ctx = libadalang.AnalysisContext(
    unit_provider=libadalang.UnitProvider.for_project('p.gpr'))

# And try to load units with various invalid names
for filename in ('\n', ' '):
    pflush('Trying to get unit: {}'.format(repr(filename)))
    try:
        unit = ctx.get_from_provider(filename,
                                     libadalang.AnalysisUnitKind.unit_body)
    except libadalang.InvalidUnitNameError as exc:
        pflush('   ... got an exception: {}'.format(exc))
    else:
        pflush('   ... got no exception. Unacceptable!')

print('Done.')
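
# The pflush helper used above is not shown in this excerpt; a plausible
# definition (an assumption, not the original) is a print that flushes
# immediately:
import sys

def pflush(msg):
    print(msg)
    sys.stdout.flush()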
Example 6
from __future__ import absolute_import, division, print_function

import libadalang

src_buffer = """
limited with Ada.Text_IO;

procedure Foo is
   function "+" (S : String) return String is (S);
begin
   Ada.Text_IO.Put_Line (+"Hello, world!");
end Foo;
"""

ctx = libadalang.AnalysisContext('iso-8859-1')
unit = ctx.get_from_buffer('foo.adb', src_buffer)
assert unit, 'Could not create the analysis unit for foo.adb from a buffer'

ctx.remove('foo.adb')
try:
    ctx.remove('foo.adb')
except KeyError:
    print('Trying to remove the analysis unit for foo.adb twice fails the'
          ' second time, as expected')
else:
    assert False, ('Removing twice the analysis unit for foo.adb is supposed'
                   ' to raise an error but it did not')

print('Done.')
Example 7
def do_files(files, args):
    """
    Analyze a list of files. Issue messages on longer copy-pastes, either
    inside the same file, or between different files.
    """
    overall_start_time = datetime.datetime.now()
    start_time = datetime.datetime.now()

    def show_time(start_time, msg, reset=True):
        now = datetime.datetime.now()
        print('%-60s [%3.3fs]' % (msg, (now - start_time).total_seconds()))
        if reset:
            start_time = now
        return start_time

    contexts = [(f, lal.AnalysisContext()) for f in files]
    units = [(f, c.get_from_file(f)) for (f, c) in contexts]

    # For the analysis of multiple files, issue a message for files that
    # cannot be parsed, and proceed with the others.
    for (f, unit) in units:
        if unit.root is None:
            print('Could not parse {}:'.format(f))

    units = [(f, unit) for (f, unit) in units if unit.root is not None]
    start_time = show_time(start_time,
                           'libadalang analysis (%s units)' % len(units))

    # All the units have been parsed correctly. Now encode the code into
    # a list of 'hashes'.
    codes = []
    encoder = Encoder()
    for i, (f, unit) in enumerate(units):
        codes += encoder.encode(f, unit.root, args.ignore_ids)
    start_time = show_time(start_time,
                           'encode ast (code size: %s)' % len(codes))

    ranked_code = [code.h for code in codes]
    result = suffix_array(ranked_code, k=encoder.rank)
    start_time = show_time(start_time,
                           'compute suffix array (rank:%s)' % encoder.rank)

    # Copy/Paste results arranged by paths
    copy_pastes = {}

    # Keep track of some stats
    stats = {'skipped': 0, 'prefix': 0, 'no_prefix': 0}

    # Iterate over the suffixes. Chunks of duplicate code will necessarily
    # be prefixes of adjacent suffixes.
    for index in range(len(result) - 1):
        # Get the next two suffixes
        suffix = (result[index], result[index + 1])

        # Discard if nothing in common
        if ranked_code[suffix[0]] != ranked_code[suffix[1]]:
            stats['no_prefix'] += 1
            continue

        # Check whether a longer prefix exists in the suffix array. Analyse
        # only the longest prefixes.
        if suffix[0] > 0 and suffix[1] > 0 and \
                codes[suffix[0] - 1].h == codes[suffix[1] - 1].h:
            stats['skipped'] += 1
            continue

        # Find the length of the common prefix between the two suffixes.
        # The following code is a bit naive and can be removed in case the
        # LCP table is computed at the same time as the suffix array.
        # With skew algorithm this is possible (keeping the linear property).

        # Size of the common prefix
        prefix_length = 0

        while ranked_code[suffix[0] + prefix_length] == \
                ranked_code[suffix[1] + prefix_length]:
            prefix_length += 1

        stats['prefix'] += 1
        # Two suffixes with similarities lasting more than min_size "items"
        if prefix_length + 1 >= args.min_size:

            code = (CodeChunk(codes[suffix[0]].filename, codes[suffix[0]].line,
                              codes[suffix[0] + prefix_length - 1].line,
                              prefix_length),
                    CodeChunk(codes[suffix[1]].filename, codes[suffix[1]].line,
                              codes[suffix[1] + prefix_length - 1].line,
                              prefix_length))
            if code[0].path > code[1].path or \
                    (code[0].path == code[1].path and
                     code[0].begin > code[1].begin):
                # Ordering the paths lexicographically avoids
                # duplicates.
                code = (code[1], code[0])

            if len(code[0]) >= args.min_lines:

                if code[0].intersect_with(code[1]):
                    # A code cannot be a copy of itself
                    print('ignore %s vs %s' % (code[0], code[1]))
                    continue

                if code[0].path not in copy_pastes:
                    copy_pastes[code[0].path] = []

                found = False
                # This linear search is naive but fast enough in practice,
                # as we usually deal with few elements per file
                # (TODO: to be improved).
                for index, elt in enumerate(copy_pastes[code[0].path]):
                    if code[0].is_wider_than(elt[0]) and \
                            code[1].is_wider_than(elt[1]):
                        # Code duplication is a superset of a previous one, so
                        # replace.
                        copy_pastes[code[0].path][index] = code
                        found = True
                        break
                    elif elt[0].is_wider_than(code[0]) and \
                            elt[1].is_wider_than(code[1]):
                        # Code duplication is a subset of a previous one, so
                        # ignore.
                        found = True
                        break
                if not found:
                    # New code chunk
                    copy_pastes[code[0].path].append(code)
            else:
                pass
                # print 'discard %s' % code[0]
    show_time(
        start_time, 'find copy/paste code (skipped: %(skipped)s, '
        'with prefix: %(prefix)s, with no prefix: %(no_prefix)s)' % stats)

    # Display the results
    if copy_pastes:
        print('%4s %4s: %s' % ('LINE', 'SIZE', 'CHUNKS'))
    for chunks in copy_pastes.values():
        for chunk in sorted(chunks, key=lambda x: x[0].size):
            print(
                '%4d %4d: %-40s (%4d,%4d) ~= %-40s (%4d, %4d)' %
                (len(chunk[0]), chunk[0].size,
                 os.path.relpath(chunk[0].path, args.rel_path), chunk[0].begin,
                 chunk[0].end, os.path.relpath(chunk[1].path, args.rel_path),
                 chunk[1].begin, chunk[1].end))
    show_time(overall_start_time, 'Overall')
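
# The suffix_array and Encoder helpers are defined elsewhere in this tool.
# Purely as an illustration (the real helper is rank-based and may use a
# linear-time construction such as the skew algorithm mentioned above), a
# naive equivalent of what suffix_array computes would be:
def naive_suffix_array(ranked_code):
    """Start indices of all suffixes, sorted by suffix contents.

    O(n^2 log n) worst case; illustrative only, not the tool's
    implementation.
    """
    return sorted(range(len(ranked_code)), key=lambda i: ranked_code[i:])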
Example 8
                out_file.write(line + "\n")


def llvm_prefixes(filename):
    if "lto" in filename:
        return ["lto"]
    else:
        return ["llvm"]


def clang_prefixes(filename):
    return ["clang", "CX"]


if __name__ == "__main__":
    context = lal.AnalysisContext(
        unit_provider=lal.UnitProvider.auto(argv[2:]))
    if argv[1] == "process_names_llvm":
        process_names_for(argv[2:], llvm_prefixes, sanitize_unit_name_llvm)
        for arg in argv[2:]:
            file_name = path.basename(arg)
            handle_dependencies(file_name)
    elif argv[1] == "process_names_clang":
        process_names_for(argv[2:], clang_prefixes, sanitize_unit_name_clang)
        for arg in argv[2:]:
            file_name = path.basename(arg)
            handle_dependencies(file_name)
    elif argv[1] == "generate_wrappers":
        for arg in argv[2:]:
            file_name = path.basename(arg)
            generate_wrappers_for_file(file_name)
    else:
Example 9
    def apply_strategies_on_file(self, file, buf) -> int:
        """Apply all the strategies on the given buf.

        Return the number of characters removed.
        """
        count = buf.count_chars()

        if REMOVE_TABS:
            log("=> Removing tabs")
            buf.strip_tabs()
            if CAUTIOUS_MODE and buf.count_chars() < count:
                # In cautious mode, if we actually did
                # remove some tabs, run the predicate as a check.
                if not self.run_predicate():
                    log(f"The issue is gone after stripping TABs in {file}")
                    log("adareducer cannot help in this case.")
                    sys.exit(1)

        unit = self.context.get_from_file(file)

        if unit is None or unit.root is None:
            log(f"??? cannot find a root node for {file}")
            self.attempt_delete(file)
            return 0

        if EMPTY_OUT_BODIES_BRUTE_FORCE:
            log("=> Emptying out bodies (brute force)")

            HollowOutSubprograms().run_on_file(unit, buf.lines,
                                               self.run_predicate,
                                               lambda: buf.save())

        # If there are bodies left, remove statements from them

        if EMPTY_OUT_BODIES_STATEMENTS:
            self.context = lal.AnalysisContext(
                unit_provider=self.unit_provider)
            log("=> Emptying out bodies (statement by statement)")
            buf = Buffer(file)
            unit = self.context.get_from_file(file)
            RemoveStatements().run_on_file(unit, buf.lines, self.run_predicate,
                                           lambda: buf.save())

        # Let's try removing aspects

        if REMOVE_ASPECTS:
            self.context = lal.AnalysisContext(
                unit_provider=self.unit_provider)
            log("=> Removing aspects")
            RemoveAspects().run_on_file(self.context, file, self.run_predicate)

        # Remove subprograms

        if REMOVE_SUBPROGRAMS:
            self.context = lal.AnalysisContext(
                unit_provider=self.unit_provider)
            log("=> Removing subprograms")
            try:
                RemoveSubprograms().run_on_file(self.context, file,
                                                self.run_predicate)
            except lal.PropertyError:
                # retry with a new context...
                self.context = lal.AnalysisContext(
                    unit_provider=self.unit_provider)
                RemoveSubprograms().run_on_file(self.context, file,
                                                self.run_predicate)

        # Let's try removing packages

        if REMOVE_PACKAGES:
            self.context = lal.AnalysisContext(
                unit_provider=self.unit_provider)
            log("=> Removing packages")
            RemovePackages().run_on_file(self.context, file,
                                         self.run_predicate)

        # Next remove the imports that we can remove

        if REMOVE_IMPORTS:
            log("=> Removing imports")
            RemoveImports().run_on_file(self.context, file, self.run_predicate)

        # Remove trivias

        if REMOVE_TRIVIAS:
            log("=> Removing blank lines and comments")
            RemoveTrivias().run_on_file(file, self.run_predicate)

        # Attempt to delete the file if it's empty-ish

        deletion_successful = False
        if ATTEMPT_DELETE:
            log("=> Attempting to delete")
            deletion_successful = DeleteEmptyUnits().run_on_file(
                self.context, file, self.run_predicate)

        # Fin

        if deletion_successful:
            log("   File deleted! \\o/")
            chars_removed = count
        else:
            buf = Buffer(file)
            chars_removed = count - buf.count_chars()

        return chars_removed
Example 10
def for_project(project_file, scenario_vars={}):
    return ExtractionContext(
        lal.AnalysisContext(unit_provider=lal.UnitProvider.for_project(
            project_file, scenario_vars)))

    def lal_context(self):
        assert USE_LAL, "LAL is not available"

        return lal.AnalysisContext(unit_provider=lal.UnitProvider.auto([]))
Example 12
from __future__ import absolute_import, division, print_function

import libadalang

from unicode_utils import src_buffer_iso_8859_1


ctx = libadalang.AnalysisContext('unknown-charset')
unit = ctx.get_from_buffer('foo.adb', src_buffer_iso_8859_1)
for d in unit.diagnostics:
    print('  {}'.format(d))

print('Done')
Example 13
from __future__ import absolute_import, division, print_function

import libadalang

for src_dir in ('src1', 'src2'):
    print('For SRC_DIR={}:'.format(src_dir))
    ctx = libadalang.AnalysisContext(
        unit_provider=libadalang.UnitProvider.for_project(
            'p.gpr', {'SRC_DIR': src_dir}))
    unit = ctx.get_from_provider('p2', 'specification')

    subtype_ind = unit.root.find(libadalang.SubtypeIndication)
    print('{} resolves to:'.format(subtype_ind))
    for entity in subtype_ind.f_name.p_matching_nodes:
        print('  {}'.format(entity))

print('Done.')
Example 14
import libadalang as lal

u = lal.AnalysisContext().get_from_file("test.adb")
assert not u.diagnostics

u_decl = u.root.find(lal.TypeDecl)
print("Declaration of U => {}".format(u_decl))
print("   get_pragma ('pack') => {}".format(u_decl.p_get_pragma('pack')))
print('')

u_decl = u.root.find(lal.DottedName).p_referenced_decl()
print("Declaration of U with rebindings of Pkg_I => {}".format(u_decl))
print("   get_pragma ('pack') => {}".format(u_decl.p_get_pragma('pack')))
print('')

print('Done')
Example 15
from __future__ import absolute_import, division, print_function

import libadalang


ctx = libadalang.AnalysisContext(with_trivia=True)
u = ctx.get_from_file('foo.adb')

prev_token = None
for token in u.iter_tokens():
    assert prev_token == token.previous, 'Inconsistent previous token'
    print('{}{}'.format(
        token.kind,
        ' {!r}'.format(token.text) if token.text else ''
    ))
    prev_token = token
print('Done.')
Example 16
    def reduce_file(self, file):
        """Reduce one given file as much as possible"""

        self.mark_as_processed(file)

        # Skip some cases
        if "rts-" in file:
            log(f"SKIPPING {file}: looks like a runtime file")
            return
        if not os.access(file, os.W_OK):
            log(f"SKIPPING {file}: not writable")
            return

        log(f"*** Reducing {file}")

        # Save the file to an '.orig' copy
        buf = Buffer(file)
        buf.save(file + ".orig")

        try:
            chars_removed = self.apply_strategies_on_file(file, buf)
        except BaseException:
            # Catch any exception occurring during the application of
            # strategies, and save the buf to a .crash file, to
            # help post-mortem analysis.
            chars_removed = 0
            buf.save(file + ".crash")
            raise

        # Print some stats

        log(f"done reducing {file} ({chars_removed} characters removed)")
        GUI.add_chars_removed(chars_removed)

        # Cautious?

        if CAUTIOUS_MODE:
            if not self.run_predicate():
                log(CAUTIOUS_MODE_HELP)
                sys.exit(1)

        # Move on to the next files

        if self.follow_closure:
            # First let's check if we are processing a .adb that has a .ads
            if file.endswith(".adb"):
                ads = file[:-1] + "s"
                if os.path.exists(ads):
                    # this exists, it's the natural next one to check
                    self.ads_dict[ads] = []
                    return

            self.context = lal.AnalysisContext(
                unit_provider=self.unit_provider)
            unit = self.context.get_from_file(file)
            root = unit.root
            if root is not None:
                # Iterate through all "with" statements
                for w in root.findall(lambda x: x.is_a(lal.WithClause)):
                    # find the last id in w
                    ids = w.findall(lambda x: x.is_a(lal.Identifier))
                    if ids is not None:
                        id = ids[-1]
                        decl = id.p_referenced_defining_name()
                        # find the definition
                        if decl is not None:
                            file_to_add = decl.unit.filename

                            # Only process files that are in the project closure
                            if self.resolver.belongs_to_project(file_to_add):
                                if file_to_add.endswith(".ads"):
                                    # if it's a .ads we want to empty, empty
                                    # the .adb first, it will go faster
                                    adb = file_to_add[:-1] + "b"
                                    if os.path.exists(adb):
                                        self.bodies_to_reduce.append(adb)
                                    self.ads_dict[file_to_add] = []
                                else:
                                    self.bodies_to_reduce.append(file_to_add)
Example 17
"""
Test that one can customize the tab stop to use when lexing. Also make sure
this parameter is properly sanitized.
"""

from __future__ import absolute_import, division, print_function

import libadalang as lal

c = lal.AnalysisContext('utf-8', tab_stop=20)
u = c.get_from_buffer('foo.ads', '\tprocedure Foo;\n')
print(u.root.sloc_range)

try:
    lal.AnalysisContext('utf-8', tab_stop=0)
except ValueError as exc:
    print('ValueError: {}'.format(exc))

print('Done')
Example 18
import libadalang as lal

ctx = lal.AnalysisContext(with_trivia=True)
unit = ctx.get_from_file('lal-highlighters.adb')
loc = lal.Sloc(1, 1)
t = unit.lookup_token(loc)
for i in range(40):
    if t is None:
        # Stop if we run past the last token in the unit
        break
    print(t)
    t = t.next
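
# Note (an assumption about the library's behavior): with with_trivia=True,
# whitespace and comment tokens take part in the .next chain, so the walk
# above prints trivia interleaved with regular tokens.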
Example 19
if args.project:
    provider = lal.UnitProvider.for_project(args.project)
elif args.auto_dir:
    input_files = []
    filename_re = re.compile(r'.*\.(ad.|a|spc|bdy)')
    for d in args.auto_dir:
        for dirpath, dirnames, filenames in os.walk(d):
            for f in filenames:
                if filename_re.match(f):
                    input_files.append(os.path.join(dirpath, f))
    provider = lal.UnitProvider.auto(input_files)
else:
    provider = None


ctx = lal.AnalysisContext(charset, unit_provider=provider)
ctx.discard_errors_in_populate_lexical_env(
    args.discard_errors_in_populate_lexical_env
)
for src_file in input_sources:
    print_title('#', 'Analyzing {}'.format(src_file))

    # Configuration for this file
    display_slocs = False
    display_short_images = False

    # Now analyze the source file
    unit = ctx.get_from_file(src_file)
    if unit.diagnostics:
        for d in unit.diagnostics:
            print('error: {}:{}'.format(src_file, d))
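
# print_title is an assumed helper that this excerpt does not define;
# something along these lines would match how it is called above:
def print_title(char, title):
    print(title)
    print(char * len(title))
    print('')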
Example 20
            f"{root}/{file}"
            for file in list(filter(None,
                                    handle.read().splitlines()))
        ]
        return lines


# If a gpr project file is passed, it is used.
# If not, and a file is passed that lists project files, it is used.
# Otherwise, we just use the files listed on the remainder of the command line.
if args.gpr:
    provider = lal.UnitProvider.for_project(args.gpr)
elif args.files:
    provider = lal.UnitProvider.auto(
        input_files=input_files_from_files_list(args.files))
else:
    provider = lal.UnitProvider.auto(input_files=args.others)

context = lal.AnalysisContext(unit_provider=provider)

# This structure captures the output of the analysis in a structured way that
# can be passed to different backends to output in different formats.
AnalysisOutput = TypedDict(
    "AnalysisOutput", {
        "component_types":
        Set[ontology.ComponentTypeIdentifier],
        "components":
        Dict[ontology.SoftwareComponentIdentifier, ontology.SoftwareComponent],
        "files":
        Dict[ontology.FileIdentifier, ontology.File],
        "formats":
        Set[ontology.Format],
    })
Example 21
from __future__ import absolute_import, division, print_function

import os
import sys

import libadalang as lal

ctx = lal.AnalysisContext(unit_provider=lal.UnitProvider.auto(sys.argv[1:]))
all_units = [ctx.get_from_file(f) for f in sys.argv[1:]]

for unit in all_units:
    if len(unit.diagnostics) > 0:
        print(unit.diagnostics)

hello_adb = ctx.get_from_file("hello.adb")
hello = hello_adb.root.find(lal.DefiningName)

print('All references to Hello from hello.adb:')
for ref in hello.p_find_all_references(all_units):
    while ref.parent is not None and not ref.p_xref_entry_point:
        ref = ref.parent

    print('    {} ({}, {})'.format(ref.text.splitlines()[0],
                                   os.path.basename(ref.unit.filename),
                                   ref.sloc_range))

print('Done')
Example 22
def update_lalctx(file):
    ctx = lal.AnalysisContext()
    return ctx.get_from_file(file)
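
# Usage sketch (hedged; "main.adb" is a placeholder): each call builds a
# fresh AnalysisContext, so nothing is shared with previously loaded units.
#
#     unit = update_lalctx("main.adb")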
Example 23
import libadalang as lal

for project in ('ap1.gpr', 'ap2.gpr'):
    print('Loading {}...'.format(project))
    try:
        up = lal.UnitProvider.for_project(project)
    except Exception as exc:
        print('   ... got a {} exception: {}'.format(type(exc).__name__, exc))
        continue
    else:
        print('   ... success!')

    ctx = lal.AnalysisContext(unit_provider=up)
    unit = ctx.get_from_provider('p2', lal.AnalysisUnitKind.unit_specification)
    ref = unit.root.find(lal.ObjectDecl).f_renaming_clause.f_renamed_object
    decl = ref.p_referenced_decl(imprecise_fallback=False)
    print('{} resolves to {}'.format(ref, decl))

print('Done.')
Example 24
from __future__ import absolute_import, division, print_function

import libadalang

ctx = libadalang.AnalysisContext()
for with_trivia in (False, True):
    print('With_Trivia => {}'.format(with_trivia))
    u = ctx.get_from_file('foo.adb', with_trivia=with_trivia, reparse=True)

    print('Unit has {} tokens and {} trivias'.format(u.token_count,
                                                     u.trivia_count))
    print('Unit start and end tokens:')
    print('  {}'.format(u.first_token))
    print('  {}'.format(u.last_token))
    print('AST root start and end tokens:')
    print('  {}'.format(u.root.token_start))
    print('  {}'.format(u.root.token_end))
    print('')

print('Done.')
Example 25
    def emit_doc(self, project, scenario_vars, file_name):
        """
        Entry point for the doc generation of one Ada package.

        :param None|str project: Optional path to the project file to load.

        :param scenario_vars: Mapping for scenario variables to use to load the
            project file.
        :type scenario_vars: None|dict[str,str]

        :param str file_name: Path of the Ada source file to parse.
        """

        from tempfile import mkdtemp

        if not project:
            with open('{}/default.gpr'.format(mkdtemp()), 'w') as f:
                f.write('project Default is end Default;')
                project = f.name

        ctx = lal.AnalysisContext('utf-8',
                                  with_trivia=True,
                                  unit_provider=lal.UnitProvider.for_project(
                                      project, scenario_vars))

        # List of top-level declarations to document
        toplevel_decls = []

        # Set mirroring toplevel_decls, used to check whether a decl is
        # already part of it.
        toplevel_decls_set = set()

        def append_decl(decl):
            """
            Append ``decl`` to ``toplevel_decls`` if it's not there yet.
            """
            if decl not in toplevel_decls_set:
                toplevel_decls.append(decl)
                toplevel_decls_set.add(decl)

        # Each declaration can group the documentation of several other
        # declarations. This mapping (decl -> list[decl]) describes this
        # grouping.
        associated_decls = defaultdict(list)

        u = ctx.get_from_file(file_name)
        assert not u.diagnostics, 'Parsing error in {}'.format(file_name)
        assert u.root, '{} is empty'.format(file_name)
        try:
            package_decl = (u.root.cast(lal.CompilationUnit).f_body.cast(
                lal.LibraryItem).f_item.cast(lal.BasePackageDecl))
        except AssertionError:
            print('Not a package')
            return

        # Go through all declarations that appear in the top-level package and
        # organize them in sections the way we want to document them.

        decls = package_decl.f_public_part.f_decls
        types = {}

        for decl in list(decls):
            _, annotations = self.get_documentation(decl)

            # Skip documentation for this entity
            if annotations.get('no-document'):
                continue

            if decl.is_a(lal.BasicSubpDecl, lal.ExprFunction):
                # Look for the type under which this subprogram should be
                # documented ("owning_type"). This is either the explicitly
                # asked type ("belongs-to" annotation) or the type that is a
                # primitive for this subprogram (if the type is declared in the
                # same file).
                owning_type = None
                if annotations.get('belongs-to'):
                    owning_type = types[annotations['belongs-to']]
                else:
                    prim_type = decl.f_subp_spec.p_primitive_subp_of
                    if prim_type and prim_type.unit == u:
                        owning_type = prim_type
                        append_decl(owning_type)

                # If we found a relevant type, document the subprogram under
                # it, otherwise document it at the top-level.
                if owning_type:
                    associated_decls[owning_type].append(decl)
                else:
                    append_decl(decl)

            elif decl.is_a(lal.BaseTypeDecl):
                # New type declaration: document it and register it as a type
                types[decl.p_defining_name.text] = decl
                append_decl(decl)

            elif decl.is_a(lal.ObjectDecl):
                # Try to associate object declarations to their type, if there
                # is one in the current package.
                type_name = (
                    decl.f_type_expr.p_designated_type_decl.p_defining_name)
                t = types.get(type_name.text) if type_name else None
                if t:
                    associated_decls[t].append(decl)
                else:
                    append_decl(decl)

            elif decl.is_a(lal.BasicDecl):
                if not decl.is_a(lal.ExceptionDecl, lal.PackageRenamingDecl,
                                 lal.GenericPackageInstantiation,
                                 lal.GenericSubpInstantiation):
                    self.warn('default entity handling for {}:{}',
                              decl.unit.filename, decl)
                append_decl(decl)

        ret = []

        # Get documentation for the top-level package itself
        pkg_doc, annotations = self.get_documentation(package_decl)

        # Create the documentation's content
        wrapper_node = nodes.Element()

        rst = ViewList()
        title = package_decl.p_defining_name.text
        rst.append(title, 'no_file.rst', 1)
        rst.append('-' * len(title), 'no_file.rst', 2)

        for i, l in enumerate(pkg_doc, 3):
            rst.append(l, 'no_file.rst', i)

        nested_parse_with_titles(self.state, rst, wrapper_node)

        ret += wrapper_node.children

        # Go through all entities to generate their documentation
        for decl in toplevel_decls:
            n, content_node = self.handle_ada_decl(decl)
            ret += n
            for assoc_decls in associated_decls[decl]:
                assoc_nodes, _ = self.handle_ada_decl(assoc_decls)
                content_node += assoc_nodes

        return ret
Example 26
import sys

import libadalang as lal

ctx = lal.AnalysisContext(
    unit_provider=lal.UnitProvider.for_project("test.gpr"))
test_decls = []
origin_decls = []
units = [ctx.get_from_file(f) for f in sys.argv[1:]]

for u in units:
    u.populate_lexical_env()
    test_decls.extend(
        u.root.findall(lambda n: n.is_a(lal.BasicDecl)
                       and n.p_defining_name.text.startswith("Test")))
    origin_decls.extend(
        u.root.findall(lambda n: n.is_a(lal.BasicDecl)
                       and n.p_defining_name.text.startswith("Origin")))

for test_decl in test_decls:
    for origin_decl in origin_decls:
        test_name = test_decl.p_defining_name.text
        origin_name = origin_decl.p_defining_name.text

        print("{: <25} is visible to {: >28} ? {}".format(
            test_name, origin_name, test_decl.p_is_visible(origin_decl)))

    print("")

print("Done")
Example 27
    dict(project_file=u'p.gpr',
         project=u'no_such_project', scenario_vars=vars),
    dict(project_file=u'p.gpr', project=u'q', scenario_vars=vars),
]:
    print('Trying to build with', unirepr(args, native_dict_keys=True))
    try:
        libadalang.UnitProvider.for_project(**args)
    except (TypeError, libadalang.InvalidProjectError) as exc:
        print('   ... got a {} exception: {}'.format(type(exc).__name__, exc))
    else:
        print('   ... success!')


for src_dir in ('src1', 'src2'):
    print('For SRC_DIR={}:'.format(src_dir))
    ctx = libadalang.AnalysisContext(
        unit_provider=libadalang.UnitProvider.for_project(
            project_file='p.gpr',
            scenario_vars={'SRC_DIR': src_dir}
        )
    )
    unit = ctx.get_from_provider(
        'p2', libadalang.AnalysisUnitKind.unit_specification)

    subtype_ind = unit.root.find(libadalang.SubtypeIndication)
    print('{} resolves to:'.format(subtype_ind))
    for entity in subtype_ind.f_name.p_matching_nodes:
        print('  {}'.format(entity))

print('Done.')
Example 28
from __future__ import absolute_import, division, print_function

import libadalang

from unicode_utils import src_buffer_iso_8859_1

ctx = libadalang.AnalysisContext('utf-8')
unit = ctx.get_from_buffer('foo.adb', src_buffer_iso_8859_1)
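# The charset mismatch does not raise an exception here: decoding problems
# surface as diagnostics on the unit, printed by the loop below.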
for d in unit.diagnostics:
    print('  {}'.format(d))

print('Done')
Example 29
import sys

import libadalang as lal


for filename in sys.argv[1:]:
    print('== {} =='.format(filename))
    u = lal.AnalysisContext().get_from_file(filename)
    assert not u.diagnostics

    whole_src = u.text.splitlines()

    def extract_sloc(sloc_range):
        start = sloc_range.start
        end = sloc_range.end
        return (start.line, start.column), (end.line, end.column)

    def format_output(s_l, s_c, e_l, e_c):
        lines = whole_src[s_l - 1:e_l]
        underline_starts = [s_c - 1] + [0] * (len(lines) - 1)
        underline_ends = [len(l) for l in lines[:-1]] + [e_c - 1]
        out = list(lines)
        for i in range(len(lines)):
            out.insert(i * 2 + 1, "{}{}".format(
                " " * underline_starts[i],
                "^" * (underline_ends[i] - underline_starts[i])
            ))

        return "\n".join(out)
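
    # Hypothetical usage (the excerpt stops before calling the helpers
    # above): underline the first object declaration in the file, if any.
    decl = u.root.find(lal.ObjectDecl)
    if decl is not None:
        (s_l, s_c), (e_l, e_c) = extract_sloc(decl.sloc_range)
        print(format_output(s_l, s_c, e_l, e_c))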

    print('')
Example 30
import argparse
import sys

import libadalang as lal

parser = argparse.ArgumentParser()
parser.add_argument('files',
                    help='Files to analyze',
                    type=str,
                    nargs='+',
                    metavar='files')
parser.add_argument('--charset', type=str, default="")
parser.add_argument('--discard-errors-in-populate-lexical-env',
                    '-d',
                    action='store_true')
args = parser.parse_args()

input_sources = args.files
charset = args.charset

ctx = lal.AnalysisContext(charset)
ctx.discard_errors_in_populate_lexical_env(
    args.discard_errors_in_populate_lexical_env)
for src_file in input_sources:
    print_title('#', 'Analyzing {}'.format(src_file))

    # Configuration for this file
    display_slocs = False

    # Now analyze the source file
    unit = ctx.get_from_file(src_file)
    if unit.diagnostics:
        for d in unit.diagnostics:
            print('error: {}:{}'.format(src_file, d))
        sys.exit(0)
    unit.populate_lexical_env()
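    # populate_lexical_env eagerly computes name-resolution data for the
    # unit; with discard_errors_in_populate_lexical_env set above, errors
    # during that pass are discarded rather than propagated.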