Example #1
    def find_dependencies(self, **kwargs):
        """
        Returns the string with dependencies identified
        using the given parser.
        """

        dependency_finder = Dependency()

        return dependency_finder.find_dependencies(
            text=kwargs.get("text"),
            parser=kwargs.get("parser"),
            response_type=kwargs.get("response_type"))
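
A hypothetical call showing the keyword arguments this wrapper expects; `extractor` stands in for an instance of the (unshown) owning class, and the argument values are illustrative:

# Hypothetical usage; values are illustrative, not from the source.
deps = extractor.find_dependencies(text="The cat sat on the mat.",
                                   parser="nltk",
                                   response_type="string")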
Example #2
def handle_dependency(dependency: Dependency):
    try:
        if not dependency.is_installed():
            dependency.install()
            return
    except Exception:
        logging.warning("Install failed for %s, attempting to update",
                        dependency.name)
        try:
            dependency.update()
        except Exception:
            logging.error("Failed to install dependency: %s", dependency.name)
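
handle_dependency assumes a Dependency object exposing is_installed(), install(), and update(); the class itself is not shown in these snippets. A minimal sketch of that interface, assuming the shell-command style of the brew example further down, could be:

import subprocess

class Dependency:
    """Minimal sketch; the real class is not shown in these snippets."""
    def __init__(self, name, install_cmd, update_cmd, check_cmd=None):
        self.name = name
        self._install_cmd = install_cmd
        self._update_cmd = update_cmd
        # Assumption: fall back to checking the command is on PATH.
        self._check_cmd = check_cmd or f"command -v {name}"

    def is_installed(self):
        return subprocess.call(self._check_cmd, shell=True,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0

    def install(self):
        subprocess.check_call(self._install_cmd, shell=True)

    def update(self):
        subprocess.check_call(self._update_cmd, shell=True)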
Example #3
    def find_dependencies(self, **kwargs):
        """
        Returns the string with dependencies identified
        using the given parser.
        """

        dependency_finder = Dependency()

        return dependency_finder.find_dependencies(
            text=kwargs.get("text"),
            parser=kwargs.get("parser"),
            response_type=kwargs.get("response_type"))
Example #4
File: main.py Project: axnasim/CSCI4900-1
def main():
    argv = docopt(
        doc=__doc__.format(os.path.basename(sys.argv[0])),
        argv=sys.argv[1:],
        version=__version__
    )

    if argv['FILE']:
        dependency = Dependency()

        dependency.copyPom(os.path.abspath(argv['FILE']))
        dependency.getDependenciesandScan()

        shutil.rmtree(dependency.tempdir)
Example #5
def parse_requirements(requirements, courses_taken):
    if isinstance(requirements, Course):
        if requirements.code in courses_taken:
            return None
        else:
            return requirements

    c1 = parse_requirements(requirements.children[0], courses_taken)
    c2 = parse_requirements(requirements.children[1], courses_taken)

    if requirements.logic == "and":
        if c1 == None and c2 == None:
            return None
        elif c1 == None:
            return c2
        elif c2 == None:
            return c1

        ret = Dependency(give_id=False)
        ret.add_children("&", c1, c2, find_children_id=False)
        return ret

    elif requirements.logic == "or":
        if c1 == None or c2 == None:
            return None

        ret = Dependency(give_id=False)
        ret.add_children("|", c1, c2, find_children_id=False)
        return ret
Example #6
def scan_file(file):
    with open(f"{file}.txt") as text_gradle_file:
        lines = text_gradle_file.readlines()
        dependencies = []
        changes = []
        dependency_dump = []
        new_dependencies = get_new_versions()

        for line in lines:
            if "compile" in line:
                extracted_elements = re.findall(r"'([^']+)'", line)

                if len(extracted_elements) >= 3:  # need group, name and version
                    dependency = Dependency(extracted_elements[0],
                                            extracted_elements[1],
                                            extracted_elements[2])
                    dependencies.append(dependency)
                    dependency_dump.append(dependency.name)

        for current_dependency in dependencies:
            for new_dependency in new_dependencies:
                if (current_dependency.name == new_dependency.name
                        and current_dependency.group == new_dependency.group):
                    if current_dependency.version != new_dependency.version:
                        if new_version_is_higher(new_dependency.version,
                                                 current_dependency.version):
                            changes.append(
                                build_log_entry(f"{file}.gradle",
                                                current_dependency.name,
                                                current_dependency.version,
                                                new_dependency.version))
                            current_dependency.version = new_dependency.version

        update_dependency_dump(file, dependency_dump)
        update_change_log(changes)
        apply_new_versions(dependencies, file)
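
scan_file calls a new_version_is_higher helper that is not shown. A plausible sketch, assuming it compares dotted version strings component-wise, is:

def new_version_is_higher(new_version, current_version):
    """Sketch only: the real helper is not shown in these snippets.
    Compares dotted versions numerically where possible."""
    def parts(v):
        return [int(p) if p.isdigit() else p for p in v.split(".")]
    try:
        return parts(new_version) > parts(current_version)
    except TypeError:  # mixed int/str components are not orderable
        return new_version > current_version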
Example #7
def handle_dependencies(dependencies_config: List[Dict[str,
                                                       Union[List[str], str]]]):
    for dep in dependencies_config:
        dependency = Dependency(dep["name"],
                                check_in_dict_failback("install", dep),
                                check_in_dict_failback("update", dep),
                                check_in_dict_failback("check", dep))
        handle_dependency(dependency)
Example #8
 def end(self, tag):
     self.tag = ''
     if tag == 'sentences':
         if self.parse_sent:
             self.parse_sent = False
     elif tag == 'sentence':
         if self.parse_sent:
             if self.sent is not None:
                 self.sents.append(deepcopy(self.sent))
                 self.sent = None
     elif tag == 'token':
         # map corenlp ner tags to coarse-grained ner tags
         token = Token(self.word,
                       self.lemma,
                       self.pos,
                       ner=convert_corenlp_ner_tag(self.ner))
         self.sent.add_token(deepcopy(token))
         self.word = ''
         self.lemma = ''
         self.pos = ''
         self.ner = ''
     elif tag == 'dependencies':
         if self.parse_dep:
             self.parse_dep = False
     elif tag == 'dep':
         if self.parse_dep:
             if not self.copied_dep:
                 if self.dep_label != 'root':
                     dep = Dependency(self.dep_label, self.gov_idx,
                                      self.dep_idx, self.extra)
                     self.sent.add_dep(deepcopy(dep))
             else:
                 self.copied_dep = False
             self.dep_label = ''
             self.gov_idx = -1
             self.dep_idx = -1
             self.extra = False
     elif tag == 'coreference':
         if self.parse_coref:
             if self.coref is not None:
                 self.corefs.append(deepcopy(self.coref))
                 self.coref = None
             else:
                 self.parse_coref = False
     elif tag == 'mention':
         mention = Mention(self.sent_idx,
                           self.start_token_idx,
                           self.end_token_idx,
                           head_token_idx=self.head_token_idx,
                           rep=self.rep,
                           text=self.text.encode('ascii', 'ignore'))
         self.coref.add_mention(deepcopy(mention))
         self.sent_idx = -1
         self.start_token_idx = -1
         self.end_token_idx = -1
         self.head_token_idx = -1
         self.rep = False
         self.text = ''
Example #9
    def extract_verb_phrases(self, **kwargs):
        """
        Returns the verb phrases found within
        a text.
        """

        # Instantiating variables
        verb_phrases = []
        verb_phrase_extractor = Dependency()

        if kwargs.get("parser").lower() != "nltk":
            # Getting verb phrases using the passed parser (Pattern or NLPNET)
            dependencies = verb_phrase_extractor.find_dependencies(
                text=kwargs.get("text"), parser=kwargs.get("parser")).split()

            for dependency_index in range(0, len(dependencies)):
                # Check to see if the word is a verb or part of a verb phrase
                if "VP-" in dependencies[dependency_index]:
                    # Check to see if the last word was part of a verb phrase
                    if dependency_index != 0 and "VP-" in dependencies[
                            dependency_index - 1]:
                        # Since it was, append this verb onto the last verb phrase
                        verb_phrases[len(verb_phrases)] += " " + re.sub(
                            '/.*', '', dependencies[dependency_index])

                        # Continue on to the next word in the sentence
                        continue

                    # Remove the ending information, leaving the word
                    cleaned_word = re.sub('/.*', '',
                                          dependencies[dependency_index])

                    # Add the word to the verb phrase
                    verb_phrases.append(cleaned_word)
        else:
            # Getting verb phrases using the NLTK parser
            dependencies = verb_phrase_extractor.find_dependencies(
                text=kwargs.get("text"), parser=kwargs.get("parser"))

            # Adding the verbs to the verb_phrases
            verb_phrases.append(dependencies[1][1])

        # Returning the found verb_phrases
        return verb_phrases
Example #10
    def __init__(self, mode='all'):
        super(JointMultiTaskModel, self).__init__()

        # models
        self.lang_model = CharacterLanguageModel()
        self.postag = POSTag()
        self.chunking = Chunking()
        self.dependency = Dependency()
        self.sentiment = SentimentClassification()

        # Mode - all or module_name
        self.mode = mode
Example #11
    def extract_slot(self, nlp, sentence, target_slot):
        sentence = sentence.lower()

        tokens_nlp = nlp.word_tokenize(sentence)
        dependency_nlp = nlp.dependency_parse(sentence)

        dependency = Dependency(sentence, dependency_nlp, tokens_nlp)

        self.current_rules_set.update(dependency)
        fillers = self.current_rules_set.apply(target_slot)

        return zip(self.current_frame.listslots(), fillers)
Example #12
File: buildhelp.py Project: tsengj10/oxsx
def read_dependencies(filename):
    cparse = ConfigParser()
    cparse.read(filename)
    
    dependencies = {}
    for dep_name in cparse.sections():
        try:
            libs   = cparse.get(dep_name, "libs")
            cheads = cparse.get(dep_name, "check_headers")
        except Exception:
            # ConfigParser raises NoOptionError (not KeyError) for missing options
            print "Incomplete dependency spec for {0} (needs libs & check_headers)".format(dep_name)
            continue
        dependencies[dep_name] = Dependency(dep_name, libs, cheads)
    return dependencies
Example #13
    def extract_verb_phrases(self, **kwargs):
        """
        Returns the verb phrases found within
        a text.
        """

        # Instantiating variables
        verb_phrases = []
        verb_phrase_extractor = Dependency()

        if kwargs.get("parser").lower() != "nltk":
            # Getting verb phrases using the passed parser (Pattern or NLPNET)
            dependencies = verb_phrase_extractor.find_dependencies(text=kwargs.get("text"), parser=kwargs.get("parser")).split()

            for dependency_index in range(0, len(dependencies)):
                # Check to see if the word is a verb or part of a verb phrase
                if "VP-" in dependencies[dependency_index]:
                    # Check to see if the last word was part of a verb phrase
                    if dependency_index != 0 and "VP-" in dependencies[dependency_index - 1]:
                        # Since it was, append this verb onto the last verb phrase
                        verb_phrases[-1] += " " + re.sub('/.*', '', dependencies[dependency_index])

                        # Continue on to the next word in the sentence
                        continue

                    # Remove the ending information, leaving the word
                    cleaned_word = re.sub('/.*', '', dependencies[dependency_index])

                    # Add the word to the verb phrase
                    verb_phrases.append(cleaned_word)
        else:
            # Getting verb phrases using the NLTK parser
            dependencies = verb_phrase_extractor.find_dependencies(text=kwargs.get("text"), parser=kwargs.get("parser"))

            # Adding the verbs to the verb_phrases
            verb_phrases.append(dependencies[1][1])

        # Returning the found verb_phrases
        return verb_phrases
Example #14
def install_brew():
    try:
        dep = Dependency(
            "brew",
            "/usr/bin/ruby -e $(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)",
            "brew upgrade && brew upgrade")
        if dep.is_installed():
            dep.update()
        else:
            dep.install()
    except Exception as e:
        logging.error("Error installing brew, possible network error", e)
Example #15
def get_new_versions():
    vulnerabilities_url = os.getenv("VULNERABILITIES_ENDPOINT",
                                    "http://localhost:5000/vulnerabilities")
    response = requests.get(vulnerabilities_url)

    if response.status_code == 200:
        response_content = json.loads(response.content.decode("UTF-8"))
        new_versions = []

        for dep in response_content["vulnerabilities"]:
            new_versions.append(
                Dependency(dep.get("group"), dep.get("name"),
                           dep.get("version")))

        return new_versions
    else:
        raise Exception(f"Error, HTTP status code: {response.status_code}")
Example #16
def main():
    argv = docopt(doc=__doc__.format(os.path.basename(sys.argv[0])),
                  argv=sys.argv[1:],
                  version=__version__)

    if argv['FILE']:
        dependency = Dependency()

        dependency.copyPom(os.path.abspath(argv['FILE']))
        dependency.getDependenciesandScan()

        shutil.rmtree(dependency.tempdir)
Example #17
File: data_helper.py Project: Zzoay/YaoNLP
def load_ctb(data_path: str):
    file_names: List[str] = os.listdir(data_path)
    ctb_files: List[str] = [os.path.join(data_path, fle) for fle in file_names]

    # id, form, tag, head, rel
    sentence: List[Dependency] = []

    for ctb_file in ctb_files:
        with open(ctb_file, 'r', encoding='utf-8') as f:
            # data example: 1	上海	_	NR	NR	_	2	nn	_	_
            for line in f:
                toks = line.split()
                if len(toks) == 0:
                    yield sentence
                    sentence = []
                elif len(toks) == 10:
                    dep = Dependency(toks[0], toks[1], toks[3], toks[6],
                                     toks[7])
                    sentence.append(dep)
        # yield any trailing sentence if the file does not end with a blank line
        if sentence:
            yield sentence
            sentence = []
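
Since load_ctb is a generator yielding one list of Dependency rows per sentence, a hypothetical consumer looks like this (the path and the `head` attribute name are assumptions):

# Hypothetical usage; the path and `head` attribute name are assumptions.
for sentence in load_ctb("ctb_data/"):
    heads = [dep.head for dep in sentence]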
Example #18
from dependency import Dependency
from metrics import UAS

language = 'Korean'
langcode = 'ko'
PUD_CONLL = f"/net/data/universal-dependencies-2.6/UD_{language}-PUD/{langcode}_pud-ud-test.conllu"
PUD_TOKENS = f"/net/data/universal-dependencies-2.6/UD_{language}-PUD/{langcode}_pud-ud-test.txt"

dependency_trees = Dependency(PUD_CONLL, PUD_TOKENS)

uas_metric = UAS(dependency_trees)

left_branching = [
    list(zip(range(1, len(toks)), range(0, len(toks) - 1))) + [(0, -1)]
    for toks in dependency_trees.tokens
]
right_branching = [
    list(zip(range(0, len(toks) - 1), range(1, len(toks)))) + [(len(toks) - 1, -1)]
    for toks in dependency_trees.tokens
]

uas_metric.reset_state()
uas_metric(range(1000), left_branching)
print("UAS left branching: ", uas_metric.result() * 100)
uas_metric.reset_state()
uas_metric(range(1000), right_branching)
print("UAS right branching: ", uas_metric.result() * 100)

gr_right_branching = right_branching
Example #19
    ax.set_yticks(np.arange(len(sentence_tokens)))
    ax.set_xticks(np.arange(len(sentence_tokens)))
    ax.set_xticklabels(sentence_tokens, rotation=90)
    ax.set_yticklabels(sentence_tokens)
    ax.set_ylim(top=-0.5, bottom=len(sentence_tokens) - 0.5)
    
    plt.savefig(out_file, dpi=300, format='pdf')


if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument("attentions", type=str, help="NPZ file with attentions")
    ap.add_argument("tokens", type=str, help="Labels (tokens) separated by spaces")
    ap.add_argument("conll", type=str, help="Conll file for head selection.")

    ap.add_argument("-layer_idcs", nargs="*", type=int, default=[5, 3], help = "layer indices to plot")
    ap.add_argument("-head_idcs", nargs="*", type=int, default=[4, 9], help="head indices to plot")
    ap.add_argument("-s", "--sentences", nargs='*', type=int, default=list(range(10)), help="Only use the specified sentences; 0-based")
    
    ap.add_argument("-vis-dir", type=str, default="../results", help="Directory where to save head visualizations")
   
    args = ap.parse_args()
    
    dependency_tree = Dependency(args.conll, args.tokens)
    bert_attns = AttentionWrapper(args.attentions, dependency_tree.wordpieces2tokens, args.sentences)
    
    for sent_idx, attn_mats in bert_attns:
        for l, h in zip(args.layer_idcs, args.head_idcs):
            out_file = os.path.join(args.vis_dir, f"L-{l}_H-{h}_sent-{sent_idx}.pdf")
            plot_head(attn_mats, dependency_tree.tokens[sent_idx], l, h, out_file)
Example #20
    dependencyList = set()
    evaluateDependency = False
    skipPattern = re.compile(r'\[INFO\]\s+\n')
    for line in mvnOutput:
        if 'The following files have been resolved:' in line:
            # This is our starting grid for the dependency
            evaluateDependency = True
        elif evaluateDependency:
            if not skipPattern.match(line):
                matchObj = re.match(
                    r'\[INFO\]\s+([\w.\-_]+):([\w.\-_]+):\w+:([\w.\-_]+):(\w+)',
                    line)
                if matchObj is None or matchObj.group(1) in groupToSkip:
                    continue
                dependencyToAdd = Dependency(groupId=matchObj.group(1),
                                             artifactId=matchObj.group(2),
                                             versionId=matchObj.group(3),
                                             scope=matchObj.group(4))
                dependencyList.add(dependencyToAdd)
            else:
                evaluateDependency = False

print 'Unique dependencies ' + str(len(dependencyList)) + '\n'

# Now we have the list of dependencies... let's get the info about them from the maven central
for dependency in dependencyList:
    print dependency.name()
    urlToHit = URL_CONSTANT + str(dependency.groupId) + "/" + str(
        dependency.artifactId) + "/" + str(dependency.versionId)
    resp = requests.get(urlToHit)
    response = resp.text
    soup = BeautifulSoup(response, "html.parser")
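
For reference, the regular expression above captures group, artifact, version, and scope from a mvn dependency:list line; the sample line here is illustrative:

import re

# Illustrative line in the format mvn dependency:list emits.
line = '[INFO]    com.google.guava:guava:jar:28.2-jre:compile'
m = re.match(r'\[INFO\]\s+([\w.\-_]+):([\w.\-_]+):\w+:([\w.\-_]+):(\w+)', line)
print(m.groups())  # ('com.google.guava', 'guava', '28.2-jre', 'compile')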
Example #21
 def inclusion(self, alpha: set, beta: set):
   if beta.issubset(alpha):
     # Dependency (α, β)
     return Dependency(alpha, beta)
Example #22
 def pseudo_transitivity(self, fd1: Dependency, fd2: Dependency, gamma: set):
   if fd1.beta.issubset(fd2.alpha) and gamma.union(fd1.beta) == fd2.alpha:
     # Dependency (α ∪ γ, ω)
     return Dependency(fd1.alpha.union(gamma), fd2.beta)
Example #23
tokens[34] = ['every', 'day']
sentences[35] = 'each day'
dep[35] = [('ROOT', 0, 2), ('det', 2, 1)]
tokens[35] = ['each', 'day']
sentences[36] = 'every monday'
dep[36] = [('ROOT', 0, 2), ('det', 2, 1)]
tokens[36] = ['every', 'monday']
sentences[37] = 'each monday'
dep[37] = [('ROOT', 0, 2), ('det', 2, 1)]
tokens[37] = ['each', 'monday']

#~~~~~~~ generated code from test_generated.py is copy/pasted in the part above.
from rules_cleaning import CleaningRules
from dependency import Dependency

dd = Dependency(sentences[0], dep[0], tokens[0])

rule = CleaningRules(dd)

for i in range(N):
    print("~~~~~~~~~~~~\nSentence {}:".format(i))
    print(sentences[i])

    if len(tokens[i]) == 1:
        print("Permanent: ", rule.apply("permanent"))
        print("Mode: ", rule.apply("mode"))
        print("Time: ", rule.apply("time"))
    else:
        print(rule.apply(None))

    if i == N - 1: break
Example #24
if __name__ == '__main__':
	ap = argparse.ArgumentParser()
	ap.add_argument("attentions", type=str, help="NPZ file with attentions")
	ap.add_argument("tokens", type=str, help="Labels (tokens) separated by spaces")
	ap.add_argument("conll", type=str, help="Conll file for head selection.")
	ap.add_argument("json", type=str, help="Json file with head ensemble")
	
	# other arguments
	ap.add_argument("--report-result", type=str, default=None, help="File where to save the results.")
	ap.add_argument("-s", "--sentences", nargs='*', type=int, default=None,
	                help="Only use the specified sentences; 0-based")
	
	args = ap.parse_args()
	
	dependency = Dependency(args.conll, args.tokens)
	
	head_ensembles = dict()
	ensembles_d2p = dict()
	ensembles_p2d = dict()
	depacc_d2p = dict()
	depacc_p2d = dict()
	
	with open(args.json, 'r') as inj:
		head_ensembles = json.load(inj)
	
	# considered_relations = (Dependency.LABEL_ALL,)
	
	considered_relations = ('adj-modifier', 'adv-modifier', 'auxiliary', 'compound', 'conjunct', 'determiner',
							'noun-modifier', 'num-modifier', 'object', 'other', 'subject', 'cc', 'case', 'mark')
Example #25
 def augmentation(self, fd: Dependency, gamma):
   # Dependency (α ∪ γ, β ∪ γ)
   return Dependency(fd.alpha.union(gamma), fd.beta.union(gamma))
Example #26
 def transitivity(self, fd1: Dependency, fd2: Dependency):
   if fd1.beta == fd2.alpha:
     # Dependency (α, γ)
     return Dependency(fd1.alpha, fd2.beta)
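
A worked application of this axiom, assuming Dependency(alpha, beta) takes two sets (as in the calc_fplus example at the end of this listing) and that `rules` is an instance of the class defining these methods:

# Constructor shape matches Dependency({"A"}, {"C"}) in the final example.
fd1 = Dependency({"A"}, {"B"})
fd2 = Dependency({"B"}, {"C"})
rules.transitivity(fd1, fd2)  # -> Dependency({"A"}, {"C"})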
Example #27
from dependency import Dependency
# ships with Python 3.3
import urllib.request
# available from https://pypi.python.org/pypi/beautifulsoup4
from bs4 import BeautifulSoup
# available from https://pypi.python.org/pypi/pyutilib.subprocess/3.2
import subprocess

# create an instance of the Dependency class
print("Enter the link to the file from the Slackbuilds.org site")
s = input()
file = Dependency(s)

# list all of the file's dependencies
dependencies = file.set_dep()

print("Searching for the file ... ")
print("File found: ", file.name)
print(" ")
print("downloading the build")
num = len(file.down_links) - 1
f = subprocess.Popen(["wget", "-c", file.down_links[num]])

f.wait()
name = file.name + ".tar.gz"
print("extracting")
Example #28
def analyze(fname, cname='MyContract', funcname='foo()'):
    slither = Slither(fname)

    myContract = slither.get_contract_from_name(cname)
    funcA = myContract.get_function_from_signature(funcname)
    
    # Dependency Analysis
    D = Dependency()
    D.compute_contract(myContract, slither)
    D.dependencies = funcA.context[D.KEY_NON_SSA]        

    
    # Refinement Analysis
    R = Refinement()
    R.compute_contract(myContract, slither, fname)

    # Lambda Analysis
    lambdas = get_lambda_analysis(fname, myContract, slither)
    
    # For Guard Types, use Dependency Analysis to fetch all vars which affect
    #   the Guard (i.e. on which the guard depends)
    guards = []
    for var in R.types[R.Typ.GUARD]:
        if var in D.dependencies:
            guards += D.dependencies[var]
    R.types[R.Typ.GUARD] += guards

    # Remove temporary variables and ref vars from types
    to_delete = {}
    for typ in R.types:
        to_delete[typ] = []
        if typ != 6 and typ != 7:
            for var in R.types[typ]:
                if var.name.startswith("REF") or var.name.startswith("TMP"):
                    to_delete[typ].append(var)

    for k, vals in to_delete.items():
        for v in vals:
            R.types[k].remove(v)
                
    # Remove temporary variables and ref vars from dependencies
    to_delete = []
    for var in D.dependencies:
        if var.name.startswith("REF") or var.name.startswith("TMP"):
            to_delete.append(var)
        else:
            to_delete2 = []
            for var2 in D.dependencies[var]:
                if var2.name.startswith("REF") or var2.name.startswith("TMP"):
                    to_delete2.append(var2)
            for x in to_delete2:
                D.dependencies[var].remove(x)
            if len(D.dependencies[var]) == 0:
                to_delete.append(var)
    for x in to_delete:
        D.dependencies.pop(x, None)

    # Fetch written and read types from dependencies
    R.types[R.Typ.WRITTEN] += D.dependencies.keys()
    R.types[R.Typ.READ] += [x for vals in D.dependencies.values() for x in vals]

    # Anything that is an index or guard is also read
    R.types[R.Typ.READ] += R.types[R.Typ.INDEX]
    R.types[R.Typ.READ] += R.types[R.Typ.GUARD]
    R.types[R.Typ.READ] += R.types[R.Typ.GUARDSTART]
    R.types[R.Typ.READ] += R.types[R.Typ.GUARDEND]
        
    # Reformat refinement type entries
    R_types_formatted = {}
    for typ, vrs in R.types.items():
        # Special check for lower casing True and False constants
        rhs = set(map(lambda v: v.lower() if v=="True" or v=="False" else v,
                       set(map(str, vrs))))
        typ = typ.lower() if typ == "True" or typ == "False" else typ
        R_types_formatted[typ] = rhs
    R.types = R_types_formatted
        
    # Reformat dependencies entries
    dependencies_formatted = {}
    for v, vrs in D.dependencies.items():
        # Special check for lower casing True and False constants
        lhs = str(v).lower() if str(v) == "True" or str(v) == "False" else str(v)
        rhs = set(map(lambda v: v.lower() if v=="True" or v=="False" else v,
                       set(map(str, vrs))))
        dependencies_formatted[lhs] = rhs
    D.dependencies = dependencies_formatted

    # Add lambdas to dependencies based on sub-parts
    dependencies_lambdas = {}
    for v, vrs in D.dependencies.items():
        dependencies_lambdas[v] = vrs
        for lam in lambdas:
            lam_vrs = re.findall(r"[\w']+", lam[lam.index(":")+1:])
            if any(map(lambda lv: lv in vrs, lam_vrs)):
                dependencies_lambdas[v].add(lam)
    D.dependencies = dependencies_lambdas
    
    # # Transitive Closure of Dependencies
    # D.dependencies = transitive_close(D.dependencies)
        
    return D, R    
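
A hypothetical invocation, reusing the contract file and the default cname/funcname that also appear in the standalone script of Example #32:

# File name matches the standalone script below; defaults cover the rest.
D, R = analyze('data_dependency_simple_example.sol')
for var, deps in D.dependencies.items():
    print(var, '->', deps)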
Example #29
def find_dependencies(root):
    # sca files
    xml_file_extensions = [
        'wsdl', 'xsd', 'xml', 'xsl', 'bpel', 'componentType', 'decs', 'dvm',
        'jpr', 'edl', 'jca', 'jws', 'config', 'monitor', 'mwp', 'rules', 'sch',
        'schema', 'table', 'offlinedb'
    ]

    # elements and attributes
    element_filters = [
        'reference', 'component', 'service', 'import', 'schemaImport',
        'schema-import', 'schema'
    ]

    attribute_filters = [
        'location', 'wsdlLocation', 'schemaLocation', 'localPart', 'src'
    ]

    # generate XPaths
    element_xpath = './/*[' + ' or '.join([
        "contains(name(.),'" + element_filter + "')"
        for element_filter in element_filters
    ]) + ']'
    attribute_xpath = './/@*[' + ' or '.join([
        "contains(local-name(.),'" + attribute_filter + "')"
        for attribute_filter in attribute_filters
    ]) + ']'

    # loop over directories
    dependencies = []
    for directory_path, directory_names, file_names in os.walk(root):
        # loop over files in the same directory
        for file_name in file_names:
            # get file extension
            file_extension = ''
            if len(file_name.split('.')) > 1:
                file_extension = file_name.split('.')[-1]
            # file extension filter
            if file_extension in xml_file_extensions:
                # parse xml file
                file_path = os.path.join(directory_path, file_name)
                relative_file_path = os.path.relpath(file_path, root)
                xml_tree = xml_parser.parse(file_path)
                # get elements
                elements = xml_tree.xpath(element_xpath)
                for element in elements:
                    # get element name
                    element_name = element.xpath('name(.)')
                    # remove namespace from element name if exists
                    if len(element_name.split(':')) > 1:
                        element_name = element_name.split(':')[-1]
                    # get attributes
                    paths = element.xpath(attribute_xpath)
                    for path in paths:
                        # get attribute name
                        attribute_name = element.xpath(
                            "local-name(.//@*[. = '" + path + "'])")
                        # remove namespace from attribute name if exists
                        if len(attribute_name.split(':')) > 1:
                            attribute_name = attribute_name.split(':')[-1]
                        # ignore wsdl service location
                        if not ((file_extension == 'wsdl') and
                                (element_name == 'service') and
                                (attribute_name == 'location')):
                            # create dependency
                            dependency = Dependency(file=relative_file_path,
                                                    element=element_name,
                                                    attribute=attribute_name,
                                                    path=path)
                            dependencies.append(dependency)
    # return
    return dependencies
Example #30
	projectName, projectId, projectVersion, projectUrl, projectTimePeriod, projectDependencies = projectEntry

	# check the project list for a project with a similar id
	# assume that we did not find it, yet
	found = False
	for sfproject in projectList:
		if projectId == sfproject.getProjectId():
			found = True
			break

	if not found:
		# create a new project list entry
		sfproject = SfProject(projectName, projectId)

	# set up dependency object	
	dependency = Dependency()
	dependency.setTimePeriodStart(projectTimePeriod[0])
	dependency.setTimePeriodEnd(projectTimePeriod[1])
	dependency.setVersion(projectVersion)
	dependency.setUrl(projectUrl)
	for dep in projectDependencies:
		dependency.addDependency(dep)
	
	sfproject.addProjectDependencyEntry(dependency)
	if not found:
		projectList.append(sfproject)

testEnde1 = time.clock()		
		
#print "number of projects: ", len(projectList)
#for sfproject in projectList:
Example #31
 def union(self, fd1: Dependency, fd2: Dependency):
   if fd1.alpha == fd2.alpha:
     # Dependency (α, β ∪ γ)
     return Dependency(fd1.alpha, fd1.beta.union(fd2.beta))
Example #32
# from analysis import is_dependent, pprint_dependency, compute_dependency_contract
from dependency import Dependency
from refinement import Refinement


slither = Slither('data_dependency_simple_example.sol')

myContract = slither.get_contract_from_name('MyContract')
funcA = myContract.get_function_from_signature('foo()')

a = myContract.get_state_variable_from_name('a')
b = myContract.get_state_variable_from_name('b')
c = myContract.get_state_variable_from_name('c')
d = myContract.get_state_variable_from_name('d')

D = Dependency()

D.compute_contract(myContract, slither)
D.dependencies = funcA.context[D.KEY_NON_SSA]        

R = Refinement()
R.compute_contract(myContract, slither)

guards = []
for var in R.types[R.Typ.GUARD]:
    if var in D.dependencies:
        guards += D.dependencies[var]

R.types[R.Typ.GUARD] += guards

for typ in R.types:
Example #33
 def decomposition(self, fd: Dependency, beta: set, gamma: set):
   if fd.beta == beta.union(gamma):
     # Dependencies (α, β) and (α, γ)
     return [Dependency(fd.alpha, beta), Dependency(fd.alpha, gamma)]
Example #34
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument("conll", type=str, help="Conll file for head selection.")
    
    ap.add_argument("-j", "--json", type=str, default=None, help="Json with the head ensembles")
    ap.add_argument("-e", "--evaluate-only", action="store_true",
                    help="Whether to only evaluate (preomputed Json with head ensembles needed)")
    # other arguments
    
    ap.add_argument("--report-result", type=str, default=None, help="File where to save the results.")
    ap.add_argument("-s", "--sentences", nargs='*', type=int, default=None,
                    help="Only use the specified sentences; 0-based")
    
    args = ap.parse_args()
    
    dependency_tree = Dependency(args.conll)
    
    offset_modes = None
    
    if args.evaluate_only:
        if not args.json:
            raise ValueError("JSON with offset modes required in evaluate only mode!")

        with open(args.json, 'r') as inj:
            offset_modes = json.load(inj)
    
    else:
        offset_modes = dependency_tree.calc_offset_modes()
    
    results = defaultdict(dict)
    clausal_relations = ('adj-modifier', 'adv-modifier', 'auxiliary', 'compound', 'conjunct', 'determiner',
Example #35
from dependency import Dependency
from rules import Rules

from fplus import calc_fplus

# relation R(A, B, C, D)
r = [{"A"}, {"B"}, {"C"}, {"D"}]
# F: the functional dependencies
F = [
    Dependency({"A"}, {"C"}),
    Dependency({"D"}, {"B"}),
]

fplus = calc_fplus(r, F)

for dep in fplus:
    print(dep)

print("\n{} dependencias encontradas para F+".format(len(fplus)))