def get_word_dependencies(text):
    """Parse *text* with the Stanford dependency parser and collect its
    head -> dependent relations.

    Parameters
    ----------
    text : str
        A raw sentence (or short passage) to parse. Only the first parse
        of the first sentence is consumed from the parser's result.

    Returns
    -------
    dict[str, list[tuple[str, str]]]
        Maps each governor (head) word to a list of
        ``(dependent_word, relation_label)`` pairs, e.g.
        ``{"sat": [("cat", "nsubj"), ("mat", "nmod")]}``.

    Notes
    -----
    Relies on module-level names assumed to be defined elsewhere in this
    file: ``StanfordDependencyParser``, ``StanfordPOSTagger``, ``osp``
    (os.path), ``datadir``, and ``find_jars_within_path``.
    """
    dep_parser = StanfordDependencyParser(
        model_path=osp.join(
            datadir,
            "stanford_data/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz",
        ),
        java_options="-mx4g -XX:-UseGCOverheadLimit",
    )

    # NOTE(review): this tagger is constructed only so its jar location can
    # seed the classpath below; it is never used to tag `text`. Kept for
    # any classpath side effects — confirm whether it can be removed.
    st = StanfordPOSTagger(
        osp.join(datadir, "stanford_pos/stanford-postagger-3.9.1.jar"),
        osp.join(datadir, "stanford_pos/models/english-bidirectional-distsim.tagger"),
        # FIX: JVM options are space-separated and the GC flag needs the
        # "-XX:" prefix. The original "-mx4g, XX:-UseGCOverheadLimit" would
        # be passed to the JVM as invalid tokens and abort startup; this now
        # matches the parser's options above.
        java_options="-mx4g -XX:-UseGCOverheadLimit",
    )
    # Point the tagger at every jar found alongside its main jar so the
    # JVM classpath is complete. (Note: ':' is the POSIX classpath
    # separator; on Windows it would need to be ';'.)
    stanford_dir = st._stanford_jar.rpartition("/")[0]
    stanford_jars = find_jars_within_path(stanford_dir)
    st.stanford_jar = ":".join(stanford_jars)

    # raw_parse yields an iterator of parses; take the best (first) one.
    dep = next(dep_parser.raw_parse(text))

    # Each triple is ((governor_word, gov_tag), relation, (dependent_word, dep_tag)).
    dependencies = {}
    for governor, relation, dependent in dep.triples():
        dependencies.setdefault(governor[0], []).append((dependent[0], relation))
    return dependencies