Code Example #1
def f(model, text):
    """
    Prints the parse tree of ``text``, labeled with the sentiments predicted by ``model``.

    NOTE: It requires a CoreNLP server running at http://localhost:9000.

    Parameters
    ----------
    model : the RNTN model
    text : a sentence to predict

    Examples
    --------
    >>> model = rntn.RNTN.load('models/RNTN.pickle')
    >>> f(model, "not very good")
           1
       ____|____
      |         4
      |       __|__
      2      2     3
      |      |     |
     not   very   good

    """
    # ``tr`` is assumed to be the tree-parsing module (e.g. ``import tree as tr``).
    for tree in tr.parse(text):
        model.predict(tree).pretty_print()
Code Example #2
File: test_convert.py  Project: jkingdon/jh2gh
  def test_variables_from_param_are_not_visible_in_imported_interface(self):
    converter = convert.Convert()
    opener = string_stream.Opener()
    converter.set_opener(opener)
    opener.set_file("Interface/L/o/g/Logic1", string_stream.StringStream("""
<jh>
kind (formula)
var (formula in-one)
</jh>
"""))
    logic2 = string_stream.StringStream("""
<jh>
param (ONE Interface:Logic1 () "")
var (formula in-two)
term (formula (¬ formula))
</jh>
""")

    expressions = tree.parse(tokenizer.WikiTokenizer(logic2))
    converter.convert_tree(expressions)
    result = expressions.to_string_wiki_to_comment()

    self.assertEqual("""# 
param (ONE Logic1.ghi () "")
tvar (formula in-two)
term (formula (¬ in-two))
""", result)
Code Example #3
File: convert.py  Project: jkingdon/jh2gh
  def convert(self, input):
    expressions = tree.parse(input)
#    try:
    self.convert_tree(expressions)
#    except:
#      print "Got an exception"
#      print repr(expressions)
    return repr(expressions)
Code Example #4
File: convert.py  Project: jkingdon/jh2gh
  def convert(self, underscored_name):
    input = open(Convert().convert_filename(underscored_name), "r")
    proof_filename = "/general/" + Convert().ghilbert_filename(underscored_name)
    output = open("../ghilbert-app" + proof_filename, "w")
    output.write("# Creative Commons Attribution-Share Alike 3.0 Unported (http://creativecommons.org/licenses/by-sa/3.0/)\n")

    if underscored_name.split(":")[0] == "Interface":
      expressions = tree.parse(tokenizer.WikiTokenizer(input))
      Convert().convert_tree(expressions)
      output.write(expressions.to_string_wiki_to_comment())
    else:
      wiki_out = open("../ghilbert-app/wiki/general/" + underscored_name + ".ghm", "w")
      output.write(Wiki(input, proof_filename, wiki_out).convert())
Code Example #5
File: app.py  Project: steve3p0/LING511
    def post(self) -> Dict:
        """ HTTP POST calls parses a string
        :return: Dict[result]
            example
            {
                'sentence': 'boy meets world',
                'parser': 'pdx',
                'request_formats': [ 'tree_ascii', 'bracket_diagram', 'tree_str']
            }
        :rtype: Union[Dict[str, str], None]
        """

        try:
            result = tree.parse(self.sentence,
                                parser=self.parser,
                                request_formats=self.request_formats)
            return result
        except IndexError as err:
            # Python 3 exceptions have no ``.message`` attribute; use str(err)
            raise tree_exceptions.InvalidUsage(str(err), 400,
                                               request.get_data())
Code Example #6
File: parallel_search.py  Project: JelleZijlstra/JPP
	def parse_list(l):
		'''Parse a list of strings into a list of tree objects'''
		return [tree.parse(t) for t in l]
Code Example #7
File: test_tree.py  Project: jkingdon/jh2gh
  def process(self, inputString):
    return tree.parse(string_stream.StringStream(inputString))
Code Example #8
File: exhaustive.py  Project: JCM333/JPP
def run_tests():
	import charmatrix
	cm = charmatrix.charmatrix("data/data2.txt")
	t = exhaustive_search(cm)
	assert t == tree.parse("(0,(1,(2,(3,4))))"), "exhaustive search must return this tree"
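The string handed to tree.parse above is a nested-pair (Newick-style) encoding of a binary tree. As an illustration of the format only (JPP's tree.parse builds its own tree objects, so this stand-in returns plain nested tuples), a minimal hand-rolled parser might look like:

def parse_newick_like(s):
    """Parse a nested-pair string such as "(0,(1,(2,(3,4))))" into tuples."""
    def parse_node(i):
        if s[i] == '(':
            i += 1                       # consume '('
            left, i = parse_node(i)
            i += 1                       # consume ','
            right, i = parse_node(i)
            i += 1                       # consume ')'
            return (left, right), i
        j = i
        while j < len(s) and s[j] not in ',()':
            j += 1                       # scan a leaf label
        return s[i:j], j

    node, _ = parse_node(0)
    return node

assert parse_newick_like("(0,(1,(2,(3,4))))") == ('0', ('1', ('2', ('3', '4'))))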
Code Example #9
File: parse.py  Project: eggpan95/BOWser
    dictionary["COUNT_CAPS"] = sum(w[0].upper() == w[0] for w in tokens)

# Set the starting nonterminal: "answer" for geoquery, "query" for nlmaps,
# "listValue" for the overnight datasets.
generator = SemanticGenerator(NT("listValue"))

def semparse(sent):
    d = dict()
    nl_features(sent,d)
    print(generator.greedy_generate(d))
    
t1 = time()
print("Reading corpus...")
with open("datasets/overnight_rec/train.txt") as f: #nlmaps.train.enlisp
    for line in f:
        nl, mrl = line.split("\t")
        try:
            t = parse(mrl)
        except ValueError:
            print("PARSE-ERROR:",line)
            continue
        #print(t)
        rules_and_features = t.extract_rules_with_features()
        for rule, instanceDict in rules_and_features:
            nl_features(nl,instanceDict)
            #print(rule,instanceDict)
            generator.addInstance(rule, instanceDict)
        #input()
t2 = time()
print("done...took {}s".format(round(t2-t1,4)))
print("Training...")
generator.train()
t3 = time()
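The corpus loop above assumes one training example per line, with the natural-language sentence and the meaning representation separated by a tab. A hypothetical line (illustrative values only, not taken from the actual dataset) and the corresponding split:

line = "what states border texas\t(answer (state (borders texas)))\n"
nl, mrl = line.split("\t")  # nl: natural language, mrl: meaning representation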
Code Example #10
File: parser.py  Project: y-akinobu/kolab
    def acceptChunk(self, tree: ParseTree):
        s = str(tree)
        node = ntree.parse(s)
        # 系列 ("sequence") re-wraps the flattened nodes, then simplifies
        return ntree.系列(*node.flatten()).simplify()
Code Example #11
import re


def process_line(line):
    line = line.strip()
    ops = {
        '+': 'add',
        '-': 'sub',
        '/': 'div',
        '*': 'mul',
        '%': 'mod',
    }

    # check brackets
    def line_has_brackets():
        return '(' in line and ')' in line

    def line_has_op():
        # True if any character of the line is one of the operator symbols
        return any(ch in ops for ch in line)

    if line_has_brackets() and line_has_op():
        parts = tree.parse(line)

        # convert tree to str now

        return 'foobar'

    # check regular ops
    def split_line(op):
        return re.split('(' + re.escape(op) + ')', line)

    splits = [split_line(op) for op in ops]
    matches = [s for s in splits if len(s) == 3]

    if len(matches) == 1:
        l_part, op, r_part = matches[0]
        op_name = ops[op]

        l_split = l_part.split('=')

        if len(l_split) == 2:
            # plain assignment, e.g. "x = a + b;" -> "x = add(a, b);"
            l_value1 = l_split[0].strip()
            l_value2 = l_split[1].strip()
            r_value = r_part.strip(' ;')

            return l_value1 + ' = ' + op_name + '(' + l_value2 + ', ' + \
                r_value + ');'
        else:
            # augmented form, e.g. "x += 1;" -> "x = add(x, 1);"
            l_value = l_part.strip()
            r_value = r_part.strip(' =;')

            return l_value + ' = ' + op_name + '(' + l_value + ', ' + \
                r_value + ');'

    # check unary ops
    unary_ops = {'++': 'add', '--': 'sub'}

    matches = [op for op in unary_ops if line.endswith(op + ';')]

    if matches:
        op = matches[0]
        op_name = unary_ops[op]
        value = line.strip(' +-;')

        return value + ' = ' + op_name + '(' + value + ', 1);'

    # no match, return line itself
    return line
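A few sample rewrites this function produces (outputs traced from the logic as written):

print(process_line('x = a + b;'))   # -> x = add(a, b);
print(process_line('total -= n;'))  # -> total = sub(total, n);
print(process_line('count++;'))     # -> count = add(count, 1);
print(process_line('y = 1;'))       # -> y = 1;   (no operator, returned unchanged)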
Code Example #12
File: convert.py  Project: jkingdon/jh2gh
  def __init__(self, input, proof_filename, wiki_out):
    self._tree = tree.parse(tokenizer.WikiTokenizer(input))
    self._proof_filename = proof_filename
    self._wiki_out = wiki_out
    self._proof = ''