Example #1
File: executor.py  Project: Machyne/pal
 def post(self):
     params = {x: request.form[x] for x in request.form}
     StandardNLP.process(params)
     FeatureExtractor.process(params)
     Classifier.process(params)
     Executor.process(params)
     return params
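
This post() handler, like several later ones, starts by copying request.form into a plain dict so the downstream stages can mutate it freely. The handlers read like Flask-RESTful resource methods; assuming that framework (the framework, resource name, and route below are assumptions made for illustration, not taken from the excerpts), the same flattening can also be written with Werkzeug's MultiDict.to_dict():

from flask import Flask, request
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class EchoParams(Resource):  # hypothetical resource, for illustration only
    def post(self):
        # same effect as {x: request.form[x] for x in request.form}
        params = request.form.to_dict()
        # in pal, StandardNLP.process(params) and the later stages would run here
        return params

api.add_resource(EchoParams, '/echo')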
Example #2
File: hill_climb.py  Project: Machyne/pal
def get_confs_kws(query, services):
    params = {'query': query}
    StandardNLP.process(params)
    keywords = find_keywords(params['features']['tokens'])
    confidences = {}
    for name, heuristic in services.iteritems():
        confidences[name] = heuristic.run_heuristic(keywords)
    return confidences, keywords + ['BIAS']
Example #3
File: engine.py  Project: Machyne/pal
 def process(cls, params):
     Validator.process(params)
     if 'error' in params:
         cls.LOGGER.error(message=params['error'])
         abort(404, message=params['error'])
     StandardNLP.process(params)
     FeatureExtractor.process(params)
     Classifier.process(params)
     Executor.process(params)
     Logger.process(params)
     Sanitizer.process(params)
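
The engine.py excerpt shows the full pal pipeline: each stage exposes a class-level process(params) and communicates by mutating the one shared params dict in place rather than by returning values. A minimal, self-contained sketch of that stage contract, using a stand-in stage that is not part of pal:

class UppercaseStage:  # hypothetical stand-in stage, not from pal
    @classmethod
    def process(cls, params):
        # write results back into the shared dict, as the pal stages do
        params['query_upper'] = params['query'].upper()

params = {'query': 'what time is it'}
UppercaseStage.process(params)
print(params['query_upper'])  # -> 'WHAT TIME IS IT'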
Example #4
 def post(self):
     params = {x: request.form[x] for x in request.form}
     StandardNLP.process(params)
     return find_nouns(params['features']['pos'])[1]
Example #5
 def post(self):
     params = {x: request.form[x] for x in request.form}
     StandardNLP.process(params)
     return classify_question(params['features']['tokens'])
Example #6
 def post(self):
     params = {x: request.form[x] for x in request.form}
     StandardNLP.process(params)
     return is_question(params["features"]["tokens"])
Example #7
File: test_nlp.py  Project: Machyne/pal
def test_nouns():
    for test in test_cases:
        sent, tokens, exp_features = test
        processed = StandardNLP.process(sent)
        features = FeatureExtractor.extract_features(processed)
        assert features['nouns'] == exp_features['nouns']
Example #8
File: test_nlp.py  Project: Machyne/pal
def test_pre_processing():
    for test in test_cases:
        sent, tokens, exp_features = test
        processed = StandardNLP.process(sent)
        assert processed['pos'] == tokens
Example #9
 def post(self):
     params = {x: request.form[x] for x in request.form}
     StandardNLP.process(params)
     return get_tense(params['features']['pos'])
Example #10
 def post(self):
     params = {x: request.form[x] for x in request.form}
     StandardNLP.process(params)
     return find_keywords(params['features']['tokens'])
Example #11
File: test_nlp.py  Project: Machyne/pal
def test_question_type():
    for test in test_cases:
        sent, tokens, exp_features = test
        processed = StandardNLP.process(sent)
        features = FeatureExtractor.extract_features(processed)
        assert features['questionType'] == exp_features['questionType']
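
Taken together, the examples show one recurring pattern: build a plain params dict, call StandardNLP.process(params) to enrich it in place, then read the derived data out of params['features'] (the excerpts use at least 'tokens' and 'pos'). A minimal sketch of that pattern, assuming the pal package is installed; the import path below is a guess and may need adjusting to wherever StandardNLP lives in the repository:

from pal.nlp.standard_nlp import StandardNLP  # import path is an assumption

params = {'query': 'What time is my meeting tomorrow?'}
StandardNLP.process(params)              # mutates params in place
tokens = params['features']['tokens']    # used by find_keywords / classify_question
pos_tags = params['features']['pos']     # used by get_tense / find_nouns
print(tokens, pos_tags)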