def extract_command(string: str):
    l = enums.DependencyEdge.Label
    tokens = process(string)

    # Find the command's verb: the first verb whose text is a known command verb.
    line_verbs = get_token_by_pos(tokens, enums.PartOfSpeech.Tag.VERB)
    candidate_verbs = [
        v for v in line_verbs if v.text.content.lower() in known_verbs
    ]
    if len(candidate_verbs) == 0:
        print("ERROR: No relevant verbs found")
        return None
    verb = candidate_verbs[0]

    def get_possessive(token):
        # Prefer the object of a preposition, then the preposition itself,
        # then a direct object.
        x = token.get_dependant(l.PREP)
        y = x.get_dependant(l.POBJ) if x else None
        z = token.get_dependant(l.DOBJ)
        return y or x or z

    def get_num(token):
        return token.get_dependant(l.NUM)

    def get_context(token):
        return token.get_dependant(l.AMOD)

    pos = get_possessive(verb)
    print("NUMBER", get_num(pos) if pos else None)
    print("CONTEXT", get_context(pos) if pos else None)
    print("VERB", verb)
    print("TARGET", pos)
def analyze(data):
    query = nlp.process(data)
    response = {}
    # Dispatch on the query type identified by the NLP layer.
    if query['type'] == "wiki":
        response = encyclopedia(query['subject'])
    elif query['type'] == "calc":
        response = calculator.main(query['subject'])
    elif query['type'] == "error":
        response = query
    return response
def process(input, output=None):
    """Convert a text file to a document.

    Parameters
    ----------
    input : str
        Filepath for the input text file.
    output : str, optional
        Filepath for the output document database. If none provided,
        defaults to an in-memory database.

    See Also
    --------
    `nlp.process` - the core functionality of this command-line procedure.
    """
    import nlp

    # Connect to the output database (defaults to in-memory).
    with orm.Connection(output) as mapping:
        # Read the provided document and process all of its text.
        logger.info(f"Processing file {input}...")
        with open(input, "r") as f:
            text = f.read()
        nlp.process(text, mapping)
        logger.info(f"Processing of file {input} complete.")
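# A minimal usage sketch for process() above; "notes.txt" and "notes.db" are
# hypothetical paths, and the `nlp` and `orm` dependencies are assumed to
# behave as described in the docstring.
if __name__ == "__main__":
    process("notes.txt", "notes.db")  # persist to a file-backed database
    process("notes.txt")              # or fall back to an in-memory database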
def to_presses(cls, command):
    tokens = process(command)
    render_tokens(tokens)
    return extract_command(tokens)
from nlp import process, render_tokens, to_ir
from voicerec import voice_command_generator

# Continuously transcribe voice commands and print their intermediate representation.
for command in voice_command_generator('hey google'):
    tokens = process(command)
    render_tokens(tokens)
    print(to_ir(tokens))
def note_to_smartnote(note):
    smart_note = nlp.process(note)
    print(smart_note)
    return smart_note
def view(id):
    (received_text2, new_received, number_of_tokens, blob_sentiment,
     blob_subjectivity, summary, final_time, len_of_words, markstop) = process(id)
    return render_template(
        'dataset_view.html',
        received_text=received_text2,
        new_received=new_received,
        number_of_tokens=number_of_tokens,
        blob_sentiment=blob_sentiment,
        blob_subjectivity=blob_subjectivity,
        summary=summary,
        final_time=final_time,
        len=len_of_words,
        dt=dt,
        id=id,
        df=df,
        markstop=markstop,
    )
import json
import os
from collections import defaultdict
from glob import glob

data_dir = 'clue/data'
statements_dir = 'statements'

person_data = {}
statement_data = {}

# Index the known persons by a slugified version of their full name.
with open(os.path.join(data_dir, 'persons.json')) as f:
    for person in json.load(f)['items']:
        person_data[person['fullName'].lower().replace(' ', '-')] = person

persons_statements = defaultdict(list)
statements_persons = defaultdict(list)

# Process each statement and cross-reference the PERSON entities it mentions.
for fn in glob(os.path.join(statements_dir, 'statement-*.txt')):
    statement_key = os.path.basename(fn).replace('.txt', '')
    doc = process(fn)
    doc.user_data['title'] = statement_key
    statement_data[statement_key] = doc

    person_ents = {ent for ent in doc.ents if ent.label_ == 'PERSON'}
    for ent in person_ents:
        person_key = ent.text.lower().replace(' ', '-')
        person = person_data.get(person_key, None)
        if person:
            persons_statements[person_key].append(statement_key)
            statements_persons[statement_key].append(person_key)

person_data = {
    id: person