Example #1
def test_complex_stim_from_text():
    textfile = join(get_test_data_path(), 'text', 'scandal.txt')
    text = open(textfile).read().strip()
    stim = ComplexTextStim.from_text(text)
    target = ['To', 'Sherlock', 'Holmes']
    assert [w.text for w in stim.elements[:3]] == target
    assert len(stim.elements) == 231
    stim = ComplexTextStim.from_text(text, unit='sent')
    # Custom tokenizer
    stim = ComplexTextStim.from_text(text, tokenizer=r'(\w+)')
    assert len(stim.elements) == 209
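
The drop from 231 to 209 elements comes from swapping the default word tokenizer for the r'(\w+)' regex, which captures runs of word characters and never emits punctuation tokens. A minimal, self-contained sketch of that behaviour; the sample sentence and counts are illustrative only, not the 231/209 from the test:

import re

sample = "To Sherlock Holmes she is always the woman."

# The regex tokenizer from the test keeps runs of word characters only,
# so the trailing period never becomes a token.
print(re.findall(r'(\w+)', sample))
# ['To', 'Sherlock', 'Holmes', 'she', 'is', 'always', 'the', 'woman']
# A tokenizer that also emits punctuation as its own token would add a
# ninth element here, which is the kind of gap behind 231 vs. 209 above.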
Example #2
def test_complex_stim_from_text(self):
    textfile = join(_get_test_data_path(), 'text', 'scandal.txt')
    text = open(textfile).read().strip()
    stim = ComplexTextStim.from_text(text)
    target = ['To', 'Sherlock', 'Holmes']
    self.assertEqual([w.text for w in stim.elements[:3]], target)
    self.assertEqual(len(stim.elements), 231)
    stim = ComplexTextStim.from_text(text, unit='sent')
    # Custom tokenizer
    stim = ComplexTextStim.from_text(text, tokenizer=r'(\w+)')
    self.assertEqual(len(stim.elements), 209)
Example #3
def test_complex_stim_from_text():
    textfile = join(_get_test_data_path(), 'text', 'scandal.txt')
    text = open(textfile).read().strip()
    stim = ComplexTextStim.from_text(text)
    target = ['To', 'Sherlock', 'Holmes']
    assert [w.text for w in stim.elements[:3]] == target
    assert len(stim.elements) == 231
    stim = ComplexTextStim.from_text(text, unit='sent')
    # Custom tokenizer
    stim = ComplexTextStim.from_text(text, tokenizer=r'(\w+)')
    assert len(stim.elements) == 209
Example #4
def test_predefined_dictionary_extractor():
    text = """enormous chunks of ice that have been frozen for thousands of
              years are breaking apart and melting away"""
    stim = ComplexTextStim.from_text(text)
    td = PredefinedDictionaryExtractor(['aoa/Freq_pm', 'affect/V.Mean.Sum'])
    timeline = stim.extract([td])
    df = TimelineExporter.timeline_to_df(timeline)
    assert df.shape == (36, 4)
    valid_rows = df.query('name == "affect_V.Mean.Sum"').dropna()
    assert len(valid_rows) == 3
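
The final two assertions rely on a plain pandas idiom: filter the long-format results down to one dictionary variable, then drop the words for which the lookup produced no value. A toy version of that filter, with an invented 'value' column standing in for the extractor output; only the 'name' column and the query string mirror the test:

import numpy as np
import pandas as pd

# Toy stand-in for the exported timeline: one row per (word, variable) pair.
df = pd.DataFrame({
    'name':  ['aoa_Freq_pm'] * 3 + ['affect_V.Mean.Sum'] * 3,
    'value': [12.4, 301.0, np.nan, 6.1, np.nan, np.nan],
})

# Same filter as the test: keep one variable, then drop missing lookups.
valid_rows = df.query('name == "affect_V.Mean.Sum"').dropna()
print(len(valid_rows))  # 1 in this toy frame; 3 in the test above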
Example #5
File: api.py  Project: qmac/featureX
def _convert(self, audio):
    import speech_recognition as sr
    with sr.AudioFile(audio.filename) as source:
        clip = self.recognizer.record(source)
    text = getattr(self.recognizer, self.recognize_method)(clip, self.api_key)
    return ComplexTextStim.from_text(text=text)
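
For reference, here is what the dynamic getattr call in _convert resolves to when recognize_method is, say, 'recognize_google'. This is a sketch against the public speech_recognition API, not the converter's own code; the filename is a placeholder, and passing key=None falls back to the library's default public endpoint:

import speech_recognition as sr

recognizer = sr.Recognizer()

# Read the whole file into an AudioData object.
with sr.AudioFile('speech.wav') as source:   # placeholder path
    clip = recognizer.record(source)

# Equivalent to getattr(recognizer, 'recognize_google')(clip, api_key)
# in the converter above.
text = recognizer.recognize_google(clip, key=None)
print(text)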