def test_turtle(self):
    """Any model should be serializable as a turtle file"""
    ana = EmotionAnalysis()
    res = Results()
    res.activities.append(ana)
    entry = Entry(text='Just testing')
    eSet = EmotionSet()
    emotion = Emotion()
    entry.emotions.append(eSet)
    res.entries.append(entry)
    eSet.onyx__hasEmotion.append(emotion)
    eSet.prov__wasGeneratedBy = ana.id
    triples = ('ana a :Analysis',
               'entry a :entry',
               ' nif:isString "Just testing"',
               ' onyx:hasEmotionSet eSet',
               'eSet a onyx:EmotionSet',
               ' prov:wasGeneratedBy ana',
               ' onyx:hasEmotion emotion',
               'emotion a onyx:Emotion',
               'res a :results',
               ' me:AnalysisInvolved ana',
               ' prov:used entry')
    t = res.serialize(format='turtle')
    print(t)
    g = rdflib.Graph().parse(data=t, format='turtle')
    assert len(g) == len(triples)
def test_turtle(self):
    """Any model should be serializable as a turtle file"""
    ana = EmotionAnalysis()
    res = Results()
    res.analysis.append(ana)
    entry = Entry(text='Just testing')
    eSet = EmotionSet()
    emotion = Emotion()
    entry.emotions.append(eSet)
    res.entries.append(entry)
    eSet.onyx__hasEmotion.append(emotion)
    eSet.prov__wasGeneratedBy = ana.id
    triples = ('ana a :Analysis',
               'entry a :entry',
               ' nif:isString "Just testing"',
               ' onyx:hasEmotionSet eSet',
               'eSet a onyx:EmotionSet',
               ' prov:wasGeneratedBy ana',
               ' onyx:hasEmotion emotion',
               'emotion a onyx:Emotion',
               'res a :results',
               ' me:AnalysisInvolved ana',
               ' prov:used entry')
    t = res.serialize(format='turtle')
    print(t)
    g = rdflib.Graph().parse(data=t, format='turtle')
    assert len(g) == len(triples)
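# The two turtle tests above only check the triple count of the serialized
# graph. A minimal standalone sketch (not part of the test suite) of how the
# same rdflib calls parse turtle data and expose the resulting triples; the
# graph content and namespace URIs below are placeholders for illustration.
import rdflib

turtle_data = """
@prefix onyx: <http://example.com/onyx#> .
@prefix ex: <http://example.com/> .

ex:eSet a onyx:EmotionSet ;
    onyx:hasEmotion ex:emotion .
ex:emotion a onyx:Emotion .
"""

g = rdflib.Graph().parse(data=turtle_data, format='turtle')
assert len(g) == 3  # one triple per subject-predicate-object statement above
for subject, predicate, obj in g:
    print(subject, predicate, obj)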
def test_results(self):
    r = Results()
    e = Entry()
    e.nif__isString = "Results test"
    r.entries.append(e)
    r.id = ":test_results"
    r.validate()
def test_serializable(self):
    r = Results()
    e = Entry()
    r.entries.append(e)
    d = r.serializable()
    assert d
    assert d['entries']
def test_jsonld(self):
    prueba = {"id": "test", "activities": [], "entries": []}
    r = Results(**prueba)
    print("Response's context: ")
    pprint(r._context)

    assert r.id == "test"

    j = r.jsonld(with_context=True)
    print("As JSON:")
    pprint(j)
    assert ("@context" in j)
    assert ("marl" in j["@context"])
    assert ("entries" in j["@context"])
    assert (j["@id"] == "test")
    assert "id" not in j

    r6 = Results(**prueba)
    e = Entry({"@id": "ohno", "nif:isString": "Just testing"})
    r6.entries.append(e)
    logging.debug("Response 6: %s", r6)
    assert ("marl" in r6._context)
    assert ("entries" in r6._context)
    j6 = r6.jsonld(with_context=True)
    logging.debug("jsonld: %s", j6)
    assert ("@context" in j6)
    assert ("entries" in j6)
    assert ("activities" in j6)
    resp = r6.flask()
    received = json.loads(resp.data.decode())
    logging.debug("Response: %s", j6)
    assert (received["entries"])
    assert (received["entries"][0]["nif:isString"] == "Just testing")
    assert (received["entries"][0]["nif:isString"] != "Not testing")
def test_patch_json(self):
    r = Results()
    with patch_requests(ENDPOINT, r):
        res = requests.get(ENDPOINT)
        assert res.text == json.dumps(r.jsonld())
        js = res.json()
        assert js
        assert js['@type'] == r['@type']
def test_patch_json(self):
    r = Results()
    with patch_requests(r):
        res = requests.get('http://example.com')
        assert res.content == json.dumps(r.jsonld())
        js = res.json()
        assert js
        assert js['@type'] == r['@type']
def test_serializable(self):
    r = Results()
    e = Entry()
    ent = Entity()
    e.entities.append(ent)
    r.entries.append(e)
    d = r.serializable()
    assert d
    assert d['entries']
    assert d['entries'][0]['entities']
def test_template(self):
    r = Results()
    e = Entry()
    e.nif__isString = 'testing the template'
    sent = Sentiment()
    sent.polarity = 'marl:Positive'
    r.entries.append(e)
    e.sentiments.append(sent)
    template = ('{% for entry in entries %}'
                '{{ entry["nif:isString"] | upper }}'
                ',{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}'
                '{% endfor %}')
    res = r.serialize(template=template)
    assert res == 'TESTING THE TEMPLATE,Positive'
def test_template(self):
    r = Results()
    e = Entry()
    e.nif__isString = 'testing the template'
    sent = Sentiment()
    sent.polarity = 'marl:Positive'
    r.entries.append(e)
    e.sentiments.append(sent)
    template = (
        '{% for entry in entries %}'
        '{{ entry["nif:isString"] | upper }}'
        ',{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}'
        '{% endfor %}')
    res = r.serialize(template=template)
    assert res == 'TESTING THE TEMPLATE,Positive'
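# The serialize(template=...) calls above use Jinja2-style syntax. A minimal
# standalone sketch of the same rendering done with jinja2 directly, feeding a
# hand-built dictionary instead of a Results object (the data layout here is
# only assumed for illustration, not taken from the library).
from jinja2 import Template

template = Template(
    '{% for entry in entries %}'
    '{{ entry["nif:isString"] | upper }}'
    ',{{ entry["sentiments"][0]["marl:hasPolarity"].split(":")[1] }}'
    '{% endfor %}')

entries = [{
    "nif:isString": "testing the template",
    "sentiments": [{"marl:hasPolarity": "marl:Positive"}],
}]

assert template.render(entries=entries) == 'TESTING THE TEMPLATE,Positive'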
def analyse(self, **params):
    lang = params.get("language", "auto")
    res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
                        json.dumps({"language": lang,
                                    "data": [{"text": params["input"]}]}))
    p = params.get("prefix", None)
    response = Results(prefix=p)
    polarity_value = self.maxPolarityValue * int(
        res.json()["data"][0]["polarity"]) * 0.25
    polarity = "marl:Neutral"
    neutral_value = self.maxPolarityValue / 2.0
    if polarity_value > neutral_value:
        polarity = "marl:Positive"
    elif polarity_value < neutral_value:
        polarity = "marl:Negative"
    entry = Entry(id="Entry0", nif__isString=params["input"])
    sentiment = Sentiment(id="Sentiment0",
                          prefix=p,
                          marl__hasPolarity=polarity,
                          marl__polarityValue=polarity_value)
    sentiment.prov__wasGeneratedBy = self.id
    entry.sentiments = []
    entry.sentiments.append(sentiment)
    entry.language = lang
    response.entries.append(entry)
    return response
def analyse(self, **params):
    logger.debug("SentimentAnalysisDL Analysing with params {}".format(params))

    text_input = params.get("input", None)
    # st = datetime.now()
    text = self.cleanTweet(text_input)
    # logger.info("{} {}".format(datetime.now() - st, "tweet cleaned"))

    X_test = self.convert_text_to_vector([text], self._tokenizer)
    y_pred = self.classify(X_test)

    response = Results()
    entry = Entry()

    _mapping_labels = {0: 'positive', 1: 'negative', 2: 'neutral'}
    _mapping_values = {0: "1", 1: "-1", 2: "0"}

    for sentence, y_i in zip([text], y_pred):
        sentiment = Sentiment()
        sentiment['marl:hasPolarity'] = _mapping_labels[y_i]
        sentiment["marl:polarityValue"] = _mapping_values[y_i]
        entry.sentiments.append(sentiment)

    entry.nif__isString = text_input
    response.entries.append(entry)
    return response
def analyse(self, *args, **kwargs):
    self.sh['counter'] = self.sh['counter'] + 1
    e = Entry()
    e.nif__isString = self.sh['counter']
    r = Results()
    r.entries.append(e)
    return r
def analyse(self, **params):
    logger.debug("SuggestionMiningDL Analysing with params {}".format(params))

    text_input = params.get("input", None)

    st = datetime.now()
    text_sentences = self.cleanTweet(text_input)
    # [self.cleanTweet(sentence) for sentence in self.split_into_sentences(text_input)]

    # X_test = [self.convert_text_to_vector(sentence, self._tokenizer) for sentence in text_sentences]
    X_test = self.convert_text_to_vector([text_sentences], self._tokenizer)

    y_pred = self.classify(X_test)
    logger.info("{} {}".format(datetime.now() - st,
                               "tweet analysed, predicted: " + str(y_pred)))

    response = Results()
    entry = Entry()
    entry.nif__isString = text_input

    _mapping_labels = {0: False, 1: True}

    for sentence, y_i in zip([text_sentences], y_pred):
        suggestion = Suggestion()
        suggestion['hasSuggestion'] = _mapping_labels[y_i]
        suggestion["nif:beginIndex"] = 0
        suggestion["nif:endIndex"] = len(sentence)
        suggestion["nif:anchorOf"] = sentence
        entry.suggestions.append(suggestion)

    response.entries.append(entry)
    return response
def test_jsonld(self):
    prueba = {"id": "test", "analysis": [], "entries": []}
    r = Results(**prueba)
    print("Response's context: ")
    pprint(r._context)

    assert r.id == "test"

    j = r.jsonld(with_context=True)
    print("As JSON:")
    pprint(j)
    assert ("@context" in j)
    assert ("marl" in j["@context"])
    assert ("entries" in j["@context"])
    assert (j["@id"] == "test")
    assert "id" not in j

    r6 = Results(**prueba)
    e = Entry({"@id": "ohno", "nif:isString": "Just testing"})
    r6.entries.append(e)
    logging.debug("Response 6: %s", r6)
    assert ("marl" in r6._context)
    assert ("entries" in r6._context)
    j6 = r6.jsonld(with_context=True)
    logging.debug("jsonld: %s", j6)
    assert ("@context" in j6)
    assert ("entries" in j6)
    assert ("analysis" in j6)
    resp = r6.flask()
    received = json.loads(resp.data.decode())
    logging.debug("Response: %s", j6)
    assert (received["entries"])
    assert (received["entries"][0]["nif:isString"] == "Just testing")
    assert (received["entries"][0]["nif:isString"] != "Not testing")
def test_str(self):
    """The string representation shouldn't include private variables"""
    r = Results()
    p = plugins.Plugin({"name": "STR test", "version": 0})
    p._testing = 0
    s = str(p)
    assert "_testing" not in s
    r.analysis.append(p)
    s = str(r)
    assert "_testing" not in s
def analyse(self, **params):
    logger.debug("Hashtag SVM Analysing with params {}".format(params))

    text_input = params.get("input", None)
    self.ESTIMATOR = params.get("estimator", 'LinearSVC')

    # EXTRACTING FEATURES
    text = self._text_preprocessor(text_input)
    X = self._convert_text_to_vector(text=text,
                                     text_input=text_input,
                                     Dictionary=self._Dictionary)
    feature_text = self._extract_features(X=X,
                                          classifiers=self._classifiers,
                                          estimator=self.ESTIMATOR)

    # GENERATING RESPONSE
    response = Results()
    entry = Entry()
    entry.nif__isString = text_input

    emotionSet = EmotionSet()
    emotionSet.id = "Emotions"

    if self.ESTIMATOR == 'SVC':
        emotionSet.onyx__maxIntensityValue = float(100.0)

    emotion1 = Emotion()
    for dimension in ['V', 'A', 'D']:
        weights = [feature_text[i] for i in feature_text if i != 'surprise']
        if not all(v == 0 for v in weights):
            value = np.average([self.centroids[i][dimension]
                                for i in feature_text if i != 'surprise'],
                               weights=weights)
        else:
            value = 5.0
        emotion1[self.centroid_mappings[dimension]] = value

    emotionSet.onyx__hasEmotion.append(emotion1)

    for i in feature_text:
        if self.ESTIMATOR == 'SVC':
            emotionSet.onyx__hasEmotion.append(Emotion(
                onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                onyx__hasEmotionIntensity=feature_text[i]))
        else:
            if feature_text[i] > 0:
                emotionSet.onyx__hasEmotion.append(Emotion(
                    onyx__hasEmotionCategory=self.wnaffect_mappings[i]))

    entry.emotions = [emotionSet]
    response.entries.append(entry)
    return response
def analyse(self, **kwargs):
    params = dict(kwargs)
    txt = params["input"]
    logger.info('TXT:%s' % txt)
    endpoint = params["endpoint"]
    lang = params.get("language")
    key = params["apiKey"]

    sentiplug = params["sentiments-plugin"]
    s_params = params.copy()
    s_params.update({'algo': sentiplug,
                     'language': lang,
                     'meaningCloud-key': key})
    senti_response = requests.get(endpoint, params=s_params).json()
    logger.info('SENTIPARAMS: %s' % s_params)
    logger.info('SENTIRESPONSE: %s' % senti_response)
    if 'entries' not in senti_response:
        raise Error(senti_response)
    senti_response = Results(senti_response)
    logger.info('SENTI: %s' % senti_response)
    logger.info(senti_response)

    emoplug = params["emotions-plugin"]
    e_params = params.copy()
    e_params.update({'algo': emoplug, 'language': lang})
    emo_response = requests.get(endpoint, params=e_params).json()
    if 'entries' not in emo_response:
        raise Error(emo_response)
    emo_response = Results(emo_response)
    logger.info('EMO: %s' % emo_response)
    logger.info(emo_response)

    # Senpy Response
    response = Results()
    response.analysis = [senti_response.analysis, emo_response.analysis]
    unified = senti_response.entries[0]
    unified["emotions"] = emo_response.entries[0]["emotions"]
    response.entries.append(unified)
    return response
def test_client(self):
    endpoint = 'http://dummy/'
    client = Client(endpoint)
    with patch_requests('http://dummy/', Results()):
        resp = client.analyse('hello')
        assert isinstance(resp, Results)
    with patch_requests('http://dummy/', Error('Nothing')):
        try:
            client.analyse(input='hello', algorithm='NONEXISTENT')
            raise Exception('Exceptions should be raised. This is not golang')
        except Error:
            pass
def analyse_entry(self, entry, params):
    txt = entry.get("text", None)
    model = "general"  # general_es / general_es / general_fr
    api = 'http://api.meaningcloud.com/sentiment-2.1'
    lang = params.get("language")
    key = params["apiKey"]
    parameters = {
        'key': key,
        'model': model,
        'lang': lang,
        'of': 'json',
        'txt': txt,
        'src': 'its-not-a-real-python-sdk'
    }
    try:
        r = requests.post(api, params=parameters, timeout=3)
    except requests.exceptions.Timeout:
        raise Error("Meaning Cloud API did not respond")
    api_response = r.json()
    if not api_response.get('score_tag'):
        raise Error(r.json())
    logger.info(api_response)
    response = Results()
    agg_polarity, agg_polarityValue = self._polarity(
        api_response.get('score_tag', None))
    agg_opinion = Sentiment(id="Opinion0",
                            marl__hasPolarity=agg_polarity,
                            marl__polarityValue=agg_polarityValue,
                            marl__opinionCount=len(api_response['sentence_list']))
    entry.sentiments.append(agg_opinion)
    logger.info(api_response['sentence_list'])
    count = 1
    for sentence in api_response['sentence_list']:
        for nopinion in sentence['segment_list']:
            logger.info(nopinion)
            polarity, polarityValue = self._polarity(
                nopinion.get('score_tag', None))
            opinion = Sentiment(
                id="Opinion{}".format(count),
                marl__hasPolarity=polarity,
                marl__polarityValue=polarityValue,
                marl__aggregatesOpinion=agg_opinion.get('id'),
                nif__anchorOf=nopinion.get('text', None),
                nif__beginIndex=nopinion.get('inip', None),
                nif__endIndex=nopinion.get('endp', None))
            count += 1
            entry.sentiments.append(opinion)
    yield entry
def test_jsonld(self):
    ctx = os.path.normpath(os.path.join(__file__, "..", "..", "..", "senpy",
                                        "schemas", "context.jsonld"))
    prueba = {"id": "test", "analysis": [], "entries": []}
    r = Results(**prueba)
    print("Response's context: ")
    pprint(r.context)

    assert r.id == "test"

    j = r.jsonld(with_context=True)
    print("As JSON:")
    pprint(j)
    assert ("@context" in j)
    assert ("marl" in j["@context"])
    assert ("entries" in j["@context"])
    assert (j["@id"] == "test")
    assert "id" not in j

    r6 = Results(**prueba)
    r6.entries.append(Entry({"@id": "ohno", "nif:isString": "Just testing"}))
    logging.debug("Response 6: %s", r6)
    assert ("marl" in r6.context)
    assert ("entries" in r6.context)
    j6 = r6.jsonld(with_context=True)
    logging.debug("jsonld: %s", j6)
    assert ("@context" in j6)
    assert ("entries" in j6)
    assert ("analysis" in j6)
    resp = r6.flask()
    received = json.loads(resp.data.decode())
    logging.debug("Response: %s", j6)
    assert (received["entries"])
    assert (received["entries"][0]["nif:isString"] == "Just testing")
    assert (received["entries"][0]["nif:isString"] != "Not testing")
def analyse(self, **params):
    classifier = params.get("classifier", "polyglot-es")
    p = params.get("prefix", None)
    response = Results(prefix=p)
    (entities, types, startIndexes, endIndexes) = wrapper.service(
        params.get("input"), classifier)
    print(entities, types, startIndexes, endIndexes)
    for x in range(0, len(entities)):
        entry = Entry(id="Entry" + str(x),
                      prefix=p,
                      anchorOf=entities[x],
                      taClassRef="dbo:" + types[x],
                      startIndex=startIndexes[x],
                      endIndex=endIndexes[x])
        response.entries.append(entry)
    return response
def analyse(self, **params):
    logger.debug("emotionService with params {}".format(params))

    filename = params.get("i", None)

    ## FILE MANIPULATIONS -------------------------------
    if validators.url(filename):
        filename = self._download_file(saveFolder=self._storage_path,
                                       url=filename)
    else:
        filename = os.path.join(self._storage_path, filename)

    logger.info("{} {}".format(datetime.now(), filename))

    if not os.path.isfile(filename):
        raise Error("File %s does not exist" % filename)

    ## EXTRACTING FEATURES -------------------------------
    feature_set = self._extract_features(filename, convert=True)
    # self._remove_file(filename)

    ## GENERATING OUTPUT ---------------------------------
    response = Results()
    entry = Entry()
    entry['filename'] = os.path.basename(filename)

    emotionSet = EmotionSet()
    emotionSet.id = "Emotions"

    emotion1 = Emotion()
    for dimension in self._dimensions:
        emotion1[self._centroid_mappings[dimension]] = 5 * (1 + feature_set[dimension])

    emotionSet.onyx__hasEmotion.append(emotion1)

    entry.emotions = [emotionSet]
    response.entries.append(entry)
    return response
def test_convert_emotions(self):
    self.senpy.activate_all(sync=True)
    plugin = Plugin({
        'id': 'imaginary',
        'onyx:usesEmotionModel': 'emoml:fsre-dimensions'
    })
    eSet1 = EmotionSet()
    activity = plugin.activity()
    eSet1.prov(activity)
    eSet1['onyx:hasEmotion'].append(
        Emotion({
            'emoml:arousal': 1,
            'emoml:potency': 0,
            'emoml:valence': 0
        }))
    response = Results({
        'activities': [activity],
        'entries': [
            Entry({
                'nif:isString': 'much ado about nothing',
                'onyx:hasEmotionSet': [eSet1]
            })
        ]
    })
    params = {
        'emotion-model': 'emoml:big6',
        'algorithm': ['conversion'],
        'conversion': 'full'
    }
    r1 = deepcopy(response)
    r1.parameters = params
    self.senpy.analyse(r1)
    assert len(r1.entries[0].emotions) == 2

    params['conversion'] = 'nested'
    r2 = deepcopy(response)
    r2.parameters = params
    self.senpy.analyse(r2)
    assert len(r2.entries[0].emotions) == 1
    assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1

    params['conversion'] = 'filtered'
    r3 = deepcopy(response)
    r3.parameters = params
    self.senpy.analyse(r3)
    assert len(r3.entries[0].emotions) == 1
    r3.jsonld()
def test_client(self):
    endpoint = 'http://dummy/'
    client = Client(endpoint)
    with patch_requests(Results()) as (request, response):
        resp = client.analyse('hello')
        assert isinstance(resp, Results)
    request.assert_called_with(url=endpoint + '/',
                               method='GET',
                               params={'input': 'hello'})
    with patch_requests(Error('Nothing')) as (request, response):
        try:
            client.analyse(input='hello', algorithm='NONEXISTENT')
            raise Exception('Exceptions should be raised. This is not golang')
        except Error:
            pass
    request.assert_called_with(url=endpoint + '/',
                               method='GET',
                               params={'input': 'hello',
                                       'algorithm': 'NONEXISTENT'})
def analyse_entry(self, entry, params):
    text_input = entry.get("text", None)
    text = self._my_preprocessor(text_input)

    feature_text = self._extract_features(text)

    response = Results()

    emotionSet = EmotionSet(id="Emotions0")
    emotions = emotionSet.onyx__hasEmotion

    for i in feature_text:
        emotions.append(Emotion(onyx__hasEmotionCategory=self._wnaffect_mappings[i],
                                onyx__hasEmotionIntensity=feature_text[i]))

    entry.emotions = [emotionSet]
    yield entry
def test_client(self):
    endpoint = 'http://dummy/'
    client = Client(endpoint)
    success = Call(Results())
    with patch('requests.request', return_value=success) as patched:
        resp = client.analyse('hello')
        assert isinstance(resp, Results)
    patched.assert_called_with(url=endpoint + '/',
                               method='GET',
                               params={'input': 'hello'})
    error = Call(Error('Nothing'))
    with patch('requests.request', return_value=error) as patched:
        try:
            client.analyse(input='hello', algorithm='NONEXISTENT')
            raise Exception('Exceptions should be raised. This is not golang')
        except Error:
            pass
    patched.assert_called_with(url=endpoint + '/',
                               method='GET',
                               params={'input': 'hello',
                                       'algorithm': 'NONEXISTENT'})
def test_convert_emotions(self):
    self.senpy.activate_all(sync=True)
    plugin = Plugin({
        'id': 'imaginary',
        'onyx:usesEmotionModel': 'emoml:fsre-dimensions'
    })
    eSet1 = EmotionSet()
    eSet1.prov__wasGeneratedBy = plugin['id']
    eSet1['onyx:hasEmotion'].append(
        Emotion({
            'emoml:arousal': 1,
            'emoml:potency': 0,
            'emoml:valence': 0
        }))
    response = Results({
        'entries': [Entry({
            'text': 'much ado about nothing',
            'emotions': [eSet1]
        })]
    })
    params = {'emotionModel': 'emoml:big6', 'conversion': 'full'}
    r1 = deepcopy(response)
    self.senpy.convert_emotions(r1, [plugin], params)
    assert len(r1.entries[0].emotions) == 2

    params['conversion'] = 'nested'
    r2 = deepcopy(response)
    self.senpy.convert_emotions(r2, [plugin], params)
    assert len(r2.entries[0].emotions) == 1
    assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1

    params['conversion'] = 'filtered'
    r3 = deepcopy(response)
    self.senpy.convert_emotions(r3, [plugin], params)
    assert len(r3.entries[0].emotions) == 1
def analyse(self, **params):
    lang = params.get("language", "auto")

    response = Results()
    polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
    polarity = "marl:Neutral"
    if polarity_value > 0:
        polarity = "marl:Positive"
    elif polarity_value < 0:
        polarity = "marl:Negative"
    entry = Entry({"id": ":Entry0",
                   "nif:isString": params["input"]})
    sentiment = Sentiment({"id": ":Sentiment0",
                           "marl:hasPolarity": polarity,
                           "marl:polarityValue": polarity_value})
    sentiment["prov:wasGeneratedBy"] = self.id
    entry.sentiments = []
    entry.sentiments.append(sentiment)
    entry.language = lang
    response.entries.append(entry)
    return response
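# A standalone sketch of the sign-based polarity mapping used by the random
# sentiment plugin above; the helper name is hypothetical and not part of the
# plugin, it only isolates the thresholding step for illustration.
def polarity_label(polarity_value):
    """Map a numeric polarity in [-1, 1] to a marl polarity label."""
    if polarity_value > 0:
        return "marl:Positive"
    if polarity_value < 0:
        return "marl:Negative"
    return "marl:Neutral"

assert polarity_label(0.35) == "marl:Positive"
assert polarity_label(-0.2) == "marl:Negative"
assert polarity_label(0) == "marl:Neutral"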
def analyse(self, **params):
    logger.debug("wassaRegression LSTM Analysing with params {}".format(params))

    st = datetime.now()

    text_input = params.get("input", None)
    text = self._text_preprocessor(text_input)

    self.ESTIMATOR = params.get("estimator", 'LSTM')

    if self.ESTIMATOR == 'LSTM':
        X_lstm = self._lists_to_vectors(text=text)
        feature_text = self._extract_features(X_lstm)
    elif self.ESTIMATOR == 'averaged':
        X_lstm = self._lists_to_vectors(text=text)
        X_svr = self._convert_text_to_vector(text=text, text_input=text_input)
        feature_text_lstm = self._extract_features(X_lstm)
        feature_text_svr = self._extract_features_svr(X_svr)
        feature_text = {
            emo: np.mean([feature_text_lstm[emo], feature_text_svr[emo]])
            for emo in self._emoNames
        }
    else:
        X_svr = self._convert_text_to_vector(text=text, text_input=text_input)
        feature_text = self._extract_features_svr(X_svr)

    logger.info("{} {}".format(datetime.now() - st, "string analysed"))

    response = Results()

    entry = Entry()
    entry.nif__isString = text_input

    emotionSet = EmotionSet()
    emotionSet.id = "Emotions"
    emotionSet.onyx__maxIntensityValue = float(100.0)

    emotion1 = Emotion()
    for dimension in ['V', 'A', 'D']:
        weights = [feature_text[i] for i in feature_text]
        if not all(v == 0 for v in weights):
            value = np.average(
                [self.centroids[i][dimension] for i in feature_text],
                weights=weights)
        else:
            value = 5.0
        emotion1[self.centroid_mappings[dimension]] = value

    emotionSet.onyx__hasEmotion.append(emotion1)

    for i in feature_text:
        emotionSet.onyx__hasEmotion.append(
            Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                    onyx__hasEmotionIntensity=float(feature_text[i]) *
                    emotionSet.onyx__maxIntensityValue))

    entry.emotions = [emotionSet]

    response.entries.append(entry)
    return response
def analyse(self, **params):
    logger.debug("Hashtag LSTM Analysing with params {}".format(params))

    text_input = params.get("input", None)
    self._ESTIMATION = params.get("estimation", 'Probabilities')

    # EXTRACTING FEATURES
    text = self._text_preprocessor(text_input)
    X = self._lists_to_vectors(text=text)
    feature_text = self._extract_features(X=X)

    # GENERATING RESPONSE
    response = Results()
    entry = Entry()
    entry.nif__isString = text_input

    emotionSet = EmotionSet()
    emotionSet.id = "Emotions"

    if self._ESTIMATION == 'Probabilities':
        emotionSet.onyx__maxIntensityValue = float(100.0)

    emotion1 = Emotion()
    for dimension in ['V', 'A', 'D']:
        weights = [feature_text[i] for i in feature_text if i != 'surprise']
        if not all(v == 0 for v in weights):
            value = np.average([self.centroids[i][dimension]
                                for i in feature_text if i != 'surprise'],
                               weights=weights)
        else:
            value = 5.0
        emotion1[self.centroid_mappings[dimension]] = value

    emotionSet.onyx__hasEmotion.append(emotion1)

    for i in feature_text:
        if self._ESTIMATION == 'Probabilities':
            emotionSet.onyx__hasEmotion.append(
                Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                        onyx__hasEmotionIntensity=float(feature_text[i]) * 100))
        elif self._ESTIMATION == 'Classes':
            if feature_text[i] > 0:
                emotionSet.onyx__hasEmotion.append(
                    Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i]))
                # onyx__hasEmotionIntensity=int(feature_text[i])

    entry.emotions = [emotionSet]

    response.entries.append(entry)
    return response
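# A standalone sketch of the weighted-centroid step shared by the emotion
# plugins above: each predicted category weight pulls the V/A/D value towards
# that category's centroid, with a neutral fallback of 5.0 when every weight
# is zero. The centroid numbers and the helper name below are made up for
# illustration only.
import numpy as np

centroids = {
    'anger':   {'V': 2.0, 'A': 7.0, 'D': 6.0},
    'joy':     {'V': 8.0, 'A': 6.0, 'D': 6.5},
    'sadness': {'V': 2.5, 'A': 3.0, 'D': 2.5},
}

def dimension_value(weights, dimension):
    """Weighted average of centroid values; neutral 5.0 if all weights are 0."""
    labels = list(weights)
    values = [weights[label] for label in labels]
    if all(v == 0 for v in values):
        return 5.0
    return np.average([centroids[label][dimension] for label in labels],
                      weights=values)

weights = {'anger': 0.1, 'joy': 0.8, 'sadness': 0.1}
print({d: dimension_value(weights, d) for d in ['V', 'A', 'D']})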
def test_analyse_empty(self):
    """Trying to analyse when no plugins are installed should raise an error."""
    senpy = Senpy(plugin_folder=None, app=self.app, default_plugins=False)
    self.assertRaises(Error, senpy.analyse, Results(), [])
def analyse(self, *args, **kwargs):
    sleep(float(kwargs.get("timeout", self.timeout)))
    return Results()
def analyse(self, *args, **kwargs):
    return Results()