def submission(submission_id): r = praw.Reddit(user_agent=user_agent) s = ServiceClient(config.TOKEN, config.ENDPOINT) submission = r.get_submission(submission_id=submission_id, comment_sort="top") comments = praw.helpers.flatten_tree(submission.comments) comments = sorted(comments, reverse=True, key=lambda x: x.score)[0:20] for comment in comments: res = s.request(input=comment.body, intype="direct", informat="text", outformat="json-ld") print res comment.results = res comment.polarity = 0 num = 0 for entry in res["entries"]: for opinion in entry["opinions"]: print "Polarity: %s" % comment.polarity num+=1 comment.polarity+=opinion["marl:polarityValue"] comment.polarity = comment.polarity / num return render_template('comments.html', comments=comments)
class PositiveWordsMatcher:
    """Count positive-sentiment word matches in a text.

    Uses remote services to detect the text's language and domain, builds a
    SPARQL query for the positive sentiment entries of that language/domain,
    and counts how many of the returned words occur in the text.
    """

    def __init__(self):
        self.language_detector = ServiceClient(LANG_DETECTION_URL, TOKEN)
        self.domain_detector = ServiceClient(DOMAIN_DETECTION_URL, TOKEN)
        self.resource_client = ResourceClient(RESOURCES_URL, TOKEN)

    def positive_words(self, text):
        """Return the number of positive sentiment-word matches in *text*.

        :param text: the text to analyse.
        :returns: result of ``matches_count(text, sentiment_words)``.
        """
        lang_result = self.language_detector.request({"text": text})
        language = lang_result.get("dc:language", None)
        domain_result = self.domain_detector.request({"text": text})
        # NOTE(review): assumes the service returns a prefixed value such as
        # "prefix:domain"; an unprefixed value would raise IndexError —
        # confirm the upstream contract.
        domain = domain_result["domain"].split(":")[1]
        query = sparql(POSITIVE_ENTRIES, language, domain)
        # Renamed from `input` to avoid shadowing the builtin.
        payload = {"query": query, "format": "application/json"}
        resources_result = self.resource_client.request(payload)
        sentiment_words = self.__extract_words_from_response(resources_result)
        return matches_count(text, sentiment_words)

    def __extract_words_from_response(self, resources_response):
        """Collect the sentiment entry words from a SPARQL JSON response.

        Bug fix: bindings without a ``sentimentEntryWR``/``value`` entry are
        now skipped instead of contributing a spurious ``None`` to the set.

        :param resources_response: parsed SPARQL JSON results document.
        :returns: set of word strings.
        """
        words = set()
        bindings = resources_response.get("results", {}).get("bindings", [])
        for binding in bindings:
            value = binding.get("sentimentEntryWR", {}).get("value")
            if value is not None:
                words.add(value)
        return words
def __init__(self):
    """Initialize the remote clients this object relies on.

    The two detection services share the same client type; the SPARQL
    resource endpoint uses a dedicated ResourceClient.
    """
    self.resource_client = ResourceClient(RESOURCES_URL, TOKEN)
    self.domain_detector = ServiceClient(DOMAIN_DETECTION_URL, TOKEN)
    self.language_detector = ServiceClient(LANG_DETECTION_URL, TOKEN)