def __init__(self, tweetText):
    self._tweetText = tweetText
    self._alchemyResponse = alchemy.entities("text", data=tweetText, options={"sentiment": 1})
    self._namedEntities = self._getNamedEntities()
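# A minimal sketch of the helper the constructor above calls -- _getNamedEntities
# is not shown in the original, so its behavior and the field names used here
# ("entities", "text", "type", "sentiment") are assumptions based on the parsed
# JSON the AlchemyAPI Python SDK returns for an entities request.
def _getNamedEntities(self):
    namedEntities = []
    if self._alchemyResponse.get("status") == "OK":
        for entity in self._alchemyResponse.get("entities", []):
            namedEntities.append({
                "text": entity.get("text"),
                "type": entity.get("type"),
                "sentiment": entity.get("sentiment", {}).get("type"),
            })
    return namedEntities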
def __init__(self, url):
    self._url = url
    self._alchemyResp = alchemy.text_clean(flavor="url", data=url)
    self._fullUrl, self._urlText = self._parseAlchemyResponse()
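# A minimal sketch of the helper the constructor above calls -- _parseAlchemyResponse
# is not shown in the original, so the response keys used here ("url", "text") are
# assumptions: it pulls the resolved page URL and the cleaned page text out of the
# alchemy.text_clean() result.
def _parseAlchemyResponse(self):
    fullUrl = self._alchemyResp.get("url", self._url)
    urlText = self._alchemyResp.get("text", "")
    return fullUrl, urlText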
# Load the AlchemyAPI module code. import AlchemyAPI # Create an AlchemyAPI object. alchemyObj = AlchemyAPI.AlchemyAPI() # Load the API key from disk. alchemyObj.loadAPIKey("api_key.txt") # Extract a ranked list of relations from a web URL. result = alchemyObj.URLGetRelations("http://www.techcrunch.com/"); print(result) # Extract a ranked list of relations from a text string. result = alchemyObj.TextGetRelations("Ugly bob attacked beautiful Susan."); print(result) # Load a HTML document to analyze. htmlFileHandle = open("data/example.html", 'r') htmlFile = htmlFileHandle.read() htmlFileHandle.close() # Extract a ranked list of relations from a HTML document.
from BeautifulSoup import BeautifulSoup
import AlchemyAPI
import sys
import re, urllib2, nltk

# Create an AlchemyAPI object.
alchemyObj = AlchemyAPI.AlchemyAPI()

# Load the API key from disk.
alchemyObj.loadAPIKey("api_key.txt")

def scrapePage(url):
    # Extract page text from a web URL (ignoring navigation links, ads, etc.).
    try:
        print "URL: " + url
        #url = url.replace('(', '%28')
        #url = url.replace(')', '%29')
        #print "New URL: " + url
        result = alchemyObj.URLGetText(url)
        soup = BeautifulSoup(result)
        raw = soup('text')
        raw = [text.text for text in raw]
        rawstr = ' '.join(raw)
    except Exception:
        try:
            print "\n\nscraping using regex"
            webpage = urllib2.urlopen(url).read()
            #webpage = str(webpage)
            para = re.compile('<p>(.*)</p>')  # collect data in p tags and store in para object
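            # The listing above is truncated at this point. A hedged sketch of how the
            # regex fallback might be finished (an assumption, not the original code):
            # collect the <p> bodies, strip any nested tags, and join them so that both
            # branches leave the page text in rawstr.
            raw = para.findall(webpage)
            raw = [re.sub('<[^>]+>', ' ', p) for p in raw]
            rawstr = ' '.join(raw)
        except Exception:
            rawstr = ''
    return rawstr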
# Load the AlchemyAPI module code. import AlchemyAPI # Create an AlchemyAPI object. alchemyObj = AlchemyAPI.AlchemyAPI() # Load the API key from disk. alchemyObj.loadAPIKey("api_key.txt") # Detect the language for a web URL. result = alchemyObj.URLGetLanguage("http://www.techcrunch.fr/") print result # Detect the language for a text string. (requires at least 100 characters text) result = alchemyObj.TextGetLanguage( "Hello my name is Bob Jones. I am speaking to you at this very moment. Are you listening to me, Bob?" ) print result # Load a HTML document to analyze. htmlFileHandle = open("data/example.html", 'r') htmlFile = htmlFileHandle.read() htmlFileHandle.close() # Detect the language for a HTML document. result = alchemyObj.HTMLGetLanguage(htmlFile, "http://www.test.com/") print result
# Load the AlchemyAPI module code. import AlchemyAPI # Create an AlchemyAPI object. alchemyObj = AlchemyAPI.AlchemyAPI() # Load the API key from disk. alchemyObj.loadAPIKey("api_key.txt") # Extract sentiment from a web URL. result = alchemyObj.URLGetTextSentiment("http://www.techcrunch.com/") print(result) # Extract sentiment from a text string. result = alchemyObj.TextGetTextSentiment( "I'm not impressed with Justin Bieber's new haircut.") print(result) # Load a HTML document to analyze. htmlFileHandle = open("data/example.html", 'r') htmlFile = htmlFileHandle.read() htmlFileHandle.close() # Extract sentiment from a HTML document. result = alchemyObj.HTMLGetTextSentiment(htmlFile, "http://www.test.com/") print(result) # Enable keyword-targeted sentiment. kparams = AlchemyAPI.AlchemyAPI_KeywordParams() kparams.setSentiment(1)