Example #1
def barebones_runParsed():
    res = blockspring.runParsed("historic-stock-prices", {
        "ticker": "MSFT",
        "start_date": "2014-1-1",
        "end_date": "2014-1-2"
    })
    print(res.params["historical_quotes"])
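For context, runParsed sends the named block's inputs to Blockspring and returns a parsed response whose .params dict maps output names to values. A minimal standalone sketch, assuming the blockspring package is installed and an API key is configured:

import blockspring

res = blockspring.runParsed("historic-stock-prices", {
    "ticker": "MSFT",
    "start_date": "2014-1-1",
    "end_date": "2014-1-2"
})
# .params behaves like a dict, so the outputs can be enumerated generically.
for name, value in res.params.items():
    print(name, value)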
Example #2
def _svg2png(_svgfile, _name):
    # Read the SVG source and convert it to a PNG via the svg-to-png block.
    with open(_svgfile, "r") as svg:
        source = blockspring.runParsed("svg-to-png", {
            "svg_string": svg.read()
        }).params["png_file"]
    # Move the generated PNG into the working directory, then drop the SVG.
    shutil.move(str(source), os.path.join(os.getcwd(), _name + ".png"))
    os.remove(os.path.join(os.getcwd(), _name + ".svg"))
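A hypothetical call, assuming logo.svg sits in the working directory and that os, shutil, and blockspring are imported as the helper requires:

# Hypothetical filenames; writes ./logo.png and removes ./logo.svg.
_svg2png("logo.svg", "logo")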
Example #3
def methodForth():
    print(
        blockspring.runParsed(
            "stock-price-comparison", {
                "tickers": "FB,BIDU,TWTR",
                "start_date": "2014-01-01",
                "end_date": "2015-01-01"
            }))
Example #4
    def create_article_file(self, category, number_of_articles):
        title_list = list(
            blockspring.runParsed(
                "get-wikipedia-articles-in-category", {"category": category, "limit": number_of_articles}
            ).params.values()
        )
        category_list = title_list[0]
        article_file = open(category, "a")

        for title in category_list:
            # Append the article body; encode to UTF-8 bytes for the Python 2 file object.
            article_file.write(wikipedia.page(title).content.encode("utf-8"))
        article_file.close()
Example #5
	def createTrainingData(self, categories, numberOfArticles):
		for category in categories:
			print(category)
			titleList = list(blockspring.runParsed("get-wikipedia-articles-in-category", { "category": category, "limit": numberOfArticles }).params.values())
			categoryList = titleList[0]
			articleFile = open(category, 'w+')

			for cat in categoryList:
				# Tag each article with its category, then extract noun phrases.
				articleFile.write("[" + category + "] ")
				articleContent = TextBlob(wikipedia.page(cat).content.encode('UTF-8'))
				nouns = articleContent.noun_phrases
				self.get_nouns(nouns, articleFile)
			articleFile.close()
Example #6
def getWeather(st, xml_url, lt, ln, valid):
    wj = requests.get(xml_url).content.strip()

    if valid:
        if not wj.startswith("<?xml"):
            print("*** NOT XML *** " + st + " " + xml_url)
            wj = "ERROR"
        else:
            # Convert the XML payload to a dict, then serialize it as JSON.
            ja = blockspring.runParsed("xml-to-json", { "my_xml": wj }).params["converted"]
            wj = json.dumps(ja)
    else:
        # Fall back to the getOW lookup by latitude/longitude.
        wj = getOW(lt, ln)

    return wj
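A hypothetical invocation (the station id, URL, and coordinates are placeholders, and getOW is assumed to be defined elsewhere in the project):

# Returns a JSON string on success, or "ERROR" if the feed is not XML.
weather = getWeather("KSFO", "http://example.com/feed.xml", 37.6, -122.4, True)
print(weather)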
Example #7
	def createTrainingData(self, categories, numberOfArticles):
		for category in categories:
			titleList = list(blockspring.runParsed("get-wikipedia-articles-in-category",{ "category": category, "limit": numberOfArticles }).params.values())
			categoryList = titleList[0]

			articleFile = open(category,'w+')
			
			for cat in categoryList:
				articleFile.write("[" + cat + "] ")
				articleContent = TextBlob(wikipedia.page(cat).content.encode('UTF-8'))
				nouns = articleContent.noun_phrases
				for noun in nouns:
					# Skip noun phrases containing '=' or non-ASCII characters.
					if '=' in noun: continue
					try:
						noun.decode('ascii')
					except UnicodeError:
						continue
					articleFile.write(noun + " ")

			articleFile.close()
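The decode('ascii') trick works because, under Python 2, decoding a unicode noun phrase raises for non-ASCII content. An equivalent self-contained predicate, for illustration:

def is_ascii(phrase):
    # True when the phrase survives an ASCII round-trip (Python 2 semantics).
    try:
        phrase.decode('ascii')
        return True
    except UnicodeError:
        return False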
Example #8
import blockspring
import json

print(blockspring.runParsed(
    "stock-price-comparison", {
        "tickers": "FB, LNKD, TWTR",
        "start_date": "2014-01-01",
        "end_date": "2015-01-01"
    }).params)
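The otherwise unused json import suggests serializing the result; a small sketch along those lines (the filename is arbitrary, and it assumes the parsed outputs are JSON-serializable):

data = blockspring.runParsed(
    "stock-price-comparison", {
        "tickers": "FB, LNKD, TWTR",
        "start_date": "2014-01-01",
        "end_date": "2015-01-01"
    }).params

# Persist the parsed outputs for later inspection.
with open("comparison.json", "w") as f:
    json.dump(data, f, indent=2)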
Example #9
	def createTrainingData(categories, numberOfArticles):
		for category in categories:
			print("Category: " + str(category))
			try:
				titleList = list(blockspring.runParsed("get-wikipedia-articles-in-category",{ "category": category, "limit": numberOfArticles }).params.values())[0]
			except:
				print "Error in getting articles in category."
				continue

			articleFile = open(category,'a')
			articleFile.write("[" + category + "] ")

			for cat in titleList:
				try:
					articleContent = TextBlob(wikipedia.page(cat).content.encode('UTF-8'))
				except:
					print("Error in getting article.")
					continue
				try:
					nouns = articleContent.noun_phrases
				except:
					print("Error in getting nouns.")
					continue
				for noun in nouns:
					# Skip noun phrases containing '=' or non-ASCII characters.
					if '=' in noun: continue
					try:
						noun.decode('ascii')
					except UnicodeError:
						continue
					articleFile.write(noun + " ")

			subcategories = list(blockspring.runParsed("get-wikipedia-sub-categories", { "category": category}).params.values())[0]

			for subcategory in subcategories:
				print ("Subcategory: " + str(subcategory))
				try:
					titleList = list(blockspring.runParsed("get-wikipedia-articles-in-category",{ "category": subcategory, "limit": 2 }).params.values())
				except:
					print "Error in getting articles in subcategory"
					continue

				categoryList = titleList[0]

				for cat in categoryList:
					try:
						articleContent = TextBlob(wikipedia.page(cat).content.encode('UTF-8'))
					except:
						print("Error in getting article.")
						continue
					try:
						nouns = articleContent.noun_phrases
					except:
						print("Error in getting nouns.")
						continue
					for noun in nouns:
						# Skip noun phrases containing '=' or non-ASCII characters.
						if '=' in noun: continue
						try:
							noun.decode('ascii')
						except UnicodeError:
							continue
						articleFile.write(noun + " ")

			articleFile.close()
Example #10
def find_polarity_by_url(url):
    polarity = blockspring.runParsed(
        "sentiment-analysis-from-url-with-alchemyapi",
        { "url": url, "score_only": True },
        { "api_key": "br_30151_58a0e12e4ade73883d4ce599098f8126d34387e2" }
    ).params.values()
    print(polarity)
    return polarity
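A hypothetical call (the URL is a placeholder; score_only restricts the AlchemyAPI output to the sentiment score):

# Placeholder URL; prints and returns the parsed polarity values.
find_polarity_by_url("http://example.com/article")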
Example #11
File: test.py Project: andfor/ai-project
import blockspring
import json
import sys


'''
This script uses Blockspring to fetch the articles in a Wikipedia category
and writes their content to a file named after the category.
'''
category = "Physics"


titleList = list(blockspring.runParsed("get-wikipedia-articles-in-category",{ "category": category, "limit": 500 }).params.values())

categoryList = titleList[0]
articleFile = open(category,'w+')


countFound = 0
countNotFound = 0
for title in categoryList:
	text = blockspring.runParsed("get-wikipedia-article-content", { "title": title, "parse": True }).params
	try:
		content = text['content'].encode('UTF-8')
		articleFile.write(content)
		countFound += 1
	except:
		# Articles whose content could not be fetched are counted and skipped.
		print(title)
		countNotFound += 1
		continue

articleFile.close()