def download_histo_data_from_yahoo(self, date_from=None, date_to=None):
    """Download historical daily quotes for self.symbol from Yahoo Finance.

    date_from / date_to default to one year ago / today, computed at CALL
    time.  (The original computed them in the def line, so they were frozen
    at import time for the life of the process — a classic default-argument
    pitfall.)  Passing explicit dates works exactly as before.

    On success, stores the parsed CSV in self.histo_data as
    [header_row] + data rows ordered oldest-first, with numeric cells cast
    to float, and appends an entry to self.logs.
    """
    if date_to is None:
        date_to = datetime.date.today()
    if date_from is None:
        date_from = datetime.date(date_to.year - 1, date_to.month, date_to.day)
    if self.is_valid:
        r = requests.get("http://ichart.finance.yahoo.com/table.csv",
                         params={'s': self.symbol,
                                 'a': str(date_from.month - 1),  # Yahoo months are 0-based
                                 'b': str(date_from.day),
                                 'c': str(date_from.year),
                                 'd': str(date_to.month - 1),
                                 'e': str(date_to.day),
                                 'f': str(date_to.year),
                                 'g': 'd',  # daily interval
                                 'ignore': '.csv'})
        csv_data = r.content
        csv_data = csv_data.split('\n')
        data_histo = []
        if len(csv_data) > 0:  # check data are available
            for line in csv_data:
                if len(line) > 0:  # sometimes the last line is empty
                    data_histo.append(line.split(','))
            # header + reversed timeserie (oldest first).
            # NOTE(fix): the original used data_histo[:1:-1], which stops
            # BEFORE index 1 and therefore silently dropped the first data
            # row; [:0:-1] keeps every data row.
            data_histo = [data_histo[0]] + data_histo[:0:-1]
            # cast numeric cells to float
            for i in range(1, len(data_histo)):  # skip first row (header)
                for j in range(1, len(data_histo[i])):  # skip first column (date)
                    if utility.is_number(data_histo[i][j]):
                        data_histo[i][j] = float(data_histo[i][j])
            self.histo_data = data_histo
            self.logs.append([datetime.datetime.now(), "found histo datas on yahoo finance " + str(len(csv_data)-1) + " entries"])
def get_totallpage_num(self, content):
    """Extract the total number of result pages from a search page.

    Looks for the span marked class="W_textc", keeps only the digit
    characters of its text and stores the result in self.page_num.
    Returns False on failure (keyword absent, tag not found, or any
    exception); returns None implicitly on success, as before.
    """
    try:
        if self.keyword_exist(content):
            bTag = 'class="W_textc">'
            # NOTE(fix): the original searched for '<\/span>' — a literal
            # backslash (a JSON/JS escape) that never occurs in HTML, so
            # the closing tag was never found.
            eTag = '</span>'
            pos1 = content.find(bTag)
            count = ''
            if pos1 != -1:
                pos1 = pos1 + len(bTag)
                pos2 = content.find(eTag, pos1)
                if pos2 != -1:
                    slug = unicode(content[pos1:pos2], 'utf-8')
                    for i in slug:
                        if utility.is_number(i):
                            count += i
                    self.page_num = int(count)
                else:
                    logging.info('%s 关键词总页数获取失败', self.get_keyword())
                    return False
        else:
            logging.info('%s 关键词不存在', self.get_keyword())
            return False
    except Exception as e:  # py2.6+/py3-compatible form of "except Exception,e"
        logging.exception("%s 获取总页数失败: " + str(e), self.get_keyword())
        return False
def read_csv(file_path, delimiter=',', skip_lines=()):
    """Read a CSV file into a list of rows.

    Numeric-looking cells (per is_number) are converted to float; every
    other cell stays a string.  Lines whose 1-based line number is in
    *skip_lines* are dropped.

    NOTE(fix): the default was a mutable list ([]), a classic Python
    pitfall; an empty tuple is behaviorally identical and safe.
    """
    with open(file_path) as f:
        reader = csv.reader(f, delimiter=delimiter)
        data = []
        for line in reader:
            if reader.line_num in skip_lines:
                continue
            data.append([float(v) if is_number(v) else v for v in line])
    return data
def change_amount(self):
    """Read the amount typed in the text field and add it to the account.

    Empty input and non-numeric input are reported on stdout and leave
    the account untouched.

    NOTE(fix): the original tested ``amount is ""`` — an identity check
    against a string literal, which is implementation-dependent (relies
    on string interning) — replaced with a plain equality test.
    """
    amount = self.amountEdit.toPlainText()
    if amount == "":
        print("there is no amount")
    elif is_number(amount):
        print("adding value to account...")
        self.account.set_amount(self.account.get_amount() + float(amount))
        print("new Value ", self.account.amount)
    else:
        print("it is not a number")
def dict_read_csv(file_path, delimiter=',', skip_lines=()):
    """Read a CSV file with a header row into a dict of columns.

    Returns a defaultdict mapping each header name to the list of its
    column values, with numeric-looking cells (per is_number) converted
    to float.  Lines whose 1-based line number is in *skip_lines* are
    dropped.

    NOTE(fix): the default was a mutable list ([]), a classic Python
    pitfall; an empty tuple is behaviorally identical and safe.
    """
    from collections import defaultdict  # kept function-local, as in the original
    with open(file_path) as f:
        reader = csv.DictReader(f, delimiter=delimiter)
        data = defaultdict(list)
        for line in reader:
            if reader.line_num in skip_lines:
                continue
            for (k, v) in line.items():
                data[k].append(float(v) if is_number(v) else v)
    return data
def download_last_datas_from_yahoo(self):
    """Fetch the live quote page for self.symbol from Yahoo Finance and
    scrape last price, previous close, daily return, company name, beta,
    next earnings date, market cap, P/E and EPS into instance attributes.
    Sets self.is_valid according to whether the symbol was recognized.
    """
    r = requests.get("http://finance.yahoo.com/q", params={'s' : self.symbol})
    soup = BeautifulSoup(r.text)
    # Yahoo redirects unknown tickers to its "Symbol Lookup" page
    if (soup.title.string.find('Symbol Lookup from Yahoo') != -1): # invalid symbol
        self.is_valid = False
        self.logs.append([datetime.datetime.now(), "invalid ticker"])
    else:
        self.is_valid = True
        self.logs.append([datetime.datetime.now(), "ticker seems valid"])
        # parse the HTML to pull the quote values out of the page
        for tag_last_price in soup.find('span', id='yfs_l84_' + self.symbol.lower()):
            if utility.is_number(tag_last_price.string):
                self.last_price = float(tag_last_price.string)
                self.logs.append([datetime.datetime.now(), "found last price on yahoo finance"])
                break
        for tag_prev_close in soup.find_all('th', text=re.compile("Prev Close:")):
            if utility.is_number(tag_prev_close.next_sibling.string):
                self.previous_close = float(tag_prev_close.next_sibling.string)
                # percentage move versus previous close, rounded to 2 decimals
                self.daily_return = round(100*((self.last_price/self.previous_close)-1),2)
                break
        self.company_name = soup.find('div', id='yfi_rt_quote_summary').find('h2').string
        for tag_beta in soup.find_all('th', text=re.compile("Beta:")):
            if utility.is_number(tag_beta.next_sibling.string):
                self.beta = float(tag_beta.next_sibling.string)
                break
        for tag_ern in soup.find_all('th', text=re.compile('Next Earnings Date:')):
            if tag_ern.next_sibling.text != '':
                self.next_earnings_date = tag_ern.next_sibling.text
                break
        for tag_market_cap in soup.find_all('span', id='yfs_j10_' + self.symbol.lower()):
            # [:-1] strips the trailing unit letter (presumably B/M — TODO confirm)
            # before the numeric test; the raw string is stored
            if utility.is_number(tag_market_cap.string[:-1]):
                self.market_cap = tag_market_cap.string
                break
        # "ttm" (trailing-twelve-months) labelled rows carry both P/E and EPS
        for ttm_span in soup.find_all('span', text=re.compile('ttm')):
            if ttm_span.parent.text[:3] == 'P/E':
                if utility.is_number(ttm_span.parent.next_sibling.text):
                    self.pe = float(ttm_span.parent.next_sibling.text)
            elif ttm_span.parent.text[:3] == 'EPS':
                if utility.is_number(ttm_span.parent.next_sibling.text):
                    self.eps = float(ttm_span.parent.next_sibling.text)
def get_content(self, content):
    """Parse one search-result page and persist every weibo item on it.

    For each 'dl' feed item, collects fields into self.weibomsg — mid,
    author (uid/un/iu), text (mc), picture (pu), timestamp + permalink
    (pt/page), source client (srn), repost/comment counts (rc/cc) — and
    flushes the record via writeResult().  Returns True once the page
    accepted by preprocess() has been processed.
    """
    if (self.preprocess(content)):
        itemlist = self.soup.find_all(
            'dl', attrs={'action-type': 'feed_list_item'})
        if (itemlist != None):
            for item in itemlist:
                self.init_weibo()
                # message id
                self.weibomsg['mid'] = item['mid']
                # author block: user name, uid, avatar url
                dt = item.dt
                if (dt != None):
                    a = dt.a
                    if (a != None):
                        self.weibomsg['un'] = a['title']
                        uid = a['suda-data']
                        pos = uid.rfind(':')
                        self.weibomsg['uid'] = uid[pos + 1:]
                        img = a.img
                        if (img != None):
                            self.weibomsg['iu'] = img['src']
                # message body container.
                # NOTE(fix): this assignment was commented out in the
                # original, leaving `dd` undefined (NameError) on its
                # first use just below.
                dd = item.dd
                # message text
                self.weibomsg['mc'] = dd.em.get_text()
                # retweeted content (located but not extracted yet)
                retweet = dd.find(
                    'dl', attrs={
                        'class': 'comment W_textc W_linecolor W_bgcolor'
                    })
                if (retweet != None):
                    pass
                # picture url
                imglist = dd.find('ul', {'class': 'piclist'})
                if (imglist != None):
                    img = imglist.find('img', {'class': 'bigcursor'})
                    if (img != None):
                        self.weibomsg['pu'] = img['src']
                # post time and permalink
                pt = dd.find('a', {'node-type': 'feed_list_item_date'})
                if (pt != None):
                    self.weibomsg['pt'] = pt['title']
                    self.weibomsg['page'] = pt['href']
                # source client name
                srn = dd.find('a', {'rel': 'nofollow'})
                if (srn != None):
                    self.weibomsg['srn'] = srn.get_text()
                # repost count: keep only the digit characters of the link text
                rc = dd.find('a', {'action-type': 'feed_list_forward'})
                if (rc != None):
                    rc = rc.get_text()
                    rc = utility.to_unicode(rc)
                    temp = ""
                    for i in rc:
                        if (utility.is_number(i)):
                            temp += i
                    if (temp != ''):
                        self.weibomsg['rc'] = int(temp)
                # comment count: same digit extraction
                cc = dd.find('a', {'action-type': 'feed_list_comment'})
                if (cc != None):
                    cc = cc.get_text()
                    cc = utility.to_unicode(cc)
                    temp = ""
                    for i in cc:
                        if (utility.is_number(i)):
                            temp += i
                    if (temp != ''):
                        self.weibomsg['cc'] = int(temp)
                self.writeResult()
        return True
def generate_chart(self):
    """Build a line chart (PNG) from the historical data via the Google
    Chart API (pygooglechart) and store its path in self.path_chart_1yr.
    Only runs when the ticker is valid and more than 10 histo rows exist.
    """
    if self.is_valid and len(self.histo_data) > 10:
        from pygooglechart import Chart
        from pygooglechart import SimpleLineChart
        from pygooglechart import Axis
        # Python-2 sentinel extremes for the running min/max
        price_max = -sys.maxint
        price_min = sys.maxint
        vec_price = []
        vec_date = []
        for i in range(1, len(self.histo_data)):
            # label the x axis only where the month digits of the date
            # column (chars 5:7 of "YYYY-MM-DD") change between rows
            if (i < len(self.histo_data)-1) and float(self.histo_data[i][0][5:7]) != float(self.histo_data[i+1][0][5:7]): #end of month ?
                vec_date.append(self.histo_data[i][0][5:7])
            else:
                vec_date.append('')
            # column 6 of each histo row is the plotted price
            # (presumably Yahoo's "Adj Close" — TODO confirm)
            if utility.is_number(self.histo_data[i][6]):
                vec_price.append(self.histo_data[i][6])
                if self.histo_data[i][6] < price_min:
                    price_min = self.histo_data[i][6]
                if self.histo_data[i][6] > price_max:
                    price_max = self.histo_data[i][6]
        y_scale_up = int(price_max)
        y_scale_down = int(price_min)
        # choose a gridline step proportional to the price range
        if y_scale_up-y_scale_down > 500:
            base = 100
        elif y_scale_up-y_scale_down > 200:
            base = 50
        else:
            base = 25
        # round the upper bound up to the next multiple of base...
        for i in range(1,base+1):
            if ((y_scale_up + i) % base) == 0:
                y_scale_up = y_scale_up+i
                break
        # ...and the lower bound down to the previous multiple of base
        for i in range(1,base+1):
            if ((y_scale_down - i) % base) == 0:
                y_scale_down -= i
                break
        chart = SimpleLineChart(200, 125, y_range=[y_scale_down, y_scale_up])
        chart.add_data(vec_price)
        chart.set_colours(['0000FF'])
        left_axis = range(y_scale_down, y_scale_up + 1, base)
        chart.set_axis_labels(Axis.LEFT, left_axis)
        chart.set_axis_labels(Axis.BOTTOM, vec_date)
        utility.create_missing_folder(utility.FOLDER_CHART)
        chart.download(utility.FOLDER_CHART + '/' + self.symbol + '.png')
        self.path_chart_1yr = utility.FOLDER_CHART + '/' + self.symbol + '.png'
def add_new_alert_from_window(widget_tl_to_close_if_successfull = None):
    """Create an Alert from the values currently entered in the pop-up.

    Requires a non-empty symbol, a selected action and a numeric limit;
    otherwise a message box reports the problem.  On a valid ticker the
    Alert is built and the pop-up (when given) is destroyed; an invalid
    ticker silently creates nothing, as before.
    """
    symbol = str_label_alert_symbol.get()
    action = alert_action.get()
    limit = str_entry_alert_limit.get()
    inputs_ok = (symbol != '' and action != ''
                 and limit != '' and utility.is_number(limit))
    if not inputs_ok:
        msgbox("Missing or incorrect required parameters !")
        return
    tmp_ticker = Ticker(symbol, True)
    if tmp_ticker.is_valid:
        tmp_alert = Alert(tmp_ticker, action, float(limit))
        if widget_tl_to_close_if_successfull != None:
            widget_tl_to_close_if_successfull.destroy()
def get_content(self, content):
    """Parse one search-result page and persist every weibo item on it.

    For each 'dl' feed item, collects fields into self.weibomsg — mid,
    author (uid/un/iu), text (mc), picture (pu), timestamp + permalink
    (pt/page), source client (srn), repost/comment counts (rc/cc) — and
    flushes the record via writeResult().  Returns True once the page
    accepted by preprocess() has been processed.
    """
    if( self.preprocess( content ) ):
        itemlist = self.soup.find_all('dl', attrs={'action-type':'feed_list_item'})
        if(itemlist != None):
            for item in itemlist:
                self.init_weibo()
                # message id
                self.weibomsg['mid'] = item['mid']
                # author block: user name, uid, avatar url
                dt = item.dt
                if(dt != None):
                    a = dt.a
                    if(a != None):
                        self.weibomsg['un'] = a['title']
                        uid = a['suda-data']
                        pos = uid.rfind(':')
                        self.weibomsg['uid'] = uid[pos + 1:]
                        img = a.img
                        if(img != None):
                            self.weibomsg['iu'] = img['src']
                # message body container.
                # NOTE(fix): this assignment was commented out in the
                # original, leaving `dd` undefined (NameError) on its
                # first use just below.
                dd = item.dd
                # message text
                self.weibomsg['mc'] = dd.em.get_text()
                # retweeted content (located but not extracted yet)
                retweet = dd.find('dl', attrs={'class':'comment W_textc W_linecolor W_bgcolor'})
                if(retweet != None):
                    pass
                # picture url
                imglist = dd.find('ul', {'class':'piclist'})
                if(imglist != None):
                    img = imglist.find('img', {'class':'bigcursor'})
                    if(img != None):
                        self.weibomsg['pu'] = img['src']
                # post time and permalink
                pt = dd.find('a', {'node-type':'feed_list_item_date'})
                if(pt != None):
                    self.weibomsg['pt'] = pt['title']
                    self.weibomsg['page'] = pt['href']
                # source client name
                srn = dd.find('a', {'rel':'nofollow'})
                if(srn != None):
                    self.weibomsg['srn'] = srn.get_text()
                # repost count: keep only the digit characters of the link text
                rc = dd.find('a', {'action-type':'feed_list_forward'})
                if(rc != None):
                    rc = rc.get_text()
                    rc = utility.to_unicode(rc)
                    temp = ""
                    for i in rc:
                        if(utility.is_number(i)):
                            temp += i
                    if(temp != ''):
                        self.weibomsg['rc'] = int(temp)
                # comment count: same digit extraction
                cc = dd.find('a', {'action-type':'feed_list_comment'})
                if(cc != None):
                    cc = cc.get_text()
                    cc = utility.to_unicode(cc)
                    temp = ""
                    for i in cc:
                        if(utility.is_number(i)):
                            temp += i
                    if(temp != ''):
                        self.weibomsg['cc'] = int(temp)
                self.writeResult()
        return True
# Quick manual check: scrape a live Yahoo Finance quote for one symbol.
import utility

symbol = "GOOG"
r = requests.get("http://finance.yahoo.com/q", params={'s': symbol})
soup = BeautifulSoup(r.text)
if soup.title.string.find('Symbol Lookup from Yahoo') != -1:
    # Yahoo redirects unknown tickers to its "Symbol Lookup" page.
    # print() calls work on both Python 2 and 3 (the original used
    # py2-only print statements).
    print('invalid symbol')
else:
    # previous close
    for tag_prev_close in soup.find_all('th', text=re.compile("Prev Close:")):
        if utility.is_number(tag_prev_close.next_sibling.string):
            prev_close = float(tag_prev_close.next_sibling.string)
            print(prev_close)
            break
    # last traded price
    for tag_last_price in soup.find('span', id='yfs_l84_' + symbol.lower()):
        if utility.is_number(tag_last_price.string):
            last_price = float(tag_last_price.string)
            print(last_price)
            break
    # company name (typo "compagny" fixed; the variable is local to this script)
    company_name = soup.find('div', id='yfi_rt_quote_summary').find('h2').string
    print(company_name)