Example #1
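# Shared imports these snippets appear to rely on; Preference and GetStockPrice
# are project-local helper modules and timezone() is taken to be the pytz API
# (assumptions inferred from how they are used below, not confirmed by the listing).
from datetime import date, datetime, timedelta

from bs4 import BeautifulSoup
from pytz import timezone

import GetStockPrice
import Preference
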
def getCurrentStockConsenFromHK():

    # Connect to the Hankyung Consensus site.
    stock_dic_list = []

    KST = datetime.now(timezone('Asia/Seoul'))
    today_str = KST.strftime('%Y-%m-%d')
    # How many days back to search; days=0 means today only.
    startday_str = (
        datetime.today() -
        timedelta(days=Preference.getStockDaysCount())).strftime('%Y-%m-%d')
    # How many reports to request per page.
    stock_count = str(Preference.getStockLoadCount())  # kept as a string so the URL concatenation below works
    #request_url = 'http://hkconsensus.hankyung.com/apps.analysis/analysis.list?skinType=stock_good&sdate='+today_str+'&edate='+today_str+'&order_type=10010000&pagenum=150'
    #http://hkconsensus.hankyung.com/apps.analysis/analysis.list?skinType=business&sdate=2019-04-03&edate=2019-04-09&pagenum=1000&order_type=12000001&now_page=1
    request_url = 'http://hkconsensus.hankyung.com/apps.analysis/analysis.list?skinType=business&sdate=' + startday_str + '&edate=' + today_str + '&pagenum=' + stock_count + '&order_type=12000001&now_page=1'

    print(request_url)

    driver = Preference.getWebDriver()

    driver.get(request_url)
    #html = driver.page_source
    table_element = driver.find_element_by_xpath(
        '//*[@id="contents"]/div[2]/table/tbody')
    tablebody_html = table_element.get_attribute('innerHTML')
    #print tablebody_html

    soup = BeautifulSoup(tablebody_html, "html.parser")

    stock_element_list = soup.find_all('tr')

    for stock_element in stock_element_list:

        #DATE
        upload_date = stock_element.td.string
        stock_data = stock_element.find_all('td')
        stock_dic = {}
        stockcode = '000000'
        i = 1
        for astock_data in stock_data:
            if (i == 1):
                #print astock_data.text
                stock_dic['update_date'] = astock_data.text
            if (i == 2):
                #print astock_data.find('strong').text
                stock_dic['all_title'] = astock_data.find('strong').text
                all_title = stock_dic['all_title']
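                # all_title looks like "Name(005930)Report title" (assumed layout):
                # take the 6 digits after '(' as the code, the text before '(' as
                # the name, and the rest after ')' as the report title.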
                start = all_title.find('(') + 1
                end = start + 6
                stock_code = all_title[start:end]
                stock_dic['stock_code'] = stock_code
                stockcode = stock_code
                stock_name = all_title[0:all_title.find('(')]
                stock_dic['stock_name'] = stock_name
                title = all_title[end + 1:len(all_title)]
                stock_dic['title'] = title
                #print stock_dic['all_title']
                #print stock_dic['stock_code']
                #print stock_dic['stock_name']
                #print stock_dic['title']
            if (i == 3):
                # Target price from the report.
                #print astock_data.text
                stock_dic['new_price'] = astock_data.text
            if (i == 4):
                #print astock_data.text
                stock_dic['opinion'] = astock_data.text
            if (i == 5):
                #print astock_data.text
                stock_dic['analyst_name'] = astock_data.text
            if (i == 6):
                #print "####a####"
                #print astock_data.text
                stock_dic['analyst_company'] = astock_data.text

            if (i == 9):
                for link in astock_data.find_all('a', href=True):
                    url = link['href']
                stock_dic[
                    'report_url'] = 'http://hkconsensus.hankyung.com/' + url
                #print stock_dic['report_url']
                break
            i = i + 1
            #print "."

        stock_dic[
            'companyinfo_url'] = 'http://media.kisline.com/highlight/mainHighlight.nice?paper_stock=' + stockcode + '&nav=1'
        #print stock_dic['companyinfo_url']

        if (stock_dic['new_price'] != "0"):

            now_stock_price = GetStockPrice.getCurrentStockPriceNaver(
                stock_dic['stock_code'])

            stock_dic['now_price'] = now_stock_price['now_price']
            stock_dic['now_updown_rate'] = now_stock_price['updown_rate']

            print((stock_dic['stock_name']))
            print((stock_dic['new_price']))
            print((stock_dic['now_price']))

            diff_rate = float(stock_dic['now_price'].replace(',', '')) / float(
                stock_dic['new_price'].replace(',', ''))
            diff_rate = int(diff_rate * 100)
            stock_dic['diff_rate'] = str(diff_rate)

            print((stock_dic['diff_rate']))

            if (diff_rate > 20 and diff_rate < 100):
                stock_dic_list.append(stock_dic)

        #print stock_dic
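    # Sort ascending by diff_rate, i.e. how close the current price already is to the target.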
    stock_dic_list_sorted = sorted(stock_dic_list,
                                   key=lambda k: k['diff_rate'],
                                   reverse=False)

    return stock_dic_list_sorted
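
# A minimal usage sketch (assumption: this listing is run as a script with the
# shared imports above in place); it prints each candidate's name, target price
# and price gap.
if __name__ == '__main__':
    for stock in getCurrentStockConsenFromHK():
        print(stock['stock_name'], stock['new_price'], stock['diff_rate'])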
Example #2
def getUpturnStockFromHK():
    
    # Connect to the Hankyung Consensus site.
    
    stock_dic_list = []
    
    today = date.today()   
    today_str = datetime.today().strftime('%Y-%m-%d')

    yesterday = today - timedelta(Preference.getStockDaysCount())
    yesterday_str = yesterday.strftime('%Y-%m-%d')
    
    request_url = 'http://hkconsensus.hankyung.com/apps.analysis/analysis.list?skinType=stock_good&sdate='+yesterday_str+'&edate='+today_str+'&order_type=10010000&pagenum=150'
    print(request_url)
        
    
    driver = Preference.getWebDriver()   
    driver.get(request_url)
    
    table_element = driver.find_element_by_xpath('//*[@id="contents"]/div[2]/table/tbody')
    tablebody_html = table_element.get_attribute('innerHTML')
    
    soup = BeautifulSoup(tablebody_html, "html.parser")
    
    stock_element_list = soup.find_all('tr')
    
    for stock_element in stock_element_list:
        
        #DATE
        upload_date=stock_element.td.string
        stock_data = stock_element.find_all('td')
        stock_dic = {}
        stock_code = '000000'  # fallback so companyinfo_url below is still built for rows without a title
        i = 1
        for astock_data in stock_data:
            
            if(i==1):
                #print astock_data.text
                stock_dic['update_date']=astock_data.text
            if(i==2):
                #print '---'
                stock_dic['report_url']=''
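                # Recover the report link by splitting the raw cell HTML on
                # newlines, quotes and underscores to pull report_idx out of
                # the href (brittle, but matches the markup this code targets).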
                try:
                    spl1 = str(astock_data).split("\n")
                    #print spl1
                    spl2 = spl1[3].split("\"")
                    #print spl2
                    spl3 = spl2[3].split("_")
                    #print spl3
                    report_index = spl3[1]
                    #print report_index
                    stock_dic['report_url']='http://hkconsensus.hankyung.com/apps.analysis/analysis.downpdf?report_idx='+report_index
                except:
                    stock_dic['report_url']=''
                    
                stock_dic['all_title'] = astock_data.find('strong').text
                all_title = stock_dic['all_title']
                start = all_title.find('(')+1
                end = start+6    
                stock_code = all_title[start:end]
                stock_dic['stock_code']=stock_code
                stock_name = all_title[0:all_title.find('(')] 
                stock_dic['stock_name']=stock_name
                title = all_title[end+1:len(all_title)]
                stock_dic['title']=title
                #print stock_dic['all_title']
                #print stock_dic['stock_code']
                #print stock_dic['stock_name']
                #print stock_dic['title']
            if(i==3):
                #print astock_data.text
                stock_dic['analyst_name']=astock_data.text                
            if(i==4):
                #print astock_data.text
                stock_dic['analyst_company']=astock_data.text    
            if(i==5):
                #print astock_data.text
                stock_dic['new_price']=astock_data.text
            if(i==6):
                #print astock_data.text
                stock_dic['old_price']=astock_data.text
                # The old target price is the last column this loop needs,
                # so stop scanning the remaining cells of this row.
                break
            i=i+1
            #print "."
        

        stock_dic['companyinfo_url']='http://media.kisline.com/highlight/mainHighlight.nice?paper_stock='+stock_code+'&nav=1'    
        #print stock_dic['companyinfo_url']        
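        # upper_rate: percentage change from the previous target price to the new one.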
        upper_rate=float(stock_dic['new_price'].replace(',',''))/float(stock_dic['old_price'].replace(',',''))    
        upper_rate=int((upper_rate-1)*100)        
        stock_dic['upper_rate']=str(upper_rate)
        
        
        if(stock_dic['new_price'] != "0" ):
             
            now_stock_price = GetStockPrice.getCurrentStockPriceNaver(stock_dic['stock_code'])
                    
            stock_dic['now_price']=now_stock_price['now_price']
            stock_dic['now_updown_rate']=now_stock_price['updown_rate']
            
            print((stock_dic['stock_name']))
            print((stock_dic['new_price']))
            print((stock_dic['now_price']))
                  
            diff_rate=float(stock_dic['now_price'].replace(',',''))/float(stock_dic['new_price'].replace(',',''))    
            diff_rate=int(diff_rate*100)
            stock_dic['diff_rate']=str(diff_rate)
            
            print((stock_dic['diff_rate']))
            
            if(diff_rate > 20 and diff_rate < 100):
                stock_dic_list.append(stock_dic)
            
        #print stock_dic
    
    # Collect everything, then sort by the price gap in ascending order.
    stock_dic_list_sorted = sorted(stock_dic_list, key=lambda k: k['diff_rate'], reverse=False)
        
    
    return stock_dic_list_sorted
Example #3
def getStockGoodBadfromHK(stock_code):

    count_good = 0
    count_bad = 0

    try:

        driver = Preference.getWebDriver()

        today = date.today()
        pre_1month = today - timedelta(Preference.getStockDaysCount())

        today_str = today.strftime('%Y-%m-%d')
        pre_1month_str = pre_1month.strftime('%Y-%m-%d')

        # Count reports that raised their target price.
        request_url = 'http://hkconsensus.hankyung.com/apps.analysis/analysis.list?skinType=stock_good&search_text=' + stock_code + '&sdate=' + pre_1month_str + '&edate=' + today_str
        #print request_url

        driver.get(request_url)
        table_element = driver.find_element_by_xpath(
            '//*[@id="contents"]/div[2]/table/tbody')
        tablebody_html = table_element.get_attribute('innerHTML')

        soup = BeautifulSoup(tablebody_html, "html.parser")
        stock_element_list = soup.find_all('tr')

        for stock_element in stock_element_list:
            result_msg = stock_element.find('td').text
            #print result_msg
            #if(result_msg.find("결과가".decode('UTF-8'))):
            #if "결과가".decode('UTF-8') in result_msg:
            if "결과가" in result_msg:
                print("None")
            else:
                count_good = count_good + 1
        # Count reports that lowered their target price.
        request_url = 'http://hkconsensus.hankyung.com/apps.analysis/analysis.list?skinType=stock_bad&search_text=' + stock_code + '&sdate=' + pre_1month_str + '&edate=' + today_str
        #print request_url

        driver.get(request_url)
        table_element = driver.find_element_by_xpath(
            '//*[@id="contents"]/div[2]/table/tbody')
        tablebody_html = table_element.get_attribute('innerHTML')

        soup = BeautifulSoup(tablebody_html, "html.parser")
        stock_element_list = soup.find_all('tr')

        for stock_element in stock_element_list:
            result_msg = stock_element.find('td').text
            #print result_msg
            #if(result_msg.find("결과가".decode('UTF-8'))):
            if "결과가".decode('UTF-8') in result_msg:
                print("None")
            else:
                count_bad = count_bad + 1

    except Exception as e:
        print(e)

    return count_good, count_bad
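
# A minimal usage sketch (assumption: '005930' is only an illustrative ticker);
# the function returns a (count_good, count_bad) tuple of target-price changes.
if __name__ == '__main__':
    good, bad = getStockGoodBadfromHK('005930')
    print('target raised:', good, 'target lowered:', bad)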