def run(self):
        while True:
            try:
                # fetch a job from the queue
                ticker, fromdate, todate = self.queue.get_nowait()
            except queue_lib.Empty:
                raise SystemExit
            if ticker[0] == "^":  # make sure filename compatible
                filename_ticker = '_' + ticker[1:]
            else:
                filename_ticker = ticker

            if options.verbose:
                print("ticker:", ticker)
                print("last date asked: " + todate)
                print("first date asked: " + fromdate)

            if not options.offline:
                # download ticker data using yqd
                all_lines = yqd.load_yahoo_quote(ticker, fromdate, todate)

                if len(all_lines) > 5:  # safety check
                    filename = os.path.join(options.dir,
                                            filename_ticker + '.csv')

                    with open(filename, 'w') as fp:
                        fp.write(all_lines)

            if options.verbose:
                print("fetched: " + ticker)
            else:
                sys.stdout.write(".")
                sys.stdout.flush()
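For context, a minimal sketch of the WorkerThread class that this run() method belongs to, assuming the standard threading.Thread subclass that the _main() example further below constructs with WorkerThread(queue); the exact constructor is not shown in the original snippet.

import threading

class WorkerThread(threading.Thread):
    """Hypothetical skeleton: each worker drains (ticker, fromdate, todate)
    jobs from the shared queue until queue_lib.Empty is raised."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    # run(self) is the method shown above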
Example #2
def get_ticker_data(ticker):
    try:
        ticker_data = yqd.load_yahoo_quote(ticker, START_DATE, END_DATE)
        ticker_data.pop()  # get rid of blank string at end of data
        ticker_data = [row.split(',') for row in ticker_data]
        return ticker_data

    except Exception:
        try:
            time.sleep(2)  # delay 2 seconds and try again
            ticker_data = yqd.load_yahoo_quote(ticker, START_DATE, END_DATE)
            ticker_data.pop()  # get rid of blank string at end of data
            ticker_data = [row.split(',') for row in ticker_data]
            return ticker_data
        except Exception:
            return ticker
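A short usage sketch for get_ticker_data: on success it returns the CSV rows split into fields, while on failure it falls back to returning the ticker string itself, so callers should check the return type (the ticker symbol below is only an illustration).

result = get_ticker_data('IBM')
if isinstance(result, str):
    print('download failed for', result)
else:
    header, rows = result[0], result[1:]
    print(header)  # e.g. ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
    print(len(rows), 'rows of quote data')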
Example #3
def getStockData(stockTicker, startDate, endDate, investment):
    # This loop is necessary because load_yahoo_quote occasionally fails with HTTP 401 errors
    count = 0
    while True:
        try:
            quote_data = load_yahoo_quote(stockTicker, startDate, endDate)
            break
        except Exception:
            count += 1
            if count == 50:
                print("HTTP error, exiting program")
                quit()

    data = [quote.split(',') for quote in quote_data]
    data.pop()
    data = np.asarray(data)

    # Data from the list is now converted to a pandas DataFrame
    finDF = pd.DataFrame(data=data[1:, 1:],
                         index=data[1:, 0],
                         columns=data[0, 1:])

    # Series for Adj Close converted to float for the math needed later on
    # (column_name is assumed to be a module-level constant such as 'Adj Close')
    finDF[column_name] = pd.to_numeric(finDF[column_name], downcast='float')
    adjSeries = finDF[column_name]

    print(stockTicker + " price first day " + str(adjSeries[0]))
    print(stockTicker + " price last day  " +
          str(adjSeries[len(adjSeries) - 1]))

    value_of_investment = investment * (adjSeries[len(adjSeries) - 1] /
                                        adjSeries[0])

    print("Value of investment on " + stockTicker + ":",
          numToString(value_of_investment))

    relative_val_series = deepcopy(adjSeries)

    # daily return: price[i] / price[i - 1] - 1, computed in place from the end
    for i in range(len(adjSeries) - 1, 0, -1):
        relative_val_series.iloc[i] = (relative_val_series.iloc[i] /
                                       relative_val_series.iloc[i - 1]) - 1

    relative_val_series.iloc[0] = np.nan

    mean = relative_val_series.mean()
    std = relative_val_series.std()
    print("#" * 60)
    print("These calculations are from", startDate, "to", endDate)
    print("Relative mean of stock price change for " + stockTicker + " is:",
          mean)
    print("Relative std of stock price change for " + stockTicker + " is:",
          std)
    print("Sharpe Ratio is:", (mean / std))
    print("#" * 60)

    return mean, std, mean / std, value_of_investment
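A hedged usage sketch for getStockData, assuming the surrounding module defines column_name = 'Adj Close' and that dates are passed in the YYYYMMDD form used elsewhere in these examples (both assumptions; neither is shown in the snippet).

# hypothetical call: track a 1000-unit investment in IBM over this window
mean, std, sharpe, value = getStockData('IBM', '20170301', '20170830', 1000)
print(sharpe, value)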
Example #4
def load_quote(ticker):
    # open('foo.csv','w').write(yqd.load_yahoo_quote(ticker, '20170515', '20170517'))
    print('===', ticker, '===')
    data = yqd.load_yahoo_quote(ticker, '20000101', '20170517')
    my_df = pd.DataFrame(data[1:])
    my_df.to_csv('foo.csv', index=False)
    print(data[1:])
Example #5
def _main():
    print("main")

    tickers = ['IBM', 'ZCN.TO']
    for ticker in tickers:
        print('===', ticker, '===')
        lines = yqd.load_yahoo_quote(ticker, '20180212',
                                     '20180213').split('\n')
        for line in lines:
            print(line)
Example #6
def update(self):
    del self.data[:]
    if not self.load():
        self.data_lines = yqd.load_yahoo_quote(
            self.symbol, self.start.strftime("%Y%m%d"),
            self.end.strftime("%Y%m%d"))
        self.save()
    #url = self.construct_url()
    #dataStr = requests.get(url).text
    #dataLines = dataStr.split('\n')
    for data_line in self.data_lines[1:]:
        if len(data_line) > 4:
            self.data.append(self.parseDailyData(data_line))
Example #7
def repeat_download(ticker, start_date, end_date):
    # repeat download for N times
    repeat_times = 5
    for i in range(repeat_times):
        try:
            time.sleep(1 + random.random())
            price_df = load_yahoo_quote(ticker,
                                        start_date,
                                        end_date,
                                        format_output='dataframe')
            # return as soon as the download yields data
            if price_df is not None:
                # print(price_df.head())
                return price_df
        except Exception as e:
            print(e)
    return None
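A brief usage sketch for repeat_download. Note that format_output='dataframe' is not part of every yqd release, so this assumes a version of load_yahoo_quote that can return a pandas DataFrame; the ticker and dates below are illustrative.

df = repeat_download('IBM', '20180101', '20180301')
if df is None:
    print('all 5 download attempts failed')
else:
    print(df.head())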
Example #8
def dl_ticker(stock, num_days=10):
    """Loads data from yahoo"""
    entries = []

    end_date = datetime.today()
    begin_date = end_date - timedelta(days=num_days)

    for line in yqd.load_yahoo_quote(stock, begin_date.strftime('%Y%m%d'),
                                     end_date.strftime('%Y%m%d')):

        if "Date" not in line and len(line) > 1:

            date, open_, high, low, close, adj_close, volume = line.split(',')

            entries.append(
                (stock, date, open_, high, low, close, adj_close, volume))

    add_stock_ticks(entries)

    clean_ticks()
Example #9
def get_price_from_yahoo(ticker, start_date, end_date):
    quote = load_yahoo_quote(ticker, start_date, end_date)

    # get historical price
    ticker_price = {}
    index = ['open', 'high', 'low', 'close', 'adjClose', 'volume']
    for num, line in enumerate(quote):
        line = line.strip().split(',')
        if len(line) < 7 or num == 0:
            continue
        date = line[0]
        # check that the date matches the expected YYYY-MM-DD format
        if not re.search(r'^[12]\d{3}-[01]\d-[0123]\d$', date):
            continue
        # CSV columns: open, high, low, close, adjClose, volume -> 1..6
        for col, type_name in enumerate(index, 1):
            try:
                ticker_price.setdefault(type_name, {})[date] = round(
                    float(line[col]), 2)
            except ValueError:
                continue
    return ticker_price
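For clarity, a sketch of the structure get_price_from_yahoo returns, assuming the usual Yahoo CSV header (Date, Open, High, Low, Close, Adj Close, Volume); the numbers in the comment are placeholders, not real quotes.

prices = get_price_from_yahoo('IBM', '20180212', '20180213')
# prices maps field name -> {date -> rounded value}, roughly:
# {
#     'open':  {'2018-02-12': 100.00, '2018-02-13': 101.00},
#     'close': {'2018-02-12': 100.50, '2018-02-13': 101.50},
#     ...
# }
print(prices.get('close', {}))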
Example #10
def download_data(symbol, basedir, start_date, end_date):
    """Wrapper function to yqd library."""
    print("Downloading:{} ...".format(symbol))
    symbol = symbol.upper()
    # Date 1
    d1 = "{0:0>4}".format(start_date.year) + \
         "{0:0>2}".format(start_date.month) + \
         "{0:0>2}".format(start_date.day)

    # Date 2
    d2 = "{0:0>4}".format(end_date.year) + \
         "{0:0>2}".format(end_date.month) + \
         "{0:0>2}".format(end_date.day)

    f = symbol_to_filename(symbol, basedir)

    data = yqd.load_yahoo_quote(symbol, d1, d2)
    # prevent writing invalid data
    if len(data) > 0:
        with open(f, 'w') as fh:
            fh.write(data)
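A short usage sketch for download_data, assuming symbol_to_filename and the yqd import exist in the surrounding module (both are outside this snippet).

import datetime

# hypothetical: fetch one month of AAPL quotes into ./rawdata
download_data('aapl', './rawdata',
              datetime.date(2018, 2, 1), datetime.date(2018, 3, 1))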
Example #11

from yahoo_finance import Share
import yqd
import json
import os
import requests
import sys


stock = 'ISRG'
yf_data = yqd.load_yahoo_quote(stock, '20180301', '20180830')

print(yf_data)

Example #12
    # Check args
    _my_assert(queue.queue, "no Tickers given")
    nb_tickers = len(queue.queue)
    connections = min(options.concurrent, nb_tickers)
    _my_assert(1 <= connections <= 255, "too many concurrent connections requested")

    if options.verbose:
        print("----- Getting {} tickers using {} simultaneous connections -----".format(
            nb_tickers, connections))

    if not options.offline:
        if options.verbose:
            print("Downloading dummy quote...")
        # Get a dummy small quote from Y! to get the crumb & cookie before the threads start.
        _my_assert(len(yqd.load_yahoo_quote('^GSPC', '20180212', '20180212')) > 5,
                   "Error: initial download did not work")
        if options.verbose:
            print("...completed.")

    # start a bunch of threads, passing them the queue of jobs to do
    threads = []
    for _dummy in range(connections):
        t = WorkerThread(queue)
        t.start()
        threads.append(t)

    # wait for all threads to finish
    for thread in threads:
        thread.join()
    sys.stdout.write("\n")
Example #13
def getStocksFromSource(indexes=data, sortBy=SORT_BY_TOP):
    ''' '''
    stocks = []
    index = ['AGTC']
    # for stock in data["Ticker"][:100]:
    # for stock in index:
    for stock in data:
        try:
            print(stock)
            # print(type(stock))
            yf_data = yqd.load_yahoo_quote(stock, '20170301', '20170830')
            # yf_data = yqd.load_yahoo_quote('ABEO', '20170712', '20170725')
            # print(yf_data)
            share = Share(stock)

            # history part
            history = []
            for i, day in enumerate(yf_data[1:-1]):
                daily_data = day.split(',')
                history.append([
                    i,
                    str(daily_data[0]),
                    float(daily_data[1]),
                    float(daily_data[2]),
                    float(daily_data[3]),
                    float(daily_data[4]),
                    float(daily_data[6])
                ])

            # print(history)
            # comments part
            comments = []
            new_StockTwits_comments = []
            url = 'https://api.stocktwits.com/api/2/streams/symbol/{0}.json'.format(
                stock)
            print(url)
            try:
                r = requests.get(url).json()
                print(len(r['messages']))
                for message in r['messages']:
                    try:
                        new_tweet = {
                            'id':
                            message['id'],
                            'body':
                            message['body'],
                            'created_at':
                            message['created_at'],
                            'core_body':
                            nltk_service.clean_tweet(message['body']),
                            'nltk_sentiment':
                            nltk_service.get_tweet_sentiment(message['body']),
                            # 'azure_sentiment': azure_sentiment_service.GetSentiment(message['body'])
                        }
                        try:
                            new_tweet[
                                'azure_sentiment'] = azure_sentiment_service.GetSentiment(
                                    message['body'])
                        except Exception as e:
                            new_tweet['azure_sentiment'] = 0.5
                            print(e)
                        # print(new_tweet['azure_sentiment'])
                        new_StockTwits_comments.append(new_tweet)
                    except Exception as e:
                        print(e)
                        # pass
            except Exception as e:
                print('stock tweets part problem')
                print(e)
            # new_StockTwits_comments = [{'id': message['id'], 'body': message['body'], 'created_at': message['created_at']} for message in r['messages']]

            print(len(new_StockTwits_comments))
            stock = {
                'index': stock,
                'open': share.get_open(),
                'change': share.get_change(),
                'percent_change': share.get_percent_change(),
                'prev_close': share.get_prev_close(),
                'price': share.get_price(),
                'volume': share.get_volume(),
                'history': history,
                'new_StockTwits_comments': new_StockTwits_comments
            }
            # stock_json = json.dumps(stock)
            # print(type(stock_json))
            print(len(history))
            if len(history) != 0:
                # f.write(stock['index']+'/n')
                stocks.append(stock)
        except Exception as e:
            print(e)
            pass
    print(len(stocks))
    return stocks


# f.close()

# get_price()
# get_change()
# get_percent_change()
# get_volume()
# get_prev_close()
# get_open()
# get_avg_daily_volume()
# get_stock_exchange()
# get_market_cap()
# get_book_value()
# get_ebitda()
# get_dividend_share()
# get_dividend_yield()
# get_earnings_share()
# get_days_high()
# get_days_low()
# get_year_high()
# get_year_low()
# get_50day_moving_avg()
# get_200day_moving_avg()
# get_price_earnings_ratio()
# get_price_earnings_growth_ratio()
# get_price_sales()
# get_price_book()
# get_short_ratio()
# get_trade_datetime()
# get_historical(start_date, end_date)
# get_name()
# refresh()
# get_percent_change_from_year_high()
# get_percent_change_from_year_low()
# get_change_from_year_low()
# get_change_from_year_high()
# get_percent_change_from_200_day_moving_average()
# get_change_from_200_day_moving_average()
# get_percent_change_from_50_day_moving_average()
# get_change_from_50_day_moving_average()
# get_EPS_estimate_next_quarter()
# get_EPS_estimate_next_year()
# get_ex_dividend_date()
# get_EPS_estimate_current_year()
# get_price_EPS_estimate_next_year()
# get_price_EPS_estimate_current_year()
# get_one_yr_target_price()
# get_change_percent_change()
# get_dividend_pay_date()
# get_currency()
# get_last_trade_with_time()
# get_days_range()
# get_year_range()
Example #14
import yqd
# yf_data = yqd.load_yahoo_quote('LABU', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('JOBS', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('AVGO', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('JPM', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('MGC', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('BRK.B', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('MILN', '201704013', '20180414')

# yf_data = yqd.load_yahoo_quote('V', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('QQQC', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('BAC', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('GOOG', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('SPLV', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('BABA', '201704013', '20180414')
yf_data = yqd.load_yahoo_quote('ISRG', '20170413', '20180418')

# yf_data = yqd.load_yahoo_quote('FB', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('AMZN', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('AAPL', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('INTC', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('NVDA', '201704013', '20180414')
# yf_data = yqd.load_yahoo_quote('MSFT', '201704013', '20180414')

print(yf_data)
history = []
history1 = []
days = []
for i, day in enumerate(yf_data[1:-1]):
    data = day.split(',')
    print(data)
Example #15
#         'modules'   : 'earningsTrend',
#         'domain'    : 'finance.yahoo.com'
#     }

#     response = urlopen('{}{}?{}'.format(host, path, urlencode(params)))
#     data = json.loads(response.read().decode())

#     pprint(data)

# if __name__ == '__main__':
#     parse()

# ==========================================

import yqd
yf_data = yqd.load_yahoo_quote('ABEO', '20170301', '20170830')
print(yf_data)
history = []
for i, day in enumerate(yf_data[1:-1]):
    data = day.split(',')
    print(data)
    print(str(data[0]), float(data[1]), float(data[2]), float(data[3]),
          float(data[4]), float(data[6]))
    history.append([
        i,
        str(data[0]),
        float(data[1]),
        float(data[2]),
        float(data[3]),
        float(data[4]),
        float(data[6])
    ])
Example #16
File: actions.py  Project: Japsz/isw18
def load_quote(ticker, desde, hasta):
    return yqd.load_yahoo_quote(ticker, desde, hasta)[1:-1]
Example #17
def _main():
    global options

    # today's date, used as the default end date
    today = datetime.datetime.now().strftime("%Y%m%d")
    # default start date (very early to get all possible data)
    start_date = datetime.date(1900, 1, 1).strftime("%Y%m%d")

    # parse arguments
    parser = argparse.ArgumentParser(
        description='Yahoo historical quotes downloader')
    parser.add_argument(
        '-f',
        '--file',
        action='store',
        default='./tickers.txt',
        help='read the ticker list from this file (default: ./tickers.txt)')
    parser.add_argument(
        '-c',
        '--concurrent',
        type=int,
        default=10,
        action='store',
        help='# of concurrent connections used for the download')
    parser.add_argument(
        '-d',
        '--dir',
        action='store',
        default='./rawdata',
        help='save data to this directory (default: ./rawdata)')
    parser.add_argument('-s',
                        '--startdate',
                        default=start_date,
                        action='store',
                        help='start date, format is YYYYMMDD, default is ' +
                        start_date)
    parser.add_argument(
        '-t',
        '--todate',
        default=today,
        action='store',
        help='most recent date, format is YYYYMMDD, default is today: ' +
        today)
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='explain what is being done')
    parser.add_argument('-o',
                        '--offline',
                        action='store_true',
                        help='used for testing, skip the actual downloading')

    options = parser.parse_args()

    if options.verbose:
        print('Command line options parsed successfully.')

    # get input list
    with open(options.file, 'r') as f:
        ticker_lines = f.readlines()

    # build a queue with (ticker, fromdate, todate) tuples
    queue = queue_lib.Queue()
    for line in ticker_lines:
        line = line.strip()  # remove leading and trailing whitespace
        # skip empty lines and lines starting with # (comments)
        if not line or line[0] == "#":
            if options.verbose:
                print('Skipping ticker file line: ' + line)
            continue
        # split on whitespace to ignore optional description after the ticker
        ticker = line.split()[0]

        if options.verbose:
            print("Adding {} from {} to {}".format(ticker, options.startdate,
                                                   options.todate))

        queue.put((ticker, options.startdate, options.todate))

    # Check args
    _my_assert(queue.queue, "no Tickers given")
    nb_tickers = len(queue.queue)
    connections = min(options.concurrent, nb_tickers)
    _my_assert(1 <= connections <= 255,
               "too many concurrent connections requested")

    if options.verbose:
        print(
            "----- Getting {} tickers using {} simultaneous connections -----".
            format(nb_tickers, connections))

    if not options.offline:
        if options.verbose:
            print("Downloading dummy quote...")
        # Get a dummy small quote from Y! to get the crumb & cookie before the threads start.
        _my_assert(
            len(yqd.load_yahoo_quote('^GSPC', '20180212', '20180212')) > 5,
            "Error: initial download did not work")
        if options.verbose:
            print("...completed.")

    # start a bunch of threads, passing them the queue of jobs to do
    threads = []
    for _dummy in range(connections):
        t = WorkerThread(queue)
        t.start()
        threads.append(t)

    # wait for all threads to finish
    for thread in threads:
        thread.join()
    sys.stdout.write("\n")
    sys.stdout.flush()

    # tell something to the user before exiting
    if options.verbose:
        print("all threads are finished - goodbye.")
Example #18
def load_quote(ticker):
    print('===', ticker, '===')
    print(yqd.load_yahoo_quote(ticker, '20170515', '20170517'))
    print(yqd.load_yahoo_quote(ticker, '20170515', '20170517', 'dividend'))
    print(yqd.load_yahoo_quote(ticker, '20170515', '20170517', 'split'))