Example #1
from finviz.screener import Screener

# NASDAQ-listed companies that are also S&P 500 members
filters = ['exch_nasd', 'idx_sp500']
# Results come back sorted by price, ascending
stock_list = Screener(filters=filters, order='price')

# Dump the screener results to a .csv file
stock_list.to_csv()

# ...and to a SQLite database
stock_list.to_sqlite()

# Walk rows 10 through 19 (slice [9:19]), printing symbol and price
for row in stock_list[9:19]:
    print(row['Ticker'], row['Price'])

# Narrow the screen further: high dividend yield only
stock_list.add(filters=['fa_div_high'])
# (calling stock_list(filters=['fa_div_high']) is equivalent)

# Render the whole table to the console
print(stock_list)

Example #2
#!/usr/bin/python3

from finviz.screener import Screener

# Build filters via the human-readable filter dictionary: it maps a
# screen name and option (e.g. "PEG" -> "Under 1") to the raw filter tag.
filters = Screener.load_filter_dict()
some_filters = [filters["PEG"]["Under 1"], filters["Exchange"]["AMEX"]]
stock_list = Screener(filters=some_filters, order="ticker")
print(stock_list)

# Raw filter tags may also be passed directly.
# filters = ['geo_usa']
filters = ["idx_sp500"]  # S&P 500 constituents
print("Screening stocks...")
stock_list = Screener(filters=filters, order="ticker")
print(stock_list)

# Fetch the per-ticker detail pages for every screened stock.
print("Retrieving stock data...")
stock_data = stock_list.get_ticker_details()
print(stock_data)

# Persist the screen as CSV.
stock_list.to_csv("sp500.csv")

# A SQLite export is also available:
# stock_list.to_sqlite("sp500.sqlite")
Example #3
def main():
    parser = argparse.ArgumentParser(description='scrap finviz screener')
    parser.add_argument('-output', type=str, help='output file')
    parser.add_argument('-output_prefix',
                        type=str,
                        default='../stock_data/raw_daily_finviz/finviz_',
                        help='prefix of the output file')
    parser.add_argument('-use_bs4_scrapper',
                        type=bool,
                        default=True,
                        help='Use my old bs4 scraper')
    parser.add_argument('-date',
                        type=str,
                        default=str(datetime.date.today()),
                        help='Specify the date')
    parser.add_argument('-filter',
                        type=str,
                        action='append',
                        help='filters apply to the screener')
    parser.add_argument('-tab',
                        type=str,
                        action='append',
                        help='tabs to the scrap')
    parser.add_argument('-delay',
                        type=int,
                        help='delay in sec between each URL request')
    parser.add_argument('-drop_col',
                        type=str,
                        action='append',
                        default=[],
                        help='remove columns')
    args = parser.parse_args()

    if args.filter is None:
        args.filter = ['f=cap_microover', 'f=cap_microunder']
    if args.delay is not None:
        global scrap_delay
        scrap_delay = args.delay

    # check is the market closed today
    if is_market_close(args.date):
        print('The market is closed today')
        return

    if args.output is None:
        filename = args.output_prefix + args.date + '.csv'
    else:
        filename = args.output

    # scrap the data
    if args.use_bs4_scrapper:
        # use my old code
        df_filters = []
        for filter in args.filter:
            df_filters.append(scrap_finviz(filter, args.tab))
        df = pd.concat(df_filters)
    else:
        # use the finviz package
        stock_list = Screener(filters=args.filter)
        df = pd.read_csv(StringIO(stock_list.to_csv()))

    df = df.loc[~df.index.duplicated(), ~df.columns.duplicated()]
    df.drop(columns=['No.'] + args.drop_col, inplace=True)
    df.insert(0, 'Date', args.date, True)
    df.to_csv(filename)
# filters = ['geo_usa']
filters = ['fa_div_pos']  # companies with a positive dividend yield
print("Filtering stocks..")
stock_list = Screener(filters=filters, order='ticker')
print("Parsing every stock..")
stock_list.get_ticker_details()

# Persist the screen to disk, then read it straight back as a DataFrame.
stock_list.to_csv(r'C:/Users/Jacob Steenhuysen/Downloads/all_world_yields6.csv')
df = pd.read_csv(r'C:/Users/Jacob Steenhuysen/Downloads/all_world_yields6.csv')

# One column per screened ticker.
tickers_list = df['Ticker'].tolist()
data = pd.DataFrame(columns=tickers_list)

import yfinance as yf

# Fill each column with roughly a year of adjusted closes.
for ticker in tickers_list:
    data[ticker] = yf.download(ticker, start=datetime.now() - timedelta(days=366), end=date.today())["Adj Close"]
Example #5
foldername = os.path.basename(dirpath)
print("Directory name is : " + foldername)

# Run the same screen against three different finviz tables, each sorted
# by daily change, descending.
stk_overview_list = Screener(filters=filters, order='-change', table='Overview')
stk_performance_list = Screener(filters=filters, order='-change', table='Performance')
stk_technical_list = Screener(filters=filters, order='-change', table='Technical')

# Export the Technical table to .csv.
stk_technical_list.to_csv()

# Other exports/refinements are available, e.g.:
# stock_list.to_sqlite()
# stock_list.add(filters=['fa_div_high'])

# Show the first five rows of the Overview table.
for stock in stk_overview_list[:5]:
    print(stock)
Example #6
#!/usr/bin/python3

from finviz.screener import Screener

# Look up raw filter tags via the human-readable filter dictionary.
filters = Screener.load_filter_dict()
some_filters = [filters['PEG']['Under 1'], filters['Exchange']['AMEX']]
stock_list = Screener(filters=some_filters, order='ticker')

# Raw tags work just as well: screen for S&P 500 members.
# filters = ['geo_usa']
filters = ['idx_sp500']
print("Filtering stocks..")
stock_list = Screener(filters=filters, order='ticker')
print("Parsing every stock..")
stock_list.get_ticker_details()

# Save the results as a CSV file.
stock_list.to_csv('sp500.csv')

# To build a SQLite database instead:
# stock_list.to_sqlite('sp500.sqlite')
Example #7
from finviz.screener import Screener
import pyrebase


def _screen_performance(filters, signal, csv_name=None):
    """Screen the finviz Performance table with the given filters/signal.

    Prints the resulting table, exports it to ``csv_name`` when one is
    given, and returns the Screener object.
    """
    stock_list = Screener(filters=filters, table='Performance', signal=signal)
    print(stock_list)
    if csv_name is not None:
        stock_list.to_csv(csv_name)
    return stock_list


# The same most-active screen is run once per exchange with a different
# relative-volume threshold (the original repeated this block verbatim
# three times, each with the same copy-pasted, misleading comment).
signal = 'ta_mostactive'

# AMEX tickers with relative volume above 0.5
filters = ['exch_amex', 'sh_relvol_o0.5']
stock_list = _screen_performance(filters, signal, "AMEX_stock.csv")

# NASDAQ tickers with relative volume above 2.5
filters = ['exch_nasd', 'sh_relvol_o2.5']
stock_list = _screen_performance(filters, signal, "NASDAQ_stock.csv")

# NYSE tickers with relative volume above 1 — the original never exported
# this screen to .csv, so no file is written here either.
filters = ['exch_nyse', 'sh_relvol_o1']
stock_list = _screen_performance(filters, signal)
Example #8
AUTH_TOKEN = "--------------------------------"

# Screen: any ticker up on the day (green only), restricted to the
# "most volatile" signal and ranked by volume, descending.
# filters = ['exch_nasd','ta_change_u' ]
filters = ['ta_change_u']

print("Filtering stocks..")

stock_list = Screener(filters=filters, order='-volume', signal='ta_mostvolatile')

stock_list.get_ticker_details()

# Write the screen to disk so main() below can read it back.
stock_list.to_csv('jimmy.csv')

# Report header; main() appends tickers to this list.
result = [
    'These are stocks based on highest % change since this morning, GREEN stocks only'
]
d = {}
def main():
    """Report on the first ten rows of jimmy.csv.

    Reads at most the first 10 lines of jimmy.csv, parses them as CSV,
    prints columns 1, 8, 9 and 10 of each row, and appends each ticker
    (column 1) to the module-level ``result`` list.

    BUGFIX: the original used ``[next(read_obj) for x in range(10)]``,
    which raised StopIteration when the file had fewer than 10 lines;
    ``islice`` simply stops at end-of-file instead.
    """
    from itertools import islice

    with open('jimmy.csv', 'r') as read_obj:
        head = list(islice(read_obj, 10))  # safe for short files
    # ``reader`` is csv.reader, imported at the top of this file.
    for row in reader(head):
        print(row[1], row[8], row[9], row[10])
        result.append(row[1])
Example #9
    else:  # init file
        PanPrint.pprint("+ " + yahoo_file + "\tcreating new file")
        f = open(yahoo_file, "w")
        f.write(yahoo_csv_stocks.replace('\r\n', '\n'))
        f.close()
    return yahoo_file


##stock_filters = ['exch_nasd', 'idx_sp500']  # Shows companies in NASDAQ which are in the S&P500
# stock_filters = ['exch_nyse']
# Screen NYSE-listed tickers, ordered by P/E.
stock_filters = 'exch_nyse'
# Ordering key for the screener ('price' is the other option used here).
stock_orderby = 'pe'  # 'pe'  'price'
# Timestamp embedded in the output filename, e.g. '20240131'.
stock_datetim = str(PanDateTime.datetime.now().strftime('%Y%m%d'))
# NOTE(review): PanScreener/PanPrint/Pan/PanDateTime are aliases defined
# elsewhere in this file — presumably the finviz Screener, pprint, pandas
# and datetime modules; confirm against the import block.
stock_list = PanScreener(filters=stock_filters, order=stock_orderby)
stock_csv_ = stock_list.to_csv()
# set_file() (defined earlier in this file) writes the CSV text to disk
# and returns the path it was written to.
stock_file = set_file(stock_datetim, stock_filters, stock_orderby, stock_csv_)
PanPrint.pprint(stock_file)
# NOTE(review): dtype_dic is defined but never used — read_csv below only
# forces 'Volume' to str; consider passing dtype=dtype_dic or removing it.
dtype_dic= { 'No.':'int64', 'Ticker':'str', 'Company':'str', 'Sector':'str', 'Industry':'str', 'Country':'str',
             'Market Cap':'str', 'P/E':'float64', 'Price':'float64', 'Change':'str', 'Volume':'float64'}
# keep_default_na=False keeps finviz's '-' placeholders as literal strings
# so the filters below can match them.
myDf = Pan.read_csv(stock_file, keep_default_na = False, dtype = {'Volume' : 'str'})
# Drop rows whose P/E or Market Cap is the '-' placeholder.
myDf = myDf.loc[myDf['P/E'] != '-']
myDf = myDf.loc[myDf['Market Cap'] != '-']
# Strip thousands separators so Volume can be cast to int64 below.
myDf['Volume'] = myDf['Volume'].str.replace(',', '')
# Expand the K/M/B suffix: numeric part * 1e3/1e6/1e9 (values without a
# suffix are multiplied by the fillna(1) fallback).
myDf['Market Cap'] = (myDf['Market Cap'].replace(r'[KMB]+$', '', regex=True).astype(float) * myDf['Market Cap'].str.extract(r'[\d\.]+([KMB]+)', expand=False).fillna(1).replace(['K','M', 'B'], [10**3, 10**6, 10**9]).astype(int))
# Final numeric dtypes for the cleaned columns.
myDf = myDf.astype({'Market Cap' : 'int64' , 'P/E':'float64', 'Price':'float64', 'Volume': 'int64'})
# Quick inspection of the cleaned frame.
PanPrint.pprint(myDf.head())
PanPrint.pprint(myDf.dtypes)
PanPrint.pprint(myDf.info())
PanPrint.pprint(myDf['P/E'].describe(include='all'))
PanPrint.pprint(myDf['Price'].describe(include='all'))