예제 #1
0
def do_stuff():
    """Rebuild the combined ticker output file from fresh exchange downloads.

    Deletes the previous output file, downloads ticker data for each
    exchange, merges the per-exchange CSVs into tmpFile.txt, then writes
    a CSV header plus the merged rows into the output file.
    """
    # Get rid of the older file in order to create a new file
    common_lib.delete_file(common_lib.TICKER_FILE_DIR + common_lib.OUTPUT_FILENAME)

    # One source of truth for the exchanges, instead of two duplicated
    # index -> name if/elif chains.
    exchanges = ("nasdaq", "nyse", "amex")

    # Download data from the web
    for exchangeName in exchanges:
        write_stock_tickers(exchangeName)

    # Start combining aggregate data into one file
    for exchangeName in exchanges:
        combine_all_data_into_one_file(exchangeName)
        print("function end for " + exchangeName + ".csv")

    concatenateStr = '"Symbol","Name","LastSale","MarketCap","ADR TSO","IPOyear","Sector","Industry","Summary Quote",\n'
    readStr = common_lib.open_file(common_lib.TICKER_FILE_DIR + "tmpFile.txt")

    # 'with' guarantees the handle is closed even if a write raises.
    with open(common_lib.TICKER_FILE_DIR + common_lib.OUTPUT_FILENAME, 'a') as target:
        target.write(concatenateStr)
        target.write(readStr)

    pickup_garbage()
    return
예제 #2
0
def combine_all_data_into_one_file(exchangeName):
    """Append the parsed ticker rows for *exchangeName* to tmpFile.txt.

    Reads '<exchangeName>Tickers.csv' from the ticker directory, runs it
    through parse_stock_tickers_file, and appends the result to the
    shared temp file that do_stuff later merges into the output file.
    """
    readStr = common_lib.open_file(common_lib.TICKER_FILE_DIR + exchangeName + "Tickers.csv")
    readStr = parse_stock_tickers_file(readStr)
    # 'with' closes the append handle even if the write raises.
    with open(common_lib.TICKER_FILE_DIR + "tmpFile.txt", 'a') as target:
        target.write(readStr)
    return
def cleanup():
    """Delete the champion-check CSV file if it exists.

    common_lib.open_file returns the sentinel string 'FileNotFoundError'
    when the file is missing, in which case there is nothing to remove.
    """
    contents = common_lib.open_file(common_lib.NBA_LEADING_TEAM_CHAMPION_CHECK_CSV_FILE)
    if contents != 'FileNotFoundError':
        from os import remove
        remove(common_lib.NBA_LEADING_TEAM_CHAMPION_CHECK_CSV_FILE)
예제 #4
0
def start_main():
    """Parse the saved standings HTML page and write the team stats CSV."""
    markup = common_lib.open_file(common_lib.NBA_STANDING_HTML_FILE)
    markup = get_all_expanded_standings(markup)

    # Each call consumes one '<a href="/teams/' anchor; loop until none
    # remain past index 0.
    while markup.find('<a href="/teams/') > 0:
        markup = parse_data_get_true_pct(markup)

    statsText = common_lib.convert_list_into_str(list_of_team_stats.listOfTeamStats)
    common_lib.write_file(statsText, common_lib.NBA_STANDING_CSV_FILE)
예제 #5
0
def start_main():
    """Build the standings CSV from the saved standings HTML file."""
    page = get_all_expanded_standings(
        common_lib.open_file(common_lib.NBA_STANDING_HTML_FILE))

    # parse_data_get_true_pct strips one team anchor per pass; stop once
    # no '<a href="/teams/' anchor is found past index 0.
    while page.find('<a href="/teams/') > 0:
        page = parse_data_get_true_pct(page)

    common_lib.write_file(
        common_lib.convert_list_into_str(list_of_team_stats.listOfTeamStats),
        common_lib.NBA_STANDING_CSV_FILE)
def get_team_champion_from_html(filename):
    """Extract the league champion's team name from a saved HTML page.

    Narrows the page text in stages — the 'League Champion:' label, the
    champion's '.html">' link, then the anchor text up to '</a>' — and
    returns the anchor text. If an anchor is missing the result may be
    empty; the caller treats a falsy return as 'no champion'.
    """
    data = common_lib.open_file(filename)

    # Drop everything before the 'League Champion:' label.
    # (data[pos:] replaces the original data[pos:len(data)] boilerplate.)
    data = data[data.find('League Champion:'):]

    # Advance to the champion's team link.
    data = data[data.find('.html">'):]

    # Anchor text sits between the closing '>' and '</a>'.
    startPos = data.find('>')
    endPos = data.find('</a>')
    leagueChampion = data[startPos:endPos]
    # The slice starts on the '>' itself; strip it (and any stray '>').
    leagueChampion = leagueChampion.replace('>', '')

    return leagueChampion
def start_main():
    """Append one row comparing the power-ranking leaders with the champion.

    Records the season's top two ranked teams, the actual champion and
    finals runner-up, and whether either ranked team matched either
    finals team. Writes a header row first when the check CSV does not
    exist yet.
    """
    cleanup()

    # NOTE(review): NBA_POWER_RANKING_CSV_FILE and YEAR are referenced
    # without a module prefix — presumably module-level names; confirm.
    teamList = common_lib.get_multiple_col(NBA_POWER_RANKING_CSV_FILE, 0)
    leaders = get_two_leading_teams_of_true_power_ranking(teamList)
    leadingTeam = leaders[0]
    secondLeadingTeam = leaders[1]
    championTeam = get_team_champion_from_html(common_lib.NBA_CHAMPION_HTML_FILE)
    finalsRunnerUpTeam = get_team_finals_runner_up_from_html(common_lib.NBA_CHAMPION_HTML_FILE)

    finalList = []
    # Header row only when the output file doesn't exist yet
    # (open_file returns the sentinel string 'FileNotFoundError').
    existing = common_lib.open_file(common_lib.NBA_LEADING_TEAM_CHAMPION_CHECK_CSV_FILE)
    if existing == 'FileNotFoundError':
        finalList.append(['YEAR', 'TRUE_POWER_RANK_LEAD_TEAM', 'TRUE_POWER_RANK_SECOND_LEAD_TEAM', 'CHAMPION_TEAM', 'FINALS_RUNNER_UP_TEAM', 'SAME?'])

    # Fallback labels when the HTML parsing found nothing.
    championTeam = championTeam or 'No Champion'
    finalsRunnerUpTeam = finalsRunnerUpTeam or 'No Runner Up'

    leadingTeam = change_element_into_str(leadingTeam)
    secondLeadingTeam = change_element_into_str(secondLeadingTeam)

    # True when either ranked team matches either finals team
    # (case-insensitive) — replaces the original four-way elif chain.
    finalsTeams = (championTeam.lower(), finalsRunnerUpTeam.lower())
    sameTeam = (leadingTeam.lower() in finalsTeams
                or secondLeadingTeam.lower() in finalsTeams)

    finalList.append([YEAR, leadingTeam, secondLeadingTeam, championTeam, finalsRunnerUpTeam, sameTeam])
    dataStr = common_lib.convert_list_into_str(finalList) + '\n'

    common_lib.append_file(dataStr, common_lib.NBA_LEADING_TEAM_CHAMPION_CHECK_CSV_FILE)
def get_statements_to_csv(stockFileDir, ticker):
    """Fetch statement data for *ticker* and normalize its quarterly income CSV.

    'Symbol' is the CSV header row, not a real ticker, so it is skipped.
    Price fetching is deliberately separate from the full statement
    download: balance sheets, income statements and cashflow statements
    don't change every day, but the last price does, so the slow part
    can be toggled off while the price still refreshes.
    """
    if ticker == 'Symbol':
        return

    # NOTE(review): values other than exactly True/False fall through
    # both branches and fetch nothing — confirm that is intentional.
    if flagLastPriceClass.flagLastPrice == True:
        get_last_price_to_csv(ticker)
    elif flagLastPriceClass.flagLastPrice == False:
        get_last_price_to_csv(ticker)
        time.sleep(1)  # small delay between requests to the data source
        get_everything_else(ticker)

    # Normalize the quarterly income statement CSV in place.
    incomePath = stockFileDir + ticker + " Income Quarterly.csv"
    parsed = parse_statements(common_lib.open_file(incomePath))
    common_lib.write_file(parsed, incomePath)
    return
def get_team_finals_runner_up_from_html(filename):
    """Extract the finals runner-up team name from a saved HTML page.

    Narrows the page text in stages — the 'League Playoffs' section, the
    'Finals</span>' heading, then the 'over <a href="' link whose anchor
    text names the losing finalist — and returns that anchor text. The
    caller treats a falsy return as 'no runner up'.
    """
    data = common_lib.open_file(filename)

    # Keep only the playoffs section onward.
    # (data[pos:] replaces the original data[pos:len(data)] boilerplate.)
    data = data[data.find('League Playoffs'):]

    # Advance to the Finals heading.
    data = data[data.find('Finals</span>'):]

    # The runner-up is the team the champion won 'over'.
    data = data[data.find('over <a href="'):]

    # Anchor text sits between the closing '>' and '</a>'.
    startPos = data.find('>')
    endPos = data.find('</a>')
    runnerUp = data[startPos:endPos]
    # The slice starts on the '>' itself; strip it (and any stray '>').
    runnerUp = runnerUp.replace('>', '')

    return runnerUp