Example #1
def msnMoneyTenYearSummary(symbol, local=False):

    import bs4
    import utils

    if not len(symbol):
        return None
    url = "http://investing.money.msn.com/investments/financial-statements?symbol=" + symbol
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")

    # Extract tables
    tables = soup.find_all("table", {"class": "mnytbl"})

    # Parse income statement table
    income = {}
    for row in tables[0].find_all("tr"):
        cols = row.find_all("td")
        if len(cols) == 0:
            continue
        values = ()
        for icol, col in enumerate(cols):
            entry = col.find_all(text=True)[1].strip()
            if icol == 0:
                # First column is the report date; anything unparseable
                # (e.g. a header cell) is kept as raw text
                try:
                    secs = utils.makeEpochTime(str(entry), "%m/%y")
                    values = values + (secs,)
                except ValueError:
                    values = values + (str(entry),)
            else:
                values = utils.extractData(entry, values)

        income[values[0]] = values[1:]

    # Parse balance sheet table
    balance = {}
    for row in tables[1].find_all("tr"):
        cols = row.find_all("td")
        if len(cols) == 0:
            continue
        values = ()
        for icol, col in enumerate(cols):
            entry = col.find_all(text=True)[1].strip()
            if icol == 0:
                try:
                    secs = utils.makeEpochTime(str(entry), "%m/%y")
                    values = values + (secs,)
                except ValueError:
                    values = values + (str(entry),)
            else:
                values = utils.extractData(entry, values)
        balance[values[0]] = values[1:]

    return income, balance
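
All of the MSN Money scrapers in these examples rely on the same three `utils` helpers, which the snippets never show. The following is a minimal sketch inferred from the call sites; the signatures match the calls above, but the bodies (and the cache path in openUrl) are assumptions, not the original implementation.

import time
import urllib.request


def openUrl(url, local=False):
    # Assumed: return the (possibly redirected) URL and the page HTML,
    # reading a previously saved copy from disk when local=True
    if local:
        with open('cache.html') as f:  # hypothetical cache location
            return url, f.read()
    with urllib.request.urlopen(url) as response:
        return response.geturl(), response.read()


def makeEpochTime(string, fmt):
    # Parse a date string in the given format into epoch seconds;
    # raises ValueError on a mismatch, which the callers rely on
    return time.mktime(time.strptime(string, fmt))


def extractData(entry, values):
    # Assumed: parse a scraped cell into a float (stripping thousands
    # separators) and append it to the running tuple of row values,
    # falling back to the raw text for non-numeric cells
    try:
        return values + (float(entry.replace(',', '')),)
    except ValueError:
        return values + (str(entry),)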
Example #2
def getMappedData(self, property_pin, year):
    dictionary_info = self.getRawData(property_pin)
    # If all values in the dict are empty, return it unchanged
    if all(value == '' for value in dictionary_info.values()):
        return dictionary_info
    # Extract the requested fields from the raw dict per the utils definition
    return extractData(dictionary_info, self.data_map, year)
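
Note that `extractData` here takes (raw_dict, data_map, year) rather than the (entry, tuple) signature used by the scrapers above. A minimal sketch under that assumption, where `data_map` maps friendly field names to keys in the raw dict (possibly one key per year), might look like:

def extractData(raw, data_map, year):
    # Assumed behavior: select and rename fields per data_map; when a field
    # has per-year raw keys, `year` picks which one to read
    mapped = {}
    for field, raw_keys in data_map.items():
        key = raw_keys.get(year) if isinstance(raw_keys, dict) else raw_keys
        mapped[field] = raw.get(key, '')
    return mapped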
Example #3
def msnMoneyQuote(symbol, local=False):

    import bs4
    import utils

    if not len(symbol):
        return None
    url = 'http://investing.money.msn.com/investments/stock-price?symbol=' + symbol
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")

    # Extract the date stamp from below the "details" table
    footers = soup.find_all("span", {"class": "foot"})
    string = footers[0].find_all(text=True)[0].strip().split(' ')[2]
    date = utils.makeEpochTime(string, '%m/%d/%Y')

    # Extract tables
    tables = soup.find_all("table", {"class": "mnytbl"})

    # Parse "details" table
    details = {}
    values = ()
    for row in tables[0].find_all("tr"):
        cells = row.find_all("td")
        if len(cells) == 0:
            continue
        data = cells[1].find_all(text=True)[1].strip()
        values = utils.extractData(data, values)
    details[date] = values

    # Parse "financial highlights" table; rows 2 and 3 nest their
    # values one text node deeper than the rest
    highlights = {}
    values = ()
    cntr = 0
    for row in tables[1].find_all("tr"):
        cells = row.find_all("td")
        if len(cells) == 0:
            continue
        index = 2 if cntr in (2, 3) else 1
        data = cells[1].find_all(text=True)[index].strip()
        values = utils.extractData(data, values)
        cntr += 1
    highlights[date] = values

    return details, highlights
Example #4
def msnMoneyBalanceSheet(symbol, local=False):

    import bs4
    import utils

    if not len(symbol):
        return None
    url = 'http://investing.money.msn.com/investments/stock-balance-sheet/?symbol=' + symbol + '&stmtView=Ann'
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")

    rows = soup.find_all("tr")
    ncols = len(rows[-1].find_all("td")) - 1
    titles = []
    tuples = [() for _ in range(ncols)]
    # Rows 1, 2, and 4 hold dates in these formats; every other row holds data
    date_formats = {1: '%Y', 2: '%m/%d/%Y', 4: '%m/%d/%Y'}
    for irow, row in enumerate(rows):
        for icol, col in enumerate(row.find_all("td")):
            entries = col.find_all(text=True)
            # Cells nest their value at different depths depending on markup
            if len(entries) == 1:
                index = 0
            elif len(entries) == 3:
                index = 1
            elif len(entries) == 7:
                index = 4
            else:
                continue
            entry = entries[index].strip()
            # Rows 7 and 30 carry no usable data; skip them
            if irow == 7 or irow == 30:
                continue
            if len(entry):
                if icol == 0:
                    titles.append(str(entry))
                else:
                    try:
                        secs = utils.makeEpochTime(str(entry), date_formats[irow])
                        tuples[icol - 1] = tuples[icol - 1] + (secs,)
                    except KeyError:
                        tuples[icol - 1] = utils.extractData(entry, tuples[icol - 1])

    # Key each column's tuple by its leading entry (the date)
    data = {}
    for col in range(len(tuples)):
        data[tuples[col][0]] = tuples[col][1:]

    return titles, data
Example #5
import requests
import utils

crypto = {}

# Fetch the CoinMarketCap front page and cache it locally
page = requests.get('https://coinmarketcap.com/')
with open('cryptoPage.html', 'w', encoding='utf-8') as f:
    f.write(page.text)

# Re-read the cached page line by line, accumulating parsed
# entries into the `crypto` dict in place
with open('cryptoPage.html', 'r', encoding='utf-8') as f:
    for line in f:
        utils.extractData(line, crypto)


Example #6
def msnMoneyHistoricalPrices(symbol, local=False):

    import bs4
    import utils

    if not len(symbol):
        return None
    url = (
        "http://investing.money.msn.com/investments/equity-historical-price/?PT=7&D4=1&DD=1&D5=0&DCS=2&MA0=0&MA1=0&CF=0&nocookie=1&SZ=0&symbol="
        + symbol
    )
    #    url = 'http://investing.money.msn.com/investments/equity-historical-price/?symbol=us%3a' + symbol + '&CA=0&CB=0&CC=0&CD=0&D4=1&DD=1&D5=0&DCS=2&MA0=0&MA1=0&C5=0&C5D=0&C6=0&C7=0&C7D=0&C8=0&C9=0&CF=4&D8=0&DB=1&DC=1&D9=0&DA=0&D1=0&SZ=0&PT=11'

    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")

    rows = soup.find_all("tr")

    titles = []
    prices = {}
    dividends = {}
    for irow, row in enumerate(rows):
        cols = row.find_all("td")

        # Extract titles from table header
        headers = row.find_all("th")
        for header in headers:
            entries = header.find_all(text=True)
            entry = entries[1].strip()
            if not len(entry):
                continue
            titles.append(str(entry))

        # Extract ex-dividend dates, dividends paid, and share price
        if len(cols) == 3:
            date = 0
            div = 0.0
            price = 0.0
            try:
                entries = cols[0].find_all(text=True)
                entry = entries[1].strip()
                if len(entry):
                    date = utils.makeEpochTime(str(entry), "%m/%d/%Y")
            except ValueError:
                date = 0
            try:
                entries = cols[1].find_all(text=True)
                entry = entries[1].strip().split(" ")[0]
                if len(entry):
                    div = float(entry)
            except ValueError:
                div = 0.0
            try:
                # The closing price sits in the row *after* the dividend row
                if irow + 1 < len(rows):
                    entries = rows[irow + 1].find_all("td")[4].find_all(text=True)
                    entry = entries[1].strip()
                    price = float(entry)
                else:
                    price = 0.0
            except ValueError:
                price = 0.0
            if date != 0:
                dividends[date] = (div, price)

        # Otherwise the row holds a dated set of share prices
        else:
            if len(cols) != 6:
                continue
            values = ()
            for icol, col in enumerate(cols):
                entries = col.find_all(text=True)
                entry = entries[1].strip()
                if not len(entry):
                    continue
                try:
                    secs = utils.makeEpochTime(str(entry), "%m/%d/%Y")
                    values = values + (secs,)
                except ValueError:
                    values = utils.extractData(entry, values)
            prices[values[0]] = values[1:]

    return titles, prices, dividends
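
A hypothetical driver for the scrapers above ('MSFT' is only an example symbol; the pages are fetched live unless local=True):

titles, prices, dividends = msnMoneyHistoricalPrices('MSFT')
print(titles)
for date in sorted(dividends):
    div, price = dividends[date]
    print(date, div, price)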
Example #7
import joblib
import numpy as np

import utils


def cat_transits(filearray):
    cat_fluxes = []
    cat_times = []
    cat_flux_errs = []
    cat_npix = []
    cat_pld_intensities = [[] for _ in range(9)]  # one list per PLD pixel
    cat_xcenters = []
    cat_ycenters = []
    cat_xwidths = []
    cat_ywidths = []

    for file in filearray:
        fluxes, times, flux_errs, npix, pld_intensities, xcenters, ycenters, xwidths, ywidths = utils.extractData(
            file=file)

        cat_fluxes = cat_fluxes + list(fluxes)
        cat_times = cat_times + list(times)
        cat_flux_errs = cat_flux_errs + list(flux_errs)
        cat_npix = cat_npix + list(npix)
        for i, pix in enumerate(pld_intensities):
            cat_pld_intensities[i] = cat_pld_intensities[i] + list(pix)
        cat_xcenters = cat_xcenters + list(xcenters)
        cat_ycenters = cat_ycenters + list(ycenters)
        cat_xwidths = cat_xwidths + list(xwidths)
        cat_ywidths = cat_ywidths + list(ywidths)

    cat_fluxes = np.array(cat_fluxes)
    cat_times = np.array(cat_times)
    cat_flux_errs = np.array(cat_flux_errs)
    cat_npix = np.array(cat_npix)
    cat_pld_intensities = np.array(
        [np.array(pix) for pix in cat_pld_intensities])
    cat_xcenters = np.array(cat_xcenters)
    cat_ycenters = np.array(cat_ycenters)
    cat_xwidths = np.array(cat_xwidths)
    cat_ywidths = np.array(cat_ywidths)

    # The *_key names below (time_key, flux_key, ...) are assumed to be
    # module-level constants defined elsewhere in this script
    current_transit = {}
    current_transit[time_key] = cat_times
    current_transit[flux_key] = cat_fluxes
    current_transit[flux_err_key] = cat_flux_errs
    current_transit[eff_width_key] = cat_npix
    current_transit[pld_coeff_key] = cat_pld_intensities
    current_transit[xcenter_key] = cat_xcenters
    current_transit[ycenter_key] = cat_ycenters
    current_transit[xwidth_key] = cat_xwidths
    current_transit[ywidth_key] = cat_ywidths

    joblib.dump(current_transit, 'cat_pairT0E0.joblib.save')
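
A hypothetical invocation, assuming the per-transit save files are discovered with glob (the filename pattern is illustrative only):

from glob import glob

# Concatenates every extracted transit into 'cat_pairT0E0.joblib.save'
cat_transits(sorted(glob('*_extracted.joblib.save')))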
Example #8
#import src.api.utils as utils

from flask import Flask

import config
import utils

app = Flask(__name__)

logger = utils.setup_logger()


@app.route('/')
def hello_world():
    return 'Running Correctly!'


if __name__ == '__main__':

    if config.STARTUP["DOWNLOAD"]:
        utils.downloadAllData()

    if config.STARTUP["EXTRACT"]:
        utils.extractData()

    if config.STARTUP["PARSE"]:
        data = utils.parseData()

        if config.STARTUP["REBUILD_DB"]:
            utils.buildDB(data)

    #app.run(debug=config.DEBUG, host = config.HOST)
    app.run(host=config.HOST)
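
For reference, a minimal config module that would satisfy the startup block above might look like the following; the flags and values are assumptions, not the project's actual configuration.

# config.py (assumed layout)
STARTUP = {
    "DOWNLOAD": False,   # fetch raw data on boot
    "EXTRACT": False,    # unpack the downloaded data
    "PARSE": True,       # parse extracted files
    "REBUILD_DB": False, # rebuild the database from parsed data
}
HOST = "0.0.0.0"
DEBUG = False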
Example #9
        print("'planet name' is not inlcude in {};".format(planet_name),
              end=" ")
        planet_name = planet_name.split('.json')[0]
        print(" assuming the 'planet name' is {}".format(planet_name))
else:
    init_period, init_t0, init_aprs, init_inc, init_tdepth, init_ecc, init_omega = exoparams_to_lmfit_params(
        planet_name)

init_fpfs = 500 / ppm if init_fpfs is None else init_fpfs
init_u1 = 0.1 if init_u1 is None else init_u1
init_u2 = 0.0 if init_u2 is None else init_u2
init_u3 = 0.0 if init_u3 is None else init_u3
init_u4 = 0.0 if init_u4 is None else init_u4

print('Acquiring Data')
fluxes, times, flux_errs, npix, pld_intensities, xcenters, ycenters, xwidth_keys, ywidth_keys = utils.extractData(
    dataDir)
# Apply half day correction
times = times + 0.5

print('Fixing Time Stamps')
len_init_t0 = len(str(int(init_t0)))
len_times = len(str(int(times.mean())))

# Check if `init_t0` is in JD or MJD
if len_init_t0 == 7 and len_times != 7:
    if len_times == 5:
        init_t0 = init_t0 - 2400000.5
    elif len_times == 4:
        init_t0 = init_t0 - 2450000.5
    else:
        raise ValueError('The `init_t0` is {} and `times.mean()` is {}'.format(
            init_t0, times.mean()))