import json

from flask import request, jsonify

# fn: the project's analysis helper module (provides get_df, execute_analysis and get_users)


def api_chicago():
    months = request.args.get('months')
    days = request.args.get('days')
    # The months and days filters are required; reject the request if either is missing
    # (checked before json.loads so a missing parameter returns 400 instead of raising)
    if not months or not days:
        return "Months and Days are required", 400
    months = list(map(int, json.loads(months)))
    days = list(map(int, json.loads(days)))
    return jsonify(fn.execute_analysis(fn.get_df('chicago', months, days)))
def api_chicago_users():
    months = list(map(int, json.loads(request.args.get('months'))))
    days = list(map(int, json.loads(request.args.get('days'))))
    # Start and end slicing arguments for the get_users function
    start = int(json.loads(request.args.get('start')))
    end = int(json.loads(request.args.get('end')))
    df = fn.get_df('chicago', months, days)
    users = fn.get_users(df, start, end).to_json(orient='index')
    # Parse the serialized users data back into a JSON-compatible object
    parsed_users = json.loads(users)
    return parsed_users
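# A minimal client-side sketch (not part of the original code) showing the query-string
# format these view functions expect: months, days, start and end arrive JSON-encoded
# and are decoded with json.loads on the server. The base URL and route paths below
# are assumptions for illustration only.
import requests

BASE = 'http://localhost:5000'  # assumed host/port

params = {'months': json.dumps([1, 2, 3]), 'days': json.dumps([1, 7])}
stats = requests.get(BASE + '/api/chicago', params=params).json()  # assumed route

params.update({'start': json.dumps(0), 'end': json.dumps(10)})
users = requests.get(BASE + '/api/chicago/users', params=params).json()  # assumed route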
def update_graph(symbol, type, start_date, end_date, dict_tickers=dict_tickers):
    url = 'https://www.alphavantage.co/query'
    function = 'TIME_SERIES_DAILY'
    outputsize = 'full'
    response = get_response(url, function, outputsize, symbol)
    df_response = get_df(response)
    df_response = date_filter_df(df_response, start_date, end_date)
    # Use the full name from dict_tickers when available, otherwise fall back to the raw symbol
    label = dict_tickers[symbol] if symbol in dict_tickers else symbol
    # 'ts' produces a time-series line chart, anything else a candlestick chart
    if type == 'ts':
        fig = plot_df_py(df_response, label)
    else:
        fig = plot_candlestick(df_response, label)
    return fig
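# update_graph looks like it is meant to drive an interactive chart from dropdown and
# date inputs; assuming it is used as a Dash callback (an assumption, not stated in the
# original code), a minimal wiring sketch could look like this. All component ids, the
# layout and the default ticker below are illustrative only.
from dash import Dash, dcc, html
from dash.dependencies import Input, Output

app = Dash(__name__)
app.layout = html.Div([
    dcc.Dropdown(id='symbol',
                 options=[{'label': v, 'value': k} for k, v in dict_tickers.items()],
                 value='BA'),  # assumed default ticker
    dcc.RadioItems(id='chart-type',
                   options=[{'label': 'Time series', 'value': 'ts'},
                            {'label': 'Candlestick', 'value': 'candle'}],
                   value='ts'),
    dcc.DatePickerRange(id='dates'),
    dcc.Graph(id='price-graph'),
])

# Redraw the figure whenever the symbol, chart type or date range changes
app.callback(
    Output('price-graph', 'figure'),
    [Input('symbol', 'value'),
     Input('chart-type', 'value'),
     Input('dates', 'start_date'),
     Input('dates', 'end_date')],
)(update_graph)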
# -- ---------------------------------------------------------------------------------------------------------------- #
'''-------------------------------------------------------------- Build the model with symbolic features '''

# Generate symbolic features and add them to the model
symbolic = ft.symbolic_features(p_x=features_divisa.iloc[:, 1:], p_y=features_divisa.iloc[:, 0])
nuevos_features = pd.DataFrame(symbolic['fit'], index=features_divisa.index)

# model (trained on data up to 01-01-2019)
lm_model_s = ft.mult_reg(p_x=nuevos_features[:'01-01-2019'],
                         p_y=features_divisa.iloc[:, 0][:'01-01-2019'])
prediccion = ft.recursivo(nuevos_features, features_divisa, lm_model_s["ridge"]["model"])  # actuals and forecast

# -- ---------------------------------------------------------------------------------------------------------------- #
'''-------------------------------------------------------------- Backtest '''

backtest = ft.backtest(prediccion, datos_divisa)

# -- ---------------------------------------------------------------------------------------------------------------- #
'''-------------------------------------------------------------- Performance attribution metrics '''

residuos = ft.get_residuos(backtest)
hetero = ft.check_hetero(residuos)
df = ft.get_df(backtest)
mad, lista = ft.f_estadisticas_mad(df, True)
    'month', MONTHS, return_multi=True)
days = fnc.get_user_input(
    'Which week day?\nPlease type out the full day name (e.g. Monday, Tuesday, ..) or type "all" to get all days.\n',
    'week day', DAYS, return_multi=True)

print('\n' + '#' * 50)
print('Calculating the statistics of the selected dataset ...')
print('#' * 50 + '\n')
start_time = time.time()

result_df = fn.get_df(city[0], months, days)
result = fn.execute_analysis(result_df)

month = fnc.get_key(result[0]['month'], MONTHS).capitalize()
day = fnc.get_key(result[0]['day'], DAYS).capitalize()
hour = result[0]['hour']
start_station = result[1]['start']
end_station = result[1]['end']
trip = result[1]['trip']
total_time = result[2]['total']
mean_time = result[2]['mean']
subscribers = result[3]['subscriber']
customers = result[3]['customer']
males = result[3]['male']
females = result[3]['female']
first_birth = result[3]['min']
import numpy as np
import pandas as pd
import yfinance as yf

# Pull the current list of S&P 500 constituents from Wikipedia
table = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
sandp_raw = df = table[0]
sandp_symbols = sandp_raw["Symbol"]

for i in range(1, 10):
    symbol = f.convert_to_string(sandp_symbols.iloc[i])
    ticker = yf.Ticker(symbol)
    name = ticker.info["longName"]
    # contains total assets and total liabilities
    df_balance_sheet = f.get_df(symbol, 'balance-sheet')
    # contains gross profits
    df_financials = f.get_df(symbol, 'financials')
    # extract the most recent gross profit, total assets and total liabilities figures
    gross_profit_df = df_financials["Gross Profit"].head(1)
    total_assets_df = df_balance_sheet["Total Assets"].head(1)
    total_liabilities_df = df_balance_sheet["Total Liabilities Net Minority Interest"].head(1)
    gross_profit_as_xml = gross_profit_df.iloc[0]
    total_assets_as_xml = total_assets_df.iloc[0]
    total_liabilities_as_xml = total_liabilities_df.iloc[0]
    # scale the scraped figures (likely reported in thousands)
    gross_profit = f.convert_to_float([str(s) for s in gross_profit_as_xml]) * 1000
    total_assets = f.convert_to_float([str(s) for s in total_assets_as_xml]) * 1000
    total_liabilities = f.convert_to_float([str(s) for s in total_liabilities_as_xml]) * 1000
from functions import get_response, get_df, plot_df, plot_df_py

url = 'https://www.alphavantage.co/query'
function = 'TIME_SERIES_DAILY'
symbol = 'BA'
outputsize = 'full'

response = get_response(url, function, outputsize, symbol)
df_response = get_df(response)

# Plot with matplotlib
plot_df(df_response, symbol)
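# The Plotly helper imported above could be used the same way; a minimal sketch,
# assuming plot_df_py returns a Plotly figure (as its use in update_graph suggests).
fig = plot_df_py(df_response, symbol)
fig.show()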
ft.configuracion()

good_connect = False
key_file = st.sidebar.file_uploader("Clave:")
actualizar = st.sidebar.button("Actualizar Outcomes")

# When the user asks to refresh, drop the cached pickle so the data is reloaded from the sheet
if actualizar:
    if os.path.isfile("datos_pick"):
        os.remove('datos_pick')

if key_file is not None:
    if not os.path.isfile("datos_pick"):
        # Load the uploaded credentials, open the sheet and cache the data locally
        key_json = ft.cargar_creadenciales(key_file)
        cliente = ft.connect_to_sheet(key_json)
        worksheet = ft.open_sheet(cliente, sheet_name)
        df = ft.get_df(worksheet)
        df.to_pickle('datos_pick')
    else:
        df = pd.read_pickle('datos_pick')
else:
    st.warning("El archivo con los datos que has cargado no es correcto.")
    st.stop()

menu = st.sidebar.selectbox(
    'Menu:',
    options=['General', 'Estados de Contratacion', 'Trabajando'])

filtros = ft.opciones_filtros(df)
df = ft.filtrar(df, filtros)