async def teamstats(self, interaction: discord.Interaction, league_or_team: str):
    """Shows stats for all players on a team, or all teams in a league.

    Args:
        league_or_team (str): League name or Team name
    """
    async with interaction.channel.typing():
        await interaction.response.defer()
        val = league_or_team.lower()
        league = None
        team = None
        if val in leagues.keys():
            league = leagues[val]
        elif val.title() in divisions.keys():
            team = val.title()
        else:
            return await interaction.followup.send(
                f"Couldn't understand `{league_or_team}`. Please specify either a league or a team."
            )
        if league:
            stats = self.stats.teamstats(league=league)
        else:
            stats = self.stats.teamstats(team=team)
        dfi.export(stats, "stats.png", table_conversion="matplotlib")
        path = os.path.abspath("stats.png")
        file = discord.File(path)
        await interaction.followup.send(file=file)
        file.close()
        return os.remove(path)
def books(update, context):
    request = context.args
    global bokreq
    bokreq = request
    if len(request) == 0:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Invalid command format: enter a book title or author after the /books command")
    else:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Book search request received, processing...")
        context.bot.send_chat_action(chat_id=update.effective_message.chat_id,
                                     action=ChatAction.UPLOAD_PHOTO)
        df = getsearchresult(request, 1, "def")
        if df.empty:
            context.bot.send_message(
                chat_id=update.effective_chat.id,
                text="Unfortunately, no books were found; try the search again with different keywords")
        else:
            dfi.export(df, 'books.png')
            context.bot.send_photo(chat_id=update.effective_chat.id,
                                   photo=open("books.png", 'rb'))
            context.bot.send_message(
                chat_id=update.effective_chat.id,
                text='Use the "/downl id" command to download a book (replace id with the id of the book you want). '
                     'If the book you need is not listed, you can look for it on another page: '
                     'send /page followed by a space and the page number, e.g. "/page 2"')
def test_pipeline_metrics():
    image = visualize_images_data_side_by_side(image_data1=true_image_data,
                                               image_data2=pred_image_data,
                                               use_labels1=True,
                                               use_labels2=True,
                                               overlay=True)
    df_classification_metrics = get_df_classification_metrics(
        n_true_bboxes_data=[true_image_data.bboxes_data],
        n_pred_bboxes_data=[pred_image_data.bboxes_data],
        pseudo_class_names=['other'],
        known_class_names=['A', 'B', 'Z', 'other'],
        tops_n=[1, 2, 3, 4])
    df_classification_metrics = df_classification_metrics.loc[[
        'A', 'B', 'C', 'Z', 'other', 'all_accuracy', 'all_weighted_average',
        'all_accuracy_without_pseudo_classes',
        'all_weighted_average_without_pseudo_classes',
        'known_accuracy_without_pseudo_classes',
        'known_weighted_average_without_pseudo_classes'
    ]]
    image_bytes = BytesIO()
    dfi.export(
        obj=df_classification_metrics,
        fontsize=10,
        filename=image_bytes,
        table_conversion='matplotlib',
    )
    df_image = imageio.imread(image_bytes.getvalue())
    total_image = concat_images(image_a=image,
                                image_b=df_image,
                                how='vertically',
                                mode='RGB')
    Image.fromarray(total_image).save(
        test_dir / 'df_classification_metrics_A_B_Z_visualized.jpg')
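# A minimal sketch of the in-memory export pattern the test above relies on:
# dfi.export writes into any writable buffer passed as `filename`, so the PNG
# never touches disk. Assumes pandas, dataframe_image, and imageio are installed.
from io import BytesIO

import dataframe_image as dfi
import imageio
import pandas as pd

df = pd.DataFrame({"precision": [0.9, 0.8], "recall": [0.7, 0.6]}, index=["A", "B"])
buffer = BytesIO()
dfi.export(df, buffer, table_conversion="matplotlib")  # render the table into the buffer
pixels = imageio.imread(buffer.getvalue())             # decode the PNG bytes to an array
print(pixels.shape)                                    # (height, width, channels)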
def print_out_model(shares, quantity, username):
    """
    Process portfolio construction with the model's functions
    :param shares: List of shares
    :param quantity: List with counts of shares
    :param username: Username
    :return: dict with results
    """
    if len(shares) == 0:
        return f'Tickers were not found in your request. Please specify them like this: \n' \
               f'/model AMZN, TWTR, CSCO'
    if len(shares) > 5 and username not in top_users:
        return f'No more than 5 securities! For more services, contact us'
    for share in shares:
        if not models.check_ticker(share) and username not in top_users:
            return f'{share} ticker not found in our base. ' \
                   f'You can search for your company\'s ticker with the /search command.'
    portf = models.Portfolio(shares, quantity, [1] * len(shares), [1] * len(shares))
    return_str = f'Expected return: {round(portf.portfolio_exp_ret*253*100, 2)} % annually.\n' \
                 f'Risk: {round(portf.general_variance*253*100, 2)} % annually.\n' \
                 f'Risk/Reward ratio: {round(portf.general_variance/portf.portfolio_exp_ret, 2)}'
    pie_path = f'PNGs/pie_chart_{dt.now().strftime("%H%M%S%d%m%Y")}.png'
    weights_pie(portf.weights, pie_path)  # make a pie chart of the shares' weights in the portfolio
    df_styled = portf.corr_coef.style.background_gradient()
    corr_path = f'PNGs/corr_df_{dt.now().strftime("%H%M%S%d%m%Y")}.png'
    dfi.export(df_styled, corr_path, table_conversion='matplotlib')
    return_dict = {
        'return_str': return_str,
        'pie_path': home_path + pie_path,
        'corr_path': home_path + corr_path
    }
    return return_dict
def start_1():
    try:
        if os.path.exists(ruta_archivo):
            df = pd.read_csv(ruta_archivo)
            df['Palabra'].fillna('Sin datos', inplace=True)
            df['Estado'].fillna('Sin datos', inplace=True)
            # Build a dict whose keys are the games played, all initialized to False
            partidas = df['Partida'].unique()
            dicci = {}
            for i in partidas:
                dicci[i] = False
            # Filter the DataFrame to matched guesses only
            df_intentos = df[df['Estado'] == 'match']
            # Build a table with the first word found in each game
            tabla = pd.DataFrame(columns=['Partida', 'Palabra'])
            for i in df_intentos.index:
                num = df_intentos["Partida"][i]
                if dicci[num] == False:
                    tabla.loc[i] = [num] + [df_intentos['Palabra'][i]]
                    dicci[num] = True
            nom = 'tabla_de_estadistica.png'
            archivo = os.path.join("src/Archivos/", nom)
            if os.path.exists(archivo):
                remove(archivo)
            df_styled = tabla.style.background_gradient()
            dfi.export(df_styled, archivo)
        else:
            print("Oops! The game log was not found")
    except FileNotFoundError:
        print("The file path is broken")
def display_df_as_img(df):
    temp_file_name = "./temp.png"
    dataframe_image.export(df, temp_file_name)
    with open(temp_file_name, "rb") as file:
        display(Image(file.read()))
    os.remove(temp_file_name)
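# Hypothetical usage sketch for display_df_as_img, assuming it runs in a Jupyter
# notebook with `from IPython.display import display, Image` in scope, plus the
# `dataframe_image` and `os` imports the helper above depends on.
import pandas as pd

scores = pd.DataFrame({"player": ["ann", "bob"], "points": [12, 9]})
display_df_as_img(scores.style.background_gradient())  # renders the styled table inline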
def make_df_image(table, max_cols=-1, max_rows=-1):
    """Return dataframe as image."""
    # delete=False keeps the file on disk so it can be written to and read back
    # by name; note the temp file is never removed afterwards.
    with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as tmp:
        dfi.export(table, tmp.name, max_cols=max_cols, max_rows=max_rows)
        image = mpimg.imread(tmp.name)
    return image
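# Usage sketch for make_df_image (assumes matplotlib.image is imported as
# `mpimg` and matplotlib.pyplot is available). The returned pixel array can be
# placed on ordinary axes, which makes it easy to compose tables with plots.
import matplotlib.pyplot as plt
import pandas as pd

table = pd.DataFrame({"metric": ["mae", "rmse"], "value": [0.12, 0.18]})
img = make_df_image(table)
plt.imshow(img)
plt.axis("off")
plt.show()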
def data():
    if request.method == 'POST':
        print(request.data, flush=True)
        html_er, desc = prc.d_processing(request.data, out)
        datapath = os.path.join(os.getcwd(), 'static/img')
        dfi.export(desc, os.path.join(datapath, 'out.png'))
        print(html_er, flush=True)
        global er_html
        er_html = html_er
        return 'OK', 200
def read_oi(spot_price, ce_values, pe_values, symbol):
    sheet_name = str(datetime.today().strftime("%d-%m-%Y")) + symbol
    strike_oi_ce_pe = find_ce_pe(float(spot_price), ce_values, pe_values)
    values = gs.read_row_values('master', 'Date', symbol, [
        str(datetime.today().strftime("%d-%m-%Y")), spot_price,
        str(strike_oi_ce_pe[0][10]),
        str(strike_oi_ce_pe[1][10])
    ])
    strike_oi_ce_pe = list(strike_oi_ce_pe)
    if values == -1:
        gs.create_new_worksheet(sheet_name)
    else:
        if symbol == 'BANKNIFTY':
            spot_price_org = values[4]
        else:
            spot_price_org = values[1]
        strike_oi_ce_pe = find_ce_pe(float(spot_price_org), ce_values, pe_values)
    ce_dt = pd.DataFrame(ce_values).sort_values(['strikePrice'])
    pe_dt = pd.DataFrame(pe_values).sort_values(['strikePrice'], ascending=False)
    drop_col = [
        'pchangeinOpenInterest', 'askQty', 'askPrice', 'underlyingValue',
        'totalBuyQuantity', 'totalSellQuantity', 'bidQty', 'bidprice',
        'change', 'pChange', 'impliedVolatility', 'totalTradedVolume'
    ]
    ce_dt.drop(drop_col, inplace=True, axis=1)
    pe_dt.drop(drop_col, inplace=True, axis=1)
    ce_dt = ce_dt.loc[ce_dt['strikePrice'].isin(strike_oi_ce_pe[0])]
    pe_dt = pe_dt.loc[pe_dt['strikePrice'].isin(strike_oi_ce_pe[1])]
    # print(ce_dt[['strikePrice', 'lastPrice']])
    # print(pe_dt[['strikePrice', 'changeinOpenInterest']])
    print(ce_dt.head(1))
    print(pe_dt.head(1))
    gs.insert_record(sheet_name, spot_price,
                     (ce_dt['changeinOpenInterest'].sum() * 75),
                     (pe_dt['changeinOpenInterest'].sum() * 75))
    # Read the sheet and store an image to be sent to telegram
    dataframe = gs.read_sheet_into_df(sheet_name)
    dfi.export(dataframe, '/tmp/df_styled.png')
    file = open('/tmp/df_styled.png', 'rb')
    print(
        str([
            spot_price, (ce_dt['changeinOpenInterest'].sum() * 75),
            (pe_dt['changeinOpenInterest'].sum() * 75)
        ]))
def nodestrajectory():
    # Start from node 1 to begin building the remaining graphs
    tag, conections, parent, mamaduck, people, timeactive, list1 = DataJSONtoGraph('1')
    n1 = Nodo(tag, conections, parent, mamaduck, people, timeactive, list1)
    # Run the A* algorithm, keeping the result in an object variable
    res, restab = recorridonodos(n1)
    # res[1] holds the node chosen as top priority; the table below explains
    # why it comes first and how the other nodes rank
    frase = "Node %d requires priority monitoring." % res[1]
    # st.header("Orden de prioridad")
    print(res)
    df = functionWhyPriority(restab)
    df_styled = df.style.background_gradient()
    dfi.export(df_styled, "analisisgrafo.png")
    return send_file("analisisgrafo.png")
def See_List_in_Group1_AutoDrop_Settings(update: Update, context: CallbackContext) -> int:
    update.message.reply_text("Converting database into image...")
    df = pd.read_csv(Save_File_in_Group1_AutoDrop_Settings)
    import dataframe_image as dfi
    dfi.export(df, 'Photo_Of_Autodrop_List.png')
    Bot_With_Token = telegram.Bot(token=pd.read_csv(
        "Group_1/Group1_Global_Settings.csv")["Settings_Bot_Key"].iloc[0])
    Bot_With_Token.send_photo(chat_id=update.message.chat_id,
                              photo=open("Photo_Of_Autodrop_List.png", "rb"))
    return Waiting_For_User_Choice_in_Group1_AutoDrop_Settings
def start_1():  # Table of users and points
    if os.path.exists(ruta_archivo):
        df = pd.read_csv(ruta_archivo)
        df_1 = df[['Nick', 'Puntaje']].sort_values('Puntaje', ascending=False)
        nom = 'tabla_1.png'
        archivo = os.path.join("src/Archivos/", nom)
        if os.path.exists(archivo):
            remove(archivo)
        df_styled = df_1.style.background_gradient()
        dfi.export(df_styled, archivo)
    else:
        print("The file path is broken")
def table(val, model=0):
    # Map model ids to pickled classifiers; unknown ids fall back to the decision tree.
    model_files = {
        1: "Gaussian.pkl",
        2: "Decision_Tree.pkl",
        3: "Random_Forest.pkl",
        4: "SVC.pkl",
        5: "Logistic_Regression.pkl",
        6: "Gradient_Boosted.pkl",
    }
    pkl_filename = model_files.get(model, "Decision_Tree.pkl")
    with open(pkl_filename, 'rb') as file:
        clfg = pickle.load(file)
    X_test = np.load('new_data.npy', allow_pickle=True)
    newdf = pd.read_pickle('newdf.pkl')
    newdf1 = newdf[[
        'duration', 'protocol_type', 'flag', 'src_bytes', 'dst_bytes', 'land',
        'wrong_fragment', 'urgent', 'num_failed_logins', 'logged_in',
        'num_compromised', 'root_shell', 'su_attempted', 'num_file_creations',
        'is_guest_login', 'num_shells', 'num_access_files'
    ]]
    newdf2 = newdf[[
        'srv_count', 'count', 'hot', 'serror_rate', 'rerror_rate',
        'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate',
        'dst_host_count', 'dst_host_srv_count', 'dst_host_diff_srv_rate',
        'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate',
        'Attack Type'
    ]]
    p = newdf1[val:val + 1]
    dfi.export(p, 'static/df_styled1.png', max_cols=-1)
    q = newdf2[val:val + 1]
    dfi.export(q, 'static/df_styled2.png', max_cols=-1)
    display = X_test[val]
    y_test_pred = clfg.predict([display])
    return display, y_test_pred, model, pkl_filename
def getnewsData(self):
    today = date.today()
    T_split = str(today).split('-')
    toDate = T_split[2] + '/' + T_split[1] + '/' + T_split[0]

    googlenewsMkt = GoogleNews(start=toDate, end=toDate)
    googlenewsMkt.get_news('Market')
    result = googlenewsMkt.results()
    df = pd.DataFrame(result).head(10)
    dfi.export(df, './template/df_styled_Market.jpeg')

    googlenewsBiz = GoogleNews(start=toDate, end=toDate)
    googlenewsBiz.get_news('Business')
    result = googlenewsBiz.results()
    df = pd.DataFrame(result).head(10)
    dfi.export(df, './template/df_styled_Business.jpeg')
def search_handler(message):
    log_message(message, 'search_request')
    chat_id = message.chat.id
    companies_str = message.text.replace('/search', '').replace(' ', '')
    return_str = models.ticker_searcher(companies_str)
    if isinstance(return_str, str):
        bot.send_message(chat_id, return_str, parse_mode='HTML')
    else:
        df_styled = return_str[:10].style.background_gradient()
        found_path = f'PNGs/found_companies_{dt.now().strftime("%H%M%S%d%m%Y")}.png'
        dfi.export(df_styled, found_path, table_conversion='matplotlib')
        bot.send_photo(chat_id=chat_id,
                       photo=open(home_path + found_path, 'rb'),
                       caption='Found companies',
                       parse_mode='HTML')
def summary_stats():
    dfi.export(data.describe(), 'table_1.png')
    # Describe the data for each group individually, saving each to the folder as a png.
    dfi.export(setosa.describe(), 'table_2.png')
    dfi.export(versicolor.describe(), 'table_3.png')
    dfi.export(virginica.describe(), 'table_4.png')
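# A sketch of how the frames used by summary_stats might be prepared
# (assumption: the iris dataset with a 'species' column, e.g. seaborn's copy;
# the names `data`, `setosa`, `versicolor`, `virginica` are module globals here).
import seaborn as sns

data = sns.load_dataset("iris")
setosa = data[data["species"] == "setosa"]
versicolor = data[data["species"] == "versicolor"]
virginica = data[data["species"] == "virginica"]
summary_stats()  # writes table_1.png through table_4.png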
def start_4():  # Table of users and points for the hard level
    if os.path.exists(ruta_archivo):
        df = pd.read_csv(ruta_archivo)
        df_4 = df[df['Nivel'] == 'dificil'].groupby(
            ['Nick'])['Puntaje'].sum().sort_values(ascending=False)
        data = pd.DataFrame(df_4)
        nom = 'tabla_4.png'
        archivo = os.path.join("src/Archivos/", nom)
        if os.path.exists(archivo):
            remove(archivo)
        df_styled = data.style.background_gradient()
        dfi.export(df_styled, archivo)
    else:
        print("The file path is broken")
def save_formated_df_picture(formated_df, df_name='df_styled'):
    if formated_df is None:
        return None
    date_string = datetime.now().strftime("%Y-%m-%d_%H%M%S")
    file_name = f"img/{df_name}_{date_string}.png"
    dfi.export(
        formated_df,
        file_name,
        max_rows=-1,
        table_conversion='chrome',
        # chrome_path='.'
    )
    return file_name
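# Usage sketch for save_formated_df_picture. Assumptions: an img/ directory
# exists, and headless Chrome is available since the helper pins
# table_conversion='chrome' (swap in 'matplotlib' if it is not).
import pandas as pd

report = pd.DataFrame({"open": [101.2, 99.8], "close": [102.5, 100.1]})
path = save_formated_df_picture(report.style.highlight_max(), df_name="daily_report")
print(path)  # e.g. img/daily_report_<timestamp>.png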
def tabelaerros(self, dc):
    image_dir = os.getcwd()
    # plt.figure(figsize=(10, 5))
    cod_hab = pd.read_parquet(self.caminho_parametros_provas).query(
        f'SG_AREA == "{self.materia.upper()}"')[['CO_HABILIDADE', 'Descricao_Habilidade']]
    cod_hab = cod_hab.drop_duplicates().set_index('CO_HABILIDADE').sort_index(ascending=True)
    pd.set_option('display.max_colwidth', None)
    df_styled = cod_hab
    df_styled.sort_index(ascending=True, inplace=True)
    df_styled.rename(columns={'Descricao_Habilidade': 'Habilidade'}, inplace=True)
    df_styled.index.names = ['Cód. Habilidade']
    # Assign the Styler back; otherwise set_table_styles is a no-op and the
    # plain DataFrame would be exported without the hover style.
    df_styled = df_styled.style.set_table_styles({
        0: [{'selector': 'td:hover', 'props': [('font-size', '25px')]}]
    })
    dfi.export(df_styled, os.path.join(image_dir, "mytable.png"))
def output_files():
    '''
    Generate the png files of three significant outputs:
    1) the finalized dataframe containing the original data of 12 factors;
    2) the correlation matrix based upon the finalized dataframe;
    3) the heatmap showing the correlation matrix.
    '''
    df_final = append_us_productivity()
    dfi.export(df_final, "output/df_final.png")
    corr_matr = generate_correlation_matrix()
    dfi.export(corr_matr, "output/correlation_matrix.png")
    heatmap_no_figure = generate_heatmap()
    heatmap = heatmap_no_figure.get_figure()
    heatmap.savefig('output/heatmap_no_figure.png', dpi=400)
    return None
def generate(self):
    schedule_file = 'schedule.xlsx'  # excel file containing the user's schedule
    schedule_data = pd.read_excel(schedule_file)  # turn the excel file into a dataframe for processing
    # Process each row of the dataframe and add it to the week dictionary
    for i in range(len(schedule_data)):
        days = schedule_data['Days'][i].replace(' ', '').split(',')
        self.add_to_schedule(schedule_data['Event'][i],
                             schedule_data['Type'][i],
                             days,
                             self.standard_to_military(schedule_data['Start'][i]),
                             schedule_data['End'][i],
                             schedule_data['Location'][i],
                             schedule_data['Host'][i])
    for i in week:
        if len(week[i]) != 0:
            daily_schedule = pd.DataFrame(week[i])
            daily_schedule_styled = daily_schedule.transpose().style.set_properties(
                **{'background-color': 'grey',
                   'color': 'white',
                   'border-color': 'grey'})
            dfi.export(daily_schedule_styled, "./Weekdays/" + i + '.png')
async def schedule(self, interaction: discord.Interaction, *, team: str):
    """Shows the schedule for any team.

    Args:
        team (str): Team name
    """
    async with interaction.channel.typing():
        team: str = team.title()
        if team not in divisions.keys():
            return await interaction.response.send_message(
                f"Couldn't find team `{team}`", ephemeral=True)
        league: str = self.identifier.find_league(team)
        sheet: Sheet = (self.p4sheet if league.lower() in
                        ["major", "aaa", "aa", "a"] else self.indysheet)
        all_games: pd.DataFrame = sheet.to_df(f"{league} Schedule!O4:X188")
        if all_games.empty:
            return await interaction.response.send_message(
                "Schedules couldn't be found, possibly because they aren't on the sheet. Contact arco if you believe this is an error."
            )
        # If any non-preseason results are in, drop the preseason games
        if 'N' in all_games[all_games['Score'] != ""]['Preseason'].values:
            all_games = all_games[all_games['Preseason'] == "N"]
        all_games.columns.values[3] = "Team 1"
        all_games.columns.values[5] = "Team 2"
        all_games.columns.values[8] = "Logs"
        all_games.drop(
            columns=["Preseason", "Playoff", "Game Logs Processed"],
            inplace=True)
        schedule: pd.DataFrame = all_games.loc[(all_games["Team 1"] == team) |
                                               (all_games["Team 2"] == team)]
        schedule.set_index("Day", drop=True, inplace=True)
        dfi.export(schedule, "schedule.png", table_conversion="matplotlib")
        path = os.path.abspath("schedule.png")
        file = discord.File(path)
        await interaction.response.send_message(file=file)
        file.close()
        return os.remove(path)
def save_visualization(self):
    """Saves the active visualization into its save directory

    Parameters
    ----------
    None

    Returns
    ----------
    None
    """
    if self.active_option == 1:
        save_dir = self.save_dir + "_week_" + str(self.week) + "_table.png"
        dfi.export(self.display_week_table, save_dir)
    elif self.active_option == 2:
        save_dir = self.save_dir + "_week_" + str(self.week) + "_score.png"
        dfi.export(self.display_week_score, save_dir)
def save_visualization(self):
    """Saves the active visualization into its save directory

    Parameters
    ----------
    None

    Returns
    ----------
    None
    """
    if self.active_option == 1:
        save_dir = self.save_dir + "_season_table.png"
        dfi.export(self.season_table, save_dir)
    elif self.active_option == 2:
        save_dir = self.save_dir + "_season_cumulative_score.png"
        fig = self.season_score.get_figure()
        fig.savefig(save_dir)
def See_Settings_in_Group1_WarningSystem_Settings(
        update: Update, context: CallbackContext) -> int:
    update.message.reply_text('Converting to image...')
    df = pd.read_csv("Group_1/Group1_Settings.csv")[[
        "Warning_Status", "NumWarns", "Warning_Punishment"
    ]]
    import dataframe_image as dfi
    dfi.export(df, 'Photo_Of_Warning_System.png')
    Bot_With_Token = telegram.Bot(token=pd.read_csv(
        "Group_1/Group1_Global_Settings.csv")["Settings_Bot_Key"].iloc[0])
    Bot_With_Token.send_photo(chat_id=update.message.chat_id,
                              photo=open("Photo_Of_Warning_System.png", "rb"))
    return Waiting_For_User_Choice_in_Group1_WarningSystem_Settings
def Waterlevel():
    url = 'https://fhy.wra.gov.tw/fhy/api/ReservoirInfoApi/DryAreaGetAll'
    data = requests.get(url).text
    jsondata = json.loads(data)
    data = []
    for product in jsondata:
        for i in product['DryView']:
            name = i['StationName']
            time = i['DATE']
            time = time.split("T")[0]
            over = i['Capacity']
            over = round(over)
            now = i['CapacityRate']
            now = round(now, 2)
            data.append([name, time, over, now])
    # Display headers (in Chinese): reservoir name, update date,
    # current volume (m3), current percentage (%)
    columns = ['水庫名稱', '更新日期', '目前水量(m3)', '目前百分比 %']
    df = pd.DataFrame(data=data, columns=columns)
    dfi.export(df, 'dataframe.png')
def page(update, context):
    if bokreq is None:
        context.bot.send_message(
            chat_id=update.effective_chat.id,
            text="Enter the book you want to search for with the /book command: "
                 "after the command, type the title of the book you need, separated by a space")
    else:
        pag = context.args
        if len(pag) == 0:
            context.bot.send_message(
                chat_id=update.effective_chat.id,
                text="Invalid command format: after the /page command, add a space and the page number you want to go to")
        else:
            try:
                pag = int(pag[0])
            except ValueError:
                context.bot.send_message(
                    chat_id=update.effective_chat.id,
                    text="Invalid command format, try again (the command must be followed by a page number)")
                return  # bail out; otherwise the search below would run with a non-numeric page
            df = getsearchresult(bokreq, pag, "def")
            if df.empty:
                context.bot.send_message(
                    chat_id=update.effective_chat.id,
                    text="Unfortunately, no books were found; try the search again with different keywords")
            else:
                dfi.export(df, 'books.png')
                context.bot.send_photo(chat_id=update.effective_chat.id,
                                       photo=open("books.png", 'rb'))
                context.bot.send_message(
                    chat_id=update.effective_chat.id,
                    text='Use the "/downl id" command, replacing id with the id of the book you need')
def __update_files(self, league_name: str, stats_type: str,
                   last_3_df, last_5_df) -> bool:
    try:
        base_path = STATS_DATA_PATH
        # create paths
        if not os.path.exists(base_path):
            os.mkdir(base_path)
        league_path = os.path.join(STATS_DATA_PATH, league_name)
        if not os.path.exists(league_path):
            os.mkdir(league_path)
        stat_path = os.path.join(league_path, stats_type)
        if not os.path.exists(stat_path):
            os.mkdir(stat_path)
        # save images
        dfi.export(last_3_df, os.path.join(stat_path, "last_3.png"))
        dfi.export(last_5_df, os.path.join(stat_path, "last_5.png"))
        return True
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logging.warning(f"Ex={ex} in file={fname} line={exc_tb.tb_lineno}")
        return False
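# The nested exists()/mkdir() checks above can be collapsed with os.makedirs,
# which creates intermediate directories and tolerates existing ones. A minimal
# equivalent sketch (STATS_DATA_PATH as defined by the surrounding module):
import os

def ensure_stat_dir(league_name: str, stats_type: str) -> str:
    stat_path = os.path.join(STATS_DATA_PATH, league_name, stats_type)
    os.makedirs(stat_path, exist_ok=True)  # no per-level existence checks needed
    return stat_path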
async def stream(self, interaction: discord.Interaction):
    """Shows the upcoming stream schedule"""
    async with interaction.channel.typing():
        try:
            data = self.streamsheet.to_df("S17 Stream Schedule!D3:K")
        except Exception:
            return await interaction.response.send_message(
                "Couldn't find the stream schedule :(")
        data = data.rename(
            columns={
                "Date:": "Date",
                "League:": "League",
                "Series:": "Series",
                "Time:": "Time",
                "Streamer:": "Streamer",
                "Play by Play:": "PBP",
                "Color:": "Color",
            })
        # Get rid of empty rows and TBD rows
        data = data[(data["League"].str.lower().isin(leagues.keys()))
                    & (data['Series'].str.strip() != "-")
                    & (data['Date'].str.strip() != "")]
        data["Date"] = pd.to_datetime(data["Date"],
                                      format="%m/%d/%y",
                                      errors="coerce")
        monday = datetime.today() - timedelta(days=datetime.today().weekday())
        week = timedelta(days=7)
        sched = data[data["Date"] > datetime.today()].set_index("Date")
        filename = "stream_schedule.png"
        dfi.export(sched, filename, table_conversion="matplotlib")
        path = os.path.abspath(filename)
        file = discord.File(path)
        await interaction.response.send_message(file=file)
        return os.remove(path)
def cor_calc(tickers_str, username):
    """
    Calculate correlations between tickers using the model
    :param tickers_str: string with tickers
    :param username: Username (used for the free-tier ticker limit)
    :return: path to picture with correlation table
    """
    tickers_list = tickers_str.replace(' ', '').split(',')
    if len(tickers_list) < 2:
        return "Incorrect request. String should be in a format like: \n/correlation AMZN, MSFT. \nIf you don't " \
               "know a share's ticker, use the ticker searcher."
    if len(tickers_list) > 5 and username not in top_users:
        return 'No more than 5 securities. If you want more services, contact us'
    for ticker in tickers_list:
        if not models.check_ticker(ticker):
            return f'{ticker} ticker not found in our base. You can search for your company\'s ticker with the ' \
                   f'/search command.'
    corr_df = cc(tickers_list)
    df_styled = corr_df.style.background_gradient()
    corr_path = f'PNGs/corr_df_{dt.now().strftime("%H%M%S%d%m%Y")}.png'
    dfi.export(df_styled, corr_path, table_conversion='matplotlib')
    corr_path = home_path + corr_path
    return corr_path
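# The Styler-to-PNG pattern shared by these handlers, as a standalone sketch.
# Assumes only pandas, numpy, and dataframe_image; table_conversion='matplotlib'
# avoids the headless-Chrome dependency of the default backend.
import dataframe_image as dfi
import numpy as np
import pandas as pd

frame = pd.DataFrame(np.random.rand(30, 4), columns=list("ABCD"))
styled = frame.corr().style.background_gradient()  # color cells by correlation
dfi.export(styled, "corr_demo.png", table_conversion="matplotlib")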