def sheet_updater(raw_open_data, raw_smarttrack_data):
    list_disappeared = []
    list_req_num_open = []
    list_req_num_smarttrack = []
    # determine disappeared reqs before updating the spreadsheet values
    latest_wkst_open = spr.worksheet("Open")
    latest_df_wkst_open = pd.DataFrame(latest_wkst_open.get_all_values())
    latest_df_wkst_open.rename(columns=latest_df_wkst_open.iloc[0], inplace=True)
    latest_df_wkst_open.drop(latest_df_wkst_open.index[0], inplace=True)
    latest_df_wkst_open_only_google = latest_df_wkst_open[latest_df_wkst_open['Requisition#'].str.contains('G-REQ')]
    list_req_num_open = list(latest_df_wkst_open_only_google['Requisition#'])
    list_req_num_smarttrack = list(raw_smarttrack_data['Requisition#'])
    list_disappeared = list_diff(list_req_num_open, list_req_num_smarttrack)
    latest_wkst_open.clear()
    set_with_dataframe(latest_wkst_open, latest_df_wkst_open, row=1, include_index=False,
                       include_column_header=True, resize=False, allow_formulas=False)
    for y in latest_df_wkst_open.index:
        for x in range(0, len(list_disappeared)):
            if latest_df_wkst_open.at[y, "Requisition#"] == list_disappeared[x]:
                latest_df_wkst_open.at[y, "Req. Status"] = "Disappeared"
    open_data = pd.DataFrame(wkst_open.get_all_values())
    open_data.rename(columns=open_data.iloc[0], inplace=True)
    open_data.drop(open_data.index[0], inplace=True)
    raw_open_data = open_data
    raw_open_data_only_closed = raw_open_data[raw_open_data['Req. Status'].str.contains('Closed')]
    raw_open_data = raw_open_data[~raw_open_data['Req. Status'].str.contains('Closed')]
    raw_open_data = raw_open_data.rename({'Number of Submission allowed': 'Submittals Left'}, axis='columns')
    raw_smarttrack_data = raw_smarttrack_data.rename({'Number of Submission allowed': 'Submittals Left'}, axis='columns')
    raw_open_data.set_index('Requisition#', inplace=True)
    raw_open_data.update(raw_smarttrack_data.set_index('Requisition#'))
    raw_open_data = raw_open_data.reset_index()
    raw_open_data = raw_open_data[['Date', 'Client', 'Requisition#', 'Job Titles', 'Min Pay Rate', 'Max Pay Rate',
                                   'Location', 'Length(months)', 'Req. Status', 'Submittals Left', '#Submitted',
                                   '#Need(Total)', 'Verified', 'Assigned', 'Recruiter', 'PC', 'Notes', 'Team',
                                   'Work Type', 'Work Type Category']]
    wkst_open.clear()
    set_with_dataframe(wkst_open, raw_open_data, row=1, include_index=False,
                       include_column_header=True, resize=False, allow_formulas=False)
    next_dontwork_row = next_available_row(wkst_open_dontwork)
    wkst_open_after = spr.worksheet("Open")
    wkst_open_after_df = pd.DataFrame(wkst_open_after.get_all_values())
    wkst_open_after_df.rename(columns=wkst_open_after_df.iloc[0], inplace=True)
    wkst_open_after_df.drop(wkst_open_after_df.index[0], inplace=True)
    wkst_open_after_df_without_filled = wkst_open_after_df[~wkst_open_after_df['Req. Status'].str.contains('Filled')]
    wkst_open_after_df_only_filled = wkst_open_after_df[wkst_open_after_df['Req. Status'].str.contains('Filled')]
    frames = [wkst_open_after_df_only_filled, raw_open_data_only_closed]
    wkst_open_after_df_only_filled_and_closed = pd.concat(frames)
    wkst_open.clear()
    set_with_dataframe(wkst_open, wkst_open_after_df_without_filled, row=1, include_index=False,
                       include_column_header=True, resize=False, allow_formulas=False)
    set_with_dataframe(wkst_open_dontwork, wkst_open_after_df_only_filled_and_closed, row=next_dontwork_row,
                       include_index=False, include_column_header=False, resize=False, allow_formulas=False)
    return print("Current requisition information has been updated.")
integrals_max_df = pd.DataFrame(Integration_dct.items(), columns=['Software', 'SUM of Integration Count'])
integrals_max_df = integrals_max_df.sort_values(by='SUM of Integration Count', ascending=False, ignore_index=True)
integrals_max_df.dropna(inplace=True)
Most_Popular_Integrations = integrals_max_df.iloc[0:11].copy()  # top rows; .copy() avoids SettingWithCopyWarning below
Most_Popular_Integrations[''] = 'Most Popular Integrations'
integrals_max_df = pd.concat([integrals_max_df, Most_Popular_Integrations], axis=1)
df = integrals_max_df['']
integrals_max_df.drop('', axis=1, inplace=True)
integrals_max_df.insert(3, '', df)
try:
    sheet.add_worksheet(rows=1000, cols=50, title='integrals_max')
except:
    integrals_max_sheet = client.open(google_spreadsheet).worksheet('integrals_max')
integrals_max_sheet = client.open(google_spreadsheet).worksheet('integrals_max')
set_with_dataframe(integrals_max_sheet, integrals_max_df)
def set_with_df(self, sheet_name, data):
    worksheet = self.workbook.worksheet(sheet_name)
    set_with_dataframe(worksheet, data)
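# For context, a minimal sketch (not from the original source) of the kind of wrapper class the
# set_with_df method above could belong to, assuming self.workbook is a gspread Spreadsheet opened
# with a service account; the class name, constructor arguments and credential file name are hypothetical.
import gspread
from gspread_dataframe import set_with_dataframe

class SheetWriter:
    def __init__(self, spreadsheet_name, credentials_file='service_account.json'):
        gc = gspread.service_account(filename=credentials_file)  # assumed credential file
        self.workbook = gc.open(spreadsheet_name)

    def set_with_df(self, sheet_name, data):
        # Write a DataFrame to the named worksheet, starting at A1.
        worksheet = self.workbook.worksheet(sheet_name)
        set_with_dataframe(worksheet, data)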
# If you use dataframes, you can use:
import gspread_dataframe as gd

G_key = {
    # You could paste the contents of your client_secret.json file here as a dict; otherwise you will
    # need to leave it in the same folder as your Python file. Also remember to give edit access to
    # the e-mail address in the client_secret.json file.
}
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]
creds = ServiceAccountCredentials.from_json_keyfile_dict(G_key, scope)
gc = gspread.authorize(creds)
ws = gc.open("sheet name").worksheet("worksheet name")
# with this you can read from the worksheet:
data_get = gd.get_as_dataframe(ws)
# with this you write to your worksheet:
gd.set_with_dataframe(ws, data)

# Correlation heatmap
import seaborn as sns
sns.heatmap(z.corr(), annot=True, fmt=".2f")

# DataFrame general info
df.info()

# This link is a guide to auto-generating the requirements file:
# https://qxf2.com/blog/auto-generate-requirements-txt-file-using-pipreqs/

# To get holidays, use this awesome library (https://pypi.org/project/holidays/)
import holidays
# And here is an example
for date, name in sorted(holidays.US(state='CA', years=2014).items()):
    print(date, name)
overwrite=True)

#### Update Google spreadsheet
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('My Project-cd8f0fca74a6.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open("Massachusetts power outage data").sheet1

alloutages_google = alloutages_poly[['town', 'pct_display', 'no_power', 'total_cust']]
for index, row in alloutages_google.iterrows():
    pct_display_str = str(row['pct_display']) + "%"
    alloutages_google.at[index, 'pct_display_str'] = pct_display_str
alloutages_google = alloutages_google[['town', 'pct_display_str', 'no_power', 'total_cust']]
alloutages_google = alloutages_google.rename(columns={'town': 'City/Town',
                                                      'pct_display_str': '% without power',
                                                      'no_power': 'Outages',
                                                      'total_cust': 'Total customers'})
alloutages_google = alloutages_google.append({'City/Town': 'State', '% without power': '',
                                              'Outages': alloutages_google['Outages'].sum(),
                                              'Total customers': ''}, ignore_index=True)
alloutages_google = alloutages_google.sort_values(by='Outages', ascending=False)
set_with_dataframe(wks, alloutages_google)
# Update while loop
while True:
    # try:
    timeBegin = time.time()
    print(datetime.datetime.now().strftime('%H:%M'))
    for i in range(len(subAsset)):
        ws = gc.open("Data").worksheet(subAsset[i])  # open the worksheet for this asset (e.g. BNB, XRP)
        df = get_as_dataframe(ws).set_index('indexAround')  # read the sheet and set the indexAround column as the pandas index
        Around = df.loc['Around']['Balance']  # round counter
        df = callFuntion.updatee(df, Around, subAsset[i])
        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]  # drop unwanted columns
        # print(" Round " + str(Around) + ' of ' + str(subAsset[i]) + ' amount ' + df.loc[Around]['Asset'] + ' Balance = ' + df.loc[Around]['Balance'] + ' ' + str(callFuntion.MainAsset))
        print(df.loc[Around].to_frame().T)
        set_with_dataframe(gc.open("Data").worksheet(subAsset[i]), df.reset_index())  # write back to the worksheet
    timeEnd = time.time()
    timeElapsed = timeEnd - timeBegin
    time.sleep(60 - timeElapsed)  # wait out the rest of the 1-minute cycle
    # except Exception as e:
    #     callFuntion.LineNotify('', '', e, 'error')  # on error, send a Line notification to the author
    #     break
# End while loop
df = pd.read_sql(sql, params=params, con=con)
categories = df['Category'].to_list()
categories = [i.strip() for i in categories]
categories = list(dict.fromkeys(categories))  # de-duplicate while preserving order

count = pd.DataFrame({'Category': 'All Keywords', 'Count': df.shape[0]}, index=[0])
counts_df = counts_df.append(count)
for i in categories:
    count = df[df['Category'] == i].shape[0]
    count = pd.DataFrame({'Category': i, 'Count': count}, index=[0])
    counts_df = counts_df.append(count)
counts_df = counts_df.reset_index(drop=True)

client = gspread.authorize(creds)
sheet = client.open_by_key(gspread_id)
try:
    worksheet = sheet.worksheet(f'{client_name} - Keyword Counts')
    worksheet.clear()
except gspread.exceptions.WorksheetNotFound as err:
    worksheet = sheet.add_worksheet(title=f'{client_name} - Keyword Counts', rows=1, cols=1)
set_with_dataframe(worksheet, counts_df)

# %%
tweets.drop(labels=['Extended_Entities'], axis=1, inplace=True)
tweets['Date'] = tweets['Date'].map(lambda x: tz_convert(x))
tweets['Date'] = tweets['Date'].astype(str)  # write PST datetime to string so it can be appended to Google Sheets

scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]
credentials = ServiceAccountCredentials.from_json_keyfile_name('./PyFilter-34d3cda723bf.json', scope)
gc = gspread.authorize(credentials)
ws = gc.open("PyFilter").worksheet("Twitter_Data")  # open google sheet and worksheet
existing = gd.get_as_dataframe(worksheet=ws)  # get worksheet as dataframe
updated = existing.append(tweets, ignore_index=False, sort=False)
gd.set_with_dataframe(ws, updated, resize=True)
print('appended to google sheet')

# delete photos that have been downloaded
for file in os.listdir("../images"):
    if file[-4:] == '.jpg':
        os.remove("../images/" + file)
print('Photos Removed from Folder')
print('Executed Successfully')
faves_added.columns = ['from', 'to', 'faves', 'last_tweet']

print 'Counting number of times each person has been favorited'
favees = faves.pivot_table(index=['to'], values=['from'], aggfunc='count')
favees.reset_index(level=['to'], inplace=True)
favees.columns = ['label', 'favees']

people = pandas.read_csv('people.csv')
people = pandas.merge(people, favees, how='right', on=['label', 'label'])

confirm = raw_input('OK to delete & replace worksheets? (Y/n) ')
if confirm == '' or strtobool(confirm):
    print "Deleting"
    try:
        # currently needs one useless worksheet hanging about (can't delete all sheets)
        sheets.del_worksheet(sheets.worksheet('faves'))
        sheets.del_worksheet(sheets.worksheet('people'))
    except:
        print "Couldn't delete"
    print "Creating new worksheets"
    faves_worksheet = sheets.add_worksheet(title="faves", rows=faves_added.shape[0], cols=faves_added.shape[1])
    people_worksheet = sheets.add_worksheet(title="people", rows=people.shape[0], cols=people.shape[1])
    print "Uploading faves"
    set_with_dataframe(faves_worksheet, faves_added)
    print "Uploading people"
    set_with_dataframe(people_worksheet, people)
import gspread
import cmews
from gspread_dataframe import set_with_dataframe

# get dataframe
df = cmews.get_dataframe()

# authenticate
gc = gspread.service_account(filename='<INSERT FILE NAME HERE>.json')

# open spreadsheet and worksheet
sh = gc.open("CME scraper")
ws = sh.worksheet('Copper')

# transfer dataframe to the worksheet
set_with_dataframe(ws, df)
print("Completed upload to Google Sheets")
# Sets gitter worksheet
# gitter_worksheet = client.open("Buildly Analytics").get_worksheet(1)
# Gets gitter chat metadata
# gitter_meta_data = gitter.get_chat_meta()

# Sets github worksheet
github_worksheet = client.open("Buildly Analytics").get_worksheet(2)

# Gets github chat data
github_meta_data = retrieve_repo_data.get_github_meta()
github_view_data = retrieve_repo_data.modularize_view_data()
github_clone_data = retrieve_repo_data.modularize_clone_data()

# Writes user by frame
# by_user_frame = github_meta_data.groupby("user").size().to_frame().T

# Writes to the gitter google sheet (disabled along with the gitter worksheet and metadata above,
# since gitter_worksheet and gitter_meta_data are otherwise undefined)
# set_with_dataframe(gitter_worksheet, gitter_meta_data)

# Writes to the github metadata google sheet
github_worksheet.update_acell('A1', 'Github Meta Data')
set_with_dataframe(github_worksheet, github_meta_data, row=2)
github_worksheet.update_acell('M1', 'Github viewer Data')
set_with_dataframe(github_worksheet, github_view_data, row=2, col=13)
github_worksheet.update_acell('Q1', 'Github clone Data')
set_with_dataframe(github_worksheet, github_clone_data, row=2, col=17)
def Trigger_trade():
    difZone = df.loc[whatsymbol]['DifZone']
    for i, row in dfMap.iterrows():
        if pd.notna(row['IDorderBuy']):
            idOrderbuy = row['IDorderBuy']
            orderMatchedBUY = checkByIDoder(idOrderbuy)
            if orderMatchedBUY['filled'] == 0:
                # if the cooldown runs out and the buy never fills, cancel the pending limit order
                if pd.notna(row['timecancelbuy']):
                    # has 10 minutes passed? if so, cancel the order
                    first_time = row['timecancelbuy']
                    start_time = first_time + 600  # 10-minute countdown before cancelling the order
                    target_time = time.time()
                    timeElapsed = target_time - start_time
                    if timeElapsed > 0:
                        cancelOrder(idOrderbuy)
                        # clear this bullet's data once the order is cancelled; all old values must be
                        # wiped, otherwise ccxt raises ccxt.base.errors.InvalidOrder: order_not_exist_or_not_allow_to_cancel
                        row['IDorderBuy'] = np.nan
                        row['OpenPrice'] = np.nan
                        row['AmountBuy'] = np.nan
                        row['FilledBuy'] = np.nan
                        row['ExposureBuy'] = np.nan
                        row['timecancelbuy'] = np.nan
            # a sell order can only be opened once there is a Buy position size
            elif orderMatchedBUY['filled'] == orderMatchedBUY['amount']:
                row['timecancelbuy'] = np.nan
                if pd.isna(row['FilledBuy']):
                    row['FilledBuy'] = orderMatchedBUY['filled']
                    row['feeBuy'] = Getfee_ByIDoderinMyTrades(idOrderbuy, orderMatchedBUY['side'])  # fee
                    # record the TradeLog; everything must be converted to strings or it cannot be saved
                    print('OpenOrder Price : ' + str(orderMatchedBUY['price']))
                    print('Amount : ' + str(orderMatchedBUY['filled']))
            if pd.notna(row['IDorderSell']):
                idOrdersell = row['IDorderSell']
                orderMatchedSELL = checkByIDoder(idOrdersell)
                # sell fully filled means the position closed with profit
                if orderMatchedSELL['filled'] == orderMatchedSELL['amount']:
                    row['LastClosePrice'] = orderMatchedSELL['price']
                    row['feeSell'] = Getfee_ByIDoderinMyTrades(idOrdersell, orderMatchedSELL['side'])  # fee
                    ExposureBuy = row['ExposureBuy']
                    ExposureSell = orderMatchedSELL['filled'] * orderMatchedSELL['price']
                    feesell = row['feeSell']
                    feebuy = row['feeBuy']
                    if pd.isna(feesell):
                        feesell = 0
                    if pd.isna(feebuy):
                        feebuy = 0
                    profitshow = (ExposureSell - ExposureBuy) - (feesell + feebuy)
                    if pd.isna(row['Profit']):
                        row['Profit'] = profitshow
                    elif pd.notna(row['Profit']):
                        row['Profit'] = row['Profit'] + profitshow
                    if pd.isna(row['round']):
                        row['round'] = 1
                    elif pd.notna(row['round']):
                        row['round'] = row['round'] + 1
                    print('Sell price : ' + str(orderMatchedSELL['price']))
                    print('Profit : ' + str(profitshow))
                    profitshowLine = round(profitshow, 4)
                    LineNotify('\n' + 'Sell price : ' + str(orderMatchedSELL['price']) + '\n' +
                               'Profit : ' + str(profitshowLine) + ' usd', 'change')
                    if pd.isna(profitshow):
                        LineNotify('nan bug ExposureSell : ' + str(ExposureSell) + '\n' +
                                   'nan bug ExposureBuy : ' + str(ExposureBuy) + '\n' +
                                   'nan bug feeSell : ' + str(row['feeSell']) + '\n' +
                                   'nan bug feeBuy : ' + str(row['feeBuy']), 'change')
                    dfTradeLog = get_as_dataframe(gc.open(sheetname).worksheet('TradeLog'))
                    # record the TradeLog; everything must be converted to strings or it cannot be saved
                    dfTradeLog3 = pd.DataFrame({'IDorderOrderBuy': [str(idOrderbuy)],
                                                'IDorderOrderSell': [str(idOrdersell)],
                                                'Open': [str(orderMatchedBUY['price'])],
                                                'Close': [str(orderMatchedSELL['price'])],
                                                'Amount': [str(orderMatchedSELL['filled'])],
                                                'TradeTrigger': [str(row['TradeTrigger'])],
                                                'Zone': [str(row['Zone'])],
                                                'OpenTime': [str(orderMatchedBUY['datetime'])],
                                                'CloseTime': [str(orderMatchedSELL['datetime'])],
                                                'Profit': [str(profitshow)],
                                                'feeBuy': [str(row['feeBuy'])],
                                                'feeSell': [str(row['feeSell'])]})
                    dfTradeLog = dfTradeLog.append(dfTradeLog3, ignore_index=True)
                    dfTradeLogg = dfTradeLog.drop(columns=[c for c in dfTradeLog.columns if "Unnamed" in c]).dropna(how="all")
                    set_with_dataframe(gc.open(sheetname).worksheet('TradeLog'), dfTradeLogg)  # write to the TradeLog sheet
                    # clear the bullet data when the round completes, freeing the slot
                    # buy bullet data
                    row['IDorderBuy'] = np.nan
                    row['OpenPrice'] = np.nan
                    row['AmountBuy'] = np.nan
                    row['FilledBuy'] = np.nan
                    row['timecancelsell'] = np.nan
                    row['ExposureBuy'] = np.nan
                    row['NAV'] = np.nan
                    row['feeBuy'] = np.nan
                    # reset the trade-pattern state so it can be re-randomised
                    row['TradeTrigger'] = np.nan
                    # sell bullet data
                    row['IDorderSell'] = np.nan
                    row['ClosePrice'] = np.nan
                    row['AmountSell'] = np.nan
                    row['feeSell'] = np.nan
                elif orderMatchedSELL['filled'] == 0:
                    # if the cooldown runs out and the sell never fills, cancel the pending limit Sell order
                    if pd.notna(row['timecancelsell']):
                        # has 10 minutes passed? if so, cancel the order
                        first_time = row['timecancelsell']
                        start_time = first_time + 600  # 10-minute countdown before cancelling the order
                        target_time = time.time()
                        timeElapsed = target_time - start_time
                        if timeElapsed > 0:
                            cancelOrder(idOrdersell)
                            # clear this bullet's data once the order is cancelled; all old values must be
                            # wiped, otherwise ccxt raises ccxt.base.errors.InvalidOrder: order_not_exist_or_not_allow_to_cancel
                            row['IDorderSell'] = np.nan
                            row['ClosePrice'] = np.nan
                            row['AmountSell'] = np.nan
                            row['timecancelsell'] = np.nan
            # conditions for firing a sell bullet
            if pd.isna(row['IDorderSell']):
                NowPrice = getPrice(whatsymbol)
                if pd.notna(row['OpenPrice']):
                    if NowPrice > (row['OpenPrice'] + (difZone * 2)):  # price must be at least 2 zones higher before taking profit
                        # MapTrigger = -1 marks a zone for reducing existing holdings: reduce the held Buy
                        # by opening a Sell equal to that bullet's position size
                        if row['MapTrigger'] == -1 and row['Zone'] > 0:
                            checktradesell = False
                            if tradeFuntion == 'RSI':
                                if row['TradeTrigger'] >= 1 and row['TradeTrigger'] <= 25:
                                    getRSIvalue = RSI('5m')
                                    if getRSIvalue > 70:
                                        print(getRSIvalue)
                                        checktradesell = True
                                if row['TradeTrigger'] >= 26 and row['TradeTrigger'] <= 50:
                                    getRSIvalue = RSI('15m')
                                    if getRSIvalue > 70:
                                        print(getRSIvalue)
                                        checktradesell = True
                                if row['TradeTrigger'] >= 51 and row['TradeTrigger'] <= 75:
                                    getRSIvalue = RSI('1h')
                                    if getRSIvalue > 70:
                                        print(getRSIvalue)
                                        checktradesell = True
                                if row['TradeTrigger'] >= 76:
                                    getRSIvalue = RSI('4h')
                                    if getRSIvalue > 70:
                                        print(getRSIvalue)
                                        checktradesell = True
                            if tradeFuntion == 'percent':
                                pricenow = getPrice(whatsymbol)
                                Openprice_ = row['OpenPrice']
                                minpercenttore = Openprice_ / 100
                                Closeprice_ = Openprice_ + minpercenttore
                                if pricenow > Closeprice_:
                                    checktradesell = True
                            if checktradesell == True:
                                positionSizeClose = orderMatchedBUY['filled']
                                # open a Sell order to close the Buy order
                                orderSell = re(whatsymbol, 'limit', 'sell', positionSizeClose)
                                row['IDorderSell'] = orderSell['id']
                                row['ClosePrice'] = orderSell['price']
                                row['AmountSell'] = orderSell['amount']
                                row['timecancelsell'] = time.time()
        # conditions for firing a buy bullet (using this bullet slot)
        if pd.isna(row['IDorderBuy']):
            # MapTrigger = 1 marks a zone that should hold a bullet
            if row['MapTrigger'] == 1 and row['Zone'] > 0 and row['Exposure'] > 0 and row['UseZone'] == 1:
                checktradebuy = False
                if tradeFuntion == 'RSI':
                    if row['TradeTrigger'] >= 1 and row['TradeTrigger'] <= 25:
                        getRSIvalue = RSI('5m')
                        if getRSIvalue < 30:
                            checktradebuy = True
                    if row['TradeTrigger'] >= 26 and row['TradeTrigger'] <= 50:
                        getRSIvalue = RSI('15m')
                        if getRSIvalue < 30:
                            checktradebuy = True
                    if row['TradeTrigger'] >= 51 and row['TradeTrigger'] <= 75:
                        getRSIvalue = RSI('1h')
                        if getRSIvalue < 30:
                            checktradebuy = True
                    if row['TradeTrigger'] >= 76:
                        getRSIvalue = RSI('4h')
                        if getRSIvalue < 30:
                            checktradebuy = True
                # delay when volatility comes in
                # df._set_value(whatsymbol, 'TimerDelay', time.time())
                # df._set_value(whatsymbol, 'Stat', 'Cooldown')
                if tradeFuntion == 'percent':
                    pricenow = getPrice(whatsymbol)
                    if pricenow < row['Zone']:
                        checktradebuy = True
                if checktradebuy == True:
                    # cost per bullet
                    expousre = row['Exposure']
                    # asset amount to order for one bullet
                    amount = abs(expousre) / float(getPrice(whatsymbol))
                    orderBuy = re(whatsymbol, 'limit', 'buy', amount)
                    row['IDorderBuy'] = orderBuy['id']
                    row['OpenPrice'] = orderBuy['price']
                    row['AmountBuy'] = orderBuy['amount']
                    row['ExposureBuy'] = orderBuy['amount'] * orderBuy['price']
                    row['timecancelbuy'] = time.time()
import gspread
import pandas as pd
import gspread_dataframe
from oauth2client.service_account import ServiceAccountCredentials

sheet_name = "SPREADSHEET_NAME_GOES_HERE"
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]
credentials = ServiceAccountCredentials.from_json_keyfile_name('KEY_FILE_GOES_HERE.json', scope)

teams = pd.read_csv("teams.csv")
matches = pd.read_csv('matches.csv')

gc = gspread.authorize(credentials)
sheet = gc.open(sheet_name)

wks = sheet.worksheet("Ranks")
gspread_dataframe.set_with_dataframe(wks, teams)

wks = sheet.worksheet("Match Breakdowns")
gspread_dataframe.set_with_dataframe(wks, matches)
]
df4['Title'] = np.select(conditions, choices, default='none')
df4.head()

# In[130]:
import gspread_dataframe as gd
import gspread
import google.cloud as gc
from oauth2client.service_account import ServiceAccountCredentials

# In[131]:
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'C:\\Users\\HassSarw\\gcs_service_account_file\\itv-ds-dev-6cd968b7542d.json', scope)
gc = gspread.authorize(credentials)

# In[141]:
ws2 = gc.open("LoveIsland Facebook Video Views").worksheet('Sheet4')
existing2 = gd.get_as_dataframe(ws2)
updated2 = existing2.append(df4)
updated2 = updated2.drop_duplicates(['Date', 'Title'], keep='last')

cell_list2 = ws2.range('A2:D500')
for cell in cell_list2:
    cell.value = ''
ws2.update_cells(cell_list2)
gd.set_with_dataframe(ws2, updated2)
def write(sheet: gspread.Worksheet, df: pd.DataFrame) -> None:
    cells = sheet.get_all_values()
    set_with_dataframe(sheet, df,
                       include_index=False,
                       include_column_header=False,
                       row=len(cells) + 1,
                       resize=False)
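# A minimal usage sketch for the append-style write() helper above; the credential file,
# spreadsheet and worksheet names, and the sample DataFrame are assumptions, not from the source.
import gspread
import pandas as pd

gc = gspread.service_account(filename='service_account.json')  # assumed credentials
log_sheet = gc.open('My Log').worksheet('Log')                  # assumed spreadsheet/worksheet
write(log_sheet, pd.DataFrame({'ts': ['2024-01-01'], 'value': [42]}))
# Each call appends the frame's rows directly below whatever already occupies the sheet,
# because row=len(cells) + 1 and no header row is written.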
def reset_sheet(name):
    if name == 'Global':
        sheet_global.clear()
        gd.set_with_dataframe(sheet_global, global_final)
    if name == 'NJ':
        sheet_nj.clear()
        gd.set_with_dataframe(sheet_nj, sbc1_final)
    if name == 'CA':
        sheet_ca.clear()
        gd.set_with_dataframe(sheet_ca, caw1_final)
    if name == 'IL':
        sheet_ill.clear()
        gd.set_with_dataframe(sheet_ill, ill1_final)
    if name == 'VA':
        sheet_va.clear()
        gd.set_with_dataframe(sheet_va, vaw1_final)
    if name == 'Cash Flow':
        sheet_cashflow.clear()
        gd.set_with_dataframe(sheet_cashflow, queries.cash_flow)
config = configparser.ConfigParser()
config.read("../../config/" + sys.argv[1] + ".ini")
print(config["db"])

POSTGRES_ADDRESS = config["db"]["db_address"]
POSTGRES_PORT = config["db"]["db_port"]
POSTGRES_USERNAME = config["db"]["db_username"]
POSTGRES_PASSWORD = config["db"]["db_password"]
POSTGRES_DBNAME = config["db"]["db_name"]

postgres_str = ('postgresql://{username}:{password}@{ipaddress}:{port}/{dbname}'
                .format(username=POSTGRES_USERNAME,
                        password=POSTGRES_PASSWORD,
                        ipaddress=POSTGRES_ADDRESS,
                        port=POSTGRES_PORT,
                        dbname=POSTGRES_DBNAME))
cnx = create_engine(postgres_str)

scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('../../config/Connect-gsheet.json', scope)
gc = gs.authorize(credentials)

query = pd.read_sql_query(''' ''', cnx)  # SQL statement left blank in the original

ws1 = gc.open(" ").worksheet(" ")  # spreadsheet/worksheet names left blank in the original
ws1.clear()
set_with_dataframe(ws1, query)
def master_reset():
    sheet_global.clear()
    gd.set_with_dataframe(sheet_global, global_final)
    sheet_cashflow.clear()
    gd.set_with_dataframe(sheet_cashflow, queries.cash_flow)
    sheet_nj.clear()
    gd.set_with_dataframe(sheet_nj, sbc1_final)
    sheet_ca.clear()
    gd.set_with_dataframe(sheet_ca, caw1_final)
    sheet_ill.clear()
    gd.set_with_dataframe(sheet_ill, ill1_final)
    sheet_va.clear()
    gd.set_with_dataframe(sheet_va, vaw1_final)
# In[40]:
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from gspread_dataframe import set_with_dataframe
from gspread_dataframe import get_as_dataframe
from gspread_pandas import Spread
import sys, os, os.path
import csv
import certifi
import json

key_path = 'credentials.json'
json_key = json.load(open(key_path))
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/spreadsheets'
]
credentials = ServiceAccountCredentials.from_json_keyfile_name(key_path, scope)
gc = gspread.authorize(credentials)

wks = gc.open("Current_Accruals").worksheet('sheet1')
wks.clear()
set_with_dataframe(wks, df3, include_column_header=True, include_index=False)
# wks.insert_row('', index=1)
         perm_type='user',
         role='writer',
         notify=True,
         email_message=message)
ch.share('###',
         perm_type='user',
         role='writer',
         notify=True,
         email_message=message)
print('Spreadsheet shared with emails.')

create_spreadsheet_id = ch.id
print(ch.id)
spreadsheet_url = "https://docs.google.com/spreadsheets/d/%s" % ch.id
print(spreadsheet_url)

worksheet = ch.get_worksheet(0)
set_with_dataframe(worksheet, ns)
worksheet.update_acell('H1', 'Total People')
worksheet.update_acell('H2', '=iferror(counta(unique(C:C)))-1')
worksheet.update_acell('I1', 'Total Assessed')
worksheet.update_acell(
    'I2',
    '=COUNTIF(F:F,"1 - Strong Support") + COUNTIF(F:F,"2 - Lean Support") + COUNTIF(F:F,"3 - Undecided") + COUNTIF(F:F,"4 - Lean Opposed") + COUNTIF(F:F,"5 - Strong Opposed")'
)
worksheet.update_acell('G1', '')
worksheet.update_acell('G2', '')
worksheet.update_acell(
    'H4',
    '=HYPERLINK("###", "How to talk to your friends about Kim Coco!")'
)
fmt = cellFormat(textFormat=textFormat(
    bold=True,
def main():
    spreadsheet = get_spreadsheet()
    worksheet = get_worksheet(spreadsheet)
    csv = read_and_sanitise_csv()
    set_with_dataframe(worksheet, csv)
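# The helpers called by main() are not shown in this snippet; a hedged sketch of what they could
# look like, assuming service-account auth and placeholder names ('Data', 'Sheet1', 'data.csv')
# that are not from the original source.
import gspread
import pandas as pd

def get_spreadsheet():
    gc = gspread.service_account(filename='service_account.json')  # assumed credentials path
    return gc.open('Data')                                          # assumed spreadsheet name

def get_worksheet(spreadsheet):
    return spreadsheet.worksheet('Sheet1')                          # assumed worksheet name

def read_and_sanitise_csv():
    df = pd.read_csv('data.csv')                                    # assumed input file
    df.columns = [c.strip() for c in df.columns]                    # example sanitisation step
    return df.dropna(how='all')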
def pyToGspread():
    # update stk labels to 'filtered info'
    ws = spreadsheet.worksheet('filtered info')
    set_with_dataframe(ws, df, 1, 1, True, True)

    # update stk_info_df to worksheet "basic info"
    ws = spreadsheet.worksheet('basic info')
    set_with_dataframe(ws, stk_info_df, row=1, col=1, include_index=True, include_column_header=True)

    # update stk_price_df to "other info"
    ws = spreadsheet.worksheet('other info')
    set_with_dataframe(ws, stk_price_df, 1, 1, True, True)

    # update trend_df to "googleTrendsData"
    ws = spreadsheet.worksheet('googleTrendsData')
    set_with_dataframe(ws, trend_df, 1, 1, True, True)

    # update big_trader_df to "investorsHoldings"
    ws = spreadsheet.worksheet('investorsHoldings')
    set_with_dataframe(ws, big_trader_df, 1, 1, False, True)

    # update event_df to "economicEvents"
    ws = spreadsheet.worksheet('economicEvents')
    set_with_dataframe(ws, event_df, 1, 2, False, True)
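# For reference, the positional arguments in the calls above map to set_with_dataframe's
# row, col, include_index and include_column_header parameters, so the first call could be
# written with explicit keywords (same ws and df objects as above):
# set_with_dataframe(ws, df, row=1, col=1, include_index=True, include_column_header=True)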
def test_dataframe_formatter(self):
    rows = [{
        'i': i,
        'j': i * 2,
        'A': 'Label ' + str(i),
        'B': i * 100 + 2.34,
        'C': date(2019, 3, i % 31 + 1),
        'D': datetime(2019, 3, i % 31 + 1, i % 24, i % 60, i % 60),
        'E': i * 1000 + 7.8001,
    } for i in range(200)]
    df = pd.DataFrame.from_records(rows, index=['i', 'j'])
    set_with_dataframe(self.sheet, df, include_index=True)
    format_with_dataframe(
        self.sheet,
        df,
        formatter=BasicFormatter.with_defaults(
            freeze_headers=True,
            column_formats={
                'C': cellFormat(numberFormat=numberFormat(type='DATE', pattern='yyyy mmmmmm dd'),
                                horizontalAlignment='CENTER'),
                'E': cellFormat(numberFormat=numberFormat(
                                    type='NUMBER',
                                    pattern='[Color23][>40000]"HIGH";[Color43][<=10000]"LOW";0000'),
                                horizontalAlignment='CENTER')
            }),
        include_index=True,
    )
    for cell_range, expected_uef in [
            ('A2:A201', cellFormat(numberFormat=numberFormat(type='NUMBER'), horizontalAlignment='RIGHT')),
            ('B2:B201', cellFormat(numberFormat=numberFormat(type='NUMBER'), horizontalAlignment='RIGHT')),
            ('C2:C201', cellFormat(horizontalAlignment='CENTER')),
            ('D2:D201', cellFormat(numberFormat=numberFormat(type='NUMBER'), horizontalAlignment='RIGHT')),
            ('E2:E201', cellFormat(numberFormat=numberFormat(type='DATE', pattern='yyyy mmmmmm dd'),
                                   horizontalAlignment='CENTER')),
            ('F2:F201', cellFormat(numberFormat=numberFormat(type='DATE'), horizontalAlignment='CENTER')),
            ('G2:G201', cellFormat(numberFormat=numberFormat(
                            type='NUMBER',
                            pattern='[Color23][>40000]"HIGH";[Color43][<=10000]"LOW";0000'),
                        horizontalAlignment='CENTER')),
            ('A1:B201', cellFormat(backgroundColor=DEFAULT_HEADER_BACKGROUND_COLOR,
                                   textFormat=textFormat(bold=True))),
            ('A1:G1', cellFormat(backgroundColor=DEFAULT_HEADER_BACKGROUND_COLOR,
                                 textFormat=textFormat(bold=True)))
    ]:
        start_cell, end_cell = cell_range.split(':')
        for cell in (start_cell, end_cell):
            actual_uef = get_user_entered_format(self.sheet, cell)
            # actual_uef must be a superset of expected_uef
            self.assertTrue(
                actual_uef & expected_uef == expected_uef,
                "%s range expected format %s, got %s" % (cell_range, expected_uef, actual_uef))
    self.assertEqual(1, get_frozen_row_count(self.sheet))
    self.assertEqual(2, get_frozen_column_count(self.sheet))
]
path = os.path.dirname(__file__)
json = os.path.join(path, 'my-project-1550060360364-f758ca00dc50.json')
credentials = ServiceAccountCredentials.from_json_keyfile_name(json, scope)
gc = gspread.authorize(credentials)

spreadsheetUrl = 'https://docs.google.com/spreadsheets/d/1-rYC8_1fpnJIC7G1pr3fbLolGledGn3Cb_OzwEmaUWU'
doc = gc.open_by_url(spreadsheetUrl)

# upload result_df to the periodPrice sheet
worksheet = doc.worksheet('periodPrice')
gd.set_with_dataframe(worksheet, result_df, include_index=False)

# update rows on the periodPortValue sheet
worksheet = doc.worksheet('periodPortValue')
sheet_id = worksheet._properties['sheetId']
last_row = len(list(filter(None, worksheet.col_values(1)))) + 1
last_col = worksheet.col_count
last_date = worksheet.cell(last_row, 1).value
last_date_index = result_df.loc[result_df.DATE == last_date].index[0]

# an error occurs when updating more than 250 trading days at once
total_date_no = len(result_df.index)
n = total_date_no - (last_date_index + 1)
if n > 0:
    body = {
L2_count_df = L1_L2_overall_df.groupby(['L1', 'L2'], as_index=False).count()
L2_count_df.columns = ['L1', 'L2', 'COUNT of L2']
L2_count_df = L2_count_df.sort_values(by=['L1', 'COUNT of L2'], ascending=[True, False], ignore_index=True)
pivot_table_df = pd.concat([L1_count_df, L2_count_df], axis=1)
try:
    sheet.add_worksheet(rows=1000, cols=100, title='Pivot Table')
except:
    pivot_table = client.open(google_spreadsheet).worksheet('Pivot Table')
pivot_table = client.open(google_spreadsheet).worksheet('Pivot Table')
set_with_dataframe(pivot_table, pivot_table_df)
df.columns

#%%
imputed_sex_df = pd.DataFrame(imputed_sex_list)
imputed_sex_df = imputed_sex_df.set_index('sample_id')
imputed_sex_df.columns

assert set(imputed_sex_df.index) == set(df.sample_id), set(imputed_sex_df.index) - set(df.sample_id)

df['imputed sex'] = imputed_sex_df['imputed sex']
df['weighted_sex_gene_expression'] = imputed_sex_df['weighted_sex_gene_expression']
df.columns

#%%
# export joined data to SEQR_INFO_AND_OTHER_METADATA_WORKSHEET
ws = get_seqr_info_and_other_metadata_worksheet()
set_with_dataframe(ws, df.fillna(''), resize=True)
print("Updated", ws.title)

#%%
print("---------------")
for v in imputed_sex_doesnt_match_list:
    print(v)
from pprint import pprint
import pandas as pd
from gspread_dataframe import set_with_dataframe

scope = ['https://www.googleapis.com/auth/spreadsheets',
         "https://www.googleapis.com/auth/drive.file",
         'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('creds.json', scope)
client = gspread.authorize(creds)
sheets = client.open("Data TEST")
ws = sheets.worksheet("Reto1")
data = ws.get_all_records()
df = pd.DataFrame(data)

# Only needed on the first execution, because it creates the worksheet
# salida = sheets.add_worksheet(title="SalidaReto1", rows=100, cols=20)
salida2 = sheets.worksheet('SalidaReto1')

# aggfunc always returns True (any non-empty string is truthy), so the pivot marks presence
pt = df.reset_index().pivot_table(values=['Country'], columns=[df.Country], index=['Author', 'Sentiment'],
                                  aggfunc=lambda x: bool('TRUE'), fill_value='False')
pt2 = df.reset_index().pivot_table(values=['Theme'], columns=[df.Theme], index=['Author', 'Sentiment'],
                                   aggfunc=lambda x: bool('TRUE'), fill_value='False')
pt_concat = pd.concat([pt, pt2], axis=1, join="inner")

set_with_dataframe(salida2, pt_concat, include_index=True)
pprint(pt_concat)
def create_google_spreadsheet(program, df, share_with):
    params_data = {'PARAMETER': ['program_uid', 'metadata_version', 'server_url', 'orgUnit_uid', 'orgUnit_level',
                                 'ignore_validation_errors', 'start_date', 'end_date', 'chunk_size'],
                   'VALUE': [program['id'], program['version'], '', '', 4, 'FALSE', '', '', 50],
                   'NOTE': ['', 'Metadata version for this program', 'E.g. https://who-dev.dhis2.org/dev',
                            'if empty, uses all org units assigned to the program', 'default = 4, facility',
                            'true/false', 'dates in the form YYYY-MM-DD', 'default = today',
                            'maximum number of TEIs to include in the payload when POST to server']}
    df_params = pd.DataFrame(params_data)
    number_replicas_data = {'PRIMAL_ID': ['TEI_1', 'TEI_2', 'TEI_3', 'TEI_4', 'TEI_5'],
                            'NUMBER': ['50', '50', '50', '50', '50']}
    df_number_replicas = pd.DataFrame(number_replicas_data)
    sh_name = program['name']
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    google_spreadshseet_credentials = 'dummy-data-297922-97b90db83bdc.json'
    try:
        f = open(google_spreadshseet_credentials)
    except IOError:
        print("Please provide file with google spreadsheet credentials")
        exit(1)
    else:
        credentials = ServiceAccountCredentials.from_json_keyfile_name(google_spreadshseet_credentials, scope)
        try:
            gc = gspread.authorize(credentials)
            mode = 'update'
            try:
                sh = gc.open(sh_name)
            except gspread.SpreadsheetNotFound:
                mode = 'create'
                sh = gc.create(sh_name)
                pass
            sh.share('*****@*****.**', perm_type='user', role='writer')
            # sh.share('*****@*****.**', perm_type='user', role='writer')
            # sh.share('*****@*****.**', perm_type='user', role='writer')
            if share_with is not None:
                for email in share_with:
                    sh.share(email[0], perm_type='user', role='writer')
            if mode == 'create' or not sh.worksheet('DUMMY_DATA'):
                wks_dd = sh.sheet1
                wks_dd.update_title('DUMMY_DATA')
            else:
                wks_dd = sh.worksheet('DUMMY_DATA')
            if mode == 'create' or not sh.worksheet('PARAMETERS'):
                wks_params = sh.add_worksheet(title="PARAMETERS", rows=df_params.shape[0], cols=df_params.shape[1])
            else:
                wks_params = sh.worksheet('PARAMETERS')
            if mode == 'create' or not sh.worksheet('NUMBER_REPLICAS'):
                wks_number_replicas = sh.add_worksheet(title="NUMBER_REPLICAS", rows=df_number_replicas.shape[0],
                                                       cols=df_number_replicas.shape[1])
            else:
                wks_number_replicas = sh.worksheet('NUMBER_REPLICAS')
            tmp_df = df.copy()
            for tei_col in range(1, 6):
                tmp_df['TEI_' + str(tei_col)] = ''
            set_with_dataframe(wks_dd, tmp_df)
            # wks_dd.add_protected_range('A1:G' + str(df.shape[0] + 2))
            wks_dd.freeze(cols=7)
            del tmp_df
            # wks_params = sh.add_worksheet(title="PARAMETERS", rows=df_params.shape[0], cols=df_params.shape[1])
            # wks_dd.add_protected_range('B2:B3')
            set_with_dataframe(wks_params, df_params)
            set_column_widths(wks_params, [('A', 200), ('B:', 100), ('C:', 600)])
            # wks_number_replicas = sh.add_worksheet(title="NUMBER_REPLICAS", rows=df_number_replicas.shape[0],
            #                                        cols=df_number_replicas.shape[1])
            set_with_dataframe(wks_number_replicas, df_number_replicas)
            set_column_widths(wks_number_replicas, [('A', 100), ('B:', 100)])
            # Add conditional format: rows where the Mandatory column (column G) = TRUE should have bold text
            rule = ConditionalFormatRule(
                ranges=[GridRange.from_a1_range('G1:G2000', wks_dd)],
                booleanRule=BooleanRule(
                    condition=BooleanCondition('TEXT_EQ', ['TRUE']),
                    format=CellFormat(textFormat=TextFormat(bold=True))
                )
            )
            rules = get_conditional_format_rules(wks_dd)
            # rules.clear()
            rules.append(rule)
            rules.save()
            batch = batch_updater(sh)
            # Add header formatting
            header = chr(65) + str(1) + ':' + chr(65 + df.shape[1] - 1) + str(1)
            batch.format_cell_range(wks_dd, header, CellFormat(
                backgroundColor=Color(0.40, 0.65, 1),
                textFormat=TextFormat(bold=True, foregroundColor=Color(1, 1, 1), fontSize=11),
                horizontalAlignment='CENTER'
            ))
            # Add alternating row formatting
            for i in range(3, df.shape[0], 2):
                even_row = chr(65) + str(i) + ':' + chr(65 + df.shape[1] - 1) + str(i)
                batch.format_cell_range(wks_dd, even_row, CellFormat(
                    backgroundColor=Color(0.90, 0.95, 1)
                ))
            b = Border("SOLID_THICK", Color(0, 0, 0))
            # Add a border at the start of each stage
            stage_indexes = df.index[df['Stage'] != ''].tolist()
            for i in stage_indexes:
                stage_row = chr(65) + str(i + 2) + ':' + chr(65 + df.shape[1] - 1) + str(i + 2)
                batch.format_cell_range(wks_dd, stage_row, CellFormat(borders=Borders(top=b)))
            # Apply formatting to the spreadsheet
            batch.execute()
        except Exception as e:
            logger.error(str(e))
            return ""
        else:
            spreadsheet_url = "https://docs.google.com/spreadsheets/d/%s" % sh.id
            return spreadsheet_url
database_name = getstat.dbize(folder_name)
save_name = getstat.scrub(client_name)
con = sqlite3.connect(os.path.join(root, folder_name, database_name))
cur = con.cursor()
print(f"Starting {client_name}...")

sql = f"SELECT * FROM Ctr_{save_name} LIMIT 20"
df = pd.read_sql(sql, con=con)

client = gspread.authorize(creds)
sheet = client.open_by_key(gspread_id)
try:
    worksheet = sheet.worksheet(f"{client_name} - CTR Analysis")
    worksheet.clear()
except gspread.exceptions.WorksheetNotFound as err:
    worksheet = sheet.add_worksheet(title=f"{client_name} - CTR Analysis", rows=1, cols=1)
set_with_dataframe(worksheet, df)
print(f"{client_name} complete!")
print(f"\n----------------------\n")
print('DURN!')

# %%
# and parse stuff
for i in df_corrected_final.index:
    df_corrected_final.at[i, "Work Type Level 2"] = " "
    str_list = df_corrected_final.at[i, "Work Location"].split("-")
    str_list2 = str_list[1].split(")")
    df_corrected_final.at[i, "Work Location"] = str_list2[0]
    req_finder(df_corrected_final['Requisition#'][i], reqObj_list, driver, wait)

for j in df_corrected_final.index:
    for k in range(len(reqObj_list)):
        if str(df_corrected_final.at[j, "Requisition#"]) == str(reqObj_list[k].ID):
            df_corrected_final.at[j, "Min Pay Rates"] = reqObj_list[k].minPay
            df_corrected_final.at[j, "Work Type Level 2"] = reqObj_list[k].maxPay
            df_corrected_final.at[j, "col4"] = reqObj_list[k].PC

# if dataframe is non-empty, paste dataframes for newly added requisitions
# if dataframe is empty, print 'Not appending any jobs to spreadsheet.'
if df_corrected_final.empty:
    print('Not appending any jobs to spreadsheet.')
else:
    print('Appending jobs to spreadsheet...')
    # print(df_corrected_final)
    set_with_dataframe(wkst_open, date_and_google, row=next_row, col=1, include_index=False,
                       include_column_header=False, resize=False, allow_formulas=False)
    set_with_dataframe(wkst_open, df_corrected_final, row=next_row, col=3, include_index=False,
                       include_column_header=False, resize=False, allow_formulas=False)
    set_with_dataframe(wkst_open, work_type_label, row=next_row, col=19, include_index=False,
                       include_column_header=False, resize=False, allow_formulas=False)

sheet_updater(raw_open_data, raw_smarttrack_data)
req_formatter(reqObj_list)

# delete old requisition list .csv file
os.remove("C:/Users/tzeid/Downloads/RequirementsForSupplierList.csv")