def uploadtosheet():
    """Upload ``xyz.csv`` into the 'Example Spreadsheet' Google Sheet.

    Reads the local CSV into a DataFrame, writes it to the 'SDSheet'
    worksheet (created on demand), then demonstrates instantiating a
    shared ``Client`` to open every spreadsheet found in 'example dir'.

    Returns:
        list: ``Spread`` objects, one per spreadsheet found in
        'example dir' (empty if the folder has no sheets). The original
        built this list but never returned it.
    """
    file_name = "xyz.csv"
    df = pd.read_csv(file_name)

    # 'Example Spreadsheet' needs to already exist and your user must have
    # access to it. This will ask to authenticate if you haven't done so
    # before.
    spread = Spread('Example Spreadsheet')

    # Display available worksheets. The original had a bare `spread.sheets`
    # expression here, which evaluates the property but shows nothing in a
    # script — print it so the "display" intent actually happens.
    print(spread.sheets)

    # Save DataFrame to worksheet 'SDSheet', create it first if it doesn't
    # exist.
    spread.df_to_sheet(df, index=False, sheet='SDSheet', start='A2',
                       replace=True)
    print(spread)
    # <gspread_pandas.client.Spread - User: '******',
    #  Spread: 'Example Spreadsheet', Sheet: 'New Test Sheet'>

    # You can also instantiate a Client separately, query folders, and
    # create other Spread objects by passing in the Client.
    client = Client()

    # Assuming you have a dir called 'example dir' with sheets in it.
    available_sheets = client.find_spreadsheet_files_in_folders('example dir')
    spreads = [
        Spread(sheet['id'], client=client)
        for sheet in available_sheets.get('example dir', [])
    ]
    return spreads
def move_dashboards():
    """Move every dashboard linked from the 'Dashboard' sheet to '/New Dashboards'.

    Reads the 'Dashboard' worksheet of a hard-coded spreadsheet, extracts
    the spreadsheet id from each '.../d/<id>/...' link in the
    'Link dashboard' column, and moves each file. Failures are logged and
    skipped so one bad link does not abort the batch.
    """
    client = Client()

    # Get dashboard links from the 'Dashboard' worksheet as a DataFrame.
    dashboard_links_sheet = Spread(
        spread='1geNkTULCutp7PgcqiuMKaNkH7Ynp1nGu3oX1NqoXHBA',
        client=client,
        sheet='Dashboard').sheet_to_df()

    # Pull the file id out of each Drive URL ('/d/<id>/'); dedupe.
    sheet_ids = dashboard_links_sheet['Link dashboard'].str.extract(
        r'/d/([^/]+)', expand=False).unique().tolist()

    for sheet_id in sheet_ids:
        try:
            client.move_file(sheet_id, '/New Dashboards')
        except Exception as e:
            # Best-effort batch: report the failing id AND the exception
            # (the original captured `e` but never showed it), then continue.
            print('Error with sheet id:' + str(sheet_id))
            print(e)
def sync(self):
    """Write every survey worksheet DataFrame into the Google Sheets workbook.

    Creates missing worksheets on the fly and replaces existing contents.
    """
    logger.info(f"Synchronizing Workbook: {self.workbook_id}")

    gclient = Client(creds=self.credentials)
    workbook = Spread(self.workbook_id, create_sheet=True, client=gclient)

    # self.survey.worksheets yields (name, DataFrame) pairs.
    for name, frame in self.survey.worksheets:
        logger.info(f"Updating Google Sheet: {name}")
        workbook.df_to_sheet(frame, index=False, sheet=name, replace=True)
        logger.success(f"Google Sheet updated: {name}")

    logger.success(f"Workbook synchronized: {self.workbook_id}")
def betamax_client_bad_scope(request, set_test_config):
    """Build a recorded gspread_pandas Client for tests exercising scope handling.

    Creates an AuthorizedSession from the configured credentials, refreshes
    it for real when recording (pytest.RECORD) or stubs in a dummy token for
    playback, wires up the betamax recorder, and attaches session, recorder,
    and client to the requesting test class.
    """
    cassette = _get_cassette_name(request)

    scopes = [
        "https://www.googleapis.com/auth/spreadsheets",
        "https://www.googleapis.com/auth/drive",
    ]
    session = AuthorizedSession(conf.get_creds(scope=scopes))

    if pytest.RECORD:
        # Live recording run: obtain a real access token.
        session.credentials.refresh(session._auth_request)
    else:
        # Playback run: any token works, cassettes answer the requests.
        session.credentials.token = pytest.DUMMY_TOKEN

    recorder = _set_up_recorder(session, request, cassette)
    gs_client = Client(session=session)

    request.cls.session = session
    request.cls.recorder = recorder
    request.cls.client = gs_client
    return gs_client
# _df = df.loc[:, cols].copy() # _df.columns = map(lambda x: x.replace(' ', '_').lower(), _df.columns) # for col in ['revenue', 'cost', 'profit']: # _df[col] = pd.to_numeric(_df[col].astype( # 'str').str.replace(',', ''), errors='coerce').fillna(0) # return _df def df_to_postgres(df, table_obj): with connect_postgres(conf_file='scripts/conf.json', workspace='dashboard_digital_ocean') as conn: DataFrameCopy(df, conn=conn, table_obj=table_obj).copy() if __name__ == '__main__': client = Client() projects = client.list_spreadsheet_files_in_folder( '1jQRJDeB369tnTVckeD-dueWOxtyReOy2') successes = {} failures = {} s = get_raw_data_sheet_to_df( spreadsheet='1OeKURzKKtZ8qKuhvUkKVK2VmP6dvitGPUiveTtgjVkY', client=client, cols_to_filter=[ 'Charge Code', 'Client', 'Function', 'Date', 'Channel', 'Revenue', 'Cost', 'Profit' ]) for project in projects: try: # print(' '.join(['Getting data for project ', project.get(
def __init__(self, workbook_name):
    """Open *workbook_name* using credentials loaded from CREDIT_FILE.

    Args:
        workbook_name: name (or id) of the Google Sheets workbook to open.
    """
    # Build the client from the JSON credentials file, then open the workbook.
    with open(CREDIT_FILE, 'r') as fh:
        creds_config = json.load(fh)
    self.gspread_client = Client(config=creds_config)
    self.workbook = Spread(workbook_name, client=self.gspread_client)
print(f'Shared {sheet_id} to service account.') except Exception as e: print(f'Failed to share {sheet_id} to service account.') print(e) if __name__ == '__main__': print('Starting...') CONNECTION_STRING = os.environ['PMAX_AWS_CONNECTION_STRING'] UPDATE_MAIN = True UPDATE_SUB = False MAX_RETRIES = 2 engine = create_engine(CONNECTION_STRING) conn = engine.connect() client = Client() extract_cols = [ 'Charge Code', 'Client', 'Function', 'Date', 'Channel', 'Revenue', 'Cost', 'Profit' ] creative_cols = ['Charge Code', 'Client', 'date', 'revenue', 'cost', 'profit', 'creative - type', 'creative - source', 'channel'] planning_cols = [ 'Charge Code', 'Client', 'Function', 'Date', 'Channel', 'Revenue', 'Cost', 'Profit', 'Subchannel' ] # # Update all dashboards: # SCOPES = ['https://www.googleapis.com/auth/drive.readonly.metadata'] # store = file.Storage('storage.json')
"""Functions for uploading data to the shared Google Drive analysis folder."""
from pathlib import Path

import gspread
from gspread_pandas import Spread, Client

# Connect to Drive account for Gesenius Project: a raw gspread client plus a
# gspread_pandas Client (both use the default service-account / user creds).
gc = gspread.service_account()
client = Client()


def get_existing_sheet(foldername, filename):
    """Retrieve existing Drive sheet if it exists.

    Args:
        foldername: str of the folder name
        filename: str of the filename

    Returns:
        if sheet exists, dict of sheet data with keys: id, name, path;
        otherwise returns None
    """
    # NB: avoid double-naming folders in the drive
    # since it is not possible to provide a full path.
    # NOTE(review): only the not-found early return is visible in this chunk;
    # the actual filename lookup inside the matched folder(s) appears to
    # continue beyond this excerpt — confirm against the full file.
    folders = client.find_folders(foldername)
    if not folders:
        return None
def betamax_client(request, betamax_authorizedsession):
    """Fixture: attach a Client backed by the recorded session to the test class."""
    gs_client = Client(session=betamax_authorizedsession)
    request.cls.client = gs_client
    return gs_client
def betamax_client(request, betamax_authorizedsession):
    """Fixture: attach a Client (with directory loading) to the test class.

    Same as the plain betamax_client fixture, but passes load_dirs=True so
    the Client also loads the Drive folder structure.
    """
    gs_client = Client(session=betamax_authorizedsession, load_dirs=True)
    request.cls.client = gs_client
    return gs_client
import pandas as pd
from gspread_pandas import Spread, Client

from . import configure
from .models import Block
from .readers import ProgramReader
from .services import SessionGenerator

# Shared Google Sheets client used by the reader and the generator below.
client = Client()
program_reader = ProgramReader(client)

# TODO: clarify naming convention e.g. training/block/sessions
# Script parameters: which template, program, and block to generate.
session_template_name = 'session_template'
program_name = 'powerbuilding1'
block_number = 2

# Load the program and the requested training block under Training/Blocks.
program = program_reader.read(program_name)
block = Block(block_number, program, root_path='Training/Blocks')

# Open the session template spreadsheet and generate the block's sessions.
# NOTE(review): SessionGenerator/Block semantics are project-local — not
# visible from this chunk.
session_template = client.open(session_template_name)
session_generator = SessionGenerator(client, block, session_template)
session_generator.generate_block()
import matplotlib.pyplot as plt
import gspread_pandas
from gspread_pandas import Spread, Client
import json
import inquirer
from turnips import archipelago

"""
Script to extract turnip info from a google spreadsheet, transform it and
then upload the results to google sheets.
"""

# Load OAuth client config for gspread_pandas from the local client_id.json.
gSpreadConfig = gspread_pandas.conf.get_config(conf_dir="./",
                                               file_name="client_id.json")
gSpreadClient = Client('naboHolding', config=gSpreadConfig)

# List every spreadsheet in the 'naboHolding' folder and collect the names
# to offer as an interactive choice. (Comprehension replaces the original
# append loop; duplicate `import json` removed.)
naboHoldingListFiles = gSpreadClient.list_spreadsheet_files('naboHolding')
naboFileNames = [entry['name'] for entry in naboHoldingListFiles]
print(naboFileNames)

questions = [
    inquirer.List('naboFile',
                  message="Which file to work:",
                  choices=naboFileNames,
                  default=naboFileNames[0])
]