def multifolderclone(source=None, dest=None, path='accounts', width=2, thread_count=None):
    """Clone the Drive folder tree `source` into `dest` using a pool of
    service accounts.

    Args:
        source: File ID of the folder to copy from.
        dest: File ID of the folder to copy into.
        path: Directory containing service-account ``.json`` key files.
        width: Tree-view width forwarded to ``rcopy``.
        thread_count: Optional cap on concurrent workers; defaults to one
            worker per service account.
    """
    global account_count
    global drive
    global threads
    stt = time.time()
    accounts = glob(path + '/*.json')
    # BUG FIX: guard against an empty accounts directory; accounts[0]
    # below would otherwise raise IndexError.
    if not accounts:
        print('No service account files found in %s.' % path)
        exit(1)
    check = build("drive", "v3",
                  credentials=Credentials.from_service_account_file(
                      accounts[0]))
    try:
        root_dir = check.files().get(
            fileId=source, supportsAllDrives=True).execute()['name']
    except HttpError:
        print('Source folder cannot be read or is invalid.')
        # BUG FIX: exit(0) signalled success on failure; use non-zero.
        exit(1)
    try:
        dest_dir = check.files().get(
            fileId=dest, supportsAllDrives=True).execute()['name']
    except HttpError:
        print('Destination folder cannot be read or is invalid.')
        exit(1)
    print('Copy from ' + root_dir + ' to ' + dest_dir + '.')
    print('View set to tree (' + str(width) + ').')
    print("Creating %d Drive Services" % len(accounts))
    for account in accounts:
        account_count += 1
        credentials = Credentials.from_service_account_file(
            account, scopes=["https://www.googleapis.com/auth/drive"])
        drive.append(build("drive", "v3", credentials=credentials))
    if thread_count is not None and thread_count <= account_count:
        threads = threading.BoundedSemaphore(thread_count)
        worker_count = thread_count
    else:
        threads = threading.BoundedSemaphore(account_count)
        worker_count = account_count
    # BUG FIX: previously always reported account_count even when
    # thread_count capped the semaphore.
    print('BoundedSemaphore with %d threads' % worker_count)
    try:
        rcopy(drive, 1, source, dest, root_dir, "", width)
    except KeyboardInterrupt:
        # A user-requested stop is a clean exit.
        print('Quitting')
        exit(0)
    print('Complete.')
    hours, rem = divmod((time.time() - stt), 3600)
    minutes, sec = divmod(rem, 60)
    print("Elapsed Time:\n{:0>2}:{:0>2}:{:05.2f}".format(
        int(hours), int(minutes), sec))
def clone(self):
    """Copy the source folder tree into every valid destination folder.

    Builds one Drive service per service-account key file found in
    ``self.path``, validates the source and each destination ID, then
    runs ``self._rcopy`` once per destination.

    Raises:
        ValueError: fewer than two account key files, an unreadable
            source folder, a bad destination (unless
            ``self.skip_bad_dests``), or more threads than accounts.
    """
    accounts = glob(self.path + '/*.json')
    # At least two key files are required by the copy strategy.
    if len(accounts) < 2:
        raise ValueError('The path provided (%s) has 1 or no accounts.' % self.path)
    # One throwaway service, built from the first key, is used purely to
    # validate the folder IDs before the real workers are created.
    check = build('drive', 'v3',
                  credentials=Credentials.from_service_account_file(
                      accounts[0]))
    try:
        root_dir = check.files().get(
            fileId=self.source, supportsAllDrives=True).execute()['name']
    except HttpError:
        raise ValueError('Source folder %s cannot be read or is invalid.' % self.source)
    # Map destination ID -> destination folder name; bad IDs are either
    # fatal or silently dropped depending on skip_bad_dests.
    dest_dict = {i: '' for i in self.dest}
    # Iterate over a copy of the keys because entries may be popped.
    for key in list(dest_dict.keys()):
        try:
            dest_dir = check.files().get(
                fileId=key, supportsAllDrives=True).execute()['name']
            dest_dict[key] = dest_dir
        except HttpError:
            if not self.skip_bad_dests:
                raise ValueError(
                    'Destination folder %s cannot be read or is invalid.' % key)
            else:
                dest_dict.pop(key)
    print('Creating %d Drive Services' % len(accounts))
    drive = []
    for account in accounts:
        credentials = Credentials.from_service_account_file(
            account, scopes=['https://www.googleapis.com/auth/drive'])
        drive.append(build('drive', 'v3', credentials=credentials))
    # Size the worker semaphore: an explicit thread_count must not exceed
    # the number of services; None means one slot per service.
    if self.thread_count is not None and self.thread_count <= len(drive):
        self.threads = threading.BoundedSemaphore(self.thread_count)
        print('BoundedSemaphore with %d threads' % self.thread_count)
    elif self.thread_count is None:
        self.threads = threading.BoundedSemaphore(len(drive))
        print('BoundedSemaphore with %d threads' % len(drive))
    else:
        raise ValueError('More threads than there is service accounts.')
    # Perform one recursive copy per validated destination.
    for i, dest_dir in dest_dict.items():
        print('Copying from %s to %s.' % (root_dir, dest_dir))
        self._rcopy(drive, 1, self.source, i, root_dir, '', self.width)
def CredentialsServiceWrapper(service):
    """Normalize `service` into service-account credentials.

    Accepts an already-parsed key dict, a raw JSON string matching
    RE_CREDENTIALS_JSON, or a filesystem path to a key file.
    """
    if isinstance(service, dict):
        # Already-parsed key material.
        return CredentialsService.from_service_account_info(service)
    if RE_CREDENTIALS_JSON.match(service):
        # Inline JSON text rather than a path on disk.
        parsed = json.loads(service)
        return CredentialsService.from_service_account_info(parsed)
    # Fall back to treating the argument as a key-file path.
    return CredentialsService.from_service_account_file(service)
def __init__(self, service_file, scopes, manualScopes=None, domainWide=True, *args, **kwargs):
    """Acquire Google credentials for the requested scopes.

    Args:
        service_file: Path to a service-account JSON key file, an
            already-parsed key dict, or None to fall back to
            Application Default Credentials.
        scopes: Scope identifiers used to look up the unlocked APIs.
        manualScopes: Optional extra scope URLs merged into the final set.
        domainWide: Whether domain-wide delegation is intended.
    """
    # Load valid APIs unlocked with the scopes
    self._loadApiNames(scopes)
    # BUG FIX: manualScopes previously defaulted to a shared mutable
    # list ([]); use None as the default and materialize per call.
    if manualScopes is None:
        manualScopes = []
    # Save the de-duplicated union of API scopes and manual extras
    self.SCOPES = list(
        set([x['scope'] for x in self.apis.values()] + manualScopes))
    # Set domain wide delegation flag
    self.__domWide = domainWide
    # Acquire credentials from JSON keyfile (path or parsed info dict)
    if service_file is not None:
        if isinstance(service_file, six.string_types):
            self._credentials = Credentials.from_service_account_file(
                service_file,
                scopes=self.SCOPES,
            )
        else:
            self._credentials = Credentials.from_service_account_info(
                service_file,
                scopes=self.SCOPES,
            )
        self.projectId = self._credentials.project_id
    else:
        # No keyfile given: use Application Default Credentials.
        self._credentials, self.projectId = google.auth.default()
        self._credentials = self._credentials.with_scopes(self.SCOPES)
    logger.debug("Credentials acquired")
def get_vision_data(request, image_data):
    """Run face detection and 1:1 crop-hint analysis on an image.

    Builds a Vision client per call; if the Pyramid setting
    'vision2.service_key_path' is present it is used as the
    service-account key, otherwise default credentials apply.

    :param request: Pyramid request object
    :param image_data: File stream
    :return: (face_detection_data, cropping_data) tuple
    """
    key_path = request.registry.settings.get('vision2.service_key_path', None)
    if key_path is None:
        client = vision.ImageAnnotatorClient()
    else:
        key_creds = Credentials.from_service_account_file(key_path)
        client = vision.ImageAnnotatorClient(credentials=key_creds)
    # Rewind in case the stream has already been read upstream.
    image_data.seek(0)
    image = types.Image(content=image_data.read())
    return (_detect_faces(client, image),
            _detect_cropping(client, image))
def get_data_google_sheets(sample_spreadsheet_id, tab_index):
    """Fetch one worksheet of a spreadsheet as a DataFrame.

    The first spreadsheet row becomes the column header.
    """
    # Authenticate with the bundled service-account key for Sheets + Drive.
    creds = Credentials.from_service_account_file(
        'asistenpengkom-1dfb14ae8f40.json',
        scopes=[
            'https://www.googleapis.com/auth/spreadsheets',
            'https://www.googleapis.com/auth/drive'
        ]
    )
    # Authorize and open the requested spreadsheet by key.
    book = gspread.authorize(creds).open_by_key(sample_spreadsheet_id)
    # Pull every cell value from the selected tab.
    values = book.get_worksheet(tab_index).get_all_values()
    frame = pd.DataFrame(values)
    # Promote the first row to column labels, then drop it from the data.
    frame.columns = frame.iloc[0]
    frame.drop(frame.index[0], inplace=True)
    return frame
def create_intent(project_id, display_name, answer, questions):
    """Create a Dialogflow intent from a list of training questions.

    Args:
        project_id: Dialogflow agent's GCP project id.
        display_name: Name shown for the new intent.
        answer: Text response the intent replies with.
        questions: Iterable of training-phrase strings.

    Returns:
        The created Intent object.
    """
    credentials = Credentials.from_service_account_file(
        "google-credentials.json")
    intents_client = dialogflow.IntentsClient(credentials=credentials)
    parent = intents_client.project_agent_path(project_id)
    # One training phrase (with a single text part) per question.
    training_phrases = []
    for question in questions:
        part = dialogflow.types.Intent.TrainingPhrase.Part(text=question)
        training_phrase = dialogflow.types.Intent.TrainingPhrase(parts=[part])
        training_phrases.append(training_phrase)
    text = dialogflow.types.Intent.Message.Text(text=answer)
    message = dialogflow.types.Intent.Message(text=text)
    intent = dialogflow.types.Intent(
        display_name=display_name,
        training_phrases=training_phrases,
        messages=[message])
    # BUG FIX: the API response was assigned to an unused local and
    # discarded; return it so callers can inspect the created intent.
    return intents_client.create_intent(parent, intent)
def __init__(self, **kwargs):
    """Initialize app state, theming, and the Google Sheets backend."""
    # Material theme configuration.
    self.theme_cls.theme_style = "Light"
    self.theme_cls.primary_palette = "DeepPurple"
    # Shopping-list state: current items, their widgets, and card lookup.
    self.shopping_list = []
    self.button_list = []
    self.card_dict = {}
    self.num = 0
    # Pending credential/view values entered through the UI.
    self.new_password = ''
    self.view_list = ''
    self.shopping_list_widgets = []
    # Lists grouped by lifecycle stage (submitted vs. finished).
    self.submitted_lists = {}
    self.finished_lists = {}
    # Widget registries keyed per screen so they can be cleared/rebuilt.
    self.user_data_widgets = {}
    self.view_list_widgets = {}
    self.last_screen = ''
    # UI mode flags toggled while browsing a list.
    self.delete_button = False
    self.price_button = False
    self.popup = False
    self.current_list = ''
    # Logged-in user's credentials (filled after login).
    self.user_data_username = ''
    self.user_data_password = ''
    self.nav_drawer = False
    # Authorize gspread once at startup and keep the spreadsheet handle.
    scopes = ["https://spreadsheets.google.com/feeds",
              'https://www.googleapis.com/auth/spreadsheets',
              "https://www.googleapis.com/auth/drive.file",
              "https://www.googleapis.com/auth/drive"]
    creds = Credentials.from_service_account_file('creds.json', scopes=scopes)
    client = gspread.authorize(creds)
    self.spreadsheet = client.open('wtf_mercado')
    super().__init__(**kwargs)
def __init__(self, bot):
    """Set up the cog: Cloud Translate client, language cache, HTTP session."""
    self.bot = bot
    key_file = f"{HOME_DIR}/authentication/{cloud_creds}"
    credentials = Credentials.from_service_account_file(key_file)
    self.translator = translate.Client(credentials=credentials)
    # Per-run cache of detected/looked-up languages.
    self.lang_cache = {}
    self.session = aiohttp.ClientSession()
def __init__(self, dataframe, project, dataset, table, if_exists, api_key_path):
    """Start configuring a threaded BigQuery load of `dataframe`.

    `if_exists` selects the write disposition: 'append', 'overwrite',
    or anything else for write-if-empty.
    """
    Thread.__init__(self)

    # Build an authenticated client and resolve the destination table.
    creds = Credentials.from_service_account_file(api_key_path)
    _client = bigquery.Client(credentials=creds, project=project)
    table_ref = _client.dataset(dataset).table(table)

    # Job configuration: schema autodetection plus the requested
    # write disposition (default is WRITE_EMPTY).
    _job_config = bigquery.LoadJobConfig()
    _job_config.autodetect = True
    _job_config.write_disposition = {
        'append': 'WRITE_APPEND',
        'overwrite': 'WRITE_TRUNCATE',
    }.get(if_exists, 'WRITE_EMPTY')

    self._query_job = _client.load_table_from_dataframe(
        dataframe=dataframe,
        destination=table_ref,
        project=project,
        job_config=_job_config)
def __init__(self): creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. # TODO: 認証情報の保存場所の調整が必要 # ライブラリ利用時、CLI利用時などを考えるとコンストラクタ引数でもらうのが良いか? if os.path.exists("token.pickle"): with open("token.pickle", "rb") as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: creds = Credentials.from_service_account_file( "credentials.json") # Save the credentials for the next run with open("token.pickle", "wb") as token: pickle.dump(creds, token) service = build("sheets", "v4", credentials=creds) # Call the Sheets API self.spreadsheets = service.spreadsheets()
def get_data(credentials):
    """Load the turnip-price sheet into a cleaned DataFrame.

    Args:
        credentials: Path to a service-account JSON key file.

    Returns:
        DataFrame with a 'day' column plus one column per player,
        keeping only day rows where at least one price was entered.
    """
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = Credentials.from_service_account_file(credentials,
                                                        scopes=scope)
    gc = gspread.authorize(credentials)
    # This uses the filename on Google Drive, and by default the 'sheet1';
    # if either changes names, update this.
    wks = gc.open("Turnip price tracker").sheet1
    rows = wks.get_all_values()
    # BUG FIX: the pattern previously used "\ " escapes in a plain string,
    # which are invalid escape sequences; use a raw string (same matches).
    column_names = ["day"] + [re.sub(r"\ week\ \d+", "", i)
                              for i in rows[0][1:]]
    df = pd.DataFrame.from_records(rows, columns=column_names)[:-2]
    # Removing the lines without a day
    df = df[df.day != ""]
    # Selecting only the rows where at least one person added a price
    df = df[(df[list(df.columns)[1:]] != "").any(axis=1)]
    return df
def select_game_streaming_info(game_name, starts_from_timestamp, ends_to_timestamp=None):
    """Query BigQuery for streaming sessions of `game_name` in a time window.

    Args:
        game_name: Game name to filter on.
        starts_from_timestamp: Window start (epoch seconds).
        ends_to_timestamp: Window end (epoch seconds); defaults to now.

    Returns:
        List of dicts with owner ids, start/end timestamps, url and name.
    """
    cred = "conf/pym_google_cloud_certificate.json"
    starts_from = arrow.get(starts_from_timestamp).format(
        'YYYY-MM-DD HH:mm:ss')
    ends_to_timestamp = ends_to_timestamp or arrow.utcnow().timestamp
    ends_to = arrow.get(ends_to_timestamp).format('YYYY-MM-DD HH:mm:ss')
    client = bigquery.Client(
        project="soocii-data",
        credentials=Credentials.from_service_account_file(cred))
    # SECURITY FIX: game_name was spliced into the SQL with .format(),
    # which is injectable; use named query parameters instead.
    query = "SELECT * " \
            "FROM `soocii-data.jarvis_prod_backend_media.soocii_streaming_stats` " \
            "WHERE _PARTITIONTIME >= @starts_from AND " \
            "_PARTITIONTIME < @ends_to AND " \
            "game = @game"
    job_config = bigquery.QueryJobConfig(query_parameters=[
        bigquery.ScalarQueryParameter('starts_from', 'TIMESTAMP', starts_from),
        bigquery.ScalarQueryParameter('ends_to', 'TIMESTAMP', ends_to),
        bigquery.ScalarQueryParameter('game', 'STRING', game_name),
    ])
    timeout = 30
    query_job = client.query(query, job_config=job_config)
    return [{
        'owner_soocii_id': i.soocii_id,
        'owner_account_id': i.owner,
        'streaming_start_at': arrow.get(i.start_at).timestamp,
        'streaming_end_at': arrow.get(i.end_at).timestamp,
        'streaming_url': i.streaming_url,
        'streaming_name': i.name
    } for i in query_job.result(timeout=timeout)]
def get_songs(credentials):
    """Read the 'Songs' worksheet and report missing/owned songs per player.

    Args:
        credentials: Path to a service-account JSON key file.

    Returns:
        (d_missing, d_repeated, players): per-player lists of songs with
        an empty cell (missing) vs. a non-empty cell, plus player names.
    """
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = Credentials.from_service_account_file(credentials,
                                                        scopes=scope)
    gc = gspread.authorize(credentials)
    wks = gc.open("Turnip price tracker").worksheet("Songs")
    rows = wks.get_all_values()
    # BUG FIX: raw string — "\ " in a plain string is an invalid escape
    # sequence (DeprecationWarning, future SyntaxError); matches unchanged.
    column_names = ["song"] + [
        re.sub(r"\ week\ \d+", "", i).lower().replace("á", "a")
        for i in rows[0][1:]
    ]
    df = pd.DataFrame.from_records(rows, columns=column_names).iloc[1:96, 0:11]
    players = [i for i in df.columns if i != "song"]
    # (Removed a leftover debug print of the player list.)
    d_missing = {}
    d_repeated = {}
    for key in players:
        # Empty cell -> the player is missing this song.
        d_missing[key] = list(df["song"][df[key] == ""])
        d_repeated[key] = list(df["song"][df[key] != ""])
    return d_missing, d_repeated, players
def get_presigned_url(self, duration=None, private_key_file=None,
                      use_cached=False) -> str:
    """ Generate (or reuse) a signed URL for this GCS object.

    Args:
        duration: Duration in seconds. This is ignored if use_cached is on.
        private_key_file: Optional path to the signing key; defaults to
            the class-level GCSURI.PRIVATE_KEY_FILE.
        use_cached: Use a cached URL.

    Returns:
        The signed URL string.

    Raises:
        Exception: if the private key file does not exist.
        ValueError: if the blob for this URI does not exist.
    """
    # Class-level cache shared by all instances; keyed by URI.
    cache = GCSURI._CACHED_PRESIGNED_URLS
    if use_cached:
        if cache is not None and self._uri in cache:
            return cache[self._uri]
    # if not self.exists:
    #     raise Exception('File does not exist. f={f}'.format(self._uri))
    if private_key_file is None:
        private_key_file = os.path.expanduser(GCSURI.PRIVATE_KEY_FILE)
    else:
        private_key_file = os.path.expanduser(private_key_file)
    if not os.path.exists(private_key_file):
        raise Exception("GCS private key file not found. f:{f}".format(
            f=private_key_file))
    credentials = Credentials.from_service_account_file(private_key_file)
    # Fall back to the class-wide default expiry when not given.
    duration = duration if duration is not None else GCSURI.DURATION_PRESIGNED_URL
    blob, _ = self.get_blob()
    if blob is None:
        raise ValueError("Blob does not exist for {f}".format(f=self._uri))
    url = blob.generate_signed_url(expiration=timedelta(seconds=duration),
                                   credentials=credentials)
    # Always record the fresh URL so later use_cached calls can reuse it.
    cache[self._uri] = url
    return url
def get_fossils(credentials):
    """Read the 'Fossils' worksheet and report per-player fossil status.

    Args:
        credentials: Path to a service-account JSON key file.

    Returns:
        (d_missing, d_repeated, players): per-player fossil name lists —
        missing = cell does not contain 'x' (and not already in repeated),
        repeated = cell contains 'e' — plus the player column names.
    """
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    credentials = Credentials.from_service_account_file(credentials,
                                                        scopes=scope)
    gc = gspread.authorize(credentials)
    wks = gc.open("Turnip price tracker").worksheet("Fossils")
    rows = wks.get_all_values()
    # BUG FIX: raw string — "\ " was an invalid escape sequence in a
    # plain string; the regex itself is unchanged.
    column_names = ["name1", "name2"] + [
        re.sub(r"\ week\ \d+", "", i).lower().replace("á", "a")
        for i in rows[0][2:]]
    df = pd.DataFrame.from_records(rows, columns=column_names)[1:-2]
    # Drop columns whose header cell is empty.
    empty_columns = [i for i, name in enumerate(df.columns) if name == ""]
    df = df.drop(df.columns[empty_columns], axis=1)
    # Fossil names span two cells; join and trim them.
    df["name"] = df["name1"] + " " + df["name2"]
    df["name"] = df["name"].str.strip()
    df = df.drop(["name1", "name2"], axis=1)
    players = [i for i in df.columns if i != "name"]
    d_missing = {}
    d_repeated = {}
    for key in players:
        # BUG FIX: the original applied "~" to the selected *name* Series
        # (strings) instead of negating the boolean mask; negate the mask.
        d_missing[key] = list(df["name"][~df[key].str.lower().str.contains("x")])
        d_repeated[key] = list(df["name"][df[key].str.lower().str.contains("e")])
        d_missing[key] = [i for i in d_missing[key] if i not in d_repeated[key]]
    return d_missing, d_repeated, players
def get_spreadsheet(sheet_name):
    """Open a Google spreadsheet by name, caching handles in SHEETS.

    Returns the cached/opened spreadsheet, or None when it cannot be
    found or opened (errors are logged, not raised).
    """
    cached = SHEETS.get(sheet_name)
    if cached:
        return cached
    # oAuth authentication via a service-account JSON key; see
    # http://gspread.readthedocs.org/en/latest/oauth2.html and
    # https://github.com/google/oauth2client/releases/tag/v2.0.0.
    # The sheet must be shared with:
    # 859748496829-pm6qtlliimaqt35o8nqcti0h77doigla@developer.gserviceaccount.com
    base_credentials = Credentials.from_service_account_file(
        'sources/oauth_key.json')
    scoped_credentials = base_credentials.with_scopes([
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ])
    gc = gspread.Client(auth=scoped_credentials)
    gc.session = AuthorizedSession(scoped_credentials)
    try:
        sheet = gc.open(sheet_name)
    except gspread.exceptions.SpreadsheetNotFound:
        log.log_error('googlesheet.py', 'get_spreasheeet()',
                      'Could not find ' + sheet_name)
        return None
    except gspread.exceptions.APIError:
        log.log_error('googlesheet.py', 'get_spreasheeet()',
                      'Could not open ' + sheet_name)
        return None
    SHEETS[sheet_name] = sheet
    return sheet
def __init__(self, creds_file, config=None):
    """Create AppointmentManager object

    Args:
        creds_file (str): path to service account's creds json. This file
            should be generated via Google API credentials page:
            https://console.developers.google.com/apis/credentials
        config (dict, optional): config dict, if provided is merged into
            object's attributes, allows to change any existing attribute.
    """
    # Below are defaults for all attributes. Change by passing config dict.
    self.tag_field = 'description'  # do not modify unnecessarily
    self.slots_tag = '#slots'  # do not modify unnecessarily
    self.appointments_tag = '#appointments'  # do not modify unnecessarily
    self.schedule_name = 'my services'
    self.timezone = 'America/Los_Angeles'
    self.location = '1 Nowhere street, 00000'
    self.admin = '*****@*****.**'
    self.fixed_attendee = '*****@*****.**'
    self.event_description = ('Looking forward to meeting you!'
                              ' Need to change or cancel? Call/text me at'
                              ' +12345678901')
    # Merge any user-provided overrides into the defaults above.
    self.configure(config)
    SCOPES = ['https://www.googleapis.com/auth/calendar']
    creds = gcreds.from_service_account_file(creds_file, scopes=SCOPES)
    self._api = gbuild('calendar', 'v3', credentials=creds)
    # the following section finds or creates slot and appointment calendars;
    # a calendar is recognized by its tag_field starting with the tag.
    cals = self._api.calendarList().list().execute()
    slots_cal = None
    appointments_cal = None
    for cal in cals['items']:
        if cal.get(self.tag_field, '').startswith(self.slots_tag):
            slots_cal = cal['id']
        elif cal.get(self.tag_field, '').startswith(self.appointments_tag):
            appointments_cal = cal['id']
    if slots_cal:
        self.slots_cal = slots_cal
    else:
        self.slots_cal = self.create_cal(self.slots_tag, 'slots')
    if appointments_cal:
        self.appointments_cal = appointments_cal
    else:
        self.appointments_cal = self.create_cal(self.appointments_tag,
                                                'appointments')
    # the following section sets bot's timezone
    # not sure yet if it helps with notifications, testing
    # problem is that notification to non-google emails have UTC time
    # in email
    primary_cal = self._api.calendars().get(calendarId='primary').execute()
    if primary_cal['timeZone'] != self.timezone:
        primary_cal['timeZone'] = self.timezone
        self._api.calendars().update(calendarId=primary_cal['id'],
                                     body=primary_cal).execute()
    # Load the current calendar state into this object.
    self.refresh()
def _check(api, callback, *args, **kwargs):
    """Ensure the API is authorized and the sheet is open, then run the call.

    Returns False immediately when no API key is set; otherwise lazily
    authorizes gspread, opens the sheet, executes, and passes the result
    to `callback` (if any) before returning it.
    """
    # NOTE(review): `func` below is not defined in this function's scope —
    # this body reads like the inner wrapper of a decorator where `func`
    # is the decorated function captured from an enclosing scope; confirm
    # against the full file before refactoring.
    if not api.key:
        print("No API Key")
        return False
    # Lazily build the gspread client on first use.
    if not api.gc:
        scopes = [
            'https://www.googleapis.com/auth/spreadsheets',
            'https://www.googleapis.com/auth/drive'
        ]
        credentials = Credentials.from_service_account_file(
            'credentials.json', scopes=scopes)
        api.gc = gspread.authorize(credentials)
    # Lazily open the target sheet on first use.
    if not api.sh:
        api.set_sheet()
    try:
        result = func(api, *args, **kwargs)
    except (ConnectionError, APIError) as e:
        # Network/API failures are reported, not raised; caller sees False.
        print(str(e))
        result = False
    if callback:
        callback(result)
    return result
def _get_service(self):
    """Build and return the Sheets v4 service client.

    Note: the target sheet must be shared with the client_email found
    in token.json.
    """
    credentials = Credentials.from_service_account_file(
        str(self._token_file), scopes=self.SCOPES)
    return build('sheets', 'v4', credentials=credentials)
def recognize_audio_from_file(
    file: Union[str, os.PathLike],
    credential: Union[str, os.PathLike, None] = None,
    language_code: str = 'en-US',
    encoding: enums.RecognitionConfig.AudioEncoding = enums.RecognitionConfig.
    AudioEncoding.FLAC,
    sampling_rate_hertz: int = 44100,
) -> types.RecognizeResponse:
    """Transcribe an audio file with the Cloud Speech API.

    Args:
        file (str, os.PathLike) : audio file to transcribe
        credential (str) : optional service-account key path
        language_code (str) : BCP-47 language tag
        encoding (str) : audio encoding of the file
        sampling_rate_hertz (int) : sample rate of the audio

    Returns:
        types.RecognizeResponse
    """
    # Use explicit service-account credentials when provided, otherwise
    # fall back to the environment's default credentials.
    if credential is not None:
        creds = Credentials.from_service_account_file(filename=credential)
        client = SpeechClient(credentials=creds)
    else:
        client = SpeechClient()
    config = types.RecognitionConfig(
        encoding=encoding,
        language_code=language_code,
        sampling_rate_hertz=sampling_rate_hertz)
    with io.open(file, 'rb') as stream:
        audio = types.RecognitionAudio(content=stream.read())
    return client.recognize(config, audio)
def __init__(self, credentials_file):
    """Build a Calendar v3 service from a service-account key file."""
    self.SCOPES = ['https://www.googleapis.com/auth/calendar']
    self.credentials = Credentials.from_service_account_file(
        credentials_file,
        scopes=self.SCOPES
    )
    self.service = discovery.build('calendar', 'v3',
                                   credentials=self.credentials)
def _check_old_config(self):
    """
    Checks the config file to see if Google Sheets is defined using the
    old method, and if so initializes GS from those legacy keys. Exits
    immediately when ``self.config`` is not set.

    .. deprecated:: 0.9.3
        This method only exists to help phase out the old Google Sheets
        config and will be removed in 0.9.5 when the old config style is
        removed.
    """
    if self.config is None:
        return

    legacy_keys_found = False

    if self.config.has_value('sheets-spreedsheet-id'):
        legacy_keys_found = True
        self.spreedsheetID = self.config.config['sheets-spreedsheet-id']

    if self.config.has_value('sheets-creds-path'):
        legacy_keys_found = True
        self._creds = Credentials.from_service_account_file(
            self.config.config['sheets-creds-path'], scopes=self.SCOPE)

    if self.config.has_value('sheets-worksheet-id'):
        legacy_keys_found = True
        self.sheet_id = self.config.config['sheets-worksheet-id']

    if self.config.has_value('sheets-worksheet-title'):
        legacy_keys_found = True
        self.sheet_title = self.config.config['sheets-worksheet-title']

    # Nudge users toward the new nested config layout.
    if legacy_keys_found:
        _log.warning('Google Sheet config should be done inside a key named "google" '
                     'or "google-sheets". \nSee '
                     'https://dapt.readthedocs.io/en/latest/reference/db/google_sheets.html#config '
                     'for more information.')
def main():
    """Collect SciELO search-result URLs and store them via BigQuery."""
    # Google BigQuery authentication: load service-account credentials
    # from the file named by GOOGLE_APPLICATION_CREDENTIALS. There is no
    # interactive fallback — a missing/invalid path aborts the run.
    try:
        cred_file = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
        credentials = Credentials.from_service_account_file(cred_file)
    except (KeyError, FileNotFoundError):
        print(
            "Invalid credentials. Set GOOGLE_APPLICATION_CREDENTIALS to your credentials file."
        )
        exit(1)

    # Project, dataset and table configuration
    pandas_gbq.context.credentials = credentials
    project_id = os.environ["PROJECT_ID"]
    url_table_id = os.environ["TABLE_ID"]

    # Selenium-backed miner used to scrape the search pages.
    driver_path = '/usr/lib/chromium-browser/chromedriver'
    miner = mining.MiningEngine(ScieloSearchLocations, driver_path=driver_path)
    URLBuilder.connect_to_gbq(credentials, project_id, url_table_id, url_schema)
    scielo_builder = URLBuilder(miner)

    # CLI: all args but the last are search terms; the last is the URL limit.
    search_domain = 'https://search.scielo.org/'
    search_terms = sys.argv[1:-1]
    limit = int(sys.argv[-1])
    scielo_builder.collect(search_domain, search_terms, limit)
def _login(service_name: str, version: str, scopes: List[str],
           credentials_path: Path):
    """Create a Google service driver authenticated with a service
    account key file and the given scopes.
    """
    creds = Credentials.from_service_account_file(credentials_path,
                                                  scopes=scopes)
    return build(service_name, version, credentials=creds)
def service_account_auth(client_secrets, oauth2_scopes, delegated_email_address):
    """
    Creates a Credentials instance from a service account json file.

    Args:
        client_secrets (str): The path to the credentials json file or
            credentials information in json format.
        oauth2_scopes (list of str): Scopes to request during the
            authorization grant.
        delegated_email_address (str): For domain-wide delegation, the email
            address of the user for which to request delegated access.

    Returns:
        google.auth.service_account.Credentials: Service account credentials
    """
    # Decide whether client_secrets is inline JSON or a file path by
    # attempting to parse it; keep the try body minimal.
    try:
        data = json.loads(client_secrets)
    except JSONDecodeError:
        # Not valid JSON text, so treat it as a path to a key file.
        # (FIX: removed a dead `data = client_secrets` assignment here.)
        # https://google-auth.readthedocs.io/en/latest/reference/google.oauth2.service_account.html
        return Credentials.from_service_account_file(
            client_secrets,
            scopes=oauth2_scopes,
            subject=delegated_email_address)
    # https://google-auth.readthedocs.io/en/latest/reference/google.oauth2.service_account.html
    return Credentials.from_service_account_info(
        data,
        scopes=oauth2_scopes,
        subject=delegated_email_address)
def main():
    """Append today's Udemy stats as a new row of the tracking sheet."""
    scopes = [
        'https://www.googleapis.com/auth/spreadsheets',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = Credentials.from_service_account_file('service_account.json',
                                                        scopes=scopes)
    gc = gspread.authorize(credentials)
    SP_SHEET_KEY = '1yQwXdgXbnsceKHokSuRbxBDT8v_DR3Kjo-_diZGqnnA'
    sh = gc.open_by_key(SP_SHEET_KEY)
    SP_SHEET = 'db'
    worksheet = sh.worksheet(SP_SHEET)
    data = worksheet.get_all_values()
    # First sheet row is the header.
    df = pd.DataFrame(data[1:], columns=data[0])
    data_udemy = get_data_udemy()
    today = datetime.date.today().strftime('%Y/%m/%d')
    data_udemy['date'] = today
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # use pd.concat. get_data_udemy()'s return shape is not visible here —
    # handle both a DataFrame and a dict-like row. TODO confirm its type.
    if isinstance(data_udemy, pd.DataFrame):
        new_rows = data_udemy
    else:
        new_rows = pd.DataFrame([data_udemy])
    df = pd.concat([df, new_rows], ignore_index=True)
    set_with_dataframe(worksheet, df, row=1, col=1)
def load_sheet():
    """Load and merge grade rows from the first five worksheets.

    Read once at startup for performance; restart the bot after grades
    change to pick up new values.
    """
    load_dotenv()
    scope = ['https://www.googleapis.com/auth/spreadsheets']
    creds = Credentials.from_service_account_file(
        "linear-outcome-339410-10f813b7e005.json", scopes=scope)
    sheet = gspread.authorize(creds).open_by_url(os.getenv('GOOGLE_SHEET_URL'))
    # Same treatment for every tab: drop the first four header/summary rows.
    frames = []
    for tab_index in range(5):
        raw = pd.DataFrame(sheet.get_worksheet(tab_index).get_all_values())
        frames.append(raw.iloc[4:, :])
    filtered_data = pd.concat(frames).fillna(0)
    # Sheet cells are not numeric; normalize blanks and division errors.
    filtered_data = filtered_data.replace('', 0)
    filtered_data = filtered_data.replace('#DIV/0!', 0)
    return filtered_data
def service_account(filename=DEFAULT_SERVICE_ACCOUNT_FILENAME,
                    scopes=DEFAULT_SCOPES):
    """Authenticate using a service account.

    ``scopes`` parameter defaults to read/write scope available in
    ``gspread.auth.DEFAULT_SCOPES``. It's read/write for Sheets and Drive
    API::

        DEFAULT_SCOPES =[
            'https://www.googleapis.com/auth/spreadsheets',
            'https://www.googleapis.com/auth/drive'
        ]

    You can also use ``gspread.auth.READONLY_SCOPES`` for read only access.
    Obviously any method of ``gspread`` that updates a spreadsheet
    **will not work** in this case.

    :param str filename: The path to the service account json file.
    :param list scopes: The scopes used to obtain authorization.

    :rtype: :class:`gspread.Client`
    """
    credentials = ServiceAccountCredentials.from_service_account_file(
        filename, scopes=scopes)
    return Client(auth=credentials)
def __init__(self, service_name: str, version: str, subject: str = None,
             num_retries: int = None, scopes=None, service_account_path=None):
    """
    GoogleService constructor.

    :param service_name: google service name (drive, gmail, calendar, ...)
    :param version: version of the api (v4 for drive, v1 for gmail...)
    :param subject: email of the delegated account to impersonate; may be
        None when delegation is not needed (e.g. just share the
        spreadsheet with the service account email instead)
    :param num_retries: custom attribute used for exponential backoff
    :param scopes: array of scope strings needed for your actions
    :param service_account_path: local path to the service account json key
    """
    self.service_name = service_name
    self.version = version
    # Impersonating credentials built straight from the key file.
    self.credentials = Credentials.from_service_account_file(
        service_account_path,
        scopes=scopes,
        subject=subject,
    )
    self.service = self.build(self.service_name, self.version)
    self.num_retries = num_retries