def GetGoogleClient(filename):
    """Build an authorized Google Fitness API client from a stored credential file."""
    logging.debug("Creating Google client")
    stored_credentials = Storage(filename).get()
    authorized_http = stored_credentials.authorize(httplib2.Http())
    fitness_client = build('fitness', 'v1', http=authorized_http)
    logging.debug("Google client created")
    return fitness_client
def get(self):
    """Get the calendar service from the storage file token."""
    stored_credentials = Storage(self.token_file).get()
    authorized_http = stored_credentials.authorize(httplib2.Http())
    calendar_service = google_discovery.build(
        "calendar", "v3", http=authorized_http, cache_discovery=False
    )
    return calendar_service
def GetGoogleClient(self):
    """Returns an authenticated google fit client object"""
    logging.debug("Creating Google client")
    # Credentials file path comes from the instance configuration.
    creds = Storage(self.googleCredsFile).get()
    transport = creds.authorize(httplib2.Http())
    fit_client = build('fitness', 'v1', http=transport)
    logging.debug("Google client created")
    return fit_client
def get_google_client(self, cred_file='./google.json'):
    """Returns an authenticated google fit client object"""
    logging.debug("Creating Google client")
    creds = Storage(cred_file).get()
    authorized_http = creds.authorize(httplib2.Http())
    fit_client = build('fitness', 'v1', http=authorized_http)
    logging.debug("Google client created")
    return fit_client
def g_yetki():
    """Return an authorized Google Drive v3 service from the stored token file."""
    # Load the stored personal credentials.
    kimlik_bilgileri = Storage(G_DRIVE_TOKEN_DOSYASI).get()
    # Create an httplib2.Http object, refresh the credentials over it,
    # then authorize the transport with them.
    aktarim = httplib2.Http()
    kimlik_bilgileri.refresh(aktarim)
    yetkili = kimlik_bilgileri.authorize(aktarim)
    return build("drive", "v3", http=yetkili, cache_discovery=False)
def get(self):
    """Get the calendar service from the storage file token."""
    # Imports are kept function-local, mirroring the original lazy loading.
    import httplib2
    from oauth2client.file import Storage
    from googleapiclient import discovery as google_discovery

    creds = Storage(self.token_file).get()
    authorized_http = creds.authorize(httplib2.Http())
    return google_discovery.build('calendar', 'v3', http=authorized_http)
def GetGoogleClient(self, filename):
    """Return an authenticated Google Fitness API client.

    Args:
        filename: path to the oauth2client Storage file holding the
            user's stored credentials.

    Returns:
        A googleapiclient service object for the 'fitness' v1 API.
    """
    # Fix: dropped the unnecessary `global log` declaration — the
    # module-level logger is only read here, never rebound, so the
    # statement had no effect.
    log.info("Creating Google client")
    log.info("Reading auth file %s", filename)
    credentials = Storage(filename).get()
    http = credentials.authorize(httplib2.Http())
    client = build('fitness', 'v1', http=http)
    log.info("Google client created")
    return client
def connectCalendar(jsonPath): print 'Connecting to Google Calendar...' # establish koneksi dengan google calendar dengan token pada jsonPath global calendarService try: credential = Storage(jsonPath).get() http = credential.authorize(httplib2.Http()) calendarService = discovery.build('calendar', 'v3', http=http) except Exception as e: raise Exception('Failed to connect to Google Calendar: ' + str(e))
def send_email(to, subject, message_text, *attachments):
    """Send a Gmail message built from the given fields via the Gmail API."""
    sender = 'me'
    payload = create_message(sender, to, subject, message_text, *attachments)
    creds = Storage(os.path.join('.', 'gmail_credential.json')).get()
    authorized_http = creds.authorize(httplib2.Http())
    gmail = discovery.build('gmail', 'v1', http=authorized_http,
                            cache_discovery=False)
    try:
        gmail.users().messages().send(userId=sender, body=payload).execute()
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
def google_spreadsheet_service():
    """Return spreadsheet service object."""
    creds = Storage(os.path.join(os.getcwd(), 'client_secret.json')).get()
    authorized_http = creds.authorize(httplib2.Http())
    sheets_discovery_url = (
        'https://sheets.googleapis.com/$discovery/rest?version=v4')
    return discovery.build('sheets', 'v4', http=authorized_http,
                           discoveryServiceUrl=sheets_discovery_url)
def get_service(storage_file):
    """Return an authorized Gmail v1 service from the given storage file.

    Raises Exception when no usable credentials are stored; refreshes the
    access token first when it has expired.
    """
    creds = Storage(storage_file).get()
    if not creds:
        raise Exception('credentials are invalid. Please authorize first.')
    transport = Http()
    if creds.access_token_expired:
        creds.refresh(transport)
    return discovery.build(serviceName='gmail', version='v1',
                           http=creds.authorize(transport))
def google_login_flow(code):
    """Exchange an OAuth code, persist the credentials, and return the
    Google+ id of the authenticated user.
    """
    credentials = flow.step2_exchange(code)
    CREDENTIALS_FILE = "./credentials"
    storage = Storage(CREDENTIALS_FILE)
    storage.put(credentials)
    # Re-read the credentials from disk (mirrors the original put-then-get flow).
    credentials = storage.get()
    http_auth = credentials.authorize(Http())
    plus = build('plus', 'v1', http=http_auth)
    profile = plus.people().get(userId='me').execute()
    return profile['id']
def yetkilendir():
    """Authorize against Google Drive and return a v3 service object."""
    # No physical file dependency up front; drive_erisim.json is created
    # on first run via token_yarat().
    if not os.path.exists(G_DRIVE_TOKEN_DOSYASI):
        print('''\ndrive_erisim.json Bulunamadı.. Lütfen yetkilendirmenizi tamamlayınız..\n''')
        token_yarat()
    # Load the stored personal credentials.
    kimlik_bilgileri = Storage(G_DRIVE_TOKEN_DOSYASI).get()
    # Create an httplib2.Http object, refresh then authorize it.
    aktarim = httplib2.Http()
    kimlik_bilgileri.refresh(aktarim)
    return build("drive", "v3", http=kimlik_bilgileri.authorize(aktarim),
                 cache_discovery=False)
def __init__(self):
    """Set up the Gmail intake client (Python 2).

    Loads config secrets, the persisted last-intake timestamp, stored
    OAuth credentials, and builds a Gmail v1 API service.  Any failure
    marks the instance unusable (self.usable = False) instead of raising.
    """
    # self.usable — False when any setup step failed and the client
    # must not be used.
    self.usable = True
    try:
        self.config = getSecrets('foxydoxxing_client')['gmail']
    except Exception as e:
        if DEBUG: print e
        self.usable = False
        return
    try:
        # self.last_update — last successful intake time, persisted as a
        # float in the file named by config['last_update'].
        with open(self.config['last_update'], 'rb') as L:
            self.last_update = float(L.read().strip())
    except Exception as e:
        print e
        # Missing/corrupt timestamp file: fall back to epoch start.
        self.last_update = 0
    if DEBUG: print "Last Intake: %d" % self.last_update
    # start up data service
    from oauth2client.file import Storage
    try:
        credentials = Storage(self.config['auth_storage']).get()
    except Exception as e:
        if DEBUG:
            print "NO CREDENTIALS YET."
            print e, type(e)
        self.usable = False
        return
    import httplib2
    from apiclient.discovery import build
    http = httplib2.Http()
    http = credentials.authorize(http)
    try:
        self.service = build('gmail', 'v1', http=http)
    except Exception as e:
        if DEBUG:
            print "COULD NOT CREATE SERVICE:"
            print e, type(e)
        self.usable = False
def g_yetki():
    """Return an authorized Google Drive v3 service, running the interactive
    authorization steps first when the settings or token files are missing.
    """
    if not os.path.exists(AYARLAR):
        print(erisim_ver())
        sys.exit()
    if not os.path.exists(G_DRIVE_TOKEN_DOSYASI):
        print("\t[!] Önce Yetkilendirme Yapmalısınız...\n\n")
        print(kod_al())
        print(token_olustur(input('\n\nLütfen Kodu Giriniz.. : ')))
    # Load the stored personal credentials.
    kimlik = Storage(G_DRIVE_TOKEN_DOSYASI).get()
    # Refresh and authorize an httplib2.Http transport with them.
    aktarim = httplib2.Http()
    kimlik.refresh(aktarim)
    return build("drive", "v3", http=kimlik.authorize(aktarim),
                 cache_discovery=False)
def write_spreadsheet(sheetid, title, row_major_data):
    """Add a new sheet named `title` to spreadsheet `sheetid` and fill it.

    Args:
        sheetid: target spreadsheet id.
        title: name of the new sheet (tab) to create.
        row_major_data: list of rows (each a list of cell values); written
            RAW into range "<title>!A:ZZ".

    Credentials are read from
    ~/.credentials/sheets.googleapis.com-python-fromLogtoSheet.json.

    Fixes: removed unused locals (`newsheetId` was extracted from the
    addSheet response but never used; `result` was assigned and dropped)
    and the redundant re-aliasing of the three parameters.
    """
    credential_path = os.path.join(
        os.path.expanduser('~'), '.credentials',
        'sheets.googleapis.com-python-fromLogtoSheet.json')
    credentials = Storage(credential_path).get()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets', 'v4', http=http,
                              discoveryServiceUrl=discoveryUrl)
    # Create the new sheet (tab) first.
    create_sheet_body = {
        "requests": [{
            "addSheet": {
                "properties": {
                    "title": title
                }
            }
        }]
    }
    service.spreadsheets().batchUpdate(
        spreadsheetId=sheetid, body=create_sheet_body).execute()
    # Then write the data into it.
    body = {
        'valueInputOption': "RAW",
        'data': [{'range': title + "!A:ZZ", 'values': row_major_data}],
    }
    service.spreadsheets().values().batchUpdate(
        spreadsheetId=sheetid, body=body).execute()
def __init__(self, credentials_storage_path, dev_key, calendar_name=None, **_):
    """Connect to and authenticate with Google API"""
    self._calendar_id = ""
    # Guard against the placeholder value shipped in the default settings.
    if dev_key == "YOUR_DEV_KEY_HERE":
        raise ValueError("Missing developer key in settings")
    credentials = Storage(credentials_storage_path).get()
    if credentials is None or credentials.invalid:
        raise AuthorizationMissing("Missing or invalid credentials")
    authorized_http = credentials.authorize(httplib2.Http())
    try:
        self.service = apiclient.discovery.build(
            serviceName="calendar",
            version="v3",
            http=authorized_http,
            developerKey=dev_key,
        )
    except httplib2.HttpLib2Error:
        raise EventImporterError("Can't connect to Google API")
    if calendar_name:
        self.select_calendar(calendar_name)
def __init__(self):
    """Configure the annex client from stored secrets (Python 2).

    Each connection setting is optional — a missing secret key is silently
    skipped.  When OAuth credentials are already stored, a Google Drive v2
    service is built; otherwise self.service is left unset.
    """
    if DEBUG: print "ANNEX CLIENT online"
    # get the conf settings
    try:
        self.hostname = getSecrets('server_host')
    except KeyError as e:
        pass
    try:
        self.port = getSecrets('server_port')
    except KeyError as e:
        pass
    try:
        self.user = getSecrets('server_user')
    except KeyError as e:
        pass
    try:
        self.remote_path = getSecrets('annex_remote')
    except KeyError as e:
        pass
    # No auth storage configured at all: nothing more to set up.
    if getSecrets('auth_storage') is None:
        return
    from apiclient import errors
    from apiclient.discovery import build
    credentials = None
    try:
        from oauth2client.file import Storage
        credentials = Storage(getSecrets('auth_storage')).get()
    except KeyError as e:
        if DEBUG: print "NO AUTH YET!"
    # Only build the Drive service when credentials were actually loaded.
    if credentials is not None:
        import httplib2
        http = httplib2.Http()
        http = credentials.authorize(http)
        self.service = build('drive', 'v2', http=http)
def OAuth2Login(client_secrets, email, pswd, app_path):
    """Authenticate to Picasa Web Albums and return a PhotosService client.

    Loads cached credentials from <app_path>/credentials.txt when present;
    otherwise runs the browser-based OAuth flow, capturing the redirect via
    the module-level `Listen` thread / `auth_code` global, and caches the
    result.  NOTE(review): `pswd` is accepted but never used in this body.
    """
    global auth_code
    scope = 'https://picasaweb.google.com/data/'
    user_agent = 'PhotoAlbum'
    cred_file = 'credentials.txt'
    #we already have the credentials stored in the file system
    if cred_file in os.listdir(app_path):
        credentials = Storage(app_path + cred_file).get()
    else:
        #we have to get the credentials from google
        flow = flow_from_clientsecrets(client_secrets, scope=scope, redirect_uri='http://localhost:9999')
        #start a thread to listen for the auth code
        listen_thread = threading.Thread(target = Listen)
        listen_thread.start()
        uri = flow.step1_get_authorize_url()
        webbrowser.open(uri)
        #wait for the thread to finish so we have the auth code
        listen_thread.join()
        # auth_code holds the full redirect query string; extract the code value.
        code = auth_code.split('code=')[1].split()[0]
        #print 'The code is: {0}'.format(code)
        credentials = flow.step2_exchange(code)
        Storage(app_path + cred_file).put(credentials)
    # Refresh proactively when the token expires within five minutes.
    if (credentials.token_expiry - datetime.datetime.utcnow()) < datetime.timedelta(minutes=5):
        http = httplib2.Http()
        http = credentials.authorize(http)
        credentials.refresh(http)
    # gdata client authenticated via a raw Bearer header rather than gdata's
    # own login mechanisms.
    gd_client = gdata.photos.service.PhotosService(source=user_agent,
                                                   email=email,
                                                   additional_headers={'Authorization' : 'Bearer %s' % credentials.access_token})
    return gd_client
from httplib2 import Http
from apiclient.discovery import build
from oauth2client.file import Storage

CREDENTIALS_FILE = "./secret/credentials"

# Get a credential: run the interactive OAuth flow on first use, otherwise
# load the stored credential from disk.
# Fix: the original wrapped trailing comment was missing its leading '#'
# on the continuation line (a syntax error); also replaced '== False'
# with the idiomatic 'not'.
if not os.path.exists(CREDENTIALS_FILE):
    import google_oauth
    credentials = google_oauth.get_credentials()
else:
    credentials = Storage(CREDENTIALS_FILE).get()

# Get API Service.
http_auth = credentials.authorize(Http())
service = build('bigquery', 'v2', http=http_auth)

# Execute a google bigquery.
query = "SELECT 1 AS dummy FROM [patriot-999:adcross.view_banner_entry_all] LIMIT 1"
# query = "SELECT 1 AS dummy FROM [bigquery-public-data:hacker_news] LIMIT 1"

# Reference sketch of a bigquery#queryRequest body (see the BigQuery REST
# API docs for full field semantics):
# body = {
#     "kind": "bigquery#queryRequest",
#     "dryRun": True,          # validate and estimate only; don't run the job
#     "useQueryCache": True,   # default True
#     "defaultDataset": {      # default project/dataset for unqualified names
#         "projectId": project_id,
#         "datasetId": "patriot-999",
#     },
#     "useLegacySql": True,    # False selects standard SQL; then
#                              # allowLargeResults/flattenResults are ignored
#     "maxResults": 42,        # rows per page; responses also capped at 10 MB
# }
def do(URL):
    """Fetch a computation log from URL, parse it into rows, upload the rows
    to a new timestamped sheet of a fixed Google spreadsheet, add a column
    chart, and return an HTML link to the spreadsheet.

    Relies on module-level helpers `pickup` (appears to append regex
    captures from `line` to the given list — confirm against its
    definition) and `str_isfloat`.
    """
    req = requests.get(URL)
    root = lxml.html.fromstring(req.text)
    output = []
    # Row 0: precomputation parameters; row 1: gap / last-fall-degree notes;
    # row 2: header for the per-step data rows appended below.
    output_rows = [[], [], ["Step", "Deg", "#new", "time"]]
    precomp = True
    for line in root.text.split("\n"):
        if precomp:
            # Precomputation section: collect parameter lines until the
            # "Weil..." marker switches us to per-step parsing.
            pickup("([A-z]\s=\s\d+)", line, output_rows[0])
            pickup("(T2\s=\s.+)", line, output_rows[0])
            pickup("(IX\s=\s.+)", line, output_rows[0])
            pickup("Loading(.+)", line, output_rows[0])
            if (re.match("Weil.+", line) != None):
                precomp = False
        else:
            # Per-step section: a complete row is [step, degree, #new, time].
            pickup("STEP\s(\d+)", line, output)
            pickup("Basis length.+step degree:\s(\d+),.+", line, output)
            pickup("Num new poly.+:\s(\d+).+", line, output)
            if (re.match("No new.+", line) != None):
                output.append("no new")
            pickup("Step \d+ time:\s(\d+\.\d+)", line, output)
            if (len(output) == 4):
                output_rows.append(output)
                output = []
            if (re.match("No pairs.+", line) != None):
                output.append("No pairs")
                output.append("-")
                output_rows.append(output)
                output = []
            char = re.match("Gap.+:\s(\d+)\s.+", line)
            if (char != None):
                output_rows[1].append("Last fall Degree = " + char.groups()[0])
            if (re.match("Point A 2", line) != None):
                break
    # print int(output_rows[-1][0])+2
    # Authorize against the Sheets API with locally stored credentials.
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    credential_filename = 'sheets.googleapis.com-python-fromLogtoSheet.json'
    credential_path = os.path.join(credential_dir, credential_filename)
    credentials = Storage(credential_path).get()
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = discovery.build('sheets', 'v4', http=http,
                              discoveryServiceUrl=discoveryUrl)
    spreadsheet_id = "1opFSZCBUryQBTF2JrmRGKubJdd_j2aeAAPYQzWMb-Ys"
    values = output_rows
    # New sheet (tab) named with the current timestamp.
    newsheet_title = datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S")
    create_sheet_body = {
        "requests": [{
            "addSheet": {
                "properties": {
                    "title": newsheet_title
                }
            }
        }]
    }
    request = service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id,
                                                 body=create_sheet_body)
    response = request.execute()
    newsheetId = response["replies"][0]["addSheet"]["properties"]["sheetId"]
    # Write the raw parsed rows into columns A:F of the new sheet.
    newsheetrange = newsheet_title + "!A:F"
    data = [{
        'range': newsheetrange,
        'values': values
    }
        # Additional ranges to update ...
    ]
    body = {'valueInputOption': "RAW", 'data': data}
    result = service.spreadsheets().values().batchUpdate(
        spreadsheetId=spreadsheet_id, body=body).execute()
    # Normalize the "#new" (col 2) and "time" (col 3) cells of the data rows
    # to fractions of their column totals, scaled by 10, for charting;
    # non-numeric cells count as 0.
    l2 = []
    for i in output_rows[3:]:
        if str_isfloat(i[2]):
            l2.append(float(i[2]))
        else:
            l2.append(0)
    l3 = []
    for i in output_rows[3:]:
        if str_isfloat(i[3]):
            l3.append(float(i[3]))
        else:
            l3.append(0)
    for i, l in enumerate(output_rows[3:]):
        l[2] = round(l2[i] / sum(l2), 3) * 10
        l[3] = round(l3[i] / sum(l3), 3) * 10
    # Write the normalized rows a second time into columns G:L.
    newsheetrange = newsheet_title + "!G:L"
    data = [{'range': newsheetrange, 'values': output_rows}]
    body = {'valueInputOption': "RAW", 'data': data}
    result = service.spreadsheets().values().batchUpdate(
        spreadsheetId=spreadsheet_id, body=body).execute()
    # Add a column chart on a new sheet: domain is column G (index 6),
    # three series from columns H, I, J of the normalized block.
    chart_title = "Transition"
    sourceSheetId = newsheetId
    body = {
        "requests": [{
            "addChart": {
                "chart": {
                    "spec": {
                        "title": chart_title,
                        "basicChart": {
                            "chartType": "COLUMN",
                            "legendPosition": "BOTTOM_LEGEND",
                            "axis": [{
                                "position": "BOTTOM_AXIS",
                                "title": "Step"
                            }],
                            "domains": [{
                                "domain": {
                                    "sourceRange": {
                                        "sources": [{
                                            "sheetId": sourceSheetId,
                                            "startRowIndex": 2,
                                            "endRowIndex": len(output_rows),
                                            "startColumnIndex": 6,
                                            "endColumnIndex": 7
                                        }]
                                    }
                                }
                            }],
                            "series": [{
                                "series": {
                                    "sourceRange": {
                                        "sources": [{
                                            "sheetId": sourceSheetId,
                                            "startRowIndex": 2,
                                            "endRowIndex": len(output_rows),
                                            "startColumnIndex": 7,
                                            "endColumnIndex": 8
                                        }]
                                    }
                                },
                                "targetAxis": "LEFT_AXIS"
                            }, {
                                "series": {
                                    "sourceRange": {
                                        "sources": [{
                                            "sheetId": sourceSheetId,
                                            "startRowIndex": 2,
                                            "endRowIndex": len(output_rows),
                                            "startColumnIndex": 8,
                                            "endColumnIndex": 9
                                        }]
                                    }
                                },
                                "targetAxis": "LEFT_AXIS"
                            }, {
                                "series": {
                                    "sourceRange": {
                                        "sources": [{
                                            "sheetId": sourceSheetId,
                                            "startRowIndex": 2,
                                            "endRowIndex": len(output_rows),
                                            "startColumnIndex": 9,
                                            "endColumnIndex": 10
                                        }]
                                    }
                                },
                                "targetAxis": "LEFT_AXIS"
                            }],
                            "headerCount": 1
                        }
                    },
                    "position": {
                        "newSheet": True
                    }
                }
            }
        }]
    }
    response = service.spreadsheets().batchUpdate(spreadsheetId=spreadsheet_id,
                                                  body=body).execute()
    return '<p><a href="https://docs.google.com/spreadsheets/d/' + spreadsheet_id + '">Click here</a>'
from oauth2client.file import Storage

CREDENTIALS_FILE = "./secret/credentials"

# Get a credential: run the interactive OAuth flow on first use, otherwise
# load the stored credential from disk.
# Fix: the original wrapped trailing comment was missing its leading '#'
# on the continuation line (a syntax error); also replaced '== False'
# with the idiomatic 'not'.
if not os.path.exists(CREDENTIALS_FILE):
    import google_oauth
    credentials = google_oauth.get_credentials()
else:
    credentials = Storage(CREDENTIALS_FILE).get()

# Get API Service.
http_auth = credentials.authorize(Http())
service = build('bigquery', 'v2', http=http_auth)

# Execute a google bigquery.
query = "SELECT 1 AS dummy FROM [patriot-999:adcross.view_banner_entry_all] LIMIT 1"
# query = "SELECT 1 AS dummy FROM [bigquery-public-data:hacker_news] LIMIT 1"

# Reference sketch of a bigquery#queryRequest body (see the BigQuery REST
# API docs for full field semantics):
# body = {
#     "kind": "bigquery#queryRequest",
#     "dryRun": True,          # validate and estimate only; don't run the job
#     "useQueryCache": True,   # default True
#     "defaultDataset": {      # default project/dataset for unqualified names
#         "projectId": project_id,
#         "datasetId": "patriot-999",
#     },
#     "useLegacySql": True,    # False selects standard SQL; then
#                              # allowLargeResults/flattenResults are ignored
# }
def GetGoogleClient(credentials_file='google.json'):
    """Return an authenticated Google Fitness API client.

    Args:
        credentials_file: path to the oauth2client Storage file holding
            the stored credentials.  Defaults to 'google.json', the value
            previously hard-coded, so existing callers are unaffected.
    """
    credentials = Storage(credentials_file).get()
    http = credentials.authorize(httplib2.Http())
    client = build('fitness', 'v1', http=http)
    return client
# Log into outlook
username = '******'
password = sys.argv[1]
ews_client = EWS_Client(username=username, password=password,
                        max_items_per_get_item_query=100)

# Define which spreadsheet to parse
spreadsheet_id = '1PZSYBQdyI78w9Fv1tHN7vNVE2YhXtgWSpiybyYnnmsc'
# user id used for in google api calls
user_id = '*****@*****.**'

# Get Google credentials
credentials = Storage('./.credentials/google_key_public.json').get()
http = credentials.authorize(httplib2.Http())

services = {
    'spreadsheet': None,
    'gmail': None,
}
discovery_url = ('https://sheets.googleapis.com/$discovery/rest?version=v4')
# BUG FIX: the Google Sheets API's discovery service name is 'sheets',
# not 'spreadsheet' — discovery.build('spreadsheet', 'v4', ...) raises
# UnknownApiNameOrVersion.  The dict key keeps its old name so downstream
# lookups of services['spreadsheet'] still work.
services['spreadsheet'] = discovery.build('sheets', 'v4', http=http,
                                          discoveryServiceUrl=discovery_url)
services['gmail'] = discovery.build('gmail', 'v1', http=http)

# Define which spreadsheet range to parse
range_name = 'category_actions!A:E'