def service(self):
    """Build and return a Drive v2 service authorized with this object's
    service-account credentials (client email, private key, auth scope)."""
    creds = SignedJwtAssertionCredentials(
        self._client_email, self._private_key, self._auth_scope)
    authorized_http = creds.authorize(httplib2.Http())
    return build('drive', 'v2', http=authorized_http)
def create_drive_service(service_account_pkcs12_file,
                         service_account_email, scope, user_email):
    """Build and return a Drive service object authorized with a service
    account acting on behalf of the given user.

    Args:
        service_account_pkcs12_file: Path to the PKCS#12 private-key file.
        service_account_email: Email address of the service account.
        scope: OAuth scope (or list of scopes) to request.
        user_email: The email of the user to impersonate.

    Returns:
        Drive service object, or None if the access-token refresh failed.
    """
    # Context manager closes the key file even if read() raises (the original
    # used the Python-2-only file() builtin with a manual close()).
    with open(service_account_pkcs12_file, 'rb') as f:
        key = f.read()
    credentials = SignedJwtAssertionCredentials(service_account_email, key,
                                                scope=scope, sub=user_email)
    print("Finish getting credentials for user %s" % user_email)
    http = credentials.authorize(httplib2.Http())
    print("Finish authorize user %s" % user_email)
    try:
        return build('drive', 'v2', http=http)
    except AccessTokenRefreshError as error:
        print("Error when getting drive service of user %s:\n > Error: %s"
              % (user_email, error))
def google_prediction(subject, args):
    """Classify each string in *subject* with a hosted Prediction API model.

    Args:
        subject: Iterable of strings to classify.
        args: Unused here; kept for the caller's interface.

    Returns:
        List of predicted output labels, one per input string.
    """
    from oauth2client.client import SignedJwtAssertionCredentials
    import json
    from apiclient import discovery
    from httplib2 import Http
    # Close the key file deterministically (the original leaked the handle
    # opened by json.load(open(...))).
    with open("...") as key_file:
        json_key = json.load(key_file)
    credentials = SignedJwtAssertionCredentials(
        json_key['client_email'],
        json_key['private_key'].encode(),
        'https://www.googleapis.com/auth/prediction')
    http = Http()
    credentials.authorize(http)
    prediction = discovery.build('prediction', 'v1.6', http=http)
    models = prediction.trainedmodels()
    results = []
    for data in subject:
        request = models.predict(project='symbolic-button-852',
                                 id='language-detection',
                                 body={"input": {"csvInstance": [data]}})
        results.append(request.execute()['outputLabel'])
    return results
def get_analytics():
    """Return pageview metrics for the last 30 days from Google Analytics.

    Returns:
        The executed GA query response (a dict from the Analytics v3 API).
    """
    # Context manager replaces the Python-2-only file() builtin and closes the
    # key file even if read() raises.
    with open(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb') as f:
        key = f.read()
    credentials = SignedJwtAssertionCredentials(
        SERVICE_ACCOUNT_EMAIL,
        key,
        scope='https://www.googleapis.com/auth/analytics.readonly',
    )
    http = credentials.authorize(httplib2.Http())
    service = build('analytics', 'v3', http=http)
    end_date = datetime.date.today()
    # 30 days ago
    start_date = end_date - datetime.timedelta(days=30)
    data_query = service.data().ga().get(**{
        'ids': 'ga:89346711',  # the code of our project in Google Analytics
        'dimensions': '',
        'metrics': 'ga:pageviews,ga:uniquePageviews,ga:avgTimeOnPage',
        'start_date': start_date.strftime('%Y-%m-%d'),
        'end_date': end_date.strftime('%Y-%m-%d'),
        'sort': '-ga:pageviews',
    })
    return data_query.execute()
def GetUsersProjects(emails, jsonKey):
    '''
    This issues 1 call to the Cloud Resource Manager API for EACH user passed
    in emails. An account with 50,000 users will have 50,000 API calls, but
    this should still be well within normal quotas. The API lists all the
    Cloud Platform projects the user has access to. The list may be empty.
    This function returns a list of (user, name, projectId, projectNumber)
    tuples, one per project the user can access.
    '''
    userProjectList = []
    for user in emails:
        credential = SignedJwtAssertionCredentials(
            jsonKey['client_email'],
            jsonKey['private_key'],
            'https://www.googleapis.com/auth/cloud-platform',
            sub=user['primaryEmail'])
        httpAuth = credential.authorize(Http())
        service = build('cloudresourcemanager', 'v1beta1', http=httpAuth)
        request = service.projects().list()
        while request is not None:
            results = request.execute()
            # The 'projects' key may be absent even when the response dict is
            # non-empty; .get() avoids the KeyError the original risked by
            # indexing after only checking len(results) > 0.
            for project in results.get('projects', []):
                userProjectList.append((user['primaryEmail'],
                                        project['name'],
                                        project['projectId'],
                                        project['projectNumber']))
            request = service.projects().list_next(request, results)
    return userProjectList
def _authorize(self):
    """
    Returns an authorized HTTP object to be used to build a Google cloud
    service hook connection.

    Falls back to application-default credentials when no service_account /
    key_path pair is configured on the connection.
    """
    connection_info = self.get_connection(self.conn_id)
    connection_extras = connection_info.extra_dejson
    service_account = connection_extras.get('service_account', False)
    key_path = connection_extras.get('key_path', False)
    if not key_path or not service_account:
        logging.info('Getting connection using `gcloud auth` user, since no service_account/key_path are defined for hook.')
        credentials = GoogleCredentials.get_application_default()
    elif self.scope:
        # open() replaces the Python-2-only file() builtin.
        with open(key_path, 'rb') as key_file:
            key = key_file.read()
        credentials = SignedJwtAssertionCredentials(
            service_account, key, scope=self.scope)
        # TODO Support domain delegation, which will allow us to set a
        # sub-account to execute as. We can then pass DAG owner emails into
        # the connection_info, and use it here.
        # sub='*****@*****.**')
    else:
        raise AirflowException('Scope undefined, or either key_path/service_account config was missing.')
    http = httplib2.Http()
    return credentials.authorize(http)
def _authorize(self):
    """
    Returns an authorized HTTP object to be used to build a Google cloud
    service hook connection.

    Supports domain-wide delegation via self.delegate_to (passed as `sub`).
    """
    service_account = self._get_field('service_account', False)
    key_path = self._get_field('key_path', False)
    # NOTE(review): the original also read a `scope` field here but never used
    # it -- everything below relies on self.scope. The unused local has been
    # dropped; confirm self.scope is indeed the intended source.
    kwargs = {}
    if self.delegate_to:
        # Impersonate the delegated account.
        kwargs['sub'] = self.delegate_to
    if not key_path or not service_account:
        logging.info('Getting connection using `gcloud auth` user, since no service_account/key_path are defined for hook.')
        credentials = GoogleCredentials.get_application_default()
    elif self.scope:
        with open(key_path, 'rb') as key_file:
            key = key_file.read()
        credentials = SignedJwtAssertionCredentials(
            service_account, key, scope=self.scope, **kwargs)
    else:
        raise AirflowException('Scope undefined, or either key_path/service_account config was missing.')
    http = httplib2.Http()
    return credentials.authorize(http)
def __init__(self, keyfile=None, email=None, profile_id=None):
    """
    :param keyfile: Path to the PKCS 12 file for authenticating with the API
        as a service account. Defaults to settings.GA_API_KEYFILE.
    :param email: Email address for the service account to authenticate as.
        Defaults to settings.GA_API_ACCOUNT_EMAIL.
    :param profile_id: ID of analytics profile to query. Defaults to
        settings.GA_API_PROFILE_ID.
    """
    self.profile_id = profile_id or settings.GA_API_PROFILE_ID
    keyfile = keyfile or settings.GA_API_KEYFILE
    try:
        with open(keyfile, 'rb') as handle:
            key = handle.read()
    except IOError as err:
        raise AnalyticsError(
            'Could not read keyfile `{0}`: {1}'.format(keyfile, err), err)
    credentials = SignedJwtAssertionCredentials(
        email or settings.GA_API_ACCOUNT_EMAIL,
        key,
        scope='https://www.googleapis.com/auth/analytics.readonly')
    authed = credentials.authorize(httplib2.Http())
    try:
        self._service = build_service('analytics', 'v3', http=authed)
    except OAuth2Error as err:
        raise AnalyticsError(
            'Error authenticating with Analytics API: {0}'.format(err), err)
def from_private_key(account_name, private_key=None, private_key_path=None,
                     storage=None, storage_path=None, api_version="v3",
                     readonly=False):
    """Create a client for a service account.

    Create a client with an account name and a private key.

    Args:
        account_name: str, the account identifier (probably the account email).
        private_key: str, the private key as a string.
        private_key_path: str or file-like, path to (or open file containing)
            the private key.
        storage: oauth2client.client.Storage, a Storage implementation to
            store credentials.
        storage_path: str, path to a file storage.
        readonly: bool, default False, if True only readonly access is
            requested from GA.

    Raises:
        GapyError: if neither private_key nor private_key_path is provided.
    """
    if not private_key:
        if not private_key_path:
            raise GapyError(
                "Must provide either a private_key or a private_key_file")
        if isinstance(private_key_path, basestring):
            # Close the handle after reading (the original opened the file
            # and never closed it).
            with open(private_key_path) as key_file:
                private_key = key_file.read()
        else:
            # Already an open file-like object; the caller owns its lifetime.
            private_key = private_key_path.read()
    storage = _get_storage(storage, storage_path)
    scope = GOOGLE_API_SCOPE_READONLY if readonly else GOOGLE_API_SCOPE
    credentials = SignedJwtAssertionCredentials(account_name, private_key, scope)
    credentials.set_store(storage)
    return Client(_build(credentials, api_version))
def main(args=sys.argv):
    """The main function for the erpmtics_agent program.

    Builds service-account credentials, wraps them in a pydrive GoogleAuth,
    and creates a GoogleDrive client before exiting.
    """
    logger.debug(args)
    try:
        logger.debug("Init the app")
        id = "*****@*****.**"  # service-account email from the Google API console
        # FIXME(review): `key` is never assigned in this function (the
        # original only carried a commented-out base64.b64decode(...) line),
        # so the call below raises NameError unless `key` exists at module
        # level -- load the private key here before shipping.
        # key = base64.b64decode(...)
        credentials = SignedJwtAssertionCredentials(
            id, key, scope='https://www.googleapis.com/auth/drive')
        credentials.authorize(httplib2.Http())
        gauth = GoogleAuth()
        gauth.credentials = credentials
        drive = GoogleDrive(gauth)
        sys.exit()
    except ImportError:
        raise
    except Exception:
        # Python-3-compatible except syntax (was `except Exception, e`);
        # the exception is re-raised unchanged either way.
        raise
def login_to_google_analytics():
    """Authorize against Google Analytics and return (service, access_token)."""
    creds = SignedJwtAssertionCredentials(
        GOOGLE_SERVICE_ACCOUNT_EMAIL,
        GOOGLE_SERVICE_ACCOUNT_SECRET_KEY,
        'https://www.googleapis.com/auth/analytics.readonly')
    authed = Http()
    creds.authorize(authed)
    analytics = build("analytics", "v3", http=authed)
    return analytics, creds.access_token
def __init__(self):
    """Authorize against the Calendar API (read-only) and build the service,
    caching credentials in calendar.dat via oauth2client Storage."""
    self.calendars = []
    self.events = []
    self.service = ""
    storage = Storage('calendar.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        # !!change this -- the secrets path is hard-coded.
        with open("secret_json.json") as fh:
            secrets = json.load(fh)
        credentials = SignedJwtAssertionCredentials(
            secrets['client_email'],
            secrets['private_key'],
            'https://www.googleapis.com/auth/calendar.readonly'
        )
        storage.put(credentials)
    authed = credentials.authorize(httplib2.Http())
    self.service = build(serviceName='calendar', version='v3', http=authed)
    if self.service:
        print("success: Oauth authorization")
def get_service(api_name, api_version, scope, key_file_location,
                service_account_email):
    """Get a service that communicates to a Google API.

    Args:
        api_name: The name of the api to connect to.
        api_version: The api version to connect to.
        scope: A list auth scopes to authorize for the application.
        key_file_location: The path to a valid service account p12 key file.
        service_account_email: The service account email address.

    Returns:
        A service that is connected to the specified API.
    """
    # Context manager guarantees the key file is closed even if read() raises.
    with open(key_file_location, 'rb') as key_file:
        key = key_file.read()
    credentials = SignedJwtAssertionCredentials(service_account_email, key,
                                                scope=scope)
    http = credentials.authorize(httplib2.Http())
    # Build the service object.
    service = build(api_name, api_version, http=http)
    return service
def delete(name_event, postcode):
    """Delete the Fusion Tables row whose Event/Location match the arguments.

    Looks up the rowid with a SELECT, then issues a DELETE for that row.
    """
    client_email = '*****@*****.**'
    with open("CMSTest-5f1753f2f1a0.p12", 'rb') as f:
        private_key = f.read()
    credentials = SignedJwtAssertionCredentials(
        client_email, private_key,
        'https://www.googleapis.com/auth/fusiontables',
        sub='*****@*****.**')
    http_auth = credentials.authorize(Http())
    # NOTE: name_event/postcode are interpolated directly into the SQL-like
    # query; Fusion Tables offers no parameterized queries, so callers must
    # not pass untrusted input here.
    query1 = "SELECT rowid FROM 1lrwBUW2WdOCgC-BRr8qG9jmSgjYOF7lBPt3xKQfr WHERE Event = '"+name_event+"' AND Location = "+str(postcode)
    fusion = build('fusiontables', 'v2', http=http_auth)
    rep = fusion.query().sql(sql=query1).execute()
    rowid = rep['rows'][0][0]
    query2 = "DELETE FROM 1lrwBUW2WdOCgC-BRr8qG9jmSgjYOF7lBPt3xKQfr WHERE rowid = '"+str(rowid)+"'"
    rep2 = fusion.query().sql(sql=query2).execute()
    # print() function instead of the Python-2-only `print rep2;` statement.
    print(rep2)
    return
# if __name__ == "__main__":
#     main()
def authenticate(api_name, api_version, scope, key_file_location,
                 service_account_email):
    """Build an authorized Google API service for a service account.

    Args:
        api_name: The name of the api to connect to.
        api_version: The api version to connect to.
        scope: Auth scope(s) to authorize for the application.
        key_file_location: Path to a valid service account p12 key file.
        service_account_email: The service account email address.

    Returns:
        A service object connected to the specified API.
    """
    # Context manager closes the key file even if read() raises.
    with open(key_file_location, 'rb') as key_file:
        key = key_file.read()
    credentials = SignedJwtAssertionCredentials(service_account_email, key,
                                                scope=scope)
    service = build(api_name, api_version,
                    http=credentials.authorize(httplib2.Http()))
    return service
def get_credentials(sub=None):
    '''
    Signed JWT Credentials allow for frictionless authentication using a
    private key as opposed to a three-legged oauth flow.

    Caches the refreshed credentials in memcache per impersonated user (sub).
    '''
    # Fetch the credentials object from memcache.
    credentials = memcache.get("rapid-reseller#credentials#%s" % sub)
    if credentials is None or credentials.invalid:
        logging.info("Couldn't find cached token, refreshing")
        http = httplib2.Http()
        # Read the private key; open() replaces the Python-2-only file()
        # builtin and the context manager guarantees the handle is closed.
        with open(settings.OAUTH2_PRIVATEKEY) as f:
            key = f.read()
        # Establish the credentials.
        credentials = SignedJwtAssertionCredentials(
            service_account_name=settings.OAUTH2_SERVICE_ACCOUNT_EMAIL,
            private_key=key,
            scope=" ".join(settings.OAUTH2_SCOPES),
            sub=sub)
        # Force the generation of an access token.
        credentials.refresh(http)
        # Cache the token for 59 minutes.
        memcache.set("rapid-reseller#credentials#%s" % sub,
                     value=credentials,
                     time=(60 * 59))
    return credentials
def get_cred(email, scope, service_account=None):
    """
    Establishes the proper credentials to access the Google API resource.

    Args:
        email: User to impersonate (passed as `sub`).
        scope: Full scope URL, or a bare scope name to be expanded.
        service_account: Path to the service-account JSON; defaults to
            settings.SERVICE_ACCOUNT_JSON.
    """
    if scope[0:4] != "http":
        # BUG FIX: the original line ended with a trailing comma, which made
        # `scope` a 1-tuple instead of the intended URL string.
        scope = 'https://www.googleapis.com/auth/{}'.format(scope)
    if not service_account:
        service_account = settings.SERVICE_ACCOUNT_JSON
    with open(service_account) as json_file:
        json_data = json.load(json_file)
    credentials = SignedJwtAssertionCredentials(
        json_data['client_email'],
        json_data['private_key'],
        scope=scope,
        # access_type="offline",
        # approval_prompt="force",
        token_uri='https://accounts.google.com/o/oauth2/token',
        sub=email
    )
    credentials.get_access_token()
    return credentials
def main():
    """Predict the label of the sample given on the command line using a
    hosted Prediction API trained model."""
    # Read the credential private key from the JSON file downloaded from the
    # Google Developer Console.
    with open('DataRobot-ad5fdab3c6c8.json') as f1:
        p_key = json.load(f1)
    # Get the client email and private key.
    Private_KEY = p_key['private_key']
    client_email = p_key['client_email']
    # Create a signed JWT assertion credential.
    scope = ['https://www.googleapis.com/auth/prediction']
    credentials = SignedJwtAssertionCredentials(client_email, Private_KEY,
                                                scope=scope)
    # Authorization.
    http_auth = credentials.authorize(Http())
    # Access the Prediction API and the trained model.
    # BUG FIX: the API version was misspelled 'v1,6' (comma) in the original.
    prediction = build('prediction', 'v1.6', http=http_auth)
    # Predict from the command-line argument.
    new_sample = sys.argv[1]
    body = {"input": {"csvInstance": [new_sample]}}
    TrainedModel = prediction.trainedmodels().predict(
        project=project_id, id=model_id, body=body).execute()
    print(TrainedModel['outputLabel'])
def __init__(self, service_email=None, private_key=None, user_email=None):
    """
    Handles credentials and builds the google service.

    :param service_email: String
    :param private_key: Path
    :param user_email: String
    :raise ValueError:
    """
    self._service_email = service_email or settings.GOOGLE_DRIVE_STORAGE_SERVICE_EMAIL
    self._key = private_key or settings.GOOGLE_DRIVE_STORAGE_KEY
    extra = {}
    delegated = user_email or settings.GOOGLE_DRIVE_STORAGE_USER_EMAIL
    if delegated:
        # Impersonate the configured user via domain-wide delegation.
        self._user_email = extra['sub'] = delegated
    creds = SignedJwtAssertionCredentials(
        self._service_email,
        self._key,
        scope="https://www.googleapis.com/auth/drive",
        **extra
    )
    authed_http = creds.authorize(httplib2.Http())
    self._drive_service = build('drive', 'v2', http=authed_http)
def create_drive_service(self, user_email):
    """Build and return a fresh (non-cached) Drive v2 service that
    impersonates user_email via domain-wide delegation."""
    creds = SignedJwtAssertionCredentials(
        self.service_account_email,
        self.key,
        scope='https://www.googleapis.com/auth/drive',
        sub=user_email)
    authed_http = creds.authorize(httplib2.Http())
    return build('drive', 'v2', http=authed_http)
def index(request):
    """Render calendar/index.html with the first page of carthage.edu
    directory users, fetched via a service account impersonating
    DOMAIN_SUPER_USER_EMAIL."""
    email = settings.DOMAIN_SUPER_USER_EMAIL
    with open(settings.SERVICE_ACCOUNT_KEY) as f:
        private_key = f.read()
    credentials = SignedJwtAssertionCredentials(
        settings.CLIENT_EMAIL,
        private_key,
        'https://www.googleapis.com/auth/admin.directory.user',
        sub=email  # act on behalf of the domain super user
    )
    http = httplib2.Http()
    http = credentials.authorize(http)
    service = build("admin", "directory_v1", http=http)
    # First 10 users of the domain, ordered by email.
    results = service.users().list(
        customer='carthage.edu',
        maxResults=10,
        orderBy='email',
        viewType='domain_public'
    ).execute()
    # NOTE(review): `users` is extracted but never used -- the template is
    # handed the raw `results` dict below. Possibly {'users': users} was
    # intended; confirm against the template before changing.
    users = results.get('users', [])
    return render_to_response(
        'calendar/index.html',
        {'users': results,},
        context_instance=RequestContext(request)
    )
def _update_prediction_item(seleccion_id):
    """Push one Seleccion as an incremental training example to the hosted
    Prediction API model 'model001' in project 'foxyrec-demo'."""
    seleccion = Seleccion.objects.get(pk=seleccion_id)
    client_email = settings.GOOGLE_PREDICTIONS_CLIENT_EMAIL
    private_key = settings.GOOGLE_PREDICTIONS_PRIVATE_KEY
    creds = SignedJwtAssertionCredentials(client_email, private_key,
                                          settings.GOOGLE_PREDICTIONS_URL)
    # Build the CSV-style training instance: item id, like flag, sex, age.
    sexo = 'Mujer' if seleccion.usuario.sexo == 1 else 'Hombre'
    edad = _get_edad_string(seleccion.usuario.edad)
    item = "ID:" + str(seleccion.item.pk)
    like = 'Me gusta' if seleccion.me_gusta == 1 else 'No me gusta'
    query = "{}, {}, {}, {}".format(item, like, sexo, edad)
    authed = creds.authorize(httplib2.Http())
    service = build('prediction', 'v1.6', http=authed)
    service.trainedmodels().update(
        id='model001',
        project='foxyrec-demo',
        body={'output': '', 'csvInstance': [str(query)]}).execute()
    return None
def connect(self, keyFile=KEY_FILE, account=ACCOUNT):
    """Authorize against Fusion Tables and build self.service.

    Retries the service build up to 10 times, sleeping 10s before each
    attempt (as the original did), and returns the service or None.
    """
    try:
        # open() replaces the Python-2-only file() builtin.
        f = open(keyFile, 'rb')
    except IOError:
        logger.error('Could not find key file. Please verify settings.')
        return None
    key = f.read()
    f.close()
    credentials = SignedJwtAssertionCredentials(
        account, key,
        scope=[
            'https://www.googleapis.com/auth/fusiontables',
            'https://www.googleapis.com/auth/fusiontables.readonly',
        ]
    )
    self.http = httplib2.Http()
    self.http = credentials.authorize(self.http)
    # BUG FIX: the original loop never incremented its counter, so the
    # `i < 10` cap was dead and failures retried forever.
    for _ in range(10):
        if self.service:
            break
        time.sleep(10)  # back off before each attempt, as before
        try:
            self.service = build('fusiontables', 'v1', http=self.http)
        except httplib2.ServerNotFoundError:
            logger.warning('Unable to find server for authentication.')
        except HttpError:
            logger.warning('HTTP error when trying to authenticate.')
    return self.service
def analytics(context, view_id=None, next=None):
    """Obtain an OAuth2 access token for the Analytics (read-only) API using
    the key file stored on the GoogleAPISettings singleton.

    Leaves `token` as "" when no settings/key file exist or auth fails.
    """
    # The scope for the OAuth2 request.
    SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
    token = ""
    ggsettings = GoogleAPISettings.objects.first()
    if ggsettings and ggsettings.account_key_file:
        if not view_id:
            view_id = "%s" % int(ggsettings.analytics_default_view_id)
        _key_data = json.load(ggsettings.account_key_file)
        # Construct a credentials objects from the key data and OAuth2 scope.
        try:
            _credentials = SignedJwtAssertionCredentials(
                _key_data['client_email'],
                _key_data['private_key'],
                'https://www.googleapis.com/auth/analytics.readonly',
                # token_uri='https://accounts.google.com/o/oauth2/token'
            )
            token = _credentials.get_access_token().access_token
        except Exception as e:
            # `except Exception, e` and `e.message` were Python-2-only;
            # printing the exception itself is the portable spelling.
            print(e)
            token = ""
def main(server_url, pem_file, auth_email):
    """Smoke-test a Cloud Endpoints API at server_url using a service account.

    Args:
        server_url: Base URL of the deployed API.
        pem_file: Path to the PEM private key for the service account.
        auth_email: Service-account email associated with the key.
    """
    # Load the key in PEM format that you downloaded from the Google API
    # Console when you created your Service account. open() + context manager
    # replaces the Python-2-only file() builtin with manual close().
    with open(pem_file, 'rb') as f:
        key = f.read()
    # Create an httplib2.Http object to handle our HTTP requests and authorize
    # it with the Credentials. Note that the first parameter,
    # service_account_name, is the Email address created for the Service
    # account. It must be the email address associated with the key created.
    credentials = SignedJwtAssertionCredentials(
        auth_email, key,
        scope='https://www.googleapis.com/auth/userinfo.email')
    http = credentials.authorize(httplib2.Http())
    # Construct a service object via the discovery service.
    service = apiclient.discovery.build(
        "test", "v1.0", http=http,
        discoveryServiceUrl=(server_url + "/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest"))
    print("Testing %s .." % server_url)
    response = service.test(body=dict(message='This is a test')).execute()
    print(response)
    # print "Test %s" % response['status']
    print("Done")
def create_service_account(user_email, scope, SERVICE_ACCOUNT_PKCS12_FILE_PATH, SERVICE_ACCOUNT_EMAIL):
    """Build and return an authorized HTTP object for a service account
    acting on behalf of the given user.

    Args:
        user_email: The email of the user to impersonate.
        scope: OAuth scope(s) to request.
        SERVICE_ACCOUNT_PKCS12_FILE_PATH: /path/to/<public_key_fingerprint>-privatekey.p12
        SERVICE_ACCOUNT_EMAIL: <some-id>@developer.gserviceaccount.com

    Returns:
        An authorized httplib2.Http object (not a Drive service -- the
        original docstring was inaccurate on this point).
    """
    # open() replaces the Python-2-only file() builtin; the context manager
    # closes the handle even if read() raises.
    with open(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb') as f:
        key = f.read()
    credentials = SignedJwtAssertionCredentials(
        service_account_name=SERVICE_ACCOUNT_EMAIL,
        private_key=key,
        scope=scope,
        sub=user_email,
    )
    http = httplib2.Http()
    http = credentials.authorize(http)
    return http
def get_authorized_http(oauth_path):
    """Return an Http object authorized for read-only Drive access using the
    service-account JSON key at oauth_path."""
    # Close the key file deterministically (the original leaked the handle
    # opened by json.load(open(...))).
    with open(oauth_path) as key_file:
        json_key = json.load(key_file)
    scope = ['https://www.googleapis.com/auth/drive.readonly']
    credentials = SignedJwtAssertionCredentials(
        json_key['client_email'], json_key['private_key'], scope)
    http = Http()
    credentials.authorize(http)
    return http
def get_conn(self):
    """
    Returns a BigQuery service object authorized with the service account
    configured on the hook's connection extras.
    """
    connection_info = self.get_connection(self.bigquery_conn_id)
    connection_extras = connection_info.extra_dejson
    service_account = connection_extras['service_account']
    key_path = connection_extras['key_path']
    # open() replaces the Python-2-only file() builtin.
    with open(key_path, 'rb') as key_file:
        key = key_file.read()
    credentials = SignedJwtAssertionCredentials(
        service_account, key, scope=BQ_SCOPE)
    # TODO Support domain delegation, which will allow us to set a sub-account
    # to execute as. We can then pass DAG owner emails into the
    # connection_info, and use it here.
    # sub='*****@*****.**')
    http = httplib2.Http()
    http_authorized = credentials.authorize(http)
    service = build('bigquery', 'v2', http=http_authorized)
    return service
def _authorize(self):
    """
    Returns an authorized HTTP object to be used to build a Google cloud
    service hook connection.
    """
    conn = self.get_connection(self.conn_id)
    extras = conn.extra_dejson
    service_account = extras.get("service_account", False)
    key_path = extras.get("key_path", False)

    kwargs = {}
    if self.delegate_to:
        # Domain-wide delegation: act as the delegated account.
        kwargs["sub"] = self.delegate_to

    if not key_path or not service_account:
        logging.info(
            "Getting connection using `gcloud auth` user, since no service_account/key_path are defined for hook."
        )
        credentials = GoogleCredentials.get_application_default()
    elif self.scope:
        with open(key_path, "rb") as key_file:
            pem_data = key_file.read()
        credentials = SignedJwtAssertionCredentials(
            service_account, pem_data, scope=self.scope, **kwargs)
    else:
        raise AirflowException("Scope undefined, or either key_path/service_account config was missing.")

    return credentials.authorize(httplib2.Http())
def create_plus_service(user_email):
    """Return a Google+ Domains service for user_email.

    Reuses the module-level current_plus_service when the email matches,
    and caches the authorized Http object for 30 seconds per user.
    """
    if current_plus_service["user_email"] == user_email and current_plus_service["service"]:
        return current_plus_service["service"]
    print("NEW AUTHENTICATION NECESSARY: current email is " + current_plus_service["user_email"] + " and new email is " + user_email)
    # open() replaces the Python-2-only file() builtin.
    with open(SERVICE_ACCOUNT_PEM_FILE_PATH, 'rb') as f:
        key = f.read()
    http = httplib2.Http()
    cached_http = cache.get('plus_http_' + user_email)
    if cached_http is not None:  # `not x == None` replaced with identity test
        http = cached_http
        logging.debug("CACHE HIT")
    else:
        credentials = SignedJwtAssertionCredentials(
            SERVICE_ACCOUNT_EMAIL, key,
            scope=[
                "https://www.googleapis.com/auth/plus.circles.read",
                "https://www.googleapis.com/auth/plus.circles.write",
                "https://www.googleapis.com/auth/plus.profiles.read",
                "https://www.googleapis.com/auth/plus.stream.read",
                "https://www.googleapis.com/auth/plus.stream.write",
            ],
            sub=user_email)
        http = credentials.authorize(http)
        cache.set('plus_http_' + user_email, http, 30)
        logging.debug("CACHE MISS")
    service = build("plus", "v1domains", http=http)
    # Save for the current session.
    current_plus_service["user_email"] = user_email
    current_plus_service["service"] = service
    return service
def _create_service_account_credentials(app_name):
    """Populate _service_acount_credentials with per-domain delegated
    credentials built from the app's service-account secret file.

    Raises:
        InvalidClientSecretsError: if the secret file is missing or malformed.
    """
    scopes = config.OAUTH_SERVICE_ACCOUNT_SCOPES
    secret_path = (config.OAUTH_SERVICE_ACCOUNT_SECRET_PATH_PATTERN % app_name)
    try:
        secret_file = open(secret_path)
    except IOError:
        raise InvalidClientSecretsError('Secret file not found (%s)'
                                        % secret_path)
    try:
        secrets = json.load(secret_file)
        for domain, admin_email in config.VALID_DOMAINS.iteritems():
            _service_acount_credentials[domain] = (
                SignedJwtAssertionCredentials(secrets['client_email'],
                                              secrets['private_key'],
                                              scopes,
                                              sub=admin_email))
    except (ValueError, KeyError):
        # BUG FIX: the original interpolated the open file object into the
        # message; report the human-readable path instead.
        raise InvalidClientSecretsError('Secret file invalid (%s)'
                                        % secret_path)
    finally:
        secret_file.close()
def populate_gdoc():
    """Append every row of results.csv to the 'Provider List' spreadsheet.

    Returns:
        The number of rows appended.
    """
    with open('creds.json') as fh:
        json_key = json.load(fh)
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = SignedJwtAssertionCredentials(
        json_key['client_email'], json_key['private_key'].encode(), scope)
    gc = gspread.authorize(credentials)
    sht = gc.open('Provider List')
    worksheet = sht.get_worksheet(0)
    # Read the CSV once (the original re-opened and re-parsed the file a
    # second time just to count the rows).
    with open('results.csv', 'r') as f:
        rows = list(csv.reader(f, delimiter=','))
    row_count = len(rows)
    first_new_row = worksheet.row_count + 1
    last_new_row = first_new_row + row_count
    worksheet.add_rows(row_count)
    cell_list = worksheet.range('A{}:I{}'.format(first_new_row,
                                                 last_new_row - 1))
    i = 0
    for row in rows:
        first, last, creds, address, city, state, zipcode, phone = row
        # Column I order: name parts, blank, phone, then address fields.
        row_data = [first, last, creds, '', phone, address, city, state,
                    zipcode]
        for value in row_data:
            cell_list[i].value = value
            i += 1
    assert (i == len(cell_list))
    # Single batched update instead of per-cell writes.
    worksheet.update_cells(cell_list)
    return row_count
class GoogleAnalytics(object):
    """Thin wrapper around the Google Analytics v3 reporting API."""

    def __init__(self, service_account_name, private_key, profile_id):
        # Credentials are created eagerly; the service object is built lazily.
        self._credentials = SignedJwtAssertionCredentials(
            service_account_name,
            private_key,
            scope='https://www.googleapis.com/auth/analytics.readonly')
        self._profile_id = profile_id
        self._service = None

    def _get_service(self):
        """Build (once) and return the authorized analytics service."""
        if not self._service:
            authed = self._credentials.authorize(httplib2.Http())
            self._service = build(serviceName='analytics', version='v3',
                                  http=authed)
        return self._service

    def date_query(self, start_date=None, end_date=None, metrics=None,
                   dimensions=None):
        """Run a historical GA query and return its result rows."""
        response = self._get_service().data().ga().get(
            ids='ga:' + self._profile_id,
            start_date=start_date,
            end_date=end_date,
            dimensions=dimensions,
            metrics=metrics).execute()
        return response['rows']

    def realtime_query(self, metrics=None):
        """Run a realtime GA query and return its result rows."""
        response = self._get_service().data().realtime().get(
            ids='ga:' + self._profile_id, metrics=metrics).execute()
        return response['rows']
def report_to_gdoc_r3(results, sample_db, db_versions, tokenfile,
                      spreadsheet="TTT proteotyping pipeline results"):
    """Upload TTT proteotyping pipeline results to Google Docs spreadsheet.

    Connects to Google docs using the 'gspread' Python package.
    Authentication via OAuth2 credentials from Google Developers Console.
    """
    # BUG FIX: the original ignored the `tokenfile` parameter and read the
    # global options.tokenfile instead; the parameter is now honored.
    with open(tokenfile) as fh:
        json_key = json.load(fh)
    scope = ["https://spreadsheets.google.com/feeds"]
    logging.debug("Signing in to Google account %s\n with credentials from %s",
                  json_key["client_email"], tokenfile)
    credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                                json_key['private_key'].encode(),
                                                scope)
    gc = gspread.authorize(credentials)
    wks = gc.open(spreadsheet).worksheet("TPARTY results")
    hostname = platform.node().split(".")[0]
    logging.debug("Got hostname %s", hostname)
    for result in results:
        try:
            sample_info = sample_db[result.pid]
        except KeyError:
            logging.error("Found no info for %s in sample_db, is information about the sample entered in Gdoc??", result.pid)
            # Aborts on the first missing sample, as before (the original also
            # had an unreachable `continue` after this exit -- now removed).
            exit(1)
        eu = sample_info[0]
        project = sample_info[1]
        qe = sample_info[2]
        species = sample_info[4]
        row = [eu, project, qe, species, hostname]
        row.extend(db_versions)
        row.extend(result[1:])
        wks.append_row(row)
def _init():
    """Load the EE180DA Google Sheet form responses into a DataFrame with
    role_id / row / col columns, one (deduplicated) record per role.

    Returns:
        pandas.DataFrame of the renamed, role_id-deduplicated responses.
    """
    SCOPE = [
        "https://spreadsheets.google.com/feeds",
        "https://www.googleapis.com/auth/drive"
    ]
    SECRETS_FILE = "API_key.json"
    SPREADSHEET = "EE180DA Google Sheet"
    # Close the secrets file deterministically (the original leaked the
    # handle opened by json.load(open(...))).
    with open(SECRETS_FILE) as fh:
        json_key = json.load(fh)
    # Authenticate using the signed key.
    credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                                json_key['private_key'],
                                                SCOPE)
    gc = gspread.authorize(credentials)
    # Get google sheets information, which is passed from the google form.
    for sheet in gc.openall():
        print("{} - {}".format(sheet.title, sheet.id))
    workbook = gc.open(SPREADSHEET)
    sheet = workbook.sheet1
    # We now have our row, column, and role_index of each person!
    data = pd.DataFrame(sheet.get_all_records())
    column_names = {
        'What role number (printed label) on your Raspberry Pi do you have?': 'role_id',
        'What row number are you in, from front to back, starting from 0? (MAKE SURE TO COUNT CLOSELY)': 'row',
        'What column number are you in, from left to right, starting from 0? (MAKE SURE TO COUNT CLOSELY)': 'col',
    }
    data.rename(columns=column_names, inplace=True)
    # BUG FIX: drop_duplicates(..., inplace=False) returned a new frame that
    # was discarded; assign the result so duplicate role_ids are removed.
    data = data.drop_duplicates(subset='role_id', keep='first')
    return data
def gce_credentials_from_config(gce_credentials_config=None):
    """
    This function creates a proper GCE credentials object either from a
    passed in configuration blob or, if this code is being run on a GCE
    instance, from the default service account credentials associated with
    the VM.

    :param dict gce_credentials_config: A credentials dict used to
        authenticate against GCE. This should have the same content as the
        JSON blob you download when you create a new key for a service
        account. If this is ``None``, then the instances implicit
        credentials will be used.

    :returns: A GCE credentials object for use with the GCE API.
    """
    if gce_credentials_config is None:
        # No explicit config: fall back to the VM's implicit credentials.
        return GoogleCredentials.get_application_default()
    return SignedJwtAssertionCredentials(
        gce_credentials_config['client_email'],
        gce_credentials_config['private_key'],
        scope=[
            u"https://www.googleapis.com/auth/compute",
        ]
    )
def login_open_sheet(oauth_key_file, spreadsheet):
    """Connect to Google Docs spreadsheet and return the first worksheet.

    Exits the process with status 1 on any failure (auth, missing sheet...).
    """
    try:
        # The drive scope was listed twice in the original; once suffices.
        scope = [
            'https://spreadsheets.google.com/feeds',
            'https://www.googleapis.com/auth/drive',
            'https://www.googleapis.com/auth/spreadsheets',
        ]
        # Close the key file deterministically (the original leaked the
        # handle opened by json.load(open(...))).
        with open(oauth_key_file) as fh:
            json_key = json.load(fh)
        credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                                    json_key['private_key'],
                                                    scope)
        gc = gspread.authorize(credentials)
        worksheet = gc.open(spreadsheet).sheet1
        return worksheet
    except Exception as ex:
        print(
            'Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!'
        )
        print('Google sheet login failed with error:', ex)
        sys.exit(1)
def gsheets_parameters_stat_auth(name, num):
    """Ensure (num > 0) or remove (num <= 0) a worksheet titled *name*.

    When num > 0 any existing worksheet with that title is replaced by a
    fresh 1000x30 one, which is returned. When num <= 0 the worksheet is
    deleted if present and nothing is returned.
    """
    scopes = ['https://spreadsheets.google.com/feeds']
    creds = SignedJwtAssertionCredentials(json_key['client_email'],
                                          json_key['private_key'], scopes)
    client = gspread.authorize(creds)
    book = client.open_by_key('')

    def _drop(sheet_name):
        # Delete the worksheet whose title matches sheet_name by index
        # lookup, mirroring the original's del_worksheet call.
        sheets = book.worksheets()
        titles = [ws.title for ws in sheets]
        book.del_worksheet(sheets[titles.index(sheet_name)])

    existing = [ws.title for ws in book.worksheets()]
    if num > 0:
        if name in existing:
            _drop(name)
        book.add_worksheet(name, 1000, 30)
        return book.worksheet(name)
    if name in existing:
        _drop(name)
def update_faculty_records():
    """Sync new faculty rows from the shared Google Sheet into the Faculty
    table, appending only rows beyond the current DB record count."""
    SECRETS_FILE = 'data/VivaManagementSystem-f7cde54a5c9e.json'
    fileURL = "https://docs.google.com/spreadsheets/d/1nlYqgnmxiLfkGiIEyBUZ4IlFOVTrwok4WUMBHFFl84c/edit#gid=1604066109"
    # --------------------Donot change the code below this line-----------------
    # Extracts all the values from the first sheet of the given URL into a
    # DataFrame with every value as a string.
    SCOPE = ['https://spreadsheets.google.com/feeds']
    # Close the secrets file deterministically (the original leaked the
    # handle opened by json.load(open(...))).
    with open(SECRETS_FILE) as fh:
        json_key = json.load(fh)
    credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                                json_key['private_key'],
                                                SCOPE)
    gc = gspread.authorize(credentials)
    # Open up the workbook based on the spreadsheet URL.
    workbook = gc.open_by_url(fileURL)
    # Get the first sheet.
    sheet = workbook.sheet1
    # Extract all data into a dataframe.
    faculty_data = pd.DataFrame(sheet.get_all_records())
    num_db_records = len(Faculty.objects.all())
    try:
        # Append only the rows not yet present in the Database.
        for index, row in faculty_data.loc[num_db_records:].iterrows():
            model = Faculty()
            model.title = row['Title']
            model.name = row['Full Name']
            model.designation = row['Designation']
            model.short_name = row['Short Name used in Department']
            model.employee_id = row['Employee ID']
            model.core_competency = row['Core Competency']
            model.is_guide = 0
            model.students_allocated = 0
            model.email_id = row['E-mail ID']
            model.areas_of_interest = row['Area of Interest for project guidance']
            model.phone_number = row['Phone number']
            model.save()
    except Exception:
        # Best-effort import, as before -- but the original's bare
        # `except: pass` hid every failure; at least record what went wrong.
        logging.exception("Failed while importing faculty records")
def downloadList(bstar=False, red=False):
    """Download a MINERVA target worksheet and mirror it to a local CSV backup.

    Args:
        bstar: fetch the 'B stars' sheet into bstar.csv.
        red: fetch the red-target sheet into redtargets.csv.
        (default): fetch the main target sheet into targets.csv.
    """
    filename = 'MINERVA target list'

    # Pick worksheet, backup file and spreadsheet key for the requested list.
    if bstar:
        sheetname, csvname, key = (
            'B stars', 'bstar.csv',
            '1w7RwP8P2hMYtM3MGusw8k7gXdCHzanRZZpvnWQtGkas')
    elif red:
        sheetname, csvname, key = (
            'targets', 'redtargets.csv',
            '1CKvq5Sa9k04BWKWl3tTHpioWVSxcBVhgwxzdPyRVj3I')
    else:
        sheetname, csvname, key = (
            'targets', 'targets.csv',
            '1w7RwP8P2hMYtM3MGusw8k7gXdCHzanRZZpvnWQtGkas')

    # Authenticate with the service-account JSON credentials.
    with open('/home/minerva/minerva-control/credentials/MINERVA_Key.json') as keyfile:
        json_key = json.load(keyfile)
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                                json_key['private_key'], scope)
    gc = gspread.authorize(credentials)

    # Open the spreadsheet and worksheet, muting gspread warnings.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        workbook = gc.open_by_key(key)
        sheet = workbook.worksheet(sheetname)

    # Export to CSV (for backup).
    with open(csvname, 'wb') as out:
        writer = unicodecsv.writer(out)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            writer.writerows(sheet.get_all_values())
def load(self, client_email, private_key):
    """Load the tx and names worksheets (with local pickle caching) and merge.

    Args:
        client_email: service-account email for the JWT assertion.
        private_key: matching private-key material.

    Returns:
        The result of self.merge(tx, names).
    """
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = SignedJwtAssertionCredentials(
        client_email, private_key, scope)
    account = gspread.authorize(credentials)

    def _cached(cache_path, fetch):
        # Return the pickled copy when present; otherwise fetch from the
        # spreadsheet and write the cache.  (The original duplicated this
        # whole try/except block for each worksheet.)
        try:
            with open(cache_path, 'rb') as pickled:
                return pickle.load(pickled)
        except IOError:
            values = fetch(account)
            with open(cache_path, 'wb') as pickled:
                pickle.dump(values, pickled, pickle.HIGHEST_PROTOCOL)
            return values

    tx = _cached('tx_values.pickle', self.load_tx_worksheet)
    names = _cached('names_values.pickle', self.load_names_worksheet)
    return self.merge(tx, names)
def gce_provisioner(zone, project, ssh_public_key, gce_credentials=None):
    """
    Create an :class:`IProvisioner` for provisioning nodes on GCE.

    :param unicode zone: The name of the zone in which to provision instances.
    :param unicode project: The name of the project in which to provision
        instances.
    :param unicode ssh_public_key: The public key that will be put on the VM
        for ssh access.
    :param dict gce_credentials: A dict that has the same content as the json
        blob generated by the GCE console when you add a key to a service
        account.  The service account must have permissions to spin up VMs in
        the specified project.  When ``None``, application-default
        credentials are used instead.

    :return: An class:`IProvisioner` provider for GCE instances.
    """
    parsed_key = Key.fromString(bytes(ssh_public_key))

    # Prefer the explicit service-account blob; otherwise fall back to the
    # application-default credentials configured on this machine.
    if gce_credentials is None:
        credentials = GoogleCredentials.get_application_default()
    else:
        credentials = SignedJwtAssertionCredentials(
            gce_credentials['client_email'],
            gce_credentials['private_key'],
            scope=[
                u"https://www.googleapis.com/auth/compute",
            ])

    compute = discovery.build('compute', 'v1', credentials=credentials)
    builder = GCEInstanceBuilder(
        zone=unicode(zone),
        project=unicode(project),
        compute=compute,
    )
    return GCEProvisioner(
        instance_builder=builder,
        ssh_public_key=parsed_key,
    )
def _parse_sheet(self):
    """
    Connects to Google, fetches the spreadsheet and parses the content
    """
    import gspread
    from oauth2client.client import SignedJwtAssertionCredentials

    print('Opening connection to Houseprint sheet')

    # Read the service-account key and build signed-JWT credentials.
    json_key = json.load(open(self.gjson))
    scope = ['https://spreadsheets.google.com/feeds']
    creds = SignedJwtAssertionCredentials(
        json_key['client_email'],
        json_key['private_key'].encode('ascii'),
        scope,
    )

    # Authorize and login.
    gc = gspread.authorize(creds)
    gc.login()

    # Open the workbook and the three sheets this houseprint needs.
    print("Opening spreadsheets")
    workbook = gc.open(self.spreadsheet)
    sites_sheet = workbook.worksheet('Accounts')
    devices_sheet = workbook.worksheet('Devices')
    sensors_sheet = workbook.worksheet('Sensors')

    # Delegate each sheet to its dedicated parser.
    print('Parsing spreadsheets')
    self._parse_sites(sites_sheet)
    self._parse_devices(devices_sheet)
    self._parse_sensors(sensors_sheet)

    print('Houseprint parsing complete')
def GetCredentials(self):
    """Build service-account credentials from the stored p12 key material.

    Raises:
        MissingDependencyError: when PyOpenSSL support is unavailable.
    """
    # Guard clause: without crypto support no service-account auth is possible.
    if not HAS_CRYPTO:
        raise MissingDependencyError(
            'Service account authentication requires PyOpenSSL. Please install '
            'this library and try again.')
    if OAUTH2CLIENT_V2:
        # TODO: Plumb through auth_uri and token_uri support,
        # which in turn means amending oauth2client to support them.
        # pylint: disable=protected-access
        creds = ServiceAccountCredentials.from_p12_keyfile_buffer(
            self._client_id,
            BytesIO(self._private_key),
            private_key_password=self._password,
            scopes=DEFAULT_SCOPE,
            token_uri=self.token_uri)
        # pylint: enable=protected-access
        return creds
    return SignedJwtAssertionCredentials(
        self._client_id,
        self._private_key,
        scope=DEFAULT_SCOPE,
        private_key_password=self._password,
        token_uri=self.token_uri)
def parse_entries_sheet():
    """Read income entries from the configured worksheet.

    Returns:
        list: one entry (via create_entry) per populated data row.  Data
        starts on row 2; the first empty row or the end of the sheet
        terminates the scan.
    """
    conf = config.Config('income_entries')
    # Close the key file promptly (json.load(open(...)) leaked the handle).
    with open(conf.key) as key_file:
        json_key = json.load(key_file)
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = SignedJwtAssertionCredentials(
        json_key['client_email'], bytes(json_key['private_key'], 'UTF-8'),
        scope)
    session = gspread.authorize(credentials)
    workbook = session.open_by_key(conf.workbook)
    worksheet = workbook.worksheet(conf.worksheet)

    # Parse row-by-row until an empty row is encountered (data starts on the
    # second row).  BUG FIX: the original fetched the same row twice per
    # iteration (once in the loop condition, once in the body) — two remote
    # round-trips per row — and only checked row_count *after* requesting a
    # potentially out-of-range row.  Fetch each row exactly once, bounds first.
    entries = []
    row_index = 2
    while row_index <= worksheet.row_count:
        row = worksheet.row_values(row_index)
        if not row:
            break
        entries.append(create_entry(row))
        row_index += 1
    return entries
def authorize_creds():
    """Build Google Sheets/Drive service-account credentials from the key file
    named by the GOOGLE_SHEETS_CREDENTIALS_FILE environment variable.

    Returns:
        SignedJwtAssertionCredentials authorized for the Sheets + Drive scopes.

    Raises:
        Exception: when GOOGLE_SHEETS_CREDENTIALS_FILE is not set.
    """
    import os

    # Fail fast on missing configuration, before any heavy imports.
    SECRETS_FILE = os.getenv("GOOGLE_SHEETS_CREDENTIALS_FILE")
    if not SECRETS_FILE:
        raise Exception(
            "Missing environmental variable: GOOGLE_SHEETS_CREDENTIALS_FILE")

    import json
    from oauth2client.client import SignedJwtAssertionCredentials

    SCOPE = [
        "https://spreadsheets.google.com/feeds",
        "https://www.googleapis.com/auth/drive",
    ]

    # Based on docs here - http://gspread.readthedocs.org/en/latest/oauth2.html
    # Load in the secret JSON key (must be a service account).
    # BUG FIX: json.load(open(...)) leaked the file handle; use a context
    # manager so the key file is closed promptly.
    with open(SECRETS_FILE) as key_file:
        json_key = json.load(key_file)

    # Authenticate using the signed key
    return SignedJwtAssertionCredentials(json_key["client_email"],
                                         json_key["private_key"], SCOPE)
def _login(self):
    """Return an authorized gspread session for the Sheets feeds scope.

    Reads the service-account JSON key at self.credentials_path.
    """
    # BUG FIX: json.load(open(...)) leaked the file handle; use a context
    # manager so the key file is closed promptly.
    with open(self.credentials_path) as key_file:
        json_key = json.load(key_file)
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                                json_key['private_key'],
                                                scope)
    return gspread.authorize(credentials)
# NOTE(review): truncated snippet — the opening of the enclosing definition
# (the dict whose "3: 0, ..." entries begin this line) is not visible from
# here, so the code is left byte-identical and only this comment is added.
# Visible work: computes average relapses per weekday index, authorizes
# gspread with a service-account JSON key (handle from json.load(open(...))
# is leaked — TODO confirm and fix upstream), opens the "StayClean monthly
# challenge relapse data" spreadsheet (exit(1) if missing) and grabs cell
# ranges A/B sized to reportDatesAndNumberOfRelapses plus C2:C8 / D2:D8 of
# the first worksheet.  Python 2 (print statement, dict.iteritems).
3: 0, 4: 0, 5: 0, 6: 0 } for index, instances in dayOfWeekIndexesAndNumberOfInstances.iteritems(): # dayOfWeekIndexesAndAverageNumberOfRelapses[index] = int(round(float(dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances))) dayOfWeekIndexesAndAverageNumberOfRelapses[index] = float( dayOfWeekIndexesAndTotalNumberOfRelapses[index]) / float(instances) spreadsheetTitle = "StayClean monthly challenge relapse data" # spreadsheetTitle = "Test spreadsheet" json_key = json.load(open('../google-oauth-credentials.json')) scope = ['https://spreadsheets.google.com/feeds'] credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope) gc = gspread.authorize(credentials) spreadSheet = None try: spreadSheet = gc.open(spreadsheetTitle) except gspread.exceptions.SpreadsheetNotFound: print "No spreadsheet with title " + spreadsheetTitle exit(1) workSheet = spreadSheet.get_worksheet(0) columnACells = workSheet.range("A2:A" + str(len(reportDatesAndNumberOfRelapses) + 1)) columnBCells = workSheet.range("B2:B" + str(len(reportDatesAndNumberOfRelapses) + 1)) columnCCells = workSheet.range("C2:C8") columnDCells = workSheet.range("D2:D8")
###############################
# User replaceable items
PROJECT_ID = 'replace with your project-id'
DEFAULT_ZONE = 'replace with a GCE zone here'
client_email = 'replace with an email from GCP Service Account OAuth'
# End user replaceable items

API_VERSION = 'v1'
GCE_URL = 'https://www.googleapis.com/compute/%s/projects/' % (API_VERSION)

# Download the P12 file from API & Auth->Credentials.
# BUG FIX: a P12 key is binary — read it in 'rb' mode so text-mode newline
# translation cannot corrupt the key material.
with open("p12.p12", "rb") as f:
    private_key = f.read()

# You can change the URL for compute to be for different services.
credentials = SignedJwtAssertionCredentials(client_email, private_key,
                                            'https://www.googleapis.com'
                                            '/auth/compute')

# Create a new http object and authorize it with the signed JWT.
http = httplib2.Http()
auth_http = credentials.authorize(http)

# BUG FIX: the service was built without any http argument while auth_http
# went unused, so requests would go out unauthorized.  Build on auth_http.
gce_service = build('compute', API_VERSION, http=auth_http)

project_url = '%s%s' % (GCE_URL, PROJECT_ID)

# Build the request for instances; it is not executed yet.
request = gce_service.instances().list(
    project=PROJECT_ID, filter=None, zone=DEFAULT_ZONE)
def _get_auth(self):
    """Return an httplib2.Http authorized with service-account JWT credentials.

    Reads the account email and the base64-encoded key via
    self.pl.get_access_data, scoped to self.scope.
    """
    service_email = self.pl.get_access_data('service_account_name')
    private_key = base64.b64decode(self.pl.get_access_data('key'))
    credentials = SignedJwtAssertionCredentials(service_email, private_key,
                                                scope=self.scope)
    return credentials.authorize(httplib2.Http())
# NOTE(review): left byte-identical — the exact internal layout of the huge
# triple-quoted BigQuery SQL string cannot be recovered from this view, so a
# rewrite could not guarantee the runtime string is preserved.  main()
# authorizes BigQuery with a PKCS12 service-account key (Python 2: `file()`
# builtin, print statements), then defines and calls runSyncQuery, which
# issues a synchronous jobs().query() over [githubarchive:github.timeline]
# filtering commit messages for a list of profanities (LIMIT 700).
def main(argv): f = file('/home/abasababa/key.p12', 'rb') key = f.read() f.close() credentials = SignedJwtAssertionCredentials( '*****@*****.**', key, scope='https://www.googleapis.com/auth/bigquery') http = httplib2.Http() http = credentials.authorize(http) service = build("bigquery", "v2", http=http) projectId = "789819888058" datasetId = "github" def runSyncQuery(service, projectId, datasetId, timeout=0): jobCollection = service.jobs() queryData = { 'query': """SELECT created_at, url, payload_commit_msg, actor FROM [githubarchive:github.timeline] WHERE (LOWER(payload_commit_msg) CONTAINS "f**k") OR (LOWER(payload_commit_msg) CONTAINS "bitch") OR (LOWER(payload_commit_msg) CONTAINS "shit") OR (LOWER(payload_commit_msg) CONTAINS " t**s") OR (LOWER(payload_commit_msg) CONTAINS "asshole") OR (LOWER(payload_commit_msg) CONTAINS "c********r") OR (LOWER(payload_commit_msg) CONTAINS "c**t") OR (LOWER(payload_commit_msg) CONTAINS " hell ") OR (LOWER(payload_commit_msg) CONTAINS "douche") OR (LOWER(payload_commit_msg) CONTAINS "testicle") OR (LOWER(payload_commit_msg) CONTAINS "twat") OR (LOWER(payload_commit_msg) CONTAINS "bastard") OR (LOWER(payload_commit_msg) CONTAINS "f****t") OR (LOWER(payload_commit_msg) CONTAINS "nigger") OR (LOWER(payload_commit_msg) CONTAINS "sperm") OR (LOWER(payload_commit_msg) CONTAINS "shit") OR (LOWER(payload_commit_msg) CONTAINS "d***o") OR (LOWER(payload_commit_msg) CONTAINS "wanker") OR (LOWER(payload_commit_msg) CONTAINS "prick") OR (LOWER(payload_commit_msg) CONTAINS "penis") OR (LOWER(payload_commit_msg) CONTAINS "v****a") OR (LOWER(payload_commit_msg) CONTAINS "w***e") ORDER BY created_at DESC LIMIT 700;""", 'timeoutMs': timeout } queryReply = jobCollection.query(projectId=projectId, body=queryData).execute() jobReference = queryReply['jobReference'] # Timeout exceeded: keep polling until the job is complete. while (not queryReply['jobComplete']): print 'Job not yet complete...' 
# Continuation: keep polling getQueryResults until jobComplete, then page
# through the result rows via printTableData (defined elsewhere in this file).
queryReply = jobCollection.getQueryResults( projectId=jobReference['projectId'], jobId=jobReference['jobId'], timeoutMs=timeout).execute() # If the result has rows, print the rows in the reply. if ('rows' in queryReply): print 'has a rows attribute' printTableData(queryReply, 0) currentRow = len(queryReply['rows']) # Loop through each page of data while ('rows' in queryReply and currentRow < queryReply['totalRows']): queryReply = jobCollection.getQueryResults( projectId=jobReference['projectId'], jobId=jobReference['jobId'], startIndex=currentRow).execute() if ('rows' in queryReply): printTableData(queryReply, currentRow) currentRow += len(queryReply['rows']) runSyncQuery(service, projectId, datasetId)
def read_credentials(filename):
    """Load service-account credentials from a JSON key file.

    Args:
        filename: path to the JSON key downloaded from the Google console.

    Returns:
        SignedJwtAssertionCredentials for the module-level SCOPE.
    """
    # BUG FIX: json.load(open(...)) leaked the file handle; the context
    # manager closes the key file promptly.
    with open(filename) as key_file:
        creds_data = json.load(key_file)
    return SignedJwtAssertionCredentials(creds_data['client_email'],
                                         creds_data['private_key'].encode(),
                                         SCOPE)
# NOTE(review): truncated snippet — the enclosing method (note the `self.`
# attributes being printed) starts before this view, so the code is left
# byte-identical and only this comment is added.  Visible work: prints task
# fields, reads a PKCS12 key (Python 2 `file()` builtin), builds
# SignedJwtAssertionCredentials with a private_key_password, and opens two
# spreadsheets via gspread — the first `wks` assignment ("API Test
# Spreadsheet") is immediately overwritten by "Household Tasks".
print '%s %s' % ('Who normally does it?:', self.WhoNormallyDoes) print '%s %s' % ('How often are we doing it?:', self.HowOftenAreWeDoingIt) scope = [ 'https://spreadsheets.google.com/feeds', 'https://docs.google.com/feeds' ] f = file('Household Todo-aafb6e63aee2.p12', 'rb') key = f.read() f.close() credentials = SignedJwtAssertionCredentials( service_account_name= '*****@*****.**', private_key=key, scope=scope, private_key_password='******') gc = gspread.authorize(credentials) wks = gc.open("API Test Spreadsheet").sheet1 wks = gc.open("Household Tasks").sheet1 #wks=gc.openall() #print len(wks) #wks=gc.open_by_url('https://docs.google.com/spreadsheets/d/1KqEFPnOxLt3-Ls1xsiRTLIDZI8lJ0Jlp3bD79Fzwi1g/edit')
# helpers
def print_list_as_columns(l):
    """Print the items of *l* on a single line, separated by ' | '."""
    for entry in l:
        print(entry, "| ", end="")
    print()


SCOPE = ["https://spreadsheets.google.com/feeds"]
SECRETS_FILE = "secretKey.json"
SPREADSHEET = "Responses"

# Based on docs here - http://gspread.readthedocs.org/en/latest/oauth2.html
# Load in the secret JSON key (must be a service account).
# BUG FIX: json.load(open(...)) leaked the file handle; use a context manager.
with open(SECRETS_FILE) as key_file:
    json_key = json.load(key_file)

# Authenticate using the signed key
credentials = SignedJwtAssertionCredentials(json_key['client_email'],
                                            json_key['private_key'], SCOPE)
gc = gspread.authorize(credentials)

# List every sheet the service account can see, keeping their ids in order.
print("The following sheets are available")
sheet_id_list = []
for sheet in gc.openall():
    print("{} - {}".format(sheet.title, sheet.id))
    sheet_id_list.append(sheet.id)

# Open the first available sheet (raises IndexError if the account sees none).
response_spreadsheet = gc.open_by_key(sheet_id_list[0])

#h ttps://stackoverflow.com/questions/33713084/download-link-for-google-spreadsheets-csv-export-with-multiple-sheets
response_csv = "https://docs.google.com/spreadsheets/d/1AX8I4ts1VPyyCDxizclkyIvVHNz1M43ae2YxZANK4pQ/gviz/tq?tqx=out:csv&sheet={sheetname}"
# NOTE(review): truncated snippet — the final `with open(...) as f:` has no
# visible body (it continues past this view), so the code is left
# byte-identical and only this comment is added.  Visible work: loads a
# service-account JSON key, builds analytics-readonly credentials, defines
# get_access_token() (token auto-refreshes when expired, per oauth2client),
# and prints a fresh access token before writing it to a file.  Python 2
# print statements.
import json from oauth2client.client import SignedJwtAssertionCredentials # The scope for the OAuth2 request. SCOPE = 'https://www.googleapis.com/auth/analytics.readonly' # The location of the key file with the key data. KEY_FILEPATH = '/Users/haroon/Downloads/login-fdf588762877.json' # Load the key file's private data. with open(KEY_FILEPATH) as key_file: _key_data = json.load(key_file) # Construct a credentials objects from the key data and OAuth2 scope. _credentials = SignedJwtAssertionCredentials(_key_data['client_email'], _key_data['private_key'], SCOPE) # Defines a method to get an access token from the credentials object. # The access token is automatically refreshed if it has expired. def get_access_token(): return _credentials.get_access_token().access_token print "haroon" at = get_access_token() print at with open( '/home2/sanjeedh/public_html/sanjeedhasanofer/marketplace/access_token.txt', 'w') as f:
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
import xml.etree.ElementTree as ET
import re
import json

# Service-account identity and the feeds this client is allowed to touch.
client_email = '*****@*****.**'
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://docs.google.com/feeds'
]

# Read the PEM private key for the service account.
with open("privatekey.pem") as f:
    private_key = f.read()

# Authorize an http channel with the signed-JWT credentials.
credentials = SignedJwtAssertionCredentials(client_email, private_key, scope)
http = credentials.authorize(httplib2.Http())

# XML namespaces used by the legacy spreadsheets Atom feed.
namespaces = {
    'ns0': "http://www.w3.org/2005/Atom",
    'ns1': "http://a9.com/-/spec/opensearchrss/1.0/",
    'ns2': "http://schemas.google.com/spreadsheets/2006"
}


def GetXML(url):
    """Fetch *url* over the authorized channel and parse the body as XML."""
    headers, body = http.request(url)
    return ET.fromstring(body)
def syncSpreadSheets(
        dbUtils,
        spreadSheetKey='1fXS6D8crBo9p-xWyFG4keqHI5P8-9qqi230IKlcw5Iw',
        syncSpecific=["tags"],
        excludeSheets=[]):
    """Sync the tags/quiz/categories/questions/badges worksheets into the DB.

    Args:
        dbUtils: database helper exposing addOrModify{Tag,Quiz,Category,
            Question,Badge}(**row); each returns truthy when the row was
            written and its dirty flag should be cleared in the sheet.
        spreadSheetKey: key of the Google spreadsheet to sync from.
        syncSpecific: when non-empty, only worksheet titles (lower-cased) in
            this list are synced.  NOTE(review): mutable defaults kept for
            interface compatibility — never mutate them.
        excludeSheets: lower-cased worksheet titles to skip.

    Rows are pushed when IS_NEW_DB is set or the row's "isDirty" cell is
    truthy; on success the row's last cell is reset to 0.
    """

    def _pick_sheet(prefix):
        # Last worksheet whose lower-cased title starts with `prefix`,
        # honouring the syncSpecific whitelist and excludeSheets blacklist.
        # BUG FIX: the original tested `i.title.lower in excludeSheets` — a
        # bound method, never present in the list — so excludeSheets was
        # silently ignored.
        chosen = None
        for ws in worksheets:
            title = ws.title.lower()
            if (title.startswith(prefix)
                    and (not syncSpecific or title in syncSpecific)
                    and title not in excludeSheets):
                chosen = ws
        return chosen

    def _push_rows(worksheet, add_or_modify, qualify=None):
        # Shared per-sheet loop (the original copy-pasted it five times):
        # optionally rewrite the row's id, then write dirty rows (or all rows
        # for a fresh DB) and clear their dirty flag on success.
        records = worksheet.get_all_records()
        for offset, row in enumerate(records):
            if qualify is not None:
                qualify(worksheet, row)
            if IS_NEW_DB or row.get("isDirty", False):
                print(row)
                if add_or_modify(**row):
                    # +2 skips the header row and converts to 1-based cells.
                    worksheet.update_cell(offset + 2, len(row.keys()), 0)

    def _prefix_id(field):
        # Namespace the row id with the worksheet-title suffix after the
        # first underscore, e.g. sheet "questions_math" + id 3 -> "math_3".
        def qualify(worksheet, row):
            row[field] = "_".join(worksheet.title.lower().split("_")[1:]) \
                + "_" + str(row[field])
        return qualify

    # Close the credentials file promptly (json.load(open(...)) leaked it).
    with open('config_files/credentials.json') as cred_file:
        credentials_data = json.load(cred_file)
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = SignedJwtAssertionCredentials(
        credentials_data['client_email'], credentials_data['private_key'],
        scope)
    gc = gspread.authorize(credentials)
    wb = gc.open_by_key(spreadSheetKey)
    worksheets = wb.worksheets()

    _push_rows(_pick_sheet('tags'), dbUtils.addOrModifyTag)
    _push_rows(_pick_sheet('quiz'), dbUtils.addOrModifyQuiz)
    _push_rows(_pick_sheet('categories'), dbUtils.addOrModifyCategory)
    _push_rows(_pick_sheet('questions'), dbUtils.addOrModifyQuestion,
               _prefix_id("questionId"))
    _push_rows(_pick_sheet('badges'), dbUtils.addOrModifyBadge,
               _prefix_id("badgeId"))
# NOTE(review): truncated snippet — the top of the function that `return
# credentials` belongs to is not visible from here, so the code is left
# byte-identical and only this comment is added.  Visible work: authorizes
# httplib2 with existing credentials, reads a PKCS12 key (Python 2 `file()`
# builtin), builds Drive-scoped SignedJwtAssertionCredentials, wires them
# into a PyDrive GoogleAuth/GoogleDrive pair and lists the non-trashed files
# under 'root'.  Large commented-out experiments are preserved as-is.
http = httplib2.Http() http = credentials.authorize(http) # return build('drive', 'v2', http=http) return credentials # gauth = GoogleDrive() # gauth.credentials = createDriveService() # drive = GoogleDrive(gauth) f = file(SERVICE_ACCOUNT_PKCS12_FILE_PATH, 'rb') key = f.read() f.close() # no work: # http = httplib2.Http() # body = {'approval_prompt':'force'} # response, content = http.request('https://accounts.google.com/o/oauth2/auth?client_id=202406790888-1s6kihv5lmo8leqkbqamgl651sucotan.apps.googleusercontent.com&scope=https://www.googleapis.com/auth/drive&response_type=code&approval_prompt=force', 'GET') # print(response) credentials = SignedJwtAssertionCredentials(SERVICE_ACCOUNT_EMAIL, key, scope="https://www.googleapis.com/auth/drive" ) credentials.authorize(httplib2.Http()) gauth = GoogleAuth() gauth.credentials = credentials drive = GoogleDrive(gauth) file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList() for file1 in file_list: print('title: %s, id: %s' % (file1['title'], file1['id']))
# NOTE(review): truncated snippet — download_file()'s body continues past
# this view (its docstring is cut mid-sentence), so the code is left
# byte-identical and only this comment is added.  Visible work: ensures a
# ../UserData folder exists, builds Drive-scoped credentials from config.id /
# config.key (note: `id` shadows the builtin — TODO confirm before renaming),
# wires a PyDrive GoogleAuth into an apiclient Drive service and queries for
# files titled 'Sleep as Android Data'.
import httplib2 import os # create a config.py file in the same folder, where you save your id and key for the google drive api (see below for more details) import config # if a folder called "UserData" does not exist yet, make one if not os.path.exists('../UserData'): os.makedirs('../UserData') # from http://stackoverflow.com/questions/22555433/pydrive-and-google-drive-automate-verification-process # from google API console # In a config.py file, replace id with "client_email" and key with "private_key" from the downloaded json file with the key/value pair id = config.id key = config.key credentials = SignedJwtAssertionCredentials( id, key, scope='https://www.googleapis.com/auth/drive') credentials.authorize(httplib2.Http()) gauth = GoogleAuth() gauth.credentials = credentials http = httplib2.Http() http = gauth.credentials.authorize(http) drive_service = build('drive', 'v2', http=http) r = drive_service.files().list(q="title = 'Sleep as Android Data'").execute() def download_file(service, drive_file): """Download a file's content.